#include <cmath>

#include <QList>
#include <QtConcurrent>

#include "../raytracer/raytracer.h"
#include "../vec4ops/vec4ops.h"
#include "settings.h"
// one work item per image sample: the camera-space ray plus the state needed to trace it
struct pixelRoutineArgs {
    glm::vec4 pCamera;          // ray origin in camera space
    glm::vec4 dCamera;          // ray direction in camera space
    const RayTraceScene &scene; // scene being rendered
    RayTracer *rt;              // raytracer instance (settings and helper methods)
};
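
// traces a single sample; used as the mapping functor for QtConcurrent::blockingMapped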
static RGBA pixelRoutine(pixelRoutineArgs args);

void RayTracer::renderParallel(RGBA *imageData, const RayTraceScene &scene)
{
    Camera camera = scene.getCamera();
    float cameraDepth = 1.f;
    // viewplane dimensions: the height comes from the vertical field of view,
    // and the width/depth scale it by the corresponding aspect ratios
    float viewplaneHeight = 2.f * cameraDepth * std::tan(camera.getHeightAngle() / 2.f);
    float viewplaneWidth = viewplaneHeight * ((float)scene.width() / (float)scene.height());
    float viewplaneDepth = viewplaneHeight * ((float)scene.depth() / (float)scene.height());

    // one work item per sample of the width x height x depth image volume
    QList<pixelRoutineArgs> l{};
    l.reserve(scene.width() * scene.height() * scene.depth());

    // the depth loop is outermost so that each depth slice is contiguous in the mapped results
    for (int imageDepth = 0; imageDepth < scene.depth(); imageDepth++) {
        for (int imageRow = 0; imageRow < scene.height(); imageRow++) {
            for (int imageCol = 0; imageCol < scene.width(); imageCol++) {
                // camera-space offsets of this sample on the viewplane
                float x = (imageCol - scene.width() / 2.f) * viewplaneWidth / scene.width();
                float y = (imageRow - scene.height() / 2.f) * viewplaneHeight / scene.height();
                float z = (imageDepth - scene.depth() / 2.f) * viewplaneDepth / scene.depth();
                glm::vec4 eyeCamera{0.f};                // ray origin: the camera position in camera space
                glm::vec4 pixelDirCamera{x, y, z, -1.f}; // ray direction through the sample; the camera looks along -w
                l.append(pixelRoutineArgs{eyeCamera, pixelDirCamera, scene, this});
            }
        }
    }
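
    // blockingMapped preserves the order of the input list, so result index
    // imageDepth * width * height + imageRow * width + imageCol corresponds to that sample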
    QList<RGBA> pixels = QtConcurrent::blockingMapped(l, pixelRoutine);

    // copy the slice at currentSlice (depth 0 for now) into the image data array
    // int currentSlice = settings.w + 100.f * (5.f / 2.f);
    int currentSlice = 0;
    int ptr = currentSlice * scene.width() * scene.height();
    for (int i = 0; i < scene.width() * scene.height(); i++) {
        imageData[i] = pixels[ptr + i];
    }

    if (m_enableAntiAliasing)
    {
        filterBlur(imageData, scene.width(), scene.height());
    }
}
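
// Usage sketch (hypothetical caller; the output buffer and scene setup live elsewhere):
//   std::vector<RGBA> frame(scene.width() * scene.height());
//   raytracer.renderParallel(frame.data(), scene);
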
RGBA pixelRoutine(pixelRoutineArgs args)
{
    auto eyeCamera = args.pCamera;
    auto pixelDirCamera = args.dCamera;
    const RayTraceScene &scene = args.scene; // bind by reference; `auto` here would copy the scene for every sample
    RayTracer *rt = args.rt;
    // convert the camera-space ray to world space via the 4D transform helpers
    Camera camera = scene.getCamera();
    glm::vec4 pWorld = Vec4Ops::transformPoint4(eyeCamera, camera.getViewMatrix(), camera.getTranslationVector());
    glm::vec4 dWorld = glm::normalize(Vec4Ops::transformDir4(pixelDirCamera, camera.getViewMatrix()));
    if (rt->m_enableDepthOfField)
    {
        // if we're doing depth of field, we need to shoot multiple rays, see camera.cpp
        return RayTracer::toRGBA(rt->secondaryRays(pWorld, dWorld, scene));
    }
    if (rt->m_enableSuperSample)
    {
        // if we're doing super sampling, we need to shoot multiple rays, see raytracer.cpp
        return rt->superSample(eyeCamera, pixelDirCamera, scene);
    }
    // shoot ray!
    RGBA pixel = RayTracer::toRGBA(rt->getPixelFromRay(pWorld, dWorld, scene, 0));
    return pixel;
}