aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorsotech117 <michael_foiani@brown.edu>2023-12-07 16:23:20 -0500
committersotech117 <michael_foiani@brown.edu>2023-12-07 16:23:20 -0500
commitcaa765bff49d54217b75aaf0e7acf4e5392a11e4 (patch)
tree9b92914dfb88b99599e8e60e4512e9e9ea9a25db /src
parenta9274459443f1d560d7580a162deb581549980cb (diff)
upload base code
Diffstat (limited to 'src')
-rw-r--r--src/.DS_Storebin0 -> 8196 bytes
-rw-r--r--src/accelerate/bvh.cpp139
-rw-r--r--src/accelerate/bvh.h20
-rw-r--r--src/accelerate/kdtree.cpp273
-rw-r--r--src/accelerate/kdtree.h53
-rw-r--r--src/accelerate/myqtconcurrent.cpp80
-rw-r--r--src/accelerate/myqthreads.cpp130
-rw-r--r--src/aliasing/filter.cpp114
-rw-r--r--src/aliasing/supersample.cpp119
-rw-r--r--src/camera/camera.cpp72
-rw-r--r--src/camera/camera.h49
-rw-r--r--src/illuminate/illuminate.cpp304
-rw-r--r--src/illuminate/reflect.cpp115
-rw-r--r--src/illuminate/shadow.cpp58
-rw-r--r--src/intersect/intersect.cpp265
-rw-r--r--src/intersect/normals.cpp97
-rw-r--r--src/main.cpp86
-rw-r--r--src/raytracer/raytracer.cpp150
-rw-r--r--src/raytracer/raytracer.h140
-rw-r--r--src/raytracer/raytracescene.cpp56
-rw-r--r--src/raytracer/raytracescene.h42
-rw-r--r--src/texture/texture.cpp180
-rw-r--r--src/utils/raytracerutils.cpp21
-rw-r--r--src/utils/rgba.h10
-rw-r--r--src/utils/scenedata.h179
-rw-r--r--src/utils/scenefilereader.cpp1073
-rw-r--r--src/utils/scenefilereader.h50
-rw-r--r--src/utils/sceneparser.cpp136
-rw-r--r--src/utils/sceneparser.h31
29 files changed, 4042 insertions, 0 deletions
diff --git a/src/.DS_Store b/src/.DS_Store
new file mode 100644
index 0000000..cef448c
--- /dev/null
+++ b/src/.DS_Store
Binary files differ
diff --git a/src/accelerate/bvh.cpp b/src/accelerate/bvh.cpp
new file mode 100644
index 0000000..ce104a0
--- /dev/null
+++ b/src/accelerate/bvh.cpp
@@ -0,0 +1,139 @@
+#include "raytracer/raytracer.h"
+#include "bvh.h"
+
+bvh::bvh(
+ const std::vector<KdShape>& p_shapes,
+ int p_dimension)
+{
+ dimension = p_dimension;
+ if (p_shapes.empty()) {
+ return;
+ }
+
+ // compute the new bouding ragion from the shapes and add shapes to the root
+ BoundingRegion tmp {
+ glm::vec4(-FINF, -FINF, -FINF, 1.f),
+ glm::vec4(FINF, FINF, FINF, 1.f),
+ glm::vec4(-FINF, -FINF, -FINF, 1.f),
+ glm::vec4(FINF, FINF, FINF, 1.f),
+ glm::vec4(-FINF, -FINF, -FINF, 1.f),
+ glm::vec4(FINF, FINF, FINF, 1.f),
+ glm::vec4(0.f, 0.f, 0.f, 1.f)
+ };
+ shapes = std::vector<KdShape>();
+ for (const auto& shape : p_shapes) {
+ tmp.xmax = glm::max(tmp.xmax, shape.region.xmax);
+ tmp.xmin = glm::min(tmp.xmin, shape.region.xmin);
+ tmp.ymax = glm::max(tmp.ymax, shape.region.ymax);
+ tmp.ymin = glm::min(tmp.ymin, shape.region.ymin);
+ tmp.zmax = glm::max(tmp.zmax, shape.region.zmax);
+ tmp.zmin = glm::min(tmp.zmin, shape.region.zmin);
+ tmp.center.x = (tmp.xmax.x + tmp.xmin.x) / 2.f;
+ tmp.center.y = (tmp.ymax.y + tmp.ymin.y) / 2.f;
+ tmp.center.z = (tmp.zmax.z + tmp.zmin.z) / 2.f;
+
+ shapes.push_back(shape);
+ }
+ region = tmp;
+
+ // split the shapes into two groups, if more than two shapes
+ if (shapes.size() <= 2) {
+ return;
+ }
+ std::vector<KdShape> leftShapes;
+ std::vector<KdShape> rightShapes;
+ for (const auto& shape : shapes) {
+ if (shape.region.center[dimension] < region.center[dimension]) {
+ leftShapes.push_back(shape);
+ }
+ else if (shape.region.center[dimension] > region.center[dimension]) {
+ rightShapes.push_back(shape);
+ } else {
+ if (leftShapes.size() < rightShapes.size()) {
+ leftShapes.push_back(shape);
+ } else {
+ rightShapes.push_back(shape);
+ }
+ }
+ }
+
+ // make the children
+ leftChild = new bvh(leftShapes, (dimension + 1) % 3);
+ rightChild = new bvh(rightShapes, (dimension + 1) % 3);
+}
+
/**
 * Slab test: intersects the ray p + t*d with an axis-aligned bounding region.
 * Returns the parametric entry distance t into the box, or FINF on a miss.
 *
 * NOTE(review): relies on IEEE infinity semantics when a component of d is 0;
 * if p additionally lies exactly on that slab the division yields NaN —
 * confirm callers never produce such rays.
 * NOTE(review): a box entirely behind the ray origin returns a negative tMin
 * rather than FINF — confirm callers treat that as intended.
 */
float intersectRegion(
    glm::vec4 p,
    glm::vec4 d,
    BoundingRegion region)
{
    // parametric distances to each of the six slab planes
    float tXmin = (region.xmin.x - p.x) / d.x;
    float tXmax = (region.xmax.x - p.x) / d.x;
    float tYmin = (region.ymin.y - p.y) / d.y;
    float tYmax = (region.ymax.y - p.y) / d.y;
    float tZmin = (region.zmin.z - p.z) / d.z;
    float tZmax = (region.zmax.z - p.z) / d.z;

    // latest entry across the three axes, and earliest exit
    float tMin = std::max(std::max(std::min(tXmin, tXmax), std::min(tYmin, tYmax)), std::min(tZmin, tZmax));
    float tMax = std::min(std::min(std::max(tXmin, tXmax), std::max(tYmin, tYmax)), std::max(tZmin, tZmax));

    if (tMin > tMax) {
        return FINF; // per-axis intervals don't overlap: no intersection
    }
    return tMin;
}
+
+float RayTracer::traverseBVH(
+ glm::vec4 p,
+ glm::vec4 d,
+ RenderShapeData &testShape,
+ bvh *root)
+{
+ std::vector<bvh*> stack = std::vector<bvh*>();
+ stack.push_back(root);
+ float minT = FINF;
+
+ while (!stack.empty())
+ {
+ auto current = *stack.back();
+ stack.pop_back();
+
+ if (current.leftChild == nullptr && current.rightChild == nullptr) {
+ for (const auto &shape: current.shapes) {
+ glm::vec4 pObject = shape.shape.inverseCTM * p;
+ glm::vec4 dObject = glm::normalize(shape.shape.inverseCTM * d);
+
+ glm::vec4 intersection = findIntersection(pObject, dObject, shape.shape);
+ if (intersection.w == 0.f) {
+ continue;
+ }
+ intersection = shape.shape.ctm * intersection;
+ // check within bounds
+ float tWorld = (intersection.x - p.x) / d.x;
+ if (tWorld < minT)
+ {
+ minT = tWorld;
+ testShape = shape.shape;
+ }
+ }
+ } else {
+ float leftIntersect = intersectRegion(p, d, current.leftChild->region);
+ float rightIntersect = intersectRegion(p, d, current.rightChild->region);
+ if (leftIntersect != FINF && rightIntersect != FINF) {
+ if (leftIntersect < rightIntersect) {
+ stack.push_back(current.rightChild);
+ stack.push_back(current.leftChild);
+ } else {
+ stack.push_back(current.leftChild);
+ stack.push_back(current.rightChild);
+ }
+ } else if (leftIntersect != FINF) {
+ stack.push_back(current.leftChild);
+ } else if (rightIntersect != FINF) {
+ stack.push_back(current.rightChild);
+ }
+ }
+ }
+
+ return minT;
+} \ No newline at end of file
diff --git a/src/accelerate/bvh.h b/src/accelerate/bvh.h
new file mode 100644
index 0000000..062f748
--- /dev/null
+++ b/src/accelerate/bvh.h
@@ -0,0 +1,20 @@
+#include "raytracer/raytracer.h"
+
+#ifndef PROJECTS_RAY_BVH_H
+
+class bvh
+{
+public:
+ bvh(const std::vector<KdShape> &shapes, int dimension);
+
+ std::vector<KdShape> shapes;
+ int dimension;
+ BoundingRegion region{};
+ bvh *leftChild;
+ bvh *rightChild;
+};
+
+
+#define PROJECTS_RAY_BVH_H
+
+#endif //PROJECTS_RAY_BVH_H
diff --git a/src/accelerate/kdtree.cpp b/src/accelerate/kdtree.cpp
new file mode 100644
index 0000000..4156c98
--- /dev/null
+++ b/src/accelerate/kdtree.cpp
@@ -0,0 +1,273 @@
+#include "kdtree.h"
+#include "raytracer/raytracer.h"
+
// Constructor
// Creates an empty node that splits space along pDimension (0 = x, 1 = y,
// 2 = z) at coordinate pSplitCoord. Children are created lazily by insert().
KdTree::KdTree(int pDimension, float pSplitCoord) {
    empty = true;
    dimension = pDimension;
    splitCoord = pSplitCoord;

    shapesWithinBounds = std::vector<KdShape>();

    leftChild = nullptr;
    rightChild = nullptr;
}
+
+void KdTree::insert(KdShape shape)
+{
+ // first, add shape to this node
+ shapesWithinBounds.push_back(shape);
+
+ if (empty) {
+ empty = false;
+ return;
+ }
+
+ int nextDimension = (dimension + 1) % 3;
+ if (dimension == 0) // x split
+ {
+ if (
+ shape.region.xmin.x > splitCoord
+ )
+ // bound box is strictly to the right, only add right
+ {
+ if (rightChild == nullptr) {
+ rightChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is y
+ }
+ }
+ else if (
+ shape.region.xmin.x < splitCoord
+ && shape.region.xmax.x > splitCoord
+ )
+ // bounding box overlaps center, need to add to both children
+ {
+ if (rightChild == nullptr) {
+ rightChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is y
+ }
+
+ if (leftChild == nullptr) {
+ leftChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is y
+ }
+
+ }
+ else if (
+ shape.region.xmax.x < splitCoord
+ )
+ // bounding box strictly to the left, only add left
+ {
+ if (leftChild == nullptr) {
+ leftChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is y
+ }
+ }
+ }
+
+ else if (dimension == 1) // y split
+ {
+ if (shape.region.ymin.y > splitCoord) {
+ if (rightChild == nullptr) {
+ rightChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is z
+ }
+ }
+ else if (
+ shape.region.ymin.y < splitCoord
+ && shape.region.ymax.y > splitCoord
+ ) {
+ if (rightChild == nullptr) {
+ rightChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is z
+ }
+
+ if (leftChild == nullptr) {
+ leftChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is z
+ }
+ }
+ else if (
+ shape.region.ymax.y < splitCoord
+ ) {
+ if (leftChild == nullptr) {
+ leftChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is z
+ }
+ }
+ }
+
+ else if (dimension == 2) // z split
+ {
+ if (shape.region.zmin.z > splitCoord) {
+ if (rightChild == nullptr) {
+ rightChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is x
+ }
+ }
+ else if (
+ shape.region.zmin.z < splitCoord
+ && shape.region.zmax.z > splitCoord
+ ) {
+ if (rightChild == nullptr) {
+ rightChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is x
+ }
+
+ if (leftChild == nullptr) {
+ leftChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is x
+ }
+ }
+ else if (
+ shape.region.zmax.z < splitCoord
+ ) {
+ if (leftChild == nullptr) {
+ leftChild = new KdTree(
+ nextDimension,
+ shape.region.center[nextDimension]); // next dim is x
+ }
+ }
+ }
+
+ // now, add shape to children
+ if (leftChild != nullptr) {
+ leftChild->insert(shape);
+ }
+ if (rightChild != nullptr) {
+ rightChild->insert(shape);
+ }
+}
+
/**
 * Transforms a bounding region by transformationMatrix and rebuilds an
 * axis-aligned region from the six transformed extreme points.
 *
 * @param basis per-axis sign/scale applied to candidate coordinates during
 *        the max/min comparisons (declared default: (1, 1, 1)).
 *
 * NOTE(review): each comparison scales the candidate coordinate by `basis`
 * but compares it against the *unscaled* stored extreme, and stores the whole
 * point rather than a per-axis coordinate — confirm this yields a
 * conservative AABB under rotation/reflection.
 */
BoundingRegion KdTree::transformBoundingRegion(BoundingRegion region, glm::mat4 transformationMatrix, glm::vec3 basis)
{
    // transform the six extreme points of the source region
    std::vector<glm::vec4> transformedPoints = std::vector<glm::vec4>();
    transformedPoints.push_back(transformationMatrix * region.xmax);
    transformedPoints.push_back(transformationMatrix * region.xmin);
    transformedPoints.push_back(transformationMatrix * region.ymax);
    transformedPoints.push_back(transformationMatrix * region.ymin);
    transformedPoints.push_back(transformationMatrix * region.zmax);
    transformedPoints.push_back(transformationMatrix * region.zmin);

    // maxima seeded at -inf, minima at +inf, so the first candidate always wins
    BoundingRegion transformedRegion{
        glm::vec4(-FINF),
        glm::vec4(FINF),
        glm::vec4(-FINF),
        glm::vec4(FINF),
        glm::vec4(-FINF),
        glm::vec4(FINF),
        glm::vec4(0.f)
    }; // just init values, will be set to be correct

    // these are the new bound points, but they may have been rotated or reflected
    // this also ensures axis aligned bounding boxes, given the dots with the basis
    for (glm::vec4 point: transformedPoints) {
        if (point.x * basis.x > transformedRegion.xmax.x) {
            transformedRegion.xmax = point;
        }
        if (point.x * basis.x < transformedRegion.xmin.x) {
            transformedRegion.xmin = point;
        }
        if (point.y * basis.y > transformedRegion.ymax.y) {
            transformedRegion.ymax = point;
        }
        if (point.y * basis.y < transformedRegion.ymin.y) {
            transformedRegion.ymin = point;
        }
        if (point.z * basis.z > transformedRegion.zmax.z) {
            transformedRegion.zmax = point;
        }
        if (point.z * basis.z < transformedRegion.zmin.z) {
            transformedRegion.zmin = point;
        }
    }

    // the region's center transforms directly
    transformedRegion.center = transformationMatrix * region.center;
    return transformedRegion;
}
+
// TODO: return the float with the shape
/**
 * Recursively traverses the k-d tree and returns the world-space parametric
 * distance of the closest accepted intersection along p + t*d, or FINF.
 * [tStart, tEnd] is the interval of the ray overlapping this node; testShape
 * receives the intersected shape on a hit.
 */
float RayTracer::traverse(
    glm::vec4 p,
    glm::vec4 d,
    float tStart,
    float tEnd,
    RenderShapeData &testShape,
    KdTree *tree)
{
    if (tree == nullptr) {
        return FINF;
    }

    // leaf node: few shapes, or no further split — test every shape directly
    if ( tree->shapesWithinBounds.size() <= 2 ||
        tree->leftChild == nullptr || tree->rightChild == nullptr)
    {
        float minT = FINF;
        for (const auto &shape: tree->shapesWithinBounds) {
            // intersect in object space, then map the hit back to world space
            glm::vec4 pObject = shape.shape.inverseCTM * p;
            glm::vec4 dObject = glm::normalize(shape.shape.inverseCTM * d);

            glm::vec4 intersection = findIntersection(pObject, dObject, shape.shape);
            if (intersection.w == 0.f) {
                continue; // w == 0 signals "no hit"
            }
            intersection = shape.shape.ctm * intersection;
            // accept only hits inside this shape's world-space bounding region
            if (
                intersection.x <= shape.region.xmax.x && intersection.x >= shape.region.xmin.x
                &&
                intersection.y <= shape.region.ymax.y && intersection.y >= shape.region.ymin.y
                &&
                intersection.z <= shape.region.zmax.z && intersection.z >= shape.region.zmin.z
                )
            {
                // NOTE(review): dividing by d.x is inf/NaN for rays with
                // d.x == 0 — consider projecting (intersection - p) onto d.
                float tWorld = (intersection.x - p.x) / d.x;
                if (tWorld < minT) {
                    minT = tWorld;
                    testShape = shape.shape;
                }
            }
        }
        return minT;
    }

    // solve for t, only in current 1d-dimension
    float t = (tree->splitCoord - p[tree->dimension]) / d[tree->dimension];

    // Three cases (matching the code below):
    // 1) the ray crosses the split plane before tStart -> only the right
    //    (far) child can contain a hit in [tStart, tEnd]
    // 2) the ray crosses the split plane after tEnd -> only the left (near)
    //    child can contain a hit
    // 3) the plane is crossed inside [tStart, tEnd] -> traverse both, nearer
    //    (left) child first; a hit in front of the plane lets us skip the
    //    right child entirely.

    if (t <= tStart && tree->rightChild != nullptr) // case 1)
    {
        return traverse(p, d, tStart, tEnd, testShape, tree->rightChild);
    }
    else if (t >= tEnd && tree->leftChild != nullptr) // case 2)
    {
        return traverse(p, d, tStart, tEnd, testShape, tree->leftChild);
    }
    else // case 3)
    {
        float t_hit = traverse(p, d, tStart, t, testShape, tree->leftChild);
        if (t_hit < t)
        { // this is where we save time!
            return t_hit;
        }
        return traverse(p, d, t, tEnd, testShape, tree->rightChild);
    }
}
diff --git a/src/accelerate/kdtree.h b/src/accelerate/kdtree.h
new file mode 100644
index 0000000..e33aa59
--- /dev/null
+++ b/src/accelerate/kdtree.h
@@ -0,0 +1,53 @@
#ifndef KDTREE_H
#define KDTREE_H
#include "utils/sceneparser.h"
#include <queue>
#include <vector>

// Axis-aligned bounding region described by six extreme points (one per
// face) plus the center; only the matching component of each extreme
// (e.g. xmax.x) is meaningful for bounds tests.
typedef struct {
    glm::vec4 xmax;
    glm::vec4 xmin;
    glm::vec4 ymax;
    glm::vec4 ymin;
    glm::vec4 zmax;
    glm::vec4 zmin;
    glm::vec4 center;
} BoundingRegion;

// A renderable shape paired with its world-space bounding region.
typedef struct KdShape
{
    RenderShapeData shape;
    BoundingRegion region;
} KdShape;

// k-d tree over shapes; children are created lazily by insert().
// NOTE(review): child nodes are allocated with `new` and never freed (no
// destructor) — confirm lifetime is handled by the owner or accept the leak.
class KdTree
{
public:

    KdTree(int pDimension, float pSplitCoord);

    bool empty;          // true until the first shape is inserted
    int dimension;       // split axis: 0 = x, 1 = y, 2 = z
    float splitCoord;    // coordinate of the split plane along `dimension`
    std::vector<KdShape> shapesWithinBounds; // all shapes routed through this node
    void insert(KdShape shape);

    KdTree *leftChild;
    KdTree *rightChild;

    // todo: make basis a matrix
    static BoundingRegion transformBoundingRegion(BoundingRegion region, glm::mat4 transformationMatrix, glm::vec3 basis=glm::vec3(1.0f, 1.0f, 1.0f));
};

// Canonical object-space bounds of the unit primitives: extent [-0.5, 0.5]
// along each axis, centered at the origin.
const static BoundingRegion OBJECT_BOUNDS{
    glm::vec4(.5f, 0.f, 0.f, 1.f),
    glm::vec4(-.5f, 0.f, 0.f, 1.f),
    glm::vec4(0.f, .5f, 0.f, 1.f),
    glm::vec4(0.f, -.5f, 0.f, 1.f),
    glm::vec4(0.f, 0.f, .5f, 1.f),
    glm::vec4(0.f, 0.f, -.5f, 1.f),
    glm::vec4(0.f, 0.f, 0.f, 1.f)
};


#endif // KDTREE_H
diff --git a/src/accelerate/myqtconcurrent.cpp b/src/accelerate/myqtconcurrent.cpp
new file mode 100644
index 0000000..1dff0e0
--- /dev/null
+++ b/src/accelerate/myqtconcurrent.cpp
@@ -0,0 +1,80 @@
+
+#include <QList>
+#include <QtConcurrent>
+#include "raytracer/raytracer.h"
+
// Work item for one pixel: camera-space eye point and ray direction, plus
// the scene and the tracer that will shade it.
// NOTE(review): `scene` is a reference member and `rt` is copied by value
// into every one of width*height items — confirm RayTracer is cheap to copy.
struct pixelRoutineArgs {
    glm::vec4 pCamera;
    glm::vec4 dCamera;
    const RayTraceScene &scene;
    RayTracer rt;
};
// Computes the final color of one pixel; mapped over all pixels below.
static RGBA pixelRoutine(pixelRoutineArgs args);
+
+void RayTracer::renderParallel(RGBA *imageData, const RayTraceScene &scene)
+{
+ Camera camera = scene.getCamera();
+ float cameraDepth = 1.f;
+ float viewplaneHeight = 2.f*cameraDepth*std::tan(camera.getHeightAngle() / 2.f);
+ float viewplaneWidth = cameraDepth*viewplaneHeight*((float)scene.width()/(float)scene.height());
+
+ QList<pixelRoutineArgs> l{};
+ for (int imageRow = 0; imageRow < scene.height(); imageRow++) {
+ for (int imageCol = 0; imageCol < scene.width(); imageCol++) {
+ float xCameraSpace = viewplaneWidth *
+ (-.5f + (imageCol + .5f) / scene.width());
+ float yCameraSpace = viewplaneHeight *
+ (-.5f + (imageRow + .5f) / scene.height());
+
+ glm::vec4 pixelDirCamera{xCameraSpace, -yCameraSpace, -cameraDepth, 0.f}; //w=0 for dir
+ glm::vec4 eyeCamera{0.f, 0.f, 0.f, 1.f}; // w=1.f for point
+ l.append({
+ eyeCamera, // eye
+ pixelDirCamera, // direction
+ scene,
+ *this
+ });
+
+ }
+ }
+ QList<RGBA> pixels = QtConcurrent::blockingMapped(l, pixelRoutine);
+ QtConcurrent::blockingMap(l, pixelRoutine);
+ int index = 0;
+ for (RGBA p : pixels) {
+ imageData[index++] = p;
+ }
+
+ if (m_config.enableAntiAliasing)
+ {
+ filterBlur(imageData, scene.width(), scene.height());
+ }
+}
+

/**
 * Computes the color of a single pixel: converts the camera-space ray to
 * world space, then dispatches to depth of field, super sampling, or a
 * single primary ray depending on the tracer's config.
 */
RGBA pixelRoutine(pixelRoutineArgs args)
{
    auto eyeCamera = args.pCamera;
    auto pixelDirCamera = args.dCamera;
    auto scene = args.scene; // NOTE(review): copies the scene per pixel — confirm RayTraceScene is cheap to copy
    auto rt = args.rt;

    // convert camera space to world space
    auto inv = scene.getCamera().getInverseViewMatrix();
    glm::vec4 pWorld = inv * eyeCamera;
    glm::vec4 dWorld = glm::normalize(inv * pixelDirCamera);

    if (rt.m_config.enableDepthOfField)
    {
        // if we're doing depth of field, we need to shoot multiple rays, see camera.cpp
        return RayTracer::toRGBA(rt.secondaryRays(pWorld, dWorld, scene));
    }
    if (rt.m_config.enableSuperSample)
    {
        // if we're doing super sampling, we need to shoot multiple rays, see raytracer.cpp
        // NOTE(review): passes the *camera-space* ray, unlike the branches
        // above/below which use world space — confirm intended
        return rt.superSample(eyeCamera, pixelDirCamera, scene);
    }

    // shoot ray!
    RGBA pixel = RayTracer::toRGBA(rt.getPixelFromRay(pWorld, dWorld, scene, 0));
    return pixel;
}
diff --git a/src/accelerate/myqthreads.cpp b/src/accelerate/myqthreads.cpp
new file mode 100644
index 0000000..ead3aec
--- /dev/null
+++ b/src/accelerate/myqthreads.cpp
@@ -0,0 +1,130 @@
+#include "raytracer/raytracer.h"
+#include <QThread>
+
+/**
+ * Extra credit -> own implementation of multithreading using QThreads.
+ * NOT USED for illuminate (not any faster than QT's version), but was used in intersect.
+ */
+
+//struct intersectRoutineArgs {
+// RenderShapeData shape;
+// glm::vec4 pWorld;
+// glm::vec4 dWorld;
+//};
+//
+//struct intersectData {
+// float distance;
+// glm::vec4 intersectionWorld;
+// glm::vec4 intersectionObj;
+// RenderShapeData intersectedShape;
+//};
+//
+//Q_DECLARE_METATYPE(intersectData);
+//
+//class IntersectWorker : public QThread
+//{
+// Q_OBJECT
+// void run() override {
+// exec();
+// /* ... here is the expensive or blocking operation ... */
+// glm::vec4 pObject = glm::inverse(a.shape.ctm) * a.pWorld;
+// glm::vec4 dObject = glm::normalize(glm::inverse(a.shape.ctm) * a.dWorld);
+//
+// glm::vec4 intersectionObj = RayTracer::findIntersection(pObject, dObject, a.shape);
+// if (intersectionObj.w == 0) // no hit
+// {
+// const intersectData response{
+// FINF,
+// glm::vec4(0.f),
+// glm::vec4(0.f),
+// a.shape
+// };
+// ps.append(response);
+// emit data(response);
+// } else {
+// auto intersectionWorld = a.shape.ctm * intersectionObj;
+// float distance = glm::distance(intersectionWorld, a.pWorld);
+//
+// const intersectData response{
+// distance,
+// intersectionWorld,
+// intersectionObj,
+// a.shape
+// };
+// ps.append(response);
+// emit data(response);
+// }
+// emit finished();
+// }
+//public:
+// intersectRoutineArgs a;
+// QList<intersectData> &ps;
+// IntersectWorker(intersectRoutineArgs args, QList<intersectData> &p) : ps(p)
+// {
+// a = args;
+// }
+// signals:
+// void data(const intersectData &s);
+// void finished();
+//};
+//
+//
+//class IntersectController : public QObject
+//{
+// Q_OBJECT
+//public:
+// std::vector<QThread*> qthreads;
+// QList<intersectData> intersectPoints;
+// IntersectController(const std::vector<RenderShapeData> &shapes, glm::vec4 pWorld, glm::vec4 dWorld) {
+// qRegisterMetaType<const intersectData&>("myType");
+// int id = 0;
+// for (const RenderShapeData &shape: shapes) {
+// const intersectRoutineArgs threadArgs{shape, pWorld, dWorld};
+// IntersectWorker *thread = new IntersectWorker(threadArgs, intersectPoints);
+//
+// connect(thread, &IntersectWorker::data, this, &IntersectController::addIntersectionPoint);
+// connect(thread, &IntersectWorker::finished, thread, &QThread::quit);
+//
+// connect(thread, &IntersectWorker::finished, thread, &QThread::deleteLater);
+//
+// qthreads.push_back(thread);
+// }
+// }
+// ~IntersectController() {
+// for (QThread* workerThread: qthreads) {
+// workerThread->exit();
+// }
+// qthreads.clear();
+// intersectPoints.clear();
+// }
+// void getClosestIntersection(float &minDist, glm::vec4 &closestIntersectionWorld, glm::vec4 &closestIntersectionObj, RenderShapeData intersectedShape) {
+// // start then wait
+// for (QThread* thread: qthreads) {
+// thread->start();
+// }
+// for (QThread* thread: qthreads) {
+// thread->quit();
+// thread->wait();
+// }
+//
+//
+// // once all threads are done, find the closest
+// for (const intersectData &i : intersectPoints) {
+// if (i.distance < minDist) {
+// minDist = i.distance;
+//
+// intersectedShape = i.intersectedShape;
+// closestIntersectionObj = i.intersectionObj;
+// closestIntersectionWorld = i.intersectionWorld;
+// }
+// }
+//}
+//public slots:
+// void addIntersectionPoint(const intersectData &s) {
+// intersectPoints.append(s);
+// }
+// signals:
+// void operate(intersectRoutineArgs a);
+//};
+//
+//#include "myqthreads.moc" \ No newline at end of file
diff --git a/src/aliasing/filter.cpp b/src/aliasing/filter.cpp
new file mode 100644
index 0000000..1732dc4
--- /dev/null
+++ b/src/aliasing/filter.cpp
@@ -0,0 +1,114 @@
+#include "raytracer/raytracer.h"
+
+/**
+ * Extra credit.
+ * Code from filter project to offer antialiasing.
+ * FilterBlur at bottom of file used in raytracer.
+ */
+
// Which color channel a kernel weight applies to.
enum KERNEL_CHANNEL {
    RED,
    GREEN,
    BLUE,
    NONE=-1,
};

// A separable 1-D kernel: a weight lookup per (offset, channel) plus its radius.
struct Kernel1D {
    std::function<double(double, KERNEL_CHANNEL)> getWeight;
    double radius;
};

// Pass direction for the separable convolution.
enum CONVOLVE_DIRECTION {
    HORIZONTAL,
    VERTICAL
};

// Fetches a pixel with wrap-around (toroidal) addressing at the image edges.
// NOTE(review): assumes x >= -width and y >= -height; more negative values
// would still index out of range — confirm callers keep offsets within one
// image span.
RGBA getPixelWrapped(std::vector<RGBA> &data, int width, int height, int x, int y) {
    int newX = (x < 0) ? x + width : x % width;
    int newY = (y < 0) ? y + height : y % height;
    return data[width * newY + newX];
}
+
/**
 * Converts a normalized channel intensity (expected in [0, 1]) to an 8-bit
 * value, saturating out-of-range inputs.
 *
 * Fix: the original clamped x against 255 *before* scaling by 255, so any
 * x > 1 (possible when kernel weights sum above one) overflowed the uint8_t
 * and wrapped around; negative accumulators were not clamped at all.
 */
std::uint8_t floatToUint8(float x) {
    x = std::clamp(x, 0.f, 1.f);
    return static_cast<std::uint8_t>(std::lround(x * 255.f));
}
+
/**
 * One pass of a separable convolution over an RGBA image.
 * Applies `kernel` along `direction` (HORIZONTAL or VERTICAL) to every
 * pixel, wrapping at the image edges, and returns the filtered copy.
 */
std::vector<RGBA> convolve1D(std::vector<RGBA> data, int width, int height, Kernel1D kernel, CONVOLVE_DIRECTION direction) {
    // need to assign then set, since the direction could be either way
    std::vector<RGBA> result;
    result.assign(width*height, RGBA{0, 0, 0, 255});

    // get the order of the for loop, based on the bound
    int outerBound = direction == CONVOLVE_DIRECTION::HORIZONTAL ? height : width;
    int innerBound = direction == CONVOLVE_DIRECTION::HORIZONTAL ? width : height;

    for (int i = 0; i < outerBound; i++) {
        for (int j = 0; j < innerBound; j++) {
            // accumulate each channel in [0, 1] float space
            float redAcc = 0.f, greenAcc = 0.f, blueAcc = 0.f;
            for (int k = -kernel.radius; k <= kernel.radius; k++) {
                // get the weight for each channel, at this kernel index
                double rWeight = kernel.getWeight(k, KERNEL_CHANNEL::RED);
                double gWeight = kernel.getWeight(k, KERNEL_CHANNEL::GREEN);
                double bWeight = kernel.getWeight(k, KERNEL_CHANNEL::BLUE);

                // determine the pixel location on the canvas
                int pixelX = direction == CONVOLVE_DIRECTION::HORIZONTAL ? j + k : i;
                int pixelY = direction == CONVOLVE_DIRECTION::HORIZONTAL ? i : j + k;

                // get the pixel to compute this inner index of convolution.
                // if out of bounds, get the wrapped
                RGBA pixel;
                if (pixelX < 0 || pixelX >= width || pixelY < 0 || pixelY >= height)
                    pixel = getPixelWrapped(data, width, height, pixelX, pixelY);
                else
                    pixel = data.at(width * pixelY + pixelX);

                // sum the weights on each channel
                redAcc += rWeight * pixel.r/255.f;
                greenAcc += gWeight * pixel.g/255.f;
                blueAcc += bWeight * pixel.b/255.f;
            }

            // get location then set the pixel into the result
            int pixelOnCanvas = direction == CONVOLVE_DIRECTION::HORIZONTAL ? width * i + j : width * j + i;
            result[pixelOnCanvas] = RGBA{floatToUint8(redAcc), floatToUint8(greenAcc), floatToUint8(blueAcc), 255};
        }
    }

    return result;
}
+
/**
 * Normalized triangle (tent) filter evaluated at offset x.
 * `a` is the scale factor: for a < 1 (minification) the support widens to
 * 1/a so the kernel keeps unit area; otherwise the support radius is 1.
 * Returns 0 outside the support.
 */
double triangleFilter(double x, double a) {
    const double support = (a < 1) ? 1 / a : 1;
    const bool outside = (x < -support) || (x > support);
    return outside ? 0 : (1 - std::fabs(x) / support) / support;
}
+
/**
 * Blurs imageData in place with a separable triangle (tent) filter of the
 * given radius: one horizontal convolution pass followed by one vertical.
 */
void RayTracer::filterBlur(RGBA *imageData, int width, int height, float blurRadius) {
    // make triangle filter
    // note: 1/blurRadius for the "radius" of the filter will normalize the area under it to 1
    Kernel1D triangleKernel;
    triangleKernel.radius = blurRadius;
    triangleKernel.getWeight = [blurRadius](double x, int c) { return triangleFilter(x, 1/blurRadius); };

    // copy the raw canvas into a vector for the convolution helpers
    std::vector<RGBA> data{};
    for (int i = 0; i < width*height; i++) {
        data.push_back(imageData[i]);
    }

    // separable blur: horizontal pass, then vertical pass over its result
    std::vector<RGBA> res = convolve1D(data, width, height, triangleKernel, HORIZONTAL);
    res = convolve1D(res, width,height, triangleKernel, VERTICAL);

    // write the filtered pixels back into the caller's buffer
    for (int i = 0; i < res.size(); i++) {
        imageData[i] = res[i];
    }
}
diff --git a/src/aliasing/supersample.cpp b/src/aliasing/supersample.cpp
new file mode 100644
index 0000000..aa8e9d3
--- /dev/null
+++ b/src/aliasing/supersample.cpp
@@ -0,0 +1,119 @@
+#include "raytracer/raytracer.h"
+
+/**
+ * Extra credit -> Super Sampling
+ */
+
// Offset from the pixel center (as a fraction of a pixel) for the corner rays.
const float SUPERSAMPLE_DISTANCE_FROM_CENTER = .25f; // note: max of .5f, unless overlapping with other pixels
bool SUPER_SAMPLE = false;            // NOTE(review): not read in this file — confirm whether m_config supersedes it
bool ADAPTIVE_SUPER_SAMPLING = false; // gates the corner-refinement pass in superSample below
+
+RGBA RayTracer::superSample(
+ glm::vec4 eyeCamera,
+ glm::vec4 pixelDirCamera,
+ const RayTraceScene &scene) {
+ // get the color value at value between center and four corners
+ float x_delta = SUPERSAMPLE_DISTANCE_FROM_CENTER / (scene.width());
+ float y_delta = SUPERSAMPLE_DISTANCE_FROM_CENTER / (scene.height());
+ // TL == TOP LEFT
+ // BR = BOTTOM RIGHT, not Battle Royale :)
+ glm::vec4 pixelTL = getPixelFromRay(
+ eyeCamera,
+ glm::vec4(pixelDirCamera.x - x_delta, pixelDirCamera.y - y_delta, pixelDirCamera.z, 0.f),
+ scene);
+ glm::vec4 pixelTR = getPixelFromRay(
+ eyeCamera,
+ glm::vec4(pixelDirCamera.x + x_delta, pixelDirCamera.y - y_delta, pixelDirCamera.z, 0.f),
+ scene);
+ glm::vec4 pixelBL = getPixelFromRay(
+ eyeCamera,
+ glm::vec4(pixelDirCamera.x - x_delta, pixelDirCamera.y + y_delta, pixelDirCamera.z, 0.f),
+ scene);
+ glm::vec4 pixelBR = getPixelFromRay(
+ eyeCamera,
+ glm::vec4(pixelDirCamera.x + x_delta, pixelDirCamera.y + y_delta, pixelDirCamera.z, 0.f),
+ scene);
+
+ if (!ADAPTIVE_SUPER_SAMPLING) {
+ return toRGBA((pixelTL + pixelTR + pixelBL + pixelBR) / 4.f);
+ }
+
+ // ADAPTIVE SUPER SAMPLING
+ // make the region from the center of pixel smaller until we hit something
+ RGBA nohit = {0, 0, 0, 0}; // just here to say that a is 0 if no hit...
+ float TRAVERSE_DISTANCE = .025f;
+ float num_pixels = 4.f;
+ if (pixelTL.a == 0) {
+ num_pixels--;
+ float smallerDist = SUPERSAMPLE_DISTANCE_FROM_CENTER - TRAVERSE_DISTANCE;
+ while (smallerDist < TRAVERSE_DISTANCE) {
+ float x_delta = smallerDist / (scene.width());
+ float y_delta = smallerDist / (scene.height());
+ pixelTL = getPixelFromRay(
+ eyeCamera,
+ glm::vec4(pixelDirCamera.x - x_delta, pixelDirCamera.y - y_delta, pixelDirCamera.z, 0.f),
+ scene);
+ if (pixelTL.a != 0) {
+ num_pixels++;
+ break;
+ }
+ smallerDist -= TRAVERSE_DISTANCE;
+ }
+ }
+ if (pixelTR.a == 0) {
+ num_pixels--;
+ float smallerDist = SUPERSAMPLE_DISTANCE_FROM_CENTER - TRAVERSE_DISTANCE;
+ while (smallerDist < TRAVERSE_DISTANCE) {
+ float x_delta = smallerDist / (scene.width());
+ float y_delta = smallerDist / (scene.height());
+ pixelTR = getPixelFromRay(
+ eyeCamera,
+ glm::vec4(pixelDirCamera.x - x_delta, pixelDirCamera.y - y_delta, pixelDirCamera.z, 0.f),
+ scene);
+ if (pixelTR.a != 0) {
+ num_pixels += 1;
+ break;
+ }
+ smallerDist -= TRAVERSE_DISTANCE;
+ }
+ }
+ if (pixelBL.a == 0) {
+ num_pixels--;
+ float smallerDist = SUPERSAMPLE_DISTANCE_FROM_CENTER - TRAVERSE_DISTANCE;
+ while (smallerDist < TRAVERSE_DISTANCE) {
+ float x_delta = smallerDist / (scene.width());
+ float y_delta = smallerDist / (scene.height());
+ pixelBL = getPixelFromRay(
+ eyeCamera,
+ glm::vec4(pixelDirCamera.x - x_delta, pixelDirCamera.y - y_delta, pixelDirCamera.z, 0.f),
+ scene);
+ if (pixelBL.a != 0) {
+ num_pixels += 1;
+ break;
+ }
+ smallerDist -= TRAVERSE_DISTANCE;
+ }
+ }
+ if (pixelBR.a == 0) {
+ num_pixels--;
+ float smallerDist = SUPERSAMPLE_DISTANCE_FROM_CENTER - TRAVERSE_DISTANCE;
+ while (smallerDist < TRAVERSE_DISTANCE) {
+ float x_delta = smallerDist / (scene.width());
+ float y_delta = smallerDist / (scene.height());
+ pixelBR = getPixelFromRay(
+ eyeCamera,
+ glm::vec4(pixelDirCamera.x - x_delta, pixelDirCamera.y - y_delta, pixelDirCamera.z, 0.f),
+ scene);
+ if (pixelBR.a != 0) {
+ num_pixels += 1;
+ break;
+ }
+ smallerDist -= TRAVERSE_DISTANCE;
+ }
+ }
+
+ if (num_pixels == 0.f) {
+ return nohit;
+ }
+ return toRGBA((pixelTL + pixelTR + pixelBL + pixelBR) / num_pixels);
+}
diff --git a/src/camera/camera.cpp b/src/camera/camera.cpp
new file mode 100644
index 0000000..62e8021
--- /dev/null
+++ b/src/camera/camera.cpp
@@ -0,0 +1,72 @@
+#include <stdexcept>
+#include "camera.h"
+
/**
 * Builds the camera's view matrix (world -> camera) and caches its inverse,
 * from the scene's position, look and up vectors.
 */
Camera::Camera(SceneCameraData cameraData) :
    m_pos(cameraData.pos),
    m_heightAngle(cameraData.heightAngle),
    m_focalLength(cameraData.focalLength),
    m_aperture(cameraData.aperture)
{
    // VIEW MATRIX INITIALIZATION
    // cast to 3 for dots & cross
    glm::vec3 look3{cameraData.look.x, cameraData.look.y, cameraData.look.z};
    glm::vec3 up3{cameraData.up.x, cameraData.up.y, cameraData.up.z};

    // calculate new basis: e0 opposes look, e1 is up orthogonalized against
    // e0 (Gram-Schmidt), e2 completes the right-handed frame
    glm::vec3 e0 = -glm::normalize(look3);
    glm::vec3 e1 = glm::normalize(up3 - glm::dot(up3, e0) * e0);
    glm::vec3 e2 = glm::cross(e1, e0);

    // note: glm matrices are column-major, so each brace row below is one
    // column; `alignment` rotates world coordinates into the camera basis
    glm::mat4 alignment
    {
        e2.x, e1.x, e0.x, 0.f,
        e2.y, e1.y, e0.y, 0.f,
        e2.z, e1.z, e0.z, 0.f,
        0.f, 0.f, 0.f, 1.f
    };
    // translate the camera position to the origin before rotating
    glm::mat4 translation
    {
        1.f, 0.f, 0.f, 0.f,
        0.f, 1.f, 0.f, 0.f,
        0.f, 0.f, 1.f, 0.f,
        -cameraData.pos.x, -cameraData.pos.y, -cameraData.pos.z, 1.f
    };

    m_viewMatrix = alignment * translation;
    m_inverse = glm::inverse(m_viewMatrix);
}
+
+
glm::mat4 Camera::getViewMatrix() const {
    // world -> camera transform built in the constructor
    return m_viewMatrix;
}

glm::mat4 Camera::getInverseViewMatrix() const {
    // cached inverse (camera -> world), computed once at construction
    return m_inverse;
}

float Camera::getAspectRatio() const {
    // NOTE(review): intentionally unimplemented — callers compute aspect
    // from the scene's width/height instead
    throw std::runtime_error("not implemented");
}

float Camera::getHeightAngle() const {
    // vertical field of view, in radians
    return m_heightAngle;
}

float Camera::getFocalLength() const {
    // depth-of-field extra credit only
    return m_focalLength;
}

float Camera::getAperture() const {
    // depth-of-field extra credit only
    return m_aperture;
}

glm::vec3 Camera::getPos() const {
    // camera position in world space
    return m_pos;
}
diff --git a/src/camera/camera.h b/src/camera/camera.h
new file mode 100644
index 0000000..e0cd013
--- /dev/null
+++ b/src/camera/camera.h
@@ -0,0 +1,49 @@
#pragma once

#include "utils/scenedata.h"
#include <glm/glm.hpp>

// A class representing a virtual camera.
// Wraps the scene's camera data and precomputes the view matrix and its
// inverse at construction time.

class Camera {
public:
    Camera(SceneCameraData cameraData);
    // Returns the view matrix (world -> camera) for the current settings.
    glm::mat4 getViewMatrix() const;
    // Returns the cached inverse of the view matrix (camera -> world).
    glm::mat4 getInverseViewMatrix() const;

    // Returns the aspect ratio of the camera.
    // NOTE(review): the camera.cpp implementation throws "not implemented".
    float getAspectRatio() const;

    // Returns the height angle of the camera in RADIANS.
    float getHeightAngle() const;

    // Returns the focal length of this camera.
    // This is for the depth of field extra-credit feature only;
    // You can ignore if you are not attempting to implement depth of field.
    float getFocalLength() const;

    // Returns the aperture of this camera.
    // This is for the depth of field extra-credit feature only;
    // You can ignore if you are not attempting to implement depth of field.
    float getAperture() const;

    // Returns the camera's world-space position.
    glm::vec3 getPos() const;

    // NOTE(review): not assigned anywhere in camera.cpp — confirm use.
    float cameraDepth = -1.f;

private:
    glm::mat4 m_viewMatrix;  // world -> camera
    glm::mat4 m_inverse;     // camera -> world
    float m_heightAngle;     // vertical field of view, radians
    glm::vec3 m_pos;

    float m_focalLength;     // depth of field only
    float m_aperture;        // depth of field only
};

+
diff --git a/src/illuminate/illuminate.cpp b/src/illuminate/illuminate.cpp
new file mode 100644
index 0000000..d6d43c8
--- /dev/null
+++ b/src/illuminate/illuminate.cpp
@@ -0,0 +1,304 @@
+#include "raytracer/raytracer.h"
+
+glm::vec4 RayTracer::illuminationFromPointLight(
+ const SceneLightData &light,
+ glm::vec3 intersectionWorld,
+ glm::vec3 normalWorld,
+ glm::vec3 directionToCamera,
+ const RenderShapeData &shape,
+ const RayTraceScene &scene
+ )
+{
+ auto directionFromIntersectionToLight = light.pos.xyz() - intersectionWorld;
+ directionFromIntersectionToLight = glm::normalize(directionFromIntersectionToLight);
+
+ // check if this light is blocked by an object
+ auto distanceToLight = glm::distance(light.pos.xyz(), intersectionWorld);
+ bool isShadow = RayTracer::isShadowed(
+ light.pos,
+ distanceToLight,
+ glm::vec4(directionFromIntersectionToLight, 0.f),
+ glm::vec4(intersectionWorld, 1.f),
+ scene);
+ if (isShadow)
+ {
+ // if this is a shadow, then no light contribution
+ return glm::vec4(0.f);
+ }
+
+ // calculate attenuation
+ float c1 = light.function.x;
+ float c2 = light.function.y;
+ float c3 = light.function.z;
+ float attenuation = std::min(1.f, 1.f / (c1 + distanceToLight * c2 + (distanceToLight * distanceToLight) * c3));
+
+ return phong(
+ light.color,
+ attenuation,
+ directionFromIntersectionToLight,
+ directionToCamera,
+ intersectionWorld,
+ normalWorld,
+ shape,
+ scene);
+}
+
+glm::vec4 RayTracer::illuminationFromSpotLight(
+ const SceneLightData &light,
+ glm::vec3 intersectionWorld,
+ glm::vec3 normalWorld,
+ glm::vec3 directionToCamera,
+ const RenderShapeData &shape,
+ const RayTraceScene &scene
+)
+{
+ auto distance = glm::distance(light.pos.xyz(), intersectionWorld);
+
+ // calculate the angle from the shape to the spot light
+ auto directionFromIntersectionToLight = glm::normalize(light.pos.xyz() - intersectionWorld);
+
+ // calculate intensity, based on angle. apply falloff if necessary
+ auto lightDirection = glm::normalize(light.dir.xyz());
+ // invert the direction of the intersection to light for dot product to work correctly
+ auto cosTheta = glm::dot(-directionFromIntersectionToLight, lightDirection);
+ auto theta = glm::acos(cosTheta);
+
+ // determine intensity, based on location on spot cone
+ glm::vec4 intensity;
+ float inner = light.angle - light.penumbra;
+ if (theta <= inner)
+ {
+ intensity = light.color;
+ }
+ else if
+ (
+ theta > inner
+ && theta <= light.angle
+ )
+ {
+ // inside the penumbra, need to apply falloff
+ float falloff = -2 * std::pow(theta - inner, 3) / std::pow(light.penumbra, 3) +
+ 3 * std::pow(theta - inner, 2) / std::pow(light.penumbra, 2);
+ intensity = light.color * (1 - falloff);
+ }
+ else // theta > light.angle
+ {
+ return glm::vec4(0.f);
+ }
+
+ // if the light is within the cone, see if it's a shadow
+ auto distanceToLight = glm::distance(light.pos.xyz(), intersectionWorld);
+ bool isShadow = RayTracer::isShadowed(
+ light.pos,
+ distanceToLight,
+ glm::vec4(directionFromIntersectionToLight, 0.f),
+ glm::vec4(intersectionWorld, 1.f),
+ scene);
+ if (isShadow)
+ {
+ // if this is a shadow, then no light contribution
+ return glm::vec4(0.f);
+ }
+
+ // calculate attenuation
+ float c1 = light.function.x;
+ float c2 = light.function.y;
+ float c3 = light.function.z;
+ float attenuation = std::min(1.f, 1.f / (c1 + distance * c2 + (distance * distance) * c3));
+
+ return phong(
+ intensity,
+ attenuation,
+ directionFromIntersectionToLight,
+ directionToCamera,
+ intersectionWorld,
+ normalWorld,
+ shape,
+ scene);
+}
+
+glm::vec4 RayTracer::illuminationFromDirectionalLight(
+ const SceneLightData &light,
+ glm::vec3 intersectionWorld,
+ glm::vec3 normalWorld,
+ glm::vec3 directionToCamera,
+ const RenderShapeData &shape,
+ const RayTraceScene &scene
+)
+{
+ // define direction and distance of directional light
+ auto directionFromIntersectionToLight = - light.dir;
+ directionFromIntersectionToLight = glm::normalize(directionFromIntersectionToLight);
+ float distanceToLight = FINF; // directional light infinitely far away
+
+ // check if an object blocks ours
+ bool isShadow = RayTracer::isShadowed(
+ light.pos,
+ distanceToLight,
+ directionFromIntersectionToLight,
+ glm::vec4(intersectionWorld, 1.f),
+ scene);
+ if (isShadow)
+ {
+ // if this is a shadow, then no light contribution
+ return glm::vec4(0.f);
+ }
+
+ float attenuation = 1.f; // directional lights don't attenuate
+ return phong(
+ light.color,
+ attenuation,
+ directionFromIntersectionToLight,
+ directionToCamera,
+ intersectionWorld,
+ normalWorld,
+ shape,
+ scene);
+}
+
+
+
// Calculates the RGBA of a pixel from intersection information and globally-defined
// coefficients: ambient term + per-light contributions + recursive
// reflection/refraction bounces (depth-limited via getPixelFromRay).
glm::vec4 RayTracer::illuminatePixel(
    glm::vec3 intersectionWorld,
    glm::vec3 normalWorld,
    glm::vec3 directionToCamera,
    const RenderShapeData& shape,
    const RayTraceScene &scene,
    int depth)
{
    // Normalizing directions
    normalWorld = glm::normalize(normalWorld);
    directionToCamera = glm::normalize(directionToCamera);

    // to be summed then returned (alpha starts at 1 and is never reduced)
    glm::vec4 illumination(0, 0, 0, 1.f);

    // add the ambient term (independent of all lights)
    float ka = scene.getGlobalData().ka;
    illumination += ka*shape.primitive.material.cAmbient;

    // accumulate one contribution per light, dispatched on light type
    for (const SceneLightData &light : scene.getLights()) {
        switch (light.type) {
            case LightType::LIGHT_POINT:
                illumination +=
                        illuminationFromPointLight(light, intersectionWorld, normalWorld, directionToCamera, shape, scene);
                continue;
            case LightType::LIGHT_DIRECTIONAL:
                illumination +=
                        illuminationFromDirectionalLight(light, intersectionWorld, normalWorld, directionToCamera, shape, scene);
                continue;
            case LightType::LIGHT_SPOT:
                illumination +=
                        illuminationFromSpotLight(light, intersectionWorld, normalWorld, directionToCamera, shape, scene);
                continue;
            case LightType::LIGHT_AREA:
                illumination +=
                        illuminationFromAreaLight(light, intersectionWorld, normalWorld, directionToCamera, shape, scene);
                continue;
            default: // unknown light type: contributes nothing
                continue;
        }
    }

    auto incidentDir = -directionToCamera;
    // recursive raytracing for the reflection and refraction (see reflect.cpp)
    illumination += refract(intersectionWorld, normalWorld, incidentDir, shape, scene, depth + 1);
    illumination += reflect(intersectionWorld, normalWorld, incidentDir, shape, scene, depth + 1);

    return illumination;
}
+
+// helper function to handle the diffuse and specular terms
+// also handles the texture within that diffuse term
+glm::vec4 RayTracer::phong(
+ glm::vec4 lightColor,
+ float attenuation,
+ glm::vec3 directionFromIntersectionToLight,
+ glm::vec3 directionToCamera,
+ glm::vec3 intersectionWorld,
+ glm::vec3 normalWorld,
+ const RenderShapeData &shape,
+ const RayTraceScene &scene)
+{
+ float kd = scene.getGlobalData().kd;
+ float ks = scene.getGlobalData().ks;
+ auto material = shape.primitive.material;
+
+ glm::vec4 illumination(0.f);
+
+ // calculate diffuse term
+ auto dotDiffuse = glm::dot(normalWorld, directionFromIntersectionToLight);
+ if (dotDiffuse > 0) // ensure not facing away
+ {
+ auto diffuse = (kd * material.cDiffuse);
+ if (material.textureMap.isUsed)
+ {
+ glm::vec4 pObject = shape.inverseCTM * glm::vec4(intersectionWorld, 1.f);
+ diffuse = interpolateTexture(pObject, shape, diffuse);
+ }
+ illumination += (attenuation * lightColor) * dotDiffuse * diffuse;
+ }
+
+ // add specular term
+ auto reflectedDirOverNormal =
+ 2 * glm::dot(directionFromIntersectionToLight, normalWorld) * normalWorld -
+ directionFromIntersectionToLight;
+ auto dotSpecular = glm::dot(reflectedDirOverNormal, directionToCamera);
+ auto toPow = std::pow(dotSpecular, material.shininess);
+ if (dotSpecular > 0) {
+ illumination += (attenuation * lightColor) * toPow * (ks * material.cSpecular);
+ }
+
+ return illumination;
+}
+
// EXTRA CREDIT -> AREA LIGHT
// Contribution of a rectangular area light, approximated by sampling one
// random point on the light per call (callers that sample many times per
// pixel get soft shadows).
glm::vec4 RayTracer::illuminationFromAreaLight(
    const SceneLightData &light,
    glm::vec3 intersectionWorld,
    glm::vec3 normalWorld,
    glm::vec3 directionToCamera,
    const RenderShapeData &shape,
    const RayTraceScene &scene
) {
    // select a random point within the light's height and width,
    // centered on light.pos. NOTE(review): the offset is applied in the
    // world x/y plane regardless of the light's orientation — confirm the
    // light is meant to face down the z axis.
    float width = light.width;
    float height = light.height;
    float x = ((float) rand() / (float) RAND_MAX) * width - width / 2.f;
    float y = ((float) rand() / (float) RAND_MAX) * height - height / 2.f;
    glm::vec4 lightPosition = light.pos + glm::vec4(x, y, 0.f, 0.f);

    auto directionFromIntersectionToLight = lightPosition.xyz() - intersectionWorld;
    directionFromIntersectionToLight = glm::normalize(directionFromIntersectionToLight);

    // check if this light is blocked by an object
    auto distanceToLight = glm::distance(lightPosition.xyz(), intersectionWorld);
    bool isShadow = RayTracer::isShadowed(
        lightPosition,
        distanceToLight,
        glm::vec4(directionFromIntersectionToLight, 0.f),
        glm::vec4(intersectionWorld, 1.f),
        scene);
    if (isShadow)
    {
        // this sample point is occluded: no contribution from this sample
        return glm::vec4(0.f);
    }

    // calculate attenuation (clamped so it never amplifies the light)
    float c1 = light.function.x;
    float c2 = light.function.y;
    float c3 = light.function.z;
    float attenuation = std::min(1.f, 1.f / (c1 + distanceToLight * c2 + (distanceToLight * distanceToLight) * c3));

    return phong(
        light.color,
        attenuation,
        directionFromIntersectionToLight,
        directionToCamera,
        intersectionWorld,
        normalWorld,
        shape,
        scene);
}
diff --git a/src/illuminate/reflect.cpp b/src/illuminate/reflect.cpp
new file mode 100644
index 0000000..c7fea98
--- /dev/null
+++ b/src/illuminate/reflect.cpp
@@ -0,0 +1,115 @@
+//
+// Created by Michael Foiani on 11/4/23.
+//
+
+#include "raytracer/raytracer.h"
+
+// helper that reflects vectors
+glm::vec3 reflectVector(
+ glm::vec3 incidentDir,
+ glm::vec3 normal)
+{
+ return incidentDir - 2.f * glm::dot(incidentDir, normal) * normal;
+}
+
+glm::vec4 RayTracer::reflect(
+ glm::vec3 intersectionWorld,
+ glm::vec3 normalWorld,
+ glm::vec3 incidentDir,
+ const RenderShapeData &shape,
+ const RayTraceScene &scene,
+ int depth)
+{
+ auto material = shape.primitive.material;
+ // check if the material is reflective
+ if (material.cReflective == glm::vec4(0.f))
+ {
+ return glm::vec4(0.f);
+ }
+ auto reflectedDir = reflectVector(incidentDir, normalWorld);
+
+ // shoot a ray from the intersection point in the reflected direction
+ auto reflectColors = getPixelFromRay(glm::vec4(intersectionWorld + .001f * reflectedDir, 1.f), glm::vec4(reflectedDir, 0.f), scene, depth + 1);
+ return scene.getGlobalData().ks * material.cReflective * reflectColors;
+}
+
+// EXTRA CREDIT -> refracting
+
// TRUE REFRACTING
// Reflection coefficient from Fresnel's equations: average of the squared
// s-polarized (perpendicular) and p-polarized (parallel) amplitude
// coefficients, i.e. the unpolarized reflectance.
bool REAL_REFRACTING = false;
float fresnels(
    float currentMediumIor,
    float otherMediumIor,
    float cosAngleIncident,
    float cosAngleTransmitted)
{
    const float ni = currentMediumIor;
    const float nt = otherMediumIor;
    const float ci = cosAngleIncident;
    const float ct = cosAngleTransmitted;

    // s-polarized and p-polarized amplitude reflection coefficients.
    const float rPerp = (ni * ci - nt * ct) / (ni * ci + nt * ct);
    const float rPara = (nt * ci - ni * ct) / (nt * ci + ni * ct);

    // Unpolarized light: average the squared magnitudes.
    return (rPerp * rPerp + rPara * rPara) / 2.f;
}
+
// Refraction via Snell's law: bends the incident ray through the surface,
// recursively traces the transmitted ray, and weights the result by the
// material's transparency and (1 - Fresnel reflectance).
glm::vec4 RayTracer::refract(
    glm::vec3 intersectionWorld,
    glm::vec3 normalWorld,
    glm::vec3 incidentDir,
    const RenderShapeData& shape,
    const RayTraceScene &scene,
    int depth
)
{
    auto material = shape.primitive.material;
    // check if the material is transparent; opaque materials transmit nothing
    if (material.cTransparent == glm::vec4(0.f))
    {
        return glm::vec4(0.f);
    }

    // apply snells law to find the sin of refracted angle (squared)
    incidentDir = glm::normalize(incidentDir);
    float cosAngleIncident = glm::dot(incidentDir, normalWorld);
    float currentMediumIor = mediumIor;
    float otherMediumIor = material.ior;

    if (cosAngleIncident < 0)
    {
        // outside the object: flip the sign so the cosine is positive below
        cosAngleIncident = -cosAngleIncident;
    }
    else
    {
        // inside the object, invert the normal and swap the Iors
        normalWorld = -normalWorld;
        std::swap(currentMediumIor, otherMediumIor);
    }

    // Snell: sin^2(theta_t) = (n1/n2)^2 * (1 - cos^2(theta_i))
    float iorRatio = currentMediumIor / otherMediumIor;
    float sinAngleTransmittedSquared = iorRatio * iorRatio * (1 - cosAngleIncident * cosAngleIncident);
    if (sinAngleTransmittedSquared > 1.f) // total internal reflection, not considered
    {
        return glm::vec4(0.f);
    }

    auto cosAngleTransmitted = glm::sqrt(1 - sinAngleTransmittedSquared);

    // compute refracted ray according to snell's law
    auto refractedDir = glm::normalize(
        incidentDir * iorRatio
        + (iorRatio * cosAngleIncident - cosAngleTransmitted) * normalWorld);

    // send a ray in the refracted direction to get the colors; the origin is
    // biased slightly along the ray to avoid re-hitting this surface
    auto refractedColors = getPixelFromRay(
        glm::vec4(intersectionWorld + .001f * refractedDir, 1.f),
        glm::vec4(refractedDir, 0.f),
        scene,
        depth + 1);

    // scale transmission by (1 - reflectance) so energy is split with reflect()
    float fresnel = fresnels(currentMediumIor, otherMediumIor, cosAngleIncident, cosAngleTransmitted);
    auto color = scene.getGlobalData().kt * material.cTransparent * refractedColors * (1 - fresnel);
    return color;
}
diff --git a/src/illuminate/shadow.cpp b/src/illuminate/shadow.cpp
new file mode 100644
index 0000000..99e2b29
--- /dev/null
+++ b/src/illuminate/shadow.cpp
@@ -0,0 +1,58 @@
+#include "raytracer/raytracer.h"
+
// Returns true when an object lies between the intersection point and the
// light, i.e. the point is in shadow for this light sample.
// @param lightPosition     world-space light position (unused on the BVH path)
// @param distanceToLight   world-space distance; FINF for directional lights
// @param directionFromIntersectionToLight  world-space, normalized here
// @param intersectionWorld world-space surface point being shaded
bool RayTracer::isShadowed(
    glm::vec4 lightPosition,
    float distanceToLight,
    glm::vec4 directionFromIntersectionToLight,
    glm::vec4 intersectionWorld,
    const RayTraceScene &scene)
{
    // normalize direction
    directionFromIntersectionToLight = glm::normalize(directionFromIntersectionToLight);

    // Accelerated path: bias the origin along the ray to avoid hitting the
    // surface we started on, then ask the BVH for any hit.
    // NOTE(review): unlike the loop below, this path does not compare the hit
    // distance against distanceToLight, so an occluder BEYOND the light would
    // still cast a shadow here — confirm this is intended.
    if (m_config.enableAcceleration)
    {
        RenderShapeData shapeData;
        auto pBias = intersectionWorld + .001f * directionFromIntersectionToLight;
        float t = traverseBVH(pBias, directionFromIntersectionToLight, shapeData, scene.m_bvh);
        return t != FINF;
    }

    // Naive path: test the shadow ray against every shape in object space.
    for (const RenderShapeData &s: scene.getShapes()) {
        // convert this world ray to object space
        glm::vec4 dObject = glm::normalize(
            s.inverseCTM * directionFromIntersectionToLight);
        glm::vec4 pObject = s.inverseCTM * intersectionWorld;

        // see if there is an intersection
        glm::vec4 newIntersectionObj = findIntersection(pObject, dObject, s);

        if (newIntersectionObj.w == 1.f) // hit!
        {
            // check if the intersection is the same as the pObject
            if (floatEquals(glm::distance(newIntersectionObj, pObject), 0.f, 0.001f))
            {
                // don't consider self-intersections
                continue;
            }

            // check if this intersection is closer than the direction to the light
            auto newIntersectionWorld = s.ctm * newIntersectionObj;
            if (distanceToLight == FINF)
            {
                // if the light is infinitely far away light, then any non-self intersection is valid
                return true;
            }

            // small epsilon guards against occluders exactly at the light
            float newDist = glm::distance(newIntersectionWorld, lightPosition);
            if (newDist < distanceToLight - 0.001f)
            {
                // an object between the point and the light -> shadow
                return true;
            }
        }
    }

    return false;
}
diff --git a/src/intersect/intersect.cpp b/src/intersect/intersect.cpp
new file mode 100644
index 0000000..3a39a87
--- /dev/null
+++ b/src/intersect/intersect.cpp
@@ -0,0 +1,265 @@
+#include "raytracer/raytracer.h"
+
+/**
+ * @brief This source file handles intersection calculations to be used by the ray tracer.
+ * The implementation for findIntersection in the RayTracer namespace is at the end of the file.
+ */
+
+// TODO: implement mesh
+
+glm::vec4 intersectCircle(
+ glm::vec4 p,
+ glm::vec4 d,
+ const RenderShapeData& shape)
+{
+ // implicit: x^2 + y^2 + z^2 - r^2 = 0, all directions
+ float radius = 0.5f;
+ float a = d.x*d.x + d.y*d.y + d.z*d.z;
+ float b = 2.f * (p.x*d.x + p.y*d.y + p.z*d.z);
+ float c = p.x*p.x + p.y*p.y + p.z*p.z - radius*radius;
+
+ float discriminant = b*b - 4*a*c;
+ if (discriminant < 0) // no solution
+ {
+ return glm::vec4(0.f);
+ }
+
+ float t1 = (-b - std::sqrt(discriminant)) / (2.f*a);
+ float t2 = (-b + std::sqrt(discriminant)) / (2.f*a);
+ if (t1 <= 0 && t2 <= 0) // both behind camera
+ {
+ return glm::vec4(0.f);
+ } else if (t1 <= 0) // t2 in front of camera
+ {
+ return p + t2*d;
+ } else if (t2 <= 0) // t1 in front of camera
+ {
+ return p + t1*d;
+ } else {
+ float t = std::min(t1, t2);
+ return p + t*d; // want best intersection point
+ }
+}
+
// Ray / unit-cone intersection in object space. The cone has its apex at
// y = .5, a base disc of radius .5 at y = -.5, and satisfies
//   x^2 + z^2 = .25 * (.5 - y)^2.
// Expanding gives the quadratic coefficients below:
//   a = dx^2 + dz^2 - .25 dy^2
//   b = 2(px dx + pz dz) - .5 py dy + .25 dy
//   c = px^2 + pz^2 - .25 py^2 + .25 py - 1/16
// Returns the closest valid hit point, or vec4(0) on a miss.
glm::vec4 intersectCone(
    glm::vec4 p,
    glm::vec4 d,
    const RenderShapeData& shape)
{
    float t = FINF;

    // Slanted side: quadratic from the implicit equation above.
    float radius = 0.5f;
    float a = d.x*d.x + d.z*d.z - .25f*(d.y*d.y);
    float b = 2.f*(p.x*d.x + p.z*d.z) - .5f*(p.y*d.y) + .25f*d.y;
    float c = p.x*p.x + p.z*p.z - .25f*(p.y*p.y) + .25f*p.y - 1/16.f;

    float discriminant = b*b - 4*a*c;
    if (discriminant >= 0)
    {
        float t1 = (-b - std::sqrt(discriminant)) / (2.f*a);
        float t2 = (-b + std::sqrt(discriminant)) / (2.f*a);

        // Keep each root only if it is in front of the origin and within the
        // cone's finite height (the infinite cone has a mirror half we must
        // reject).
        auto p1Top = p + t1 * d;
        if (
            t1 > 0 &&
            p1Top.y >= -.5f && p1Top.y <= .5f)

        {
            t = std::min(t1, t);
        }

        auto p2Top = p + t2 * d;
        if (
            t2 > 0 &&
            p2Top.y >= -.5f && p2Top.y <= .5f)

        {
            t = std::min(t2, t);
        }
    }


    // Base disc: plane y = -.5 restricted to x^2 + z^2 <= r^2.
    float tBase = (- .5f - p.y) / d.y;
    auto pBase = p + tBase * d;
    if (
        tBase > 0 &&
        pBase.x*pBase.x + pBase.z*pBase.z <= radius*radius
        )
    {
        t = std::min(t, tBase);
    }

    // FINF sentinel means no candidate survived -> miss.
    return t == FINF ? glm::vec4(0.f) : p + t*d;
}
+
+glm::vec4 intersectCylinder(
+ glm::vec4 p,
+ glm::vec4 d,
+ const RenderShapeData& shape)
+{
+ float t = FINF;
+
+ // implicit: x^2 + z^2 = 0, y between -.5, 5 rectuangular side
+ float radius = 0.5f;
+ float a = d.x*d.x + d.z*d.z;
+ float b = 2.f * (p.x*d.x + p.z*d.z);
+ float c = p.x*p.x + p.z*p.z - radius*radius;
+
+ float discriminant = b*b - 4*a*c;
+ if (discriminant >= 0)
+ {
+ float t1 = (-b - std::sqrt(discriminant)) / (2.f*a);
+ float t2 = (-b + std::sqrt(discriminant)) / (2.f*a);
+
+ auto p1Top = p + t1 * d;
+ if (
+ t1 > 0 &&
+ p1Top.y >= -.5f && p1Top.y <= .5f)
+ {
+ t = std::min(t1, t);
+ }
+
+ auto p2Top = p + t2 * d;
+ if (
+ t2 > 0 &&
+ p2Top.y >= -.5f && p2Top.y <= .5f)
+ {
+ t = std::min(t2, t);
+ }
+ }
+
+
+ // implicit p_y + t*d_y = -.5f, top base
+ float tTop = (.5f - p.y) / d.y;
+ auto pTop = p + tTop * d;
+ if (
+ tTop > 0 &&
+ pTop.x*pTop.x + pTop.z*pTop.z <= radius*radius
+ )
+ {
+ t = std::min(t, tTop);
+ }
+
+
+ // implicit p_y + t*d_y = -.5f, top base
+ float tBase = (- .5f - p.y) / d.y;
+ auto pBase = p + tBase * d;
+ if (
+ tBase > 0 &&
+ pBase.x*pBase.x + pBase.z*pBase.z <= radius*radius
+ )
+ {
+ t = std::min(t, tBase);
+ }
+
+ return t == FINF ? glm::vec4(0.f) : p + t*d;
+}
+
+glm::vec4 intersectCube (
+ glm::vec4 p,
+ glm::vec4 d,
+ const RenderShapeData& shape)
+{
+ // float t = FINF;
+ float apothem = .5f;
+
+ // start with x-dir
+ float tmin = (-apothem - p.x) / d.x;
+ float tmax = (apothem - p.x) / d.x;
+
+ // see if it hits top or bottom
+ if (tmin > tmax)
+ {
+ std::swap(tmin, tmax);
+ }
+
+ // y-dir
+ float tymin = (-apothem - p.y) / d.y;
+ float tymax = (apothem - p.y) / d.y;
+
+ if (tymin > tymax)
+ {
+ std::swap(tymin, tymax);
+ }
+
+ if ((tmin > tymax) || (tymin > tmax))
+ { // no hit
+ return glm::vec4(0.f);
+ }
+
+ if (tymin > tmin)
+ {
+ tmin = tymin;
+ }
+ if (tymax < tmax)
+ {
+ tmax = tymax;
+ }
+
+ // z-dir
+ float tzmin = (-apothem - p.z) / d.z;
+ float tzmax = (apothem - p.z) / d.z;
+
+ if (tzmin > tzmax)
+ {
+ std::swap(tzmin, tzmax);
+ }
+
+ if ((tmin > tzmax) || (tzmin > tmax))
+ { // no hit
+ return glm::vec4(0.f);
+ }
+
+ if (tzmin > tmin)
+ {
+ tmin = tzmin;
+ }
+ if (tzmax < tmax)
+ {
+ tmax = tzmax;
+ }
+
+ if (tmin <= 0 && tmax <= 0) // both behind camera
+ {
+ return glm::vec4(0.f);
+ } else if (tmin > 0) // tmin in front of camera
+ {
+ return p + tmin*d;
+ } else if (tmin <= 0) // tmax in front of camera
+ {
+ return p + tmax*d;
+ }
+
+ return glm::vec4(0.f);
+}
+
+/**
+ * @brief Finds the intersection point of a ray and a shape.
+ * The ray and shape should be in the same space for this function to work properly.
+ * This function does not check if the intersection point is in front of the camera.
+ * @param p, the point of the ray
+ * @param d, the direction of the space
+ * @param shape, the shape to be intersected with the ray
+ * @return the intersection point as a vec4. If there exists no intersection, returns vec4(0.f).
+ */
+glm::vec4 RayTracer::findIntersection(
+ glm::vec4 p,
+ glm::vec4 d,
+ const RenderShapeData& shape)
+{
+ switch(shape.primitive.type) {
+ case PrimitiveType::PRIMITIVE_SPHERE:
+ return intersectCircle(p, d, shape);
+ case PrimitiveType::PRIMITIVE_CONE:
+ return intersectCone(p, d, shape);
+ case PrimitiveType::PRIMITIVE_CYLINDER:
+ return intersectCylinder(p, d, shape);
+ case PrimitiveType::PRIMITIVE_CUBE:
+ return intersectCube(p, d, shape);
+ case PrimitiveType::PRIMITIVE_MESH:
+ break;
+ }
+ return glm::vec4(0.f);
+} \ No newline at end of file
diff --git a/src/intersect/normals.cpp b/src/intersect/normals.cpp
new file mode 100644
index 0000000..a5ffdbe
--- /dev/null
+++ b/src/intersect/normals.cpp
@@ -0,0 +1,97 @@
+//
+// Created by Michael Foiani on 11/4/23.
+//
+
+#include "raytracer/raytracer.h"
+
+glm::vec3 getConeNormal(
+ glm::vec4 intersectPointObject)
+{
+ if (RayTracer::floatEquals(intersectPointObject.y, -.5f)) // normal for base
+ {
+ return {0.f, -1.f, 0.f};
+ }
+ if (RayTracer::floatEquals(intersectPointObject.y, .5f)) // normal for top
+ {
+ return {0.f, 1.f, 0.f};
+ }
+
+ // gradient in object space for cone top is 2x, r^2 - .5*y, 2z
+ return glm::vec3{
+ 2.f * intersectPointObject.x,
+ .25f - .5f * intersectPointObject.y,
+ 2.f * intersectPointObject.z
+ };
+}
+
+glm::vec3 getCylinderNormal(
+ glm::vec4 intersectPointObject)
+{
+ if (RayTracer::floatEquals(intersectPointObject.y, -.5f)) // normal for base
+ {
+ return {0.f, -1.f, 0.f};
+ }
+ if (RayTracer::floatEquals(intersectPointObject.y, .5f)) // normal for top
+ {
+ return {0.f, 1.f, 0.f};
+ }
+
+ // gradient in object space for cylinder top is 2x, 0, 2z
+ return glm::vec3{
+ 2.f * intersectPointObject.x,
+ 0.f,
+ 2.f * intersectPointObject.z
+ };
+}
+
+glm::vec3 getCubeNormal(
+ glm::vec4 intersectPointObject)
+{
+ if (RayTracer::floatEquals(intersectPointObject.y, -.5f)) // neg y
+ {
+ return {0.f, -1.f, 0.f};
+ }
+ if (RayTracer::floatEquals(intersectPointObject.y, .5f)) // pos y
+ {
+ return {0.f, 1.f, 0.f};
+ }
+ if (RayTracer::floatEquals(intersectPointObject.x, -.5f)) // neg x
+ {
+ return {-1.f, 0.f, 0.f};
+ }
+ if (RayTracer::floatEquals(intersectPointObject.x, .5f)) // pos x
+ {
+ return {1.f, 0.f, 0.f};
+ }
+ if (RayTracer::floatEquals(intersectPointObject.z, -.5f)) // neg z
+ {
+ return {0.f, 0.f, -1.f};
+ }
+ if (RayTracer::floatEquals(intersectPointObject.z, .5f)) // pos z
+ {
+ return {0.f, 0.f, 1.f};
+ }
+ return glm::vec3(0.f);
+}
+
// Returns the object-space surface normal at an object-space intersection
// point, dispatching on the primitive type. The result is NOT normalized and
// is in object space; callers transform it with the inverse-transpose CTM.
glm::vec3 RayTracer::getNormal(
    glm::vec4 intersectPointObject,
    const RenderShapeData& shape,
    const RayTraceScene &scene)
{
    switch(shape.primitive.type)
    {
        case PrimitiveType::PRIMITIVE_SPHERE:
            // gradient in object space for sphere is 2x, 2y, 2z
            // (relies on glm converting the scaled vec4 to the vec3 return type)
            return 2.f * intersectPointObject;
        case PrimitiveType::PRIMITIVE_CONE:
            return getConeNormal(intersectPointObject);
        case PrimitiveType::PRIMITIVE_CYLINDER:
            return getCylinderNormal(intersectPointObject);
        case PrimitiveType::PRIMITIVE_CUBE:
            return getCubeNormal(intersectPointObject);
        case PrimitiveType::PRIMITIVE_MESH:
            // meshes not supported yet
            break;
    }
    return glm::vec3(0.f);
}
diff --git a/src/main.cpp b/src/main.cpp
new file mode 100644
index 0000000..8cb00b8
--- /dev/null
+++ b/src/main.cpp
@@ -0,0 +1,86 @@
+#include <QCoreApplication>
+#include <QCommandLineParser>
+#include <QImage>
+#include <QtCore>
+
+#include <iostream>
+#include "utils/sceneparser.h"
+#include "raytracer/raytracer.h"
+#include "raytracer/raytracescene.h"
+
// Entry point: parses the .ini config given on the command line, loads the
// scene file it references, renders it with RayTracer, and writes the result
// to the configured output image path.
int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);

    QCommandLineParser parser;
    parser.addHelpOption();
    parser.addPositionalArgument("config", "Path of the config file.");
    parser.process(a);

    auto positionalArgs = parser.positionalArguments();
    if (positionalArgs.size() != 1) {
        std::cerr << "Not enough arguments. Please provide a path to a config file (.ini) as a command-line argument." << std::endl;
        a.exit(1);
        return 1;
    }

    // The .ini file supplies both the scene description and the output path.
    QSettings settings( positionalArgs[0], QSettings::IniFormat );
    QString iScenePath = settings.value("IO/scene").toString();
    QString oImagePath = settings.value("IO/output").toString();

    RenderData metaData;
    bool success = SceneParser::parse(iScenePath.toStdString(), metaData);

    if (!success) {
        std::cerr << "Error loading scene: \"" << iScenePath.toStdString() << "\"" << std::endl;
        a.exit(1);
        return 1;
    }

    // Raytracing-relevant code starts here

    int width = settings.value("Canvas/width").toInt();
    int height = settings.value("Canvas/height").toInt();

    // Extracting data pointer from Qt's image API
    QImage image = QImage(width, height, QImage::Format_RGBX8888);
    image.fill(Qt::black);
    RGBA *data = reinterpret_cast<RGBA *>(image.bits());

    // Map each "Feature"/"Settings" key in the .ini onto a Config flag.
    Config rtConfig{};
    rtConfig.enableShadow = settings.value("Feature/shadows").toBool();
    rtConfig.enableReflection = settings.value("Feature/reflect").toBool();
    rtConfig.enableRefraction = settings.value("Feature/refract").toBool();
    rtConfig.enableTextureMap = settings.value("Feature/texture").toBool();
    rtConfig.enableTextureFilter = settings.value("Feature/texture-filter").toBool();
    rtConfig.enableParallelism = settings.value("Feature/parallel").toBool();
    rtConfig.enableSuperSample = settings.value("Feature/super-sample").toBool();
    rtConfig.enableAntiAliasing = settings.value("Feature/post-process").toBool();
    rtConfig.enableAcceleration = settings.value("Feature/acceleration").toBool();
    rtConfig.enableDepthOfField = settings.value("Feature/depthoffield").toBool();
    rtConfig.maxRecursiveDepth = settings.value("Settings/maximum-recursive-depth").toInt();
    rtConfig.onlyRenderNormals = settings.value("Settings/only-render-normals").toBool();

    RayTracer raytracer{ rtConfig };

    RayTraceScene rtScene{ width, height, metaData };

    // Note that we're passing `data` as a pointer (to its first element)
    // Recall from Lab 1 that you can access its elements like this: `data[i]`
    raytracer.render(data, rtScene);

    // Saving the image; retry with an explicit PNG format if the extension
    // isn't recognized.
    success = image.save(oImagePath);
    if (!success) {
        success = image.save(oImagePath, "PNG");
    }
    if (success) {
        std::cout << "Saved rendered image to \"" << oImagePath.toStdString() << "\"" << std::endl;
    } else {
        std::cerr << "Error: failed to save image to \"" << oImagePath.toStdString() << "\"" << std::endl;
    }

    a.exit();
    return 0;
}
diff --git a/src/raytracer/raytracer.cpp b/src/raytracer/raytracer.cpp
new file mode 100644
index 0000000..c3466cf
--- /dev/null
+++ b/src/raytracer/raytracer.cpp
@@ -0,0 +1,150 @@
+#include <QList>
+#include <QtConcurrent>
+#include <iostream>
+#include "raytracer.h"
+#include "raytracescene.h"
+
+//struct Ray {
+// glm::vec3 p;
+// glm::vec3 d;
+//};
+
+RayTracer::RayTracer(const Config &config) : m_config(config) {}
+
+void RayTracer::render(RGBA *imageData, const RayTraceScene &scene) {
+ if(m_config.enableParallelism)
+ {
+ renderParallel(imageData, scene);
+ return;
+ }
+
+ // naive rendering
+ Camera camera = scene.getCamera();
+ float cameraDepth = 1.f;
+
+ float viewplaneHeight = 2.f*cameraDepth*std::tan(camera.getHeightAngle() / 2.f);
+ float viewplaneWidth = cameraDepth*viewplaneHeight*((float)scene.width()/(float)scene.height());
+
+ for (int imageRow = 0; imageRow < scene.height(); imageRow++) {
+ for (int imageCol = 0; imageCol < scene.width(); imageCol++) {
+ float xCameraSpace = viewplaneWidth *
+ (-.5f + (imageCol + .5f) / scene.width());
+ float yCameraSpace = viewplaneHeight *
+ (-.5f + (imageRow + .5f) / scene.height());
+
+ glm::vec4 pixelDirCamera{xCameraSpace, -yCameraSpace, -cameraDepth, 0.f}; //w=0 for dir
+ glm::vec4 eyeCamera{0.f, 0.f, 0.f, 1.f}; // w=1.f for point
+
+ // convert to world space
+ glm::vec4 pWorld = camera.getInverseViewMatrix() * eyeCamera;
+ glm::vec4 dWorld = glm::normalize(camera.getInverseViewMatrix() * pixelDirCamera);
+
+ // cast ray!
+ glm::vec4 pixel = getPixelFromRay(pWorld, dWorld, scene);
+ imageData[imageRow * scene.width() + imageCol] = toRGBA(pixel);
+ }
+ }
+}
+
+
// Traces one world-space ray into the scene and returns its shaded color.
// Finds the closest non-self intersection (via the BVH when acceleration is
// on, otherwise by brute force over all shapes), then shades it with
// illuminatePixel. Returns vec4(0) on a miss or when the recursion limit
// (reflection/refraction bounces) is exceeded.
glm::vec4 RayTracer::getPixelFromRay(
    glm::vec4 pWorld,
    glm::vec4 dWorld,
    const RayTraceScene &scene,
    int depth)
{
    if (depth > m_config.maxRecursiveDepth)
    {
        return glm::vec4(0.f);
    }

    // variables from computing the intersection
    glm::vec4 closestIntersectionObj;
    glm::vec4 closestIntersectionWorld;
    RenderShapeData intersectedShape;

    if (m_config.enableAcceleration)
    {
        // BVH returns a world-space t; FINF is the miss sentinel.
        float tWorld = traverseBVH(pWorld, dWorld, intersectedShape, scene.m_bvh);
        if (tWorld == FINF)
        {
            return glm::vec4(0.f);
        }
        closestIntersectionWorld = pWorld + tWorld * dWorld;
        closestIntersectionObj = intersectedShape.inverseCTM * closestIntersectionWorld;
    }
    else
    {
        float minDist = FINF;
        // shoot a ray at each shape
        for (const RenderShapeData &shape : scene.getShapes()) {
            // Transform the ray to the shape's object space. Note dObject is
            // re-normalized, so object-space t values are not comparable
            // across shapes — closeness is compared in WORLD space below.
            glm::vec4 pObject = shape.inverseCTM * pWorld;
            glm::vec4 dObject = glm::normalize(shape.inverseCTM * dWorld);

            glm::vec4 newIntersectionObj = findIntersection(pObject, dObject, shape);
            if (newIntersectionObj.w == 0) // no hit (w==1 marks a valid point)
            {
                continue;
            }

            auto newIntersectionWorld = shape.ctm * newIntersectionObj;
            float newDist = glm::distance(newIntersectionWorld, pWorld);
            if (
                newDist < minDist // closer intersection
                && !floatEquals(newDist, 0) // and not a self intersection
                )
            {
                minDist = newDist;

                intersectedShape = shape;
                closestIntersectionObj = newIntersectionObj;
                closestIntersectionWorld = newIntersectionWorld;
            }
        }

        if (minDist == FINF) // no hit
        {
            return glm::vec4(0.f);
        }
    }

    // Normals transform by the inverse-transpose of the CTM so they stay
    // perpendicular under non-uniform scales.
    glm::vec3 normalObject = getNormal(closestIntersectionObj, intersectedShape, scene);
    glm::vec3 normalWorld =
        (
            glm::inverse(glm::transpose(intersectedShape.ctm))
            * glm::vec4(normalObject, 0.f)
        ).xyz();

    return illuminatePixel(closestIntersectionWorld, normalWorld, -dWorld, intersectedShape, scene, depth);
}
+
+// EXTRA CREDIT -> depth of field
+glm::vec4 RayTracer::secondaryRays(glm::vec4 pWorld, glm::vec4 dWorld, RayTraceScene &scene)
+{
+ auto inv = scene.getCamera().getInverseViewMatrix();
+ float focalLength = scene.getCamera().getFocalLength();
+ float aperture = scene.getCamera().getAperture();
+
+ glm::vec4 illumination(0.f);
+ glm::vec4 focalPoint = pWorld + focalLength * dWorld;
+
+ int TIMES = 500;
+ for (int i = 0; i < TIMES; i++) {
+ // generate a random number from -aperature to aperature
+ float rand1 = ((float) rand() / (float) RAND_MAX) * aperture;
+ rand1 *= (rand() % 2 == 0) ? 1 : -1;
+ // generate another number also inside the aperature lens
+ float rand2 = ((float) rand() / (float) RAND_MAX) * std::sqrt(aperture - rand1*rand1);
+ rand2 *= (rand() % 2 == 0) ? 1 : -1;
+ glm::vec4 randEye = (rand() % 2 == 0) ? glm::vec4(rand1, rand2, 0.f, 1.f) : glm::vec4(rand2, rand1, 0.f, 1.f);
+ // convert this random point to world space
+ glm::vec4 eyeWorld = inv * randEye;
+
+ // make the ray
+ glm::vec4 randomDir = glm::vec4(glm::normalize(focalPoint.xyz() - eyeWorld.xyz()), 0.f);
+
+ illumination += getPixelFromRay(eyeWorld, randomDir, scene, 0);
+ }
+
+ return illumination / (float) TIMES;
+} \ No newline at end of file
diff --git a/src/raytracer/raytracer.h b/src/raytracer/raytracer.h
new file mode 100644
index 0000000..6a16cdf
--- /dev/null
+++ b/src/raytracer/raytracer.h
@@ -0,0 +1,140 @@
+#pragma once
+
+#include <glm/glm.hpp>
+#include "utils/rgba.h"
+#include "utils/sceneparser.h"
+#include "raytracescene.h"
+#include "accelerate/kdtree.h"
+#include "accelerate/bvh.h"
+
+// A forward declaration for the RaytraceScene class
+
+class RayTraceScene;
+
+// A class representing a ray-tracer
+
+const float FINF = std::numeric_limits<float>::infinity();
+// NOTE(review): header-scope `static` gives every translation unit that
+// includes this header its OWN copy of mediumIor. If this is meant to be
+// shared mutable state across .cpp files, move the definition to one .cpp
+// (or make it `inline` under C++17) -- confirm intended usage.
+static float mediumIor = 1.0f;
+
+// Feature toggles and limits consumed by RayTracer (held as m_config).
+// All features default to off; maxRecursiveDepth caps recursive
+// reflection/refraction rays.
+struct Config {
+    bool enableShadow = false;
+    bool enableReflection = false;
+    bool enableRefraction = false;
+    bool enableTextureMap = false;
+    bool enableTextureFilter = false;
+    bool enableParallelism = false;
+    bool enableSuperSample = false;
+    bool enableAntiAliasing = false;
+    bool enableAcceleration = false;
+    bool enableDepthOfField = false;
+    int maxRecursiveDepth = 4;
+    bool onlyRenderNormals = false;
+};
+
+// The ray tracer. Owns no scene data itself: render() is handed a
+// RayTraceScene and writes pixels into caller-provided imageData.
+class RayTracer
+{
+public:
+    // constructor for the config
+    explicit RayTracer(const Config &config);
+    // NOTE(review): reference member -- the Config passed to the constructor
+    // must outlive this RayTracer instance.
+    const Config &m_config;
+
+    // Renders the scene synchronously.
+    // The ray-tracer will render the scene and fill imageData in-place.
+    // @param imageData The pointer to the imageData to be filled.
+    // @param scene The scene to be rendered.
+    void render(RGBA *imageData, const RayTraceScene &scene);
+
+    // shadow
+    bool isShadowed(glm::vec4 lightPosition, float distanceToLight, glm::vec4 directionFromIntersectionToLight,
+                    glm::vec4 intersectionWorld, const RayTraceScene &scene);
+
+    // texture: blends a texture lookup at pObject into the given illumination
+    glm::vec4 interpolateTexture(
+        glm::vec4 pObject,
+        const RenderShapeData &shape,
+        glm::vec4 illuminationToInterpolate);
+
+    // object-space surface normal at an intersection point
+    glm::vec3 getNormal(
+        glm::vec4 intersectPointObject,
+        const RenderShapeData &shape,
+        const RayTraceScene &scene);
+
+    // ray tracing: radiance for the ray p + t*d (world space); depth counts
+    // recursive reflection/refraction bounces
+    glm::vec4 getPixelFromRay(
+        glm::vec4 pWorld,
+        glm::vec4 dWorld,
+        const RayTraceScene &scene,
+        int depth = 0);
+
+    // intersect
+    glm::vec4 findIntersection(
+        glm::vec4 p,
+        glm::vec4 d,
+        const RenderShapeData& shape);
+
+    // utils
+    static RGBA toRGBA(const glm::vec4 &illumination);
+    static bool floatEquals(float a, float b, float epsilon = 0.0001f);
+
+    // refracting, reflecting
+    glm::vec4 refract(
+        glm::vec3 intersectionWorld,
+        glm::vec3 normalWorld,
+        glm::vec3 incidentDir,
+        const RenderShapeData &shape,
+        const RayTraceScene &scene,
+        int depth);
+    glm::vec4 reflect(
+        glm::vec3 intersectionWorld,
+        glm::vec3 normalWorld,
+        glm::vec3 incidentDir,
+        const RenderShapeData &shape,
+        const RayTraceScene &scene,
+        int depth);
+    glm::vec4 illuminatePixel(
+        glm::vec3 intersectionWorld,
+        glm::vec3 normalWorld,
+        glm::vec3 directionToCamera,
+        const RenderShapeData &shape,
+        const RayTraceScene &scene,
+        int depth);
+
+
+    // shading, and helpers for each type of light
+    glm::vec4
+    phong(glm::vec4 lightColor, float attenuation, glm::vec3 directionFromIntersectionToLight,
+          glm::vec3 directionToCamera,
+          glm::vec3 intersectionWorld, glm::vec3 normalWorld, const RenderShapeData &shape, const RayTraceScene &scene);
+
+    glm::vec4
+    illuminationFromPointLight(const SceneLightData &light, glm::vec3 intersectionWorld, glm::vec3 normalWorld,
+                               glm::vec3 directionToCamera, const RenderShapeData &shape,
+                               const RayTraceScene &scene);
+
+    glm::vec4 illuminationFromSpotLight(const SceneLightData &light, glm::vec3 intersectionWorld, glm::vec3 normalWorld,
+                                        glm::vec3 directionToCamera, const RenderShapeData &shape,
+                                        const RayTraceScene &scene);
+
+    glm::vec4
+    illuminationFromDirectionalLight(const SceneLightData &light, glm::vec3 intersectionWorld, glm::vec3 normalWorld,
+                                     glm::vec3 directionToCamera, const RenderShapeData &shape,
+                                     const RayTraceScene &scene);
+
+    glm::vec4 illuminationFromAreaLight(const SceneLightData &light, glm::vec3 intersectionWorld, glm::vec3 normalWorld,
+                                        glm::vec3 directionToCamera, const RenderShapeData &shape,
+                                        const RayTraceScene &scene);
+
+
+    // acceleration data structures
+    void renderParallel(RGBA *imageData, const RayTraceScene &scene);
+    float traverse(glm::vec4 p, glm::vec4 d, float tStart, float tEnd, RenderShapeData &testShape, KdTree *tree);
+    float traverseBVH(glm::vec4 p, glm::vec4 d, RenderShapeData &testShape, bvh *root);
+
+    // aliasing
+    RGBA superSample(glm::vec4 eyeCamera, glm::vec4 pixelDirCamera, const RayTraceScene &scene);
+    void filterBlur(RGBA *imageData, int width, int height, float blurRadius = 3.f);
+
+    // depth of field
+    glm::vec4 secondaryRays(glm::vec4 pWorld, glm::vec4 dWorld, RayTraceScene &scene);
+};
+
diff --git a/src/raytracer/raytracescene.cpp b/src/raytracer/raytracescene.cpp
new file mode 100644
index 0000000..f70aa83
--- /dev/null
+++ b/src/raytracer/raytracescene.cpp
@@ -0,0 +1,56 @@
+#include <stdexcept>
+#include "raytracescene.h"
+#include "utils/sceneparser.h"
+#include "raytracer.h"
+#include <iostream>
+
+// Builds a scene snapshot (dimensions, globals, shapes, lights) from parsed
+// metadata and constructs the BVH acceleration structure over all shapes.
+RayTraceScene::RayTraceScene(int width, int height, const RenderData &metaData) :
+    // NOTE(review): `* new Camera(...)` binds the Camera& member to a heap
+    // object that is never deleted -- one Camera leaks per scene. Fixing
+    // this requires changing m_camera from Camera& to Camera in the header.
+    m_camera(* new Camera(metaData.cameraData))
+{
+    // Optional TODO: implement this. Store whatever you feel is necessary.
+    m_width = width;
+    m_height = height;
+    m_sceneGlobalData = metaData.globalData;
+    m_shapes = metaData.shapes;
+    m_lights = metaData.lights;
+
+    // populate the kd tree
+    m_kdTree = nullptr;
+    std::vector<KdShape> shapes;
+    for (const auto& shape : metaData.shapes) {
+        // bounding box of the canonical object, transformed into world space
+        KdShape s{
+            shape,
+            KdTree::transformBoundingRegion(OBJECT_BOUNDS, shape.ctm)
+        };
+        shapes.push_back(s);
+    }
+    // NOTE(review): m_bvh is heap-allocated and the class has no destructor,
+    // so the BVH also leaks -- confirm whether scenes are meant to be
+    // long-lived singletons.
+    m_bvh = new bvh(shapes, 0);
+}
+
+// Width of the output image in pixels.
+const int& RayTraceScene::width() const {
+    // Optional TODO: implement the getter or make your own design
+    return m_width;
+}
+
+// Height of the output image in pixels.
+const int& RayTraceScene::height() const {
+    // Optional TODO: implement the getter or make your own design
+    return m_height;
+}
+
+// Global lighting coefficients (ka/kd/ks/kt).
+const SceneGlobalData& RayTraceScene::getGlobalData() const {
+    // Optional TODO: implement the getter or make your own design
+    return m_sceneGlobalData;
+}
+
+// NOTE(review): returns by value, so every call copies the whole vector;
+// the header declares this return type, so changing it to a const& would
+// have to happen there too.
+const std::vector<RenderShapeData> RayTraceScene::getShapes() const {
+    return m_shapes;
+}
+
+// NOTE(review): by-value return copies the light list on every call (see
+// getShapes above).
+const std::vector<SceneLightData> RayTraceScene::getLights() const {
+    return m_lights;
+}
+
+// The scene's camera (owned via the m_camera reference member).
+const Camera& RayTraceScene::getCamera() const {
+    // Optional TODO: implement the getter or make your own design
+    return m_camera;
+}
diff --git a/src/raytracer/raytracescene.h b/src/raytracer/raytracescene.h
new file mode 100644
index 0000000..b61bd2f
--- /dev/null
+++ b/src/raytracer/raytracescene.h
@@ -0,0 +1,42 @@
+#pragma once
+
+#include "utils/scenedata.h"
+#include "utils/sceneparser.h"
+#include "camera/camera.h"
+#include "accelerate/kdtree.h"
+#include "accelerate/bvh.h"
+
+// A class representing a scene to be ray-traced
+
+// Feel free to make your own design choices for RayTraceScene, the functions below are all optional / for your convenience.
+// You can either implement and use these getters, or make your own design.
+// If you decide to make your own design, feel free to delete these as TAs won't rely on them to grade your assignments.
+class RayTraceScene
+{
+public:
+    RayTraceScene(int width, int height, const RenderData &metaData);
+
+    // The getter of the width of the scene
+    const int& width() const;
+
+    // The getter of the height of the scene
+    const int& height() const;
+
+    // The getter of the global data of the scene
+    const SceneGlobalData& getGlobalData() const;
+    const std::vector<RenderShapeData> getShapes() const;
+    const std::vector<SceneLightData> getLights() const;
+
+    // The getter of the shared pointer to the camera instance of the scene
+    const Camera& getCamera() const;
+
+    // NOTE(review): public raw pointers; built in the constructor and never
+    // freed (no destructor). m_kdTree stays nullptr in the current ctor.
+    KdTree *m_kdTree;
+    bvh *m_bvh;
+private:
+    int m_width;
+    int m_height;
+    SceneGlobalData m_sceneGlobalData;
+    // NOTE(review): reference member bound to a heap Camera in the ctor;
+    // see the leak note there.
+    Camera& m_camera;
+    std::vector<RenderShapeData>m_shapes;
+    std::vector<SceneLightData>m_lights;
+};
diff --git a/src/texture/texture.cpp b/src/texture/texture.cpp
new file mode 100644
index 0000000..1fa4353
--- /dev/null
+++ b/src/texture/texture.cpp
@@ -0,0 +1,180 @@
+//
+// Created by Michael Foiani on 11/4/23.
+//
+
+#include "raytracer/raytracer.h"
+
+// Maps a point on the unit cube's surface (object space, faces at +/-0.5)
+// to UV texture coordinates in [0,1]^2. Returns the sentinel (-1,-1) when
+// the point lies on no face within floating-point tolerance.
+glm::vec2 getUVCube(glm::vec4 pObject) {
+    if (RayTracer::floatEquals(pObject.y, -.5f)) { // neg y, bottom face
+        return {pObject.x + 0.5f, pObject.z + 0.5f};
+    }
+    if (RayTracer::floatEquals(pObject.y, .5f)) {  // pos y, top face
+        return {pObject.x + 0.5f, .5f - pObject.z}; // flip z
+    }
+    if (RayTracer::floatEquals(pObject.x, -.5f)) { // neg x, left face
+        return {pObject.z + 0.5f, pObject.y + 0.5f};
+    }
+    if (RayTracer::floatEquals(pObject.x, .5f)) {  // pos x, right face
+        return {.5f - pObject.z, pObject.y + 0.5f}; // flip z
+    }
+    if (RayTracer::floatEquals(pObject.z, -.5f)) { // neg z, back face
+        return {.5f - pObject.x, pObject.y + 0.5f}; // flip x
+    }
+    if (RayTracer::floatEquals(pObject.z, .5f)) {  // pos z, front face
+        return {pObject.x + 0.5f, pObject.y + 0.5f};
+    }
+
+    // not on any face
+    return {-1.f, -1.f};
+}
+
+// Maps a point on the unit cone's surface (object space, tip at y=0.5,
+// base cap at y=-0.5) to UV texture coordinates in [0,1]^2.
+glm::vec2 getUVCone(glm::vec4 pObject) {
+    // 1) bottom cap
+    if (RayTracer::floatEquals(pObject.y, -.5f)) {
+        return {pObject.x + 0.5f, pObject.z + 0.5f};
+    }
+    // 2) top cap
+    if (RayTracer::floatEquals(pObject.y, .5f)) {
+        return {pObject.x + 0.5f, 0.5f - pObject.z}; // flip z
+    }
+    // 3) conical face: u from the angle around the y-axis, v from height
+    float theta = glm::atan(pObject.x, pObject.z);
+    return {(theta + 1.5 * M_PI) / (2 * M_PI), pObject.y + 0.5f};
+}
+
+// Maps a point on the unit cylinder's surface (object space, caps at
+// y = +/-0.5) to UV texture coordinates in [0,1]^2.
+glm::vec2 getUVCylinder(glm::vec4 pObject) {
+    // 1) bottom cap
+    if (RayTracer::floatEquals(pObject.y, -.5f)) {
+        return {pObject.x + 0.5f, pObject.z + 0.5f};
+    }
+    // 2) top cap
+    if (RayTracer::floatEquals(pObject.y, .5f)) {
+        return {pObject.x + 0.5f, 0.5f - pObject.z}; // flip z
+    }
+    // 3) cylindrical face: u from the angle around the y-axis, v from height
+    float theta = glm::atan(pObject.x, pObject.z);
+    return {(theta + 1.5 * M_PI) / (2 * M_PI), pObject.y + 0.5f};
+}
+
+// Meshes carry no UV parameterization here; return the sentinel (-1,-1)
+// so interpolateTexture skips the texture lookup.
+glm::vec2 getUVMesh(glm::vec4 pObject) {
+    return {-1.f, -1.f};
+}
+
+// Maps a point on the unit sphere (object space, radius 0.5) to UV
+// texture coordinates in [0,1]^2 via spherical angles.
+glm::vec2 getUVSphere(glm::vec4 pObject) {
+    // u from the azimuthal angle around the y-axis
+    float theta = glm::atan(pObject.x, pObject.z);
+    float u = (theta + 1.5 * M_PI) / (2 * M_PI);
+
+    // v from the polar angle, mapped from [-pi/2, pi/2] to [0,1];
+    // clamp guards asin against values a hair outside [-1,1]
+    float sinPhi = std::clamp(pObject.y / .5f, -1.f, 1.f);
+    float v = glm::asin(sinPhi) / M_PI + .5f;
+
+    return {u, v};
+}
+
+// Blends the shape's texture color at the object-space hit point into the
+// computed illumination, weighted by the material's blend factor.
+// @param pObject                  intersection point in object space
+// @param shape                    the intersected shape (material + texture)
+// @param illuminationToInterpolate the Phong illumination to blend against
+// @return blended color, or the input illumination unchanged when no
+//         texture applies
+glm::vec4 RayTracer::interpolateTexture(
+    glm::vec4 pObject,
+    const RenderShapeData &shape,
+    glm::vec4 illuminationToInterpolate)
+{
+    auto material = shape.primitive.material;
+    if (!material.textureMap.isUsed)
+    {
+        // return if no texture
+        return illuminationToInterpolate;
+    }
+
+    // determine uv based on shape
+    glm::vec2 uv;
+    switch (shape.primitive.type)
+    {
+    case PrimitiveType::PRIMITIVE_CUBE:
+        uv = getUVCube(pObject);
+        break;
+    case PrimitiveType::PRIMITIVE_CONE:
+        uv = getUVCone(pObject);
+        break;
+    case PrimitiveType::PRIMITIVE_CYLINDER:
+        uv = getUVCylinder(pObject);
+        break;
+    case PrimitiveType::PRIMITIVE_SPHERE:
+        uv = getUVSphere(pObject);
+        break;
+    case PrimitiveType::PRIMITIVE_MESH:
+        uv = getUVMesh(pObject);
+        break;
+    }
+
+    // u == -1 is the sentinel from the getUV* helpers for "no lookup"
+    float u = uv.x, v = uv.y;
+    if (u == -1.f) {
+        return illuminationToInterpolate;
+    }
+
+    // map u,v to texture image
+    TextureData textureData = material.textureData;
+    if (textureData.data == nullptr) {
+        return illuminationToInterpolate;
+    }
+
+    // column: scale u by the repeat count, wrap with mod to tile
+    int m = material.textureMap.repeatU;
+    int c = (int) glm::floor(u * m * textureData.width) % textureData.width;
+    // NOTE(review): dead check -- the % above already keeps c < width
+    if (c >= textureData.width) {
+        c = textureData.width - 1;
+    }
+    // row: v is flipped because image rows grow downward
+    int n = material.textureMap.repeatV;
+    int r = (int) glm::floor((1-v) * n * textureData.height) % textureData.height;
+    // NOTE(review): dead check, as above
+    if (r >= textureData.height) {
+        r = textureData.height - 1;
+    }
+    RGBA texture = textureData.data[r * textureData.width + c];
+
+    // interpolate the texture color with the illumination
+    float blend = shape.primitive.material.blend;
+    glm::vec4 blended = blend * (glm::vec4(texture.r, texture.g, texture.b, texture.a) / 255.f)
+        + (1.f - blend) * illuminationToInterpolate;
+    return blended;
+}
diff --git a/src/utils/raytracerutils.cpp b/src/utils/raytracerutils.cpp
new file mode 100644
index 0000000..bdb49a4
--- /dev/null
+++ b/src/utils/raytracerutils.cpp
@@ -0,0 +1,21 @@
+//
+// Created by Michael Foiani on 11/4/23.
+//
+
+#include "raytracer/raytracer.h"
+
+// Helper function to convert illumination to RGBA, applying some form of tone-mapping (e.g. clamping) in the process
+// Each channel is clamped to [0, 1] and scaled to the 8-bit range.
+RGBA RayTracer::toRGBA(const glm::vec4 &illumination) {
+    // Task 1
+    // BUGFIX: the alpha channel was computed from illumination.b (copy-paste
+    // error); it now uses the illumination's own alpha component.
+    return RGBA
+    {
+        (std::uint8_t) (255 * std::clamp(illumination.r, 0.f, 1.f)),
+        (std::uint8_t) (255 * std::clamp(illumination.g, 0.f, 1.f)),
+        (std::uint8_t) (255 * std::clamp(illumination.b, 0.f, 1.f)),
+        (std::uint8_t) (255 * std::clamp(illumination.a, 0.f, 1.f))
+    };
+}
+
+// True when a and b differ by no more than epsilon (tolerant float compare).
+bool RayTracer::floatEquals(float a, float b, float epsilon) {
+    float delta = a - b;
+    return (delta < 0 ? -delta : delta) <= epsilon;
+}
diff --git a/src/utils/rgba.h b/src/utils/rgba.h
new file mode 100644
index 0000000..2103dab
--- /dev/null
+++ b/src/utils/rgba.h
@@ -0,0 +1,10 @@
+#pragma once
+
+#include <cstdint>
+
+// An 8-bit-per-channel RGBA pixel; alpha defaults to fully opaque.
+struct RGBA {
+    std::uint8_t r;
+    std::uint8_t g;
+    std::uint8_t b;
+    std::uint8_t a = 255;
+};
diff --git a/src/utils/scenedata.h b/src/utils/scenedata.h
new file mode 100644
index 0000000..043b84d
--- /dev/null
+++ b/src/utils/scenedata.h
@@ -0,0 +1,179 @@
+#pragma once
+
+#include <vector>
+#include <string>
+
+#include <glm/glm.hpp>
+#include "rgba.h"
+
+// Enum of the types of virtual lights that might be in the scene
+enum class LightType {
+    LIGHT_POINT,
+    LIGHT_DIRECTIONAL,
+    LIGHT_SPOT,
+    LIGHT_AREA
+};
+
+// Enum of the types of primitives that might be in the scene
+enum class PrimitiveType {
+    PRIMITIVE_CUBE,
+    PRIMITIVE_CONE,
+    PRIMITIVE_CYLINDER,
+    PRIMITIVE_SPHERE,
+    PRIMITIVE_MESH
+};
+
+// Enum of the types of transformations that can be applied
+enum class TransformationType {
+    TRANSFORMATION_TRANSLATE,
+    TRANSFORMATION_SCALE,
+    TRANSFORMATION_ROTATE,
+    TRANSFORMATION_MATRIX
+};
+
+// Type which can be used to store an RGBA color in floats [0,1]
+using SceneColor = glm::vec4;
+
+// Struct which contains the global color coefficients of a scene.
+// These are multiplied with the object-specific materials in the lighting equation.
+struct SceneGlobalData {
+    float ka; // Ambient term
+    float kd; // Diffuse term
+    float ks; // Specular term
+    float kt; // Transparency; used for extra credit (refraction)
+};
+
+// Struct which contains raw parsed data for a single light
+// (as read from the scenefile, before the CTM is applied)
+struct SceneLight {
+    int id;
+    LightType type;
+
+    SceneColor color;
+    glm::vec3 function; // Attenuation function
+    glm::vec4 dir;      // Not applicable to point lights
+
+    float penumbra;     // Only applicable to spot lights, in RADIANS
+    float angle;        // Only applicable to spot lights, in RADIANS
+
+    float width, height; // No longer supported (area lights)
+};
+
+// Struct which contains data for a single light with CTM applied
+struct SceneLightData {
+    int id;
+    LightType type;
+
+    SceneColor color;
+    glm::vec3 function; // Attenuation function
+
+    glm::vec4 pos; // Position with CTM applied (Not applicable to directional lights)
+    glm::vec4 dir; // Direction with CTM applied (Not applicable to point lights)
+
+    float penumbra; // Only applicable to spot lights, in RADIANS
+    float angle;    // Only applicable to spot lights, in RADIANS
+
+    float width, height; // No longer supported (area lights)
+};
+
+// Struct which contains data for the camera of a scene
+struct SceneCameraData {
+    glm::vec4 pos;
+    glm::vec4 look;
+    glm::vec4 up;
+
+    float heightAngle; // The height angle of the camera in RADIANS
+
+    float aperture;    // Only applicable for depth of field
+    float focalLength; // Only applicable for depth of field
+};
+
+// Struct which contains data for texture mapping files
+struct SceneFileMap {
+    SceneFileMap() : isUsed(false) {}
+
+    bool isUsed;
+    std::string filename;
+
+    float repeatU;
+    float repeatV;
+
+    // Reset to the "no texture" state.
+    void clear()
+    {
+        isUsed = false;
+        repeatU = 0.0f;
+        repeatV = 0.0f;
+        filename = std::string();
+    }
+};
+
+// A loaded texture image: width x height pixels in row-major order.
+// NOTE(review): `data` is a raw pointer; ownership/freeing responsibility is
+// not visible from this header -- confirm who allocates and releases it.
+struct TextureData {
+    int width;
+    int height;
+    RGBA* data;
+};
+
+// Struct which contains data for a material (e.g. one which might be assigned to an object)
+struct SceneMaterial {
+    SceneColor cAmbient;  // Ambient term
+    SceneColor cDiffuse;  // Diffuse term
+    SceneColor cSpecular; // Specular term
+    float shininess;      // Specular exponent
+
+    SceneColor cReflective; // Used to weight contribution of reflected ray lighting (via multiplication)
+
+    SceneColor cTransparent; // Transparency; used for extra credit (refraction)
+    float ior;               // Index of refraction; used for extra credit (refraction)
+
+    SceneFileMap textureMap; // Used for texture mapping
+    float blend;             // Used for texture mapping
+
+    TextureData textureData;
+
+    SceneColor cEmissive; // Not used
+    SceneFileMap bumpMap; // Not used
+
+    // Reset every field to its zero/empty state.
+    void clear()
+    {
+        cAmbient = glm::vec4(0);
+        cDiffuse = glm::vec4(0);
+        cSpecular = glm::vec4(0);
+        shininess = 0;
+
+        cReflective = glm::vec4(0);
+
+        cTransparent = glm::vec4(0);
+        ior = 0;
+
+        textureMap.clear();
+        blend = 0;
+
+        cEmissive = glm::vec4(0);
+        bumpMap.clear();
+    }
+};
+
+// Struct which contains data for a single primitive in a scene
+struct ScenePrimitive {
+    PrimitiveType type;
+    SceneMaterial material;
+    std::string meshfile; // Used for triangle meshes
+};
+
+// Struct which contains data for a transformation.
+struct SceneTransformation {
+    TransformationType type;
+
+    glm::vec3 translate; // Only applicable when translating. Defines t_x, t_y, and t_z, the amounts to translate by, along each axis.
+    glm::vec3 scale;     // Only applicable when scaling. Defines s_x, s_y, and s_z, the amounts to scale by, along each axis.
+    glm::vec3 rotate;    // Only applicable when rotating. Defines the axis of rotation; should be a unit vector.
+    float angle;         // Only applicable when rotating. Defines the angle to rotate by in RADIANS, following the right-hand rule.
+    glm::mat4 matrix;    // Only applicable when transforming by a custom matrix. This is that custom matrix.
+};
+
+// Struct which represents a node in the scene graph/tree, to be parsed by the student's `SceneParser`.
+// NOTE(review): child pointers are owned collectively by the reader's node
+// list, not recursively by each parent -- see ScenefileReader's destructor.
+struct SceneNode {
+    std::vector<SceneTransformation*> transformations; // Note the order of transformations described in lab 5
+    std::vector<ScenePrimitive*> primitives;
+    std::vector<SceneLight*> lights;
+    std::vector<SceneNode*> children;
+};
diff --git a/src/utils/scenefilereader.cpp b/src/utils/scenefilereader.cpp
new file mode 100644
index 0000000..ef2ad5e
--- /dev/null
+++ b/src/utils/scenefilereader.cpp
@@ -0,0 +1,1073 @@
+#include "scenefilereader.h"
+#include "scenedata.h"
+
+#include "glm/gtc/type_ptr.hpp"
+
+#include <cassert>
+#include <cstring>
+#include <iostream>
+#include <filesystem>
+
+#include <QFile>
+#include <QJsonArray>
+
+#define ERROR_AT(e) "error at line " << e.lineNumber() << " col " << e.columnNumber() << ": "
+#define PARSE_ERROR(e) std::cout << ERROR_AT(e) << "could not parse <" << e.tagName().toStdString() \
+ << ">" << std::endl
+#define UNSUPPORTED_ELEMENT(e) std::cout << ERROR_AT(e) << "unsupported element <" \
+ << e.tagName().toStdString() << ">" << std::endl;
+
+// Students, please ignore this file.
+// Remembers the scenefile path and sets up an empty scene graph with a
+// single root node; readJSON() does the actual parsing.
+ScenefileReader::ScenefileReader(const std::string &name) {
+    file_name = name;
+
+    // zero the POD camera/global blocks before parsing fills them in
+    memset(&m_cameraData, 0, sizeof(SceneCameraData));
+    memset(&m_globalData, 0, sizeof(SceneGlobalData));
+
+    m_root = new SceneNode;
+
+    m_templates.clear();
+    m_nodes.clear();
+
+    // m_nodes tracks every allocated node flat, so the destructor can free
+    // them without walking the tree
+    m_nodes.push_back(m_root);
+}
+
+// Frees every scene node tracked in m_nodes along with the
+// transformations, primitives, and lights each node owns.
+ScenefileReader::~ScenefileReader() {
+    // Delete all Scene Nodes
+    for (unsigned int node = 0; node < m_nodes.size(); node++) {
+        for (size_t i = 0; i < (m_nodes[node])->transformations.size(); i++)
+        {
+            delete (m_nodes[node])->transformations[i];
+        }
+        for (size_t i = 0; i < (m_nodes[node])->primitives.size(); i++)
+        {
+            delete (m_nodes[node])->primitives[i];
+        }
+        // BUGFIX: lights are heap-allocated in parseLightData but were never
+        // freed here, leaking one SceneLight per light in the scene.
+        for (size_t i = 0; i < (m_nodes[node])->lights.size(); i++)
+        {
+            delete (m_nodes[node])->lights[i];
+        }
+        (m_nodes[node])->transformations.clear();
+        (m_nodes[node])->primitives.clear();
+        (m_nodes[node])->lights.clear();
+        (m_nodes[node])->children.clear();
+        delete m_nodes[node];
+    }
+
+    m_nodes.clear();
+    m_templates.clear();
+}
+
+// Global lighting coefficients parsed from "globalData".
+SceneGlobalData ScenefileReader::getGlobalData() const {
+    return m_globalData;
+}
+
+// Camera parameters parsed from "cameraData".
+SceneCameraData ScenefileReader::getCameraData() const {
+    return m_cameraData;
+}
+
+// Root of the parsed scene graph (owned by this reader; freed in the dtor).
+SceneNode *ScenefileReader::getRootNode() const {
+    return m_root;
+}
+
+// This is where it all goes down...
+// Opens the scenefile, parses it as JSON, validates the root object's
+// fields, and dispatches to parseGlobalData / parseCameraData /
+// parseTemplateGroups / parseGroups. Returns false (with a message on
+// stdout) on any validation or parse failure.
+bool ScenefileReader::readJSON() {
+    // Read the file
+    QFile file(file_name.c_str());
+    if (!file.open(QFile::ReadOnly)) {
+        std::cout << "could not open " << file_name << std::endl;
+        return false;
+    }
+
+    // Load the JSON document
+    QByteArray fileContents = file.readAll();
+    QJsonParseError jsonError;
+    QJsonDocument doc = QJsonDocument::fromJson(fileContents, &jsonError);
+    if (doc.isNull()) {
+        std::cout << "could not parse " << file_name << std::endl;
+        std::cout << "parse error at line " << jsonError.offset << ": "
+                  << jsonError.errorString().toStdString() << std::endl;
+        return false;
+    }
+    file.close();
+
+    if (!doc.isObject()) {
+        std::cout << "document is not an object" << std::endl;
+        return false;
+    }
+
+    // Get the root element
+    QJsonObject scenefile = doc.object();
+
+    if (!scenefile.contains("globalData")) {
+        std::cout << "missing required field \"globalData\" on root object" << std::endl;
+        return false;
+    }
+    if (!scenefile.contains("cameraData")) {
+        std::cout << "missing required field \"cameraData\" on root object" << std::endl;
+        return false;
+    }
+
+    QStringList requiredFields = {"globalData", "cameraData"};
+    QStringList optionalFields = {"name", "groups", "templateGroups"};
+    // If other fields are present, raise an error
+    QStringList allFields = requiredFields + optionalFields;
+    for (auto &field : scenefile.keys()) {
+        if (!allFields.contains(field)) {
+            std::cout << "unknown field \"" << field.toStdString() << "\" on root object" << std::endl;
+            return false;
+        }
+    }
+
+    // Parse the global data
+    if (!parseGlobalData(scenefile["globalData"].toObject())) {
+        std::cout << "could not parse \"globalData\"" << std::endl;
+        return false;
+    }
+
+    // Parse the camera data
+    if (!parseCameraData(scenefile["cameraData"].toObject())) {
+        std::cout << "could not parse \"cameraData\"" << std::endl;
+        return false;
+    }
+
+    // Parse the template groups (named subtrees referenced from "groups")
+    if (scenefile.contains("templateGroups")) {
+        if (!parseTemplateGroups(scenefile["templateGroups"])) {
+            return false;
+        }
+    }
+
+    // Parse the groups (the scene graph proper, rooted at m_root)
+    if (scenefile.contains("groups")) {
+        if (!parseGroups(scenefile["groups"], m_root)) {
+            return false;
+        }
+    }
+
+    std::cout << "Finished reading " << file_name << std::endl;
+    return true;
+}
+
+/**
+ * Parse a globalData field and fill in m_globalData.
+ * Requires ambientCoeff/diffuseCoeff/specularCoeff; transparentCoeff is
+ * optional. Returns false (with a message on stdout) on unknown fields,
+ * missing fields, or non-numeric values.
+ */
+bool ScenefileReader::parseGlobalData(const QJsonObject &globalData) {
+    QStringList requiredFields = {"ambientCoeff", "diffuseCoeff", "specularCoeff"};
+    QStringList optionalFields = {"transparentCoeff"};
+    QStringList allFields = requiredFields + optionalFields;
+    for (auto field : globalData.keys()) {
+        if (!allFields.contains(field)) {
+            std::cout << "unknown field \"" << field.toStdString() << "\" on globalData object" << std::endl;
+            return false;
+        }
+    }
+    for (auto field : requiredFields) {
+        if (!globalData.contains(field)) {
+            std::cout << "missing required field \"" << field.toStdString() << "\" on globalData object" << std::endl;
+            return false;
+        }
+    }
+
+    // Parse the global data
+    if (globalData["ambientCoeff"].isDouble()) {
+        m_globalData.ka = globalData["ambientCoeff"].toDouble();
+    }
+    else {
+        std::cout << "globalData ambientCoeff must be a floating-point value" << std::endl;
+        return false;
+    }
+    if (globalData["diffuseCoeff"].isDouble()) {
+        m_globalData.kd = globalData["diffuseCoeff"].toDouble();
+    }
+    else {
+        std::cout << "globalData diffuseCoeff must be a floating-point value" << std::endl;
+        return false;
+    }
+    if (globalData["specularCoeff"].isDouble()) {
+        m_globalData.ks = globalData["specularCoeff"].toDouble();
+    }
+    else {
+        std::cout << "globalData specularCoeff must be a floating-point value" << std::endl;
+        return false;
+    }
+    // transparentCoeff is optional; kt keeps its memset value (0) if absent
+    if (globalData.contains("transparentCoeff")) {
+        if (globalData["transparentCoeff"].isDouble()) {
+            m_globalData.kt = globalData["transparentCoeff"].toDouble();
+        }
+        else {
+            std::cout << "globalData transparentCoeff must be a floating-point value" << std::endl;
+            return false;
+        }
+    }
+
+    return true;
+}
+
+/**
+ * Parse a Light and add a new CS123SceneLightData to m_lights.
+ * Validates the common fields, then branches on the light "type"
+ * (directional / point / spot / area) to fill in the type-specific fields.
+ * The light is appended to node->lights even on failure; the reader's
+ * destructor is responsible for freeing it.
+ * Returns false (with a message on stdout) on any validation failure.
+ */
+bool ScenefileReader::parseLightData(const QJsonObject &lightData, SceneNode *node) {
+    QStringList requiredFields = {"type", "color"};
+    QStringList optionalFields = {"name", "attenuationCoeff", "direction", "penumbra", "angle", "width", "height"};
+    QStringList allFields = requiredFields + optionalFields;
+    for (auto &field : lightData.keys()) {
+        if (!allFields.contains(field)) {
+            std::cout << "unknown field \"" << field.toStdString() << "\" on light object" << std::endl;
+            return false;
+        }
+    }
+    for (auto &field : requiredFields) {
+        if (!lightData.contains(field)) {
+            std::cout << "missing required field \"" << field.toStdString() << "\" on light object" << std::endl;
+            return false;
+        }
+    }
+
+    // Create a default light
+    SceneLight *light = new SceneLight();
+    memset(light, 0, sizeof(SceneLight));
+    node->lights.push_back(light);
+
+    light->dir = glm::vec4(0.f, 0.f, 0.f, 0.f);
+    light->function = glm::vec3(1, 0, 0);
+
+    // parse the color
+    if (!lightData["color"].isArray()) {
+        std::cout << "light color must be of type array" << std::endl;
+        return false;
+    }
+    QJsonArray colorArray = lightData["color"].toArray();
+    if (colorArray.size() != 3) {
+        std::cout << "light color must be of size 3" << std::endl;
+        return false;
+    }
+    if (!colorArray[0].isDouble() || !colorArray[1].isDouble() || !colorArray[2].isDouble()) {
+        std::cout << "light color must contain floating-point values" << std::endl;
+        return false;
+    }
+    light->color.r = colorArray[0].toDouble();
+    light->color.g = colorArray[1].toDouble();
+    light->color.b = colorArray[2].toDouble();
+
+    // parse the type
+    if (!lightData["type"].isString()) {
+        std::cout << "light type must be of type string" << std::endl;
+        return false;
+    }
+    std::string lightType = lightData["type"].toString().toStdString();
+
+    // parse directional light
+    if (lightType == "directional") {
+        light->type = LightType::LIGHT_DIRECTIONAL;
+
+        // parse direction
+        if (!lightData.contains("direction")) {
+            std::cout << "directional light must contain field \"direction\"" << std::endl;
+            return false;
+        }
+        if (!lightData["direction"].isArray()) {
+            std::cout << "directional light direction must be of type array" << std::endl;
+            return false;
+        }
+        QJsonArray directionArray = lightData["direction"].toArray();
+        if (directionArray.size() != 3) {
+            std::cout << "directional light direction must be of size 3" << std::endl;
+            return false;
+        }
+        if (!directionArray[0].isDouble() || !directionArray[1].isDouble() || !directionArray[2].isDouble()) {
+            std::cout << "directional light direction must contain floating-point values" << std::endl;
+            return false;
+        }
+        light->dir.x = directionArray[0].toDouble();
+        light->dir.y = directionArray[1].toDouble();
+        light->dir.z = directionArray[2].toDouble();
+    }
+    else if (lightType == "point") {
+        light->type = LightType::LIGHT_POINT;
+
+        // parse the attenuation coefficient
+        if (!lightData.contains("attenuationCoeff")) {
+            std::cout << "point light must contain field \"attenuationCoeff\"" << std::endl;
+            return false;
+        }
+        if (!lightData["attenuationCoeff"].isArray()) {
+            std::cout << "point light attenuationCoeff must be of type array" << std::endl;
+            return false;
+        }
+        QJsonArray attenuationArray = lightData["attenuationCoeff"].toArray();
+        if (attenuationArray.size() != 3) {
+            std::cout << "point light attenuationCoeff must be of size 3" << std::endl;
+            return false;
+        }
+        if (!attenuationArray[0].isDouble() || !attenuationArray[1].isDouble() || !attenuationArray[2].isDouble()) {
+            // BUGFIX: message said "ppoint"
+            std::cout << "point light attenuationCoeff must contain floating-point values" << std::endl;
+            return false;
+        }
+        light->function.x = attenuationArray[0].toDouble();
+        light->function.y = attenuationArray[1].toDouble();
+        light->function.z = attenuationArray[2].toDouble();
+    }
+    else if (lightType == "spot") {
+        QStringList pointRequiredFields = {"direction", "penumbra", "angle", "attenuationCoeff"};
+        for (auto &field : pointRequiredFields) {
+            if (!lightData.contains(field)) {
+                std::cout << "missing required field \"" << field.toStdString() << "\" on spotlight object" << std::endl;
+                return false;
+            }
+        }
+        light->type = LightType::LIGHT_SPOT;
+
+        // parse direction
+        if (!lightData["direction"].isArray()) {
+            std::cout << "spotlight direction must be of type array" << std::endl;
+            return false;
+        }
+        QJsonArray directionArray = lightData["direction"].toArray();
+        if (directionArray.size() != 3) {
+            std::cout << "spotlight direction must be of size 3" << std::endl;
+            return false;
+        }
+        if (!directionArray[0].isDouble() || !directionArray[1].isDouble() || !directionArray[2].isDouble()) {
+            std::cout << "spotlight direction must contain floating-point values" << std::endl;
+            return false;
+        }
+        light->dir.x = directionArray[0].toDouble();
+        light->dir.y = directionArray[1].toDouble();
+        light->dir.z = directionArray[2].toDouble();
+
+        // parse attenuation coefficient
+        if (!lightData["attenuationCoeff"].isArray()) {
+            std::cout << "spotlight attenuationCoeff must be of type array" << std::endl;
+            return false;
+        }
+        QJsonArray attenuationArray = lightData["attenuationCoeff"].toArray();
+        if (attenuationArray.size() != 3) {
+            std::cout << "spotlight attenuationCoeff must be of size 3" << std::endl;
+            return false;
+        }
+        if (!attenuationArray[0].isDouble() || !attenuationArray[1].isDouble() || !attenuationArray[2].isDouble()) {
+            // BUGFIX: message wrongly referred to the spotlight "direction"
+            std::cout << "spotlight attenuationCoeff must contain floating-point values" << std::endl;
+            return false;
+        }
+        light->function.x = attenuationArray[0].toDouble();
+        light->function.y = attenuationArray[1].toDouble();
+        light->function.z = attenuationArray[2].toDouble();
+
+        // parse penumbra (degrees in the file, stored in radians)
+        if (!lightData["penumbra"].isDouble()) {
+            std::cout << "spotlight penumbra must be of type float" << std::endl;
+            return false;
+        }
+        light->penumbra = lightData["penumbra"].toDouble() * M_PI / 180.f;
+
+        // parse angle (degrees in the file, stored in radians)
+        if (!lightData["angle"].isDouble()) {
+            std::cout << "spotlight angle must be of type float" << std::endl;
+            return false;
+        }
+        light->angle = lightData["angle"].toDouble() * M_PI / 180.f;
+    }
+    else if (lightType == "area") {
+        light->type = LightType::LIGHT_AREA;
+
+        QStringList pointRequiredFields = {"width", "height"};
+        for (auto &field : pointRequiredFields) {
+            if (!lightData.contains(field)) {
+                std::cout << "missing required field \"" << field.toStdString() << "\" on area light object" << std::endl;
+                return false;
+            }
+        }
+
+        // parse width
+        if (!lightData["width"].isDouble()) {
+            // BUGFIX: message wrongly said "penumbra"
+            std::cout << "arealight width must be of type float" << std::endl;
+            return false;
+        }
+        light->width = lightData["width"].toDouble();
+
+        // parse height
+        if (!lightData["height"].isDouble()) {
+            std::cout << "arealight height must be of type float" << std::endl;
+            return false;
+        }
+        light->height = lightData["height"].toDouble();
+
+        // parse the attenuation coefficient
+        if (!lightData.contains("attenuationCoeff")) {
+            std::cout << "area light must contain field \"attenuationCoeff\"" << std::endl;
+            return false;
+        }
+        if (!lightData["attenuationCoeff"].isArray()) {
+            std::cout << "area light attenuationCoeff must be of type array" << std::endl;
+            return false;
+        }
+        QJsonArray attenuationArray = lightData["attenuationCoeff"].toArray();
+        if (attenuationArray.size() != 3) {
+            std::cout << "area light attenuationCoeff must be of size 3" << std::endl;
+            return false;
+        }
+        if (!attenuationArray[0].isDouble() || !attenuationArray[1].isDouble() || !attenuationArray[2].isDouble()) {
+            std::cout << "area light attenuationCoeff must contain floating-point values" << std::endl;
+            return false;
+        }
+        light->function.x = attenuationArray[0].toDouble();
+        light->function.y = attenuationArray[1].toDouble();
+        light->function.z = attenuationArray[2].toDouble();
+    }
+    else {
+        std::cout << "unknown light type \"" << lightType << "\"" << std::endl;
+        return false;
+    }
+
+    return true;
+}
+
+/**
+ * Parse cameraData and fill in m_cameraData.
+ *
+ * Required fields: "position", "up", "heightAngle" (degrees, converted to
+ * radians). Optional: "aperture", "focalLength", and exactly one of "look"
+ * or "focus" (a focus point is converted into a look vector at the end).
+ * Returns false (after printing a diagnostic) on any malformed field.
+ */
+bool ScenefileReader::parseCameraData(const QJsonObject &cameradata) {
+    QStringList requiredFields = {"position", "up", "heightAngle"};
+    QStringList optionalFields = {"aperture", "focalLength", "look", "focus"};
+    QStringList allFields = requiredFields + optionalFields;
+    for (auto &field : cameradata.keys()) {
+        if (!allFields.contains(field)) {
+            std::cout << "unknown field \"" << field.toStdString() << "\" on cameraData object" << std::endl;
+            return false;
+        }
+    }
+    for (auto &field : requiredFields) {
+        if (!cameradata.contains(field)) {
+            std::cout << "missing required field \"" << field.toStdString() << "\" on cameraData object" << std::endl;
+            return false;
+        }
+    }
+
+    // Must have either look or focus, but not both
+    if (cameradata.contains("look") && cameradata.contains("focus")) {
+        std::cout << "cameraData cannot contain both \"look\" and \"focus\"" << std::endl;
+        return false;
+    }
+    // FIX: a scene with *neither* field was previously accepted, leaving
+    // m_cameraData.look unset; enforce the documented either/or contract.
+    if (!cameradata.contains("look") && !cameradata.contains("focus")) {
+        std::cout << "cameraData must contain either \"look\" or \"focus\"" << std::endl;
+        return false;
+    }
+
+    // Parse the camera data
+    if (cameradata["position"].isArray()) {
+        QJsonArray position = cameradata["position"].toArray();
+        if (position.size() != 3) {
+            std::cout << "cameraData position must have 3 elements" << std::endl;
+            return false;
+        }
+        if (!position[0].isDouble() || !position[1].isDouble() || !position[2].isDouble()) {
+            std::cout << "cameraData position must be a floating-point value" << std::endl;
+            return false;
+        }
+        m_cameraData.pos.x = position[0].toDouble();
+        m_cameraData.pos.y = position[1].toDouble();
+        m_cameraData.pos.z = position[2].toDouble();
+    }
+    else {
+        std::cout << "cameraData position must be an array" << std::endl;
+        return false;
+    }
+
+    if (cameradata["up"].isArray()) {
+        QJsonArray up = cameradata["up"].toArray();
+        if (up.size() != 3) {
+            std::cout << "cameraData up must have 3 elements" << std::endl;
+            return false;
+        }
+        if (!up[0].isDouble() || !up[1].isDouble() || !up[2].isDouble()) {
+            std::cout << "cameraData up must be a floating-point value" << std::endl;
+            return false;
+        }
+        m_cameraData.up.x = up[0].toDouble();
+        m_cameraData.up.y = up[1].toDouble();
+        m_cameraData.up.z = up[2].toDouble();
+    }
+    else {
+        std::cout << "cameraData up must be an array" << std::endl;
+        return false;
+    }
+
+    // heightAngle is given in degrees; stored in radians.
+    if (cameradata["heightAngle"].isDouble()) {
+        m_cameraData.heightAngle = cameradata["heightAngle"].toDouble() * M_PI / 180.f;
+    }
+    else {
+        std::cout << "cameraData heightAngle must be a floating-point value" << std::endl;
+        return false;
+    }
+
+    if (cameradata.contains("aperture")) {
+        if (cameradata["aperture"].isDouble()) {
+            m_cameraData.aperture = cameradata["aperture"].toDouble();
+        }
+        else {
+            std::cout << "cameraData aperture must be a floating-point value" << std::endl;
+            return false;
+        }
+    }
+
+    if (cameradata.contains("focalLength")) {
+        if (cameradata["focalLength"].isDouble()) {
+            m_cameraData.focalLength = cameradata["focalLength"].toDouble();
+        }
+        else {
+            std::cout << "cameraData focalLength must be a floating-point value" << std::endl;
+            return false;
+        }
+    }
+
+    // Parse the look or focus
+    // if the focus is specified, we will convert it to a look vector later
+    if (cameradata.contains("look")) {
+        if (cameradata["look"].isArray()) {
+            QJsonArray look = cameradata["look"].toArray();
+            if (look.size() != 3) {
+                std::cout << "cameraData look must have 3 elements" << std::endl;
+                return false;
+            }
+            if (!look[0].isDouble() || !look[1].isDouble() || !look[2].isDouble()) {
+                std::cout << "cameraData look must be a floating-point value" << std::endl;
+                return false;
+            }
+            m_cameraData.look.x = look[0].toDouble();
+            m_cameraData.look.y = look[1].toDouble();
+            m_cameraData.look.z = look[2].toDouble();
+        }
+        else {
+            std::cout << "cameraData look must be an array" << std::endl;
+            return false;
+        }
+    }
+    else if (cameradata.contains("focus")) {
+        if (cameradata["focus"].isArray()) {
+            QJsonArray focus = cameradata["focus"].toArray();
+            if (focus.size() != 3) {
+                std::cout << "cameraData focus must have 3 elements" << std::endl;
+                return false;
+            }
+            if (!focus[0].isDouble() || !focus[1].isDouble() || !focus[2].isDouble()) {
+                std::cout << "cameraData focus must be a floating-point value" << std::endl;
+                return false;
+            }
+            // Temporarily store the focus *point* in the look vector.
+            m_cameraData.look.x = focus[0].toDouble();
+            m_cameraData.look.y = focus[1].toDouble();
+            m_cameraData.look.z = focus[2].toDouble();
+        }
+        else {
+            std::cout << "cameraData focus must be an array" << std::endl;
+            return false;
+        }
+    }
+
+    // Convert the focus point (stored in the look vector) into a
+    // look vector from the camera position to that focus point.
+    if (cameradata.contains("focus")) {
+        m_cameraData.look -= m_cameraData.pos;
+    }
+
+    return true;
+}
+
+/**
+ * Parse the top-level "templateGroups" array; every entry must be an
+ * object describing one reusable named group.
+ */
+bool ScenefileReader::parseTemplateGroups(const QJsonValue &templateGroups) {
+    if (!templateGroups.isArray()) {
+        std::cout << "templateGroups must be an array" << std::endl;
+        return false;
+    }
+
+    const QJsonArray entries = templateGroups.toArray();
+    for (const auto &entry : entries) {
+        if (!entry.isObject()) {
+            std::cout << "templateGroup items must be of type object" << std::endl;
+            return false;
+        }
+        // Delegate the per-group parsing; abort on the first failure.
+        if (!parseTemplateGroupData(entry.toObject())) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+/**
+ * Parse one template-group object: validate its fields, register a fresh
+ * SceneNode under the group's name in m_templates, then parse the group
+ * body into that node via parseGroupData.
+ */
+bool ScenefileReader::parseTemplateGroupData(const QJsonObject &templateGroup) {
+    QStringList requiredFields = {"name"};
+    QStringList optionalFields = {"translate", "rotate", "scale", "matrix", "lights", "primitives", "groups"};
+    QStringList allFields = requiredFields + optionalFields;
+    for (auto &field : templateGroup.keys()) {
+        if (!allFields.contains(field)) {
+            std::cout << "unknown field \"" << field.toStdString() << "\" on templateGroup object" << std::endl;
+            return false;
+        }
+    }
+
+    for (auto &field : requiredFields) {
+        if (!templateGroup.contains(field)) {
+            std::cout << "missing required field \"" << field.toStdString() << "\" on templateGroup object" << std::endl;
+            return false;
+        }
+    }
+
+    // FIX: both checks below previously printed a diagnostic but fell
+    // through, so a bad name was still used and a duplicate silently
+    // overwrote the earlier template of the same name.
+    if (!templateGroup["name"].isString()) {
+        std::cout << "templateGroup name must be a string" << std::endl;
+        return false;
+    }
+    if (m_templates.contains(templateGroup["name"].toString().toStdString())) {
+        std::cout << "templateGroups cannot have the same name" << std::endl;
+        return false;
+    }
+
+    // m_nodes owns the node; m_templates only references it by name.
+    SceneNode *templateNode = new SceneNode;
+    m_nodes.push_back(templateNode);
+    m_templates[templateGroup["name"].toString().toStdString()] = templateNode;
+
+    return parseGroupData(templateGroup, templateNode);
+}
+
+/**
+ * Parse a group object and create a new CS123SceneNode in m_nodes.
+ * NAME OF NODE CANNOT REFERENCE TEMPLATE NODE
+ *
+ * Fills `node` with the group's transformations (kept in declaration
+ * order), lights, primitives, and recursively parsed child groups.
+ */
+bool ScenefileReader::parseGroupData(const QJsonObject &object, SceneNode *node) {
+    QStringList optionalFields = {"name", "translate", "rotate", "scale", "matrix", "lights", "primitives", "groups"};
+    QStringList allFields = optionalFields;
+    for (auto &field : object.keys()) {
+        if (!allFields.contains(field)) {
+            std::cout << "unknown field \"" << field.toStdString() << "\" on group object" << std::endl;
+            return false;
+        }
+    }
+
+    // parse translation if defined
+    if (object.contains("translate")) {
+        if (!object["translate"].isArray()) {
+            std::cout << "group translate must be of type array" << std::endl;
+            return false;
+        }
+
+        QJsonArray translateArray = object["translate"].toArray();
+        if (translateArray.size() != 3) {
+            std::cout << "group translate must have 3 elements" << std::endl;
+            return false;
+        }
+        if (!translateArray[0].isDouble() || !translateArray[1].isDouble() || !translateArray[2].isDouble()) {
+            std::cout << "group translate must contain floating-point values" << std::endl;
+            return false;
+        }
+
+        // Node takes ownership of the transformation.
+        SceneTransformation *translation = new SceneTransformation();
+        translation->type = TransformationType::TRANSFORMATION_TRANSLATE;
+        translation->translate.x = translateArray[0].toDouble();
+        translation->translate.y = translateArray[1].toDouble();
+        translation->translate.z = translateArray[2].toDouble();
+
+        node->transformations.push_back(translation);
+    }
+
+    // parse rotation if defined
+    if (object.contains("rotate")) {
+        if (!object["rotate"].isArray()) {
+            std::cout << "group rotate must be of type array" << std::endl;
+            return false;
+        }
+
+        QJsonArray rotateArray = object["rotate"].toArray();
+        if (rotateArray.size() != 4) {
+            std::cout << "group rotate must have 4 elements" << std::endl;
+            return false;
+        }
+        if (!rotateArray[0].isDouble() || !rotateArray[1].isDouble() || !rotateArray[2].isDouble() || !rotateArray[3].isDouble()) {
+            std::cout << "group rotate must contain floating-point values" << std::endl;
+            return false;
+        }
+
+        // Axis (x, y, z) plus angle in degrees, stored in radians.
+        SceneTransformation *rotation = new SceneTransformation();
+        rotation->type = TransformationType::TRANSFORMATION_ROTATE;
+        rotation->rotate.x = rotateArray[0].toDouble();
+        rotation->rotate.y = rotateArray[1].toDouble();
+        rotation->rotate.z = rotateArray[2].toDouble();
+        rotation->angle = rotateArray[3].toDouble() * M_PI / 180.f;
+
+        node->transformations.push_back(rotation);
+    }
+
+    // parse scale if defined
+    if (object.contains("scale")) {
+        if (!object["scale"].isArray()) {
+            std::cout << "group scale must be of type array" << std::endl;
+            return false;
+        }
+
+        QJsonArray scaleArray = object["scale"].toArray();
+        if (scaleArray.size() != 3) {
+            std::cout << "group scale must have 3 elements" << std::endl;
+            return false;
+        }
+        if (!scaleArray[0].isDouble() || !scaleArray[1].isDouble() || !scaleArray[2].isDouble()) {
+            std::cout << "group scale must contain floating-point values" << std::endl;
+            return false;
+        }
+
+        SceneTransformation *scale = new SceneTransformation();
+        scale->type = TransformationType::TRANSFORMATION_SCALE;
+        scale->scale.x = scaleArray[0].toDouble();
+        scale->scale.y = scaleArray[1].toDouble();
+        scale->scale.z = scaleArray[2].toDouble();
+
+        node->transformations.push_back(scale);
+    }
+
+    // parse matrix if defined
+    if (object.contains("matrix")) {
+        if (!object["matrix"].isArray()) {
+            std::cout << "group matrix must be of type array of array" << std::endl;
+            return false;
+        }
+
+        QJsonArray matrixArray = object["matrix"].toArray();
+        if (matrixArray.size() != 4) {
+            std::cout << "group matrix must be 4x4" << std::endl;
+            return false;
+        }
+
+        // FIX: validate and fill a local matrix first. The original code
+        // allocated the SceneTransformation before validating the rows and
+        // leaked it on every malformed-matrix early return.
+        glm::mat4 parsedMatrix(1.f);
+        float *matrixPtr = glm::value_ptr(parsedMatrix);
+        int rowIndex = 0;
+        for (auto row : matrixArray) {
+            if (!row.isArray()) {
+                std::cout << "group matrix must be of type array of array" << std::endl;
+                return false;
+            }
+
+            QJsonArray rowArray = row.toArray();
+            if (rowArray.size() != 4) {
+                std::cout << "group matrix must be 4x4" << std::endl;
+                return false;
+            }
+
+            int colIndex = 0;
+            for (auto val : rowArray) {
+                if (!val.isDouble()) {
+                    std::cout << "group matrix must contain all floating-point values" << std::endl;
+                    return false;
+                }
+
+                // fill in column-wise: glm matrices are column-major, the
+                // JSON rows are row-major.
+                matrixPtr[colIndex * 4 + rowIndex] = (float)val.toDouble();
+                colIndex++;
+            }
+            rowIndex++;
+        }
+
+        SceneTransformation *matrixTransformation = new SceneTransformation();
+        matrixTransformation->type = TransformationType::TRANSFORMATION_MATRIX;
+        matrixTransformation->matrix = parsedMatrix;
+
+        node->transformations.push_back(matrixTransformation);
+    }
+
+    // parse lights if any
+    if (object.contains("lights")) {
+        if (!object["lights"].isArray()) {
+            std::cout << "group lights must be of type array" << std::endl;
+            return false;
+        }
+        QJsonArray lightsArray = object["lights"].toArray();
+        for (auto light : lightsArray) {
+            if (!light.isObject()) {
+                std::cout << "light must be of type object" << std::endl;
+                return false;
+            }
+
+            if (!parseLightData(light.toObject(), node)) {
+                return false;
+            }
+        }
+    }
+
+    // parse primitives if any
+    if (object.contains("primitives")) {
+        if (!object["primitives"].isArray()) {
+            std::cout << "group primitives must be of type array" << std::endl;
+            return false;
+        }
+        QJsonArray primitivesArray = object["primitives"].toArray();
+        for (auto primitive : primitivesArray) {
+            if (!primitive.isObject()) {
+                std::cout << "primitive must be of type object" << std::endl;
+                return false;
+            }
+
+            if (!parsePrimitive(primitive.toObject(), node)) {
+                return false;
+            }
+        }
+    }
+
+    // parse children groups if any
+    if (object.contains("groups")) {
+        if (!parseGroups(object["groups"], node)) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+/**
+ * Parse a "groups" JSON value: each entry is either a reference to a
+ * previously declared template group (matched by name) or an inline group
+ * that becomes a fresh child node of `parent`.
+ */
+bool ScenefileReader::parseGroups(const QJsonValue &groups, SceneNode *parent) {
+    if (!groups.isArray()) {
+        std::cout << "groups must be of type array" << std::endl;
+        return false;
+    }
+
+    const QJsonArray groupEntries = groups.toArray();
+    for (const auto &entry : groupEntries) {
+        if (!entry.isObject()) {
+            std::cout << "group items must be of type object" << std::endl;
+            return false;
+        }
+
+        QJsonObject groupData = entry.toObject();
+        if (groupData.contains("name")) {
+            if (!groupData["name"].isString()) {
+                std::cout << "group name must be of type string" << std::endl;
+                return false;
+            }
+
+            // A name matching a template group is a reference: attach that
+            // shared subtree instead of building a new node.
+            const std::string groupName = groupData["name"].toString().toStdString();
+            auto it = m_templates.find(groupName);
+            if (it != m_templates.end()) {
+                parent->children.push_back(it->second);
+                continue;
+            }
+        }
+
+        // Ordinary group: allocate a node (owned by m_nodes), link it under
+        // the parent, and parse its contents.
+        SceneNode *child = new SceneNode;
+        m_nodes.push_back(child);
+        parent->children.push_back(child);
+
+        if (!parseGroupData(groupData, child)) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+/**
+ * Parse an <object type="primitive"> tag into node.
+ *
+ * Builds a ScenePrimitive (owned by `node`) with its material: optional
+ * color channels (ambient/diffuse/specular/reflective/transparent),
+ * scalar properties (shininess/ior/blend), and texture/bump maps whose
+ * file paths are resolved relative to the scene file's grandparent dir.
+ */
+bool ScenefileReader::parsePrimitive(const QJsonObject &prim, SceneNode *node) {
+    QStringList requiredFields = {"type"};
+    QStringList optionalFields = {
+        "meshFile", "ambient", "diffuse", "specular", "reflective", "transparent", "shininess", "ior",
+        "blend", "textureFile", "textureU", "textureV", "bumpMapFile", "bumpMapU", "bumpMapV"};
+
+    QStringList allFields = requiredFields + optionalFields;
+    for (auto field : prim.keys()) {
+        if (!allFields.contains(field)) {
+            std::cout << "unknown field \"" << field.toStdString() << "\" on primitive object" << std::endl;
+            return false;
+        }
+    }
+    for (auto field : requiredFields) {
+        if (!prim.contains(field)) {
+            std::cout << "missing required field \"" << field.toStdString() << "\" on primitive object" << std::endl;
+            return false;
+        }
+    }
+
+    if (!prim["type"].isString()) {
+        std::cout << "primitive type must be of type string" << std::endl;
+        return false;
+    }
+    std::string primType = prim["type"].toString().toStdString();
+
+    // Default primitive; the node owns it (pushed before parsing so the
+    // reader's node cleanup also reclaims partially parsed primitives).
+    ScenePrimitive *primitive = new ScenePrimitive();
+    SceneMaterial &mat = primitive->material;
+    mat.clear();
+    primitive->type = PrimitiveType::PRIMITIVE_CUBE;
+    mat.textureMap.isUsed = false;
+    mat.bumpMap.isUsed = false;
+    mat.cDiffuse.r = mat.cDiffuse.g = mat.cDiffuse.b = 1;
+    node->primitives.push_back(primitive);
+
+    std::filesystem::path basepath = std::filesystem::path(file_name).parent_path().parent_path();
+    if (primType == "sphere")
+        primitive->type = PrimitiveType::PRIMITIVE_SPHERE;
+    else if (primType == "cube")
+        primitive->type = PrimitiveType::PRIMITIVE_CUBE;
+    else if (primType == "cylinder")
+        primitive->type = PrimitiveType::PRIMITIVE_CYLINDER;
+    else if (primType == "cone")
+        primitive->type = PrimitiveType::PRIMITIVE_CONE;
+    else if (primType == "mesh") {
+        primitive->type = PrimitiveType::PRIMITIVE_MESH;
+        if (!prim.contains("meshFile")) {
+            std::cout << "primitive type mesh must contain field meshFile" << std::endl;
+            return false;
+        }
+        if (!prim["meshFile"].isString()) {
+            std::cout << "primitive meshFile must be of type string" << std::endl;
+            return false;
+        }
+
+        std::filesystem::path relativePath(prim["meshFile"].toString().toStdString());
+        primitive->meshfile = (basepath / relativePath).string();
+    }
+    else {
+        std::cout << "unknown primitive type \"" << primType << "\"" << std::endl;
+        return false;
+    }
+
+    // Helper: parse an optional 3-element color array field into `target`.
+    // Returns true if the field is absent or valid; false (with the same
+    // diagnostics the per-channel code used to print) otherwise.
+    auto parseColor = [&prim](const QString &key, auto &target) -> bool {
+        if (!prim.contains(key))
+            return true;
+        if (!prim[key].isArray()) {
+            std::cout << "primitive " << key.toStdString() << " must be of type array" << std::endl;
+            return false;
+        }
+        QJsonArray channelArray = prim[key].toArray();
+        if (channelArray.size() != 3) {
+            std::cout << "primitive " << key.toStdString() << " array must be of size 3" << std::endl;
+            return false;
+        }
+        for (int i = 0; i < 3; i++) {
+            if (!channelArray[i].isDouble()) {
+                std::cout << "primitive " << key.toStdString() << " must contain floating-point values" << std::endl;
+                return false;
+            }
+            target[i] = channelArray[i].toDouble();
+        }
+        return true;
+    };
+
+    // Helper: parse an optional scalar float field into `target`.
+    auto parseScalar = [&prim](const QString &key, float &target) -> bool {
+        if (!prim.contains(key))
+            return true;
+        if (!prim[key].isDouble()) {
+            std::cout << "primitive " << key.toStdString() << " must be of type float" << std::endl;
+            return false;
+        }
+        target = (float)prim[key].toDouble();
+        return true;
+    };
+
+    // The five color channels and three scalars share identical validation;
+    // previously this was eight near-identical copy-pasted blocks.
+    if (!parseColor("ambient", mat.cAmbient) ||
+        !parseColor("diffuse", mat.cDiffuse) ||
+        !parseColor("specular", mat.cSpecular) ||
+        !parseColor("reflective", mat.cReflective) ||
+        !parseColor("transparent", mat.cTransparent)) {
+        return false;
+    }
+    if (!parseScalar("shininess", mat.shininess) ||
+        !parseScalar("ior", mat.ior) ||
+        !parseScalar("blend", mat.blend)) {
+        return false;
+    }
+
+    if (prim.contains("textureFile")) {
+        if (!prim["textureFile"].isString()) {
+            std::cout << "primitive textureFile must be of type string" << std::endl;
+            return false;
+        }
+        std::filesystem::path fileRelativePath(prim["textureFile"].toString().toStdString());
+
+        // Repeat factors default to 1 when absent or non-numeric.
+        mat.textureMap.filename = (basepath / fileRelativePath).string();
+        mat.textureMap.repeatU = prim.contains("textureU") && prim["textureU"].isDouble() ? prim["textureU"].toDouble() : 1;
+        mat.textureMap.repeatV = prim.contains("textureV") && prim["textureV"].isDouble() ? prim["textureV"].toDouble() : 1;
+        mat.textureMap.isUsed = true;
+    }
+
+    if (prim.contains("bumpMapFile")) {
+        if (!prim["bumpMapFile"].isString()) {
+            std::cout << "primitive bumpMapFile must be of type string" << std::endl;
+            return false;
+        }
+        std::filesystem::path fileRelativePath(prim["bumpMapFile"].toString().toStdString());
+
+        mat.bumpMap.filename = (basepath / fileRelativePath).string();
+        mat.bumpMap.repeatU = prim.contains("bumpMapU") && prim["bumpMapU"].isDouble() ? prim["bumpMapU"].toDouble() : 1;
+        mat.bumpMap.repeatV = prim.contains("bumpMapV") && prim["bumpMapV"].isDouble() ? prim["bumpMapV"].toDouble() : 1;
+        mat.bumpMap.isUsed = true;
+    }
+
+    return true;
+}
diff --git a/src/utils/scenefilereader.h b/src/utils/scenefilereader.h
new file mode 100644
index 0000000..e51f4e5
--- /dev/null
+++ b/src/utils/scenefilereader.h
@@ -0,0 +1,50 @@
+#pragma once
+
+#include "scenedata.h"
+
+#include <vector>
+#include <map>
+
+#include <QJsonDocument>
+#include <QJsonObject>
+
+// This class parses the scene graph specified by the CS123 Xml file format.
+class ScenefileReader {
+public:
+ // Create a ScenefileReader, passing it the scene file.
+ ScenefileReader(const std::string &filename);
+
+ // Clean up all data for the scene
+ ~ScenefileReader();
+
+ // Parse the XML scene file. Returns false if scene is invalid.
+ bool readJSON();
+
+ SceneGlobalData getGlobalData() const;
+
+ SceneCameraData getCameraData() const;
+
+ SceneNode *getRootNode() const;
+
+private:
+ // The filename should be contained within this parser implementation.
+ // If you want to parse a new file, instantiate a different parser.
+ bool parseGlobalData(const QJsonObject &globaldata);
+ bool parseCameraData(const QJsonObject &cameradata);
+ bool parseTemplateGroups(const QJsonValue &templateGroups);
+ bool parseTemplateGroupData(const QJsonObject &templateGroup);
+ bool parseGroups(const QJsonValue &groups, SceneNode *parent);
+ bool parseGroupData(const QJsonObject &object, SceneNode *node);
+ bool parsePrimitive(const QJsonObject &prim, SceneNode *node);
+ bool parseLightData(const QJsonObject &lightData, SceneNode *node);
+
+ std::string file_name;
+
+ mutable std::map<std::string, SceneNode *> m_templates;
+
+ SceneGlobalData m_globalData;
+ SceneCameraData m_cameraData;
+
+ SceneNode *m_root;
+ std::vector<SceneNode *> m_nodes;
+};
diff --git a/src/utils/sceneparser.cpp b/src/utils/sceneparser.cpp
new file mode 100644
index 0000000..74c605a
--- /dev/null
+++ b/src/utils/sceneparser.cpp
@@ -0,0 +1,136 @@
+#include "sceneparser.h"
+#include "scenefilereader.h"
+#include <glm/gtx/transform.hpp>
+#include <QImage>
+#include <iostream>
+
+
+/**
+ * @brief Load an image from disk into a heap-allocated RGBA texel buffer.
+ * @param file: file path to an image
+ * @return TextureData holding the image's width, height and a pointer to
+ *         width*height RGBA texels (caller owns the `new[]` buffer);
+ *         {0, 0, nullptr} if the image cannot be loaded.
+ */
+TextureData loadTextureFromFile(const QString &file) {
+    QImage myTexture;
+    if (!myTexture.load(file)) {
+        std::cout << "Failed to load in image: " << file.toStdString() << std::endl;
+        return TextureData{0, 0, nullptr};
+    }
+    // Normalize to a known 4-bytes-per-pixel layout (R, G, B, X).
+    myTexture = myTexture.convertToFormat(QImage::Format_RGBX8888);
+
+    const int width = myTexture.width();
+    const int height = myTexture.height();
+
+    // Copy the pixels out of the QImage so the texture outlives it.
+    // Indexing bits() directly avoids the intermediate QByteArray and the
+    // signed-char casts of the original, and replaces its floating-point
+    // loop bound (arr.size() / 4.f) with the exact texel count.
+    RGBA *texture = new RGBA[width * height];
+    const uchar *bits = myTexture.bits();
+    for (int i = 0; i < width * height; i++) {
+        texture[i] = RGBA{bits[4 * i], bits[4 * i + 1], bits[4 * i + 2], bits[4 * i + 3]};
+    }
+
+    return TextureData{width, height, texture};
+}
+
+// Helper to handle recursive creation of the flattened scene tree:
+// accumulate this node's transformations into the CTM (in declaration
+// order), emit its primitives and lights, then recurse into children.
+// NOTE(review): textures are allocated with new[] in loadTextureFromFile
+// and never freed here — ownership of TextureData should be audited.
+void initTree(SceneNode* currentNode, std::vector<RenderShapeData> *shapes, std::vector<SceneLightData> *lights, glm::mat4 currentCTM) {
+    for (auto t : currentNode->transformations) {
+        switch (t->type)
+        {
+        case TransformationType::TRANSFORMATION_TRANSLATE:
+            currentCTM *= glm::translate(glm::vec3(t->translate[0], t->translate[1], t->translate[2]));
+            break;
+        case TransformationType::TRANSFORMATION_SCALE:
+            currentCTM *= glm::scale(glm::vec3(t->scale[0], t->scale[1], t->scale[2]));
+            break;
+        case TransformationType::TRANSFORMATION_ROTATE:
+            currentCTM *= glm::rotate(t->angle, glm::vec3(t->rotate[0], t->rotate[1], t->rotate[2]));
+            break;
+        case TransformationType::TRANSFORMATION_MATRIX:
+            currentCTM *= glm::mat4(t->matrix);
+            break;
+        default:
+            std::cout << "Invalid transformation type" << std::endl;
+            break;
+        }
+    }
+
+    for (auto primitive : currentNode->primitives) {
+        // FIX: only hit the filesystem when the material actually references
+        // a texture. Previously every untextured primitive tried to load an
+        // empty filename and logged a spurious "Failed to load" message.
+        if (primitive->material.textureMap.isUsed) {
+            primitive->material.textureData = loadTextureFromFile(QString::fromStdString(primitive->material.textureMap.filename));
+        } else {
+            primitive->material.textureData = TextureData{0, 0, nullptr};
+        }
+        // Cache the inverse CTM alongside the CTM for object-space tests.
+        RenderShapeData rsd = {*primitive, currentCTM, glm::inverse(currentCTM)};
+        shapes->push_back(rsd);
+    }
+
+    // add the lights, transforming position/direction by the CTM as the
+    // light type requires
+    for (auto l : currentNode->lights) {
+        SceneLightData sld{};
+        sld.id = l->id;
+        sld.color = l->color;
+        sld.function = l->function;
+
+        switch (l->type)
+        {
+        case LightType::LIGHT_POINT:
+            sld.type = LightType::LIGHT_POINT;
+            sld.pos = currentCTM * glm::vec4(0.f, 0.f, 0.f, 1.f);
+            sld.dir = glm::vec4(0.0f);
+            break;
+        case LightType::LIGHT_DIRECTIONAL:
+            sld.type = LightType::LIGHT_DIRECTIONAL;
+            sld.pos = glm::vec4(0.0f);
+            sld.dir = glm::vec4(currentCTM * l->dir);
+            break;
+        case LightType::LIGHT_SPOT:
+            sld.type = LightType::LIGHT_SPOT;
+            sld.pos = currentCTM * glm::vec4(0.f, 0.f, 0.f, 1.f);
+            sld.dir = currentCTM * l->dir;
+            sld.penumbra = l->penumbra;
+            sld.angle = l->angle;
+            break;
+        case LightType::LIGHT_AREA:
+            sld.type = LightType::LIGHT_AREA;
+            sld.pos = currentCTM * glm::vec4(0.f, 0.f, 0.f, 1.f);
+            sld.width = l->width;
+            sld.height = l->height;
+            break;
+        default:
+            std::cout << "Invalid light type" << std::endl;
+            continue;
+        }
+
+        lights->push_back(sld);
+    }
+
+    // recurse; each child starts from this node's accumulated CTM
+    for (auto child : currentNode->children) {
+        initTree(child, shapes, lights, currentCTM);
+    }
+
+}
+
+
+/**
+ * Parse the scene file at `filepath` and populate `renderData` with the
+ * global settings, camera data, and the flattened lists of shapes and
+ * lights produced by walking the scene graph.
+ */
+bool SceneParser::parse(std::string filepath, RenderData &renderData) {
+    ScenefileReader reader(filepath);
+    if (!reader.readJSON()) {
+        return false;
+    }
+
+    // Copy over the scene-wide settings.
+    renderData.globalData = reader.getGlobalData();
+    renderData.cameraData = reader.getCameraData();
+
+    // Flatten the scene graph into shape/light lists, starting from the
+    // root with an identity cumulative transformation matrix.
+    renderData.shapes.clear();
+    renderData.lights.clear();
+    initTree(reader.getRootNode(), &renderData.shapes, &renderData.lights, glm::mat4(1.0f));
+
+    return true;
+}
diff --git a/src/utils/sceneparser.h b/src/utils/sceneparser.h
new file mode 100644
index 0000000..699d6fb
--- /dev/null
+++ b/src/utils/sceneparser.h
@@ -0,0 +1,31 @@
+#pragma once
+
+#include "scenedata.h"
+#include <vector>
+#include <string>
+#include "rgba.h"
+
+// Struct which contains data for a single primitive, to be used for rendering
+struct RenderShapeData {
+    ScenePrimitive primitive;
+    glm::mat4 ctm; // the cumulative transformation matrix (object -> world)
+    glm::mat4 inverseCTM; // inverse of ctm, precomputed once at parse time
+};
+
+// Struct which contains all the data needed to render a scene
+struct RenderData {
+    SceneGlobalData globalData;  // scene-wide coefficients/settings
+    SceneCameraData cameraData;  // camera position/orientation/lens data
+
+    // Flattened lists produced by traversing the scene graph.
+    std::vector<SceneLightData> lights;
+    std::vector<RenderShapeData> shapes;
+};
+
+class SceneParser {
+public:
+ // Parse the scene and store the results in renderData.
+ // @param filepath The path of the scene file to load.
+ // @param renderData On return, this will contain the metadata of the loaded scene.
+ // @return A boolean value indicating whether the parse was successful.
+ static bool parse(std::string filepath, RenderData &renderData);
+};