From 12314672f76db478a7c7ed03051350e97f46dc7f Mon Sep 17 00:00:00 2001 From: Lucas Crane Date: Wed, 9 Oct 2019 16:54:52 -0700 Subject: [PATCH] build 0.1.4 (#30) * build 0.1.4 * build from latest master commit --- build/RayTracingRenderer.es5.js | 468 ++++++++++++++++++++++---------- build/RayTracingRenderer.js | 433 ++++++++++++++++++++--------- package.json | 2 +- 3 files changed, 626 insertions(+), 277 deletions(-) diff --git a/build/RayTracingRenderer.es5.js b/build/RayTracingRenderer.es5.js index a7a0296..7f439f9 100644 --- a/build/RayTracingRenderer.es5.js +++ b/build/RayTracingRenderer.es5.js @@ -224,19 +224,15 @@ function (_DirectionalLight) { _inherits(SoftDirectionalLight, _DirectionalLight); - function SoftDirectionalLight() { - var _getPrototypeOf2; - + function SoftDirectionalLight(color, intensity) { var _this; - _classCallCheck(this, SoftDirectionalLight); + var softness = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : 0; - for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) { - args[_key] = arguments[_key]; - } + _classCallCheck(this, SoftDirectionalLight); - _this = _possibleConstructorReturn(this, (_getPrototypeOf2 = _getPrototypeOf(SoftDirectionalLight)).call.apply(_getPrototypeOf2, [this].concat(args))); - _this.softness = 0.0; + _this = _possibleConstructorReturn(this, _getPrototypeOf(SoftDirectionalLight).call(this, color, intensity)); + _this.softness = softness; return _this; } @@ -508,7 +504,7 @@ } } - function vertString (params) { + function vertString () { return "#version 300 es\n\nlayout(location = 0) in vec2 position;\nout vec2 vCoord;\n\nvoid main() {\n vCoord = position;\n gl_Position = vec4(2. * position - 1., 0, 1);\n}\n\n"; } @@ -533,43 +529,43 @@ } // Manually performs linear filtering if the extension OES_texture_float_linear is not supported - function textureLinear (params) { + function textureLinear (defines) { return "\n\n vec4 textureLinear(sampler2D map, vec2 uv) {\n #ifdef OES_texture_float_linear\n return texture(map, uv);\n #else\n vec2 size = vec2(textureSize(map, 0));\n vec2 texelSize = 1.0 / size;\n\n uv = uv * size - 0.5;\n vec2 f = fract(uv);\n uv = floor(uv) + 0.5;\n\n vec4 s1 = texture(map, (uv + vec2(0, 0)) * texelSize);\n vec4 s2 = texture(map, (uv + vec2(1, 0)) * texelSize);\n vec4 s3 = texture(map, (uv + vec2(0, 1)) * texelSize);\n vec4 s4 = texture(map, (uv + vec2(1, 1)) * texelSize);\n\n return mix(mix(s1, s2, f.x), mix(s3, s4, f.x), f.y);\n #endif\n }\n"; } - function intersect (params) { - return "\n\nuniform highp isampler2D indices;\nuniform sampler2D positions;\nuniform sampler2D normals;\nuniform sampler2D uvs;\nuniform sampler2D bvh;\n\nuniform Materials {\n vec4 colorAndMaterialType[NUM_MATERIALS];\n vec4 roughnessMetalnessNormalScale[NUM_MATERIALS];\n\n #if defined(NUM_DIFFUSE_MAPS) || defined(NUM_NORMAL_MAPS) || defined(NUM_PBR_MAPS)\n ivec4 diffuseNormalRoughnessMetalnessMapIndex[NUM_MATERIALS];\n #endif\n\n #if defined(NUM_DIFFUSE_MAPS) || defined(NUM_NORMAL_MAPS)\n vec4 diffuseNormalMapSize[".concat(Math.max(params.NUM_DIFFUSE_MAPS, params.NUM_NORMAL_MAPS), "];\n #endif\n\n #if defined(NUM_PBR_MAPS)\n vec2 pbrMapSize[NUM_PBR_MAPS];\n #endif\n} materials;\n\n#ifdef NUM_DIFFUSE_MAPS\n uniform mediump sampler2DArray diffuseMap;\n#endif\n\n#ifdef NUM_NORMAL_MAPS\n uniform mediump sampler2DArray normalMap;\n#endif\n\n#ifdef NUM_PBR_MAPS\n uniform mediump sampler2DArray pbrMap;\n#endif\n\nstruct Triangle {\n vec3 p0;\n vec3 p1;\n vec3 p2;\n};\n\nvoid 
surfaceInteractionFromIntersection(inout SurfaceInteraction si, Triangle tri, vec3 barycentric, ivec3 index, vec3 faceNormal, int materialIndex) {\n si.hit = true;\n si.faceNormal = faceNormal;\n si.position = barycentric.x * tri.p0 + barycentric.y * tri.p1 + barycentric.z * tri.p2;\n ivec2 i0 = unpackTexel(index.x, VERTEX_COLUMNS);\n ivec2 i1 = unpackTexel(index.y, VERTEX_COLUMNS);\n ivec2 i2 = unpackTexel(index.z, VERTEX_COLUMNS);\n\n vec3 n0 = texelFetch(normals, i0, 0).xyz;\n vec3 n1 = texelFetch(normals, i1, 0).xyz;\n vec3 n2 = texelFetch(normals, i2, 0).xyz;\n si.normal = normalize(barycentric.x * n0 + barycentric.y * n1 + barycentric.z * n2);\n\n si.color = materials.colorAndMaterialType[materialIndex].xyz;\n si.roughness = materials.roughnessMetalnessNormalScale[materialIndex].x;\n si.metalness = materials.roughnessMetalnessNormalScale[materialIndex].y;\n\n si.materialType = int(materials.colorAndMaterialType[materialIndex].w);\n\n #if defined(NUM_DIFFUSE_MAPS) || defined(NUM_NORMAL_MAPS) || defined(NUM_PBR_MAPS)\n vec2 uv0 = texelFetch(uvs, i0, 0).xy;\n vec2 uv1 = texelFetch(uvs, i1, 0).xy;\n vec2 uv2 = texelFetch(uvs, i2, 0).xy;\n vec2 uv = fract(barycentric.x * uv0 + barycentric.y * uv1 + barycentric.z * uv2);\n #endif\n\n #ifdef NUM_DIFFUSE_MAPS\n int diffuseMapIndex = materials.diffuseNormalRoughnessMetalnessMapIndex[materialIndex].x;\n if (diffuseMapIndex >= 0) {\n si.color *= texture(diffuseMap, vec3(uv * materials.diffuseNormalMapSize[diffuseMapIndex].xy, diffuseMapIndex)).rgb;\n }\n #endif\n\n #ifdef NUM_NORMAL_MAPS\n int normalMapIndex = materials.diffuseNormalRoughnessMetalnessMapIndex[materialIndex].y;\n if (normalMapIndex >= 0) {\n vec2 duv02 = uv0 - uv2;\n vec2 duv12 = uv1 - uv2;\n vec3 dp02 = tri.p0 - tri.p2;\n vec3 dp12 = tri.p1 - tri.p2;\n\n // Method One\n // http://www.pbr-book.org/3ed-2018/Shapes/Triangle_Meshes.html#fragment-Computetrianglepartialderivatives-0\n // Compute tangent vectors relative to the face normal. 
These vectors won't necessarily be orthogonal to the smoothed normal\n // This means the TBN matrix won't be orthogonal which is technically incorrect.\n // This is Three.js's method (https://github.com/mrdoob/three.js/blob/dev/src/renderers/shaders/ShaderChunk/normalmap_pars_fragment.glsl.js)\n // --------------\n // float scale = sign(duv02.x * duv12.y - duv02.y * duv12.x);\n // vec3 dpdu = normalize((duv12.y * dp02 - duv02.y * dp12) * scale);\n // vec3 dpdv = normalize((-duv12.x * dp02 + duv02.x * dp12) * scale);\n\n // Method Two\n // Compute tangent vectors as in Method One but apply Gram-Schmidt process to make vectors orthogonal to smooth normal\n // This might inadvertently flip coordinate space orientation\n // --------------\n // float scale = sign(duv02.x * duv12.y - duv02.y * duv12.x);\n // vec3 dpdu = normalize((duv12.y * dp02 - duv02.y * dp12) * scale);\n // dpdu = (dpdu - dot(dpdu, si.normal) * si.normal); // Gram-Schmidt process\n // vec3 dpdv = cross(si.normal, dpdu) * scale;\n\n // Method Three\n // http://www.thetenthplanet.de/archives/1180\n // Compute co-tangent and co-bitangent vectors\n // These vectors are orthongal and maintain a consistent coordinate space\n // --------------\n vec3 dp12perp = cross(dp12, si.normal);\n vec3 dp02perp = cross(si.normal, dp02);\n vec3 dpdu = dp12perp * duv02.x + dp02perp * duv12.x;\n vec3 dpdv = dp12perp * duv02.y + dp02perp * duv12.y;\n float invmax = inversesqrt(max(dot(dpdu, dpdu), dot(dpdv, dpdv)));\n dpdu *= invmax;\n dpdv *= invmax;\n\n vec3 n = 2.0 * texture(normalMap, vec3(uv * materials.diffuseNormalMapSize[normalMapIndex].zw, normalMapIndex)).rgb - 1.0;\n n.xy *= materials.roughnessMetalnessNormalScale[materialIndex].zw;\n\n mat3 tbn = mat3(dpdu, dpdv, si.normal);\n\n si.normal = normalize(tbn * n);\n }\n #endif\n\n #ifdef NUM_PBR_MAPS\n int roughnessMapIndex = materials.diffuseNormalRoughnessMetalnessMapIndex[materialIndex].z;\n int metalnessMapIndex = materials.diffuseNormalRoughnessMetalnessMapIndex[materialIndex].w;\n if (roughnessMapIndex >= 0) {\n si.roughness *= texture(pbrMap, vec3(uv * materials.pbrMapSize[roughnessMapIndex].xy, roughnessMapIndex)).g;\n }\n if (metalnessMapIndex >= 0) {\n si.metalness *= texture(pbrMap, vec3(uv * materials.pbrMapSize[metalnessMapIndex].xy, metalnessMapIndex)).b;\n }\n #endif\n}\n\nstruct TriangleIntersect {\n float t;\n vec3 barycentric;\n};\n\n// Triangle-ray intersection\n// Faster than the classic M\xF6ller\u2013Trumbore intersection algorithm\n// http://www.pbr-book.org/3ed-2018/Shapes/Triangle_Meshes.html#TriangleIntersection\nTriangleIntersect intersectTriangle(Ray r, Triangle tri, int maxDim, vec3 shear) {\n TriangleIntersect ti;\n vec3 d = r.d;\n\n // translate vertices based on ray origin\n vec3 p0t = tri.p0 - r.o;\n vec3 p1t = tri.p1 - r.o;\n vec3 p2t = tri.p2 - r.o;\n\n // permute components of triangle vertices\n if (maxDim == 0) {\n p0t = p0t.yzx;\n p1t = p1t.yzx;\n p2t = p2t.yzx;\n } else if (maxDim == 1) {\n p0t = p0t.zxy;\n p1t = p1t.zxy;\n p2t = p2t.zxy;\n }\n\n // apply shear transformation to translated vertex positions\n p0t.xy += shear.xy * p0t.z;\n p1t.xy += shear.xy * p1t.z;\n p2t.xy += shear.xy * p2t.z;\n\n // compute edge function coefficients\n vec3 e = vec3(\n p1t.x * p2t.y - p1t.y * p2t.x,\n p2t.x * p0t.y - p2t.y * p0t.x,\n p0t.x * p1t.y - p0t.y * p1t.x\n );\n\n // check if intersection is inside triangle\n if (any(lessThan(e, vec3(0))) && any(greaterThan(e, vec3(0)))) {\n return ti;\n }\n\n float det = e.x + e.y + e.z;\n\n // not needed?\n // if (det == 
0.) {\n // return ti;\n // }\n\n p0t.z *= shear.z;\n p1t.z *= shear.z;\n p2t.z *= shear.z;\n float tScaled = (e.x * p0t.z + e.y * p1t.z + e.z * p2t.z);\n\n // not needed?\n // if (sign(det) != sign(tScaled)) {\n // return ti;\n // }\n\n // check if closer intersection already exists\n if (abs(tScaled) > abs(r.tMax * det)) {\n return ti;\n }\n\n float invDet = 1. / det;\n ti.t = tScaled * invDet;\n ti.barycentric = e * invDet;\n\n return ti;\n}\n\nstruct Box {\n vec3 min;\n vec3 max;\n};\n\n// Branchless ray/box intersection\n// https://tavianator.com/fast-branchless-raybounding-box-intersections/\nfloat intersectBox(Ray r, Box b) {\n vec3 tBot = (b.min - r.o) * r.invD;\n vec3 tTop = (b.max - r.o) * r.invD;\n vec3 tNear = min(tBot, tTop);\n vec3 tFar = max(tBot, tTop);\n float t0 = max(tNear.x, max(tNear.y, tNear.z));\n float t1 = min(tFar.x, min(tFar.y, tFar.z));\n\n return (t0 > t1 || t0 > r.tMax) ? -1.0 : (t0 > 0.0 ? t0 : t1);\n}\n\nint maxDimension(vec3 v) {\n return v.x > v.y ? (v.x > v.z ? 0 : 2) : (v.y > v.z ? 1 : 2);\n}\n\n// Traverse BVH, find closest triangle intersection, and return surface information\nSurfaceInteraction intersectScene(inout Ray ray) {\n SurfaceInteraction si;\n\n int maxDim = maxDimension(abs(ray.d));\n\n // Permute space so that the z dimension is the one where the absolute value of the ray's direction is largest.\n // Then create a shear transformation that aligns ray direction with the +z axis\n vec3 shear;\n if (maxDim == 0) {\n shear = vec3(-ray.d.y, -ray.d.z, 1.0) * ray.invD.x;\n } else if (maxDim == 1) {\n shear = vec3(-ray.d.z, -ray.d.x, 1.0) * ray.invD.y;\n } else {\n shear = vec3(-ray.d.x, -ray.d.y, 1.0) * ray.invD.z;\n }\n\n int nodesToVisit[STACK_SIZE];\n int stack = 0;\n\n nodesToVisit[0] = 0;\n\n while(stack >= 0) {\n int i = nodesToVisit[stack--];\n\n vec4 r1 = fetchData(bvh, i, BVH_COLUMNS);\n vec4 r2 = fetchData(bvh, i + 1, BVH_COLUMNS);\n\n int splitAxisOrNumPrimitives = floatBitsToInt(r1.w);\n\n if (splitAxisOrNumPrimitives >= 0) {\n // Intersection is a bounding box. Test for box intersection and keep traversing BVH\n int splitAxis = splitAxisOrNumPrimitives;\n\n Box bbox = Box(r1.xyz, r2.xyz);\n\n if (intersectBox(ray, bbox) > 0.0) {\n // traverse near node to ray first, and far node to ray last\n if (ray.d[splitAxis] > 0.0) {\n nodesToVisit[++stack] = floatBitsToInt(r2.w);\n nodesToVisit[++stack] = i + 2;\n } else {\n nodesToVisit[++stack] = i + 2;\n nodesToVisit[++stack] = floatBitsToInt(r2.w);\n }\n }\n } else {\n ivec3 index = floatBitsToInt(r1.xyz);\n Triangle tri = Triangle(\n fetchData(positions, index.x, VERTEX_COLUMNS).xyz,\n fetchData(positions, index.y, VERTEX_COLUMNS).xyz,\n fetchData(positions, index.z, VERTEX_COLUMNS).xyz\n );\n TriangleIntersect hit = intersectTriangle(ray, tri, maxDim, shear);\n\n if (hit.t > 0.0) {\n ray.tMax = hit.t;\n int materialIndex = floatBitsToInt(r2.w);\n vec3 faceNormal = r2.xyz;\n surfaceInteractionFromIntersection(si, tri, hit.barycentric, index, faceNormal, materialIndex);\n }\n }\n }\n\n // Values must be clamped outside of intersection loop. 
Clamping inside the loop produces incorrect numbers on some devices.\n si.roughness = clamp(si.roughness, 0.03, 1.0);\n si.metalness = clamp(si.metalness, 0.0, 1.0);\n\n return si;\n}\n\nbool intersectSceneShadow(inout Ray ray) {\n int maxDim = maxDimension(abs(ray.d));\n\n // Permute space so that the z dimension is the one where the absolute value of the ray's direction is largest.\n // Then create a shear transformation that aligns ray direction with the +z axis\n vec3 shear;\n if (maxDim == 0) {\n shear = vec3(-ray.d.y, -ray.d.z, 1.0) * ray.invD.x;\n } else if (maxDim == 1) {\n shear = vec3(-ray.d.z, -ray.d.x, 1.0) * ray.invD.y;\n } else {\n shear = vec3(-ray.d.x, -ray.d.y, 1.0) * ray.invD.z;\n }\n\n int nodesToVisit[STACK_SIZE];\n int stack = 0;\n\n nodesToVisit[0] = 0;\n\n while(stack >= 0) {\n int i = nodesToVisit[stack--];\n\n vec4 r1 = fetchData(bvh, i, BVH_COLUMNS);\n vec4 r2 = fetchData(bvh, i + 1, BVH_COLUMNS);\n\n int splitAxisOrNumPrimitives = floatBitsToInt(r1.w);\n\n if (splitAxisOrNumPrimitives >= 0) {\n int splitAxis = splitAxisOrNumPrimitives;\n\n Box bbox = Box(r1.xyz, r2.xyz);\n\n if (intersectBox(ray, bbox) > 0.0) {\n if (ray.d[splitAxis] > 0.0) {\n nodesToVisit[++stack] = floatBitsToInt(r2.w);\n nodesToVisit[++stack] = i + 2;\n } else {\n nodesToVisit[++stack] = i + 2;\n nodesToVisit[++stack] = floatBitsToInt(r2.w);\n }\n }\n } else {\n ivec3 index = floatBitsToInt(r1.xyz);\n Triangle tri = Triangle(\n fetchData(positions, index.x, VERTEX_COLUMNS).xyz,\n fetchData(positions, index.y, VERTEX_COLUMNS).xyz,\n fetchData(positions, index.z, VERTEX_COLUMNS).xyz\n );\n\n if (intersectTriangle(ray, tri, maxDim, shear).t > 0.0) {\n return true;\n }\n }\n }\n\n return false;\n}\n"); + function intersect (defines) { + return "\n\nuniform highp isampler2D indices;\nuniform sampler2D positions;\nuniform sampler2D normals;\nuniform sampler2D uvs;\nuniform sampler2D bvh;\n\nuniform Materials {\n vec4 colorAndMaterialType[NUM_MATERIALS];\n vec4 roughnessMetalnessNormalScale[NUM_MATERIALS];\n\n #if defined(NUM_DIFFUSE_MAPS) || defined(NUM_NORMAL_MAPS) || defined(NUM_PBR_MAPS)\n ivec4 diffuseNormalRoughnessMetalnessMapIndex[NUM_MATERIALS];\n #endif\n\n #if defined(NUM_DIFFUSE_MAPS) || defined(NUM_NORMAL_MAPS)\n vec4 diffuseNormalMapSize[".concat(Math.max(defines.NUM_DIFFUSE_MAPS, defines.NUM_NORMAL_MAPS), "];\n #endif\n\n #if defined(NUM_PBR_MAPS)\n vec2 pbrMapSize[NUM_PBR_MAPS];\n #endif\n} materials;\n\n#ifdef NUM_DIFFUSE_MAPS\n uniform mediump sampler2DArray diffuseMap;\n#endif\n\n#ifdef NUM_NORMAL_MAPS\n uniform mediump sampler2DArray normalMap;\n#endif\n\n#ifdef NUM_PBR_MAPS\n uniform mediump sampler2DArray pbrMap;\n#endif\n\nstruct Triangle {\n vec3 p0;\n vec3 p1;\n vec3 p2;\n};\n\nvoid surfaceInteractionFromIntersection(inout SurfaceInteraction si, Triangle tri, vec3 barycentric, ivec3 index, vec3 faceNormal, int materialIndex) {\n si.hit = true;\n si.faceNormal = faceNormal;\n si.position = barycentric.x * tri.p0 + barycentric.y * tri.p1 + barycentric.z * tri.p2;\n ivec2 i0 = unpackTexel(index.x, VERTEX_COLUMNS);\n ivec2 i1 = unpackTexel(index.y, VERTEX_COLUMNS);\n ivec2 i2 = unpackTexel(index.z, VERTEX_COLUMNS);\n\n vec3 n0 = texelFetch(normals, i0, 0).xyz;\n vec3 n1 = texelFetch(normals, i1, 0).xyz;\n vec3 n2 = texelFetch(normals, i2, 0).xyz;\n si.normal = normalize(barycentric.x * n0 + barycentric.y * n1 + barycentric.z * n2);\n\n si.color = materials.colorAndMaterialType[materialIndex].xyz;\n si.roughness = materials.roughnessMetalnessNormalScale[materialIndex].x;\n 
si.metalness = materials.roughnessMetalnessNormalScale[materialIndex].y;\n\n si.materialType = int(materials.colorAndMaterialType[materialIndex].w);\n\n #if defined(NUM_DIFFUSE_MAPS) || defined(NUM_NORMAL_MAPS) || defined(NUM_PBR_MAPS)\n vec2 uv0 = texelFetch(uvs, i0, 0).xy;\n vec2 uv1 = texelFetch(uvs, i1, 0).xy;\n vec2 uv2 = texelFetch(uvs, i2, 0).xy;\n vec2 uv = fract(barycentric.x * uv0 + barycentric.y * uv1 + barycentric.z * uv2);\n #endif\n\n #ifdef NUM_DIFFUSE_MAPS\n int diffuseMapIndex = materials.diffuseNormalRoughnessMetalnessMapIndex[materialIndex].x;\n if (diffuseMapIndex >= 0) {\n si.color *= texture(diffuseMap, vec3(uv * materials.diffuseNormalMapSize[diffuseMapIndex].xy, diffuseMapIndex)).rgb;\n }\n #endif\n\n #ifdef NUM_NORMAL_MAPS\n int normalMapIndex = materials.diffuseNormalRoughnessMetalnessMapIndex[materialIndex].y;\n if (normalMapIndex >= 0) {\n vec2 duv02 = uv0 - uv2;\n vec2 duv12 = uv1 - uv2;\n vec3 dp02 = tri.p0 - tri.p2;\n vec3 dp12 = tri.p1 - tri.p2;\n\n // Method One\n // http://www.pbr-book.org/3ed-2018/Shapes/Triangle_Meshes.html#fragment-Computetrianglepartialderivatives-0\n // Compute tangent vectors relative to the face normal. These vectors won't necessarily be orthogonal to the smoothed normal\n // This means the TBN matrix won't be orthogonal which is technically incorrect.\n // This is Three.js's method (https://github.com/mrdoob/three.js/blob/dev/src/renderers/shaders/ShaderChunk/normalmap_pars_fragment.glsl.js)\n // --------------\n // float scale = sign(duv02.x * duv12.y - duv02.y * duv12.x);\n // vec3 dpdu = normalize((duv12.y * dp02 - duv02.y * dp12) * scale);\n // vec3 dpdv = normalize((-duv12.x * dp02 + duv02.x * dp12) * scale);\n\n // Method Two\n // Compute tangent vectors as in Method One but apply Gram-Schmidt process to make vectors orthogonal to smooth normal\n // This might inadvertently flip coordinate space orientation\n // --------------\n // float scale = sign(duv02.x * duv12.y - duv02.y * duv12.x);\n // vec3 dpdu = normalize((duv12.y * dp02 - duv02.y * dp12) * scale);\n // dpdu = (dpdu - dot(dpdu, si.normal) * si.normal); // Gram-Schmidt process\n // vec3 dpdv = cross(si.normal, dpdu) * scale;\n\n // Method Three\n // http://www.thetenthplanet.de/archives/1180\n // Compute co-tangent and co-bitangent vectors\n // These vectors are orthongal and maintain a consistent coordinate space\n // --------------\n vec3 dp12perp = cross(dp12, si.normal);\n vec3 dp02perp = cross(si.normal, dp02);\n vec3 dpdu = dp12perp * duv02.x + dp02perp * duv12.x;\n vec3 dpdv = dp12perp * duv02.y + dp02perp * duv12.y;\n float invmax = inversesqrt(max(dot(dpdu, dpdu), dot(dpdv, dpdv)));\n dpdu *= invmax;\n dpdv *= invmax;\n\n vec3 n = 2.0 * texture(normalMap, vec3(uv * materials.diffuseNormalMapSize[normalMapIndex].zw, normalMapIndex)).rgb - 1.0;\n n.xy *= materials.roughnessMetalnessNormalScale[materialIndex].zw;\n\n mat3 tbn = mat3(dpdu, dpdv, si.normal);\n\n si.normal = normalize(tbn * n);\n }\n #endif\n\n #ifdef NUM_PBR_MAPS\n int roughnessMapIndex = materials.diffuseNormalRoughnessMetalnessMapIndex[materialIndex].z;\n int metalnessMapIndex = materials.diffuseNormalRoughnessMetalnessMapIndex[materialIndex].w;\n if (roughnessMapIndex >= 0) {\n si.roughness *= texture(pbrMap, vec3(uv * materials.pbrMapSize[roughnessMapIndex].xy, roughnessMapIndex)).g;\n }\n if (metalnessMapIndex >= 0) {\n si.metalness *= texture(pbrMap, vec3(uv * materials.pbrMapSize[metalnessMapIndex].xy, metalnessMapIndex)).b;\n }\n #endif\n}\n\nstruct TriangleIntersect {\n float t;\n vec3 
barycentric;\n};\n\n// Triangle-ray intersection\n// Faster than the classic M\xF6ller\u2013Trumbore intersection algorithm\n// http://www.pbr-book.org/3ed-2018/Shapes/Triangle_Meshes.html#TriangleIntersection\nTriangleIntersect intersectTriangle(Ray r, Triangle tri, int maxDim, vec3 shear) {\n TriangleIntersect ti;\n vec3 d = r.d;\n\n // translate vertices based on ray origin\n vec3 p0t = tri.p0 - r.o;\n vec3 p1t = tri.p1 - r.o;\n vec3 p2t = tri.p2 - r.o;\n\n // permute components of triangle vertices\n if (maxDim == 0) {\n p0t = p0t.yzx;\n p1t = p1t.yzx;\n p2t = p2t.yzx;\n } else if (maxDim == 1) {\n p0t = p0t.zxy;\n p1t = p1t.zxy;\n p2t = p2t.zxy;\n }\n\n // apply shear transformation to translated vertex positions\n p0t.xy += shear.xy * p0t.z;\n p1t.xy += shear.xy * p1t.z;\n p2t.xy += shear.xy * p2t.z;\n\n // compute edge function coefficients\n vec3 e = vec3(\n p1t.x * p2t.y - p1t.y * p2t.x,\n p2t.x * p0t.y - p2t.y * p0t.x,\n p0t.x * p1t.y - p0t.y * p1t.x\n );\n\n // check if intersection is inside triangle\n if (any(lessThan(e, vec3(0))) && any(greaterThan(e, vec3(0)))) {\n return ti;\n }\n\n float det = e.x + e.y + e.z;\n\n // not needed?\n // if (det == 0.) {\n // return ti;\n // }\n\n p0t.z *= shear.z;\n p1t.z *= shear.z;\n p2t.z *= shear.z;\n float tScaled = (e.x * p0t.z + e.y * p1t.z + e.z * p2t.z);\n\n // not needed?\n // if (sign(det) != sign(tScaled)) {\n // return ti;\n // }\n\n // check if closer intersection already exists\n if (abs(tScaled) > abs(r.tMax * det)) {\n return ti;\n }\n\n float invDet = 1. / det;\n ti.t = tScaled * invDet;\n ti.barycentric = e * invDet;\n\n return ti;\n}\n\nstruct Box {\n vec3 min;\n vec3 max;\n};\n\n// Branchless ray/box intersection\n// https://tavianator.com/fast-branchless-raybounding-box-intersections/\nfloat intersectBox(Ray r, Box b) {\n vec3 tBot = (b.min - r.o) * r.invD;\n vec3 tTop = (b.max - r.o) * r.invD;\n vec3 tNear = min(tBot, tTop);\n vec3 tFar = max(tBot, tTop);\n float t0 = max(tNear.x, max(tNear.y, tNear.z));\n float t1 = min(tFar.x, min(tFar.y, tFar.z));\n\n return (t0 > t1 || t0 > r.tMax) ? -1.0 : (t0 > 0.0 ? t0 : t1);\n}\n\nint maxDimension(vec3 v) {\n return v.x > v.y ? (v.x > v.z ? 0 : 2) : (v.y > v.z ? 1 : 2);\n}\n\n// Traverse BVH, find closest triangle intersection, and return surface information\nSurfaceInteraction intersectScene(inout Ray ray) {\n SurfaceInteraction si;\n\n int maxDim = maxDimension(abs(ray.d));\n\n // Permute space so that the z dimension is the one where the absolute value of the ray's direction is largest.\n // Then create a shear transformation that aligns ray direction with the +z axis\n vec3 shear;\n if (maxDim == 0) {\n shear = vec3(-ray.d.y, -ray.d.z, 1.0) * ray.invD.x;\n } else if (maxDim == 1) {\n shear = vec3(-ray.d.z, -ray.d.x, 1.0) * ray.invD.y;\n } else {\n shear = vec3(-ray.d.x, -ray.d.y, 1.0) * ray.invD.z;\n }\n\n int nodesToVisit[STACK_SIZE];\n int stack = 0;\n\n nodesToVisit[0] = 0;\n\n while(stack >= 0) {\n int i = nodesToVisit[stack--];\n\n vec4 r1 = fetchData(bvh, i, BVH_COLUMNS);\n vec4 r2 = fetchData(bvh, i + 1, BVH_COLUMNS);\n\n int splitAxisOrNumPrimitives = floatBitsToInt(r1.w);\n\n if (splitAxisOrNumPrimitives >= 0) {\n // Intersection is a bounding box. 
Test for box intersection and keep traversing BVH\n int splitAxis = splitAxisOrNumPrimitives;\n\n Box bbox = Box(r1.xyz, r2.xyz);\n\n if (intersectBox(ray, bbox) > 0.0) {\n // traverse near node to ray first, and far node to ray last\n if (ray.d[splitAxis] > 0.0) {\n nodesToVisit[++stack] = floatBitsToInt(r2.w);\n nodesToVisit[++stack] = i + 2;\n } else {\n nodesToVisit[++stack] = i + 2;\n nodesToVisit[++stack] = floatBitsToInt(r2.w);\n }\n }\n } else {\n ivec3 index = floatBitsToInt(r1.xyz);\n Triangle tri = Triangle(\n fetchData(positions, index.x, VERTEX_COLUMNS).xyz,\n fetchData(positions, index.y, VERTEX_COLUMNS).xyz,\n fetchData(positions, index.z, VERTEX_COLUMNS).xyz\n );\n TriangleIntersect hit = intersectTriangle(ray, tri, maxDim, shear);\n\n if (hit.t > 0.0) {\n ray.tMax = hit.t;\n int materialIndex = floatBitsToInt(r2.w);\n vec3 faceNormal = r2.xyz;\n surfaceInteractionFromIntersection(si, tri, hit.barycentric, index, faceNormal, materialIndex);\n }\n }\n }\n\n // Values must be clamped outside of intersection loop. Clamping inside the loop produces incorrect numbers on some devices.\n si.roughness = clamp(si.roughness, 0.03, 1.0);\n si.metalness = clamp(si.metalness, 0.0, 1.0);\n\n return si;\n}\n\nbool intersectSceneShadow(inout Ray ray) {\n int maxDim = maxDimension(abs(ray.d));\n\n // Permute space so that the z dimension is the one where the absolute value of the ray's direction is largest.\n // Then create a shear transformation that aligns ray direction with the +z axis\n vec3 shear;\n if (maxDim == 0) {\n shear = vec3(-ray.d.y, -ray.d.z, 1.0) * ray.invD.x;\n } else if (maxDim == 1) {\n shear = vec3(-ray.d.z, -ray.d.x, 1.0) * ray.invD.y;\n } else {\n shear = vec3(-ray.d.x, -ray.d.y, 1.0) * ray.invD.z;\n }\n\n int nodesToVisit[STACK_SIZE];\n int stack = 0;\n\n nodesToVisit[0] = 0;\n\n while(stack >= 0) {\n int i = nodesToVisit[stack--];\n\n vec4 r1 = fetchData(bvh, i, BVH_COLUMNS);\n vec4 r2 = fetchData(bvh, i + 1, BVH_COLUMNS);\n\n int splitAxisOrNumPrimitives = floatBitsToInt(r1.w);\n\n if (splitAxisOrNumPrimitives >= 0) {\n int splitAxis = splitAxisOrNumPrimitives;\n\n Box bbox = Box(r1.xyz, r2.xyz);\n\n if (intersectBox(ray, bbox) > 0.0) {\n if (ray.d[splitAxis] > 0.0) {\n nodesToVisit[++stack] = floatBitsToInt(r2.w);\n nodesToVisit[++stack] = i + 2;\n } else {\n nodesToVisit[++stack] = i + 2;\n nodesToVisit[++stack] = floatBitsToInt(r2.w);\n }\n }\n } else {\n ivec3 index = floatBitsToInt(r1.xyz);\n Triangle tri = Triangle(\n fetchData(positions, index.x, VERTEX_COLUMNS).xyz,\n fetchData(positions, index.y, VERTEX_COLUMNS).xyz,\n fetchData(positions, index.z, VERTEX_COLUMNS).xyz\n );\n\n if (intersectTriangle(ray, tri, maxDim, shear).t > 0.0) {\n return true;\n }\n }\n }\n\n return false;\n}\n"); } - function random (params) { + function random (defines) { return "\n\n// Noise texture used to generate a different random number for each pixel.\n// We use blue noise in particular, but any type of noise will work.\nuniform sampler2D noise;\n\nuniform float stratifiedSamples[SAMPLING_DIMENSIONS];\nuniform float strataSize;\nuniform float useStratifiedSampling;\n\n// Every time we call randomSample() in the shader, and for every call to render,\n// we want that specific bit of the shader to fetch a sample from the same position in stratifiedSamples\n// This allows us to use stratified sampling for each random variable in our path tracing\nint sampleIndex = 0;\n\nconst highp float maxUint = 1.0 / 4294967295.0;\n\nfloat pixelSeed;\nhighp uint randState;\n\n// simple integer 
hashing function\n// https://en.wikipedia.org/wiki/Xorshift\nuint xorshift(uint x) {\n x ^= x << 13u;\n x ^= x >> 17u;\n x ^= x << 5u;\n return x;\n}\n\nvoid initRandom() {\n vec2 noiseSize = vec2(textureSize(noise, 0));\n\n // tile the small noise texture across the entire screen\n pixelSeed = texture(noise, vCoord / (pixelSize * noiseSize)).r;\n\n // white noise used if stratified sampling is disabled\n // produces more balanced path tracing for 1 sample-per-pixel renders\n randState = xorshift(xorshift(floatBitsToUint(vCoord.x)) * xorshift(floatBitsToUint(vCoord.y)));\n}\n\nfloat randomSample() {\n randState = xorshift(randState);\n\n float stratifiedSample = stratifiedSamples[sampleIndex++];\n\n float random = mix(\n float(randState) * maxUint, // white noise\n fract((stratifiedSample + pixelSeed) * strataSize), // blue noise + stratified samples\n useStratifiedSampling\n );\n\n // transform random number between [0, 1] to (0, 1)\n return EPS + (1.0 - 2.0 * EPS) * random;\n}\n\nvec2 randomSampleVec2() {\n return vec2(randomSample(), randomSample());\n}\n"; } // Sample the environment map using a cumulative distribution function as described in // http://www.pbr-book.org/3ed-2018/Light_Transport_I_Surface_Reflection/Sampling_Light_Sources.html#InfiniteAreaLights - function envmap (params) { + function envmap (defines) { return "\n\nuniform sampler2D envmap;\nuniform sampler2D envmapDistribution;\n\nvec2 cartesianToEquirect(vec3 pointOnSphere) {\n float phi = mod(atan(-pointOnSphere.z, -pointOnSphere.x), TWOPI);\n float theta = acos(pointOnSphere.y);\n return vec2(phi * 0.5 * INVPI, theta * INVPI);\n}\n\nfloat getEnvmapV(float u, out int vOffset, out float pdf) {\n ivec2 size = textureSize(envmap, 0);\n\n int left = 0;\n int right = size.y + 1; // cdf length is the length of the envmap + 1\n while (left < right) {\n int mid = (left + right) >> 1;\n float s = texelFetch(envmapDistribution, ivec2(0, mid), 0).x;\n if (s <= u) {\n left = mid + 1;\n } else {\n right = mid;\n }\n }\n vOffset = left - 1;\n\n // x channel is cumulative distribution of envmap luminance\n // y channel is partial probability density of envmap luminance\n vec2 s0 = texelFetch(envmapDistribution, ivec2(0, vOffset), 0).xy;\n vec2 s1 = texelFetch(envmapDistribution, ivec2(0, vOffset + 1), 0).xy;\n\n pdf = s0.y;\n\n return (float(vOffset) + (u - s0.x) / (s1.x - s0.x)) / float(size.y);\n}\n\nfloat getEnvmapU(float u, int vOffset, out float pdf) {\n ivec2 size = textureSize(envmap, 0);\n\n int left = 0;\n int right = size.x + 1; // cdf length is the length of the envmap + 1\n while (left < right) {\n int mid = (left + right) >> 1;\n float s = texelFetch(envmapDistribution, ivec2(1 + mid, vOffset), 0).x;\n if (s <= u) {\n left = mid + 1;\n } else {\n right = mid;\n }\n }\n int uOffset = left - 1;\n\n // x channel is cumulative distribution of envmap luminance\n // y channel is partial probability density of envmap luminance\n vec2 s0 = texelFetch(envmapDistribution, ivec2(1 + uOffset, vOffset), 0).xy;\n vec2 s1 = texelFetch(envmapDistribution, ivec2(1 + uOffset + 1, vOffset), 0).xy;\n\n pdf = s0.y;\n\n return (float(uOffset) + (u - s0.x) / (s1.x - s0.x)) / float(size.x);\n}\n\n// Perform two binary searches to find light direction.\nvec3 sampleEnvmap(vec2 random, out vec2 uv, out float pdf) {\n vec2 partialPdf;\n int vOffset;\n\n uv.y = getEnvmapV(random.x, vOffset, partialPdf.y);\n uv.x = getEnvmapU(random.y, vOffset, partialPdf.x);\n\n float phi = uv.x * TWOPI;\n float theta = uv.y * PI;\n float cosTheta = cos(theta);\n 
float sinTheta = sin(theta);\n float cosPhi = cos(phi);\n float sinPhi = sin(phi);\n\n vec3 dir = vec3(-sinTheta * cosPhi, cosTheta, -sinTheta * sinPhi);\n\n pdf = partialPdf.x * partialPdf.y * INVPI2 / (2.0 * sinTheta);\n\n return dir;\n}\n\nfloat envmapPdf(vec2 uv) {\n vec2 size = vec2(textureSize(envmap, 0));\n\n float sinTheta = sin(uv.y * PI);\n\n uv *= size;\n\n float partialX = texelFetch(envmapDistribution, ivec2(1.0 + uv.x, uv.y), 0).y;\n float partialY = texelFetch(envmapDistribution, ivec2(0, uv.y), 0).y;\n\n return partialX * partialY * INVPI2 / (2.0 * sinTheta);\n}\n\nvec3 sampleEnvmapFromDirection(vec3 d) {\n vec2 uv = cartesianToEquirect(d);\n return textureLinear(envmap, uv).rgb;\n}\n\n"; } - function bsdf (params) { + function bsdf (defines) { return "\n\n// Computes the exact value of the Fresnel factor\n// https://seblagarde.wordpress.com/2013/04/29/memo-on-fresnel-equations/\nfloat fresnel(float cosTheta, float eta, float invEta) {\n eta = cosTheta > 0.0 ? eta : invEta;\n cosTheta = abs(cosTheta);\n\n float gSquared = eta * eta + cosTheta * cosTheta - 1.0;\n\n if (gSquared < 0.0) {\n return 1.0;\n }\n\n float g = sqrt(gSquared);\n\n float a = (g - cosTheta) / (g + cosTheta);\n float b = (cosTheta * (g + cosTheta) - 1.0) / (cosTheta * (g - cosTheta) + 1.0);\n\n return 0.5 * a * a * (1.0 + b * b);\n}\n\nfloat fresnelSchlickWeight(float cosTheta) {\n float w = 1.0 - cosTheta;\n return (w * w) * (w * w) * w;\n}\n\n// Computes Schlick's approximation of the Fresnel factor\n// Assumes ray is moving from a less dense to a more dense medium\nfloat fresnelSchlick(float cosTheta, float r0) {\n return mix(fresnelSchlickWeight(cosTheta), 1.0, r0);\n}\n\n// Computes Schlick's approximation of Fresnel factor\n// Accounts for total internal reflection if ray is moving from a more dense to a less dense medium\nfloat fresnelSchlickTIR(float cosTheta, float r0, float ni) {\n\n // moving from a more dense to a less dense medium\n if (cosTheta < 0.0) {\n float inv_eta = ni;\n float SinT2 = inv_eta * inv_eta * (1.0f - cosTheta * cosTheta);\n if (SinT2 > 1.0) {\n return 1.0; // total internal reflection\n }\n cosTheta = sqrt(1.0f - SinT2);\n }\n\n return mix(fresnelSchlickWeight(cosTheta), 1.0, r0);\n}\n\nfloat trowbridgeReitzD(float cosTheta, float alpha2) {\n float e = cosTheta * cosTheta * (alpha2 - 1.0) + 1.0;\n return alpha2 / (PI * e * e);\n}\n\nfloat trowbridgeReitzLambda(float cosTheta, float alpha2) {\n float cos2Theta = cosTheta * cosTheta;\n float tan2Theta = (1.0 - cos2Theta) / cos2Theta;\n return 0.5 * (-1.0 + sqrt(1.0 + alpha2 * tan2Theta));\n}\n\n// An implementation of Disney's principled BRDF\n// https://disney-animation.s3.amazonaws.com/library/s2012_pbs_disney_brdf_notes_v2.pdf\nvec3 materialBrdf(SurfaceInteraction si, vec3 viewDir, vec3 lightDir, float cosThetaL, float diffuseWeight, out float pdf) {\n vec3 halfVector = normalize(viewDir + lightDir);\n\n cosThetaL = abs(cosThetaL);\n float cosThetaV = abs(dot(si.normal, viewDir));\n float cosThetaH = abs(dot(si.normal, halfVector));\n float cosThetaD = abs(dot(lightDir, halfVector));\n\n float alpha2 = (si.roughness * si.roughness) * (si.roughness * si.roughness);\n\n float F = fresnelSchlick(cosThetaD, mix(R0, 0.6, si.metalness));\n float D = trowbridgeReitzD(cosThetaH, alpha2);\n\n float roughnessRemapped = 0.5 + 0.5 * si.roughness;\n float alpha2Remapped = (roughnessRemapped * roughnessRemapped) * (roughnessRemapped * roughnessRemapped);\n\n float G = 1.0 / (1.0 + trowbridgeReitzLambda(cosThetaV, alpha2Remapped) + 
trowbridgeReitzLambda(cosThetaL, alpha2Remapped));\n\n float specular = F * D * G / (4.0 * cosThetaV * cosThetaL);\n float specularPdf = D * cosThetaH / (4.0 * cosThetaD);\n\n float f = -0.5 + 2.0 * cosThetaD * cosThetaD * si.roughness;\n float diffuse = diffuseWeight * INVPI * (1.0 + f * fresnelSchlickWeight(cosThetaL)) * (1.0 + f * fresnelSchlickWeight(cosThetaV));\n float diffusePdf = cosThetaL * INVPI;\n\n pdf = mix(0.5 * (specularPdf + diffusePdf), specularPdf, si.metalness);\n\n return mix(si.color * diffuse + specular, si.color * specular, si.metalness);\n}\n\n"; } - function sample (params) { + function sample (defines) { return "\n\n// https://graphics.pixar.com/library/OrthonormalB/paper.pdf\nmat3 orthonormalBasis(vec3 n) {\n float zsign = n.z >= 0.0 ? 1.0 : -1.0;\n float a = -1.0 / (zsign + n.z);\n float b = n.x * n.y * a;\n vec3 s = vec3(1.0 + zsign * n.x * n.x * a, zsign * b, -zsign * n.x);\n vec3 t = vec3(b, zsign + n.y * n.y * a, -n.y);\n return mat3(s, t, n);\n}\n\n// http://www.pbr-book.org/3ed-2018/Monte_Carlo_Integration/2D_Sampling_with_Multidimensional_Transformations.html#SamplingaUnitDisk\nvec2 sampleCircle(vec2 p) {\n p = 2.0 * p - 1.0;\n\n bool greater = abs(p.x) > abs(p.y);\n\n float r = greater ? p.x : p.y;\n float theta = greater ? 0.25 * PI * p.y / p.x : PI * (0.5 - 0.25 * p.x / p.y);\n\n return r * vec2(cos(theta), sin(theta));\n}\n\n// http://www.pbr-book.org/3ed-2018/Monte_Carlo_Integration/2D_Sampling_with_Multidimensional_Transformations.html#Cosine-WeightedHemisphereSampling\nvec3 cosineSampleHemisphere(vec2 p) {\n vec2 h = sampleCircle(p);\n float z = sqrt(max(0.0, 1.0 - h.x * h.x - h.y * h.y));\n return vec3(h, z);\n}\n\n\n// http://www.pbr-book.org/3ed-2018/Light_Transport_I_Surface_Reflection/Sampling_Reflection_Functions.html#MicrofacetBxDFs\n// Instead of Beckmann distrubtion, we use the GTR2 (GGX) distrubtion as covered in Disney's Principled BRDF paper\nvec3 lightDirSpecular(vec3 faceNormal, vec3 viewDir, mat3 basis, float roughness, vec2 random) {\n float phi = TWOPI * random.y;\n float alpha = roughness * roughness;\n float cosTheta = sqrt((1.0 - random.x) / (1.0 + (alpha * alpha - 1.0) * random.x));\n float sinTheta = sqrt(1.0 - cosTheta * cosTheta);\n\n vec3 halfVector = basis * sign(dot(faceNormal, viewDir)) * vec3(sinTheta * cos(phi), sinTheta * sin(phi), cosTheta);\n\n vec3 lightDir = reflect(-viewDir, halfVector);\n\n return lightDir;\n}\n\nvec3 lightDirDiffuse(vec3 faceNormal, vec3 viewDir, mat3 basis, vec2 random) {\n return basis * sign(dot(faceNormal, viewDir)) * cosineSampleHemisphere(random);\n}\n\nfloat powerHeuristic(float f, float g) {\n return (f * f) / (f * f + g * g);\n}\n\n"; } // Estimate the direct lighting integral using multiple importance sampling // http://www.pbr-book.org/3ed-2018/Light_Transport_I_Surface_Reflection/Direct_Lighting.html#EstimatingtheDirectLightingIntegral - function sampleMaterial (params) { + function sampleMaterial (defines) { return "\n\nvec3 importanceSampleLight(SurfaceInteraction si, vec3 viewDir, bool lastBounce, vec2 random) {\n vec3 li;\n\n float lightPdf;\n vec2 uv;\n vec3 lightDir = sampleEnvmap(random, uv, lightPdf);\n\n float cosThetaL = dot(si.normal, lightDir);\n\n float orientation = dot(si.faceNormal, viewDir) * cosThetaL;\n if (orientation < 0.0) {\n return li;\n }\n\n float diffuseWeight = 1.0;\n Ray ray;\n initRay(ray, si.position + EPS * lightDir, lightDir);\n if (intersectSceneShadow(ray)) {\n if (lastBounce) {\n diffuseWeight = 0.0;\n } else {\n return li;\n }\n }\n\n vec3 irr = 
textureLinear(envmap, uv).xyz;\n\n float scatteringPdf;\n vec3 brdf = materialBrdf(si, viewDir, lightDir, cosThetaL, diffuseWeight, scatteringPdf);\n\n float weight = powerHeuristic(lightPdf, scatteringPdf);\n\n li = brdf * irr * abs(cosThetaL) * weight / lightPdf;\n\n return li;\n}\n\nvec3 importanceSampleMaterial(SurfaceInteraction si, vec3 viewDir, bool lastBounce, vec3 lightDir) {\n vec3 li;\n\n float cosThetaL = dot(si.normal, lightDir);\n\n float orientation = dot(si.faceNormal, viewDir) * cosThetaL;\n if (orientation < 0.0) {\n return li;\n }\n\n float diffuseWeight = 1.0;\n Ray ray;\n initRay(ray, si.position + EPS * lightDir, lightDir);\n if (intersectSceneShadow(ray)) {\n if (lastBounce) {\n diffuseWeight = 0.0;\n } else {\n return li;\n }\n }\n\n vec2 uv = cartesianToEquirect(lightDir);\n\n float lightPdf = envmapPdf(uv);\n\n vec3 irr = textureLinear(envmap, uv).rgb;\n\n float scatteringPdf;\n vec3 brdf = materialBrdf(si, viewDir, lightDir, cosThetaL, diffuseWeight, scatteringPdf);\n\n float weight = powerHeuristic(scatteringPdf, lightPdf);\n\n li += brdf * irr * abs(cosThetaL) * weight / scatteringPdf;\n\n return li;\n}\n\nvec3 sampleMaterial(SurfaceInteraction si, int bounce, inout Ray ray, inout vec3 beta, inout bool abort) {\n mat3 basis = orthonormalBasis(si.normal);\n vec3 viewDir = -ray.d;\n\n vec2 diffuseOrSpecular = randomSampleVec2();\n\n vec3 lightDir = diffuseOrSpecular.x < mix(0.5, 0.0, si.metalness) ?\n lightDirDiffuse(si.faceNormal, viewDir, basis, randomSampleVec2()) :\n lightDirSpecular(si.faceNormal, viewDir, basis, si.roughness, randomSampleVec2());\n\n bool lastBounce = bounce == BOUNCES;\n\n // Add path contribution\n vec3 li = beta * (\n importanceSampleLight(si, viewDir, lastBounce, randomSampleVec2()) +\n importanceSampleMaterial(si, viewDir, lastBounce, lightDir)\n );\n\n // Get new path direction\n\n lightDir = diffuseOrSpecular.y < mix(0.5, 0.0, si.metalness) ?\n lightDirDiffuse(si.faceNormal, viewDir, basis, randomSampleVec2()) :\n lightDirSpecular(si.faceNormal, viewDir, basis, si.roughness, randomSampleVec2());\n\n float cosThetaL = dot(si.normal, lightDir);\n\n float scatteringPdf;\n vec3 brdf = materialBrdf(si, viewDir, lightDir, cosThetaL, 1.0, scatteringPdf);\n\n beta *= abs(cosThetaL) * brdf / scatteringPdf;\n\n initRay(ray, si.position + EPS * lightDir, lightDir);\n\n // If new ray direction is pointing into the surface,\n // the light path is physically impossible and we terminate the path.\n float orientation = dot(si.faceNormal, viewDir) * cosThetaL;\n abort = orientation < 0.0;\n\n return li;\n}\n\n"; } - function sampleShadowCatcher (params) { + function sampleShadowCatcher (defines) { return "\n\n#ifdef USE_SHADOW_CATCHER\n\nfloat importanceSampleLightShadowCatcher(SurfaceInteraction si, vec3 viewDir, vec2 random, inout float alpha) {\n float li;\n\n float lightPdf;\n vec2 uv;\n vec3 lightDir = sampleEnvmap(random, uv, lightPdf);\n\n float cosThetaL = dot(si.normal, lightDir);\n\n float orientation = dot(si.faceNormal, viewDir) * cosThetaL;\n if (orientation < 0.0) {\n return li;\n }\n\n float occluded = 1.0;\n\n Ray ray;\n initRay(ray, si.position + EPS * lightDir, lightDir);\n if (intersectSceneShadow(ray)) {\n occluded = 0.0;\n }\n\n float irr = dot(luminance, textureLinear(envmap, uv).rgb);\n\n // lambertian BRDF\n float brdf = INVPI;\n float scatteringPdf = abs(cosThetaL) * INVPI;\n\n float weight = powerHeuristic(lightPdf, scatteringPdf);\n\n float lightEq = irr * brdf * abs(cosThetaL) * weight / lightPdf;\n\n alpha += lightEq;\n 
li += occluded * lightEq;\n\n return li;\n}\n\nfloat importanceSampleMaterialShadowCatcher(SurfaceInteraction si, vec3 viewDir, vec3 lightDir, inout float alpha) {\n float li;\n\n float cosThetaL = dot(si.normal, lightDir);\n\n float orientation = dot(si.faceNormal, viewDir) * cosThetaL;\n if (orientation < 0.0) {\n return li;\n }\n\n float occluded = 1.0;\n\n Ray ray;\n initRay(ray, si.position + EPS * lightDir, lightDir);\n if (intersectSceneShadow(ray)) {\n occluded = 0.0;\n }\n\n vec2 uv = cartesianToEquirect(lightDir);\n\n float lightPdf = envmapPdf(uv);\n\n float irr = dot(luminance, textureLinear(envmap, uv).rgb);\n\n // lambertian BRDF\n float brdf = INVPI;\n float scatteringPdf = abs(cosThetaL) * INVPI;\n\n float weight = powerHeuristic(scatteringPdf, lightPdf);\n\n float lightEq = irr * brdf * abs(cosThetaL) * weight / scatteringPdf;\n\n alpha += lightEq;\n li += occluded * lightEq;\n\n return li;\n}\n\nvec3 sampleShadowCatcher(SurfaceInteraction si, int bounce, inout Ray ray, inout vec3 beta, inout float alpha, inout vec3 prevLi, inout bool abort) {\n mat3 basis = orthonormalBasis(si.normal);\n vec3 viewDir = -ray.d;\n vec3 color = sampleEnvmapFromDirection(-viewDir);\n\n vec3 lightDir = lightDirDiffuse(si.faceNormal, viewDir, basis, randomSampleVec2());\n\n float alphaBounce = 0.0;\n\n // Add path contribution\n vec3 li = beta * color * (\n importanceSampleLightShadowCatcher(si, viewDir, randomSampleVec2(), alphaBounce) +\n importanceSampleMaterialShadowCatcher(si, viewDir, lightDir, alphaBounce)\n );\n\n // alphaBounce contains the lighting of the shadow catcher *without* shadows\n alphaBounce = alphaBounce == 0.0 ? 1.0 : alphaBounce;\n\n // in post processing step, we divide by alpha to obtain the percentage of light relative to shadow for the shadow catcher\n alpha *= alphaBounce;\n\n // we only want the alpha division to affect the shadow catcher\n // factor in alpha to the previous light, so that dividing by alpha with the previous light cancels out this contribution\n prevLi *= alphaBounce;\n\n // Get new path direction\n\n lightDir = lightDirDiffuse(si.faceNormal, viewDir, basis, randomSampleVec2());\n\n float cosThetaL = dot(si.normal, lightDir);\n\n // lambertian brdf with terms cancelled\n beta *= color;\n\n initRay(ray, si.position + EPS * lightDir, lightDir);\n\n // If new ray direction is pointing into the surface,\n // the light path is physically impossible and we terminate the path.\n float orientation = dot(si.faceNormal, viewDir) * cosThetaL;\n abort = orientation < 0.0;\n\n // advance dimension index by unused stratified samples\n const int usedSamples = 6;\n sampleIndex += SAMPLES_PER_MATERIAL - usedSamples;\n\n return li;\n}\n\n#endif\n"; } - function sampleGlass (params) { + function sampleGlass (defines) { return "\n\n#ifdef USE_GLASS\n\nvec3 sampleGlassSpecular(SurfaceInteraction si, int bounce, inout Ray ray, inout vec3 beta) {\n vec3 viewDir = -ray.d;\n float cosTheta = dot(si.normal, viewDir);\n\n float F = si.materialType == THIN_GLASS ?\n fresnelSchlick(abs(cosTheta), R0) : // thin glass\n fresnelSchlickTIR(cosTheta, R0, IOR); // thick glass\n\n vec3 lightDir;\n\n float reflectionOrRefraction = randomSample();\n\n if (reflectionOrRefraction < F) {\n lightDir = reflect(-viewDir, si.normal);\n } else {\n lightDir = si.materialType == THIN_GLASS ?\n refract(-viewDir, sign(cosTheta) * si.normal, INV_IOR_THIN) : // thin glass\n refract(-viewDir, sign(cosTheta) * si.normal, cosTheta < 0.0 ? 
IOR : INV_IOR); // thick glass\n beta *= si.color;\n }\n\n initRay(ray, si.position + EPS * lightDir, lightDir);\n\n // advance sample index by unused stratified samples\n const int usedSamples = 1;\n sampleIndex += SAMPLES_PER_MATERIAL - usedSamples;\n\n return bounce == BOUNCES ? beta * sampleEnvmapFromDirection(lightDir) : vec3(0.0);\n}\n\n#endif\n\n"; } @@ -601,28 +597,10 @@ return defines; } - function fragString (params) { - return "#version 300 es\n\nprecision mediump float;\nprecision mediump int;\n\n".concat(addDefines(params), "\n\n#define PI 3.14159265359\n#define TWOPI 6.28318530718\n#define INVPI 0.31830988618\n#define INVPI2 0.10132118364\n#define EPS 0.0005\n#define INF 1.0e999\n#define RAY_MAX_DISTANCE 9999.0\n\n#define STANDARD 0\n#define THIN_GLASS 1\n#define THICK_GLASS 2\n#define SHADOW_CATCHER 3\n\n#define SAMPLES_PER_MATERIAL 8\n\nconst float IOR = 1.5;\nconst float INV_IOR = 1.0 / IOR;\n\nconst float IOR_THIN = 1.015;\nconst float INV_IOR_THIN = 1.0 / IOR_THIN;\n\nconst float R0 = (1.0 - IOR) * (1.0 - IOR) / ((1.0 + IOR) * (1.0 + IOR));\n\n// https://www.w3.org/WAI/GL/wiki/Relative_luminance\nconst vec3 luminance = vec3(0.2126, 0.7152, 0.0722);\n\nstruct Ray {\n vec3 o;\n vec3 d;\n vec3 invD;\n float tMax;\n};\n\nstruct SurfaceInteraction {\n bool hit;\n vec3 position;\n vec3 normal; // smoothed normal from the three triangle vertices\n vec3 faceNormal; // normal of the triangle\n vec3 color;\n float roughness;\n float metalness;\n int materialType;\n};\n\nstruct Camera {\n mat4 transform;\n float aspect;\n float fov;\n float focus;\n float aperture;\n};\n\nuniform Camera camera;\nuniform vec2 pixelSize; // 1 / screenResolution\n\nin vec2 vCoord;\n\nout vec4 fragColor;\n\nvoid initRay(inout Ray ray, vec3 origin, vec3 direction) {\n ray.o = origin;\n ray.d = direction;\n ray.invD = 1.0 / ray.d;\n ray.tMax = RAY_MAX_DISTANCE;\n}\n\n// given the index from a 1D array, retrieve corresponding position from packed 2D texture\nivec2 unpackTexel(int i, int columnsLog2) {\n ivec2 u;\n u.y = i >> columnsLog2; // equivalent to (i / 2^columnsLog2)\n u.x = i - (u.y << columnsLog2); // equivalent to (i % 2^columnsLog2)\n return u;\n}\n\nvec4 fetchData(sampler2D s, int i, int columnsLog2) {\n return texelFetch(s, unpackTexel(i, columnsLog2), 0);\n}\n\nivec4 fetchData(isampler2D s, int i, int columnsLog2) {\n return texelFetch(s, unpackTexel(i, columnsLog2), 0);\n}\n\n").concat(textureLinear(), "\n").concat(intersect(params), "\n").concat(random(), "\n").concat(envmap(), "\n").concat(bsdf(), "\n").concat(sample(), "\n").concat(sampleMaterial(), "\n").concat(sampleGlass(), "\n").concat(sampleShadowCatcher(), "\n\nstruct Path {\n Ray ray;\n vec3 li;\n float alpha;\n vec3 beta;\n bool specularBounce;\n bool abort;\n};\n\nvoid bounce(inout Path path, int i) {\n if (path.abort) {\n return;\n }\n\n SurfaceInteraction si = intersectScene(path.ray);\n\n if (!si.hit) {\n if (path.specularBounce) {\n path.li += path.beta * sampleEnvmapFromDirection(path.ray.d);\n }\n\n path.abort = true;\n } else {\n #ifdef USE_GLASS\n if (si.materialType == THIN_GLASS || si.materialType == THICK_GLASS) {\n path.li += sampleGlassSpecular(si, i, path.ray, path.beta);\n path.specularBounce = true;\n }\n #endif\n #ifdef USE_SHADOW_CATCHER\n if (si.materialType == SHADOW_CATCHER) {\n path.li += sampleShadowCatcher(si, i, path.ray, path.beta, path.alpha, path.li, path.abort);\n path.specularBounce = false;\n }\n #endif\n if (si.materialType == STANDARD) {\n path.li += sampleMaterial(si, i, path.ray, path.beta, 
path.abort);\n path.specularBounce = false;\n }\n\n // Russian Roulette sampling\n if (i >= 2) {\n float q = 1.0 - dot(path.beta, luminance);\n if (randomSample() < q) {\n path.abort = true;\n }\n path.beta /= 1.0 - q;\n }\n }\n}\n\n// Path tracing integrator as described in\n// http://www.pbr-book.org/3ed-2018/Light_Transport_I_Surface_Reflection/Path_Tracing.html#\nvec4 integrator(inout Ray ray) {\n Path path;\n path.ray = ray;\n path.li = vec3(0);\n path.alpha = 1.0;\n path.beta = vec3(1.0);\n path.specularBounce = true;\n path.abort = false;\n\n // Manually unroll for loop.\n // Some hardware fails to interate over a GLSL loop, so we provide this workaround\n\n ").concat(unrollLoop('i', 1, params.BOUNCES + 1, 1, "\n // equivelant to\n // for (int i = 1; i < params.bounces + 1, i += 1)\n bounce(path, i);\n "), "\n\n return vec4(path.li, path.alpha);\n}\n\nvoid main() {\n initRandom();\n\n vec2 vCoordAntiAlias = vCoord + pixelSize * (randomSampleVec2() - 0.5);\n\n vec3 direction = normalize(vec3(vCoordAntiAlias - 0.5, -1.0) * vec3(camera.aspect, 1.0, camera.fov));\n\n // Thin lens model with depth-of-field\n // http://www.pbr-book.org/3ed-2018/Camera_Models/Projective_Camera_Models.html#TheThinLensModelandDepthofField\n vec2 lensPoint = camera.aperture * sampleCircle(randomSampleVec2());\n vec3 focusPoint = -direction * camera.focus / direction.z; // intersect ray direction with focus plane\n\n vec3 origin = vec3(lensPoint, 0.0);\n direction = normalize(focusPoint - origin);\n\n origin = vec3(camera.transform * vec4(origin, 1.0));\n direction = mat3(camera.transform) * direction;\n\n Ray cam;\n initRay(cam, origin, direction);\n\n vec4 liAndAlpha = integrator(cam);\n\n if (!(liAndAlpha.x < INF && liAndAlpha.x > -EPS)) {\n liAndAlpha = vec4(0, 0, 0, 1);\n }\n\n fragColor = liAndAlpha;\n\n // Stratified Sampling Sample Count Test\n // ---------------\n // Uncomment the following code\n // Then observe the colors of the image\n // If:\n // * The resulting image is pure black\n // Extra samples are being passed to the shader that aren't being used.\n // * The resulting image contains red\n // Not enough samples are being passed to the shader\n // * The resulting image contains only white with some black\n // All samples are used by the shader. 
Correct result!\n\n // fragColor = vec4(0, 0, 0, 1);\n // if (sampleIndex == SAMPLING_DIMENSIONS) {\n // fragColor = vec4(1, 1, 1, 1);\n // } else if (sampleIndex > SAMPLING_DIMENSIONS) {\n // fragColor = vec4(1, 0, 0, 1);\n // }\n}\n"); + function fragString (defines) { + return "#version 300 es\n\nprecision mediump float;\nprecision mediump int;\n\n".concat(addDefines(defines), "\n\n#define PI 3.14159265359\n#define TWOPI 6.28318530718\n#define INVPI 0.31830988618\n#define INVPI2 0.10132118364\n#define EPS 0.0005\n#define INF 1.0e999\n#define RAY_MAX_DISTANCE 9999.0\n\n#define STANDARD 0\n#define THIN_GLASS 1\n#define THICK_GLASS 2\n#define SHADOW_CATCHER 3\n\n#define SAMPLES_PER_MATERIAL 8\n\nconst float IOR = 1.5;\nconst float INV_IOR = 1.0 / IOR;\n\nconst float IOR_THIN = 1.015;\nconst float INV_IOR_THIN = 1.0 / IOR_THIN;\n\nconst float R0 = (1.0 - IOR) * (1.0 - IOR) / ((1.0 + IOR) * (1.0 + IOR));\n\n// https://www.w3.org/WAI/GL/wiki/Relative_luminance\nconst vec3 luminance = vec3(0.2126, 0.7152, 0.0722);\n\nstruct Ray {\n vec3 o;\n vec3 d;\n vec3 invD;\n float tMax;\n};\n\nstruct SurfaceInteraction {\n bool hit;\n vec3 position;\n vec3 normal; // smoothed normal from the three triangle vertices\n vec3 faceNormal; // normal of the triangle\n vec3 color;\n float roughness;\n float metalness;\n int materialType;\n};\n\nstruct Camera {\n mat4 transform;\n float aspect;\n float fov;\n float focus;\n float aperture;\n};\n\nuniform Camera camera;\nuniform vec2 pixelSize; // 1 / screenResolution\n\nin vec2 vCoord;\n\nout vec4 fragColor;\n\nvoid initRay(inout Ray ray, vec3 origin, vec3 direction) {\n ray.o = origin;\n ray.d = direction;\n ray.invD = 1.0 / ray.d;\n ray.tMax = RAY_MAX_DISTANCE;\n}\n\n// given the index from a 1D array, retrieve corresponding position from packed 2D texture\nivec2 unpackTexel(int i, int columnsLog2) {\n ivec2 u;\n u.y = i >> columnsLog2; // equivalent to (i / 2^columnsLog2)\n u.x = i - (u.y << columnsLog2); // equivalent to (i % 2^columnsLog2)\n return u;\n}\n\nvec4 fetchData(sampler2D s, int i, int columnsLog2) {\n return texelFetch(s, unpackTexel(i, columnsLog2), 0);\n}\n\nivec4 fetchData(isampler2D s, int i, int columnsLog2) {\n return texelFetch(s, unpackTexel(i, columnsLog2), 0);\n}\n\n").concat(textureLinear(), "\n").concat(intersect(defines), "\n").concat(random(), "\n").concat(envmap(), "\n").concat(bsdf(), "\n").concat(sample(), "\n").concat(sampleMaterial(), "\n").concat(sampleGlass(), "\n").concat(sampleShadowCatcher(), "\n\nstruct Path {\n Ray ray;\n vec3 li;\n float alpha;\n vec3 beta;\n bool specularBounce;\n bool abort;\n};\n\nvoid bounce(inout Path path, int i) {\n if (path.abort) {\n return;\n }\n\n SurfaceInteraction si = intersectScene(path.ray);\n\n if (!si.hit) {\n if (path.specularBounce) {\n path.li += path.beta * sampleEnvmapFromDirection(path.ray.d);\n }\n\n path.abort = true;\n } else {\n #ifdef USE_GLASS\n if (si.materialType == THIN_GLASS || si.materialType == THICK_GLASS) {\n path.li += sampleGlassSpecular(si, i, path.ray, path.beta);\n path.specularBounce = true;\n }\n #endif\n #ifdef USE_SHADOW_CATCHER\n if (si.materialType == SHADOW_CATCHER) {\n path.li += sampleShadowCatcher(si, i, path.ray, path.beta, path.alpha, path.li, path.abort);\n path.specularBounce = false;\n }\n #endif\n if (si.materialType == STANDARD) {\n path.li += sampleMaterial(si, i, path.ray, path.beta, path.abort);\n path.specularBounce = false;\n }\n\n // Russian Roulette sampling\n if (i >= 2) {\n float q = 1.0 - dot(path.beta, luminance);\n if (randomSample() 
< q) {\n path.abort = true;\n }\n path.beta /= 1.0 - q;\n }\n }\n}\n\n// Path tracing integrator as described in\n// http://www.pbr-book.org/3ed-2018/Light_Transport_I_Surface_Reflection/Path_Tracing.html#\nvec4 integrator(inout Ray ray) {\n Path path;\n path.ray = ray;\n path.li = vec3(0);\n path.alpha = 1.0;\n path.beta = vec3(1.0);\n path.specularBounce = true;\n path.abort = false;\n\n // Manually unroll for loop.\n // Some hardware fails to interate over a GLSL loop, so we provide this workaround\n\n ").concat(unrollLoop('i', 1, defines.BOUNCES + 1, 1, "\n // equivelant to\n // for (int i = 1; i < defines.bounces + 1, i += 1)\n bounce(path, i);\n "), "\n\n return vec4(path.li, path.alpha);\n}\n\nvoid main() {\n initRandom();\n\n vec2 vCoordAntiAlias = vCoord + pixelSize * (randomSampleVec2() - 0.5);\n\n vec3 direction = normalize(vec3(vCoordAntiAlias - 0.5, -1.0) * vec3(camera.aspect, 1.0, camera.fov));\n\n // Thin lens model with depth-of-field\n // http://www.pbr-book.org/3ed-2018/Camera_Models/Projective_Camera_Models.html#TheThinLensModelandDepthofField\n vec2 lensPoint = camera.aperture * sampleCircle(randomSampleVec2());\n vec3 focusPoint = -direction * camera.focus / direction.z; // intersect ray direction with focus plane\n\n vec3 origin = vec3(lensPoint, 0.0);\n direction = normalize(focusPoint - origin);\n\n origin = vec3(camera.transform * vec4(origin, 1.0));\n direction = mat3(camera.transform) * direction;\n\n Ray cam;\n initRay(cam, origin, direction);\n\n vec4 liAndAlpha = integrator(cam);\n\n if (!(liAndAlpha.x < INF && liAndAlpha.x > -EPS)) {\n liAndAlpha = vec4(0, 0, 0, 1);\n }\n\n fragColor = liAndAlpha;\n\n // Stratified Sampling Sample Count Test\n // ---------------\n // Uncomment the following code\n // Then observe the colors of the image\n // If:\n // * The resulting image is pure black\n // Extra samples are being passed to the shader that aren't being used.\n // * The resulting image contains red\n // Not enough samples are being passed to the shader\n // * The resulting image contains only white with some black\n // All samples are used by the shader. 
Correct result!\n\n // fragColor = vec4(0, 0, 0, 1);\n // if (sampleIndex == SAMPLING_DIMENSIONS) {\n // fragColor = vec4(1, 1, 1, 1);\n // } else if (sampleIndex > SAMPLING_DIMENSIONS) {\n // fragColor = vec4(1, 0, 0, 1);\n // }\n}\n"); } - function addFlatGeometryIndices(geometry) { - var position = geometry.getAttribute('position'); - - if (!position) { - console.warn('No position attribute'); - return; - } - - var index = new Uint32Array(position.count); - - for (var i = 0; i < index.length; i++) { - index[i] = i; - } - - geometry.setIndex(new THREE$1.BufferAttribute(index, 1, false)); - return geometry; - } //TODO: Add UV support - function mergeMeshesToGeometry(meshes) { var vertexCount = 0; var indexCount = 0; @@ -636,7 +614,7 @@ for (var _iterator = meshes[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) { var mesh = _step.value; - var _geometry = mesh.geometry.clone(); + var _geometry = cloneBufferGeometry(mesh.geometry, ['position', 'normal', 'uv']); var index = _geometry.getIndex(); @@ -648,6 +626,8 @@ if (!_geometry.getAttribute('normal')) { _geometry.computeVertexNormals(); + } else { + _geometry.normalizeNormals(); } vertexCount += _geometry.getAttribute('position').count; @@ -702,8 +682,8 @@ bg.addAttribute('normal', normal); bg.addAttribute('uv', uv); bg.setIndex(index); - var vertexIndex = 0; - var indexIndex = 0; + var currentVertex = 0; + var currentIndex = 0; var _iteratorNormalCompletion2 = true; var _didIteratorError2 = false; var _iteratorError2 = undefined; @@ -713,21 +693,22 @@ var _step2$value = _step2.value, geometry = _step2$value.geometry, materialIndex = _step2$value.materialIndex; - bg.merge(geometry, vertexIndex); + var _vertexCount = geometry.getAttribute('position').count; + bg.merge(geometry, currentVertex); var meshIndex = geometry.getIndex(); - for (var k = 0; k < meshIndex.count; k++) { - index.setX(indexIndex + k, vertexIndex + meshIndex.getX(k)); + for (var i = 0; i < meshIndex.count; i++) { + index.setX(currentIndex + i, currentVertex + meshIndex.getX(i)); } var triangleCount = meshIndex.count / 3; - for (var _k = 0; _k < triangleCount; _k++) { + for (var _i = 0; _i < triangleCount; _i++) { materialIndices.push(materialIndex); } - vertexIndex += geometry.getAttribute('position').count; - indexIndex += meshIndex.count; + currentVertex += _vertexCount; + currentIndex += meshIndex.count; } } catch (err) { _didIteratorError2 = true; @@ -748,6 +729,65 @@ geometry: bg, materialIndices: materialIndices }; + } // Similar to buffergeometry.clone(), except we only copy + // specific attributes instead of everything + + + function cloneBufferGeometry(bufferGeometry, attributes) { + var newGeometry = new THREE$1.BufferGeometry(); + var _iteratorNormalCompletion3 = true; + var _didIteratorError3 = false; + var _iteratorError3 = undefined; + + try { + for (var _iterator3 = attributes[Symbol.iterator](), _step3; !(_iteratorNormalCompletion3 = (_step3 = _iterator3.next()).done); _iteratorNormalCompletion3 = true) { + var name = _step3.value; + var attrib = bufferGeometry.getAttribute(name); + + if (attrib) { + newGeometry.addAttribute(name, attrib.clone()); + } + } + } catch (err) { + _didIteratorError3 = true; + _iteratorError3 = err; + } finally { + try { + if (!_iteratorNormalCompletion3 && _iterator3["return"] != null) { + _iterator3["return"](); + } + } finally { + if (_didIteratorError3) { + throw _iteratorError3; + } + } + } + + var index = bufferGeometry.getIndex(); + + if (index) { 
+ newGeometry.setIndex(index); + } + + return newGeometry; + } + + function addFlatGeometryIndices(geometry) { + var position = geometry.getAttribute('position'); + + if (!position) { + console.warn('No position attribute'); + return; + } + + var index = new Uint32Array(position.count); + + for (var i = 0; i < index.length; i++) { + index[i] = i; + } + + geometry.setIndex(new THREE$1.BufferAttribute(index, 1, false)); + return geometry; } // Reorders the elements in the range [first, last) in such a way that @@ -1120,19 +1160,26 @@ // Convert image data from the RGBE format to a 32-bit floating point format // See https://www.cg.tuwien.ac.at/research/theses/matkovic/node84.html for a description of the RGBE format + // Optional multiplier argument for performance optimization function rgbeToFloat(buffer) { + var intensity = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : 1; var texels = buffer.length / 4; var floatBuffer = new Float32Array(texels * 3); + var expTable = []; - for (var i = 0; i < texels; i++) { - var r = buffer[4 * i]; - var g = buffer[4 * i + 1]; - var b = buffer[4 * i + 2]; - var a = buffer[4 * i + 3]; - var e = Math.pow(2, a - 128); - floatBuffer[3 * i] = r * e / 255; - floatBuffer[3 * i + 1] = g * e / 255; - floatBuffer[3 * i + 2] = b * e / 255; + for (var i = 0; i < 255; i++) { + expTable[i] = intensity * Math.pow(2, i - 128) / 255; + } + + for (var _i = 0; _i < texels; _i++) { + var r = buffer[4 * _i]; + var g = buffer[4 * _i + 1]; + var b = buffer[4 * _i + 2]; + var a = buffer[4 * _i + 3]; + var e = expTable[a]; + floatBuffer[3 * _i] = r * e; + floatBuffer[3 * _i + 1] = g * e; + floatBuffer[3 * _i + 2] = b * e; } return floatBuffer; @@ -1163,6 +1210,7 @@ return true; } + // Convert image data from the RGBE format to a 32-bit floating point format var DEFAULT_MAP_RESOLUTION = { width: 4096, height: 2048 @@ -1186,10 +1234,7 @@ height: environmentLight.map.image.height, data: environmentLight.map.image.data }; - envImage.data = rgbeToFloat(envImage.data); - envImage.data.forEach(function (datum, index, arr) { - arr[index] = datum * environmentLight.intensity; - }); + envImage.data = rgbeToFloat(envImage.data, environmentLight.intensity); } else { // initialize blank map envImage = generateBlankMap(DEFAULT_MAP_RESOLUTION.width, DEFAULT_MAP_RESOLUTION.height); @@ -1200,7 +1245,6 @@ function generateBlankMap(width, height) { var texels = width * height; var floatBuffer = new Float32Array(texels * 3); - floatBuffer.fill(0.0); return { width: width, height: height, @@ -1216,56 +1260,116 @@ return addLightAtCoordinates(light, image, sphericalCoords); } // Perform modifications on env map to match input scene - function addLightAtCoordinates(light, image, originSphericalCoords) { + function addLightAtCoordinates(light, image, originCoords) { var floatBuffer = image.data; var width = image.width; var height = image.height; var xTexels = floatBuffer.length / (3 * height); - var yTexels = floatBuffer.length / (3 * width); // default softness for standard directional lights is 0.95 + var yTexels = floatBuffer.length / (3 * width); // default softness for standard directional lights is 0.01, i.e. a hard shadow + + var softness = light.softness || 0.01; // angle from center of light at which no more contributions are projected + + var threshold = findThreshold(softness); // if too few texels are rejected by the threshold then the time to evaluate it is no longer worth it - var softness = "softness" in light && light.softness !== null ? 
light.softness : 0.45; + var useThreshold = threshold < Math.PI / 5; // functional trick to keep the conditional check out of the main loop + + var intensityFromAngleFunction = useThreshold ? getIntensityFromAngleDifferentialThresholded : getIntensityFromAngleDifferential; + var begunAddingContributions = false; + var currentCoords = new THREE$1.Spherical(); // Iterates over each row from top to bottom for (var i = 0; i < xTexels; i++) { + var encounteredInThisRow = false; // Iterates over each texel in row + for (var j = 0; j < yTexels; j++) { var bufferIndex = j * width + i; - var currentSphericalCoords = equirectangularToSpherical(i, j, width, height); - var falloff = getIntensityFromAngleDifferential(originSphericalCoords, currentSphericalCoords, softness); + currentCoords = equirectangularToSpherical(i, j, width, height, currentCoords); + var falloff = intensityFromAngleFunction(originCoords, currentCoords, softness, threshold); + + if (falloff > 0) { + encounteredInThisRow = true; + begunAddingContributions = true; + } + var intensity = light.intensity * falloff; floatBuffer[bufferIndex * 3] += intensity * light.color.r; floatBuffer[bufferIndex * 3 + 1] += intensity * light.color.g; floatBuffer[bufferIndex * 3 + 2] += intensity * light.color.b; + } // First row to not add a contribution since adding began + // This means the entire light has been added and we can exit early + + + if (!encounteredInThisRow && begunAddingContributions) { + return floatBuffer; } } return floatBuffer; } + function findThreshold(softness) { + var step = Math.PI / 128; + var maxSteps = 2.0 * Math.PI / step; + + for (var i = 0; i < maxSteps; i++) { + var angle = i * step; + var falloff = getFalloffAtAngle(angle, softness); + + if (falloff <= 0.0001) { + return angle; + } + } + } + + function getIntensityFromAngleDifferentialThresholded(originCoords, currentCoords, softness, threshold) { + var deltaPhi = getAngleDelta(originCoords.phi, currentCoords.phi); + var deltaTheta = getAngleDelta(originCoords.theta, currentCoords.theta); + + if (deltaTheta > threshold && deltaPhi > threshold) { + return 0; + } + + var angle = angleBetweenSphericals(originCoords, currentCoords); + return getFalloffAtAngle(angle, softness); + } + function getIntensityFromAngleDifferential(originCoords, currentCoords, softness) { var angle = angleBetweenSphericals(originCoords, currentCoords); - var falloffCoeficient = getFalloffAtAngle(angle, softness); - return falloffCoeficient; + return getFalloffAtAngle(angle, softness); + } + + function getAngleDelta(angleA, angleB) { + var diff = Math.abs(angleA - angleB) % (2 * Math.PI); + return diff > Math.PI ? 
2 * Math.PI - diff : diff; } - function angleBetweenSphericals(originCoords, currentCoords) { + var angleBetweenSphericals = function () { var originVector = new THREE$1.Vector3(); - originVector.setFromSpherical(originCoords); var currentVector = new THREE$1.Vector3(); - currentVector.setFromSpherical(currentCoords); - return originVector.angleTo(currentVector); - } + return function (originCoords, currentCoords) { + originVector.setFromSpherical(originCoords); + currentVector.setFromSpherical(currentCoords); + return originVector.angleTo(currentVector); + }; + }(); // TODO: possibly clean this up and optimize it + // + // This function was arrived at through experimentation, it provides good + // looking results with percieved softness that scale relatively linearly with + // the softness value in the 0 - 1 range + // + // For now it doesn't incur too much of a performance penalty because for most of our use cases (lights without too much softness) + // the threshold cutoff in getIntensityFromAngleDifferential stops us from running it too many times + function getFalloffAtAngle(angle, softness) { - var softnessCoeficient = Math.pow(2, 14.5 * Math.max(0.001, 1.0 - clamp(softness, 0.0, 1.0))); - var falloff = Math.pow(softnessCoeficient, 1.1) * Math.pow(8, softnessCoeficient * -1 * Math.pow(angle, 1.8)); + var softnessCoefficient = Math.pow(2, 14.5 * Math.max(0.001, 1.0 - clamp(softness, 0.0, 1.0))); + var falloff = Math.pow(softnessCoefficient, 1.1) * Math.pow(8, -softnessCoefficient * Math.pow(angle, 1.8)); return falloff; } - function equirectangularToSpherical(x, y, width, height) { - var TWOPI = 2.0 * Math.PI; - var theta = TWOPI * x / width; - var phi = Math.PI * y / height; - var sphericalCoords = new THREE$1.Spherical(1.0, phi, theta); - return sphericalCoords; + function equirectangularToSpherical(x, y, width, height, target) { + target.phi = Math.PI * y / height; + target.theta = 2.0 * Math.PI * x / width; + return target; } // retrieve textures used by meshes, grouping textures from meshes shared by *the same* mesh property @@ -1384,33 +1488,33 @@ } function makeTexture(gl, params) { - var _params$wrapS = params.wrapS, - wrapS = _params$wrapS === void 0 ? gl.REPEAT : _params$wrapS, - _params$wrapT = params.wrapT, - wrapT = _params$wrapT === void 0 ? gl.REPEAT : _params$wrapT, - _params$minFilter = params.minFilter, - minFilter = _params$minFilter === void 0 ? gl.LINEAR : _params$minFilter, - _params$magFilter = params.magFilter, - magFilter = _params$magFilter === void 0 ? gl.LINEAR : _params$magFilter, - _params$gammaCorrecti = params.gammaCorrection, - gammaCorrection = _params$gammaCorrecti === void 0 ? false : _params$gammaCorrecti, - _params$width = params.width, + var _params$width = params.width, width = _params$width === void 0 ? null : _params$width, _params$height = params.height, height = _params$height === void 0 ? null : _params$height, + _params$data = params.data, + data = _params$data === void 0 ? null : _params$data, _params$channels = params.channels, channels = _params$channels === void 0 ? null : _params$channels, _params$storage = params.storage, storage = _params$storage === void 0 ? null : _params$storage, - _params$data = params.data, - data = _params$data === void 0 ? null : _params$data, _params$flipY = params.flipY, - flipY = _params$flipY === void 0 ? false : _params$flipY; + flipY = _params$flipY === void 0 ? false : _params$flipY, + _params$gammaCorrecti = params.gammaCorrection, + gammaCorrection = _params$gammaCorrecti === void 0 ? 
false : _params$gammaCorrecti, + _params$wrapS = params.wrapS, + wrapS = _params$wrapS === void 0 ? gl.REPEAT : _params$wrapS, + _params$wrapT = params.wrapT, + wrapT = _params$wrapT === void 0 ? gl.REPEAT : _params$wrapT, + _params$minFilter = params.minFilter, + minFilter = _params$minFilter === void 0 ? gl.LINEAR : _params$minFilter, + _params$magFilter = params.magFilter, + magFilter = _params$magFilter === void 0 ? gl.LINEAR : _params$magFilter; width = width || data.width || 0; height = height || data.height || 0; var texture = gl.createTexture(); var target; - var dataArray; // if data is a JS array but not a TypedArray, assume data is an array of TypedArrays and create a GL Array Texture + var dataArray; // if data is a JS array but not a TypedArray, assume data is an array of images and create a GL Array Texture if (Array.isArray(data)) { dataArray = data; @@ -1445,7 +1549,7 @@ type = gl.FLOAT; internalFormat = [gl.R32F, gl.RG32F, gl.RGB32F, gl.RGBA32F][channels - 1]; } else { - console.error('Texture of unknown type:', data); + console.error('Texture of unknown type:', storage || data); } if (dataArray) { @@ -1976,7 +2080,7 @@ directionalLights.push(child); } - if (child instanceof THREE$1.EnvironmentLight) { + if (child instanceof EnvironmentLight) { if (environmentLights.length > 1) { console.warn(environmentLights, 'only one environment light can be used per scene'); } else if (isHDRTexture(child)) { @@ -2117,18 +2221,18 @@ return texture.map && texture.map.image && (texture.map.encoding === THREE$1.RGBEEncoding || texture.map.encoding === THREE$1.LinearEncoding); } - function fragString$1 (params) { - return "#version 300 es\n\nprecision mediump float;\nprecision mediump int;\n\nin vec2 vCoord;\n\nout vec4 fragColor;\n\nuniform sampler2D image;\n\n".concat(textureLinear(), "\n\n// Tonemapping functions from THREE.js\n\nvec3 linear(vec3 color) {\n return color;\n}\n// https://www.cs.utah.edu/~reinhard/cdrom/\nvec3 reinhard(vec3 color) {\n return clamp(color / (vec3(1.0) + color), vec3(0.0), vec3(1.0));\n}\n// http://filmicworlds.com/blog/filmic-tonemapping-operators/\n#define uncharted2Helper(x) max(((x * (0.15 * x + 0.10 * 0.50) + 0.20 * 0.02) / (x * (0.15 * x + 0.50) + 0.20 * 0.30)) - 0.02 / 0.30, vec3(0.0))\nconst vec3 uncharted2WhitePoint = 1.0 / uncharted2Helper(vec3(").concat(params.whitePoint, "));\nvec3 uncharted2( vec3 color ) {\n // John Hable's filmic operator from Uncharted 2 video game\n return clamp(uncharted2Helper(color) * uncharted2WhitePoint, vec3(0.0), vec3(1.0));\n}\n// http://filmicworlds.com/blog/filmic-tonemapping-operators/\nvec3 cineon( vec3 color ) {\n // optimized filmic operator by Jim Hejl and Richard Burgess-Dawson\n color = max(vec3( 0.0 ), color - 0.004);\n return pow((color * (6.2 * color + 0.5)) / (color * (6.2 * color + 1.7) + 0.06), vec3(2.2));\n}\n// https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/\nvec3 acesFilmic( vec3 color ) {\n return clamp((color * (2.51 * color + 0.03)) / (color * (2.43 * color + 0.59) + 0.14), vec3(0.0), vec3(1.0));\n}\n\nvoid main() {\n vec4 tex = textureLinear(image, vCoord);\n\n // alpha channel stores the number of samples progressively rendered\n // divide the sum of light by alpha to obtain average contribution of light\n\n // in addition, alpha contains a scale factor for the shadow catcher material\n // dividing by alpha normalizes the brightness of the shadow catcher to match the background envmap.\n vec3 light = tex.rgb / tex.a;\n\n light *= ").concat(params.exposure, "; // 
exposure\n\n light = ").concat(params.toneMapping, "(light); // tone mapping\n\n light = pow(light, vec3(1.0 / 2.2)); // gamma correction\n\n fragColor = vec4(light, 1.0);\n}\n\n"); + function fragString$1 (defines) { + return "#version 300 es\n\nprecision mediump float;\nprecision mediump int;\n\nin vec2 vCoord;\n\nout vec4 fragColor;\n\nuniform sampler2D image;\n\n".concat(textureLinear(), "\n\n// Tonemapping functions from THREE.js\n\nvec3 linear(vec3 color) {\n return color;\n}\n// https://www.cs.utah.edu/~reinhard/cdrom/\nvec3 reinhard(vec3 color) {\n return clamp(color / (vec3(1.0) + color), vec3(0.0), vec3(1.0));\n}\n// http://filmicworlds.com/blog/filmic-tonemapping-operators/\n#define uncharted2Helper(x) max(((x * (0.15 * x + 0.10 * 0.50) + 0.20 * 0.02) / (x * (0.15 * x + 0.50) + 0.20 * 0.30)) - 0.02 / 0.30, vec3(0.0))\nconst vec3 uncharted2WhitePoint = 1.0 / uncharted2Helper(vec3(").concat(defines.whitePoint, "));\nvec3 uncharted2( vec3 color ) {\n // John Hable's filmic operator from Uncharted 2 video game\n return clamp(uncharted2Helper(color) * uncharted2WhitePoint, vec3(0.0), vec3(1.0));\n}\n// http://filmicworlds.com/blog/filmic-tonemapping-operators/\nvec3 cineon( vec3 color ) {\n // optimized filmic operator by Jim Hejl and Richard Burgess-Dawson\n color = max(vec3( 0.0 ), color - 0.004);\n return pow((color * (6.2 * color + 0.5)) / (color * (6.2 * color + 1.7) + 0.06), vec3(2.2));\n}\n// https://knarkowicz.wordpress.com/2016/01/06/aces-filmic-tone-mapping-curve/\nvec3 acesFilmic( vec3 color ) {\n return clamp((color * (2.51 * color + 0.03)) / (color * (2.43 * color + 0.59) + 0.14), vec3(0.0), vec3(1.0));\n}\n\nvoid main() {\n vec4 tex = textureLinear(image, vCoord);\n\n // alpha channel stores the number of samples progressively rendered\n // divide the sum of light by alpha to obtain average contribution of light\n\n // in addition, alpha contains a scale factor for the shadow catcher material\n // dividing by alpha normalizes the brightness of the shadow catcher to match the background envmap.\n vec3 light = tex.rgb / tex.a;\n\n light *= ").concat(defines.exposure, "; // exposure\n\n light = ").concat(defines.toneMapping, "(light); // tone mapping\n\n light = pow(light, vec3(1.0 / 2.2)); // gamma correction\n\n fragColor = vec4(light, 1.0);\n}\n\n"); } var _toneMapFunctions; var toneMapFunctions = (_toneMapFunctions = {}, _defineProperty(_toneMapFunctions, THREE$1.LinearToneMapping, 'linear'), _defineProperty(_toneMapFunctions, THREE$1.ReinhardToneMapping, 'reinhard'), _defineProperty(_toneMapFunctions, THREE$1.Uncharted2ToneMapping, 'uncharted2'), _defineProperty(_toneMapFunctions, THREE$1.CineonToneMapping, 'cineon'), _defineProperty(_toneMapFunctions, THREE$1.ACESFilmicToneMapping, 'acesFilmic'), _toneMapFunctions); - function makeToneMapShader(_ref) { - var gl = _ref.gl, - optionalExtensions = _ref.optionalExtensions, - fullscreenQuad = _ref.fullscreenQuad, - textureAllocator = _ref.textureAllocator, - toneMappingParams = _ref.toneMappingParams; + function makeToneMapShader(params) { + var fullscreenQuad = params.fullscreenQuad, + gl = params.gl, + optionalExtensions = params.optionalExtensions, + textureAllocator = params.textureAllocator, + toneMappingParams = params.toneMappingParams; var OES_texture_float_linear = optionalExtensions.OES_texture_float_linear; var toneMapping = toneMappingParams.toneMapping, whitePoint = toneMappingParams.whitePoint, @@ -2142,12 +2246,12 @@ })); var program = createProgram(gl, fullscreenQuad.vertexShader, fragmentShader); var 
uniforms = getUniforms(gl, program); - var bindFramebuffer = textureAllocator.reserveSlot(); + var image = textureAllocator.reserveSlot(); - function draw(_ref2) { - var texture = _ref2.texture; + function draw(_ref) { + var texture = _ref.texture; gl.useProgram(program); - bindFramebuffer(uniforms.image, texture); + image.bind(uniforms.image, texture); fullscreenQuad.draw(); } @@ -2156,30 +2260,16 @@ }; } - function makeRenderTargetFloat(gl, linearFiltering) { - return makeRenderTarget(gl, 'float', linearFiltering); - } - function makeRenderTarget(gl, storage, linearFiltering) { + function makeFramebuffer(params) { + var gl = params.gl, + _params$linearFilteri = params.linearFiltering, + linearFiltering = _params$linearFilteri === void 0 ? false : _params$linearFilteri, + renderTarget = params.renderTarget; var framebuffer = gl.createFramebuffer(); var texture; var width = 0; var height = 0; - function setSize(w, h) { - width = Math.floor(w); - height = Math.floor(h); - texture = makeTexture(gl, { - width: width, - height: height, - storage: storage, - minFilter: linearFiltering ? gl.LINEAR : gl.NEAREST, - magFilter: linearFiltering ? gl.LINEAR : gl.NEAREST, - channels: 4 - }); - gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer); - gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, texture.target, texture.texture, 0); - } - function bind() { gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer); } @@ -2188,6 +2278,20 @@ gl.bindFramebuffer(gl.FRAMEBUFFER, null); } + function setSize(w, h) { + this.bind(); + width = Math.floor(w); + height = Math.floor(h); + + if (Array.isArray(renderTarget)) { + texture = initMultipleTextures(gl, width, height, linearFiltering, renderTarget); + } else { + texture = initSingleTexture(gl, width, height, linearFiltering, renderTarget); + } + + this.unbind(); + } + function copyToScreen() { gl.bindFramebuffer(gl.READ_FRAMEBUFFER, framebuffer); gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, null); @@ -2195,26 +2299,86 @@ } return { - setSize: setSize, bind: bind, - unbind: unbind, copyToScreen: copyToScreen, + get height() { + return height; + }, + + setSize: setSize, + get texture() { return texture; }, + unbind: unbind, + get width() { return width; - }, - - get height() { - return height; } }; } + function initSingleTexture(gl, width, height, linearFiltering, _ref) { + var storage = _ref.storage; + var texture = makeTexture(gl, { + width: width, + height: height, + storage: storage, + minFilter: linearFiltering ? gl.LINEAR : gl.NEAREST, + magFilter: linearFiltering ? gl.LINEAR : gl.NEAREST, + channels: 4 + }); + gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, texture.target, texture.texture, 0); + return texture; + } + + function initMultipleTextures(gl, width, height, linearFiltering, renderTargets) { + var texture = {}; + var drawBuffers = []; + var _iteratorNormalCompletion = true; + var _didIteratorError = false; + var _iteratorError = undefined; + + try { + for (var _iterator = renderTargets.targets[Symbol.iterator](), _step; !(_iteratorNormalCompletion = (_step = _iterator.next()).done); _iteratorNormalCompletion = true) { + var _step$value = _step.value, + name = _step$value.name, + storage = _step$value.storage, + index = _step$value.index; + var t = makeTexture(gl, { + width: width, + height: height, + storage: storage, + minFilter: linearFiltering ? gl.LINEAR : gl.NEAREST, + magFilter: linearFiltering ? 
gl.LINEAR : gl.NEAREST, + channels: 4 + }); + gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + index, t.target, t.texture, 0); + texture[name] = t; + drawBuffers.push(gl.COLOR_ATTACHMENT0 + index); + } + } catch (err) { + _didIteratorError = true; + _iteratorError = err; + } finally { + try { + if (!_iteratorNormalCompletion && _iterator["return"] != null) { + _iterator["return"](); + } + } finally { + if (_didIteratorError) { + throw _iteratorError; + } + } + } + + gl.drawBuffers(drawBuffers); + return texture; + } + // Sampling the scene with the RayTracingRenderer can be very slow (<1 fps). // This overworks the GPU and tends to lock up the OS, making it unresponsive. // To fix this, we can split the screen into smaller tiles, and sample the scene one tile at a time @@ -2373,8 +2537,10 @@ function reserveSlot() { var unit = nextUnit++; - return function (uniform, textureObj) { - bindGl(uniform, textureObj, unit); + return { + bind: function bind(uniform, textureObj) { + bindGl(uniform, textureObj, unit); + } }; } @@ -2418,11 +2584,22 @@ ready = true; }; - var useLinearFiltering = optionalExtensions.OES_texture_float_linear; - var hdrBuffer = makeRenderTargetFloat(gl); // full resolution buffer representing the rendered scene with HDR lighting + var useLinearFiltering = optionalExtensions.OES_texture_float_linear; // full resolution buffer representing the rendered scene with HDR lighting + + var hdrBuffer = makeFramebuffer({ + gl: gl, + renderTarget: { + storage: 'float' + } + }); // lower resolution buffer used for the first frame - var hdrPreviewBuffer = makeRenderTargetFloat(gl, useLinearFiltering); // lower resolution buffer used for the first frame - // used to sample only a portion of the scene to the HDR Buffer to prevent the GPU from locking up from excessive computation + var hdrPreviewBuffer = makeFramebuffer({ + gl: gl, + renderTarget: { + storage: 'float' + }, + useLinearFiltering: useLinearFiltering + }); // used to sample only a portion of the scene to the HDR Buffer to prevent the GPU from locking up from excessive computation var tileRender = makeTileRender(gl); var lastCamera = new LensCamera(); // how many samples to render with uniform noise before switching to stratified noise @@ -2812,9 +2989,8 @@ return true; }; - /* global THREE */ - - if (THREE) { + if (window.THREE) { + /* global THREE */ THREE.LensCamera = LensCamera; THREE.SoftDirectionalLight = SoftDirectionalLight; THREE.EnvironmentLight = EnvironmentLight; diff --git a/build/RayTracingRenderer.js b/build/RayTracingRenderer.js index bf725e7..3bef156 100644 --- a/build/RayTracingRenderer.js +++ b/build/RayTracingRenderer.js @@ -27,9 +27,9 @@ } class SoftDirectionalLight extends THREE$1.DirectionalLight { - constructor(...args) { - super(...args); - this.softness = 0.0; + constructor(color, intensity, softness = 0) { + super(color, intensity); + this.softness = softness; } copy(source) { @@ -218,7 +218,7 @@ } } - function vertString(params) { + function vertString() { return `#version 300 es layout(location = 0) in vec2 position; @@ -257,7 +257,7 @@ void main() { // Manually performs linear filtering if the extension OES_texture_float_linear is not supported - function textureLinear(params) { + function textureLinear(defines) { return ` vec4 textureLinear(sampler2D map, vec2 uv) { @@ -282,7 +282,7 @@ void main() { `; } - function intersect(params) { + function intersect(defines) { return ` uniform highp isampler2D indices; @@ -300,7 +300,7 @@ uniform Materials { #endif #if 
defined(NUM_DIFFUSE_MAPS) || defined(NUM_NORMAL_MAPS) - vec4 diffuseNormalMapSize[${Math.max(params.NUM_DIFFUSE_MAPS, params.NUM_NORMAL_MAPS)}]; + vec4 diffuseNormalMapSize[${Math.max(defines.NUM_DIFFUSE_MAPS, defines.NUM_NORMAL_MAPS)}]; #endif #if defined(NUM_PBR_MAPS) @@ -647,7 +647,7 @@ bool intersectSceneShadow(inout Ray ray) { `; } - function random(params) { + function random(defines) { return ` // Noise texture used to generate a different random number for each pixel. @@ -712,7 +712,7 @@ vec2 randomSampleVec2() { // Sample the environment map using a cumulative distribution function as described in // http://www.pbr-book.org/3ed-2018/Light_Transport_I_Surface_Reflection/Sampling_Light_Sources.html#InfiniteAreaLights - function envmap(params) { + function envmap(defines) { return ` uniform sampler2D envmap; @@ -819,7 +819,7 @@ vec3 sampleEnvmapFromDirection(vec3 d) { `; } - function bsdf(params) { + function bsdf(defines) { return ` // Computes the exact value of the Fresnel factor @@ -916,7 +916,7 @@ vec3 materialBrdf(SurfaceInteraction si, vec3 viewDir, vec3 lightDir, float cosT `; } - function sample(params) { + function sample(defines) { return ` // https://graphics.pixar.com/library/OrthonormalB/paper.pdf @@ -978,7 +978,7 @@ float powerHeuristic(float f, float g) { // Estimate the direct lighting integral using multiple importance sampling // http://www.pbr-book.org/3ed-2018/Light_Transport_I_Surface_Reflection/Direct_Lighting.html#EstimatingtheDirectLightingIntegral - function sampleMaterial(params) { + function sampleMaterial(defines) { return ` vec3 importanceSampleLight(SurfaceInteraction si, vec3 viewDir, bool lastBounce, vec2 random) { @@ -1099,7 +1099,7 @@ vec3 sampleMaterial(SurfaceInteraction si, int bounce, inout Ray ray, inout vec3 `; } - function sampleShadowCatcher (params) { + function sampleShadowCatcher (defines) { return ` #ifdef USE_SHADOW_CATCHER @@ -1232,7 +1232,7 @@ vec3 sampleShadowCatcher(SurfaceInteraction si, int bounce, inout Ray ray, inout `; } - function sampleGlass (params) { + function sampleGlass (defines) { return ` #ifdef USE_GLASS @@ -1297,13 +1297,13 @@ vec3 sampleGlassSpecular(SurfaceInteraction si, int bounce, inout Ray ray, inout return defines; } - function fragString(params) { + function fragString(defines) { return `#version 300 es precision mediump float; precision mediump int; -${addDefines(params)} +${addDefines(defines)} #define PI 3.14159265359 #define TWOPI 6.28318530718 @@ -1388,7 +1388,7 @@ ivec4 fetchData(isampler2D s, int i, int columnsLog2) { } ${textureLinear()} -${intersect(params)} +${intersect(defines)} ${random()} ${envmap()} ${bsdf()} @@ -1462,9 +1462,9 @@ vec4 integrator(inout Ray ray) { // Manually unroll for loop. 
// Some hardware fails to interate over a GLSL loop, so we provide this workaround - ${unrollLoop('i', 1, params.BOUNCES + 1, 1, ` + ${unrollLoop('i', 1, defines.BOUNCES + 1, 1, ` // equivelant to - // for (int i = 1; i < params.bounces + 1, i += 1) + // for (int i = 1; i < defines.bounces + 1, i += 1) bounce(path, i); `)} @@ -1522,25 +1522,6 @@ void main() { `; } - function addFlatGeometryIndices(geometry) { - const position = geometry.getAttribute('position'); - - if (!position) { - console.warn('No position attribute'); - return; - } - - const index = new Uint32Array(position.count); - - for (let i = 0; i < index.length; i++) { - index[i] = i; - } - - geometry.setIndex(new THREE$1.BufferAttribute(index, 1, false)); - - return geometry; - } - function mergeMeshesToGeometry(meshes) { let vertexCount = 0; @@ -1550,7 +1531,7 @@ void main() { const materialIndexMap = new Map(); for (const mesh of meshes) { - const geometry = mesh.geometry.clone(); + const geometry = cloneBufferGeometry(mesh.geometry, ['position', 'normal', 'uv']); const index = geometry.getIndex(); if (!index) { @@ -1561,6 +1542,8 @@ void main() { if (!geometry.getAttribute('normal')) { geometry.computeVertexNormals(); + } else { + geometry.normalizeNormals(); } vertexCount += geometry.getAttribute('position').count; @@ -1588,7 +1571,6 @@ void main() { }; } - function mergeGeometry(geometryAndMaterialIndex, vertexCount, indexCount) { const position = new THREE$1.BufferAttribute(new Float32Array(3 * vertexCount), 3, false); const normal = new THREE$1.BufferAttribute(new Float32Array(3 * vertexCount), 3, false); @@ -1603,29 +1585,69 @@ void main() { bg.addAttribute('uv', uv); bg.setIndex(index); - let vertexIndex = 0; - let indexIndex = 0; + let currentVertex = 0; + let currentIndex = 0; for (const { geometry, materialIndex } of geometryAndMaterialIndex) { - bg.merge(geometry, vertexIndex); + const vertexCount = geometry.getAttribute('position').count; + bg.merge(geometry, currentVertex); const meshIndex = geometry.getIndex(); - for (let k = 0; k < meshIndex.count; k++) { - index.setX(indexIndex + k, vertexIndex + meshIndex.getX(k)); + for (let i = 0; i < meshIndex.count; i++) { + index.setX(currentIndex + i, currentVertex + meshIndex.getX(i)); } const triangleCount = meshIndex.count / 3; - for (let k = 0; k < triangleCount; k++) { + for (let i = 0; i < triangleCount; i++) { materialIndices.push(materialIndex); } - vertexIndex += geometry.getAttribute('position').count; - indexIndex += meshIndex.count; + currentVertex += vertexCount; + currentIndex += meshIndex.count; } return { geometry: bg, materialIndices }; } + // Similar to buffergeometry.clone(), except we only copy + // specific attributes instead of everything + function cloneBufferGeometry(bufferGeometry, attributes) { + const newGeometry = new THREE$1.BufferGeometry(); + + for (const name of attributes) { + const attrib = bufferGeometry.getAttribute(name); + if (attrib) { + newGeometry.addAttribute(name, attrib.clone()); + } + } + + const index = bufferGeometry.getIndex(); + if (index) { + newGeometry.setIndex(index); + } + + return newGeometry; + } + + function addFlatGeometryIndices(geometry) { + const position = geometry.getAttribute('position'); + + if (!position) { + console.warn('No position attribute'); + return; + } + + const index = new Uint32Array(position.count); + + for (let i = 0; i < index.length; i++) { + index[i] = i; + } + + geometry.setIndex(new THREE$1.BufferAttribute(index, 1, false)); + + return geometry; + } + // Reorders the elements in the 
range [first, last) in such a way that // all elements for which the comparator c returns true // precede the elements for which comparator c returns false. @@ -1998,19 +2020,27 @@ void main() { // Convert image data from the RGBE format to a 32-bit floating point format // See https://www.cg.tuwien.ac.at/research/theses/matkovic/node84.html for a description of the RGBE format - function rgbeToFloat(buffer) { + // Optional multiplier argument for performance optimization + function rgbeToFloat(buffer, intensity = 1) { const texels = buffer.length / 4; const floatBuffer = new Float32Array(texels * 3); + const expTable = []; + for (let i = 0; i < 255; i++) { + expTable[i] = intensity * Math.pow(2, i - 128) / 255; + } + for (let i = 0; i < texels; i++) { + const r = buffer[4 * i]; const g = buffer[4 * i + 1]; const b = buffer[4 * i + 2]; const a = buffer[4 * i + 3]; - const e = 2 ** (a - 128); - floatBuffer[3 * i] = r * e / 255; - floatBuffer[3 * i + 1] = g * e / 255; - floatBuffer[3 * i + 2] = b * e / 255; + const e = expTable[a]; + + floatBuffer[3 * i] = r * e; + floatBuffer[3 * i + 1] = g * e; + floatBuffer[3 * i + 2] = b * e; } return floatBuffer; @@ -2040,6 +2070,8 @@ void main() { return true; } + // Convert image data from the RGBE format to a 32-bit floating point format + const DEFAULT_MAP_RESOLUTION = { width: 4096, height: 2048, @@ -2055,20 +2087,21 @@ void main() { function initializeEnvMap(environmentLights) { let envImage; + // Initialize map from environment light if present if (environmentLights.length > 0) { // TODO: support multiple environment lights (what if they have different resolutions?) const environmentLight = environmentLights[0]; + envImage = { width: environmentLight.map.image.width, height: environmentLight.map.image.height, data: environmentLight.map.image.data, }; - envImage.data = rgbeToFloat(envImage.data); - envImage.data.forEach((datum, index, arr) => { - arr[index] = datum * environmentLight.intensity; - }); - } else { // initialize blank map + + envImage.data = rgbeToFloat(envImage.data, environmentLight.intensity); + } else { + // initialize blank map envImage = generateBlankMap(DEFAULT_MAP_RESOLUTION.width, DEFAULT_MAP_RESOLUTION.height); } @@ -2078,7 +2111,6 @@ void main() { function generateBlankMap(width, height) { const texels = width * height; const floatBuffer = new Float32Array(texels * 3); - floatBuffer.fill(0.0); return { width: width, @@ -2090,63 +2122,134 @@ void main() { function addDirectionalLightToEnvMap(light, image) { const sphericalCoords = new THREE$1.Spherical(); const lightDirection = light.position.clone().sub(light.target.position); + sphericalCoords.setFromVector3(lightDirection); sphericalCoords.theta = (Math.PI * 3 / 2) - sphericalCoords.theta; sphericalCoords.makeSafe(); + return addLightAtCoordinates(light, image, sphericalCoords); } // Perform modifications on env map to match input scene - function addLightAtCoordinates(light, image, originSphericalCoords) { + function addLightAtCoordinates(light, image, originCoords) { const floatBuffer = image.data; const width = image.width; const height = image.height; + const xTexels = floatBuffer.length / (3 * height); + const yTexels = floatBuffer.length / (3 * width); + + // default softness for standard directional lights is 0.01, i.e. 
a hard shadow + const softness = light.softness || 0.01; - const xTexels = (floatBuffer.length / (3 * height)); - const yTexels = (floatBuffer.length / (3 * width)); - // default softness for standard directional lights is 0.95 - const softness = ("softness" in light && light.softness !== null) ? light.softness : 0.45; + // angle from center of light at which no more contributions are projected + const threshold = findThreshold(softness); + + // if too few texels are rejected by the threshold then the time to evaluate it is no longer worth it + const useThreshold = threshold < Math.PI / 5; + + // functional trick to keep the conditional check out of the main loop + const intensityFromAngleFunction = useThreshold ? getIntensityFromAngleDifferentialThresholded : getIntensityFromAngleDifferential; + + let begunAddingContributions = false; + let currentCoords = new THREE$1.Spherical(); + + // Iterates over each row from top to bottom for (let i = 0; i < xTexels; i++) { + + let encounteredInThisRow = false; + + // Iterates over each texel in row for (let j = 0; j < yTexels; j++) { const bufferIndex = j * width + i; - const currentSphericalCoords = equirectangularToSpherical(i, j, width, height); - const falloff = getIntensityFromAngleDifferential(originSphericalCoords, currentSphericalCoords, softness); + currentCoords = equirectangularToSpherical(i, j, width, height, currentCoords); + const falloff = intensityFromAngleFunction(originCoords, currentCoords, softness, threshold); + + if(falloff > 0) { + encounteredInThisRow = true; + begunAddingContributions = true; + } + const intensity = light.intensity * falloff; floatBuffer[bufferIndex * 3] += intensity * light.color.r; floatBuffer[bufferIndex * 3 + 1] += intensity * light.color.g; floatBuffer[bufferIndex * 3 + 2] += intensity * light.color.b; } + + // First row to not add a contribution since adding began + // This means the entire light has been added and we can exit early + if(!encounteredInThisRow && begunAddingContributions) { + return floatBuffer; + } } + return floatBuffer; } + function findThreshold(softness) { + const step = Math.PI / 128; + const maxSteps = (2.0 * Math.PI) / step; + + for (let i = 0; i < maxSteps; i++) { + const angle = i * step; + const falloff = getFalloffAtAngle(angle, softness); + if (falloff <= 0.0001) { + return angle; + } + } + } + + function getIntensityFromAngleDifferentialThresholded(originCoords, currentCoords, softness, threshold) { + const deltaPhi = getAngleDelta(originCoords.phi, currentCoords.phi); + const deltaTheta = getAngleDelta(originCoords.theta, currentCoords.theta); + + if(deltaTheta > threshold && deltaPhi > threshold) { + return 0; + } + + const angle = angleBetweenSphericals(originCoords, currentCoords); + return getFalloffAtAngle(angle, softness); + } + function getIntensityFromAngleDifferential(originCoords, currentCoords, softness) { const angle = angleBetweenSphericals(originCoords, currentCoords); - const falloffCoeficient = getFalloffAtAngle(angle, softness); - return falloffCoeficient; + return getFalloffAtAngle(angle, softness); + } + + function getAngleDelta(angleA, angleB) { + const diff = Math.abs(angleA - angleB) % (2 * Math.PI); + return diff > Math.PI ? 
(2 * Math.PI - diff) : diff; } - function angleBetweenSphericals(originCoords, currentCoords) { + const angleBetweenSphericals = function() { const originVector = new THREE$1.Vector3(); - originVector.setFromSpherical(originCoords); const currentVector = new THREE$1.Vector3(); - currentVector.setFromSpherical(currentCoords); - return originVector.angleTo(currentVector); - } + return (originCoords, currentCoords) => { + originVector.setFromSpherical(originCoords); + currentVector.setFromSpherical(currentCoords); + return originVector.angleTo(currentVector); + }; + }(); + + // TODO: possibly clean this up and optimize it + // + // This function was arrived at through experimentation, it provides good + // looking results with percieved softness that scale relatively linearly with + // the softness value in the 0 - 1 range + // + // For now it doesn't incur too much of a performance penalty because for most of our use cases (lights without too much softness) + // the threshold cutoff in getIntensityFromAngleDifferential stops us from running it too many times function getFalloffAtAngle(angle, softness) { - const softnessCoeficient = Math.pow(2, 14.5 * Math.max(0.001, (1.0 - clamp(softness, 0.0, 1.0)))); - const falloff = Math.pow(softnessCoeficient, 1.1) * Math.pow(8, softnessCoeficient * -1 * (Math.pow(angle, 1.8))); + const softnessCoefficient = Math.pow(2, 14.5 * Math.max(0.001, 1.0 - clamp(softness, 0.0, 1.0))); + const falloff = Math.pow(softnessCoefficient, 1.1) * Math.pow(8, -softnessCoefficient * Math.pow(angle, 1.8)); return falloff; } - function equirectangularToSpherical(x, y, width, height) { - const TWOPI = 2.0 * Math.PI; - const theta = (TWOPI * x) / width; - const phi = (Math.PI * y) / height; - const sphericalCoords = new THREE$1.Spherical(1.0, phi, theta); - return sphericalCoords; + function equirectangularToSpherical(x, y, width, height, target) { + target.phi = (Math.PI * y) / height; + target.theta = (2.0 * Math.PI * x) / width; + return target; } // retrieve textures used by meshes, grouping textures from meshes shared by *the same* mesh property @@ -2206,17 +2309,29 @@ void main() { function makeTexture(gl, params) { let { - wrapS = gl.REPEAT, - wrapT = gl.REPEAT, - minFilter = gl.LINEAR, - magFilter = gl.LINEAR, - gammaCorrection = false, width = null, height = null, + + // A single HTMLImageElement, ImageData, or TypedArray, + // Or an array of any of these objects. In this case an Array Texture will be created + data = null, + + // Number of channels, [1-4]. If left blank, the the function will decide the number of channels automatically from the data channels = null, + + // Either 'byte' or 'float' + // If left empty, the function will decide the format automatically from the data storage = null, - data = null, - flipY = false + + // Reverse the texture across the y-axis. 
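+      // WebGL's texture coordinate origin is at the bottom-left, while HTMLImageElement
+      // and ImageData pixels are stored top-to-bottom, so image sources typically need
+      // this flip.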
+ flipY = false, + + // sampling properties + gammaCorrection = false, + wrapS = gl.REPEAT, + wrapT = gl.REPEAT, + minFilter = gl.LINEAR, + magFilter = gl.LINEAR, } = params; width = width || data.width || 0; @@ -2227,7 +2342,7 @@ void main() { let target; let dataArray; - // if data is a JS array but not a TypedArray, assume data is an array of TypedArrays and create a GL Array Texture + // if data is a JS array but not a TypedArray, assume data is an array of images and create a GL Array Texture if (Array.isArray(data)) { dataArray = data; data = dataArray[0]; @@ -2287,7 +2402,7 @@ void main() { gl.RGBA32F ][channels - 1]; } else { - console.error('Texture of unknown type:', data); + console.error('Texture of unknown type:', storage || data); } if (dataArray) { @@ -2740,7 +2855,7 @@ void main() { if (child instanceof THREE$1.DirectionalLight) { directionalLights.push(child); } - if (child instanceof THREE$1.EnvironmentLight) { + if (child instanceof EnvironmentLight) { if (environmentLights.length > 1) { console.warn(environmentLights, 'only one environment light can be used per scene'); } @@ -2834,7 +2949,7 @@ void main() { && (texture.map.encoding === THREE$1.RGBEEncoding || texture.map.encoding === THREE$1.LinearEncoding); } - function fragString$1(params) { + function fragString$1(defines) { return `#version 300 es precision mediump float; @@ -2859,7 +2974,7 @@ vec3 reinhard(vec3 color) { } // http://filmicworlds.com/blog/filmic-tonemapping-operators/ #define uncharted2Helper(x) max(((x * (0.15 * x + 0.10 * 0.50) + 0.20 * 0.02) / (x * (0.15 * x + 0.50) + 0.20 * 0.30)) - 0.02 / 0.30, vec3(0.0)) -const vec3 uncharted2WhitePoint = 1.0 / uncharted2Helper(vec3(${params.whitePoint})); +const vec3 uncharted2WhitePoint = 1.0 / uncharted2Helper(vec3(${defines.whitePoint})); vec3 uncharted2( vec3 color ) { // John Hable's filmic operator from Uncharted 2 video game return clamp(uncharted2Helper(color) * uncharted2WhitePoint, vec3(0.0), vec3(1.0)); @@ -2885,9 +3000,9 @@ void main() { // dividing by alpha normalizes the brightness of the shadow catcher to match the background envmap. 
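   // e.g. a pixel that is not a shadow catcher and has accumulated 64 samples has
   // tex.a == 64.0, so tex.rgb / tex.a is its running average of the sampled light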
vec3 light = tex.rgb / tex.a; - light *= ${params.exposure}; // exposure + light *= ${defines.exposure}; // exposure - light = ${params.toneMapping}(light); // tone mapping + light = ${defines.toneMapping}(light); // tone mapping light = pow(light, vec3(1.0 / 2.2)); // gamma correction @@ -2905,13 +3020,14 @@ void main() { [THREE$1.ACESFilmicToneMapping]: 'acesFilmic' }; - function makeToneMapShader({ + function makeToneMapShader(params) { + const { + fullscreenQuad, gl, optionalExtensions, - fullscreenQuad, textureAllocator, toneMappingParams - }) { + } = params; const { OES_texture_float_linear } = optionalExtensions; const { toneMapping, whitePoint, exposure } = toneMappingParams; @@ -2925,12 +3041,12 @@ void main() { const program = createProgram(gl, fullscreenQuad.vertexShader, fragmentShader); const uniforms = getUniforms(gl, program); - const bindFramebuffer = textureAllocator.reserveSlot(); + const image = textureAllocator.reserveSlot(); function draw({ texture }) { gl.useProgram(program); - bindFramebuffer(uniforms.image, texture); + image.bind(uniforms.image, texture); fullscreenQuad.draw(); } @@ -2940,31 +3056,22 @@ void main() { }; } - function makeRenderTargetFloat(gl, linearFiltering) { - return makeRenderTarget(gl, 'float', linearFiltering); - } + function makeFramebuffer(params) { + const { + gl, + linearFiltering = false, // linearly filter textures + + // A single render target in the form { storage: 'byte' | 'float' } + // Or multiple render targets passed as a RenderTargets object + renderTarget + } = params; - function makeRenderTarget(gl, storage, linearFiltering) { const framebuffer = gl.createFramebuffer(); let texture; + let width = 0; let height = 0; - function setSize(w, h) { - width = Math.floor(w); - height = Math.floor(h); - texture = makeTexture(gl, { - width, - height, - storage, - minFilter: linearFiltering ? gl.LINEAR : gl.NEAREST, - magFilter: linearFiltering ? gl.LINEAR : gl.NEAREST, - channels: 4 - }); - gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer); - gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, texture.target, texture.texture, 0); - } - function bind() { gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer); } @@ -2973,6 +3080,21 @@ void main() { gl.bindFramebuffer(gl.FRAMEBUFFER, null); } + function setSize(w, h) { + this.bind(); + + width = Math.floor(w); + height = Math.floor(h); + + if (Array.isArray(renderTarget)) { + texture = initMultipleTextures(gl, width, height, linearFiltering, renderTarget); + } else { + texture = initSingleTexture(gl, width, height, linearFiltering, renderTarget); + } + + this.unbind(); + } + function copyToScreen() { gl.bindFramebuffer(gl.READ_FRAMEBUFFER, framebuffer); gl.bindFramebuffer(gl.DRAW_FRAMEBUFFER, null); @@ -2980,22 +3102,61 @@ void main() { } return { - setSize, bind, - unbind, copyToScreen, + get height() { + return height; + }, + setSize, get texture() { return texture; }, + unbind, get width() { return width; }, - get height() { - return height; - }, }; } + function initSingleTexture(gl, width, height, linearFiltering, { storage }) { + const texture = makeTexture(gl, { + width, + height, + storage, + minFilter: linearFiltering ? gl.LINEAR : gl.NEAREST, + magFilter: linearFiltering ? 
gl.LINEAR : gl.NEAREST, + channels: 4 + }); + gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, texture.target, texture.texture, 0); + + return texture; + } + + function initMultipleTextures(gl, width, height, linearFiltering, renderTargets) { + const texture = {}; + const drawBuffers = []; + + for (const { name, storage, index } of renderTargets.targets) { + const t = makeTexture(gl, { + width, + height, + storage, + minFilter: linearFiltering ? gl.LINEAR : gl.NEAREST, + magFilter: linearFiltering ? gl.LINEAR : gl.NEAREST, + channels: 4 + }); + + gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0 + index, t.target, t.texture, 0); + + texture[name] = t; + drawBuffers.push(gl.COLOR_ATTACHMENT0 + index); + } + + gl.drawBuffers(drawBuffers); + + return texture; + } + // TileRender is based on the concept of a compute shader's work group. // Sampling the scene with the RayTracingRenderer can be very slow (<1 fps). @@ -3167,8 +3328,10 @@ void main() { function reserveSlot() { const unit = nextUnit++; - return (uniform, textureObj) => { - bindGl(uniform, textureObj, unit); + return { + bind(uniform, textureObj) { + bindGl(uniform, textureObj, unit); + } }; } @@ -3206,8 +3369,18 @@ void main() { const useLinearFiltering = optionalExtensions.OES_texture_float_linear; - const hdrBuffer = makeRenderTargetFloat(gl); // full resolution buffer representing the rendered scene with HDR lighting - const hdrPreviewBuffer = makeRenderTargetFloat(gl, useLinearFiltering); // lower resolution buffer used for the first frame + // full resolution buffer representing the rendered scene with HDR lighting + const hdrBuffer = makeFramebuffer({ + gl, + renderTarget: { storage: 'float' } + }); + + // lower resolution buffer used for the first frame + const hdrPreviewBuffer = makeFramebuffer({ + gl, + renderTarget: { storage: 'float' }, + useLinearFiltering + }); // used to sample only a portion of the scene to the HDR Buffer to prevent the GPU from locking up from excessive computation const tileRender = makeTileRender(gl); @@ -3592,8 +3765,8 @@ void main() { return true; }; - /* global THREE */ - if (THREE) { + if (window.THREE) { + /* global THREE */ THREE.LensCamera = LensCamera; THREE.SoftDirectionalLight = SoftDirectionalLight; THREE.EnvironmentLight = EnvironmentLight; diff --git a/package.json b/package.json index 6771ebc..c036114 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "ray-tracing-renderer", - "version": "0.1.3", + "version": "0.1.4", "description": "A [Three.js](https://github.com/mrdoob/three.js/) renderer which utilizes path tracing to render a scene with true photorealism. The renderer supports global illumination, reflections, soft shadows, and realistic environment lighting.", "main": "src/main.js", "scripts": {