diff --git a/example/webgpu_primitives.html b/example/webgpu_primitives.html
new file mode 100644
index 00000000..a6c51007
--- /dev/null
+++ b/example/webgpu_primitives.html
@@ -0,0 +1,46 @@
+
+
+ Basic Primitives Path Tracing Example
+
+
+
+
+
+
+
+
+
+
diff --git a/example/webgpu_primitives.js b/example/webgpu_primitives.js
new file mode 100644
index 00000000..54a8ad70
--- /dev/null
+++ b/example/webgpu_primitives.js
@@ -0,0 +1,145 @@
+import { Scene, SphereGeometry, MeshStandardMaterial, Mesh, BoxGeometry, PerspectiveCamera, ACESFilmicToneMapping, WebGPURenderer } from 'three/webgpu';
+import { WebGPUPathTracer, GradientEquirectTexture } from '../src/index.js';
+import { getScaledSettings } from './utils/getScaledSettings.js';
+import GUI from 'three/examples/jsm/libs/lil-gui.module.min.js';
+import Chart from 'chart.js/auto';
+
+const options = {
+ useMegakernel: false,
+};
+
+// init scene, renderer, camera, controls, etc
+const scene = new Scene();
+const sphereGeom = new SphereGeometry( 0.49, 64, 32 );
+const ball1 = new Mesh(
+ sphereGeom,
+ new MeshStandardMaterial( {
+ color: '#e91e63',
+ roughness: 0.25,
+ metalness: 1,
+ } )
+);
+const ball2 = new Mesh(
+ sphereGeom,
+ new MeshStandardMaterial( {
+ color: '#ff9800',
+ roughness: 0.1,
+ metalness: 1,
+ } )
+);
+const ball3 = new Mesh(
+ sphereGeom,
+ new MeshStandardMaterial( {
+ color: '#2196f3',
+ roughness: 0.2,
+ metalness: 1,
+ } )
+);
+const ground = new Mesh(
+ new BoxGeometry( 3.5, 0.1, 1.5 ),
+ new MeshStandardMaterial( { color: '#f0f0f0' } ),
+);
+
+ball1.position.x = - 1;
+ball3.position.x = 1;
+ground.position.y = - 0.54;
+scene.add( ball1, ball2, ball3, ground );
+
+// set the environment map: white-to-grey gradient used for both lighting and background
+const texture = new GradientEquirectTexture();
+texture.topColor.set( 0xffffff );
+texture.bottomColor.set( 0x666666 );
+texture.update();
+scene.environment = texture;
+scene.background = texture;
+
+const camera = new PerspectiveCamera();
+camera.position.set( 0, 1, - 5 );
+camera.lookAt( 0, 0, 0 );
+
+const renderer = new WebGPURenderer( { antialias: true, trackTimestamp: true } );
+renderer.toneMapping = ACESFilmicToneMapping;
+document.body.appendChild( renderer.domElement );
+renderer.setDrawingBufferSize( 1920, 1080, 1 );
+
+const settings = getScaledSettings();
+const pathTracer = new WebGPUPathTracer( renderer );
+pathTracer.renderScale = settings.renderScale;
+pathTracer.tiles.setScalar( settings.tiles );
+pathTracer.setScene( scene, camera );
+pathTracer.useMegakernel( options.useMegakernel );
+
+const gui = new GUI();
+
+gui.add( options, 'useMegakernel' ).onChange( () => {
+
+ pathTracer.useMegakernel( options.useMegakernel );
+
+} );
+
+
+onResize();
+
+animate();
+
+window.addEventListener( 'resize', onResize );
+
+// const samplesEl = document.getElementById( 'samples' );
+const canvasEl = document.getElementById( 'corner-canvas' );
+const timestamps = [];
+const labels = [];
+
+const chart = new Chart( canvasEl, { type: 'line', data: { labels, datasets: [ { animation: false, label: 'Sample time', data: timestamps } ] } } );
+
+async function handleTimestamp() { // append the latest sample's GPU compute time to the chart
+
+ const samples = pathTracer.getSampleCount();
+ const timestamp = await pathTracer.getLatestSampleTimestamp();
+ timestamps.length = samples; // NOTE(review): if samples === 0 the [ samples - 1 ] writes below hit index -1 — confirm renderSample always increments first
+ timestamps[ samples - 1 ] = timestamp;
+ labels.length = samples;
+ labels[ samples - 1 ] = samples;
+ chart.update();
+
+ // let totalTime = 0;
+ // for ( const t of timestamps ) {
+ //
+ // totalTime += t;
+ //
+ // }
+ //
+ // const avgTime = totalTime / timestamps.length;
+ //
+ // samplesEl.innerText = `Rendering ${samples} samples took ${totalTime.toFixed( 6 )}ms (${avgTime.toFixed( 6 )}ms on average)`;
+
+}
+
+function animate() {
+
+ // if the camera position changes call "pathTracer.reset()"
+ requestAnimationFrame( animate );
+
+ // update the camera and render one sample
+ pathTracer.renderSample();
+
+ handleTimestamp(); // async; presumably fire-and-forget on purpose — the chart lags at most one frame
+
+}
+
+function onResize() {
+
+ return;
+ // update rendering resolution
+ const w = window.innerWidth;
+ const h = window.innerHeight;
+
+ renderer.setSize( w, h );
+ renderer.setPixelRatio( window.devicePixelRatio );
+
+ const aspect = w / h;
+ camera.aspect = aspect;
+ camera.updateProjectionMatrix();
+
+ pathTracer.setScene( scene, camera );
+
+}
diff --git a/package-lock.json b/package-lock.json
index aefd2dea..a9f062a9 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -8,6 +8,9 @@
"name": "three-gpu-pathtracer",
"version": "0.0.23",
"license": "MIT",
+ "dependencies": {
+ "chart.js": "^4.5.1"
+ },
"devDependencies": {
"@lookingglass/webxr": "^0.6.0",
"@monogrid/gainmap-js": "^3.0.5",
@@ -25,13 +28,13 @@
"rollup": "^2.70.0",
"simple-git": "^3.10.0",
"three": "^0.181.1",
- "three-mesh-bvh": "^0.7.4",
+ "three-mesh-bvh": "^0.9.2",
"typescript": "^5.9.2",
"vite": "^6.2.2",
"yargs": "^17.5.1"
},
"peerDependencies": {
- "three": ">=0.151.0",
+ "three": ">=0.180.0",
"three-mesh-bvh": ">=0.7.4",
"xatlas-web": "^0.1.0"
}
@@ -631,6 +634,12 @@
"dev": true,
"license": "BSD-3-Clause"
},
+ "node_modules/@kurkle/color": {
+ "version": "0.3.4",
+ "resolved": "https://registry.npmjs.org/@kurkle/color/-/color-0.3.4.tgz",
+ "integrity": "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w==",
+ "license": "MIT"
+ },
"node_modules/@kwsites/file-exists": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@kwsites/file-exists/-/file-exists-1.1.1.tgz",
@@ -1614,6 +1623,18 @@
"integrity": "sha512-9r6MNQrbg+cFURvEy10wo9Q35PD5GVj2GvXCbUYv8mU0Uf/NbkR7KlzMrjT4Ycd8a2nxApFJXQX2lTOPRFyG2g==",
"dev": true
},
+ "node_modules/chart.js": {
+ "version": "4.5.1",
+ "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.5.1.tgz",
+ "integrity": "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==",
+ "license": "MIT",
+ "dependencies": {
+ "@kurkle/color": "^0.3.0"
+ },
+ "engines": {
+ "pnpm": ">=8"
+ }
+ },
"node_modules/chownr": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
@@ -3657,12 +3678,13 @@
"license": "MIT"
},
"node_modules/three-mesh-bvh": {
- "version": "0.7.4",
- "resolved": "https://registry.npmjs.org/three-mesh-bvh/-/three-mesh-bvh-0.7.4.tgz",
- "integrity": "sha512-flxe0A4uflTPR6elgq/Y8VrLoljDNS899i422SxQcU3EtMj6o8z4kZRyqZqGWzR0qMf1InTZzY1/0xZl/rnvVw==",
+ "version": "0.9.2",
+ "resolved": "https://registry.npmjs.org/three-mesh-bvh/-/three-mesh-bvh-0.9.2.tgz",
+ "integrity": "sha512-W0oUU4AZv0QwisjlkYlLVaYTVxijhMXCztyNvVlDmTK/u0QB16Xbfem5nWkQBsz3oTzztA1B/ouiz4wYCMj78g==",
"dev": true,
+ "license": "MIT",
"peerDependencies": {
- "three": ">= 0.151.0"
+ "three": ">= 0.159.0"
}
},
"node_modules/through": {
@@ -4419,6 +4441,11 @@
"integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==",
"dev": true
},
+ "@kurkle/color": {
+ "version": "0.3.4",
+ "resolved": "https://registry.npmjs.org/@kurkle/color/-/color-0.3.4.tgz",
+ "integrity": "sha512-M5UknZPHRu3DEDWoipU6sE8PdkZ6Z/S+v4dD+Ke8IaNlpdSQah50lz1KtcFBa2vsdOnwbbnxJwVM4wty6udA5w=="
+ },
"@kwsites/file-exists": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/@kwsites/file-exists/-/file-exists-1.1.1.tgz",
@@ -5037,6 +5064,14 @@
"integrity": "sha512-9r6MNQrbg+cFURvEy10wo9Q35PD5GVj2GvXCbUYv8mU0Uf/NbkR7KlzMrjT4Ycd8a2nxApFJXQX2lTOPRFyG2g==",
"dev": true
},
+ "chart.js": {
+ "version": "4.5.1",
+ "resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.5.1.tgz",
+ "integrity": "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==",
+ "requires": {
+ "@kurkle/color": "^0.3.0"
+ }
+ },
"chownr": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz",
@@ -6431,9 +6466,9 @@
"dev": true
},
"three-mesh-bvh": {
- "version": "0.7.4",
- "resolved": "https://registry.npmjs.org/three-mesh-bvh/-/three-mesh-bvh-0.7.4.tgz",
- "integrity": "sha512-flxe0A4uflTPR6elgq/Y8VrLoljDNS899i422SxQcU3EtMj6o8z4kZRyqZqGWzR0qMf1InTZzY1/0xZl/rnvVw==",
+ "version": "0.9.2",
+ "resolved": "https://registry.npmjs.org/three-mesh-bvh/-/three-mesh-bvh-0.9.2.tgz",
+ "integrity": "sha512-W0oUU4AZv0QwisjlkYlLVaYTVxijhMXCztyNvVlDmTK/u0QB16Xbfem5nWkQBsz3oTzztA1B/ouiz4wYCMj78g==",
"dev": true,
"requires": {}
},
diff --git a/package.json b/package.json
index ee479111..5152d308 100644
--- a/package.json
+++ b/package.json
@@ -36,6 +36,7 @@
"@types/three": "^0.181.0",
"@typescript-eslint/eslint-plugin": "^8.40.0",
"canvas-capture": "^2.0.5",
+ "chart.js": "^4.5.1",
"eslint": "^8.56.0",
"eslint-config-mdcs": "^5.0.0",
"node-fetch": "^3.2.9",
@@ -46,7 +47,7 @@
"rollup": "^2.70.0",
"simple-git": "^3.10.0",
"three": "^0.181.1",
- "three-mesh-bvh": "^0.7.4",
+ "three-mesh-bvh": "^0.9.2",
"typescript": "^5.9.2",
"vite": "^6.2.2",
"yargs": "^17.5.1"
diff --git a/src/index.js b/src/index.js
index a317c960..10ae65ab 100644
--- a/src/index.js
+++ b/src/index.js
@@ -22,3 +22,6 @@ export * from './materials/surface/FogVolumeMaterial.js';
// deprecated
export * from './materials/pathtracing/PhysicalPathTracingMaterial.js';
export * from './core/PathTracingRenderer.js';
+
+// webgpu
+export * from './webgpu/WebGPUPathTracer.js';
diff --git a/src/webgpu/PathTracerCore.js b/src/webgpu/PathTracerCore.js
new file mode 100644
index 00000000..de37bd0c
--- /dev/null
+++ b/src/webgpu/PathTracerCore.js
@@ -0,0 +1,530 @@
+import { IndirectStorageBufferAttribute, StorageBufferAttribute, Matrix4, Vector2, TimestampQuery } from 'three/webgpu';
+import { uniform, storage, globalId } from 'three/tsl';
+import megakernelShader from './nodes/megakernel.wgsl.js';
+import resetResultFn from './nodes/reset.wgsl.js';
+import {
+ generateRays, traceRay, bsdfEval, escapedRay, cleanQueues,
+ writeTraceRayDispatchSize, writeBsdfDispatchSize, writeEscapedRayDispatchSize,
+} from './nodes/wavefront.wgsl.js';
+
+function* renderTask() {
+
+ const tileSize = new Vector2();
+
+ while ( true ) {
+
+ const {
+ megakernel,
+ _renderer,
+ WORKGROUP_SIZE,
+ useMegakernel,
+ } = this;
+
+ this.getTileSize( tileSize );
+
+ _renderer.info.reset();
+
+ if ( useMegakernel ) {
+
+ const dispatchSize = [
+ Math.ceil( tileSize.x / WORKGROUP_SIZE[ 0 ] ),
+ Math.ceil( tileSize.y / WORKGROUP_SIZE[ 1 ] ),
+ 1
+ ];
+
+ _renderer.compute( megakernel, dispatchSize );
+
+ } else {
+
+ const dispatchSize = [
+ Math.ceil( tileSize.x / WORKGROUP_SIZE[ 0 ] ),
+ Math.ceil( tileSize.y / WORKGROUP_SIZE[ 1 ] ),
+ 1,
+ ];
+ // 0. Clean queues
+ _renderer.compute( this.cleanQueuesKernel, 1 );
+ // 0.1 Generate one ray per pixel and write it into ray queue
+ _renderer.compute( this.generateRaysKernel, dispatchSize );
+
+ for ( let i = 0; i < this.bounces; i ++ ) {
+
+ // 1. Trace rays from the ray queue
+ // Traced ray can either hit something - then it goes into the hitResultQueue
+ // If it doesn't hit anything it goes into the escapedRayQueue
+ _renderer.compute( this.writeTraceRayDispatchSizeKernel, 1 );
+ _renderer.compute( this.traceRayKernel, this.traceRayDispatchBuffer );
+
+ // 2. Handle escaped and scattered rays
+ // 2.1 Calculate dispatch sizes and write them to the GPU dispatch buffers
+ _renderer.compute( this.writeEscapedRayDispatchSizeKernel, 1 );
+ _renderer.compute( this.writeBsdfDispatchSizeKernel, 1 );
+ // 2.2 Dispatch shaders
+ // When processing escaped rays, calculate new contribution and add that to the result image
+ _renderer.compute( this.escapedRayKernel, this.escapedRayDispatchBuffer );
+ // When processing hit results, sample a new ray according to material's properties
+ _renderer.compute( this.bsdfEvalKernel, this.bsdfDispatchBuffer );
+
+ }
+
+ }
+
+ this.samples += 1;
+
+ const updateTimestamps = async () => {
+
+ await _renderer.resolveTimestampsAsync( TimestampQuery.COMPUTE );
+ const delta = _renderer.info.compute.timestamp;
+
+ return delta;
+
+ };
+
+ this.getLatestSampleTimestamp = updateTimestamps;
+
+
+ yield;
+
+ }
+
+}
+
+export class PathTracerCore {
+
+ get megakernelParams() {
+
+ return this.megakernel.computeNode.parameters;
+
+ }
+
+ get traceRayParams() {
+
+ return this.traceRayKernel.computeNode.parameters;
+
+ }
+
+ get bsdfEvalParams() {
+
+ return this.bsdfEvalKernel.computeNode.parameters;
+
+ }
+
+ get escapedRayParams() {
+
+ return this.escapedRayKernel.computeNode.parameters;
+
+ }
+
+ get generateRaysParams() {
+
+ return this.generateRaysKernel.computeNode.parameters;
+
+ }
+
+ constructor( renderer ) {
+
+ this.camera = null;
+ this._renderer = renderer;
+ this._task = null;
+
+ this.samples = 0;
+ this.bounces = 7;
+
+ this.tiles = new Vector2( 2, 2 );
+ this.tileSize = new Vector2();
+ this.currentTile = 0;
+
+ this.dimensions = new Vector2();
+
+ this.useMegakernel = true;
+
+ this.getLatestSampleTimestamp = async () => {
+
+ return 0;
+
+ };
+
+ this.geometry = {
+ bvh: new StorageBufferAttribute(),
+ index: new StorageBufferAttribute(),
+ position: new StorageBufferAttribute(),
+ normal: new StorageBufferAttribute(),
+
+ materialIndex: new StorageBufferAttribute(),
+ materials: new StorageBufferAttribute(),
+ };
+
+ this.resultBuffer = new StorageBufferAttribute( new Float32Array( 4 ) );
+ this.resultBuffer.name = 'Result Image #0';
+
+ this.sampleCountBuffer = new StorageBufferAttribute( new Uint32Array( 1 ) );
+ this.sampleCountBuffer.name = 'Sample Count';
+
+ // Higher resolutions do not fit into the WebGPU-default 128 MB maximum buffer size
+ const maxRayCount = 1920 * 1080;
+ const queueSize = /* element storage */ 16 * maxRayCount;
+ this.rayQueue = new StorageBufferAttribute( new Uint32Array( queueSize ) );
+ this.rayQueue.name = 'Ray Queue';
+
+ // [rayQueueSize, hitResultQueueSize, escapedRayQueueSize]
+ this.queueSizes = new StorageBufferAttribute( new Uint32Array( 3 ) );
+ this.queueSizes.name = 'Queue Sizes';
+
+ this.escapedQueue = new StorageBufferAttribute( new Uint32Array( 16 * maxRayCount ) );
+ this.escapedQueue.name = 'Escaped Rays Queue';
+
+ this.hitResultQueue = new StorageBufferAttribute( new Uint32Array( 16 * maxRayCount ) );
+ this.hitResultQueue.name = 'Hit Result Queue';
+
+ this.WORKGROUP_SIZE = [ 8, 8, 1 ];
+ this.bsdfEvalWorkgroupSize = [ 128, 1, 1 ];
+ this.traceRayWorkgroupSize = [ 128, 1, 1 ];
+ this.escapedRayWorkgroupSize = [ 128, 1, 1 ];
+
+ this.createMegakernel();
+ this.createResetKernel();
+
+ const generateRaysParams = {
+
+ cameraToModelMatrix: uniform( new Matrix4() ),
+ inverseProjectionMatrix: uniform( new Matrix4() ),
+ offset: uniform( new Vector2() ),
+ tileSize: uniform( new Vector2() ),
+ dimensions: uniform( this.dimensions ),
+
+ rayQueue: storage( this.rayQueue, 'RayQueueElement' ),
+ rayQueueSize: storage( this.queueSizes, 'uint' ).toAtomic(),
+
+ globalId: globalId,
+
+ };
+
+ this.generateRaysKernel = generateRays( generateRaysParams ).computeKernel( this.WORKGROUP_SIZE );
+
+ this.createTraceRayKernel();
+ this.createEscapedRayKernel();
+ this.createBsdfEvalKernel();
+
+ this.traceRayDispatchBuffer = new IndirectStorageBufferAttribute( new Uint32Array( 3 ) );
+ this.traceRayDispatchBuffer.name = 'Dispatch Buffer for Trace Ray';
+
+ const writeTraceRayDispatchSizeParams = {
+ outputBuffer: storage( this.traceRayDispatchBuffer, 'uint' ),
+ queueSizes: storage( this.queueSizes, 'uint' ).toAtomic(),
+ workgroupSize: uniform( this.traceRayWorkgroupSize[ 0 ] ),
+ };
+
+ this.writeTraceRayDispatchSizeKernel = writeTraceRayDispatchSize( writeTraceRayDispatchSizeParams ).computeKernel( [ 1, 1, 1 ] );
+ this.escapedRayDispatchBuffer = new IndirectStorageBufferAttribute( new Uint32Array( 3 ) );
+ this.escapedRayDispatchBuffer.name = 'Dispatch Buffer for Escaped Rays';
+
+ const writeEscapedRayDispatchSizeParams = {
+ outputBuffer: storage( this.escapedRayDispatchBuffer, 'uint' ),
+ queueSizes: storage( this.queueSizes, 'uint' ).toAtomic(),
+ workgroupSize: uniform( this.escapedRayWorkgroupSize[ 0 ] ),
+ };
+
+ this.writeEscapedRayDispatchSizeKernel = writeEscapedRayDispatchSize( writeEscapedRayDispatchSizeParams ).computeKernel( [ 1, 1, 1 ] );
+
+ this.bsdfDispatchBuffer = new IndirectStorageBufferAttribute( new Uint32Array( 3 ) );
+ this.bsdfDispatchBuffer.name = 'Dispatch Buffer for bsdf eval';
+ const writeBsdfDispatchSizeParams = {
+ outputBuffer: storage( this.bsdfDispatchBuffer, 'uint' ),
+ queueSizes: storage( this.queueSizes, 'uint' ).toAtomic(),
+ workgroupSize: uniform( this.bsdfEvalWorkgroupSize[ 0 ] ),
+ };
+
+ this.writeBsdfDispatchSizeKernel = writeBsdfDispatchSize( writeBsdfDispatchSizeParams ).computeKernel( [ 1, 1, 1 ] );
+
+ const cleanQueuesParams = {
+ queueSizes: storage( this.queueSizes, 'uint' ).toAtomic(),
+ };
+
+ this.cleanQueuesKernel = cleanQueues( cleanQueuesParams ).computeKernel( [ 1, 1, 1 ] );
+
+ }
+
+ createMegakernel() {
+
+ const megakernelShaderParams = {
+ resultBuffer: storage( this.resultBuffer, 'vec4' ),
+ offset: uniform( new Vector2() ),
+ tileSize: uniform( new Vector2() ),
+ dimensions: uniform( this.dimensions ),
+ sample_count_buffer: storage( this.sampleCountBuffer, 'u32' ),
+ smoothNormals: uniform( 1 ),
+ seed: uniform( 0 ),
+
+ // transforms
+ inverseProjectionMatrix: uniform( new Matrix4() ),
+ cameraToModelMatrix: uniform( new Matrix4() ),
+
+ // bvh and geometry definition
+ geom_index: storage( this.geometry.index, 'uvec3' ).toReadOnly(),
+ geom_position: storage( this.geometry.position, 'vec3' ).toReadOnly(),
+ geom_normals: storage( this.geometry.normal, 'vec3' ).toReadOnly(),
+ geom_material_index: storage( this.geometry.materialIndex, 'u32' ).toReadOnly(),
+ bvh: storage( this.geometry.bvh, 'BVHNode' ).toReadOnly(),
+
+ materials: storage( this.geometry.materials, 'Material' ).toReadOnly(),
+
+ // compute variables
+ globalId: globalId,
+ };
+
+ this.megakernel = megakernelShader( this.bounces )( megakernelShaderParams ).computeKernel( this.WORKGROUP_SIZE );
+
+ }
+
+ createResetKernel() {
+
+ const resetParams = {
+ resultBuffer: storage( this.resultBuffer, 'vec4f' ),
+ dimensions: uniform( this.dimensions ),
+ sample_count_buffer: storage( this.sampleCountBuffer, 'u32' ),
+
+ globalId: globalId,
+ };
+
+
+ this.resetKernel = resetResultFn( resetParams ).computeKernel( this.WORKGROUP_SIZE );
+
+ }
+
+ createEscapedRayKernel() {
+
+ const escapedRayParams = {
+ resultBuffer: storage( this.resultBuffer, 'vec4' ),
+ inputQueue: storage( this.escapedQueue, 'RayQueueElement' ).toReadOnly(),
+ queueSizes: storage( this.queueSizes, 'uint' ).toAtomic(),
+ sampleCountBuffer: storage( this.sampleCountBuffer, 'u32' ),
+
+ dimensions: uniform( this.dimensions ),
+ globalId: globalId,
+ };
+
+ this.escapedRayKernel = escapedRay( escapedRayParams ).computeKernel( this.escapedRayWorkgroupSize );
+
+ }
+
+ createTraceRayKernel() {
+
+ const traceRayParams = {
+ inputQueue: storage( this.rayQueue, 'RayQueueElement' ).toReadOnly(),
+ queueSizes: storage( this.queueSizes, 'uint' ).toAtomic(),
+ escapedQueue: storage( this.escapedQueue, 'RayQueueElement' ),
+ outputQueue: storage( this.hitResultQueue, 'HitResultQueueElement' ),
+
+ geom_index: storage( this.geometry.index, 'uvec3' ).toReadOnly(),
+ geom_position: storage( this.geometry.position, 'vec3' ).toReadOnly(),
+ geom_normals: storage( this.geometry.normal, 'vec3' ).toReadOnly(),
+ // geom_material_index: storage( this.geometry.materialIndex, 'u32' ).toReadOnly(),
+ bvh: storage( this.geometry.bvh, 'BVHNode' ).toReadOnly(),
+
+ globalId: globalId,
+ };
+
+ this.traceRayKernel = traceRay( traceRayParams ).computeKernel( this.traceRayWorkgroupSize );
+
+ }
+
+ createBsdfEvalKernel() {
+
+ const bsdfEvalParams = {
+ inputQueue: storage( this.hitResultQueue, 'HitResultQueueElement' ).toReadOnly(),
+ outputQueue: storage( this.rayQueue, 'RayQueueElement' ),
+ queueSizes: storage( this.queueSizes, 'uint' ).toAtomic(),
+
+ geom_material_index: storage( this.geometry.materialIndex, 'u32' ).toReadOnly(),
+ materials: storage( this.geometry.materials, 'Material' ).toReadOnly(),
+ seed: uniform( 0 ),
+
+ globalId: globalId,
+ };
+
+ this.bsdfEvalKernel = bsdfEval( bsdfEvalParams ).computeKernel( this.bsdfEvalWorkgroupSize );
+
+ }
+
+ setUseMegakernel( value ) {
+
+ this.useMegakernel = value;
+ this.reset();
+
+ }
+
+ setGeometryData( geometry ) {
+
+ for ( const propName in geometry ) {
+
+ const prop = this.geometry[ propName ];
+ if ( prop === undefined ) {
+
+ console.error( `Invalid property name in geometry data: ${propName}` );
+ continue;
+
+ }
+
+ try {
+
+ this._renderer.destroyAttribute( prop );
+
+ } catch ( e ) {
+
+ console.error( 'Failed to destroy geometry attribute. Pbbly because it did not have a gpu buffer' );
+
+ }
+
+ this.geometry[ propName ] = geometry[ propName ];
+
+ }
+
+ this.createMegakernel();
+ this.createBsdfEvalKernel();
+ this.createTraceRayKernel();
+
+ }
+
+ setCamera( camera ) {
+
+ this.camera = camera;
+
+ }
+
+ setSize( w, h ) {
+
+ w = 1920;
+ h = 1080;
+
+ w = Math.ceil( w );
+ h = Math.ceil( h );
+
+ if ( this.dimensions.x === w && this.dimensions.y === h ) {
+
+ return;
+
+ }
+
+ this.bufferCount = ( this.bufferCount ?? 0 ) + 1;
+ this.dimensions.set( w, h );
+
+ try {
+
+ this._renderer.destroyAttribute( this.resultBuffer );
+ this._renderer.destroyAttribute( this.sampleCountBuffer );
+
+ } catch ( e ) {
+
+ console.log( 'Failed to destroy result buffer. Pbbly there was no gpu buffer for it' );
+
+ }
+
+ this.resultBuffer = new StorageBufferAttribute( new Float32Array( 4 * w * h ) );
+ this.resultBuffer.name = `Result Image #${this.bufferCount}`;
+ this.sampleCountBuffer = new StorageBufferAttribute( new Uint32Array( w * h ) );
+ this.sampleCountBuffer.name = 'Sample Counts';
+
+ this.createResetKernel();
+ this.createEscapedRayKernel();
+ this.createMegakernel();
+
+ this.reset();
+
+ }
+
+ getSize( target ) {
+
+ target.copy( this.dimensions );
+
+ }
+
+ setTiles( tiles ) {
+
+ this.tiles.copy( tiles );
+
+ }
+
+ getTileSize( target ) {
+
+ target.copy( this.dimensions ).divide( this.tiles ).ceil();
+
+ return target;
+
+ }
+
+ dispose() {
+
+ // TODO: dispose of all buffers
+ this._task = null;
+
+ }
+
+ reset() {
+
+ const { _renderer } = this;
+
+ const dispatchSize = [
+ Math.ceil( this.dimensions.x / this.WORKGROUP_SIZE[ 0 ] ),
+ Math.ceil( this.dimensions.y / this.WORKGROUP_SIZE[ 1 ] ),
+ 1
+ ];
+
+ _renderer.compute( this.resetKernel, dispatchSize );
+
+ this.megakernelParams.seed.value = 0;
+ this.bsdfEvalParams.seed.value = 0;
+
+ this.samples = 0;
+ this.currentTile = 0;
+ this._task = null;
+
+ }
+
+ update() {
+
+ if ( ! this.camera ) {
+
+ return;
+
+ }
+
+ const tileSize = this.getTileSize( new Vector2() );
+ const currentTileVec = new Vector2(
+ this.currentTile % this.tiles.x,
+ Math.floor( this.currentTile / this.tiles.x )
+ );
+ const offset = currentTileVec.multiply( tileSize );
+
+ this.megakernelParams.seed.value += 1;
+ this.megakernelParams.offset.value.copy( offset );
+ this.megakernelParams.tileSize.value.copy( tileSize );
+ this.megakernelParams.dimensions.value.copy( this.dimensions );
+ this.megakernelParams.inverseProjectionMatrix.value.copy( this.camera.projectionMatrixInverse );
+ this.megakernelParams.cameraToModelMatrix.value.copy( this.camera.matrixWorld );
+
+ this.bsdfEvalParams.seed.value += 1;
+ this.escapedRayParams.dimensions.value.copy( this.dimensions );
+ this.generateRaysParams.offset.value.copy( offset );
+ this.generateRaysParams.tileSize.value.copy( tileSize );
+ this.generateRaysParams.dimensions.value.copy( this.dimensions );
+ this.generateRaysParams.inverseProjectionMatrix.value.copy( this.camera.projectionMatrixInverse );
+ this.generateRaysParams.cameraToModelMatrix.value.copy( this.camera.matrixWorld );
+
+ if ( ! this._task ) {
+
+ this._task = renderTask.call( this );
+
+ }
+
+ this._task.next();
+
+ this.currentTile = ( this.currentTile + 1 ) % ( this.tiles.x * this.tiles.y );
+
+ }
+
+ getResultBuffer() {
+
+ return this.resultBuffer;
+
+ }
+
+}
diff --git a/src/webgpu/WebGPUPathTracer.js b/src/webgpu/WebGPUPathTracer.js
new file mode 100644
index 00000000..5f4ead58
--- /dev/null
+++ b/src/webgpu/WebGPUPathTracer.js
@@ -0,0 +1,446 @@
+import { Color, StorageBufferAttribute, PerspectiveCamera, Scene, Vector2, Clock, NormalBlending, NoBlending, AdditiveBlending, NodeMaterial } from 'three/webgpu';
+import { storage, uniform, wgslFn, uv, varying, positionGeometry } from 'three/tsl';
+import { PathTracingSceneGenerator } from '../core/PathTracingSceneGenerator.js';
+import { FullScreenQuad } from 'three/examples/jsm/postprocessing/Pass.js';
+import { GradientEquirectTexture } from '../textures/GradientEquirectTexture.js';
+import { getIesTextures, getLights, getTextures } from '../core/utils/sceneUpdateUtils.js';
+import { ClampedInterpolationMaterial } from '../materials/fullscreen/ClampedInterpolationMaterial.js';
+import { CubeToEquirectGenerator } from '../utils/CubeToEquirectGenerator.js';
+import { PathTracerCore } from './PathTracerCore.js';
+
+// function supportsFloatBlending( renderer ) {
+
+// return renderer.extensions.get( 'EXT_float_blend' );
+
+// }
+
+const _resolution = new Vector2();
+export class WebGPUPathTracer {
+
+ // get multipleImportanceSampling() {
+
+ // return Boolean( this._pathTracer.material.defines.FEATURE_MIS );
+
+ // }
+
+ // set multipleImportanceSampling( v ) {
+
+ // this._pathTracer.material.setDefine( 'FEATURE_MIS', v ? 1 : 0 );
+
+ // }
+
+ // get transmissiveBounces() {
+
+ // return this._pathTracer.material.transmissiveBounces;
+
+ // }
+
+ // set transmissiveBounces( v ) {
+
+ // this._pathTracer.material.transmissiveBounces = v;
+
+ // }
+
+ get bounces() {
+
+ return this._pathTracer.material.bounces;
+
+ }
+
+ set bounces( v ) {
+
+ this._pathTracer.material.bounces = v;
+
+ }
+
+ // get filterGlossyFactor() {
+
+ // return this._pathTracer.material.filterGlossyFactor;
+
+ // }
+
+ // set filterGlossyFactor( v ) {
+
+ // this._pathTracer.material.filterGlossyFactor = v;
+
+ // }
+
+ // get samples() {
+
+ // return this._pathTracer.samples;
+
+ // }
+
+ // get target() {
+
+ // return this._pathTracer.target;
+
+ // }
+
+ // get tiles() {
+
+ // return this._pathTracer.tiles;
+
+ // }
+
+ // get stableNoise() {
+
+ // return this._pathTracer.stableNoise;
+
+ // }
+
+ // set stableNoise( v ) {
+
+ // this._pathTracer.stableNoise = v;
+
+ // }
+
+ get isCompiling() {
+
+ return Boolean( this._pathTracer.isCompiling );
+
+ }
+
+ useMegakernel( value ) {
+
+ this._pathTracer.setUseMegakernel( value );
+
+ }
+
+ constructor( renderer ) {
+
+ // members
+ this._renderer = renderer;
+ this._generator = new PathTracingSceneGenerator();
+ this._pathTracer = new PathTracerCore( renderer );
+ this._queueReset = false;
+ this._clock = new Clock();
+ this._compilePromise = null;
+
+ this.tiles = new Vector2();
+
+ // this._lowResPathTracer = new PathTracingRenderer( renderer );
+ // this._lowResPathTracer.tiles.set( 1, 1 );
+ // this._quad = new FullScreenQuad( new ClampedInterpolationMaterial( {
+ // map: null,
+ // transparent: true,
+ // blending: NoBlending,
+
+ // premultipliedAlpha: renderer.getContextAttributes().premultipliedAlpha,
+ // } ) );
+ this._materials = null;
+
+ this._previousEnvironment = null;
+ this._previousBackground = null;
+ this._internalBackground = null;
+
+ // options
+ this.renderDelay = 100;
+ this.minSamples = 5;
+ this.fadeDuration = 500;
+ this.enablePathTracing = true;
+ this.pausePathTracing = false;
+ this.dynamicLowRes = false;
+ this.lowResScale = 0.25;
+ this.renderScale = 1;
+ this.synchronizeRenderSize = true;
+ this.rasterizeScene = true;
+ this.renderToCanvas = true;
+ this.textureSize = new Vector2( 1024, 1024 );
+ this.rasterizeSceneCallback = ( scene, camera ) => {
+
+ this._renderer.render( scene, camera );
+
+ };
+
+ const blitMaterial = new NodeMaterial();
+ const fragmentShaderParams = {
+ resultBuffer: storage( new StorageBufferAttribute(), 'vec4' ),
+ dimensions: uniform( new Vector2() ),
+ uv: varying( uv() ),
+ };
+
+ // TODO: Apply gamma correction?
+ this.blitFragmentShader = wgslFn( /* wgsl */ `
+ fn blit(
+ resultBuffer: ptr, read>,
+ dimensions: vec2u,
+ uv: vec2f,
+ ) -> vec4f {
+ let x = min(u32( uv.x * f32(dimensions.x) ), dimensions.x - 1);
+ let y = min(u32( uv.y * f32(dimensions.y) ), dimensions.y - 1);
+ let offset = x + y * dimensions.x;
+ return resultBuffer[offset];
+ }
+ ` );
+
+ blitMaterial.fragmentNode = this.blitFragmentShader( fragmentShaderParams );
+
+ const vertexShaderParams = {
+ position: positionGeometry,
+ };
+ const fullScreenQuadVertex = wgslFn( /* wgsl */ `
+ fn noop(position: vec4f) -> vec4f {
+ return position;
+ }
+ ` );
+ blitMaterial.vertexNode = fullScreenQuadVertex( vertexShaderParams );
+
+ const blitQuad = new FullScreenQuad( blitMaterial );
+
+ this.renderToCanvasCallback = ( finalBuffer, renderer, quad ) => {
+
+ const blitBuffer = blitQuad.material.fragmentNode.parameters.resultBuffer.value;
+ if ( blitBuffer !== finalBuffer ) {
+
+ const fragmentShaderParams = {
+ resultBuffer: storage( finalBuffer, 'vec4' ),
+ dimensions: uniform( new Vector2() ),
+ uv: varying( uv() ),
+ };
+
+ blitMaterial.fragmentNode = this.blitFragmentShader( fragmentShaderParams );
+
+ }
+
+ const dimensions = blitQuad.material.fragmentNode.parameters.dimensions.value;
+ this._renderer.getSize( dimensions );
+ blitQuad.render( renderer );
+
+ // const currentAutoClear = renderer.autoClear;
+ // renderer.autoClear = false;
+ // quad.render( renderer );
+ // renderer.autoClear = currentAutoClear;
+
+ };
+
+ // initialize the scene so it doesn't fail
+ this.setScene( new Scene(), new PerspectiveCamera() );
+
+ }
+
+ setBVHWorker( worker ) {
+
+ this._generator.setBVHWorker( worker );
+
+ }
+
+ setScene( scene, camera, options = {} ) {
+
+ scene.updateMatrixWorld( true );
+ camera.updateMatrixWorld();
+
+ const generator = this._generator;
+ generator.setObjects( scene );
+
+ if ( this._buildAsync ) {
+
+ return generator.generateAsync( options.onProgress ).then( result => {
+
+ return this._updateFromResults( scene, camera, result );
+
+ } );
+
+ } else {
+
+ const result = generator.generate();
+ return this._updateFromResults( scene, camera, result );
+
+ }
+
+ }
+
+ setSceneAsync( ...args ) {
+
+ this._buildAsync = true;
+ const result = this.setScene( ...args );
+ this._buildAsync = false;
+
+ return result;
+
+ }
+
+ setCamera( camera ) {
+
+ this.camera = camera;
+ this.updateCamera();
+
+ }
+
+ updateCamera() {
+
+ const camera = this.camera;
+ camera.updateMatrixWorld();
+
+ this._pathTracer.setCamera( camera );
+ // this._lowResPathTracer.setCamera( camera );
+ this.reset();
+
+ }
+
+ updateMaterials() {
+
+ }
+
+ updateLights() {
+
+ }
+
+ updateEnvironment() {
+
+ }
+
+ _updateFromResults( scene, camera, results ) {
+
+ const {
+ materials,
+ geometry,
+ bvh,
+ bvhChanged,
+ needsMaterialIndexUpdate,
+ } = results;
+
+ const pathTracer = this._pathTracer;
+
+ const newGeometryData = {};
+
+ if ( bvhChanged ) {
+
+ // dereference a new index attribute if we're using indirect storage
+ const dereferencedIndexAttr = geometry.index.clone();
+ const indirectBuffer = bvh._indirectBuffer;
+ if ( indirectBuffer ) {
+
+ dereferenceIndex( geometry, indirectBuffer, dereferencedIndexAttr );
+
+ }
+
+ const newIndex = new StorageBufferAttribute( dereferencedIndexAttr.array, 3 );
+ newIndex.name = 'Geometry Index';
+ newGeometryData.index = newIndex;
+
+ const newPosition = new StorageBufferAttribute( geometry.attributes.position.array, 3 );
+ newPosition.name = 'Geometry Positions';
+ newGeometryData.position = newPosition;
+
+ const newNormals = new StorageBufferAttribute( geometry.attributes.normal.array, 3 );
+ newNormals.name = 'Geometry Normals';
+ newGeometryData.normal = newNormals;
+
+ const newBvhRoots = new StorageBufferAttribute( new Float32Array( bvh._roots[ 0 ] ), 8 );
+ newBvhRoots.name = 'BVH Roots';
+ newGeometryData.bvh = newBvhRoots;
+
+ }
+
+ if ( needsMaterialIndexUpdate ) {
+
+ const newMaterialIndex = new StorageBufferAttribute( geometry.attributes.materialIndex.array, 1 );
+ newMaterialIndex.name = 'Material Index';
+ newGeometryData.materialIndex = newMaterialIndex;
+
+ }
+
+ const newMaterialsData = new Float32Array( materials.length * 3 );
+ const defaultColor = new Color();
+ for ( let i = 0; i < materials.length; i ++ ) {
+
+ const material = materials[ i ];
+ const color = material.color ?? defaultColor;
+ // Make sure those are in linear-sRGB space
+ newMaterialsData[ 3 * i + 0 ] = color.r;
+ newMaterialsData[ 3 * i + 1 ] = color.g;
+ newMaterialsData[ 3 * i + 2 ] = color.b;
+
+ }
+
+ const newMaterialsBuffer = new StorageBufferAttribute( newMaterialsData, 3 );
+ newMaterialsBuffer.name = 'Material Data';
+ newGeometryData.materials = newMaterialsBuffer;
+
+ pathTracer.setGeometryData( newGeometryData );
+
+ this.setCamera( camera );
+
+ }
+
+ renderSample() {
+
+ if ( ! this._renderer._initialized ) {
+
+ this._renderer.init();
+ return;
+
+ }
+
+ this._updateScale();
+
+ this._pathTracer.update();
+
+ this.renderToCanvasCallback( this._pathTracer.getResultBuffer(), this._renderer );
+
+ }
+
+ reset() {
+
+ }
+
+ dispose() {
+
+ this._pathTracer.dispose();
+
+ }
+
+ _updateScale() {
+
+ // update the path tracer scale if it has changed
+ if ( this.synchronizeRenderSize ) {
+
+ this._renderer.getDrawingBufferSize( _resolution );
+
+ const w = Math.floor( this.renderScale * _resolution.x );
+ const h = Math.floor( this.renderScale * _resolution.y );
+
+ this._pathTracer.getSize( _resolution );
+ if ( _resolution.x !== w || _resolution.y !== h ) {
+
+ this._pathTracer.setSize( w, h );
+
+ }
+
+ }
+
+ }
+
+ getSampleCount() {
+
+ return this._pathTracer.samples;
+
+ }
+
+ async getLatestSampleTimestamp() {
+
+ return await this._pathTracer.getLatestSampleTimestamp();
+
+ }
+
+}
+
+// TODO: Expose in three-mesh-bvh?
+function dereferenceIndex( geometry, indirectBuffer, target ) {
+
+ const unpacked = target.array;
+ const indexArray = geometry.index ? geometry.index.array : null;
+ for ( let i = 0, l = indirectBuffer.length; i < l; i ++ ) {
+
+ const i3 = 3 * i;
+ const v3 = 3 * indirectBuffer[ i ];
+ for ( let c = 0; c < 3; c ++ ) {
+
+ unpacked[ i3 + c ] = indexArray ? indexArray[ v3 + c ] : v3 + c;
+
+ }
+
+ }
+
+}
diff --git a/src/webgpu/nodes/megakernel.wgsl.js b/src/webgpu/nodes/megakernel.wgsl.js
new file mode 100644
index 00000000..59069b65
--- /dev/null
+++ b/src/webgpu/nodes/megakernel.wgsl.js
@@ -0,0 +1,113 @@
+import { wgslFn } from 'three/tsl';
+import { ndcToCameraRay, bvhIntersectFirstHit, constants, getVertexAttribute } from 'three-mesh-bvh/webgpu';
+import { pcgRand3, pcgInit } from './random.wgsl.js';
+import { lambertBsdfFunc } from './sampling.wgsl.js';
+import { materialStruct, surfaceRecordStruct } from './structs.wgsl.js';
+
+export const megakernelShader = ( bounces ) => wgslFn( /* wgsl */`
+
+ fn compute(
+ resultBuffer: ptr, read_write>,
+ offset: vec2u,
+ tileSize: vec2u,
+ dimensions: vec2u,
+ smoothNormals: u32,
+ inverseProjectionMatrix: mat4x4f,
+ cameraToModelMatrix: mat4x4f,
+ seed: u32,
+ sample_count_buffer: ptr, read_write>,
+
+ geom_position: ptr, read>,
+ geom_index: ptr, read>,
+ geom_normals: ptr, read>,
+ geom_material_index: ptr, read>,
+ bvh: ptr, read>,
+
+ materials: ptr, read>,
+
+ globalId: vec3u,
+ ) -> void {
+ if ( globalId.x >= tileSize.x || globalId.y >= tileSize.y ) {
+ return;
+ }
+
+ // to screen coordinates
+ let indexUV = offset + globalId.xy;
+ let uv = vec2f( indexUV ) / vec2f( dimensions );
+ let ndc = uv * 2.0 - vec2f( 1.0 );
+
+ pcgInitialize(indexUV, seed);
+
+ // scene ray
+ // TODO: sample a random ray
+ var ray = ndcToCameraRay( ndc, cameraToModelMatrix * inverseProjectionMatrix );
+
+ const bounces: u32 = ${bounces};
+ var resultColor = vec3f( 0.0 );
+ var throughputColor = vec3f( 1.0 );
+ var sampleCount = 0u;
+ // TODO: fix shadow acne? RTIOW says we could just ignore ray hits that are too close
+ for (var bounce = 0u; bounce < bounces; bounce++) {
+ let hitResult = bvhIntersectFirstHit( geom_index, geom_position, bvh, ray );
+
+ // write result
+ if ( hitResult.didHit ) {
+
+ let material = materials[ geom_material_index[ hitResult.indices.x ] ];
+ // var surfaceRecord: SurfaceRecord;
+ // surfaceRecord.normal = hitResult.normal;
+ // surfaceRecord.albedo = material.albedo;
+ // surfaceRecord.roughness = material.roughness;
+ // surfaceRecord.metalness = material.metalness;
+
+ let hitPosition = getVertexAttribute( hitResult.barycoord, hitResult.indices.xyz, geom_position );
+ let hitNormal = getVertexAttribute( hitResult.barycoord, hitResult.indices.xyz, geom_normals );
+
+ let scatterRec = bsdfEval(hitNormal, - ray.direction);
+ // let scatterRec = bsdfEval(hitResult.normal, - ray.direction);
+ // TODO: fix shadow acne
+ // if (bounce == 1) {
+ // resultColor = vec3f( 0.0, 1.0, 0.0 ); // dot( scatterRec.direction, hitNormal ) ); // ( vec3f( 1.0 ) + scatterRec.direction ) * 0.5;
+ // sampleCount = 1;
+ // break;
+ // }
+
+ throughputColor *= material.albedo * scatterRec.value / scatterRec.pdf;
+
+ ray.origin = hitPosition;
+ ray.direction = scatterRec.direction;
+
+ } else {
+
+ let background = normalize( vec3f( 0.0366, 0.0813, 0.1057 ) );
+ resultColor += background * throughputColor;
+ sampleCount += 1;
+ break;
+ }
+
+ }
+
+ if ( sampleCount == 0 ) {
+ return;
+ }
+
+ const accumulate: bool = true;
+
+ let index = indexUV.x + indexUV.y * dimensions.x;
+
+ let prevColor = resultBuffer[index];
+ if ( accumulate ) {
+ let prevSampleCount = sample_count_buffer[index];
+ let newSampleCount = prevSampleCount + sampleCount;
+ sample_count_buffer[index] = newSampleCount;
+
+ let newColor = ( ( prevColor.xyz * f32( prevSampleCount ) ) + resultColor ) / f32( newSampleCount );
+ resultBuffer[index] = vec4f( newColor, 1.0 );
+ } else {
+ resultBuffer[index] = vec4f( resultColor.xyz / f32( sampleCount ), 1.0 );
+ }
+
+ }
+`, [ ndcToCameraRay, bvhIntersectFirstHit, constants, getVertexAttribute, materialStruct, surfaceRecordStruct, pcgRand3, pcgInit, lambertBsdfFunc ] );
+
+export default megakernelShader;
diff --git a/src/webgpu/nodes/random.wgsl.js b/src/webgpu/nodes/random.wgsl.js
new file mode 100644
index 00000000..ecb2dee0
--- /dev/null
+++ b/src/webgpu/nodes/random.wgsl.js
@@ -0,0 +1,55 @@
+import { wgsl, wgslFn } from 'three/tsl';
+
+export const pcgStateStruct = wgsl( /* wgsl */`
+ struct PcgState {
+ s0: vec4u,
+ s1: vec4u,
+ pixel: vec2i,
+ };
+
+ var g_state: PcgState;
+` );
+
+// Seeds the RNG state from the pixel coordinate and frame number so every
+// pixel/frame pair gets a distinct random stream.
+export const pcgInit = wgslFn( /* wgsl */`
+ fn pcgInitialize(p: vec2u, frame: u32) -> void {
+ g_state.pixel = vec2i( p );
+
+ //white noise seed
+ g_state.s0 = vec4u(p, frame, u32(p.x) + u32(p.y));
+
+ //blue noise seed
+ g_state.s1 = vec4u(frame, frame*15843, frame*31 + 4566, frame*2345 + 58585);
+ }
+`, [ pcgStateStruct ] );
+
+export const pcg4d = wgslFn( /* wgsl */ `
+ fn pcg4d(v: ptr) -> void {
+ *v = *v * 1664525u + 1013904223u;
+ v.x += v.y*v.w; v.y += v.z*v.x; v.z += v.x*v.y; v.w += v.y*v.z;
+ *v = *v ^ (*v >> vec4u(16u));
+ v.x += v.y*v.w; v.y += v.z*v.x; v.z += v.x*v.y; v.w += v.y*v.z;
+ }
+` );
+
+export const pcgCycleState = wgslFn( /* wgsl */ `
+ fn pcgCycleState(n: u32) -> void {
+ for (var i = 0u; i < n; i++) {
+ pcg4d(&g_state.s0);
+ }
+ }
+` );
+
+// TODO: test if abs there is necessary
+// Three uniform floats in [0, 1] from one pcg4d step. The u32 -> f32
+// conversion is already non-negative, so abs() is defensive (see TODO above).
+export const pcgRand3 = wgslFn( /*wgsl*/`
+ fn pcgRand3() -> vec3f {
+ pcg4d(&g_state.s0);
+ return abs( vec3f(g_state.s0.xyz) / f32(0xffffffffu) );
+ }
+`, [ pcg4d, pcgStateStruct ] );
+
+// Two uniform floats in [0, 1] from one pcg4d step.
+export const pcgRand2 = wgslFn( /*wgsl*/`
+ fn pcgRand2() -> vec2f {
+ pcg4d(&g_state.s0);
+ return abs( vec2f(g_state.s0.xy) / f32(0xffffffffu) );
+ }
+`, [ pcg4d, pcgStateStruct ] );
diff --git a/src/webgpu/nodes/reset.wgsl.js b/src/webgpu/nodes/reset.wgsl.js
new file mode 100644
index 00000000..c7bbb1ae
--- /dev/null
+++ b/src/webgpu/nodes/reset.wgsl.js
@@ -0,0 +1,21 @@
+import { wgslFn } from 'three/tsl';
+
+export const resetResultFn = wgslFn( /* wgsl */ `
+
+ fn resetBuffers(
+ resultBuffer: ptr, read_write>,
+ sample_count_buffer: ptr, read_write>,
+ dimensions: vec2u,
+
+ globalId: vec2u,
+ ) -> void {
+
+ let offset = globalId.x + globalId.y * dimensions.x;
+ sample_count_buffer[offset] = 0;
+ resultBuffer[offset] = vec4f(0.0);
+
+ }
+
+` );
+
+export default resetResultFn;
diff --git a/src/webgpu/nodes/sampling.wgsl.js b/src/webgpu/nodes/sampling.wgsl.js
new file mode 100644
index 00000000..f1e51681
--- /dev/null
+++ b/src/webgpu/nodes/sampling.wgsl.js
@@ -0,0 +1,61 @@
+import { wgslFn } from 'three/tsl';
+import { pcgRand2 } from './random.wgsl.js';
+import { scatterRecordStruct, constants } from './structs.wgsl.js';
+
+// TODO: Move to a local (s, t, n) coordinate system
+// From Ray Tracing Gems v1.9, chapter 16.6.2 -- known to be a low-quality sampler.
+// https://www.realtimerendering.com/raytracinggems/unofficial_RayTracingGems_v1.9.pdf
+// result.xyz = cosine-weighted vector on the hemisphere oriented around n
+// result.w = pdf
+export const sampleSphereCosineFn = wgslFn( /* wgsl */ `
+ fn sampleSphereCosine(rng: vec2f, n: vec3f) -> vec4f {
+
+ // The 0.99999 factors presumably keep the offset strictly inside the unit
+ // sphere so the summed direction cannot degenerate to ~zero length before
+ // normalize() — TODO confirm.
+ let a = (1 - 2 * rng.x) * 0.99999;
+ let b = sqrt( 1 - a * a ) * 0.99999;
+ let phi = 2 * PI * rng.y;
+ let direction = normalize( vec3f(n.x + b * cos( phi ), n.y + b * sin( phi ), n.z + a) );
+ // Cosine-weighted hemisphere pdf: cos(theta) / PI.
+ let pdf = dot( direction, n ) / PI;
+
+ return vec4f( direction, pdf );
+ }
+`, [ constants ] );
+
+
+// Lambertian BSDF: cosine-weighted hemisphere sample around the normal.
+export const lambertBsdfFunc = wgslFn( /* wgsl */`
+ fn bsdfEval(normal: vec3f, view: vec3f) -> ScatterRecord {
+
+ var record: ScatterRecord;
+
+ // TODO: return bsdfValue / pdf instead of bsdfValue and pdf separately?
+ let res = sampleSphereCosine( pcgRand2(), normal );
+ record.direction = res.xyz;
+ record.pdf = res.w;
+ // For a Lambertian lobe both value and pdf are dot(d, n) / PI, so they
+ // cancel when the caller computes value / pdf.
+ record.value = dot( record.direction, normal ) / PI;
+
+ return record;
+
+ }
+`, [ scatterRecordStruct, sampleSphereCosineFn, pcgRand2, constants ] );
+
+// const equirectDirectionToUvFn = wgslFn( /* wgsl */`
+// fn equirectDirectionToUv(direction: vec3f) -> vec2f {
+//
+// // from Spherical.setFromCartesianCoords
+// vec2 uv = vec2f( atan2( direction.z, direction.x ), acos( direction.y ) );
+// uv /= vec2f( 2.0 * PI, PI );
+//
+// // apply adjustments to get values in range [0, 1] and y right side up
+// uv.x += 0.5;
+// uv.y = 1.0 - uv.y;
+// return uv;
+//
+// }
+// ` );
+
+// const sampleEquirectColorFn = wgslFn( /* wgsl */ `
+// fn sampleEquirectColor( envMap: texture_2d<f32>, envMapSampler: sampler, direction: vec3f ) -> vec3f {
+
+// return texture2D( envMap, equirectDirectionToUv( direction ) ).rgb;
+
+// }
+// `, [ equirectDirectionToUvFn ] );
diff --git a/src/webgpu/nodes/structs.wgsl.js b/src/webgpu/nodes/structs.wgsl.js
new file mode 100644
index 00000000..97cb1ba1
--- /dev/null
+++ b/src/webgpu/nodes/structs.wgsl.js
@@ -0,0 +1,66 @@
+import { wgsl } from 'three/tsl';
+import { rayStruct } from 'three-mesh-bvh/webgpu';
+
+// Shared WGSL constants.
+export const constants = wgsl( /* wgsl */ `
+ const PI: f32 = 3.141592653589793;
+` );
+
+// Result of sampling a BSDF: outgoing direction, its pdf, and the bsdf value.
+export const scatterRecordStruct = wgsl( /* wgsl */ `
+ struct ScatterRecord {
+ direction: vec3f,
+ pdf: f32, // Actually just a probability
+ value: f32,
+ };
+` );
+
+// GPU-side material record; albedo only for now (roughness/metalness pending).
+export const materialStruct = wgsl( /* wgsl */`
+ struct Material {
+ albedo: vec3f,
+ // roughness: f32,
+ // metalness: f32,
+ };
+` );
+
+// Surface description captured at a hit point.
+export const surfaceRecordStruct = wgsl( /* wgsl */`
+ struct SurfaceRecord {
+ normal: vec3f,
+ albedo: vec3f,
+
+ roughness: f32,
+ metalness: f32,
+ };
+` );
+
+// TODO: write a proposal for a storage-backed structs and arrays in structs for three.js
+//
+// const hitResultQueueStruct = wgsl( /* wgsl */ `
+// struct HitResultQueue {
+// currentSize: atomic<u32>,
+// queue: array<HitResultQueueElement>,
+// };
+// `, [ hitResultQueueElementStruct ] );
+
+// One entry in the pending-ray queue of the wavefront path tracer.
+export const rayQueueElementStruct = wgsl( /* wgsl */ `
+
+ struct RayQueueElement {
+ ray: Ray,
+ throughputColor: vec3f,
+ currentBounce: u32,
+ pixel: vec2u,
+ };
+
+`, [ rayStruct ] );
+
+// One entry in the hit queue. The u32 fields are interleaved with the vec3f
+// fields so each scalar occupies the 4-byte pad after a vec3f (16-byte
+// alignment in WGSL), keeping the struct tightly packed.
+export const hitResultQueueElementStruct = wgsl( /* wgsl */`
+ struct HitResultQueueElement {
+ normal: vec3f,
+ pixel_x: u32,
+ position: vec3f,
+ pixel_y: u32,
+ view: vec3f,
+ currentBounce: u32,
+ throughputColor: vec3f,
+ vertexIndex: u32,
+ };
+` );
+
diff --git a/src/webgpu/nodes/wavefront.wgsl.js b/src/webgpu/nodes/wavefront.wgsl.js
new file mode 100644
index 00000000..fe157758
--- /dev/null
+++ b/src/webgpu/nodes/wavefront.wgsl.js
@@ -0,0 +1,231 @@
+import { wgslFn } from 'three/tsl';
+import { ndcToCameraRay, bvhIntersectFirstHit, constants as bvhConstants, getVertexAttribute } from 'three-mesh-bvh/webgpu';
+import { hitResultQueueElementStruct, rayQueueElementStruct, materialStruct, constants } from './structs.wgsl';
+import { lambertBsdfFunc } from './sampling.wgsl';
+import { pcgInit, pcgCycleState } from './random.wgsl';
+
+export const generateRays = wgslFn( /* wgsl */ `
+
+ fn generateRays(
+ cameraToModelMatrix: mat4x4f,
+ inverseProjectionMatrix: mat4x4f,
+ offset: vec2u,
+ tileSize: vec2u,
+ dimensions: vec2u,
+
+ rayQueue: ptr, read_write>,
+ rayQueueSize: ptr>, read_write>,
+
+ globalId: vec3u
+ ) -> void {
+ if (globalId.x >= tileSize.x || globalId.y >= tileSize.y) {
+ return;
+ }
+ let indexUV = offset + globalId.xy;
+ let uv = vec2f( indexUV ) / vec2f( dimensions );
+ let ndc = uv * 2.0 - vec2f( 1.0 );
+
+ let ray = ndcToCameraRay( ndc, cameraToModelMatrix * inverseProjectionMatrix );
+
+ // TODO: Firstly write to workgroup-local memory, then put a bunch inside storage mem
+ let index = atomicAdd(&rayQueueSize[0], 1);
+
+ rayQueue[index].ray = ray;
+ rayQueue[index].pixel = indexUV;
+ rayQueue[index].throughputColor = vec3f(1.0);
+ rayQueue[index].currentBounce = 0;
+ }
+
+`, [ rayQueueElementStruct, ndcToCameraRay ] );
+
+export const bsdfEval = wgslFn( /* wgsl */ `
+ fn bsdf(
+ inputQueue: ptr, read>,
+ outputQueue: ptr, read_write>,
+ queueSizes: ptr>, read_write>,
+
+ geom_material_index: ptr, read>,
+ materials: ptr, read>,
+ seed: u32,
+
+ globalId: vec3u,
+ ) -> void {
+ let inputSize = atomicLoad(&queueSizes[1]);
+ if (globalId.x >= inputSize) {
+ return;
+ }
+
+ let input = inputQueue[globalId.x];
+ let pixel = vec2u(input.pixel_x, input.pixel_y);
+
+ pcgInitialize(pixel, seed);
+ pcgCycleState(input.currentBounce);
+
+ var record: ScatterRecord;
+
+ let material = materials[ geom_material_index[ input.vertexIndex ] ];
+
+ let scatterRec = bsdfEval(input.normal, input.view);
+
+ let throughputColor = input.throughputColor * material.albedo * scatterRec.value / scatterRec.pdf;
+
+ let rayIndex = atomicAdd(&queueSizes[0], 1);
+ outputQueue[rayIndex].ray.origin = input.position;
+ outputQueue[rayIndex].ray.direction = scatterRec.direction;
+ outputQueue[rayIndex].pixel = pixel;
+ outputQueue[rayIndex].throughputColor = throughputColor;
+ outputQueue[rayIndex].currentBounce = input.currentBounce + 1;
+
+ }
+`, [ lambertBsdfFunc, hitResultQueueElementStruct, rayQueueElementStruct, materialStruct, pcgInit, pcgCycleState, constants ] );
+
+export const traceRay = wgslFn( /* wgsl */`
+
+ fn traceRay(
+ inputQueue: ptr, read>,
+ queueSizes: ptr>, read_write>,
+ escapedQueue: ptr, read_write>,
+ outputQueue: ptr, read_write>,
+
+ geom_position: ptr, read>,
+ geom_index: ptr, read>,
+ geom_normals: ptr, read>,
+ bvh: ptr, read>,
+
+ globalId: vec3u,
+ ) -> void {
+ let inputSize = atomicLoad(&queueSizes[0]);
+ if (globalId.x >= inputSize) {
+ return;
+ }
+
+ let input = inputQueue[globalId.x];
+
+ let hitResult = bvhIntersectFirstHit( geom_index, geom_position, bvh, input.ray );
+
+ if ( hitResult.didHit ) {
+
+ let index = atomicAdd(&queueSizes[1], 1);
+ outputQueue[index].view = - input.ray.direction;
+ outputQueue[index].normal = getVertexAttribute( hitResult.barycoord, hitResult.indices.xyz, geom_normals );
+ outputQueue[index].position = getVertexAttribute( hitResult.barycoord, hitResult.indices.xyz, geom_position );
+ outputQueue[index].pixel_x = input.pixel.x;
+ outputQueue[index].pixel_y = input.pixel.y;
+ outputQueue[index].vertexIndex = hitResult.indices.x;
+ outputQueue[index].throughputColor = input.throughputColor;
+ outputQueue[index].currentBounce = input.currentBounce;
+ // outputQueue[index].materialIndex = geom_material_index[hitResult.indices.x];
+
+ } else {
+
+ let index = atomicAdd(&queueSizes[2], 1);
+ escapedQueue[index] = input;
+
+ }
+
+ }
+
+`, [ hitResultQueueElementStruct, rayQueueElementStruct, getVertexAttribute, bvhIntersectFirstHit, bvhConstants ] );
+
+// WARN: this kernel assumes only one ray per pixel at one time is possible
+export const escapedRay = wgslFn( /* wgsl */`
+
+ fn escapedRay(
+ resultBuffer: ptr, read_write>,
+ inputQueue: ptr, read>,
+ queueSizes: ptr>, read_write>,
+ sampleCountBuffer: ptr, read_write>,
+
+ dimensions: vec2u,
+ globalId: vec3u,
+ ) -> void {
+ let inputSize = atomicLoad(&queueSizes[2]);
+ if (globalId.x >= inputSize) {
+ return;
+ }
+
+ let current = inputQueue[globalId.x];
+
+ let background = normalize( vec3f( 0.0366, 0.0813, 0.1057 ) );
+ let resultColor = background * current.throughputColor;
+
+ let offset = current.pixel.x + current.pixel.y * dimensions.x;
+
+ const accumulate: bool = true;
+
+ let prevColor = resultBuffer[offset];
+ if ( accumulate ) {
+ let prevSampleCount = sampleCountBuffer[offset];
+ let newSampleCount = prevSampleCount + 1;
+ sampleCountBuffer[offset] = newSampleCount;
+
+ let newColor = ( ( prevColor.xyz * f32( prevSampleCount ) ) + resultColor ) / f32( newSampleCount );
+ resultBuffer[offset] = vec4f( newColor, 1.0 );
+ } else {
+ resultBuffer[offset] = vec4f( resultColor, 1.0 );
+ }
+ }
+
+`, [ rayQueueElementStruct ] );
+
+export const writeTraceRayDispatchSize = wgslFn( /* wgsl */ `
+ fn writeTraceRayDispatchSize(
+ outputBuffer: ptr, read_write>,
+
+ queueSizes: ptr>, read_write>,
+
+ workgroupSize: u32,
+ ) -> void {
+ atomicStore(&queueSizes[1], 0);
+ atomicStore(&queueSizes[2], 0);
+
+ let size = atomicLoad(&queueSizes[0]);
+ outputBuffer[0] = u32( ceil( f32(size) / f32( workgroupSize ) ) );
+ outputBuffer[1] = 1;
+ outputBuffer[2] = 1;
+ }
+
+` );
+
+export const writeEscapedRayDispatchSize = wgslFn( /* wgsl */ `
+ fn writeTraceRayDispatchSize(
+ outputBuffer: ptr, read_write>,
+
+ queueSizes: ptr>, read_write>,
+ workgroupSize: u32,
+ ) -> void {
+ let size = atomicLoad(&queueSizes[2]);
+ outputBuffer[0] = u32( ceil( f32(size) / f32( workgroupSize ) ) );
+ outputBuffer[1] = 1;
+ outputBuffer[2] = 1;
+ }
+
+` );
+
+export const writeBsdfDispatchSize = wgslFn( /* wgsl */ `
+ fn writeBsdfDispatchSize(
+ queueSizes: ptr>, read_write>,
+ outputBuffer: ptr, read_write>,
+ workgroupSize: u32
+ ) -> void {
+
+ atomicStore(&queueSizes[0], 0);
+
+ let count = atomicLoad(&queueSizes[1]);
+ outputBuffer[0] = u32( ceil( f32(count) / f32( workgroupSize ) ) );
+ outputBuffer[1] = 1;
+ outputBuffer[2] = 1;
+ }
+`, );
+
+export const cleanQueues = wgslFn( /* wgsl */`
+ fn clean(
+ queueSizes: ptr>, read_write>,
+ ) -> void {
+ atomicStore(&queueSizes[0], 0);
+ atomicStore(&queueSizes[1], 0);
+ atomicStore(&queueSizes[2], 0);
+ }
+` );
+
+