TAAUNode.js 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835
  1. import { HalfFloatType, Vector2, RenderTarget, RendererUtils, QuadMesh, NodeMaterial, TempNode, NodeUpdateType, Matrix4, DepthTexture } from 'three/webgpu';
  2. import { add, exp, float, If, Fn, max, texture, uniform, uv, vec2, vec4, luminance, convertToTexture, passTexture, velocity, getViewPosition, viewZToPerspectiveDepth, struct, ivec2, mix, property, outputStruct } from 'three/tsl';
// Shared fullscreen quad used to run the seed and resolve materials.
const _quadMesh = /*@__PURE__*/ new QuadMesh();

// Scratch vector for querying the renderer's drawing buffer size.
const _size = /*@__PURE__*/ new Vector2();

// Saved renderer state, captured before the effect renders and restored afterwards.
let _rendererState;
/**
 * A special node that performs Temporal Anti-Aliasing Upscaling (TAAU).
 *
 * Like TRAA, the node accumulates jittered samples over multiple frames and
 * reprojects history with motion vectors. Unlike TRAA, the input buffers
 * (beauty, depth, velocity) are expected to be rendered at a lower resolution
 * than the renderer's drawing buffer — typically by lowering the upstream
 * pass's resolution via {@link PassNode#setResolutionScale} — and the resolve
 * pass reconstructs an output-resolution image using a 9-tap Blackman-Harris
 * filter (Gaussian approximation) over the jittered input samples. The result
 * is an alternative to FSR2/3 that does anti-aliasing and upscaling in a
 * single pass.
 *
 * References:
 * - Karis, "High Quality Temporal Supersampling", SIGGRAPH 2014, {@link https://advances.realtimerendering.com/s2014/}
 * - Riley/Arcila, FidelityFX Super Resolution 2, GDC 2022, {@link https://gpuopen.com/download/GDC_FidelityFX_Super_Resolution_2_0.pdf}
 *
 * Note: MSAA must be disabled when TAAU is in use.
 *
 * @augments TempNode
 * @three_import import { taau } from 'three/addons/tsl/display/TAAUNode.js';
 */
class TAAUNode extends TempNode {

	static get type() {

		return 'TAAUNode';

	}

	/**
	 * Constructs a new TAAU node.
	 *
	 * @param {TextureNode} beautyNode - The texture node that represents the input of the effect.
	 * @param {TextureNode} depthNode - A node that represents the scene's depth.
	 * @param {TextureNode} velocityNode - A node that represents the scene's velocity.
	 * @param {Camera} camera - The camera the scene is rendered with.
	 */
	constructor( beautyNode, depthNode, velocityNode, camera ) {

		super( 'vec4' );

		/**
		 * This flag can be used for type testing.
		 *
		 * @type {boolean}
		 * @readonly
		 * @default true
		 */
		this.isTAAUNode = true;

		/**
		 * The `updateBeforeType` is set to `NodeUpdateType.FRAME` since the node renders
		 * its effect once per frame in `updateBefore()`.
		 *
		 * @type {string}
		 * @default 'frame'
		 */
		this.updateBeforeType = NodeUpdateType.FRAME;

		/**
		 * The texture node that represents the input of the effect.
		 *
		 * @type {TextureNode}
		 */
		this.beautyNode = beautyNode;

		/**
		 * A node that represents the scene's depth.
		 *
		 * @type {TextureNode}
		 */
		this.depthNode = depthNode;

		/**
		 * A node that represents the scene's velocity.
		 *
		 * @type {TextureNode}
		 */
		this.velocityNode = velocityNode;

		/**
		 * The camera the scene is rendered with.
		 *
		 * @type {Camera}
		 */
		this.camera = camera;

		/**
		 * When the difference between the current and previous depth goes above this threshold,
		 * the history is considered invalid.
		 *
		 * @type {number}
		 * @default 0.0005
		 */
		this.depthThreshold = 0.0005;

		/**
		 * The depth difference within the 3×3 neighborhood to consider a pixel as an edge.
		 *
		 * @type {number}
		 * @default 0.001
		 */
		this.edgeDepthDiff = 0.001;

		/**
		 * The history becomes invalid as the pixel length of the velocity approaches this value.
		 *
		 * @type {number}
		 * @default 128
		 */
		this.maxVelocityLength = 128;

		/**
		 * Baseline weight applied to the current frame in the resolve. Lower
		 * values produce smoother results with longer accumulation but slower
		 * convergence on disoccluded regions; the motion factor is added on
		 * top, so fast-moving pixels still respond quickly.
		 *
		 * @type {number}
		 * @default 0.025
		 */
		this.currentFrameWeight = 0.025;

		/**
		 * The jitter index selects the current camera offset value.
		 *
		 * @private
		 * @type {number}
		 * @default 0
		 */
		this._jitterIndex = 0;

		/**
		 * A uniform node holding the current jitter offset in input-pixel
		 * units. The shader needs this to know where each input sample was
		 * actually rendered when computing per-tap reconstruction weights.
		 *
		 * @private
		 * @type {UniformNode<vec2>}
		 */
		this._jitterOffset = uniform( new Vector2() );

		/**
		 * The render target that represents the history of frame data.
		 * Sized to the renderer's drawing buffer (the output resolution).
		 * Attachment 0 is the accumulated color, attachment 1 the per-pixel
		 * "lock" value used to protect thin features from variance clipping.
		 *
		 * @private
		 * @type {?RenderTarget}
		 */
		this._historyRenderTarget = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType, count: 2 } );
		this._historyRenderTarget.textures[ 0 ].name = 'TAAUNode.history.color';
		this._historyRenderTarget.textures[ 1 ].name = 'TAAUNode.history.lock';

		/**
		 * The render target for the resolve. Sized to the renderer's drawing
		 * buffer (the output resolution).
		 *
		 * @private
		 * @type {?RenderTarget}
		 */
		this._resolveRenderTarget = new RenderTarget( 1, 1, { depthBuffer: false, type: HalfFloatType } );
		this._resolveRenderTarget.texture.name = 'TAAUNode.resolve';

		/**
		 * Render target whose depth attachment holds the previous frame's
		 * depth buffer. The depth texture must be owned by a render target
		 * so that `copyTextureToTexture` can copy into it on the WebGL
		 * backend, which uses a framebuffer blit and therefore needs the
		 * destination depth texture to be attached to a framebuffer. This
		 * render target is sized independently of the history target so it
		 * can match the (lower-resolution) input depth texture.
		 *
		 * @private
		 * @type {RenderTarget}
		 */
		this._previousDepthRenderTarget = new RenderTarget( 1, 1, { depthBuffer: false, depthTexture: new DepthTexture() } );
		this._previousDepthRenderTarget.depthTexture.name = 'TAAUNode.previousDepth';

		/**
		 * Material used for the resolve step.
		 *
		 * @private
		 * @type {NodeMaterial}
		 */
		this._resolveMaterial = new NodeMaterial();
		this._resolveMaterial.name = 'TAAU.resolve';

		/**
		 * Material used to seed the history render target on resize. It
		 * performs a bilinear upscale of the current beauty buffer into the
		 * output-sized history target so that the first frames after a
		 * resize do not fade in from black.
		 *
		 * @private
		 * @type {NodeMaterial}
		 */
		this._seedMaterial = new NodeMaterial();
		this._seedMaterial.name = 'TAAU.seed';

		/**
		 * The result of the effect is represented as a separate texture node.
		 *
		 * @private
		 * @type {PassTextureNode}
		 */
		this._textureNode = passTexture( this, this._resolveRenderTarget.texture );

		/**
		 * Used to save the original/unjittered projection matrix.
		 *
		 * @private
		 * @type {Matrix4}
		 */
		this._originalProjectionMatrix = new Matrix4();

		/**
		 * A uniform node holding the camera's near and far.
		 *
		 * @private
		 * @type {UniformNode<vec2>}
		 */
		this._cameraNearFar = uniform( new Vector2() );

		/**
		 * A uniform node holding the camera world matrix.
		 *
		 * @private
		 * @type {UniformNode<mat4>}
		 */
		this._cameraWorldMatrix = uniform( new Matrix4() );

		/**
		 * A uniform node holding the camera world matrix inverse.
		 *
		 * @private
		 * @type {UniformNode<mat4>}
		 */
		this._cameraWorldMatrixInverse = uniform( new Matrix4() );

		/**
		 * A uniform node holding the camera projection matrix inverse.
		 *
		 * @private
		 * @type {UniformNode<mat4>}
		 */
		this._cameraProjectionMatrixInverse = uniform( new Matrix4() );

		/**
		 * A uniform node holding the previous frame's view matrix.
		 *
		 * @private
		 * @type {UniformNode<mat4>}
		 */
		this._previousCameraWorldMatrix = uniform( new Matrix4() );

		/**
		 * A uniform node holding the previous frame's projection matrix inverse.
		 *
		 * @private
		 * @type {UniformNode<mat4>}
		 */
		this._previousCameraProjectionMatrixInverse = uniform( new Matrix4() );

		/**
		 * A texture node for the previous depth buffer.
		 *
		 * @private
		 * @type {TextureNode}
		 */
		this._previousDepthNode = texture( this._previousDepthRenderTarget.depthTexture );

		/**
		 * Sync the post processing stack with the TAAU node.
		 *
		 * @private
		 * @type {boolean}
		 */
		this._needsPostProcessingSync = false;

	}

	/**
	 * Returns the result of the effect as a texture node.
	 *
	 * @return {PassTextureNode} A texture node that represents the result of the effect.
	 */
	getTextureNode() {

		return this._textureNode;

	}

	/**
	 * Sets the output size of the effect (history and resolve targets). The
	 * previous-depth texture is sized independently in `updateBefore()` to
	 * track the scene's current depth texture.
	 *
	 * @param {number} outputWidth - The output width (drawing buffer width).
	 * @param {number} outputHeight - The output height (drawing buffer height).
	 */
	setSize( outputWidth, outputHeight ) {

		this._historyRenderTarget.setSize( outputWidth, outputHeight );
		this._resolveRenderTarget.setSize( outputWidth, outputHeight );

	}

	/**
	 * Defines the TAAU's current jitter as a view offset to the scene's
	 * camera. The jitter is shrunk to one *output* pixel (rather than one
	 * input pixel) so that the halton sequence gradually fills the output
	 * sub-pixel grid over multiple frames.
	 *
	 * @param {number} inputWidth - The width of the input buffers the camera renders into.
	 * @param {number} inputHeight - The height of the input buffers the camera renders into.
	 */
	setViewOffset( inputWidth, inputHeight ) {

		// save original/unjittered projection matrix for velocity pass
		this.camera.updateProjectionMatrix();
		this._originalProjectionMatrix.copy( this.camera.projectionMatrix );

		velocity.setProjectionMatrix( this._originalProjectionMatrix );

		// The jitter range must span one output pixel (not one input pixel),
		// so we shrink the input-pixel-unit offset by the ratio of input to
		// output resolution.
		const haltonOffset = _haltonOffsets[ this._jitterIndex ];
		const jitterX = ( haltonOffset[ 0 ] - 0.5 );
		const jitterY = ( haltonOffset[ 1 ] - 0.5 );

		// the shader needs the same offset to weight the reconstruction taps
		this._jitterOffset.value.set( jitterX, jitterY );

		this.camera.setViewOffset(

			inputWidth, inputHeight,
			jitterX, jitterY,
			inputWidth, inputHeight

		);

	}

	/**
	 * Clears the view offset from the scene's camera.
	 */
	clearViewOffset() {

		this.camera.clearViewOffset();

		velocity.setProjectionMatrix( null );

		// update jitter index
		this._jitterIndex ++;
		this._jitterIndex = this._jitterIndex % ( _haltonOffsets.length - 1 );

	}

	/**
	 * This method is used to render the effect once per frame.
	 *
	 * @param {NodeFrame} frame - The current node frame.
	 */
	updateBefore( frame ) {

		const { renderer } = frame;

		// store previous frame matrices before updating current ones
		this._previousCameraWorldMatrix.value.copy( this._cameraWorldMatrix.value );
		this._previousCameraProjectionMatrixInverse.value.copy( this._cameraProjectionMatrixInverse.value );

		// update camera matrices uniforms
		this._cameraNearFar.value.set( this.camera.near, this.camera.far );
		this._cameraWorldMatrix.value.copy( this.camera.matrixWorld );
		this._cameraWorldMatrixInverse.value.copy( this.camera.matrixWorldInverse );
		this._cameraProjectionMatrixInverse.value.copy( this.camera.projectionMatrixInverse );

		// extract input dimensions from the beauty buffer and output
		// dimensions from the renderer's drawing buffer
		const beautyRenderTarget = ( this.beautyNode.isRTTNode ) ? this.beautyNode.renderTarget : this.beautyNode.passNode.renderTarget;

		const inputWidth = beautyRenderTarget.texture.width;
		const inputHeight = beautyRenderTarget.texture.height;

		const drawingBufferSize = renderer.getDrawingBufferSize( _size );

		const outputWidth = drawingBufferSize.width;
		const outputHeight = drawingBufferSize.height;

		//

		_rendererState = RendererUtils.resetRendererState( renderer, _rendererState );

		//

		const needsRestart =
			this._historyRenderTarget.width !== outputWidth ||
			this._historyRenderTarget.height !== outputHeight;

		this.setSize( outputWidth, outputHeight );

		// every time the dimensions change we need fresh history data
		if ( needsRestart === true ) {

			// make sure render targets are initialized after the resize which triggers a dispose()
			renderer.initRenderTarget( this._historyRenderTarget );
			renderer.initRenderTarget( this._resolveRenderTarget );

			// Seed the history with a bilinear upscale of the current beauty
			// buffer. Without this the first frames after a resize fade in
			// from black because the history target was cleared. The seed
			// material is a quad pass that samples beauty at output UVs, so
			// it produces an output-sized image regardless of the input size.
			renderer.setRenderTarget( this._historyRenderTarget );
			_quadMesh.material = this._seedMaterial;
			_quadMesh.name = 'TAAU.seed';
			_quadMesh.render( renderer );
			renderer.setRenderTarget( null );

		}

		// must run after needsRestart so it does not affect the seed reset
		if ( this._needsPostProcessingSync === true ) {

			this.setViewOffset( inputWidth, inputHeight );

			this._needsPostProcessingSync = false;

		}

		// resolve
		renderer.setRenderTarget( this._resolveRenderTarget );
		_quadMesh.material = this._resolveMaterial;
		_quadMesh.name = 'TAAU';
		_quadMesh.render( renderer );
		renderer.setRenderTarget( null );

		// update history: the resolved output becomes next frame's history input
		renderer.copyTextureToTexture( this._resolveRenderTarget.texture, this._historyRenderTarget.texture );

		// Copy the current scene depth into the previous-depth texture. We
		// keep the destination size locked to the source's actual dimensions
		// so that any one-frame timing mismatch between the scene pass's depth
		// attachment and the beauty render target's bookkeeping cannot
		// produce a copy with mismatched extents (which WebGPU rejects for
		// depth/stencil formats).
		const currentDepth = this.depthNode.value;
		const srcW = currentDepth.image !== null && currentDepth.image !== undefined ? currentDepth.image.width : 0;
		const srcH = currentDepth.image !== null && currentDepth.image !== undefined ? currentDepth.image.height : 0;

		if ( srcW > 0 && srcH > 0 ) {

			if ( this._previousDepthRenderTarget.width !== srcW || this._previousDepthRenderTarget.height !== srcH ) {

				this._previousDepthRenderTarget.setSize( srcW, srcH );
				renderer.initRenderTarget( this._previousDepthRenderTarget );

			}

			const dstDepth = this._previousDepthRenderTarget.depthTexture;

			renderer.copyTextureToTexture( currentDepth, dstDepth );

			this._previousDepthNode.value = dstDepth;

		}

		// restore
		RendererUtils.restoreRendererState( renderer, _rendererState );

	}

	/**
	 * This method is used to setup the effect's render targets and TSL code.
	 *
	 * @param {NodeBuilder} builder - The current node builder.
	 * @return {PassTextureNode}
	 */
	setup( builder ) {

		// When used inside a post-processing stack, hook the pipeline callbacks
		// so the camera jitter is applied right before the scene renders and
		// cleared right after.
		const renderPipeline = builder.context.renderPipeline;

		if ( renderPipeline ) {

			this._needsPostProcessingSync = true;

			renderPipeline.context.onBeforeRenderPipeline = () => {

				const beautyRenderTarget = ( this.beautyNode.isRTTNode ) ? this.beautyNode.renderTarget : this.beautyNode.passNode.renderTarget;

				const inputWidth = beautyRenderTarget.texture.width;
				const inputHeight = beautyRenderTarget.texture.height;

				this.setViewOffset( inputWidth, inputHeight );

			};

			renderPipeline.context.onAfterRenderPipeline = () => {

				this.clearViewOffset();

			};

		}

		const currentDepthStruct = struct( {
			closestDepth: 'float',
			closestPositionTexel: 'vec2',
			farthestDepth: 'float',
		} );

		// Samples 3×3 neighborhood pixels and returns the closest and farthest depths.
		const sampleCurrentDepth = Fn( ( [ positionTexel ] ) => {

			const closestDepth = float( 2 ).toVar();
			const closestPositionTexel = vec2( 0 ).toVar();
			const farthestDepth = float( - 1 ).toVar();

			for ( let x = - 1; x <= 1; ++ x ) {

				for ( let y = - 1; y <= 1; ++ y ) {

					const neighbor = positionTexel.add( vec2( x, y ) ).toVar();
					const depth = this.depthNode.load( neighbor ).r.toVar();

					If( depth.lessThan( closestDepth ), () => {

						closestDepth.assign( depth );
						closestPositionTexel.assign( neighbor );

					} );

					If( depth.greaterThan( farthestDepth ), () => {

						farthestDepth.assign( depth );

					} );

				}

			}

			return currentDepthStruct( closestDepth, closestPositionTexel, farthestDepth );

		} );

		// Samples a previous depth and reproject it using the current camera matrices.
		const samplePreviousDepth = ( uv ) => {

			const depth = this._previousDepthNode.sample( uv ).r;
			const positionView = getViewPosition( uv, depth, this._previousCameraProjectionMatrixInverse );
			const positionWorld = this._previousCameraWorldMatrix.mul( vec4( positionView, 1 ) ).xyz;
			const viewZ = this._cameraWorldMatrixInverse.mul( vec4( positionWorld, 1 ) ).z;

			return viewZToPerspectiveDepth( viewZ, this._cameraNearFar.x, this._cameraNearFar.y );

		};

		// Optimized version of AABB clipping.
		// Reference: https://github.com/playdeadgames/temporal
		const clipAABB = Fn( ( [ currentColor, historyColor, minColor, maxColor ] ) => {

			const pClip = maxColor.rgb.add( minColor.rgb ).mul( 0.5 );
			const eClip = maxColor.rgb.sub( minColor.rgb ).mul( 0.5 ).add( 1e-7 );

			const vClip = historyColor.sub( vec4( pClip, currentColor.a ) );
			const vUnit = vClip.xyz.div( eClip );
			const absUnit = vUnit.abs();
			const maxUnit = max( absUnit.x, absUnit.y, absUnit.z );

			return maxUnit.greaterThan( 1 ).select(

				vec4( pClip, currentColor.a ).add( vClip.div( maxUnit ) ),
				historyColor

			);

		} ).setLayout( {
			name: 'clipAABB',
			type: 'vec4',
			inputs: [
				{ name: 'currentColor', type: 'vec4' },
				{ name: 'historyColor', type: 'vec4' },
				{ name: 'minColor', type: 'vec4' },
				{ name: 'maxColor', type: 'vec4' }
			]
		} );

		// Flicker reduction based on luminance weighing.
		const flickerReduction = Fn( ( [ currentColor, historyColor, currentWeight ] ) => {

			const historyWeight = currentWeight.oneMinus();

			const compressedCurrent = currentColor.mul( float( 1 ).div( ( max( currentColor.r, currentColor.g, currentColor.b ).add( 1 ) ) ) );
			const compressedHistory = historyColor.mul( float( 1 ).div( ( max( historyColor.r, historyColor.g, historyColor.b ).add( 1 ) ) ) );

			const luminanceCurrent = luminance( compressedCurrent.rgb );
			const luminanceHistory = luminance( compressedHistory.rgb );

			currentWeight.mulAssign( float( 1 ).div( luminanceCurrent.add( 1 ) ) );
			historyWeight.mulAssign( float( 1 ).div( luminanceHistory.add( 1 ) ) );

			return add( currentColor.mul( currentWeight ), historyColor.mul( historyWeight ) ).div( max( currentWeight.add( historyWeight ), 0.00001 ) ).toVar();

		} );

		const historyNode = texture( this._historyRenderTarget.textures[ 0 ] );
		const lockNode = texture( this._historyRenderTarget.textures[ 1 ] );

		// --- TAAU resolve ---
		//
		// For each output pixel, we map its position into input-pixel space,
		// find the closest jittered input sample, and reconstruct the current
		// color as a weighted sum of the 3×3 neighborhood around that sample.
		// Each tap's weight is a Gaussian approximation of a Blackman-Harris
		// window evaluated at the distance between the tap's (jittered)
		// sample center and the output pixel center. The same neighborhood
		// also supplies the moments used for variance clipping of the
		// reprojected history, so no second neighborhood read is needed.
		const colorOutput = property( 'vec4' );
		const lockOutput = property( 'vec4' );

		const outputNode = outputStruct( colorOutput, lockOutput );

		const resolve = Fn( () => {

			const uvNode = uv();

			const inputSize = this.beautyNode.size(); // ivec2
			const inputSizeF = vec2( inputSize );

			// output pixel center in input-pixel coordinates
			const pIn = uvNode.mul( inputSizeF );

			// the input sample at integer texel (m, n) was rendered at world
			// position (m + 0.5 + jitter). Solving for the closest tap gives:
			const closestTapF = pIn.sub( vec2( 0.5 ).add( this._jitterOffset ) ).round();
			const closestTap = ivec2( closestTapF );

			// depth dilation around the closest input tap
			const currentDepth = sampleCurrentDepth( closestTapF );
			const closestDepth = currentDepth.get( 'closestDepth' );
			const closestPositionTexel = currentDepth.get( 'closestPositionTexel' );
			const farthestDepth = currentDepth.get( 'farthestDepth' );

			// reproject using the velocity sampled at the dilated depth tap
			const offsetUV = this.velocityNode.load( closestPositionTexel ).xy.mul( vec2( 0.5, - 0.5 ) );
			const historyUV = uvNode.sub( offsetUV );
			const previousDepth = samplePreviousDepth( historyUV );

			// history validity
			const isValidUV = historyUV.greaterThanEqual( 0 ).all().and( historyUV.lessThanEqual( 1 ).all() );
			const isEdge = farthestDepth.sub( closestDepth ).greaterThan( this.edgeDepthDiff );
			const isDisocclusion = closestDepth.sub( previousDepth ).greaterThan( this.depthThreshold );
			const hasValidHistory = isValidUV.and( isEdge.or( isDisocclusion.not() ) );

			// 9-tap Blackman-Harris (Gaussian approximation) reconstruction
			// of the current frame color, plus moment accumulation for the
			// variance clip of the history.
			const sumColor = vec4( 0 ).toVar();
			const sumWeight = float( 0 ).toVar();
			const moment1 = vec4( 0 ).toVar();
			const moment2 = vec4( 0 ).toVar();

			const offsets = [
				[ - 1, - 1 ], [ 0, - 1 ], [ 1, - 1 ],
				[ - 1, 0 ], [ 0, 0 ], [ 1, 0 ],
				[ - 1, 1 ], [ 0, 1 ], [ 1, 1 ]
			];

			for ( const [ x, y ] of offsets ) {

				const tap = closestTap.add( ivec2( x, y ) );
				const tapCenter = vec2( tap ).add( vec2( 0.5 ).add( this._jitterOffset ) );
				const delta = pIn.sub( tapCenter );
				const d2 = delta.dot( delta );
				const w = exp( d2.mul( - 2.29 ) );

				// Use max() to prevent NaN values from propagating.
				const c = this.beautyNode.load( tap ).max( 0 );

				sumColor.addAssign( c.mul( w ) );
				sumWeight.addAssign( w );
				moment1.addAssign( c );
				moment2.addAssign( c.pow2() );

			}

			const currentColor = sumColor.div( sumWeight.max( 1e-5 ) );

			// variance clipping using the moments we just gathered
			const N = float( offsets.length );
			const mean = moment1.div( N );
			const motionFactor = uvNode.sub( historyUV ).mul( inputSizeF ).length().div( this.maxVelocityLength ).saturate();
			const varianceGamma = mix( 0.5, 1, motionFactor.oneMinus().pow2() );
			const variance = moment2.div( N ).sub( mean.pow2() ).max( 0 ).sqrt().mul( varianceGamma );
			const minColor = mean.sub( variance );
			const maxColor = mean.add( variance );

			const historyColor = historyNode.sample( historyUV );
			const clippedHistoryColor = clipAABB( mean.clamp( minColor, maxColor ), historyColor, minColor, maxColor );

			// Current weight. Under TAAU a single input frame covers less of
			// the output grid, so the baseline current weight is lower than
			// in standard TRAA to give the accumulator more frames to fill
			// in sub-pixel detail. Motion still biases toward the current
			// frame to keep disoccluded and fast-moving pixels responsive.
			const currentLuma = luminance( currentColor.rgb );
			const meanLuma = luminance( mean.rgb ).toConst();
			const thinFeature = currentLuma.sub( meanLuma ).abs().div( meanLuma ).smoothstep( 0, 0.2 );

			// Gate the lock by a two-sided depth change check. The
			// existing `isDisocclusion` is one-sided (only fires when
			// the scene moves farther), but new geometry appearing
			// closer also makes the history stale.
			const isDepthChanged = closestDepth.sub( previousDepth ).abs().greaterThan( this.depthThreshold );
			const canLock = isValidUV.and( isDepthChanged.not() );
			const gatedThinFeature = canLock.select( thinFeature, float( 0 ) );

			const decay = isDisocclusion.select( 0, 0.5 );
			const lock = max( gatedThinFeature, lockNode.r.mul( decay ) ).saturate();
			const lockedHistoryColor = mix( clippedHistoryColor, historyColor, lock );

			const currentWeight = float( this.currentFrameWeight ).toVar();
			currentWeight.assign( hasValidHistory.select( currentWeight.add( motionFactor ).saturate(), 1 ) );

			const output = flickerReduction( currentColor, lockedHistoryColor, currentWeight );

			colorOutput.assign( output );
			lockOutput.assign( lock );

			return vec4( 0 ); // temporary solution until TSL does not complain anymore

		} );

		// materials
		this._resolveMaterial.colorNode = resolve();
		this._resolveMaterial.outputNode = outputNode;

		// the seed pass writes the upscaled beauty color and a zero lock value
		this._seedMaterial.colorNode = Fn( () => {

			colorOutput.assign( this.beautyNode.sample( uv() ) );
			lockOutput.assign( 0 );

			return vec4( 0 );

		} )();
		this._seedMaterial.outputNode = outputNode;

		return this._textureNode;

	}

	/**
	 * Frees internal resources. This method should be called
	 * when the effect is no longer required.
	 */
	dispose() {

		this._historyRenderTarget.dispose();
		this._resolveRenderTarget.dispose();
		this._previousDepthRenderTarget.dispose();

		this._resolveMaterial.dispose();
		this._seedMaterial.dispose();

	}

}

export default TAAUNode;
  602. function _halton( index, base ) {
  603. let fraction = 1;
  604. let result = 0;
  605. while ( index > 0 ) {
  606. fraction /= base;
  607. result += fraction * ( index % base );
  608. index = Math.floor( index / base );
  609. }
  610. return result;
  611. }
  612. const _haltonOffsets = /*@__PURE__*/ Array.from(
  613. { length: 32 },
  614. ( _, index ) => [ _halton( index + 1, 2 ), _halton( index + 1, 3 ) ]
  615. );
  616. /**
  617. * TSL function for creating a TAAU node for Temporal Anti-Aliasing Upscaling.
  618. *
  619. * @tsl
  620. * @function
  621. * @param {TextureNode} beautyNode - The texture node that represents the input of the effect.
  622. * @param {TextureNode} depthNode - A node that represents the scene's depth.
  623. * @param {TextureNode} velocityNode - A node that represents the scene's velocity.
  624. * @param {Camera} camera - The camera the scene is rendered with.
  625. * @returns {TAAUNode}
  626. */
  627. export const taau = ( beautyNode, depthNode, velocityNode, camera ) => new TAAUNode( convertToTexture( beautyNode ), depthNode, velocityNode, camera );
粤ICP备19079148号