|
|
@@ -2540,6 +2540,17 @@ class Vector4 {
|
|
|
|
|
|
}
|
|
|
|
|
|
+ divide( v ) {
|
|
|
+
|
|
|
+ this.x /= v.x;
|
|
|
+ this.y /= v.y;
|
|
|
+ this.z /= v.z;
|
|
|
+ this.w /= v.w;
|
|
|
+
|
|
|
+ return this;
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
divideScalar( scalar ) {
|
|
|
|
|
|
return this.multiplyScalar( 1 / scalar );
|
|
|
@@ -45451,6 +45462,67 @@ const clipping = () => nodeObject( new ClippingNode() );
|
|
|
|
|
|
// Creates a clipping node configured for alpha-to-coverage mode.
const clippingAlpha = () => nodeObject( new ClippingNode( ClippingNode.ALPHA_TO_COVERAGE ) );
|
|
|
|
|
|
/**
 * Hashed alpha testing ("stochastic alpha test").
 *
 * See: https://casual-effects.com/research/Wyman2017Hashed/index.html
 */

// Scale factor applied to the screen-space derivative when choosing the
// hash-noise discretization in getAlphaHashThreshold().
// Derived from trials only, and may be changed.
const ALPHA_HASH_SCALE = 0.05;
|
|
|
// Scalar hash of a 2D value in [0, 1), built from two sine terms
// (per the hashed-alpha-testing reference implementation).
const hash2D = /*@__PURE__*/ Fn( ( [ value ] ) => {

	const sinA = sin( mul( 17.0, value.x ).add( mul( 0.1, value.y ) ) );
	const sinB = abs( sin( mul( 13.0, value.y ).add( value.x ) ) );

	return fract( mul( 1.0e4, sinA ).mul( add( 0.1, sinB ) ) );

} );
|
|
|
+
|
|
|
// Scalar hash of a 3D value, obtained by hashing the xy-plane first and
// then folding the z coordinate through a second 2D hash.
const hash3D = /*@__PURE__*/ Fn( ( [ value ] ) => {

	const hashXY = hash2D( value.xy );

	return hash2D( vec2( hashXY, value.z ) );

} );
|
|
|
+
|
|
|
/**
 * Computes a per-fragment alpha threshold for hashed alpha testing,
 * following Wyman & McGuire, "Hashed Alpha Testing" (2017).
 * https://casual-effects.com/research/Wyman2017Hashed/index.html
 *
 * The threshold is derived from `position` (presumably object-space — the
 * caller passes `positionLocal`), so the noise pattern sticks to the
 * surface, and is blended between two log-discretized noise scales so it
 * remains stable as the on-screen size of the surface changes.
 */
const getAlphaHashThreshold = /*@__PURE__*/ Fn( ( [ position ] ) => {

	// Find the discretized derivatives of our coordinates
	const maxDeriv = max$1(
		length( dFdx( position.xyz ) ),
		length( dFdy( position.xyz ) )
	).toVar( 'maxDeriv' );

	// Pixel scale of the hash noise, controlled by ALPHA_HASH_SCALE.
	const pixScale = float( 1 ).div( float( ALPHA_HASH_SCALE ).mul( maxDeriv ) ).toVar( 'pixScale' );

	// Find two nearest log-discretized noise scales
	const pixScales = vec2(
		exp2( floor( log2( pixScale ) ) ),
		exp2( ceil( log2( pixScale ) ) )
	).toVar( 'pixScales' );

	// Compute alpha thresholds at our two noise scales
	const alpha = vec2(
		hash3D( floor( pixScales.x.mul( position.xyz ) ) ),
		hash3D( floor( pixScales.y.mul( position.xyz ) ) ),
	).toVar( 'alpha' );

	// Factor to interpolate lerp with
	const lerpFactor = fract( log2( pixScale ) ).toVar( 'lerpFactor' );

	// Interpolate alpha threshold from noise at two scales
	const x = add( mul( lerpFactor.oneMinus(), alpha.x ), mul( lerpFactor, alpha.y ) ).toVar( 'x' );

	// Pass into CDF to compute uniformly distrib threshold
	// (piecewise CDF of the triangular distribution produced by the lerp).
	const a = min$1( lerpFactor, lerpFactor.oneMinus() ).toVar( 'a' );
	const cases = vec3(
		x.mul( x ).div( mul( 2.0, a ).mul( sub( 1.0, a ) ) ),
		x.sub( mul( 0.5, a ) ).div( sub( 1.0, a ) ),
		sub( 1.0, sub( 1.0, x ).mul( sub( 1.0, x ) ).div( mul( 2.0, a ).mul( sub( 1.0, a ) ) ) ) ).toVar( 'cases' );

	// Find our final, uniformly distributed alpha threshold (ατ)
	const threshold = x.lessThan( a.oneMinus() ).select( x.lessThan( a ).select( cases.x, cases.y ), cases.z );

	// Avoids ατ == 0. Could also do ατ = 1 - ατ.
	return clamp( threshold, 1.0e-6, 1.0 );

} );
|
|
|
+
|
|
|
class NodeMaterial extends Material {
|
|
|
|
|
|
static get type() {
|
|
|
@@ -45795,6 +45867,14 @@ class NodeMaterial extends Material {
|
|
|
|
|
|
}
|
|
|
|
|
|
+ // ALPHA HASH
|
|
|
+
|
|
|
+ if ( this.alphaHash === true ) {
|
|
|
+
|
|
|
+ diffuseColor.a.lessThan( getAlphaHashThreshold( positionLocal ) ).discard();
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
if ( this.transparent === false && this.blending === NormalBlending && this.alphaToCoverage === false ) {
|
|
|
|
|
|
diffuseColor.a.assign( 1.0 );
|