Skip to content

Instantly share code, notes, and snippets.

@bgolus
Last active April 29, 2021 06:36
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save bgolus/c3bc079a81c5b43e9830b98a0d7c32d6 to your computer and use it in GitHub Desktop.
Save bgolus/c3bc079a81c5b43e9830b98a0d7c32d6 to your computer and use it in GitHub Desktop.
World Normal from Depth Texture with experimental screen space derivatives optimizations, which are worse quality, and not faster than gather!
// Visualizes world-space normals reconstructed from the camera depth texture,
// comparing four depth-fetch strategies (plain sample, two screen-space
// derivative experiments, and a 2x-gather optimization).
Shader "WorldNormalFromDepthTexture 4Taps"
{
Properties {
// Selects the depth-fetch strategy; each enum value maps to one of the
// _SAMPLEMETHOD_* keywords in the multi_compile line below.
[KeywordEnum(Sample, Depth Derivatives, View Pos Derivatives, Gather)] _SampleMethod ("Sample Method", Float) = 0.0
}
SubShader
{
Tags { "RenderType"="Transparent" "Queue"="Transparent" }
LOD 100
Pass
{
// Full-screen debug pass: draw both faces, never write or test depth.
Cull Off
ZWrite Off
ZTest Always
CGPROGRAM
#pragma vertex vert
#pragma fragment frag
// One compiled variant per sample method; the bare "_" is the default
// (plain Sample) path.
#pragma multi_compile _ _SAMPLEMETHOD_DEPTH_DERIVATIVES _SAMPLEMETHOD_VIEW_POS_DERIVATIVES _SAMPLEMETHOD_GATHER
// Shader model 5.0 needed for GatherRed and ddx_fine/ddy_fine.
#pragma target 5.0
#include "UnityCG.cginc"
// Vertex input: object-space position only.
struct appdata
{
float4 vertex : POSITION;
};
// Vertex-to-fragment interpolators: clip-space position only; the fragment
// shader reads the pixel coordinate straight from SV_POSITION.
struct v2f
{
float4 pos : SV_POSITION;
};
// Minimal vertex stage: project the vertex to clip space, nothing else.
v2f vert (appdata v)
{
    v2f o = { UnityObjectToClipPos(v.vertex) };
    return o;
}
// Camera depth texture declared with DX11-style separate texture/sampler so
// the gather path can call GatherRed/Load on the same resource.
Texture2D _CameraDepthTexture;
SamplerState sampler_CameraDepthTexture;
// Unity convention: xy = 1/width, 1/height; zw = width, height.
float4 _CameraDepthTexture_TexelSize;
// Fetches the raw (non-linear) depth at a screen uv. The gather variant
// fetches by integer texel coordinate with Load; every other variant goes
// through the bound sampler.
float getRawDepth(float2 uv)
{
#if defined(_SAMPLEMETHOD_GATHER)
    int2 texelCoord = int2(uv * _CameraDepthTexture_TexelSize.zw);
    return _CameraDepthTexture.Load(int3(texelCoord, 0)).r;
#else
    return _CameraDepthTexture.Sample(sampler_CameraDepthTexture, uv).r;
#endif
}
// inspired by keijiro's depth inverse projection
// https://github.com/keijiro/DepthInverseProjection
// constructs view space ray at the far clip plane from the screen uv
// then multiplies that ray by the linear 01 depth
// Builds a view-space ray through this uv out to the far clip plane, then
// scales it by the linear 0-1 depth read from the depth texture.
float3 viewSpacePosAtScreenUV(float2 uv)
{
    float4 farPlaneClip = float4(uv * 2.0 - 1.0, 1.0, 1.0) * _ProjectionParams.z;
    float3 farPlaneRay = mul(unity_CameraInvProjection, farPlaneClip);
    return farPlaneRay * Linear01Depth(getRawDepth(uv));
}
// Overload taking a caller-supplied raw depth instead of sampling; used by
// the derivative and gather paths, which fetch depth themselves.
float3 viewSpacePosAtScreenUV(float rawDepth, float2 uv)
{
    float2 ndc = uv * 2.0 - 1.0;
    float3 farPlaneRay = mul(unity_CameraInvProjection, float4(ndc, 1.0, 1.0) * _ProjectionParams.z);
    return farPlaneRay * Linear01Depth(rawDepth);
}
// Convenience wrapper: converts a pixel position to uv space and forwards to
// the uv-based reconstruction.
float3 viewSpacePosAtPixelPosition(float2 vpos)
{
    return viewSpacePosAtScreenUV(vpos * _CameraDepthTexture_TexelSize.xy);
}
// naive 4 tap normal reconstruction
// accurate mid triangle normals compared to 3 tap
// no diagonal offset on edges, but sharp details are softened
// worse artifacts on depth disparities than 3 tap
// probably little reason to use this over the 3 tap approach
// unity's compiled fragment shader stats: 50 math, 4 tex
// Reconstructs the view-space normal at pixel position vpos from four
// neighboring depth samples (left/right/down/up), using the fetch strategy
// chosen by keyword. The cross product of the horizontal and vertical
// view-space position differences gives the surface normal.
half3 viewNormalAtPixelPosition(float2 vpos)
{
#if defined(_SAMPLEMETHOD_DEPTH_DERIVATIVES)
// screen space derivatives experiment
// get two depth samples: A at this pixel, B one pixel down-left
float depthA = _CameraDepthTexture.Load(int3(int2(vpos), 0));
float depthB = _CameraDepthTexture.Load(int3(int2(vpos)-1, 0));
// get screen space derivatives of the depth samples
// NOTE(review): these are quad-level derivatives, so the estimated
// neighbor depths below depend on pixel-quad parity — this is the
// quality loss the gist description mentions.
float depthA_ddx = ddx_fine(depthA);
float depthA_ddy = ddy_fine(depthA);
float depthB_ddx = ddx_fine(depthB);
float depthB_ddy = ddy_fine(depthB);
// calculate 4 depth samples from original 2 samples + derivatives:
// B + ddy steps from (-1,-1) to the left neighbor, B + ddx to the down
// neighbor; A + ddy / A + ddx step to the up / right neighbors.
float l = depthB + depthB_ddy;
float d = depthB + depthB_ddx;
float u = depthA + depthA_ddy;
float r = depthA + depthA_ddx;
// screen space uv
float2 uv = vpos * _CameraDepthTexture_TexelSize.xy;
// get view space position at 1 pixel offsets in each major direction
half3 viewSpacePos_l = viewSpacePosAtScreenUV(l, uv + float2(-1.0, 0.0) * _CameraDepthTexture_TexelSize.xy);
half3 viewSpacePos_r = viewSpacePosAtScreenUV(r, uv + float2( 1.0, 0.0) * _CameraDepthTexture_TexelSize.xy);
half3 viewSpacePos_d = viewSpacePosAtScreenUV(d, uv + float2( 0.0,-1.0) * _CameraDepthTexture_TexelSize.xy);
half3 viewSpacePos_u = viewSpacePosAtScreenUV(u, uv + float2( 0.0, 1.0) * _CameraDepthTexture_TexelSize.xy);
#elif defined(_SAMPLEMETHOD_VIEW_POS_DERIVATIVES)
// variant: take derivatives of the reconstructed view-space positions
// instead of the raw depths.
// get two depth samples: A at this pixel, B one pixel down-left
float depthA = _CameraDepthTexture.Load(int3(int2(vpos), 0));
float depthB = _CameraDepthTexture.Load(int3(int2(vpos)-1, 0));
// screen space uv
float2 uv = vpos * _CameraDepthTexture_TexelSize.xy;
float3 viewSpacePos_a = viewSpacePosAtScreenUV(depthA, uv + float2( 0.0, 0.0) * _CameraDepthTexture_TexelSize.xy);
float3 viewSpacePos_b = viewSpacePosAtScreenUV(depthB, uv + float2(-1.0,-1.0) * _CameraDepthTexture_TexelSize.xy);
// get screen space derivatives of the depth samples
// (same quad-parity caveat as the depth-derivative path above)
float3 viewSpacePos_a_ddx = ddx_fine(viewSpacePos_a);
float3 viewSpacePos_a_ddy = ddy_fine(viewSpacePos_a);
float3 viewSpacePos_b_ddx = ddx_fine(viewSpacePos_b);
float3 viewSpacePos_b_ddy = ddy_fine(viewSpacePos_b);
// calculate 4 view space positions from original 2 positions + derivatives
float3 viewSpacePos_l = viewSpacePos_b + viewSpacePos_b_ddy;
float3 viewSpacePos_d = viewSpacePos_b + viewSpacePos_b_ddx;
float3 viewSpacePos_u = viewSpacePos_a + viewSpacePos_a_ddy;
float3 viewSpacePos_r = viewSpacePos_a + viewSpacePos_a_ddx;
#elif defined(_SAMPLEMETHOD_GATHER)
// get depth from 2 gathers instead of 4 samples: one gather on the upper
// right 2x2 quad, one on the lower left; the needed component of each
// picks out the one-pixel right/up and left/down neighbors respectively
// (relies on GatherRed's fixed component-to-texel mapping).
float2 uv = vpos * _CameraDepthTexture_TexelSize.xy;
float4 depthsA = _CameraDepthTexture.GatherRed(sampler_CameraDepthTexture, uv + float2( 0.5, 0.5) * _CameraDepthTexture_TexelSize.xy);
float4 depthsB = _CameraDepthTexture.GatherRed(sampler_CameraDepthTexture, uv - float2( 0.5, 0.5) * _CameraDepthTexture_TexelSize.xy);
// get view space position at 1 pixel offsets in each major direction
half3 viewSpacePos_l = viewSpacePosAtScreenUV(depthsB.x, uv + float2(-1.0, 0.0) * _CameraDepthTexture_TexelSize.xy);
half3 viewSpacePos_r = viewSpacePosAtScreenUV(depthsA.z, uv + float2( 1.0, 0.0) * _CameraDepthTexture_TexelSize.xy);
half3 viewSpacePos_d = viewSpacePosAtScreenUV(depthsB.z, uv + float2( 0.0,-1.0) * _CameraDepthTexture_TexelSize.xy);
half3 viewSpacePos_u = viewSpacePosAtScreenUV(depthsA.x, uv + float2( 0.0, 1.0) * _CameraDepthTexture_TexelSize.xy);
#else
// default path: 4 independent depth texture samples
// get view space position at 1 pixel offsets in each major direction
half3 viewSpacePos_l = viewSpacePosAtPixelPosition(vpos + float2(-1.0, 0.0));
half3 viewSpacePos_r = viewSpacePosAtPixelPosition(vpos + float2( 1.0, 0.0));
half3 viewSpacePos_d = viewSpacePosAtPixelPosition(vpos + float2( 0.0,-1.0));
half3 viewSpacePos_u = viewSpacePosAtPixelPosition(vpos + float2( 0.0, 1.0));
#endif
// get the difference between the current and each offset position
half3 hDeriv = viewSpacePos_r - viewSpacePos_l;
half3 vDeriv = viewSpacePos_u - viewSpacePos_d;
// get view space normal from the cross product of the diffs
half3 viewNormal = normalize(cross(hDeriv, vDeriv));
return viewNormal;
}
// Fragment stage: reconstruct the normal, move it to world space, and encode
// it as a color for on-screen inspection.
half4 frag (v2f i) : SV_Target
{
    // view-space normal at this pixel (SV_POSITION.xy is the pixel coord)
    half3 normalVS = viewNormalAtPixelPosition(i.pos.xy);
    // flip z, then rotate into world space with the camera-to-world matrix
    half3 normalWS = mul((float3x3)unity_CameraToWorld, normalVS * float3(1,1,-1));
    // remap [-1,1] to [0,1]; GammaToLinearSpace keeps the visualization
    // consistent under linear-space rendering
    half3 displayColor = GammaToLinearSpace(normalWS.xyz * 0.5 + 0.5);
    return half4(displayColor, 1.0);
}
ENDCG
}
}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment