Skip to content

Instantly share code, notes, and snippets.

@BlurryLight
Created May 12, 2022 09:35
Show Gist options
  • Star 1 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
Save BlurryLight/145131dbacac34345908c529a3488e8f to your computer and use it in GitHub Desktop.
Unity Builtin Pipeline Simple TAA
#ifndef TAA_SIMPLE_HLSL
#define TAA_SIMPLE_HLSL
// Simple TAA resolve for Unity's built-in pipeline. Reference implementations:
// https://github.com/Unity-Technologies/FPSSample/blob/master/Packages/com.unity.postprocessing/PostProcessing/Shaders/Builtins/TemporalAntialiasing.shader
// https://github.com/Unity-Technologies/PostProcessing/blob/v2/PostProcessing/Shaders/Builtins/TemporalAntialiasing.shader
#include "UnityCG.cginc"
// Current-frame colour buffer (the Blit source).
Texture2D _MainTex;
// Vector4(1 / width, 1 / height, width, height)
float4 _MainTex_TexelSize;
// Accumulated colour from previous frames (read side of the ping-pong pair).
Texture2D _HistoryTex;
Texture2D _CameraDepthTexture;
float4 _CameraDepthTexture_TexelSize;
// Per-pixel screen-space motion vectors written by Unity.
Texture2D _CameraMotionVectorsTexture;
float4 _CameraMotionVectorsTexture_TexelSize;
// Non-zero when there is no valid history yet (first frame / after reset).
int _IgnoreHistory;
// How the motion-vector fetch position is dilated; see GetClosestFragment.
int _DilateMode;
#define _DILATE_NONE 0
#define _DILATE_TAP5 1
#define _DILATE_TAP9 2
// How the history sample is rectified against the current neighbourhood.
int _HistoryMode;
#define _HISTORY_NONE 0
#define _HISTORY_AABB_CLAMP 1
#define _HISTORY_AABB_CLIP 2
#define _HISTORY_VARIANCE_CLIP 3
// Colour space used when building the neighbourhood bounding box.
int _ColorMode;
#define _COLOR_RGB 0
#define _COLOR_YCoCg 1
// Optional sharpening applied to the current-frame sample before blending.
int _SharpMode;
#define _SHARP_NONE 0
#define _SHARP_PPSV2 1
float _Sharpness;
SamplerState sampler_LinearClamp;
SamplerState sampler_PointClamp;
// Sub-pixel jitter of the current frame, set from C# each frame.
float2 _Jitter;
// History blend weights for static vs. moving pixels.
#define STATIC_BLENDING 0.95
#define DYNAMIC_BLENDING 0.85
// Scales motion-vector length when lerping between the two weights above.
#define MOTION_AMPLIFY 6000.0
// Vertex input for the full-screen blit quad.
struct Attributes
{
float3 vertex : POSITION;
float2 uv : TEXCOORD0;
};
// Interpolants passed from Vert to Frag.
struct Varyings
{
float4 vertex : SV_POSITION;
float2 texcoord : TEXCOORD0;
};
// Standard blit vertex shader: object space -> world -> clip, UV passthrough.
Varyings Vert(Attributes v)
{
Varyings o;
o.vertex = mul(unity_MatrixVP, mul(unity_ObjectToWorld, float4(v.vertex, 1.0)));
// o.vertex = UnityObjectToClipPos(v.vertex) ;
// UnityObjectToClipPos is not usable here: it lives in a .cginc, and pulling
// that into this .hlsl file triggers macro-redefinition (and similar) errors,
// so the transform is written out manually instead.
o.texcoord = v.uv;
return o;
}
// Texel offsets of the full 3x3 neighbourhood, row by row (centre included);
// indexed inside Frag when building the neighbourhood colour bounds.
static const int2 kOffsets3x3[9] =
{
int2(-1, -1),
int2(0, -1),
int2(1, -1),
int2(-1, 0),
int2(0, 0),
int2(1, 0),
int2(-1, 1),
int2(0, 1),
int2(1, 1),
};
// Velocity dilation: returns the UV of the closest-to-camera depth sample in
// the neighbourhood of `uv`, so the motion vector is fetched from the
// foreground object covering this pixel (reduces silhouette ghosting).
float2 GetClosestFragment(float2 uv)
{
float2 k = _CameraDepthTexture_TexelSize.xy;
// 5-tap pattern: the four diagonal neighbours (centre is sampled below).
const float4 neighborhood = float4(
_CameraDepthTexture.Sample(sampler_PointClamp, uv - k).r,
_CameraDepthTexture.Sample(sampler_PointClamp, uv + float2(k.x, -k.y)).r,
_CameraDepthTexture.Sample(sampler_PointClamp, uv + float2(-k.x, k.y)).r,
_CameraDepthTexture.Sample(sampler_PointClamp, uv + k).r
);
// Pick the comparison that means "closer to the camera" for the platform's
// depth convention (reversed-Z: larger depth value is closer).
#if UNITY_REVERSED_Z
#define COMPARE_DEPTH(a, b) step(b, a)
#else
#define COMPARE_DEPTH(a, b) step(a, b)
#endif
// result.xy = texel offset of the closest sample so far, result.z = its depth.
float3 result = float3(0.0, 0.0, _CameraDepthTexture.Sample(sampler_PointClamp, uv).r);
result = lerp(result, float3(-1.0, -1.0, neighborhood.x), COMPARE_DEPTH(neighborhood.x, result.z));
result = lerp(result, float3(1.0, -1.0, neighborhood.y), COMPARE_DEPTH(neighborhood.y, result.z));
result = lerp(result, float3(-1.0, 1.0, neighborhood.z), COMPARE_DEPTH(neighborhood.z, result.z));
result = lerp(result, float3(1.0, 1.0, neighborhood.w), COMPARE_DEPTH(neighborhood.w, result.z));
if (_DilateMode == _DILATE_TAP9) // 9 tap: also test the axis-aligned neighbours
{
const float4 neighborhood2 = float4(
_CameraDepthTexture.Sample(sampler_PointClamp, uv + float2(-k.x,0.0)).r,
_CameraDepthTexture.Sample(sampler_PointClamp, uv + float2(k.x, 0.0)).r,
_CameraDepthTexture.Sample(sampler_PointClamp, uv + float2(0.0,k.y)).r,
_CameraDepthTexture.Sample(sampler_PointClamp, uv + float2(0.0,-k.y)).r
);
result = lerp(result, float3(-1.0, 0, neighborhood2.x), COMPARE_DEPTH(neighborhood2.x, result.z));
result = lerp(result, float3(1.0, 0.0, neighborhood2.y), COMPARE_DEPTH(neighborhood2.y, result.z));
result = lerp(result, float3(0, 1.0, neighborhood2.z), COMPARE_DEPTH(neighborhood2.z, result.z));
result = lerp(result, float3(0, -1.0, neighborhood2.w), COMPARE_DEPTH(neighborhood2.w, result.z));
}
return (uv + result.xy * k);
}
// In RGB mode both conversion helpers are identity functions.
// In production this runtime branch would be replaced by macros and shader
// variants instead of a dynamic branch.
float3 RGBToYCoCg(float3 color)
{
if (_ColorMode == _COLOR_RGB)
return color;
// Y = luma, Co/Cg = chroma. Row-major forward YCoCg matrix.
float3x3 mat = {
.25, .5, .25,
.5, 0, -.5,
-.25, .5, -.25
};
return mul(mat, color);
}
// Inverse of RGBToYCoCg; a no-op while the pipeline runs in plain RGB mode.
// (Runtime branch kept for easy toggling; production code would use shader
// variants instead.)
float3 YCoCgToRGB(float3 color)
{
if (_ColorMode == _COLOR_RGB)
return color;
// Expanded form of the inverse YCoCg matrix:
//   R = Y + Co - Cg,  G = Y + Cg,  B = Y - Co - Cg
float Y = color.x;
float Co = color.y;
float Cg = color.z;
return float3(Y + Co - Cg, Y + 0 + Cg, Y - Co - Cg);
}
// Taken from PostProcessing v2. Clips the history colour toward the AABB
// centre only (a fast approximation, not a true segment/box intersection).
// Per axis we take |extent / displacement|: the smallest of the three ratios
// is the parametric distance at which the centre->colour ray exits the box.
float3 ClipHistory_PPsV2(float3 color, float3 minimum, float3 maximum)
{
float3 boxCenter = 0.5 * (maximum + minimum);
float3 boxExtents = 0.5 * (maximum - minimum);
// Displacement of the history colour from the box centre; the small bias
// guards against division by zero. ("distance" would be a nicer name, but
// that keyword is reserved.)
float3 disp = color.rgb - boxCenter;
float3 axisT = abs(boxExtents / (disp + 0.0001));
float hitT = saturate(min(axisT.x, min(axisT.y, axisT.z)));
color.rgb = boxCenter + disp * hitT;
return color;
}
// Ray/AABB clip of the history colour (slide 28 of the "Temporal Reprojection
// Anti-Aliasing in INSIDE" talk): march from the history colour toward the
// box centre and stop at the box surface.
float3 ClipHistory(float3 History, float3 BoxMin, float3 BoxMax)
{
float3 Filtered = (BoxMin + BoxMax) * 0.5f; // step toward the box centre (INSIDE, p.28)
float3 RayOrigin = History;
float3 RayDir = Filtered - History;
// Component-wise guard against division by ~0 on axes where the history
// already matches the centre.
RayDir = abs(RayDir) < (1.0 / 65536.0) ? (1.0 / 65536.0) : RayDir;
float3 InvRayDir = rcp(RayDir);
// Classic slab test. Since the ray aims at the box centre a hit is
// guaranteed, so no miss handling is needed.
float3 MinIntersect = (BoxMin - RayOrigin) * InvRayDir;
float3 MaxIntersect = (BoxMax - RayOrigin) * InvRayDir;
// Per axis take the entry time; hitting a slab plane alone does not mean
// hitting the box itself, so the latest entry across the three axes is the
// point actually on the box surface (draw it out, or see the link below).
// https://www.scratchapixel.com/lessons/3d-basic-rendering/minimal-ray-tracer-rendering-simple-shapes/ray-box-intersection
float3 EnterIntersect = min(MinIntersect, MaxIntersect);
float ClipBlend = max(EnterIntersect.x, max(EnterIntersect.y, EnterIntersect.z));
// If the history colour is close to the current data it may already sit
// inside the box, making the entry times negative; saturating to [0,1]
// keeps the history sample in that case. A very large box likewise makes
// the clip a no-op.
ClipBlend = saturate(ClipBlend);
return lerp(History, Filtered, ClipBlend);
}
// TAA resolve: sample the current frame (un-jittered), rectify the
// reprojected history against the current 3x3 neighbourhood, then blend.
float4 Frag(Varyings i) : SV_Target
{
// The "unjitter" step from the INSIDE talk: only the colour-buffer fetch
// is un-jittered, not the depth/motion-vector fetches.
float2 uv = i.texcoord - _Jitter;
float4 Color = _MainTex.Sample(sampler_LinearClamp, uv); // current-frame colour
// With no history from the previous frame, pass the current frame through.
if (_IgnoreHistory)
{
return Color;
}
if(_SharpMode == _SHARP_PPSV2)
{
// Sharpen kernel: negative ring around a positive centre, summing to 1.
float4 topLeft = _MainTex.Sample(sampler_LinearClamp, uv - _MainTex_TexelSize.xy * 0.5);
float4 bottomRight = _MainTex.Sample(sampler_LinearClamp, uv + _MainTex_TexelSize.xy * 0.5);
float4 corners = 4.0 * (topLeft + bottomRight) - 2.0 * Color;
// Sharpen output
// Effectively the kernel below; 0.166667 is 1/6, 2.718282 is e:
/* | -(2/3)*x | |topLeft |
* Color = | ((4/3)x + 1) | * |Color |
* | -(2/3)x| |bottomRight |
* where x = e * _Sharpness; _Sharpness controls the kernel strength.
*/
Color = Color + (Color - (corners * 0.166667)) * 2.718282 * _Sharpness;
}
float2 Motion;
if (_DilateMode != _DILATE_NONE)
{
float2 closest = GetClosestFragment(i.texcoord);
// Screen-space UV offset relative to the previous frame.
Motion = _CameraMotionVectorsTexture.Sample(sampler_LinearClamp, closest).xy;
}
else
{
Motion = _CameraMotionVectorsTexture.Sample(sampler_LinearClamp, i.texcoord).xy;
}
float2 HistoryUV = i.texcoord - Motion;
float4 HistoryColor = _HistoryTex.Sample(sampler_LinearClamp, HistoryUV);
// Reprojected UV landed off screen: the history is invalid for this pixel.
if (HistoryUV.x < 0 || HistoryUV.y < 0 || HistoryUV.x > 1.0f || HistoryUV.y > 1.0f)
{
// for debug
// return float4(1.0,0.0,0.0,1.0);
return Color;
}
if (_HistoryMode != _HISTORY_NONE) // None skips rectification, handy for visualising ghosting
{
// The clip test runs in YCoCg space: a box built there maps back to RGB
// as something closer to an oriented bounding box, i.e. a tighter fit.
float3 AABBMin, AABBMax;
AABBMax = AABBMin = RGBToYCoCg(Color);
float3 m1 = 0,m2 = 0;
for (int k = 0; k < 9; k++)
{
float3 C = RGBToYCoCg(_MainTex.Sample(sampler_PointClamp, uv, kOffsets3x3[k]));
if(_HistoryMode == _HISTORY_VARIANCE_CLIP)
{
// Accumulate first and second moments for variance clipping.
m1 += C;
m2 += C * C;
}
else
{
AABBMin = min(AABBMin, C);
AABBMax = max(AABBMax, C);
}
}
if(_HistoryMode == _HISTORY_VARIANCE_CLIP)
{
float3 mu = m1 / 9;
// This sigma is an estimate; the exact form would keep every sample and
// compute sqrt(sum((sample - mu)^2)) — see
// https://en.wikipedia.org/wiki/Standard_deviation
float3 sigma = sqrt(abs(m2 / 9 - mu * mu));
#define GAMMA 1.0f
AABBMin = mu - GAMMA * sigma;
AABBMax = mu + GAMMA * sigma;
}
float3 HistoryYCoCg = RGBToYCoCg(HistoryColor);
if (_HistoryMode == _HISTORY_AABB_CLIP || _HistoryMode == _HISTORY_VARIANCE_CLIP) //clip
{
// Clip the history colour against the neighbourhood AABB.
HistoryColor.rgb = YCoCgToRGB(ClipHistory_PPsV2(HistoryYCoCg, AABBMin, AABBMax));
}
else if (_HistoryMode == _HISTORY_AABB_CLAMP)
{
// Component-wise clamp against the AABB.
HistoryColor.rgb = YCoCgToRGB(clamp(HistoryYCoCg, AABBMin, AABBMax));
}
}
// Moving pixels should favour the current frame, hence a lower blend factor
// the longer the motion vector is.
float BlendFactor = clamp(
lerp(STATIC_BLENDING, DYNAMIC_BLENDING, length(Motion) * MOTION_AMPLIFY),
DYNAMIC_BLENDING, STATIC_BLENDING
);
return lerp(Color, HistoryColor, BlendFactor);
}
#endif
// ShaderLab wrapper around the TAA resolve pass defined in TAA.hlsl.
Shader "TAA"
{
Properties {
_MainTex ("Texture", 2D) = "white" {}
}
HLSLINCLUDE
#pragma exclude_renderers gles
// Originally based on https://github.com/Unity-Technologies/PostProcessing/blob/v2/PostProcessing/Shaders/StdLib.hlsl
// #include "../Stdlib.hlsl"
// #include "../Colors.hlsl"
#include "TAA.hlsl"
ENDHLSL
SubShader
{
Cull Off ZWrite Off ZTest Always
// https://www.sardinefish.com/blog/444
// https://www.sardinefish.com/blog/458
// Two passes would normally be needed, but Unity computes the motion
// vectors for us, leaving a single resolve pass.
Pass
{
HLSLPROGRAM
#pragma enable_d3d11_debug_symbols
#pragma vertex Vert
#pragma fragment Frag
ENDHLSL
}
}
}
using System;
using UnityEngine;
[ExecuteInEditMode]
public class TAASimple : MonoBehaviour
{
    public Shader taaShader;
    private Material taaMaterial;

    // Larger jitter scales make geometric aliasing visible on sharp object
    // edges and cause some shading flicker under lighting, hence the cap.
    [SerializeField, Range(0.0f, 0.95f)] private float JitterScale = 0.7f;

    // These enums mirror the _DILATE_* / _COLOR_* / _HISTORY_* / _SHARP_*
    // defines in the shader; their int values are uploaded verbatim.
    private enum DilateMode { None, tap5 = 1, tap9 = 2 }
    [SerializeField] private DilateMode _dilateMode = DilateMode.tap5;

    private enum ColorMode { RGB = 0, YCoCg = 1 }
    [SerializeField] private ColorMode _colorMode = ColorMode.YCoCg;

    private enum HistoryMode { None = 0, AABBClamp = 1, AABBClip = 2, VarianceClip }
    [SerializeField] private HistoryMode _historyMode = HistoryMode.AABBClip;

    private enum SharpenMode { None = 0, PPSV2 = 1 }
    [SerializeField] private SharpenMode _sharpenMode = SharpenMode.None;

    [SerializeField, Range(0.0f, 1.0f)] private float _sharpness = 0.25f;

    /// <summary>
    /// Lazily created TAA resolve material. Returns null when no shader has
    /// been assigned, so callers must handle that case.
    /// </summary>
    public Material material
    {
        get
        {
            if (taaMaterial == null)
            {
                if (taaShader == null) return null;
                taaMaterial = new Material(taaShader);
            }
            return taaMaterial;
        }
    }

    private Camera m_Camera;

    /// <summary>Cached Camera component on this GameObject.</summary>
    public new Camera camera
    {
        get
        {
            if (m_Camera == null)
                m_Camera = GetComponent<Camera>();
            return m_Camera;
        }
    }

    private int FrameCount = 0;
    private Vector2 _Jitter;
    bool m_ResetHistory = true;
    // Ping-pong pair: index FrameCount % 2 is read, the other one is written.
    private RenderTexture[] m_HistoryTextures = new RenderTexture[2];

    // First 8 points of the Halton(2,3) low-discrepancy sequence, used as
    // sub-pixel jitter offsets.
    private Vector2[] HaltonSequence = new Vector2[]
    {
        new Vector2(0.5f, 1.0f / 3),
        new Vector2(0.25f, 2.0f / 3),
        new Vector2(0.75f, 1.0f / 9),
        new Vector2(0.125f, 4.0f / 9),
        new Vector2(0.625f, 7.0f / 9),
        new Vector2(0.375f, 2.0f / 9),
        new Vector2(0.875f, 5.0f / 9),
        new Vector2(0.0625f, 8.0f / 9),
    };

    private void OnEnable()
    {
        camera.depthTextureMode = DepthTextureMode.Depth | DepthTextureMode.MotionVectors;
        camera.useJitteredProjectionMatrixForTransparentRendering = true;
        // History is stale after a disable/enable cycle.
        m_ResetHistory = true;
    }

    private void OnDisable()
    {
        // FIX: the original only released the temporary history RTs on a
        // resolution change, leaking them when the component was disabled or
        // destroyed. Release them here and force a history reset.
        for (int i = 0; i < m_HistoryTextures.Length; i++)
        {
            if (m_HistoryTextures[i] != null)
            {
                RenderTexture.ReleaseTemporary(m_HistoryTextures[i]);
                m_HistoryTextures[i] = null;
            }
        }
        m_ResetHistory = true;
    }

    private void OnPreCull()
    {
        var proj = camera.projectionMatrix;
        // Unity needs the un-jittered matrix to compute clean motion vectors.
        camera.nonJitteredProjectionMatrix = proj;
        FrameCount++;
        var Index = FrameCount % 8;
        // Remap Halton [0,1) to a [-1,1] texel offset, then divide by the
        // resolution to obtain the offset in NDC-per-pixel units.
        _Jitter = new Vector2(
            2.0f * (HaltonSequence[Index].x - 0.5f) / camera.pixelWidth,
            2.0f * (HaltonSequence[Index].y - 0.5f) / camera.pixelHeight);
        _Jitter *= JitterScale;
        // Unity matrix indexing is row-major: matrix[x, y] is row x, column y.
        proj.m02 += _Jitter.x;
        proj.m12 += _Jitter.y;
        // Derivation: jittering does not move the camera; the camera stays put
        // while the pixel's landing position on the render target is shifted.
        // An equivalent formulation (see
        // https://sugulee.wordpress.com/2021/06/21/temporal-anti-aliasingtaa-tutorial/):
        /*
        float4 worldPos = modelMatrix * float4(in.position, 1.0);
        float4 clipPos = viewportParams.viewProjectionMatrix * worldPos;
        clipPos += viewportParams.jitter*clipPos.w; // Apply Jittering in Homogenous Coords x,y
        out.position = clipPos;
        */
        // Adding the jitter to m02/m12 makes it (_jitter.x * z_view,
        // _jitter.y * z_view) in homogeneous coordinates; the perspective
        // divide cancels z_view, leaving exactly an NDC offset of _jitter.xy.
        // https://docs.unity3d.com/ScriptReference/Camera-nonJitteredProjectionMatrix.html
        camera.projectionMatrix = proj;
    }

    private void OnPostRender()
    {
        camera.ResetProjectionMatrix();
    }

    // Background:
    // - Camera and scene static: simply blending jittered frames suffices
    //   (the basic path-tracing idea).
    // - Camera moving, scene static: motion vectors follow from reprojection
    //   using cur_vp and prev_vp.
    // - Camera and objects moving: the previous model matrix must be kept;
    //   last frame's UV comes from last frame's MVP, this frame's UV from the
    //   current MVP. Pseudo-code:
    /*
    * outBlock.newNDCPos = projection * view * translation * position;
    * outBlock.preNDCPos = projection * previousView * previousTranslation * position;
    */
    private void OnRenderImage(RenderTexture source, RenderTexture dest)
    {
        // FIX: `material` is null when no shader is assigned; fall back to a
        // plain copy instead of throwing a NullReferenceException.
        if (material == null)
        {
            Graphics.Blit(source, dest);
            return;
        }
        var historyRead = m_HistoryTextures[FrameCount % 2];
        if (historyRead == null || historyRead.width != Screen.width || historyRead.height != Screen.height)
        {
            if (historyRead) RenderTexture.ReleaseTemporary(historyRead);
            historyRead = RenderTexture.GetTemporary(Screen.width, Screen.height, 0, RenderTextureFormat.ARGBHalf);
            m_HistoryTextures[FrameCount % 2] = historyRead;
            m_ResetHistory = true;
        }
        var historyWrite = m_HistoryTextures[(FrameCount + 1) % 2];
        if (historyWrite == null || historyWrite.width != Screen.width || historyWrite.height != Screen.height)
        {
            if (historyWrite) RenderTexture.ReleaseTemporary(historyWrite);
            historyWrite = RenderTexture.GetTemporary(Screen.width, Screen.height, 0, RenderTextureFormat.ARGBHalf);
            m_HistoryTextures[(FrameCount + 1) % 2] = historyWrite;
        }
        material.SetVector("_Jitter", _Jitter);
        material.SetTexture("_HistoryTex", historyRead);
        // Do not sample history right after a reset.
        material.SetInt("_IgnoreHistory", m_ResetHistory ? 1 : 0);
        material.SetInt("_DilateMode", (int)_dilateMode);
        material.SetInt("_ColorMode", (int)_colorMode);
        material.SetInt("_HistoryMode", (int)_historyMode);
        material.SetInt("_SharpMode", (int)_sharpenMode);
        material.SetFloat("_Sharpness", _sharpness);
        Graphics.Blit(source, historyWrite, material, 0);
        Graphics.Blit(historyWrite, dest);
        m_ResetHistory = false;
    }
}
@BlurryLight
Copy link
Author

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment