@bgolus
Last active April 30, 2024 04:58
Shader "Hidden/JumpFloodOutline"
{
Properties
{
_MainTex ("Texture", 2D) = "white" {}
}
SubShader
{
Tags { "PreviewType" = "Plane" }
Cull Off ZWrite Off ZTest Always
CGINCLUDE
        // just inside the precision of a R16G16_SNorm, to keep the encoded range within 1.0 >= x > -1.0
        #define SNORM16_MAX_FLOAT_MINUS_EPSILON ((float)(32768-2) / (float)(32768-1))

        #define FLOOD_ENCODE_OFFSET float2(1.0, SNORM16_MAX_FLOAT_MINUS_EPSILON)
        #define FLOOD_ENCODE_SCALE float2(2.0, 1.0 + SNORM16_MAX_FLOAT_MINUS_EPSILON)

        #define FLOOD_NULL_POS -1.0
        #define FLOOD_NULL_POS_FLOAT2 float2(FLOOD_NULL_POS, FLOOD_NULL_POS)
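
        // encoding note: pixel position * texel size gives a UV in (0, 1]; the scale and
        // offset above remap x to (-1, 1] and y to (-SNORM16_MAX, 1], so a stored y of
        // exactly -1.0 can never come from a valid position and is reserved as the null
        // sentinel. Decoding is the inverse: (value + FLOOD_ENCODE_OFFSET) * texture size / FLOOD_ENCODE_SCALE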
        ENDCG
        Pass // 0
        {
            Name "INNERSTENCIL"

            Stencil {
                Ref 1
                ReadMask 1
                WriteMask 1
                Comp NotEqual
                Pass Replace
            }

            ColorMask 0
            Blend Zero One

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            #pragma target 4.5

            float4 vert (float4 vertex : POSITION) : SV_POSITION
            {
                return UnityObjectToClipPos(vertex);
            }

            // null frag
            void frag () {}
            ENDCG
        }
        Pass // 1
        {
            Name "BUFFERFILL"

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            #pragma target 4.5

            struct appdata
            {
                float4 vertex : POSITION;
            };

            float4 vert (appdata v) : SV_POSITION
            {
                float4 pos = UnityObjectToClipPos(v.vertex);

                // flip the rendering "upside down" in non OpenGL to make things easier later
                // you'll notice none of the later passes need to pass UVs
                #ifdef UNITY_UV_STARTS_AT_TOP
                pos.y = -pos.y;
                #endif

                return pos;
            }

            half frag () : SV_Target
            {
                return 1.0;
            }
            ENDCG
        }
        Pass // 2
        {
            Name "JUMPFLOODINIT"

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            #pragma target 4.5

            struct appdata
            {
                float4 vertex : POSITION;
                float2 uv : TEXCOORD0;
            };

            struct v2f
            {
                float4 pos : SV_POSITION;
            };

            Texture2D _MainTex;
            float4 _MainTex_TexelSize;

            v2f vert (appdata v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                return o;
            }

            float2 frag (v2f i) : SV_Target
            {
                // integer pixel position
                int2 uvInt = i.pos.xy;

                // sample silhouette texture for sobel
                half3x3 values;
                UNITY_UNROLL
                for(int u=0; u<3; u++)
                {
                    UNITY_UNROLL
                    for(int v=0; v<3; v++)
                    {
                        uint2 sampleUV = clamp(uvInt + int2(u-1, v-1), int2(0,0), (int2)_MainTex_TexelSize.zw - 1);
                        values[u][v] = _MainTex.Load(int3(sampleUV, 0)).r;
                    }
                }

                // calculate output position for this pixel
                float2 outPos = i.pos.xy * abs(_MainTex_TexelSize.xy) * FLOOD_ENCODE_SCALE - FLOOD_ENCODE_OFFSET;

                // interior, return position
                if (values._m11 > 0.99)
                    return outPos;

                // exterior, return no position
                if (values._m11 < 0.01)
                    return FLOOD_NULL_POS_FLOAT2;

                // sobel to estimate edge direction
                float2 dir = -float2(
                    values[0][0] + values[0][1] * 2.0 + values[0][2] - values[2][0] - values[2][1] * 2.0 - values[2][2],
                    values[0][0] + values[1][0] * 2.0 + values[2][0] - values[0][2] - values[1][2] * 2.0 - values[2][2]
                );

                // if dir length is small, this is either a sub pixel dot or line
                // no way to estimate sub pixel edge, so output position
                if (abs(dir.x) <= 0.005 && abs(dir.y) <= 0.005)
                    return outPos;

                // normalize direction
                dir = normalize(dir);

                // sub pixel offset
                float2 offset = dir * (1.0 - values._m11);

                // output encoded offset position
                return (i.pos.xy + offset) * abs(_MainTex_TexelSize.xy) * FLOOD_ENCODE_SCALE - FLOOD_ENCODE_OFFSET;
            }
            ENDCG
        }
        Pass // 3
        {
            Name "JUMPFLOOD"

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            #pragma target 4.5

            struct appdata
            {
                float4 vertex : POSITION;
            };

            struct v2f
            {
                float4 pos : SV_POSITION;
            };

            Texture2D _MainTex;
            float4 _MainTex_TexelSize;
            int _StepWidth;

            v2f vert (appdata v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                return o;
            }

            float2 frag (v2f i) : SV_Target
            {
                // integer pixel position
                int2 uvInt = int2(i.pos.xy);

                // initialize best distance at infinity
                float bestDist = 1.#INF;
                float2 bestCoord;

                // jump samples
                UNITY_UNROLL
                for(int u=-1; u<=1; u++)
                {
                    UNITY_UNROLL
                    for(int v=-1; v<=1; v++)
                    {
                        // calculate offset sample position
                        int2 offsetUV = uvInt + int2(u, v) * _StepWidth;

                        // .Load() acts funny when sampling outside of bounds, so don't
                        offsetUV = clamp(offsetUV, int2(0,0), (int2)_MainTex_TexelSize.zw - 1);

                        // decode position from buffer
                        float2 offsetPos = (_MainTex.Load(int3(offsetUV, 0)).rg + FLOOD_ENCODE_OFFSET) * _MainTex_TexelSize.zw / FLOOD_ENCODE_SCALE;

                        // the offset from current position
                        float2 disp = i.pos.xy - offsetPos;

                        // square distance
                        float dist = dot(disp, disp);
                        // if the offset position isn't a null position and is closer than the best,
                        // set it as the new best and store the position
                        if (offsetPos.y != FLOOD_NULL_POS && dist < bestDist)
                        {
                            bestDist = dist;
                            bestCoord = offsetPos;
                        }
                    }
                }

                // if no valid best distance, output the null position, otherwise output the encoded position
                return isinf(bestDist) ? FLOOD_NULL_POS_FLOAT2 : bestCoord * _MainTex_TexelSize.xy * FLOOD_ENCODE_SCALE - FLOOD_ENCODE_OFFSET;
            }
            ENDCG
        }
        Pass // 4
        {
            Name "JUMPFLOOD_SINGLEAXIS"

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            #pragma target 4.5

            struct appdata
            {
                float4 vertex : POSITION;
            };

            struct v2f
            {
                float4 pos : SV_POSITION;
            };

            Texture2D _MainTex;
            float4 _MainTex_TexelSize;
            int2 _AxisWidth;

            v2f vert (appdata v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                return o;
            }

            half2 frag (v2f i) : SV_Target
            {
                // integer pixel position
                int2 uvInt = int2(i.pos.xy);

                // initialize best distance at infinity
                float bestDist = 1.#INF;
                float2 bestCoord;

                // jump samples
                // only one loop
                UNITY_UNROLL
                for(int u=-1; u<=1; u++)
                {
                    // calculate offset sample position
                    int2 offsetUV = uvInt + _AxisWidth * u;

                    // .Load() acts funny when sampling outside of bounds, so don't
                    offsetUV = clamp(offsetUV, int2(0,0), (int2)_MainTex_TexelSize.zw - 1);

                    // decode position from buffer
                    float2 offsetPos = (_MainTex.Load(int3(offsetUV, 0)).rg + FLOOD_ENCODE_OFFSET) * _MainTex_TexelSize.zw / FLOOD_ENCODE_SCALE;

                    // the offset from current position
                    float2 disp = i.pos.xy - offsetPos;

                    // square distance
                    float dist = dot(disp, disp);
                    // if the offset position isn't a null position and is closer than the best,
                    // set it as the new best and store the position
                    if (offsetPos.x != FLOOD_NULL_POS && dist < bestDist)
                    {
                        bestDist = dist;
                        bestCoord = offsetPos;
                    }
                }

                // if no valid best distance, output the null position, otherwise output the encoded position
                return isinf(bestDist) ? FLOOD_NULL_POS_FLOAT2 : bestCoord * _MainTex_TexelSize.xy * FLOOD_ENCODE_SCALE - FLOOD_ENCODE_OFFSET;
            }
            ENDCG
        }
        Pass // 5
        {
            Name "JUMPFLOODOUTLINE"

            Stencil {
                Ref 1
                ReadMask 1
                WriteMask 1
                Comp NotEqual
                Pass Zero
                Fail Zero
            }

            Blend SrcAlpha OneMinusSrcAlpha

            CGPROGRAM
            #pragma vertex vert
            #pragma fragment frag

            #include "UnityCG.cginc"

            #pragma target 4.5

            struct appdata
            {
                float4 vertex : POSITION;
            };

            struct v2f
            {
                float4 pos : SV_POSITION;
            };

            Texture2D _MainTex;
            half4 _OutlineColor;
            float _OutlineWidth;

            v2f vert (appdata v)
            {
                v2f o;
                o.pos = UnityObjectToClipPos(v.vertex);
                return o;
            }

            half4 frag (v2f i) : SV_Target
            {
                // integer pixel position
                int2 uvInt = int2(i.pos.xy);

                // load encoded position
                float2 encodedPos = _MainTex.Load(int3(uvInt, 0)).rg;

                // early out if null position
                if (encodedPos.y == FLOOD_NULL_POS)
                    return half4(0,0,0,0);

                // decode closest position
                float2 nearestPos = (encodedPos + FLOOD_ENCODE_OFFSET) * abs(_ScreenParams.xy) / FLOOD_ENCODE_SCALE;

                // current pixel position
                float2 currentPos = i.pos.xy;

                // distance in pixels to closest position
                half dist = length(nearestPos - currentPos);

                // calculate outline
                // + 1.0 is because encoded nearest position is half a pixel inset
                // not + 0.5 because we want the anti-aliased edge to be aligned between pixels
                // distance is already in pixels, so this is already perfectly anti-aliased!
                half outline = saturate(_OutlineWidth - dist + 1.0);

                // apply outline to alpha
                half4 col = _OutlineColor;
                col.a *= outline;

                // profit!
                return col;
            }
            ENDCG
        }
    }
}
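
// JumpFloodOutlineRenderer.cs — the companion C# script (Unity expects the file
// name to match the MonoBehaviour class name below)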
using System;
using System.Collections;
using System.Collections.Generic;
using UnityEngine;
using UnityEngine.Rendering;
using UnityEngine.Experimental.Rendering;

#if UNITY_EDITOR
using UnityEditor;
#endif

[ExecuteInEditMode]
public class JumpFloodOutlineRenderer : MonoBehaviour
{
    [ColorUsageAttribute(true, true)] public Color outlineColor = Color.white;
    [Range(0.0f, 1000.0f)] public float outlinePixelWidth = 4f;

    // list of all renderer components you want to have outlined as a single silhouette
    public List<Renderer> renderers = new List<Renderer>();

    // hidden reference to ensure shader gets included with builds
    // gets auto-assigned with an OnValidate() function later
    [HideInInspector, SerializeField] private Shader outlineShader;

    // some hidden settings
    const string shaderName = "Hidden/JumpFloodOutline";
    const CameraEvent cameraEvent = CameraEvent.AfterForwardAlpha;
    const bool useSeparableAxisMethod = true;

    // shader pass indices
    const int SHADER_PASS_INTERIOR_STENCIL = 0;
    const int SHADER_PASS_SILHOUETTE_BUFFER_FILL = 1;
    const int SHADER_PASS_JFA_INIT = 2;
    const int SHADER_PASS_JFA_FLOOD = 3;
    const int SHADER_PASS_JFA_FLOOD_SINGLE_AXIS = 4;
    const int SHADER_PASS_JFA_OUTLINE = 5;

    // render texture IDs
    private int silhouetteBufferID = Shader.PropertyToID("_SilhouetteBuffer");
    private int nearestPointID = Shader.PropertyToID("_NearestPoint");
    private int nearestPointPingPongID = Shader.PropertyToID("_NearestPointPingPong");

    // shader properties
    private int outlineColorID = Shader.PropertyToID("_OutlineColor");
    private int outlineWidthID = Shader.PropertyToID("_OutlineWidth");
    private int stepWidthID = Shader.PropertyToID("_StepWidth");
    private int axisWidthID = Shader.PropertyToID("_AxisWidth");

    // private variables
    private CommandBuffer cb;
    private Material outlineMat;
    private Camera bufferCam;

    private Mesh MeshFromRenderer(Renderer r)
    {
        if (r is SkinnedMeshRenderer)
            return (r as SkinnedMeshRenderer).sharedMesh;
        else if (r is MeshRenderer)
            return r.GetComponent<MeshFilter>().sharedMesh;

        return null;
    }
    private void CreateCommandBuffer(Camera cam)
    {
        if (renderers == null || renderers.Count == 0)
            return;

        if (cb == null)
        {
            cb = new CommandBuffer();
            cb.name = "JumpFloodOutlineRenderer: " + gameObject.name;
        }
        else
        {
            cb.Clear();
        }

        if (outlineMat == null)
        {
            outlineMat = new Material(outlineShader != null ? outlineShader : Shader.Find(shaderName));
        }

        // do nothing if no outline will be visible
        if (outlineColor.a <= (1f/255f) || outlinePixelWidth <= 0f)
        {
            cb.Clear();
            return;
        }

        // support meshes with sub meshes
        // can be from having multiple materials, complex skinning rigs, or a lot of vertices
        int renderersCount = renderers.Count;
        int[] subMeshCount = new int[renderersCount];

        for (int i=0; i<renderersCount; i++)
        {
            var mesh = MeshFromRenderer(renderers[i]);
            Debug.Assert(mesh != null, "JumpFloodOutlineRenderer's renderer [" + i + "] is missing a valid mesh.", gameObject);

            if (mesh != null)
            {
                // assume statically batched meshes only have one sub mesh
                if (renderers[i].isPartOfStaticBatch)
                    subMeshCount[i] = 1; // hack hack hack
                else
                    subMeshCount[i] = mesh.subMeshCount;
            }
        }
        // render meshes to main buffer for the interior stencil mask
        cb.SetRenderTarget(BuiltinRenderTextureType.CameraTarget);
        for (int i=0; i<renderersCount; i++)
        {
            for (int m = 0; m < subMeshCount[i]; m++)
                cb.DrawRenderer(renderers[i], outlineMat, m, SHADER_PASS_INTERIOR_STENCIL);
        }

        // match current quality settings' MSAA settings
        // doesn't check if current camera has MSAA enabled
        // also could just always do MSAA if you so pleased
        int msaa = Mathf.Max(1, QualitySettings.antiAliasing);

        int width = cam.scaledPixelWidth;
        int height = cam.scaledPixelHeight;

        // setup descriptor for silhouette render texture
        RenderTextureDescriptor silhouetteRTD = new RenderTextureDescriptor() {
            dimension = TextureDimension.Tex2D,
            graphicsFormat = GraphicsFormat.R8_UNorm,

            width = width,
            height = height,

            msaaSamples = msaa,
            depthBufferBits = 0,

            sRGB = false,

            useMipMap = false,
            autoGenerateMips = false
        };

        // create silhouette buffer and assign it as the current render target
        cb.GetTemporaryRT(silhouetteBufferID, silhouetteRTD, FilterMode.Point);
        cb.SetRenderTarget(silhouetteBufferID);
        cb.ClearRenderTarget(false, true, Color.clear);

        // render meshes to silhouette buffer
        for (int i=0; i<renderersCount; i++)
        {
            for (int m = 0; m < subMeshCount[i]; m++)
                cb.DrawRenderer(renderers[i], outlineMat, m, SHADER_PASS_SILHOUETTE_BUFFER_FILL);
        }

        // Humus3D wire trick, keep line 1 pixel wide and fade alpha instead of making line smaller
        // slightly nicer looking and no more expensive
        Color adjustedOutlineColor = outlineColor;
        adjustedOutlineColor.a *= Mathf.Clamp01(outlinePixelWidth);
        cb.SetGlobalColor(outlineColorID, adjustedOutlineColor.linear);
        cb.SetGlobalFloat(outlineWidthID, Mathf.Max(1f, outlinePixelWidth));

        // setup descriptor for jump flood render textures
        var jfaRTD = silhouetteRTD;
        jfaRTD.msaaSamples = 1;
        jfaRTD.graphicsFormat = GraphicsFormat.R16G16_SNorm;

        // create jump flood buffers to ping pong between
        cb.GetTemporaryRT(nearestPointID, jfaRTD, FilterMode.Point);
        cb.GetTemporaryRT(nearestPointPingPongID, jfaRTD, FilterMode.Point);

        // calculate the number of jump flood passes needed for the current outline width
        // + 1.0f to handle half pixel inset of the init pass and antialiasing
        int numMips = Mathf.CeilToInt(Mathf.Log(outlinePixelWidth + 1.0f, 2f));
        int jfaIter = numMips-1;
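        // e.g. a 20 pixel outline: log2(20 + 1) ≈ 4.39, so numMips = 5 and jfaIter = 4,
        // giving flood passes with step widths of 16, 8, 4, 2, and 1 pixels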
        // Alan Wolfe's separable axis JFA - https://www.shadertoy.com/view/Mdy3D3
        if (useSeparableAxisMethod)
        {
            // jfa init
            cb.Blit(silhouetteBufferID, nearestPointID, outlineMat, SHADER_PASS_JFA_INIT);

            // jfa flood passes
            for (int i=jfaIter; i>=0; i--)
            {
                // calculate appropriate jump width for each iteration
                // + 0.5 is just me being cautious to avoid any floating point math rounding errors
                float stepWidth = Mathf.Pow(2, i) + 0.5f;

                // the two separable passes, one axis at a time
                cb.SetGlobalVector(axisWidthID, new Vector2(stepWidth, 0f));
                cb.Blit(nearestPointID, nearestPointPingPongID, outlineMat, SHADER_PASS_JFA_FLOOD_SINGLE_AXIS);
                cb.SetGlobalVector(axisWidthID, new Vector2(0f, stepWidth));
                cb.Blit(nearestPointPingPongID, nearestPointID, outlineMat, SHADER_PASS_JFA_FLOOD_SINGLE_AXIS);
            }
        }

        // traditional JFA
        else
        {
            // choose a starting buffer so we always finish on the same buffer
            int startBufferID = (jfaIter % 2 == 0) ? nearestPointPingPongID : nearestPointID;

            // jfa init
            cb.Blit(silhouetteBufferID, startBufferID, outlineMat, SHADER_PASS_JFA_INIT);

            // jfa flood passes
            for (int i=jfaIter; i>=0; i--)
            {
                // calculate appropriate jump width for each iteration
                // + 0.5 is just me being cautious to avoid any floating point math rounding errors
                cb.SetGlobalFloat(stepWidthID, Mathf.Pow(2, i) + 0.5f);

                // ping pong between buffers
                if (i % 2 == 1)
                    cb.Blit(nearestPointID, nearestPointPingPongID, outlineMat, SHADER_PASS_JFA_FLOOD);
                else
                    cb.Blit(nearestPointPingPongID, nearestPointID, outlineMat, SHADER_PASS_JFA_FLOOD);
            }
        }

        // jfa decode & outline render
        cb.Blit(nearestPointID, BuiltinRenderTextureType.CameraTarget, outlineMat, SHADER_PASS_JFA_OUTLINE);

        cb.ReleaseTemporaryRT(silhouetteBufferID);
        cb.ReleaseTemporaryRT(nearestPointID);
        cb.ReleaseTemporaryRT(nearestPointPingPongID);
    }
    void ApplyCommandBuffer(Camera cam)
    {
#if UNITY_EDITOR
        // hack to avoid rendering in the inspector preview window
        if (cam.gameObject.name == "Preview Scene Camera")
            return;
#endif

        if (bufferCam != null)
        {
            if (bufferCam == cam)
                return;
            else
                RemoveCommandBuffer(cam);
        }

        Plane[] planes = GeometryUtility.CalculateFrustumPlanes(cam);

        // skip rendering if none of the renderers are in view
        bool visible = false;
        for (int i=0; i<renderers.Count; i++)
        {
            if (GeometryUtility.TestPlanesAABB(planes, renderers[i].bounds))
            {
                visible = true;
                break;
            }
        }

        if (!visible)
            return;

        CreateCommandBuffer(cam);
        if (cb == null)
            return;

        bufferCam = cam;
        bufferCam.AddCommandBuffer(cameraEvent, cb);
    }

    void RemoveCommandBuffer(Camera cam)
    {
        if (bufferCam != null && cb != null)
        {
            bufferCam.RemoveCommandBuffer(cameraEvent, cb);
            bufferCam = null;
        }
    }
    void OnEnable()
    {
        Camera.onPreRender += ApplyCommandBuffer;
        Camera.onPostRender += RemoveCommandBuffer;
    }

    void OnDisable()
    {
        Camera.onPreRender -= ApplyCommandBuffer;
        Camera.onPostRender -= RemoveCommandBuffer;
    }

#if UNITY_EDITOR
    void OnValidate()
    {
        if (renderers != null)
        {
            for (int i=renderers.Count-1; i>-1; i--)
            {
                if (renderers[i] == null || (!(renderers[i] is SkinnedMeshRenderer) && !(renderers[i] is MeshRenderer)))
                    renderers.RemoveAt(i);
                else
                {
                    bool foundDuplicate = false;
                    for (int k=0; k<i; k++)
                    {
                        if (renderers[i] == renderers[k])
                        {
                            foundDuplicate = true;
                            break;
                        }
                    }

                    if (foundDuplicate)
                        renderers.RemoveAt(i);
                }
            }
        }

        if (outlineShader == null)
            outlineShader = Shader.Find(shaderName);
    }
    public void FindActiveMeshes()
    {
        Undo.RecordObject(this, "Filling with all active Renderer components");

        GameObject parent = this.gameObject;
        if (renderers != null)
        {
            foreach (var renderer in renderers)
            {
                if (renderer)
                {
                    parent = renderer.transform.parent.gameObject;
                    break;
                }
            }
        }

        if (parent != null)
        {
            var skinnedMeshes = parent.GetComponentsInChildren<SkinnedMeshRenderer>(true);
            var meshes = parent.GetComponentsInChildren<MeshRenderer>(true);

            if (skinnedMeshes.Length > 0 || meshes.Length > 0)
            {
                foreach (var sk in skinnedMeshes)
                {
                    if (sk.gameObject.activeSelf)
                        renderers.Add(sk);
                }

                foreach (var mesh in meshes)
                {
                    if (mesh.gameObject.activeSelf)
                        renderers.Add(mesh);
                }

                OnValidate();
            }
            else
                Debug.LogError("No Active Meshes Found");
        }
    }
#endif
}
#if UNITY_EDITOR
[CustomEditor(typeof(JumpFloodOutlineRenderer))]
public class JumpFloodOutlineRendererEditor : Editor
{
    public override void OnInspectorGUI()
    {
        base.OnInspectorGUI();

        if (GUILayout.Button("Get Active Children Renderers"))
        {
            UnityEngine.Object[] objs = serializedObject.targetObjects;
            foreach (var obj in objs)
            {
                var mh = (obj as JumpFloodOutlineRenderer);
                mh.FindActiveMeshes();
            }
        }
    }
}
#endif
@bgolus (Author) commented Jan 14, 2023

What do you mean by see-through? Do you mean the fact it'll render the outline on top of everything even if there are walls / other objects closer to the camera?

There's no easy way to fix this, as the effect is done as a 2D screen space one with no knowledge of depth. However I did think of a few ways to accomplish this, and implemented one of them for the game I used this technique in.

Option 1: Occluders
This is what I used. I marked specific meshes as being outline occluders. When an outline is being rendered, I check if those objects' bounds are closer to the camera than the bounds of the object I'm doing outlines on, and then render any closer occluder into the stencil buffer. This works if you have a limited number of meshes you want to be "on top", but obviously not very general purpose.

Option 2: Outline Mesh Depth
For this method you would render the depth of the mesh you're going to be rendering outlines for to a separate texture, one w/o anti-aliasing. Basically generate a custom camera depth texture for just that mesh. Then when rendering the outlines into the scene you would sample the depth value at the pixel offset you're checking the distance from, getting the depth of the mesh edge, and use that depth to test against the camera depth texture. There are some gotchas with this technique though. If you sample the camera depth texture at the offset position, outlines will disappear when the nearest point on the mesh isn't visible, which can cause some weird artifacts when the mesh is occluded. If you sample the camera depth texture at the pixel the outline is at, it'll behave better when going behind walls, but the outline will be clipped by the floor. So you'd have to do some combination of both, or some hack like having the outline's "depth" move towards the camera as it extends out.

Option 3: Occluded Mask
When rendering the mask, use the camera depth texture to clip the "fill" pass. This will result in an outline that goes around only the visible part of the mesh, which is a slightly different behavior, but may be what you're going for.

@creamberry824 commented

> [quotes bgolus's full reply above]

Can you share the code for 'Option 1: Occluders'?

@bgolus (Author) commented Jan 17, 2023

> Can you share the code for 'Option 1: Occluders'?

The simple solution would be to add a list of occluder renderers, and then make a copy of the loop that starts at line 110, iterating over those renderer components instead.

What I actually did was a bit more complicated, as outline occluders are their own MonoBehaviour I put on separate actors, which register themselves with a manager class. But the real system I use is more complex in general than this example code.

@creamberry824 commented

> [quotes bgolus's full reply above]

Can you share the code for that 'list of occluder renderers'...?

@chargingElephant commented

I'd love to use this outline with the HDRP. Is that possible? I've got it working with a non-HDRP project. It just seems incompatible with that pipeline at the moment.

@NathoSteveo commented Aug 14, 2023

Can you explain how one would apply 'Option 3: Occluded Mask'? I'd like to use it in my scene; can you break it down?

@bgolus (Author) commented Aug 14, 2023

> Can you share the code for that 'list of occluder renderers'...?

public List<Renderer> occluders = new List<Renderer>();
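
A minimal sketch of how that list might plug into CreateCommandBuffer(), duplicating the interior stencil loop with the CPU-side bounds check described above (the single outlinedDist comparison and the use of sub mesh 0 only are simplifications for illustration, not the shipped implementation):

// inside CreateCommandBuffer(), after the interior stencil loop:
float outlinedDist = Vector3.Distance(cam.transform.position, renderers[0].bounds.center);
for (int i = 0; i < occluders.Count; i++)
{
    // crude bounds-vs-bounds ordering: only occluders closer than the outlined object mask it
    float occluderDist = Vector3.Distance(cam.transform.position, occluders[i].bounds.center);
    if (occluderDist < outlinedDist)
        cb.DrawRenderer(occluders[i], outlineMat, 0, SHADER_PASS_INTERIOR_STENCIL);
}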

> I'd love to use this outline with the HDRP. Is that possible? I've got it working with a non-HDRP project. It just seems incompatible with that pipeline at the moment.

Is it possible? Yes. Do I know what that code looks like? No. I've only loosely dabbled with the HDRP. I know there are some URP ports of this code that might be a good starting point, but the HDRP and URP differ so much I don't know if it would help or not.

> Can you explain how one would apply 'Option 3: Occluded Mask'? I'd like to use it in my scene; can you break it down?

In the fill shader you'd need to modify it to have the vertex shader pass the screen UV and depth to the fragment shader stage. Technically the SV_Position already does, and you can reconstruct the screen UV from that data, but it'd be easier to look at something like the standard particle shader to see what it's doing to pass the screen UVs (projectedPosition), and sample the camera depth texture.

https://github.com/TwoTailsGames/Unity-Built-in-Shaders/blob/master/DefaultResourcesExtra/Particle%20Alpha%20Blend.shader

Look for any "soft particle" related code. Instead of calculating an alpha from the difference, you'd discard if the particle Z is greater than the scene Z.

if (partZ > sceneZ) discard;

There are additional minor optimizations you could do, like comparing the raw Z depth from the SV_Position Z and the raw depth texture value instead of passing the linear Z depth from the vertex shader and decoding the linear Z depth from the depth texture like the particle code does. But it's likely unnecessary to do.
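
As a rough illustration, a depth-clipped version of the BUFFERFILL pass might look something like this, following the soft particle code from that shader (an untested sketch; it reuses the pass's existing appdata struct and assumes the camera depth texture is available):

struct v2f
{
    float4 pos : SV_POSITION;
    float4 projPos : TEXCOORD0;
};

UNITY_DECLARE_DEPTH_TEXTURE(_CameraDepthTexture);

v2f vert (appdata v)
{
    v2f o;
    o.pos = UnityObjectToClipPos(v.vertex);

    // screen position and eye depth for the depth test, like the particle shader
    o.projPos = ComputeScreenPos(o.pos);
    COMPUTE_EYEDEPTH(o.projPos.z);

    // keep the original pass's custom y flip, after grabbing the screen position
    #ifdef UNITY_UV_STARTS_AT_TOP
    o.pos.y = -o.pos.y;
    #endif

    return o;
}

half frag (v2f i) : SV_Target
{
    // compare the mesh's depth against the scene depth, soft particle style
    float sceneZ = LinearEyeDepth(SAMPLE_DEPTH_TEXTURE_PROJ(_CameraDepthTexture, UNITY_PROJ_COORD(i.projPos)));
    float partZ = i.projPos.z;
    if (partZ > sceneZ)
        discard;

    return 1.0;
}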

@NathoSteveo commented

> [quotes bgolus's full reply above]

Thank you! Awesome info here, I appreciate it.

@NathoSteveo commented

Can you make a version of your shader that applies this Occluded Mask, so that outlines are not seen through objects closer to the camera? I can't seem to get it working myself.

@bgolus (Author) commented Aug 15, 2023

> Can you make a version of your shader that applies this Occluded Mask, so that outlines are not seen through objects closer to the camera? I can't seem to get it working myself.

Ah, that's a slightly different setup. That's what "option 2" was describing, and it requires an additional render of the mesh where you output the depth to a non-MSAA buffer, plus modifying the outline shader to get that depth from the JFA offset position and use it to test against the camera depth texture.

As for whether I'll write that shader for you: no. I released this code so people can learn, not as a free product, so there are features, like that one, intentionally left unimplemented. Though I'm happy to assist you if you post your attempts.

@NathoSteveo commented

Okay, no worries. This has inspired me to learn custom shaders, thanks :)

@nergethic commented Aug 18, 2023

Hi Ben.
I'm wondering if it's possible to implement this outline effect with the support of the previously mentioned "option 2" for depth testing and temporal AA in HDRP.

I've already implemented the depth testing part (inside the JUMPFLOODOUTLINE pass). However, I'm experiencing issues with the following code:
float2 customDepthUV = currentPosCS + (nearestPosCS - currentPosCS);
float customDepth = LOAD_TEXTURE2D(_CustomDepthMask, uint2(customDepthUV));

When I activate TAA and visualize this depth value, it flickers aggressively with black-and-white stripes that change every frame. My custom pass is set to execute before post-processes, so it takes place before the TAA resolve. As I understand it, both the camera color and depth buffers, as well as the camera view matrices, are jittered at this point so I thought there wouldn't be any issues with sampling at this injection point.

The _CustomDepthMask texture is rendered using the CommandBuffer's DrawRenderers method.
I'm wondering if the JUMPFLOODOUTLINE pass might be rendered without jitter but I'm not sure how to verify this.
I've attempted to use both Blit and the CoreUtils.DrawFullScreen method that invokes CommandBuffer's DrawProcedural.

I also tried to adjust customDepthUV with the jitter value by utilizing the TAAJitter vector from HDCamera, but I might have done it incorrectly. I don't have much experience with TAA, and I'm unsure if any motion vectors should be involved here. I would think not, because these textures aren't rendered to a color buffer and aren't involved with the resolve pass.

I would appreciate any help :)

@bgolus (Author) commented Aug 18, 2023

The issue is that because the fill buffer is using MSAA, and this example uses the sub-pixel approximation, there are customDepthUV positions that will have no depth value in them. This is exacerbated by TAA, making it flicker wildly, as which edges point to a pixel with depth info and which do not changes each frame. A fix for this would be to offset the UV by an additional texel in the offset direction if the sampled depth value is at the far plane, or to run a one pixel inflate pass on the custom depth texture before the final outline pass.

Though if I were to make this effect work with TAA, I'd not do that. I'd just disable MSAA on the fill render texture and remove the related subpixel tweaks from the init pass.
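
A minimal sketch of what that one pixel inflate pass could look like, written in the style of the init pass above and reusing its v2f (assumes a reversed-Z depth buffer where the far plane is 0, and the _CustomDepthMask name from the earlier comment):

Texture2D _CustomDepthMask;
float4 _CustomDepthMask_TexelSize;

float frag (v2f i) : SV_Target
{
    int2 uvInt = int2(i.pos.xy);

    // take the nearest depth (max, with reversed-Z) in the 3x3 neighborhood,
    // so edge texels with no depth inherit one from a covered neighbor
    float best = 0.0;
    UNITY_UNROLL
    for (int u = -1; u <= 1; u++)
    {
        UNITY_UNROLL
        for (int v = -1; v <= 1; v++)
        {
            int2 sampleUV = clamp(uvInt + int2(u, v), int2(0, 0), (int2)_CustomDepthMask_TexelSize.zw - 1);
            best = max(best, _CustomDepthMask.Load(int3(sampleUV, 0)).r);
        }
    }

    return best;
}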

@nergethic commented

Thank you. I implemented the depth inflate pass, and now the issue is gone. First, I made sure that I disabled mip maps, set msaaSamples to MSAASamples.None, changed the filter mode to point, and set anisoLevel to 0 on the render textures. I simplified the init pass to look like this:

float silhouetteMask = LOAD_TEXTURE2D(_MainTex, uint2(i.positionCS.xy)).r;
float2 outPos = i.positionCS.xy * abs(_MainTex_TexelSize.xy) * FLOOD_ENCODE_SCALE - FLOOD_ENCODE_OFFSET;
if (silhouetteMask > 0.5)
     return outPos;
return FLOOD_NULL_POS_FLOAT2;

Without the inflate pass, I can't see any difference, and the flickering still occurs. Maybe the problem wasn't caused by MSAA?

@bartofzo commented Dec 11, 2023

Thanks a lot for this and the detailed article on Medium!

I'm trying to make this work with downscaling to make it faster on high resolutions but I'm struggling with a few aspects. Maybe you can point me in the right direction?

I got it "kind of" working by just halving the render texture sizes and applying the suggestions mentioned in this comment:
https://gist.github.com/bgolus/a18c1a3fc9af2d73cc19169a809eb195?permalink_comment_id=4072581#gistcomment-4072581

EDIT: Got the coordinates working. But it looks pixelated when doing JF at a lower resolution, even when using an intermediate texture where I store the distances and read those values out in the final outline pass (with bilinear filtering).

Do you have any tips on how to improve on that? Or is that just the reality of downsampling this algorithm? Thanks.

@bgolus (Author) commented Dec 13, 2023

On the game my implementation of this first shipped with, I implemented it working at a lower resolution and upscaling via an intermediate distance field texture, to handle lower performance systems or very high resolutions. The main things I had to make sure of were to use at least an RHalf format for the intermediate texture, and that it really was being sampled with bilinear filtering. And yes, after that it still looked a bit chunky, but you had to look really close to catch it. It shouldn't look pixelated, but it will look a little chunky, with some hard diagonals instead of nicely rounded corners. Unity really likes to force render textures to be point filtered, so double check it isn't being sampled that way. You might even want to use an inline sampler state to absolutely force it to behave.

I had it fall back to this mode at any resolution over 1080p, running all of the passes at no more than 1080p. And it ran in this mode at all times on the Switch, I think capping out at 640x360 (half of 720p). For the most part I don't think most people will notice. Certainly no one else working on that project found it objectionable.

Unreal ships a JFA implementation in one of its optional packages. In their case they assume none of the data being fed into it has anti-aliasing, so they apply a small gaussian blur to the intermediate distance field texture to fake anti-aliasing. This won't help the upscaling chunkiness alone, but a very cheap 4 sample, 1.5 texel blur done at the full screen resolution during the final pass could alleviate some of the chunkiness for not too much cost.
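
For reference, an inline sampler state in the final outline pass might look like this (a sketch; the _DistanceField texture and the uv interpolator are hypothetical additions for the downsampled variant):

Texture2D _DistanceField; // intermediate RHalf distance texture, at the downsampled resolution
SamplerState my_linear_clamp_sampler; // inline sampler state: the name itself forces bilinear filtering with clamped UVs

half4 frag (v2f i) : SV_Target
{
    // guaranteed bilinear sample of the distance field, regardless of the texture's own filter mode
    half dist = _DistanceField.Sample(my_linear_clamp_sampler, i.uv).r;

    half4 col = _OutlineColor;
    col.a *= saturate(_OutlineWidth - dist + 1.0);
    return col;
}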

@bartofzo commented

> [quotes bgolus's full reply above]

I think the problem was that the outline width I'm using is very small... I set it to values from 1 up to 5 and it was pixelated. Using a larger value like 50, things start to round off. We want very narrow outlines, so I'm starting to think this may not be the best approach in that case. Maybe brute forcing it would be faster(?). Anyway, thanks again!

@bgolus (Author) commented Dec 18, 2023

Ah, yeah.

There are a lot of things I did for the production version of this shader to handle cases like that. The above sample code limits the actual line width to a minimum of 1 pixel, and then fades out the line opacity below that. When doing downsampled outlines, you need to limit it to 1 texel of the downsampled resolution instead.
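
For example, the width and alpha fade lines from CreateCommandBuffer() might become something like this (downsampleFactor is a hypothetical variable; 2 means half resolution):

float scaledWidth = outlinePixelWidth / downsampleFactor;
Color adjustedOutlineColor = outlineColor;
adjustedOutlineColor.a *= Mathf.Clamp01(scaledWidth); // fade alpha below one downsampled texel
cb.SetGlobalColor(outlineColorID, adjustedOutlineColor.linear);
cb.SetGlobalFloat(outlineWidthID, Mathf.Max(1f, scaledWidth)); // never narrower than one downsampled texel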

As for being "faster", if you look at the article this example code is from, you'll see that JFA was faster for literally everything not a single pixel wide. But not by much. So if you want a single pixel, brute force. If you ever want more than one pixel, JFA.
