@igv
Last active April 19, 2024 11:26
Basically it's an accurate sharpener + antiringing. Usage: glsl-shader="~~/SSimSuperRes.glsl"
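In mpv.conf that usage amounts to something like the following (the ~~/ prefix expands to mpv's configuration directory, e.g. ~/.config/mpv on Linux or %APPDATA%\mpv on Windows, so that is where the .glsl file goes; the scale line is optional and just reflects the recommendations in the comments below):

scale=ewa_lanczossharp
glsl-shader="~~/SSimSuperRes.glsl"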
// SSimSuperRes by Shiandow
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3.0 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library.
//!HOOK POSTKERNEL
//!BIND HOOKED
//!SAVE LOWRES
//!HEIGHT NATIVE_CROPPED.h
//!WHEN NATIVE_CROPPED.h OUTPUT.h <
//!COMPONENTS 4
//!DESC SSSR Downscaling I
#define axis 1
#define offset vec2(0,0)
#define MN(B,C,x) (x < 1.0 ? ((2.-1.5*B-(C))*x + (-3.+2.*B+C))*x*x + (1.-(B)/3.) : (((-(B)/6.-(C))*x + (B+5.*C))*x + (-2.*B-8.*C))*x+((4./3.)*B+4.*C))
#define Kernel(x) MN(0.334, 0.333, abs(x))
#define taps 2.0
#define Luma(rgb) dot(rgb*rgb, vec3(0.2126, 0.7152, 0.0722))
vec4 hook() {
float low = ceil((HOOKED_pos - taps/input_size) * HOOKED_size - offset - 0.5)[axis];
float high = floor((HOOKED_pos + taps/input_size) * HOOKED_size - offset - 0.5)[axis];
float W = 0.0;
vec4 avg = vec4(0);
vec2 pos = HOOKED_pos;
vec4 tex;
for (float k = low; k <= high; k++) {
pos[axis] = HOOKED_pt[axis] * (k - offset[axis] + 0.5);
float rel = (pos[axis] - HOOKED_pos[axis])*input_size[axis];
float w = Kernel(rel);
tex.rgb = textureLod(HOOKED_raw, pos, 0.0).rgb * HOOKED_mul;
tex.a = Luma(tex.rgb);
avg += w * tex;
W += w;
}
avg /= W;
return vec4(avg.rgb, max(abs(avg.a - Luma(avg.rgb)), 5e-7));
}
//!HOOK POSTKERNEL
//!BIND LOWRES
//!SAVE LOWRES
//!WIDTH NATIVE_CROPPED.w
//!HEIGHT NATIVE_CROPPED.h
//!WHEN NATIVE_CROPPED.w OUTPUT.w <
//!COMPONENTS 4
//!DESC SSSR Downscaling II
#define axis 0
#define offset vec2(0,0)
#define MN(B,C,x) (x < 1.0 ? ((2.-1.5*B-(C))*x + (-3.+2.*B+C))*x*x + (1.-(B)/3.) : (((-(B)/6.-(C))*x + (B+5.*C))*x + (-2.*B-8.*C))*x+((4./3.)*B+4.*C))
#define Kernel(x) MN(0.334, 0.333, abs(x))
#define taps 2.0
#define Luma(rgb) dot(rgb*rgb, vec3(0.2126, 0.7152, 0.0722))
vec4 hook() {
float low = ceil((LOWRES_pos - taps/input_size) * LOWRES_size - offset - 0.5)[axis];
float high = floor((LOWRES_pos + taps/input_size) * LOWRES_size - offset - 0.5)[axis];
float W = 0.0;
vec4 avg = vec4(0);
vec2 pos = LOWRES_pos;
vec4 tex;
for (float k = low; k <= high; k++) {
pos[axis] = LOWRES_pt[axis] * (k - offset[axis] + 0.5);
float rel = (pos[axis] - LOWRES_pos[axis])*input_size[axis];
float w = Kernel(rel);
tex.rgb = textureLod(LOWRES_raw, pos, 0.0).rgb * LOWRES_mul;
tex.a = Luma(tex.rgb);
avg += w * tex;
W += w;
}
avg /= W;
return vec4(avg.rgb, max(abs(avg.a - Luma(avg.rgb)), 5e-7) + LOWRES_texOff(0).a);
}
//!HOOK POSTKERNEL
//!BIND PREKERNEL
//!BIND LOWRES
//!SAVE var
//!WIDTH NATIVE_CROPPED.w
//!HEIGHT NATIVE_CROPPED.h
//!WHEN NATIVE_CROPPED.h OUTPUT.h <
//!COMPONENTS 2
//!DESC SSSR var
#define spread 1.0 / 4.0
#define GetL(x,y) PREKERNEL_tex(PREKERNEL_pt * (PREKERNEL_pos * input_size + tex_offset + vec2(x,y))).rgb
#define GetH(x,y) LOWRES_texOff(vec2(x,y)).rgb
#define Luma(rgb) dot(rgb*rgb, vec3(0.2126, 0.7152, 0.0722))
#define diff(x,y) vec2(Luma((GetL(x,y) - meanL)), Luma((GetH(x,y) - meanH)))
vec4 hook() {
vec3 meanL = GetL(0,0);
vec3 meanH = GetH(0,0);
for (int X=-1; X<=1; X+=2) {
meanL += GetL(X,0) * spread;
meanH += GetH(X,0) * spread;
}
for (int Y=-1; Y<=1; Y+=2) {
meanL += GetL(0,Y) * spread;
meanH += GetH(0,Y) * spread;
}
meanL /= (1.0 + 4.0*spread);
meanH /= (1.0 + 4.0*spread);
vec2 var = diff(0,0);
for (int X=-1; X<=1; X+=2)
var += diff(X,0) * spread;
for (int Y=-1; Y<=1; Y+=2)
var += diff(0,Y) * spread;
return vec4(max(var / (1.0 + 4.0*spread), vec2(1e-6)), 0, 0);
}
//!HOOK POSTKERNEL
//!BIND HOOKED
//!BIND PREKERNEL
//!BIND LOWRES
//!BIND var
//!WHEN NATIVE_CROPPED.h OUTPUT.h <
//!DESC SSSR final pass
#define oversharp 0.5
// -- Window Size --
#define taps 3.0
#define even (taps - 2.0 * floor(taps / 2.0) == 0.0)
#define minX int(1.0-ceil(taps/2.0))
#define maxX int(floor(taps/2.0))
#define Kernel(x) cos(acos(-1.0)*(x)/taps) // Hann kernel
// -- Input processing --
#define var(x,y) var_tex(var_pt * (pos + vec2(x,y) + 0.5)).rg
#define GetL(x,y) PREKERNEL_tex(PREKERNEL_pt * (pos + tex_offset + vec2(x,y) + 0.5)).rgb
#define GetH(x,y) LOWRES_tex(LOWRES_pt * (pos + vec2(x,y) + 0.5))
#define Luma(rgb) dot(rgb*rgb, vec3(0.2126, 0.7152, 0.0722))
vec4 hook() {
vec4 c0 = HOOKED_texOff(0);
vec2 pos = HOOKED_pos * LOWRES_size - vec2(0.5);
vec2 offset = pos - (even ? floor(pos) : round(pos));
pos -= offset;
vec2 mVar = vec2(0.0);
for (int X=-1; X<=1; X++)
for (int Y=-1; Y<=1; Y++) {
vec2 w = clamp(1.5 - abs(vec2(X,Y)), 0.0, 1.0);
mVar += w.r * w.g * vec2(GetH(X,Y).a, 1.0);
}
mVar.r /= mVar.g;
// Calculate faithfulness force
float weightSum = 0.0;
vec3 diff = vec3(0);
for (int X = minX; X <= maxX; X++)
for (int Y = minX; Y <= maxX; Y++)
{
float R = (-1.0 - oversharp) * sqrt(var(X,Y).r / (var(X,Y).g + mVar.r));
vec2 krnl = Kernel(vec2(X,Y) - offset);
float weight = krnl.r * krnl.g / (Luma((c0.rgb - GetH(X,Y).rgb)) + GetH(X,Y).a);
diff += weight * (GetL(X,Y) + GetH(X,Y).rgb * R + (-1.0 - R) * (c0.rgb));
weightSum += weight;
}
diff /= weightSum;
c0.rgb = ((c0.rgb) + diff);
return c0;
}
@yeezylife

yeezylife commented Aug 27, 2021

Hello igv.
I'm wondering, in order to get the best quality possible, which built-in scaler should we use with SSR?
I'm currently using scale=ewa_lanczossharp, by the way.

@igv

igv commented Aug 27, 2021

ewa_lanczossharp probably.

@igv

igv commented Aug 27, 2021

Btw, I'm using VIFp (preferred) and SSIM (with weights = [1] or sometimes weights = [0.2,0.8]) for measuring "quality".
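igv doesn't say which implementations he used; as a rough, purely illustrative way to run the same metrics offline, the sewar Python package provides both:

# compare_quality.py - illustrative sketch only, not igv's actual measurement setup
import cv2
from sewar.full_ref import vifp, ssim

ref = cv2.imread("reference.png")   # ground-truth high-resolution frame (placeholder name)
test = cv2.imread("upscaled.png")   # frame produced by the shader chain (placeholder name)

print("VIFp:", vifp(ref, test))
print("SSIM:", ssim(ref, test)[0])  # sewar's ssim returns a (ssim, cs) tuple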

@yeezylife

yeezylife commented Oct 12, 2021

I watch anime mostly, and I find that SSSR tends to bloat up the lines (as in, dark lines) in anime content.
With ravu or fsrcnnx the lines are thin, like the original lines.

@igv

igv commented Oct 12, 2021

If you're talking about dark lines, that's because your anime was downscaled in gamma light. SSSR doesn't account for that; ravu and fsrcnnx do.

@Rafee-M

Rafee-M commented Jan 24, 2022

@crazysword1 Could you post your config? I'm trying to make one and I'm a little lost on how to use shaders and scalers. Thanks!

@Rafee-M

Rafee-M commented Jan 25, 2022

@igv Is there a way to know if the shaders are working? When playing back 4K HDR content on a 1440p screen, CPU (Ryzen 5 3600) usage is high and GPU (RTX 3070) usage is low. From what I've seen in various posts, shaders are supposed to be GPU-intensive.

My config:

vo=gpu 
deband=no
gpu-api=vulkan
fbo-format=rgba16hf
linear-downscaling=no
keep-open=yes
save-position-on-quit=yes
autofit=30%
screenshot-format=png
screenshot-high-bit-depth=yes
screenshot-png-compression=0
screenshot-png-filter=0
screenshot-directory='~~desktop/'


#### OSD/OSC
osc=no                           #Allows for custom OSC to be used  https://github.com/cyl0/mpv-osc-morden-x
border=no


#### Subtitles
demuxer-mkv-subtitle-preroll=yes
sub-ass-vsfilter-blur-compat=no
slang=en,eng


#### Dither
dither-depth=auto


#### Resizer & Shader ####
glsl-shader="C:\Users\Rafee\Documents\Programs\mpv\mpv\shaders\FSRCNNX_x2_16-0-4-1.glsl"
glsl-shader="C:\Users\Rafee\Documents\Programs\mpv\mpv\shaders\SSimSuperRes.glsl"
glsl-shader="C:\Users\Rafee\Documents\Programs\mpv\mpv\shaders\SSimDownscaler.glsl"
glsl-shader="C:\Users\Rafee\Documents\Programs\mpv\mpv\shaders\KrigBilateral.glsl"
no-scaler-resizes-only           #fixing the pixel shift

scale=ewa_lanczossharp
dscale=lanczos 
cscale=lanczos


@yeezylife

"shift+i" then press "2" .Also you can find most stuff you need to know here by the way.

@Rafee-M

Rafee-M commented Jan 26, 2022

How do I know when SSR or FSRCNNX is in use and when scale=ewa_lanczossharp is in use? (Native res: 1920x1080, scaled res: 2560x1440)

Config:

glsl-shader="C:\Users\Rafee\Documents\Programs\mpv\mpv\shaders\FSRCNNX_x2_16-0-4-1.glsl"
glsl-shader="C:\Users\Rafee\Documents\Programs\mpv\mpv\shaders\SSimSuperRes.glsl"
glsl-shader="C:\Users\Rafee\Documents\Programs\mpv\mpv\shaders\SSimDownscaler.glsl"
glsl-shader="C:\Users\Rafee\Documents\Programs\mpv\mpv\shaders\KrigBilateral.glsl"
no-scaler-resizes-only

scale=ewa_lanczossharp
dscale=lanczos 
cscale=lanczos

@igv

igv commented Jan 26, 2022

For scaling factors >1 and <=1.3: only SSimSuperRes (together with ewa_lanczossharp) is in use.
For factors >1.3 and <2: FSRCNNX + SSimDownscaler.
For factors >2: FSRCNNX + SSimSuperRes (together with ewa_lanczossharp).
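Applied to the 1920x1080 → 2560x1440 case asked about above: the scaling factor is 2560/1920 ≈ 1.33, which falls in the >1.3 and <2 range, so that setup would be running FSRCNNX + SSimDownscaler.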

@yeezylife

In artoriuz's tests, Mathematically Evaluating mpv's Upscaling Algorithms, lanczos seems to get higher scores in every upscaling test. Does this mean we should just use scale=lanczos with SSR, since it has higher test scores and uses fewer resources?

@igv

igv commented Jun 4, 2022

yes.

@lextra2

lextra2 commented Jul 20, 2022

@igv Would you be interested in adding this as a shader for mpv? I'm mostly interested in debilinear & debicubic. It would be useful for Blu-ray sources from older anime.

@yeezylife

yeezylife commented Jul 20, 2022

It's a "VapourSynth plugin to undo upscaling"

Maybe not suitable for normal downscaling?

@lextra2

lextra2 commented Jul 20, 2022

It's a "VapourSynth plugin to undo upscaling"

Maybe not suitable for normal downscaling?

Yeah. Not suitable for normal downscaling. But I don't see a reason why you couldn't do a chain like "scale down with debilinear shader > scale up with spline36 from mpv".

Undoing the bilinear/bicubic upscaling companies did for the Blu-ray release and using a better upscaling filter would result in a sharper image.

@po5

po5 commented Jul 20, 2022

Sadly almost no production has all elements at the same resolution, and a lot of them have to be descaled per-scene if a descale is even possible.
Descaling without masking isn't a great idea, and that's where you lose enough performance that your filtering may not be realtime viable anymore.
You can use mpv's vapoursynth filtering feature btw.
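As a rough sketch of what that could look like, assuming the descale plugin is installed and that the source really was a plain bilinear upscale from 720p (exactly the per-production guesswork discussed above), a script along these lines could be loaded with vf=vapoursynth=/path/to/descale.vpy:

# descale.vpy - illustrative only; descaling chroma together with luma like this is a
# simplification (real workflows usually descale the luma plane only, with masking)
import vapoursynth as vs
core = vs.core

# mpv's vapoursynth filter exposes the incoming video as 'video_in'
clip = core.resize.Bilinear(video_in, format=vs.YUV444PS)     # descale wants float, no subsampling
clip = core.descale.Debilinear(clip, 1280, 720)               # undo the assumed 720p -> 1080p bilinear upscale
clip = core.resize.Spline36(clip, format=video_in.format.id)  # back to the source pixel format
clip.set_output()                                             # mpv's own scalers/shaders upscale from here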

@yeezylife

> your filtering may not be realtime viable anymore.

That's why this kind of processing (if it was needed at all) was usually done by a BDRip release group. For example, "GA-REI -zero" by VCB-Studio.

@lextra2

lextra2 commented Jul 20, 2022

> Sadly almost no production has all elements at the same resolution, and a lot of them have to be descaled per-scene if a descale is even possible. Descaling without masking isn't a great idea, and that's where you lose enough performance that your filtering may not be realtime viable anymore. You can use mpv's vapoursynth filtering feature btw.

True. If you wanted perfection you'd add masking. But undoing bilinear/bicubic is enough for my needs.

And yeah, I guess I'll see if I can make the vapoursynth plugin work. But a shader would be more flexible.

@igv

igv commented Jul 21, 2022

@lextra2 No, not interested.

@Lorograch1

I followed the steps but I can't seem to get it working. It is not listed when I press Shift+i and then 2. Please help.

Placed the .glsl in C:\Users\_user_\AppData\Roaming\mpv\shaders\

Tried both of these usage parameters in mpv.conf:

glsl-shader="C:\Users\_user_\AppData\Roaming\mpv\shaders\SSimSuperRes.glsl"
glsl-shader="~~/SSimSuperRes.glsl"

@slashbeast

slashbeast commented Mar 9, 2023

@igv how does this shader compare to adaptive-sharpen.glsl? Are they mutually exclusive, or does it make sense to use both at the same time?

@yeezylife

yeezylife commented Mar 10, 2023
