Example ME1 Upscaling Script
#Import stuff here. Comment out what you don't need.
#import mxnet as mx
import mvsfunc as mvf
import muvsfunc as muf
import Alpha_VSFunctions as avsfunc
#import fvsfunc as fvf
import havsfunc as haf
import G41Fun as G41
#import lostfunc as lof
#import kagefunc as kage
import vsgan as VSGAN
import vapoursynth as vs
import xvs as xvs
from vapoursynth import core
#Limit VapourSynth's CPU optimizations; commonly used as a workaround when vs_mxnet misbehaves. Remove if you don't need it.
core.std.SetMaxCPU("none")
#Comment this out if mxnet isn't working, or if you don't need it
#core.std.LoadPlugin(r'MXNet/vs_mxnet.dll', altsearchpath=True)
#Set max cache size, in MB. If you have RAM to spare, remove the "#" in front of the line below and change the value.
#core.max_cache_size = 6000
#Arguments for the MXNet neural network. Run "_Select_Neural_Network_.bat" to change this automatically!
sr_args = dict(model_filename=r'../NeuralNetworks/MSRN/MSRN_2x', device_id=0, up_scale=2, is_rgb_model=True, pad=None, crop=None, pre_upscale=False)
#Change MXNet super resolution arguments here!
#Copy the dict so these edits don't also mutate sr_args
manual_sr_args = sr_args.copy()
manual_sr_args['block_w'] = 128
manual_sr_args['block_h'] = 128
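#You can also point the dict at a different pretrained model the same way; the path below is
#hypothetical, substitute one that actually exists in your NeuralNetworks folder:
#manual_sr_args['model_filename'] = r'../NeuralNetworks/ESPCN/ESPCN_2x'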
#Change the path to the video(s) you're working with!
clip = core.ffms2.Source(r"""C:\Program Files (x86)\Steam\steamapps\common\Mass Effect\BioGame\CookedPC\Movies\END_90_SovereignGood_CUT_01.bik""")
orig = core.ffms2.Source(r"""C:\Code\Python\VSSuperRes Helper\MeVids\END_90_SovereignGood_CUT_01.bik""")
#Save the source for comparison later
source = clip
#Resample to 16 bits, for higher quality processing
clip = mvf.Depth(clip, depth=16)
#KNLMeansCL is a denoiser that runs on the GPU. Unlike traditional denoisers, it can look at multiple frames and a wide area for noise.
#h is the denoising strength, d sets the number of previous/next frames to look at (temporal noise), a is the search radius (spatial noise)
#BM3D uses a TON of RAM and doesn't run on the GPU, but it's high quality and nice to blend with KNLMeansCL if your machine can handle it
#BM3D's radius1 is its temporal radius, like d in KNLMeansCL
#BM3DStrength = 3
#clip = mvf.BM3D(clip, radius1=2, sigma=[BM3DStrength]*5)
clip = core.knlm.KNLMeansCL(clip, a=2, h=2.0, d=2, device_type='gpu', device_id=0)
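#Sketch of blending both denoisers 50/50 instead of picking one (replaces the single call above;
#assumes your machine can run BM3D):
#pre = clip
#bm3d = mvf.BM3D(pre, radius1=2, sigma=[3]*5)
#knl = core.knlm.KNLMeansCL(pre, a=2, h=2.0, d=2, device_type='gpu', device_id=0)
#clip = core.std.Merge(bm3d, knl, weight=0.5)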
#Various deblocking functions, in order of speed from slowest to fastest, though lof and AutoDeblock can be buggy.
#Configured for strong deblocking below:
#clip = lof.fast_deblock(clip)
#clip = fvf.AutoDeblock(clip)
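#In Deblock_QED, quant1 sets the block-edge strength (havsfunc's default is 24, so 35 below is aggressive);
#quant2 would control block interiors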
clip = haf.Deblock_QED(clip, quant1=35)
clip = core.deblock.Deblock(clip, quant=27)
#Sharpening. Why not blend both?
sharpen1 = G41.LSFmod(clip)
sharpen2 = G41.DetailSharpen(clip)
clip = core.std.Merge(sharpen1, sharpen2)
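#std.Merge defaults to a 50/50 blend; weight biases it toward the second clip, e.g. this keeps 70% LSFmod:
#clip = core.std.Merge(sharpen1, sharpen2, weight=0.3)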
#Interesting OpenCV detail enhancement function
#Converts the clip to 8 bit RGB, which is fine, as VSGAN does that anyway
clip = avsfunc.OpenCV_Detail(clip, strength=27)
#VSGAN (PyTorch) AI upscaling. It'll resample your clip to 8 bit RGB!
#https://github.com/imPRAGMA/VSGAN/wiki
#Set "chunk" to True if you run out of VRAM
vsgan_device = VSGAN.VSGAN()
vsgan_device.load_model(model=r"""../ESRGANModels/1x_DeJpeg_Fatality_PlusULTRA_200000_G.pth""", scale=1)
clip = vsgan_device.run(clip=clip, chunk=False)
vsgan_device = VSGAN.VSGAN()
#scale must match the model's upscale factor (4x for this model)
vsgan_device.load_model(model=r"""../ESRGANModels/4x_SpongeBob_235000_G.pth""", scale=4)
clip = vsgan_device.run(clip=clip, chunk=False)
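#If a model exhausts VRAM, chunk mode processes each frame in pieces instead (can leave faint seams):
#clip = vsgan_device.run(clip=clip, chunk=True)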
#Alternative MXNet super resolution. There are dozens of pretrained models to pick from; use "_Select_Neural_Network_.bat"
#Note that some models are grayscale-only, so they won't work here
#https://github.com/WolframRhodium/muvsfunc/blob/master/muvsfunc.py#L4329
#clip = muf.super_resolution(clip, **manual_sr_args)
#A sharp downscaling filter, for when shrinking the output is necessary.
#SSIM is CPU only; dpid requires an Nvidia GPU
clip = mvf.Depth(clip, depth=32)
#clip = muf.SSIM_downsample(clip, w=2560, h=1440)
clip = xvs.dpidDown(clip, width=2560, height=1440)
#For Photoshop-like filters, see the "color" section here:
#http://vsdb.top/
#Optional extra denoising, to get rid of any flickering and to help compress the output for Bink
clip = core.knlm.KNLMeansCL(clip, d=3, a=3, h=2.2)
#Resize the source for comparison
source = core.resize.Point(source, width=clip.width, height=clip.height)
#Interleave the source and processed clip for easy comparison in VSEDIT's preview window.
#Comment this out if you aren't previewing
clip = mvf.Preview(clips=[core.text.Text(clip, "Processed"), core.text.Text(source, "Source"), core.text.Text(orig, "Original Upscale")])
#Write the images, if you want test PNGs to upload
#clip = core.imwri.Write(clip, imgformat="png", filename="favorite_test_on_citadel_frame%d.png", quality=100, overwrite=True)
#Motion interpolation to 60fps
#clip = haf.InterFrame(clip, Preset='medium', Tuning='weak', NewNum=60000, NewDen=1001, GPU=True)
#Export as YUV 420 8-bit
#Alternatively, you can do YUV 444 10-bit for maximum quality.
#You may need to set the colorspace
clip = mvf.ToYUV(clip, css="420", depth=8)
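#If colors look shifted, set the matrix explicitly (BT.709 here is an assumption; match your source):
#clip = mvf.ToYUV(clip, css="420", depth=8, matrix="709")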
#Final output
clip.set_output()