Batch variant in Python (file and name are your own variables):
import subprocess; subprocess.run(['ffmpeg', '-i', file, '-c:v', 'libopenjpeg', 'jpeg2000\\' + name + '.jp2'])
Convert image to jpeg2000
ffmpeg -i in.png -c:v libopenjpeg out.jp2
Hex Edit out.jp2
ffmpeg -i out.jp2 -c:v png out.png
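To script the hex-edit step, a minimal sketch (the offset and byte value are arbitrary; stay past the first few KB so the header survives):
printf '\x42' | dd of=out.jp2 bs=1 seek=20000 conv=notrunc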
General Edit
ffmpeg -i input.avi -c:v mpeg2video -g 999 -q:v 1 output.avi
(-g 999 = very long GOP, so almost no keyframes; -q:v 1 = highest quality)
*edit output.avi in avidemux/whatever*
ffmpeg -i output.avi -c:v h264 -preset medium -crf 24 final.mp4
Remove bFrames
-bf 0 (works with most encoders); for x265 use -x265-params bframes=0
GET FORMATS
ffmpeg -codecs | grep DEV
(D = decoding supported, E = encoding supported, V = video codec)
Get list of Containers
ffmpeg -formats | grep DE
Get Encoder options
ffmpeg -h encoder=X
(X = an encoder name from the codec list above, e.g. ffmpeg -h encoder=libx264)
sharpen abuse (run it in a loop)
ffmpeg -y -i IN.jpg -q:v 31 -vf unsharp=luma_amount=0.2 OUT.jpg
(ffmpeg can't overwrite its own input, so alternate filenames between passes)
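A minimal sketch of the loop, assuming bash (the pass count is arbitrary):
cp IN.jpg a.jpg
for i in $(seq 1 20); do
  ffmpeg -y -loglevel error -i a.jpg -q:v 31 -vf unsharp=luma_amount=0.2 b.jpg
  mv b.jpg a.jpg
done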
Displacement Maps
(remove the -crf and -b:v options for normal quality)
ffmpeg -i IN.mp4 -i IN.mp4 -i IN.mp4 -c:v hevc -crf 40 -b:v 20k -filter_complex "[0][1][2]displace=edge=wrap[middle];[middle]stereo3d=ar" -y res.mp4
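displace reads three inputs: source, x-map, y-map (above, the same clip drives all three). A sketch using a separate clip as the map; map.mp4 is hypothetical and must match IN.mp4's resolution:
ffmpeg -i IN.mp4 -i map.mp4 -filter_complex "[1]split[mx][my];[0][mx][my]displace=edge=wrap" -y res.mp4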
Get Frames
ffmpeg -i in.avi -q:v 0 imagename_%04d.jpg
convert image to video
(single image into 10 sec video)
ffmpeg -loop 1 -y -i sourceimage.jpg -t 10 -c:v codec-you-wanna-use video.avi
(from frames; -r is the input framerate, so 1/5 = one image every 5 seconds)
ffmpeg -r 1/5 -i img%03d.png -c:v libx264 -vf fps=25 -pix_fmt yuv420p out.mp4
Video to RAW for sonification
ffmpeg -ss 00:01:02 -t 5 -i IN.mp4 -f rawvideo -pix_fmt yuv420p result.yuv
-ss = start time
-t = duration in seconds after -ss
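The sonification itself happens outside ffmpeg. A minimal sketch with SoX, treating the raw frames as headerless 8-bit u-law audio (the rate and echos parameters are arbitrary); feed bent.yuv to the command below:
sox -t raw -e u-law -b 8 -c 1 -r 44100 result.yuv -t raw -e u-law -b 8 -c 1 -r 44100 bent.yuv echos 0.8 0.7 40 0.25 63 0.3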
RAW to Video (after sonification)
ffmpeg -f rawvideo -video_size 1280x720 -pix_fmt yuv420p -i result.yuv output.mp4
(video_size and pix_fmt must match the source video)
BELOW CREDIT oioiiooixiii:
STACK EFFECTS! ffplay gives a live preview; the same filtergraph works in ffmpeg, but encoded results will differ. (Note: newer FFmpeg builds renamed the 'difference128' blend mode to 'grainextract'.)
ffplay -i IN.mp4 -vf \
"
scale=-2:720,
tblend=all_mode=difference128,
tblend=all_mode=difference128,
tblend=all_mode=difference128,
spp=4:10,
tblend=all_mode=average,
tblend=all_mode=difference128,
tblend=all_mode=difference128,
tblend=all_mode=difference128,
spp=4:10,
tblend=all_mode=average,
tblend=all_mode=difference128,
tblend=all_mode=difference128,
tblend=all_mode=difference128,
spp=4:10,
tblend=all_mode=average,
tblend=all_mode=difference128,
tblend=all_mode=difference128,
tblend=all_mode=difference128
"
# Isolate motion-vectors using 'difference128' blend filter
# - add brightness, contrast, and scaling, to taste
ffplay \
-flags2 +export_mvs \
-i "video.mp4" \
-vf \
"
split[original],
codecview=mv=pf+bf+bb[vectors],
[vectors][original]blend=all_mode=difference128,
eq=contrast=7:brightness=-0.3,
scale=720:-2
"
# chromatic aberration in ffmpeg
# ffplay used for testing; filtergraph can be directly inserted into ffmpeg
ffplay \
-i video.mkv \
-vf \
"split=3[r][g][b];\
nullsrc=size=640x360[base1];\
nullsrc=size=640x360[base2];\
nullsrc=size=640x360[base3];\
[r]lutrgb=g=0:b=0[red];\
[g]lutrgb=r=0:b=0[green];\
[b]lutrgb=r=0:g=0[blue];\
[base1][red]overlay=x=10:shortest=1,format=rgb24[x];\
[base2][green]overlay=x=0:shortest=1,format=rgb24[y];\
[base3][blue]overlay=y=10:shortest=1,format=rgb24[z];\
[x][y]blend=all_mode='addition'[xy];\
[xy][z]blend=all_mode='addition'[xyz];\
[xyz]crop=630:350:10:10,scale=640:360:out_color_matrix=bt709"
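# A one-step alternative on newer builds (a sketch, assuming your FFmpeg has the
# rgbashift filter; the shift amounts are arbitrary):
ffplay -i video.mkv -vf "format=rgb24,rgbashift=rh=10:bv=10"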
# Averaged Key-Frames
# To extract keyframes from a video using FFmpeg
ffmpeg -i video.mp4 -vf "select=eq(pict_type\,I)" -vsync vfr frame-%08d.png
# To merge the images together with ImageMagick (IM7 uses 'magick' instead of 'convert')
convert *.png -evaluate-sequence mean output.png
# Pixel Array
# A trimmed-down version, using only one instance of FFmpeg.
# The disadvantage is the lack of a progress display.
# (tile=WxH must cover the clip's frame count: 640x220 = 140,800 frames here)
ffmpeg -i input.vid -frames 1 -vf "scale=1:1,tile=640x220" output.png
# OR v1
# The first instance of FFmpeg traverses the frames, the second concatenates them.
ffmpeg -y -i video.mkv -vf "scale=1:1" -c:v png -f image2pipe pipe:1 |\
ffmpeg -y -i pipe:0 -vf "tile=640x242" output.png 2> /dev/null
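# To size the tile grid, count the frames first (a sketch, assuming ffprobe is installed):
ffprobe -v error -count_frames -select_streams v:0 -show_entries stream=nb_read_frames -of csv=p=0 input.vid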
## ECHO!!!! by Rob Mac
## To deconstruct: we have the normal input, then use seeking to offset each additional input of the same file:
-i k1.mkv -ss 00:00:00.25 -i k1.mkv, and so on. I'm using offsets of 250ms (0.25 seconds) but you can use whatever
works for you; ffmpeg will try its best to match it. Then we use [0][1]blend[a] and [1][2]blend[b] to make the two
echoes, nothing too unusual there. After that, each needs a stage to blend back together, so we go [a][b]blend[p] to
mix down our echoes, then [0][p]blend[o] to mix back into the main signal, and [o] to output. If we did more, say 4 instead
of 2, we would do [0][1]blend[a], [1][2]blend[b], [2][3]blend[c],
then [b][c]blend[mix1], [a][b]blend[mix2], [mix1][mix2]blend[echo], [0][echo]blend[o] instead. I probably wouldn't want to
use this for more than a few echoes, honestly, just because building the filter topology is a bit of a pain, but it encodes
pretty quickly and does some fun stuff in different blend modes.
ffmpeg -i k1.mkv -ss 00:00:00.25 -i k1.mkv -ss 00:00:00.50 -i k1.mkv -filter_complex "[0][1]blend=all_mode=lighten[a];[1][2]blend=all_mode=lighten[b];[a][b]blend=all_mode=lighten[p];[0][p]blend=all_mode=lighten[o]" -map "[o]" -c:v h264 -crf 15 kd.mp4
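## The 4-echo topology described above, spelled out as a runnable sketch (since [b] feeds two blends it needs a split; offsets and blend mode are arbitrary):
ffmpeg -i k1.mkv -ss 00:00:00.25 -i k1.mkv -ss 00:00:00.50 -i k1.mkv -ss 00:00:00.75 -i k1.mkv -filter_complex "[0][1]blend=all_mode=lighten[a];[1][2]blend=all_mode=lighten[b];[2][3]blend=all_mode=lighten[c];[b]split[b1][b2];[b1][c]blend=all_mode=lighten[mix1];[a][b2]blend=all_mode=lighten[mix2];[mix1][mix2]blend=all_mode=lighten[echo];[0][echo]blend=all_mode=lighten[o]" -map "[o]" -c:v h264 -crf 15 kd4.mp4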