#!/usr/bin/env bash
# Demo: compute a VMAF quality score for a transcoded clip using the
# Netflix VMAF Docker image and a jrottenberg/ffmpeg container.
set -euo pipefail

# Clone the project (Netflix later publishes a Docker image to Docker Hub).
git clone --depth 1 https://github.com/Netflix/vmaf.git vmaf
cd vmaf

# Build the VMAF image locally.
docker build -t vmaf .

# Fetch the pivot (reference) video.
# NOTE(review): this URL has been reported to return HTTP 403 — mirror the
# file elsewhere if the download fails.
wget http://www.sample-videos.com/video/mp4/360/big_buck_bunny_360p_5mb.mp4

# Generate a transcoded (distorted) variant: VP9 video at 500 kbps, Vorbis audio.
# BUG FIX: the original used $(PWD), which tries to *execute* a command named
# "PWD" and fails; use "$(pwd)" (quoted, so paths with spaces survive).
docker run --rm -v "$(pwd)":/files jrottenberg/ffmpeg \
  -i /files/big_buck_bunny_360p_5mb.mp4 \
  -c:v libvpx-vp9 -b:v 500K -c:a libvorbis \
  /files/big_buck_bunny_360p.webm

# Extract raw yuv420p frames from both the reference and the distorted video
# (run_vmaf consumes raw YUV, not containers).
docker run --rm -v "$(pwd)":/files jrottenberg/ffmpeg \
  -i /files/big_buck_bunny_360p_5mb.mp4 -c:v rawvideo -pix_fmt yuv420p \
  /files/360p_mpeg4-v_1000.yuv
docker run --rm -v "$(pwd)":/files jrottenberg/ffmpeg \
  -i /files/big_buck_bunny_360p.webm -c:v rawvideo -pix_fmt yuv420p \
  /files/360p_vp9_700.yuv

# Compute the VMAF score (arguments: pixel format, width, height,
# reference YUV, distorted YUV), emitting JSON.
docker run --rm -v "$(pwd)":/files vmaf run_vmaf yuv420p 640 368 \
  /files/360p_mpeg4-v_1000.yuv /files/360p_vp9_700.yuv --out-fmt json

# Same check, but with an explicitly selected pre-trained model.
docker run --rm -v "$(pwd)":/files vmaf run_vmaf yuv420p 640 368 \
  /files/360p_mpeg4-v_1000.yuv /files/360p_vp9_700.yuv --out-fmt json \
  --model /files/resource/model/nflxall_vmafv4.pkl
# --- GitHub page chrome captured when this snippet was scraped (kept as comments) ---
# "Sign up for free to join this conversation on GitHub."
# "Already have an account? Sign in to comment."
# Minimized user comment worth noting: "I get a 403 on the wget." — the
# sample-videos.com download URL may be blocked; mirror the file if needed.