#!/usr/bin/env python
"""
Dashify
Usage:
dashify <input> [options] <output-dir>
dashify (-h | --help)
dashify --version
Options:
-p --preset=PRESET Set the libx264 -preset [default: ultrafast]
-f --frag-time=TIME Set the time, in seconds, which we will try to employ
for the duration of fragments. [default: 5]
-k --max-keyint=INT Use to set a maximum of frames between keyframes for
fragmentation. Wins conflicts with --frag-time
[default: 250]
-t --timecut=TIME For quick testing, set the amount of time of a video to
encode, in seconds.
-P --pix-format=FMT Set the pixel format for the output video stream.
[default: yuv444p]
-n --no-run Enable for the program to only print the commands
it generates and not run them.
Authors:
Paul Barton (SavinaRoja)
"""
__author__ = 'Paul Barton (SavinaRoja) - pablo.barton@gmail.com'
__version__ = '0.0.2'
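
# Example invocation (hypothetical file and directory names):
#   dashify source.mkv --preset veryfast --frag-time 4 ./dash-output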

# stdlib imports
import json
from collections import OrderedDict
import os
#import shutil
import subprocess
from collections import namedtuple

# External library imports
from docopt import docopt  # For the easy interface

# Remove any OS biases about how executables will be invoked
FFMPEG = '/usr/local/bin/ffmpeg'
MP4BOX = '/home/pablo/workspace/gpac/mp4box'
BENTO_MP4DASH = '/tmp/dash/mp4-dash.py'
NULL = '/dev/null'  # 'NUL' on Windows

# Do we also need to have audio specs?
Spec = namedtuple('Spec', ['width', 'height', 'bitrate'])
specs = [Spec(480, 270, '400k'),
         Spec(640, 360, '1000k'),
         Spec(960, 540, '3000k'),
         Spec(1280, 720, '5000k'),
         Spec(1920, 1080, '10000k'),
         Spec(3840, 2160, '15000k'),
         ]
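
# For example, Spec(1280, 720, '5000k') yields a 1280x720 rendition encoded at
# a target bitrate of 5000 kb/s (skipped when the input is smaller than that).
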
if __name__ == '__main__':
    args = docopt(__doc__, version='Dashify {}'.format(__version__))
    #print(args)
    #import sys
    #sys.exit(0)

    # Get information on the video stream:
    ffprobe_cmd = ['ffprobe', '-i', args['<input>'], '-show_streams',
                   '-select_streams', 'v',
                   '-print_format', 'json',
                   '-hide_banner', '-v', 'quiet']
    vidstream = json.loads(subprocess.check_output(ffprobe_cmd),
                           object_pairs_hook=OrderedDict)['streams'][0]

    # We want to know the dimensions of the input so we can decide which video
    # representations to use. We also need to know the framerate so we can set
    # the keyint for the segments.
    input_width, input_height = int(vidstream['width']), int(vidstream['height'])
    input_framerate = vidstream['r_frame_rate']
    if len(input_framerate.split('/')) == 2:
        n, d = input_framerate.split('/')
        input_framerate = float(n) / float(d)
    else:
        input_framerate = float(input_framerate)
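
    # Note: ffprobe typically reports r_frame_rate as a ratio string such as
    # '30000/1001', which the branch above converts to roughly 29.97 fps.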

    # Given the desired fragment time and framerate, how many frames should
    # be in a fragment/segment. Cannot exceed --max-keyint.
    target_frag_time = float(args['--frag-time'])
    args['--max-keyint'] = int(args['--max-keyint'])
    frames_per_fragment = int(target_frag_time * input_framerate)
    if frames_per_fragment > args['--max-keyint']:
        frames_per_fragment = args['--max-keyint']

    # frag_duration will be important to know for our audio fragmentation,
    # in microseconds.
    frag_duration = (frames_per_fragment / input_framerate) * 1000000.0
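    # For example, with a 25 fps input and the default --frag-time of 5 this
    # gives frames_per_fragment = int(5 * 25) = 125 (below the default
    # --max-keyint of 250) and frag_duration = (125 / 25) * 1000000
    # = 5000000 microseconds.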

    outputs = []
    for spec in specs:
        # Skip the specs that are larger than the input
        if spec.width > input_width or spec.height > input_height:
            continue
        scale_w, scale_h = str(spec.width), str(spec.height)
        if spec.width == input_width:
            scale_w = '-1'
        if spec.height == input_height:
            scale_h = '-1'

        # Start the command to build up
        command = [FFMPEG, '-i', args['<input>']]
        if args['--timecut'] is not None:
            command += ['-t', args['--timecut']]
        command += ['-c:v', 'libx264', '-preset', args['--preset'],
                    '-b:v', spec.bitrate,
                    '-an',
                    '-x264-params', 'keyint={f}:min-keyint={f}:no-scenecut'.format(f=frames_per_fragment),
                    '-movflags', 'frag_keyframe+empty_moov',
                    '-pix_fmt', args['--pix-format'], '-write_tmcd', '0',
                    '-vf', 'scale={}:{}'.format(scale_w, scale_h)
                    ]
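
        # With frames_per_fragment = 125 (from the example above) the x264
        # parameter string reads 'keyint=125:min-keyint=125:no-scenecut',
        # forcing keyframes at a fixed interval so that fragment boundaries
        # line up across all renditions.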

        # Compute the output path
        base = os.path.basename(args['<input>'])
        root = os.path.splitext(base)[0]
        output = os.path.join(args['<output-dir>'],
                              root + '-{}x{}.mp4'.format(spec.width, spec.height))
        outputs.append(output)

        # Broken down into two separate commands and calls, because OSs vary
        # in how commands can be chained.
        first_pass = command + ['-pass', '1', '-f', 'mp4', NULL, '-y']
        second_pass = command + ['-pass', '2', '-f', 'mp4', output]
        if args['--no-run']:
            print(' '.join(first_pass))
            print(' '.join(second_pass) + '\n')
        else:
            subprocess.call(first_pass)
            subprocess.call(second_pass)

    # Create the audio output
    audio_outputs = []
    audio_output = os.path.join(args['<output-dir>'], root + '-audio.mp4')
    audio_outputs.append(audio_output)
    audio_copy = [FFMPEG, '-i', args['<input>']]
    if args['--timecut'] is not None:
        audio_copy += ['-t', args['--timecut']]
    audio_copy += ['-c:a', 'aac', '-vn',
                   '-b:a', '128k',
                   '-frag_duration', str(frag_duration),
                   '-f', 'mp4', audio_output, '-y']
    if args['--no-run']:
        print(' '.join(audio_copy))
    else:
        subprocess.call(audio_copy)
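
    # As a concrete (hypothetical) example, for an input named 'movie.mkv' and
    # the numbers worked out above, the assembled audio command resembles:
    #   /usr/local/bin/ffmpeg -i movie.mkv -c:a aac -vn -b:a 128k
    #       -frag_duration 5000000.0 -f mp4 <output-dir>/movie-audio.mp4 -y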

    # At this point we have produced all of the fragmented mp4 files for use
    # in MPEG-DASH and HLS formats; time to process them and make manifests!
    finalize = [BENTO_MP4DASH, '--force', '--hls'] + outputs + audio_outputs
    if args['--no-run']:
        print(' '.join(finalize))
    else:
        subprocess.call(finalize)
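
    # The finalize step hands every rendition to Bento4's mp4-dash.py, e.g.
    # (hypothetical names):
    #   /tmp/dash/mp4-dash.py --force --hls movie-480x270.mp4 ... movie-audio.mp4
    # which packages the inputs and writes the DASH MPD (plus, with --hls, HLS
    # playlists) into its output directory.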

    # TODO: look into producing output without splitting if desired; also set
    # up the call to mp4-dash.py with --exec-dir, and see how we want the
    # output dir to be set up. If the files get split, then they are kind of
    # temp files anyway and we should just use the output-dir set for this
    # script.