Skip to content

Instantly share code, notes, and snippets.

@ZoomTen
Last active August 8, 2020 05:33
Show Gist options
  • Save ZoomTen/6376825ba79209a94dddca41b32f4e1f to your computer and use it in GitHub Desktop.
from argparse import ArgumentParser
import moviepy.editor as mp
class EditBase:
    """Shared constants and geometry helpers for the video-edit classes."""

    def __init__(self):
        # Canonical output resolutions as (width, height) in pixels.
        self.YOUTUBE_SIZE = (854, 480)
        self.INSTAGRAM_SIZE = (480, 480)

    def relative_to(self, target_res, position):
        """Map fractional (x, y) coordinates onto a pixel resolution.

        Both arguments are 2-tuples: each fraction in ``position`` is
        scaled by the matching dimension of ``target_res`` and rounded
        to the nearest whole pixel.
        """
        return tuple(
            round(dimension * fraction)
            for dimension, fraction in zip(target_res, position)
        )
class WhatHow(EditBase):
    """Build a "WHAT / HOW" meme edit.

    Freezes the last frame of the input video (or uses a supplied still
    image), shrinks it inside white/black picture-frame borders, overlays
    the top/bottom captions, and plays the Sanctuary Guardian sting for
    the duration of the freeze.
    """

    def __init__(self, args):
        """args: an argparse.Namespace with the attributes
        input, output, top, bottom, crf, no_prepend and image."""
        super().__init__()
        self.TARGET_SIZE = self.INSTAGRAM_SIZE
        self.args = args

    def process(self):
        """Render the edit described by ``self.args`` to ``self.args.output``."""
        sequence = []
        # The music clip dictates the length of the freeze-frame section.
        audio = mp.AudioFileClip('audio/sanctuary-guardian.ogg')
        blank_video = mp.ColorClip(color=(0, 0, 0), duration=audio.duration,
                                   size=self.TARGET_SIZE)
        if self.args.image:
            # Input is already a still image; use it directly as the frozen picture.
            freeze_frame = mp.ImageClip(self.args.input, duration=audio.duration)
        else:
            orig_clip = mp.VideoFileClip(self.args.input).resize(newsize=self.TARGET_SIZE)
            if not self.args.no_prepend:
                # Play the original clip before the freeze-frame section.
                sequence.append(orig_clip)
            # Grab a frame slightly before the end so we never read past the
            # final frame of the source.
            freeze_frame = orig_clip.to_ImageClip(orig_clip.duration - 0.1,
                                                  duration=audio.duration)
        # Picture-frame geometry, expressed as fractions of the target resolution.
        frame_size = (0.8, 0.6)
        gap_size = 0.005
        # White outer border, black inner border, then the picture itself —
        # each layer slightly smaller than the previous one.
        border_white = mp.ColorClip(color=(255, 255, 255),
                                    duration=audio.duration,
                                    size=self.relative_to(self.TARGET_SIZE, frame_size))
        border_black = mp.ColorClip(color=(0, 0, 0),
                                    duration=audio.duration,
                                    size=self.relative_to(
                                        self.TARGET_SIZE,
                                        tuple(size - gap_size for size in frame_size)))
        freeze_frame = freeze_frame.resize(
            self.relative_to(self.TARGET_SIZE,
                             tuple(size - (gap_size * 2) for size in frame_size)))
        layers = [blank_video, border_white, border_black, freeze_frame]
        # Nudge each successive layer down a hair so the borders stay visible.
        for i in range(len(layers)):
            layers[i] = layers[i].set_position(
                ("center", 0.1 + (gap_size * i * 0.5)), relative=True)
        top_text = mp.TextClip(self.args.top, color='white',
                               font="Times-New-Roman", fontsize=36)
        top_text = top_text.set_position(("center", frame_size[1] + 0.15), relative=True)
        top_text = top_text.set_duration(audio.duration)
        bottom_text = mp.TextClip(self.args.bottom, color='white',
                                  font="Times-New-Roman", fontsize=18)
        bottom_text = bottom_text.set_position(("center", frame_size[1] + 0.3), relative=True)
        bottom_text = bottom_text.set_duration(audio.duration)
        layers.append(top_text)
        layers.append(bottom_text)
        part = mp.CompositeVideoClip(layers, size=self.TARGET_SIZE)
        part = part.set_audio(audio)
        sequence.append(part)
        final = mp.concatenate_videoclips(sequence)
        # NOTE(review): clips are never explicitly close()d; moviepy cleans up
        # on process exit, which is acceptable for a one-shot CLI script.
        final.write_videofile(self.args.output, fps=30, codec='libx264',
                              audio_codec='aac',
                              ffmpeg_params=['-crf', str(self.args.crf)])
if __name__ == '__main__':
    # Command-line entry point: collect the options and run the edit.
    parser = ArgumentParser(
        description="freeze frame at the very end of the video and plays music"
    )
    parser.add_argument('-i', '--input', required=True, help='input video')
    parser.add_argument('-o', '--output', required=True, help='output video')
    parser.add_argument('-t', '--top', default="WHAT", help='top text')
    parser.add_argument('-b', '--bottom', default="HOW", help='bottom text')
    parser.add_argument('-c', '--crf', type=int, default=36, help='CRF')
    parser.add_argument('-n', '--no-prepend', action='store_true',
                        help="don't prepend the original clip")
    parser.add_argument('-j', '--image', action='store_true',
                        help="is an image")
    WhatHow(parser.parse_args()).process()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment