Skip to content

Instantly share code, notes, and snippets.

View aravindpai's full-sized avatar

Aravind Pai aravindpai

View GitHub Profile
# --- Frame preprocessing: load a frame, blur, threshold, find contours ---
# NOTE(review): the original paste listed these statements out of execution
# order (`mask` was consumed before it was created, `gray` before it was
# defined); restored here to runnable order.

# Read one frame from disk (BGR); assumes `frames` holds sorted file names.
img = cv2.imread('frames/' + frames[10])

# Single grayscale channel for thresholding.
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Heavy Gaussian blur suppresses fine texture so the threshold below
# isolates only the large bright regions.
gray = cv2.GaussianBlur(gray, (25, 25), 0)

plt.figure(figsize=(5, 10))
plt.imshow(gray, cmap='gray')

# Binarize: pixels brighter than 200 become 255, everything else 0.
_, mask = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)

plt.figure(figsize=(5, 5))
plt.imshow(mask, cmap='gray')

# cv2.findContours returns (image, contours, hierarchy) in OpenCV 3 but only
# (contours, hierarchy) in OpenCV 4 — indexing from the end works for both,
# where the original 3-tuple unpacking raises ValueError on OpenCV 4.
contours = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
# --- Scene-change detection ---
# `nonzero[i]` (computed elsewhere in this file) is the count of pixels that
# changed between consecutive frames i and i+1; a large jump marks a cut.
# NOTE(review): 15 * 10e3 == 150000 (10e3 is 1e4) — confirm this was not
# intended to be 15 * 1e3.
threshold = 15 * 10e3

# Default: keep every frame. The original left `scene_change_idx` undefined
# when no pair exceeded the threshold, which raised NameError on the slice.
scene_change_idx = len(frames) - 1

for i in range(len(images) - 1):
    if nonzero[i] > threshold:
        scene_change_idx = i
        break

# Keep only the frames up to (and including) the first scene change.
frames = frames[:(scene_change_idx + 1)]
# --- Per-frame-pair difference counts ---
# For each consecutive pair of (grayscale) frames, count how many pixels
# changed by more than 50 intensity levels. (Loop body re-indented; the
# original paste had lost its indentation.)
nonzero = []
for i in range(len(images) - 1):
    # Absolute per-pixel difference between consecutive frames.
    mask = cv2.absdiff(images[i], images[i + 1])
    # Binarize: differences above 50 count as "changed" (255), rest 0.
    _, mask = cv2.threshold(mask, 50, 255, cv2.THRESH_BINARY)
    # count_nonzero works on the 2-D mask directly; ravel() was redundant.
    nonzero.append(np.count_nonzero(mask))

# x-axis values (one per frame pair) for plotting the change counts.
x = np.arange(0, len(images) - 1)
import matplotlib.pyplot as plt
import cv2
import numpy as np
import os
import re
#listing down all the file names
frames = os.listdir('frames/')
# Sort numerically by the digits embedded in each name (frame10.jpg must sort
# after frame2.jpg, which a plain lexicographic sort gets wrong).
# r'\D' is a raw string: bare '\D' is an invalid escape sequence and emits a
# SyntaxWarning on Python 3.12+.
frames.sort(key=lambda f: int(re.sub(r'\D', '', f)))
#defining function to read MIDI files
def read_midi(file):
    """Load a MIDI file and collect its notes.

    NOTE(review): this excerpt is truncated — `notes` is never populated and
    nothing is returned within the visible lines; confirm against the full
    source. `converter` is presumably music21's converter module, imported
    elsewhere in the file — verify.

    Parameters
    ----------
    file : str
        Path of the .mid file to parse.
    """
    print("Loading Music File:",file)
    # Accumulator for extracted notes/chords (filled in code not shown here).
    notes=[]
    notes_to_parse = None
    #parsing a midi file
    midi = converter.parse(file)
# Teacher-forced seq2seq training: the decoder input is the target shifted
# right (y[:, :-1]) and the label is the target shifted left (y[:, 1:]),
# reshaped to (samples, timesteps, 1) for a per-timestep loss. `es` is
# presumably an EarlyStopping callback — confirm where it is defined.
history=model.fit([x_tr,y_tr[:,:-1]], y_tr.reshape(y_tr.shape[0],y_tr.shape[1], 1)[:,1:] ,epochs=50,callbacks=[es],batch_size=512, validation_data=([x_val,y_val[:,:-1]], y_val.reshape(y_val.shape[0],y_val.shape[1], 1)[:,1:]))
import sounddevice as sd
import soundfile as sf

# Record one second of 16 kHz mono audio and save it to 'yes.wav'.
samplerate = 16000
duration = 1  # seconds
filename = 'yes.wav'

print("start")
# blocking=True makes rec() wait until the full `duration` has been captured,
# so `mydata` is complete when the call returns.
mydata = sd.rec(int(samplerate * duration), samplerate=samplerate,
                channels=1, blocking=True)
print("end")

# Bug fix: the original imported soundfile and defined `filename` but never
# wrote the recording to disk — the captured audio was silently discarded.
sf.write(filename, mydata, samplerate)
@aravindpai
aravindpai / build.py
Last active September 2, 2021 03:43
Music Generation
# Build the WaveNet-style model (simple_wavenet is defined elsewhere in the
# gist) and train it on the full dataset. `mc` is presumably a
# ModelCheckpoint callback — confirm where it is created.
model = simple_wavenet()
model.fit(X,np.array(y), epochs=300, batch_size=128,callbacks=[mc])