Music Generation
import keras
# save a checkpoint of the full model every 50 epochs
mc = keras.callbacks.ModelCheckpoint('model{epoch:03d}.h5', save_weights_only=False, period=50)
model = simple_wavenet()
model.fit(X, np.array(y), epochs=300, batch_size=128, callbacks=[mc])
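# Note: in newer tf.keras the `period` argument is deprecated; `save_freq`
# counts batches rather than epochs, so an approximate equivalent (an
# assumption, version-dependent) is save_freq=50 * steps_per_epoch.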
def convert_to_midi(prediction_output):
    offset = 0
    output_notes = []
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)
        # specify the duration between two consecutive notes
        offset += 0.5
        # offset += random.uniform(0.5, 0.9)
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='music.mid')
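# Example input (hypothetical): convert_to_midi(['E4', '2.5.9', 'B4'])
# writes music.mid containing the note E4, the chord encoded by the
# '.'-joined integers 2, 5, 9, then B4, spaced 0.5 offset units apart.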
# select a random chunk of the input data as the seed for the first iteration
start = np.random.randint(0, len(X)-1)
pattern = X[start]
# load the best model
model = load_model('model300.h5')
# generate and save music
music = generate_music(model, pitch, no_of_timesteps, pattern)
convert_to_midi(music)
def generate_music(model, pitch, no_of_timesteps, pattern):
    int_to_note = dict((number, note) for number, note in enumerate(pitch))
    prediction_output = []
    # generate 50 elements
    for note_index in range(50):
        # reshape the array to feed it into the model
        input_ = np.reshape(pattern, (1, len(pattern), 1))
        # predict the class probabilities and pick the most likely index
        proba = model.predict(input_, verbose=0)
        index = np.argmax(proba)
        # convert the integer back to its note/chord string
        pred = int_to_note[index]
        prediction_output.append(pred)
        # append the normalized prediction and drop the first value,
        # so the input window keeps a fixed length
        pattern = list(pattern)
        pattern.append(index / float(n_vocab))
        pattern = pattern[1:len(pattern)]
    return prediction_output
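# Sliding-window example (hypothetical values): if the current window is
# [0.12, 0.45, ..., 0.33] and the model predicts index 42, the next window
# becomes [0.45, ..., 0.33, 42 / n_vocab] -- shifted one step, same length.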
#dealing with midi files
from music21 import *
#array processing
import numpy as np
import os
#random number generator
import random
#keras for building deep learning model
from keras.layers import *
from keras.models import *
import keras.backend as K
def lstm():
    model = Sequential()
    model.add(LSTM(128, return_sequences=True))
    model.add(LSTM(128))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    return model
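# The first LSTM layer here is built lazily on the first fit() call; passing
# input_shape=(no_of_timesteps, 1) to it would build the model eagerly
# (optional, and an assumption about the intended input shape).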
# length of an input sequence
no_of_timesteps = 128
# no. of unique notes
n_vocab = len(set(notes))
# all the unique notes, sorted
pitch = sorted(set(item for item in notes))
# assign a unique integer to every note
note_to_int = dict((note, number) for number, note in enumerate(pitch))
# prepare input and output sequences
X = []
y = []
for notes in all_notes:
    for i in range(0, len(notes) - no_of_timesteps, 1):
        input_ = notes[i:i + no_of_timesteps]
        output = notes[i + no_of_timesteps]
        X.append([note_to_int[note] for note in input_])
        y.append(note_to_int[output])
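# Example (hypothetical, no_of_timesteps = 3): a song encoded as
# [5, 9, 2, 7, 4] yields windows X = [[5, 9, 2], [9, 2, 7]] and
# targets y = [7, 4] -- each target is the note that follows its window.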
# read all the filenames
files = [i for i in os.listdir() if i.endswith(".mid")]
# read each midi file
all_notes = []
for i in files:
    all_notes.append(read_midi(i))
# notes and chords of all the midi files
notes = [element for notes in all_notes for element in notes]
def read_midi(file):
    notes = []
    notes_to_parse = None
    # parse the midi file
    midi = converter.parse(file)
    # group the stream based on instrument
    s2 = instrument.partitionByInstrument(midi)
    # loop over all the instruments
    for part in s2.parts:
        # select elements of only the piano
        if 'Piano' in str(part):
            notes_to_parse = part.recurse()
            # check whether a particular element is a note or a chord
            for element in notes_to_parse:
                if isinstance(element, note.Note):
                    notes.append(str(element.pitch))
                elif isinstance(element, chord.Chord):
                    notes.append('.'.join(str(n) for n in element.normalOrder))
    return notes
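# Example usage (hypothetical file name):
# read_midi('sonata.mid') -> ['C4', 'E4', '0.4.7', ...]
# single pitch strings for notes, '.'-joined pitch classes for chords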
#reshaping
X = np.reshape(X, (len(X), no_of_timesteps, 1))
#normalizing the inputs
X = X / float(n_vocab)
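# X now has shape (num_samples, no_of_timesteps, 1), with values in [0, 1)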
from numpy.random import seed
seed(1)
# TF 1.x API; in TF 2.x use tf.random.set_seed(2) instead
from tensorflow import set_random_seed
set_random_seed(2)
K.clear_session()
def simple_wavenet():
    no_of_kernels = 64
    num_of_blocks = int(np.sqrt(no_of_timesteps)) - 1  # no. of stacked causal conv1d layers
    model = Sequential()
    for i in range(num_of_blocks):
        model.add(Conv1D(no_of_kernels, 3, dilation_rate=(2**i), padding='causal', activation='relu'))
    model.add(Conv1D(1, 1, activation='relu', padding='causal'))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(Dense(n_vocab, activation='softmax'))
    model.compile(loss='sparse_categorical_crossentropy', optimizer='adam')
    return model
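# Receptive field check: kernel size 3 with dilations 1, 2, ..., 2**(b-1)
# gives 1 + 2 * (2**b - 1) past timesteps of context; with b = 10 blocks
# (no_of_timesteps = 128) that is 2047 >= 128, so the final conv output
# sees the entire input window.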