@GeoffChurch
Forked from karpathy/min-char-rnn.py
Last active February 21, 2016 04:36
Minimal character-level language model with a Vanilla Recurrent Neural Network, in Python/numpy
"""
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy). Edited by Geoffrey Churchill
BSD License
"""
import numpy as np
from sys import argv
try:
    num_steps = int(argv[3]) # number of training iterations
except IndexError:
    num_steps = 100
# data I/O
try:
    chunk_size = int(argv[2]) # number of bytes per chunk
except IndexError:
    chunk_size = 1
print('reading',argv[1])
with open(argv[1], 'rb') as f:
    data = f.read()
print('chunking data')
data = [data[i:i+chunk_size] for i in range(0, len(data), chunk_size)]
print('making set of chunks')
chunks = set(data)
data_size, vocab_size = len(data), len(chunks)
print('data has {0} chunks, {1} unique.'.format(data_size, vocab_size))
print('compiling lookup tables')
char_to_ix = { ch:i for i,ch in enumerate(chunks) }
ix_to_char = { i:ch for i,ch in enumerate(chunks) }
# hyperparameters
hidden_size = 128 # size of hidden layer of neurons
seq_length = 128 # number of steps to unroll the RNN for
learning_rate = lambda i: 0.1 # constant learning rate; a decaying alternative would be 10/(i+1)
# model parameters
Wxh = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden
Whh = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias
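# For reference (an added note), the recurrence these parameters implement below is:
#   h[t] = np.tanh(np.dot(Wxh, x[t]) + np.dot(Whh, h[t-1]) + bh)   # new hidden state
#   y[t] = np.dot(Why, h[t]) + by                                  # logits over chunks
#   p[t] = np.exp(y[t]) / np.sum(np.exp(y[t]))                     # softmax probabilities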
def lossFun(inputs, targets, hprev):
"""
inputs,targets are both list of integers.
hprev is Hx1 array of initial hidden state
returns the loss, gradients on model parameters, and last hidden state
"""
xs, hs, ys, ps = {}, {}, {}, {}
hs[-1] = np.copy(hprev)
loss = 0
# forward pass
for t in range(len(inputs)):
xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
xs[t][inputs[t]] = 1
hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state
ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars
ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars
loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
# backward pass: compute gradients going backwards
dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
dbh, dby = np.zeros_like(bh), np.zeros_like(by)
dhnext = np.zeros_like(hs[0])
for t in reversed(range(len(inputs))):
dy = np.copy(ps[t])
dy[targets[t]] -= 1 # backprop into y
dWhy += np.dot(dy, hs[t].T)
dby += dy
dh = np.dot(Why.T, dy) + dhnext # backprop into h
dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
dbh += dhraw
dWxh += np.dot(dhraw, xs[t].T)
dWhh += np.dot(dhraw, hs[t-1].T)
dhnext = np.dot(Whh.T, dhraw)
for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
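# Rough usage sketch for lossFun (illustrative only; the _-prefixed names are
# placeholders, and this assumes the globals defined above):
#   _inputs  = [char_to_ix[ch] for ch in data[:seq_length]]
#   _targets = [char_to_ix[ch] for ch in data[1:seq_length+1]]
#   _loss, _dWxh, _dWhh, _dWhy, _dbh, _dby, _h = lossFun(_inputs, _targets,
#                                                        np.zeros((hidden_size, 1)))
#   # each gradient matches its parameter's shape; _h is the carried hidden state
#   assert _dWxh.shape == Wxh.shape and _h.shape == (hidden_size, 1)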
def sample(h, seed_ix, n):
"""
sample a sequence of integers from the model
h is memory state, seed_ix is seed letter for first time step
"""
x = np.zeros((vocab_size, 1))
x[seed_ix] = 1
for t in range(n):
if not t&255:
print('{0}%'.format(100*t/n))
h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
y = np.dot(Why, h) + by
p = np.exp(y) / np.sum(np.exp(y))
ix = np.random.choice(range(vocab_size), p=p.ravel())
x = np.zeros((vocab_size, 1))
x[ix] = 1
yield ix
p = 0 # pointer into the chunked training data
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0: a uniform predictor scores -log(1/vocab_size) per step over seq_length steps
for n in range(num_steps):
    # prepare inputs (we're sweeping from left to right in steps seq_length long)
    if p+seq_length+1 >= len(data) or n == 0:
        hprev = np.zeros((hidden_size, 1)) # reset RNN memory
        p = 0 # go from start of data
    inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
    targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]
    """
    # sample from the model now and then
    if n % 100 == 0:
        sample_ix = sample(hprev, inputs[0], 200)
        txt = ''.join(str(ix_to_char[ix]) for ix in sample_ix)
        print('----\n {0} \n----'.format(txt))
    """
    # forward seq_length chunks through the net and fetch gradient
    loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
    smooth_loss = smooth_loss * 0.999 + loss * 0.001
    print('iter {0}, loss: {1}'.format(n, smooth_loss)) # print progress
    # perform parameter update with Adagrad
    for param, dparam, mem in zip([Wxh, Whh, Why, bh, by],
                                  [dWxh, dWhh, dWhy, dbh, dby],
                                  [mWxh, mWhh, mWhy, mbh, mby]):
        mem += dparam * dparam
        param += -learning_rate(n) * dparam / np.sqrt(mem + 1e-8) # adagrad update
    p += seq_length # move data pointer
num_out_chunks = int(argv[4])
print('writing {0} chunks of {1} bytes (total={2} bytes) to {3}'.format(num_out_chunks, chunk_size, num_out_chunks*chunk_size, argv[5]))
with open(argv[5], 'wb') as of:
    of.write(b''.join(ix_to_char[ix] for ix in sample(hprev, inputs[0], num_out_chunks)))