@odanado
Created June 18, 2016 09:42
FizzBuzz: a small multi-layer perceptron, written in Chainer, that learns FizzBuzz from binary-encoded integers.
#!/usr/bin/env python
from __future__ import print_function
import argparse
import time
import numpy as np
import six
import chainer
from chainer import computational_graph
from chainer import cuda
import chainer.links as L
from chainer import optimizers
from chainer import serializers
import net
# Represent each input by an array of its binary digits.
def binary_encode(i, num_digits):
    return np.array([i >> d & 1 for d in range(num_digits)])
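
# For illustration (example added here, not in the original gist):
# binary_encode(6, 4) -> array([0, 1, 1, 0]), least significant bit first.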
# One-hot encode the desired outputs: [number, "fizz", "buzz", "fizzbuzz"]
def fizz_buzz_encode(i):
    if i % 15 == 0:
        return 3
    elif i % 5 == 0:
        return 2
    elif i % 3 == 0:
        return 1
    else:
        return 0

def fizz_buzz(i, prediction):
    return [str(i), "fizz", "buzz", "fizzbuzz"][prediction]
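
# For illustration (added examples): fizz_buzz_encode(15) == 3 and
# fizz_buzz(15, 3) == "fizzbuzz"; fizz_buzz_encode(7) == 0 and
# fizz_buzz(7, 0) == "7".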
parser = argparse.ArgumentParser(description='Chainer Fizz Buzz')
parser.add_argument('--gpu', '-g', default=-1, type=int,
                    help='GPU ID (negative value indicates CPU)')
parser.add_argument('--epoch', '-e', default=20, type=int,
                    help='number of epochs to learn')
parser.add_argument('--unit', '-u', default=100, type=int,
                    help='number of units')
parser.add_argument('--batchsize', '-b', type=int, default=10,
                    help='learning minibatch size')
parser.add_argument('--num_digits', '-d', type=int, default=10,
                    help='input number of digits')
args = parser.parse_args()
batchsize = args.batchsize
n_epoch = args.epoch
n_units = args.unit
num_digits = args.num_digits
print('GPU: {}'.format(args.gpu))
print('# unit: {}'.format(args.unit))
print('# Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('# num_digits: {}'.format(args.num_digits))
print('')
N = 2 ** num_digits - 101
x_train = np.array([binary_encode(i, num_digits)
                    for i in range(101, 2 ** num_digits)]).astype(np.float32)
y_train = np.array([fizz_buzz_encode(i)
                    for i in range(101, 2 ** num_digits)]).astype(np.int32)
x_test = np.array([binary_encode(i, num_digits)
                   for i in range(1, 101)]).astype(np.float32)
y_test = np.array([fizz_buzz_encode(i)
                   for i in range(1, 101)]).astype(np.int32)
N_test = y_test.size
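
# The held-out test set is the real FizzBuzz range 1..100; training uses
# 101..2**num_digits - 1, so every test input is unseen during training.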
# Prepare multi-layer perceptron model, defined in net.py
model = L.Classifier(net.FizzBuzz(num_digits, n_units, 4))
if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()
xp = np if args.gpu < 0 else cuda.cupy
# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)
for epoch in six.moves.range(1, n_epoch + 1):
    print('epoch', epoch)

    # training
    perm = np.random.permutation(N)
    sum_accuracy = 0
    sum_loss = 0
    start = time.time()
    for i in six.moves.range(0, N, batchsize):
        x = chainer.Variable(xp.asarray(x_train[perm[i:i + batchsize]]))
        t = chainer.Variable(xp.asarray(y_train[perm[i:i + batchsize]]))

        # Pass the loss function (Classifier defines it) and its arguments
        optimizer.update(model, x, t)

        if epoch == 1 and i == 0:
            # Dump the computational graph once; render it with Graphviz,
            # e.g.: dot -Tpng graph.dot -o graph.png
            with open('graph.dot', 'w') as o:
                variable_style = {'shape': 'octagon', 'fillcolor': '#E0E0E0',
                                  'style': 'filled'}
                function_style = {'shape': 'record', 'fillcolor': '#6495ED',
                                  'style': 'filled'}
                g = computational_graph.build_computational_graph(
                    (model.loss, ),
                    variable_style=variable_style,
                    function_style=function_style)
                o.write(g.dump())
            print('graph generated')

        sum_loss += float(model.loss.data) * len(t.data)
        sum_accuracy += float(model.accuracy.data) * len(t.data)
    end = time.time()
    elapsed_time = end - start
    throughput = N / elapsed_time
    print('train mean loss={}, accuracy={}, throughput={} samples/sec'.format(
        sum_loss / N, sum_accuracy / N, throughput))
    # evaluation
    sum_accuracy = 0
    sum_loss = 0
    for i in six.moves.range(0, N_test, batchsize):
        # volatile='on' (Chainer v1 API) skips building the backward graph,
        # which is unneeded during evaluation.
        x = chainer.Variable(xp.asarray(x_test[i:i + batchsize]),
                             volatile='on')
        t = chainer.Variable(xp.asarray(y_test[i:i + batchsize]),
                             volatile='on')
        loss = model(x, t)
        sum_loss += float(loss.data) * len(t.data)
        sum_accuracy += float(model.accuracy.data) * len(t.data)

    print('test mean loss={}, accuracy={}'.format(
        sum_loss / N_test, sum_accuracy / N_test))
# Print the ground truth next to the model's prediction for 1..100.
# (Fix: move the test inputs to the model's device; the original passed a
# NumPy array directly, which breaks when running on a GPU.)
ys = model.predictor(chainer.Variable(xp.asarray(x_test), volatile='on')).data
if args.gpu >= 0:
    ys = cuda.to_cpu(ys)
for num, y in enumerate(ys, start=1):
    answer = y_test[num - 1]
    prediction = np.argmax(y)
    print("{} {} {}".format(num,
                            fizz_buzz(num, answer),
                            fizz_buzz(num, prediction)))
# Save the model and the optimizer
print('save the model')
serializers.save_npz('mlp.model', model)
print('save the optimizer')
serializers.save_npz('mlp.state', optimizer)
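
# To restore the snapshots later (standard Chainer serializers API):
#   serializers.load_npz('mlp.model', model)
#   serializers.load_npz('mlp.state', optimizer)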

# net.py: the multi-layer perceptron imported by the training script above.
import chainer
import chainer.functions as F
import chainer.links as L


class FizzBuzz(chainer.Chain):

    def __init__(self, n_in, n_units, n_out):
        super(FizzBuzz, self).__init__(
            l1=L.Linear(n_in, n_units),
            l2=L.Linear(n_units, n_units),
            l3=L.Linear(n_units, n_out),
        )

    def __call__(self, x):
        # Two ReLU hidden layers; the final layer emits 4 class scores
        # (number, "fizz", "buzz", "fizzbuzz").
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        return self.l3(h2)
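
# Example invocation (the script file name is hypothetical; the flags and
# their defaults come from the argparse definitions above):
#   python train_fizzbuzz.py --gpu -1 --epoch 20 --unit 100 --batchsize 10 --num_digits 10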