@butsugiri (last active May 17, 2016)
Example implementation of a multilayer perceptron
# -*- coding: utf-8 -*-
"""
Example implementation of a multilayer perceptron (MLP),
trained with backpropagation on the XOR problem.
"""
import numpy as np


def sigmoid(x):
    return 1. / (1. + np.exp(-x))


def sigmoid_deriv(x):
    return x * (1.0 - x)
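
# NOTE (added comment, not part of the original gist): sigmoid_deriv expects
# the *already activated* value y = sigmoid(u); since dy/du = y * (1 - y),
# backward() below applies it to stored activations rather than to raw u.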

class SigmoidLayer:
    """
    Class representing a single layer of the multilayer perceptron.
    """
    def __init__(self, input, n_in, n_out, W=None):
        """
        Keyword arguments:
        input -- layer input, given as a numpy array. Each column is one sample.
        n_in -- dimension of each input sample
        n_out -- dimension of each output sample
        W -- weight matrix
        """
        self.input = input
        self.n_in = n_in
        self.n_out = n_out
        # Initialize the weights with random values in [-1, 1].
        # n_in + 1 because each layer carries a bias term.
        self.W = np.random.uniform(-1.0, 1.0, (self.n_out, self.n_in + 1))

    def forward(self, input=None, last=False):
        if input is not None:
            self.input = input
        u = np.dot(self.W, self.input)
        self.output = sigmoid(u)
        if not last:
            # Every layer except the output layer prepends a bias row.
            self.output = self.__add_bias(self.output)
        return self.output

    def backward(self, d, lr):
        """
        Assumes every layer except the output layer has two or more units,
        so configuring a hidden layer with a single unit should fail.
        The slice drops the bias component of the delta, which is not
        propagated further back. On handling the bias via slicing, see:
        http://stmind.hatenablog.com/entry/2014/06/16/145524
        """
        if d.shape[0] > 1:
            d = d[1:]
        d_next = sigmoid_deriv(self.input) * np.dot(self.W.T, d)
        self.W -= lr * np.dot(np.atleast_2d(d), np.atleast_2d(self.input).T)
        return d_next
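
    # The two updates above implement the standard backprop recurrence
    # (written out here as a reference comment; not in the original gist):
    #   d_next = sigmoid'(input) * W^T . d    -- delta passed to the layer below
    #   W     -= lr * d . input^T             -- gradient step for this layer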

    def __add_bias(self, x, axis=None):
        ones = np.ones([1, x.shape[1]])
        return np.r_[ones, x]

    def get_outputlayer_delta(self, y):
        return (self.output - y) * sigmoid_deriv(self.output)
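
# A minimal shape check for a single layer (assumed usage, not in the gist):
#   layer = SigmoidLayer(None, n_in=2, n_out=3)   # layer.W has shape (3, 3)
#   x = np.ones((3, 1))       # 2 inputs plus a bias row, one sample per column
#   y = layer.forward(x)      # shape (4, 1): 3 unit outputs plus a bias row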

class MLP:
    def __init__(self, input, label, n_in, hidden_layer_sizes, n_out):
        """
        Keyword arguments:
        input -- network input, given as a numpy array. Each row is one sample.
        label -- correct labels, given as a numpy array.
        n_in -- dimension of the input layer
        hidden_layer_sizes -- number of units in each hidden layer (e.g. [3, 3])
        n_out -- dimension of the output layer
        """
        self.input = input
        self.label = label
        self.hidden_layers = []
        n_layers = len(hidden_layer_sizes)
        self.n_layers = n_layers
        for i in range(n_layers):
            # The first hidden layer reads the raw input; later layers read
            # the previous hidden layer. Inputs are supplied at forward().
            input_size = n_in if i == 0 else hidden_layer_sizes[i - 1]
            self.hidden_layers.append(
                SigmoidLayer(None, input_size, hidden_layer_sizes[i])
            )
        self.output_layer = SigmoidLayer(
            None,
            hidden_layer_sizes[n_layers - 1],
            n_out
        )
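
    # For the demo at the bottom (n_in=2, hidden_layer_sizes=[3], n_out=1),
    # this constructor builds (added comment, not in the original gist):
    #   hidden_layers[0].W : shape (3, 3) -- 3 units x (2 inputs + bias)
    #   output_layer.W     : shape (1, 4) -- 1 unit  x (3 hidden units + bias)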

    def train(self, lr=0.2, epochs=10000):
        X = self.input
        t = self.label
        for i in range(epochs):
            # Plain SGD: draw one sample at random (with replacement),
            # X.shape[0] times per epoch.
            for n in range(0, X.shape[0]):
                index = np.random.randint(X.shape[0])
                g = np.atleast_2d(X[index]).T
                # Prepend the bias term to the input.
                ones = np.ones([1, g.shape[1]])
                g = np.r_[ones, g]
                for j in range(self.n_layers):
                    g = self.hidden_layers[j].forward(g)
                g = self.output_layer.forward(input=g, last=True)
                output_delta = self.output_layer.get_outputlayer_delta(t[index])
                delta = self.output_layer.backward(output_delta, lr)
                for j in reversed(range(self.n_layers)):
                    delta = self.hidden_layers[j].backward(delta, lr)
            loss_sum = np.sum((self.predict() - t) ** 2)
            print("epoch: {0:5d}, loss: {1:.5f}".format(i, loss_sum))

    def predict(self, x=None):
        if x is None:
            # Default to the training input: add a bias column, then transpose
            # so that each column holds one sample.
            x = self.input
            x = np.hstack((np.ones([x.shape[0], 1]), x)).T
        for i in range(self.n_layers):
            x = self.hidden_layers[i].forward(x)
        return self.output_layer.forward(input=x, last=True)


if __name__ == "__main__":
    x_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
    y_train = np.array([0, 1, 1, 0])
    mlp = MLP(x_train, y_train, len(x_train[0]), [3], 1)
    mlp.train()
    for x, y in zip(x_train, y_train):
        input = x
        # Prepend the bias term and reshape into a column vector, matching
        # the format predict() expects for externally supplied input.
        x = np.insert(x, 0, 1)
        x = np.atleast_2d(x).T
        print("input: {}\tlabel: {}\tprediction: {}".format(input, y, mlp.predict(x)))