Create a gist now

Instantly share code, notes, and snippets.

What would you like to do?
Python script version of the former NumPy neural-network notebook
# coding: utf-8
# A NumPy neural network for solving non-linear problems
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import six
class InputLayer:
    """Input layer of the neural network.

    Holds the sample currently fed into the network; every other layer
    hook is a no-op so the layer can sit in the same pipeline as the
    trainable ones.

    Args:
        dim: dimensionality of the input data.
    """

    def __init__(self, dim):
        self.dim = dim
        # Row vector holding the current input sample.
        self.data = np.zeros((1, dim))

    def forward(self):
        # Nothing to compute: `data` is assigned externally.
        pass

    def backward(self):
        # No upstream layer to propagate gradients to.
        pass

    def updateWeight(self, alpha):
        # No trainable parameters.
        pass
class NeuroLayer:
    """Fully connected layer (hidden or output) of the network.

    Registers itself as ``nextLayer`` of ``preLayer``, so constructing
    the layers in order wires up the whole chain automatically.

    Args:
        dim: number of units in this layer.
        preLayer: layer feeding its output into this one.
        bias: initial value for every bias element.
        r_max, r_min: bounds for the uniform weight initialisation.
            NOTE(review): callers pass r_max < r_min and the formula
            below compensates (e.g. -0.5/0.5 gives weights in
            (-0.5, 0.5]) — confirm the intended range.
    """

    def __init__(self, dim, preLayer, bias, r_max, r_min):
        self.dim = dim
        self.preLayer = preLayer
        self.data = np.zeros((1, dim))
        # Uniform random initialisation of the weight matrix.
        self.weight = (np.random.rand(dim, preLayer.dim)
                       * (r_max - r_min) - r_max)
        self.bias = np.ones((1, dim)) * bias
        # Wire this layer into the chain.
        self.nextLayer = None
        preLayer.nextLayer = self
        # Gradient buffers; diffWeight/diffBias accumulate over a batch.
        self.diff = np.zeros((1, preLayer.dim))
        self.diffWeight = np.zeros((dim, preLayer.dim))
        self.diffBias = np.zeros((1, dim))

    def forward(self):
        # Affine transform: x W^T + b.
        self.data = np.dot(self.preLayer.data, self.weight.T) + self.bias

    def backward(self):
        upstream = self.nextLayer.diff
        # Accumulate parameter gradients (summed until updateWeight).
        self.diffWeight += np.dot(upstream.T, self.preLayer.data)
        self.diffBias += upstream * 1.
        # Gradient with respect to this layer's input.
        self.diff = np.dot(upstream, self.weight)

    def updateWeight(self, alpha):
        # Gradient-descent step, then clear the accumulated gradients.
        self.bias -= self.diffBias * alpha
        self.weight -= self.diffWeight * alpha
        self.diffBias = np.zeros((1, self.dim))
        self.diffWeight = np.zeros((self.dim, self.preLayer.dim))
class ActionLayer:
    """Sigmoid activation layer.

    Args:
        preLayer: layer whose output is passed through the sigmoid.
    """

    def __init__(self, preLayer):
        self.preLayer = preLayer
        self.dim = preLayer.dim
        self.data = np.zeros((1, self.dim))
        # Wire this layer into the chain.
        self.nextLayer = None
        preLayer.nextLayer = self
        self.diff = np.zeros((1, self.dim))

    def forward(self):
        # Element-wise logistic sigmoid: 1 / (1 + exp(-x)).
        self.data = 1.0 / (1.0 + np.exp(-self.preLayer.data))

    def backward(self):
        # d(sigmoid)/dx = y * (1 - y), chained with the upstream grad.
        self.diff = self.nextLayer.diff * self.data * (1.0 - self.data)

    def updateWeight(self, alpha):
        # No trainable parameters.
        pass
class ErrorLayer:
    """Sum-of-squares loss between the network output and the target.

    ``data`` accumulates the loss across forward passes until the
    caller resets it; ``result`` holds the output binarised at 0.5.

    Args:
        preLayer: layer producing the network's final output.
    """

    def __init__(self, preLayer):
        self.preLayer = preLayer
        self.dim = preLayer.dim
        self.data = 0.0                        # accumulated squared error
        self.target = np.zeros((1, self.dim))  # current teacher label
        self.diff = np.zeros((1, self.dim))
        preLayer.nextLayer = self
        self.result = np.zeros((1, self.dim))

    def forward(self):
        residual = self.preLayer.data - self.target
        # Accumulate this sample's squared error.
        self.data += float(np.sum(residual ** 2))
        # Binarise the prediction: values > 0.5 become 1, the rest 0.
        self.result = np.where(self.preLayer.data > 0.5, 1.0, 0.0)

    def backward(self):
        # Gradient of sum((y - t)^2) with respect to y.
        self.diff = 2 * (self.preLayer.data - self.target)

    def updateWeight(self, alpha):
        # No trainable parameters.
        pass
def train_nn(alpha, iteration, batchsize, neuralNetwork,
             trainingData, trainingTarget, testData, testTarget):
    """Train the network with mini-batch SGD and evaluate on the test set.

    Args:
        alpha: learning rate.
        iteration: number of training epochs.
        batchsize: mini-batch size (one weight update per batch).
        neuralNetwork: list of six layers [input, hidden, hidden-act,
            output, output-act, error]; index 0 receives each input
            sample and index 5 holds the target and accumulated loss.
        trainingData / trainingTarget: training samples and labels.
        testData / testTarget: test samples and labels.

    Returns:
        (loss_list, accuracy): per-epoch mean training loss (ndarray of
        length `iteration`) and the final test accuracy in percent.
    """
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # recommended monotonic timer. six.moves.range is replaced by the
    # builtin range, dropping the third-party dependency.
    start_time = time.perf_counter()
    loss_list = np.zeros(iteration)
    # --- training ---
    for itr in range(iteration):
        # Fresh shuffle of the sample order each epoch.
        perm = np.random.permutation(len(trainingData))
        for i in range(0, len(trainingData), batchsize):
            batch_x = trainingData[perm[i: i + batchsize]]
            batch_t = trainingTarget[perm[i: i + batchsize]]
            for (d, t) in zip(batch_x, batch_t):
                # Feed one sample; the error layer accumulates the loss.
                neuralNetwork[0].data = np.expand_dims(d, axis=0)
                neuralNetwork[5].target = t
                for layer in neuralNetwork:
                    layer.forward()
                for layer in reversed(neuralNetwork):
                    layer.backward()
            # One gradient step per mini-batch (gradients were summed).
            for layer in neuralNetwork:
                layer.updateWeight(alpha)
        # Mean loss over the epoch, then reset the accumulator.
        loss_list[itr] = neuralNetwork[5].data / len(trainingData)
        neuralNetwork[5].data = 0
    # --- test ---
    correct = 0
    for (d, t) in zip(testData, testTarget):
        # Expand to a row vector for consistency with the training loop.
        neuralNetwork[0].data = np.expand_dims(d, axis=0)
        neuralNetwork[5].target = t
        for layer in neuralNetwork:
            layer.forward()
        # Count the sample as correct only if every output unit matches.
        if (neuralNetwork[5].result == t).all():
            correct += 1
    elapsed_time = time.perf_counter() - start_time
    print("経過時間", elapsed_time)
    print("train epoch={}, test accuracy={}%".format(
        iteration, (correct / len(testData) * 100)))
    return loss_list, (correct / len(testData) * 100)
def get_dataset(d_dir, N_train):
    """Load, shuffle and split the Iris CSV dataset.

    Args:
        d_dir: path (or file-like object) of the headerless Iris CSV:
            four numeric feature columns followed by the species name.
        N_train: number of samples used for training; the remaining
            samples become the test set.

    Returns:
        (train_d, train_t, test_d, test_t): feature arrays and one-hot
        label arrays for the train and test splits.

    Raises:
        KeyError: if the CSV contains an unknown species label.
    """
    csv_data = pd.read_csv(d_dir, header=None)
    # DataFrame.as_matrix() was removed in pandas 1.0; use to_numpy().
    input_data = csv_data[[0, 1, 2, 3]].to_numpy()
    # One-hot encode the species column. The original nested
    # conditional expression had an empty final `else ()` branch — a
    # SyntaxError; a lookup table is both correct and clearer.
    one_hot = {
        "Iris-setosa": [1., 0., 0.],
        "Iris-versicolor": [0., 1., 0.],
        "Iris-virginica": [0., 0., 1.],
    }
    input_target = np.asarray([one_hot[label] for label in csv_data[4]])
    # Shuffle samples and labels with the same permutation.
    perm = np.random.permutation(len(input_data))
    input_data = input_data[perm]
    input_target = input_target[perm]
    return (input_data[:N_train], input_target[:N_train],
            input_data[N_train:], input_target[N_train:])
# ## Apply the neural network to the XOR (exclusive-or) problem
"""
ハイパーパラメータ
alpha :学習係数
iteration :学習epoch数
batchsize :学習時のバッチサイズ
bias :バイアスの初期値
hiddenDim :隠れ層の次元数
randA :重みの初期値を決める乱数の下限
randB :重みの初期値を決める乱数の上限
"""
# Hyper-parameters: learning rate, epoch count, batch size, initial
# bias, hidden-layer width, and the weight-initialisation range.
alpha = 0.7
iteration = 700
batchsize = 4
bias = 0.6
# NOTE(review): r_max < r_min here; NeuroLayer's init formula
# (rand * (r_max - r_min) - r_max) appears to compensate, yielding
# weights in (-0.5, 0.5] — confirm this is the intended range.
r_max = -0.5
r_min = 0.5
hiddenDim = 5
# XOR truth table: training and test sets are the same four points.
train_d = np.asarray([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
train_t = np.asarray([[0.0], [1.0], [1.0], [0.0]])
test_d = np.asarray([[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]])
test_t = np.asarray([[0.0], [1.0], [1.0], [0.0]])
# Build the chain: input -> hidden -> sigmoid -> output -> sigmoid -> loss.
# Each constructor registers itself as its predecessor's nextLayer.
inputLayer = InputLayer(len(train_d[0]))
hiddenLayer = NeuroLayer(hiddenDim, inputLayer, bias, r_max, r_min)
hiddenActionLayer = ActionLayer(hiddenLayer)
outputLayer = NeuroLayer(len(train_t[0]), hiddenActionLayer, bias, r_max, r_min)
outputActionLayer = ActionLayer(outputLayer)
errorLayer = ErrorLayer(outputActionLayer)
neuralNetwork = [inputLayer, hiddenLayer, hiddenActionLayer,\
outputLayer, outputActionLayer, errorLayer]
loss_list, acc = train_nn(alpha, iteration, batchsize, neuralNetwork,\
train_d, train_t, test_d, test_t)
# Plot the per-epoch training loss.
plt.plot(loss_list)
plt.title('XOR train loss, accuracy={}'.format(acc))
plt.xlabel('epoch')
plt.ylabel('loss')
plt.xlim([0, len(loss_list)-1])
# plt.savefig("xor_loss.png")
plt.show()
# Apply the neural network to the Iris dataset (CUI)
"""
ハイパーパラメータ
alpha :学習係数
iteration :学習epoch数
batchsize :学習時のバッチサイズ
bias :バイアスの初期値
hiddenDim :隠れ層の次元数
randA :重みの初期値を決める乱数の下限
randB :重みの初期値を決める乱数の上限
N_train :学習に使うサンプル数
f_name :ファイルのパス
"""
# Hyper-parameters: learning rate, epoch count, batch size, initial
# bias, hidden-layer width, weight-init range, training-sample count
# and the CSV path.
alpha = 0.02
iteration = 500
batchsize = 20
bias = 0.9
hiddenDim = 12
# NOTE(review): as in the XOR section, r_max < r_min — the init
# formula in NeuroLayer appears to compensate; confirm.
r_max = -0.3
r_min = 0.3
N_train = 120
# Expects the headerless Iris CSV next to this script.
f_name = "./iris.csv"
train_d, train_t, test_d, test_t = get_dataset(f_name, N_train)
# Build the chain: input -> hidden -> sigmoid -> output -> sigmoid -> loss.
inputLayer = InputLayer(len(train_d[0]))
hiddenLayer = NeuroLayer(hiddenDim, inputLayer, bias, r_max, r_min)
hiddenActionLayer = ActionLayer(hiddenLayer)
outputLayer = NeuroLayer(len(train_t[0]), hiddenActionLayer, bias, r_max, r_min)
outputActionLayer = ActionLayer(outputLayer)
errorLayer = ErrorLayer(outputActionLayer)
neuralNetwork = [inputLayer, hiddenLayer, hiddenActionLayer,\
outputLayer, outputActionLayer, errorLayer]
loss_list, acc = train_nn(alpha, iteration, batchsize, neuralNetwork,\
train_d, train_t, test_d, test_t)
# Plot the per-epoch training loss.
plt.plot(loss_list)
plt.title('Iris train loss, accuracy={}'.format(acc))
plt.xlabel('epoch')
plt.ylabel('loss')
plt.xlim([0, len(loss_list)-1])
# plt.savefig("iris_loss.png")
plt.show()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment