#encoding: utf-8
#
# Copyright (c) 2016 chainer_nlp_man
#
# This software is released under the MIT License.
# http://opensource.org/licenses/mit-license.php
#
import numpy as np  # use NumPy under the name np
import chainer      # use Chainer
from chainer import cuda, Function, gradient_check, Variable, optimizers, serializers, utils  # shortened names
from chainer import Link, Chain, ChainList
import chainer.functions as F  # make chainer.functions available as F
import chainer.links as L      # make chainer.links available as L
class Perceptron(Chain):
    def __init__(self, nobias_flag):
        super(Perceptron, self).__init__(
            l1=L.Linear(2, 2, nobias=nobias_flag)
        )
        self.nobias_flag = nobias_flag

    def __call__(self, x):
        h = self.l1(x)
        return h

    def dump(self):
        print(self.l1.W.data)
        if not self.nobias_flag:
            print(self.l1.b.data)
class Classifier(Chain):
    def __init__(self, predictor):
        super(Classifier, self).__init__(
            predictor=predictor
        )

    def __call__(self, x, t):
        y = self.predictor(x)
        self.loss = F.softmax_cross_entropy(y, t)
        self.accuracy = F.accuracy(y, t)
        return self.loss
# Prepare the model
model = Classifier(Perceptron(False))  # Classifier: computes the loss for a classification problem
optimizer = optimizers.Adam()  # Adam: one algorithm for gradient descent (the method used to correct errors)
optimizer.setup(model)
# Training loop
loss_value = 50000
cnt = 0
while loss_value > 1e-5:
    # Training data
    x = Variable(np.array([[0, 0], [1, 0], [0, 1], [1, 1]], dtype=np.float32))  # AND gate
    t = Variable(np.array([0, 0, 0, 1], dtype=np.int32))
    # X  Y  out
    # 0  0  0
    # 1  0  0
    # 0  1  0
    # 1  1  1
    # Training step
    model.zerograds()  # reset the gradients to zero
    loss = model(x, t)
    loss_value = loss.data
    loss.backward()  # compute gradients by backpropagation
    optimizer.update()
    cnt += 1
    if cnt % 1000 == 0:
        # Print intermediate results
        y = F.softmax(model.predictor(x))
        print("=====iter = {0}, loss = {1}=====".format(cnt, loss_value))
        print("---output value---")
        print(y.data)
        print("---result---")
        print(y.data.argmax(1))
        print("---dump---")
        model.predictor.dump()

# Save the model file (the trained parameters)
serializers.save_npz('my_xor.model', model)
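
# --- A minimal loading sketch (not part of the original gist) ---
# Assuming the same Chainer 1.x API used above: the saved parameters can be
# restored with serializers.load_npz into a freshly built model of the same
# shape, then used for inference. The construction must match training time.
loaded = Classifier(Perceptron(False))
serializers.load_npz('my_xor.model', loaded)
test_x = Variable(np.array([[1, 1]], dtype=np.float32))
pred = F.softmax(loaded.predictor(test_x))
print(pred.data.argmax(1))  # expected [1] for the AND gate trained above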