import win_unicode_console
win_unicode_console.enable()
from keras.models import Sequential
from keras.layers import Activation, Dense, Dropout, Convolution2D, Flatten, MaxPooling2D
from keras.utils.np_utils import to_categorical
from keras.optimizers import Adagrad
from keras.optimizers import Adam
import numpy as np
from PIL import Image
import os
import tensorflow as tf
gpuConfig = tf.ConfigProto(
    gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.95),
    device_count={'GPU': 0})
sess = tf.Session(config=gpuConfig)
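# Note: the session above is created but never handed to Keras. A minimal sketch of how
# one could make Keras pick up this config (assuming a Keras 2.x TensorFlow backend):
# from keras import backend as K
# K.set_session(sess)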
# Build the training data.
image_list = []
label_list = []
# Read the images from each class subdirectory under HASE_KIYO\train.
for dir in os.listdir("HASE_KIYO\\train"):
    if dir == ".DS_Store":
        continue
    dir1 = "HASE_KIYO\\train\\" + dir
    label = 0
    if dir == "kisenosato_0_out":
        label = 0
    elif dir == "hashimoto_1_out":
        label = 1
    elif dir == "hirose_2_out":
        label = 2
    for file in os.listdir(dir1):
        if file != ".DS_Store":
            # Append the ground-truth label to label_list (Kisenosato: 0, Kanna Hashimoto: 1, Suzu Hirose: 2).
            label_list.append(label)
            filepath = dir1 + "\\" + file
            # Resize the image to 12x12 pixels and load it as a 12x12 2-D array
            # whose elements are [R, G, B] triples, each channel in the range 0-255.
            image = np.array(Image.open(filepath).resize((12, 12)))
            # print(filepath)
            # Transposing would rearrange the array into [[R plane], [G plane], [B plane]]
            # (channels-first); the channels-last layout is kept here instead.
            # image = image.transpose(2, 0, 1)
            # print(image.shape)
            # Append the normalized array to image_list.
            image_list.append(image / 255.)
# Convert to a numpy array so it can be passed to Keras.
image_list = np.array(image_list)
# One-hot encode the labels:
# 0 -> [1, 0, 0], 1 -> [0, 1, 0], 2 -> [0, 0, 1].
Y = to_categorical(label_list)
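# A quick illustration of the one-hot layout (shown as a comment, not executed):
# to_categorical([0, 1, 2]) -> [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]]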
# Build the neural network model.
model = Sequential()
# model.add(Convolution2D(32, (3, 3), border_mode='same', input_shape=(1,12,12), data_format='channels_first'))
# Input images are 12x12 RGB in channels-last order, i.e. shape (12, 12, 3).
model.add(Convolution2D(32, (3, 3), strides=1, padding='same', input_shape=(12, 12, 3)))
model.add(Activation("relu"))
model.add(Convolution2D(32, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(200))
model.add(Activation("relu"))
model.add(Dropout(0.2))
model.add(Dense(200))
model.add(Activation("relu"))
model.add(Dropout(0.2))
# Three output units, one per class.
model.add(Dense(3))
model.add(Activation("softmax"))
# Use Adam as the optimizer.
opt = Adam(lr=0.0001)
# Compile the model.
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# Train the model; 10% of the data is used for validation.
print("image_list.shape before np.array =", image_list.shape)
# image_list already has shape (n_samples, 12, 12, 3), so no reshape is needed here.
image_list = np.array(image_list)
Y = np.array(Y)
print("image_list.shape after np.array =", image_list.shape)
print("Y.shape=",Y.shape)
# print("image_list matrix after reshape",image_list.shape)
print("Y matrix before reshape",Y)
# Y.reshape(1,810,12,12)
print("Y matrix after reshape",Y)
model.fit(image_list, Y, epochs=1000, batch_size=25, validation_split=0.1)
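# Optional sketch: save the trained model so the evaluation below could be rerun
# without retraining. The filename is only an example, not part of the original script.
# model.save("hase_kiyo_cnn.h5")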
# Evaluate on the images in the test directory (HASE_KIYO\test) and report the accuracy.
total = 0.
ok_count = 0.
for dir in os.listdir("HASE_KIYO\\test"):
    if dir == ".DS_Store":
        continue
    dir1 = "HASE_KIYO\\test\\" + dir
    label = 0
    if dir == "kisenosato_0_out":
        label = 0
    elif dir == "hashimoto_1_out":
        label = 1
    elif dir == "hirose_2_out":
        label = 2
    for file in os.listdir(dir1):
        if file != ".DS_Store":
            label_list.append(label)
            filepath = dir1 + "\\" + file
            image = np.array(Image.open(filepath).resize((12, 12)))
            print(filepath)
            # image = image.transpose(2, 0, 1)
            result = model.predict_classes(np.array([image / 255.]))
            print("label:", label, "result:", result[0])
            total += 1.
            if label == result[0]:
                ok_count += 1.
print("seikai: ", ok_count / total * 100, "%")