# Create the base model
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

import tensorflow as tf

# Prepare the dataset (CIFAR-10: 32x32 RGB images in 10 classes)
(x_train, t_train), (x_test, t_test) = tf.keras.datasets.cifar10.load_data()

# Show the first 25 training images
plt.figure(figsize=(12, 12))
for i in range(25):
    plt.subplot(5, 5, i+1)
    plt.imshow(x_train[i])

plt.show()
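
# Optional sketch (not in the original script): the same grid with class-name titles.
# class_names below follows the standard CIFAR-10 label order; t_train[i] is a
# length-1 array, hence the [0] index.
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

plt.figure(figsize=(12, 12))
for i in range(25):
    plt.subplot(5, 5, i+1)
    plt.imshow(x_train[i])
    plt.title(class_names[t_train[i][0]])
    plt.axis('off')

plt.show()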

# Normalize pixel values to the [0, 1] range
x_train = x_train / 255.0
x_test = x_test / 255.0

# Expected shapes: (50000, 32, 32, 3) (10000, 32, 32, 3) (50000, 1) (10000, 1)
print(x_train.shape, x_test.shape, t_train.shape, t_test.shape)

# Define and train the model
import os
import random

def reset_seed(seed=0):
    """Fix the random seeds so that results are reproducible."""
    os.environ['PYTHONHASHSEED'] = '0'
    random.seed(seed)         # fix the seed of Python's random module
    np.random.seed(seed)      # fix the NumPy seed
    tf.random.set_seed(seed)  # fix the TensorFlow seed

from tensorflow.keras import models, layers

# Fix the random seeds
reset_seed(0)

# Build the model: three Conv2D + MaxPooling blocks followed by a dense classifier
model = models.Sequential([
    layers.Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=(32, 32, 3)),
    layers.MaxPooling2D((2, 2)),    # 32x32 -> 16x16
    layers.Conv2D(64, (3, 3), padding='same', activation='relu'),
    layers.MaxPooling2D((2, 2)),    # 16x16 -> 8x8
    layers.Conv2D(128, (3, 3), padding='same', activation='relu'),
    layers.MaxPooling2D((2, 2)),    # 8x8 -> 4x4
    layers.Flatten(),               # 4*4*128 = 2048 features
    layers.Dense(128, activation='relu'),
    layers.Dense(10, activation='softmax')  # 10 class probabilities
])

# Configure the optimizer (learning_rate replaces the deprecated lr argument)
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)

# Compile the model (sparse_categorical_crossentropy because the labels are integer class IDs)
model.compile(loss='sparse_categorical_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])

model.summary()
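
# Optional sanity check (not in the original script) of the counts reported by
# model.summary():
#   conv params  = kernel_h * kernel_w * in_channels * filters + filters
#   dense params = inputs * units + units
conv1  = 3*3*3*32    + 32    # 896
conv2  = 3*3*32*64   + 64    # 18,496
conv3  = 3*3*64*128  + 128   # 73,856
dense1 = 4*4*128*128 + 128   # 262,272 (Flatten yields 4*4*128 = 2048 features)
dense2 = 128*10      + 10    # 1,290
print(conv1 + conv2 + conv3 + dense1 + dense2, model.count_params())  # both 356,810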

# Training settings
batch_size = 1024
epochs = 50

# Run training, validating on the test set after every epoch
history = model.fit(x_train, t_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, t_test))

# Check the results
results = pd.DataFrame(history.history)

results[['accuracy', 'val_accuracy']].plot()
plt.show()

results[["loss", "val_loss"]].plot()
plt.show()

# Metrics from the final epoch
print(results.tail(1))
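
# Optional check (not in the original script): model.evaluate recomputes loss and
# accuracy on the test set and should match the last val_loss / val_accuracy above.
test_loss, test_acc = model.evaluate(x_test, t_test, verbose=0)
print(f'test loss: {test_loss:.4f}, test accuracy: {test_acc:.4f}')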