@darden1
darden1 / myrnn_retur_sequences_false.py
Created November 8, 2018 13:32
model_myrnn_rsf = RecurrentNeuralNetwork(rnn_units, return_sequences=False)
model_myrnn_rsf.fit(X_train, Y_train_rsf,
                    batch_size=batch_size,
                    epochs=n_epochs,
                    mu=lr,
                    validation_data=(X_val, Y_val_rsf),
                    verbose=1)
plt.plot(indices, history_rsf.history["loss"], label="loss (keras)")
plt.plot(indices, history_rsf.history["val_loss"], label="val_loss (keras)")
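Presumably the gist then overlays the custom model's curves for comparison; a plausible continuation, using the loss/val_loss attributes that my_rnn.py defines below:
plt.plot(indices, model_myrnn_rsf.loss, label="loss (my rnn)")
plt.plot(indices, model_myrnn_rsf.val_loss, label="val_loss (my rnn)")
plt.legend()
plt.show()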
@darden1
darden1 / myrnn_retur_sequences_true.py
Created November 8, 2018 13:31
model_myrnn_rst = RecurrentNeuralNetwork(rnn_units, return_sequences=True)
model_myrnn_rst.fit(X_train, Y_train,
                    batch_size=batch_size,
                    epochs=n_epochs,
                    mu=lr,
                    validation_data=(X_val, Y_val),
                    verbose=1)
plt.plot(indices, history_rst.history["loss"], label="loss (keras)")
plt.plot(indices, history_rst.history["val_loss"], label="val_loss (keras)")
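Likewise for the return_sequences=True comparison, presumably:
plt.plot(indices, model_myrnn_rst.loss, label="loss (my rnn)")
plt.plot(indices, model_myrnn_rst.val_loss, label="val_loss (my rnn)")
plt.legend()
plt.show()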
@darden1
darden1 / my_rnn.py
Last active November 8, 2018 16:17
class RecurrentNeuralNetwork():
    def __init__(self, rnn_units=10, rnn_activation="tanh", return_sequences=False, random_state=0):
        self.init_state = True                # flag: weights still need to be initialized
        self.loss = None                      # training loss
        self.val_loss = None                  # validation loss
        self.acc = None                       # training accuracy
        self.val_acc = None                   # validation accuracy
        self.n_layers = 3                     # total number of layers - 1
        self.rnn_units = rnn_units            # hidden-layer size
        self.rnn_activation = rnn_activation  # name of the activation function
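The preview stops after __init__. Judging from the calls above, fit takes batch_size, epochs, a learning rate mu, validation_data, and verbose; a minimal sketch of such a loop, where train_on_batch and evaluate are hypothetical helpers rather than names from the gist:
    def fit(self, X, Y, batch_size=10, epochs=100, mu=0.01,
            validation_data=None, verbose=1):
        """Mini-batch SGD; appends per-epoch losses to self.loss / self.val_loss."""
        self.loss, self.val_loss = [], []
        for epoch in range(epochs):
            perm = np.random.permutation(len(X))
            for i in range(0, len(X), batch_size):
                batch = perm[i:i + batch_size]
                self.train_on_batch(X[batch], Y[batch], mu)   # hypothetical helper
            self.loss.append(self.evaluate(X, Y))             # hypothetical helper
            if validation_data is not None:
                X_val, Y_val = validation_data
                self.val_loss.append(self.evaluate(X_val, Y_val))
            if verbose:
                print("epoch %d/%d  loss=%.4f" % (epoch + 1, epochs, self.loss[-1]))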
@darden1
darden1 / my_time_series_dense.py
Last active November 8, 2018 15:31
class TimeSeriesDense(Dense):
    def forward_prop(self, Phi):
        """Forward propagation."""
        n_samples, T = Phi.shape[0], Phi.shape[1]
        Z = np.zeros([n_samples, T, self.units])
        for t in range(T):
            Z[:, t, :] = np.dot(Phi[:, t, :], self.W) + self.b
        return Z
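Since the same W and b are applied at every time step, the loop above is just a batched matrix product. A quick, self-contained check of that equivalence (illustrative only, not part of the gist):
import numpy as np

n_samples, T, n_in, units = 4, 5, 3, 7
Phi = np.random.randn(n_samples, T, n_in)
W, b = np.random.randn(n_in, units), np.zeros(units)

Z_loop = np.stack([np.dot(Phi[:, t, :], W) + b for t in range(T)], axis=1)
Z_flat = np.dot(Phi.reshape(-1, n_in), W) + b  # fold time into the batch axis
assert np.allclose(Z_loop, Z_flat.reshape(n_samples, T, units))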
@darden1
darden1 / my_rnn_block.py
Last active November 9, 2018 12:45
class RNN:
    def __init__(self, units, input_dim, activation="tanh",
                 kernel_initializer="glorot_normal",
                 bias_initializer='zeros',
                 return_sequences=False):
        self.units = units
        self.input_dim = input_dim
        self.activation = activation
        self.kernel_initializer = kernel_initializer
        self.bias_initializer = bias_initializer
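The preview ends before the forward pass. The SimpleRNN recurrence this block mirrors is h_t = f(x_t·W + h_{t-1}·U + b); a minimal sketch under that reading, where the attribute names W, U, and b are my assumptions and the activation is fixed to tanh for brevity:
    def forward_prop(self, X):
        """h_t = tanh(x_t . W + h_{t-1} . U + b), computed over all time steps."""
        n_samples, T, _ = X.shape
        H = np.zeros([n_samples, T, self.units])
        h = np.zeros([n_samples, self.units])
        for t in range(T):
            h = np.tanh(np.dot(X[:, t, :], self.W) + np.dot(h, self.U) + self.b)
            H[:, t, :] = h
        return H if self.return_sequences else h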
@darden1
darden1 / keras_simplernn_return_sequences_false.py
Last active November 9, 2018 15:46
# Keep only the final time step of the target data
Y_train_rsf, Y_val_rsf = Y_train[:, -1, :], Y_val[:, -1, :]

model_rsf = Sequential()
model_rsf.add(SimpleRNN(rnn_units, input_shape=(n_sequence, n_features), return_sequences=False))
model_rsf.add(Dense(n_classes, activation="linear"))
model_rsf.compile(loss='mean_squared_error', optimizer=SGD(lr))
history_rsf = model_rsf.fit(X_train, Y_train_rsf,
                            batch_size=batch_size,
                            epochs=n_epochs,
                            validation_data=(X_val, Y_val_rsf),  # needed for the val_loss curve
                            verbose=1)
@darden1
darden1 / keras_simplernn_return_sequences_true.py
Last active November 8, 2018 12:58
from keras.models import Sequential
from keras.layers import Dense, SimpleRNN
from keras.optimizers import SGD
n_train = 80
X_train, X_val = X[:n_train], X[n_train:]
Y_train, Y_val = Y[:n_train], Y[n_train:]
batch_size = 10
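The preview ends here; given the return_sequences=False variant above and the history_rst used in the plots, the model build presumably continues along these lines (the n_epochs and lr values are assumptions, as is rnn_units being defined nearby):
n_epochs, lr = 200, 0.01  # assumed values; not visible in the preview

model_rst = Sequential()
model_rst.add(SimpleRNN(rnn_units, input_shape=(n_sequence, n_features), return_sequences=True))
model_rst.add(Dense(n_classes, activation="linear"))  # Dense maps each time step of 3-D input
model_rst.compile(loss='mean_squared_error', optimizer=SGD(lr))
history_rst = model_rst.fit(X_train, Y_train,
                            batch_size=batch_size,
                            epochs=n_epochs,
                            validation_data=(X_val, Y_val),
                            verbose=1)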
@darden1
darden1 / sin.py
Last active December 9, 2018 14:36
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(0)
n_sequence = 5
n_data = (20 + 1) * n_sequence  # 21 windows of n_sequence points
a, b = 1., 1.
phase = np.pi / 4
time = np.linspace(0, 1, n_data)
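The preview cuts off before the signal itself; a plausible completion — the exact roles of a, b, and the noise level are my guesses — builds a noisy sinusoid and slices it into length-n_sequence windows with one-step-ahead targets:
y = a * np.sin(2 * np.pi * time + phase) + b + 0.05 * np.random.randn(n_data)  # assumed form
X = np.array([y[i:i + n_sequence] for i in range(n_data - n_sequence)])[:, :, np.newaxis]
Y = np.array([y[i + 1:i + 1 + n_sequence] for i in range(n_data - n_sequence)])[:, :, np.newaxis]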
@darden1
darden1 / my_mlp_vs_sklearn_vs_keras.py
Created April 16, 2018 10:56
plt.figure(figsize=(10, 7))
plt.subplot(211)
plt.title("learning log (loss)")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.plot(np.arange(len(clf.loss)), clf.loss, label="my train")
plt.plot(np.arange(len(clf.loss)), clf.val_loss, label="my test")
plt.plot(np.arange(len(history.history["loss"])), history.history["loss"], label="keras train")
plt.plot(np.arange(len(history.history["loss"])), history.history["val_loss"], label="keras test")
plt.plot(np.arange(len(clf_sk.loss_curve_)), clf_sk.loss_curve_, label="sklearn train")
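The figure is truncated mid-panel; subplot(211) suggests an accuracy panel follows. A hedged completion, assuming acc/val_acc attributes like those in my_rnn.py and the "acc" keys of the Keras history:
plt.legend()

plt.subplot(212)
plt.title("learning log (accuracy)")
plt.xlabel("epoch")
plt.ylabel("accuracy")
plt.plot(np.arange(len(clf.acc)), clf.acc, label="my train")
plt.plot(np.arange(len(clf.acc)), clf.val_acc, label="my test")
plt.plot(np.arange(len(history.history["acc"])), history.history["acc"], label="keras train")
plt.plot(np.arange(len(history.history["acc"])), history.history["val_acc"], label="keras test")
plt.legend()
plt.show()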
@darden1
darden1 / train_with_keras_mlp.py
Last active April 16, 2018 10:53
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.optimizers import SGD
from keras.losses import categorical_crossentropy
from keras.initializers import he_normal
n_features = X.shape[1]
n_classes = Y.shape[1]
batch_size = int(len(X_train)*0.2) # mini-batch size
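The preview stops at the hyper-parameters; given the imports above, the gist presumably continues by building and training the MLP roughly as follows (the hidden width, lr, n_epochs, and validation variable names are assumptions):
n_hidden, lr, n_epochs = 100, 0.1, 200  # assumed values

model = Sequential()
model.add(Dense(n_hidden, input_dim=n_features, kernel_initializer=he_normal()))
model.add(Activation("relu"))
model.add(Dense(n_classes, kernel_initializer=he_normal()))
model.add(Activation("softmax"))
model.compile(loss=categorical_crossentropy, optimizer=SGD(lr), metrics=["accuracy"])
history = model.fit(X_train, Y_train,
                    batch_size=batch_size,
                    epochs=n_epochs,
                    validation_data=(X_test, Y_test),  # variable names assumed
                    verbose=1)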