Training an LSTM network
### PYTHON ###
# Purpose : to test recurrent neural networks (LSTM)
# Author  : Aditya Ambati
# Date    : May 3 2017
# Update  : version 1
### import libraries
import pandas as pd
import numpy as np
from sklearn.metrics import mean_squared_error
from math import sqrt
from matplotlib import pyplot
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
import os
os.chdir('/home/labcomp/Desktop')  # 'cd' is IPython-only; use os.chdir in plain Python
sales = pd.read_csv('sales-of-shampoo-over-a-three-ye.csv', header=0, delimiter=';')
sales['parse'] = '190' + sales.Month  # prefix the 2-digit year so pandas can parse full dates
sales.index = pd.to_datetime(sales['parse'])
sales.drop(['Month', 'parse'], axis=1, inplace=True)
sales.plot()
##########split data into train and test
X = sales.values
train, test = X[0:-12], X[-12:]
history = [x for x in train]
predictions = list()
for i in range(len(test)):
    # persistence forecast: predict the last observed value
    predictions.append(history[-1])
    history.append(test[i])
rmse = sqrt(mean_squared_error(test, predictions))
## 136.76131884905664: baseline RMSE of the persistence (lag-1) model
## LSTM model
def timeseries_to_supervised(data, lag=1):
    df = pd.DataFrame(data)
    columns = [df.shift(i) for i in range(1, lag + 1)]
    columns.append(df)
    df = pd.concat(columns, axis=1)
    df.fillna(0, inplace=True)
    return df
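# Illustrative example (added, not in the original run): with lag=1 each row
# pairs the previous observation with the current one, e.g.
#   print(timeseries_to_supervised([1, 2, 3], lag=1))
#        0  0
#   0  0.0  1
#   1  1.0  2
#   2  2.0  3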
#supervised=timeseries_to_supervised(X, 1)
def difference(dataset, interval=1):
    diff = []
    for i in range(interval, len(dataset)):
        value = dataset[i] - dataset[i - interval]
        diff.append(value)
    return pd.Series(diff)
# invert differenced value
def inverse_difference(history, yhat, interval=1):
    return yhat + history[-interval]
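# Illustrative round trip (added): differencing then inverting recovers the
# original values after the first observation, e.g.
#   s = [10, 15, 13]
#   d = difference(s, 1)                # -> pd.Series([5, -2])
#   inverse_difference(s[:2], d[1], 1)  # -> -2 + 15 = 13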
def scale(train, test):
    # fit scaler
    scaler = MinMaxScaler(feature_range=(-1, 1))
    scaler = scaler.fit(train)
    # transform train
    train = train.reshape(train.shape[0], train.shape[1])
    train_scaled = scaler.transform(train)
    # transform test
    test = test.reshape(test.shape[0], test.shape[1])
    test_scaled = scaler.transform(test)
    return scaler, train_scaled, test_scaled
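# Note (added): the scaler is fit on the training split only, so test-set
# statistics never leak into preprocessing; the same fitted scaler then
# transforms both splits.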
def invert_scale(scaler, X, yhat):
    # append the forecast to the input row, invert the scaling, return the last value
    new_row = [x for x in X] + [yhat]
    array = np.array(new_row)
    array = array.reshape(1, len(array))
    inverted = scaler.inverse_transform(array)
    return inverted[0, -1]
def fit_lstm(train, batch_size, nb_epoch, neurons):
    X, y = train[:, 0:-1], train[:, -1]
    X = X.reshape(X.shape[0], 1, X.shape[1])
    model = Sequential()
    model.add(LSTM(neurons, batch_input_shape=(batch_size, X.shape[1], X.shape[2]), stateful=True))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')
    for i in range(nb_epoch):
        model.fit(X, y, epochs=1, batch_size=batch_size, verbose=True, shuffle=False)
        model.reset_states()
    return model
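# Note (added): fitting one epoch at a time with shuffle=False and calling
# reset_states() between epochs lets the stateful LSTM carry its internal
# state across each full pass over the ordered series, while clearing it
# between passes.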
def forecast_lstm(model, batch_size, X):
    X = X.reshape(1, 1, len(X))
    yhat = model.predict(X, batch_size=batch_size)
    return yhat[0, 0]
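# Note (added): with stateful=True the prediction batch size must match the
# batch_input_shape used at training time, hence batch_size=1 throughout.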
series = pd.read_csv('sales-of-shampoo-over-a-three-ye.csv', header=0, delimiter=';')
series['parse'] = '190' + series.Month
series.index = pd.to_datetime(series['parse'])
series.drop(['Month', 'parse'], axis=1, inplace=True)
raw_values = series.values.ravel()  # flatten to a 1-D array of monthly sales
diff_values = difference(raw_values, 1)
# transform data to be supervised learning
supervised = timeseries_to_supervised(diff_values, 1)
supervised_values = supervised.values
# split data into train and test-sets
train, test = supervised_values[0:-12], supervised_values[-12:]
# transform the scale of the data
scaler, train_scaled, test_scaled = scale(train, test)
# fit the model
lstm_model = fit_lstm(train_scaled, 1, 1000, 1)
# forecast the entire training dataset to build up state for forecasting
train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
lstm_model.predict(train_reshaped, batch_size=1)
# walk-forward validation on the test data
predictions = list()
for i in range(len(test_scaled)):
    # make one-step forecast
    X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
    yhat = forecast_lstm(lstm_model, 1, X)
    # invert scaling
    yhat = invert_scale(scaler, X, yhat)
    # invert differencing
    yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
    # store forecast
    predictions.append(yhat)
    expected = raw_values[len(train) + i + 1]
    print('Month=%d, Predicted=%f, Expected=%f' % (i + 1, yhat, expected))
# report performance
rmse = sqrt(mean_squared_error(raw_values[-12:], predictions))
print('Test RMSE: %.3f' % rmse)
# line plot of observed vs predicted
pyplot.plot(raw_values[-12:])
pyplot.plot(predictions)
pyplot.show()
# repeat experiment
repeats = 30
error_scores = list()
for r in range(repeats):
    # fit the model
    lstm_model = fit_lstm(train_scaled, 1, 3000, 4)
    # forecast the entire training dataset to build up state for forecasting
    train_reshaped = train_scaled[:, 0].reshape(len(train_scaled), 1, 1)
    lstm_model.predict(train_reshaped, batch_size=1)
    # walk-forward validation on the test data
    predictions = list()
    for i in range(len(test_scaled)):
        # make one-step forecast
        X, y = test_scaled[i, 0:-1], test_scaled[i, -1]
        yhat = forecast_lstm(lstm_model, 1, X)
        # invert scaling
        yhat = invert_scale(scaler, X, yhat)
        # invert differencing
        yhat = inverse_difference(raw_values, yhat, len(test_scaled) + 1 - i)
        # store forecast
        predictions.append(yhat)
    # report performance
    rmse = sqrt(mean_squared_error(raw_values[-12:], predictions))
    print('%d) Test RMSE: %.3f' % (r + 1, rmse))
    error_scores.append(rmse)
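# Optional summary (added sketch): describe the spread of RMSE across the
# repeats and visualise it with a box plot.
results = pd.DataFrame({'rmse': error_scores})
print(results.describe())
results.boxplot()
pyplot.show()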
# demo: difference the full series and invert it back
# (difference/inverse_difference expect a 1-D sequence, so flatten the DataFrame)
differenced = difference(sales.values.ravel(), 1)
inverted = []
for i in range(len(differenced)):
    value = inverse_difference(sales.values.ravel(), differenced[i], len(sales) - i)
    inverted.append(value)
inverted = pd.Series(inverted)
print(inverted.head())
# demo: scale the raw series to [-1, 1] and invert the scaling
X = sales.values
X = X.reshape(len(X), 1)
scaler = MinMaxScaler(feature_range=(-1, 1))
scaler = scaler.fit(X)
scaled_X = scaler.transform(X)
inverted_X = scaler.inverse_transform(scaled_X)
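# Sanity check (added): inverting the scaling should recover the original
# values up to floating-point error.
print(np.allclose(X, inverted_X))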
########## prepare the RNN inputs: all but the last column are lags, the last column is the target
X, y = train[:, 0:-1], train[:, -1]