Skip to content

Instantly share code, notes, and snippets.

import numpy as np
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout, GRU, Bidirectional
from keras.optimizers import SGD
import math
from sklearn.metrics import mean_squared_error
@ashwinprasadme
ashwinprasadme / 2
Last active December 15, 2020 10:48
# Root-mean-squared error between targets and predictions over the test span.
test_len = testX.size()[0]
np.sqrt(np.mean((dataY_plot[-test_len:] - data_predict[-test_len:]) ** 2))
@ashwinprasadme
ashwinprasadme / 2
Last active January 11, 2021 11:08
def normalize_data(df):
    """Min-max scale the OHLC price columns of *df* to [0, 1], in place.

    Each of the 'open', 'high', 'low' and 'close' columns is rescaled
    independently (the scaler is re-fit per column), mirroring the
    original per-column fit_transform calls.

    Parameters: df — pandas DataFrame with 'open'/'high'/'low'/'close' columns.
    Returns: the same DataFrame, mutated, for call-chaining convenience.
    """
    # Bug fix: the original called sklearn.preprocessing.MinMaxScaler(),
    # but only `MinMaxScaler` itself is imported at the top of the file —
    # the bare name `sklearn` raised NameError at call time.
    scaler = MinMaxScaler()
    for col in ('open', 'high', 'low', 'close'):
        # reshape(-1, 1): sklearn scalers require a 2-D (n_samples, 1) array.
        df[col] = scaler.fit_transform(df[col].values.reshape(-1, 1))
    return df
@ashwinprasadme
ashwinprasadme / 2
Last active December 14, 2020 16:45
# ----- Build the test data set -----
# Include the last 60 scaled points before the train/test split so the very
# first test sample still has a full 60-step look-back window.
test_data = scaled_data[training_data_len - 60:, :]
# Ground-truth targets: the raw (unscaled) values past the training cut-off.
y_test = dataset[training_data_len:, :]
# Each test sample is the 60 preceding scaled values (sliding window).
x_test = [test_data[i - 60:i, 0] for i in range(60, len(test_data))]
# Convert the data to a numpy array
######Prediction###############
# Run the trained LSTM over the full input tensor in inference mode and
# bring the results back to NumPy for plotting/metric computation.
lstm.eval()  # switch off training-only behavior (e.g. dropout) for inference
train_predict = lstm(dataX.to(device))  # forward pass on the model's device
# Detach to CPU and convert to a NumPy array (drops the autograd graph).
data_predict = train_predict.cpu().data.numpy()
dataY_plot = dataY.data.numpy()  # targets as NumPy for comparison
## Inverse Normalize
# Map predictions back to original units by undoing the scaling.
# NOTE(review): assumes `scaler` was fit on data with the same column
# layout as `data_predict` — confirm against the training pipeline.
data_predict = scaler.inverse_transform(data_predict)
## finding all columns that have nan:
# Indices (among the first 7 columns) of every column containing any NaN.
droping_list_all = [j for j in range(7) if df.iloc[:, j].isnull().any()]
droping_list_all
@ashwinprasadme
ashwinprasadme / 1
Last active December 15, 2020 11:07
# Keep only the Google ('GOOG') rows, then drop the now-constant ticker column.
df = df[df.symbol == 'GOOG']
# Bug fix: the original `df.drop(['symbol'], 1, inplace=True)` passed `axis`
# positionally, which was deprecated in pandas 1.0 and raises TypeError in
# pandas >= 2.0; `columns=` is the explicit, supported spelling.
df.drop(columns=['symbol'], inplace=True)
df.head()