Skip to content

Instantly share code, notes, and snippets.

View Chim-SO's full-sized avatar

Chim SO Chim-SO

View GitHub Profile
# Other metrics:
train_pred = model.predict(x_train)
val_pred = model.predict(validation['x'])
test_pred = model.predict(test['x'])

# Report MedAE and MAPE for each split as a small tab-separated table.
print("Displaying other metrics:")
print("\t\tMedAE\tMAPE")
for label, y_true, y_pred in (
        ("Train:", y_train, train_pred),
        ("Val :", validation['y'], val_pred),
        ("Test :", test['y'], test_pred)):
    medae = round(median_absolute_error(y_true, y_pred), 3)
    mape = round(mean_absolute_percentage_error(y_true, y_pred), 3)
    print(f"{label}\t{medae}\t{mape}")
# Reproducibility: seed every RNG the training pipeline touches.
from numpy.random import seed
seed(1)

# BUG FIX: the original did `from tensorflow import random, config` and
# later `import random`, silently shadowing the TensorFlow module with
# the stdlib one (the code only worked because of statement ordering).
# Importing tensorflow under an alias removes the name clash entirely.
import tensorflow as tf
tf.random.set_seed(1)
# Make TF ops deterministic (at some performance cost).
tf.config.experimental.enable_op_determinism()

import random
random.seed(2)

# Use the public `tensorflow.keras` API instead of the private
# `tensorflow.python.keras` path, which is unsupported and has diverged
# from the real Keras implementation in recent TF releases.
from tensorflow.keras import Input
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.metrics import median_absolute_error, mean_absolute_percentage_error
from numpy import arange
@Chim-SO
Chim-SO / model_evaluation.py
Last active November 16, 2022 12:52
Evaluate a Keras model.
# Evaluate on test set:
# evaluate() returns [loss, metric...] in the order given to compile().
test = pd.read_csv('dataset/regression/cosine/test.csv')
results = model.evaluate(test['x'], test['y'], verbose=1)
print(f"Test set: - loss: {results[0]} - mae: {results[1]}")
# Display function:
# Scatter the raw samples, then overlay the model's prediction curve in red.
plt.scatter(dataset['x'], dataset['y'])
xs = arange(0, 25, 0.1).tolist()
plt.plot(xs, model.predict(xs), c='red')
plt.xlabel('x')
plt.ylabel('y')
plt.show()
@Chim-SO
Chim-SO / learning_curves.py
Created November 1, 2022 16:06
Display the learning curves using model history (returned by the fit() function of Keras).
# Display loss:
# Learning curves: training vs. validation loss per epoch.
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
# BUG FIX: the second curve is 'val_loss' (from validation_data passed
# to fit()), not the test set — label it 'validation', not 'test'.
plt.legend(['train', 'validation'])
plt.show()
# Display metric:
# Train:
epochs = 1750
x_train = train['x']
y_train = train['y']
# MSE loss optimised with Adam; MAE tracked as an extra metric.
model.compile(loss='mse', optimizer='adam', metrics=['mae'])
# NOTE(review): x_val / y_val are not defined anywhere in this file —
# presumably validation['x'] / validation['y']; confirm before running.
history = model.fit(x_train, y_train, epochs=epochs, batch_size=64, verbose=1, validation_data=(x_val, y_val))
# Create model:
# Simple MLP regressor: 1 input -> three 200-unit sigmoid layers -> 1 linear output.
model = Sequential()
model.add(Input(shape=(1,)))
for _ in range(3):
    model.add(Dense(200, activation='sigmoid'))
model.add(Dense(1, activation='linear'))
print(model.summary())
@Chim-SO
Chim-SO / data_split.py
Last active November 1, 2022 19:33
Split a dataset into train and validation sets.
def split_dataset(dataset, train_frac=0.7, random_state=None):
    """Randomly split a DataFrame into train and validation parts.

    Args:
        dataset: pandas DataFrame to split.
        train_frac: fraction of rows sampled into the training split.
        random_state: optional seed forwarded to DataFrame.sample so the
            split is reproducible; None keeps the original (nondeterministic)
            behavior, so existing callers are unaffected.

    Returns:
        (train, val) tuple of DataFrames. val holds the rows not sampled
        into train, so the two are disjoint and together cover the dataset.
    """
    train = dataset.sample(frac=train_frac, random_state=random_state)
    val = dataset.drop(train.index)
    return train, val
# Split dataset into train and validation:
train, validation = split_dataset(dataset, train_frac=0.7)
# Visualise the split: training points in blue, validation points in red.
for subset, colour in ((train, 'blue'), (validation, 'red')):
    plt.scatter(subset['x'], subset['y'], c=colour, alpha=0.4)
plt.xlabel('x')
@Chim-SO
Chim-SO / data_preparation.py
Last active November 16, 2022 12:51
Data preparation.
# Read dataset:
dataset = pd.read_csv('dataset/regression/cosine/train.csv')
n_rows = len(dataset.index)
print(f"There are {n_rows} instances.")
print(dataset.head())
# Quick visual sanity check of the raw data.
plt.scatter(dataset['x'], dataset['y'])
plt.xlabel('x')
plt.ylabel('y')
plt.show()