@CYHSM
Last active November 21, 2022 07:50
Build a neural network that predicts the Euclidean distance to a goal object from an XY location
# Standard imports
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# Keras
from keras.layers import Dense
from keras.models import Model, Input
import keras.optimizers as opt
# Euclidean distance (used here both to generate the labels and as an error measure)
def euclidean_loss(y_pred, y_true):
    euc_loss = np.sqrt(np.sum(np.power(y_pred - y_true, 2), axis=-1))
    return euc_loss

# Simple regression model that takes the XY position as input and predicts the Euclidean distance to the goal object
def create_model(num_hidden=10, act_hidden='linear'):
    input_layer = Input(shape=(2,))
    hidden = Dense(units=num_hidden, activation=act_hidden)(input_layer)
    out = Dense(units=1)(hidden)
    model = Model(inputs=input_layer, outputs=out)
    model.compile(opt.SGD(lr=0.001), loss='mean_squared_error', metrics=['mae'])
    return model
# Create X,Y coordinates and put a goal object at [1,5]
num_points = 200
x,y = np.meshgrid(np.linspace(0,10, num_points), np.linspace(0,10, num_points))
x, y = x.flatten(), y.flatten()
model_input = np.array([x,y]).transpose()
model_output = euclidean_loss(model_input, np.array([1,5]))
X_train, X_test, y_train, y_test = train_test_split(model_input, model_output, test_size=0.25, random_state=42)
# Show test data
plt.plot(X_test[:,0],X_test[:,1],'k.')
plt.show()
# Run tests
nn_results = dict()
for act_hidden in ['relu', 'linear']:
    for num_hidden in [1, 2, 3, 4, 5, 10, 100, 1000]:
        nn_results[(act_hidden, num_hidden)] = []
        for rr in range(0, 5):  # Repeat each configuration to gauge run-to-run variability
            model = create_model(num_hidden=num_hidden, act_hidden=act_hidden)
            history = model.fit(x=X_train, y=y_train, validation_data=(X_test, y_test), epochs=50, verbose=0)
            print('Num hidden {}, Validation loss {}'.format(num_hidden, history.history['val_loss'][-1]))
            nn_results[(act_hidden, num_hidden)].append(history.history['val_loss'][-1])
# Plot results
nn_tests = pd.DataFrame(nn_results).melt(var_name=['Activation Function', 'Number of hidden Units'], value_name='Validation Loss')
sns.boxplot(x='Number of hidden Units', y='Validation Loss', hue='Activation Function', data=nn_tests)
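Worth noting when reading the boxplot: with a linear hidden activation the two Dense layers collapse into a single affine map, so adding hidden units does not enlarge the function class the linear models can fit. A minimal numpy sketch of that identity (random weights, purely illustrative):

import numpy as np

# Two stacked linear layers are equivalent to one affine map:
# W2 @ (W1 @ x + b1) + b2 == (W2 @ W1) @ x + (W2 @ b1 + b2)
rng = np.random.default_rng(0)
W1, b1 = rng.normal(size=(100, 2)), rng.normal(size=100)  # 100 "hidden units"
W2, b2 = rng.normal(size=(1, 100)), rng.normal(size=1)

x = rng.normal(size=2)
deep = W2 @ (W1 @ x + b1) + b2
flat = (W2 @ W1) @ x + (W2 @ b1 + b2)
print(np.allclose(deep, flat))  # True: the 100-unit linear network is just one plane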

CYHSM commented Nov 23, 2019

If somebody wants to play around with the code: Open In Colab


CYHSM commented Nov 23, 2019

Script output:
[figure "validation_loss_inputrepresentation": validation loss vs. number of hidden units, for ReLU and linear activations]


CYHSM commented Nov 23, 2019

Weights and biases for the linear activation function with one hidden unit:

# First layer to hidden layer
W1 = [0.464, 0.005]
b1 = 0.632
# Hidden layer to output layer
W2 = 1.59
b2 = 0.414

which has a loss of around 1 in the 10x10 box.
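To sanity-check that number, here is a minimal sketch using the reported weights. It relies on the same affine-collapse identity noted above (with one linear hidden unit the network computes W2 * (W1 · xy + b1) + b2), and since the comment doesn't say whether "loss" refers to the MSE training loss or the MAE metric the model tracked, it prints both:

import numpy as np

# Parameters reported above (linear activation, one hidden unit)
W1 = np.array([0.464, 0.005])
b1 = 0.632
W2 = 1.59
b2 = 0.414

# Same 10x10 grid and goal object at [1, 5] as in the script
x, y = np.meshgrid(np.linspace(0, 10, 200), np.linspace(0, 10, 200))
xy = np.stack([x.flatten(), y.flatten()], axis=1)
pred = W2 * (xy @ W1 + b1) + b2
true = np.sqrt(np.sum((xy - np.array([1, 5])) ** 2, axis=-1))

print('MSE:', np.mean((pred - true) ** 2))
print('MAE:', np.mean(np.abs(pred - true)))  # compare with the "loss of around 1" above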
