@endolith
Last active February 6, 2024 19:13
Single neuron experiments

My first Keras experiments:

  1. simplest.py — A single neuron with no activation function and no bias (just input→weight→output, so it can only represent y=mx). Plots the function for a few random weight initializations; nothing is actually trained.
  2. linear with bias.py — A single neuron with no activation function but with bias, so it can learn y=mx+b functions, such as the relationship between Celsius and Fahrenheit.
  3. single_neuron_buffer.py — A single neuron with a tanh (sigmoid-shaped) activation and no bias, followed by an output weight, trained to learn a digital logic buffer.
  4. single_neuron_inverter.py — The same architecture, trained to learn an inverting analog amplifier.
"""
Train a single neuron with bias to learn the relationship between °C and °F.
This works, but is extremely slow to converge.
"""
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
degC = [-40, -20, 10, 40, 60]
degF = [-40, -4, 50, 104, 140]
model = Sequential([Dense(units=1, input_dim=1, use_bias=True)])
model.compile(loss='mean_squared_error', optimizer='adam')
X_train = np.array(degC, ndmin=2).T
Y_train = np.array(degF, ndmin=2).T
model.fit(X_train,
          Y_train,
          epochs=70000,
          verbose=0,  # faster without printing constantly
          )
# Plot after fitting
x_plot = np.linspace(-100, 100, 3)
plt.plot(degC, degF, 'o', label='training data')
plt.plot(x_plot, model.predict(x_plot.reshape(-1, 1)), label='trained')
plt.grid(True)
plt.xlabel('Input value')
plt.ylabel('Output value')
plt.legend()
print(f'Slope: {model.get_weights()[0][0][0]}')
print(f'Offset: {model.get_weights()[1][0]}')
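# Sanity check (not part of the fit): the exact mapping is F = 1.8*C + 32,
# so the slope printed above should approach 1.8 and the offset 32.
print([1.8 * c + 32 for c in degC])  # -> [-40.0, -4.0, 50.0, 104.0, 140.0]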
"""
Create a keras network that consists only of a single neuron without bias,
initialize it a few times and plot its function.
(Actually, the API for re-initializing weights keeps changing, so I changed
this to just create a new model each time; a clone_model alternative is
sketched after this script. See
https://github.com/keras-team/keras/issues/341, etc.)
Created on Fri Jan 19 22:00:06 2018
"""
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
x = np.linspace(-2, +2, 300)
for n in range(4):
    model = Sequential([Dense(units=1, input_dim=1, use_bias=False)])
    model.compile(loss='mean_squared_error', optimizer='sgd')
    plt.plot(x, model.predict(x.reshape(-1, 1)),
             label=f'w={model.get_weights()[0][0][0]:.4f}')
plt.legend()
plt.grid(True)
# Working version: https://github.com/endolith/ann-visualizer
from ann_visualizer.visualize import ann_viz
ann_viz(model, title="Learned single neuron", view=True)
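# Alternative to rebuilding the model each iteration (untested sketch,
# assuming TF 2.x): clone_model() rebuilds the architecture and re-runs
# the weight initializers, giving fresh random weights each time.
from tensorflow.keras.models import clone_model
fresh = clone_model(model)
fresh.compile(loss='mean_squared_error', optimizer='sgd')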
"""
Train a single neuron with a tanh (sigmoid-shaped) activation and no bias,
followed by an output weight.
This example is a digital logic buffer, so the tanh has to become
"squeezed" horizontally to mimic a sudden transition.
Created on Fri Jan 19 22:00:06 2018
"""
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras import optimizers
# Reuse the model from a previous run if it exists in the session;
# otherwise build it fresh.
try:
    print(model.summary())
except NameError:
    model = Sequential([
        Dense(1, input_shape=(1,), use_bias=False),
        Activation('tanh'),
        Dense(1, use_bias=False),
    ])
    print(model.summary())
sgd = optimizers.SGD(learning_rate=1.0)  # 'lr' was renamed to 'learning_rate'
model.compile(loss='mean_squared_error', optimizer=sgd)
weights = model.get_weights()
w0 = weights[0][0][0]
w1 = weights[1][0][0]
print(f'Neural net initialized with weights w0: {w0:.2f}, w1: {w1:.2f}')
# Plot default function with no weighting
x_plot = np.linspace(-2, +2, 1000)
plt.plot(x_plot, model.predict(x_plot.reshape(-1, 1)), label='init')
plt.grid(True)
# Fit the model to a (normalized, split supply) logic buffer gate.
v = [
(-1.1, -1),
(-1.0, -1),
(-0.7, -1),
(-0.1, -1),
(+0.2, +1),
(+0.5, +1),
(+1.0, +1),
(+1.1, +1),
]
x = [vv[0] for vv in v]
y = [vv[1] for vv in v]
plt.plot(x, y, '.', label='buffer data')
X_train = np.array(x, ndmin=2).T
Y_train = np.array(y, ndmin=2).T
model.fit(X_train,
          Y_train,
          epochs=500,
          # verbose=0,
          # callbacks=[history]
          )
# Plot after fitting
plt.plot(x_plot, model.predict(x_plot.reshape(-1, 1)), label='trained buffer')
plt.legend()
# Working version: https://github.com/endolith/ann-visualizer
from ann_visualizer.visualize import ann_viz
ann_viz(model, title="Learned single neuron", view=True)
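# The whole network computes out = w1 * tanh(w0 * x); for a buffer the
# learned w0 becomes large, squeezing the tanh into a near-step. A small
# check that the two weights reproduce the model's output (assumes the
# two-weight architecture above):
w0, w1 = model.get_weights()[0][0][0], model.get_weights()[1][0][0]
x_check = np.linspace(-2, 2, 5)
print(np.allclose(w1 * np.tanh(w0 * x_check),
                  model.predict(x_check.reshape(-1, 1)).ravel(), atol=1e-4))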
"""
Train a single neuron with a tanh (sigmoid-shaped) activation and no bias,
followed by an output weight.
This example is an inverting amplifier, so the tanh has to become very
"stretched out" and the middle transition region acts as the amplifier.
Created on Fri Jan 19 22:00:06 2018
"""
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras import optimizers
# Reuse the model from a previous run if it exists in the session;
# otherwise build it fresh.
try:
    print(model.summary())
except NameError:
    model = Sequential([
        Dense(1, input_shape=(1,), use_bias=False),
        Activation('tanh'),
        Dense(1, use_bias=False),
    ])
    print(model.summary())
sgd = optimizers.SGD(learning_rate=1.0)  # 'lr' was renamed to 'learning_rate'
model.compile(loss='mean_squared_error', optimizer=sgd)
weights = model.get_weights()
w0 = weights[0][0][0]
w1 = weights[1][0][0]
print(f'Neural net initialized with weights w0: {w0:.2f}, w1: {w1:.2f}')
# Plot default function with no weighting
x_plot = np.linspace(-2, +2, 1000)
plt.plot(x_plot, model.predict(x_plot.reshape(-1, 1)), label='init')
plt.grid(True)
# Fit the model to an inverting amplifier
v = [
(-1.1, +1.1),
(-1.0, +1.0),
(-0.7, +0.7),
(-0.1, +0.1),
(+0.2, -0.2),
(+0.5, -0.5),
(+1.0, -1.0),
(+1.1, -1.1),
]
x = [vv[0] for vv in v]
y = [vv[1] for vv in v]
plt.plot(x, y, '.', label='amplifier data')
X_train = np.array(x, ndmin=2).T
Y_train = np.array(y, ndmin=2).T
model.fit(X_train,
          Y_train,
          epochs=500,
          # verbose=0,
          # callbacks=[history]
          )
# Plot after fitting
plt.plot(x_plot, model.predict(x_plot.reshape(-1, 1)), label='trained amplifier')
plt.legend()
# Working version: https://github.com/endolith/ann-visualizer
from ann_visualizer.visualize import ann_viz
ann_viz(model, title="Learned single neuron", view=True)
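# Since tanh(w0*x) ~= w0*x for small x, the small-signal gain of
# out = w1 * tanh(w0 * x) around zero is w0 * w1, which should approach
# -1 for this unity-gain inverter (assumes the architecture above):
w0, w1 = model.get_weights()[0][0][0], model.get_weights()[1][0][0]
print(f'Small-signal gain: {w0 * w1:.3f}')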
endolith commented Nov 25, 2022

Simplest possible network with random weights:

[plot: simplest]

Linear with bias output:

[plot: temp]

Trained against logic buffer:

[plot: trained buffer]

Trained against inverting amplifier:

[plot: trained amplifier]
