Skip to content

Instantly share code, notes, and snippets.

@chaidhat
Created January 14, 2021 02:54
Show Gist options
  • Save chaidhat/4db5a3f6deb22e156d9cac03cada8f51 to your computer and use it in GitHub Desktop.
Linear classification ML/AI - 100% my code by Chaidhat Chaimongkol
import math # this library helps with mathematics
import matplotlib.pyplot as plt # this library helps visual plotting
import random # this library generates random numbers
#
# Linear Classification AI by Chaidhat Chaimongkol. 100% my code.
#
# Build a 2-D labelled dataset: random points in [-4, 4] x [-4, 4],
# labelled +1 when x > 0 and -1 otherwise (a vertical decision boundary).
data = []
dataPointRange = 100  # how many data points there should be
for _ in range(dataPointRange):
    x = random.uniform(-4, 4)  # random x coordinate
    y = random.uniform(-4, 4)  # random y coordinate
    # label: +1 for the right half-plane, -1 for the left
    val = 1 if x > 0 else -1
    data.append([x, y, val])
# Loss history for the profiler plot, appended to once per epoch.
lossFormattedY = []
# Initialize the profiler figure (loss over time)
fig2, ax2 = plt.subplots(1)
fig2.suptitle("Profiler")
# Initialize the main visualization figure
fig1, ax1 = plt.subplots(1)
fig1.suptitle("Artificial Intelligence by Chaidhat Chaimongkol")
ax1.set_xlabel("feature x0")
ax1.set_ylabel("feature x1")
ax2.set_xlabel("time (epochs)")
ax2.set_ylabel("loss")
fig1.tight_layout()
# Heatmap image used to visualize the model's confidence over the plane;
# starts empty and is filled via image.set_data() each epoch.
plotFiness = 20  # how fine the heatmap is (cells per axis)
plotSize = 4     # how large the heatmap is (half-width of the region)
image = ax1.imshow(
    [[]],
    cmap="seismic",
    interpolation="nearest",
    extent=(-plotSize, plotSize, -plotSize, plotSize),
    vmin=-3,
    vmax=3,
)
# Randomly initialize the model weights for the linear score
# z = w0*x + w1*y (no bias term), drawn from [-5, 0].
w0, w1 = random.uniform(-5, 0), random.uniform(-5, 0)
learningRate = 0.010  # step size for gradient descent
maxEpochs = 500       # maximum number of training epochs
for epoch in range(maxEpochs):  # one gradient-descent step per epoch
    # Accumulators: total squared error and the gradient of the loss
    # with respect to each weight, summed over the whole batch.
    loss = 0
    dW0 = 0
    dW1 = 0
    for dataPoint in data:
        dataPointX = dataPoint[0]  # x coordinate of point
        dataPointY = dataPoint[1]  # y coordinate of point
        # linear model prediction: z = w0*x + w1*y (y = mx + c form)
        predictedValue = w0 * dataPointX + w1 * dataPointY
        actualValue = dataPoint[2]  # the point's label (+1 or -1)
        error = actualValue - predictedValue
        # loss = SUM OF (actual - predicted) ^ 2 (Mean Squared Error)
        loss += error ** 2
        # d(error^2)/dw0 = -2 * x * error, likewise for w1
        dW0 += -2 * dataPointX * error
        dW1 += -2 * dataPointY * error
    # Average the gradient and flip its sign so it points toward the
    # local minimum (descent direction).
    dW0 *= -1 / len(data)
    dW1 *= -1 / len(data)
    # Take a gradient-descent step on the weights.
    w0 += dW0 * learningRate
    w1 += dW1 * learningRate
    # ---- per-epoch diagnostics ----
    print("")
    print("epoch", epoch)
    print("loss", loss / len(data))
    print("dW0", dW0)
    print("dW1", dW1)
    print("w0", w0)
    print("w1", w1)
    # Split the data points by label for color-coded plotting.
    dataFormattedX0 = []  # x coords of class +1
    dataFormattedY0 = []  # y coords of class +1
    dataFormattedX1 = []  # x coords of class -1
    dataFormattedY1 = []  # y coords of class -1
    for i in data:
        if i[2] > 0:
            dataFormattedX0.append(i[0])
            dataFormattedY0.append(i[1])
        else:
            dataFormattedX1.append(i[0])
            dataFormattedY1.append(i[1])
    # Plot the data points (red = +1 class, blue = -1 class).
    ax1.plot(dataFormattedX0, dataFormattedY0, 'ro')
    ax1.plot(dataFormattedX1, dataFormattedY1, 'bo')
    plt.pause(0.05)
    # Rebuild the confidence heatmap: evaluate the model on a
    # plotFiness x plotFiness grid covering the displayed region.
    # FIX: the grid now spans [-plotSize, plotSize] on both axes to
    # match the imshow extent; previously it only covered
    # [-plotSize/2, plotSize/2], so the heatmap showed a quarter of
    # the region stretched over the whole image.
    confidencePlot = []
    for yCell in range(plotFiness):
        row = []
        confidencePlot.append(row)
        for xCell in range(plotFiness):
            xPlot = ((xCell / plotFiness) * 2 * plotSize) - plotSize
            yPlot = plotSize - ((yCell / plotFiness) * 2 * plotSize)
            row.append(w0 * xPlot + w1 * yPlot)
    image.set_data(confidencePlot)
    plt.draw()
    # Update the profiler with this epoch's loss.
    lossFormattedX = list(range(1, epoch + 2))  # epoch numbers 1..epoch+1
    lossFormattedY.append(loss)
    ax2.plot(lossFormattedX, lossFormattedY, "r-")
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment