@fredriccliver
Last active January 3, 2019
The easiest way to understand gradient descent
#%% making dummy data ----------------------------------------
from random import randint
import math
import matplotlib.pyplot as plt
import numpy as np

def get_biased_point_randomly():
    # y follows x, with noise of up to +-20 alternating in sign
    x = randint(1, 100)
    y = x + randint(1, 20) * pow(-1, x)
    y = math.floor(y)
    return [x, y]

points = []
for i in range(100):
    points.append(get_biased_point_randomly())
points = np.asarray(points)
points
#%% plotting dummy data ----------------------------------------
def draw_points():
    plt.scatter(points.T[0], points.T[1])

draw_points()
#%%
b = randint(-100, 100)
b = 0  # if you also want to learn 'b', skip this line
w = randint(-2, 2)

def draw_regLine():
    # two endpoints of the line y = w*x + b, over x in [0, 100]
    linear_line = np.asarray([
        [0, b],
        [100, 100 * w + b]
    ])
    plt.plot(linear_line.T[0], linear_line.T[1])

draw_points(); draw_regLine()
#%% calculate loss ----------------------------------------
# ex : estimated y (w * x)
# rx : real y
def calLoss(w):
    loss_arr = []
    for point in points:
        rx = point[1]
        ex = w * point[0]
        loss_arr.append(ex - rx)
    # squared mean error (not the usual mean squared error,
    # but it is still convex in w and simple to follow)
    return math.floor(
        pow(sum(loss_arr) / len(points), 2)
    )

print('loss is : ', calLoss(w))
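#%% (extra) vectorized loss ----------------------------------------
# Not in the original gist: a NumPy sketch of the same loss, assuming
# 'points' is the (100, 2) array built above. It should agree with
# calLoss(w) up to the floor().
def calLoss_vec(w):
    errors = w * points.T[0] - points.T[1]    # ex - rx for every point
    return math.floor(pow(errors.mean(), 2))  # squared mean error

print('vectorized loss is : ', calLoss_vec(w))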
#%% manually updating w ----------------------------------------
w = w + .1
draw_points(); draw_regLine()
plt.title(str(calLoss(w)))
#%% auto gradient descent ----------------------------------------
# the gradient is (delta loss) over (delta w), estimated here
# with a central finite difference
delta = .1
learning_rate = .0001

def get_gradient():
    return (calLoss(w + delta) - calLoss(w - delta)) / (2 * delta)

print(get_gradient())
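#%% (extra) analytic gradient check ----------------------------------------
# Not in the original gist: ignoring the floor(), the loss above is
# L(w) = (w*mean(x) - mean(y))^2, so its exact derivative is
# dL/dw = 2 * (w*mean(x) - mean(y)) * mean(x). Comparing it with the
# finite-difference estimate is a quick sanity check; the two will
# differ slightly because of the floor().
def get_gradient_analytic():
    mx = points.T[0].mean()
    my = points.T[1].mean()
    return 2 * (w * mx - my) * mx

print('numeric :', get_gradient())
print('analytic:', get_gradient_analytic())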
#%% auto gradient descent ----------------------------------------
# if you repeat this block cell, w will be adjusted automatically
w = w - (learning_rate * get_gradient())
draw_points(); draw_regLine()
plt.title("Loss:" + str(calLoss(w)) + " " + "gradient:" + str(get_gradient()))
#%% draw loss=f(w) graph ----------------------------------------
# this is not a continuous curve; the loss is evaluated
# numerically at discrete values of w
loss_coordinates = []
i = -10
while i < 10:
    loss_coordinates.append([i, calLoss(i)])
    i = i + .1
loss_coordinates = np.asarray(loss_coordinates)
plt.plot(loss_coordinates.T[0], loss_coordinates.T[1])
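#%% (extra) minimum of loss=f(w) ----------------------------------------
# Not in the original gist: since L(w) = (w*mean(x) - mean(y))^2 up to
# the floor(), the curve above bottoms out near w* = mean(y) / mean(x).
w_star = points.T[1].mean() / points.T[0].mean()
plt.plot(loss_coordinates.T[0], loss_coordinates.T[1])
plt.axvline(w_star, color='red')
plt.title("minimum near w = " + str(w_star))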
fredriccliver commented Jan 3, 2019

[Bug fix]
Changed print(gradient) to print(get_gradient())

fredriccliver commented Jan 3, 2019

[Section added]
Added the "#%% draw loss=f(w) graph" section

@fredriccliver

No further edits will be made.
See the other file, made as an ipynb: https://gist.github.com/fredriccliver/0e5c79a2c0c277de332aa85bf686312a
