@Gumball12
Created January 20, 2020 14:45
200120 - Deep Learning
# import module
import numpy as np
# data
#                0     1    2     3     4
y = np.array([0.13, 0.64, 0.0, 0.05, 0.52])  # prediction label
t = np.array([0,    1,    0,   0,    0])     # correct label (one-hot)
w = np.array([0,    0,    1,   0,    0])     # wrong label (one-hot)
"""
"""
# mean squared error
def mean_squared_error(y, t):
    """Calculate MSE.

    Args:
        y (numpy.ndarray): prediction label array
        t (numpy.ndarray): correct label array

    Returns:
        float: MSE
    """
    # E = (1/2) * sum_k (y_k - t_k)^2
    return 1 / 2 * np.sum((y - t) ** 2)

print(mean_squared_error(y, t))  # 1 => error: 0.2097
print(mean_squared_error(y, w))  # 2 => error: 0.8497
# 0.2097 < 0.8497 ==> answer: 1
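
# Added sanity check (not in the original gist): the error drops to 0 when
# the prediction matches the one-hot label exactly.
print(mean_squared_error(t, t))  # 0.0 => perfect prediction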
"""
"""
# cross entropy error
def cross_entropy_error(y, t):
    """Calculate CEE.

    Args:
        y (numpy.ndarray): prediction label array
        t (numpy.ndarray): correct label array

    Returns:
        float: CEE result
    """
    # E = -sum_k t_k * log(y_k); delta keeps log() away from log(0) = -inf
    delta = 1e-7  # small constant (1e-7 = 0.0000001)
    return -np.sum(t * np.log(y + delta))

print(cross_entropy_error(y, t))  # 0.4462 ...
print(cross_entropy_error(y, w))  # 16.118 ...
# 0.4462 < 16.118 ==> answer: 1
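
# Added sketch showing why delta matters: without it, a 0 probability on
# the target class sends np.log(0) to -inf and the error blows up.
print(-np.sum(w * np.log(y)))  # inf (plus a divide-by-zero RuntimeWarning)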
"""
"""
def numerical_differential(f, x):
    """Numerical differentiation using the central difference.

    Args:
        f (function): function to differentiate
        x (float): position x on f at which to differentiate

    Returns:
        float: numerical differentiation result
    """
    h = 1e-4  # 0.0001
    return (f(x + h) - f(x - h)) / (2 * h)

print(numerical_differential(lambda x: 10 * x ** 2, 2))  # 40.00000000001336
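
# Added cross-check: the analytic derivative of 10x^2 is 20x, so the value
# above should sit within floating-point error of 20 * 2 = 40.
print(abs(numerical_differential(lambda x: 10 * x ** 2, 2) - 20 * 2))  # ~1.3e-11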
"""
"""
def numerical_gradient(f, x):
    """Gradient of a multivariable function via partial differentiation.

    Args:
        f (function): multivariable function to partially differentiate
        x (numpy.ndarray): variable values (float array)

    Returns:
        numpy.ndarray: partial derivative for each variable
    """
    h = 1e-4  # 0.0001
    grad = np.zeros_like(x)  # vector of partial derivatives

    for ind in range(x.size):
        tmp_x = x[ind]

        # f(x + h)
        x[ind] = tmp_x + h  # substitute the shifted value
        fxh1 = f(x)

        # f(x - h)
        x[ind] = tmp_x - h  # substitute the shifted value
        fxh2 = f(x)

        # central difference for this variable
        grad[ind] = (fxh1 - fxh2) / (2 * h)
        x[ind] = tmp_x  # restore

    return grad

print(numerical_gradient(
    lambda x: 10 * x[0] + 20 * x[1],
    np.array([3.0, 4.0])
))  # [10. 20.]
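
# Added sketch: for a nonlinear function the gradient depends on the point
# of evaluation; analytically, grad(x0^2 + x1^2) = (2*x0, 2*x1).
print(numerical_gradient(
    lambda x: x[0] ** 2 + x[1] ** 2,
    np.array([3.0, 4.0])
))  # [6. 8.]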
"""
"""
def grad_descent(f, init_x, lr=0.01, step=1000):
    """Run gradient descent.

    Args:
        f (function): function on which to run gradient descent
        init_x (numpy.ndarray): starting position x on function f
        lr (float): learning rate
        step (int): number of iterations (epochs)

    Returns:
        numpy.ndarray: training result (x after the final step)
    """
    x = init_x

    for i in range(step):
        grad = numerical_gradient(f, x)
        x -= lr * grad  # move against the gradient

    return x

print(
    # np.sum() makes f scalar-valued, as numerical_gradient expects
    grad_descent(lambda x: np.sum(x ** 2), np.array([10.0]), lr=0.01, step=1000)
    # 100 steps: 1.32619556, 1000 steps: 1.68296736e-08
)
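
# Added sketch of learning-rate sensitivity (illustrative values): for
# f(x) = x^2 each update is x <- (1 - 2 * lr) * x, so a large lr
# overshoots and diverges while a tiny lr barely moves.
print(grad_descent(lambda x: np.sum(x ** 2), np.array([10.0]), lr=10.0, step=100))  # diverges (huge magnitude)
print(grad_descent(lambda x: np.sum(x ** 2), np.array([10.0]), lr=1e-5, step=100))  # ~[9.98], barely moved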