@chiangqiqi
Created April 16, 2024 12:40
Hand-rolled algorithm: backpropagation from scratch in NumPy
#!/usr/bin/env python3
import numpy as np


class Dense:
    def __init__(self, input_width, width):
        self._W = np.random.normal(size=(input_width, width))
        self._b = np.random.normal(size=(1, width))
        self._X = None

    def forward(self, X):
        self._X = X
        return X @ self._W + self._b

    def backward(self, grad):
        # grad is the gradient back-propagated from the upstream node.
        # Chain rule: dloss/dW = dy/dW * dloss/dy = X^T @ grad
        #             dloss/db = grad summed over the batch dimension
        #             dloss/dX = dloss/dy * dy/dX = grad @ W^T
        self._grad_w = self._X.T @ grad          # shape = (input_width, width)
        self._grad_b = grad.sum(axis=0, keepdims=True)
        return grad @ self._W.T                  # shape = (N, input_width)

    def update(self, lr=0.01):
        self._W -= lr * self._grad_w
        self._b -= lr * self._grad_b
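# Illustrative addition (not in the original gist; the helper name is my own): a
# finite-difference check for Dense.backward. With loss L = 0.5 * sum(y**2), the
# upstream gradient dL/dy is y itself, so the analytic dL/dW computed by backward()
# can be compared against a numeric estimate for one weight entry.
def _check_dense_grad(eps=1e-6):
    layer = Dense(4, 3)
    X = np.random.normal(size=(2, 4))
    y = layer.forward(X)
    layer.backward(y)                    # stores dL/dW in layer._grad_w
    analytic = layer._grad_w[0, 0]
    layer._W[0, 0] += eps
    loss_plus = 0.5 * np.sum((X @ layer._W + layer._b) ** 2)
    layer._W[0, 0] -= 2 * eps
    loss_minus = 0.5 * np.sum((X @ layer._W + layer._b) ** 2)
    layer._W[0, 0] += eps                # restore the original weight
    numeric = (loss_plus - loss_minus) / (2 * eps)
    assert np.allclose(analytic, numeric, atol=1e-4), (analytic, numeric)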
class MSE:
    def __init__(self, target):
        self._target = target
        self._X = None

    def forward(self, X):
        self._X = X
        diff = X - self._target          # shape (N, width)
        # 0.5 * sum of squared errors, so that backward is simply (X - target)
        return 0.5 * np.sum(diff ** 2)

    def backward(self, grad):
        return self._X - self._target

    def update(self):
        # no parameters, nothing to do
        pass
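# Note (added for clarity): with L = 0.5 * sum((X - target)**2), the gradient is
# dL/dX = X - target, which is exactly what MSE.backward returns; the 0.5 factor in
# forward keeps forward and backward consistent.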
def sigmoid(x):
    return 1 / (1 + np.exp(-x))
class Sigmoid:
    """Element-wise sigmoid activation (no parameters)."""

    def __init__(self) -> None:
        self._X = None

    def forward(self, X):
        self._X = X
        return sigmoid(X)

    def backward(self, grad):
        # d(sigmoid(x))/dx = s * (1 - s), so multiply (not divide) the upstream grad
        s = sigmoid(self._X)
        return grad * s * (1 - s)

    def update(self):
        # no parameters, nothing to do
        return
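# Quick sanity check (illustrative, not part of the original gist; the helper name is
# my own): the derivative of sigmoid is s * (1 - s), so backward with an all-ones
# upstream gradient should match a central finite difference of sigmoid itself.
def _check_sigmoid_grad(eps=1e-6):
    act = Sigmoid()
    x = np.array([[0.3, -1.2, 2.0]])
    act.forward(x)
    analytic = act.backward(np.ones_like(x))
    numeric = (sigmoid(x + eps) - sigmoid(x - eps)) / (2 * eps)
    assert np.allclose(analytic, numeric, atol=1e-6), (analytic, numeric)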
class LogLoss:
    """Binary cross entropy on probabilities; X is expected to lie in (0, 1)."""

    def __init__(self, target) -> None:
        self._target = target

    def forward(self, X):
        self._X = X
        return -self._target * np.log(X) - (1 - self._target) * np.log(1 - X)

    def backward(self, grad):
        """Loss node; grad is only a placeholder."""
        return -self._target / self._X + (1 - self._target) / (1 - self._X)

    def update(self):
        # no parameters, nothing to do
        return
class SigmoidWithLoss:
    """Sigmoid fused with binary cross entropy; takes logits, more stable than Sigmoid + LogLoss."""

    def __init__(self, target) -> None:
        self._target = target

    def forward(self, X):
        self._X = X
        p = sigmoid(X)
        return -self._target * np.log(p) - (1 - self._target) * np.log(1 - p)

    def backward(self, grad):
        """The gradient w.r.t. the logits simplifies to p - target."""
        p = sigmoid(self._X)
        return p - self._target

    def update(self):
        # no parameters, nothing to do
        return
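# Derivation note (added for clarity): for p = sigmoid(x) and binary cross entropy
# L = -t*log(p) - (1-t)*log(1-p),
#   dL/dp = -t/p + (1-t)/(1-p) = (p - t) / (p * (1 - p))
#   dp/dx = p * (1 - p)
# so dL/dx = p - t. That is why SigmoidWithLoss.backward returns p - target directly
# and avoids the division by p * (1 - p) that chaining Sigmoid + LogLoss requires.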
input_X = np.random.normal(size=(1, 10))


def test_mse():
    layer = Dense(10, 2)
    loss = MSE(np.zeros(2))
    layers = [layer, loss]
    for i in range(10):
        X = input_X
        for layer in layers:
            X = layer.forward(X)
        print("loss", X)
        grad = None
        for layer in reversed(layers):
            grad = layer.backward(grad)
            layer.update()
def test_log_loss():
    layer = Dense(10, 10)
    act = Sigmoid()
    loss = LogLoss(np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]))
    layers = [layer, act, loss]
    for i in range(10):
        X = input_X
        for layer in layers:
            X = layer.forward(X)
        print("loss", X)
        grad = None
        for layer in reversed(layers):
            grad = layer.backward(grad)
            layer.update()
def test_log_loss2():
    layer = Dense(10, 10)
    # no separate Sigmoid layer needed: SigmoidWithLoss applies the sigmoid internally
    loss = SigmoidWithLoss(np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1]))
    layers = [layer, loss]
    for i in range(10):
        X = input_X
        for layer in layers:
            X = layer.forward(X)
            if isinstance(layer, Dense):
                print("logit", X)
        print("loss", X)
        grad = None
        for layer in reversed(layers):
            grad = layer.backward(grad)
            layer.update()


test_log_loss2()