@omarsar
Created December 29, 2019 16:07
import torch
import torch.nn as nn

# run on GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class LR(nn.Module):
    def __init__(self, dim, lr=torch.scalar_tensor(0.01)):
        super(LR, self).__init__()
        # initialize parameters
        self.w = torch.zeros(dim, 1, dtype=torch.float).to(device)
        self.b = torch.scalar_tensor(0).to(device)
        self.grads = {"dw": torch.zeros(dim, 1, dtype=torch.float).to(device),
                      "db": torch.scalar_tensor(0).to(device)}
        self.lr = lr.to(device)

    def forward(self, x):
        ## forward pass: linear transform followed by sigmoid
        z = torch.mm(self.w.T, x) + self.b
        a = self.sigmoid(z)
        return a

    def sigmoid(self, z):
        return 1 / (1 + torch.exp(-z))

    def backward(self, x, yhat, y):
        ## manually computed gradients of the cross-entropy loss
        self.grads["dw"] = (1 / x.shape[1]) * torch.mm(x, (yhat - y).T)
        self.grads["db"] = (1 / x.shape[1]) * torch.sum(yhat - y)

    def optimize(self):
        ## gradient-descent update
        self.w = self.w - self.lr * self.grads["dw"]
        self.b = self.b - self.lr * self.grads["db"]

## utility functions
def loss(yhat, y):
    ## binary cross-entropy averaged over the m examples
    m = y.size()[1]
    return -(1 / m) * torch.sum(y * torch.log(yhat) + (1 - y) * torch.log(1 - yhat))

def predict(yhat, y):
    ## threshold probabilities at 0.5 and return accuracy (%)
    y_prediction = torch.zeros(1, y.size()[1]).to(device)
    for i in range(yhat.size()[1]):
        if yhat[0, i] <= 0.5:
            y_prediction[0, i] = 0
        else:
            y_prediction[0, i] = 1
    return 100 - torch.mean(torch.abs(y_prediction - y)) * 100
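
To show how the pieces fit together, here is a minimal training-loop sketch. The synthetic data, `dim`, `m`, and the iteration count are illustrative assumptions, not part of the original gist.

## example usage (illustrative sketch, not from the original gist)
dim, m = 2, 100
x = torch.randn(dim, m).to(device)                        # features, shape (dim, m)
y = (x.sum(dim=0, keepdim=True) > 0).float().to(device)   # labels, shape (1, m)

model = LR(dim)
for i in range(100):
    yhat = model.forward(x)      # forward pass
    model.backward(x, yhat, y)   # compute gradients by hand
    model.optimize()             # gradient-descent step
    if i % 20 == 0:
        print(f"iter {i}: loss {loss(yhat, y).item():.4f}")

print(f"train accuracy: {predict(model.forward(x), y).item():.2f}%")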