Mehdi Mehdi-Amine
💭 Having fun with Neural Networks
Mehdi-Amine / backpropagation.py
Created June 5, 2020 15:35
implementing back prop
#----------- Using our differentiations -----------#
ce_p = crossentropy_prime(sm, y) # = [[ 0.0000, 0.0000, -3.3230]]
sm_p = softmax_prime(z)                 # = [[ 0.1919, -0.1140, -0.0779],
                                        #    [-0.1140,  0.2464, -0.1324],
                                        #    [-0.0779, -0.1324,  0.2104]]
z_p_w = torch.stack(([x]*3)).squeeze()  # Recall: z' w.r.t. the weights is equal to x
z_p_b = torch.ones_like(b)              # Recall: z' w.r.t. the biases is equal to 1
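Chaining these pieces gives the gradients of the loss with respect to the weights and biases. A minimal sketch of that last step, checked against autograd; it assumes the tensors x, w, b, z and y from the feedforward snippet below are in scope:

import torch.nn.functional as F

dL_dz = ce_p @ sm_p      # chain rule through the softmax: ≈ [[0.2590, 0.4401, -0.6991]]
dL_dw = dL_dz.T * z_p_w  # every row of z_p_w is x, so this is the outer product of dL/dz and x
dL_db = dL_dz * z_p_b    # z' w.r.t. the biases is 1

loss = F.cross_entropy(z, y)  # PyTorch applies softmax + cross-entropy to z directly
loss.backward()
print(torch.allclose(dL_dw, w.grad, atol=1e-4))  # expected: True
print(torch.allclose(dL_db, b.grad, atol=1e-4))  # expected: True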
Mehdi-Amine / feedforward.py
Created June 5, 2020 13:31
Implementing feedforward
y = torch.tensor([2])  # index of the correct class
x = torch.tensor([[0.9, 0.5, 0.3]])
w = torch.tensor([[0.2, 0.1, 0.4], [0.5, 0.6, 0.1], [0.1, 0.7, 0.2]], requires_grad=True)
b = torch.tensor([[0.1, 0.2, 0.1]], requires_grad=True)
#----------- Using our functions -----------#
z = x @ w.T + b # = [[0.4500, 0.9800, 0.6000]]
sm = softmax(z) # = [[0.2590, 0.4401, 0.3009]]
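For reference, a quick way to cross-check this forward pass against PyTorch's own implementation (a sketch, not part of the gist; softmax here is the function defined in the snippets further down):

import torch.nn.functional as F
print(torch.allclose(sm, F.softmax(z, dim=1)))  # expected: True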
Mehdi-Amine / cross_entropy_prime.py
Last active June 4, 2020 00:52
differentiating cross entropy
import torch
zs = torch.tensor([[0.1, 0.4, 0.2], [0.3, 0.9, 0.6]]) # The values of 3 output neurons for 2 instances
activations = softmax(zs) # = [[0.2894, 0.3907, 0.3199],[0.2397, 0.4368, 0.3236]]
y = torch.tensor([2,0]) # equivalent to [[0,0,1],[1,0,0]]
#----------- Implementing the math -----------#
def crossentropy_prime(activations, labels):
    n = labels.shape[0]
    activs = torch.zeros_like(activations)
    # Only the activation of the correct class appears in the loss: d/da_y of -log(a_y)/n = -1/(n * a_y)
    activs[range(n), labels] = -1 / (n * activations[range(n), labels])
    return activs
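A possible sanity check on this derivative, using autograd together with the cross_entropy function from the snippet below:

acts = activations.detach().requires_grad_(True)
cross_entropy(acts, y).backward()
print(torch.allclose(acts.grad, crossentropy_prime(activations, y)))  # expected: True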
Mehdi-Amine / cross-entropy.py
Last active June 3, 2020 21:59
Implementing cross-entropy
import torch
import torch.nn.functional as F
#----------- Implementing the math -----------#
def cross_entropy(activations, labels):
    return - torch.log(activations[range(labels.shape[0]), labels]).mean()
zs = torch.tensor([[0.1, 0.4, 0.2], [0.3, 0.9, 0.6]]) # The values of 3 output neurons for 2 instances
activations = softmax(zs) # = [[0.2894, 0.3907, 0.3199],[0.2397, 0.4368, 0.3236]]
y = torch.tensor([2,0]) # equivalent to [[0,0,1],[1,0,0]]
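F.cross_entropy combines log-softmax and negative log-likelihood, so it takes the raw zs rather than the softmax activations; a quick comparison, assuming the softmax function from the snippet below:

ce = cross_entropy(activations, y)                 # ≈ 1.2840
print(torch.allclose(ce, F.cross_entropy(zs, y)))  # expected: True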
Mehdi-Amine / softmax_prime.py
Last active June 4, 2020 21:16
Softmax differentiation
import torch
import torch.nn.functional as F
#----------- Implementing the math -----------#
def softmax(z):
    return z.exp() / z.exp().sum(axis=1, keepdim=True)

def softmax_prime(z):
    sm = softmax(z).squeeze()
    sm_size = sm.shape[0]
    # Jacobian of the softmax: diag(sm) - outer(sm, sm)
    return torch.diag(sm) - sm.reshape(sm_size, 1) * sm.reshape(1, sm_size)
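One way to check the Jacobian (assuming PyTorch ≥ 1.5, where torch.autograd.functional.jacobian is available):

z = torch.tensor([[0.1, 0.4, 0.2]])
jac = torch.autograd.functional.jacobian(softmax, z).squeeze()  # shape [3, 3]
print(torch.allclose(jac, softmax_prime(z)))  # expected: True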
Mehdi-Amine / softmax.py
Last active June 2, 2020 23:02
softmax activation
import torch
import torch.nn.functional as F
#----------- Implementing the math -----------#
def softmax(z):
    # keepdim=True tells sum() that we want its output to have the same dimension as z
    return z.exp() / z.exp().sum(axis=1, keepdim=True)
zs = torch.tensor([[2., 3., 1.]]) # Three output neurons
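Applying it and comparing with F.softmax (a small check, not shown in the gist preview):

sm = softmax(zs)  # ≈ [[0.2447, 0.6652, 0.0900]]
print(torch.allclose(sm, F.softmax(zs, dim=1)))  # expected: True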
Mehdi-Amine / sig-prime.py
Last active June 2, 2020 23:03
Sigmoid prime
import torch
#----------- Implementing the math -----------#
def sigmoid_prime(z):
    return sigmoid(z) * (1 - sigmoid(z))
z = torch.tensor([[2.], [-3.]], requires_grad=True)
sig_p = sigmoid_prime(z)
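Since z was created with requires_grad=True, the same derivative can be recovered with autograd (a minimal check; sigmoid is defined in the snippet below):

sigmoid(z).sum().backward()
print(torch.allclose(z.grad, sig_p))  # expected: True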
Mehdi-Amine / sig.py
Last active June 2, 2020 23:05
Sigmoid function or logistic function
import torch
#----------- Implementing the math -----------#
def sigmoid(z):
    return 1 / (1 + torch.exp(-z))
z = torch.tensor([[2.], [-3.]]) # Two neurons with different values
sig = sigmoid(z)
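PyTorch ships the same function as torch.sigmoid; a quick comparison:

print(sig)  # ≈ [[0.8808], [0.0474]]
print(torch.allclose(sig, torch.sigmoid(z)))  # expected: True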
Mehdi-Amine / relu-diff.py
Last active June 2, 2020 23:06
differentiation of ReLU
import torch
import torch.nn.functional as F
#----------- Implementing the math -----------#
def relu_prime(z):
    return torch.where(z > 0, torch.tensor(1.), torch.tensor(0.))
z = torch.tensor([[-0.2], [0.6]], requires_grad=True)
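Applying it, and checking against the gradient that autograd computes for F.relu (here z stays away from 0, where the derivative is undefined):

relu_p = relu_prime(z)  # = [[0.], [1.]]
F.relu(z).sum().backward()
print(torch.allclose(z.grad, relu_p))  # expected: True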
Mehdi-Amine / relu.py
Last active June 2, 2020 23:08
relu with clamp and with torch.nn.functional
import torch
import torch.nn.functional as F
#----------- Implementing the math -----------#
def relu(z):
    return torch.clamp(z, 0, None)  # None specifies that we don't require an upper bound
z = torch.tensor([[-0.2], [0.], [0.6]]) # Three neurons with different values
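Applying it, and comparing with F.relu (a small check, not shown in the preview):

print(relu(z))                             # = [[0.0000], [0.0000], [0.6000]]
print(torch.allclose(relu(z), F.relu(z)))  # expected: True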