# Hidden-layer weight matrix (3x4): row i computes xs[i] - xs[i+1],
# so hidden unit i fires (>= 0) exactly when xs[i] >= xs[i+1].
W1 = torch.tensor([[1,-1,0,0],[0,1,-1,0],[0,0,1,-1]])
W1
tensor([[ 1, -1, 0, 0],
[ 0, 1, -1, 0],
[ 0, 0, 1, -1]])
tensor([0., 0., 0.])
# Output-layer weights: negate and sum the three hidden activations,
# so the output pre-activation is 0 only when no hidden unit fired.
w2 = torch.tensor([-1,-1,-1])
w2
tensor([-1, -1, -1])
# Output-layer bias — NOTE(review): defined but never used by eval below;
# confirm whether it was meant to be added in the second layer.
b2 = torch.tensor([0])
b2
tensor([0])
def activation(z):
    """Heaviside step function: 1 where z >= 0, else 0.

    The result keeps z's dtype (int input -> int output, float -> float),
    matching an elementwise where(z >= 0, 1, 0).
    """
    # Boolean mask cast back to the input dtype gives exactly 0/1 values.
    return (z >= 0).to(z.dtype)
tensor([[1, 0, 1, 1],
[1, 1, 0, 1],
[1, 1, 1, 0]])
# xs should be a 1D tensor of length 4.
# NOTE(review): this shadows the builtin eval(); kept as-is since callers
# in the transcript use this name.
def eval(xs):
    """Two-layer step network over a length-4 input.

    Returns tensor(1) when every consecutive pair satisfies
    xs[i] < xs[i+1] (strictly increasing), else tensor(0):
    each hidden unit fires when xs[i] >= xs[i+1], and the output
    unit fires only when none of them did.
    """
    hidden = activation(W1 @ xs)
    return activation(w2 @ hidden)
eval(torch.tensor([1,2,3,4]))
tensor(1)
eval(torch.tensor([1,1,3,4]))
tensor(0)
eval(torch.tensor([1,3,5,14]))
tensor(1)
eval(torch.tensor([3,2,1,0]))
tensor(0)
eval(torch.tensor([-10,-9,-8,0]))
tensor(1)