Introduction to PyTorch
# initializing two arrays
a = np.array(2)
b = np.array(1)
print(a,b)
# initializing a numpy array
a = np.array(1)
# initializing a tensor
b = torch.tensor(1)
print(a)
print(b)
# back propagating
c.backward()
# computing gradients
print(a.grad)
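# a quick note on the values (illustrative): since c is the mean of the four
# entries of b = a + 5 (see the snippet defining a, b and c elsewhere in this
# gist), each entry of a.grad is dc/da = 1/4 = 0.25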
# concatenating horizontally
torch.cat((a,b),dim=1)
# concatenating vertically
torch.cat((a,b))
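# a quick shape check (illustrative, assuming a and b are the 2x2 tensors
# initialized elsewhere in this gist): dim=1 joins along columns,
# dim=0 (the default) joins along rows
print(torch.cat((a,b), dim=1).shape)   # torch.Size([2, 4])
print(torch.cat((a,b)).shape)          # torch.Size([4, 2])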
# initializing a numpy array
a = np.array([[1,2],[3,4]])
print(a, '\n')
# converting the numpy array to tensor
tensor = torch.from_numpy(a)
print(tensor)
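# note: torch.from_numpy shares memory with the source array, so changing
# the numpy array also changes the tensor (quick illustrative check)
a[0,0] = 10
print(tensor)   # the tensor reflects the change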
# loading dataset
train = pd.read_csv('train_LbELtWX/train.csv')
test = pd.read_csv('test_ScVgIM0/test.csv')
sample_submission = pd.read_csv('sample_submission_I5njJSF.csv')
train.head()
# initializing two tensors
a = torch.tensor([[1,2],[3,4]])
b = torch.tensor([[5,6],[7,8]])
print(a, '\n')
print(b)
#Input tensor
X = torch.Tensor([[1,0,1,0],[1,0,1,1],[0,1,0,1]])
#Output
y = torch.Tensor([[1],[1],[0]])
print(X, '\n')
print(y)
# importing libraries
import numpy as np
import torch
# importing the libraries
import pandas as pd
import numpy as np
from skimage.io import imread
import matplotlib.pyplot as plt
%matplotlib inline
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# plotting the training and validation loss
plt.plot(train_losses, label='Training loss')
plt.plot(val_losses, label='Validation loss')
plt.legend()
plt.show()
# addition
print(a+b)
# subtraction
print(b-a)
# multiplication
print(a*b)
# division
print(a/b)
# addition
print(a+b)
# subtraction
print(b-a)
# multiplication
print(a*b)
# division
print(a/b)
# matrix of zeros
a = np.zeros((3,3))
print(a)
print(a.shape)
# matrix addition
print(np.add(a,b), '\n')
# matrix subtraction
print(np.subtract(a,b), '\n')
# matrix multiplication
print(np.dot(a,b), '\n')
# matrix division
print(np.divide(a,b))
# matrix addition
print(torch.add(a,b), '\n')
# matrix subtraction
print(torch.sub(a,b), '\n')
# matrix multiplication
print(torch.mm(a,b), '\n')
# matrix division
print(torch.div(a,b))
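# note (illustrative, assuming a and b are the square tensors used above):
# torch.mm computes a true matrix product, while a*b (or torch.mul)
# multiplies element-wise; the two generally give different results
print(torch.mul(a,b))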
# matrix of zeros
a = torch.zeros((3,3))
print(a)
print(a.shape)
# define model
model = Sequential(Linear(input_num_units, hidden_num_units),
                   ReLU(),
                   Linear(hidden_num_units, output_num_units))
# loss function
loss_fn = CrossEntropyLoss()
# define optimization algorithm
optimizer = Adam(model.parameters(), lr=learning_rate)
train_losses = []
val_losses = []
for epoch in range(epochs):
    avg_cost = 0
    x, y = Variable(torch.from_numpy(train_x)), Variable(torch.from_numpy(train_y), requires_grad=False)
    x_val, y_val = Variable(torch.from_numpy(val_x)), Variable(torch.from_numpy(val_y), requires_grad=False)
    pred = model(x)
    pred_val = model(x_val)
    # get loss
    loss = loss_fn(pred, y)
    loss_val = loss_fn(pred_val, y_val)
    train_losses.append(loss.item())
    val_losses.append(loss_val.item())
    # perform backpropagation
    optimizer.zero_grad()   # clear accumulated gradients before the backward pass
    loss.backward()
    optimizer.step()
    avg_cost = avg_cost + loss.item()
    if (epoch % 2 != 0):
        print(epoch+1, avg_cost)
# number of neurons in each layer
input_num_units = 28*28
hidden_num_units = 500
output_num_units = 10
# set remaining variables
epochs = 20
learning_rate = 0.0005
for i in range(epoch):
    # Forward Propagation
    hidden_layer_input1 = torch.mm(X, wh)
    hidden_layer_input = hidden_layer_input1 + bh
    hidden_layer_activations = sigmoid(hidden_layer_input)
    output_layer_input1 = torch.mm(hidden_layer_activations, wout)
    output_layer_input = output_layer_input1 + bout
    output = sigmoid(output_layer_input)

    # Backpropagation
    E = y - output
    slope_output_layer = derivatives_sigmoid(output)
    slope_hidden_layer = derivatives_sigmoid(hidden_layer_activations)
    d_output = E * slope_output_layer
    Error_at_hidden_layer = torch.mm(d_output, wout.t())
    d_hiddenlayer = Error_at_hidden_layer * slope_hidden_layer
    wout += torch.mm(hidden_layer_activations.t(), d_output) * lr
    bout += d_output.sum() * lr
    wh += torch.mm(X.t(), d_hiddenlayer) * lr
    bh += d_hiddenlayer.sum() * lr   # fixed: the hidden bias is updated with the hidden-layer delta, not d_output
print('actual :\n', y, '\n')
print('predicted :\n', output)
# importing the optim module
from torch import optim
# adam
## adam = optim.Adam(model.parameters(), lr=learning_rate)
# sgd
## SGD = optim.SGD(model.parameters(), lr=learning_rate)
# print an image
img_name = rng.choice(train['id'])
filepath = 'train_LbELtWX/train/' + str(img_name) + '.png'
img = imread(filepath, as_gray=True)
img = img.astype('float32')
plt.figure(figsize=(5,5))
plt.imshow(img, cmap='gray')
# getting the prediction for test images
prediction = np.argmax(model(torch.from_numpy(test_x)).data.numpy(), axis=1)
# replacing the label with prediction
sample_submission['label'] = prediction
sample_submission.head()
# setting the random seed for numpy
np.random.seed(42)
# matrix of random numbers
a = np.random.randn(3,3)
a
# setting the random seed for pytorch
torch.manual_seed(42)
# matrix of random numbers
a = torch.randn(3,3)
a
# setting the random seed for numpy and initializing two matrices
np.random.seed(42)
a = np.random.randn(3,3)
b = np.random.randn(3,3)
# random number generator
seed = 128
rng = np.random.RandomState(seed)
# setting the random seed for pytorch and initializing two tensors
torch.manual_seed(42)
a = torch.randn(3,3)
b = torch.randn(3,3)
# setting the random seed for pytorch
torch.manual_seed(42)
# initializing tensor
a = torch.randn(2,4)
print(a)
a.shape
# reshaping tensor
b = a.reshape(1,8)
print(b)
b.shape
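# passing -1 lets PyTorch infer that dimension from the total number of
# elements (illustrative)
c = a.reshape(-1)   # flattens the 2x4 tensor to shape (8,)
print(c.shape)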
# first five rows of sample submission file
sample_submission.head()
# saving the file
sample_submission.to_csv('submission.csv', index=False)
# sigmoid activation function
def sigmoid(x):
    return 1/(1 + torch.exp(-x))

# derivative of the sigmoid function
def derivatives_sigmoid(x):
    return sigmoid(x) * (1 - sigmoid(x))
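# sanity check (illustrative): sigmoid(0) = 0.5 and its derivative at 0 is 0.25
print(sigmoid(torch.tensor(0.)))              # tensor(0.5000)
print(derivatives_sigmoid(torch.tensor(0.)))  # tensor(0.2500)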
train_y = train['label'].values
# initializing two tensors
a = torch.tensor(2)
b = torch.tensor(1)
print(a,b)
# initializing a tensor
a = torch.ones((2,2), requires_grad=True)
a
# performing operations on the tensor
b = a + 5
c = b.mean()
print(b,c)
# loading test images
test_img = []
for img_name in test['id']:
    image_path = 'test_ScVgIM0/test/' + str(img_name) + '.png'
    img = imread(image_path, as_gray=True)
    img = img.astype('float32')
    test_img.append(img)
test_x = np.array(test_img)
test_x.shape
# normalizing the pixel values (using the training max) and flattening the images to 1-D
test_x = test_x/train_x.max()
test_x = test_x.reshape(-1, 28*28).astype('float32')
test_x.shape
import torch
from torch.autograd import Variable
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential
from torch.optim import Adam
# get training accuracy
x, y = Variable(torch.from_numpy(train_x)), Variable(torch.from_numpy(train_y), requires_grad=False)
pred = model(x)
final_pred = np.argmax(pred.data.numpy(), axis=1)
accuracy_score(train_y, final_pred)
# loading training images
train_img = []
for img_name in train['id']:
    image_path = 'train_LbELtWX/train/' + str(img_name) + '.png'
    img = imread(image_path, as_gray=True)
    img = img.astype('float32')
    train_img.append(img)
train_x = np.array(train_img)
train_x.shape
# normalizing the pixel values and flattening the images to 1-D
train_x = train_x/train_x.max()
train_x = train_x.reshape(-1, 28*28).astype('float32')
train_x.shape
# original matrix
print(a, '\n')
# matrix transpose
print(np.transpose(a))
# original matrix
print(a, '\n')
# matrix transpose
torch.t(a)
type(a), type(b)
# get validation accuracy
x, y = Variable(torch.from_numpy(val_x)), Variable(torch.from_numpy(val_y), requires_grad=False)
pred = model(x)
final_pred = np.argmax(pred.data.numpy(), axis=1)
accuracy_score(val_y, final_pred)
# create validation set (stratify=train_y keeps the class proportions the same in both splits)
train_x, val_x, train_y, val_y = train_test_split(train_x, train_y, test_size = 0.1, stratify = train_y)
(train_x.shape, train_y.shape), (val_x.shape, val_y.shape)
# variable initialization
epoch = 7000                        # number of training iterations
lr = 0.1                            # learning rate
inputlayer_neurons = X.shape[1]     # number of features in the data set
hiddenlayer_neurons = 3             # number of hidden layer neurons
output_neurons = 1                  # number of neurons in the output layer
# weight and bias initialization
wh = torch.randn(inputlayer_neurons, hiddenlayer_neurons).type(torch.FloatTensor)
bh = torch.randn(1, hiddenlayer_neurons).type(torch.FloatTensor)
wout = torch.randn(hiddenlayer_neurons, output_neurons)
bout = torch.randn(1, output_neurons)
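# shape check for the initialization above (illustrative): with the 3x4 input X,
# wh is (4, 3), bh is (1, 3), wout is (3, 1) and bout is (1, 1),
# i.e. the network maps 4 inputs -> 3 hidden neurons -> 1 output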