Skip to content

Instantly share code, notes, and snippets.

View aletheia's full-sized avatar
🏠
Working from home

Luca Bianchi aletheia

🏠
Working from home
View GitHub Profile
def forward(self, x):
    '''Forward pass (standard PyTorch ``forward``); builds the network's computational graph.

    Parameters:
        x (Tensor): a Tensor containing the input batch of the network

    Returns:
        Tensor: the activations after the dropout layer.

    NOTE(review): the original snippet fell off the end without a return
    statement (so it returned None despite its docstring promising a
    probability array) -- the missing ``return`` has been restored here.
    The snippet also looks truncated (no flatten/linear/softmax stages);
    confirm any remaining layers against the original source.
    '''
    x = self.conv_layer_1(x)
    x = self.conv_layer_2(x)
    x = self.dropout1(x)
    return x
def prepare_data(self):
    '''Prepare the datasets; invoked once per training execution.

    Builds the training/validation/test data loaders via
    ``load_split_train_test`` and stores them on the instance.
    '''
    loaders = self.load_split_train_test()
    self.train_loader, self.val_loader, self.test_loader = loaders
def train_dataloader(self):
    '''
    Returns:
        (torch.utils.data.DataLoader): Training set data loader
    '''
    # The original snippet had an empty body (implicitly returning None
    # despite the documented return value) -- return the loader that
    # prepare_data() stores on the instance.
    return self.train_loader
def load_split_train_test(self, valid_size = .2):
    '''Loads data and builds training/validation dataset with provided split size
    Parameters:
        valid_size (float): the percentage of data reserved to validation
    Returns:
        (torch.utils.data.DataLoader): Training data loader
        (torch.utils.data.DataLoader): Validation data loader
        (torch.utils.data.DataLoader): Test data loader

    NOTE(review): the implementation body is missing from this snippet and the
    docstring was left unterminated (the next ``def`` began inside it) --
    presumably truncated by the page scrape. The closing quotes have been
    restored so the file parses; recover the actual body from the original
    source before use.
    '''
def __init__(self, train_data_dir, batch_size=128, test_data_dir=None, num_workers=4):
    '''Constructor method.

    Parameters:
        train_data_dir (string): path of the training dataset, used for both training and validation
        batch_size (int): number of images per batch. Defaults to 128.
        test_data_dir (string): path of the testing dataset, used after training. Optional.
        num_workers (int): number of worker processes used by the data loaders. Defaults to 4.
    '''
    # Invoke the parent constructor -- the enclosing class is not visible in
    # this snippet (presumably a LightningDataModule / nn.Module; confirm).
    super().__init__()
    # NOTE(review): the original body was truncated after the "# Invoke
    # constructor" comment; persisting the parameters as attributes is the
    # standard pattern so load_split_train_test() and the *_dataloader hooks
    # can read them -- verify against the original source.
    self.train_data_dir = train_data_dir
    self.batch_size = batch_size
    self.test_data_dir = test_data_dir
    self.num_workers = num_workers
import os
import math
import random as rn
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
# Install the pipenv dependency manager.
pip install pipenv
# Install the project's dependencies (from its Pipfile).
pipenv install
# Activate the project's virtual environment.
pipenv shell
# Run the training entry point.
python main.py
import os
import math
import random as rn
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import argparse
import os
# default pytorch import
import torch
# import lightning library
import pytorch_lightning as pl
# import trainer class, which orchestrates our model training
# MNIST on SageMaker with PyTorch Lightning
import json
import boto3
import sagemaker
from sagemaker.pytorch import PyTorch
# Initialize a SageMaker session, which holds context data for the job.
sagemaker_session = sagemaker.Session()
# The bucket containing our input data
# Environment variables of the SageMaker training container.
# NOTE(review): this reads as a captured dump of the job environment
# (values unquoted, JSON embedded) rather than an executable shell
# script -- confirm before sourcing it as shell.
# Hosts participating in the training job (single-node cluster here).
SM_HOSTS=["algo-1"]
# Network interface used for inter-node communication.
SM_NETWORK_INTERFACE_NAME=eth0
# Hyperparameters passed to the entry point.
SM_HPS={"batch-size":128,"epochs":6}
# Training script executed inside the container.
SM_USER_ENTRY_POINT=train.py
# Framework-specific parameters (none for this job).
SM_FRAMEWORK_PARAMS={}
# Cluster topology for this job.
SM_RESOURCE_CONFIG={"current_host":"algo-1","hosts":["algo-1"],"network_interface_name":"eth0"}
# Input channel configuration (train/test channels, File mode, fully replicated).
SM_INPUT_DATA_CONFIG={"test":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"},"train":{"RecordWrapperType":"None","S3DistributionType":"FullyReplicated","TrainingInputMode":"File"}}
# Directory where output data artifacts should be written.
SM_OUTPUT_DATA_DIR=/opt/ml/output/data
# Names of the configured input channels.
SM_CHANNELS=["test","train"]
# Hostname of this container within the cluster.
SM_CURRENT_HOST=algo-1