Dev Patel (devpatelio)

🏟️
!99S
View GitHub Profile
def MerDeBruijn(strings):
    """Build a De Bruijn graph: map each (k-1)-mer prefix to its suffix(es)."""
    preffs = [i[:-1] for i in strings]
    suffs = [i[1:] for i in strings]
    merdebruijn = {}
    for idx, i in enumerate(preffs):
        if i in merdebruijn:
            merdebruijn[i].append(suffs[idx])  #append to the existing list instead of nesting lists
        else:
            merdebruijn[i] = [suffs[idx]]
    return merdebruijn
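A quick sanity check on a handful of 3-mers (the input list below is illustrative, not from the gist):

print(MerDeBruijn(["AAT", "ATG", "ATG", "TGC"]))
# {'AA': ['AT'], 'AT': ['TG', 'TG'], 'TG': ['GC']}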
def _read(filename):
    with open(filename, 'r') as sqinput:
        lines = sqinput.read().splitlines()
    return lines
def Composition(k, text):
    """Return all k-mers of text, in order of appearance."""
    return [text[i:k+i] for i in range(len(text)-k+1)]
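For example (the sample string is illustrative):

print(Composition(3, "TATGGG"))
# ['TAT', 'ATG', 'TGG', 'GGG']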
def Overlap(strings):
    final = [strings[0]]
import numpy as np

def TransSimu(Rt, days=300, nd=30, muT=0.7, sizeV=1, limit=1000000, pp=0.001, n0=1):
    kk = np.zeros(days, dtype=int)  # kk: daily new cases
    atrisk = kk.copy()              # atrisk: number of active cases each day; simulation period of nd days
    tt = 0                          # tt: cumulative total number of confirmed cases
    if nd > len(Rt):
        print("The length of Rt should not be smaller than nd.")
    stoplimit = limit * (1 - pp)
    nk = n0
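The gist preview cuts off here. As a toy illustration of how Rt could drive the daily counts, a minimal branching-process sketch; the Poisson step, function name, and parameters below are my assumptions, not code from the gist:

import numpy as np

def branching_step(Rt, days=30, n0=1, seed=0):
    rng = np.random.default_rng(seed)
    kk = np.zeros(days, dtype=int)  # daily new cases
    kk[0] = n0
    active = n0
    for t in range(1, days):
        kk[t] = rng.poisson(Rt[t] * active)  # each active case seeds ~Rt[t] new cases per day (assumed)
        active += kk[t]
    return kk

print(branching_step(np.full(30, 1.2)))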
model_ft = models.resnet18(pretrained=True)  #load a pretrained ResNet-18 from torchvision's models library
num_ftrs = model_ft.fc.in_features  #number of input features of the final fully connected layer
model_ft.fc = nn.Linear(num_ftrs, 5)  #replace the final layer with a new one mapping num_ftrs features to 5 classes
## In the train_model function, the dataset is pre-established -> you can easily change that for new datasets
model_ft = model_ft.to(device)  #move the model to the chosen device
criterion = nn.CrossEntropyLoss()  #loss function declaration
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)  #optimizer declaration
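train_model below takes a scheduler that this excerpt never constructs; a plausible setup using lr_scheduler.StepLR, where the step_size/gamma values and the 25-epoch call are illustrative, following the standard PyTorch transfer-learning tutorial:

exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)  # decay the LR by 10x every 7 epochs
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)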
import copy
import time

def train_model(model, criterion, optimizer, scheduler, num_epochs):  #same parameters as a bare-bones training function
    since = time.time()  #record the start time to display total training time later
    best_model_wts = copy.deepcopy(model.state_dict())  #copy of the best weights so far, updated whenever accuracy improves
    best_acc = 0.0  #stores best accuracy
    for epoch in range(num_epochs):
        print(f'Epoch {epoch+1}/{num_epochs}')
        print('-' * 10)
def imshow(inp, title):
    imshow_mean = np.array(mean)  #mean and std are the normalization constants defined with the transforms
    imshow_std = np.array(std)
    inp = inp.numpy().transpose((1, 2, 0)) * imshow_std + imshow_mean  ##undo the channel normalization (elementwise multiply and add)
    inp = np.clip(inp, 0, 1)  #clip all values into the [0, 1] range
    inp = cv2.resize(inp, (2520, 1420))  #resize the image for viewing ease
    plt.imshow(inp)
    plt.title(title)  #show the passed-in title

dataiters = iter(dataloaders['train'])  #use the train loader
images, labels = next(dataiters)  #next(dataiters) on current PyTorch; older versions used dataiters.next()
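A typical way to call imshow on the batch just fetched, assuming class_names maps label indices to readable names (class_names is not defined in this excerpt):

out = torchvision.utils.make_grid(images)  # tile the batch into a single image tensor
imshow(out, title=[class_names[x] for x in labels])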
### This time, I'm using Colab for the GPU, so the path will be different
from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import torchvision
from torchvision import datasets, models, transforms
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 8, 5)
        self.pool = nn.MaxPool2d(2, 2)  #a single pooling layer, reused after each conv
        self.conv2 = nn.Conv2d(8, 8, 5)
        self.conv3 = nn.Conv2d(8, 20, 5)  ## note change here in 16 to 20
        self.fc1 = nn.Linear(11520, 2000)  ## note change here in 9216 to 11520
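The class is cut off before its forward method. A sketch consistent with the layers above, assuming 220x220 RGB inputs (220 -> 108 -> 52 -> 24 spatially, so the flattened size is 20*24*24 = 11520); this is a hypothetical method to add inside Net, not code from the gist:

import torch
import torch.nn.functional as F

def forward(self, x):
    x = self.pool(F.relu(self.conv1(x)))  # 3x220x220 -> 8x108x108
    x = self.pool(F.relu(self.conv2(x)))  # 8x108x108 -> 8x52x52
    x = self.pool(F.relu(self.conv3(x)))  # 8x52x52 -> 20x24x24
    x = torch.flatten(x, 1)               # -> 11520 features
    return F.relu(self.fc1(x))            # -> 2000; any later layers are not shown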
for epoch in range(num_epochs):
    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):  #starting from i=0, iterate over the train_loader
        inputs, labels = data  #unpack the image batch and the corresponding labels
        labels = labels.type(torch.LongTensor)  #cast labels to LongTensor in case a dtype error is thrown
        optimizer.zero_grad()  #zero the gradients before each batch, otherwise they accumulate
        outputs = net(inputs)  #feed the batch through the model 'net'
        # print(labels.data)
        # print(outputs.data)
        loss = criterion(outputs, labels)  #loss between the outputs and the given labels
        loss.backward()  #backpropagate
        optimizer.step()  #update the weights
        running_loss += loss.item()
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
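The loop above also needs net, learning_rate, and num_epochs in scope; none appear in this excerpt, so these placeholder values are assumptions:

net = Net()             # the CNN defined above
learning_rate = 0.001   # assumed; not shown in the gist
num_epochs = 10         # assumed; not shown in the gist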