## This time, I'm using Colab for the GPU, so the path will be different
from __future__ import print_function, division
import time  # used by train_model below to report total training time
import copy  # used below to deep-copy model weights and graphs
import cv2   # used by imshow below to resize images
import matplotlib.pyplot as plt  # used by imshow below to display images
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
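The later snippets refer to mean, std, device, and dataloaders, which are defined outside these excerpts. Below is a minimal setup sketch in the usual torchvision transfer-learning style; the data directory, batch size, and worker count are placeholders, not the original values.

mean = [0.485, 0.456, 0.406]  # ImageNet channel means expected by the pretrained ResNet
std = [0.229, 0.224, 0.225]   # ImageNet channel standard deviations
data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean, std)]),
}
data_dir = '/content/data'  # placeholder Colab path, not the original one
image_datasets = {x: datasets.ImageFolder(f'{data_dir}/{x}', data_transforms[x])
                  for x in ['train', 'val']}
dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=4,
                                              shuffle=True, num_workers=2)
               for x in ['train', 'val']}
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')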
def imshow(inp, title):
    imshow_mean = np.array(mean)
    imshow_std = np.array(std)
    inp = (inp.numpy().transpose((1, 2, 0))) * imshow_std + imshow_mean  # undo the Normalize transform: element-wise multiply by std, then add the mean back
    inp = np.clip(inp, 0, 1)  # clamp all values into the [0, 1] range expected by imshow
    inp = cv2.resize(inp, (2520, 1420))  # resize the image for viewing ease
    plt.imshow(inp)
    plt.title(title)  # show the passed-in title above the image

dataiters = iter(dataloaders['train'])  # use the training dataloader
images, labels = next(dataiters)  # .next() was removed from newer PyTorch iterators; use the built-in next()
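A typical way to view the fetched batch, assuming the image_datasets setup sketched earlier (class_names is an assumed helper, not from the original):

grid = torchvision.utils.make_grid(images)  # stitch the batch into a single image tensor
class_names = image_datasets['train'].classes  # assumed: class labels come from the ImageFolder dataset
imshow(grid, title=[class_names[l] for l in labels])  # display the batch with its class names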
def train_model(model, criterion, optimizer, scheduler, num_epochs):  # same parameters as a bare-bones training function
    since = time.time()  # record the start time so the total training time can be reported
    best_models_wts = copy.deepcopy(model.state_dict())  # copy of the best weights, refreshed whenever validation accuracy improves
    best_acc = 0.0  # stores the best validation accuracy seen so far
    for epoch in range(num_epochs):
        print(f'Epoch {epoch+1}/{num_epochs}')
        print('-' * 10)
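The rest of the epoch body is not included in this excerpt. Below is a minimal sketch of how this style of transfer-learning loop typically continues, with a training and a validation phase per epoch; the phase bookkeeping and the use of image_datasets/dataloaders from the earlier setup sketch are assumptions, not the original code.

        for phase in ['train', 'val']:  # sketch only: one training pass and one validation pass per epoch
            if phase == 'train':
                model.train()
            else:
                model.eval()
            running_loss, running_corrects = 0.0, 0
            for inputs, targets in dataloaders[phase]:
                inputs, targets = inputs.to(device), targets.to(device)
                optimizer.zero_grad()
                with torch.set_grad_enabled(phase == 'train'):  # gradients only during training
                    outputs = model(inputs)
                    loss = criterion(outputs, targets)
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                running_loss += loss.item() * inputs.size(0)
                running_corrects += (outputs.argmax(1) == targets).sum().item()
            if phase == 'train':
                scheduler.step()  # advance the learning-rate schedule once per epoch
            epoch_acc = running_corrects / len(image_datasets[phase])
            if phase == 'val' and epoch_acc > best_acc:  # this check is what refreshes best_models_wts
                best_acc = epoch_acc
                best_models_wts = copy.deepcopy(model.state_dict())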
model_ft = models.resnet18(pretrained=True)  # load a pretrained ResNet-18 from torchvision's model zoo (newer torchvision uses weights=models.ResNet18_Weights.DEFAULT)
num_ftrs = model_ft.fc.in_features  # number of input features of the final fully connected layer
model_ft.fc = nn.Linear(num_ftrs, 5)  # replace that layer with a new linear layer mapping num_ftrs inputs to 5 output classes
## In the train_model function, we pre-established the dataset being used -> you can change that easily for new datasets if you want
model_ft = model_ft.to(device)  # move the model to the GPU (or CPU) device
criterion = nn.CrossEntropyLoss()  # loss function declaration
optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.001, momentum=0.9)  # optimizer declaration
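The lr_scheduler import above is otherwise unused in these excerpts; a plausible continuation in the same style pairs a StepLR schedule with the fine-tuning call. The step size, gamma, and epoch count below are assumptions.

exp_lr_scheduler = lr_scheduler.StepLR(optimizer_ft, step_size=7, gamma=0.1)  # decay the LR by 10x every 7 epochs (assumed values)
model_ft = train_model(model_ft, criterion, optimizer_ft, exp_lr_scheduler, num_epochs=25)  # assumed epoch count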
def TransSimu(Rt, days=300, nd=30, muT=0.7, sizeV=1, limit=1000000, pp=0.001, n0=1):
    kk = np.zeros(days, dtype=int)  # kk: daily new cases over a simulation period of `days` days
    atrisk = kk.copy()  # atrisk: number of active cases each day (a separate copy, not an alias of kk)
    tt = 0  # the cumulative total number of confirmed cases
    if nd > len(Rt):
        print("The length of Rt should not be smaller than nd.")
        return None  # Rt is too short to cover the nd-day window, so stop here
    stoplimit = limit * (1 - pp)  # threshold at which the simulation stops: all but a fraction pp of the population limit
    nk = n0  # start with n0 initial case(s)
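The body of the simulation loop is omitted from this excerpt, so the example below only illustrates the expected shape of the inputs; the Rt schedule is made up, and treating the return value as the daily new-case series is an assumption.

Rt = np.concatenate([np.repeat(2.5, 15), np.repeat(0.8, 15)])  # assumed 30-day reproduction-number schedule, matching nd=30
daily_cases = TransSimu(Rt, days=300, nd=30, n0=1)  # assumed to return the kk array of daily new cases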
def _read(filename):
    with open(filename, 'r') as sqinput:
        lines = sqinput.read().splitlines()
    return lines

def Composition(k, text):
    return [text[i:k+i] for i in range(len(text)-k+1)]  # every k-length substring of text, in order of starting position

def Overlap(strings):
    final = [strings[0]]
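A quick sanity check of Composition on a short string (worked out by hand, not taken from the original dataset):

print(Composition(3, 'TATGGGGTGC'))
# ['TAT', 'ATG', 'TGG', 'GGG', 'GGG', 'GGT', 'GTG', 'TGC']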
def MerDeBruijn(strings):
    preffs = [i[:-1] for i in strings]  # (k-1)-mer prefixes of each k-mer
    suffs = [i[1:] for i in strings]  # (k-1)-mer suffixes of each k-mer
    merdebruijn = {}
    for idx, i in enumerate(preffs):
        if i in merdebruijn:
            merdebruijn[i].append(suffs[idx])  # append to the existing list so repeated prefixes don't produce nested lists
        else:
            merdebruijn[i] = [suffs[idx]]  # always store a list of successor (k-1)-mers
    return merdebruijn
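For example, on a tiny set of 3-mers with a repeated prefix (worked out by hand; with the list-valued storage above, every value is a list of successors):

print(MerDeBruijn(['ATG', 'ATG', 'TGT']))
# {'AT': ['TG', 'TG'], 'TG': ['GT']}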
0 -> 1,19,731
1 -> 290,3,4
10 -> 100,5
100 -> 102,164,269
1000 -> 1002
1001 -> 1185,148
1002 -> 1001
1003 -> 1005
1004 -> 1935,90
1005 -> 1004,2166
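This is the adjacency-list format the scripts below read from the dataset files: each line names a node and its comma-separated successors, and parses into one dictionary entry, e.g.:

line = '0 -> 1,19,731'
node, neighbors = line.split(' -> ')
print({node: neighbors.split(',')})  # {'0': ['1', '19', '731']}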
def _read(filename):
    with open(filename, 'r') as sqinput:
        lines = sqinput.read().splitlines()
    return lines

txt = _read('dataset_203_99 (1).txt')
final = EulerianCycle(txt)
print('->'.join(final))
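EulerianCycle itself is not included in these excerpts. Below is a minimal Hierholzer-style sketch of the cycle-finding step, assuming the ' -> ' parsing shown in EulerianPath has already produced a dict mapping each node to a list of its successors; the real EulerianCycle presumably folds that parsing in, since it is called directly on the raw lines above.

def EulerianCycle_sketch(graph):
    # Hierholzer's algorithm: walk unused edges until stuck, then backtrack,
    # emitting each node once all of its outgoing edges have been used.
    graph = {node: list(nbrs) for node, nbrs in graph.items()}  # work on a copy so the input is not consumed
    stack = [next(iter(graph))]  # start anywhere; an Eulerian cycle covers every edge regardless of the start node
    cycle = []
    while stack:
        node = stack[-1]
        if graph.get(node):  # unused outgoing edges remain at this node
            stack.append(graph[node].pop())
        else:  # dead end: this node is finished
            cycle.append(stack.pop())
    return cycle[::-1]  # reverse so the cycle reads in walk order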
def EulerianPath(strings, format=True):
    # Same formatting step as before: turn the txt lines into a De Bruijn graph as a Python dict
    if format:
        graph = [i.split(' -> ') for i in strings]
        graph = dict(graph)
        for key, val in graph.items():
            graph[key] = val.split(',')  # turn the comma-separated successor string into a list
        copy_graph = copy.deepcopy(graph)  # keep an untouched copy of the graph
    else: