Daniel Ching (danielcwq)
@danielcwq
danielcwq / model1fastai.py
Created November 14, 2021 12:49
First model to differentiate between cats and dogs.
from fastai.vision.all import *

# Download the Oxford-IIIT Pet dataset and point at its images folder
path = untar_data(URLs.PETS)/'images'

# In this dataset, cat images have filenames starting with an uppercase letter
def is_cat(x): return x[0].isupper()

dls = ImageDataLoaders.from_name_func(
    path, get_image_files(path), valid_pct=0.2, seed=42,
    label_func=is_cat, item_tfms=Resize(224))

# Fine-tune a pretrained ResNet-34 for one epoch
learn = cnn_learner(dls, resnet34, metrics=error_rate)
learn.fine_tune(1)
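Once fine-tuned, the learner can make predictions on new images; a minimal sketch, assuming a local file named cat.jpg (hypothetical):
# Minimal inference sketch; 'cat.jpg' is a hypothetical local image file
img = PILImage.create('cat.jpg')
is_cat_pred, _, probs = learn.predict(img)
print(f"Is this a cat?: {is_cat_pred}; probability: {probs[1].item():.4f}")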
@danielcwq
danielcwq / init_params_func.py
Created November 28, 2021 05:39
Initialise Random Weights
import torch

def init_params(size, std=1.0):
    # Random weights scaled by std, with autograd tracking enabled
    return (torch.randn(size)*std).requires_grad_()

weights = init_params((28*28,1))
bias = init_params(1)
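These parameters are intended to drive a simple linear classifier; a sketch of how they would be used, where xb is an assumed batch of flattened 28*28 images:
# Sketch: linear model using the parameters above
# (xb is an assumed batch of shape [batch_size, 28*28])
def linear1(xb):
    return xb@weights + bias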
@danielcwq
danielcwq / mnist_loss_func.py
Created November 28, 2021 05:48
Creating the mnist_loss function
import torch
def mnist_loss(predictions, targets):
    # 1-pred where the target is 1, pred where the target is 0, averaged over the batch
    return torch.where(targets==1, 1-predictions, predictions).mean()
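A quick sanity check with made-up values (predictions are assumed to already lie in [0,1]):
preds = torch.tensor([0.9, 0.4, 0.2])
targets = torch.tensor([1, 1, 0])
print(mnist_loss(preds, targets))  # tensor(0.3000): mean of 0.1, 0.6 and 0.2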
@danielcwq
danielcwq / sigmoid.py
Created November 28, 2021 05:50
Creating the sigmoid function
import torch
def sigmoid(x):
    return 1/(1+torch.exp(-x))
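In practice the sigmoid is applied to the raw model output before computing the loss, so predictions always land between 0 and 1; a sketch of the combined version (an assumption about how these two pieces fit together):
# Sketch: mnist_loss with sigmoid applied first, so raw activations of any
# range are squashed into (0,1) before measuring the distance from the targets
def mnist_loss(predictions, targets):
    predictions = predictions.sigmoid()
    return torch.where(targets==1, 1-predictions, predictions).mean()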
def calc_grad(xb, yb, model):
    # Forward pass, compute the loss, then backpropagate to fill .grad
    preds = model(xb)
    loss = mnist_loss(preds, yb)
    loss.backward()

# Gradients accumulate across backward() calls, so reset them between steps
weights.grad.zero_()
bias.grad.zero_()
# In pseudocode, one SGD step is: w -= gradient(w) * lr
# Converting that stepping logic into a function that trains for one epoch
# (dl is assumed to be the training DataLoader of (xb, yb) batches):
def train_epoch(model, lr, params):
    for xb,yb in dl:
        calc_grad(xb, yb, model)
        for p in params:
            p.data -= p.grad*lr
            p.grad.zero_()
def batch_accuracy(xb, yb):
    # Squash activations into [0,1] with sigmoid, then threshold at 0.5
    preds = xb.sigmoid()
    correct = (preds>0.5) == yb
    return correct.float().mean()
def validate_epoch(model):
    # Average batch accuracy over the validation set, rounded to 4 d.p.
    accs = [batch_accuracy(model(xb), yb) for xb,yb in valid_dl]
    return round(torch.stack(accs).mean().item(), 4)
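Putting the pieces together, a minimal training-loop sketch, assuming linear1 (the linear model sketched earlier), weights and bias are defined, and dl / valid_dl are the training and validation DataLoaders (the learning rate and epoch count are illustrative):
lr = 1.
params = weights, bias
for i in range(20):
    train_epoch(linear1, lr, params)
    print(validate_epoch(linear1), end=' ')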
# ReLU written by hand inside a two-layer network: clamp negative activations to zero
res = res.max(tensor(0.0))
from torch import nn

# The same two-layer idea expressed with PyTorch modules: linear, ReLU, linear
simple_net = nn.Sequential(
    nn.Linear(28*28,30),
    nn.ReLU(),
    nn.Linear(30,1)
)
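A sketch of training this network with a fastai Learner, assuming dls is a DataLoaders built from the training and validation sets (the epoch count and learning rate are illustrative):
# Sketch; assumes dls = DataLoaders(dl, valid_dl) already exists
learn = Learner(dls, simple_net, opt_func=SGD,
                loss_func=mnist_loss, metrics=batch_accuracy)
learn.fit(40, 0.1)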
# Flatten each 28x28 validation image into a 784-long vector
valid_x = torch.cat([valid_2_tens, valid_9_tens]).view(-1, 28*28)
# Label 2s as 1 and 9s as 0, with a trailing unit axis to match the predictions
valid_y = tensor([1]*len(valid_2_tens) + [0]*len(valid_9_tens)).unsqueeze(1)
valid_dset = list(zip(valid_x, valid_y))
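The list of (x, y) pairs then feeds a DataLoader for validate_epoch above; a minimal sketch using PyTorch's DataLoader (the batch size is an illustrative choice):
from torch.utils.data import DataLoader
valid_dl = DataLoader(valid_dset, batch_size=256)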