Oscar Knagg (oscarknagg)

@oscarknagg
oscarknagg / iphrp.png
Last active August 22, 2022 13:27
Index of Private Housing Rental Prices
(image preview: iphrp.png)
@oscarknagg
oscarknagg / object_store_performance.py
Created January 31, 2022 17:46
Ray object store performance
import argparse
import os
import os.path
import string
import time
import uuid

import numpy as np
import pandas as pd
import psutil
import pyarrow as pa
import ray
from pyarrow import parquet as pq
WINDOW_LENGTH = 1000
N = 1000000
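The preview cuts off after the imports and constants. As a rough sketch of what a measurement like this could look like (the payload size and the timing/printing code are my assumptions, not taken from the gist), one can time ray.put/ray.get round-trips through the object store:
import time
import numpy as np
import ray

ray.init()

# ~8 MB of float64 (assumed payload size).
payload = np.random.rand(1_000_000)

# Time moving the array into the shared-memory object store...
start = time.perf_counter()
ref = ray.put(payload)
put_s = time.perf_counter() - start

# ...and fetching it back (zero-copy for numpy arrays).
start = time.perf_counter()
out = ray.get(ref)
get_s = time.perf_counter() - start

print(f"put: {put_s * 1e3:.2f} ms, get: {get_s * 1e3:.2f} ms")
ray.shutdown()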
@oscarknagg
oscarknagg / (untitled snippet: header not captured)
import torch
bodies = torch.zeros((2, 1, 7, 7))
heads = torch.zeros((2, 1, 7, 7))
num_envs = bodies.size(0)
# Initialise body as shown in diagram
bodies[:, :, 3, 2] = 1
bodies[:, :, 3, 3] = 2
bodies[:, :, 2, 3] = 3
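Nothing after these assignments survives the page, but the pattern is suggestive: one grid per environment, with increasing integers apparently ordering the body segments. Under that reading (my assumption, not stated in the snippet), the most recently added segment can be recovered with an argmax:
import torch

bodies = torch.zeros((2, 1, 7, 7))
bodies[:, :, 3, 2] = 1
bodies[:, :, 3, 3] = 2
bodies[:, :, 2, 3] = 3

# The largest value marks the most recently added segment; flatten
# each 7x7 grid and convert the flat argmax back to (row, col).
flat = bodies.view(2, -1).argmax(dim=-1)
rows, cols = flat // 7, flat % 7
print(rows, cols)  # tensor([2, 2]) tensor([3, 3])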
@oscarknagg
oscarknagg / move_heads.py
Created May 14, 2019 13:47
Moving a head in 4 different directions with convolutions and einsum
import torch
import torch.nn.functional as F
movement_filters = torch.Tensor([
[
[0, 1, 0],
[0, 0, 0],
[0, 0, 0],
],
[
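The preview stops inside the filter tensor. A self-contained sketch of the same idea (the direction ordering, einsum indexing, and example shapes here are my assumptions, not the gist's): one 3x3 filter per direction, applied to every head at once with F.conv2d, then einsum selects each environment's chosen direction.
import torch
import torch.nn.functional as F

# One 3x3 filter per direction; a 1 offset from the centre shifts the
# head by one cell under cross-correlation (direction order assumed).
movement_filters = torch.Tensor([
    [[0, 0, 0], [0, 0, 0], [0, 1, 0]],  # row - 1: up
    [[0, 0, 0], [1, 0, 0], [0, 0, 0]],  # col + 1: right
    [[0, 1, 0], [0, 0, 0], [0, 0, 0]],  # row + 1: down
    [[0, 0, 0], [0, 0, 1], [0, 0, 0]],  # col - 1: left
]).unsqueeze(1)  # (4 directions, 1 in-channel, 3, 3)

heads = torch.zeros((2, 1, 7, 7))
heads[:, :, 2, 3] = 1               # a head at row 2, col 3 in each env
actions = torch.tensor([0, 1])      # one chosen direction per env

# Apply every filter to every head in a single call: (num_envs, 4, 7, 7).
all_moves = F.conv2d(heads, movement_filters, padding=1)

# One-hot the actions and let einsum pick out each env's own direction.
one_hot = F.one_hot(actions, num_classes=4).float()
new_heads = torch.einsum('nd,ndhw->nhw', one_hot, all_moves).unsqueeze(1)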
@oscarknagg
oscarknagg / adversarial_training.py
Last active January 3, 2019 23:28
Linf adversarial training for MNIST
from torchvision import transforms, datasets
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
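The preview stops inside the model definition. The core of Linf adversarial training, sketched with assumed hyperparameters (eps = 0.3 is a common MNIST choice, not confirmed from the gist; a one-step FGSM perturbation stands in for whatever inner attack the gist uses), is to perturb each batch before the usual update:
import torch
import torch.nn.functional as F

def fgsm_perturb(model, x, y, eps=0.3):
    # One gradient-sign step, which stays inside the Linf ball of
    # radius eps by construction.
    x_adv = x.clone().detach().requires_grad_(True)
    F.cross_entropy(model(x_adv), y).backward()
    with torch.no_grad():
        return (x_adv + eps * x_adv.grad.sign()).clamp(0, 1)

def adversarial_training_step(model, optimiser, x, y):
    # Train on the perturbed batch instead of the clean one.
    x_adv = fgsm_perturb(model, x, y)
    optimiser.zero_grad()
    loss = F.cross_entropy(model(x_adv), y)
    loss.backward()
    optimiser.step()
    return loss.item()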
@oscarknagg
oscarknagg / projected_gradient_descent.py
Last active November 25, 2023 03:52
Gist for projected gradient descent adversarial attack using PyTorch
import torch
def projected_gradient_descent(model, x, y, loss_fn, num_steps, step_size, step_norm, eps, eps_norm,
clamp=(0,1), y_target=None):
"""Performs the projected gradient descent attack on a batch of images."""
x_adv = x.clone().detach().requires_grad_(True).to(x.device)
targeted = y_target is not None
num_channels = x.shape[1]
for i in range(num_steps):
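The loop body is truncated. For the Linf case (step_norm == eps_norm == 'inf'; the full gist also handles L2 and targeted attacks), a plausible sketch of the missing steps, not the gist's exact code:
import torch

def pgd_linf(model, x, y, loss_fn, num_steps, step_size, eps, clamp=(0, 1)):
    # Linf-only sketch of the loop that the preview cuts off.
    x_adv = x.clone().detach()
    for _ in range(num_steps):
        x_adv.requires_grad_(True)
        loss = loss_fn(model(x_adv), y)
        grad, = torch.autograd.grad(loss, x_adv)
        with torch.no_grad():
            # Ascend the loss along the gradient sign...
            x_adv = x_adv + step_size * grad.sign()
            # ...then project back into the eps-ball around x
            # and the valid pixel range.
            x_adv = torch.max(torch.min(x_adv, x + eps), x - eps).clamp(*clamp)
    return x_adv.detach()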
@oscarknagg
oscarknagg / maml.py
Last active January 26, 2020 05:35
Key functionality for the supervised learning part of Model-Agnostic Meta-Learning (Finn et al 2017)
import torch
import torch.nn.functional as F
def replace_grad(parameter_gradients, parameter_name):
"""Creates a backward hook function that replaces the calculated gradient
with a precomputed value when .backward() is called.
See
https://pytorch.org/docs/stable/autograd.html?highlight=hook#torch.Tensor.register_hook
for more info
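The docstring is cut off mid-sentence. The usual shape of such a hook (a sketch consistent with the truncated docstring, not necessarily the gist's exact body) is a closure that discards the incoming gradient:
def replace_grad(parameter_gradients, parameter_name):
    """Creates a backward hook that replaces the calculated gradient
    with a precomputed value when .backward() is called."""
    def replace_grad_(grad):
        # Ignore the autograd-computed gradient entirely and return
        # the precomputed one instead.
        return parameter_gradients[parameter_name]
    return replace_grad_

# Assumed usage: param.register_hook(replace_grad(grads, name))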
@oscarknagg
oscarknagg / proto_nets.py
Last active November 26, 2018 22:04
Key functionality for Prototypical Networks (Snell et al 2017)
def proto_net_episode(model: Module,
optimiser: Optimizer,
loss_fn: Callable,
x: torch.Tensor,
y: torch.Tensor,
n_shot: int,
k_way: int,
q_queries: int,
distance: str,
train: bool):
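Only the signature survives the preview. The distinctive computation of Prototypical Networks, sketched under an assumed batch layout (the n_shot * k_way support embeddings first, queries after, supports grouped by class):
import torch

def proto_net_core(embeddings: torch.Tensor, n_shot: int, k_way: int):
    support = embeddings[:n_shot * k_way]
    queries = embeddings[n_shot * k_way:]
    # Each class prototype is the mean of its n_shot support embeddings.
    prototypes = support.reshape(k_way, n_shot, -1).mean(dim=1)
    # Negative squared Euclidean distance acts as the logit per class.
    logits = -torch.cdist(queries, prototypes) ** 2
    return logits  # (k_way * q_queries, k_way)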
@oscarknagg
oscarknagg / matching_networks.py
Last active September 23, 2021 09:52
Key functionality for Matching Networks (Vinyals et al 2016)
import torch
from torch.nn.utils import clip_grad_norm_
def matching_net_episode(model: Module,
optimiser: Optimizer,
loss_fn: Loss,
x: torch.Tensor,
y: torch.Tensor,
n_shot: int,
k_way: int,
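This signature is also cut short. The step that makes Matching Networks distinctive, sketched with assumed shapes: each query's label distribution is an attention-weighted blend of the one-hot support labels, with attention given by a softmax over cosine similarities.
import torch
import torch.nn.functional as F

def matching_net_predictions(support: torch.Tensor,        # (n_shot * k_way, dim)
                             support_labels: torch.Tensor,  # (n_shot * k_way,)
                             queries: torch.Tensor,         # (num_queries, dim)
                             k_way: int) -> torch.Tensor:
    # Cosine similarity between every query and every support embedding.
    sims = F.normalize(queries, dim=-1) @ F.normalize(support, dim=-1).t()
    attention = sims.softmax(dim=-1)
    # Blend the one-hot support labels by attention weight.
    one_hot = F.one_hot(support_labels, num_classes=k_way).float()
    return attention @ one_hot  # (num_queries, k_way), rows sum to 1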