Oscar Knagg (oscarknagg) - GitHub Gists
@oscarknagg
oscarknagg / siamese.py
Last active October 2, 2018 18:43
Demonstration code for a siamese network
from keras.layers import Input, Subtract, Dense, Lambda
from keras.models import Model
import keras.backend as K

def build_siamese_network(encoder, input_shape):
    input_1 = Input(input_shape)
    input_2 = Input(input_shape)
    # `encoder` is any predefined network that maps a single sample
    # into an embedding space.
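The preview stops before the twin branches are combined. A minimal sketch of one common way to finish such a network, reusing the imports above and assuming an L1 distance between the twin embeddings with a sigmoid similarity head (both choices are assumptions, not necessarily what the full gist does):

def build_siamese_network(encoder, input_shape):
    input_1 = Input(input_shape)
    input_2 = Input(input_shape)
    # Weight sharing: the same encoder instance processes both inputs
    embedding_1 = encoder(input_1)
    embedding_2 = encoder(input_2)
    # Element-wise L1 distance between the two embeddings (assumed metric)
    distance = Lambda(lambda t: K.abs(t[0] - t[1]))([embedding_1, embedding_2])
    # Scalar similarity score in (0, 1); 1 means "same class"
    prediction = Dense(1, activation='sigmoid')(distance)
    return Model(inputs=[input_1, input_2], outputs=prediction)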
import torch
from typing import List, Iterable
from torch.utils.data import Sampler

class NShotTaskSampler(Sampler):
    def __init__(self,
                 dataset: torch.utils.data.Dataset,
                 episodes_per_epoch: int = None,
                 n: int = None,
                 k: int = None,
                 q: int = None,
                 num_tasks: int = 1,
                 fixed_tasks: List[Iterable[int]] = None):
        """PyTorch Sampler subclass that generates batches of n-shot, k-way, q-query tasks."""
@oscarknagg
oscarknagg / matching_networks.py
Last active September 23, 2021 09:52
Key functionality for Matching Networks (Vinyals et al 2016)
import torch
from torch.nn import Module
from torch.nn.modules.loss import _Loss as Loss  # assumed alias; the preview omits this import
from torch.nn.utils import clip_grad_norm_
from torch.optim import Optimizer

def matching_net_episode(model: Module,
                         optimiser: Optimizer,
                         loss_fn: Loss,
                         x: torch.Tensor,
                         y: torch.Tensor,
                         n_shot: int,
                         k_way: int,
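The episode function is cut off mid-signature, but the core Matching Networks computation (Vinyals et al 2016) is an attention-weighted vote over the support labels. A minimal sketch of that step, assuming cosine-similarity attention and integer class labels (all names here are illustrative):

import torch
import torch.nn.functional as F

def matching_net_predictions(support: torch.Tensor,        # (n*k, d) support embeddings
                             support_labels: torch.Tensor,  # (n*k,) class ids in [0, k)
                             queries: torch.Tensor,         # (q*k, d) query embeddings
                             k_way: int) -> torch.Tensor:
    # Attention: softmax over cosine similarities between each query and every support
    similarities = F.cosine_similarity(queries.unsqueeze(1), support.unsqueeze(0), dim=2)
    attention = F.softmax(similarities, dim=1)               # (q*k, n*k)
    # Predictions are attention-weighted sums of one-hot support labels
    one_hot = F.one_hot(support_labels, k_way).float()       # (n*k, k)
    return attention @ one_hot                               # (q*k, k), rows sum to 1

# Toy check: 2-way, 1-shot, one query per class, 4-dim embeddings
support = torch.randn(2, 4)
queries = support + 0.01 * torch.randn(2, 4)
probs = matching_net_predictions(support, torch.tensor([0, 1]), queries, k_way=2)
assert torch.allclose(probs.sum(dim=1), torch.ones(2), atol=1e-5)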
@oscarknagg
oscarknagg / proto_nets.py
Last active November 26, 2018 22:04
Key functionality for Prototypical Networks (Snell et al 2017)
import torch
from typing import Callable
from torch.nn import Module
from torch.optim import Optimizer

def proto_net_episode(model: Module,
                      optimiser: Optimizer,
                      loss_fn: Callable,
                      x: torch.Tensor,
                      y: torch.Tensor,
                      n_shot: int,
                      k_way: int,
                      q_queries: int,
                      distance: str,
                      train: bool):
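Only the signature survives in the preview. The heart of Prototypical Networks (Snell et al 2017) is that each class prototype is the mean of its support embeddings, and queries are scored by negative distance to the prototypes. A minimal sketch under those assumptions, using squared Euclidean distance:

import torch

def proto_net_logits(support: torch.Tensor, queries: torch.Tensor,
                     n_shot: int, k_way: int) -> torch.Tensor:
    # support is (n_shot * k_way, d), grouped by class; prototypes are class means
    prototypes = support.reshape(k_way, n_shot, -1).mean(dim=1)   # (k, d)
    # Negative squared Euclidean distance to each prototype acts as the logit
    distances = torch.cdist(queries, prototypes) ** 2             # (num_queries, k)
    return -distances

support = torch.randn(3 * 5, 16)   # 3-shot, 5-way, 16-dim embeddings
queries = torch.randn(10, 16)
logits = proto_net_logits(support, queries, n_shot=3, k_way=5)
print(logits.softmax(dim=1).shape)  # torch.Size([10, 5])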
@oscarknagg
oscarknagg / maml.py
Last active January 26, 2020 05:35
Key functionality for the supervised learning part of Model-Agnostic Meta-Learning (Finn et al 2017)
import torch
import torch.nn.functional as F

def replace_grad(parameter_gradients, parameter_name):
    """Creates a backward hook function that replaces the calculated gradient
    with a precomputed value when .backward() is called.

    See
    https://pytorch.org/docs/stable/autograd.html?highlight=hook#torch.Tensor.register_hook
    for more info.
    """
    def replace_grad_(grad):
        return parameter_gradients[parameter_name]  # body assumed from the docstring

    return replace_grad_
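The hook above is the mechanism for writing externally computed per-task gradients back into a model. For orientation, here is a toy sketch of the supervised MAML loop itself (Finn et al 2017) on a one-parameter regression problem; it illustrates the inner/outer structure only and is not the gist's implementation:

import torch

def sample_task():
    # Each task regresses y = a * x for a task-specific slope a,
    # with separate support (inner) and query (outer) sets
    a = torch.randn(1)
    x_s, x_q = torch.randn(10, 1), torch.randn(10, 1)
    return x_s, a * x_s, x_q, a * x_q

w = torch.zeros(1, requires_grad=True)   # meta-parameter of the model y = w * x
meta_opt = torch.optim.SGD([w], lr=0.1)
inner_lr = 0.05

for step in range(200):
    meta_opt.zero_grad()
    for _ in range(4):                   # tasks per meta-batch
        x_s, y_s, x_q, y_q = sample_task()
        # Inner step: one gradient step on the support set; create_graph=True
        # keeps the step differentiable so the meta-gradient flows through it
        inner_loss = ((w * x_s - y_s) ** 2).mean()
        grad, = torch.autograd.grad(inner_loss, w, create_graph=True)
        w_fast = w - inner_lr * grad
        # Outer step: evaluate the adapted weight on the query set and
        # accumulate the meta-gradient into w.grad
        ((w_fast * x_q - y_q) ** 2).mean().backward()
    meta_opt.step()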
@oscarknagg
oscarknagg / projected_gradient_descent.py
Last active November 25, 2023 03:52
Gist for projected gradient descent adversarial attack using PyTorch
import torch

def projected_gradient_descent(model, x, y, loss_fn, num_steps, step_size, step_norm, eps, eps_norm,
                               clamp=(0,1), y_target=None):
    """Performs the projected gradient descent attack on a batch of images."""
    x_adv = x.clone().detach().requires_grad_(True).to(x.device)
    targeted = y_target is not None
    num_channels = x.shape[1]

    for i in range(num_steps):
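The attack loop body is cut off. For reference, the Linf special case reduces to the sketch below: ascend the loss along the gradient sign, project the iterate back into the eps-ball around the clean input, and clamp to the valid pixel range (a simplified single-norm variant, not the gist's multi-norm version):

import torch

def pgd_linf(model, x, y, loss_fn, num_steps, step_size, eps, clamp=(0, 1)):
    x_adv = x.clone().detach()
    for _ in range(num_steps):
        x_adv.requires_grad_(True)
        loss = loss_fn(model(x_adv), y)
        grad, = torch.autograd.grad(loss, x_adv)
        with torch.no_grad():
            # Gradient ascent step, then projection into the Linf ball of radius eps
            x_adv = x_adv + step_size * grad.sign()
            x_adv = torch.max(torch.min(x_adv, x + eps), x - eps)
            x_adv = x_adv.clamp(*clamp)
    return x_adv.detach()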
@oscarknagg
oscarknagg / adversarial_training.py
Last active January 3, 2019 23:28
Linf adversarial training for MNIST
from torchvision import transforms, datasets
from torch import nn, optim
from torch.utils.data import DataLoader
import torch.nn.functional as F

class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=5)
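The preview shows only the start of the CNN. The training side of Linf adversarial training pairs a loop like the one below with an attack such as the pgd_linf sketch above: each clean batch is replaced by its worst-case perturbation before the usual update (all names and the eps=0.3 MNIST budget are illustrative):

import torch.nn.functional as F

def adversarial_epoch(model, train_loader, optimiser, eps=0.3, step_size=0.01, num_steps=40):
    model.train()
    for x, y in train_loader:
        # Craft adversarial examples against the current weights, then
        # take an ordinary training step on the adversarial batch
        x_adv = pgd_linf(model, x, y, F.cross_entropy,
                         num_steps=num_steps, step_size=step_size, eps=eps)
        optimiser.zero_grad()
        F.cross_entropy(model(x_adv), y).backward()
        optimiser.step()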
@oscarknagg
oscarknagg / move_heads.py
Created May 14, 2019 13:47
Moving a head in 4 different directions with convolutions and einsum
import torch
import torch.nn.functional as F

# Four one-hot 3x3 kernels; convolving the head channel with each kernel
# shifts the head by one cell in a different direction. Only the first
# kernel appears in the preview; the remaining three are assumed.
movement_filters = torch.Tensor([
    [[0, 1, 0], [0, 0, 0], [0, 0, 0]],
    [[0, 0, 0], [1, 0, 0], [0, 0, 0]],
    [[0, 0, 0], [0, 0, 1], [0, 0, 0]],
    [[0, 0, 0], [0, 0, 0], [0, 1, 0]],
])
import torch
bodies = torch.zeros((2, 1, 7, 7))
heads = torch.zeros((2, 1, 7, 7))
num_envs = bodies.size(0)
# Initialise body as shown in diagram
bodies[:, :, 3, 2] = 1
bodies[:, :, 3, 3] = 2
bodies[:, :, 2, 3] = 3
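The gist title promises convolutions plus einsum; here is a sketch of how the tensors above plausibly combine. Per-environment one-hot action vectors select a movement kernel via einsum, and a grouped convolution applies each environment's kernel to its own head channel only (the actions tensor and the grouped-conv call are assumptions):

heads[:, :, 1, 3] = 1                          # place each head somewhere (illustrative)

# One action per environment, as a one-hot over the four movement kernels
actions = torch.eye(4)[torch.tensor([0, 2])]   # (num_envs, 4)
# einsum blends the kernel stack into a single 3x3 kernel per environment
per_env_filters = torch.einsum('ed,dhw->ehw', actions, movement_filters)
# Grouped conv: environments become channels, so each env sees only its own kernel
new_heads = F.conv2d(
    heads.view(1, num_envs, 7, 7),
    per_env_filters.unsqueeze(1),              # (num_envs, 1, 3, 3)
    groups=num_envs,
    padding=1,
).view(num_envs, 1, 7, 7)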
import os.path
import psutil
import pyarrow as pa
import numpy as np
from pyarrow import parquet as pq
import time
WINDOW_LENGTH = 1000
N = 1000000