Skip to content

Instantly share code, notes, and snippets.

Brandon Victor Multihuntr

  • La Trobe University
View GitHub Profile
@Multihuntr
Multihuntr / listlike.py
Created Jun 18, 2020
A list-like wrapper for dictionary-like objects
View listlike.py
import collections
class ListLike(collections.abc.MutableSequence):
'''
A list-like interface wrapping dictionary-like objects.
Uses integer keys, like you would for a list, has range checking,
negative indexing and slicing working as you'd expect.
i.e.
with shelve.open('foo.dat') as sf:
@Multihuntr
Multihuntr / multi_level_index.py
Created May 7, 2020
For flat indexing into a nested structure without flattening that structure.
View multi_level_index.py
import bisect
import numpy as np
import collections.abc
class TwoLevelIndex(collections.abc.Sequence):
def __init__(self, coll):
counts = [len(thing) for thing in coll]
self.cum_counts = np.cumsum(counts)
def __getitem__(self, idx):
seg_idx = bisect.bisect(self.cum_counts, idx)
@Multihuntr
Multihuntr / wrapper.py
Created Apr 24, 2020
Generic Pytorch Module Wrapper - When nn.Sequential just isn't enough
View wrapper.py
# I keep properties on my main nn.Modules. e.g. a list of the training statistics the model is tracking.
# I wanted to perform a set of extra actions across multiple different modules without having to
# - write those steps into each of the 5+ different model definitions, or
# - explicitly expose those values on the wrapper module.
# It's fairly trivial, but if you don't use the try: super(), it doesn't keep the `wrapped` property.
import torch
import torch.nn as nn
class Wrapper(nn.Module):
@Multihuntr
Multihuntr / derive.py
Last active Dec 17, 2019
Decorator for lazy-loaded derived values
View derive.py
def derive(_from, _coll='_derived', name=None):
'''
Creates a decorator that caches derived values.
Utilises a property on the object to keep a collection of derived properties,
but requires the calling obj to manage that collection (clearing, instantiation, etc).
Args:
_from (str): Property this property is derived from
_coll (str, optional): Collection name of derived property names
name (str, optional): Overwrite the property used to cache
@Multihuntr
Multihuntr / pytorch_tensor_to_image.py
Created Sep 2, 2019
One liner to save a GPU pytorch tensor to disk as an image using PIL
View pytorch_tensor_to_image.py
# One-liners: save a PyTorch tensor to disk as a PNG via PIL.
from PIL import Image
# Assumes tensor is shaped [c, h, w]
# .detach().cpu() drops autograd history and moves the data off the GPU;
# .transpose([1, 2, 0]) reorders channels-first [c, h, w] to the
# channels-last [h, w, c] layout PIL expects.
# NOTE(review): Image.fromarray needs a PIL-supported dtype (typically
# uint8 in [0, 255]); a float tensor presumably needs scaling/casting
# first — confirm before relying on this as-is.
Image.fromarray(tensor.detach().cpu().numpy().transpose([1, 2, 0])).save('test.png')
# Assumes tensor is shaped [n, c, h, w]
# tensor[0] selects the first item of the batch before the same conversion.
Image.fromarray(tensor[0].detach().cpu().numpy().transpose([1, 2, 0])).save('test.png')
# Yeah, pretty simple, but annoying to remember...
@Multihuntr
Multihuntr / gauss_smooth.py
Created Aug 2, 2019
Numpy gaussian smoothing
View gauss_smooth.py
# Super simple 1D smoothing with just numpy. Just smooths a few sharp edges.
import math
import numpy as np
def gaussian_kernel(n=5):
    '''
    Return a 1D smoothing kernel of length ``n`` whose weights sum to 1.

    Weights decay as exp(-x**2 / (n - 2)**2) for x in [0, 1), i.e. a
    one-sided Gaussian-style taper with the heaviest weight first
    (the normalising constant of a true Gaussian is dropped since the
    kernel is renormalised anyway).

    Args:
        n (int, optional): Kernel length. Must be at least 3: the spread
            term divides by (n - 2)**2, so n == 2 would divide by zero
            and produce NaN weights.

    Returns:
        np.ndarray: ``n`` positive weights summing to 1.

    Raises:
        ValueError: If ``n`` < 3.
    '''
    if n < 3:
        raise ValueError('n must be >= 3, got %d' % n)
    x = np.arange(n)/n
    g = math.e ** -(x**2/((n - 2)**2))  # drop the constant factor at the start
    return g/g.sum()  # normalise so it sums to 1
@Multihuntr
Multihuntr / minimal_nvvl_tests.py
Created Jun 11, 2018
Minimal examples of using NVIDIA/nvvl
View minimal_nvvl_tests.py
# Minimal usage examples for NVIDIA/nvvl video loading.
# Simplest case (note: VideoDataset.__get__ puts frames on CPU)
import nvvl
# d[i] yields a clip from the listed file; the second argument (3)
# presumably sets the number of frames per sample — confirm against
# the nvvl README.
d = nvvl.VideoDataset(['prepared.mp4'], 3)
fr = d[0]
print(type(fr))
print(fr.shape)
# Custom processing (note: VideoDataset.__get__ puts frames on CPU)
import nvvl
# NOTE(review): ProcessDesc appears to configure per-output preprocessing,
# keyed by name ('a') — verify the exact semantics in the nvvl docs.
d = nvvl.VideoDataset(['prepared.mp4'], 3, processing={'a': nvvl.ProcessDesc()})
@Multihuntr
Multihuntr / accumulate_grads_min.py
Created Jan 23, 2018
A minimal example of how you can accumulate gradients across batches, allowing you to train using much larger batch sizes than can fit in memory at the cost of speed.
View accumulate_grads_min.py
# Setup for gradient accumulation over MNIST (TF1-style script).
import numpy as np
import tensorflow as tf
import sys
from tensorflow.examples.tutorials.mnist import input_data
# Optional positional CLI args, each with a fallback default:
#   argv[1]: number of pseudo-batches whose gradients are accumulated
#            before one weight update (default 128)
#   argv[2]: per-forward-pass batch size actually held in memory (default 32)
#   argv[3]: number of training iterations (default 10)
n_pseudo_batches = int(sys.argv[1]) if len(sys.argv) > 1 else 128
actual_batch_size = int(sys.argv[2]) if len(sys.argv) > 2 else 32
iterations = int(sys.argv[3]) if len(sys.argv) > 3 else 10
@Multihuntr
Multihuntr / accumulate_grads.py
Last active Aug 10, 2018
Accumulating gradients to reduce memory requirement per forward pass (using MNIST)
View accumulate_grads.py
import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
def simple_model(input):
# This ensures that the model will always be instantiated the same, for comparison.
hidden_initializer = tf.constant_initializer(np.random.uniform(-0.025, 0.025, size=[784,100]))
hidden = tf.layers.dense(input, 100, kernel_initializer=hidden_initializer)
out_initializer = tf.constant_initializer(np.random.uniform(-0.025, 0.025, size=[100,10]))
@Multihuntr
Multihuntr / random_queue.py
Created Oct 20, 2017
Blocking Thread-safe Random Queue
View random_queue.py
import queue
import random
class RandomQueue(queue.Queue):
    '''
    A blocking, thread-safe queue that hands items back in random order.

    Only the internal `_put` hook is overridden; `queue.Queue` itself
    supplies all locking and blocking semantics, so `put`/`get` remain
    safe to call from multiple threads.
    '''

    def _put(self, item):
        # Drop the item at a uniformly chosen position anywhere in the
        # backing deque (all len+1 slots are equally likely), so that
        # successive gets from the front come out in random order.
        slot = random.randrange(0, len(self.queue) + 1)
        self.queue.insert(slot, item)
You can’t perform that action at this time.