Andy Brock (ajbrock), GitHub gists
%% Benchmark Analysis Script
% A Brock, 11.16.2016
%
% This quick script runs through and determines the fastest method from a
% given set of benchmarks.
%
% Note that this script is really only set up to work well with a single
% benchmark file, as the indexing isn't quite perfect for the multiple-
% input case. Extending it should be easy enough if desired.
%% Clear the playing field
class MyOp(theano.Op):
    # The itypes and otypes attributes are compulsory if the make_node
    # method is not defined; they declare the types of the inputs and
    # outputs, respectively.
    itypes = [cuda.CudaNdarrayType([False, False, False, False])]
    otypes = [cuda.CudaNdarrayType([False] * 4)]
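For context, a minimal CPU-only sketch of the same Op pattern (hypothetical DoubleOp; standard Theano API): declaring itypes/otypes lets Theano generate make_node automatically, so only perform needs to be written.

import numpy as np
import theano
import theano.tensor as T

class DoubleOp(theano.Op):
    __props__ = ()
    # With itypes/otypes declared, Theano builds make_node for us.
    itypes = [T.fmatrix]
    otypes = [T.fmatrix]

    def perform(self, node, inputs, output_storage):
        (x,) = inputs
        output_storage[0][0] = 2 * x

x = T.fmatrix('x')
f = theano.function([x], DoubleOp()(x))
f(np.ones((2, 2), dtype='float32'))  # -> matrix of 2.0s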
def reflect_pad(x, width, batch_ndim=1):
    """
    Pad a tensor using reflection padding.

    Parameters
    ----------
    x : tensor
    width : int, iterable of int, or iterable of tuple
        Padding width. If an int, pads each axis symmetrically with the same
        amount at the beginning and end. If an iterable of int, defines the
        symmetric padding width separately for each axis. If an iterable of
        tuples of two ints, defines a separate padding width for each
        beginning and end of each axis.
    batch_ndim : integer
        Dimensions before this value will not be padded.
    """
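    # (The gist preview ends above; the body below is a hedged reconstruction
    # for the common case of a 4D tensor and a single int width, assuming
    # `import theano.tensor as T`. Negative-step slices build the mirrored
    # borders without repeating the edge pixel; this is not necessarily the
    # original implementation.)
    w = width
    x = T.concatenate([x[:, :, w:0:-1, :], x, x[:, :, -2:-2 - w:-1, :]], axis=2)
    x = T.concatenate([x[:, :, :, w:0:-1], x, x[:, :, :, -2:-2 - w:-1]], axis=3)
    return x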
import theano
import theano.tensor as T
import lasagne
import numpy as np
import time
# Subpixel Upsample Layer using set_subtensor
# This layer uses a set of r^2 inc_subtensor calls to reorganize the tensor in the subpixel
# upscaling style of the ESPCN super-resolution paper from Magic Pony. There is almost
# certainly a more efficient way to do this, but I haven't figured it out yet, and this
# seems to be fast enough.
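A minimal sketch of the reorganization just described, assuming input shape (batch, c * r**2, h, w) with channels grouped per sub-pixel offset (the helper name and channel grouping are assumptions, not necessarily the original layout):

import theano.tensor as T

def subpixel_upsample_sketch(x, r, c):
    # Scatter (batch, c * r**2, h, w) into (batch, c, h * r, w * r) with one
    # inc_subtensor call per sub-pixel offset, r**2 calls in total.
    s = x.shape
    out = T.zeros((s[0], c, s[2] * r, s[3] * r))
    for i in range(r):
        for j in range(r):
            k = i * r + j
            out = T.inc_subtensor(out[:, :, i::r, j::r],
                                  x[:, k * c:(k + 1) * c, :, :])
    return out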
%% knn Custom K-Nearest Neighbors Function
% class = knn_all(x,labels,K,weighted)
%
% A Brock, 29.9.15
%
% This function is a modification of the knn function that computes the KNN class of
% each member of a dataset. It uses parallel processing to quickly compute the class
% of each member of x based on its K nearest neighbors within x. The user can choose
% the weighted version of the algorithm, in which the K nearest neighbors "vote"
% for the predicted class, with the weight of each vote scaled by its distance.
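The gist itself is MATLAB; purely for illustration, a hedged NumPy rendering of the same leave-one-out voting scheme (the function name and the inverse-distance weighting are assumptions):

import numpy as np

def knn_all_sketch(x, labels, K, weighted=False):
    # Classify every row of x from its K nearest neighbors among the other rows.
    d = np.sqrt(((x[:, None, :] - x[None, :, :]) ** 2).sum(-1))
    np.fill_diagonal(d, np.inf)           # a point may not be its own neighbor
    nn = np.argsort(d, axis=1)[:, :K]     # indices of the K nearest neighbors
    classes = np.unique(labels)
    votes = np.zeros((x.shape[0], classes.size))
    rows = np.arange(x.shape[0])
    for col in range(K):
        # Weighted voting scales each vote by inverse distance.
        w = 1.0 / (d[rows, nn[:, col]] + 1e-12) if weighted else 1.0
        for ci, c in enumerate(classes):
            votes[:, ci] += w * (labels[nn[:, col]] == c)
    return classes[np.argmax(votes, axis=1)]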
# Inception Score Calculator
#
# A Brock, 2017
#
# This snippet assumes you have two functions defined:
# 1. sample_net, which takes in a batch x num_latents random vector and returns batch samples,
# 2. eval_net, which takes in batch samples and returns a batch x #classes prediction vector.
num_latents = 100
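For reference, a hedged sketch of the standard Inception Score computation, IS = exp(E_x[KL(p(y|x) || p(y))]), in terms of the two assumed functions (batch counts and sizes are hypothetical):

import numpy as np

def inception_score_sketch(num_batches=10, batch_size=100):
    preds = []
    for _ in range(num_batches):
        z = np.random.randn(batch_size, num_latents).astype('float32')
        preds.append(eval_net(sample_net(z)))   # p(y|x) for each sample
    p_yx = np.concatenate(preds, axis=0)        # shape: (N, #classes)
    p_y = p_yx.mean(axis=0, keepdims=True)      # marginal class distribution p(y)
    kl = (p_yx * (np.log(p_yx + 1e-12) - np.log(p_y + 1e-12))).sum(axis=1)
    return np.exp(kl.mean())                    # exponentiated mean KL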
## Unrolled GAN
# A Brock, 2016
# This code implements the toy experiment for unrolled GANs.
# TODO: Make shared variables and reduce the memory transfer overhead
# Imports
import numpy as np
## Wide ResNet with Shift and incorrect hyperparams.
# Based on code by xternalz: https://github.com/xternalz/WideResNet-pytorch
# WRN by Sergey Zagoruyko and Nikos Komodakis
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable as V
import torch.optim as optim
import torch
import math
import torch.optim
from torch.optim.optimizer import Optimizer, required
class AdamHD(Optimizer):
    """Implements the Adam algorithm with hypergradient-based learning-rate
    adaptation.

    Adam was proposed in `Adam: A Method for Stochastic Optimization`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
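The update rule the docstring refers to, as a hedged NumPy sketch of a single Adam step (the hypergradient variant implied by the class name additionally adapts lr online, which the preview does not show):

import numpy as np

def adam_step(p, g, m, v, t, lr=1e-3, b1=0.9, b2=0.999, eps=1e-8):
    m = b1 * m + (1 - b1) * g               # first-moment (mean) estimate
    v = b2 * v + (1 - b2) * g * g           # second-moment estimate
    m_hat = m / (1 - b1 ** t)               # bias corrections for step t >= 1
    v_hat = v / (1 - b2 ** t)
    p = p - lr * m_hat / (np.sqrt(v_hat) + eps)
    return p, m, v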
import torch
# Dict to store hooks and flop count
data_dict = {'conv_flops': 0, 'hooks': []}

def count_conv_flops(self, input, output):
    # Flop contribution from channelwise connections
    flops_c = self.out_channels * self.in_channels // self.groups
    # Flop contribution from the number of spatial locations we convolve over
    flops_s = output.size(2) * output.size(3)
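    # (Preview ends above; a hedged completion: the per-position cost also
    # scales with the kernel area, and the running total accumulates into
    # data_dict.)
    flops_k = self.kernel_size[0] * self.kernel_size[1]
    data_dict['conv_flops'] += flops_c * flops_s * flops_k

# Hypothetical usage: hook every Conv2d in a model, then run a forward pass.
# for m in model.modules():
#     if isinstance(m, torch.nn.Conv2d):
#         data_dict['hooks'].append(m.register_forward_hook(count_conv_flops))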