William Horton (wdhorton)

wdhorton / test_backward.ipynb
Created September 18, 2018 02:39
Testing ConstantPadNd backward
(Notebook preview unavailable.)
wdhorton / pytorch_build_output.txt
Created September 1, 2018 04:10
PyTorch build output
Building wheel torch-0.5.0a0+1b7172a
running install
running build_deps
+ USE_CUDA=0
+ USE_ROCM=0
+ USE_NNPACK=0
+ USE_MKLDNN=0
+ USE_GLOO_IBVERBS=0
+ FULL_CAFFE2=0
+ [[ 9 -gt 0 ]]
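
The USE_*=0 lines above are shell-traced environment variables that PyTorch's setup.py reads to decide which optional backends to build. As a hypothetical sketch (not part of the gist), the same configuration could be reproduced from Python, assuming a checkout of the PyTorch source in ./pytorch:

import os
import subprocess

# Hypothetical reproduction of the build configuration shown in the log above.
# Each USE_* variable disables an optional backend; setup.py reads them from
# the environment at build time.
env = dict(os.environ,
           USE_CUDA="0", USE_ROCM="0", USE_NNPACK="0",
           USE_MKLDNN="0", USE_GLOO_IBVERBS="0", FULL_CAFFE2="0")
subprocess.run(["python", "setup.py", "install"], cwd="pytorch", env=env, check=True)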
import torch  # needed for the batch norm isinstance check below

# Callback is assumed to be the base callback class from the 0.7-era fastai library.

params = []

# Callback for storing the params of the model after each epoch
class SaveModelParams(Callback):
    def __init__(self, model):
        self.model = model

    def on_epoch_end(self, metrics):
        params.append([p.data.cpu().numpy() for p in self.model.parameters()])


# Collect every batch norm layer in a model (intended for use with model.apply)
def collect_bn_modules(module, bn_modules):
    if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
        bn_modules.append(module)


def fix_batchnorm(swa_model, train_dl):
    """
    During training, batch norm layers keep track of a running mean and
    variance of the previous layer's activations. Because the parameters
    of the SWA model are computed as the average of other models' parameters,
    the SWA model never sees the training data itself, and therefore has no
    [docstring and function body truncated in the gist preview]
    """
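
The body of fix_batchnorm is cut off in the preview above. The usual SWA batch-norm fix, which the docstring describes, is to reset each batch norm layer's running statistics and then run one forward pass of the training data through the averaged model so the statistics are recomputed. The sketch below is an illustration of that idea, not the gist's actual code; it assumes train_dl yields (input, target) batches and relies on standard PyTorch BatchNorm behavior (momentum=None gives a cumulative average).

import torch

def fix_batchnorm_sketch(swa_model, train_dl):
    # Find all batch norm layers (same idea as collect_bn_modules above).
    bn_modules = [m for m in swa_model.modules()
                  if isinstance(m, torch.nn.modules.batchnorm._BatchNorm)]
    if not bn_modules:
        return

    swa_model.train()  # running stats only update in training mode
    for bn in bn_modules:
        bn.reset_running_stats()  # clear the stale running mean/var
        bn.momentum = None        # None => cumulative (true) average over batches

    # One pass over the training data recomputes the statistics for the
    # weight-averaged model without touching its parameters.
    with torch.no_grad():
        for x, _ in train_dl:
            swa_model(x)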
wdhorton / swa.py
Created April 1, 2018 17:58
SWA callback
class SWA(Callback):
    def __init__(self, model, swa_model, swa_start):
        super().__init__()
        self.model, self.swa_model, self.swa_start = model, swa_model, swa_start

    def on_train_begin(self):
        self.epoch = 0
        self.swa_n = 0

    def on_epoch_end(self, metrics):
        ...  # body truncated in the gist preview
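
The on_epoch_end body is not shown in the preview. As a hedged sketch of the standard SWA update (Izmailov et al., 2018) that a callback like this would perform: once the epoch counter reaches swa_start, fold the current weights into a running average held by swa_model. The exact bookkeeping, in particular when the counters are incremented, is an assumption, not taken from the gist.

    # Illustrative sketch only; not the gist's actual on_epoch_end.
    def on_epoch_end(self, metrics):
        if self.epoch + 1 >= self.swa_start:
            # swa_w <- (swa_w * n + w) / (n + 1): every snapshot gets equal weight
            for swa_p, p in zip(self.swa_model.parameters(), self.model.parameters()):
                swa_p.data.mul_(self.swa_n).add_(p.data).div_(self.swa_n + 1)
            self.swa_n += 1
        self.epoch += 1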