# coding: utf-8
import torch
from torch.nn import Module
class Hwhq(Module):
    '''
    Half-wave Gaussian quantization (HWGQ) forward pass: non-negative inputs
    are snapped to the nearest quantization center, negative inputs are
    scaled by a small slope.
    '''
    def __init__(self, config):
        super(Hwhq, self).__init__()
        # quantization centers, assumed sorted in ascending order
        self.register_buffer('centers', torch.tensor(config.centers))
        # midpoints between adjacent centers act as decision thresholds
        self.register_buffer('thresholds',
                             (self.centers[:-1] + self.centers[1:]) / 2.)
        self.neg_slope = config.neg_slope

    def forward(self, x):
        # only demonstrates the HWGQ forward pass; the backward
        # approximation is not implemented here
        # index of the nearest center = number of thresholds strictly below x
        idx = (x.unsqueeze(-1) > self.thresholds).long().sum(-1)
        return torch.where(x < 0., x * self.neg_slope, self.centers[idx])
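
# A minimal usage sketch, assuming `config` is any object exposing `centers`
# and `neg_slope` attributes (SimpleNamespace stands in for the config class
# the gist does not show; the center values are illustrative, not from HWGQ):
#
#   from types import SimpleNamespace
#   cfg = SimpleNamespace(centers=[0.5, 1.0, 1.5], neg_slope=0.1)
#   quant = Hwhq(cfg)
#   y = quant(torch.randn(8))  # negatives scaled by 0.1, positives snapped to a center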
class BinActive(torch.autograd.Function):
    '''
    Binarize the input activations and calculate the mean of their absolute
    values across the channel dimension.
    '''
    @staticmethod
    def forward(ctx, input, bin_degree):
        ctx.save_for_backward(input)
        # channel-wise mean of |input|, returned as a scaling statistic
        mean = torch.mean(input.abs(), 1, keepdim=True)
        # soft binarization: bin_degree = 0 is the identity, and the output
        # approaches sign(input) as bin_degree grows
        proj = input.sign()
        output = (bin_degree * proj + input) / (1. + bin_degree)
        return output, mean

    @staticmethod
    def backward(ctx, grad_output, grad_output_mean):
        # grad_output_mean is ignored; the mean is a statistic, not a learned path
        input, = ctx.saved_tensors
        # straight-through estimator: pass gradients through where
        # |input| <= 1 and zero them outside that range
        grad_input = grad_output.clone()
        grad_input[input.ge(1)] = 0
        grad_input[input.le(-1)] = 0
        # no gradient for the non-tensor bin_degree argument
        return grad_input, None
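
# A quick smoke test, runnable as a script; the shapes and bin_degree value
# are made up for illustration. `bin_degree` interpolates between the
# identity (0) and full binarization (large values).
if __name__ == '__main__':
    x = torch.randn(2, 3, 4, 4, requires_grad=True)
    out, mean = BinActive.apply(x, 2.0)
    out.sum().backward()
    print(out.shape, mean.shape, x.grad.shape)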