Skip to content

Instantly share code, notes, and snippets.

@mohcinemadkour
Created July 25, 2020 05:02
Show Gist options
  • Save mohcinemadkour/0ae0fff3f81c82d83b9d9615f13a2861 to your computer and use it in GitHub Desktop.
Save mohcinemadkour/0ae0fff3f81c82d83b9d9615f13a2861 to your computer and use it in GitHub Desktop.
class MyDropout(nn.Module):
    """Inverted dropout implemented from scratch.

    During training each input element is zeroed independently with
    probability ``p``; survivors are scaled by ``1 / (1 - p)`` so the
    expected value of the output equals the input. In eval mode
    (``model.eval()``) the layer is the identity.

    Args:
        p (float): probability of an element being zeroed. Default: 0.5.
    """

    def __init__(self, p=0.5):
        super(MyDropout, self).__init__()
        self.p = p
        # Inverted-dropout scale 1/(1-p). Use 0.0 when p == 1 to avoid
        # a division by zero (every element is dropped anyway).
        if self.p < 1:
            self.multiplier_ = 1.0 / (1.0 - p)
        else:
            self.multiplier_ = 0.0

    def forward(self, input):
        # model.eval(): dropout is disabled, pass the input through.
        if not self.training:
            return input
        # Bernoulli(1-p) keep-mask, drawn directly on the input's device.
        # This replaces the deprecated pre-0.4 Variable wrapper and the
        # CPU->GPU copy via .type(torch.cuda.FloatTensor), which also
        # always targeted the *default* GPU rather than input's device.
        keep_mask = (torch.rand(input.shape, device=input.device) > self.p).float()
        # Scale surviving activations as described in the paper [1].
        return keep_mask * input * self.multiplier_
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment