@toshi-k
Randomized LeakyReLU (Torch 7)
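
A Torch 7 "nn" module implementing the Randomized Leaky ReLU (RReLU) of [1]. During training, the negative part of the input is scaled by a random factor 1/a with a drawn from U(l, u), either per element (version 1) or once per layer (version 2), while positive inputs pass through unchanged: f(x) = x for x >= 0 and f(x) = x / a for x < 0. At test time the deterministic factor 1/a with a = (l + u) / 2 is used instead.
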
require 'nn'

local RLReLU, parent = torch.class('nn.RLReLU', 'nn.Module')

function RLReLU:__init(l, u, version)
   parent.__init(self)
   self.train = true
   self.l = l or 3
   self.u = u or 8
   self.v = version or 1
   self.a = (self.l + self.u) / 2   -- mean of U(l, u); used as the fixed divisor at test time
   self.ReLU_p = nn.ReLU()
   -- version 1: element-wise random
   -- version 2: layer-wise random
   if self.v == 1 then
      self.noise = torch.Tensor()
   else
      self.noise = self.a
   end
end

function RLReLU:updateOutput(input)
   self.output:resizeAs(input):copy(input)
   if self.train then
      if self.v == 1 then
         -- element-wise: sample one factor 1/a per element, a ~ U(l, u)
         self.noise:resizeAs(input)
         self.noise:uniform(self.l, self.u)
         self.noise:pow(-1)   -- noise now holds 1/a
         self.output:cmul(self.noise)
         self.ReLU_p.output = self.ReLU_p:updateOutput(input)
         -- scale the positive part by (1 - 1/a); use a clone so noise keeps 1/a for the backward pass
         self.ReLU_p.output:cmul(self.noise:clone():mul(-1):add(1))
      else
         -- layer-wise: sample a single factor for the whole layer
         self.noise = 1 / torch.uniform(self.l, self.u)
         self.output:mul(self.noise)
         self.ReLU_p.output = self.ReLU_p:updateOutput(input)
         self.ReLU_p.output:mul(1 - self.noise)
      end
   else
      -- evaluation: deterministic factor 1/a with a = (l + u) / 2
      self.output:mul(1 / self.a)
      self.ReLU_p.output = self.ReLU_p:updateOutput(input)
      self.ReLU_p.output:mul(1 - 1 / self.a)
   end
   self.output:add(self.ReLU_p.output)
   return self.output
end

function RLReLU:updateGradInput(input, gradOutput)
   self.gradInput:resizeAs(gradOutput):copy(gradOutput)
   if self.train then
      if self.v == 1 then
         self.gradInput:cmul(self.noise)   -- noise still holds 1/a from the forward pass
         self.ReLU_p.gradInput = self.ReLU_p:updateGradInput(input, gradOutput)
         self.ReLU_p.gradInput:cmul(self.noise:clone():mul(-1):add(1))   -- scale by (1 - 1/a)
      else
         self.gradInput:mul(self.noise)
         self.ReLU_p.gradInput = self.ReLU_p:updateGradInput(input, gradOutput)
         self.ReLU_p.gradInput:mul(1 - self.noise)
      end
   else
      self.gradInput:mul(1 / self.a)
      self.ReLU_p.gradInput = self.ReLU_p:updateGradInput(input, gradOutput)
      self.ReLU_p.gradInput:mul(1 - 1 / self.a)
   end
   self.gradInput:add(self.ReLU_p.gradInput)
   return self.gradInput
end

function RLReLU:__tostring__()
   if self.v == 1 then
      return string.format('%s(element-wise [%f,%f])', torch.type(self), self.l, self.u)
   else
      return string.format('%s(layer-wise [%f,%f])', torch.type(self), self.l, self.u)
   end
end

--[[
<<References>>
[1] Empirical Evaluation of Rectified Activations in Convolutional Network
Bing Xu, Naiyan Wang, Tianqi Chen, Mu Li
http://arxiv.org/abs/1505.00853
--]]
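
A minimal usage sketch (assumptions: the Torch 7 "nn" package is available and the module above is saved as rlrelu.lua; the file name and network shape are illustrative, not part of the gist):

require 'nn'
dofile('rlrelu.lua')   -- hypothetical file name for the module defined above

local net = nn.Sequential()
net:add(nn.Linear(10, 20))
net:add(nn.RLReLU(3, 8, 1))   -- element-wise version; negative slopes drawn from [1/8, 1/3]
net:add(nn.Linear(20, 1))

local x = torch.randn(4, 10)

-- training mode: new random slopes are sampled on every forward pass
net:training()
local y_train = net:forward(x)
net:backward(x, torch.randn(4, 1))

-- evaluation mode: the deterministic slope 2 / (l + u) is used
net:evaluate()
local y_test = net:forward(x)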