Created
December 15, 2015 16:35
-
-
Save thouis/fd50b8c28efcc078765e to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
-- Demonstrates differentiating through nn's SpatialMaxPooling /
-- SpatialMaxUnpooling pair with torch-autograd, using FloatTensor inputs.
local grad = require 'autograd'
local nn = require 'nn'

local W1 = torch.FloatTensor(1, 1, 10, 10):normal()

local pooler = nn.SpatialMaxPooling(2, 2, 2, 2)
local unpooler = nn.SpatialMaxUnpooling(pooler)

-- Cast the modules (and all their internal buffers, including
-- pooler.indices and each module's output tensor) to float in one step,
-- instead of manually assigning torch.FloatTensor() to individual fields.
pooler:float()
unpooler:float()

-- Max-pool the input, un-pool it back to the input's size, and reduce to a
-- scalar so autograd has a single value to differentiate.
local testunpool = function(inputs)
  local pooled = pooler(inputs.W1)
  local unpooled = unpooler(pooled)
  return torch.sum(unpooled)
end

print(testunpool({W1 = W1}))

-- Keep the gradient function local; a bare `df = ...` would leak a global.
local df = grad(testunpool)
print(df({W1 = W1}))
@fmassa, Sorry, I'm not a torch expert. Can you give me an example as to what you mean?
I think he means calling pooler:float(),
instead of manually assigning FloatTensors to its member fields, like pooler.output = torch.FloatTensor()
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
If you are using float tensors, it's better to cast the modules with
:float()
instead of having to hack their internal buffers manually.