
@willwhitney
Created April 30, 2015 15:49
Diff of unnecessarily brutal changes to get DC-IGN working without GPU
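
The diff below mostly comments out the CUDA paths wholesale (hence "unnecessarily brutal"). A gentler alternative, sketched here under the assumption that modules/prequire returns the module or nil on a failed require, is to branch on a single flag; the use_cuda name and maybe_cuda helper are illustrative, not part of the repo:

-- Sketch of a less brutal CPU/GPU toggle, reusing the repo's prequire helper.
-- Assumes prequire('cunn') returns nil when no GPU backend is available;
-- use_cuda and maybe_cuda are hypothetical names, not from this gist.
require 'nn'
require 'modules/prequire'

cunn_mod = prequire('cunn')        -- nil when CUDA isn't available
use_cuda = (cunn_mod ~= nil)

if not use_cuda then
    cudnn = nn                     -- fall back to plain nn layers, as config.lua already does
end

local function maybe_cuda(t)
    -- cast a tensor or module only when a GPU backend actually loaded
    if use_cuda then return t:cuda() end
    return t
end

-- e.g. model = maybe_cuda(model); batch = maybe_cuda(batch)

With that in place, the unconditional :cuda() calls below (in UnPooling and elsewhere) could be wrapped rather than deleted.
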
diff --git a/config.lua b/config.lua
index c1b1cb2..93c6984 100644
--- a/config.lua
+++ b/config.lua
@@ -1,6 +1,6 @@
require 'modules/prequire'
require 'modules/UnPooling'
-cunn_mod = prequire('cunn')
+-- cunn_mod = prequire('cunn')
--Global variables for config
bsize = 50
imwidth = 150
@@ -9,9 +9,9 @@ TOTALFACES = 5230
num_train_batches = 5000
num_test_batches = TOTALFACES-num_train_batches
-if not cudnn then
- cudnn = nn
-end
+-- if not cudnn then
+ -- cudnn = nn
+-- end
-- config = {
-- learningRate = -0.0005,
@@ -89,17 +89,17 @@ function init_network2_150_mv(dim_hidden, feature_maps)
encoder = nn.Sequential()
- encoder:add(cudnn.SpatialConvolution(colorchannels,feature_maps,filter_size,filter_size))
- encoder:add(cudnn.SpatialMaxPooling(2,2,2,2))
+ encoder:add(nn.SpatialConvolution(colorchannels,feature_maps,filter_size,filter_size))
+ encoder:add(nn.SpatialMaxPooling(2,2,2,2))
encoder:add(nn.Threshold(0,1e-6))
- encoder:add(cudnn.SpatialConvolution(feature_maps,feature_maps/2,filter_size,filter_size))
- encoder:add(cudnn.SpatialMaxPooling(2,2,2,2))
+ encoder:add(nn.SpatialConvolution(feature_maps,feature_maps/2,filter_size,filter_size))
+ encoder:add(nn.SpatialMaxPooling(2,2,2,2))
encoder:add(nn.Threshold(0,1e-6))
- encoder:add(cudnn.SpatialConvolution(feature_maps/2,feature_maps/4,filter_size,filter_size))
- encoder:add(cudnn.SpatialMaxPooling(2,2,2,2))
+ encoder:add(nn.SpatialConvolution(feature_maps/2,feature_maps/4,filter_size,filter_size))
+ encoder:add(nn.SpatialMaxPooling(2,2,2,2))
encoder:add(nn.Threshold(0,1e-6))
encoder:add(nn.Reshape((feature_maps/4)*15*15))
@@ -127,29 +127,29 @@ function init_network2_150_mv(dim_hidden, feature_maps)
decoder:add(nn.Reshape((feature_maps/4),15,15))
decoder:add(nn.SpatialUpSamplingNearest(2))
- decoder:add(cudnn.SpatialConvolution(feature_maps/4,feature_maps/2, 7, 7))
+ decoder:add(nn.SpatialConvolution(feature_maps/4,feature_maps/2, 7, 7))
decoder:add(nn.Threshold(0,1e-6))
decoder:add(nn.SpatialUpSamplingNearest(2))
- decoder:add(cudnn.SpatialConvolution(feature_maps/2,feature_maps,7,7))
+ decoder:add(nn.SpatialConvolution(feature_maps/2,feature_maps,7,7))
decoder:add(nn.Threshold(0,1e-6))
decoder:add(nn.SpatialUpSamplingNearest(2))
- decoder:add(cudnn.SpatialConvolution(feature_maps,feature_maps,7,7))
+ decoder:add(nn.SpatialConvolution(feature_maps,feature_maps,7,7))
decoder:add(nn.Threshold(0,1e-6))
decoder:add(nn.SpatialUpSamplingNearest(2))
- decoder:add(cudnn.SpatialConvolution(feature_maps,1,7,7))
- decoder:add(cudnn.Sigmoid())
+ decoder:add(nn.SpatialConvolution(feature_maps,1,7,7))
+ decoder:add(nn.Sigmoid())
model = nn.Sequential()
model:add(encoder)
model:add(nn.Reparametrize(dim_hidden))
model:add(decoder)
- if cunn_mod then
- model:cuda()
- end
+ -- if cunn_mod then
+ -- model:cuda()
+ -- end
collectgarbage()
return model
end
diff --git a/modules/Reparametrize.lua b/modules/Reparametrize.lua
index 43bf6a7..a531080 100644
--- a/modules/Reparametrize.lua
+++ b/modules/Reparametrize.lua
@@ -8,17 +8,17 @@ function Reparametrize:__init(dimension)
self.size = torch.LongStorage()
self.dimension = dimension
self.gradInput = {}
-end
+end
function Reparametrize:updateOutput(input)
self.eps = torch.randn(input[2]:size(1),self.dimension)
- if torch.typename(input[1]) == 'torch.CudaTensor' then
- self.eps = self.eps:cuda()
- self.output = torch.CudaTensor():resizeAs(input[2]):fill(0.5)
- else
+ -- if torch.typename(input[1]) == 'torch.CudaTensor' then
+ -- self.eps = self.eps:cuda()
+ -- self.output = torch.CudaTensor():resizeAs(input[2]):fill(0.5)
+ -- else
self.output = torch.Tensor():resizeAs(input[2]):fill(0.5)
- end
+ -- end
self.output:cmul(input[2]):exp():cmul(self.eps)
@@ -31,13 +31,13 @@ end
function Reparametrize:updateGradInput(input, gradOutput)
-- Derivative with respect to mean is 1
self.gradInput[1] = gradOutput:clone()
-
+
--test gradient with Jacobian
- if torch.typename(input[1]) == 'torch.CudaTensor' then
- self.gradInput[2] = torch.CudaTensor():resizeAs(input[2]):fill(0.5)
- else
+ -- if torch.typename(input[1]) == 'torch.CudaTensor' then
+ -- self.gradInput[2] = torch.CudaTensor():resizeAs(input[2]):fill(0.5)
+ -- else
self.gradInput[2] = torch.Tensor():resizeAs(input[2]):fill(0.5)
- end
+ -- end
self.gradInput[2]:cmul(input[2]):exp():mul(0.5):cmul(self.eps)
self.gradInput[2]:cmul(gradOutput)
diff --git a/modules/UnPooling.lua b/modules/UnPooling.lua
index 5359713..d8f80df 100644
--- a/modules/UnPooling.lua
+++ b/modules/UnPooling.lua
@@ -17,9 +17,9 @@ function UnPooling:updateOutput(input)
local bsize = input:size()[1]
local dim = input:size()[3]
local sdim = dim*self.scale
-
+
self.output = torch.zeros(bsize, input:size()[2] , sdim, sdim )
-
+
local ii,jj,kk; ii=1;jj=1;kk=1;
self.mapping = {} -- store non-zero mappings for gradient calc
@@ -34,7 +34,7 @@ function UnPooling:updateOutput(input)
ii = ii + 1;
end
- self.output = self.output:cuda()
+ -- self.output = self.output:cuda()
return self.output
end
@@ -44,18 +44,18 @@ function UnPooling:updateGradInput(input, gradOutput)
input = input:float()
local dim = input:size()[3]
-
+
self.gradInput = torch.zeros(bsize, input:size()[2], dim, dim)
for ii=1,dim do
for jj=1,dim do
local t = self.mapping[ii .. jj]
i = t[1]; j = t[2];
- self.gradInput[{{},{},ii,jj}] = gradOutput[{{},{}, i,j}]
+ self.gradInput[{{},{},ii,jj}] = gradOutput[{{},{}, i,j}]
end
end
- self.gradInput = self.gradInput:cuda()
+ -- self.gradInput = self.gradInput:cuda()
return self.gradInput
end
diff --git a/monovariant_main.lua b/monovariant_main.lua
index 2c4d8c3..2ff835d 100644
--- a/monovariant_main.lua
+++ b/monovariant_main.lua
@@ -14,16 +14,16 @@ require 'modules/Reparametrize'
require 'modules/SelectiveOutputClamp'
require 'modules/SelectiveGradientFilter'
-cutorch_mod = prequire('cutorch')
-cunn_mod = prequire('cunn')
+-- cutorch_mod = prequire('cutorch')
+-- cunn_mod = prequire('cunn')
require 'optim'
require 'testf'
require 'utils'
require 'config'
-if cutorch_mod then
- print( cutorch.getDeviceProperties(cutorch.getDevice()) )
-end
+-- if cutorch_mod then
+ -- print( cutorch.getDeviceProperties(cutorch.getDevice()) )
+-- end
cmd = torch.CmdLine()
cmd:text()
@@ -38,8 +38,8 @@ cmd:option('--networks_dir', 'networks', 'the directory to save the res
cmd:option('--name', 'default', 'the name for this network. used for saving the network and results')
cmd:option('--datasetdir', 'DATASET', 'dataset source directory')
-cmd:option('--dim_hidden', 200, 'dimension of the representation layer')
-cmd:option('--feature_maps', 96, 'number of feature maps')
+cmd:option('--dim_hidden', 40, 'dimension of the representation layer')
+cmd:option('--feature_maps', 24, 'number of feature maps')
cmd:option('--force_invariance', false, 'propagate error equal to change in outputs corresponding to fixed variables')
cmd:option('--invariance_strength',0, 'multiplier for the invariance error signal')
@@ -55,7 +55,7 @@ cmd:text()
cmd:text()
cmd:text("Probably don't change these:")
-cmd:option('--threads',2,'how many threads to use in torch')
+cmd:option('--threads',8,'how many threads to use in torch')
cmd:option('--num_train_batches',5000,'number of batches to train with per epoch')
cmd:option('--num_train_batches_per_type',3000,'number of available train batches of each data type')
cmd:option('--num_test_batches',1400,'number of batches to test with')
@@ -66,7 +66,7 @@ cmd:text()
opt = cmd:parse(arg)
opt.save = paths.concat(opt.networks_dir, opt.name)
-os.execute('mkdir ' .. opt.save)
+os.execute('mkdir -p ' .. opt.save)
config = {
learningRate = opt.learning_rate,
@@ -98,12 +98,12 @@ criterion.sizeAverage = false
KLD = nn.KLDCriterion()
KLD.sizeAverage = false
-if cutorch_mod then
- criterion:cuda()
- KLD:cuda()
- model:cuda()
- cutorch.synchronize()
-end
+-- if cutorch_mod then
+-- criterion:cuda()
+-- KLD:cuda()
+-- model:cuda()
+-- cutorch.synchronize()
+-- end
parameters, gradients = model:getParameters()
print('Num before', #parameters)
@@ -175,9 +175,9 @@ while true do
gradFilters[clampIndex].active = true
end
- if cutorch_mod then
- batch = batch:cuda()
- end
+ -- if cutorch_mod then
+ -- batch = batch:cuda()
+ -- end
--Optimization function
local opfunc = function(x)
diff --git a/utils.lua b/utils.lua
index 1ea13f1..b0f6600 100644
--- a/utils.lua
+++ b/utils.lua
@@ -5,7 +5,8 @@ function load_batch(id, mode)
end
function load_mv_batch(id, dataset_name, mode)
- return torch.load(opt.datasetdir .. '/th_' .. dataset_name .. '/' .. mode .. '/batch' .. id)
+ return torch.rand(30, 1, 150, 150)
+ -- return torch.load(opt.datasetdir .. '/th_' .. dataset_name .. '/' .. mode .. '/batch' .. id)
end
function load_random_mv_batch(mode)
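
The utils.lua hunk above swaps the dataset loader for random tensors, which lets the training loop run end-to-end without the DATASET directory on disk. A less destructive variant, assuming a hypothetical opt.fake_data flag (not in the gist), would keep the real loader behind a guard; the batch shape matches the stub in the diff:

-- Sketch: keep the real loader, with a synthetic fallback for smoke tests.
-- opt.fake_data is a hypothetical flag; (30, 1, 150, 150) copies the stub above.
function load_mv_batch(id, dataset_name, mode)
    if opt.fake_data then
        return torch.rand(30, 1, 150, 150)
    end
    return torch.load(opt.datasetdir .. '/th_' .. dataset_name .. '/' .. mode .. '/batch' .. id)
end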