toDagNN (work in progress)
function dag = toDagNN(net, customObj)
%TODAGNN Converts a compiled AutoNN network to a DagNN object
%   DAG = TODAGNN(NET) converts a compiled AutoNN network NET (i.e. a set
%   of recursively nested layer objects) into a DagNN object, DAG.
%
%   DAG = TODAGNN(NET, CUSTOMOBJ) additionally uses CUSTOMOBJ, an object
%   with a SUPPORT cell array of function names and a CONVERT handler, to
%   convert layer types that are not supported below.
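%
%   Illustrative usage (a sketch only; the variable names and the input
%   size are hypothetical, not taken from this file):
%     net = Net(output) ;                  % compile an AutoNN layer graph
%     dag = toDagNN(net) ;                 % convert it to a DagNN object
%     dag.eval({'input', rand(224, 224, 3, 1, 'single')}) ;
%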
% TODO(samuel): clean up

  if nargin < 2
    customObj.support = {} ;
  end

  dag = dagnn.DagNN() ;
  autoOutputs = Layer.fromCompiledNet(net) ;

  % construct from each network output
  for ii = 1:numel(autoOutputs)
    out = autoOutputs{ii} ;
    dag = addToDag(dag, out) ;
  end

  % copy meta-information
  dag.meta = net.meta ;
  dag.rebuild() ;
  function dag = addToDag(dag, out)
    inLayers = find(cellfun(@(x) isFuncLayer(x), out.inputs)) ;

    % uses the uniqueness property of the autonn naming scheme
    if ismember(out.name, {dag.layers.name}), return ; end

    % add parent layers first, so that their outputs exist as dag variables
    for jj = 1:numel(inLayers)
      dag = addToDag(dag, out.inputs{inLayers(jj)}) ;
    end

    name = out.name ; ins = out.inputs ; funcType = func2str(out.func) ;
    params = struct('name', {}, 'value', {}, ...
                    'learningRate', [], 'weightDecay', []) ;
    paramOpts = fieldnames(params) ;

    % default dag input/output scheme
    inputs = {ins{1}.name} ; outputs = {out.name} ;
    switch funcType
      case {'vl_nnconv', 'vl_nnconvt'}
        sz = size(ins{2}.value) ;
        hasBias = ~isempty(ins{3}) ;
        params(1) = parseFields(ins{2}, paramOpts) ;
        if hasBias, params(2) = parseFields(ins{3}, paramOpts) ; end
        switch funcType
          case 'vl_nnconv'
            block = dagnn.Conv() ;
            block.size = sz ;
            block = parseCells(block, ins, {'pad', 'stride', 'dilate'}) ;
          case 'vl_nnconvt'
            block = dagnn.ConvTranspose() ;
            block.size = sz ;
            block = parseCells(block, ins, {'upsample', 'crop', 'numGroups'}) ;
        end
        block.hasBias = hasBias ;
      case 'vl_nnrelu'
        block = dagnn.ReLU() ;
        block = parseCells(block, ins, {'leak'}) ;
      case 'vl_nnmask', return ; % do nothing (handled by vl_nndropout_wrapper)
      case {'vl_nnwsum', 'vl_nnsum'}
        if strcmp(funcType, 'vl_nnwsum')
          msg = 'currently, only a balanced summation is supported' ;
          assert(all(ins{4} == 1), msg) ;
        end
        block = dagnn.Sum() ;
        inputs = cellfun(@(x) {x.name}, ins(inLayers)) ;
      case 'vl_nnbnorm_wrapper'
        block = dagnn.BatchNorm() ;
        % we must handle two cases:
        %   (1) standard usage of parameters
        %   (2) following initialisation, the autonn wrapper uses dummy
        %       scalar variables in the parameter positions
        params(1) = parseFields(ins{2}, paramOpts) ;
        params(2) = parseFields(ins{3}, paramOpts) ;
        params(3) = parseFields(ins{4}, paramOpts) ;
        % wrap the comparisons in ALL() so that the test remains a logical
        % scalar when the parameters hold per-channel vectors
        if all(params(1).value == 1) && all(params(2).value == 0) % second case
          % make the assumption that bn follows on from a conv at some point
          if isequal(ins{1}.func, @vl_nnconv)
            prev = ins{1} ;
          else
            prev = ins{1}.find(@vl_nnconv, 1) ;
          end
          numChannels = size(prev.inputs{2}.value, 4) ;
          params(1).value = zeros(numChannels, 1, 'single') ;
          params(2).value = zeros(numChannels, 1, 'single') ;
          params(3).value = zeros(numChannels, 2, 'single') ;
        else
          numChannels = size(ins{4}.value, 1) ;
        end
        block.numChannels = numChannels ;
      case 'vl_nndropout_wrapper'
        block = dagnn.DropOut() ;
        rate = ins{2}.inputs{2} ; % extract rate parameter from input mask
        block.rate = rate ;
      case 'vl_nnpool'
        block = dagnn.Pooling() ;
        block.opts = {'cuDNN'} ;
        block.poolSize = ins{2} ;
        block = parseCells(block, ins, {'method', 'pad', 'stride'}) ;
      case 'vl_nnsoftmax'
        block = dagnn.SoftMax() ;
      case 'vl_nnloss'
        % may require extension for more complex losses
        block = dagnn.Loss() ;
        block = parseCells(block, ins, {'loss', 'opts'}) ;
        inputs = cellfun(@(x) {x.name}, ins(1:2)) ;
      case customObj.support
        [block, inputs, params] = customObj.convert(ins, out, params) ;
      otherwise
        error('%s is unsupported', funcType) ;
    end
    dag.addLayer(name, block, inputs, outputs, {params.name}) ;

    % copy parameter values and hyperparameters onto the new dag params
    for pp = 1:numel(params)
      pindex = dag.getParamIndex(params(pp).name) ;
      for prop = {'value', 'learningRate', 'weightDecay'}
        pname = char(prop) ;
        if ~isempty(params(pp).(pname))
          dag.params(pindex).(pname) = params(pp).(pname) ;
        end
      end
    end
  end
  % copy the given fields from a Param layer into a struct
  function x = parseFields(src, opts)
    x = struct() ;
    for jj = 1:numel(opts)
      opt = opts{jj} ;
      x.(opt) = src.(opt) ;
    end
  end
  % override options on struct x if they are available in src
  function x = parseCells(x, src, opts)
    for jj = 1:numel(opts)
      opt = opts{jj} ;
      pos = find(strcmp(src, opt)) + 1 ;
      if ~isempty(pos), x.(opt) = src{pos} ; end
    end
  end
  % true for computational layers (excludes Input and Param leaves)
  function res = isFuncLayer(x)
    res = isa(x, 'Layer') && ~isa(x, 'Input') && ~isa(x, 'Param') ;
  end
end
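
For layer types not handled by the switch above, a customObj with a support list and a convert handler can be passed as the second argument. A minimal sketch of that interface (the vl_nncustom layer name and the choice of dagnn.ReLU as a stand-in block are hypothetical, purely to show the required shape):

customObj.support = {'vl_nncustom'} ;   % hypothetical autonn function name to intercept
customObj.convert = @(ins, out, params) ...
    deal(dagnn.ReLU(), {ins{1}.name}, params) ;  % must return block, inputs, params
dag = toDagNN(net, customObj) ;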
Hi Albanie,
I'm trying to use a resnet in autonn with 2 classes.
I can convert with dag2autonn, and "netx.eval({'data', randn(224,224,3,1,'single'), 'label', 1}) ;" works well,
but in the "dag = toDagNN(netx) ;" case I get an error:
%------------------------------------------------------------------
Operands to the || and && operators must be convertible to logical scalar values.
Error in toDagNN/addToDag (line 84)
if params(1).value == 1 && params(2).value == 0 % second case
Error in toDagNN/addToDag (line 35)
dag = addToDag(dag, out.inputs{inLayers(jj)}) ;
%------------------------------------------------------------------
How can I use "toDagNN"?
Thanks.
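
(The error arises because a trained network's batch-norm parameters are per-channel vectors, so "params(1).value == 1" yields a vector, which "&&" cannot reduce to a logical scalar. A sketch of a scalar-safe version of the failing check, wrapping both comparisons in all():

if all(params(1).value == 1) && all(params(2).value == 0) % second case
)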