'''
An implementation of dilated convolution with Chainer
http://joisino.hatenablog.com/entry/2017/07/13/210000

Copyright (c) 2017 joisino
Released under the MIT license
http://opensource.org/licenses/mit-license.php
'''
import numpy as np

import chainer
from chainer import Function, training
from chainer import datasets, iterators, optimizers, serializers
from chainer import initializers
from chainer import Link, Chain
import chainer.functions as F
import chainer.links as L
from chainer.training import extensions
class DilatedConvolution2DFunction(Function):

    def __init__(self, dilate):
        # Use the same dilation factor along both spatial axes.
        self.dy = dilate
        self.dx = dilate
    def forward(self, inputs):
        x = inputs[0]  # dim: (batch, in_layer, y, x)
        W = inputs[1]  # dim: (out_layer, in_layer, y, x)
        b = inputs[2]  # dim: (out_layer)

        filter_height = 1 + self.dy * (W.shape[2] - 1)
        filter_width = 1 + self.dx * (W.shape[3] - 1)
        batch_size = x.shape[0]
        in_n_layer = x.shape[1]
        out_n_layer = W.shape[0]
        out_height = x.shape[2] - filter_height + 1
        out_width = x.shape[3] - filter_width + 1
        assert in_n_layer == W.shape[1], "W.shape[1] must be in_n_layer"
        assert out_n_layer == b.shape[0], "b.shape[0] must be out_n_layer"

        # Gather the dilated input windows (im2col-style) so that a normal
        # convolution can be applied afterwards.
        # dim: (batch, in_layer, out_height, out_width, conv_y, conv_x)
        new_x = np.zeros((batch_size, in_n_layer, out_height, out_width, W.shape[2], W.shape[3]), dtype=np.float32)
        for i in range(out_height):
            for j in range(out_width):
                new_x[:, :, i, j, :, :] = x[:, :, i:i+filter_height:self.dy, j:j+filter_width:self.dx]

        # Calculate the output by contracting over the input channels and
        # both kernel axes.
        # dim: (batch, out_layer, out_height, out_width)
        y = np.tensordot(new_x, W, ([1, 4, 5], [1, 2, 3]))  # convolution
        y += b  # bias
        new_y = np.rollaxis(y, 3, 1)  # move the out_layer axis next to batch
        assert new_y.shape[0] == batch_size, "new_y.shape[0] must be batch_size"
        assert new_y.shape[1] == out_n_layer, "new_y.shape[1] must be out_n_layer"
        assert new_y.shape[2] == out_height, "new_y.shape[2] must be out_height"
        assert new_y.shape[3] == out_width, "new_y.shape[3] must be out_width"
        return new_y,
    def backward(self, inputs, grad_outputs):
        x = inputs[0]
        W = inputs[1]
        b = inputs[2]
        gy = grad_outputs[0]

        filter_height = 1 + self.dy * (W.shape[2] - 1)
        filter_width = 1 + self.dx * (W.shape[3] - 1)
        batch_size = x.shape[0]
        in_n_layer = x.shape[1]
        out_n_layer = W.shape[0]
        out_height = x.shape[2] - filter_height + 1
        out_width = x.shape[3] - filter_width + 1
        assert in_n_layer == W.shape[1], "W.shape[1] must be in_n_layer"
        assert out_n_layer == b.shape[0], "b.shape[0] must be out_n_layer"

        # Rebuild the same gathered view of the input as in forward().
        new_x = np.zeros((batch_size, in_n_layer, out_height, out_width, W.shape[2], W.shape[3]), dtype=np.float32)
        for i in range(out_height):
            for j in range(out_width):
                new_x[:, :, i, j, :, :] = x[:, :, i:i+filter_height:self.dy, j:j+filter_width:self.dx]

        # calc gb
        gb = gy.sum(axis=(0, 2, 3))

        # calc gW
        gW = np.tensordot(gy, new_x, ([0, 2, 3], [0, 2, 3]))

        # calc gx
        g_newx_temp = np.tensordot(gy, W, ([1], [0]))
        g_newx = np.rollaxis(g_newx_temp, 3, 1)
        gx = np.zeros(x.shape, dtype=np.float32)
        for i in range(out_height):
            for j in range(out_width):
                # Overlapping windows contribute to the same input pixels,
                # so the gradients must be accumulated, not overwritten.
                gx[:, :, i:i+filter_height:self.dy, j:j+filter_width:self.dx] += g_newx[:, :, i, j, :, :]

        assert gx.shape == x.shape, "gx.shape must be x.shape"
        assert gW.shape == W.shape, "gW.shape must be W.shape"
        assert gb.shape == b.shape, "gb.shape must be b.shape"
        return gx, gW, gb
def dilated_convolution_2d(x, W, b, dilate):
    func = DilatedConvolution2DFunction(dilate)
    return func(x, W, b)
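# The checks below are not part of the original gist: they are a minimal
# sketch of how one might verify the Function above, assuming a Chainer
# version that ships F.dilated_convolution_2d and chainer.gradient_check.
def _sanity_check(dilate=2):
    from chainer import gradient_check
    x = np.random.randn(2, 3, 12, 12).astype(np.float32)
    W = np.random.randn(5, 3, 3, 3).astype(np.float32)
    b = np.random.randn(5).astype(np.float32)
    # The forward pass should match the built-in dilated convolution
    # (stride 1, no padding, as implemented here).
    y_ours = dilated_convolution_2d(x, W, b, dilate)
    y_ref = F.dilated_convolution_2d(x, W, b, stride=1, pad=0, dilate=dilate)
    np.testing.assert_allclose(y_ours.data, y_ref.data, atol=1e-4)
    # Numerical gradient check of the hand-written backward(); a fresh
    # Function instance is created per call via the wrapper.
    gy = np.random.randn(*y_ours.shape).astype(np.float32)
    gradient_check.check_backward(
        lambda x_, W_, b_: dilated_convolution_2d(x_, W_, b_, dilate),
        (x, W, b), gy, atol=1e-3, rtol=1e-3)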
class DilatedConvolution2D(Link):

    def __init__(self, in_channels, out_channels, ksize, dilate=1):
        super(DilatedConvolution2D, self).__init__()
        self.dilate = dilate
        with self.init_scope():
            W_initializer = initializers._get_initializer(None)
            self.W = chainer.variable.Parameter(W_initializer)
            self.W.initialize((out_channels, in_channels, ksize[0], ksize[1]))
            initial_bias = initializers._get_initializer(0)
            self.b = chainer.variable.Parameter(initial_bias, out_channels)

    def __call__(self, x):
        return dilated_convolution_2d(x, self.W, self.b, self.dilate)
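# A quick shape example (not in the original gist): with ksize (3, 3) and
# dilate=2 the effective filter covers a 5x5 input patch, so a 28x28 input
# shrinks to 24x24 (stride 1, no padding).
def _shape_example():
    conv = DilatedConvolution2D(1, 4, (3, 3), dilate=2)
    y = conv(np.zeros((1, 1, 28, 28), dtype=np.float32))
    assert y.shape == (1, 4, 24, 24)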
class CNN(Chain):

    def __init__(self):
        super(CNN, self).__init__(
            l1=DilatedConvolution2D(1, 4, (3, 3), 1),
            l2=DilatedConvolution2D(4, 4, (3, 3), 2),
            l3=DilatedConvolution2D(4, 4, (3, 3), 4),
            l4=L.Linear(None, 10),
        )

    def __call__(self, x):
        h0 = F.reshape(x, (-1, 1, 28, 28))  # works for any batch size
        h1 = F.relu(self.l1(h0))
        h2 = F.relu(self.l2(h1))
        h3 = F.relu(self.l3(h2))
        # Return raw logits: L.Classifier applies softmax_cross_entropy,
        # so applying F.softmax here would squash the loss gradients.
        return self.l4(h3)
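# Shape bookkeeping for the stack above (stride 1, no padding): the three
# 3x3 kernels with dilations 1, 2 and 4 have effective extents 3, 5 and 9,
# so the 28x28 input shrinks by 2 + 4 + 8 pixels to 14x14 before l4, and
# each pixel entering l4 sees a 15x15 receptive field.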
n_epoch = 10
batch_size = 100

train, test = datasets.get_mnist()
train_iter = iterators.SerialIterator(train, batch_size=batch_size, shuffle=True)
test_iter = iterators.SerialIterator(test, batch_size=batch_size, repeat=False, shuffle=False)

model = L.Classifier(CNN())  # avoid shadowing the CNN class itself

opt = optimizers.Adam()
opt.setup(model)

updater = training.StandardUpdater(train_iter, opt)
trainer = training.Trainer(updater, (n_epoch, 'epoch'), out='result')
trainer.extend(extensions.Evaluator(test_iter, model))
trainer.extend(extensions.LogReport())
trainer.extend(extensions.PrintReport(['epoch', 'main/accuracy', 'validation/main/accuracy']))
trainer.extend(extensions.ProgressBar())
trainer.run()
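# Not in the original gist: after training, the learned parameters could be
# persisted with Chainer's NPZ serializer, e.g.:
#
#   serializers.save_npz('result/model.npz', model)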