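# NOTE: the class below references tf_iden, d_tf_iden, beta1, beta2,
# learning_rate and adam_e without defining them. The definitions here are an
# assumed minimal sketch (identity activation plus common Adam defaults), not
# necessarily the values the gist's author used.
import tensorflow as tf
import numpy as np

def tf_iden(x): return x                    # assumed: identity activation
def d_tf_iden(x): return tf.ones_like(x)    # assumed: derivative of the identity

beta1, beta2 = 0.9, 0.999                   # assumed Adam moment decay rates
learning_rate = 0.0008                      # assumed learning rate
adam_e = 1e-8                               # assumed Adam epsilon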
class CNN():

    def __init__(self, k, inc, out, stddev=0.05, which_reg='A', act=tf_iden, d_act=d_tf_iden):
        # Filter of shape [k, k, inc, out] plus Adam moment accumulators m and v.
        self.w = tf.Variable(tf.random_normal([k, k, inc, out], stddev=stddev, seed=2, dtype=tf.float32))
        self.m, self.v = tf.Variable(tf.zeros_like(self.w)), tf.Variable(tf.zeros_like(self.w))
        self.act, self.d_act = act, d_act
        self.current_case = which_reg   # 'A'..'E' selects the regularization scheme

    def getw(self): return self.w

    def feedforward(self, input, stride=1, padding='SAME', training_phase=True, std_value=0.0005):
        # training_phase must be a tf.bool tensor (e.g. a placeholder) when case 'B' or 'E' is used.
        self.input = input
        if self.current_case == 'B':
            # Case B: dropout (keep_prob 0.8) on the convolution output, training only.
            def training_fn(): return tf.nn.dropout(tf.nn.conv2d(input, self.w, strides=[1, stride, stride, 1], padding=padding), 0.8)
            def testing_fn():  return tf.nn.conv2d(input, self.w, strides=[1, stride, stride, 1], padding=padding)
            self.layer = tf.cond(training_phase, true_fn=training_fn, false_fn=testing_fn)
        elif self.current_case == 'E':
            # Case E: at test time convolve with weights sampled from N(w, std_value).
            def training_fn(): return tf.nn.conv2d(input, self.w, strides=[1, stride, stride, 1], padding=padding)
            def testing_fn():
                # squeeze only the sample dimension so size-1 channel dims are preserved
                sampled_weight = tf.squeeze(tf.distributions.Normal(loc=self.w, scale=std_value).sample(1), axis=[0])
                return tf.nn.conv2d(input, sampled_weight, strides=[1, stride, stride, 1], padding=padding)
            self.layer = tf.cond(training_phase, true_fn=training_fn, false_fn=testing_fn)
        else:
            self.layer = tf.nn.conv2d(input, self.w, strides=[1, stride, stride, 1], padding=padding)
        self.layerA = self.act(self.layer)
        return self.layer, self.layerA

    def backprop(self, gradient, std_value, stride=1, padding='SAME'):
        grad_part_1 = gradient                  # gradient flowing in from the layer above
        grad_part_2 = self.d_act(self.layer)    # derivative of the activation
        grad_part_3 = self.input                # layer input
        grad_middle = grad_part_1 * grad_part_2

        # Gradient w.r.t. the filter, and the gradient passed back to the previous layer.
        grad      = tf.nn.conv2d_backprop_filter(input=grad_part_3, filter_sizes=tf.shape(self.w),
                                                 out_backprop=grad_middle, strides=[1, stride, stride, 1], padding=padding)
        grad_pass = tf.nn.conv2d_backprop_input(input_sizes=tf.shape(self.input), filter=self.w,
                                                out_backprop=grad_middle, strides=[1, stride, stride, 1], padding=padding)

        # Cases D and E: replace the filter gradient with a sample from N(grad, std_value).
        if self.current_case == 'D' or self.current_case == 'E':
            grad = tf.squeeze(tf.distributions.Normal(loc=grad, scale=std_value).sample(1), axis=[0])

        # Manual Adam update of the filter
        # (note: the "bias correction" uses constant 1-beta factors rather than the usual 1-beta**t).
        update_w = []
        update_w.append(tf.assign(self.m, self.m * beta1 + (1 - beta1) * grad))
        update_w.append(tf.assign(self.v, self.v * beta2 + (1 - beta2) * (grad ** 2)))
        m_hat = self.m / (1 - beta1)
        v_hat = self.v / (1 - beta2)
        adam_middle = m_hat * learning_rate / (tf.sqrt(v_hat) + adam_e)

        # Cases C, D and E: also add Gaussian noise to the Adam step itself.
        if self.current_case in ('C', 'D', 'E'):
            adam_middle = tf.squeeze(tf.distributions.Normal(loc=adam_middle, scale=std_value).sample(1), axis=[0])

        update_w.append(tf.assign(self.w, tf.subtract(self.w, adam_middle)))
        return grad_pass, grad, update_w
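
# Usage sketch (assumed, not part of the original gist): wire up a single layer,
# run the forward pass, and apply the manual backprop / Adam update. The
# placeholder names, shapes and feed values below are hypothetical.
x           = tf.placeholder(tf.float32, [None, 28, 28, 1])
is_training = tf.placeholder(tf.bool, [])

layer1  = CNN(k=3, inc=1, out=16, which_reg='B')
l1, l1A = layer1.feedforward(x, training_phase=is_training)

# Stand-in upstream gradient; in a full network this would be the grad_pass
# returned by the next layer's backprop.
upstream_grad = tf.ones_like(l1A)
grad_pass, grad, update_w = layer1.backprop(upstream_grad, std_value=0.0005)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run([l1A] + update_w,
             feed_dict={x: np.zeros((4, 28, 28, 1), np.float32), is_training: True})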