import tensorflow as tf

# Adam hyperparameters; these are used as globals below and are assumed to be
# defined elsewhere in the original script (typical placeholder values shown
# here so the class is self-contained).
beta1 = 0.9
beta2 = 0.999
learning_rate = 0.001
adam_e = 1e-8

class RELU_as_Reg():

    def __init__(self, batch, width, channel, regularizer):
        # Element-wise mask over the feature map, initialized to all ones.
        self.w = tf.Variable(tf.ones([batch, width, width, channel], tf.float32))
        # Adam first- and second-moment accumulators for w.
        self.m, self.v = tf.Variable(tf.zeros_like(self.w)), tf.Variable(tf.zeros_like(self.w))
        self.regularizer = regularizer
        self.lamda = 0.0001  # regularization strength

    def feedforward(self, input):
        self.input = input
        # Element-wise gating of the input by the learned mask.
        self.layerA = self.w * input
        return self.layerA

    def backprop(self, gradient):
        # Gradient w.r.t. the mask w, and the gradient passed to the previous layer.
        grad = gradient * self.input
        gradient_p = self.w * gradient

        # --- add the chosen regularizer's gradient term to grad ---
        if self.regularizer == 'A': grad = grad + self.lamda * tf.sign(self.w)      # d/dw |w|  (L1)
        if self.regularizer == 'B': grad = grad + self.lamda * 2.0 * self.w          # d/dw w^2  (L2)
        if self.regularizer == 'C': grad = grad + self.lamda * (1.0 / tf.sqrt(tf.square(self.w) + 1e-5)) * self.w
        if self.regularizer == 'D': grad = grad + self.lamda * -(2 * self.w) / (1 + self.w ** 2)
        if self.regularizer == 'E': grad = grad + self.lamda * -(1 - tf.tanh(self.w) ** 2)
        if self.regularizer == 'F': grad = grad + self.lamda * -(1 - tf.tanh(self.w ** 2) ** 2) * 2.0 * self.w
        if self.regularizer == 'G': grad = grad + self.lamda * -(1 - tf.tanh(tf.abs(self.w)) ** 2) * tf.sign(self.w)
        if self.regularizer == 'H': grad = grad + self.lamda * -(1 - tf.tanh(tf.abs(self.w) ** 2) ** 2) * 2.0 * tf.abs(self.w) * tf.sign(self.w)
        if self.regularizer == 'I': grad = grad + self.lamda * tf.cos(self.w)
        if self.regularizer == 'J': grad = grad + self.lamda * tf.sign(tf.sin(self.w)) * tf.cos(self.w)
        if self.regularizer == 'K': grad = grad + self.lamda * 2.0 / (self.w + 1e-5)
        if self.regularizer == 'L': grad = grad + self.lamda * (tf.log(self.w ** 2 + 1e-5) + 2.0)
        # --- end regularizer terms ---

        # Adam-style update for w, with the updated mask clipped to [0, 1].
        update_w = []
        update_w.append(tf.assign(self.m, self.m * beta1 + (1 - beta1) * grad))
        update_w.append(tf.assign(self.v, self.v * beta2 + (1 - beta2) * (grad ** 2)))
        m_hat = self.m / (1 - beta1)
        v_hat = self.v / (1 - beta2)
        adam_middle = m_hat * learning_rate / (tf.sqrt(v_hat) + adam_e)
        gradient_temp = tf.clip_by_value(self.w - adam_middle, clip_value_min=0.0, clip_value_max=1.0)
        update_w.append(self.w.assign(gradient_temp))
        return gradient_p, update_w
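A minimal usage sketch, assuming TensorFlow 1.x graph/session semantics; the input shape, the placeholder x, and the all-ones upstream gradient are illustrative assumptions, not part of the gist:

import numpy as np

# Hypothetical wiring of the layer into a TF1 graph (shapes chosen for illustration).
x = tf.placeholder(tf.float32, [8, 28, 28, 1])
layer = RELU_as_Reg(batch=8, width=28, channel=1, regularizer='B')
out = layer.feedforward(x)

# Pretend gradient arriving from the next layer (ones, purely for illustration).
grad_from_next = tf.ones_like(out)
grad_to_prev, update_ops = layer.backprop(grad_from_next)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.randn(8, 28, 28, 1).astype(np.float32)
    # Running update_ops applies the Adam step with the chosen regularizer's gradient added.
    sess.run(update_ops, feed_dict={x: batch})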