One-layer backward pass for feedforward neural networks.
import numpy as np
import activations  # companion module (not shown in this gist) providing d_relu / d_sigmoid


def one_layer_backward_pass(curr_grad, curr_weight, curr_bias, curr_out,
                            prev_act, activation='R'):
    # Note: curr_bias is accepted for a uniform layer interface, but the bias
    # itself is not needed to compute any of the gradients below.
    # Number of samples (columns) in the previous layer's activations.
    num = prev_act.shape[1]
    # Select the derivative of the activation function used in the forward pass.
    if activation == 'R':
        d_act_func = activations.d_relu
    elif activation == 'S':
        d_act_func = activations.d_sigmoid
    else:
        raise ValueError("Unsupported activation: use 'R' (ReLU) or 'S' (sigmoid)")
    # Gradient w.r.t. the layer's pre-activation output.
    d_curr_out = d_act_func(curr_grad, curr_out)
    # Gradient w.r.t. the weight matrix, averaged over the batch.
    d_curr_weight = np.dot(d_curr_out, prev_act.T) / num
    # Gradient w.r.t. the bias vector, averaged over the batch.
    d_curr_bias = np.sum(d_curr_out, axis=1, keepdims=True) / num
    # Gradient w.r.t. the previous layer's activations (propagated backward).
    d_prev_act = np.dot(curr_weight.T, d_curr_out)
    return d_prev_act, d_curr_weight, d_curr_bias
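
For reference, here is a minimal usage sketch. The gist does not include its `activations` module, so the two derivative helpers below are hypothetical stand-ins that only match the call signature d_act_func(curr_grad, curr_out) used above: d_relu masks the upstream gradient where the pre-activation is non-positive, and d_sigmoid multiplies by sigma(Z)(1 - sigma(Z)). Activations are assumed to be laid out features-by-samples, which is why prev_act.shape[1] counts samples.

import numpy as np
from types import SimpleNamespace

# Hypothetical stand-ins for the gist's missing `activations` module.
def _d_relu(d_act, out):
    # Pass the upstream gradient through only where the unit was active.
    d_out = np.array(d_act, copy=True)
    d_out[out <= 0] = 0
    return d_out

def _d_sigmoid(d_act, out):
    # Treat `out` as the pre-activation Z and recompute sigma(Z).
    s = 1 / (1 + np.exp(-out))
    return d_act * s * (1 - s)

activations = SimpleNamespace(d_relu=_d_relu, d_sigmoid=_d_sigmoid)

# Toy shapes: current layer has 4 units, previous layer has 3, batch of 5.
rng = np.random.default_rng(0)
curr_grad = rng.standard_normal((4, 5))    # upstream gradient dA
curr_weight = rng.standard_normal((4, 3))  # W
curr_bias = rng.standard_normal((4, 1))    # b (unused by the backward pass)
curr_out = rng.standard_normal((4, 5))     # pre-activation Z
prev_act = rng.standard_normal((3, 5))     # previous layer's activations

d_prev_act, d_curr_weight, d_curr_bias = one_layer_backward_pass(
    curr_grad, curr_weight, curr_bias, curr_out, prev_act, activation='R')

print(d_prev_act.shape, d_curr_weight.shape, d_curr_bias.shape)
# (3, 5) (4, 3) (4, 1)

The returned shapes line up with what a training loop needs: d_prev_act keeps the previous layer's activation shape so it can be fed into that layer's backward pass, while d_curr_weight and d_curr_bias match W and b and can be used directly in a gradient-descent update.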