backward propagation
import numpy as np

def intermediate_differentiation(dEA, activation, stash):
    # backpropagate through the activation: dE/dZ = dE/dA * dA/dZ
    if activation == "relu":
        z = stash
        dAZ = 1                           # dA/dZ is 1 where z > 0, 0 elsewhere
        dEZ = np.array(dEA, copy=True)
        dEZ[z <= 0] = 0                   # zero the gradient where relu was inactive
    elif activation == "sigmoid":
        z = stash
        tmp = 1 / (1 + np.exp(-z))
        dAZ = tmp * (1 - tmp)             # differentiation of output w.r.t input dA/dZ
        dEZ = dEA * dAZ                   # multiply dE/dA * dA/dZ
    return dEZ
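
# A quick illustrative check (the values below are made up, not from the
# original gist): relu should zero the gradient wherever z <= 0, while
# sigmoid should scale it by a*(1-a).
#   z   = np.array([[-1.0, 2.0]])
#   dEA = np.array([[0.5, 0.5]])
#   intermediate_differentiation(dEA, "relu", z)      # -> [[0.   0.5]]
#   intermediate_differentiation(dEA, "sigmoid", z)   # -> approx [[0.0983 0.0525]]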
def error_rate_calc(dEZ, stash):
    z, w, b = stash
    m = z.shape[0]
    dZW = z.T                                        # rate of change of input w.r.t weight, dZ/dW = z
    dEW = np.dot(dZW, dEZ) / m                       # rate of change of error w.r.t weight, dE/dW = dE/dZ * dZ/dW
    dEb = np.sum(dEZ, axis=0, keepdims=True) / m     # rate of change of error w.r.t bias, dE/db = dE/dZ * dZ/db
    dA_prev = np.dot(dEZ, w.T)                       # error propagated backward
    return dA_prev, dEW, dEb
def linear_backward(dEA, stash, activation):
    linear_stash, activation_stash = stash
    dEZ = intermediate_differentiation(dEA, activation, activation_stash)   # dE/dZ
    dA_prev, dW, db = error_rate_calc(dEZ, linear_stash)                    # dE/dA_prev, dE/dW, dE/db
    return dA_prev, dW, db
def backward_pass(A, Y, stashes):
    grads = {}
    L = len(stashes)                     # number of layers
    Y = Y.reshape(A.shape)
    dEA = -(np.divide(Y, A) - np.divide(1 - Y, 1 - A))   # differentiation of error w.r.t output dE/dA (binary cross-entropy)
    # output layer uses the sigmoid activation
    current_stash = stashes[L - 1]
    grads['dA' + str(L - 1)], grads['dW' + str(L)], grads['db' + str(L)] = linear_backward(dEA, current_stash, "sigmoid")
    # hidden layers use the relu activation, walked from last to first
    for l in reversed(range(L - 1)):
        current_stash = stashes[l]
        grads['dA' + str(l)], grads['dW' + str(l + 1)], grads['db' + str(l + 1)] = linear_backward(grads['dA' + str(l + 1)], current_stash, "relu")
    return grads
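
# ------------------------------------------------------------------
# Minimal usage sketch (not part of the original gist). It assumes the
# forward pass stashes, per layer, ((A_prev, W, b), Z) with samples in
# rows, which is the layout error_rate_calc and backward_pass expect.
# The tiny network sizes and random data below are illustrative only.
# ------------------------------------------------------------------
if __name__ == "__main__":
    np.random.seed(0)

    m, n_in, n_hidden, n_out = 4, 3, 5, 1            # tiny 2-layer network
    X = np.random.randn(m, n_in)
    Y = np.random.randint(0, 2, size=(m, n_out))

    W1, b1 = np.random.randn(n_in, n_hidden) * 0.01, np.zeros((1, n_hidden))
    W2, b2 = np.random.randn(n_hidden, n_out) * 0.01, np.zeros((1, n_out))

    # forward pass, stashing ((A_prev, W, b), Z) for each layer
    Z1 = np.dot(X, W1) + b1
    A1 = np.maximum(0, Z1)                           # relu
    Z2 = np.dot(A1, W2) + b2
    A2 = 1 / (1 + np.exp(-Z2))                       # sigmoid
    stashes = [((X, W1, b1), Z1), ((A1, W2, b2), Z2)]

    grads = backward_pass(A2, Y, stashes)
    print(grads['dW1'].shape, grads['db1'].shape)    # (3, 5) (1, 5)
    print(grads['dW2'].shape, grads['db2'].shape)    # (5, 1) (1, 1)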