@wbadry
Created January 8, 2019 00:31
Simple implementation of Gradient Descent using Python
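The cost being minimized is the quadratic J(w) = c0*w**2 + c1*w + c2, whose gradient is dJ/dw = 2*c0*w + c1; each iteration applies the update w := w - learning_rate * dJ/dw. With the coefficients used below, [1., -20., 100.], the cost is J(w) = (w - 10)**2, so the iterates should converge toward w = 10 and the plotted cost should decay toward 0.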
import numpy as np
import matplotlib.pyplot as plt


def gradientdescentoptimizer(param, c, learning_rate=0.01):
    # Forward propagation: evaluate the quadratic cost J(w) = c0*w**2 + c1*w + c2
    param["cost"] = c[0][0] * param["w"] ** 2 + c[1][0] * param["w"] + c[2][0]
    # Backward propagation: gradient dJ/dw = 2*c0*w + c1
    param["dw"] = 2 * c[0][0] * param["w"] + c[1][0]
    # Gradient-descent update of w
    param["w"] = param["w"] - learning_rate * param["dw"]
    return param
if __name__ == '__main__':
    param = {"w": 0., "dw": 0., "cost": 0.}
    learning_rate = 0.01
    cost_data = list()
    # coefficients = np.array([[1.], [-10.], [25.]])  # alternative quadratic, minimum at w = 5
    coefficients = np.array([[1.], [-20.], [100.]])    # J(w) = (w - 10)**2, minimum at w = 10
    for i in range(1000):
        param = gradientdescentoptimizer(param, coefficients, learning_rate)
        cost_data.append(param["cost"])
    print("w after {} iterations is {}".format(i + 1, param["w"]))
    plt.plot(cost_data)
    plt.xlabel("#iterations")
    plt.ylabel("Cost J")
    plt.title("Gradient Descent")
    plt.show()
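As a quick sanity check (a minimal sketch, not part of the original gist, meant to be appended inside the __main__ block above), the quadratic's analytic minimizer is w* = -c1 / (2*c0), so the learned w can be compared against it:

    # Hypothetical sanity check: compare the learned w with the closed-form
    # minimizer of the quadratic, w* = -c1 / (2*c0).
    w_closed_form = -coefficients[1][0] / (2 * coefficients[0][0])  # 10.0 for [1., -20., 100.]
    print("closed-form minimum: {}".format(w_closed_form))
    print("absolute error: {}".format(abs(param["w"] - w_closed_form)))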