Skip to content

Instantly share code, notes, and snippets.

@smcantab
Last active May 18, 2016 16:43
Show Gist options
  • Save smcantab/8045ad120fe724c089ac48412e4e6dde to your computer and use it in GitHub Desktop.
#pele and PyCG_DESCENT need to be installed and added to the pythonpath
import numpy as np
from pele.potentials import BasePotential
from PyCG_DESCENT import CGDescent
class squareFunc(BasePotential):
    """Quadratic test cost function f(x) = x[0]**2 + x[1]**2.

    Implements the pele ``BasePotential`` interface so it can be handed
    to any pele-compatible optimizer.
    """

    # this is the only method that you actually need to implement
    def getEnergy(self, x):
        """Return the function value ("energy") at point x."""
        return x[0] ** 2 + x[1] ** 2

    # this is optional, if you know the analytical derivative implement it
    # otherwise the base class will automatically compute the gradient numerically
    # using finite differences
    def getEnergyGradient(self, x):
        """Return the function value and its analytical gradient at x."""
        energy = x[0] ** 2 + x[1] ** 2
        gradient = np.array([2 * x[0], 2 * x[1]])
        return energy, gradient
def main():
    """Minimize the quadratic test function with CG_DESCENT and print the result.

    Builds the cost function, runs the conjugate-gradient minimizer from a
    fixed starting point, and reports the function value before and after.
    """
    f = squareFunc()  # initialize the cost function (or potential in our language)
    # float dtype: a continuous optimizer must not be given an integer array,
    # which could truncate in-place coordinate updates
    x_start = np.array([1.0, 1.0])
    e_start = f.getEnergy(x_start)
    # M=0: plain CG_DESCENT without the limited-memory variant;
    # print_level/verbosity 0 keep the run silent
    cgd = CGDescent(x_start, f, tol=1e-5, M=0, print_level=0, verbosity=0)
    # 1000 is the maximum number of steps; it terminates early once the
    # gradient inf-norm is less than tol
    result = cgd.run(1000)
    x_end, e_end = result.coords, result.energy
    # print() call form: works on Python 3 (the original `print "..."`
    # statement is a SyntaxError there) and is equivalent on Python 2
    # for a single argument
    print("f(x_start)={} --> f(x_end)={}".format(e_start, e_end))


if __name__ == '__main__':
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment