Directional Line Search Optimization Analysis
# ------------------------------------------------------------------------
# The following Python code is implemented by Professor Terje Haukaas at
# the University of British Columbia in Vancouver, Canada. It is made
# freely available online at terje.civil.ubc.ca together with notes,
# examples, and additional Python code. Please be cautious when using
# this code; it may contain bugs and comes without any form of warranty.
# Please see the Programming note for how to get started, and notice
# that you must copy certain functions into the file terjesfunctions.py
# ------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import terjesfunctions as fcns
# ------------------------------------------------------------------------
# INPUT
# ------------------------------------------------------------------------
# Objective function
def F(x):

    # Extract the design variables
    x1 = x[0]
    x2 = x[1]

    # Spring stiffnesses and load values
    k1 = 100.0
    k2 = 90.0
    Fx1 = 20.0
    Fx2 = 40.0

    # Energy-style objective: deformation terms for the two springs minus the work done by the loads
    return k1 * ( np.sqrt( x1**2 + (x2+1)**2 ) - 1 )**2 + k2 * ( np.sqrt( x1**2 + (x2-1)**2 ) - 1 )**2 - ( Fx1 * x1 + Fx2 * x2 )
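# ------------------------------------------------------------------------
# The loop below evaluates the gradient with fcns.nablaF, a helper from
# terjesfunctions.py that is not included in this gist. The sketch below
# is only an illustration of what such a function could look like, assuming
# forward finite differences and the call signature used in the loop; the
# actual implementation in terjesfunctions.py may differ.
def finiteDifferenceGradientSketch(x, F, Fvalue, perturbation=1e-8):

    # Perturb one design variable at a time and form forward differences
    gradient = np.zeros(len(x))
    for i in range(len(x)):
        xPerturbed = x.copy()
        xPerturbed[i] += perturbation
        gradient[i] = (F(xPerturbed) - Fvalue) / perturbation
    return gradient
# ------------------------------------------------------------------------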
# Start point
startPoint = np.zeros(2)
# ------------------------------------------------------------------------
# OPTIMIZATION ALGORITHM
# ------------------------------------------------------------------------
# Algorithm parameters
tolerance = 0.05
maxSteps = 100
# Initial declarations
maxFunctionValue = 0.0
minFunctionValue = 0.0
maxDesignVariableValue = 0.0
minDesignVariableValue = 0.0
numDVs = len(startPoint)
searchDirection = np.zeros(numDVs)
previousPoint = np.zeros(numDVs)
previousGradient = np.zeros(numDVs)
inverseHessian = np.zeros((numDVs, numDVs))
previousInverseHessian = np.zeros((numDVs, numDVs))
x = startPoint.copy()
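# ------------------------------------------------------------------------
# The loop below obtains the search direction from
# fcns.quasiNewtonSearchDirection, another helper from terjesfunctions.py
# that is not included in this gist. A minimal BFGS-only sketch is shown
# here for illustration; it assumes an identity inverse Hessian on the
# first step and the standard BFGS update thereafter, and it may differ
# from the actual implementation, which also covers DFP, Broyden, and SR1.
def bfgsSearchDirectionSketch(x, previousPoint, gradient, previousGradient, previousInverseHessian):

    numDVs = len(x)
    if np.linalg.norm(previousGradient) == 0.0:

        # First step: no curvature information yet, so use the identity matrix
        inverseHessian = np.identity(numDVs)
    else:

        # Standard BFGS update of the inverse Hessian approximation
        delta = x - previousPoint
        gamma = gradient - previousGradient
        identity = np.identity(numDVs)
        factor = identity - np.outer(delta, gamma) / (delta @ gamma)
        inverseHessian = factor @ previousInverseHessian @ factor.T \
                         + np.outer(delta, delta) / (delta @ gamma)

    # The search direction is the negative inverse Hessian times the gradient
    searchDirection = -inverseHessian @ gradient
    return searchDirection, inverseHessian
# ------------------------------------------------------------------------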
# Start the loop
loopCounter = 0
while loopCounter < maxSteps:

    loopCounter += 1

    # Evaluate the objective function and its gradient
    Fvalue = F(x)
    Fgradient = fcns.nablaF(x, F, Fvalue)
    gradientNorm = np.linalg.norm(Fgradient)

    # Output
    print("\nAt step %d of the optimization analysis:" % loopCounter)
    print("Objective function: ", Fvalue)
    print("Design variables: ", x)
    print("Gradient vector: ", Fgradient)
    print("Convergence criterion:", gradientNorm)

    # Check convergence
    if gradientNorm < tolerance:
        print("\nThe optimization analysis converged")
        break
    elif loopCounter == maxSteps:
        print("\nMaximum number of steps reached before convergence")
    else:

        # Determine the search direction by steepest descent
        #searchDirection = fcns.steepestDescentSearchDirection(Fgradient)

        # Determine the search direction by the conjugate gradient method
        #searchDirection = fcns.conjugateGradientSearchDirection(Fgradient, previousGradient)

        # Determine the search direction by the quasi-Newton method (BFGS, DFP, Broyden, SR1)
        [searchDirection, inverseHessian] = fcns.quasiNewtonSearchDirection(x, previousPoint, \
            Fgradient, previousGradient, previousInverseHessian, "BFGS")

        # Store the previous point, gradient, and inverse Hessian, because some search direction algorithms need them
        previousPoint = x.copy()
        previousGradient = Fgradient.copy()
        previousInverseHessian = inverseHessian.copy()

        # Define the merit function for the line search along the search direction
        def meritFunction(stepSize):

            # Determine the trial point corresponding to the step size
            xTrial = x + stepSize * searchDirection

            # Evaluate and return the objective function, which here serves as the merit function
            FTrial = F(xTrial)
            return FTrial

        # Search along the search direction for the optimal step size
        stepSize = fcns.goldenSectionLineSearch(meritFunction, 0.0, 10.0, 50.0, 0.0001, 0.01)

        # Take the step
        x += stepSize * searchDirection
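# ------------------------------------------------------------------------
# The step size above is determined by fcns.goldenSectionLineSearch, a
# helper from terjesfunctions.py that is not included in this gist. The
# argument list is assumed here to mean (function, lower bound, upper
# bound, maximum number of iterations, and two convergence tolerances);
# the generic golden-section sketch below uses a single interval tolerance
# and may differ from the actual implementation.
def goldenSectionLineSearchSketch(meritFunction, lowerBound, upperBound, maxIterations, tolerance):

    # Golden ratio factor
    goldenRatio = (np.sqrt(5.0) - 1.0) / 2.0

    # Shrink the bracket until it is smaller than the tolerance
    a = lowerBound
    b = upperBound
    for _ in range(int(maxIterations)):
        if abs(b - a) < tolerance:
            break

        # Place two interior trial points and keep the sub-interval with the smaller value
        c = b - goldenRatio * (b - a)
        d = a + goldenRatio * (b - a)
        if meritFunction(c) < meritFunction(d):
            b = d
        else:
            a = c

    # Return the midpoint of the final bracket as the step size
    return (a + b) / 2.0
# ------------------------------------------------------------------------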