Skip to content

Instantly share code, notes, and snippets.

@terjehaukaas
Last active September 6, 2023 19:22
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save terjehaukaas/e2493b918a1189f294a728ffaeec0563 to your computer and use it in GitHub Desktop.
FORM Analysis
# ------------------------------------------------------------------------
# The following Python code is implemented by Professor Terje Haukaas at
# the University of British Columbia in Vancouver, Canada. It is made
# freely available online at terje.civil.ubc.ca together with notes,
# examples, and additional Python code. Please be cautious when using
# this code; it may contain bugs and comes without warranty of any kind.
# ------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from ModifyCorrelationMatrix import *
from NatafTransformation import *
from GoldenSectionLineSearchAlgorithm import *
from ArmijoLineSearchAlgorithm import *
# ------------------------------------------------------------------------
# RANDOM VARIABLES
# ------------------------------------------------------------------------
# Marginal distribution types, first two moments, and pairwise correlation
# entries given as (i, j, rho) with 1-based variable indices.
distributions = ["Lognormal", "Lognormal", "Uniform"]
means = np.array([500.0, 2000.0, 5.0])
stdvs = np.array([100.0, 400.0, 0.5])
correlation = np.array([
    (1, 2, 0.3),
    (1, 3, 0.2),
    (2, 3, 0.2),
])
# ------------------------------------------------------------------------
# LIMIT-STATE FUNCTION
# ------------------------------------------------------------------------
def g(x):
    """
    Evaluate the limit-state function g(x) = 1 - x2/(1000*x3) - (x1/(200*x3))**2.

    Failure is conventionally associated with g(x) <= 0. As a side effect,
    the module-level evaluation counter is incremented so the analysis can
    report the total number of limit-state evaluations.

    Parameters
    ----------
    x : sequence of float
        Realization of the random variables [x1, x2, x3]; x3 must be nonzero.

    Returns
    -------
    float
        Value of the limit-state function at x.
    """
    # Count the total number of evaluations. Robustness fix: start from
    # zero if the counter has not been defined yet, instead of raising
    # NameError when g is called before the driver initializes it.
    global evaluationCount
    evaluationCount = globals().get('evaluationCount', 0) + 1
    # Get the value of the random variables from the input vector
    x1 = x[0]
    x2 = x[1]
    x3 = x[2]
    # Evaluate the limit-state function (renamed local: the original used a
    # variable named g, shadowing the function's own name inside its body)
    gValue = 1.0 - x2 / (1000.0 * x3) - (x1 / (200.0 * x3))**2
    return gValue
# ------------------------------------------------------------------------
# EXACT DERIVATIVES OF LIMIT-STATE FUNCTION
# ------------------------------------------------------------------------
# Used if you set ddm = True
def dgddm(x):
    """
    Return the exact (analytic) gradient of the limit-state function.

    The components are the closed-form partial derivatives of
    g(x) = 1 - x2/(1000*x3) - (x1/(200*x3))**2 with respect to x1, x2, x3.
    The returned vector has length len(x), matching the input.
    """
    grad = np.zeros(len(x))
    # Unpack the random variables for readability
    v1, v2, v3 = x[0], x[1], x[2]
    # Closed-form partial derivatives
    grad[0] = -2.0 * v1 / (200.0 * v3)**2
    grad[1] = -1.0 / (1000.0 * v3)
    grad[2] = v2 / (1000.0 * v3**2) + 2.0 * v1**2 / (200.0**2 * v3**3)
    return grad
# ------------------------------------------------------------------------
# FINITE DIFFERENCE DERIVATIVES OF LIMIT-STATE FUNCTION
# ------------------------------------------------------------------------
# Used if you set ddm = False
def dg(x, gValue, gFunction):
    """
    Forward finite-difference gradient of gFunction at x.

    gValue must be the already-computed value gFunction(x), so only one
    extra function evaluation is spent per random variable. Each component
    of x is perturbed in place and restored before moving on, so x is
    unchanged on return.
    """
    gradient = np.zeros(len(x))
    for index in range(len(x)):
        # Remember the unperturbed value so it can be restored
        original = x[index]
        # Relative perturbation; fall back to an absolute step of 0.001
        # when the component is exactly zero
        step = 0.001 * original if original != 0.0 else 0.001
        # Perturb, re-evaluate, and restore
        x[index] = original + step
        perturbedValue = gFunction(x)
        x[index] = original
        # Forward-difference estimate of the derivative
        gradient[index] = (perturbedValue - gValue) / step
    return gradient
# ------------------------------------------------------------------------
# FORM ALGORITHM
# ------------------------------------------------------------------------
# First-Order Reliability Method: iterate in the standard normal space to
# find the design point (closest point on g=0 to the origin), then report
# the reliability index beta = ||y*|| and pf = Phi(-beta).
ddm = True                      # True: use exact derivatives; False: finite differences
maxSteps = 50                   # maximum number of FORM iterations
convergenceCriterion1 = 0.001   # tolerance on |g| relative to the first g-value
convergenceCriterion2 = 0.001   # tolerance on y being parallel to alpha
merit_y = 1.0                   # weight on ||y|| in the line-search merit function
merit_g = 5.0                   # weight on |g/gfirst| in the line-search merit function
# Cholesky decomposition of modified correlation matrix
# (only if the user defined a 'correlation' array above; otherwise the
# variables are treated as uncorrelated via the identity matrix)
if 'correlation' in locals():
    print('\n'"Modifying correlation matrix...", flush=True)
    R0 = modifyCorrelationMatrix(means, stdvs, distributions, correlation)
    print('\n'"Done modifying correlation matrix...", flush=True)
    L = np.linalg.cholesky(R0)
else:
    L = np.identity(len(means))
# Set start point in the standard normal space
numRV = len(means)
y = np.zeros(numRV)
# Initialize the vectors
x = np.zeros(numRV)
dGdy = np.zeros(numRV)
# Start the FORM loop
plotStepValues = []
plotBetaValues = []
evaluationCount = 0
# Plot the evolution of the analysis
plt.ion()
fig1 = plt.figure(1)
plt.title("Evolution of the FORM Analysis")
plt.grid(True)
plt.autoscale(True)
plt.ylabel('Distance from origin to trial point')
plt.xlabel('Iteration number')
for i in range(1, maxSteps+1):
    # Transform from y to x (the True flag presumably also requests the
    # Jacobian dx/dy from the Nataf transformation -- verify against
    # transform_y_to_x in NatafTransformation)
    [x, dxdy] = transform_y_to_x(L, y, means, stdvs, distributions, True)
    # Evaluate the limit-state function, g(x) = G(y)
    gValue = g(x)
    # Evaluate the gradient in the x-space, dg / dx
    if ddm:
        dgdx = dgddm(x)
    else:
        dgdx = dg(x, gValue, g)
    # Notice that dG/dy = dg/dx * dx/dz * dz/dy can be multiplied in opposite order if transposed
    dGdy = dxdy.dot(dgdx)
    # Determine the alpha-vector (negative normalized gradient in y-space)
    alpha = np.multiply(dGdy, -1 / np.linalg.norm(dGdy))
    # Calculate the first convergence criterion: |g| relative to the
    # g-value at the start point
    if i == 1:
        gfirst = gValue
    criterion1 = np.abs(gValue / gfirst)
    # Calculate the second convergence criterion: how far y is from being
    # parallel to alpha (norm of the component of y orthogonal to alpha,
    # scaled by max(||y||, 1))
    yNormOr1 = np.linalg.norm(y)
    plotStepValues.append(i)
    plotBetaValues.append(yNormOr1)
    if yNormOr1 < 1.0:
        yNormOr1 = 1.0
    u_scaled = np.multiply(y, 1.0/yNormOr1)
    u_scaled_times_alpha = u_scaled.dot(alpha)
    criterion2 = np.linalg.norm(np.subtract(u_scaled, np.multiply(alpha, u_scaled_times_alpha)))
    # Print status
    print('\n'"FORM Step:", i, "Check1:", criterion1, ", Check2:", criterion2)
    # Check convergence
    if criterion1 < convergenceCriterion1 and criterion2 < convergenceCriterion2:
        # Here we converged; first calculate beta and pf
        betaFORM = np.linalg.norm(y)
        # NOTE(review): 'norm' is not imported in this file directly;
        # presumably scipy.stats.norm arrives via one of the wildcard
        # imports at the top -- verify.
        pfFORM = norm.cdf(-betaFORM)
        print('\n'"FORM analysis converged after", i, "steps with reliability index", betaFORM, "and pf", pfFORM)
        print('\n'"Total number of limit-state evaluations:", evaluationCount)
        print('\n'"Design point in x-space:", x)
        print('\n'"Design point in y-space:", y)
        # Importance vectors alpha and gamma (gamma maps alpha back through
        # the inverse Cholesky factor to account for correlation)
        print('\n'"Importance vector alpha:", alpha)
        gamma = alpha.dot(np.linalg.inv(L))
        gamma = gamma / np.linalg.norm(gamma)
        print('\n'"Importance vector gamma:", gamma)
        break
    # Take a step if convergence did not occur
    else:
        # Give message if non-convergence in maxSteps
        if i == maxSteps:
            print('\n'"FORM analysis did not converge")
            break
        # Determine the search direction (iHLRF-type step toward the
        # limit-state surface along alpha)
        searchDirection = np.multiply(alpha, (gValue / np.linalg.norm(dGdy) + alpha.dot(y))) - y
        # Define the merit function for the line search along the search direction.
        # As coded, m = merit_y * ||y|| + merit_g * |g/gfirst|
        # NOTE(review): an earlier comment said m = 1/2*||y||^2 + c*g,
        # which does not match the code below -- confirm which is intended.
        def meritFunction(stepSize):
            # Determine the trial point corresponding to the step size
            yTrial = y + stepSize * searchDirection
            # Calculate the distance from the origin
            yNormTrial = np.linalg.norm(yTrial)
            # Transform from y to x (False: no Jacobian needed here)
            xTrial = transform_y_to_x(L, yTrial, means, stdvs, distributions, False)
            # Evaluate the limit-state function
            gTrial = g(xTrial)
            # Evaluate the merit function
            return merit_y * yNormTrial + merit_g * np.abs(gTrial/gfirst)
        # Perform the line search for the optimal step size
        # (alternatives: a fixed step of 1.0, or the Armijo rule below)
        #stepSize = 1.0
        stepSize = goldenSectionLineSearch(meritFunction, 0, 1.5, 50, 0.1, 0.1)
        #stepSize = armijoLineSearch(meritFunction, 1.0, 5)
        # Take a step
        y += stepSize * searchDirection
        # Plot the new trial point (segment from the previous iterate to the new one)
        plt.figure(1)
        plt.plot([i, i+1], [np.linalg.norm(y - stepSize * searchDirection), np.linalg.norm(y)], 'rs-', linewidth=1.0)
        # Increment the loop counter
        # NOTE(review): this is a no-op -- the for statement reassigns i on
        # every iteration, so this manual increment has no effect.
        i += 1
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment