@terjehaukaas
Created June 12, 2019 03:33
Sampling Analysis
# ------------------------------------------------------------------------
# The following Python code is implemented by Professor Terje Haukaas at
# the University of British Columbia in Vancouver, Canada. It is made
# freely available online at terje.civil.ubc.ca together with notes,
# examples, and additional Python code. Please be cautious when using
# this code; it may contain bugs and comes without any form of warranty.
# Please see the Programming note for how to get started, and notice
# that you must copy certain functions into the file terjesfunctions.py
# ------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm, lognorm, uniform
import terjesfunctions as fcns
# ------------------------------------------------------------------------
# INPUT
# ------------------------------------------------------------------------
# Distribution types (Normal, Lognormal, and Uniform are currently available)
distributions = ["Lognormal", "Lognormal", "Uniform"]
# Means
means = np.array([500.0, 2000.0, 5.0])
# Standard deviations
stdvs = np.array([100.0, 400.0, 0.5])
# Correlation
correlation = np.array([(1, 2, 0.3),
                        (1, 3, 0.2),
                        (2, 3, 0.2)])
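# (Each triplet above appears to list two random-variable numbers, counted
#  from 1, and their correlation coefficient, e.g. rho(x1, x2) = 0.3.)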
# Limit-state function
def limitStateFunction(x):
    # Get the value of the random variables from the input vector
    x1 = x[0]
    x2 = x[1]
    x3 = x[2]
    g = 1.0 - x2 / (1000.0 * x3) - (x1 / (200.0 * x3))**2
    return g
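# Quick check of the limit-state function (not part of the algorithm): at the
# mean values x = (500, 2000, 5), g = 1 - 2000/5000 - (500/1000)**2
#   = 1 - 0.4 - 0.25 = 0.35 > 0, so the mean point lies in the safe domain.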
# ------------------------------------------------------------------------
# ALGORITHM PARAMETERS
# ------------------------------------------------------------------------
maxNumSamples = 1000
targetCov = 0.02
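# The sampling loop further below stops once the coefficient of variation of
# the failure-probability estimate falls below targetCov (here 2%), or after
# maxNumSamples realizations, whichever comes first.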
# Set sampling centre
y = np.zeros(len(means))
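# With y at the origin the sampling density coincides with the standard normal
# density, i.e., crude Monte Carlo sampling. Shifting y towards the design
# point would turn the loop below into importance sampling; the phi/h factor
# computed in the loop corrects for such a shift.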
# ------------------------------------------------------------------------
# MODIFY THE CORRELATION MATRIX AND COMPUTE THE CHOLESKY DECOMPOSITION
# ------------------------------------------------------------------------
if 'correlation' in locals():
    print('\n'"Modifying correlation matrix...", flush=True)
    R0 = fcns.modifyCorrelationMatrix(means, stdvs, distributions, correlation)
    print('\n'"Done modifying correlation matrix...", flush=True)
    L = np.linalg.cholesky(R0)
else:
    L = np.identity(len(means))
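# L is the lower-triangular Cholesky factor of the modified correlation matrix
# R0, i.e., L @ L.T = R0. Pre-multiplying independent standard normal variables
# y by L (presumably done inside transform_y_to_x, which receives L) yields
# correlated standard normal variables z = L * y with correlation matrix R0.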
# ------------------------------------------------------------------------
# SAMPLING ALGORITHM
# ------------------------------------------------------------------------
# Initialize plot
plt.ion()
fig = plt.figure()
plt.axis('off')
plt.title('Sampling Analysis')
axes1 = fig.add_subplot(211)
axes1.set_autoscale_on(True)
axes1.autoscale_view(True,True,True)
axes1.get_xaxis().set_visible(False)
topPlot, = plt.plot([], [], 'k-', linewidth=1.0)
plt.ylabel('pf')
axes2 = fig.add_subplot(212)
axes2.set_autoscale_on(True)
axes2.autoscale_view(True,True,True)
bottomPlot, = plt.plot([], [], 'k-', linewidth=1.0)
plt.xlabel('Sample number')
plt.ylabel('C.o.v.')
iData = [0]
covData = [0]
pfData = [0]
# Initialize parameters before loop
IphiOverh_Sum = 0
IphiOverh_SquaredSum = 0
covPf = 0
IphiOverh_Average = 0
# Start the sampling
for i in range(1, maxNumSamples+1):
    # Generate realizations of standard normal random variables
    ySamples = np.zeros(len(means))
    for j in range(len(means)):
        ySamples[j] = np.random.normal() + y[j]
    # Transform from y-space to x-space (the correlation is imposed via z = L * y)
    x = fcns.transform_y_to_x(L, ySamples, means, stdvs, distributions, False)
    # Evaluate the limit-state function, g(x) = G(y)
    g = limitStateFunction(x)
    # Calculate the correction factor phi/h
    phiOverh = np.exp(0.5*((np.linalg.norm(ySamples-y))**2 - (np.linalg.norm(ySamples))**2))
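    # For standard normal densities centred at the origin (phi) and at the
    # sampling centre y (h), the normalization constants cancel and
    #   phi(ySamples) / h(ySamples)
    #     = exp(-0.5*||ySamples||**2) / exp(-0.5*||ySamples - y||**2)
    #     = exp(0.5*(||ySamples - y||**2 - ||ySamples||**2)),
    # which is the expression computed above. With y = 0 the factor equals 1.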
    # Evaluate indicator function
    if g < 0:
        I = 1
    else:
        I = 0
    # Add to sum of I * phi / h
    IphiOverh_Sum += I * phiOverh
    IphiOverh_SquaredSum += I * phiOverh**2
    # Estimate the failure probability and its coefficient of variation
    if IphiOverh_Sum != 0.0:
        pf = IphiOverh_Sum / i
        pfVariance = (IphiOverh_SquaredSum / i - pf * pf) / i
        covPf = np.sqrt(pfVariance) / pf
    else:
        pf = 0
        covPf = 0
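    # With q_i = I_i * phi/h_i, the estimator is pf = (1/i) * sum(q_i); its
    # variance is estimated as ((1/i) * sum(q_i**2) - pf**2) / i, and the
    # coefficient of variation of pf is sqrt(variance) / pf.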
    # Plot pf and c.o.v. status
    iData.append(i)
    covData.append(covPf)
    pfData.append(pf)
    topPlot.set_data(iData, pfData)
    axes1.relim()
    axes1.autoscale_view(True, True, True)
    bottomPlot.set_data(iData, covData)
    axes2.relim()
    axes2.autoscale_view(True, True, True)
    plt.show()
    plt.pause(.000001)
    # Stop if the c.o.v. is sufficiently small
    if covPf < targetCov and covPf > 0.0:
        break
# Estimate the reliability index corresponding to pf
beta = -norm.ppf(pf)
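# beta is the generalized reliability index, beta = -Phi^(-1)(pf), where Phi is
# the standard normal CDF; e.g. pf = 0.001 corresponds to beta of about 3.09.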
# Print results
print('\n'"After", i, "samples: Beta", beta, "and pf", pf, "with cov", covPf, flush=True)
# Hold the plot for a few seconds before proceeding
print('\n'"Pausing for a few seconds before closing the plot...", flush=True)
plt.pause(2)