Skip to content

Instantly share code, notes, and snippets.

@agoodman
Last active January 5, 2023 00:06
Show Gist options
  • Save agoodman/5402356986915a5e441da319e8791b98 to your computer and use it in GitHub Desktop.
Hessian PDE solver
import numpy as np
from random import randrange
xact = np.array([4, 6, 2, -3, 3, -7, 8, -9, -0.4949])
p = np.ones(xact.size)
u = np.eye(p.size)
for k in range(p.size):
u[k] = np.zeros(p.size)
u[k][k] = 1
hnow = np.eye(p.size)
jnow = np.zeros(p.size)
debug = False
pointing_factor = 1 #np.sqrt(p.size)
local_factor = 0.01
iteration_max = 1000
early_victory = True
def f(values):
# f(x) = (x1-4)(x2-6)(x3-2)(x4+3)(x5-3)(x6+7)(x7-8)(x8+9)(cos(x9)-0.88)
return -(values[0] - 4)*(values[1] - 6)*(values[2] - 2)*(values[3] + 3)*(values[4]-3)*(values[5]+7)*(values[6]-8)*(values[7]+9)*(np.cos(values[8])-0.88)
def diagonal_term(h, p, k, d):
# recompute H[kk]
p1 = p + 2*d*u[k]
p2 = p - 2*d*u[k]
# [ f(x + 2 ∆x ui) - 2 f(x) + f(x - 2 ∆x ui) ] / (4 ∆x2)
jkk = (f(p1) - 2*f(p) + f(p2)) / (4*d*d)
h[k][k] = jkk
return jkk
def pde_term(h, p, k1, k2, d):
# recompute H[k1][k2]
# [ f(x + ∆x ui + ∆x uj) - f(x + ∆x ui - ∆x uj) - f(x - ∆x ui + ∆x uj) + f(x - ∆x ui - ∆x uj) ] / (4 ∆x2)
p1 = p + d*u[k1] + d*u[k2]
p2 = p + d*u[k1] - d*u[k2]
p3 = p - d*u[k1] + d*u[k2]
p4 = p - d*u[k1] - d*u[k2]
if debug:
print("p1: {}\np2: {}\n p3: {}\np4: {}".format(p1, p2, p3, p4))
result = (f(p1) - f(p2) - f(p3) + f(p4)) / (4*d*d)
h[k1][k2] = result
h[k2][k1] = result
return result
def compute_hessian(h, p, d):
for k in range(p.size):
term = diagonal_term(h, p, k, d)
if debug:
print("diag {} => {}".format(k, term))
for k1 in range(p.size):
for k2 in range(p.size):
if k2 < k1:
continue
term = pde_term(h, p, k1, k2, d)
if debug:
print("pde {}, {} => {}".format(k1, k2, term))
return
def jacobian_term(p, k, d):
# [ f(x + ∆x ui) - f(x - ∆x ui) ] / (2 ∆x)
p1 = p + d*u[k]
p2 = p - d*u[k]
result = (f(p1) - f(p2)) / (2*d)
return result
def compute_jacobian(j, p, d):
for k in range(p.size):
term = jacobian_term(p, k, d)
if debug:
print("jac {} => {}",format(k, term))
j[k] = term
return
def next_guess(pnow):
# H p = -grad(f)
# x2 = x1 + cp
if debug:
print("{} * {} = {}".format(hnow, pnow, -jnow))
pnext = np.linalg.solve(hnow, -jnow)
return pnow + pointing_factor*pnext
def cycle_iteration(guess):
# guess the unit vector
fguess = f(guess)
if debug:
print("jnow: {}".format(jnow))
print("hnow: {}".format(hnow))
prev_grad_mag = np.linalg.norm(jnow)
compute_jacobian(jnow, guess, local_factor)
if debug:
print(jnow)
compute_hessian(hnow, guess, local_factor)
grad_mag = np.linalg.norm(jnow)
delta_mag = prev_grad_mag - grad_mag
print("guess: {}, {}, {} ({})".format(p, np.linalg.det(hnow), grad_mag, delta_mag))
if debug:
print(hnow)
result = next_guess(guess)
return result
flast = f(p)
plast = p
attempts = 0
victory_threshold = np.sqrt(p.size) * 1e-6
for i in range(iteration_max):
attempts = attempts + 1
pnext = cycle_iteration(p)
ftmp = f(pnext)
p = pnext
if early_victory:
if np.linalg.norm(jnow) < victory_threshold:
break
plast = p
flast = ftmp
print("guess: {} => {}".format(p, f(p)))
err = xact - p
print("error: {} ({})".format(err, np.linalg.norm(err)))
print("required {} iterations".format(attempts))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment