@jeromepl
Last active January 24, 2024 14:48
Raked Weighting in Python
import numpy as np


def raking_inverse(x):
    return np.exp(x)


def d_raking_inverse(x):
    return np.exp(x)


def graking(X, T, max_steps=500, tolerance=1e-6):
    # Based on the algorithm in (Deville et al., 1992), explained in detail on page 37 of
    # https://orca.cf.ac.uk/109727/1/2018daviesgpphd.pdf

    # Initialize variables - Step 1
    n, m = X.shape
    L = np.zeros(m)  # Lagrange multipliers (lambda)
    w = np.ones(n)   # Our weights (will get progressively updated)
    H = np.eye(n)

    success = False
    for step in range(max_steps):
        L += np.dot(np.linalg.pinv(np.dot(np.dot(X.T, H), X)), (T - np.dot(X.T, w)))  # Step 2.1
        w = raking_inverse(np.dot(X, L))  # Step 2.2
        H = np.diag(d_raking_inverse(np.dot(X, L)))  # Step 2.3

        # Termination condition: largest relative gap between weighted margins and targets
        loss = np.max(np.abs(np.dot(X.T, w) - T) / T)
        if loss < tolerance:
            success = True
            break

    if not success:
        raise Exception("Did not converge")

    return w
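
A minimal usage sketch (not part of the original gist): the sample data, category names, and population targets below are made up for illustration. X holds one indicator column per calibration margin plus an intercept column so the total sample size is also calibrated, and T holds the corresponding known population totals on the same scale as the sample.

import numpy as np

rng = np.random.default_rng(0)
n = 1000

# Hypothetical survey sample with two binary attributes
gender = rng.integers(0, 2, size=n)   # 0 = male, 1 = female
age_old = rng.integers(0, 2, size=n)  # 0 = under 50, 1 = 50+

# Design matrix: intercept, female indicator, 50+ indicator
X = np.column_stack([np.ones(n), gender, age_old])

# Assumed population totals for each column of X (total, female, 50+)
T = np.array([n, 0.52 * n, 0.35 * n])

w = graking(X, T)
print(np.dot(X.T, w))        # ~[1000, 520, 350]: weighted margins match the targets
print(w.min(), w.max())      # spread of the raked weights

Because the raking inverse is exponential, the resulting weights are always positive, unlike linear calibration, which can produce negative weights when the targets are far from the sample margins.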