Skip to content

Instantly share code, notes, and snippets.

@jaems33
Last active October 20, 2023 23:53
Show Gist options
  • Save jaems33/6f445734f777db08d90892681641eb2e to your computer and use it in GitHub Desktop.
Save jaems33/6f445734f777db08d90892681641eb2e to your computer and use it in GitHub Desktop.
import numpy as np
from numpy.linalg import inv
# Measurements: position (x) and velocity (v) observed at each time step.
x_observations = np.array([4000, 4260, 4550, 4860, 5110])
v_observations = np.array([280, 282, 285, 286, 290])
# Stack into one (5, 2) measurement matrix; each row is [position, velocity].
z = np.c_[x_observations, v_observations]
# Initial Conditions
a = 2 # Acceleration (assumed constant over the whole run)
v = 280  # initial velocity estimate; matches the first velocity measurement
t = 1 # Difference in time between consecutive measurements
# Process / Estimation Errors (standard deviations of the initial estimate)
error_est_x = 20
error_est_v = 5
# Observation Errors (standard deviations of the sensor readings)
error_obs_x = 25 # Uncertainty in the measurement
error_obs_v = 6
def prediction2d(x, v, t, a):
    """Predict the next state one time step ahead.

    Applies the constant-acceleration kinematics model
        x' = x + v*t + 0.5*a*t**2
        v' = v + a*t
    (equivalent to X' = A @ X + B * a with A = [[1, t], [0, 1]] and
    B = [[0.5*t**2], [t]]).

    Returns a (2, 1) array [[x'], [v']].
    """
    next_position = x + v * t + 0.5 * a * t ** 2
    next_velocity = v + a * t
    return np.array([[next_position],
                     [next_velocity]])
def covariance2d(sigma1, sigma2):
    """Build a 2x2 diagonal covariance matrix from two standard deviations.

    The off-diagonal (cross-covariance) terms are deliberately zero to
    keep the example simple, so the result is diag(sigma1**2, sigma2**2).
    """
    # The original computed the full matrix including cross terms and then
    # threw the off-diagonals away with np.diag(np.diag(...)); building the
    # diagonal matrix directly gives the identical result without the
    # wasted work.
    return np.diag([sigma1 ** 2, sigma2 ** 2])
# Initial Estimation Covariance Matrix (diagonal: initial estimate variances).
P = covariance2d(error_est_x, error_est_v)
# State-transition matrix of the constant-velocity model.
A = np.array([[1, t],
              [0, 1]])

# Initial State Matrix: first position measurement plus the assumed velocity.
X = np.array([[z[0][0]],
              [v]])
n = len(z[0])  # dimension of the measurement vector (position, velocity)

# The measurement model H and measurement noise R are constant, so build
# them once instead of rebuilding them on every loop iteration.
H = np.identity(n)
R = covariance2d(error_obs_x, error_obs_v)

# Run the predict/update cycle over the remaining measurements (the first
# measurement seeded the initial state above).
for data in z[1:]:
    # Predict the next state from the motion model.
    X = prediction2d(X[0][0], X[1][0], t, a)
    # Propagate the estimate covariance.  To simplify the problem,
    # off-diagonal terms are set to 0.
    P = np.diag(np.diag(A.dot(P).dot(A.T)))

    # Calculating the Kalman Gain
    S = H.dot(P).dot(H.T) + R
    K = P.dot(H).dot(inv(S))

    # Reshape the new data into the measurement space.
    Y = H.dot(data).reshape(n, -1)

    # Update the State Matrix
    # Combination of the predicted state, measured values, covariance
    # matrix and Kalman Gain
    X = X + K.dot(Y - H.dot(X))
    # Update Process Covariance Matrix
    P = (np.identity(len(K)) - K.dot(H)).dot(P)

print("Kalman Filter State Matrix:\n", X)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment