@romanmichaelpaolucci
Created February 21, 2024 23:14
import numpy as np
import random
# Define the state space and number of days
state_space = range(101)
num_days = 365*20
# Generate fake daily data for the Markov chain
# For simplicity, we'll use a random choice for transitions between states
daily_data = [random.choice(state_space) for _ in range(num_days)]
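# Note: because each day is sampled independently and uniformly, the true
# transition probability between any pair of states is 1/101 (about 0.0099),
# so the estimated rows below should come out roughly flat.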
# Initialize a transition count matrix
transition_counts = np.zeros((len(state_space), len(state_space)))
# Count transitions
for i in range(1, len(daily_data)):
    current_state = daily_data[i - 1]
    next_state = daily_data[i]
    transition_counts[current_state, next_state] += 1
# Adjust the transition counts to make the last state (100) an absorbing state
transition_counts[-1, :] = 0
transition_counts[-1, -1] = 1 # Self-transition for the absorbing state
# Calculate transition probabilities (MLE)
# To avoid division by zero, we'll use a mask where counts are non-zero
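# MLE for a Markov chain: p_hat[i, j] = N[i, j] / sum_k N[i, k], i.e. row-normalized counts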
transition_matrix = np.zeros_like(transition_counts)
non_zero_counts = transition_counts.sum(axis=1) > 0
transition_matrix[non_zero_counts] = transition_counts[non_zero_counts] / transition_counts[non_zero_counts].sum(axis=1, keepdims=True)
# Display a part of the transition matrix for readability
print(transition_matrix[-10:, -10:])
# Probability of being absorbed in state 100 (maximum occupancy) within 100 steps, starting from state 0
print('Probability of Maximum Occupancy: ', np.linalg.matrix_power(transition_matrix, 100)[0, 100])
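
# Optional sanity checks: a minimal sketch, assuming the transition_matrix estimated above
# Every row with observed transitions should sum to 1; unobserved rows stay all zeros.
row_sums = transition_matrix.sum(axis=1)
print('Rows summing to 1:', int(np.sum(np.isclose(row_sums, 1.0))), 'of', len(state_space))
# The absorbing state 100 should map to itself with probability 1.
assert np.isclose(transition_matrix[-1, -1], 1.0)
# Because state 100 is absorbing, the probability of having reached it is
# non-decreasing in the horizon; compare a few horizons for intuition.
for steps in (10, 100, 1000):
    p = np.linalg.matrix_power(transition_matrix, steps)[0, 100]
    print(f'P(in state 100 after {steps} steps | start in state 0) = {p:.4f}')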