Skip to content

Instantly share code, notes, and snippets.

@evd0kim
Created August 10, 2020 15:38
Show Gist options
  • Save evd0kim/e28c04ef3f12c7ace0c310d83e530c85 to your computer and use it in GitHub Desktop.
Coinflips
"""
reused code from https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
"""
import numpy as np
from matplotlib import pyplot as plt
import scipy.stats as stats
dist = stats.beta
n_trials = [0, 1, 2, 3, 4, 5, 10, 15, 20, 24]
data = np.array([0, 1, 0, 0, 0,
0, 0, 0, 0, 0,
0, 1, 0, 1, 1,
0, 0, 1, 0, 0,
1, 1, 1, 1, 0,
0, 1, 1, 0, 1,
1, 1, 1, 1, 0,
1, 1, 0, 1, 0,
1, 0, 1, 0, 0,
1, 1, 1, 0, 1,
0, 1, 1, 0, 1,
1],
dtype=int)
n_trials[-1] = len(data)
data_1 = stats.bernoulli.rvs(0.5, size=n_trials[-1])
data_2 = stats.bernoulli.rvs(0.5, size=n_trials[-1])
print(data)
print(type(data))
x = np.linspace(0, 1, 100)
# For the already prepared, I'm using Binomial's conj. prior.
for k, N in enumerate(n_trials):
sx = plt.subplot(len(n_trials)/2, 2, k+1)
plt.xlabel("$p$, probability of heads") \
if k in [0, len(n_trials)-1] else None
plt.setp(sx.get_yticklabels(), visible=False)
heads = data[:N].sum()
heads_1 = data_1[:N].sum()
heads_2 = data_2[:N].sum()
y = dist.pdf(x, 1 + heads, 1 + N - heads)
y_1 = dist.pdf(x, 1 + heads_1, 1 + N - heads_1)
y_2 = dist.pdf(x, 1 + heads_2, 1 + N - heads_2)
plt.plot(x, y, label="observe %d tosses,\n %d heads" % (N, heads))
plt.plot(x, y_1, label="conrol g1 %d tosses,\n %d heads" % (N, heads_1))
plt.plot(x, y_2, label="control g2 %d tosses,\n %d heads" % (N, heads_2))
plt.fill_between(x, 0, y, color="#348ABD", alpha=0.2)
plt.fill_between(x, 0, y_1, color="#ffccff", alpha=0.4)
plt.fill_between(x, 0, y_1, color="#ccffcc", alpha=0.4)
plt.vlines(0.5, 0, 4, color="k", linestyles="--", lw=1)
leg = plt.legend()
leg.get_frame().set_alpha(0.4)
plt.autoscale(tight=True)
plt.suptitle("Bayesian updating of posterior probabilities",
y=1.02,
fontsize=14)
plt.tight_layout()
plt.show()
"""
reused code from https://github.com/CamDavidsonPilon/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers
"""
import numpy as np
from matplotlib import pyplot as plt
import scipy.stats as stats
dist = stats.beta
n_trials = [6, 8, 10, 20, 30, 35]
data = np.array([0, 0, 0, 1, 1, 0, 0, 0, 0, 1,
0, 1, 0, 1, 0, 1, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
0, 0, 1, 0, 0, 0, 0, 0, 1, 1,
0, 0, 0, 0, 0, 0, 1, 1, 1, 0,
0, 0, 1, 1, 0, 0, 0, 1, 0, 0,
0, 1, 0, 0, 0, 1, 0, 1, 0, 0,
1, 0, 0, 0, 0, 0, 1],
dtype=int)
n_trials[-1] = len(data)
data_1 = stats.bernoulli.rvs(1.0/3, size=n_trials[-1])
data_2 = stats.bernoulli.rvs(1.0/3, size=n_trials[-1])
print(data)
print(type(data))
x = np.linspace(0, 1, 100)
# For the already prepared, I'm using Binomial's conj. prior.
for k, N in enumerate(n_trials):
sx = plt.subplot(len(n_trials)/2, 2, k+1)
plt.xlabel("$p$, probability of heads") \
if k in [0, len(n_trials)-1] else None
plt.setp(sx.get_yticklabels(), visible=False)
heads = data[:N].sum()
heads_1 = data_1[:N].sum()
heads_2 = data_2[:N].sum()
y = dist.pdf(x, 1 + heads, 1 + N - heads)
y_1 = dist.pdf(x, 1 + heads_1, 1 + N - heads_1)
y_2 = dist.pdf(x, 1 + heads_2, 1 + N - heads_2)
plt.plot(x, y, label="observe %d tosses,\n %d heads" % (N, heads))
plt.plot(x, y_1, label="conrol g1 %d tosses,\n %d heads" % (N, heads_1))
plt.plot(x, y_2, label="control g2 %d tosses,\n %d heads" % (N, heads_2))
plt.fill_between(x, 0, y, color="#348ABD", alpha=0.2)
plt.fill_between(x, 0, y_1, color="#ffccff", alpha=0.4)
plt.fill_between(x, 0, y_1, color="#ccffcc", alpha=0.4)
plt.vlines(0.3, 0, 4, color="k", linestyles="--", lw=1)
leg = plt.legend()
leg.get_frame().set_alpha(0.4)
plt.autoscale(tight=True)
plt.suptitle("Bayesian updating of posterior probabilities",
y=1.02,
fontsize=14)
plt.tight_layout()
plt.show()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment