@fehiepsi
Created February 19, 2018 05:19
hcm_fail.log
============================= test session starts ==============================
platform linux -- Python 3.5.4, pytest-3.3.2, py-1.5.2, pluggy-0.6.0
rootdir: /home/fehiepsi/pyro, inifile: setup.cfg
plugins: nbval-0.9.0
collected 1 item
tests/infer/mcmc/test_hmc.py F [100%]
=================================== FAILURES ===================================
__________ test_hmc_conjugate_gaussian[dim=10_chain-len=3_num_obs=1] ___________
fixture = <tests.infer.mcmc.test_hmc.GaussianChain object at 0x7fc05eb84ba8>
num_samples = 800, warmup_steps = 50
hmc_params = {'num_steps': 4, 'step_size': 0.5}
expected_means = [0.25, 0.5, 0.75], expected_precs = [1.33, 1, 1.33]
mean_tol = 0.06, std_tol = 0.06
    @pytest.mark.parametrize(
        'fixture, num_samples, warmup_steps, hmc_params, expected_means, expected_precs, mean_tol, std_tol',
        TEST_CASES,
        ids=TEST_IDS)
    @pytest.mark.init(rng_seed=34)
    def test_hmc_conjugate_gaussian(fixture,
                                    num_samples,
                                    warmup_steps,
                                    hmc_params,
                                    expected_means,
                                    expected_precs,
                                    mean_tol,
                                    std_tol):
        hmc_kernel = HMC(fixture.model, **hmc_params)
        mcmc_run = MCMC(hmc_kernel, num_samples, warmup_steps)
        pyro.get_param_store().clear()
        post_trace = defaultdict(list)
        for t, _ in mcmc_run._traces(fixture.data):
            for i in range(1, fixture.chain_len + 1):
                param_name = 'mu_' + str(i)
                post_trace[param_name].append(t.nodes[param_name]['value'])
        for i in range(1, fixture.chain_len + 1):
            param_name = 'mu_' + str(i)
            latent_mu = torch.mean(torch.stack(post_trace[param_name]), 0)
            latent_std = torch.std(torch.stack(post_trace[param_name]), 0)
            expected_mean = Variable(torch.ones_like(torch.Tensor(fixture.dim)) * expected_means[i - 1])
            expected_std = 1 / torch.sqrt(Variable(torch.ones_like(torch.Tensor(fixture.dim)) * expected_precs[i - 1]))
            # Actual vs expected posterior means for the latents
            logger.info('Posterior mean (actual) - {}'.format(param_name))
            logger.info(latent_mu)
            logger.info('Posterior mean (expected) - {}'.format(param_name))
            logger.info(expected_mean)
            assert_equal(rmse(latent_mu, expected_mean).data[0], 0.0, prec=mean_tol)
            # Actual vs expected posterior precisions for the latents
            logger.info('Posterior std (actual) - {}'.format(param_name))
            logger.info(latent_std)
            logger.info('Posterior std (expected) - {}'.format(param_name))
            logger.info(expected_std)
>           assert_equal(rmse(latent_std, expected_std).data[0], 0.0, prec=std_tol)
tests/infer/mcmc/test_hmc.py:157:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
x = 0.06356138968201844, y = 0.0, prec = 0.06
msg = '0.06356138968201844 vs 0.0'
    def assert_equal(x, y, prec=1e-5, msg=''):
        x, y = _unwrap_variables(x, y)
        if torch.is_tensor(x) and torch.is_tensor(y):
            assert_equal(x.is_sparse, y.is_sparse, prec, msg)
            if x.is_sparse:
                x = _safe_coalesce(x)
                y = _safe_coalesce(y)
                assert_tensors_equal(x._indices(), y._indices(), prec, msg)
                assert_tensors_equal(x._values(), y._values(), prec, msg)
            else:
                assert_tensors_equal(x, y, prec, msg)
        elif type(x) == np.ndarray and type(y) == np.ndarray:
            if prec == 0:
                assert (x == y).all(), msg
            else:
                assert_allclose(x, y, atol=prec, equal_nan=True)
        elif isinstance(x, numbers.Number) and isinstance(y, numbers.Number):
            if not msg:
                msg = '{} vs {}'.format(x, y)
            if prec == 0:
                assert x == y, msg
            else:
>               assert x == approx(y, abs=prec), msg
E AssertionError: 0.06356138968201844 vs 0.0
tests/common.py:205: AssertionError
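The scalar branch of assert_equal above is what fires here: with prec = std_tol = 0.06, the check reduces to pytest's approx with an absolute tolerance. A minimal standalone check of the reported values, using only the numbers from the assertion message:

    import pytest

    rmse_value = 0.06356138968201844  # value from the assertion message above
    std_tol = 0.06                    # prec passed to assert_equal

    # approx(0.0, abs=0.06) accepts anything within +/- 0.06 of zero;
    # 0.0636 lies just outside that band, so the assertion raises.
    assert rmse_value != pytest.approx(0.0, abs=std_tol)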
----------------------------- Captured stderr call -----------------------------
INFO Starting MCMC using kernel - HMC ...
INFO Iteration: 43.
INFO Acceptance rate: 0.9302325581395349
INFO Iteration: 86.
INFO Acceptance rate: 0.8372093023255814
INFO Iteration: 129.
INFO Acceptance rate: 0.8449612403100775
INFO Iteration: 172.
INFO Acceptance rate: 0.8313953488372093
INFO Iteration: 215.
INFO Acceptance rate: 0.8232558139534883
INFO Iteration: 258.
INFO Acceptance rate: 0.7984496124031008
INFO Iteration: 301.
INFO Acceptance rate: 0.813953488372093
INFO Iteration: 344.
INFO Acceptance rate: 0.811046511627907
INFO Iteration: 387.
INFO Acceptance rate: 0.8217054263565892
INFO Iteration: 430.
INFO Acceptance rate: 0.8232558139534883
INFO Iteration: 473.
INFO Acceptance rate: 0.828752642706131
INFO Iteration: 516.
INFO Acceptance rate: 0.8236434108527132
INFO Iteration: 559.
INFO Acceptance rate: 0.817531305903399
INFO Iteration: 602.
INFO Acceptance rate: 0.8089700996677741
INFO Iteration: 645.
INFO Acceptance rate: 0.813953488372093
INFO Iteration: 688.
INFO Acceptance rate: 0.8125
INFO Iteration: 731.
INFO Acceptance rate: 0.8166894664842681
INFO Iteration: 774.
INFO Acceptance rate: 0.8178294573643411
INFO Iteration: 817.
INFO Acceptance rate: 0.813953488372093
INFO Posterior mean (actual) - mu_1
INFO Variable containing:
0.2942
0.2242
0.2814
0.2379
0.2083
0.2221
0.2610
0.1957
0.2502
0.2174
[torch.DoubleTensor of size (10,)]
INFO Posterior mean (expected) - mu_1
INFO Variable containing:
0.2500
0.2500
0.2500
0.2500
0.2500
0.2500
0.2500
0.2500
0.2500
0.2500
[torch.DoubleTensor of size (10,)]
INFO Posterior std (actual) - mu_1
INFO Variable containing:
0.9765
0.7582
0.8839
0.9287
0.8342
0.9533
0.8529
0.9121
0.8414
0.9004
[torch.DoubleTensor of size (10,)]
INFO Posterior std (expected) - mu_1
INFO Variable containing:
0.8671
0.8671
0.8671
0.8671
0.8671
0.8671
0.8671
0.8671
0.8671
0.8671
[torch.DoubleTensor of size (10,)]
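Assuming rmse in tests/common.py is the usual root-mean-square error (its definition is not shown in this log), the failing value can be reproduced to rounding from the posterior stds printed above for mu_1:

    import math

    actual_std = [0.9765, 0.7582, 0.8839, 0.9287, 0.8342,
                  0.9533, 0.8529, 0.9121, 0.8414, 0.9004]
    expected_std = 0.8671  # printed expected posterior std for mu_1

    # root-mean-square error over the 10 dimensions
    rmse = math.sqrt(sum((a - expected_std) ** 2 for a in actual_std) / len(actual_std))
    print(round(rmse, 4))  # ~0.0636, just over std_tol = 0.06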
------------------------------ Captured log call -------------------------------
mcmc.py 37 INFO Starting MCMC using kernel - HMC ...
mcmc.py 42 INFO Iteration: 43.
mcmc.py 45 INFO Acceptance rate: 0.9302325581395349
mcmc.py 42 INFO Iteration: 86.
mcmc.py 45 INFO Acceptance rate: 0.8372093023255814
mcmc.py 42 INFO Iteration: 129.
mcmc.py 45 INFO Acceptance rate: 0.8449612403100775
mcmc.py 42 INFO Iteration: 172.
mcmc.py 45 INFO Acceptance rate: 0.8313953488372093
mcmc.py 42 INFO Iteration: 215.
mcmc.py 45 INFO Acceptance rate: 0.8232558139534883
mcmc.py 42 INFO Iteration: 258.
mcmc.py 45 INFO Acceptance rate: 0.7984496124031008
mcmc.py 42 INFO Iteration: 301.
mcmc.py 45 INFO Acceptance rate: 0.813953488372093
mcmc.py 42 INFO Iteration: 344.
mcmc.py 45 INFO Acceptance rate: 0.811046511627907
mcmc.py 42 INFO Iteration: 387.
mcmc.py 45 INFO Acceptance rate: 0.8217054263565892
mcmc.py 42 INFO Iteration: 430.
mcmc.py 45 INFO Acceptance rate: 0.8232558139534883
mcmc.py 42 INFO Iteration: 473.
mcmc.py 45 INFO Acceptance rate: 0.828752642706131
mcmc.py 42 INFO Iteration: 516.
mcmc.py 45 INFO Acceptance rate: 0.8236434108527132
mcmc.py 42 INFO Iteration: 559.
mcmc.py 45 INFO Acceptance rate: 0.817531305903399
mcmc.py 42 INFO Iteration: 602.
mcmc.py 45 INFO Acceptance rate: 0.8089700996677741
mcmc.py 42 INFO Iteration: 645.
mcmc.py 45 INFO Acceptance rate: 0.813953488372093
mcmc.py 42 INFO Iteration: 688.
mcmc.py 45 INFO Acceptance rate: 0.8125
mcmc.py 42 INFO Iteration: 731.
mcmc.py 45 INFO Acceptance rate: 0.8166894664842681
mcmc.py 42 INFO Iteration: 774.
mcmc.py 45 INFO Acceptance rate: 0.8178294573643411
mcmc.py 42 INFO Iteration: 817.
mcmc.py 45 INFO Acceptance rate: 0.813953488372093
test_hmc.py 146 INFO Posterior mean (actual) - mu_1
test_hmc.py 147 INFO Variable containing:
0.2942
0.2242
0.2814
0.2379
0.2083
0.2221
0.2610
0.1957
0.2502
0.2174
[torch.DoubleTensor of size (10,)]
test_hmc.py 148 INFO Posterior mean (expected) - mu_1
test_hmc.py 149 INFO Variable containing:
0.2500
0.2500
0.2500
0.2500
0.2500
0.2500
0.2500
0.2500
0.2500
0.2500
[torch.DoubleTensor of size (10,)]
test_hmc.py 153 INFO Posterior std (actual) - mu_1
test_hmc.py 154 INFO Variable containing:
0.9765
0.7582
0.8839
0.9287
0.8342
0.9533
0.8529
0.9121
0.8414
0.9004
[torch.DoubleTensor of size (10,)]
test_hmc.py 155 INFO Posterior std (expected) - mu_1
test_hmc.py 156 INFO Variable containing:
0.8671
0.8671
0.8671
0.8671
0.8671
0.8671
0.8671
0.8671
0.8671
0.8671
[torch.DoubleTensor of size (10,)]
=========================== 1 failed in 4.06 seconds ===========================
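For reference, the expected posterior std of 0.8671 printed above follows from the parametrized precision expected_precs[0] = 1.33 via expected_std = 1 / sqrt(precision), as computed in the test body:

    import math

    expected_prec = 1.33
    print(round(1 / math.sqrt(expected_prec), 4))  # 0.8671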