// Mockup of how the REPL might work.
//////////////////////////////////////////////////////////////////////////////
// BLOG use case: Want some burn-in samples at the beginning of the run.
model = Model.fromFile("burglary.blog")
evidence = new Evidence()
evidence.addFromFile("burglary.evi")
queries = new List()
sampler = new blog.sample.MHSampler(model, evidence, queries)
engine = new blog.engine.SamplingEngine(sampler)
// Burn-in: Since no queries are set yet, no results are recorded.
engine.sample(1000)
/* Output:
Running for 1000 samples...
Samples done: 1000. Time elapsed: 0.175 s.
======== MH Trial Stats ========
Fraction of proposals accepted (this trial): 1.0
Fraction of proposals accepted (running avg, all trials): 1.0
Time spent computing acceptance probs: 0.018 s
Time spent updating world: 0.009 s
===== blog.sample.GenericProposer Stats ====
Initial world attempts: 1
Running average (for trials so far): 1.0
*/
// Now set some queries, to accumulate results.
myquery = queries.addQuery("Burglary")
engine.sample(10000)
/* Output:
Running for 10000 samples...
Query Reporting interval is 10000
Samples done: 1000. Time elapsed: 0.176 s.
Samples done: 2000. Time elapsed: 0.27 s.
Samples done: 3000. Time elapsed: 0.37 s.
Samples done: 4000. Time elapsed: 0.429 s.
Samples done: 5000. Time elapsed: 0.498 s.
Samples done: 6000. Time elapsed: 0.562 s.
Samples done: 7000. Time elapsed: 0.601 s.
Samples done: 8000. Time elapsed: 0.623 s.
Samples done: 9000. Time elapsed: 0.64 s.
Samples done: 10000. Time elapsed: 0.661 s.
======== MH Trial Stats ========
Fraction of proposals accepted (this trial): 0.9811
Fraction of proposals accepted (running avg, all trials): 0.9811
Time spent computing acceptance probs: 0.059 s
Time spent updating world: 0.017 s
===== blog.sample.GenericProposer Stats ====
Initial world attempts: 1
Running average (for trials so far): 1.0
*/
// Print query results to stdout. (Should provide a shortcut for this.)
writer = new TableWriter(myquery)
writer.writeResults(System.out)
/* Output:
Distribution of values for Burglary
0.7233733088174801 false
0.2766266911825274 true
*/
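// One possible shortcut for the above (hypothetical; not defined anywhere in
// this mockup): let the query print itself, hiding the TableWriter plumbing.
myquery.printResults()  // might be sugar for new TableWriter(myquery).writeResults(System.out)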
// Write results to file.
writer = new blog.io.JsonWriter(myquery)
writer.writeResults("out.json")
//////////////////////////////////////////////////////////////////////////////
// BLOG use case: Want to inspect MH initial world and sampled worlds.
model = Model.fromFile("burglary.blog")
evidence = new Evidence()
evidence.addFromFile("burglary.evi")
queries = new List()
sampler = new blog.sample.MHSampler(model, evidence, queries)
engine = new blog.engine.SamplingEngine(sampler)
// At this point the sampler is initialized,
// and we can ask about its initial world.
val initialWorld = sampler.getInitialWorld()
initialWorld.get("Earthquake")
/* Output:
Boolean = false
*/
// Sample one world:
val (world, loglik) = engine.sampleOne()
// Inspect variables:
world.get("Earthquake")
/* Output:
Boolean = true
*/
world.get("Alarm")
/* Output:
Boolean = false
*/
// Evaluate an expression:
world.eval("JohnCalls | MaryCalls")
/* Output:
Boolean = true
*/
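// A possible extension of this pattern (sketch, reusing only sampleOne() and
// eval() from above): average eval() over many sampled worlds to get a rough
// Monte Carlo estimate of how often an expression holds.
val n = 100
val hits = (1 to n).count { _ =>
  val (world, _) = engine.sampleOne()
  world.eval("JohnCalls | MaryCalls") == true
}
println(hits.toDouble / n)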
//////////////////////////////////////////////////////////////////////////////
// BLOG use case: Let's say all worlds have tiny likelihood, and we want to
// know why. So we use LWSampler and sample one variable at a time.
model = Model.fromFile("burglary.blog")
evidence = new Evidence()
evidence.addFromFile("burglary.evi")
queries = new List()
sampler = new blog.sample.LWSampler(model, evidence, queries)
sampler.startNewSample()
/* Output:
Setting var: JohnCalls to true, replacing null
Setting var: MaryCalls to true, replacing null
World(JohnCalls = true, MaryCalls = true) has log_lik = 0.0
*/
sampler.sampleNextVariable()
/* Output:
Setting var: Earthquake to false, replacing null
World(JohnCalls = true, MaryCalls = true, Earthquake = false) has log_lik = -12.345
*/
sampler.sampleNextVariable()
/* Output:
Setting var: Alarm to false, replacing null
World(JohnCalls = true, MaryCalls = true, Earthquake = false, Alarm = false) has log_lik = -9999.99
*/
// At this point, we can see that sampling Alarm caused a big drop in
// likelihood, and we can revisit our model to figure out why.
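// A possible convenience for larger models (sketch; hasMoreVariables() and
// getLogLik() are hypothetical accessors, not part of the mockup above):
// flag the first variable whose sample drops the log-likelihood sharply.
sampler.startNewSample()
var prevLogLik = sampler.getLogLik()
while (sampler.hasMoreVariables()) {
  sampler.sampleNextVariable()
  val logLik = sampler.getLogLik()
  if (logLik - prevLogLik < -100)  // arbitrary threshold
    println("Large drop: " + prevLogLik + " -> " + logLik)
  prevLogLik = logLik
}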
//////////////////////////////////////////////////////////////////////////////
// DBLOG use case: Want to inspect particles after a number of timesteps.
// Note that we use a ParticleFilter instead of a SamplingEngine. SamplingEngine
// provides sample(n) and sampleOne(), whereas the ParticleFilter advances
// timestep by timestep.
model = Model.fromFile("car.blog")
evidence = new Evidence()
evidence.addFromFile("car.evi")
queries = new List()
engine = new blog.engine.ParticleFilter(100, model, evidence, queries)
// Run until a given timestep, and then inspect particles.
engine.runUntilTimestep(20)
/* output elided */
engine.getParticles().foreach { (p: Particle) =>
  println(p.getWorld())
  println(p.getLogLik())
  println(p.getWorld().eval("some expression"))
}
/* output elided */
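// Sketch (reuses only the accessors above): count how many particles satisfy
// an expression, as a quick unweighted diagnostic.
val satisfied = engine.getParticles().count(
  (p: Particle) => p.getWorld().eval("some expression") == true)
println(satisfied + " of " + engine.getParticles().size + " particles")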
// Run for one timestep in verbose mode.
engine.runForOneTimestep(verbose=true)
/* Output:
(This would include details about updating the individual particles, and
details about resampling, and how many particles survive, etc.)
*/
//////////////////////////////////////////////////////////////////////////////
// DBLOG use case: Evidence comes from a CSV file; we process it on the spot.
model = Model.fromFile("car.blog")
evidence = new Evidence()
val reader = CSVReader.open(new File("laser.csv"))
for ((line, lineno) <- reader.zipWithIndex) {
  val obs = // construct obs statement from line
  evidence.add(obs)
}
queries = new List()
engine = new blog.engine.ParticleFilter(100, model, evidence, queries)
engine.runUntilFinished()
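// For illustration only: one hypothetical shape for the obs construction above
// (the variable name, the column layout, and whether evidence.add() accepts a
// raw string are all made up here).
def obsFromLine(line: Seq[String], lineno: Int): String =
  "obs laserReading(@" + (lineno + 1) + ") = " + line(0) + ";"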
//////////////////////////////////////////////////////////////////////////////
// DBLOG use case: Online particle filter running on a robot.
// We plot the trajectory as time progresses.
// (Evidence and queries arrive as a stream rather than a single batch.)
model = Model.fromFile("car.blog")
evidence = new Evidence()
queries = new List()
engine = new blog.engine.ParticleFilter(100, model, evidence, queries)
while (true) {
  val evidenceNow = getEvidenceFromSensors()
  evidence.add(evidenceNow)
  val queriesNow = makeQueries(engine.getCurrentTimestep())
  queries.add(queriesNow)
  engine.runUntilNextTimestep()
  // `queriesNow` now contains results we can use:
  updatePlot(queriesNow)
}