Last active
September 21, 2015 08:42
-
-
Save wernsey/60f27c181876437ff8ce to your computer and use it in GitHub Desktop.
A Java port of Chris Meyers' Prolog interpreter
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
child(X) :- boy(X).
child(X) :- girl(X).
girl(alice).
boy(alex).
trace on
child(Q)?
boy(bill)
mother(alice,bill)
child(X,Y) :- mother(Y,X)
child(X,Y) :- father(Y,X)
son(X,Y) :- child(X,Y),boy(X)
son(X,alice)?
# These will cause infinite loops.
# According to Davis [2] it is to be expected
# and you're supposed to work around it.
edge(a,b).
edge(b,c).
edge(a,d).
path(X, Y) :- edge(X, Y).
path(X, Y) :- path(X, Z), edge(Z, Y).
path(a,d)?
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/* | |
Simple Java Prolog engine. | |
This is basically a Java port of Chris Meyers' Python Prolog interpreter [1]. | |
This currently only contains the first part, but maybe I'll port the remainder at | |
a later stage. | |
I actually wanted a Datalog [5] engine, but ended up going with the Prolog engine | |
since Datalog is a Prolog subset. | |
References: | |
[1] Prolog in Python. Version 1 Copyright (c) 2009-2015 Chris Meyers | |
http://www.openbookproject.net/py4fun/prolog/intro.html | |
[2] Inference in Datalog, Ernest Davis | |
http://cs.nyu.edu/faculty/davise/ai/datalog.html | |
[3] mixu/datalog.js | |
https://github.com/mixu/datalog.js | |
[4] https://en.wikipedia.org/wiki/Prolog | |
[5] https://en.wikipedia.org/wiki/Datalog | |
[6] http://www.ccs.neu.edu/home/ramsdell/tools/datalog/index.html | |
Compile/Run: | |
> javac -Xlint:unchecked JProlog.java | |
> java JProlog | |
Changes: | |
- I replaced the parser that relied on String.split() with one based on | |
StreamTokenizer. | |
*/ | |
import java.util.List; | |
import java.util.LinkedList; | |
import java.util.ArrayList; | |
import java.util.Map; | |
import java.util.HashMap; | |
import java.util.Set; | |
import java.util.HashSet; | |
import java.util.Deque; | |
import java.util.ArrayDeque; | |
import java.io.StreamTokenizer; | |
import java.io.StringReader; | |
import java.io.BufferedReader; | |
import java.io.InputStreamReader; | |
import java.io.IOException; | |
/**
 * A simple Prolog engine: a Java port of Chris Meyers' Python Prolog
 * interpreter. Facts and rules are entered interactively (see main());
 * a line ending in '?' is treated as a query and solved by a depth-first
 * search over the rule base using unification.
 */
class JProlog {

    /** The rule base: rules and facts (rules with an empty body), in entry order. */
    List<Rule> rules;

    /** When true, the solver logs the pushes/pops of its goal stack. */
    boolean trace = false;

    /** Id handed out to each new Goal; reset to 0 at the start of every search. */
    static int goalId = 100;

    /**
     * Crude brake on runaway searches: search() gives up once this many goals
     * have been created. Needed because left-recursive rule sets (e.g. path/2
     * defined in terms of itself) would otherwise loop forever.
     */
    static final int MAX_GOALS = 20;

    public JProlog() {
        rules = new LinkedList<Rule>();
    }

    /** Thrown when a rule, term or query cannot be parsed. */
    static class ParseException extends Exception {
        private static final long serialVersionUID = 1L;

        public ParseException(String message) {
            super(message);
        }
        public ParseException(Exception cause) {
            super(cause);
        }
    }

    /** An atom is either a constant (lowercase) or a variable (capitalised). */
    enum AtomType { CONSTANT, VARIABLE }

    /**
     * The smallest syntactic unit: a constant like {@code alice} or a variable
     * like {@code X}. Effectively immutable, and usable as a HashMap key
     * (equals/hashCode are consistent).
     */
    static class Atom {
        AtomType type;
        String value;

        Atom(String v) {
            // Prolog convention: identifiers starting with a capital letter are variables.
            this.type = looksLikeVariable(v) ? AtomType.VARIABLE : AtomType.CONSTANT;
            this.value = v;
        }

        @Override
        public boolean equals(Object other) {
            // instanceof is false for null, so no separate null check is needed.
            return other instanceof Atom
                    && ((Atom) other).type == type
                    && ((Atom) other).value.equals(value);
        }

        @Override
        public int hashCode() {
            return type.hashCode() + value.hashCode();
        }

        static boolean looksLikeVariable(String value) {
            return Character.isUpperCase(value.charAt(0));
        }

        @Override
        public String toString() {
            return value;
        }

        /** A lone atom is ground iff it is not a variable. */
        public boolean isGround() {
            return type != AtomType.VARIABLE;
        }
    }

    /** A compound term: a predicate applied to atoms, e.g. {@code mother(alice, bill)}. */
    static class Term {
        String predicate;
        List<Atom> terms;

        Term(String predicate, String... T) {
            this.predicate = predicate;
            this.terms = new ArrayList<Atom>(T.length);
            for(String ti : T) {
                this.terms.add(new Atom(ti));
            }
        }

        Term(String predicate, List<Atom> terms) {
            this.predicate = predicate;
            this.terms = terms;
        }

        /** Number of arguments; p(a,b) has arity 2. */
        int arity() { return terms.size(); }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append(predicate).append('(');
            for(int i = 0; i < terms.size(); i++) {
                sb.append(terms.get(i));
                if(i < terms.size() - 1)
                    sb.append(", ");
            }
            sb.append(')');
            return sb.toString();
        }

        /** True iff no argument is a variable. */
        public boolean isGround() {
            for(Atom term : terms) {
                if(!term.isGround())
                    return false;
            }
            return true;
        }

        @Override
        public boolean equals(Object other) {
            // instanceof handles null.
            if(!(other instanceof Term))
                return false;
            Term that = (Term) other;
            if(!this.predicate.equals(that.predicate))
                return false;
            if(arity() != that.arity())
                return false;
            for(int i = 0; i < terms.size(); i++) {
                if(!terms.get(i).equals(that.terms.get(i)))
                    return false;
            }
            return true;
        }

        @Override
        public int hashCode() {
            int hash = predicate.hashCode();
            for(Atom term : terms) {
                hash += term.hashCode();
            }
            return hash;
        }

        /**
         * Parses {@code pred(arg1, arg2, ...)} from the tokenizer. A nullary
         * term must still be written with parentheses: {@code pred()}.
         *
         * @throws ParseException on malformed input or an underlying I/O error
         */
        static Term parse(StreamTokenizer scan) throws ParseException {
            try {
                if(scan.nextToken() != StreamTokenizer.TT_WORD)
                    throw new ParseException("predicate expected");
                String pred = scan.sval;
                if(scan.nextToken() != '(')
                    throw new ParseException("'(' expected");
                List<Atom> args = new ArrayList<Atom>();
                if(scan.nextToken() != ')') {
                    scan.pushBack();
                    do {
                        if(scan.nextToken() != StreamTokenizer.TT_WORD)
                            throw new ParseException("predicate expected");
                        args.add(new Atom(scan.sval));
                    } while(scan.nextToken() == ',');
                    if(scan.ttype != ')') {
                        throw new ParseException("')' expected");
                    }
                }
                return new Term(pred, args);
            } catch (IOException e) {
                throw new ParseException(e);
            }
        }
    }

    /**
     * A Horn clause: {@code head :- body}. A fact is simply a rule whose body
     * is empty.
     */
    static class Rule {
        Term head;
        List<Term> body;

        /**
         * Parses either {@code head.} (a fact) or {@code head :- b1, b2, ...}.
         * Leaves the terminating token ('.', '?' or EOF) in {@code scan.ttype}
         * for the caller to inspect.
         */
        static Rule parse(StreamTokenizer scan) throws ParseException {
            try {
                Term head = Term.parse(scan);
                List<Term> body = new ArrayList<Term>();
                if(scan.nextToken() == ':') {
                    if(scan.nextToken() != '-') {
                        throw new ParseException("':-' expected");
                    }
                    do {
                        Term arg = Term.parse(scan);
                        body.add(arg);
                    } while(scan.nextToken() == ',');
                } else {
                    scan.pushBack();
                }
                return new Rule(head, body);
            } catch (IOException e) {
                throw new ParseException(e);
            }
        }

        Rule(Term head, Term... body) {
            this.head = head;
            this.body = new ArrayList<Term>(body.length);
            for(Term e : body) {
                this.body.add(e);
            }
        }

        Rule(Term head, List<Term> body) {
            this.head = head;
            this.body = body;
        }

        @Override
        public String toString() {
            StringBuilder sb = new StringBuilder();
            sb.append(head);
            // BUG FIX: don't print a dangling " :- " for facts (empty body).
            if(!body.isEmpty()) {
                sb.append(" :- ");
                for(int i = 0; i < body.size(); i++) {
                    sb.append(body.get(i));
                    if(i < body.size() - 1)
                        sb.append(", ");
                }
            }
            return sb.toString();
        }
    }

    /**
     * A node in the solver's search: the rule being proven, the index of the
     * subgoal currently being worked on, the variable bindings accumulated so
     * far, and the goal that spawned this one (null for the query itself).
     */
    static class Goal {
        int id;
        Rule rule;
        Goal parent;
        Map<Atom, Atom> env;
        int inx;        // index of the next body term of 'rule' to prove

        Goal(Rule rule, Goal parent, Map<Atom, Atom> env) {
            goalId++;
            this.id = goalId;
            this.rule = rule;
            this.parent = parent;
            this.inx = 0;
            // The original Python deep-copies the environment; a shallow copy
            // suffices here because Atoms are never mutated after construction.
            this.env = (env == null) ? new HashMap<Atom, Atom>() : new HashMap<Atom, Atom>(env);
        }

        /** Copy constructor — stands in for Python's deepcopy. */
        Goal(Goal that) {
            this.id = that.id;
            this.rule = that.rule;
            this.parent = that.parent;
            this.inx = that.inx;
            this.env = (that.env == null) ? null : new HashMap<Atom, Atom>(that.env);
        }

        @Override
        public String toString() {
            // Reuse envToString() rather than duplicating its formatting.
            return "Goal " + id + " rule=" + rule + " inx=" + inx + " env=" + envToString(env);
        }
    }

    /**
     * Attempts to unify srcTerm (interpreted under srcEnv) with destTerm,
     * extending destEnv with any new variable bindings.
     *
     * Notes on semantics (inherited from the Python original): unbound source
     * variables are simply skipped rather than bound, and destEnv may be left
     * partially updated when unification fails.
     *
     * @return true if the two terms unify
     */
    static boolean unify(Term srcTerm, Map<Atom, Atom> srcEnv, Term destTerm, Map<Atom, Atom> destEnv) {
        if(!srcTerm.predicate.equals(destTerm.predicate) || srcTerm.arity() != destTerm.arity()) {
            return false;
        }
        for(int i = 0; i < srcTerm.arity(); i++) {
            Atom srcArg = srcTerm.terms.get(i);
            Atom destArg = destTerm.terms.get(i);
            // Resolve the source argument: variables are looked up, constants stand for themselves.
            Atom srcVal = (srcArg.type == AtomType.VARIABLE) ? srcEnv.get(srcArg) : srcArg;
            if(srcVal == null) {
                continue; // unbound source variable: nothing to check or bind
            }
            if(destArg.type == AtomType.VARIABLE) {
                Atom destVal = destEnv.get(destArg);
                if(destVal == null) {
                    destEnv.put(destArg, srcVal);
                } else if(!destVal.equals(srcVal)) {
                    // BUG FIX: compare the existing binding against the
                    // *resolved* source value, not the raw source atom
                    // (the original compared against srcArg, which wrongly
                    // failed when the source argument was a bound variable).
                    return false;
                }
            } else if(!destArg.equals(srcVal)) {
                return false;
            }
        }
        return true;
    }

    /**
     * Depth-first search for solutions to 'term' against the rule base,
     * printing the bindings of each solution as it is found.
     *
     * @return true if at least one solution was found
     */
    private boolean search(Term term) {
        boolean found = false;
        goalId = 0;
        if(trace)
            System.out.println("search: " + term);
        // Wrap the query in a dummy rule (head == null) so it fits the Goal machinery.
        Goal goal = new Goal(new Rule(null, term), null, null);
        Deque<Goal> stack = new ArrayDeque<Goal>();
        stack.addFirst(goal);
        while (!stack.isEmpty()) {
            Goal c = stack.removeFirst();
            if(trace)
                System.out.println(" pop: " + c);
            if (c.rule == null || c.inx >= c.rule.body.size()) {
                // Every subgoal of c's rule has been proven.
                found = true;
                if(c.parent == null) {
                    // c was the query itself: report the solution.
                    if (c.env != null) {
                        System.out.println(envToString(c.env));
                    } else {
                        System.out.println("Yes");
                    }
                    continue;
                }
                // Propagate c's bindings up into a copy of the parent and
                // advance the parent past the subgoal just proven.
                Goal parent = new Goal(c.parent);
                unify(c.rule.head, c.env, parent.rule.body.get(parent.inx), parent.env);
                parent.inx = parent.inx + 1;
                if(trace)
                    System.out.println(" stack: " + parent);
                stack.addFirst(parent);
                continue;
            }
            // Expand the current subgoal against every rule with a matching head.
            term = c.rule.body.get(c.inx);
            for(Rule rule : rules) {
                if(!rule.head.predicate.equals(term.predicate) || rule.head.arity() != term.arity())
                    continue;
                Goal child = new Goal(rule, c, new HashMap<Atom, Atom>());
                boolean ans = unify(term, c.env, rule.head, child.env);
                if(trace)
                    System.out.println(" unify: " + term + " vs " + rule.head + ": " + ans );
                if (ans) {
                    if(trace)
                        System.out.println(" stack: " + child);
                    stack.addFirst(child);
                }
            }
            // Bail out of runaway (e.g. left-recursive) searches.
            if(goalId > MAX_GOALS) break;
        }
        return found;
    }

    //public boolean search(List<Term> goals) {}
    //public boolean search(Term ... goals) {}

    /** Parses a single-term query string and searches for it. */
    private boolean search(String goalString) {
        try {
            StreamTokenizer scan = new StreamTokenizer(new StringReader(goalString));
            Term term = Term.parse(scan);
            return search(term);
        } catch (ParseException e) {
            e.printStackTrace();
            return false;
        }
    }

    /**
     * Interactive REPL. Commands: {@code trace on|off}, {@code dump},
     * {@code quit}/{@code exit}, plus the unification test helpers
     * {@code unify}, {@code clear} and {@code map}. Anything else is parsed
     * as a rule; a trailing '?' makes it a query instead of an assertion.
     */
    public static void main(String... args) {
        try {
            // Source environment used by the 'unify'/'clear'/'map' test commands.
            Map<Atom, Atom> srcEnv = new HashMap<Atom, Atom>();
            JProlog jProlog = new JProlog();
            BufferedReader buffer = new BufferedReader(new InputStreamReader(System.in));
            while(true) {
                System.out.print("> ");
                String line = buffer.readLine();
                if(line == null) {
                    break; // EOF
                }
                StreamTokenizer scan = new StreamTokenizer(new StringReader(line));
                scan.ordinaryChar('.'); // otherwise '.' looks like part of a number
                scan.nextToken();
                if(scan.ttype == StreamTokenizer.TT_EOF) {
                    continue;
                } else if(scan.ttype != StreamTokenizer.TT_WORD) {
                    System.err.println("command expected");
                    continue;
                }
                if(scan.sval.equalsIgnoreCase("trace")) {
                    if(scan.nextToken() != StreamTokenizer.TT_WORD) {
                        System.out.println("on/off expected");
                        continue;
                    }
                    jProlog.trace = scan.sval.equalsIgnoreCase("on");
                } else if(scan.sval.equalsIgnoreCase("dump")) {
                    for(Rule rule : jProlog.rules) {
                        System.out.println(rule);
                    }
                } else if(scan.sval.equalsIgnoreCase("quit") || scan.sval.equalsIgnoreCase("exit")) {
                    break;
                } else if(scan.sval.equalsIgnoreCase("unify")) {
                    // For testing unification: Unify two terms
                    try {
                        Term left = Term.parse(scan);
                        Term right = Term.parse(scan);
                        System.out.println("unify " + left + " vs " + right);
                        Map<Atom, Atom> destEnv = new HashMap<Atom, Atom>();
                        if(unify(left, srcEnv, right, destEnv)) {
                            if(!destEnv.isEmpty()) {
                                System.out.println("Yes " + envToString(destEnv));
                            } else {
                                System.out.println("Yes");
                            }
                        } else {
                            System.out.println("No");
                        }
                    } catch(ParseException e) {
                        e.printStackTrace();
                    }
                } else if(scan.sval.equalsIgnoreCase("clear")) {
                    // For testing unification: Clear the source environment
                    srcEnv = new HashMap<Atom, Atom>();
                    System.out.println("srcEnv " + envToString(srcEnv));
                } else if(scan.sval.equalsIgnoreCase("map")) {
                    // For testing unification: Add something to the source environment
                    try {
                        if(scan.nextToken() != StreamTokenizer.TT_WORD)
                            throw new IOException("Expected atom (left)");
                        Atom left = new Atom(scan.sval);
                        if(scan.nextToken() != StreamTokenizer.TT_WORD)
                            throw new IOException("Expected atom (right)");
                        Atom right = new Atom(scan.sval);
                        srcEnv.put(left, right);
                        System.out.println("srcEnv " + envToString(srcEnv));
                    } catch (IOException e) {
                        e.printStackTrace();
                    }
                } else {
                    /*
                    FIXME: There is no way to express queries like
                        child(alice, X),child(X, bob)?
                    with this parser (or how the Rule's are laid out).
                    */
                    try {
                        scan.pushBack();
                        Rule rule = Rule.parse(scan);
                        if(scan.ttype == '?') {
                            jProlog.search(rule.head);
                        } else {
                            if (scan.ttype != '.' && scan.ttype != StreamTokenizer.TT_EOF) {
                                throw new ParseException("EOL expected");
                            }
                            jProlog.rules.add(rule);
                        }
                    } catch(ParseException e) {
                        e.printStackTrace();
                    }
                }
            }
        } catch(Exception e) {
            e.printStackTrace();
        }
    }

    /** Formats a variable-binding environment as {@code {K: V, K: V, }}. */
    static String envToString(Map<Atom, Atom> env) {
        StringBuilder sb = new StringBuilder("{");
        for(Atom k : env.keySet()) {
            Atom v = env.get(k);
            sb.append(k).append(": ").append(v).append(", ");
        }
        sb.append("}");
        return sb.toString();
    }
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment