Created
September 5, 2013 19:28
-
-
Save szarnyasg/6454936 to your computer and use it in GitHub Desktop.
Faunus (0.3.2) console output for a job collecting edge-type instances from a Titan/Cassandra graph
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
$ time ~/faunus-0.3.2/bin/gremlin.sh < faunus-edgetypes.txt | |
\,,,/ | |
(o o) | |
-----oOOo-(_)-oOOo----- | |
gremlin> //Faunus load sequence | |
==>true | |
gremlin> //==================== | |
==>true | |
gremlin> | |
gremlin> hostname = "localhost"; | |
==>localhost | |
gremlin> | |
gremlin> graphSonFilename = "testBig_User_16.faunus-graphson"; | |
==>testBig_User_16.faunus-graphson | |
gremlin> loadProperties = "cassandra-load.properties"; | |
==>cassandra-load.properties | |
gremlin> | |
gremlin> conf = new BaseConfiguration(); | |
==>org.apache.commons.configuration.BaseConfiguration@1581392b | |
gremlin> conf.setProperty("storage.hostname", hostname); | |
==>null | |
gremlin> conf.setProperty("storage.backend", "cassandra"); | |
==>null | |
gremlin> g = TitanFactory.open(conf); | |
13/09/05 20:05:33 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:05:33 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:05:34 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:05:34 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:05:34 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:05:34 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
==>titangraph[cassandra:localhost] | |
gremlin> | |
gremlin> g.makeType().name("ROUTE_ENTRY").makeEdgeLabel(); | |
==>v[36028797018963982] | |
gremlin> g.makeType().name("ROUTE_EXIT").makeEdgeLabel(); | |
==>v[36028797018963990] | |
gremlin> g.makeType().name("ROUTE_ROUTEDEFINITION").makeEdgeLabel(); | |
==>v[36028797018963998] | |
gremlin> g.makeType().name("ROUTE_SWITCHPOSITION").makeEdgeLabel(); | |
==>v[36028797018964006] | |
gremlin> g.makeType().name("TRACKELEMENT_SENSOR").makeEdgeLabel(); | |
==>v[36028797018964014] | |
gremlin> g.makeType().name("TRACKELEMENT_CONNECTSTO").makeEdgeLabel(); | |
==>v[36028797018964022] | |
gremlin> g.makeType().name("SWITCHPOSITION_SWITCH").makeEdgeLabel(); | |
==>v[36028797018964030] | |
gremlin> | |
gremlin> g.makeType().name("idx").dataType(Integer.class).unique(OUT).makePropertyKey(); | |
==>v[36028797018964034] | |
gremlin> g.makeType().name("name").dataType(String.class).unique(OUT).makePropertyKey(); | |
==>v[36028797018964042] | |
gremlin> g.makeType().name("Segment_length").dataType(Integer.class).unique(OUT).makePropertyKey(); | |
==>v[36028797018964050] | |
gremlin> g.makeType().name("Signal_actualState").dataType(String.class).unique(OUT).makePropertyKey(); | |
==>v[36028797018964058] | |
gremlin> g.makeType().name("Switch_actualState").dataType(Integer.class).unique(OUT).makePropertyKey(); | |
==>v[36028797018964066] | |
gremlin> g.makeType().name("SwitchPosition_switchState").dataType(Integer.class).unique(OUT).makePropertyKey(); | |
==>v[36028797018964074] | |
gremlin> g.makeType().name("type").dataType(String.class).indexed(Vertex.class).unique(OUT).makePropertyKey(); | |
==>v[36028797018964082] | |
gremlin> | |
gremlin> g.stopTransaction(SUCCESS); | |
==>null | |
gremlin> | |
gremlin> hdfs.ls(); | |
==>rw-r--r-- szarnyasg vcl 220 .bash_logout | |
==>rw-r--r-- szarnyasg vcl 3637 .bashrc | |
==>rwx------ szarnyasg vcl 4096 (D) .cache | |
==>rwx------ szarnyasg vcl 4096 (D) .config | |
==>rw-r--r-- szarnyasg vcl 17989 .gremlin_faunus_history | |
==>rwxr-xr-x szarnyasg vcl 4096 (D) .java | |
==>rwx------ szarnyasg vcl 4096 (D) .local | |
==>rw-r--r-- szarnyasg vcl 675 .profile | |
==>rwx------ szarnyasg vcl 4096 (D) .ssh | |
==>rwxr-xr-x szarnyasg vcl 4096 (D) apache-cassandra-1.2.9 | |
==>rw-r--r-- szarnyasg vcl 16720513 apache-cassandra-1.2.9-bin.tar.gz | |
==>rwxr-xr-x szarnyasg vcl 4096 (D) faunus-0.3.2 | |
==>rw-r--r-- szarnyasg vcl 140163533 faunus-0.3.2.zip | |
==>rwxr-xr-x szarnyasg vcl 4096 (D) faunus-edge-collector | |
==>rwxr-xr-x szarnyasg vcl 4096 (D) titan-all-0.3.2 | |
==>rw-r--r-- szarnyasg vcl 79341638 titan-all-0.3.2.zip | |
gremlin> hdfs.copyFromLocal("hdfs/" + graphSonFilename, graphSonFilename); | |
13/09/05 20:05:40 WARN util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable | |
==>null | |
gremlin> faunusConf = new Configuration(); | |
==>io.map.index.skip=0 | |
==>io.seqfile.compress.blocksize=1000000 | |
==>mapred.task.profile.maps=0-2 | |
==>keep.failed.task.files=false | |
==>mapred.tasktracker.map.tasks.maximum=2 | |
==>mapred.disk.healthChecker.interval=60000 | |
==>mapreduce.reduce.input.limit=-1 | |
==>mapred.task.tracker.http.address=0.0.0.0:50060 | |
==>mapred.map.tasks.speculative.execution=true | |
==>mapred.userlog.retain.hours=24 | |
==>webinterface.private.actions=false | |
==>fs.s3.impl=org.apache.hadoop.fs.s3.S3FileSystem | |
==>mapred.local.dir.minspacestart=0 | |
==>hadoop.native.lib=true | |
==>fs.checkpoint.edits.dir=${fs.checkpoint.dir} | |
==>ipc.server.listen.queue.size=128 | |
==>mapred.cluster.reduce.memory.mb=-1 | |
==>io.sort.spill.percent=0.80 | |
==>hadoop.http.authentication.kerberos.keytab=${user.home}/hadoop.keytab | |
==>mapred.reduce.parallel.copies=5 | |
==>tasktracker.http.threads=40 | |
==>mapred.reduce.tasks=1 | |
==>mapreduce.tasktracker.outofband.heartbeat=false | |
==>hadoop.security.authorization=false | |
==>mapreduce.ifile.readahead=true | |
==>io.file.buffer.size=4096 | |
==>mapred.min.split.size=0 | |
==>hadoop.logfile.size=10000000 | |
==>mapred.job.queue.name=default | |
==>mapred.submit.replication=10 | |
==>mapred.local.dir.minspacekill=0 | |
==>fs.webhdfs.impl=org.apache.hadoop.hdfs.web.WebHdfsFileSystem | |
==>mapred.task.profile=false | |
==>ipc.client.kill.max=10 | |
==>mapred.acls.enabled=false | |
==>mapred.heartbeats.in.second=100 | |
==>mapreduce.reduce.shuffle.read.timeout=180000 | |
==>mapred.output.compress=false | |
==>ipc.server.tcpnodelay=false | |
==>mapred.healthChecker.interval=60000 | |
==>mapred.jobtracker.blacklist.fault-bucket-width=15 | |
==>mapred.task.timeout=600000 | |
==>mapred.temp.dir=${hadoop.tmp.dir}/mapred/temp | |
==>mapred.jobtracker.taskScheduler=org.apache.hadoop.mapred.JobQueueTaskScheduler | |
==>mapred.max.tracker.blacklists=4 | |
==>mapreduce.job.counters.counter.name.max=64 | |
==>hadoop.http.authentication.token.validity=36000 | |
==>mapred.skip.reduce.max.skip.groups=0 | |
==>mapred.tasktracker.indexcache.mb=10 | |
==>mapreduce.jobtracker.staging.root.dir=${hadoop.tmp.dir}/mapred/staging | |
==>mapred.queue.default.state=RUNNING | |
==>mapred.tasktracker.dns.nameserver=default | |
==>hadoop.logfile.count=10 | |
==>mapred.tasktracker.taskmemorymanager.monitoring-interval=5000 | |
==>mapred.tasktracker.expiry.interval=600000 | |
==>hadoop.security.uid.cache.secs=14400 | |
==>mapred.skip.attempts.to.start.skipping=2 | |
==>mapreduce.reduce.shuffle.connect.timeout=180000 | |
==>map.sort.class=org.apache.hadoop.util.QuickSort | |
==>mapred.job.tracker.persist.jobstatus.active=false | |
==>mapred.tasktracker.reduce.tasks.maximum=2 | |
==>fs.ftp.impl=org.apache.hadoop.fs.ftp.FTPFileSystem | |
==>mapred.max.tracker.failures=4 | |
==>mapred.output.compression.codec=org.apache.hadoop.io.compress.DefaultCodec | |
==>jobclient.output.filter=FAILED | |
==>mapred.job.tracker.http.address=0.0.0.0:50030 | |
==>fs.file.impl=org.apache.hadoop.fs.LocalFileSystem | |
==>mapred.jobtracker.restart.recover=false | |
==>mapred.healthChecker.script.timeout=600000 | |
==>ipc.client.connection.maxidletime=10000 | |
==>mapred.local.dir=${hadoop.tmp.dir}/mapred/local | |
==>mapreduce.job.complete.cancel.delegation.tokens=true | |
==>mapred.job.tracker.persist.jobstatus.dir=/jobtracker/jobsInfo | |
==>mapred.job.tracker=local | |
==>io.sort.record.percent=0.05 | |
==>job.end.retry.attempts=0 | |
==>mapred.job.shuffle.merge.percent=0.66 | |
==>mapred.map.max.attempts=4 | |
==>mapred.reduce.tasks.speculative.execution=true | |
==>hadoop.http.authentication.signature.secret.file=${user.home}/hadoop-http-auth-signature-secret | |
==>fs.checkpoint.size=67108864 | |
==>io.skip.checksum.errors=false | |
==>mapred.job.reduce.input.buffer.percent=0.0 | |
==>hadoop.http.authentication.kerberos.principal=HTTP/localhost@LOCALHOST | |
==>fs.s3n.impl=org.apache.hadoop.fs.s3native.NativeS3FileSystem | |
==>fs.s3.maxRetries=4 | |
==>mapred.task.cache.levels=2 | |
==>mapred.output.compression.type=RECORD | |
==>hadoop.http.authentication.simple.anonymous.allowed=true | |
==>mapred.task.tracker.task-controller=org.apache.hadoop.mapred.DefaultTaskController | |
==>mapred.job.reuse.jvm.num.tasks=1 | |
==>mapred.system.dir=${hadoop.tmp.dir}/mapred/system | |
==>io.sort.factor=10 | |
==>mapred.userlog.limit.kb=0 | |
==>mapred.jobtracker.maxtasks.per.job=-1 | |
==>fs.default.name=file:/// | |
==>mapred.job.tracker.retiredjobs.cache.size=1000 | |
==>ipc.client.idlethreshold=4000 | |
==>fs.hsftp.impl=org.apache.hadoop.hdfs.HsftpFileSystem | |
==>hadoop.tmp.dir=/tmp/hadoop-${user.name} | |
==>fs.checkpoint.dir=${hadoop.tmp.dir}/dfs/namesecondary | |
==>mapred.skip.map.auto.incr.proc.count=true | |
==>fs.s3.block.size=67108864 | |
==>io.serializations=org.apache.hadoop.io.serializer.WritableSerialization | |
==>mapred.inmem.merge.threshold=1000 | |
==>hadoop.util.hash.type=murmur | |
==>io.seqfile.lazydecompress=true | |
==>mapred.job.reduce.memory.mb=-1 | |
==>mapred.skip.map.max.skip.records=0 | |
==>mapred.job.map.memory.mb=-1 | |
==>io.mapfile.bloom.size=1048576 | |
==>fs.s3.buffer.dir=${hadoop.tmp.dir}/s3 | |
==>mapred.tasktracker.dns.interface=default | |
==>mapred.reduce.max.attempts=4 | |
==>io.compression.codecs=org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.SnappyCodec | |
==>mapred.task.profile.reduces=0-2 | |
==>mapred.job.tracker.jobhistory.lru.cache.size=5 | |
==>mapred.cluster.map.memory.mb=-1 | |
==>topology.script.number.args=100 | |
==>mapred.skip.reduce.auto.incr.proc.count=true | |
==>fs.har.impl=org.apache.hadoop.fs.HarFileSystem | |
==>mapred.cluster.max.map.memory.mb=-1 | |
==>mapred.job.tracker.persist.jobstatus.hours=0 | |
==>io.seqfile.sorter.recordlimit=1000000 | |
==>mapred.reduce.slowstart.completed.maps=0.05 | |
==>fs.trash.interval=0 | |
==>hadoop.security.authentication=simple | |
==>local.cache.size=10737418240 | |
==>hadoop.security.group.mapping=org.apache.hadoop.security.ShellBasedUnixGroupsMapping | |
==>mapred.job.tracker.handler.count=10 | |
==>hadoop.security.token.service.use_ip=true | |
==>ipc.client.connect.max.retries=10 | |
==>fs.ramfs.impl=org.apache.hadoop.fs.InMemoryFileSystem | |
==>hadoop.rpc.socket.factory.class.default=org.apache.hadoop.net.StandardSocketFactory | |
==>fs.kfs.impl=org.apache.hadoop.fs.kfs.KosmosFileSystem | |
==>mapreduce.job.acl-view-job= | |
==>mapreduce.job.counters.group.name.max=128 | |
==>fs.checkpoint.period=3600 | |
==>mapred.map.output.compression.codec=org.apache.hadoop.io.compress.DefaultCodec | |
==>topology.node.switch.mapping.impl=org.apache.hadoop.net.ScriptBasedMapping | |
==>mapreduce.job.counters.groups.max=50 | |
==>job.end.retry.interval=30000 | |
==>mapred.tasktracker.tasks.sleeptime-before-sigkill=5000 | |
==>hadoop.security.use-weak-http-crypto=false | |
==>mapred.job.shuffle.input.buffer.percent=0.70 | |
==>mapred.jobtracker.completeuserjobs.maximum=100 | |
==>mapreduce.job.counters.max=120 | |
==>mapred.user.jobconf.limit=5242880 | |
==>mapred.compress.map.output=false | |
==>mapred.queue.names=default | |
==>fs.hdfs.impl=org.apache.hadoop.hdfs.DistributedFileSystem | |
==>mapred.child.java.opts=-Xmx200m | |
==>mapred.jobtracker.blacklist.fault-timeout-window=180 | |
==>mapred.merge.recordsBeforeProgress=10000 | |
==>mapred.jobtracker.job.history.block.size=3145728 | |
==>mapreduce.reduce.shuffle.maxfetchfailures=10 | |
==>io.mapfile.bloom.error.rate=0.005 | |
==>mapreduce.ifile.readahead.bytes=4194304 | |
==>mapreduce.job.split.metainfo.maxsize=10000000 | |
==>io.bytes.per.checksum=512 | |
==>mapred.child.tmp=./tmp | |
==>fs.har.impl.disable.cache=true | |
==>ipc.client.tcpnodelay=false | |
==>fs.hftp.impl=org.apache.hadoop.hdfs.HftpFileSystem | |
==>io.sort.mb=100 | |
==>hadoop.relaxed.worker.version.check=false | |
==>mapred.cluster.max.reduce.memory.mb=-1 | |
==>mapred.line.input.format.linespermap=1 | |
==>mapreduce.tasktracker.outofband.heartbeat.damper=1000000 | |
==>mapreduce.job.acl-modify-job= | |
==>mapred.combine.recordsBeforeProgress=10000 | |
==>fs.s3.sleepTimeSeconds=10 | |
==>mapred.map.tasks=2 | |
==>mapred.task.tracker.report.address=127.0.0.1:0 | |
==>hadoop.http.authentication.type=simple | |
gremlin> faunusConf.setStrings("faunus.graph.input.format", "com.thinkaurelius.faunus.formats.graphson.GraphSONInputFormat"); | |
==>null | |
gremlin> faunusConf.setStrings("faunus.input.location", graphSonFilename); | |
==>null | |
gremlin> faunusConf.setStrings("faunus.graph.output.titan.storage.hostname", hostname); | |
==>null | |
gremlin> g = FaunusFactory.open(loadProperties, faunusConf); | |
==>faunusgraph[graphsoninputformat->titancassandraoutputformat] | |
gremlin> g._; | |
13/09/05 20:05:43 INFO mapreduce.FaunusCompiler: Using the distribution Faunus job jar: /home/szarnyasg/faunus-0.3.2/lib/faunus-0.3.2-job.jar | |
13/09/05 20:05:43 INFO mapreduce.FaunusCompiler: Compiled to 1 MapReduce job(s) | |
13/09/05 20:05:43 INFO mapreduce.FaunusCompiler: Executing job 1 out of 1: MapSequence[com.thinkaurelius.faunus.mapreduce.IdentityMap.Map, com.thinkaurelius.faunus.formats.BlueprintsGraphOutputMapReduce.Map, com.thinkaurelius.faunus.formats.BlueprintsGraphOutputMapReduce.Reduce] | |
13/09/05 20:05:43 INFO mapreduce.FaunusCompiler: Job data location: output/job-0 | |
13/09/05 20:05:43 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same. | |
13/09/05 20:05:44 INFO input.FileInputFormat: Total input paths to process : 1 | |
13/09/05 20:05:44 WARN snappy.LoadSnappy: Snappy native library not loaded | |
13/09/05 20:05:44 INFO mapred.JobClient: Running job: job_local_0001 | |
13/09/05 20:05:44 INFO util.ProcessTree: setsid exited with exit code 0 | |
13/09/05 20:05:44 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@39bd3809 | |
13/09/05 20:05:44 INFO mapred.MapTask: io.sort.mb = 100 | |
13/09/05 20:05:44 INFO mapred.MapTask: data buffer = 79691776/99614720 | |
13/09/05 20:05:44 INFO mapred.MapTask: record buffer = 262144/327680 | |
13/09/05 20:05:44 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:05:44 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:05:44 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:05:44 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:05:45 INFO mapred.JobClient: map 0% reduce 0% | |
13/09/05 20:05:50 INFO mapred.LocalJobRunner: | |
13/09/05 20:05:51 INFO mapred.JobClient: map 94% reduce 0% | |
13/09/05 20:05:53 INFO mapred.LocalJobRunner: | |
13/09/05 20:05:54 INFO mapred.JobClient: map 100% reduce 0% | |
13/09/05 20:05:55 INFO mapred.MapTask: Starting flush of map output | |
13/09/05 20:05:56 INFO compress.CodecPool: Got brand-new compressor | |
13/09/05 20:05:56 INFO mapred.MapTask: Finished spill 0 | |
13/09/05 20:05:56 INFO mapred.Task: Task:attempt_local_0001_m_000000_0 is done. And is in the process of commiting | |
13/09/05 20:05:56 INFO mapred.LocalJobRunner: | |
13/09/05 20:05:56 INFO mapred.Task: Task 'attempt_local_0001_m_000000_0' done. | |
13/09/05 20:05:56 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@57b13313 | |
13/09/05 20:05:56 INFO mapred.MapTask: io.sort.mb = 100 | |
13/09/05 20:05:56 INFO mapred.MapTask: data buffer = 79691776/99614720 | |
13/09/05 20:05:56 INFO mapred.MapTask: record buffer = 262144/327680 | |
13/09/05 20:05:56 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:05:56 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:05:56 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:05:56 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:05:56 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:05:56 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:06:01 INFO mapred.MapTask: Starting flush of map output | |
13/09/05 20:06:01 INFO mapred.MapTask: Finished spill 0 | |
13/09/05 20:06:01 INFO mapred.Task: Task:attempt_local_0001_m_000001_0 is done. And is in the process of commiting | |
13/09/05 20:06:01 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:01 INFO mapred.Task: Task 'attempt_local_0001_m_000001_0' done. | |
13/09/05 20:06:01 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@2c66f922 | |
13/09/05 20:06:01 INFO mapred.MapTask: io.sort.mb = 100 | |
13/09/05 20:06:01 INFO mapred.MapTask: data buffer = 79691776/99614720 | |
13/09/05 20:06:01 INFO mapred.MapTask: record buffer = 262144/327680 | |
13/09/05 20:06:01 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:01 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:01 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:01 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:01 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:06:01 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:06:05 INFO mapred.MapTask: Starting flush of map output | |
13/09/05 20:06:06 INFO mapred.MapTask: Finished spill 0 | |
13/09/05 20:06:06 INFO mapred.Task: Task:attempt_local_0001_m_000002_0 is done. And is in the process of commiting | |
13/09/05 20:06:06 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:06 INFO mapred.Task: Task 'attempt_local_0001_m_000002_0' done. | |
13/09/05 20:06:06 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@3bd1c463 | |
13/09/05 20:06:06 INFO mapred.MapTask: io.sort.mb = 100 | |
13/09/05 20:06:06 INFO mapred.MapTask: data buffer = 79691776/99614720 | |
13/09/05 20:06:06 INFO mapred.MapTask: record buffer = 262144/327680 | |
13/09/05 20:06:06 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:06 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:06 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:06 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:06 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:06:06 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:06:10 INFO mapred.MapTask: Starting flush of map output | |
13/09/05 20:06:10 INFO mapred.MapTask: Finished spill 0 | |
13/09/05 20:06:10 INFO mapred.Task: Task:attempt_local_0001_m_000003_0 is done. And is in the process of commiting | |
13/09/05 20:06:10 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:10 INFO mapred.Task: Task 'attempt_local_0001_m_000003_0' done. | |
13/09/05 20:06:10 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@2f710284 | |
13/09/05 20:06:10 INFO mapred.MapTask: io.sort.mb = 100 | |
13/09/05 20:06:10 INFO mapred.MapTask: data buffer = 79691776/99614720 | |
13/09/05 20:06:10 INFO mapred.MapTask: record buffer = 262144/327680 | |
13/09/05 20:06:10 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:10 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:10 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:10 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:10 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:06:10 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:06:14 INFO mapred.MapTask: Starting flush of map output | |
13/09/05 20:06:15 INFO mapred.MapTask: Finished spill 0 | |
13/09/05 20:06:15 INFO mapred.Task: Task:attempt_local_0001_m_000004_0 is done. And is in the process of commiting | |
13/09/05 20:06:15 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:15 INFO mapred.Task: Task 'attempt_local_0001_m_000004_0' done. | |
13/09/05 20:06:15 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@58caec54 | |
13/09/05 20:06:15 INFO mapred.MapTask: io.sort.mb = 100 | |
13/09/05 20:06:15 INFO mapred.MapTask: data buffer = 79691776/99614720 | |
13/09/05 20:06:15 INFO mapred.MapTask: record buffer = 262144/327680 | |
13/09/05 20:06:15 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:15 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:15 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:15 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:15 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:06:15 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:06:18 INFO mapred.MapTask: Starting flush of map output | |
13/09/05 20:06:19 INFO mapred.MapTask: Finished spill 0 | |
13/09/05 20:06:19 INFO mapred.Task: Task:attempt_local_0001_m_000005_0 is done. And is in the process of commiting | |
13/09/05 20:06:19 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:19 INFO mapred.Task: Task 'attempt_local_0001_m_000005_0' done. | |
13/09/05 20:06:19 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@6eed576b | |
13/09/05 20:06:19 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:19 INFO mapred.Merger: Merging 6 sorted segments | |
13/09/05 20:06:19 INFO compress.CodecPool: Got brand-new decompressor | |
13/09/05 20:06:19 INFO compress.CodecPool: Got brand-new decompressor | |
13/09/05 20:06:19 INFO compress.CodecPool: Got brand-new decompressor | |
13/09/05 20:06:19 INFO compress.CodecPool: Got brand-new decompressor | |
13/09/05 20:06:19 INFO compress.CodecPool: Got brand-new decompressor | |
13/09/05 20:06:19 INFO compress.CodecPool: Got brand-new decompressor | |
13/09/05 20:06:19 INFO mapred.Merger: Down to the last merge-pass, with 6 segments left of total size: 2370819 bytes | |
13/09/05 20:06:19 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:19 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:19 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:19 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:19 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:19 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:06:19 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:06:25 INFO mapred.LocalJobRunner: reduce > reduce | |
13/09/05 20:06:25 INFO mapred.JobClient: map 100% reduce 100% | |
13/09/05 20:06:28 INFO mapred.LocalJobRunner: reduce > reduce | |
13/09/05 20:06:32 INFO mapred.Task: Task:attempt_local_0001_r_000000_0 is done. And is in the process of commiting | |
13/09/05 20:06:32 INFO mapred.LocalJobRunner: reduce > reduce | |
13/09/05 20:06:32 INFO mapred.Task: Task 'attempt_local_0001_r_000000_0' done. | |
13/09/05 20:06:33 INFO mapred.JobClient: Job complete: job_local_0001 | |
13/09/05 20:06:33 INFO mapred.JobClient: Counters: 30 | |
13/09/05 20:06:33 INFO mapred.JobClient: File Output Format Counters | |
13/09/05 20:06:33 INFO mapred.JobClient: Bytes Written=0 | |
13/09/05 20:06:33 INFO mapred.JobClient: FileSystemCounters | |
13/09/05 20:06:33 INFO mapred.JobClient: FILE_BYTES_READ=801542429 | |
13/09/05 20:06:33 INFO mapred.JobClient: FILE_BYTES_WRITTEN=676358174 | |
13/09/05 20:06:33 INFO mapred.JobClient: File Input Format Counters | |
13/09/05 20:06:33 INFO mapred.JobClient: Bytes Read=30125573 | |
13/09/05 20:06:33 INFO mapred.JobClient: com.thinkaurelius.faunus.formats.BlueprintsGraphOutputMapReduce$Counters | |
13/09/05 20:06:33 INFO mapred.JobClient: VERTICES_WRITTEN=87397 | |
13/09/05 20:06:33 INFO mapred.JobClient: VERTEX_PROPERTIES_WRITTEN=335800 | |
13/09/05 20:06:33 INFO mapred.JobClient: EDGES_WRITTEN=170338 | |
13/09/05 20:06:33 INFO mapred.JobClient: SUCCESSFUL_TRANSACTIONS=7 | |
13/09/05 20:06:33 INFO mapred.JobClient: com.thinkaurelius.faunus.mapreduce.IdentityMap$Counters | |
13/09/05 20:06:33 INFO mapred.JobClient: IN_EDGE_COUNT=170338 | |
13/09/05 20:06:33 INFO mapred.JobClient: VERTEX_PROPERTY_COUNT=335800 | |
13/09/05 20:06:33 INFO mapred.JobClient: OUT_EDGE_COUNT=170338 | |
13/09/05 20:06:33 INFO mapred.JobClient: VERTEX_COUNT=87397 | |
13/09/05 20:06:33 INFO mapred.JobClient: OUT_EDGE_PROPERTY_COUNT=0 | |
13/09/05 20:06:33 INFO mapred.JobClient: IN_EDGE_PROPERTY_COUNT=0 | |
13/09/05 20:06:33 INFO mapred.JobClient: Map-Reduce Framework | |
13/09/05 20:06:33 INFO mapred.JobClient: Reduce input groups=87397 | |
13/09/05 20:06:33 INFO mapred.JobClient: Map output materialized bytes=2370843 | |
13/09/05 20:06:33 INFO mapred.JobClient: Combine output records=0 | |
13/09/05 20:06:33 INFO mapred.JobClient: Map input records=87397 | |
13/09/05 20:06:33 INFO mapred.JobClient: Reduce shuffle bytes=0 | |
13/09/05 20:06:33 INFO mapred.JobClient: Physical memory (bytes) snapshot=0 | |
13/09/05 20:06:33 INFO mapred.JobClient: Reduce output records=87397 | |
13/09/05 20:06:33 INFO mapred.JobClient: Spilled Records=515470 | |
13/09/05 20:06:33 INFO mapred.JobClient: Map output bytes=15771492 | |
13/09/05 20:06:33 INFO mapred.JobClient: CPU time spent (ms)=0 | |
13/09/05 20:06:33 INFO mapred.JobClient: Total committed heap usage (bytes)=2740453376 | |
13/09/05 20:06:33 INFO mapred.JobClient: Virtual memory (bytes) snapshot=0 | |
13/09/05 20:06:33 INFO mapred.JobClient: Combine input records=0 | |
13/09/05 20:06:33 INFO mapred.JobClient: Map output records=257735 | |
13/09/05 20:06:33 INFO mapred.JobClient: SPLIT_RAW_BYTES=834 | |
13/09/05 20:06:33 INFO mapred.JobClient: Reduce input records=257735 | |
gremlin> | |
gremlin> groovyFilename = "EdgeTypes.groovy" | |
==>EdgeTypes.groovy | |
gremlin> queryProperties = "cassandra-query.properties"; | |
==>cassandra-query.properties | |
gremlin> | |
gremlin> g = FaunusFactory.open(queryProperties); | |
==>faunusgraph[titancassandrainputformat->noopoutputformat] | |
gremlin> hdfs.copyFromLocal("hdfs/" + groovyFilename, groovyFilename); | |
==>null | |
gremlin> g.V.script("EdgeTypes.groovy", "cassandra"); | |
13/09/05 20:06:33 INFO mapreduce.FaunusCompiler: Using the distribution Faunus job jar: /home/szarnyasg/faunus-0.3.2/lib/faunus-0.3.2-job.jar | |
13/09/05 20:06:33 INFO mapreduce.FaunusCompiler: Compiled to 1 MapReduce job(s) | |
13/09/05 20:06:33 INFO mapreduce.FaunusCompiler: Executing job 1 out of 1: MapSequence[com.thinkaurelius.faunus.mapreduce.transform.VerticesMap.Map, com.thinkaurelius.faunus.mapreduce.sideeffect.ScriptMap.Map] | |
13/09/05 20:06:33 INFO mapreduce.FaunusCompiler: Job data location: output/job-0 | |
13/09/05 20:06:33 WARN mapred.JobClient: Use GenericOptionsParser for parsing the arguments. Applications should implement Tool for the same. | |
13/09/05 20:06:34 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:34 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:06:34 INFO mapred.JobClient: Running job: job_local_0002 | |
13/09/05 20:06:34 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@3824291 | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:06:34 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:06:35 INFO mapred.JobClient: map 0% reduce 0% | |
13/09/05 20:06:40 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:41 INFO mapred.JobClient: map 100% reduce 0% | |
13/09/05 20:06:43 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:46 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:49 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:52 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:55 INFO mapred.LocalJobRunner: | |
13/09/05 20:06:58 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:01 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:04 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:07 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:10 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:12 ERROR impl.ConnectionPoolMBeanManager: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:07:12 ERROR impl.ConnectionPoolMBeanManager: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:07:12 INFO mapred.Task: Task:attempt_local_0002_m_000000_0 is done. And is in the process of commiting | |
13/09/05 20:07:12 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:12 INFO mapred.Task: Task attempt_local_0002_m_000000_0 is allowed to commit now | |
13/09/05 20:07:12 INFO output.FileOutputCommitter: Saved output of task 'attempt_local_0002_m_000000_0' to output/job-0 | |
13/09/05 20:07:12 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:12 INFO mapred.Task: Task 'attempt_local_0002_m_000000_0' done. | |
13/09/05 20:07:12 INFO mapred.Task: Using ResourceCalculatorPlugin : org.apache.hadoop.util.LinuxResourceCalculatorPlugin@10a86205 | |
13/09/05 20:07:12 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:07:12 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:07:12 INFO impl.ConnectionPoolMBeanManager: Registering mbean: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:07:12 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:07:12 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:07:12 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:07:13 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:07:13 INFO impl.CountingConnectionPoolMonitor: AddHost: localhost | |
13/09/05 20:07:13 INFO impl.CountingConnectionPoolMonitor: AddHost: 127.0.0.1 | |
13/09/05 20:07:13 INFO impl.CountingConnectionPoolMonitor: RemoveHost: localhost | |
13/09/05 20:07:18 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:21 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:24 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:27 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:30 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:33 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:36 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:39 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:42 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:45 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:48 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:51 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:54 INFO mapred.LocalJobRunner: | |
13/09/05 20:07:57 INFO mapred.LocalJobRunner: | |
13/09/05 20:08:00 INFO mapred.LocalJobRunner: | |
13/09/05 20:08:03 INFO mapred.LocalJobRunner: | |
13/09/05 20:08:06 INFO mapred.LocalJobRunner: | |
13/09/05 20:08:09 INFO mapred.LocalJobRunner: | |
13/09/05 20:08:12 INFO mapred.LocalJobRunner: | |
13/09/05 20:08:16 INFO mapred.LocalJobRunner: | |
13/09/05 20:08:19 INFO mapred.LocalJobRunner: | |
13/09/05 20:08:21 ERROR impl.ConnectionPoolMBeanManager: com.netflix.MonitoredResources:type=ASTYANAX,name=KeyspaceTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:08:21 ERROR impl.ConnectionPoolMBeanManager: com.netflix.MonitoredResources:type=ASTYANAX,name=ClusterTitanConnectionPool,ServiceType=connectionpool | |
13/09/05 20:08:21 INFO mapred.Task: Task:attempt_local_0002_m_000001_0 is done. And is in the process of commiting | |
13/09/05 20:08:21 INFO mapred.LocalJobRunner: | |
13/09/05 20:08:21 INFO mapred.Task: Task attempt_local_0002_m_000001_0 is allowed to commit now | |
13/09/05 20:08:21 INFO output.FileOutputCommitter: Saved output of task 'attempt_local_0002_m_000001_0' to output/job-0 | |
13/09/05 20:08:21 INFO mapred.LocalJobRunner: | |
13/09/05 20:08:21 INFO mapred.Task: Task 'attempt_local_0002_m_000001_0' done. | |
13/09/05 20:08:21 INFO mapred.JobClient: Job complete: job_local_0002 | |
13/09/05 20:08:21 INFO mapred.JobClient: Counters: 14 | |
13/09/05 20:08:21 INFO mapred.JobClient: com.thinkaurelius.faunus.mapreduce.transform.VerticesMap$Counters | |
13/09/05 20:08:21 INFO mapred.JobClient: VERTICES_PROCESSED=87397 | |
13/09/05 20:08:21 INFO mapred.JobClient: EDGES_PROCESSED=0 | |
13/09/05 20:08:21 INFO mapred.JobClient: File Output Format Counters | |
13/09/05 20:08:21 INFO mapred.JobClient: Bytes Written=0 | |
13/09/05 20:08:21 INFO mapred.JobClient: FileSystemCounters | |
13/09/05 20:08:21 INFO mapred.JobClient: FILE_BYTES_READ=382189327 | |
13/09/05 20:08:21 INFO mapred.JobClient: FILE_BYTES_WRITTEN=338747302 | |
13/09/05 20:08:21 INFO mapred.JobClient: File Input Format Counters | |
13/09/05 20:08:21 INFO mapred.JobClient: Bytes Read=0 | |
13/09/05 20:08:21 INFO mapred.JobClient: Map-Reduce Framework | |
13/09/05 20:08:21 INFO mapred.JobClient: Map input records=87397 | |
13/09/05 20:08:21 INFO mapred.JobClient: Physical memory (bytes) snapshot=0 | |
13/09/05 20:08:21 INFO mapred.JobClient: Spilled Records=0 | |
13/09/05 20:08:21 INFO mapred.JobClient: CPU time spent (ms)=0 | |
13/09/05 20:08:21 INFO mapred.JobClient: Total committed heap usage (bytes)=650379264 | |
13/09/05 20:08:21 INFO mapred.JobClient: Virtual memory (bytes) snapshot=0 | |
13/09/05 20:08:21 INFO mapred.JobClient: Map output records=0 | |
13/09/05 20:08:21 INFO mapred.JobClient: SPLIT_RAW_BYTES=190 | |
==>193980: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [193960], ROUTE_ROUTEDEFINITION, []] | |
==>352084: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [352076], ROUTE_ROUTEDEFINITION, []] | |
==>248268: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [248252], ROUTE_ROUTEDEFINITION, []] | |
==>55416: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [55408], ROUTE_ROUTEDEFINITION, []] | |
==>5812: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [5800], ROUTE_ROUTEDEFINITION, []] | |
==>28320: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [28312], ROUTE_ROUTEDEFINITION, []] | |
==>85208: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [85204], ROUTE_ROUTEDEFINITION, []] | |
==>98224: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [98300, 98324, 98348, 98372, 98396, 98420, 98228, 98252, 98276], ROUTE_ROUTEDEFINITION, []] | |
==>428564: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [428556], ROUTE_ROUTEDEFINITION, []] | |
==>107276: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [107268], ROUTE_ROUTEDEFINITION, []] | |
==>104484: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [104476], ROUTE_ROUTEDEFINITION, []] | |
==>267472: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [267460], ROUTE_ROUTEDEFINITION, []] | |
==>119072: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [119052], ROUTE_ROUTEDEFINITION, []] | |
==>298636: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [298620], ROUTE_ROUTEDEFINITION, []] | |
==>411556: [SWITCHPOSITION_SWITCH, [], ROUTE_SWITCHPOSITION, [], TRACKELEMENT_SENSOR, [411540], ROUTE_ROUTEDEFINITION, []] | |
==>... | |
gremlin> | |
real 2m54.509s | |
user 2m2.920s | |
sys 0m11.901s | |
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.