Created
November 11, 2011 04:01
-
-
Save ceocoder/1357152 to your computer and use it in GitHub Desktop.
unicast node restart on Mac OS X
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Config
==========================

elasticsearch.yml
-----------------------
index.number_of_shards: 1
index.number_of_replicas: 0
node.master: true
discovery.zen.minimum_master_nodes: 1
discovery.zen.ping.timeout: 30s
discovery.zen.ping.multicast.enabled: false
discovery.zen.ping.unicast.hosts: "192.168.1.11:9300"
java -version
-----------------
java version "1.6.0_26"
Java(TM) SE Runtime Environment (build 1.6.0_26-b03-384-10M3425)
Java HotSpot(TM) Client VM (build 20.1-b02-384, mixed mode)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
[2011-11-10 19:54:39,628][WARN ][bootstrap ] jvm uses the client vm, make sure to run `java` with the server vm for best performance by adding `-server` to the command line | |
[2011-11-10 19:54:39,636][INFO ][node ] [Cyclone] {0.18.2}[19212]: initializing ... | |
[2011-11-10 19:54:39,643][INFO ][plugins ] [Cyclone] loaded [], sites [] | |
[2011-11-10 19:54:40,669][DEBUG][threadpool ] [Cyclone] creating thread_pool [cached], type [cached], keep_alive [30s] | |
[2011-11-10 19:54:40,672][DEBUG][threadpool ] [Cyclone] creating thread_pool [index], type [cached], keep_alive [5m] | |
[2011-11-10 19:54:40,672][DEBUG][threadpool ] [Cyclone] creating thread_pool [search], type [cached], keep_alive [5m] | |
[2011-11-10 19:54:40,673][DEBUG][threadpool ] [Cyclone] creating thread_pool [percolate], type [cached], keep_alive [5m] | |
[2011-11-10 19:54:40,673][DEBUG][threadpool ] [Cyclone] creating thread_pool [management], type [scaling], min [1], size [20], keep_alive [5m] | |
[2011-11-10 19:54:40,676][DEBUG][threadpool ] [Cyclone] creating thread_pool [merge], type [scaling], min [1], size [20], keep_alive [5m] | |
[2011-11-10 19:54:40,677][DEBUG][threadpool ] [Cyclone] creating thread_pool [snapshot], type [scaling], min [1], size [10], keep_alive [5m] | |
[2011-11-10 19:54:40,689][DEBUG][transport.netty ] [Cyclone] using worker_count[4], port[9300-9400], bind_host[null], publish_host[null], compress[false], connect_timeout[30s], connections_per_node[2/4/1] | |
[2011-11-10 19:54:40,705][DEBUG][discovery.zen.ping.unicast] [Cyclone] using initial hosts [192.168.1.11:9300], with concurrent_connects [10] | |
[2011-11-10 19:54:40,708][DEBUG][discovery.zen ] [Cyclone] using ping.timeout [30s] | |
[2011-11-10 19:54:40,713][DEBUG][discovery.zen.elect ] [Cyclone] using minimum_master_nodes [0] | |
[2011-11-10 19:54:40,714][DEBUG][discovery.zen.fd ] [Cyclone] [master] uses ping_interval [1s], ping_timeout [30s], ping_retries [3] | |
[2011-11-10 19:54:40,718][DEBUG][discovery.zen.fd ] [Cyclone] [node ] uses ping_interval [1s], ping_timeout [30s], ping_retries [3] | |
[2011-11-10 19:54:40,737][DEBUG][monitor.jvm ] [Cyclone] enabled [false], last_gc_enabled [false], interval [1s], gc_threshold [5s] | |
[2011-11-10 19:54:41,246][DEBUG][monitor.os ] [Cyclone] Using probe [org.elasticsearch.monitor.os.SigarOsProbe@1ded4c9] with refresh_interval [1s] | |
[2011-11-10 19:54:41,251][DEBUG][monitor.process ] [Cyclone] Using probe [org.elasticsearch.monitor.process.SigarProcessProbe@1ef3212] with refresh_interval [1s] | |
[2011-11-10 19:54:41,255][DEBUG][monitor.jvm ] [Cyclone] Using refresh_interval [1s] | |
[2011-11-10 19:54:41,256][DEBUG][monitor.network ] [Cyclone] Using probe [org.elasticsearch.monitor.network.SigarNetworkProbe@2d502d] with refresh_interval [5s] | |
[2011-11-10 19:54:41,264][DEBUG][monitor.network ] [Cyclone] net_info | |
host [hedwig] | |
en1 display_name [en1] | |
address [/fe80:0:0:0:216:cbff:fe08:7b92%5] [/192.168.1.11] | |
mtu [1500] multicast [true] ptp [false] loopback [false] up [true] virtual [false] | |
lo0 display_name [lo0] | |
address [/fe80:0:0:0:0:0:0:1%1] [/0:0:0:0:0:0:0:1] [/127.0.0.1] | |
mtu [16384] multicast [true] ptp [false] loopback [true] up [true] virtual [false] | |
[2011-11-10 19:54:41,282][DEBUG][env ] [Cyclone] using node location [[/Users/dhaivat/workspace/installs/elasticsearch-0.18.2/data/elasticsearch/nodes/0]], local_node_id [0] | |
[2011-11-10 19:54:41,466][DEBUG][cache.memory ] [Cyclone] using bytebuffer cache with small_buffer_size [1kb], large_buffer_size [1mb], small_cache_size [10mb], large_cache_size [500mb], direct [true] | |
[2011-11-10 19:54:41,481][DEBUG][cluster.routing.allocation.decider] [Cyclone] using node_concurrent_recoveries [2], node_initial_primaries_recoveries [4] | |
[2011-11-10 19:54:41,482][DEBUG][cluster.routing.allocation.decider] [Cyclone] using [cluster.routing.allocation.allow_rebalance] with [indices_all_active] | |
[2011-11-10 19:54:41,483][DEBUG][cluster.routing.allocation.decider] [Cyclone] using [cluster_concurrent_rebalance] with [2] | |
[2011-11-10 19:54:41,486][DEBUG][gateway.local ] [Cyclone] using initial_shards [quorum], list_timeout [30s] | |
[2011-11-10 19:54:41,513][DEBUG][indices.recovery ] [Cyclone] using max_size_per_sec[0b], concurrent_streams [5], file_chunk_size [100kb], translog_size [100kb], translog_ops [1000], and compress [true] | |
[2011-11-10 19:54:41,711][DEBUG][http.netty ] [Cyclone] using max_chunk_size[8kb], max_header_size[8kb], max_initial_line_length[4kb], max_content_length[100mb] | |
[2011-11-10 19:54:41,718][DEBUG][indices.memory ] [Cyclone] using index_buffer_size [48mb], with min_shard_index_buffer_size [4mb], max_shard_index_buffer_size [512mb], shard_inactive_time [30m] | |
[2011-11-10 19:54:41,729][DEBUG][indices.cache.filter ] [Cyclone] using [node] filter cache with size [20%], actual_size [50.5mb] | |
[2011-11-10 19:54:41,800][INFO ][node ] [Cyclone] {0.18.2}[19212]: initialized | |
[2011-11-10 19:54:41,801][INFO ][node ] [Cyclone] {0.18.2}[19212]: starting ... | |
[2011-11-10 19:54:41,831][DEBUG][netty.channel.socket.nio.NioProviderMetadata] Using the autodetected NIO constraint level: 0 | |
[2011-11-10 19:54:41,892][DEBUG][transport.netty ] [Cyclone] Bound to address [/0.0.0.0:9300] | |
[2011-11-10 19:54:41,894][INFO ][transport ] [Cyclone] bound_address {inet[/0.0.0.0:9300]}, publish_address {inet[/192.168.1.11:9300]} | |
[2011-11-10 19:54:42,025][DEBUG][transport.netty ] [Cyclone] Connected to node [[Cyclone][m0CMalV3QOuzxSIlX14DJA][inet[/192.168.1.11:9300]]{master=true}] | |
[2011-11-10 19:55:11,996][WARN ][discovery ] [Cyclone] waited for 30s and no initial state was set by the discovery | |
[2011-11-10 19:55:11,996][INFO ][discovery ] [Cyclone] elasticsearch/m0CMalV3QOuzxSIlX14DJA | |
[2011-11-10 19:55:11,996][DEBUG][gateway.local ] [Cyclone] [find_latest_state]: no metadata state loaded | |
[2011-11-10 19:55:11,997][DEBUG][gateway.local ] [Cyclone] [find_latest_state]: no started shards loaded | |
[2011-11-10 19:55:11,997][DEBUG][gateway ] [Cyclone] can't wait on start for (possibly) reading state from gateway, will do it asynchronously | |
[2011-11-10 19:55:12,007][INFO ][http ] [Cyclone] bound_address {inet[/0.0.0.0:9200]}, publish_address {inet[/192.168.1.11:9200]} | |
[2011-11-10 19:55:12,009][INFO ][node ] [Cyclone] {0.18.2}[19212]: started | |
[2011-11-10 19:55:12,010][DEBUG][discovery.zen ] [Cyclone] ping responses: {none} | |
[2011-11-10 19:55:12,013][DEBUG][transport.netty ] [Cyclone] Disconnected from [[Cyclone][m0CMalV3QOuzxSIlX14DJA][inet[/192.168.1.11:9300]]{master=true}] | |
[2011-11-10 19:55:12,014][DEBUG][cluster.service ] [Cyclone] processing [zen-disco-join (elected_as_master)]: execute | |
[2011-11-10 19:55:12,016][DEBUG][cluster.service ] [Cyclone] cluster state updated, version [1], source [zen-disco-join (elected_as_master)] | |
[2011-11-10 19:55:12,019][INFO ][cluster.service ] [Cyclone] new_master [Cyclone][m0CMalV3QOuzxSIlX14DJA][inet[/192.168.1.11:9300]]{master=true}, reason: zen-disco-join (elected_as_master) | |
[2011-11-10 19:55:12,039][DEBUG][transport.netty ] [Cyclone] Connected to node [[Cyclone][m0CMalV3QOuzxSIlX14DJA][inet[/192.168.1.11:9300]]{master=true}] | |
[2011-11-10 19:55:12,043][DEBUG][river.cluster ] [Cyclone] processing [reroute_rivers_node_changed]: execute | |
[2011-11-10 19:55:12,044][DEBUG][river.cluster ] [Cyclone] processing [reroute_rivers_node_changed]: no change in cluster_state | |
[2011-11-10 19:55:12,045][DEBUG][cluster.service ] [Cyclone] processing [zen-disco-join (elected_as_master)]: done applying updated cluster_state | |
[2011-11-10 19:55:12,051][DEBUG][gateway.local ] [Cyclone] no state elected | |
[2011-11-10 19:55:12,052][DEBUG][cluster.service ] [Cyclone] processing [local-gateway-elected-state]: execute | |
[2011-11-10 19:55:12,054][DEBUG][cluster.service ] [Cyclone] cluster state updated, version [1], source [local-gateway-elected-state] | |
[2011-11-10 19:55:12,055][DEBUG][river.cluster ] [Cyclone] processing [reroute_rivers_node_changed]: execute | |
[2011-11-10 19:55:12,056][DEBUG][river.cluster ] [Cyclone] processing [reroute_rivers_node_changed]: no change in cluster_state | |
[2011-11-10 19:55:12,104][INFO ][gateway ] [Cyclone] recovered [0] indices into cluster_state | |
[2011-11-10 19:55:12,104][DEBUG][cluster.service ] [Cyclone] processing [local-gateway-elected-state]: done applying updated cluster_state | |
[2011-11-10 19:55:22,043][DEBUG][cluster.service ] [Cyclone] processing [routing-table-updater]: execute | |
[2011-11-10 19:55:22,043][DEBUG][cluster.service ] [Cyclone] processing [routing-table-updater]: no change in cluster_state | |
[2011-11-10 19:55:29,086][DEBUG][cluster.service ] [Cyclone] processing [create-index [twitter], cause [auto(index api)]]: execute | |
[2011-11-10 19:55:29,087][DEBUG][indices ] [Cyclone] creating Index [twitter], shards [1]/[0] | |
[2011-11-10 19:55:29,229][DEBUG][index.mapper ] [Cyclone] [twitter] using dynamic[true], default mapping: location[null] and source[{ | |
"_default_" : { | |
} | |
}] | |
[2011-11-10 19:55:29,230][DEBUG][index.cache.field.data.resident] [Cyclone] [twitter] using [resident] field cache with max_size [-1], expire [null] | |
[2011-11-10 19:55:29,232][DEBUG][index.cache ] [Cyclone] [twitter] Using stats.refresh_interval [1s] | |
[2011-11-10 19:55:29,245][INFO ][cluster.metadata ] [Cyclone] [twitter] creating index, cause [auto(index api)], shards [1]/[0], mappings [] | |
[2011-11-10 19:55:29,252][DEBUG][cluster.service ] [Cyclone] cluster state updated, version [2], source [create-index [twitter], cause [auto(index api)]] | |
[2011-11-10 19:55:29,253][DEBUG][river.cluster ] [Cyclone] processing [reroute_rivers_node_changed]: execute | |
[2011-11-10 19:55:29,253][DEBUG][river.cluster ] [Cyclone] processing [reroute_rivers_node_changed]: no change in cluster_state | |
[2011-11-10 19:55:29,253][DEBUG][indices.cluster ] [Cyclone] [twitter][0] creating shard | |
[2011-11-10 19:55:29,253][DEBUG][index.service ] [Cyclone] [twitter] creating shard_id [0] | |
[2011-11-10 19:55:29,358][DEBUG][index.deletionpolicy ] [Cyclone] [twitter][0] Using [keep_only_last] deletion policy | |
[2011-11-10 19:55:29,360][DEBUG][index.merge.policy ] [Cyclone] [twitter][0] using [tiered] merge policy with expunge_deletes_allowed[10.0], floor_segment[2mb], max_merge_at_once[10], max_merge_at_once_explicit[30], max_merged_segment[5gb], segments_per_tier[10.0], reclaim_deletes_weight[2.0], async_merge[true] | |
[2011-11-10 19:55:29,360][DEBUG][index.merge.scheduler ] [Cyclone] [twitter][0] using [concurrent] merge scheduler with max_thread_count[1] | |
[2011-11-10 19:55:29,362][DEBUG][index.shard.service ] [Cyclone] [twitter][0] state: [CREATED] | |
[2011-11-10 19:55:29,365][DEBUG][index.translog ] [Cyclone] [twitter][0] interval [5s], flush_threshold_ops [5000], flush_threshold_size [200mb], flush_threshold_period [30m] | |
[2011-11-10 19:55:29,369][DEBUG][index.shard.service ] [Cyclone] [twitter][0] state: [CREATED]->[RECOVERING], reason [from gateway] | |
[2011-11-10 19:55:29,370][DEBUG][index.gateway ] [Cyclone] [twitter][0] starting recovery from local ... | |
[2011-11-10 19:55:29,372][DEBUG][cluster.service ] [Cyclone] processing [create-index [twitter], cause [auto(index api)]]: done applying updated cluster_state | |
[2011-11-10 19:55:29,380][DEBUG][index.engine.robin ] [Cyclone] [twitter][0] Starting engine | |
[2011-11-10 19:55:29,467][DEBUG][index.shard.service ] [Cyclone] [twitter][0] scheduling refresher every 1s | |
[2011-11-10 19:55:29,468][DEBUG][index.shard.service ] [Cyclone] [twitter][0] scheduling optimizer / merger every 1s | |
[2011-11-10 19:55:29,468][DEBUG][index.shard.service ] [Cyclone] [twitter][0] state: [RECOVERING]->[STARTED], reason [post recovery from gateway, no translog] | |
[2011-11-10 19:55:29,468][DEBUG][index.gateway ] [Cyclone] [twitter][0] recovery completed from local, took [98ms] | |
index : files [0] with total_size [0b], took[8ms] | |
: recovered_files [0] with total_size [0b] | |
: reusing_files [0] with total_size [0b] | |
translog : number_of_operations [0], took [97ms] | |
[2011-11-10 19:55:29,468][DEBUG][cluster.action.shard ] [Cyclone] sending shard started for [twitter][0], node[m0CMalV3QOuzxSIlX14DJA], [P], s[INITIALIZING], reason [after recovery from gateway] | |
[2011-11-10 19:55:29,469][DEBUG][cluster.action.shard ] [Cyclone] received shard started for [twitter][0], node[m0CMalV3QOuzxSIlX14DJA], [P], s[INITIALIZING], reason [after recovery from gateway] | |
[2011-11-10 19:55:29,469][DEBUG][cluster.service ] [Cyclone] processing [shard-started ([twitter][0], node[m0CMalV3QOuzxSIlX14DJA], [P], s[INITIALIZING]), reason [after recovery from gateway]]: execute | |
[2011-11-10 19:55:29,470][DEBUG][cluster.action.shard ] [Cyclone] applying started shards [[twitter][0], node[m0CMalV3QOuzxSIlX14DJA], [P], s[INITIALIZING]], reason [after recovery from gateway] | |
[2011-11-10 19:55:29,470][DEBUG][cluster.service ] [Cyclone] cluster state updated, version [3], source [shard-started ([twitter][0], node[m0CMalV3QOuzxSIlX14DJA], [P], s[INITIALIZING]), reason [after recovery from gateway]] | |
[2011-11-10 19:55:29,470][DEBUG][river.cluster ] [Cyclone] processing [reroute_rivers_node_changed]: execute | |
[2011-11-10 19:55:29,470][DEBUG][river.cluster ] [Cyclone] processing [reroute_rivers_node_changed]: no change in cluster_state | |
[2011-11-10 19:55:29,473][DEBUG][cluster.service ] [Cyclone] processing [shard-started ([twitter][0], node[m0CMalV3QOuzxSIlX14DJA], [P], s[INITIALIZING]), reason [after recovery from gateway]]: done applying updated cluster_state | |
[2011-11-10 19:55:29,650][DEBUG][cluster.service ] [Cyclone] processing [update-mapping [twitter][tweet]]: execute | |
[2011-11-10 19:55:29,693][DEBUG][cluster.metadata ] [Cyclone] [twitter] update_mapping [tweet] (dynamic) with source [{"tweet":{"properties":{"message":{"type":"string"},"post_date":{"type":"date","format":"dateOptionalTime"},"user":{"type":"string"}}}}] | |
[2011-11-10 19:55:29,698][DEBUG][cluster.service ] [Cyclone] cluster state updated, version [4], source [update-mapping [twitter][tweet]] | |
[2011-11-10 19:55:29,700][DEBUG][river.cluster ] [Cyclone] processing [reroute_rivers_node_changed]: execute | |
[2011-11-10 19:55:29,700][DEBUG][river.cluster ] [Cyclone] processing [reroute_rivers_node_changed]: no change in cluster_state | |
[2011-11-10 19:55:29,702][DEBUG][cluster.service ] [Cyclone] processing [update-mapping [twitter][tweet]]: done applying updated cluster_state | |
[2011-11-10 19:55:47,233][INFO ][node ] [Cyclone] {0.18.2}[19212]: stopping ... | |
[2011-11-10 19:55:47,249][DEBUG][index.shard.service ] [Cyclone] [twitter][0] state: [STARTED]->[CLOSED], reason [shutdown] | |
[2011-11-10 19:55:47,292][INFO ][node ] [Cyclone] {0.18.2}[19212]: stopped | |
[2011-11-10 19:55:47,292][INFO ][node ] [Cyclone] {0.18.2}[19212]: closing ... | |
[2011-11-10 19:55:57,302][INFO ][node ] [Cyclone] {0.18.2}[19212]: closed | |
====================================== | |
[2011-11-10 19:56:07,301][WARN ][bootstrap ] jvm uses the client vm, make sure to run `java` with the server vm for best performance by adding `-server` to the command line | |
[2011-11-10 19:56:07,310][INFO ][node ] [Elysius] {0.18.2}[19271]: initializing ... | |
[2011-11-10 19:56:07,318][INFO ][plugins ] [Elysius] loaded [], sites [] | |
[2011-11-10 19:56:08,340][DEBUG][threadpool ] [Elysius] creating thread_pool [cached], type [cached], keep_alive [30s] | |
[2011-11-10 19:56:08,344][DEBUG][threadpool ] [Elysius] creating thread_pool [index], type [cached], keep_alive [5m] | |
[2011-11-10 19:56:08,344][DEBUG][threadpool ] [Elysius] creating thread_pool [search], type [cached], keep_alive [5m] | |
[2011-11-10 19:56:08,344][DEBUG][threadpool ] [Elysius] creating thread_pool [percolate], type [cached], keep_alive [5m] | |
[2011-11-10 19:56:08,344][DEBUG][threadpool ] [Elysius] creating thread_pool [management], type [scaling], min [1], size [20], keep_alive [5m] | |
[2011-11-10 19:56:08,348][DEBUG][threadpool ] [Elysius] creating thread_pool [merge], type [scaling], min [1], size [20], keep_alive [5m] | |
[2011-11-10 19:56:08,348][DEBUG][threadpool ] [Elysius] creating thread_pool [snapshot], type [scaling], min [1], size [10], keep_alive [5m] | |
[2011-11-10 19:56:08,360][DEBUG][transport.netty ] [Elysius] using worker_count[4], port[9300-9400], bind_host[null], publish_host[null], compress[false], connect_timeout[30s], connections_per_node[2/4/1] | |
[2011-11-10 19:56:08,376][DEBUG][discovery.zen.ping.unicast] [Elysius] using initial hosts [192.168.1.11:9300], with concurrent_connects [10] | |
[2011-11-10 19:56:08,379][DEBUG][discovery.zen ] [Elysius] using ping.timeout [30s] | |
[2011-11-10 19:56:08,384][DEBUG][discovery.zen.elect ] [Elysius] using minimum_master_nodes [0] | |
[2011-11-10 19:56:08,385][DEBUG][discovery.zen.fd ] [Elysius] [master] uses ping_interval [1s], ping_timeout [30s], ping_retries [3] | |
[2011-11-10 19:56:08,389][DEBUG][discovery.zen.fd ] [Elysius] [node ] uses ping_interval [1s], ping_timeout [30s], ping_retries [3] | |
[2011-11-10 19:56:08,407][DEBUG][monitor.jvm ] [Elysius] enabled [false], last_gc_enabled [false], interval [1s], gc_threshold [5s] | |
[2011-11-10 19:56:08,917][DEBUG][monitor.os ] [Elysius] Using probe [org.elasticsearch.monitor.os.SigarOsProbe@690247] with refresh_interval [1s] | |
[2011-11-10 19:56:08,923][DEBUG][monitor.process ] [Elysius] Using probe [org.elasticsearch.monitor.process.SigarProcessProbe@14b74a7] with refresh_interval [1s] | |
[2011-11-10 19:56:08,927][DEBUG][monitor.jvm ] [Elysius] Using refresh_interval [1s] | |
[2011-11-10 19:56:08,928][DEBUG][monitor.network ] [Elysius] Using probe [org.elasticsearch.monitor.network.SigarNetworkProbe@1e22632] with refresh_interval [5s] | |
[2011-11-10 19:56:08,937][DEBUG][monitor.network ] [Elysius] net_info | |
host [hedwig] | |
en1 display_name [en1] | |
address [/fe80:0:0:0:216:cbff:fe08:7b92%5] [/192.168.1.11] | |
mtu [1500] multicast [true] ptp [false] loopback [false] up [true] virtual [false] | |
lo0 display_name [lo0] | |
address [/fe80:0:0:0:0:0:0:1%1] [/0:0:0:0:0:0:0:1] [/127.0.0.1] | |
mtu [16384] multicast [true] ptp [false] loopback [true] up [true] virtual [false] | |
[2011-11-10 19:56:08,953][DEBUG][env ] [Elysius] using node location [[/Users/dhaivat/workspace/installs/elasticsearch-0.18.2/data/elasticsearch/nodes/0]], local_node_id [0] | |
[2011-11-10 19:56:09,161][DEBUG][cache.memory ] [Elysius] using bytebuffer cache with small_buffer_size [1kb], large_buffer_size [1mb], small_cache_size [10mb], large_cache_size [500mb], direct [true] | |
[2011-11-10 19:56:09,173][DEBUG][cluster.routing.allocation.decider] [Elysius] using node_concurrent_recoveries [2], node_initial_primaries_recoveries [4] | |
[2011-11-10 19:56:09,175][DEBUG][cluster.routing.allocation.decider] [Elysius] using [cluster.routing.allocation.allow_rebalance] with [indices_all_active] | |
[2011-11-10 19:56:09,175][DEBUG][cluster.routing.allocation.decider] [Elysius] using [cluster_concurrent_rebalance] with [2] | |
[2011-11-10 19:56:09,179][DEBUG][gateway.local ] [Elysius] using initial_shards [quorum], list_timeout [30s] | |
[2011-11-10 19:56:09,197][DEBUG][indices.recovery ] [Elysius] using max_size_per_sec[0b], concurrent_streams [5], file_chunk_size [100kb], translog_size [100kb], translog_ops [1000], and compress [true] | |
[2011-11-10 19:56:09,324][DEBUG][http.netty ] [Elysius] using max_chunk_size[8kb], max_header_size[8kb], max_initial_line_length[4kb], max_content_length[100mb] | |
[2011-11-10 19:56:09,330][DEBUG][indices.memory ] [Elysius] using index_buffer_size [48mb], with min_shard_index_buffer_size [4mb], max_shard_index_buffer_size [512mb], shard_inactive_time [30m] | |
[2011-11-10 19:56:09,339][DEBUG][indices.cache.filter ] [Elysius] using [node] filter cache with size [20%], actual_size [50.5mb] | |
[2011-11-10 19:56:09,397][INFO ][node ] [Elysius] {0.18.2}[19271]: initialized | |
[2011-11-10 19:56:09,397][INFO ][node ] [Elysius] {0.18.2}[19271]: starting ... | |
[2011-11-10 19:56:09,421][DEBUG][netty.channel.socket.nio.NioProviderMetadata] Using the autodetected NIO constraint level: 0 | |
[2011-11-10 19:56:09,480][DEBUG][transport.netty ] [Elysius] Bound to address [/0.0.0.0:9300] | |
[2011-11-10 19:56:09,482][INFO ][transport ] [Elysius] bound_address {inet[/0.0.0.0:9300]}, publish_address {inet[/192.168.1.11:9300]} | |
[2011-11-10 19:56:09,623][DEBUG][transport.netty ] [Elysius] Connected to node [[Elysius][WODOgSe9Ti-Uv2smHFOypQ][inet[/192.168.1.11:9300]]{master=true}] | |
[2011-11-10 19:56:39,579][WARN ][discovery ] [Elysius] waited for 30s and no initial state was set by the discovery | |
[2011-11-10 19:56:39,579][INFO ][discovery ] [Elysius] elasticsearch/WODOgSe9Ti-Uv2smHFOypQ | |
[2011-11-10 19:56:39,593][DEBUG][transport.netty ] [Elysius] Disconnected from [[Elysius][WODOgSe9Ti-Uv2smHFOypQ][inet[/192.168.1.11:9300]]{master=true}] | |
[2011-11-10 19:56:39,595][DEBUG][discovery.zen ] [Elysius] ping responses: {none} | |
[2011-11-10 19:56:39,600][DEBUG][cluster.service ] [Elysius] processing [zen-disco-join (elected_as_master)]: execute | |
[2011-11-10 19:56:39,601][DEBUG][cluster.service ] [Elysius] cluster state updated, version [1], source [zen-disco-join (elected_as_master)] | |
[2011-11-10 19:56:39,602][INFO ][cluster.service ] [Elysius] new_master [Elysius][WODOgSe9Ti-Uv2smHFOypQ][inet[/192.168.1.11:9300]]{master=true}, reason: zen-disco-join (elected_as_master) | |
[2011-11-10 19:56:39,624][DEBUG][transport.netty ] [Elysius] Connected to node [[Elysius][WODOgSe9Ti-Uv2smHFOypQ][inet[/192.168.1.11:9300]]{master=true}] | |
[2011-11-10 19:56:39,659][DEBUG][river.cluster ] [Elysius] processing [reroute_rivers_node_changed]: execute | |
[2011-11-10 19:56:39,670][DEBUG][cluster.service ] [Elysius] processing [zen-disco-join (elected_as_master)]: done applying updated cluster_state | |
[2011-11-10 19:56:39,672][DEBUG][river.cluster ] [Elysius] processing [reroute_rivers_node_changed]: no change in cluster_state | |
[2011-11-10 19:56:39,711][DEBUG][gateway.local ] [Elysius] [find_latest_state]: loading metadata from [/Users/dhaivat/workspace/installs/elasticsearch-0.18.2/data/elasticsearch/nodes/0/_state/metadata-3] | |
[2011-11-10 19:56:39,713][DEBUG][gateway.local ] [Elysius] [find_latest_state]: loading started shards from [/Users/dhaivat/workspace/installs/elasticsearch-0.18.2/data/elasticsearch/nodes/0/_state/shards-3] | |
[2011-11-10 19:56:39,714][DEBUG][gateway ] [Elysius] can't wait on start for (possibly) reading state from gateway, will do it asynchronously | |
[2011-11-10 19:56:39,717][INFO ][http ] [Elysius] bound_address {inet[/0.0.0.0:9200]}, publish_address {inet[/192.168.1.11:9200]} | |
[2011-11-10 19:56:39,717][INFO ][node ] [Elysius] {0.18.2}[19271]: started | |
[2011-11-10 19:56:49,627][DEBUG][cluster.service ] [Elysius] processing [routing-table-updater]: execute | |
[2011-11-10 19:56:49,629][DEBUG][cluster.service ] [Elysius] processing [routing-table-updater]: no change in cluster_state |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment