Last active
December 19, 2015 22:49
-
-
Save nik9000/6030428 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Scenario 1: close/open an over-replicated index on a 3-node cluster.
# This works as expected: the index reopens and the cluster reaches yellow.
# Cleanly launch three nodes
rm -rf data/elasticsearch/nodes/*
./bin/elasticsearch -p es1.pid
./bin/elasticsearch -p es2.pid
./bin/elasticsearch -p es3.pid
sleep 10
curl -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_nodes=3"
# Create more replicas than we have nodes (5 shards x 3 replicas on 3 nodes
# can never be fully allocated, so the cluster stays yellow at best)
curl -XPOST "http://localhost:9200/test_close?pretty=true" -d '{
    "settings": {
        "number_of_shards": 5,
        "number_of_replicas": 3
    }
}'
# Wait until all the shards that are going to allocate have allocated
sleep 5
curl -XGET "http://localhost:9200/_cluster/health?pretty=true"
# Close the index
curl -XPOST "http://localhost:9200/test_close/_close?pretty=true"
# Wait a bit and open it
sleep 2
curl -XPOST "http://localhost:9200/test_close/_open?pretty=true"
curl -s -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_status=yellow"
# Tear down: stop every node and remove its pid file (quote expansions so
# odd filenames can't word-split)
for pidfile in *.pid; do kill "$(cat "$pidfile")"; rm -- "$pidfile"; done
# Scenario 2: same close/open cycle but with only two nodes.
# Currently leaves the index stuck closed.
# Cleanly launch two nodes
rm -rf data/elasticsearch/nodes/*
./bin/elasticsearch -p es1.pid
./bin/elasticsearch -p es2.pid
sleep 10
curl -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_nodes=2"
# Create more replicas than we have nodes
curl -XPOST "http://localhost:9200/test_close?pretty=true" -d '{
    "settings": {
        "number_of_shards": 5,
        "number_of_replicas": 3
    }
}'
# Wait until all the shards that are going to allocate have allocated
sleep 5
curl -XGET "http://localhost:9200/_cluster/health?pretty=true"
# Close the index
curl -XPOST "http://localhost:9200/test_close/_close?pretty=true"
# Wait a bit and open it
sleep 2
curl -XPOST "http://localhost:9200/test_close/_open?pretty=true"
curl -s -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_status=yellow"
# Tear down: stop every node and remove its pid file
for pidfile in *.pid; do kill "$(cat "$pidfile")"; rm -- "$pidfile"; done
# Scenario 3: as scenario 2, but start a third node while the reopened index
# is still recovering. Launching a new node during recovery also doesn't fix it.
# Cleanly launch two nodes
rm -rf data/elasticsearch/nodes/*
./bin/elasticsearch -p es1.pid
./bin/elasticsearch -p es2.pid
sleep 10
curl -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_nodes=2"
# Create more replicas than we have nodes
curl -XPOST "http://localhost:9200/test_close?pretty=true" -d '{
    "settings": {
        "number_of_shards": 5,
        "number_of_replicas": 3
    }
}'
# Wait until all the shards that are going to allocate have allocated
sleep 5
curl -XGET "http://localhost:9200/_cluster/health?pretty=true"
# Close the index
curl -XPOST "http://localhost:9200/test_close/_close?pretty=true"
# Wait a bit and open it
sleep 2
curl -XPOST "http://localhost:9200/test_close/_open?pretty=true"
# Bounded wait so the script continues even if yellow is never reached
curl -s -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_status=yellow&timeout=10s"
# Bring up a third node mid-recovery, then wait again
./bin/elasticsearch -p es3.pid
curl -s -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_status=yellow&timeout=20s"
# Tear down: stop every node and remove its pid file
for pidfile in *.pid; do kill "$(cat "$pidfile")"; rm -- "$pidfile"; done
# Scenario 4: start three nodes, kill one, then close/open the index.
# Also currently leaves the index stuck closed.
# Cleanly launch three nodes
rm -rf data/elasticsearch/nodes/*
./bin/elasticsearch -p es1.pid
./bin/elasticsearch -p es2.pid
./bin/elasticsearch -p es3.pid
sleep 10
curl -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_nodes=3"
# Create more replicas than we have nodes
curl -XPOST "http://localhost:9200/test_close?pretty=true" -d '{
    "settings": {
        "number_of_shards": 5,
        "number_of_replicas": 3
    }
}'
# Wait until all the shards that are going to allocate have allocated
sleep 5
curl -XGET "http://localhost:9200/_cluster/health?pretty=true"
# Kill a server
kill "$(cat es3.pid)"
rm -- es3.pid
curl -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_nodes=2"
# Close the index
curl -XPOST "http://localhost:9200/test_close/_close?pretty=true"
# Wait a bit and open it
sleep 2
curl -XPOST "http://localhost:9200/test_close/_open?pretty=true"
curl -s -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_status=yellow"
# Tear down: stop every node and remove its pid file
for pidfile in *.pid; do kill "$(cat "$pidfile")"; rm -- "$pidfile"; done
# Scenario 5: as scenario 4, but restart the killed node afterwards.
# If you bring the third node back online then the index does recover.
# Cleanly launch three nodes
rm -rf data/elasticsearch/nodes/*
./bin/elasticsearch -p es1.pid
./bin/elasticsearch -p es2.pid
./bin/elasticsearch -p es3.pid
sleep 10
curl -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_nodes=3"
# Create more replicas than we have nodes
curl -XPOST "http://localhost:9200/test_close?pretty=true" -d '{
    "settings": {
        "number_of_shards": 5,
        "number_of_replicas": 3
    }
}'
# Wait until all the shards that are going to allocate have allocated
sleep 5
curl -XGET "http://localhost:9200/_cluster/health?pretty=true"
# Kill a server
kill "$(cat es3.pid)"
rm -- es3.pid
curl -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_nodes=2"
# Close the index
curl -XPOST "http://localhost:9200/test_close/_close?pretty=true"
# Wait a bit and open it
sleep 2
curl -XPOST "http://localhost:9200/test_close/_open?pretty=true"
# Bounded wait so the script continues even if yellow is never reached
curl -s -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_status=yellow&timeout=10s"
# Restart the third node and wait for recovery
./bin/elasticsearch -p es3.pid
curl -s -XGET "http://localhost:9200/_cluster/health?pretty=true&wait_for_status=yellow&timeout=20s"
# Tear down: stop every node and remove its pid file
for pidfile in *.pid; do kill "$(cat "$pidfile")"; rm -- "$pidfile"; done
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment