Code to dynamically set the number of shards per node for each elasticsearch index.
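For example, an index with 10 total shards (say 5 primaries with 1 replica each) on a cluster with 4 data nodes would get index.routing.allocation.total_shards_per_node = ceil(10 / 4) = 3, matching the calculation in the code below.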
/**
 * Iterate over the indexes and automatically set the index.routing.allocation.total_shards_per_node
 * based on the total shards for the index and the number of data nodes that we have
 */
public void setTotalShardsPerNode() {
    ClusterHealthResponse health = ESIndexer.es.client.admin().cluster().health(new ClusterHealthRequest()).actionGet();

    // These values are used to decide what to do below
    int numDataNodes = health.getNumberOfDataNodes();
    int initShards = health.getInitializingShards();
    int relocShards = health.getRelocatingShards();

    // Iterating over the concrete indices
    ClusterStateResponse clusterState = ESIndexer.es.client.admin().cluster().state(new ClusterStateRequest()).actionGet();
    String[] allIndices = clusterState.getState().metaData().concreteAllIndices();
    Map<String, ClusterIndexHealth> allIndexHealth = health.getIndices();

    String error = "";
    boolean updatedSettings = false;
    for (String indexName : allIndices) {
        ClusterIndexHealth indexHealth = allIndexHealth.get(indexName);
        int unassignedIndexShards = indexHealth.getUnassignedShards();
        IndexMetaData indexMeta = clusterState.getState().metaData().index(indexName);
        int numTotalShards = indexMeta.totalNumberOfShards();
        long shardsPerServer = (long) Math.ceil((double) numTotalShards / (double) numDataNodes);

        // Have a small safety net. If the cluster is stable (nothing initializing or relocating cluster wide),
        // but we still have unassigned shards, bump this value by 1.
        // The next time around, this value will get bumped back down and then hopefully we stabilize.
        // This issue was observed in dev and hopefully addressed in a future elasticsearch release.
        if (initShards == 0 && relocShards == 0 && unassignedIndexShards > 0)
            shardsPerServer += 1;

        Settings currentSettings = indexMeta.getSettings();
        String totalShardsPerNode = currentSettings.get("index.routing.allocation.total_shards_per_node");
        if (totalShardsPerNode == null || Long.valueOf(totalShardsPerNode) != shardsPerServer) {
            Settings settingsUpdate = settingsBuilder().put("index.routing.allocation.total_shards_per_node", shardsPerServer).build();
            try {
                logger.error("INFO: Updating the index.routing.allocation.total_shards_per_node for index: " + indexName
                        + "\nNum Data Nodes: " + Integer.toString(numDataNodes) + "\nTotal shards: " + Integer.toString(numTotalShards)
                        + "\nOld value: " + totalShardsPerNode + "\nNew value: " + Long.toString(shardsPerServer));
                ESIndexer.es.client.admin().indices()
                        .prepareUpdateSettings(indexName).setSettings(settingsUpdate)
                        .execute().actionGet();
                updatedSettings = true;
            } catch (Exception e) {
                error = error + "\nError updating index settings for index.routing.allocation.total_shards_per_node. Index: " + indexName + "\n" + e.getMessage();
            }
        }
    }

    // Just in case we get into some funky toggle state
    if (updatedSettings)
        consectiveAlloctionTweaks += 1;
    else
        consectiveAlloctionTweaks = 0;

    // This really shouldn't happen, but if it does, we gotta let people know so that we take a look
    if (consectiveAlloctionTweaks > 30) {
        AMS.raiseAlarm("setTotalShardsPerNode",
                new StringBuilder().append("Error - Issue with getting the cluster to properly balance. Total consecutive tweaks: ")
                        .append(consectiveAlloctionTweaks)
                        .append(". Please manually tweak some cluster settings to have the cluster get into proper balance").toString());
    } else {
        AMS.clearAlarm("setTotalShardsPerNode");
    }
}
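For context, here is a minimal sketch of how a method like this might be run on a fixed schedule so the setting keeps tracking changes in index and data node counts. The scheduler class, its name, and the 5-minute interval are illustrative assumptions and are not part of the original gist; the Runnable passed in would simply call setTotalShardsPerNode() on whatever class owns the method above.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative only: run an allocation-tweak task periodically so the
// index.routing.allocation.total_shards_per_node values stay current.
public class AllocationTweakScheduler {

    private final ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    private final Runnable tweakTask;

    public AllocationTweakScheduler(Runnable tweakTask) {
        this.tweakTask = tweakTask;
    }

    public void start() {
        // Re-evaluate every 5 minutes; the interval is an assumption, not taken from the gist.
        scheduler.scheduleAtFixedRate(tweakTask, 0, 5, TimeUnit.MINUTES);
    }

    public void stop() {
        scheduler.shutdown();
    }
}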