/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.solr.cloud;

import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.CompletableFuture;
import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.response.CollectionAdminResponse;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.SolrZkClient;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.util.NamedList;
import org.apache.zookeeper.KeeperException;
import org.junit.BeforeClass;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SolrCoreReloaderTest extends SolrCloudTestCase {

  private static final int TIMEOUT = 3000;
  private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());

  @BeforeClass
  public static void beforeTest() throws Exception {
    // System.setProperty("metricsEnabled", "true");
    configureCluster(4)
        .addConfig("conf", configset("cloud-minimal"))
        .addConfig("conf2", configset("cloud-dynamic"))
        .configure();
  }
  /**
   * When a config name is not specified during collection creation, the _default configset should
   * be used. Once the collection is active, every core is reloaded via {@link SolrCoreReloader}.
   */
  @Test
  public void testCreateWithDefaultConfigSet() throws Exception {
    String collectionName = getSaferTestName();
    CollectionAdminResponse response =
        CollectionAdminRequest.createCollection(collectionName, 2, 2)
            .setPerReplicaState(SolrCloudTestCase.USE_PER_REPLICA_STATE)
            .process(cluster.getSolrClient());
    cluster.waitForActiveCollection(collectionName, 2, 4);

    assertEquals(0, response.getStatus());
    assertTrue(response.isSuccess());
    Map<String, NamedList<Integer>> coresStatus = response.getCollectionCoresStatus();
    assertEquals(4, coresStatus.size());
    for (String coreName : coresStatus.keySet()) {
      NamedList<Integer> status = coresStatus.get(coreName);
      assertEquals(0, (int) status.get("status"));
      assertTrue(status.get("QTime") > 0);
    }

    // Sometimes multiple cores land on the same node so it's less than 4
    int nodesCreated = response.getCollectionNodesStatus().size();

    // Use of _default configset should generate a warning for data-driven functionality in
    // production use
    assertTrue(
        response.getWarning() != null
            && response.getWarning().contains("NOT RECOMMENDED for production use"));

    // response =
    //     CollectionAdminRequest.deleteCollection(collectionName).process(cluster.getSolrClient());
    // assertEquals(0, response.getStatus());
    // assertTrue(response.isSuccess());
    // Map<String, NamedList<Integer>> nodesStatus = response.getCollectionNodesStatus();
    // assertEquals(nodesStatus.toString(), nodesCreated, nodesStatus.size());
    // waitForState(
    //     "Expected " + collectionName + " to disappear from cluster state",
    //     collectionName,
    //     (n, c) -> c == null);
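
    // Exercise the standalone reloader below: it reads cluster state from ZooKeeper and issues a
    // core-admin RELOAD against every replica of the collection.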
    SolrCoreReloader.main(new String[] {cluster.getZkServer().getZkAddress(), collectionName});
  }
}
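
/**
 * Standalone helper that reloads every core of a collection. Intended to be run as a command-line
 * tool: the first argument is the ZooKeeper address, the second the collection name.
 */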
class SolrCoreReloader {

  public static void main(String[] args) {
    if (args.length < 2) {
      System.err.println("Usage: SolrCoreReloader <zkAddress> <collectionName>");
      return;
    }
    try (SolrZkClient zkClient = new SolrZkClient.Builder().withUrl(args[0]).build();
        ZkStateReader zkStateReader = new ZkStateReader(zkClient)) {
      List<String> solrCoreUrls = fetchSolrCoreUrls(zkStateReader, args[1]);
      reloadSolrCores(solrCoreUrls).join();
      System.out.println("All cores reloaded.");
    } catch (Exception e) {
      e.printStackTrace();
    }
  }
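
  /**
   * Walks the cluster state in ZooKeeper and builds a core-admin RELOAD URL for every replica of
   * the given collection.
   */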
  private static List<String> fetchSolrCoreUrls(ZkStateReader zkStateReader, String collectionName)
      throws KeeperException, InterruptedException, IOException {
    List<String> solrCoreUrls = new ArrayList<>();
    zkStateReader.createClusterStateWatchersAndUpdate();
    Map<String, ClusterState.CollectionRef> collectionStates =
        zkStateReader.getClusterState().getCollectionStates();
    ClusterState.CollectionRef collectionState = collectionStates.get(collectionName);
    if (collectionState != null) {
      Collection<Slice> slices = collectionState.get().getSlices();
      for (Slice slice : slices) {
        Collection<Replica> replicas = slice.getReplicas();
        for (Replica replica : replicas) {
          String coreUrl =
              replica.getBaseUrl() + "/admin/cores?action=RELOAD&core=" + replica.getCoreName();
          System.out.println("Core URL: " + coreUrl);
          solrCoreUrls.add(coreUrl);
        }
      }
    } else {
      System.err.println("Collection not found in cluster state: " + collectionName);
    }
    return solrCoreUrls;
  }
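
  /**
   * Fires the RELOAD requests concurrently with java.net.http.HttpClient and returns a future that
   * completes once every request has finished, successfully or not.
   */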
  private static CompletableFuture<Void> reloadSolrCores(List<String> solrCoreUrls) {
    HttpClient client = HttpClient.newHttpClient();
    List<CompletableFuture<Void>> futures = new ArrayList<>();
    for (String url : solrCoreUrls) {
      HttpRequest request =
          HttpRequest.newBuilder()
              .uri(URI.create(url))
              .header("Content-Type", "application/json")
              .GET()
              .build();
      CompletableFuture<Void> future =
          client.sendAsync(request, HttpResponse.BodyHandlers.ofString())
              .thenApply(HttpResponse::body)
              .thenAccept(body -> System.out.println("Reloaded core: " + url + " Response: " + body))
              .exceptionally(e -> {
                System.err.println("Failed to reload core: " + url + " Error: " + e.getMessage());
                return null;
              });
      futures.add(future);
    }
    return CompletableFuture.allOf(futures.toArray(new CompletableFuture[0]));
  }
}