Skip to content

Instantly share code, notes, and snippets.

View ShubhamRwt's full-sized avatar

Shubham Rawat ShubhamRwt

  • Engineer at Red Hat
  • New Delhi, India
View GitHub Profile
package info.company.Lec15;
public class Nqueens {
public static void main(String[] args) {
    // Solve the N-Queens puzzle on a 4x4 chessboard,
    // beginning the recursive placement at row 0.
    boolean[][] chessboard = new boolean[4][4];
    nqueen(chessboard, 0);
}
@ShubhamRwt
ShubhamRwt / nqueen.java
Created August 8, 2020 18:27
Workshop Codes
package info.company.Lec15;
public class Nqueens {
// Entry point: allocates an empty 4x4 board (false = no queen placed)
// and starts the recursive N-Queens search from row 0.
// NOTE(review): nqueen is defined elsewhere in the gist — behavior of the
// recursion cannot be verified from this fragment.
public static void main(String[] args) {
boolean[][] board = new boolean[4][4];
nqueen(board, 0);
}
@ShubhamRwt
ShubhamRwt / bitmask
Created August 15, 2020 16:39
workshop
package com.company;
public class Main {
public static void main(String[] args) {
oddeven(6);
int[] ar ={1,1,5,2,2,6,6,7};
System.out.println(uniqueno(ar));
@ShubhamRwt
ShubhamRwt / .java
Created February 18, 2021 08:57
Code
protected static JsonObject parseLoadStats(JsonArray brokerLoadBeforeArray, JsonArray brokerLoadAfterArray) {
// There is no guarantee that the brokers are in the same order in both the before and after arrays.
// Therefore we need to convert them into maps indexed by broker ID so we can align them later for the comparison.
Map<Integer, Map<String, Object>> loadBeforeMap = extractLoadParameters(brokerLoadBeforeArray);
Map<Integer, Map<String, Object>> loadAfterMap = extractLoadParameters(brokerLoadAfterArray);
if (loadBeforeMap.size() != loadAfterMap.size()) {
throw new IllegalArgumentException("Broker data was missing from the load before/after information");
}
@ShubhamRwt
ShubhamRwt / metrics
Created July 22, 2021 06:41
Metrics
# HELP jvm_gc_pause_seconds Time spent in GC pause
# TYPE jvm_gc_pause_seconds summary
jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 2.0
jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.244
jvm_gc_pause_seconds_count{action="end of minor GC",cause="Allocation Failure",} 16.0
jvm_gc_pause_seconds_sum{action="end of minor GC",cause="Allocation Failure",} 0.157
# HELP jvm_gc_pause_seconds_max Time spent in GC pause
# TYPE jvm_gc_pause_seconds_max gauge
jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0
jvm_gc_pause_seconds_max{action="end of minor GC",cause="Allocation Failure",} 0.0
# HELP jvm_buffer_count_buffers An estimate of the number of buffers in the pool
# TYPE jvm_buffer_count_buffers gauge
jvm_buffer_count_buffers{id="mapped",} 0.0
jvm_buffer_count_buffers{id="direct",} 22.0
# HELP system_load_average_1m The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time
# TYPE system_load_average_1m gauge
system_load_average_1m 0.93
# HELP vertx_http_server_active_requests Number of requests being processed
# TYPE vertx_http_server_active_requests gauge
vertx_http_server_active_requests{method="GET",} 2.0
@Test
void sendSimpleMessagesPartition(VertxTestContext context) throws InterruptedException {
String topic = "sendSimpleMessageToPartition";
KAFKA_FACADE.createTopic(topic, 2, 1);
ProtonClient client = ProtonClient.create(vertx);
Checkpoint consume = context.checkpoint();
client.connect(AmqpBridgeIT.BRIDGE_HOST, AmqpBridgeIT.BRIDGE_PORT, ar -> {
@Test
void sendSimpleMessages(VertxTestContext context) throws InterruptedException {
String topic = "sendSimpleMessage";
KAFKA_FACADE.createTopic(topic, 1, 1);
ProtonClient client = ProtonClient.create(vertx);
Checkpoint consume = context.checkpoint();
client.connect(AmqpBridgeIT.BRIDGE_HOST, AmqpBridgeIT.BRIDGE_PORT, ar -> {
@SuppressWarnings({"checkstyle:MethodLength", "checkstyle:JavaNCSS"})
public static KafkaCluster fromCrd(Reconciliation reconciliation, Kafka kafkaAssembly, KafkaVersion.Lookup versions, Storage oldStorage, int oldReplicas) {
KafkaCluster result = new KafkaCluster(reconciliation, kafkaAssembly);
result.setOwnerReference(kafkaAssembly);
KafkaSpec kafkaSpec = kafkaAssembly.getSpec();
KafkaClusterSpec kafkaClusterSpec = kafkaSpec.getKafka();
result.setReplicas(kafkaClusterSpec.getReplicas());
public <K, V> void produce(String producerName, int messageCount, Serializer<K> keySerializer, Serializer<V> valueSerializer, Runnable completionCallback, Supplier<ProducerRecord<K, V>> messageSupplier) {
Properties props = new Properties();
props.setProperty("bootstrap.servers", kafkaCluster.bootstrapServers());
props.setProperty("acks", Integer.toString(1));
Thread t = new Thread(() -> {
LOGGER.info("Starting producer {} to write {} messages", producerName, messageCount);
try {
KafkaProducer<K, V> producer = new KafkaProducer<>(props, keySerializer, valueSerializer);
Throwable cause = null;