The Kafka 5GB/s max throughput benchmark with OpenMessaging Benchmark
#!/bin/bash
set -e
# This is a script to run all the cases that are needed for the Ursa project
mkdir -p driver-ursa
mkdir -p ursa-workloads
# Prepare the Kafka driver configuration for the Ursa max-throughput cases
cat > driver-ursa/ursa.yaml <<EOF
name: Kafka
driverClass: io.openmessaging.benchmark.driver.kafka.KafkaBenchmarkDriver
# Kafka client-specific configuration
replicationFactor: 3
topicConfig: |
  min.insync.replicas=2
commonConfig: |
  bootstrap.servers=<ursa-endpoint>
  sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username='user' password='token:<your-token>';
  security.protocol=SASL_PLAINTEXT
  sasl.mechanism=PLAIN
  session.timeout.ms=45000
  acks=1
  basic.auth.credentials.source=USER_INFO
  client.id=zone_id={zone.id}
producerConfig: |
  acks=1
  linger.ms=100
  buffer.memory=256000000
  max.in.flight.requests.per.connection=100000
consumerConfig: |
  auto.offset.reset=earliest
  enable.auto.commit=false
  max.partition.fetch.bytes=10485760
EOF
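# Optional sketch (not part of the original gist): fill in the <ursa-endpoint>
# and <your-token> placeholders above from environment variables instead of
# editing the file by hand. URSA_ENDPOINT and URSA_TOKEN are hypothetical
# names; export them before running, or skip this and edit driver-ursa/ursa.yaml directly.
if [[ -n "${URSA_ENDPOINT:-}" && -n "${URSA_TOKEN:-}" ]]; then
  sed -i "s|<ursa-endpoint>|${URSA_ENDPOINT}|; s|<your-token>|${URSA_TOKEN}|" driver-ursa/ursa.yaml
fi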
# case 1: Maximum throughput 5GB/s - 100 topics - 8 partitions - Null key
cat > ursa-workloads/benchmark-kafka-nullkey-100topic-8p-64kb-5G.yaml <<EOF
name: Maximum throughput 5GB/s - 100 topics - 8 partitions - Null key
topics: 100
partitionsPerTopic: 8
keyDistributor: "NO_KEY"
messageSize: 65536
payloadFile: "payload/payload-64Kb.data"
subscriptionsPerTopic: 1
consumerPerSubscription: 2
producersPerTopic: 2
producerRate: 80000
consumerBacklogSizeGB: 0
testDurationMinutes: 60
EOF
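# Sanity check of the target rate (a sketch; assumes producerRate in OMB is the
# aggregate publish rate across all producers in the workload):
# 80000 msg/s * 65536 B/msg = 5,242,880,000 B/s, i.e. roughly the 5 GB/s in the workload name.
echo "Target aggregate throughput: $(( 80000 * 65536 )) bytes/s (~5 GB/s)"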
# case 2: Maximum throughput 5GB/s - 100 topics - 8 partitions - Random key
cat > ursa-workloads/benchmark-kafka-randomkey-100topic-8p-64kb-5G.yaml <<EOF
name: Maximum throughput 5GB/s - 100 topics - 8 partitions - Random Key
topics: 100
partitionsPerTopic: 8
keyDistributor: "RANDOM_KEY"
messageSize: 65536
payloadFile: "payload/payload-64Kb.data"
subscriptionsPerTopic: 1
consumerPerSubscription: 2
producersPerTopic: 2
producerRate: 80000
consumerBacklogSizeGB: 0
testDurationMinutes: 60
EOF
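# Both workloads reference payload/payload-64Kb.data, which this script does not
# create. A minimal sketch to generate a 64 KiB random payload (assumes OMB
# resolves the relative path against the directory the benchmark is started from):
mkdir -p payload
[ -f payload/payload-64Kb.data ] || head -c 65536 /dev/urandom > payload/payload-64Kb.data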
# Define workloads as a space-separated string
WORKLOADS="ursa-workloads/benchmark-kafka-nullkey-100topic-8p-64kb-5G.yaml \
ursa-workloads/benchmark-kafka-randomkey-100topic-8p-64kb-5G.yaml"
echo "Running the following workloads: $WORKLOADS"
# Comma-separated list of OMB worker URLs
WORKERS="<OMB-workers>"
nohup bin/benchmark --drivers driver-ursa/ursa.yaml --workers $WORKERS $WORKLOADS > benchmark.log 2>&1 &
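# The benchmark now runs detached; follow progress with `tail -f benchmark.log`.
# When each workload finishes, OMB normally writes a per-workload JSON result
# file into the working directory (an assumption about default behavior; verify
# for your OMB version), which can be used to build the latency/throughput charts.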