Tendermint load-testing notes: a 2-validator test network with a single seed node.
# A 2-validator test network with a single seed node.
id: testnet_02validators

templates:
  base_tendermint_node: &base_tendermint_node
    binary: v0.32.1
    # We use the same config for validators and the seed because of
    # https://github.com/tendermint/tendermint/issues/3778
    config_template: ./validator-config.toml
  base_load_test: &base_load_test
    method: "tm-bench"
    client_nodes: 1
    targets:
      # We're only targeting a single node in us-east-1 (same region as the
      # tm-bench instance)
      - validators[0]
    time: 300
    broadcast_tx_method: async
    connections: 1
    size: 100

monitoring:
  signalfx:
    enabled: no
  influxdb:
    enabled: yes
    deploy: yes
    password: ${INFLUXDB_PASSWORD}

node_groups:
  - seeds:
      <<: *base_tendermint_node
      validators: no
      service_state: started
      regions:
        - us_east_1: 1
  - validators:
      <<: *base_tendermint_node
      validators: yes
      in_genesis: yes
      use_seeds:
        - seeds
      regions:
        - us_east_1: 1
        - eu_central_1: 1

load_tests:
  # 98 txs/sec, 208 txs/block (max 0 consensus rounds)
  - 100_txs_per_sec:
      <<: *base_load_test
      rate: 100
  # 196 txs/sec, 423 txs/block (max 0 consensus rounds)
  - 200_txs_per_sec:
      <<: *base_load_test
      rate: 200
  # 393 txs/sec, 836 txs/block (max 0 consensus rounds)
  - 400_txs_per_sec:
      <<: *base_load_test
      rate: 400
  # 788 txs/sec, 1807 txs/block (max 1 consensus round)
  - 800_txs_per_sec:
      <<: *base_load_test
      rate: 800
  # 1568 txs/sec, 4730 txs/block (max 1 consensus round)
  - 1600_txs_per_sec:
      <<: *base_load_test
      rate: 1600
  - 3200_txs_per_sec:
      <<: *base_load_test
      rate: 3200
  - 6400_txs_per_sec:
      <<: *base_load_test
      rate: 6400
  - 12800_txs_per_sec:
      <<: *base_load_test
      rate: 12800
  - 25600_txs_per_sec:
      <<: *base_load_test
      rate: 25600
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml

##### main base config options #####

# TCP or UNIX socket address of the ABCI application,
# or the name of an ABCI application compiled in with the Tendermint binary
proxy_app = "kvstore"

# A custom human readable name for this node
moniker = ""

# If this node is many blocks behind the tip of the chain, FastSync
# allows them to catchup quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = true

# Database backend: goleveldb | cleveldb | boltdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
#   - pure go
#   - stable
# * cleveldb (uses levigo wrapper)
#   - fast
#   - requires gcc
#   - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
#   - EXPERIMENTAL
#   - may be faster is some use-cases (random reads - indexer)
#   - use boltdb build tag (go build -tags boltdb)
db_backend = "goleveldb"

# Database directory
db_dir = "data"

# Output level for logging, including package level options
log_level = "main:info,state:info,*:error"

# Output format: 'plain' (colored text) or 'json'
log_format = "plain"

##### additional base config options #####

# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "config/genesis.json"

# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_key_file = "config/priv_validator_key.json"

# Path to the JSON file containing the last sign state of a validator
priv_validator_state_file = "data/priv_validator_state.json"

# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
priv_validator_laddr = ""

# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node_key_file = "config/node_key.json"

# Mechanism to connect to the ABCI application: socket | grpc
abci = "socket"

# TCP or UNIX socket address for the profiling server to listen on
prof_laddr = ""

# If true, query the ABCI app on connecting to a new peer
# so the app can decide if we should keep the connection or not
filter_peers = false

##### advanced configuration options #####

##### rpc server configuration options #####
[rpc]

# TCP or UNIX socket address for the RPC server to listen on
laddr = "tcp://0.0.0.0:26657"

# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = []

# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = ["HEAD", "GET", "POST", ]

# A list of non simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time", ]

# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = ""

# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
grpc_max_open_connections = 900

# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
unsafe = false

# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = 900

# Maximum number of unique clientIDs that can /subscribe
# If you're using /broadcast_tx_commit, set to the estimated maximum number
# of broadcast_tx_commit calls per block.
max_subscription_clients = 100

# Maximum number of unique queries a given client can /subscribe to
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
# the estimated # maximum number of broadcast_tx_commit calls per block.
max_subscriptions_per_client = 5

# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
# See https://github.com/tendermint/tendermint/issues/3435
timeout_broadcast_tx_commit = "10s"

# The name of a file containing certificate that is used to create the HTTPS server.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
tls_cert_file = ""

# The name of a file containing matching private key that is used to create the HTTPS server.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
tls_key_file = ""

##### peer to peer configuration options #####
[p2p]

# Address to listen for incoming connections
laddr = "tcp://0.0.0.0:26656"

# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address.
external_address = ""

# Comma separated list of seed nodes to connect to
seeds = ""

# Comma separated list of nodes to keep persistent connections to
persistent_peers = ""

# UPNP port forwarding
upnp = false

# Path to address book
addr_book_file = "config/addrbook.json"

# Set true for strict address routability rules
# Set false for private or local networks
addr_book_strict = false

# Maximum number of inbound peers
max_num_inbound_peers = 40

# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10

# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"

# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024

# Rate at which packets can be sent, in bytes/second
send_rate = 5120000

# Rate at which packets can be received, in bytes/second
recv_rate = 5120000

# Set true to enable the peer-exchange reactor
pex = true

# Seed mode, in which node constantly crawls the network and looks for
# peers. If another node asks it for addresses, it responds and disconnects.
#
# Does not work if the peer-exchange reactor is disabled.
seed_mode = false

# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""

# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = true

# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"

##### mempool configuration options #####
[mempool]

recheck = true
broadcast = true
wal_dir = ""

# Maximum number of transactions in the mempool
size = 30000

# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max_txs_bytes=5MB, mempool will only accept 5 transactions).
max_txs_bytes = 1073741824

# Size of the cache (used to filter transactions we saw earlier) in transactions
cache_size = 10000

##### consensus configuration options #####
[consensus]

wal_file = "data/cs.wal/wal"

timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"

# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false

# EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true
create_empty_blocks_interval = "0s"

# Reactor sleep duration parameters
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"

##### transactions indexer configuration options #####
[tx_index]

# What indexer to use for transactions
#
# Options:
#   1) "null"
#   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "kv"

# Comma-separated list of tags to index (by default the only tag is "tx.hash")
#
# You can also index transactions by height by adding "tx.height" tag here.
#
# It's recommended to index only a subset of tags due to possible memory
# bloat. This is, of course, depends on the indexer's DB and the volume of
# transactions.
index_tags = ""

# When set to true, tells indexer to index all tags (predefined tags:
# "tx.hash", "tx.height" and all tags from DeliverTx responses).
#
# Note this may be not desirable (see the comment above). IndexTags has a
# precedence over IndexAllTags (i.e. when given both, IndexTags will be
# indexed).
index_all_tags = false

##### instrumentation configuration options #####
[instrumentation]

# When true, Prometheus metrics are served under /metrics on
# PrometheusListenAddr.
# Check out the documentation for the list of available metrics.
prometheus = true

# Address to listen for Prometheus collector(s) connections
prometheus_listen_addr = ":26660"

# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = 5

# Instrumentation namespace
namespace = "tendermint"
Next, hitting the same network with 2,500 tx/sec for 5 mins. Hits 100% of mempool size.
tm-bench completes successfully:
Stats Avg StdDev Max Total
Txs/sec 375 1877 12500 112497
Blocks/sec 0.040 0.196 1 12
Panic
Caught a panic at about 08:31 (time on the graph). To be expected, since we hit almost 100% memory utilization on the t3.small
instances I was using and the mempool size limit was 1GB.
...
Jul 30 12:24:19 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:19.380] Executed block module=state height=1067 validTxs=10000 invalidTxs=0
Jul 30 12:24:19 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:19.520] Committed state module=state height=1067 txs=10000 appHash=A0F8890100000000
Jul 30 12:24:22 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:22.624] Executed block module=state height=1068 validTxs=8080 invalidTxs=0
Jul 30 12:24:22 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:22.728] Committed state module=state height=1068 txs=8080 appHash=C0F68A0100000000
Jul 30 12:24:28 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:28.104] Executed block module=state height=1069 validTxs=11051 invalidTxs=0
Jul 30 12:24:28 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:28.300] Committed state module=state height=1069 txs=11051 appHash=96A38C0100000000
Jul 30 12:24:30 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:30.807] Executed block module=state height=1070 validTxs=10869 invalidTxs=0
Jul 30 12:24:30 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:30.900] Committed state module=state height=1070 txs=10869 appHash=80CD8D0100000000
Jul 30 12:24:35 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:35.615] Executed block module=state height=1071 validTxs=10000 invalidTxs=0
Jul 30 12:24:35 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:35.699] Committed state module=state height=1071 txs=10000 appHash=A0E98E0100000000
Jul 30 12:24:38 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:38.825] Executed block module=state height=1072 validTxs=10000 invalidTxs=0
Jul 30 12:24:39 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:39.525] Committed state module=state height=1072 txs=10000 appHash=C085900100000000
Jul 30 12:24:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:24:45.125] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:24:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:24:45.130] Couldn't connect to any seeds module=p2p
Jul 30 12:24:46 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:46.535] Executed block module=state height=1073 validTxs=12500 invalidTxs=0
Jul 30 12:24:46 ip-172-31-83-228 tendermint: I[2019-07-30|12:24:46.681] Committed state module=state height=1073 txs=12500 appHash=E8C8910100000000
Jul 30 12:25:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:25:15.049] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:25:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:25:15.049] Couldn't connect to any seeds module=p2p
Jul 30 12:25:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:25:45.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:25:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:25:45.048] Couldn't connect to any seeds module=p2p
Jul 30 12:26:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:26:15.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:26:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:26:15.048] Couldn't connect to any seeds module=p2p
Jul 30 12:26:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:26:45.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:26:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:26:45.048] Couldn't connect to any seeds module=p2p
Jul 30 12:27:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:27:15.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:27:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:27:15.048] Couldn't connect to any seeds module=p2p
Jul 30 12:27:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:27:45.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:27:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:27:45.048] Couldn't connect to any seeds module=p2p
Jul 30 12:28:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:28:15.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:28:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:28:15.049] Couldn't connect to any seeds module=p2p
Jul 30 12:28:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:28:45.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:28:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:28:45.048] Couldn't connect to any seeds module=p2p
Jul 30 12:28:57 ip-172-31-83-228 tendermint: E[2019-07-30|12:28:57.186] Error closing connection module=rpc-server protocol=websocket remote=172.31.92.35:57030 err="close tcp 172.31.83.228:26657->172.31.92.35:57030: use of closed network connection"
Jul 30 12:29:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:29:15.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:29:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:29:15.048] Couldn't connect to any seeds module=p2p
Jul 30 12:29:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:29:45.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:29:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:29:45.048] Couldn't connect to any seeds module=p2p
Jul 30 12:30:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:30:15.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:30:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:30:15.048] Couldn't connect to any seeds module=p2p
Jul 30 12:30:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:30:45.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:30:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:30:45.048] Couldn't connect to any seeds module=p2p
Jul 30 12:31:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:31:15.048] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:31:15 ip-172-31-83-228 tendermint: E[2019-07-30|12:31:15.048] Couldn't connect to any seeds module=p2p
Jul 30 12:31:43 ip-172-31-83-228 tendermint: I[2019-07-30|12:31:43.764] Executed block module=state height=1074 validTxs=20749 invalidTxs=0
Jul 30 12:31:44 ip-172-31-83-228 tendermint: I[2019-07-30|12:31:44.082] Committed state module=state height=1074 txs=20749 appHash=828D940100000000
Jul 30 12:31:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:31:45.056] Error dialing seed module=p2p err="connection with 69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656 has been established or dialed" seed=69cbd6a31dfb03c8e35c1ecca104f942308e2918@172.31.94.189:26656
Jul 30 12:31:45 ip-172-31-83-228 tendermint: E[2019-07-30|12:31:45.056] Couldn't connect to any seeds module=p2p
Jul 30 12:31:45 ip-172-31-83-228 tendermint: fatal error: runtime: out of memory
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime stack:
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.throw(0xee6942, 0x16)
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/panic.go:617 +0x72
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.sysMap(0xc068000000, 0x4000000, 0x18c21f8)
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/mem_linux.go:170 +0xc7
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.(*mheap).sysAlloc(0x18a9940, 0x1ee000, 0x18a9950, 0xf7)
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/malloc.go:633 +0x1cd
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.(*mheap).grow(0x18a9940, 0xf7, 0x0)
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/mheap.go:1222 +0x42
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.(*mheap).allocSpanLocked(0x18a9940, 0xf7, 0x18c2208, 0x20300000000000)
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/mheap.go:1150 +0x37f
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.(*mheap).alloc_m(0x18a9940, 0xf7, 0x101, 0x7fb614a66fff)
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/mheap.go:977 +0xc2
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.(*mheap).alloc.func1()
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/mheap.go:1048 +0x4c
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.(*mheap).alloc(0x18a9940, 0xf7, 0x7fb614010101, 0x7fb61188b5b0)
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/mheap.go:1047 +0x8a
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.largeAlloc(0x1ec32d, 0x450101, 0x7fb61188b5b0)
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/malloc.go:1055 +0x99
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.mallocgc.func1()
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/malloc.go:950 +0x46
Jul 30 12:31:45 ip-172-31-83-228 tendermint: runtime.systemstack(0x1)
Jul 30 12:31:45 ip-172-31-83-228 tendermint: /usr/local/go/src/runtime/asm_amd64.s:351 +0x66
...
After this, I reset the network (consensus height back to 0, regenerated config, etc.) and set the mempool size down to 10,000. It appears as though even at 3,200 tx/sec, a fresh network performs better than a network with more history. The actual tx commit rate ended up being just over 2,000 tx/sec.
tm-bench completed successfully:
Stats Avg StdDev Max Total
Txs/sec 2013 3097 10000 603859
Blocks/sec 0.327 0.469 1 98
I then reset the network again, dropping the mempool size down to 5000, and attempting to send transactions at 3,200 tx/sec was successful, albeit at a slower commit rate. Again, this is a fresh blockchain.
tm-bench output:
Stats Avg StdDev Max Total
Txs/sec 1505 1984 5000 451405
Blocks/sec 0.387 0.487 1 116
Then, without resetting the network, I attempted to do a bit of a longer test at the same transaction rate. tm-bench failed on each of 4 attempts to do a long-running benchmark due to one or more of the nodes panicking.
All of the panics resemble:
...
Jul 30 14:50:07 ip-172-31-36-10 tendermint: fatal error: runtime: out of memory
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime stack:
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.throw(0xee6942, 0x16)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/panic.go:617 +0x72
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.sysMap(0xc068000000, 0x4000000, 0x18c21f8)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/mem_linux.go:170 +0xc7
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.(*mheap).sysAlloc(0x18a9940, 0x134000, 0x18a9950, 0x9a)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/malloc.go:633 +0x1cd
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.(*mheap).grow(0x18a9940, 0x9a, 0x0)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/mheap.go:1222 +0x42
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.(*mheap).allocSpanLocked(0x18a9940, 0x9a, 0x18c2208, 0x0)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/mheap.go:1150 +0x37f
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.(*mheap).alloc_m(0x18a9940, 0x9a, 0x101, 0x0)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/mheap.go:977 +0xc2
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.(*mheap).alloc.func1()
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/mheap.go:1048 +0x4c
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.(*mheap).alloc(0x18a9940, 0x9a, 0xc000010101, 0x426b80)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/mheap.go:1047 +0x8a
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.largeAlloc(0x133a0d, 0x450101, 0xc067df2000)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/malloc.go:1055 +0x99
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.mallocgc.func1()
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/malloc.go:950 +0x46
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.systemstack(0x0)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/asm_amd64.s:351 +0x66
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.mstart()
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/proc.go:1153
Jul 30 14:50:07 ip-172-31-36-10 tendermint: goroutine 14 [running]:
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.systemstack_switch()
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/asm_amd64.s:311 fp=0xc01265b110 sp=0xc01265b108 pc=0x45a960
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.mallocgc(0x133a0d, 0xd703e0, 0x2001, 0x0)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/malloc.go:949 +0x872 fp=0xc01265b1b0 sp=0xc01265b110 pc=0x40c392
Jul 30 14:50:07 ip-172-31-36-10 tendermint: runtime.makeslice(0xd703e0, 0x131a8c, 0x133a0d, 0x1633)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /usr/local/go/src/runtime/slice.go:49 +0x6c fp=0xc01265b1e0 sp=0xc01265b1b0 pc=0x443d7c
Jul 30 14:50:07 ip-172-31-36-10 tendermint: github.com/syndtr/goleveldb/leveldb/util.(*BufferPool).Get(0xc000120000, 0x131a8c, 0x0, 0x0, 0x0)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /go/pkg/mod/github.com/syndtr/goleveldb@v1.0.1-0.20190318030020-c3a204f8e965/leveldb/util/buffer_pool.go:152 +0x5c2 fp=0xc01265b260 sp=0xc01265b1e0 pc=0xb5bb52
Jul 30 14:50:07 ip-172-31-36-10 tendermint: github.com/syndtr/goleveldb/leveldb/table.(*Reader).readRawBlock(0xc051843e10, 0x126265, 0x121e9, 0xc00006a701, 0xc07750321d, 0x100, 0x126265, 0xc01265b401, 0x400001)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /go/pkg/mod/github.com/syndtr/goleveldb@v1.0.1-0.20190318030020-c3a204f8e965/leveldb/table/reader.go:587 +0x4ae fp=0xc01265b3e0 sp=0xc01265b260 pc=0xb6ecee
Jul 30 14:50:07 ip-172-31-36-10 tendermint: github.com/syndtr/goleveldb/leveldb/table.(*Reader).readBlock(0xc051843e10, 0x126265, 0x121e9, 0x1, 0x0, 0x440, 0xc000001e00)
Jul 30 14:50:07 ip-172-31-36-10 tendermint: /go/pkg/mod/github.com/syndtr/goleveldb@v1.0.1-0.20190318030020-c3a204f8e965/leveldb/table/reader.go:603 +0x55 fp=0xc01265b458 sp=0xc01265b3e0 pc=0xb6f685
Jul 30 14:50:07 ip-172-31-36-10 tendermint: github.com/syndtr/goleveldb/leveldb/table.(*Reader).readBlockCached(0xc051843e10, 0x126265, 0x121e9, 0xb60001, 0xc05acec6e0, 0xc01265b508, 0x42c84f, 0x8, 0xc0079eae00)
...
After running the 3200 txs/sec load test