@bg5sbk
tcp benchmark for echo servers

package main

import (
    "flag"
    "fmt"
    "log"
    "net"
    "sync"
    "sync/atomic"
    "time"
)

var (
    targetAddr  = flag.String("a", "127.0.0.1:12345", "target echo server address")
    testMsgLen  = flag.Int("l", 26, "test message length")
    testConnNum = flag.Int("c", 50, "test connection number")
    testSeconds = flag.Int("t", 10, "test duration in seconds")
)

func main() {
    flag.Parse()

    var (
        outNum uint64
        inNum  uint64
        stop   uint64
    )

    msg := make([]byte, *testMsgLen)

    // Flip the stop flag once the test duration has elapsed.
    go func() {
        time.Sleep(time.Second * time.Duration(*testSeconds))
        atomic.StoreUint64(&stop, 1)
    }()

    wg := new(sync.WaitGroup)
    for i := 0; i < *testConnNum; i++ {
        wg.Add(1)
        go func() {
            // The huge timeout effectively means "no dial timeout".
            if conn, err := net.DialTimeout("tcp", *targetAddr, time.Minute*99999); err == nil {
                recv := make([]byte, len(msg))
                for {
                    if _, err := conn.Write(msg); err != nil {
                        log.Println(err)
                        break
                    }
                    atomic.AddUint64(&outNum, 1)
                    if atomic.LoadUint64(&stop) == 1 {
                        break
                    }
                    if _, err := conn.Read(recv); err != nil {
                        log.Println(err)
                        break
                    }
                    atomic.AddUint64(&inNum, 1)
                    if atomic.LoadUint64(&stop) == 1 {
                        break
                    }
                }
            } else {
                log.Println(err)
            }
            wg.Done()
        }()
    }
    wg.Wait()

    fmt.Println("Benchmarking:", *targetAddr)
    fmt.Println(*testConnNum, "clients, running", *testMsgLen, "bytes,", *testSeconds, "sec.")
    fmt.Println()
    fmt.Println("Speed:", outNum/uint64(*testSeconds), "request/sec,", inNum/uint64(*testSeconds), "response/sec")
    fmt.Println("Requests:", outNum)
    fmt.Println("Responses:", inNum)
}

var net = require('net');
var cluster = require('cluster');
var numCPUs = require('os').cpus().length;

if (cluster.isMaster) {
    // Fork one worker per CPU and respawn any worker that dies.
    for (var i = 0; i < numCPUs; i++) {
        cluster.fork();
    }
    cluster.on('exit', function (worker, code, signal) {
        console.log('worker ' + worker.process.pid + ' died');
        cluster.fork();
    });
} else {
    console.log("worker");
    // Each worker runs an echo server that pipes every socket back into itself.
    var server = net.createServer(function (socket) {
        socket.pipe(socket);
    });
    server.listen(12345, '0.0.0.0', 10000); // third argument is the listen backlog
}

package main

import (
    "flag"
    "net"
)

var testMsgLen = flag.Int("l", 26, "test message length")

func main() {
    flag.Parse()
    if lsn, err := net.Listen("tcp", "0.0.0.0:12345"); err == nil {
        for {
            conn, err := lsn.Accept()
            if err != nil {
                break
            }
            sendChan := make(chan []byte, 1000)

            // Reader goroutine: read fixed-size messages and queue them for the writer.
            go func() {
                defer func() {
                    conn.Close()
                    close(sendChan)
                }()
                msg := make([]byte, *testMsgLen)
            L:
                for {
                    if _, err := conn.Read(msg); err == nil {
                        select {
                        case sendChan <- msg:
                        default:
                            break L // send queue is full; drop the connection
                        }
                    } else {
                        break L
                    }
                }
            }()

            // Writer goroutine: echo queued messages until the channel is closed.
            go func() {
                defer conn.Close()
                for {
                    msg, ok := <-sendChan
                    if !ok {
                        break
                    }
                    if _, err := conn.Write(msg); err != nil {
                        break
                    }
                }
            }()
        }
    }
}
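
One detail worth noting about the server above: the reader goroutine queues the very same msg slice on every iteration and then reuses it for the next Read, so a queued message can be overwritten before the writer goroutine has echoed it. If byte-accurate echoes mattered, a minimal adjustment would be to copy each message before queuing it. The sketch below is illustrative only (handleReads and its parameters are not part of the original gist):

// Sketch: a reader that copies each message before queuing it, so a later
// Read cannot clobber data still waiting to be written back.
func handleReads(conn net.Conn, sendChan chan<- []byte, msgLen int) {
    defer conn.Close()
    defer close(sendChan)
    buf := make([]byte, msgLen)
    for {
        n, err := conn.Read(buf)
        if err != nil {
            return
        }
        out := make([]byte, n)
        copy(out, buf[:n])
        select {
        case sendChan <- out:
        default:
            return // send queue full; drop the connection
        }
    }
}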

var net = require('net');
var server = net.createServer(function (socket) {
    socket.pipe(socket);
});
server.listen(12345, '0.0.0.0', 100000);

package main

import "net"

func main() {
    if lsn, err := net.Listen("tcp", "0.0.0.0:12345"); err == nil {
        for {
            conn, err := lsn.Accept()
            if err != nil {
                break
            }
            go func() {
                // Echo by copying the connection back into itself via
                // (*net.TCPConn).ReadFrom.
                conn.(*net.TCPConn).ReadFrom(conn)
            }()
        }
    }
}
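
The type assertion above hands the whole echo loop to (*net.TCPConn).ReadFrom, so every byte read from the socket is written straight back to it. For reference, an equivalent way to write the same server (a sketch, not part of the original gist) is io.Copy, which delegates to ReadFrom whenever the destination implements io.ReaderFrom; on Linux, sufficiently recent Go releases can perform such socket-to-socket copies with splice(2), though that depends on the Go version and platform.

package main

import (
    "io"
    "log"
    "net"
)

func main() {
    lsn, err := net.Listen("tcp", "0.0.0.0:12345")
    if err != nil {
        log.Fatal(err)
    }
    for {
        conn, err := lsn.Accept()
        if err != nil {
            break
        }
        go func(c net.Conn) {
            defer c.Close()
            // io.Copy delegates to (*net.TCPConn).ReadFrom, echoing the
            // connection back into itself.
            io.Copy(c, c)
        }(conn)
    }
}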

tcp_bench.go

$ go run tcp_bench.go --help
Usage of tcp_bench:
  -a="127.0.0.1:12345": target echo server address
  -c=50: test connection number
  -l=26: test message length
  -t=10: test duration in seconds

tcp_echo_normal.go

$ go run tcp_echo_normal.go -l=1024
$ go run tcp_bench.go -c=10 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
10 clients, running 26 bytes, 60 sec.

Speed: 26877 request/sec, 26877 response/sec
Requests: 1612629
Responses: 1612629
$ go run tcp_bench.go -c=20 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
20 clients, running 26 bytes, 60 sec.

Speed: 18228 request/sec, 18228 response/sec
Requests: 1093684
Responses: 1093684
$ go run tcp_bench.go -c=40 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
40 clients, running 26 bytes, 60 sec.

Speed: 25877 request/sec, 25877 response/sec
Requests: 1552643
Responses: 1552643
$ go run tcp_bench.go -c=100 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
100 clients, running 26 bytes, 60 sec.

Speed: 55433 request/sec, 55433 response/sec
Requests: 3326031
Responses: 3326031
$ go run tcp_bench.go -c=500 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
500 clients, running 26 bytes, 60 sec.

Speed: 57955 request/sec, 57955 response/sec
Requests: 3477353
Responses: 3477353
$ go run tcp_bench.go -c=1000 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
1000 clients, running 26 bytes, 60 sec.

Speed: 56076 request/sec, 56076 response/sec
Requests: 3364574
Responses: 3364574

tcp_echo_normal.go (GOMAXPROCS=8)

$ GOMAXPROCS=8 go run tcp_echo_normal.go
$ GOMAXPROCS=8 go run tcp_bench.go -c=10 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
10 clients, running 26 bytes, 60 sec.

Speed: 25214 request/sec, 25214 response/sec
Requests: 1512856
Responses: 1512856
$ GOMAXPROCS=8 go run tcp_bench.go -c=20 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
20 clients, running 26 bytes, 60 sec.

Speed: 28401 request/sec, 28401 response/sec
Requests: 1704098
Responses: 1704096
$ GOMAXPROCS=8 go run tcp_bench.go -c=40 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
40 clients, running 26 bytes, 60 sec.

Speed: 60497 request/sec, 60497 response/sec
Requests: 3629876
Responses: 3629876
$ GOMAXPROCS=8 go run tcp_bench.go -c=100 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
100 clients, running 26 bytes, 60 sec.

Speed: 60657 request/sec, 60657 response/sec
Requests: 3639421
Responses: 3639421
$ GOMAXPROCS=8 go run tcp_bench.go -c=400 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
400 clients, running 26 bytes, 60 sec.

Speed: 60674 request/sec, 60674 response/sec
Requests: 3640498
Responses: 3640498
$ GOMAXPROCS=8 go run tcp_bench.go -c=1000 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
1000 clients, running 26 bytes, 60 sec.

Speed: 60537 request/sec, 60537 response/sec
Requests: 3632244
Responses: 3632244

tcp_echo_normal.js

$ go run tcp_bench.go -c=10 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
10 clients, running 26 bytes, 60 sec.

Speed: 9242 request/sec, 9242 response/sec
Requests: 554549
Responses: 554549
$ go run tcp_bench.go -c=20 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
20 clients, running 26 bytes, 60 sec.

Speed: 15264 request/sec, 15264 response/sec
Requests: 915883
Responses: 915883
$ go run tcp_bench.go -c=40 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
40 clients, running 26 bytes, 60 sec.

Speed: 25285 request/sec, 25285 response/sec
Requests: 1517118
Responses: 1517117
$ go run tcp_bench.go -c=100 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
100 clients, running 26 bytes, 60 sec.

Speed: 26515 request/sec, 26515 response/sec
Requests: 1590943
Responses: 1590943
$ go run tcp_bench.go -c=400 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
400 clients, running 26 bytes, 60 sec.

Speed: 26453 request/sec, 26453 response/sec
Requests: 1587205
Responses: 1587205
$ go run tcp_bench.go -c=1000 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
1000 clients, running 26 bytes, 60 sec.

Speed: 24021 request/sec, 24021 response/sec
Requests: 1441292
Responses: 1441292

tcp_echo_cluster.js

$ GOMAXPROCS=8 go run tcp_bench.go -c=10 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
10 clients, running 26 bytes, 60 sec.

Speed: 19301 request/sec, 19301 response/sec
Requests: 1158096
Responses: 1158096
$ GOMAXPROCS=8 go run tcp_bench.go -c=20 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
20 clients, running 26 bytes, 60 sec.

Speed: 31164 request/sec, 31164 response/sec
Requests: 1869854
Responses: 1869854
$ GOMAXPROCS=8 go run tcp_bench.go -c=40 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
40 clients, running 26 bytes, 60 sec.

Speed: 41640 request/sec, 41640 response/sec
Requests: 2498410
Responses: 2498409
$ GOMAXPROCS=8 go run tcp_bench.go -c=100 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
100 clients, running 26 bytes, 60 sec.

Speed: 60471 request/sec, 60471 response/sec
Requests: 3628299
Responses: 3628299
$ GOMAXPROCS=8 go run tcp_bench.go -c=400 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
400 clients, running 26 bytes, 60 sec.

Speed: 60669 request/sec, 60669 response/sec
Requests: 3640189
Responses: 3640189
$ GOMAXPROCS=8 go run tcp_bench.go -c=1000 -t=60 -a=192.168.1.121:12345
Benchmarking: 192.168.1.121:12345
1000 clients, running 26 bytes, 60 sec.

Speed: 60674 request/sec, 60674 response/sec
Requests: 3640448
Responses: 3640448

billgo commented Mar 4, 2014

Here are 32 thumbs up for you!

@daemonchen

Learned a lot from this.


dpc commented May 24, 2015

It seems to me that this benchmark is broken. Please see: https://gist.github.com/dpc/8cacd3b6fa5273ffdcce
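
For what it is worth, one way the numbers above can mislead (this is my reading; the linked gist may raise other points) is that the client counts a response after a single conn.Read, and TCP is a byte stream, so a Read may return a partial message or several coalesced ones. A stricter accounting would require the full message back before counting it, for example with io.ReadFull in the benchmark's inner loop. The fragment below is a sketch of that change; it would also need "io" added to tcp_bench.go's imports:

// Drop-in replacement for the response half of the benchmark loop:
// io.ReadFull only succeeds once all len(recv) bytes have arrived, so
// partial reads are not counted as complete responses.
if _, err := io.ReadFull(conn, recv); err != nil {
    log.Println(err)
    break
}
atomic.AddUint64(&inNum, 1)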
