Skip to content

Instantly share code, notes, and snippets.

Embed
What would you like to do?
Simple benchmark of Go's net/rpc with a custom zero-reflection byte-slice codec: a raw-TCP echo server plus a client that measures QPS across concurrent goroutines.
package main
import (
"bufio"
"encoding/binary"
"flag"
"fmt"
"io"
"log"
"net"
"net/rpc"
"sync"
"time"
)
var (
	Num  int    // number of concurrent worker goroutines (-num)
	Conn int    // connection count (-conn); declared but not yet used by the benchmark
	Addr string // server host to dial (-addr); port 8000 is appended in main
)

// RS is the number of requests each goroutine issues (-reqs).
var RS = 1000

// init registers and parses the benchmark's command-line flags.
//
// NOTE(review): calling flag.Parse in init is discouraged — it runs before
// main and breaks `go test` binaries that register their own flags. Kept
// here to preserve the original tool's behavior.
func init() {
	flag.IntVar(&Num, "num", 1, "number of concurrent goroutines")
	flag.IntVar(&RS, "reqs", 1000, "requests per goroutine")
	// The Conn variable existed but its flag was never registered; wire it
	// up (backward-compatible: new flag, unchanged default behavior).
	flag.IntVar(&Conn, "conn", 1, "connection count (currently unused)")
	flag.StringVar(&Addr, "addr", "127.0.0.1", "server host (port 8000 is implied)")
	flag.Parse()
}
type DumpCodec struct {
con io.ReadWriteCloser
w *bufio.Writer
r *bufio.Reader
}
func (d *DumpCodec) WriteRequest(req *rpc.Request, body interface{}) (err error) {
head := [16]byte{}
b := body.([]byte)
binary.LittleEndian.PutUint64(head[:8], req.Seq)
binary.LittleEndian.PutUint32(head[8:], uint32(len(req.ServiceMethod)))
binary.LittleEndian.PutUint32(head[12:], uint32(len(b)))
if _, err = d.w.Write(head[:]); err != nil {
return err
}
if _, err = d.w.WriteString(req.ServiceMethod); err != nil {
return err
}
if _, err = d.w.Write(b); err != nil {
return err
}
return d.w.Flush()
}
func (d *DumpCodec) ReadResponseHeader(r *rpc.Response) (err error) {
head := [16]byte{}
if _, err = io.ReadFull(d.r, head[:]); err != nil {
return
}
r.Seq = binary.LittleEndian.Uint64(head[:8])
mlen := binary.LittleEndian.Uint32(head[8:])
elen := binary.LittleEndian.Uint32(head[12:])
b := make([]byte, mlen+elen)
if _, err = io.ReadFull(d.r, b); err != nil {
return
}
r.ServiceMethod = string(b[:mlen])
r.Error = string(b[mlen:])
return
}
func (d *DumpCodec) ReadResponseBody(body interface{}) (err error) {
b := body.(*[]byte)
head := [4]byte{}
if _, err = io.ReadFull(d.r, head[:]); err != nil {
return err
}
blen := int(binary.LittleEndian.Uint32(head[:]))
if len(*b) < blen {
*b = make([]byte, blen)
}
_, err = io.ReadFull(d.r, *b)
return
}
func (d *DumpCodec) Close() error {
return d.con.Close()
}
// main dials the echo server, fires Num goroutines that each issue RS
// synchronous calls over one shared multiplexed rpc.Client, then reports
// total requests, elapsed time, and QPS.
func main() {
	conn, err := net.Dial("tcp", Addr+":8000")
	if err != nil {
		log.Fatalln(err)
	}
	// Named fields instead of a positional literal: robust against field
	// reordering in DumpCodec.
	codec := DumpCodec{con: conn, w: bufio.NewWriter(conn), r: bufio.NewReader(conn)}
	client := rpc.NewClientWithCodec(&codec)
	defer client.Close() // was leaked in the original; also closes conn via the codec

	payload := []byte("aaaaaaaaaaaaaaa")

	var wg sync.WaitGroup
	start := time.Now()
	for i := 0; i < Num; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var res []byte // reused across calls; the codec resizes as needed
			for n := 0; n < RS; n++ {
				if err := client.Call("EchoServer.Echo", payload, &res); err != nil {
					log.Println(err)
				}
			}
		}()
	}
	wg.Wait()

	total := RS * Num
	secs := time.Since(start).Seconds() // idiomatic form of Now().Sub(start)
	fmt.Printf("concurrency: %d\n", Num)
	fmt.Printf("total: %d\n", total)
	fmt.Printf("seconds: %.3f\n", secs)
	fmt.Printf("qps: %d\n", int(float64(total)/secs))
}
package main
import (
"bufio"
"encoding/binary"
"io"
"log"
"net"
"sync"
"time"
)
func handle(conn net.Conn) {
var head [16]byte
var methbody []byte
w := bufio.NewWriter(conn)
r := bufio.NewReader(conn)
var err error
m := sync.Mutex{}
lastWritten := time.Now()
locked := false
go func() {
for {
time.Sleep(2 * time.Millisecond)
m.Lock()
if time.Now().Sub(lastWritten) > 2*time.Millisecond {
if err := w.Flush(); err != nil {
m.Unlock()
return
}
}
m.Unlock()
}
}()
for {
if _, err = io.ReadFull(r, head[:]); err != nil {
break
}
mlen := binary.LittleEndian.Uint32(head[8:])
blen := binary.LittleEndian.Uint32(head[12:])
if len(methbody) < int(mlen+blen) {
methbody = make([]byte, mlen+blen)
}
if _, err = io.ReadFull(r, methbody[:mlen+blen]); err != nil {
break
}
// no errors
m.Lock()
locked = true
binary.LittleEndian.PutUint32(head[12:], 0)
if _, err = w.Write(head[:]); err != nil {
break
}
if _, err = w.Write(methbody[:mlen]); err != nil {
break
}
binary.LittleEndian.PutUint32(head[12:], blen)
if _, err = w.Write(head[12:]); err != nil {
break
}
if _, err = w.Write(methbody[mlen : mlen+blen]); err != nil {
break
}
//if err = w.Flush(); err != nil {
//break
//}
lastWritten = time.Now()
m.Unlock()
locked = false
}
if locked {
m.Unlock()
}
log.Println(err)
return
}
// main listens on 127.0.0.1:8000 and serves each accepted connection in
// its own goroutine; accept failures are logged and the loop continues.
func main() {
	listener, err := net.Listen("tcp", "127.0.0.1:8000")
	if err != nil {
		log.Fatalln(err)
	}
	for {
		conn, acceptErr := listener.Accept()
		if acceptErr != nil {
			log.Println(acceptErr)
			continue
		}
		go handle(conn)
	}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.