@yihuang
Last active August 3, 2020 11:01
benchmark gevent vs asyncio on python3.4
# redis_asyncio.py (port 3001) -- callback-style asyncio.Protocol server
import asyncio

import hiredis

d = {}


def process(req):
    # only supports GET/SET
    cmd = req[0].lower()
    if cmd == b'set':
        d[req[1]] = req[2]
        return b"+OK\r\n"
    elif cmd == b'get':
        v = d.get(req[1])
        if v is None:
            return b'$-1\r\n'
        else:
            # fixed one-byte placeholder reply instead of the stored value
            return b'$1\r\n1\r\n'
    else:
        print(cmd)
        raise NotImplementedError()


class RedisServer(asyncio.Protocol):
    def __init__(self):
        self.reader = hiredis.Reader()

    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        transport = self.transport
        reader = self.reader
        reader.feed(data)
        req = reader.gets()
        while req is not False:
            transport.write(process(req))
            req = reader.gets()


loop = asyncio.get_event_loop()
coro = loop.create_server(RedisServer, '127.0.0.1', 3001)
server = loop.run_until_complete(coro)
print('serving on {}'.format(server.sockets[0].getsockname()))
try:
    loop.run_forever()
except KeyboardInterrupt:
    print("exit")
finally:
    server.close()
    loop.close()
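For sanity-checking a server by hand (not part of the benchmark), here is a minimal raw-socket client sketch. It frames commands in the same RESP multi-bulk form that hiredis.Reader parses and that redis-benchmark sends, and assumes the Protocol-based server above is listening on 127.0.0.1:3001; as coded above, GET answers with the fixed placeholder value rather than what was stored.

import socket


def encode_command(*args):
    # RESP multi-bulk framing: *<count>\r\n, then $<len>\r\n<arg>\r\n per argument
    out = ['*{}\r\n'.format(len(args))]
    for arg in args:
        out.append('${}\r\n{}\r\n'.format(len(arg), arg))
    return ''.join(out).encode()


sock = socket.create_connection(('127.0.0.1', 3001))
sock.sendall(encode_command('SET', 'foo', 'bar'))
print(sock.recv(4096))   # b'+OK\r\n'
sock.sendall(encode_command('GET', 'foo'))
print(sock.recv(4096))   # b'$1\r\n1\r\n' -- the fixed placeholder reply
sock.close()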
# redis_asyncio_coroutine.py (port 3002) -- coroutine/streams-style asyncio server
import asyncio

import hiredis

d = {}


def process(req):
    # only supports GET/SET
    cmd = req[0].lower()
    if cmd == b'set':
        d[req[1]] = req[2]
        return b"+OK\r\n"
    elif cmd == b'get':
        v = d.get(req[1])
        if v is None:
            return b'$-1\r\n'
        else:
            # fixed one-byte placeholder reply instead of the stored value
            return b'$1\r\n1\r\n'
    else:
        print(cmd)
        raise NotImplementedError()


@asyncio.coroutine
def echo_server(reader, writer):
    hireader = hiredis.Reader()
    while True:
        s = yield from reader.read(4096)
        if not s:
            break
        hireader.feed(s)
        req = hireader.gets()
        while req is not False:
            writer.write(process(req))
            # yield from writer.drain()
            req = hireader.gets()


loop = asyncio.get_event_loop()
server = loop.run_until_complete(asyncio.start_server(echo_server, port=3002))
print('serving on {}'.format(server.sockets[0].getsockname()))
try:
    loop.run_forever()
except KeyboardInterrupt:
    print('exit')
finally:
    server.close()
    loop.close()
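The @asyncio.coroutine / yield from style above is what Python 3.4 supported; on Python 3.5+ the same streams handler can be written with native coroutine syntax. A sketch for reference only (it reuses the process() helper defined above and was not what was benchmarked):

import asyncio

import hiredis


async def handle(reader, writer):
    hireader = hiredis.Reader()
    while True:
        s = await reader.read(4096)
        if not s:
            break
        hireader.feed(s)
        req = hireader.gets()
        while req is not False:
            writer.write(process(req))   # same process() helper as in the files above
            req = hireader.gets()


loop = asyncio.get_event_loop()
server = loop.run_until_complete(asyncio.start_server(handle, port=3002))
loop.run_forever()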
# redis_gevent.py (port 3000) -- gevent server
# run on python3.4, with the python3.4 branch of https://github.com/johbo/gevent.git
import gevent
import gevent.monkey

gevent.monkey.patch_all()

import hiredis
from gevent.server import StreamServer

d = {}


def process(req):
    # only supports GET/SET
    cmd = req[0].lower()
    if cmd == b'set':
        d[req[1]] = req[2]
        return b"+OK\r\n"
    elif cmd == b'get':
        v = d.get(req[1])
        if v is None:
            return b'$-1\r\n'
        else:
            # fixed one-byte placeholder reply instead of the stored value
            return b'$1\r\n1\r\n'
            # return '$%d\r\n%s\r\n' % (len(v), v)
    else:
        print(cmd)
        raise NotImplementedError()


def handle(sock, addr):
    reader = hiredis.Reader()
    while True:
        buf = sock.recv(4096)
        if not buf:
            return
        reader.feed(buf)
        req = reader.gets()
        while req is not False:
            sock.sendall(process(req))
            req = reader.gets()


print('serving on 0.0.0.0:3000')
StreamServer(('0.0.0.0', 3000), handle).serve_forever()
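All three servers answer GET with a fixed one-byte placeholder; the commented-out line above hints at the faithful reply. A minimal sketch of what a real bulk-string reply would look like, built by concatenation since %-formatting of bytes only arrived in Python 3.5:

def bulk_reply(v):
    # RESP bulk string: $<length>\r\n<payload>\r\n ; a missing key is $-1\r\n
    if v is None:
        return b'$-1\r\n'
    return b'$' + str(len(v)).encode() + b'\r\n' + v + b'\r\n'

# bulk_reply(b'bar') == b'$3\r\nbar\r\n'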
(py3) λ ~/src/benchmark/ python redis_gevent.py&
[2] 19102
(py3) λ ~/src/benchmark/ serving on ('0.0.0.0', 3000)
(py3) λ ~/src/benchmark/ python redis_asyncio.py&
[2] 19103
(py3) λ ~/src/benchmark/ serving on ('0.0.0.0', 3001)
(py3) λ ~/src/benchmark/ python redis_asyncio_coroutine.py&
[2] 19104
(py3) λ ~/src/benchmark/ serving on ('0.0.0.0', 3002)
λ ~/src/benchmark/ redis-benchmark -p 3000 -t get -n 100000 -r 100000000
====== GET ======
100000 requests completed in 3.43 seconds
50 parallel clients
3 bytes payload
keep alive: 1
0.28% <= 1 milliseconds
98.51% <= 2 milliseconds
99.25% <= 3 milliseconds
99.89% <= 4 milliseconds
99.98% <= 5 milliseconds
99.99% <= 6 milliseconds
100.00% <= 6 milliseconds
29188.56 requests per second
λ ~/src/benchmark/ redis-benchmark -p 3001 -t get -n 100000 -r 100000000
====== GET ======
100000 requests completed in 3.12 seconds
50 parallel clients
3 bytes payload
keep alive: 1
0.21% <= 1 milliseconds
98.07% <= 2 milliseconds
99.82% <= 3 milliseconds
99.91% <= 4 milliseconds
99.95% <= 5 milliseconds
99.97% <= 6 milliseconds
99.97% <= 9 milliseconds
99.98% <= 10 milliseconds
99.99% <= 11 milliseconds
99.99% <= 12 milliseconds
99.99% <= 13 milliseconds
99.99% <= 14 milliseconds
99.99% <= 16 milliseconds
99.99% <= 17 milliseconds
99.99% <= 18 milliseconds
99.99% <= 19 milliseconds
99.99% <= 21 milliseconds
100.00% <= 22 milliseconds
100.00% <= 24 milliseconds
100.00% <= 25 milliseconds
100.00% <= 27 milliseconds
100.00% <= 28 milliseconds
100.00% <= 30 milliseconds
32071.84 requests per second
(py3) λ ~/src/benchmark/
(py3) λ ~/src/benchmark/ redis-benchmark -p 3002 -t set -n 100000 -r 100000000 <<<
====== SET ======
100000 requests completed in 4.55 seconds
50 parallel clients
3 bytes payload
keep alive: 1
0.06% <= 1 milliseconds
0.60% <= 2 milliseconds
96.20% <= 3 milliseconds
99.69% <= 4 milliseconds
99.92% <= 5 milliseconds
99.96% <= 6 milliseconds
99.96% <= 7 milliseconds
99.96% <= 8 milliseconds
99.97% <= 9 milliseconds
99.97% <= 10 milliseconds
99.97% <= 11 milliseconds
99.97% <= 12 milliseconds
99.97% <= 13 milliseconds
99.97% <= 14 milliseconds
99.97% <= 15 milliseconds
99.97% <= 16 milliseconds
99.97% <= 17 milliseconds
99.97% <= 18 milliseconds
99.98% <= 19 milliseconds
99.98% <= 20 milliseconds
99.98% <= 21 milliseconds
99.98% <= 22 milliseconds
99.98% <= 23 milliseconds
99.98% <= 25 milliseconds
99.98% <= 26 milliseconds
99.98% <= 28 milliseconds
99.98% <= 29 milliseconds
99.99% <= 31 milliseconds
99.99% <= 32 milliseconds
99.99% <= 33 milliseconds
99.99% <= 35 milliseconds
99.99% <= 36 milliseconds
99.99% <= 37 milliseconds
99.99% <= 39 milliseconds
99.99% <= 40 milliseconds
99.99% <= 42 milliseconds
99.99% <= 44 milliseconds
100.00% <= 45 milliseconds
100.00% <= 47 milliseconds
100.00% <= 49 milliseconds
100.00% <= 50 milliseconds
100.00% <= 52 milliseconds
100.00% <= 54 milliseconds
21968.37 requests per second
λ ~/src/benchmark/ redis-benchmark -p 3001 -t get -n 100000 -r 100000000 -P 100
====== GET ======
100000 requests completed in 0.83 seconds
50 parallel clients
3 bytes payload
keep alive: 1
30.92% <= 1 milliseconds
60.63% <= 2 milliseconds
76.59% <= 3 milliseconds
84.80% <= 4 milliseconds
89.70% <= 5 milliseconds
92.20% <= 6 milliseconds
93.60% <= 7 milliseconds
94.40% <= 8 milliseconds
94.90% <= 9 milliseconds
95.10% <= 11 milliseconds
95.30% <= 12 milliseconds
95.40% <= 13 milliseconds
95.60% <= 14 milliseconds
95.80% <= 15 milliseconds
95.90% <= 16 milliseconds
96.00% <= 17 milliseconds
96.20% <= 18 milliseconds
96.30% <= 23 milliseconds
96.40% <= 24 milliseconds
96.50% <= 25 milliseconds
96.60% <= 26 milliseconds
96.70% <= 27 milliseconds
96.80% <= 28 milliseconds
96.90% <= 29 milliseconds
97.00% <= 30 milliseconds
97.10% <= 31 milliseconds
97.30% <= 33 milliseconds
97.40% <= 34 milliseconds
97.50% <= 35 milliseconds
97.60% <= 36 milliseconds
97.70% <= 37 milliseconds
97.80% <= 38 milliseconds
97.90% <= 39 milliseconds
98.00% <= 40 milliseconds
98.10% <= 41 milliseconds
98.20% <= 42 milliseconds
98.30% <= 43 milliseconds
98.40% <= 45 milliseconds
98.50% <= 46 milliseconds
98.70% <= 47 milliseconds
98.80% <= 49 milliseconds
99.00% <= 50 milliseconds
99.10% <= 51 milliseconds
99.20% <= 53 milliseconds
99.30% <= 54 milliseconds
99.50% <= 55 milliseconds
99.60% <= 56 milliseconds
99.70% <= 58 milliseconds
99.90% <= 64 milliseconds
100.00% <= 64 milliseconds
120627.27 requests per second
λ ~/src/benchmark/ redis-benchmark -p 3001 -t get -n 100000 -r 100000000 <<<
====== GET ======
100000 requests completed in 1.75 seconds
50 parallel clients
3 bytes payload
keep alive: 1
97.42% <= 1 milliseconds
99.96% <= 2 milliseconds
99.97% <= 3 milliseconds
99.97% <= 4 milliseconds
99.97% <= 5 milliseconds
99.97% <= 6 milliseconds
99.97% <= 7 milliseconds
99.97% <= 8 milliseconds
99.97% <= 9 milliseconds
99.97% <= 10 milliseconds
99.98% <= 12 milliseconds
99.98% <= 13 milliseconds
99.98% <= 14 milliseconds
99.98% <= 15 milliseconds
99.98% <= 16 milliseconds
99.98% <= 17 milliseconds
99.98% <= 18 milliseconds
99.99% <= 19 milliseconds
99.99% <= 20 milliseconds
99.99% <= 21 milliseconds
99.99% <= 22 milliseconds
99.99% <= 23 milliseconds
99.99% <= 24 milliseconds
99.99% <= 25 milliseconds
100.00% <= 26 milliseconds
100.00% <= 27 milliseconds
100.00% <= 28 milliseconds
100.00% <= 29 milliseconds
100.00% <= 30 milliseconds
100.00% <= 31 milliseconds
57110.22 requests per second
baseline: redis-server 2.8.8 (listening on port 10000)
λ ~/src/benchmark/ redis-benchmark -p 10000 -t set -n 100000 -r 100000000
====== SET ======
100000 requests completed in 0.78 seconds
50 parallel clients
3 bytes payload
keep alive: 1
99.95% <= 1 milliseconds
99.99% <= 2 milliseconds
100.00% <= 3 milliseconds
100.00% <= 3 milliseconds
127877.23 requests per second
@geekan

geekan commented May 29, 2015

Is a 3-byte payload enough for the test?
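For larger values, redis-benchmark's -d option sets the SET/GET payload size in bytes (the 3-byte payload seen above is its default); for example:

redis-benchmark -p 3001 -t get,set -n 100000 -r 100000000 -d 1024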

@iandyh

iandyh commented Oct 21, 2016

Hi! Thanks for the gist. It's very helpful.

redis-server 2.8.8
λ ~/src/benchmark/ redis-benchmark -p 10000 -t set -n 100000 -r 100000000

However, is this a typo? What is running on port 10000?
