Tarantool vs Redis insert benchmarks (batch.py, gbatch.py, mbatch.py, rbatch.py)
@buriy, last active April 19, 2016

batch.py (tarantool, one synchronous connection):

import time

import msgpack
import tarantool

server = tarantool.connect("localhost", 3301)
demo = server.space('example')
requests = 50000

def run_test(space, value):
    # Size of one encoded tuple, used to report throughput in MB/s.
    avg_size = len(msgpack.packb((65536, value), use_bin_type=True))
    start = time.time()
    for i in xrange(requests):
        if value is None:
            space.ping()
        else:
            space.insert((65536 + i, value))
    delta = time.time() - start
    qps = requests / (1e-9 + delta)
    print "{} bytes: {:.1f} sec, {:.1f} RPS, {:.1f} MB/s".format(
        avg_size, delta, qps, avg_size * qps / 1e6)

def cleanup(space):
    for x in xrange(requests):
        space.delete(65536 + x)

cleanup(demo)
run_test(demo, 0)
cleanup(demo)
run_test(demo, 'a' * 1000)
cleanup(demo)
run_test(demo, 'a' * 10000)   # ~ 0.5 GB of memory
cleanup(demo)
run_test(demo, 'a' * 20000)   # ~ 1 GB of memory
cleanup(demo)
run_test(demo, 'a' * 50000)   # ~ 2.5 GB of memory
cleanup(demo)
run_test(demo, 'a' * 100000)  # ~ 5 GB of memory
cleanup(demo)
run_test(demo, 'a' * 200000)  # ~ 10 GB of memory
cleanup(demo)
run_test(demo, 'a' * 300000)  # ~ 15 GB of memory
cleanup(demo)
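
All of the Tarantool scripts assume a space named 'example' with a numeric
primary key already exists on the server. A minimal one-time setup sketch,
assuming the tarantool connector's eval() method and a Tarantool 1.6-style
schema (field type 'NUM'; newer versions spell it 'unsigned'):

import tarantool

conn = tarantool.connect("localhost", 3301)
# Create the space and a TREE primary index on the first (numeric) field.
# The space and index names match what the benchmarks expect; the field
# type and options are assumptions to adjust for your server version.
conn.eval("""
box.schema.space.create('example', {if_not_exists = true})
box.space.example:create_index('primary',
    {type = 'tree', parts = {1, 'NUM'}, if_not_exists = true})
""")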

gbatch.py (gtarantool, still one request at a time; identical to batch.py
except for the client library):

import time

import gtarantool
import msgpack

server = gtarantool.connect("localhost", 3301)
demo = server.space('example')
requests = 50000

def run_test(space, value):
    avg_size = len(msgpack.packb((65536, value), use_bin_type=True))
    start = time.time()
    for i in xrange(requests):
        if value is None:
            space.ping()
        else:
            space.insert((65536 + i, value))
    delta = time.time() - start
    qps = requests / (1e-9 + delta)
    print "{} bytes: {:.1f} sec, {:.1f} RPS, {:.1f} MB/s".format(
        avg_size, delta, qps, avg_size * qps / 1e6)

def cleanup(space):
    for x in xrange(requests):
        space.delete(65536 + x)

cleanup(demo)
run_test(demo, 0)
cleanup(demo)
run_test(demo, 'a' * 1000)
cleanup(demo)
run_test(demo, 'a' * 10000)   # ~ 0.5 GB of memory
cleanup(demo)
run_test(demo, 'a' * 20000)   # ~ 1 GB of memory
cleanup(demo)
run_test(demo, 'a' * 50000)   # ~ 2.5 GB of memory
cleanup(demo)
run_test(demo, 'a' * 100000)  # ~ 5 GB of memory
cleanup(demo)
run_test(demo, 'a' * 200000)  # ~ 10 GB of memory
cleanup(demo)
run_test(demo, 'a' * 300000)  # ~ 15 GB of memory
cleanup(demo)

mbatch.py (gtarantool + gevent, run as: python mbatch.py <threads>):

import sys
import time

import gevent
import gtarantool
import msgpack

threads = int(sys.argv[1])
server = gtarantool.connect("localhost", 3301)
demo = server.space('example')
REQUESTS = 50000

def insert_job(space, it, reg, n):
    # Pull tuples from the shared iterator until it is exhausted;
    # record how many this greenlet inserted.
    c = 0
    try:
        while True:
            v = it.next()
            space.insert(v)
            c += 1
    except StopIteration:
        pass
    reg[n] = c

def bulk_load(space, it):
    reg = [0] * threads
    jobs = [gevent.spawn(insert_job, space, it, reg, i)
            for i in range(threads)]
    gevent.joinall(jobs)
    print min(reg), '-', max(reg)  # per-greenlet insert counts

def iterator(requests, value):
    for x in xrange(requests):
        yield (65536 + x, value)

def run_test(space, value, async=True):
    avg_size = len(msgpack.packb((65536, value), use_bin_type=True))
    start = time.time()
    if async:
        bulk_load(space, iterator(REQUESTS, value))
    else:
        for tup in iterator(REQUESTS, value):  # iterator already yields full tuples
            space.insert(tup)
    delta = time.time() - start
    qps = REQUESTS / (1e-9 + delta)
    print "{} bytes: {:.1f} sec, {:.1f} RPS, {:.1f} MB/s".format(
        avg_size, delta, qps, avg_size * qps / 1e6)

def cleanup(space):
    for x in xrange(REQUESTS):
        space.delete(65536 + x)

cleanup(demo)
run_test(demo, 0)
cleanup(demo)
run_test(demo, 'a' * 100)
cleanup(demo)
run_test(demo, 'a' * 1000)
cleanup(demo)
run_test(demo, 'a' * 10000)  # ~ 0.5 GB of memory
cleanup(demo)
run_test(demo, 'a' * 20000)  # ~ 1 GB of memory
cleanup(demo)
run_test(demo, 'a' * 50000)  # ~ 2.5 GB of memory
cleanup(demo)
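
The "N - N" lines mbatch.py prints are the minimum and maximum number of
inserts done by a single greenlet: every greenlet pulls from the same shared
generator, and because gtarantool yields to the gevent hub on each network
round-trip, the work splits evenly across them. A standalone sketch of that
fan-out pattern (gevent.sleep(0) stands in for the network yield; without
some yield point the first greenlet would drain the whole generator):

import gevent

def worker(it, counts, n):
    done = 0
    try:
        while True:
            next(it)         # pull the next item from the shared iterator
            gevent.sleep(0)  # cooperative yield, like a socket send/recv
            done += 1
    except StopIteration:
        pass
    counts[n] = done

threads = 100
items = iter(xrange(50000))
counts = [0] * threads
gevent.joinall([gevent.spawn(worker, items, counts, i)
                for i in range(threads)])
print min(counts), '-', max(counts)  # evenly split, e.g. "500 - 500"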

rbatch.py (redis, HSETs pipelined in chunks of 10000):

import time

import msgpack
import redis

requests = 100000
r = redis.Redis()

def run_test(r, value):
    avg_size = len(msgpack.packb((65536, value), use_bin_type=True))
    start = time.time()
    for i in xrange(0, requests, 10000):
        # Batch up to 10000 HSETs per pipeline round-trip.
        with r.pipeline() as p:
            for t in xrange(i, min(i + 10000, requests)):
                p.hset('example', 65536 + t, value)
            p.execute()
    delta = time.time() - start
    assert r.hget('example', 65536 + 49555) == value  # spot-check one field
    qps = requests / (1e-9 + delta)
    print "{} bytes: {:.1f} sec, {:.1f} RPS, {:.1f} MB/s".format(
        avg_size, delta, qps, avg_size * qps / 1e6)

def cleanup(r):
    r.delete('example')

cleanup(r)
run_test(r, '0')
cleanup(r)
run_test(r, 'a' * 1000)
cleanup(r)
run_test(r, 'a' * 10000)   # ~ 1 GB of memory
cleanup(r)
run_test(r, 'a' * 20000)   # ~ 2 GB of memory
cleanup(r)
run_test(r, 'a' * 50000)   # ~ 5 GB of memory
cleanup(r)
run_test(r, 'a' * 100000)  # ~ 10 GB of memory
cleanup(r)
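
The "~ X GB of memory" comments above are back-of-the-envelope estimates
(requests * value size). One way to check the real server-side figure for
the Redis runs, a sketch using redis-py's INFO command:

import redis

r = redis.Redis()
# 'used_memory_human' comes from the memory section of Redis INFO.
print r.info('memory')['used_memory_human']
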
Results:
$ python batch.py
7.0 bytes: 2.5 sec, 20263.7 RPS, 0.1 MB/s
1009.0 bytes: 2.4 sec, 20474.2 RPS, 20.7 MB/s
10009.0 bytes: 2.9 sec, 17380.0 RPS, 174.0 MB/s
20009.0 bytes: 3.4 sec, 14531.8 RPS, 290.8 MB/s
50009.0 bytes: 4.9 sec, 10230.7 RPS, 511.6 MB/s
100011.0 bytes: 7.1 sec, 7059.3 RPS, 706.0 MB/s
200011.0 bytes: 12.2 sec, 4110.2 RPS, 822.1 MB/s
300011.0 bytes: 31.4 sec, 1590.9 RPS, 477.3 MB/s
$ python gbatch.py
7 bytes: 3.8 sec, 13179.5 RPS, 0.1 MB/s
1009 bytes: 3.9 sec, 12768.2 RPS, 12.9 MB/s
10009 bytes: 4.5 sec, 11071.2 RPS, 110.8 MB/s
20009 bytes: 5.4 sec, 9243.2 RPS, 184.9 MB/s
50009 bytes: 7.7 sec, 6481.3 RPS, 324.1 MB/s
100011 bytes: 11.2 sec, 4466.4 RPS, 446.7 MB/s
200011 bytes: 17.2 sec, 2906.6 RPS, 581.4 MB/s
300011 bytes: 25.0 sec, 2000.9 RPS, 600.3 MB/s
$ python mbatch.py 100
500 - 500
7 bytes: 0.9 sec, 57176.5 RPS, 0.4 MB/s
500 - 500
108 bytes: 1.0 sec, 50389.2 RPS, 5.4 MB/s
500 - 500
1009 bytes: 1.3 sec, 38161.2 RPS, 38.5 MB/s
500 - 500
10009 bytes: 4.2 sec, 11806.5 RPS, 118.2 MB/s
500 - 500
20009 bytes: 7.3 sec, 6834.4 RPS, 136.7 MB/s
500 - 500
50009 bytes: 17.1 sec, 2922.4 RPS, 146.1 MB/s
500 - 500
100011 bytes: 39.0 sec, 1281.9 RPS, 128.2 MB/s
500 - 500
200011 bytes: 92.7 sec, 539.6 RPS, 107.9 MB/s
$ python mbatch.py 20
2500 - 2500
7 bytes: 1.1 sec, 44755.8 RPS, 0.3 MB/s
2500 - 2500
108 bytes: 1.3 sec, 38174.2 RPS, 4.1 MB/s
2500 - 2500
1009 bytes: 1.5 sec, 34186.0 RPS, 34.5 MB/s
2500 - 2500
10009 bytes: 2.5 sec, 20166.7 RPS, 201.8 MB/s
2500 - 2500
20009 bytes: 3.9 sec, 12848.2 RPS, 257.1 MB/s
2500 - 2500
50009 bytes: 6.8 sec, 7364.4 RPS, 368.3 MB/s
$ python mbatch.py 10
5000 - 5000
7 bytes: 1.4 sec, 35957.4 RPS, 0.3 MB/s
5000 - 5000
108 bytes: 1.4 sec, 35706.6 RPS, 3.9 MB/s
5000 - 5000
1009 bytes: 1.5 sec, 32562.0 RPS, 32.9 MB/s
5000 - 5000
10009 bytes: 2.5 sec, 20161.9 RPS, 201.8 MB/s
5000 - 5000
20009 bytes: 3.4 sec, 14508.5 RPS, 290.3 MB/s
5000 - 5000
50009 bytes: 5.5 sec, 9030.7 RPS, 451.6 MB/s
$ python rbatch.py
9 bytes: 0.4 sec, 123503.4 RPS, 1.1 MB/s
1009 bytes: 0.4 sec, 115915.7 RPS, 117.0 MB/s
10009 bytes: 0.7 sec, 71531.7 RPS, 716.0 MB/s
20009 bytes: 1.2 sec, 41508.3 RPS, 830.5 MB/s
50009 bytes: 2.2 sec, 22247.9 RPS, 1112.6 MB/s
100011 bytes: 3.7 sec, 13378.4 RPS, 1338.0 MB/s
200011 bytes: 7.4 sec, 6751.3 RPS, 1350.3 MB/s
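
For reference, the MB/s column is just the other two columns combined,
avg_size * RPS / 1e6; for example, the 100 KB batch.py row works out as:

print 100011 * 7059.3 / 1e6  # -> 706.0 (MB/s), matching the table above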