Skip to content

Instantly share code, notes, and snippets.

@pfreixes
Created January 7, 2021 22:24
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save pfreixes/3f706d5b30a2e8255f32d5e02bdb7226 to your computer and use it in GitHub Desktop.
Emcache vs Go for checking ops/sec and latencies of GETs and SETs for a Memcache server

Go using 1 CPU

$ GOMAXPROCS=1 ./stress 
GET Test results
Ops/sec:  100015
P50: 0.000304 seconds
P99: 0.000825 seconds

SET Test results
Ops/sec:  63473
P50: 0.000486 seconds
P99: 0.001260 seconds

Go using 2 CPUs

$ GOMAXPROCS=2 ./stress 
Test results
Ops/sec:  147172
P50: 0.000180 seconds
P99: 0.000711 seconds
Test results
Ops/sec:  104040
P50: 0.000262 seconds
P99: 0.000944 seconds

Emcache

Tests results:
	Ops/sec: 49358
	P50: 0.000611 seconds
	P99: 0.001357 seconds
Starting benchmark GET
Tests results:
	Ops/sec: 46268
	P50: 0.000650 seconds
	P99: 0.001361 seconds

Program used https://github.com/pfreixes/emcache/blob/master/benchmark/sets_gets_stress.py

Memcached, executed using two threads

$ memcached -t 2

Machine with 8 cores

$ lscpu 
Architecture:        x86_64
CPU op-mode(s):      32-bit, 64-bit
Byte Order:          Little Endian
CPU(s):              8
On-line CPU(s) list: 0-7
package main
import (
"fmt"
"github.com/bradfitz/gomemcache/memcache"
"math/rand"
"sort"
"strconv"
"sync"
"time"
)
// Benchmark tuning knobs.
const (
duration = 60 // seconds each benchmark run lasts
concurrency = 32 // number of concurrent worker goroutines
number_of_keys = 65536 // keys are random ints in [0, number_of_keys)
)
// finish_benchmark signals the worker goroutines to stop sampling.
// NOTE(review): it is written by benchmark() and read by the workers
// with no synchronization — a data race (visible under `go test -race`);
// an atomic.Bool or a close()'d channel would be the safe equivalent.
var finish_benchmark bool = false
// set stores one random key (whose value equals the key itself) in the
// memcache server and returns the latency of the Set call in microseconds.
// Any error aborts the benchmark via panic.
func set(mc *memcache.Client) int {
	kv := strconv.Itoa(rand.Intn(number_of_keys))
	// Build the item before starting the clock so only the network
	// round-trip is measured.
	item := &memcache.Item{Key: kv, Value: []byte(kv)}
	began := time.Now()
	if err := mc.Set(item); err != nil {
		panic(err)
	}
	return int(time.Since(began).Microseconds())
}
// get fetches one random key from the memcache server and returns the
// latency of the Get call in microseconds. A cache miss is expected and
// tolerated; any other error aborts the benchmark via panic.
func get(mc *memcache.Client) int {
	k := strconv.Itoa(rand.Intn(number_of_keys))
	began := time.Now()
	if _, err := mc.Get(k); err != nil && err != memcache.ErrCacheMiss {
		panic(err)
	}
	return int(time.Since(began).Microseconds())
}
// benchmark drives bench_func from `concurrency` goroutines for `duration`
// seconds against mc, then prints ops/sec plus the P50 and P99 latencies.
// bench_func reports one latency sample (microseconds) per invocation.
//
// Fixes vs. the original: the unsynchronized global finish_benchmark flag
// (a data race between the main goroutine's write and the workers' reads)
// is replaced by a close()'d channel, and the percentile lookups are
// guarded so an empty run cannot index into an empty slice.
func benchmark(mc *memcache.Client, bench_func func(mc *memcache.Client) int) {
	var wg sync.WaitGroup
	wg.Add(concurrency)
	// Buffered so every worker can deliver its samples without blocking.
	ch := make(chan []int, concurrency)
	// Closing this channel is the race-free stop signal for all workers.
	stop := make(chan struct{})
	for i := 0; i < concurrency; i++ {
		go func() {
			defer wg.Done()
			latencies := []int{}
			for {
				select {
				case <-stop:
					ch <- latencies
					return
				default:
					latencies = append(latencies, bench_func(mc))
				}
			}
		}()
	}
	time.Sleep(duration * time.Second)
	close(stop)
	wg.Wait()
	close(ch)
	overallLatencies := []int{}
	for latencies := range ch {
		overallLatencies = append(overallLatencies, latencies...)
	}
	sort.Ints(overallLatencies)
	total_ops := len(overallLatencies)
	fmt.Println("Test results")
	fmt.Println("Ops/sec: ", total_ops/duration)
	if total_ops == 0 {
		// No samples — percentile indexing below would panic.
		return
	}
	// k*total_ops/100 floors to < total_ops for k < 100, so both indexes
	// are in range; float64 keeps full precision converting µs -> seconds.
	fmt.Printf("P50: %.6f seconds\n", float64(overallLatencies[50*total_ops/100])/1e6)
	fmt.Printf("P99: %.6f seconds\n", float64(overallLatencies[99*total_ops/100])/1e6)
}
// main connects to a local memcached instance and runs the SET benchmark
// followed by the GET benchmark.
func main() {
	mc := memcache.New("127.0.0.1:11211")
	for _, bench := range []func(*memcache.Client) int{set, get} {
		benchmark(mc, bench)
	}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment