@eliquious
Last active September 5, 2018 17:30
Concurrent Map - Golang

Readme

This is an example of a sharded concurrent map guarded by spin locks: keys are distributed across 32 shards by masking their low bits, and each shard protects its own map with a spin lock from github.com/OneOfOne/go-utils. The benchmarks below show every operation completing in well under 100 ns with zero allocations.

Benchmarks

BenchmarkCacheSet-8          	30000000	        52.6 ns/op	       0 B/op	       0 allocs/op
BenchmarkCacheGet-8          	30000000	        37.7 ns/op	       0 B/op	       0 allocs/op
BenchmarkCacheDelete-8       	50000000	        28.3 ns/op	       0 B/op	       0 allocs/op
BenchmarkCacheConcurrentSet-8	20000000	        72.5 ns/op	       0 B/op	       0 allocs/op
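
These figures come from Go's built-in benchmark tooling; the B/op and allocs/op columns imply the -benchmem flag. A run along these lines should reproduce comparable output (absolute numbers depend on hardware and Go version), assuming the code below is saved as a *_test.go file in its own package directory:

go test -bench=. -benchmem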
package main

import (
	"crypto/rand"
	rnd "math/rand"
	"sync"
	"sync/atomic"
	"testing"

	spin "github.com/OneOfOne/go-utils/sync"
)

// Cache is a fixed-size collection of shards; each key maps to exactly one shard.
type Cache []*CacheShard

// CacheShard holds one slice of the key space behind its own spin lock.
type CacheShard struct {
	items map[uint64][]byte
	lock  *spin.SpinLock
}

const CacheSize = 32
const CacheSizeMask = CacheSize - 1

// New allocates the cache and its shards.
func New() Cache {
	c := make(Cache, CacheSize)
	for i := 0; i < CacheSize; i++ {
		c[i] = &CacheShard{
			items: make(map[uint64][]byte, CacheSize),
			lock:  new(spin.SpinLock),
		}
	}
	return c
}

// Get returns the value stored for key, if any.
func (c Cache) Get(key uint64) (value []byte, ok bool) {
	shard := c.getShard(key)
	shard.lock.Lock()
	value, ok = shard.items[key]
	shard.lock.Unlock()
	return
}

// Set stores data under key, replacing any previous value.
func (c Cache) Set(key uint64, data []byte) {
	shard := c.getShard(key)
	shard.lock.Lock()
	shard.items[key] = data
	shard.lock.Unlock()
}

// Delete removes key from its shard.
func (c Cache) Delete(key uint64) {
	shard := c.getShard(key)
	shard.lock.Lock()
	delete(shard.items, key)
	shard.lock.Unlock()
}

// getShard selects a shard by masking the low bits of the key.
func (c Cache) getShard(key uint64) *CacheShard {
	return c[key&CacheSizeMask]
}

var keys []uint64
var values [][]byte

// init pre-generates random keys and 32-byte values so the benchmarks
// measure cache operations rather than data generation.
func init() {
	keys = make([]uint64, CacheSize)
	values = make([][]byte, CacheSize)
	for i := 0; i < CacheSize; i++ {
		keys[i] = uint64(rnd.Int63())
		values[i] = make([]byte, 32)
		rand.Read(values[i])
	}
}

func BenchmarkCacheSet(b *testing.B) {
	cache := New()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cache.Set(keys[i&CacheSizeMask], values[i&CacheSizeMask])
	}
}

func BenchmarkCacheGet(b *testing.B) {
	cache := New()
	for i := 0; i < len(keys); i++ {
		cache.Set(keys[i], values[i])
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, _ = cache.Get(keys[i&CacheSizeMask])
	}
}

func BenchmarkCacheDelete(b *testing.B) {
	cache := New()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cache.Delete(keys[i&CacheSizeMask])
	}
}

// BenchmarkCacheConcurrentSet launches 32 goroutines that share an atomic
// counter, so the total number of Set calls across all goroutines stays
// roughly at b.N.
func BenchmarkCacheConcurrentSet(b *testing.B) {
	cache := New()
	wg := sync.WaitGroup{}
	var i, N int64 = 0, int64(b.N)
	b.ResetTimer()
	for j := 0; j < 32; j++ {
		wg.Add(1)
		go func() {
			var k int64
			for k = 0; k < N; k = atomic.LoadInt64(&i) {
				cache.Set(keys[k&CacheSizeMask], values[k&CacheSizeMask])
				atomic.AddInt64(&i, 1)
			}
			wg.Done()
		}()
	}
	wg.Wait()
}
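
For context, here is a minimal usage sketch. It is not part of the original gist; it assumes a hypothetical second file (e.g. example.go) in the same package as the Cache code above:

package main

import "fmt"

// exampleUsage walks through a basic Set/Get/Delete cycle on the sharded cache.
func exampleUsage() {
	cache := New()
	cache.Set(42, []byte("hello"))

	if value, ok := cache.Get(42); ok {
		fmt.Printf("value for key 42: %s\n", value) // prints "hello"
	}

	cache.Delete(42)
	if _, ok := cache.Get(42); !ok {
		fmt.Println("key 42 removed")
	}
}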