Skip to content

Instantly share code, notes, and snippets.

@nolash
Created April 24, 2018 13:47
Show Gist options
  • Save nolash/d94c45ffc28c6066b7fceb9fe12a4a45 to your computer and use it in GitHub Desktop.
Benchmark swarm hash worker handling
package hasher
import (
	"encoding/binary"
	"math/rand"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"testing"

	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/swarm/storage"
)
// init raises the log level to Info on stderr so benchmark runs
// produce visible output without debug noise.
func init() {
	infoHandler := log.LvlFilterHandler(log.LvlInfo, log.StderrHandler)
	log.Root().SetHandler(infoHandler)
}
// hasher abstracts the three hasher-sharing strategies under test
// (simple, instancer, pool) behind a single hashing method.
type hasher interface {
	// hash returns the swarm hash digest of data.
	hash([]byte) []byte
}
// simple is the mutex strategy: a single shared SwarmHash instance
// serialized behind a lock, so all goroutines hash one at a time.
type simple struct {
	mu     sync.Mutex        // guards hasher, which is not safe for concurrent use
	hasher storage.SwarmHash // the one shared hasher instance
}
// newSimple builds the mutex-guarded strategy with one default
// swarm hasher created up front.
func newSimple() *simple {
	s := &simple{}
	s.hasher = storage.MakeHashFunc(storage.DefaultHash)()
	return s
}
// hash digests p through the single shared hasher; the mutex makes
// concurrent callers take turns. defer keeps the lock released even
// if the underlying hasher panics.
func (s *simple) hash(p []byte) []byte {
	s.mu.Lock()
	defer s.mu.Unlock()

	return doHash(s.hasher, p)
}
// instancer is the allocate-per-call strategy: it keeps only the hash
// constructor and builds a fresh SwarmHash for every hash invocation.
type instancer struct {
	hashFunc storage.SwarmHasher // constructor invoked once per hash call
}
// newInstancer builds the per-call strategy, capturing the default
// swarm hash constructor for later use.
func newInstancer() *instancer {
	i := new(instancer)
	i.hashFunc = storage.MakeHashFunc(storage.DefaultHash)
	return i
}
// hash digests data on a brand-new hasher instance, paying one
// allocation per call in exchange for zero coordination.
func (i *instancer) hash(data []byte) []byte {
	return doHash(i.hashFunc(), data)
}
// pool is the sync.Pool strategy: hashers are checked out and returned
// around each hash call, with a pre-seeded set of workers and on-demand
// creation when the pool runs dry.
type pool struct {
	workers sync.Pool
	// inc counts hashers created on demand by the pool's New hook.
	// It must be read/written atomically: sync.Pool may call New from
	// several goroutines at the same time.
	inc int64
}

// newPool builds the pool strategy, pre-populating the pool with
// workerCount ready-made hashers so the benchmark starts warm.
func newPool(workerCount int64) *pool {
	p := &pool{}
	p.workers = sync.Pool{
		New: func() interface{} {
			// BUGFIX: the original did a plain p.inc++ here, which is a
			// data race — New can run concurrently on pool misses.
			atomic.AddInt64(&p.inc, 1)
			return storage.MakeHashFunc(storage.DefaultHash)()
		},
	}
	for i := int64(0); i < workerCount; i++ {
		p.workers.Put(storage.MakeHashFunc(storage.DefaultHash)())
	}
	return p
}
// hash borrows a worker hasher from the pool, digests data with it,
// and guarantees the worker is returned even on panic.
func (p *pool) hash(data []byte) []byte {
	worker := p.workers.Get().(storage.SwarmHash)
	defer p.workers.Put(worker)

	return doHash(worker, data)
}
// doHash runs one full swarm hash cycle on h: reset with the
// little-endian encoded payload length, write the payload, and
// return the digest. A stack array avoids a per-call heap slice
// for the 8-byte length prefix.
func doHash(h storage.SwarmHash, data []byte) []byte {
	var lengthPrefix [8]byte
	binary.LittleEndian.PutUint64(lengthPrefix[:], uint64(len(data)))
	h.ResetWithLength(lengthPrefix[:])
	h.Write(data)
	return h.Sum(nil)
}
// BenchmarkPool drives benchmarkPool over every combination of hasher
// strategy and workload. Each sub-benchmark name encodes its own
// parameters as "<mode>/<workers>/<passes>/<chunksize>", which
// benchmarkPool parses back out of b.Name().
func BenchmarkPool(b *testing.B) {
	// workers/passes/chunksize workloads, listed in the original run order.
	workloads := []string{
		"100/100/256",
		"100/1000/256",
		"100/100/4096",
		"100/1000/4096",
		"1000/1000/256",
		"1000/10000/256",
		"1000/1000/4096",
		"1000/10000/4096",
		"100/10000/256",
		"100/10000/4096",
	}
	modes := []string{"simple", "pool", "instance"}
	for _, load := range workloads {
		for _, mode := range modes {
			b.Run(mode+"/"+load, benchmarkPool)
		}
	}
}
// benchmarkPool runs one hashing benchmark configuration, decoded from
// the sub-benchmark name "Benchmark.../<mode>/<workers>/<passes>/<size>":
// mode selects the hasher-sharing strategy, workers sizes the pool,
// passes is the number of concurrent hash goroutines per iteration, and
// size is the bytes hashed per pass.
//
// Fixes over the original: the three strconv.ParseInt errors were
// silently discarded, the rand.Read error was checked AFTER the
// short-read check (so a genuine error would be misreported as "short
// read"), and params was indexed without a length check.
func benchmarkPool(b *testing.B) {
	params := strings.Split(b.Name(), "/")
	if len(params) < 5 {
		b.Fatalf("malformed benchmark name %q, want mode/workers/passes/size", b.Name())
	}
	mode := params[1]
	workers, err := strconv.ParseInt(params[2], 10, 0)
	if err != nil {
		b.Fatalf("bad worker count %q: %v", params[2], err)
	}
	passes, err := strconv.ParseInt(params[3], 10, 0)
	if err != nil {
		b.Fatalf("bad pass count %q: %v", params[3], err)
	}
	size, err := strconv.ParseInt(params[4], 10, 0)
	if err != nil {
		b.Fatalf("bad chunk size %q: %v", params[4], err)
	}
	// One shared buffer; pass j hashes the window data[j : j+size].
	data := make([]byte, passes+size)
	c, err := rand.Read(data)
	if err != nil {
		b.Fatal(err)
	}
	if int64(c) != passes+size {
		b.Fatal("short read")
	}
	var hasherUsed hasher
	switch mode {
	case "simple":
		hasherUsed = newSimple()
	case "pool":
		hasherUsed = newPool(workers)
	default:
		hasherUsed = newInstancer()
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		var wg sync.WaitGroup
		for j := int64(0); j < passes; j++ {
			wg.Add(1)
			// j is passed explicitly so each goroutine hashes its own window.
			go func(offset int64) {
				defer wg.Done()
				hasherUsed.hash(data[offset : offset+size])
			}(j)
		}
		wg.Wait()
	}
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment