Created
August 23, 2018 04:31
-
-
Save lrita/efa8c4ae555b4b7cceee29b4ed819652 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// +build !race | |
package sync1 | |
import "unsafe" | |
// enabled reports whether race-detector hooks are active; this file is
// built only without the "race" tag, so every hook below is a no-op.
const enabled = false

// Acquire is a no-op when the race detector is disabled.
func Acquire(addr unsafe.Pointer) {}

// Release is a no-op when the race detector is disabled.
func Release(addr unsafe.Pointer) {}

// ReleaseMerge is a no-op when the race detector is disabled.
func ReleaseMerge(addr unsafe.Pointer) {}

// Disable is a no-op when the race detector is disabled.
func Disable() {}

// Enable is a no-op when the race detector is disabled.
func Enable() {}

// Read is a no-op when the race detector is disabled.
func Read(addr unsafe.Pointer) {}

// Write is a no-op when the race detector is disabled.
func Write(addr unsafe.Pointer) {}

// ReadRange is a no-op when the race detector is disabled.
func ReadRange(addr unsafe.Pointer, len int) {}

// WriteRange is a no-op when the race detector is disabled.
func WriteRange(addr unsafe.Pointer, len int) {}

// Errors always reports zero detected races in non-race builds.
func Errors() int { return 0 }
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
package sync1 | |
import ( | |
"runtime" | |
"sync" | |
"sync/atomic" | |
"unsafe" | |
) | |
// noCopy may be embedded into structs which must not be copied | |
// after the first use. | |
// | |
// See https://golang.org/issues/8005#issuecomment-190753527 | |
// for details. | |
type noCopy struct{} | |
// Lock is a no-op used by -copylocks checker from `go vet`. | |
func (*noCopy) Lock() {} | |
// A Pool is a set of temporary objects that may be individually saved and | |
// retrieved. | |
// | |
// Any item stored in the Pool may be removed automatically at any time without | |
// notification. If the Pool holds the only reference when this happens, the | |
// item might be deallocated. | |
// | |
// A Pool is safe for use by multiple goroutines simultaneously. | |
// | |
// Pool's purpose is to cache allocated but unused items for later reuse, | |
// relieving pressure on the garbage collector. That is, it makes it easy to | |
// build efficient, thread-safe free lists. However, it is not suitable for all | |
// free lists. | |
// | |
// An appropriate use of a Pool is to manage a group of temporary items | |
// silently shared among and potentially reused by concurrent independent | |
// clients of a package. Pool provides a way to amortize allocation overhead | |
// across many clients. | |
// | |
// An example of good use of a Pool is in the fmt package, which maintains a | |
// dynamically-sized store of temporary output buffers. The store scales under | |
// load (when many goroutines are actively printing) and shrinks when | |
// quiescent. | |
// | |
// On the other hand, a free list maintained as part of a short-lived object is | |
// not a suitable use for a Pool, since the overhead does not amortize well in | |
// that scenario. It is more efficient to have such objects implement their own | |
// free list. | |
// | |
// A Pool must not be copied after first use. | |
type Pool struct {
	noCopy noCopy // makes `go vet -copylocks` flag copies of a used Pool

	local     unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal
	localSize uintptr        // size of the local array

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}
}
// Local per-P Pool appendix.
//
// private is accessed only while the goroutine is pinned to this P;
// shared may be touched by any P (e.g. by getSlow stealing) and is
// guarded by the embedded Mutex.
type poolLocalInternal struct {
	private interface{}   // Can be used only by the respective P.
	shared  []interface{} // Can be used by any P.
	sync.Mutex            // Protects shared.
}
// poolLocal pads poolLocalInternal to a multiple of 128 bytes so that
// adjacent per-P entries in the [P]poolLocal array never share a cache line.
type poolLocal struct {
	poolLocalInternal

	// Prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0 .
	pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte
}
// from runtime
//
// fastrand returns a pseudo-random uint32 from the runtime's internal
// generator; linked in so Put can randomly drop values without locking.
//go:linkname fastrand runtime.fastrand
func fastrand() uint32
// poolRaceHash is a fixed set of shadow words used as race-detector
// synchronization points for pooled values.
var poolRaceHash [128]uint64

// poolRaceAddr returns an address to use as the synchronization point
// for race detector logic. We don't use the actual pointer stored in x
// directly, for fear of conflicting with other synchronization on that address.
// Instead, we hash the pointer to get an index into poolRaceHash.
// See discussion on golang.org/cl/31589.
func poolRaceAddr(x interface{}) unsafe.Pointer {
	// An interface value is two words; the second word is the data pointer.
	ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1])
	// Multiplicative hash of the low 32 bits (Murmur3-style constant).
	h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16)
	return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))])
}
// Put adds x to the pool.
//
// A nil x is ignored. Under the race detector (enabled == true), roughly a
// quarter of values are deliberately dropped and a release edge is published
// on a hashed shadow address before reporting is disabled for the pinned
// section, mirroring the runtime's sync.Pool.
func (p *Pool) Put(x interface{}) {
	if x == nil {
		return
	}
	if enabled {
		if fastrand()%4 == 0 {
			// Randomly drop x on floor.
			return
		}
		ReleaseMerge(poolRaceAddr(x))
		Disable()
	}
	l := p.pin()
	// The private slot is guarded only by proc-pinning, not by l's mutex;
	// presumably that is why the race detector still flags this access in a
	// user-space copy of sync.Pool — confirm against the gist's discussion.
	if l.private == nil { // <- here report DATA RACE
		l.private = x
		x = nil
	}
	runtime_procUnpin()
	// private was occupied: fall back to the mutex-protected shared slice.
	if x != nil {
		l.Lock()
		l.shared = append(l.shared, x)
		l.Unlock()
	}
	if enabled {
		Enable()
	}
}
// Get selects an arbitrary item from the Pool, removes it from the
// Pool, and returns it to the caller.
// Get may choose to ignore the pool and treat it as empty.
// Callers should not assume any relation between values passed to Put and
// the values returned by Get.
//
// If Get would otherwise return nil and p.New is non-nil, Get returns
// the result of calling p.New.
func (p *Pool) Get() interface{} {
	if enabled {
		Disable()
	}
	l := p.pin()
	// Fast path: take the per-P private slot while pinned. As in Put, this
	// relies on pinning rather than the mutex, which the race detector
	// reports on in this user-space copy.
	x := l.private
	l.private = nil // <- here report DATA RACE
	runtime_procUnpin()
	if x == nil {
		// Pop from the tail of this P's shared slice under its lock.
		l.Lock()
		last := len(l.shared) - 1
		if last >= 0 {
			x = l.shared[last]
			l.shared = l.shared[:last]
		}
		l.Unlock()
		// Still empty: try stealing from other Ps.
		if x == nil {
			x = p.getSlow()
		}
	}
	if enabled {
		Enable()
		if x != nil {
			// Pair with the ReleaseMerge done in Put for this value.
			Acquire(poolRaceAddr(x))
		}
	}
	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}
// getSlow attempts to steal one element from another P's shared list.
// It returns nil if every shared list is empty.
func (p *Pool) getSlow() (x interface{}) {
	// See the comment in pin regarding ordering of the loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	local := p.local                         // load-consume
	// Try to steal one element from other procs.
	// Pin only long enough to learn this P's id; the steal loop itself
	// runs unpinned and relies on each victim's mutex.
	pid := runtime_procPin()
	runtime_procUnpin()
	for i := 0; i < int(size); i++ {
		// Start with the neighbor (pid+1) and wrap around the array.
		l := indexLocal(local, (pid+i+1)%int(size))
		l.Lock()
		last := len(l.shared) - 1
		if last >= 0 {
			x = l.shared[last]
			l.shared = l.shared[:last]
			l.Unlock()
			break
		}
		l.Unlock()
	}
	return x
}
// pin pins the current goroutine to P, disables preemption and returns poolLocal pool for the P.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pool) pin() *poolLocal {
	pid := runtime_procPin()
	// In pinSlow we store to localSize and then to local, here we load in opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe local at least as large localSize.
	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire
	l := p.local                          // load-consume
	if uintptr(pid) < s {
		return indexLocal(l, pid)
	}
	// local array missing or too small (first use, or GOMAXPROCS grew).
	return p.pinSlow()
}
// pinSlow is the slow path of pin: it re-checks under allPoolsMu and
// (re)allocates the per-P local array on first use or after a
// GOMAXPROCS change. It returns with the goroutine pinned.
func (p *Pool) pinSlow() *poolLocal {
	// Retry under the mutex.
	// Can not lock the mutex while pinned.
	runtime_procUnpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtime_procPin()
	// poolCleanup won't be called while we are pinned.
	// NOTE(review): unlike the runtime's sync.Pool, this copy registers no
	// poolCleanup, so allPools only ever grows — confirm that is intended.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		// Another goroutine allocated the array while we were unpinned.
		return indexLocal(l, pid)
	}
	if p.local == nil {
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))         // store-release
	return &local[pid]
}
var (
	// allPoolsMu serializes pinSlow's registration path across all Pools.
	allPoolsMu sync.Mutex
	// allPools records every Pool whose per-P local array has been
	// allocated; appended to in pinSlow under allPoolsMu.
	allPools []*Pool
)
func indexLocal(l unsafe.Pointer, i int) *poolLocal { | |
lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{})) | |
return (*poolLocal)(lp) | |
} | |
// Implemented in runtime.

// runtime_procPin pins the current goroutine to its P (disabling
// preemption) and returns that P's id, which indexes the local array.
//go:linkname runtime_procPin runtime.procPin
//go:nosplit
func runtime_procPin() int

// runtime_procUnpin undoes a prior runtime_procPin.
//go:linkname runtime_procUnpin runtime.procUnpin
//go:nosplit
func runtime_procUnpin()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
package sync1 | |
import ( | |
"sync" | |
"testing" | |
) | |
func TestSyncAPI(t *testing.T) { | |
p := &Pool{} | |
wg := &sync.WaitGroup{} | |
wg.Add(3) | |
go func() { | |
p.Get() | |
p.Put(1) | |
wg.Done() | |
}() | |
go func() { | |
p.Put(1) | |
p.Get() | |
wg.Done() | |
}() | |
go func() { | |
p.Get() | |
p.Put(1) | |
wg.Done() | |
}() | |
wg.Wait() | |
} | |
func TestSyncAPI2(t *testing.T) { | |
p := &sync.Pool{} | |
wg := &sync.WaitGroup{} | |
wg.Add(3) | |
go func() { | |
p.Get() | |
p.Put(1) | |
wg.Done() | |
}() | |
go func() { | |
p.Put(1) | |
p.Get() | |
wg.Done() | |
}() | |
go func() { | |
p.Get() | |
p.Put(1) | |
wg.Done() | |
}() | |
wg.Wait() | |
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
// +build race | |
package sync1 | |
import ( | |
"runtime" | |
"unsafe" | |
) | |
// enabled reports that race-detector hooks are active in this build
// (this file is compiled only with the "race" build tag).
const enabled = true

// Acquire establishes a happens-after edge on addr via runtime.RaceAcquire.
func Acquire(addr unsafe.Pointer) {
	runtime.RaceAcquire(addr)
}

// Release establishes a happens-before edge on addr via runtime.RaceRelease.
func Release(addr unsafe.Pointer) {
	runtime.RaceRelease(addr)
}

// ReleaseMerge is like Release but merges with prior release operations
// on addr (runtime.RaceReleaseMerge).
func ReleaseMerge(addr unsafe.Pointer) {
	runtime.RaceReleaseMerge(addr)
}

// Disable suppresses race reporting for the current goroutine.
func Disable() {
	runtime.RaceDisable()
}

// Enable re-enables race reporting for the current goroutine.
func Enable() {
	runtime.RaceEnable()
}

// Read records a read of addr with the race detector.
func Read(addr unsafe.Pointer) {
	runtime.RaceRead(addr)
}

// Write records a write of addr with the race detector.
func Write(addr unsafe.Pointer) {
	runtime.RaceWrite(addr)
}

// ReadRange records a read of len bytes starting at addr.
func ReadRange(addr unsafe.Pointer, len int) {
	runtime.RaceReadRange(addr, len)
}

// WriteRange records a write of len bytes starting at addr.
func WriteRange(addr unsafe.Pointer, len int) {
	runtime.RaceWriteRange(addr, len)
}

// Errors returns the number of races reported so far in this process.
func Errors() int {
	return runtime.RaceErrors()
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment