@CAFxX
Created December 10, 2018 08:37
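
What follows is the Go compiler's inlining diagnostics, apparently from a build of the standard library. "can inline F as: ..." reports a function the inliner accepts, printing the body as the inliner sees it; "inlining call to F" reports a call site where that body was substituted. The recurring pattern: each function guards extra work behind a compile-time constant (bits.UintSize, raceenabled, stackDebug, GOOS/GOARCH, isBigEndian, ...). On this build target the guarded branch is statically dead and shows up as an empty "if ... { }", and with the dead code discounted the function fits under the inlining cost budget. Diagnostics of this shape come from the compiler's -m debug flag (the exact invocation is not recorded in the gist; see the note at the end).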
src/math/bits/bits.go:283:6: can inline Len as: func(uint) int { if UintSize == 32 { }; return Len64(uint64(x)) }
src/math/bits/bits.go:55:6: can inline TrailingZeros as: func(uint) int { if UintSize == 32 { }; return TrailingZeros64(uint64(x)) }
src/math/bits/bits.go:113:6: can inline OnesCount as: func(uint) int { if UintSize == 32 { }; return OnesCount64(uint64(x)) }
src/math/bits/bits.go:170:6: can inline RotateLeft as: func(uint, int) uint { if UintSize == 32 { }; return uint(RotateLeft64(uint64(x), k)) }
src/math/bits/bits.go:212:6: can inline Reverse as: func(uint) uint { if UintSize == 32 { }; return uint(Reverse64(uint64(x))) }
src/math/bits/bits.go:253:6: can inline ReverseBytes as: func(uint) uint { if UintSize == 32 { }; return uint(ReverseBytes64(uint64(x))) }
src/math/bits/bits.go:415:6: can inline Mul as: func(uint, uint) (uint, uint) { if UintSize == 32 { }; h, l = Mul64(uint64(x), uint64(y)); return uint(h), uint(l) }
src/math/bits/bits.go:458:6: can inline Div as: func(uint, uint, uint) (uint, uint) { if UintSize == 32 { }; q, r = Div64(uint64(hi), uint64(lo), uint64(y)); return uint(q), uint(r) }
src/strconv/itoa.go:162:35: inlining call to bits.TrailingZeros func(uint) int { if bool(false) { }; return bits.TrailingZeros64(uint64(bits.x)) }
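
A minimal, self-contained sketch of the math/bits pattern reported above (the names length and uintSize are illustrative stand-ins; the real definitions are in src/math/bits/bits.go):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // uintSize is 32 or 64, matching the width of uint on the target.
    const uintSize = 32 << (^uint(0) >> 63)

    // length mirrors the shape of bits.Len in the diagnostic: on a
    // 64-bit target uintSize == 32 is statically false, its branch is
    // dead code, and what survives is a thin wrapper around bits.Len64,
    // cheap enough for the inliner to accept.
    func length(x uint) int {
        if uintSize == 32 {
            return bits.Len32(uint32(x))
        }
        return bits.Len64(uint64(x))
    }

    func main() {
        fmt.Println(length(255)) // 8
    }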
src/runtime/mheap.go:593:6: can inline spanOf as: func(uintptr) *mspan { ri := arenaIndex(p); if arenaL1Bits == 0 { if ri.l2() >= uint(len(mheap_.arenas[0])) { return nil } }; l2 := mheap_.arenas[ri.l1()]; if arenaL1Bits != 0 { }; ha := l2[ri.l2()]; if ha == nil { return nil }; return ha.spans[p / pageSize % pagesPerArena] }
src/runtime/mheap.go:570:13: inlining call to spanOf func(uintptr) *mspan { ri := arenaIndex(p); if arenaL1Bits == 0 { if ri.l2() >= uint(len(mheap_.arenas[0])) { return nil } }; l2 := mheap_.arenas[ri.l1()]; if arenaL1Bits != 0 { }; ha := l2[ri.l2()]; if ha == nil { return nil }; return ha.spans[p / pageSize % pagesPerArena] }
src/runtime/mgcmark.go:1261:13: inlining call to spanOf func(uintptr) *mspan { ri := arenaIndex(p); if arenaL1Bits == 0 { if ri.l2() >= uint(len(mheap_.arenas[0])) { return nil } }; l2 := mheap_.arenas[ri.l1()]; if arenaL1Bits != 0 { }; ha := l2[ri.l2()]; if ha == nil { return nil }; return ha.spans[p / pageSize % pagesPerArena] }
src/runtime/lfstack_64bit.go:41:6: can inline lfstackPack as: func(*lfnode, uintptr) uint64 { if GOARCH == "ppc64" && GOOS == "aix" { }; return uint64(uintptr(unsafe.Pointer(node))) << (64 - addrBits) | uint64(cnt & (1 << cntBits - 1)) }
src/runtime/mbitmap.go:363:12: inlining call to spanOf func(uintptr) *mspan { ri := arenaIndex(p); if arenaL1Bits == 0 { if ri.l2() >= uint(len(mheap_.arenas[0])) { return nil } }; l2 := mheap_.arenas[ri.l1()]; if arenaL1Bits != 0 { }; ha := l2[ri.l2()]; if ha == nil { return nil }; return ha.spans[p / pageSize % pagesPerArena] }
src/runtime/mheap.go:1213:21: inlining call to spanOf func(uintptr) *mspan { ri := arenaIndex(p); if arenaL1Bits == 0 { if ri.l2() >= uint(len(mheap_.arenas[0])) { return nil } }; l2 := mheap_.arenas[ri.l1()]; if arenaL1Bits != 0 { }; ha := l2[ri.l2()]; if ha == nil { return nil }; return ha.spans[p / pageSize % pagesPerArena] }
src/runtime/mheap.go:1234:20: inlining call to spanOf func(uintptr) *mspan { ri := arenaIndex(p); if arenaL1Bits == 0 { if ri.l2() >= uint(len(mheap_.arenas[0])) { return nil } }; l2 := mheap_.arenas[ri.l1()]; if arenaL1Bits != 0 { }; ha := l2[ri.l2()]; if ha == nil { return nil }; return ha.spans[p / pageSize % pagesPerArena] }
src/runtime/lfstack.go:62:30: inlining call to lfstackPack func(*lfnode, uintptr) uint64 { if GOARCH == "ppc64" && GOOS == "aix" { }; return uint64(uintptr(unsafe.Pointer(node))) << (64 - addrBits) | uint64(cnt & (1 << cntBits - 1)) }
src/runtime/lfstack.go:27:20: inlining call to lfstackPack func(*lfnode, uintptr) uint64 { if GOARCH == "ppc64" && GOOS == "aix" { }; return uint64(uintptr(unsafe.Pointer(node))) << (64 - addrBits) | uint64(cnt & (1 << cntBits - 1)) }
src/runtime/type.go:169:6: can inline reflectOffsLock as: func() { lock(&reflectOffs.lock); if raceenabled { } }
src/runtime/type.go:176:6: can inline reflectOffsUnlock as: func() { if raceenabled { }; unlock(&reflectOffs.lock) }
src/runtime/type.go:200:17: inlining call to reflectOffsLock func() { lock(&reflectOffs.lock); if raceenabled { } }
src/runtime/type.go:202:19: inlining call to reflectOffsUnlock func() { if raceenabled { }; unlock(&reflectOffs.lock) }
src/runtime/proc.go:3493:6: can inline dolockOSThread as: func() { if GOARCH == "wasm" { }; _g_ := getg(); _g_.m.lockedg.set(_g_); _g_.lockedm.set(_g_.m) }
src/runtime/proc.go:3537:16: inlining call to dolockOSThread func() { if GOARCH == "wasm" { }; _g_ := getg(); _g_.m.lockedg.set(_g_); _g_.lockedm.set(_g_.m) }
src/runtime/proc.go:3544:6: can inline dounlockOSThread as: func() { if GOARCH == "wasm" { }; _g_ := getg(); if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 { return }; _g_.m.lockedg = 0; _g_.lockedm = 0 }
src/runtime/proc.go:3586:18: inlining call to dounlockOSThread func() { if GOARCH == "wasm" { }; _g_ := getg(); if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 { return }; _g_.m.lockedg = 0; _g_.lockedm = 0 }
src/runtime/cgocall.go:176:14: inlining call to dolockOSThread func() { if GOARCH == "wasm" { }; _g_ := getg(); _g_.m.lockedg.set(_g_); _g_.lockedm.set(_g_.m) }
src/runtime/mheap.go:640:13: inlining call to spanOf func(uintptr) *mspan { ri := arenaIndex(p); if arenaL1Bits == 0 { if ri.l2() >= uint(len(mheap_.arenas[0])) { return nil } }; l2 := mheap_.arenas[ri.l1()]; if arenaL1Bits != 0 { }; ha := l2[ri.l2()]; if ha == nil { return nil }; return ha.spans[p / pageSize % pagesPerArena] }
src/runtime/stack.go:531:6: can inline adjustpointer as: func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/stack.go:744:16: inlining call to adjustpointer func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/stack.go:706:15: inlining call to adjustpointer func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/stack.go:718:15: inlining call to adjustpointer func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/stack.go:728:16: inlining call to adjustpointer func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/stack.go:729:16: inlining call to adjustpointer func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/stack.go:730:16: inlining call to adjustpointer func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/stack.go:737:15: inlining call to adjustpointer func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/stack.go:869:14: inlining call to adjustpointer func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/symtab.go:934:6: can inline stackmapdata as: func(*stackmap, int32) bitvector { if stackDebug > 0 { }; return bitvector literal }
src/runtime/stack.go:1245:25: inlining call to stackmapdata func(*stackmap, int32) bitvector { if stackDebug > 0 { }; return bitvector literal }
src/runtime/stack.go:1277:24: inlining call to stackmapdata func(*stackmap, int32) bitvector { if stackDebug > 0 { }; return bitvector literal }
src/runtime/malloc.go:1105:6: can inline nextSample as: func() int32 { if GOOS == "plan9" { }; return fastexprand(MemProfileRate) }
src/runtime/mcache.go:93:28: inlining call to nextSample func() int32 { if GOOS == "plan9" { }; return fastexprand(MemProfileRate) }
src/runtime/mbitmap.go:267:13: inlining call to spanOf func(uintptr) *mspan { ri := arenaIndex(p); if arenaL1Bits == 0 { if ri.l2() >= uint(len(mheap_.arenas[0])) { return nil } }; l2 := mheap_.arenas[ri.l1()]; if arenaL1Bits != 0 { }; ha := l2[ri.l2()]; if ha == nil { return nil }; return ha.spans[p / pageSize % pagesPerArena] }
src/runtime/malloc.go:1094:36: inlining call to nextSample func() int32 { if GOOS == "plan9" { }; return fastexprand(MemProfileRate) }
src/runtime/mbitmap.go:596:16: inlining call to spanOf func(uintptr) *mspan { ri := arenaIndex(p); if arenaL1Bits == 0 { if ri.l2() >= uint(len(mheap_.arenas[0])) { return nil } }; l2 := mheap_.arenas[ri.l1()]; if arenaL1Bits != 0 { }; ha := l2[ri.l2()]; if ha == nil { return nil }; return ha.spans[p / pageSize % pagesPerArena] }
src/runtime/env_posix.go:26:6: can inline envKeyEqual as: func(string, string) bool { if GOOS == "windows" { }; return a == b }
src/runtime/env_posix.go:17:60: inlining call to envKeyEqual func(string, string) bool { if GOOS == "windows" { }; return a == b }
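
The same trick works with runtime.GOOS, which is a constant string, as in the envKeyEqual report above. A sketch (strings.EqualFold stands in for the runtime's internal ASCII case-folding, which the diagnostic does not show):

    package main

    import (
        "fmt"
        "runtime"
        "strings"
    )

    // envKeyEqual mirrors the runtime.envKeyEqual report: because
    // runtime.GOOS is a compile-time constant, on non-Windows targets
    // the branch folds away and the function reduces to a == b.
    func envKeyEqual(a, b string) bool {
        if runtime.GOOS == "windows" {
            // Windows environment variable keys compare
            // case-insensitively.
            return strings.EqualFold(a, b)
        }
        return a == b
    }

    func main() {
        fmt.Println(envKeyEqual("PATH", "PATH")) // true
    }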
src/runtime/heapdump.go:265:20: inlining call to stackmapdata func(*stackmap, int32) bitvector { if stackDebug > 0 { }; return bitvector literal }
src/runtime/heapdump.go:324:28: inlining call to stackmapdata func(*stackmap, int32) bitvector { if stackDebug > 0 { }; return bitvector literal }
src/runtime/type.go:230:18: inlining call to reflectOffsLock func() { lock(&reflectOffs.lock); if raceenabled { } }
src/runtime/type.go:232:20: inlining call to reflectOffsUnlock func() { if raceenabled { }; unlock(&reflectOffs.lock) }
src/runtime/type.go:267:18: inlining call to reflectOffsLock func() { lock(&reflectOffs.lock); if raceenabled { } }
src/runtime/type.go:269:20: inlining call to reflectOffsUnlock func() { if raceenabled { }; unlock(&reflectOffs.lock) }
src/runtime/map.go:1354:6: can inline reflect_maplen as: func(*hmap) int { if h == nil { return 0 }; if raceenabled { }; return h.count }
src/runtime/mbarrier.go:177:6: can inline reflect_typedmemmove as: func(*_type, unsafe.Pointer, unsafe.Pointer) { if raceenabled { }; if msanenabled { }; typedmemmove(typ, dst, src) }
src/runtime/mgcstack.go:139:6: can inline init.1 as: func() { if unsafe.Sizeof(stackWorkBuf literal) > unsafe.Sizeof(composite literal) { }; if unsafe.Sizeof(stackObjectBuf literal) > unsafe.Sizeof(composite literal) { } }
src/runtime/mgcwork.go:32:6: can inline init.2 as: func() { if workbufAlloc % pageSize != 0 || workbufAlloc % _WorkbufSize != 0 { } }
src/runtime/panic.go:827:6: can inline canpanic as: func(*g) bool { _g_ := getg(); _m_ := _g_.m; if gp == nil || gp != _m_.curg { return false }; if _m_.locks != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 { return false }; status := readgstatus(gp); if status &^ _Gscan != _Grunning || gp.syscallsp != 0 { return false }; if GOOS == "windows" { }; return true }
src/runtime/proc.go:1869:6: can inline startTemplateThread as: func() { if GOARCH == "wasm" { }; if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) { return }; newm(templateThread, nil) }
src/runtime/proc.go:141:14: inlining call to dolockOSThread func() { if GOARCH == "wasm" { }; _g_ := getg(); _g_.m.lockedg.set(_g_); _g_.lockedm.set(_g_.m) }
src/runtime/proc.go:183:22: inlining call to startTemplateThread func() { if GOARCH == "wasm" { }; if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) { return }; newm(templateThread, nil) }
src/runtime/proc.go:231:6: can inline os_beforeExit as: func() { if raceenabled { } }
src/runtime/sys_x86.go:16:6: can inline gostartcall as: func(*gobuf, unsafe.Pointer, unsafe.Pointer) { sp := buf.sp; if sys.RegSize > sys.PtrSize { }; sp -= sys.PtrSize; *(*uintptr)(unsafe.Pointer(sp)) = buf.pc; buf.sp = sp; buf.pc = uintptr(fn); buf.ctxt = ctxt }
src/runtime/stack.go:1072:13: inlining call to gostartcall func(*gobuf, unsafe.Pointer, unsafe.Pointer) { sp := buf.sp; if sys.RegSize > sys.PtrSize { }; sp -= sys.PtrSize; *(*uintptr)(unsafe.Pointer(sp)) = buf.pc; buf.sp = sp; buf.pc = uintptr(fn); buf.ctxt = ctxt }
src/runtime/proc.go:3304:22: inlining call to stackmapdata func(*stackmap, int32) bitvector { if stackDebug > 0 { }; return bitvector literal }
src/runtime/proc.go:3314:15: inlining call to gostartcall func(*gobuf, unsafe.Pointer, unsafe.Pointer) { sp := buf.sp; if sys.RegSize > sys.PtrSize { }; sp -= sys.PtrSize; *(*uintptr)(unsafe.Pointer(sp)) = buf.pc; buf.sp = sp; buf.pc = uintptr(fn); buf.ctxt = ctxt }
src/runtime/proc.go:3523:22: inlining call to startTemplateThread func() { if GOARCH == "wasm" { }; if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) { return }; newm(templateThread, nil) }
src/runtime/proc.go:3531:16: inlining call to dolockOSThread func() { if GOARCH == "wasm" { }; _g_ := getg(); _g_.m.lockedg.set(_g_); _g_.lockedm.set(_g_.m) }
src/runtime/proc.go:3576:18: inlining call to dounlockOSThread func() { if GOARCH == "wasm" { }; _g_ := getg(); if _g_.m.lockedInt != 0 || _g_.m.lockedExt != 0 { return }; _g_.m.lockedg = 0; _g_.lockedm = 0 }
src/runtime/proflabel.go:12:6: can inline runtime_setProfLabel as: func(unsafe.Pointer) { if raceenabled { }; getg().labels = labels }
src/runtime/runtime1.go:492:17: inlining call to reflectOffsLock func() { lock(&reflectOffs.lock); if raceenabled { } }
src/runtime/runtime1.go:505:19: inlining call to reflectOffsUnlock func() { if raceenabled { }; unlock(&reflectOffs.lock) }
src/runtime/signal_unix.go:64:6: can inline init.6 as: func() { if len(sigtable) != _NSIG { } }
src/runtime/signal_unix.go:373:14: inlining call to canpanic func(*g) bool { _g_ := getg(); _m_ := _g_.m; if gp == nil || gp != _m_.curg { return false }; if _m_.locks != 0 || _m_.mallocing != 0 || _m_.throwing != 0 || _m_.preemptoff != "" || _m_.dying != 0 { return false }; status := readgstatus(gp); if status &^ _Gscan != _Grunning || gp.syscallsp != 0 { return false }; if GOOS == "windows" { }; return true }
src/runtime/stack.go:656:16: inlining call to adjustpointer func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/stack.go:693:19: inlining call to adjustpointer func(*adjustinfo, unsafe.Pointer) { pp := (*uintptr)(vpp); p := *pp; if stackDebug >= 4 { }; if adjinfo.old.lo <= p && p < adjinfo.old.hi { *pp = p + adjinfo.delta; if stackDebug >= 3 { } } }
src/runtime/string.go:142:6: can inline slicebytetostringtmp as: func([]byte) string { if raceenabled { }; if msanenabled { }; return *(*string)(unsafe.Pointer(&b)) }
src/runtime/time.go:110:6: can inline startTimer as: func(*timer) { if raceenabled { }; addtimer(t) }
src/sync/rwmutex.go:43:6: can inline (*RWMutex).RLock as: method(*RWMutex) func() { if race.Enabled { }; if "sync/atomic".AddInt32(&rw.readerCount, 1) < 0 { runtime_SemacquireMutex(&rw.readerSem, false) }; if race.Enabled { } }
src/sync/rwmutex.go:145:49: inlining call to (*RWMutex).RLock method(*RWMutex) func() { if race.Enabled { }; if "sync/atomic".AddInt32(&rw.readerCount, 1) < 0 { runtime_SemacquireMutex(&rw.readerSem, false) }; if race.Enabled { } }
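
The RLock lines above show the payoff crossing package boundaries: with race.Enabled a build-time constant (false unless the binary is built with -race), both instrumentation branches are dead and the hot path is one atomic add plus a rarely taken semaphore acquire, so callers in syscall, internal/poll, os, and go/token further down all get the lock inlined. A usage sketch:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        var rw sync.RWMutex
        rw.RLock() // per the log, this call is eligible for inlining
        defer rw.RUnlock()
        fmt.Println("read-locked")
    }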
src/syscall/dirent.go:12:6: can inline readInt as: func([]byte, uintptr, uintptr) (uint64, bool) { if len(b) < int(off + size) { return 0, false }; if isBigEndian { }; return readIntLE(b[off:], size), true }
src/syscall/syscall_linux.go:871:16: inlining call to readInt func([]byte, uintptr, uintptr) (uint64, bool) { if len(b) < int(off + size) { return 0, false }; if isBigEndian { }; return readIntLE(b[off:], size), true }
src/syscall/syscall_linux.go:867:16: inlining call to readInt func([]byte, uintptr, uintptr) (uint64, bool) { if len(b) < int(off + size) { return 0, false }; if isBigEndian { }; return readIntLE(b[off:], size), true }
src/syscall/env_unix.go:77:15: inlining call to sync.(*RWMutex).RLock method(*sync.RWMutex) func() { if bool(false) { }; if "sync/atomic".AddInt32(&sync.rw.readerCount, int32(1)) < int32(0) { sync.runtime_SemacquireMutex(&sync.rw.readerSem, bool(false)) }; if bool(false) { } }
src/syscall/env_unix.go:140:15: inlining call to sync.(*RWMutex).RLock method(*sync.RWMutex) func() { if bool(false) { }; if "sync/atomic".AddInt32(&sync.rw.readerCount, int32(1)) < int32(0) { sync.runtime_SemacquireMutex(&sync.rw.readerSem, bool(false)) }; if bool(false) { } }
src/syscall/syscall_unix.go:187:6: can inline Write as: func(int, []byte) (int, error) { if race.Enabled { }; n, err = write(fd, p); if race.Enabled { }; if msanenabled { }; return }
src/syscall/exec_linux.go:453:20: inlining call to Write func(int, []byte) (int, error) { if race.Enabled { }; n, err = write(fd, p); if race.Enabled { }; if msanenabled { }; return }
src/syscall/exec_linux.go:483:20: inlining call to Write func(int, []byte) (int, error) { if race.Enabled { }; n, err = write(fd, p); if race.Enabled { }; if msanenabled { }; return }
src/syscall/sockcmsg_unix.go:14:6: can inline cmsgAlignOf as: func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:34:20: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:34:49: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:28:20: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:38:72: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_linux.go:15:29: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_linux.go:15:29: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_linux.go:19:18: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_linux.go:20:21: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:69:25: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:52:15: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:53:52: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:59:19: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:76:29: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:76:29: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:80:18: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
src/syscall/sockcmsg_unix.go:81:18: inlining call to cmsgAlignOf func(int) int { salign := sizeofPtr; if darwin64Bit || dragonfly64Bit || solaris64Bit { }; return (salen + salign - 1) & ^(salign - 1) }
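
The cmsgAlignOf body is the standard round-up-to-alignment expression: (salen + salign - 1) &^ (salign - 1) rounds salen up to the next multiple of salign, for any power-of-two salign. A worked example (alignUp is an illustrative name):

    package main

    import "fmt"

    // alignUp reproduces the expression printed for cmsgAlignOf.
    func alignUp(salen, salign int) int {
        return (salen + salign - 1) &^ (salign - 1)
    }

    func main() {
        fmt.Println(alignUp(13, 8)) // 16
        fmt.Println(alignUp(16, 8)) // 16
        fmt.Println(alignUp(1, 4))  // 4
    }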
src/syscall/syscall_unix.go:171:6: can inline Read as: func(int, []byte) (int, error) { n, err = read(fd, p); if race.Enabled { }; if msanenabled { }; return }
src/syscall/syscall_unix.go:334:6: can inline Sendfile as: func(int, int, *int64, int) (int, error) { if race.Enabled { }; return sendfile(outfd, infd, offset, count) }
src/time/sys_unix.go:28:21: inlining call to syscall.Read func(int, []byte) (int, error) { syscall.n, syscall.err = syscall.read(syscall.fd, syscall.p); if bool(false) { }; if bool(false) { }; return }
src/time/sys_unix.go:44:25: inlining call to syscall.Read func(int, []byte) (int, error) { syscall.n, syscall.err = syscall.read(syscall.fd, syscall.p); if bool(false) { }; if bool(false) { }; return }
src/time/zoneinfo_read.go:433:16: inlining call to syscall.Read func(int, []byte) (int, error) { syscall.n, syscall.err = syscall.read(syscall.fd, syscall.p); if bool(false) { }; if bool(false) { }; return }
src/internal/poll/fd_unix.go:165:25: inlining call to syscall.Read func(int, []byte) (int, error) { syscall.n, syscall.err = syscall.read(syscall.fd, syscall.p); if bool(false) { }; if bool(false) { }; return }
src/internal/poll/fd_unix.go:268:26: inlining call to syscall.Write func(int, []byte) (int, error) { if bool(false) { }; syscall.n, syscall.err = syscall.write(syscall.fd, syscall.p); if bool(false) { }; if bool(false) { }; return }
src/internal/poll/fd_unix.go:475:24: inlining call to sync.(*RWMutex).RLock method(*sync.RWMutex) func() { if bool(false) { }; if "sync/atomic".AddInt32(&sync.rw.readerCount, int32(1)) < int32(0) { sync.runtime_SemacquireMutex(&sync.rw.readerSem, bool(false)) }; if bool(false) { } }
src/internal/poll/fd_unix.go:507:22: inlining call to syscall.Write func(int, []byte) (int, error) { if bool(false) { }; syscall.n, syscall.err = syscall.write(syscall.fd, syscall.p); if bool(false) { }; if bool(false) { }; return }
src/internal/poll/sendfile_linux.go:28:30: inlining call to syscall.Sendfile func(int, int, *int64, int) (int, error) { if bool(false) { }; return syscall.sendfile(syscall.outfd, syscall.infd, syscall.offset, syscall.count) }
src/os/exec_unix.go:62:15: inlining call to sync.(*RWMutex).RLock method(*sync.RWMutex) func() { if bool(false) { }; if "sync/atomic".AddInt32(&sync.rw.readerCount, int32(1)) < int32(0) { sync.runtime_SemacquireMutex(&sync.rw.readerSem, bool(false)) }; if bool(false) { } }
src/os/file_unix.go:330:6: can inline tempDir as: func() string { dir := Getenv("TMPDIR"); if dir == "" { if runtime.GOOS == "android" { } else { dir = "/tmp" } }; return dir }
src/os/file.go:330:16: inlining call to tempDir func() string { dir := Getenv("TMPDIR"); if dir == "" { if runtime.GOOS == "android" { } else { dir = "/tmp" } }; return dir }
src/os/pipe_linux.go:19:25: inlining call to sync.(*RWMutex).RLock method(*sync.RWMutex) func() { if bool(false) { }; if "sync/atomic".AddInt32(&sync.rw.readerCount, int32(1)) < int32(0) { sync.runtime_SemacquireMutex(&sync.rw.readerSem, bool(false)) }; if bool(false) { } }
src/os/proc.go:17:6: can inline init.0 as: func() { if runtime.GOOS == "windows" { }; Args = runtime_args() }
src/io/ioutil/tempfile.go:52:19: inlining call to os.tempDir func() string { var os.dir·2 string; os.dir·2 = <N>; os.dir·2 = os.Getenv(string("TMPDIR")); if os.dir·2 == string("") { if bool(false) { } else { os.dir·2 = string("/tmp") } }; return os.dir·2 }
src/io/ioutil/tempfile.go:88:19: inlining call to os.tempDir func() string { var os.dir·2 string; os.dir·2 = <N>; os.dir·2 = os.Getenv(string("TMPDIR")); if os.dir·2 == string("") { if bool(false) { } else { os.dir·2 = string("/tmp") } }; return os.dir·2 }
src/go/token/position.go:385:15: inlining call to sync.(*RWMutex).RLock method(*sync.RWMutex) func() { if bool(false) { }; if atomic.AddInt32(&sync.rw.readerCount, int32(1)) < int32(0) { sync.runtime_SemacquireMutex(&sync.rw.readerSem, bool(false)) }; if bool(false) { } }
src/go/token/position.go:436:16: inlining call to sync.(*RWMutex).RLock method(*sync.RWMutex) func() { if bool(false) { }; if atomic.AddInt32(&sync.rw.readerCount, int32(1)) < int32(0) { sync.runtime_SemacquireMutex(&sync.rw.readerSem, bool(false)) }; if bool(false) { } }
src/go/token/position.go:452:15: inlining call to sync.(*RWMutex).RLock method(*sync.RWMutex) func() { if bool(false) { }; if atomic.AddInt32(&sync.rw.readerCount, int32(1)) < int32(0) { sync.runtime_SemacquireMutex(&sync.rw.readerSem, bool(false)) }; if bool(false) { } }
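
A note on reading the bodies: an empty "if UintSize == 32 { }" or "if raceenabled { }" is not empty in the source; the printer shows the branch with its statically dead body already removed, which is exactly why these functions come in under the inliner's cost budget. As for reproducing the report (the flag spelling is an assumption, the gist does not record the original command): a single -m, as in go build -gcflags=-m, prints only the "can inline" and "inlining call to" lines, while doubling it, -gcflags='-m -m', adds the "as: ..." body expansions seen here.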