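// On-CPU profiling sketch built on github.com/iovisor/gobpf/bcc: a
// perf-event BPF program samples stacks, the sampled addresses are
// symbolized through libbcc's symbol cache via cgo, and each aggregated
// stack is printed as a semicolon-separated list of frames together with
// its sample count.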
package main

import (
	"bytes"
	"encoding/binary"
	"log"
	"time"
	"unsafe"

	"github.com/iovisor/gobpf/bcc"
)

/*
#cgo CFLAGS: -I/usr/include/bcc/compat
#cgo LDFLAGS: -lbcc
#include <bcc/bcc_common.h>
#include <bcc/libbpf.h>
#include <bcc/bcc_syms.h>
*/
import "C"
// bccSymbol mirrors struct bcc_symbol so it can be passed through
// unsafe.Pointer to the libbcc symbol-resolution calls.
type bccSymbol struct {
	name         *C.char
	demangleName *C.char
	module       *C.char
	offset       C.ulonglong
}

// bccSymbolOption corresponds to struct bcc_symbol_option; it is passed
// zero-initialized to bcc_symcache_new below.
type bccSymbolOption struct {
	useDebugFile      int
	checkDebugFileCrc int
	useSymbolType     uint32
}

const bufferLength = 40000

// symbolCache keeps one libbcc symbol cache per PID so repeated address
// lookups for the same process reuse the already-loaded symbol tables.
type symbolCache struct {
	cachePerPid map[uint64]*C.struct_bcc_symcache
}

func newSymbolCache() *symbolCache {
	return &symbolCache{
		cachePerPid: make(map[uint64]*C.struct_bcc_symcache),
	}
}
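// cache returns the libbcc symbol cache for pid, creating it with
// bcc_symcache_new on first use. pid 0 stands in for the kernel and is
// translated to bcc's -1 convention.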
func (sc *symbolCache) cache(pid uint64) *C.struct_bcc_symcache {
	if cache, ok := sc.cachePerPid[pid]; ok {
		return cache
	}
	pidC := C.int(pid)
	if pid == 0 {
		pidC = C.int(-1)
	}
	symbolOpt := &bccSymbolOption{}
	symbolOptC := (*C.struct_bcc_symbol_option)(unsafe.Pointer(symbolOpt))
	cache := C.bcc_symcache_new(pidC, symbolOptC)
	sc.cachePerPid[pid] = (*C.struct_bcc_symcache)(cache)
	return sc.cachePerPid[pid]
}
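// bccResolve resolves addr within pid's address space and returns the
// symbol name, the offset reported by libbcc, and the containing module.
// Kernel addresses (pid 0) are resolved without demangling.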
func (sc *symbolCache) bccResolve(pid, addr uint64) (string, uint64, string) {
	symbol := &bccSymbol{}
	symbolC := (*C.struct_bcc_symbol)(unsafe.Pointer(symbol))
	cache := sc.cache(pid)
	var res C.int
	if pid == 0 {
		res = C.bcc_symcache_resolve_no_demangle(unsafe.Pointer(cache), C.ulong(addr), symbolC)
	} else {
		res = C.bcc_symcache_resolve(unsafe.Pointer(cache), C.ulong(addr), symbolC)
	}
	if res < 0 {
		// Resolution failed; fall back to whatever partial information
		// libbcc filled in, or to the raw address.
		if symbol.offset > 0 {
			return "", uint64(symbol.offset), C.GoString(symbol.module)
		}
		return "", addr, ""
	}
	if pid == 0 {
		return C.GoString(symbol.name), uint64(symbol.offset), C.GoString(symbol.module)
	}
	return C.GoString(symbol.demangleName), uint64(symbol.offset), C.GoString(symbol.module)
}

// sym returns a printable symbol name for addr, or "[unknown]" when the
// address cannot be resolved.
func (sc *symbolCache) sym(pid, addr uint64) string {
	name, _, _ := sc.bccResolve(pid, addr)
	if name == "" {
		name = "[unknown]"
	}
	return name
}
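// bpf_program is the BPF C source compiled by bcc at load time; it closely
// mirrors the program generated by bcc's profile.py. On every perf sample,
// do_perf_event records the current comm plus the user and kernel stack IDs
// (stored in the stack_traces map) and increments the matching counter in
// the counts hash.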
var bpf_program = `
#include <uapi/linux/ptrace.h>
#include <uapi/linux/bpf_perf_event.h>
#include <linux/sched.h>

struct key_t {
    u32 pid;
    u64 kernel_ip;
    u64 kernel_ret_ip;
    int user_stack_id;
    int kernel_stack_id;
    char name[TASK_COMM_LEN];
};
BPF_HASH(counts, struct key_t);
BPF_STACK_TRACE(stack_traces, 16384);

// This code gets a bit complex. Probably not suitable for casual hacking.
int do_perf_event(struct bpf_perf_event_data *ctx) {
    u64 id = bpf_get_current_pid_tgid();
    u32 tgid = id >> 32;
    u32 pid = id;
    if (pid == 0)
        return 0;
    // placeholder for a thread filter; always true here
    if (!(1))
        return 0;

    // create map key
    struct key_t key = {.pid = tgid};
    bpf_get_current_comm(&key.name, sizeof(key.name));

    // get stacks
    key.user_stack_id = stack_traces.get_stackid(&ctx->regs, BPF_F_USER_STACK);
    key.kernel_stack_id = stack_traces.get_stackid(&ctx->regs, 0);

    if (key.kernel_stack_id >= 0) {
        // populate extras to fix the kernel stack
        u64 ip = PT_REGS_IP(&ctx->regs);
        u64 page_offset;
        // if ip isn't sane, leave key ips as zero for later checking
#if defined(CONFIG_X86_64) && defined(__PAGE_OFFSET_BASE)
        // x64, 4.16, ..., 4.11, etc., but some earlier kernels didn't have it
        page_offset = __PAGE_OFFSET_BASE;
#elif defined(CONFIG_X86_64) && defined(__PAGE_OFFSET_BASE_L4)
        // x64, 4.17, and later
#if defined(CONFIG_DYNAMIC_MEMORY_LAYOUT) && defined(CONFIG_X86_5LEVEL)
        page_offset = __PAGE_OFFSET_BASE_L5;
#else
        page_offset = __PAGE_OFFSET_BASE_L4;
#endif
#else
        // earlier x86_64 kernels, e.g., 4.6, come here
        // arm64, s390, powerpc, x86_32
        page_offset = PAGE_OFFSET;
#endif
        if (ip > page_offset) {
            key.kernel_ip = ip;
        }
    }

    counts.increment(key);
    return 0;
}
`
// Key is the Go view of struct key_t from the BPF program above.
type Key struct {
	pid             uint32
	kernel_ip       uint64
	kernel_ret_ip   uint64
	user_stack_id   int32
	kernel_stack_id int32
	name            []byte
}

// Unpack decodes a raw key_t value, accounting for the 4 bytes of padding
// the compiler inserts after the u32 pid to align the following u64.
func Unpack(b []byte) *Key {
	buf := bytes.NewBuffer(b)
	g := Key{}
	binary.Read(buf, binary.LittleEndian, &g.pid)
	buf.Next(4) // struct padding between pid and kernel_ip
	binary.Read(buf, binary.LittleEndian, &g.kernel_ip)
	binary.Read(buf, binary.LittleEndian, &g.kernel_ret_ip)
	binary.Read(buf, binary.LittleEndian, &g.user_stack_id)
	binary.Read(buf, binary.LittleEndian, &g.kernel_stack_id)
	g.name = bytes.TrimRight(buf.Bytes(), "\x00")
	return &g
}

// KeyBytes holds the raw little-endian bytes of each key_t field.
type KeyBytes struct {
	pid             []byte
	kernel_ip       []byte
	kernel_ret_ip   []byte
	user_stack_id   []byte
	kernel_stack_id []byte
	name            []byte
}

// UnpackKeyBytes slices a raw key_t value into its fields without decoding
// them, following the C layout: u32 pid (+4 bytes padding), u64 kernel_ip,
// u64 kernel_ret_ip, int user_stack_id, int kernel_stack_id,
// char name[TASK_COMM_LEN].
func UnpackKeyBytes(b []byte) *KeyBytes {
	g := KeyBytes{}
	g.pid = b[:8] // 4-byte pid plus 4 bytes of zeroed padding; read as one little-endian u64 in walkStack
	g.kernel_ip = b[8:16]
	g.kernel_ret_ip = b[16:24]
	g.user_stack_id = b[24:28]
	g.kernel_stack_id = b[28:32]
	g.name = b[32:]
	if i := bytes.Index(g.name, []byte{0}); i >= 0 {
		g.name = g.name[:i]
	}
	return &g
}
var globalCache *symbolCache

func init() {
	globalCache = newSymbolCache()
}
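// walkStack fetches one stack trace from the BPF stack_traces table and
// symbolizes it. rootId is the raw stack ID returned by get_stackid; pid is
// the raw little-endian PID bytes (all zeroes for kernel stacks). A
// BPF_STACK_TRACE value is an array of 64-bit instruction pointers,
// zero-filled past the last frame, which is what the loop below relies on.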
func walkStack(stackTrace *bcc.Table, pid, rootId []byte) string {
	stack, _ := stackTrace.Get(rootId)
	var pidInt uint64
	binary.Read(bytes.NewReader(pid), binary.LittleEndian, &pidInt)
	line := ""
	for len(stack) >= 8 && !bytes.Equal(stack[:8], []byte{0, 0, 0, 0, 0, 0, 0, 0}) {
		addr := stack[:8]
		stack = stack[8:]
		var addrInt uint64
		binary.Read(bytes.NewReader(addr), binary.LittleEndian, &addrInt)
		line += globalCache.sym(pidInt, addrInt) + ";"
	}
	return line
}
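// main compiles and loads the BPF program, attaches it as a software
// CPU-clock perf event sampling at 49 Hz across all CPUs and processes,
// generates some CPU load for five seconds, and then prints every
// aggregated stack together with its sample count.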
func main() {
	mod := bcc.NewModule(bpf_program, []string{})

	fd, err := mod.LoadPerfEvent("do_perf_event")
	log.Println("LoadPerfEvent err: ", err)
	if err != nil {
		return
	}

	evType := 1       // PERF_TYPE_SOFTWARE
	evConfig := 0     // PERF_COUNT_SW_CPU_CLOCK
	samplePeriod := 0 // 0: sample by frequency instead of period
	sampleFreq := 49  // samples per second
	pid := -1         // all processes
	cpu := -1         // all CPUs
	groupFd := -1
	err = mod.AttachPerfEvent(evType, evConfig, samplePeriod, sampleFreq, pid, cpu, groupFd, fd)
	log.Println("AttachPerfEvent err: ", err)
	if err != nil {
		return
	}

	// Busy-loop in the background so the profiler has some CPU work to sample.
	go func() {
		st := time.Now()
		i := 0
		for time.Now().Add(-5 * time.Second).Before(st) {
			i++
		}
	}()
	time.Sleep(5 * time.Second)

	countsId := mod.TableId("counts")
	stackTracesId := mod.TableId("stack_traces")
	log.Println(countsId, stackTracesId)
	ct := bcc.NewTable(countsId, mod)
	st := bcc.NewTable(stackTracesId, mod)

	iter := ct.Iter()
	for iter.Next() {
		k := UnpackKeyBytes(iter.Key())
		line := string(k.name) + ";"
		line += walkStack(st, k.pid, k.user_stack_id)
		// Kernel frames are resolved with pid 0 (kernel symbols).
		line += walkStack(st, []byte{0, 0, 0, 0, 0, 0, 0, 0}, k.kernel_stack_id)

		v := iter.Leaf()
		var valInt uint64
		binary.Read(bytes.NewBuffer(v), binary.LittleEndian, &valInt) // counts values are native-endian u64; little-endian on x86_64
		log.Printf("%q %d", line, valInt)
	}
}