-
-
Save jmaslak/544665019eba87c95a2482e21a79bfde to your computer and use it in GitHub Desktop.
==41858==ERROR: AddressSanitizer: heap-buffer-overflow on address 0x7fc959bf7da8 at pc 0x7fc982a4d070 bp 0x7fc9662bc9b0 sp 0x7fc9662bc9a0 | |
WRITE of size 8 at 0x7fc959bf7da8 thread T23 | |
#0 0x7fc982a4d06f in MVMHash_gc_mark src/6model/reprs/MVMHash.c:49 | |
#1 0x7fc9829e3b0c in process_worklist src/gc/collect.c:347 | |
#2 0x7fc9829e5016 in MVM_gc_collect src/gc/collect.c:163 | |
#3 0x7fc9829d3527 in run_gc src/gc/orchestrate.c:444 | |
#4 0x7fc9829d6572 in MVM_gc_enter_from_allocator src/gc/orchestrate.c:599 | |
#5 0x7fc9829d7714 in MVM_gc_allocate_nursery src/gc/allocation.c:37 | |
#6 0x7fc9829d8015 in MVM_gc_allocate_frame src/gc/allocation.c:106 | |
#7 0x7fc9829510e5 in allocate_frame src/core/frame.c:268 | |
#8 0x7fc982951a92 in MVM_frame_invoke src/core/frame.c:511 | |
#9 0x7fc982a69f22 in invoke_handler src/6model/reprs/MVMCode.c:10 | |
#10 0x7fc9829354d2 in MVM_interp_run src/core/interp.c:1054 | |
#11 0x7fc9829704d1 in start_thread src/core/threads.c:87 | |
#12 0x7fc98181c6da in start_thread (/lib/x86_64-linux-gnu/libpthread.so.0+0x76da) | |
#13 0x7fc981f6188e in __clone (/lib/x86_64-linux-gnu/libc.so.6+0x12188e) | |
0x7fc959bf7da8 is located 0 bytes to the right of 1566120-byte region [0x7fc959a79800,0x7fc959bf7da8) | |
allocated by thread T23 here: | |
#0 0x7fc983d44f30 in realloc (/usr/lib/x86_64-linux-gnu/libasan.so.4+0xdef30) | |
#1 0x7fc9829d840d in MVM_realloc src/core/alloc.h:20 | |
#2 0x7fc9829d840d in MVM_gc_worklist_presize_for src/gc/worklist.c:27 | |
#3 0x7fc982a4cbae in MVMHash_gc_mark src/6model/reprs/MVMHash.c:47 | |
#4 0x7fc9829e3b0c in process_worklist src/gc/collect.c:347 | |
#5 0x7fc9829e5016 in MVM_gc_collect src/gc/collect.c:163 | |
#6 0x7fc9829d3527 in run_gc src/gc/orchestrate.c:444 | |
#7 0x7fc9829d6572 in MVM_gc_enter_from_allocator src/gc/orchestrate.c:599 | |
#8 0x7fc9829d7714 in MVM_gc_allocate_nursery src/gc/allocation.c:37 | |
#9 0x7fc9829d8015 in MVM_gc_allocate_frame src/gc/allocation.c:106 | |
#10 0x7fc9829510e5 in allocate_frame src/core/frame.c:268 | |
#11 0x7fc982951a92 in MVM_frame_invoke src/core/frame.c:511 | |
#12 0x7fc982a69f22 in invoke_handler src/6model/reprs/MVMCode.c:10 | |
#13 0x7fc9829354d2 in MVM_interp_run src/core/interp.c:1054 | |
#14 0x7fc9829704d1 in start_thread src/core/threads.c:87 | |
#15 0x7fc98181c6da in start_thread (/lib/x86_64-linux-gnu/libpthread.so.0+0x76da) | |
Thread T23 created by T3 here: | |
#0 0x7fc983c9dd2f in __interceptor_pthread_create (/usr/lib/x86_64-linux-gnu/libasan.so.4+0x37d2f) | |
#1 0x7fc982d6100a in uv_thread_create_ex 3rdparty/libuv/src/unix/thread.c:258 | |
#2 0x7fc982d611d7 in uv_thread_create 3rdparty/libuv/src/unix/thread.c:212 | |
#3 0x7fc9829711f7 in MVM_thread_run src/core/threads.c:171 | |
#4 0x7fc9828f4e30 in MVM_interp_run src/core/interp.c:4086 | |
#5 0x7fc9829704d1 in start_thread src/core/threads.c:87 | |
#6 0x7fc98181c6da in start_thread (/lib/x86_64-linux-gnu/libpthread.so.0+0x76da) | |
Thread T3 created by T0 here: | |
#0 0x7fc983c9dd2f in __interceptor_pthread_create (/usr/lib/x86_64-linux-gnu/libasan.so.4+0x37d2f) | |
#1 0x7fc982d6100a in uv_thread_create_ex 3rdparty/libuv/src/unix/thread.c:258 | |
#2 0x7fc982d611d7 in uv_thread_create 3rdparty/libuv/src/unix/thread.c:212 | |
#3 0x7fc9829711f7 in MVM_thread_run src/core/threads.c:171 | |
#4 0x7fc9828f4e30 in MVM_interp_run src/core/interp.c:4086 | |
#5 0x55fdbd3c7acf in main src/vm/moar/runner/main.c:392 | |
#6 0x7fc981e61b96 in __libc_start_main (/lib/x86_64-linux-gnu/libc.so.6+0x21b96) | |
SUMMARY: AddressSanitizer: heap-buffer-overflow src/6model/reprs/MVMHash.c:49 in MVMHash_gc_mark | |
Shadow bytes around the buggy address: | |
0x0ff9ab376f60: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 | |
0x0ff9ab376f70: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 | |
0x0ff9ab376f80: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 | |
0x0ff9ab376f90: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 | |
0x0ff9ab376fa0: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 | |
=>0x0ff9ab376fb0: 00 00 00 00 00[fa]fa fa fa fa fa fa fa fa fa fa | |
0x0ff9ab376fc0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa | |
0x0ff9ab376fd0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa | |
0x0ff9ab376fe0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa | |
0x0ff9ab376ff0: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa | |
0x0ff9ab377000: fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa fa | |
Shadow byte legend (one shadow byte represents 8 application bytes): | |
Addressable: 00 | |
Partially addressable: 01 02 03 04 05 06 07 | |
Heap left redzone: fa | |
Freed heap region: fd | |
Stack left redzone: f1 | |
Stack mid redzone: f2 | |
Stack right redzone: f3 | |
Stack after return: f5 | |
Stack use after scope: f8 | |
Global redzone: f9 | |
Global init order: f6 | |
Poisoned by user: f7 | |
Container overflow: fc | |
Array cookie: ac | |
Intra object redzone: bb | |
ASan internal: fe | |
Left alloca redzone: ca | |
Right alloca redzone: cb | |
==41858==ABORTING |
timo
commented
May 9, 2020
Setting a breakpoint at MVMHash.c:64 and then doing a bt:
Thread 11 "rakudo-m" hit Breakpoint 1, MVMHash_gc_mark (tc=, st=, data=, worklist=0x6030008080c0)
at src/6model/reprs/MVMHash.c:64
64 fprintf(stderr, "in MVMHash_gc_mark: expected HASH_CNT to match count fo HASH_ITER_FAST invocations, but had %ld at the end\n", initial_count);
(gdb) bt
#0 MVMHash_gc_mark (tc=, st=, data=, worklist=0x6030008080c0) at src/6model/reprs/MVMHash.c:64
#1 0x00007ffff5b9204a in MVM_gc_root_add_gen2s_to_worklist (tc=tc@entry=0x618000ae0c80, worklist=worklist@entry=0x6030008080c0)
at src/gc/roots.c:341
#2 0x00007ffff5b9858b in MVM_gc_collect (tc=tc@entry=0x618000ae0c80, what_to_do=what_to_do@entry=1 '\001', gen=gen@entry=0 '\000')
at src/gc/collect.c:155
#3 0x00007ffff5b86701 in run_gc (tc=tc@entry=0x618000ae0c80, what_to_do=what_to_do@entry=1 '\001') at src/gc/orchestrate.c:444
#4 0x00007ffff5b88160 in MVM_gc_enter_from_interrupt (tc=0x618000ae0c80) at src/gc/orchestrate.c:687
#5 0x00007fffed4ae1b9 in ?? ()
#6 0x0000000000000008 in ?? ()
#7 0x000060800001f8d8 in ?? ()
#8 0x00007fffe08dc1c0 in ?? ()
#9 0x0000630000ba5718 in ?? ()
#10 0x00007fffe08dd8b0 in ?? ()
#11 0x00007fffe08dc120 in ?? ()
#12 0x0000000041b58ab3 in ?? ()
#13 0x00007ffff5f26e60 in ?? () from //home/jmaslak/rakudo/rakudo-2020.05/install/lib/libmoar.so
#14 0x00007ffff5b10190 in ?? () at src/6model/6model.h:712 from //home/jmaslak/rakudo/rakudo-2020.05/install/lib/libmoar.so
#15 0x00007fffe08dc160 in ?? ()
#16 0x00007fffe08dc160 in ?? ()
#17 0x0004c9eefc11b82c in ?? ()
#18 0x00007fff00000002 in ?? ()
#19 0x00007fffe08dc180 in ?? ()
#20 0x0000000041b58ab3 in ?? ()
#21 0x00007ffff5f6c480 in ?? () from //home/jmaslak/rakudo/rakudo-2020.05/install/lib/libmoar.so
#22 0x00007ffff5d20390 in ?? () at src/spesh/osr.c:144 from //home/jmaslak/rakudo/rakudo-2020.05/install/lib/libmoar.so
#23 0x00007fff00000000 in ?? ()
#24 0x000062e000849320 in ?? ()
#25 0xad9338b6991fe900 in ?? ()
#26 0x0000618000ae0c80 in ?? ()
#27 0x00007fffd4fdfe08 in ?? ()
#28 0x00007ffff5c1cf90 in gc_mark (tc=0x7fffe08dde50, st=, data=0x7fffe08dde50, worklist=0x62e000465c40)
at src/6model/reprs/MVMCode.c:48
#29 0x00007ffff5adf8b1 in MVM_interp_run (tc=0x6240007ebb08, tc@entry=0x618000ae0c80, initial_invoke=0x618000ae0c80, invoke_data=0x623001cd3d60,
invoke_data@entry=0x60200090b2f0, outer_runloop=0x627000f81b80, outer_runloop@entry=0x0) at src/core/interp.c:6403
#30 0x00007ffff5b23522 in start_thread (data=0x60200090b2f0) at src/core/threads.c:87
#31 0x00007ffff49cf6db in start_thread (arg=0x7fffe08de700) at pthread_create.c:463
---Type to continue, or q to quit---
#32 0x00007ffff511488f in clone () at ../sysdeps/unix/sysv/linux/x86_64/clone.S:95
Yeah, I think this might be the code with the problem on my end — it runs inside a hyper().map(), with %last-path being shared by every worker.
# Track the most recent AS path seen for each (peer, prefix) pair, and
# report the previous path (under $ret<last-path>) whenever a prefix is
# re-announced or withdrawn.
#
# NOTE(review): $track, $event, %last-path, and $ret all come from the
# enclosing scope (not visible in this fragment).  Per the discussion
# above, this block runs inside hyper().map() with %last-path shared by
# every worker thread; the autovivifying writes and the :delete below
# mutate that shared nested hash with no synchronization — presumably
# the trigger for the GC heap-buffer-overflow reported here.  TODO:
# confirm, and either protect %last-path with a Lock or keep a
# per-worker hash and merge afterwards.
if $track {
    # Only BGP UPDATE messages carry announced/withdrawn prefixes.
    if $event ~~ Net::BGP::Event::BGP-Message and $event.message ~~ Net::BGP::Message::Update {
        # Announced prefixes: IPv4 NLRI plus IPv6 NLRI.
        my @nlri = @( $event.message.nlri );
        @nlri.append: @( $event.message.nlri6 );
        for @nlri -> $prefix {
            # If we already had a path for this prefix from this peer,
            # surface it as the "last path" before overwriting it.
            if %last-path{$event.peer}{$prefix}:exists {
                $ret<last-path>{$prefix} = %last-path{$event.peer}{$prefix};
            }
            # Record the newly announced path (AS path plus origin) as
            # the latest known path for this (peer, prefix) pair.
            my @old-path = @( $event.message.as-array );
            @old-path.push( $event.message.origin );
            %last-path{$event.peer}{$prefix} = @old-path;  # unsynchronized write to shared hash
        }
        # Withdrawn prefixes: IPv4 plus IPv6.
        my @withdrawn = @( $event.message.withdrawn );
        @withdrawn.append: @( $event.message.withdrawn6 );
        for @withdrawn -> $prefix {
            # Surface the path being withdrawn, then forget it.
            if %last-path{$event.peer}{$prefix}:exists {
                $ret<last-path>{$prefix} = %last-path{$event.peer}{$prefix};
            }
            %last-path{$event.peer}{$prefix}:delete;  # unsynchronized delete on shared hash
        }
    }
}