@5HT
Created January 16, 2017 21:11
$ cat intint.s
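# rustc-generated x86-64 assembly (AT&T syntax, Mach-O sections) for a crate
# named `intint`. The mangled __ZN...E symbols are annotated below with their
# demangled Rust names.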
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
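# `drop` glue: a no-op (the referenced values need no destructor). Used as the
# first slot of _vtable.1 and _vtable.2 below.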
__ZN4drop17hc03495ab6f5cadc4E:
pushq %rbp
movq %rsp, %rbp
popq %rbp
retq
.p2align 4, 0x90
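# <&'a T as core::fmt::Debug>::fmt, instantiated so that it forwards to
# <f64 as core::fmt::Debug>::fmt.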
__ZN53_$LT$$RF$$u27$a$u20$T$u20$as$u20$core..fmt..Debug$GT$3fmt17h1889a8c4238d8202E:
.cfi_startproc
pushq %rbp
Ltmp0:
.cfi_def_cfa_offset 16
Ltmp1:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp2:
.cfi_def_cfa_register %rbp
movq (%rdi), %rdi
popq %rbp
jmp __ZN40_$LT$f64$u20$as$u20$core..fmt..Debug$GT$3fmt17he96609929d995e69E
.cfi_endproc
.p2align 4, 0x90
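# <&'a T as core::fmt::Debug>::fmt, instantiated so that it forwards to
# core::fmt::num::<impl core::fmt::Debug for u64>::fmt.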
__ZN53_$LT$$RF$$u27$a$u20$T$u20$as$u20$core..fmt..Debug$GT$3fmt17h569a52652ae3d9d1E:
.cfi_startproc
pushq %rbp
Ltmp3:
.cfi_def_cfa_offset 16
Ltmp4:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp5:
.cfi_def_cfa_register %rbp
movq (%rdi), %rdi
popq %rbp
jmp __ZN4core3fmt3num50_$LT$impl$u20$core..fmt..Debug$u20$for$u20$u64$GT$3fmt17h5d620cd9388cdb43E
.cfi_endproc
.p2align 4, 0x90
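# <collections::vec::Vec<T> as core::fmt::Debug>::fmt for the f64 element type:
# builds a core::fmt::builders::DebugList and pushes each element through
# _vtable.2.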
__ZN67_$LT$collections..vec..Vec$LT$T$GT$$u20$as$u20$core..fmt..Debug$GT$3fmt17h21263577298cd9a6E:
.cfi_startproc
pushq %rbp
Ltmp6:
.cfi_def_cfa_offset 16
Ltmp7:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp8:
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
Ltmp9:
.cfi_offset %rbx, -56
Ltmp10:
.cfi_offset %r12, -48
Ltmp11:
.cfi_offset %r13, -40
Ltmp12:
.cfi_offset %r14, -32
Ltmp13:
.cfi_offset %r15, -24
movq (%rdi), %rbx
movq 16(%rdi), %r13
leaq -64(%rbp), %rdi
callq __ZN4core3fmt8builders14debug_list_new17h997ae1c8e97f29fdE
testq %r13, %r13
je LBB3_3
shlq $3, %r13
leaq _vtable.2(%rip), %r14
leaq -64(%rbp), %r15
leaq -48(%rbp), %r12
.p2align 4, 0x90
LBB3_2:
movq %rbx, -48(%rbp)
addq $8, %rbx
movq %r15, %rdi
movq %r12, %rsi
movq %r14, %rdx
callq __ZN4core3fmt8builders9DebugList5entry17h0984c85733c836ddE
addq $-8, %r13
jne LBB3_2
LBB3_3:
leaq -64(%rbp), %rdi
callq __ZN4core3fmt8builders9DebugList6finish17h8b6f5b2f9b818d48E
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
.p2align 4, 0x90
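# <collections::vec::Vec<T> as core::fmt::Debug>::fmt for the u64 element type;
# identical to the previous function except that it uses _vtable.1.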
__ZN67_$LT$collections..vec..Vec$LT$T$GT$$u20$as$u20$core..fmt..Debug$GT$3fmt17h31fcb6ec5d3ae0adE:
.cfi_startproc
pushq %rbp
Ltmp14:
.cfi_def_cfa_offset 16
Ltmp15:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp16:
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $24, %rsp
Ltmp17:
.cfi_offset %rbx, -56
Ltmp18:
.cfi_offset %r12, -48
Ltmp19:
.cfi_offset %r13, -40
Ltmp20:
.cfi_offset %r14, -32
Ltmp21:
.cfi_offset %r15, -24
movq (%rdi), %rbx
movq 16(%rdi), %r13
leaq -64(%rbp), %rdi
callq __ZN4core3fmt8builders14debug_list_new17h997ae1c8e97f29fdE
testq %r13, %r13
je LBB4_3
shlq $3, %r13
leaq _vtable.1(%rip), %r14
leaq -64(%rbp), %r15
leaq -48(%rbp), %r12
.p2align 4, 0x90
LBB4_2:
movq %rbx, -48(%rbp)
addq $8, %rbx
movq %r15, %rdi
movq %r12, %rsi
movq %r14, %rdx
callq __ZN4core3fmt8builders9DebugList5entry17h0984c85733c836ddE
addq $-8, %r13
jne LBB4_2
LBB4_3:
leaq -64(%rbp), %rdi
callq __ZN4core3fmt8builders9DebugList6finish17h8b6f5b2f9b818d48E
addq $24, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
.cfi_endproc
.section __TEXT,__const
.p2align 5
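# Constant pool for intint::main. LCPI5_0/LCPI5_3 hold the f64 bit patterns for
# 0.0, 1.0, 4.0, 9.0 (in two rotations); LCPI5_5/LCPI5_6 hold the u64 values
# 0, 1, 4, 9; LCPI5_1 is the pair {16, 16} (Vec length/capacity); LCPI5_2 is
# 2.0 and LCPI5_4 is {1.0, 4.0}.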
LCPI5_0:
.quad 0
.quad 4607182418800017408
.quad 4616189618054758400
.quad 4621256167635550208
LCPI5_3:
.quad 4607182418800017408
.quad 4616189618054758400
.quad 4621256167635550208
.quad 0
LCPI5_5:
.quad 0
.quad 1
.quad 4
.quad 9
LCPI5_6:
.quad 1
.quad 4
.quad 9
.quad 0
.section __TEXT,__literal16,16byte_literals
.p2align 4
LCPI5_1:
.quad 16
.quad 16
LCPI5_4:
.quad 4607182418800017408
.quad 4616189618054758400
.section __TEXT,__literal8,8byte_literals
.p2align 3
LCPI5_2:
.quad 4611686018427387904
.section __TEXT,__text,regular,pure_instructions
.p2align 4, 0x90
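# intint::main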
__ZN6intint4main17hfc3e9c6a4a641ae7E:
Lfunc_begin0:
.cfi_startproc
.cfi_personality 155, _rust_eh_personality
.cfi_lsda 16, Lexception0
pushq %rbp
Ltmp37:
.cfi_def_cfa_offset 16
Ltmp38:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp39:
.cfi_def_cfa_register %rbp
pushq %r15
pushq %r14
pushq %r13
pushq %r12
pushq %rbx
subq $296, %rsp
Ltmp40:
.cfi_offset %rbx, -56
Ltmp41:
.cfi_offset %r12, -48
Ltmp42:
.cfi_offset %r13, -40
Ltmp43:
.cfi_offset %r14, -32
Ltmp44:
.cfi_offset %r15, -24
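# Allocate four 128-byte buffers (16 elements of 8 bytes each) with
# __rust_allocate and initialize them from the constant pool; any allocation
# failure branches to LBB5_51 (alloc::oom::oom).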
movl $128, %edi
movl $8, %esi
callq ___rust_allocate
movq %rax, %rbx
testq %rbx, %rbx
je LBB5_51
vmovaps LCPI5_0(%rip), %ymm0
vmovups %ymm0, (%rbx)
vmovups %ymm0, 32(%rbx)
vmovups %ymm0, 64(%rbx)
vmovups %ymm0, 96(%rbx)
movq %rbx, -128(%rbp)
vmovaps LCPI5_1(%rip), %xmm0
vmovups %xmm0, -120(%rbp)
movl $128, %edi
movl $8, %esi
vzeroupper
callq ___rust_allocate
movq %rax, %r14
testq %r14, %r14
je LBB5_51
vmovsd LCPI5_2(%rip), %xmm0
vpslldq $8, %xmm0, %xmm0
vmovdqu %xmm0, (%r14)
movabsq $4618441417868443648, %rax
movq %rax, 16(%r14)
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 24(%r14)
vmovdqa LCPI5_3(%rip), %ymm1
vmovdqu %ymm1, 40(%r14)
movabsq $4611686018427387904, %rcx
movq %rcx, 72(%r14)
movq %rax, 80(%r14)
vmovdqu %xmm0, 88(%r14)
vmovaps LCPI5_4(%rip), %xmm0
vmovups %xmm0, 104(%r14)
movabsq $4621256167635550208, %rax
movq %rax, 120(%r14)
movl $128, %edi
movl $8, %esi
vzeroupper
callq ___rust_allocate
movq %rax, %r12
testq %r12, %r12
je LBB5_51
vmovaps LCPI5_5(%rip), %ymm0
vmovups %ymm0, (%r12)
vmovups %ymm0, 32(%r12)
vmovups %ymm0, 64(%r12)
vmovups %ymm0, 96(%r12)
movq %r12, -64(%rbp)
vmovaps LCPI5_1(%rip), %xmm0
vmovups %xmm0, -56(%rbp)
movl $128, %edi
movl $8, %esi
vzeroupper
callq ___rust_allocate
movq %rax, %r15
testq %r15, %r15
je LBB5_51
movl $2, %eax
vmovq %rax, %xmm0
vpslldq $8, %xmm0, %xmm0
vmovdqu %xmm0, (%r15)
movq $6, 16(%r15)
vpxor %xmm0, %xmm0, %xmm0
vmovdqu %xmm0, 24(%r15)
vmovaps LCPI5_6(%rip), %ymm0
vmovups %ymm0, 40(%r15)
vmovups %ymm0, 72(%r15)
movq $1, 104(%r15)
movq $4, 112(%r15)
movq $9, 120(%r15)
movl $128, %edi
movl $8, %esi
vzeroupper
callq ___rust_allocate
testq %rax, %rax
je LBB5_5
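# Element-wise f64 multiply of the first two buffers into a fifth 128-byte
# buffer: one vmulsd per element, fully unrolled; the first element is stored
# as a constant 0 (folded from 0.0 * 0.0).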
movq $0, (%rax)
vmovsd 8(%rbx), %xmm0
vmulsd 8(%r14), %xmm0, %xmm0
vmovsd %xmm0, 8(%rax)
vmovsd 16(%rbx), %xmm0
vmulsd 16(%r14), %xmm0, %xmm0
vmovsd %xmm0, 16(%rax)
vmovsd 24(%rbx), %xmm0
vmulsd 24(%r14), %xmm0, %xmm0
vmovsd %xmm0, 24(%rax)
vmovsd 32(%rbx), %xmm0
vmulsd 32(%r14), %xmm0, %xmm0
vmovsd %xmm0, 32(%rax)
vmovsd 40(%rbx), %xmm0
vmulsd 40(%r14), %xmm0, %xmm0
vmovsd %xmm0, 40(%rax)
vmovsd 48(%rbx), %xmm0
vmulsd 48(%r14), %xmm0, %xmm0
vmovsd %xmm0, 48(%rax)
vmovsd 56(%rbx), %xmm0
vmulsd 56(%r14), %xmm0, %xmm0
vmovsd %xmm0, 56(%rax)
vmovsd 64(%rbx), %xmm0
vmulsd 64(%r14), %xmm0, %xmm0
vmovsd %xmm0, 64(%rax)
vmovsd 72(%rbx), %xmm0
vmulsd 72(%r14), %xmm0, %xmm0
vmovsd %xmm0, 72(%rax)
vmovsd 80(%rbx), %xmm0
vmulsd 80(%r14), %xmm0, %xmm0
vmovsd %xmm0, 80(%rax)
vmovsd 88(%rbx), %xmm0
vmulsd 88(%r14), %xmm0, %xmm0
vmovsd %xmm0, 88(%rax)
vmovsd 96(%rbx), %xmm0
vmulsd 96(%r14), %xmm0, %xmm0
vmovsd %xmm0, 96(%rax)
vmovsd 104(%rbx), %xmm0
vmulsd 104(%r14), %xmm0, %xmm0
vmovsd %xmm0, 104(%rax)
vmovsd 112(%rbx), %xmm0
vmulsd 112(%r14), %xmm0, %xmm0
vmovsd %xmm0, 112(%rax)
vmovsd 120(%rbx), %xmm0
vmulsd 120(%r14), %xmm0, %xmm0
vmovsd %xmm0, 120(%rax)
movq %rax, -152(%rbp)
vmovaps LCPI5_1(%rip), %xmm0
vmovups %xmm0, -144(%rbp)
leaq -152(%rbp), %rax
movq %rax, -80(%rbp)
leaq __ZN67_$LT$collections..vec..Vec$LT$T$GT$$u20$as$u20$core..fmt..Debug$GT$3fmt17h21263577298cd9a6E(%rip), %rax
movq %rax, -72(%rbp)
leaq _ref.b(%rip), %rax
movq %rax, -280(%rbp)
movq $2, -272(%rbp)
movq $0, -264(%rbp)
leaq -80(%rbp), %rax
movq %rax, -248(%rbp)
movq $1, -240(%rbp)
Ltmp22:
leaq -280(%rbp), %rdi
callq __ZN3std2io5stdio6_print17ha1f12b7210ca1a83E
Ltmp23:
movq -144(%rbp), %rsi
testq %rsi, %rsi
je LBB5_10
shlq $3, %rsi
movq -152(%rbp), %rdi
movl $8, %edx
callq ___rust_deallocate
movq -64(%rbp), %r12
LBB5_10:
movq -48(%rbp), %r13
cmpq $16, %r13
movl $16, %ebx
cmovbeq %r13, %rbx
movl $1, %eax
xorl %ecx, %ecx
testq %rbx, %rbx
je LBB5_34
movl $8, %ecx
movq %rbx, %rax
mulq %rcx
jo LBB5_12
movl $8, %esi
movq %rax, %rdi
callq ___rust_allocate
testq %rax, %rax
je LBB5_18
xorl %ecx, %ecx
movq %rax, %rdx
cmpq $4, %rbx
jb LBB5_32
xorl %ecx, %ecx
movq %rbx, %r8
movq %rax, %rdx
andq $-4, %r8
je LBB5_32
cmpq $16, %r13
movl $16, %edx
cmovbq %r13, %rdx
leaq -8(%r12,%rdx,8), %rsi
xorl %ecx, %ecx
cmpq %rsi, %rax
ja LBB5_24
leaq -8(%rax,%rdx,8), %rsi
movq %rax, %rdx
cmpq %rsi, %r12
jbe LBB5_32
LBB5_24:
leaq -4(%r8), %rdi
movq %rdi, %rcx
shrq $2, %rcx
leal 1(%rcx), %edx
andl $3, %edx
xorl %esi, %esi
cmpq $12, %rdi
jb LBB5_27
leaq -1(%rdx), %rdi
subq %rcx, %rdi
xorl %esi, %esi
.p2align 4, 0x90
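# Vectorized u64 multiply loop: each 64x64-bit product is assembled from
# 32x32->64-bit vpmuludq partial products (lo*lo + ((lo*hi + hi*lo) << 32)),
# since AVX has no native packed 64-bit multiply.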
LBB5_26:
vmovups (%r12,%rsi,8), %ymm0
vmovups (%r15,%rsi,8), %ymm1
vextractf128 $1, %ymm0, %xmm2
vextractf128 $1, %ymm1, %xmm3
vpmuludq %xmm2, %xmm3, %xmm4
vpsrlq $32, %xmm2, %xmm5
vpmuludq %xmm5, %xmm3, %xmm5
vpsllq $32, %xmm5, %xmm5
vpaddq %xmm5, %xmm4, %xmm4
vpsrlq $32, %xmm3, %xmm3
vpmuludq %xmm2, %xmm3, %xmm2
vpsllq $32, %xmm2, %xmm2
vpaddq %xmm2, %xmm4, %xmm2
vpmuludq %xmm0, %xmm1, %xmm3
vpsrlq $32, %xmm0, %xmm4
vpmuludq %xmm4, %xmm1, %xmm4
vpsllq $32, %xmm4, %xmm4
vpaddq %xmm4, %xmm3, %xmm3
vpsrlq $32, %xmm1, %xmm1
vpmuludq %xmm0, %xmm1, %xmm0
vpsllq $32, %xmm0, %xmm0
vpaddq %xmm0, %xmm3, %xmm0
vinsertf128 $1, %xmm2, %ymm0, %ymm0
vmovups %ymm0, (%rax,%rsi,8)
vmovups 32(%r12,%rsi,8), %ymm0
vmovups 32(%r15,%rsi,8), %ymm1
vextractf128 $1, %ymm0, %xmm2
vextractf128 $1, %ymm1, %xmm3
vpmuludq %xmm2, %xmm3, %xmm4
vpsrlq $32, %xmm2, %xmm5
vpmuludq %xmm5, %xmm3, %xmm5
vpsllq $32, %xmm5, %xmm5
vpaddq %xmm5, %xmm4, %xmm4
vpsrlq $32, %xmm3, %xmm3
vpmuludq %xmm2, %xmm3, %xmm2
vpsllq $32, %xmm2, %xmm2
vpaddq %xmm2, %xmm4, %xmm2
vpmuludq %xmm0, %xmm1, %xmm3
vpsrlq $32, %xmm0, %xmm4
vpmuludq %xmm4, %xmm1, %xmm4
vpsllq $32, %xmm4, %xmm4
vpaddq %xmm4, %xmm3, %xmm3
vpsrlq $32, %xmm1, %xmm1
vpmuludq %xmm0, %xmm1, %xmm0
vpsllq $32, %xmm0, %xmm0
vpaddq %xmm0, %xmm3, %xmm0
vinsertf128 $1, %xmm2, %ymm0, %ymm0
vmovups %ymm0, 32(%rax,%rsi,8)
vmovups 64(%r12,%rsi,8), %ymm0
vmovups 64(%r15,%rsi,8), %ymm1
vextractf128 $1, %ymm0, %xmm2
vextractf128 $1, %ymm1, %xmm3
vpmuludq %xmm2, %xmm3, %xmm4
vpsrlq $32, %xmm2, %xmm5
vpmuludq %xmm5, %xmm3, %xmm5
vpsllq $32, %xmm5, %xmm5
vpaddq %xmm5, %xmm4, %xmm4
vpsrlq $32, %xmm3, %xmm3
vpmuludq %xmm2, %xmm3, %xmm2
vpsllq $32, %xmm2, %xmm2
vpaddq %xmm2, %xmm4, %xmm2
vpmuludq %xmm0, %xmm1, %xmm3
vpsrlq $32, %xmm0, %xmm4
vpmuludq %xmm4, %xmm1, %xmm4
vpsllq $32, %xmm4, %xmm4
vpaddq %xmm4, %xmm3, %xmm3
vpsrlq $32, %xmm1, %xmm1
vpmuludq %xmm0, %xmm1, %xmm0
vpsllq $32, %xmm0, %xmm0
vpaddq %xmm0, %xmm3, %xmm0
vinsertf128 $1, %xmm2, %ymm0, %ymm0
vmovups %ymm0, 64(%rax,%rsi,8)
vmovups 96(%r12,%rsi,8), %ymm0
vmovups 96(%r15,%rsi,8), %ymm1
vextractf128 $1, %ymm0, %xmm2
vextractf128 $1, %ymm1, %xmm3
vpmuludq %xmm2, %xmm3, %xmm4
vpsrlq $32, %xmm2, %xmm5
vpmuludq %xmm5, %xmm3, %xmm5
vpsllq $32, %xmm5, %xmm5
vpaddq %xmm5, %xmm4, %xmm4
vpsrlq $32, %xmm3, %xmm3
vpmuludq %xmm2, %xmm3, %xmm2
vpsllq $32, %xmm2, %xmm2
vpaddq %xmm2, %xmm4, %xmm2
vpmuludq %xmm0, %xmm1, %xmm3
vpsrlq $32, %xmm0, %xmm4
vpmuludq %xmm4, %xmm1, %xmm4
vpsllq $32, %xmm4, %xmm4
vpaddq %xmm4, %xmm3, %xmm3
vpsrlq $32, %xmm1, %xmm1
vpmuludq %xmm0, %xmm1, %xmm0
vpsllq $32, %xmm0, %xmm0
vpaddq %xmm0, %xmm3, %xmm0
vinsertf128 $1, %xmm2, %ymm0, %ymm0
vmovups %ymm0, 96(%rax,%rsi,8)
addq $16, %rsi
addq $4, %rdi
jne LBB5_26
LBB5_27:
testq %rdx, %rdx
je LBB5_30
leaq (%r15,%rsi,8), %rdi
leaq (%r12,%rsi,8), %rcx
leaq (%rax,%rsi,8), %rsi
negq %rdx
.p2align 4, 0x90
LBB5_29:
vmovups (%rcx), %ymm0
vmovups (%rdi), %ymm1
vextractf128 $1, %ymm0, %xmm2
vextractf128 $1, %ymm1, %xmm3
vpmuludq %xmm2, %xmm3, %xmm4
vpsrlq $32, %xmm2, %xmm5
vpmuludq %xmm5, %xmm3, %xmm5
vpsllq $32, %xmm5, %xmm5
vpaddq %xmm5, %xmm4, %xmm4
vpsrlq $32, %xmm3, %xmm3
vpmuludq %xmm2, %xmm3, %xmm2
vpsllq $32, %xmm2, %xmm2
vpaddq %xmm2, %xmm4, %xmm2
vpmuludq %xmm0, %xmm1, %xmm3
vpsrlq $32, %xmm0, %xmm4
vpmuludq %xmm4, %xmm1, %xmm4
vpsllq $32, %xmm4, %xmm4
vpaddq %xmm4, %xmm3, %xmm3
vpsrlq $32, %xmm1, %xmm1
vpmuludq %xmm0, %xmm1, %xmm0
vpsllq $32, %xmm0, %xmm0
vpaddq %xmm0, %xmm3, %xmm0
vinsertf128 $1, %xmm2, %ymm0, %ymm0
vmovups %ymm0, (%rsi)
addq $32, %rdi
addq $32, %rcx
addq $32, %rsi
incq %rdx
jne LBB5_29
LBB5_30:
cmpq %r8, %rbx
je LBB5_33
leaq (%rax,%r8,8), %rdx
movq %r8, %rcx
.p2align 4, 0x90
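# Scalar tail of the u64 multiply: remaining elements handled with imulq.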
LBB5_32:
movq (%r15,%rcx,8), %rsi
imulq (%r12,%rcx,8), %rsi
incq %rcx
movq %rsi, (%rdx)
addq $8, %rdx
cmpq %rbx, %rcx
jb LBB5_32
LBB5_33:
movq %rbx, %rcx
LBB5_34:
movq %rax, -104(%rbp)
movq %rcx, -96(%rbp)
movq %rcx, -88(%rbp)
leaq -104(%rbp), %rax
movq %rax, -80(%rbp)
leaq __ZN67_$LT$collections..vec..Vec$LT$T$GT$$u20$as$u20$core..fmt..Debug$GT$3fmt17h31fcb6ec5d3ae0adE(%rip), %rbx
movq %rbx, -72(%rbp)
leaq _ref.b(%rip), %rax
movq %rax, -328(%rbp)
movq $2, -320(%rbp)
movq $0, -312(%rbp)
leaq -80(%rbp), %rax
movq %rax, -296(%rbp)
movq $1, -288(%rbp)
Ltmp29:
leaq -328(%rbp), %rdi
vzeroupper
callq __ZN3std2io5stdio6_print17ha1f12b7210ca1a83E
Ltmp30:
movq -96(%rbp), %rsi
testq %rsi, %rsi
je LBB5_37
shlq $3, %rsi
movq -104(%rbp), %rdi
movl $8, %edx
callq ___rust_deallocate
LBB5_37:
leaq -64(%rbp), %rax
movq %rax, -184(%rbp)
movq %rbx, -176(%rbp)
leaq -128(%rbp), %rax
movq %rax, -168(%rbp)
leaq __ZN67_$LT$collections..vec..Vec$LT$T$GT$$u20$as$u20$core..fmt..Debug$GT$3fmt17h21263577298cd9a6E(%rip), %rax
movq %rax, -160(%rbp)
leaq _ref.d(%rip), %rax
movq %rax, -232(%rbp)
movq $3, -224(%rbp)
movq $0, -216(%rbp)
leaq -184(%rbp), %rax
movq %rax, -200(%rbp)
movq $2, -192(%rbp)
Ltmp32:
leaq -232(%rbp), %rdi
callq __ZN3std2io5stdio6_print17ha1f12b7210ca1a83E
Ltmp33:
movl $128, %esi
movl $8, %edx
movq %r15, %rdi
callq ___rust_deallocate
movq -56(%rbp), %rsi
testq %rsi, %rsi
je LBB5_40
shlq $3, %rsi
movq -64(%rbp), %rdi
movl $8, %edx
callq ___rust_deallocate
LBB5_40:
movl $128, %esi
movl $8, %edx
movq %r14, %rdi
callq ___rust_deallocate
movq -120(%rbp), %rsi
testq %rsi, %rsi
je LBB5_42
shlq $3, %rsi
movq -128(%rbp), %rdi
movl $8, %edx
callq ___rust_deallocate
LBB5_42:
addq $296, %rsp
popq %rbx
popq %r12
popq %r13
popq %r14
popq %r15
popq %rbp
retq
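# Out-of-line error paths: failed allocations call alloc::oom::oom; an
# overflowing size computation calls core::option::expect_failed with the
# "capacity overflow" message (_str.0).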
LBB5_51:
callq __ZN5alloc3oom3oom17h087c259ea35c365cE
LBB5_5:
Ltmp34:
callq __ZN5alloc3oom3oom17h087c259ea35c365cE
Ltmp35:
LBB5_12:
Ltmp27:
leaq _str.0(%rip), %rdi
movl $17, %esi
callq __ZN4core6option13expect_failed17h88acc6328925809bE
Ltmp28:
LBB5_18:
Ltmp25:
callq __ZN5alloc3oom3oom17h087c259ea35c365cE
Ltmp26:
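# Unwind landing pads: free whichever intermediate buffer is live, then release
# the four input buffers and resume unwinding via _Unwind_Resume.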
LBB5_48:
Ltmp36:
movq %rax, %rbx
jmp LBB5_49
LBB5_43:
Ltmp31:
movq %rax, %rbx
movq -96(%rbp), %rsi
testq %rsi, %rsi
je LBB5_49
shlq $3, %rsi
movq -104(%rbp), %rdi
jmp LBB5_15
LBB5_13:
Ltmp24:
movq %rax, %rbx
movq -144(%rbp), %rsi
testq %rsi, %rsi
je LBB5_49
shlq $3, %rsi
movq -152(%rbp), %rdi
LBB5_15:
movl $8, %edx
callq ___rust_deallocate
LBB5_49:
movl $128, %esi
movl $8, %edx
movq %r15, %rdi
callq ___rust_deallocate
movq -56(%rbp), %rsi
testq %rsi, %rsi
je LBB5_50
shlq $3, %rsi
movq -64(%rbp), %rdi
movl $8, %edx
callq ___rust_deallocate
LBB5_50:
movl $128, %esi
movl $8, %edx
movq %r14, %rdi
callq ___rust_deallocate
movq -120(%rbp), %rsi
testq %rsi, %rsi
je LBB5_46
shlq $3, %rsi
movq -128(%rbp), %rdi
movl $8, %edx
callq ___rust_deallocate
LBB5_46:
movq %rbx, %rdi
callq __Unwind_Resume
Lfunc_end0:
.cfi_endproc
.section __TEXT,__gcc_except_tab
.p2align 2
GCC_except_table5:
Lexception0:
.byte 255
.byte 155
.asciz "\320"
.byte 3
.byte 78
Lset0 = Ltmp22-Lfunc_begin0
.long Lset0
Lset1 = Ltmp23-Ltmp22
.long Lset1
Lset2 = Ltmp24-Lfunc_begin0
.long Lset2
.byte 0
Lset3 = Ltmp29-Lfunc_begin0
.long Lset3
Lset4 = Ltmp30-Ltmp29
.long Lset4
Lset5 = Ltmp31-Lfunc_begin0
.long Lset5
.byte 0
Lset6 = Ltmp32-Lfunc_begin0
.long Lset6
Lset7 = Ltmp33-Ltmp32
.long Lset7
Lset8 = Ltmp36-Lfunc_begin0
.long Lset8
.byte 0
Lset9 = Ltmp33-Lfunc_begin0
.long Lset9
Lset10 = Ltmp34-Ltmp33
.long Lset10
.long 0
.byte 0
Lset11 = Ltmp34-Lfunc_begin0
.long Lset11
Lset12 = Ltmp26-Ltmp34
.long Lset12
Lset13 = Ltmp36-Lfunc_begin0
.long Lset13
.byte 0
Lset14 = Ltmp26-Lfunc_begin0
.long Lset14
Lset15 = Lfunc_end0-Ltmp26
.long Lset15
.long 0
.byte 0
.p2align 2
.section __TEXT,__text,regular,pure_instructions
.globl _main
.p2align 4, 0x90
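# C entry point: forwards argc/argv to std::rt::lang_start with intint::main.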
_main:
.cfi_startproc
pushq %rbp
Ltmp45:
.cfi_def_cfa_offset 16
Ltmp46:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp47:
.cfi_def_cfa_register %rbp
movq %rsi, %rax
movq %rdi, %rcx
leaq __ZN6intint4main17hfc3e9c6a4a641ae7E(%rip), %rdi
movq %rcx, %rsi
movq %rax, %rdx
popq %rbp
jmp __ZN3std2rt10lang_start17hefd96b70277e8a4aE
.cfi_endproc
.section __TEXT,__const
.p2align 4
_str.0:
.ascii "capacity overflow"
.section __DATA,__const
.p2align 3
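# Trait-object vtables (drop glue, size, align, Debug::fmt) used by the
# DebugList entries: _vtable.1 for &u64, _vtable.2 for &f64.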
_vtable.1:
.quad __ZN4drop17hc03495ab6f5cadc4E
.quad 8
.quad 8
.quad __ZN53_$LT$$RF$$u27$a$u20$T$u20$as$u20$core..fmt..Debug$GT$3fmt17h569a52652ae3d9d1E
.p2align 3
_vtable.2:
.quad __ZN4drop17hc03495ab6f5cadc4E
.quad 8
.quad 8
.quad __ZN53_$LT$$RF$$u27$a$u20$T$u20$as$u20$core..fmt..Debug$GT$3fmt17h1889a8c4238d8202E
.section __TEXT,__const
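# Format-string pieces referenced by std::io::stdio::_print: _ref.b is
# consistent with println!("{:?}") (empty piece + "\n"), _ref.d with
# println!("{:?} {:?}") (empty piece + " " + "\n").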
_str.9:
.byte 0
_str.a:
.byte 10
.section __DATA,__const
.p2align 3
_ref.b:
.quad _str.9
.quad 0
.quad _str.a
.quad 1
.section __TEXT,__const
_str.c:
.byte 32
.section __DATA,__const
.p2align 3
_ref.d:
.quad _str.9
.quad 0
.quad _str.c
.quad 1
.quad _str.a
.quad 1
.subsections_via_symbols
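For orientation, here is a hypothetical Rust sketch. It is not the author's original source and the element values are illustrative; only the overall shape is taken from the listing above: four 16-element vectors, an element-wise f64 product and an element-wise u64 product, each printed with {:?}, plus a final two-argument println!. It shows why the listing contains both a fully unrolled vmulsd sequence and a vpmuludq-based loop.

fn main() {
    // Illustrative values only; the real initializers are encoded in the
    // LCPI5_* constant pool above.
    let af: Vec<f64> = (0..16u64).map(|i| ((i % 4) * (i % 4)) as f64).collect();
    let bf: Vec<f64> = (0..16u64).map(|i| ((i % 4) + 1) as f64).collect();
    let ai: Vec<u64> = (0..16u64).map(|i| (i % 4) * (i % 4)).collect();
    let bi: Vec<u64> = (0..16u64).map(|i| (i % 4) + 1).collect();

    // f64 product: compiled above into one vmulsd per element, fully unrolled.
    let pf: Vec<f64> = af.iter().zip(&bf).map(|(x, y)| x * y).collect();
    println!("{:?}", pf);

    // u64 product: compiled above into an AVX loop of vpmuludq partial
    // products plus a scalar imulq tail.
    let pi: Vec<u64> = ai.iter().zip(&bi).map(|(x, y)| x * y).collect();
    println!("{:?}", pi);

    // Corresponds to the final two-argument print in the listing (_ref.d).
    println!("{:?} {:?}", ai, af);
}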