@RyuKojiro
Created September 4, 2016 07:16
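## Appears to be clang-generated x86-64 (Mach-O, OS X 10.11 target) assembly
## for a small base64 encoder; the per-function notes below are a hedged
## reading of the generated code rather than part of the compiler output.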
.section __TEXT,__text,regular,pure_instructions
.macosx_version_min 10, 11
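## _base64_byte(b): appears to map a 6-bit value b (0..63) to its base64
## ASCII character without a lookup table, computing roughly
## (b % 64) + 65 + 6*(b/26) - 81*(b/52) - 15*(b/62) + 3*(b/63),
## which yields 'A'-'Z', 'a'-'z', '0'-'9', '+', '/' for the respective ranges.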
.globl _base64_byte
.align 4, 0x90
_base64_byte: ## @base64_byte
.cfi_startproc
## BB#0:
pushq %rbp
Ltmp0:
.cfi_def_cfa_offset 16
Ltmp1:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp2:
.cfi_def_cfa_register %rbp
movb %dil, %al
movl $63, %edi
movl $62, %ecx
movl $52, %edx
movl $26, %esi
movl $64, %r8d
movb %al, -1(%rbp)
movzbl -1(%rbp), %eax
movl %edx, -8(%rbp) ## 4-byte Spill
cltd
idivl %r8d
movb %dl, %r9b
movb %r9b, -1(%rbp)
movzbl -1(%rbp), %edx
addl $65, %edx
movzbl -1(%rbp), %r8d
movl %r8d, %eax
movl %edx, -12(%rbp) ## 4-byte Spill
cltd
idivl %esi
imull $6, %eax, %eax
movl -12(%rbp), %esi ## 4-byte Reload
addl %eax, %esi
movzbl -1(%rbp), %eax
cltd
movl -8(%rbp), %r8d ## 4-byte Reload
idivl %r8d
imull $81, %eax, %eax
subl %eax, %esi
movzbl -1(%rbp), %eax
cltd
idivl %ecx
imull $15, %eax, %eax
subl %eax, %esi
movzbl -1(%rbp), %eax
cltd
idivl %edi
imull $3, %eax, %eax
addl %eax, %esi
movb %sil, %r9b
movzbl %r9b, %eax
popq %rbp
retq
.cfi_endproc
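## _base64_rebyte(c): appears to be the inverse of _base64_byte, mapping a
## base64 ASCII character c back to its 6-bit value with a similar branchless
## formula: (c % 123) + 19 - 3*(c/47) - 12*(c/48) - 69*(c/65) + 9*(c/97).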
.globl _base64_rebyte
.align 4, 0x90
_base64_rebyte: ## @base64_rebyte
.cfi_startproc
## BB#0:
pushq %rbp
Ltmp3:
.cfi_def_cfa_offset 16
Ltmp4:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp5:
.cfi_def_cfa_register %rbp
movb %dil, %al
movl $97, %edi
movl $65, %ecx
movl $48, %edx
movl $47, %esi
movl $123, %r8d
movb %al, -1(%rbp)
movzbl -1(%rbp), %eax
movl %edx, -8(%rbp) ## 4-byte Spill
cltd
idivl %r8d
movb %dl, %r9b
movb %r9b, -1(%rbp)
movzbl -1(%rbp), %edx
addl $19, %edx
movzbl -1(%rbp), %r8d
movl %r8d, %eax
movl %edx, -12(%rbp) ## 4-byte Spill
cltd
idivl %esi
imull $3, %eax, %eax
movl -12(%rbp), %esi ## 4-byte Reload
subl %eax, %esi
movzbl -1(%rbp), %eax
cltd
movl -8(%rbp), %r8d ## 4-byte Reload
idivl %r8d
imull $12, %eax, %eax
subl %eax, %esi
movzbl -1(%rbp), %eax
cltd
idivl %ecx
imull $69, %eax, %eax
subl %eax, %esi
movzbl -1(%rbp), %eax
cltd
idivl %edi
imull $9, %eax, %eax
addl %eax, %esi
movb %sil, %r9b
movzbl %r9b, %eax
popq %rbp
retq
.cfi_endproc
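## _base64_xencode(group, n): appears to encode up to three input bytes
## (packed into the low 24 bits of the first argument, group length n in the
## second) into four base64 characters packed into the 32-bit return value,
## substituting '=' (0x3D) for the positions that have no input byte.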
.globl _base64_xencode
.align 4, 0x90
_base64_xencode: ## @base64_xencode
.cfi_startproc
## BB#0:
pushq %rbp
Ltmp6:
.cfi_def_cfa_offset 16
Ltmp7:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp8:
.cfi_def_cfa_register %rbp
subq $64, %rsp
movl $2, %eax
movl $3, %ecx
movl $4, %edx
movl %edi, -4(%rbp)
movl %esi, -8(%rbp)
movl -8(%rbp), %esi
movl %eax, -24(%rbp) ## 4-byte Spill
movl %esi, %eax
movl %edx, -28(%rbp) ## 4-byte Spill
cltd
movl -28(%rbp), %esi ## 4-byte Reload
idivl %esi
movl %edx, -8(%rbp)
movl -4(%rbp), %edx
andl $16777215, %edx ## imm = 0xFFFFFF
movl %edx, -4(%rbp)
movl -4(%rbp), %edx
andl $255, %edx
movl %edx, -12(%rbp)
movl -4(%rbp), %edx
shrl $8, %edx
andl $255, %edx
movl %edx, -16(%rbp)
movl -4(%rbp), %edx
shrl $8, %edx
shrl $8, %edx
movl %edx, -20(%rbp)
subl -8(%rbp), %esi
movl %esi, %eax
cltd
movl -24(%rbp), %esi ## 4-byte Reload
idivl %esi
imull $1023410176, %eax, %eax ## imm = 0x3D000000
subl -8(%rbp), %ecx
movl %eax, -32(%rbp) ## 4-byte Spill
movl %ecx, %eax
cltd
idivl %esi
imull $3997696, %eax, %eax ## imm = 0x3D0000
movl -32(%rbp), %ecx ## 4-byte Reload
addl %eax, %ecx
movl -12(%rbp), %eax
shrl $2, %eax
movb %al, %r8b
movzbl %r8b, %edi
movl %ecx, -36(%rbp) ## 4-byte Spill
callq _base64_byte
movzbl %al, %ecx
movl -36(%rbp), %esi ## 4-byte Reload
addl %ecx, %esi
movl -12(%rbp), %ecx
andl $3, %ecx
shll $4, %ecx
movl -16(%rbp), %edi
shrl $4, %edi
addl %edi, %ecx
movb %cl, %al
movzbl %al, %edi
movl %esi, -40(%rbp) ## 4-byte Spill
callq _base64_byte
movl $2, %ecx
movzbl %al, %esi
shll $8, %esi
movl -40(%rbp), %edi ## 4-byte Reload
addl %esi, %edi
movl -8(%rbp), %eax
cltd
idivl %ecx
movl -16(%rbp), %ecx
andl $15, %ecx
shll $2, %ecx
movl -20(%rbp), %esi
shrl $6, %esi
addl %esi, %ecx
movb %cl, %r8b
movzbl %r8b, %ecx
movl %edi, -44(%rbp) ## 4-byte Spill
movl %ecx, %edi
movl %eax, -48(%rbp) ## 4-byte Spill
callq _base64_byte
movl $3, %ecx
movzbl %al, %esi
shll $16, %esi
movl -48(%rbp), %edi ## 4-byte Reload
imull %esi, %edi
movl -44(%rbp), %esi ## 4-byte Reload
addl %edi, %esi
movl -8(%rbp), %eax
cltd
idivl %ecx
movl -20(%rbp), %ecx
andl $63, %ecx
movb %cl, %r8b
movzbl %r8b, %edi
movl %esi, -52(%rbp) ## 4-byte Spill
movl %eax, -56(%rbp) ## 4-byte Spill
callq _base64_byte
movzbl %al, %ecx
shll $24, %ecx
movl -56(%rbp), %esi ## 4-byte Reload
imull %ecx, %esi
movl -52(%rbp), %ecx ## 4-byte Reload
addl %esi, %ecx
movl %ecx, %eax
addq $64, %rsp
popq %rbp
retq
.cfi_endproc
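## _base64_encode(src, srclen, out, outlen): appears to be the driver. If
## *out is NULL it calloc()s roughly (srclen/3 + 1)*4 + 1 bytes, then walks
## the input in groups of up to three bytes, encoding each group with
## _base64_xencode and storing the resulting characters, and returns the
## number of bytes produced (or 0 on NULL/empty arguments).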
.globl _base64_encode
.align 4, 0x90
_base64_encode: ## @base64_encode
.cfi_startproc
## BB#0:
pushq %rbp
Ltmp9:
.cfi_def_cfa_offset 16
Ltmp10:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp11:
.cfi_def_cfa_register %rbp
subq $96, %rsp
movq %rdi, -16(%rbp)
movq %rsi, -24(%rbp)
movq %rdx, -32(%rbp)
movq %rcx, -40(%rbp)
cmpq $0, -16(%rbp)
je LBB3_3
## BB#1:
cmpq $0, -32(%rbp)
je LBB3_3
## BB#2:
cmpq $0, -24(%rbp)
jne LBB3_4
LBB3_3:
movq $0, -8(%rbp)
jmp LBB3_27
LBB3_4:
movq -32(%rbp), %rax
cmpq $0, (%rax)
jne LBB3_8
## BB#5:
movl $1, %eax
movl %eax, %edi
movl $3, %eax
movl %eax, %ecx
movq -24(%rbp), %rax
xorl %edx, %edx
## kill: RDX<def> EDX<kill>
divq %rcx
addq $1, %rax
shlq $2, %rax
addq $1, %rax
movq %rax, -40(%rbp)
movq -40(%rbp), %rsi
callq _calloc
movq -32(%rbp), %rcx
movq %rax, (%rcx)
movq -32(%rbp), %rax
cmpq $0, (%rax)
jne LBB3_7
## BB#6:
movq $0, -8(%rbp)
jmp LBB3_27
LBB3_7:
jmp LBB3_8
LBB3_8:
jmp LBB3_9
LBB3_9:
movq $0, -48(%rbp)
movq $0, -56(%rbp)
LBB3_10: ## =>This Loop Header: Depth=1
## Child Loop BB3_17 Depth 2
## Child Loop BB3_20 Depth 2
xorl %eax, %eax
movb %al, %cl
movq -48(%rbp), %rdx
cmpq -24(%rbp), %rdx
movb %cl, -77(%rbp) ## 1-byte Spill
jae LBB3_12
## BB#11: ## in Loop: Header=BB3_10 Depth=1
movq -56(%rbp), %rax
addq $4, %rax
cmpq -40(%rbp), %rax
setb %cl
movb %cl, -77(%rbp) ## 1-byte Spill
LBB3_12: ## in Loop: Header=BB3_10 Depth=1
movb -77(%rbp), %al ## 1-byte Reload
testb $1, %al
jne LBB3_13
jmp LBB3_24
LBB3_13: ## in Loop: Header=BB3_10 Depth=1
movl $3, %eax
movl %eax, %ecx
movq -24(%rbp), %rdx
subq -48(%rbp), %rdx
movq %rdx, %rax
xorl %esi, %esi
movl %esi, %edx
divq %rcx
movq %rax, -64(%rbp)
cmpq $0, -64(%rbp)
je LBB3_15
## BB#14: ## in Loop: Header=BB3_10 Depth=1
movl $3, %eax
movl %eax, %ecx
movq %rcx, -88(%rbp) ## 8-byte Spill
jmp LBB3_16
LBB3_15: ## in Loop: Header=BB3_10 Depth=1
movl $3, %eax
movl %eax, %ecx
movq -24(%rbp), %rdx
subq -48(%rbp), %rdx
movq %rdx, %rax
xorl %esi, %esi
movl %esi, %edx
divq %rcx
movq %rdx, -88(%rbp) ## 8-byte Spill
LBB3_16: ## in Loop: Header=BB3_10 Depth=1
movq -88(%rbp), %rax ## 8-byte Reload
movq %rax, -64(%rbp)
movl $0, -76(%rbp)
movq -64(%rbp), %rax
movq %rax, -72(%rbp)
LBB3_17: ## Parent Loop BB3_10 Depth=1
## => This Inner Loop Header: Depth=2
movq -72(%rbp), %rax
movq %rax, %rcx
addq $-1, %rcx
movq %rcx, -72(%rbp)
cmpq $0, %rax
je LBB3_19
## BB#18: ## in Loop: Header=BB3_17 Depth=2
movl -76(%rbp), %eax
shll $8, %eax
movq -48(%rbp), %rcx
addq -72(%rbp), %rcx
movq -16(%rbp), %rdx
movsbl (%rdx,%rcx), %esi
addl %esi, %eax
movl %eax, -76(%rbp)
jmp LBB3_17
LBB3_19: ## in Loop: Header=BB3_10 Depth=1
movq -64(%rbp), %rax
addq -48(%rbp), %rax
movq %rax, -48(%rbp)
movl -76(%rbp), %edi
movq -64(%rbp), %rax
movl %eax, %ecx
movl %ecx, %esi
callq _base64_xencode
movl %eax, -76(%rbp)
LBB3_20: ## Parent Loop BB3_10 Depth=1
## => This Inner Loop Header: Depth=2
cmpl $0, -76(%rbp)
jbe LBB3_23
## BB#21: ## in Loop: Header=BB3_20 Depth=2
movl -76(%rbp), %eax
andl $255, %eax
movb %al, %cl
movq -56(%rbp), %rdx
movq %rdx, %rsi
addq $1, %rsi
movq %rsi, -56(%rbp)
movq -32(%rbp), %rsi
movq (%rsi), %rsi
movb %cl, (%rsi,%rdx)
## BB#22: ## in Loop: Header=BB3_20 Depth=2
movl -76(%rbp), %eax
shrl $8, %eax
movl %eax, -76(%rbp)
jmp LBB3_20
LBB3_23: ## in Loop: Header=BB3_10 Depth=1
jmp LBB3_10
LBB3_24:
movq -56(%rbp), %rax
cmpq -40(%rbp), %rax
jne LBB3_26
## BB#25:
movq -40(%rbp), %rax
subq $1, %rax
movq %rax, -56(%rbp)
LBB3_26:
movq -56(%rbp), %rax
movq %rax, %rcx
addq $1, %rcx
movq %rcx, -56(%rbp)
movq -32(%rbp), %rcx
movq $0, (%rcx,%rax,8)
movq -56(%rbp), %rax
movq %rax, -8(%rbp)
LBB3_27:
movq -8(%rbp), %rax
addq $96, %rsp
popq %rbp
retq
.cfi_endproc
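## _main: appears to base64-encode each command-line argument via strlen()
## and _base64_encode, print the result with printf("%s\n"), free() the
## buffer, and return 0 when at least one argument was supplied.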
.globl _main
.align 4, 0x90
_main: ## @main
.cfi_startproc
## BB#0:
pushq %rbp
Ltmp12:
.cfi_def_cfa_offset 16
Ltmp13:
.cfi_offset %rbp, -16
movq %rsp, %rbp
Ltmp14:
.cfi_def_cfa_register %rbp
subq $64, %rsp
movl $0, -4(%rbp)
movl %edi, -8(%rbp)
movq %rsi, -16(%rbp)
movq $0, -24(%rbp)
movl $1, -28(%rbp)
LBB4_1: ## =>This Inner Loop Header: Depth=1
movl -28(%rbp), %eax
cmpl -8(%rbp), %eax
jge LBB4_6
## BB#2: ## in Loop: Header=BB4_1 Depth=1
leaq -24(%rbp), %rdx
xorl %eax, %eax
movl %eax, %ecx
movslq -28(%rbp), %rsi
movq -16(%rbp), %rdi
movq (%rdi,%rsi,8), %rdi
movslq -28(%rbp), %rsi
movq -16(%rbp), %r8
movq (%r8,%rsi,8), %rsi
movq %rdi, -40(%rbp) ## 8-byte Spill
movq %rsi, %rdi
movq %rdx, -48(%rbp) ## 8-byte Spill
movq %rcx, -56(%rbp) ## 8-byte Spill
callq _strlen
movq -40(%rbp), %rdi ## 8-byte Reload
movq %rax, %rsi
movq -48(%rbp), %rdx ## 8-byte Reload
movq -56(%rbp), %rcx ## 8-byte Reload
callq _base64_encode
movl %eax, %r9d
movl %r9d, -32(%rbp)
cmpl $0, -32(%rbp)
jle LBB4_4
## BB#3: ## in Loop: Header=BB4_1 Depth=1
leaq L_.str(%rip), %rdi
movq -24(%rbp), %rsi
movb $0, %al
callq _printf
movq -24(%rbp), %rdi
movl %eax, -60(%rbp) ## 4-byte Spill
callq _free
movq $0, -24(%rbp)
LBB4_4: ## in Loop: Header=BB4_1 Depth=1
jmp LBB4_5
LBB4_5: ## in Loop: Header=BB4_1 Depth=1
movl -28(%rbp), %eax
addl $1, %eax
movl %eax, -28(%rbp)
jmp LBB4_1
LBB4_6:
cmpl $1, -8(%rbp)
setg %al
xorb $1, %al
andb $1, %al
movzbl %al, %eax
addq $64, %rsp
popq %rbp
retq
.cfi_endproc
.section __TEXT,__cstring,cstring_literals
L_.str: ## @.str
.asciz "%s\n"
.subsections_via_symbols