Skip to content

Instantly share code, notes, and snippets.

@heatd
Created March 6, 2023 17:26
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
Save heatd/76fa59893a76cc38a60962a9c67cf4b2 to your computer and use it in GitHub Desktop.
(gdb) disassemble /s module_add
Dump of assembler code for function _Z10module_addP6module:
include/onyx/preempt.h:
57 inc_per_cpu(preemption_counter);
0xffffffff81013130 <+0>: push %rbp
0xffffffff81013131 <+1>: mov %rsp,%rbp
0xffffffff81013134 <+4>: push %rbx
0xffffffff81013135 <+5>: mov %rdi,%rbx
0xffffffff81013138 <+8>: sub $0x8,%rsp
0xffffffff8101313c <+12>: incq %gs:0xb8
include/onyx/spinlock.h:
89 __spin_lock(lock);
0xffffffff81013145 <+21>: mov $0xffffffff81139470,%rdi
0xffffffff8101314c <+28>: call 0xffffffff8101d6e0 <__spin_lock(spinlock*)>
kernel/modules.cpp:
57 if (!module_list)
0xffffffff81013151 <+33>: cmpq $0x0,0x12630f(%rip) # 0xffffffff81139468 <_ZL11module_list>
0xffffffff81013159 <+41>: je 0xffffffff810131b0 <_Z10module_addP6module+128>
60 }
61 else
62 {
63 tail->next = mod;
0xffffffff8101315b <+43>: mov 0x1262fe(%rip),%rax # 0xffffffff81139460 <_ZL4tail>
65 tail = mod;
0xffffffff81013162 <+50>: mov %rbx,0x1262f7(%rip) # 0xffffffff81139460 <_ZL4tail>
63 tail->next = mod;
0xffffffff81013169 <+57>: mov %rbx,0x58(%rax)
64 mod->prev = tail;
0xffffffff8101316d <+61>: mov %rax,0x50(%rbx)
include/onyx/preempt.h:
34 dec_per_cpu(preemption_counter);
0xffffffff81013171 <+65>: decq %gs:0xb8
35 }
36
37 __always_inline void sched_enable_preempt_no_softirq()
38 {
39 __sched_enable_preempt();
40 }
41
42 __always_inline void sched_enable_preempt()
43 {
44 __sched_enable_preempt();
45 unsigned long counter = get_per_cpu(preemption_counter);
0xffffffff8101317a <+74>: mov %gs:0xb8,%rax
46
47 // If preemption is enabled, try to do various tasks
48 // softirq, rescheduling, etc
49 if (counter == 0 && !irq_is_disabled()) [[unlikely]]
0xffffffff81013183 <+83>: test %rax,%rax
0xffffffff81013186 <+86>: je 0xffffffff81013198 <_Z10module_addP6module+104>
kernel/modules.cpp:
70 }
0xffffffff81013188 <+88>: mov -0x8(%rbp),%rbx
0xffffffff8101318c <+92>: leave
0xffffffff8101318d <+93>: jmp 0xffffffff81088080 <__x86_return_thunk>
0xffffffff81013192 <+98>: nopw 0x0(%rax,%rax,1)
include/onyx/x86/include/platform/irq.h:
34 __asm__ __volatile__("pushf; pop %0" : "=rm"(flags)::"memory");
0xffffffff81013198 <+104>: pushf
0xffffffff81013199 <+105>: pop %rax
35 return flags;
0xffffffff8101319a <+106>: test $0x2,%ah
0xffffffff8101319d <+109>: je 0xffffffff81013188 <_Z10module_addP6module+88>
0xffffffff8101319f <+111>: mov -0x8(%rbp),%rbx
0xffffffff810131a3 <+115>: mov $0x1,%edi
0xffffffff810131a8 <+120>: leave
0xffffffff810131a9 <+121>: jmp 0xffffffff8107b310 <_Z20sched_handle_preemptb>
0xffffffff810131ae <+126>: xchg %ax,%ax
0xffffffff810131b0 <+128>: mov %rbx,0x1262a9(%rip) # 0xffffffff81139460 <_ZL4tail>
0xffffffff810131b7 <+135>: mov %rbx,0x1262aa(%rip) # 0xffffffff81139468 <_ZL11module_list>
0xffffffff810131be <+142>: jmp 0xffffffff81013171 <_Z10module_addP6module+65>
Dump of assembler code for function __spin_lock(spinlock*):
include/onyx/smp.h:
78 __attribute__((always_inline)) static inline unsigned int get_cpu_nr()
79 {
80 return get_per_cpu(cpu_nr);
0xffffffff8101d6e0 <+0>: mov %gs:0x68,%esi
0xffffffff8101d6e8 <+8>: xor %eax,%eax
kernel/spinlock.cpp:
59 raw_spinlock_t what_to_insert = get_cpu_nr() + 1;
0xffffffff8101d6ea <+10>: add $0x1,%esi
34 return __atomic_compare_exchange_n(&lock->lock, &expected_val, cpu_nr_plus_one, false,
0xffffffff8101d6ed <+13>: lock cmpxchg %esi,(%rdi)
60 if (!spin_lock_fast_path(lock, what_to_insert)) [[unlikely]]
0xffffffff8101d6f1 <+17>: jne 0xffffffff8101d6f8 <__spin_lock(spinlock*)+24>
62
63 post_lock_actions(lock);
64 }
0xffffffff8101d6f3 <+19>: jmp 0xffffffff81088080 <__x86_return_thunk>
61 spin_lock_slow_path(lock, what_to_insert);
0xffffffff8101d6f8 <+24>: jmp 0xffffffff8101d6c0 <_Z19spin_lock_slow_pathP8spinlockj>
Dump of assembler code for function __spin_unlock(spinlock*):
kernel/spinlock.cpp:
67 {
68 #ifdef CONFIG_SPINLOCK_DEBUG
69 assert(lock->lock > 0);
70 #endif
71
72 post_release_actions(lock);
73
74 __atomic_store_n(&lock->lock, 0, __ATOMIC_RELEASE);
0xffffffff8101d700 <+0>: movl $0x0,(%rdi)
75 }
0xffffffff8101d706 <+6>: jmp 0xffffffff81088080 <__x86_return_thunk>
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment