@thejh
Created August 24, 2018 23:45
overzealous kasan stack fixup patch (completely untested)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 957dfb693ecc..251ed2ca3f04 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1673,9 +1673,26 @@ ENTRY(rewind_stack_do_exit)
/* Prevent any naive code from trying to unwind to our caller. */
xorl %ebp, %ebp
+ movq %rdi, %r14
+
movq PER_CPU_VAR(cpu_current_top_of_stack), %rax
- leaq -PTREGS_SIZE(%rax), %rsp
+ leaq -PTREGS_SIZE(%rax), %r15
+
+#ifdef CONFIG_KASAN
+ /*
+ * Remove stale stack poison left behind on the task stack.
+ * Do this before updating RSP so that an interrupt that is not
+ * handled on an interrupt stack can't land on still-poisoned
+ * memory before we're done with the unpoisoning.
+ */
+ movq %r15, %rdi
+ call kasan_unpoison_task_stack_below
+#endif
+
+ movq %rsp, %rsi
+ movq %r15, %rsp
UNWIND_HINT_FUNC sp_offset=PTREGS_SIZE
- call do_exit
+ movq %r14, %rdi
+ call do_exit_rewound
END(rewind_stack_do_exit)
diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 9c8652974f8e..ada51c4678e8 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -317,6 +317,46 @@ NOKPROBE_SYMBOL(oops_begin);
void __noreturn rewind_stack_do_exit(int signr);
+void do_exit_rewound(int signr, unsigned long old_rsp)
+{
+#ifdef CONFIG_KASAN
+ /*
+ * We might be coming from an exception stack. In that case, we just
+ * jumped off that exception stack, preemption is disabled (from
+ * ist_enter()), and EFLAGS.IF is off.
+ * rewind_stack_do_exit() has unpoisoned the current stack at this
+ * point, but the exception stack might still have poison on it.
+ * Since do_exit() will schedule() at some point, we have to scrub the
+ * KASAN shadows for our exception stacks now.
+ */
+ pr_warn("DEBUG: entering do_exit_rewound\n");
+ if (!preemptible()) {
+ pr_warn("DEBUG: do_exit_rewound: not preemptible\n");
+ int cpu = smp_processor_id();
+ struct cpu_entry_area *ea = get_cpu_entry_area(cpu);
+ unsigned long es_start = (unsigned long)ea->exception_stacks;
+ unsigned long es_end = es_start + sizeof(ea->exception_stacks);
+ pr_warn("DEBUG: do_exit_rewound: old_rsp=0x%lx es_start=0x%lx es_end=0x%lx\n",
+ old_rsp, es_start, es_end);
+ if (old_rsp >= es_start && old_rsp < es_end) {
+ pr_warn("DEBUG: do_exit_rewound: WIPING\n");
+ /*
+ * Alright, just wipe all the exception stacks
+ * associated with the current CPU.
+ * Note that we're over-wiping here, and an exception
+ * could execute on one of these exception stacks while
+ * we're in the middle of zeroing its KASAN shadow; but
+ * that's fine.
+ */
+ kasan_unpoison_shadow(ea->exception_stacks,
+ sizeof(ea->exception_stacks));
+ }
+ }
+#endif
+
+ do_exit(signr);
+}
+NOKPROBE_SYMBOL(do_exit_rewound);
+
void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
if (regs && kexec_should_crash(current))
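
For context on the two KASAN calls above: kasan_unpoison_task_stack_below() and kasan_unpoison_shadow() both clear the shadow bytes covering a range of memory, so later accesses in that range no longer trip KASAN. The sketch below is a simplified model of what "unpoisoning" means, not the kernel's implementation: mem_to_shadow() and unpoison_range() are made-up names, the shadow offset shown is only the usual x86-64 default, and the partial-granule handling at the end of a range is ignored.

#include <stdint.h>
#include <string.h>

/*
 * Simplified KASAN shadow model: one shadow byte covers 8 bytes of kernel
 * address space; a shadow value of 0 means "fully addressable", nonzero
 * means poisoned (redzone, freed, out of scope, ...).
 */
#define SHADOW_SCALE_SHIFT      3
#define SHADOW_OFFSET           0xdffffc0000000000UL    /* usual x86-64 default */

static inline uint8_t *mem_to_shadow(const void *addr)
{
        return (uint8_t *)(((uintptr_t)addr >> SHADOW_SCALE_SHIFT) + SHADOW_OFFSET);
}

/* Unpoisoning [start, start + size): zero the shadow bytes that cover it. */
static void unpoison_range(const void *start, size_t size)
{
        memset(mem_to_shadow(start), 0, size >> SHADOW_SCALE_SHIFT);
}

Under that model, the entry_64.S hunk clears the task stack's shadow below the rewound RSP before switching to it, and do_exit_rewound() additionally clears the shadow of the per-CPU exception-stack block when the abandoned RSP shows we were running on one of those stacks.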
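
The C side then reduces to two steps: decide whether the RSP we just abandoned was inside this CPU's exception-stack block, and if so wipe that whole block's shadow rather than pinpointing the exact IST stack. A minimal sketch of that logic under the same simplified shadow model; all names here are illustrative placeholders, not kernel symbols (in the patch the block is ea->exception_stacks from get_cpu_entry_area()).

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Illustrative placeholders, not kernel symbols. */
struct exc_stacks {
        uintptr_t base;         /* (unsigned long)ea->exception_stacks */
        size_t    size;         /* sizeof(ea->exception_stacks) */
        uint8_t  *shadow;       /* shadow bytes covering that block */
};

/*
 * Half-open range check: was the abandoned RSP inside this CPU's
 * exception-stack block?  If so, the oops was handled on an IST stack.
 */
static bool came_from_exception_stack(const struct exc_stacks *es, uintptr_t old_rsp)
{
        return old_rsp >= es->base && old_rsp < es->base + es->size;
}

/*
 * Over-wide on purpose: rather than working out which IST stack we were
 * on, clear the shadow of the whole block.  Zero shadow means
 * "addressable", so the worst case is losing some poison coverage.
 */
static void scrub_exception_stacks(struct exc_stacks *es)
{
        memset(es->shadow, 0, es->size >> 3);   /* 1 shadow byte per 8 bytes */
}

Since zeroing shadow only ever makes KASAN more permissive, racing with an NMI or another exception that is concurrently running on one of these stacks is harmless, which is what the over-wiping comment in the patch is getting at.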