
@Coreforge
Created November 20, 2023 17:01
A very quick and dirty adaptation of the arm32 alignment trap to fix up a single stp instruction in aarch64 code, for the Raspberry Pi 5. Rather printk-heavy.
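For context: aarch64 permits unaligned stp to Normal memory, so this trap only fires for mappings that forbid unaligned access, such as Device memory (e.g. an mmap()ed PCIe BAR). Below is a minimal userspace sketch that emits the same instruction shape the fixup handles; map_device_memory() is a hypothetical placeholder, not a real API.

#include <stdint.h>

/* Emit an STP of two 64-bit registers with a zero immediate offset,
 * the same shape as the observed fault (stp x2, x3, [x0]). */
static inline void stp64(void *addr, uint64_t a, uint64_t b)
{
	asm volatile("stp %1, %2, [%0]"
		     : : "r"(addr), "r"(a), "r"(b) : "memory");
}

int main(void)
{
	/* ASSUMPTION: returns a pointer into a Device-memory mapping
	 * (e.g. an mmap()ed BAR); ordinary heap memory won't fault here. */
	extern uint8_t *map_device_memory(void);
	uint8_t *fb = map_device_memory();

	/* misaligned by one byte -> alignment fault -> fixup path below */
	stp64(fb + 1, 0x1122334455667788ULL, 0x99aabbccddeeff00ULL);
	return 0;
}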
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 19713d0f013b..792d0d0473ba 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -72,6 +72,7 @@ void do_sp_pc_abort(unsigned long addr, unsigned long esr, struct pt_regs *regs)
void bad_el0_sync(struct pt_regs *regs, int reason, unsigned long esr);
void do_cp15instr(unsigned long esr, struct pt_regs *regs);
int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs);
+int do_alignment_fixup(unsigned long addr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
diff --git a/arch/arm64/kernel/compat_alignment.c b/arch/arm64/kernel/compat_alignment.c
index deff21bfa680..ec9c25d18316 100644
--- a/arch/arm64/kernel/compat_alignment.c
+++ b/arch/arm64/kernel/compat_alignment.c
@@ -318,7 +318,7 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
int thumb2_32b = 0;
instrptr = instruction_pointer(regs);
-
+	printk("Compat alignment fixup\n");
if (compat_thumb_mode(regs)) {
__le16 __user *ptr = (__le16 __user *)(instrptr & ~1);
u16 tinstr, tinst2;
@@ -381,3 +381,112 @@ int do_compat_alignment_fixup(unsigned long addr, struct pt_regs *regs)
return 0;
}
+
+// arm64
+
+static int alignment_get_arm64(struct pt_regs *regs, __le32 __user *ip, u32 *inst)
+{
+ __le32 instr = 0;
+ int fault;
+
+ fault = get_user(instr, ip);
+ if (fault)
+ return fault;
+
+ *inst = __le32_to_cpu(instr);
+ return 0;
+}
+
+static int ldpstp_offset_fixup(u32 instr, struct pt_regs *regs){
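+	// LDP/STP (signed offset) field layout, per the Arm ARM:
+	// opc[31:30] V[26] L[22] imm7[21:15] Rt2[14:10] Rn[9:5] Rt[4:0]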
+ uint8_t load = (instr >> 22) & 1;
+ uint8_t simd = (instr >> 26) & 1;
+ uint16_t imm7 = (instr >> 15) & 0x7f;
+ uint8_t Rt2 = (instr >> 10) & 0x1f;
+ uint8_t Rn = (instr >> 5) & 0x1f;
+ uint8_t Rt = instr & 0x1f;
+
+	// sign-extend the 7-bit immediate
+	int64_t imm = (imm7 & 0x40) ? (int64_t)imm7 - 0x80 : imm7;
+	printk("Variant: 0x%x Load: %x SIMD: %x IMM: %lld Rt: 0x%x Rt2: 0x%x Rn: 0x%x\n", ((instr >> 30) & 3), load, simd, imm, Rt, Rt2, Rn);
+ if(((instr >> 30) & 3) == 2){
+ // 64bit
+ if(!load){
+ if(!simd){
+ // 64bit store
+ u64 val1, val2;
+ val1 = regs->regs[Rt];
+ val2 = regs->regs[Rt2];
+				// the immediate is scaled by the access size (8 bytes here)
+				u64 addr = regs->regs[Rn] + (imm << 3);
+ printk("STP 64bit storing 0x%llx 0x%llx at 0x%llx\n", val1, val2, addr);
+ // for the first reg. Byte by byte to avoid any alignment issues
+ for(int i = 0; i < 8; i++){
+ uint8_t v = (val1 >> (i*8)) & 0xff;
+					if(put_user(v, (uint8_t __user *)addr))
+						return -EFAULT;
+ addr++;
+ }
+ // second reg
+ for(int i = 0; i < 8; i++){
+ uint8_t v = (val2 >> (i*8)) & 0xff;
+					if(put_user(v, (uint8_t __user *)addr))
+						return -EFAULT;
+ addr++;
+ }
+				arm64_skip_faulting_instruction(regs, 4);
+				return 0;
+ }
+ }
+ }
+	// anything other than a 64-bit non-SIMD store pair is left unhandled
+	return 1;
+}
+
+static int ls_fixup(u32 instr, struct pt_regs *regs){
+ uint8_t op0;
+ uint8_t op1;
+ uint8_t op2;
+ uint8_t op3;
+ uint8_t op4;
+
+ op0 = (instr >> 28) & 0xf;
+ op1 = (instr >> 26) & 1;
+ op2 = (instr >> 23) & 3;
+ op3 = (instr >> 16) & 0x3f;
+ op4 = (instr >> 10) & 3;
+ printk("Load/Store: op0 0x%x op1 0x%x op2 0x%x op3 0x%x op4 0x%x\n", op0, op1, op2, op3, op4);
+	if((op0 & 3) == 2 && (op2 == 2)){
+		// Load/store register pair, signed offset
+		return ldpstp_offset_fixup(instr, regs);
+	}
+	return 1;
+}
+
+int do_alignment_fixup(unsigned long addr, struct pt_regs *regs){
+	unsigned long instrptr;
+ u32 instr = 0;
+
+ instrptr = instruction_pointer(regs);
+ printk("Alignment fixup\n");
+
+	if (alignment_get_arm64(regs, (__le32 __user *)instrptr, &instr)){
+ printk("Failed to get aarch64 instruction\n");
+ return 1;
+ }
+	printk("Faulting instruction: 0x%x\n", instr);
+	/*
+	 * List of seen faults: 020c00a9 (0xa9000c02) stp x2, x3, [x0]
+	 */
+
+ uint8_t op0;
+
+ op0 = ((instr & 0x1E000000) >> 25);
+	if((op0 & 5) == 0x4){
+		printk("Load/Store\n");
+		return ls_fixup(instr, regs);
+	}
+	printk("Not handling instruction with op0 0x%x\n", op0);
+	return 1;
+}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 6b6b8a82f294..bdbc4df8753e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -25,6 +25,7 @@
#include <linux/perf_event.h>
#include <linux/preempt.h>
#include <linux/hugetlb.h>
+#include <linux/nmi.h>
#include <asm/acpi.h>
#include <asm/bug.h>
@@ -631,6 +632,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
* We had some memory, but were unable to successfully fix up
* this page fault.
*/
+ printk("Page fault bus error\n");
arm64_force_sig_fault(SIGBUS, BUS_ADRERR, far, inf->name);
} else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) {
unsigned int lsb;
@@ -673,9 +675,18 @@ static int do_translation_fault(unsigned long far,
static int do_alignment_fault(unsigned long far, unsigned long esr,
struct pt_regs *regs)
{
+ printk("Alignment fault: fixup enabled?: %d, user mode: %d pstate: 0x%llx\n", IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS), compat_user_mode(regs), regs->pstate);
+ trigger_all_cpu_backtrace();
if (IS_ENABLED(CONFIG_COMPAT_ALIGNMENT_FIXUPS) &&
compat_user_mode(regs))
return do_compat_alignment_fixup(far, regs);
+
+	if(user_mode(regs)){
+		// aarch64 user mode; fall through to do_bad_area() below
+		// if the instruction couldn't be fixed up
+		if(do_alignment_fixup(far, regs) == 0)
+			return 0;
+	}
do_bad_area(far, esr, regs);
return 0;
}
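As a sanity check on the field extraction, the one observed fault from the comment above (0xa9000c02, stp x2, x3, [x0]) decodes as follows; this is a standalone userspace sketch using the same shifts and masks as the patch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t instr = 0xa9000c02;	/* stp x2, x3, [x0] */

	printf("opc=%u V=%u L=%u imm7=%u Rt2=%u Rn=%u Rt=%u\n",
	       (instr >> 30) & 3,	/* 2: 64-bit variant */
	       (instr >> 26) & 1,	/* 0: not SIMD/FP */
	       (instr >> 22) & 1,	/* 0: store */
	       (instr >> 15) & 0x7f,	/* 0: zero offset */
	       (instr >> 10) & 0x1f,	/* 3: x3 */
	       (instr >> 5) & 0x1f,	/* 0: x0 */
	       instr & 0x1f);		/* 2: x2 */
	return 0;
}

Running it prints opc=2 V=0 L=0 imm7=0 Rt2=3 Rn=0 Rt=2, which is exactly the 64-bit non-SIMD store-pair case the fixup handles.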