Stafford Horne (stffrdhrn)


Cannot even run a hello world newlib ELF binary!

Last instructions before failing:

...
S 000063b4: d6519dd4 l.sw    -27180(r17),r19 [000095d4] = 00008800  flag: 1
S 000063b8: 1a200001 l.movhi r17,0x1         r17        = 00010000  flag: 1
S 000063bc: d6519dd0 l.sw    -27184(r17),r19 [000095d0] = 00008800  flag: 1
S 000063c0: 85c10000 l.lwz   r14,0x0(r1)     r14        = 00000000  flag: 1
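
For reference, the trace columns are the PC, the raw instruction word, the disassembly, and the resulting register or memory write. As an aid for reading the raw words, below is a minimal decoder sketch in Python (illustrative only, not a script from this gist), assuming the standard or1k encodings: opcode in bits 31..26, register fields in bits 25..21 and 20..16, and a split immediate for stores.

# Minimal or1k instruction-field decoder for the three opcodes in the trace.
# Illustrative sketch, not from the gist.

def sext16(v):
    return v - 0x10000 if v & 0x8000 else v

def decode(word):
    op = word >> 26
    a = (word >> 16) & 0x1f
    if op == 0x21:                      # l.lwz rD,I(rA)
        d = (word >> 21) & 0x1f
        return "l.lwz   r%d,%d(r%d)" % (d, sext16(word & 0xffff), a)
    if op == 0x35:                      # l.sw I(rA),rB -- immediate is split
        b = (word >> 11) & 0x1f
        imm = sext16((((word >> 21) & 0x1f) << 11) | (word & 0x7ff))
        return "l.sw    %d(r%d),r%d" % (imm, a, b)
    if op == 0x06:                      # l.movhi rD,K
        return "l.movhi r%d,0x%x" % ((word >> 21) & 0x1f, word & 0xffff)
    return "opcode 0x%02x" % op

for w in (0xd6519dd4, 0x1a200001, 0x85c10000):
    print(decode(w))

Feeding it the words above reproduces the disassembly in the trace, confirming the last instruction is a load through r1, the stack pointer, which returns 0 into r14.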
stffrdhrn / wireguard-ci-lockup.log
Last active June 23, 2022 21:22
Running WireGuard on or1k
...
[+] NS1: ping -W 1 -c 1 192.168.241.2
PING 192.168.241.2 (192.168.241.2) 56(84) bytes of data.
64 bytes from 192.168.241.2: icmp_seq=1 ttl=64 time=1.84 ms
--- 192.168.241.2 ping statistics ---
1 packets transmitted, 1 received, 0% packet loss, time 0ms
rtt min/avg/max/mdev = 1.843/1.843/1.843/0.000 ms
[+] NS2: wg show wg0 endpoints
[+] NS1: ip route add 10.0.0.0/24 dev veth3 src 10.0.0.3 metric 1
...
[ 68.384000] IPI: src_cpu: 3 send ipi_msg 2 to cpu: 1 (to cpu 1)
[ 68.384000] IPI: cpu: 0 handling IPI 2
[ 68.384000] IPI: src_cpu: 3 send ipi_msg 2 to cpu: 2 (to cpu 2)
[ 68.384000] IPI: cpu: 1 handling IPI 2 (cpu 1 gets it)
[ 68.384000] IPI: cpu: 2 handling IPI 2 (cpu 2 gets it) ... you get the point
[ 68.384000] IPI: src_cpu: 3 send ipi_msg 3 to cpu: 0
[ 68.384000] IPI: cpu: 0 handling IPI 3
[ 68.384000] IPI: src_cpu: 3 send ipi_msg 3 to cpu: 0 (to cpu 0)
[ 68.384000] IPI: src_cpu: 0 send ipi_msg 2 to cpu: 1 --- cpu 0 sends some things, but no more receives ---
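
One way to spot the hang in a longer capture is to pair each send line with a matching handling line and flag the shortfall. Below is a minimal sketch, assuming the exact "send ipi_msg" / "handling IPI" wording of the debug prints above; a hypothetical helper, not part of the selftests.

# Sketch: count IPI sends vs. handles per (cpu, msg) in a dmesg capture.
# Assumes the exact debug wording shown above; hypothetical helper, not
# part of the wireguard selftests.
import re
import sys
from collections import Counter

SEND = re.compile(r"IPI: src_cpu: \d+ send ipi_msg (\d+) to cpu: (\d+)")
RECV = re.compile(r"IPI: cpu: (\d+) handling IPI (\d+)")

sent, handled = Counter(), Counter()
for line in sys.stdin:
    if m := SEND.search(line):
        sent[(int(m.group(2)), int(m.group(1)))] += 1   # key: (dest cpu, msg)
    elif m := RECV.search(line):
        handled[(int(m.group(1)), int(m.group(2)))] += 1

for key in sorted(sent):
    if sent[key] > handled[key]:
        print("cpu %d, IPI %d: sent %d, handled %d"
              % (*key, sent[key], handled[key]))

Piping the dmesg capture through it singles out the (cpu, msg) pairs that were sent but never handled, e.g. the IPIs cpu 0 stops receiving at the end of the log.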
diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
index 4a96a73a9804..146fb73f3b37 100644
--- a/tools/testing/selftests/wireguard/qemu/Makefile
+++ b/tools/testing/selftests/wireguard/qemu/Makefile
@@ -252,6 +252,7 @@ CHOST := or1k-linux-musl
QEMU_ARCH := or1k
KERNEL_ARCH := openrisc
KERNEL_BZIMAGE := $(KERNEL_BUILD_PATH)/vmlinux
+QEMU_VPORT_RESULT := virtio-serial-device
ifeq ($(HOST_ARCH),$(ARCH))
diff --git a/arch/openrisc/include/asm/cacheflush.h b/arch/openrisc/include/asm/cacheflush.h
index 19916b605b07..eeac40d4a854 100644
--- a/arch/openrisc/include/asm/cacheflush.h
+++ b/arch/openrisc/include/asm/cacheflush.h
@@ -44,11 +44,9 @@ extern void smp_icache_page_inv(struct page *page);
*/
static inline void sync_icache_dcache(struct page *page)
{
- preempt_disable();
if (!IS_ENABLED(CONFIG_DCACHE_WRITETHROUGH))

Where the cores are waiting: csd_lock_wait (waiting for another CPU to acknowledge an smp_call_function cross-call)

(gdb) info threads
  Id   Target Id                    Frame
  1    Thread 1.1 (CPU#0 [halted ]) 0xc00053c4 in arch_cpu_idle () at /home/shorne/work/linux/arch/openrisc/kernel/process.c:105
  2    Thread 1.2 (CPU#1 [running]) csd_lock_wait (csd=0xcfeb3d60) at /home/shorne/work/linux/kernel/smp.c:443
  3    Thread 1.3 (CPU#2 [halted ]) 0xc00053c4 in arch_cpu_idle () at /home/shorne/work/linux/arch/openrisc/kernel/process.c:105
* 4    Thread 1.4 (CPU#3 [running]) csd_lock_wait (csd=0xcfeb3d80) at /home/shorne/work/linux/kernel/smp.c:443
< shorne@antec ~/work/linux/tools/testing/selftests/wireguard/qemu > ARCH=or1k make
Building for or1k-linux-musl using x86_64-redhat-linux
mkdir -p /home/shorne/work/linux/tools/testing/selftests/wireguard/qemu/build/or1k
or1k-linux-musl-gcc -o /home/shorne/work/linux/tools/testing/selftests/wireguard/qemu/build/or1k/init -O3 -pipe -std=gnu11 init.c
/home/shorne/work/linux/tools/testing/selftests/wireguard/qemu/build/or1k/or1k-linux-musl-cross/bin/../lib/gcc/or1k-linux-musl/11.2.1/../../../../or1k-linux-musl/bin/ld: cannot find Scrt1.o: No such file or directory
/home/shorne/work/linux/tools/testing/selftests/wireguard/qemu/build/or1k/or1k-linux-musl-cross/bin/../lib/gcc/or1k-linux-musl/11.2.1/../../../../or1k-linux-musl/bin/ld: cannot find crti.o: No such file or directory
/home/shorne/work/linux/tools/testing/selftests/wireguard/qemu/build/or1k/or1k-linux-musl-cross/bin/../lib/gcc/or1k-linux-musl/11.2.1/../../../../or1k-linux-musl/bin/ld: cannot find crtbeginS.o: No such file or directory
/home/shorne/wor
stffrdhrn / or1k-random-lockup.md
Created June 14, 2022 21:37
Running or1k virt SMP, we get lockups with tcg=single and more than 2 cores

Running:

qemu-system-or1k -cpu or1200 -M virt -no-reboot -kernel /home/shorne/work/linux/vmlinux \
 -initrd /home/shorne/work/linux/initramfs.cpio.gz \
 -device virtio-net-device,netdev=user -netdev user,id=user,net=10.9.0.1/24,host=10.9.0.100 \
 -serial mon:stdio -nographic \
 -device virtio-blk-device,drive=d0 -drive file=/home/shorne/work/linux/virt.qcow2,id=d0,if=none,format=qcow2 \
 -gdb tcp::10001 \
 -m 128 -accel tcg,thread=single \
 -smp cpus=3 \
diff --git a/target/openrisc/cpu.c b/target/openrisc/cpu.c
index dfbafc5236..41d1b2a24a 100644
--- a/target/openrisc/cpu.c
+++ b/target/openrisc/cpu.c
@@ -21,6 +21,7 @@
#include "qapi/error.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
+#include "exec/exec-all.h"
Page boundaries
< shorne@antec ~/work/linux > ./addr2page.py c01e365c
c01e0000
c01e2000 -
c01e4000 <--- in a delay slot
c01e365c <fe_mul_impl>:
c01e365c: 9c 21 ff 58 l.addi r1,r1,-168
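
addr2page.py itself isn't shown in the gist; below is a minimal sketch of what such a helper can do, assuming OpenRISC Linux's 8 KiB page size (PAGE_SHIFT = 13), which matches the 0x2000 spacing of the boundaries printed above.

#!/usr/bin/env python3
# Sketch of an addr2page.py-style helper: print the page boundaries around a
# kernel address. Assumes OpenRISC Linux's 8 KiB pages; this is not the
# gist's actual script.
import sys

PAGE_SIZE = 0x2000                      # 8 KiB pages on OpenRISC Linux

addr = int(sys.argv[1], 16)
page = addr & ~(PAGE_SIZE - 1)          # align down to the containing page
for p in (page - PAGE_SIZE, page, page + PAGE_SIZE):
    mark = " <-- contains %08x" % addr if p == page else ""
    print("%08x%s" % (p, mark))

Run as ./addr2page.py c01e365c, it prints the three boundaries shown above, with the fe_mul_impl entry falling inside the c01e2000 page.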