@xrivendell7
Created December 9, 2023 13:21
==================================================================
BUG: KASAN: wild-memory-access in instrument_atomic_read include/linux/instrumented.h:68 [inline]
BUG: KASAN: wild-memory-access in _test_bit include/asm-generic/bitops/instrumented-non-atomic.h:141 [inline]
BUG: KASAN: wild-memory-access in __lock_acquire+0xeaa/0x3b10 kernel/locking/lockdep.c:5106
Read of size 8 at addr 1fffffff83471f38 by task kworker/1:3/10424
CPU: 1 PID: 10424 Comm: kworker/1:3 Not tainted 6.6.0-rc6-g1b70ac811a14-dirty #1
Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014
Workqueue: events sco_sock_timeout
Call Trace:
<TASK>
__dump_stack lib/dump_stack.c:88 [inline]
dump_stack_lvl+0xee/0x1e0 lib/dump_stack.c:106
kasan_report+0xda/0x110 mm/kasan/report.c:588
check_region_inline mm/kasan/generic.c:181 [inline]
kasan_check_range+0xef/0x190 mm/kasan/generic.c:187
instrument_atomic_read include/linux/instrumented.h:68 [inline]
_test_bit include/asm-generic/bitops/instrumented-non-atomic.h:141 [inline]
__lock_acquire+0xeaa/0x3b10 kernel/locking/lockdep.c:5106
lock_acquire kernel/locking/lockdep.c:5753 [inline]
lock_acquire+0x1ae/0x520 kernel/locking/lockdep.c:5718
lock_sock_nested+0x3f/0x100 net/core/sock.c:3503
lock_sock include/net/sock.h:1720 [inline]
sco_sock_timeout+0xf9/0x300 net/bluetooth/sco.c:96
process_one_work+0x8ab/0x1730 kernel/workqueue.c:2630
process_scheduled_works kernel/workqueue.c:2703 [inline]
worker_thread+0x931/0x1380 kernel/workqueue.c:2784
kthread+0x2d3/0x3b0 kernel/kthread.c:388
ret_from_fork+0x4e/0x80 arch/x86/kernel/process.c:147
ret_from_fork_asm+0x11/0x20 arch/x86/entry/entry_64.S:304
</TASK>
==================================================================
// autogenerated by syzkaller (https://github.com/google/syzkaller)
#define _GNU_SOURCE
#include <dirent.h>
#include <endian.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/capability.h>
#include <linux/futex.h>
#include <linux/rfkill.h>
#include <pthread.h>
#include <sched.h>
#include <signal.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mount.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
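// Timing and threading helpers used by the reproducer harness below.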
static void sleep_ms(uint64_t ms) { usleep(ms * 1000); }
static uint64_t current_time_ms(void) {
struct timespec ts;
if (clock_gettime(CLOCK_MONOTONIC, &ts)) exit(1);
return (uint64_t)ts.tv_sec * 1000 + (uint64_t)ts.tv_nsec / 1000000;
}
static void thread_start(void* (*fn)(void*), void* arg) {
pthread_t th;
pthread_attr_t attr;
pthread_attr_init(&attr);
pthread_attr_setstacksize(&attr, 128 << 10);
int i = 0;
for (; i < 100; i++) {
if (pthread_create(&th, &attr, fn, arg) == 0) {
pthread_attr_destroy(&attr);
return;
}
if (errno == EAGAIN) {
usleep(50);
continue;
}
break;
}
exit(1);
}
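// One-shot event built on futexes, used to hand work between the main
// program loop and the per-call worker threads.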
typedef struct {
int state;
} event_t;
static void event_init(event_t* ev) { ev->state = 0; }
static void event_reset(event_t* ev) { ev->state = 0; }
static void event_set(event_t* ev) {
if (ev->state) exit(1);
__atomic_store_n(&ev->state, 1, __ATOMIC_RELEASE);
syscall(SYS_futex, &ev->state, FUTEX_WAKE | FUTEX_PRIVATE_FLAG, 1000000);
}
static void event_wait(event_t* ev) {
while (!__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE))
syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, 0);
}
static int event_isset(event_t* ev) {
return __atomic_load_n(&ev->state, __ATOMIC_ACQUIRE);
}
static int event_timedwait(event_t* ev, uint64_t timeout) {
uint64_t start = current_time_ms();
uint64_t now = start;
for (;;) {
uint64_t remain = timeout - (now - start);
struct timespec ts;
ts.tv_sec = remain / 1000;
ts.tv_nsec = (remain % 1000) * 1000 * 1000;
syscall(SYS_futex, &ev->state, FUTEX_WAIT | FUTEX_PRIVATE_FLAG, 0, &ts);
if (__atomic_load_n(&ev->state, __ATOMIC_ACQUIRE)) return 1;
now = current_time_ms();
if (now - start > timeout) return 0;
}
}
static bool write_file(const char* file, const char* what, ...) {
char buf[1024];
va_list args;
va_start(args, what);
vsnprintf(buf, sizeof(buf), what, args);
va_end(args);
buf[sizeof(buf) - 1] = 0;
int len = strlen(buf);
int fd = open(file, O_WRONLY | O_CLOEXEC);
if (fd == -1) return false;
if (write(fd, buf, len) != len) {
int err = errno;
close(fd);
errno = err;
return false;
}
close(fd);
return true;
}
const int kInitNetNsFd = 201;
#define MAX_FDS 30
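// Creates a socket in the initial network namespace: kInitNetNsFd (fd 201)
// is a handle to the init netns duplicated there by sandbox_common(). The
// helper temporarily setns()'s into it, creates the socket, then restores
// the caller's namespace.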
static long syz_init_net_socket(volatile long domain, volatile long type,
volatile long proto) {
int netns = open("/proc/self/ns/net", O_RDONLY);
if (netns == -1) return netns;
if (setns(kInitNetNsFd, 0)) return -1;
int sock = syscall(__NR_socket, domain, type, proto);
int err = errno;
if (setns(netns, 0)) exit(1);
close(netns);
errno = err;
return sock;
}
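// Minimal Bluetooth HCI constants and packet layouts matching the kernel's
// HCI wire format, used to emulate a controller over /dev/vhci.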
#define BTPROTO_HCI 1
#define ACL_LINK 1
#define SCAN_PAGE 2
typedef struct {
uint8_t b[6];
} __attribute__((packed)) bdaddr_t;
#define HCI_COMMAND_PKT 1
#define HCI_EVENT_PKT 4
#define HCI_VENDOR_PKT 0xff
struct hci_command_hdr {
uint16_t opcode;
uint8_t plen;
} __attribute__((packed));
struct hci_event_hdr {
uint8_t evt;
uint8_t plen;
} __attribute__((packed));
#define HCI_EV_CONN_COMPLETE 0x03
struct hci_ev_conn_complete {
uint8_t status;
uint16_t handle;
bdaddr_t bdaddr;
uint8_t link_type;
uint8_t encr_mode;
} __attribute__((packed));
#define HCI_EV_CONN_REQUEST 0x04
struct hci_ev_conn_request {
bdaddr_t bdaddr;
uint8_t dev_class[3];
uint8_t link_type;
} __attribute__((packed));
#define HCI_EV_REMOTE_FEATURES 0x0b
struct hci_ev_remote_features {
uint8_t status;
uint16_t handle;
uint8_t features[8];
} __attribute__((packed));
#define HCI_EV_CMD_COMPLETE 0x0e
struct hci_ev_cmd_complete {
uint8_t ncmd;
uint16_t opcode;
} __attribute__((packed));
#define HCI_OP_WRITE_SCAN_ENABLE 0x0c1a
#define HCI_OP_READ_BUFFER_SIZE 0x1005
struct hci_rp_read_buffer_size {
uint8_t status;
uint16_t acl_mtu;
uint8_t sco_mtu;
uint16_t acl_max_pkt;
uint16_t sco_max_pkt;
} __attribute__((packed));
#define HCI_OP_READ_BD_ADDR 0x1009
struct hci_rp_read_bd_addr {
uint8_t status;
bdaddr_t bdaddr;
} __attribute__((packed));
#define HCI_EV_LE_META 0x3e
struct hci_ev_le_meta {
uint8_t subevent;
} __attribute__((packed));
#define HCI_EV_LE_CONN_COMPLETE 0x01
struct hci_ev_le_conn_complete {
uint8_t status;
uint16_t handle;
uint8_t role;
uint8_t bdaddr_type;
bdaddr_t bdaddr;
uint16_t interval;
uint16_t latency;
uint16_t supervision_timeout;
uint8_t clk_accurancy;
} __attribute__((packed));
struct hci_dev_req {
uint16_t dev_id;
uint32_t dev_opt;
};
struct vhci_vendor_pkt_request {
uint8_t type;
uint8_t opcode;
} __attribute__((packed));
struct vhci_pkt {
uint8_t type;
union {
struct {
uint8_t opcode;
uint16_t id;
} __attribute__((packed)) vendor_pkt;
struct hci_command_hdr command_hdr;
};
} __attribute__((packed));
#define HCIDEVUP _IOW('H', 201, int)
#define HCISETSCAN _IOW('H', 221, int)
static int vhci_fd = -1;
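// Writes an RFKILL_OP_CHANGE_ALL event to /dev/rfkill to clear soft blocks,
// so the virtual HCI device can be brought up.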
static void rfkill_unblock_all() {
int fd = open("/dev/rfkill", O_WRONLY);
if (fd < 0) exit(1);
struct rfkill_event event = {0};
event.idx = 0;
event.type = RFKILL_TYPE_ALL;
event.op = RFKILL_OP_CHANGE_ALL;
event.soft = 0;
event.hard = 0;
if (write(fd, &event, sizeof(event)) < 0) exit(1);
close(fd);
}
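// Injects a single HCI event packet (type byte + event header + payload)
// into the kernel through the vhci fd.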
static void hci_send_event_packet(int fd, uint8_t evt, void* data,
size_t data_len) {
struct iovec iv[3];
struct hci_event_hdr hdr;
hdr.evt = evt;
hdr.plen = data_len;
uint8_t type = HCI_EVENT_PKT;
iv[0].iov_base = &type;
iv[0].iov_len = sizeof(type);
iv[1].iov_base = &hdr;
iv[1].iov_len = sizeof(hdr);
iv[2].iov_base = data;
iv[2].iov_len = data_len;
if (writev(fd, iv, sizeof(iv) / sizeof(struct iovec)) < 0) exit(1);
}
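// Sends an HCI Command Complete event for `opcode` with the given return
// parameters appended.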
static void hci_send_event_cmd_complete(int fd, uint16_t opcode, void* data,
size_t data_len) {
struct iovec iv[4];
struct hci_event_hdr hdr;
hdr.evt = HCI_EV_CMD_COMPLETE;
hdr.plen = sizeof(struct hci_ev_cmd_complete) + data_len;
struct hci_ev_cmd_complete evt_hdr;
evt_hdr.ncmd = 1;
evt_hdr.opcode = opcode;
uint8_t type = HCI_EVENT_PKT;
iv[0].iov_base = &type;
iv[0].iov_len = sizeof(type);
iv[1].iov_base = &hdr;
iv[1].iov_len = sizeof(hdr);
iv[2].iov_base = &evt_hdr;
iv[2].iov_len = sizeof(evt_hdr);
iv[3].iov_base = data;
iv[3].iov_len = data_len;
if (writev(fd, iv, sizeof(iv) / sizeof(struct iovec)) < 0) exit(1);
}
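// Emulated controller command handler: replies to HCI commands from the
// kernel with canned Command Complete responses. Returns true once
// HCI_OP_WRITE_SCAN_ENABLE has been answered, signalling that device setup
// is finished.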
static bool process_command_pkt(int fd, char* buf, ssize_t buf_size) {
struct hci_command_hdr* hdr = (struct hci_command_hdr*)buf;
if (buf_size < (ssize_t)sizeof(struct hci_command_hdr) ||
hdr->plen != buf_size - sizeof(struct hci_command_hdr))
exit(1);
switch (hdr->opcode) {
case HCI_OP_WRITE_SCAN_ENABLE: {
uint8_t status = 0;
hci_send_event_cmd_complete(fd, hdr->opcode, &status, sizeof(status));
return true;
}
case HCI_OP_READ_BD_ADDR: {
struct hci_rp_read_bd_addr rp = {0};
rp.status = 0;
memset(&rp.bdaddr, 0xaa, 6);
hci_send_event_cmd_complete(fd, hdr->opcode, &rp, sizeof(rp));
return false;
}
case HCI_OP_READ_BUFFER_SIZE: {
struct hci_rp_read_buffer_size rp = {0};
rp.status = 0;
rp.acl_mtu = 1021;
rp.sco_mtu = 96;
rp.acl_max_pkt = 4;
rp.sco_max_pkt = 6;
hci_send_event_cmd_complete(fd, hdr->opcode, &rp, sizeof(rp));
return false;
}
}
char dummy[0xf9] = {0};
hci_send_event_cmd_complete(fd, hdr->opcode, dummy, sizeof(dummy));
return false;
}
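// Controller read loop: pulls HCI command packets from /dev/vhci and answers
// them until the scan-enable command completes.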
static void* event_thread(void* arg) {
while (1) {
char buf[1024] = {0};
ssize_t buf_size = read(vhci_fd, buf, sizeof(buf));
if (buf_size < 0) exit(1);
if (buf_size > 0 && buf[0] == HCI_COMMAND_PKT) {
if (process_command_pkt(vhci_fd, buf + 1, buf_size - 1)) break;
}
}
return NULL;
}
#define HCI_HANDLE_1 200
#define HCI_HANDLE_2 201
#define HCI_PRIMARY 0
#define HCI_OP_RESET 0x0c03
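// Creates a virtual HCI controller via /dev/vhci, brings it up (unblocking
// rfkill if needed), enables page scan, and injects connection request,
// connection complete, and remote-features events so a fake ACL link with
// handle 200 exists for the later SCO connect() calls.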
static void initialize_vhci() {
int hci_sock = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
if (hci_sock < 0) exit(1);
vhci_fd = open("/dev/vhci", O_RDWR);
if (vhci_fd == -1) exit(1);
const int kVhciFd = 202;
if (dup2(vhci_fd, kVhciFd) < 0) exit(1);
close(vhci_fd);
vhci_fd = kVhciFd;
struct vhci_vendor_pkt_request vendor_pkt_req = {HCI_VENDOR_PKT, HCI_PRIMARY};
if (write(vhci_fd, &vendor_pkt_req, sizeof(vendor_pkt_req)) !=
sizeof(vendor_pkt_req))
exit(1);
struct vhci_pkt vhci_pkt;
if (read(vhci_fd, &vhci_pkt, sizeof(vhci_pkt)) != sizeof(vhci_pkt)) exit(1);
if (vhci_pkt.type == HCI_COMMAND_PKT &&
vhci_pkt.command_hdr.opcode == HCI_OP_RESET) {
char response[1] = {0};
hci_send_event_cmd_complete(vhci_fd, HCI_OP_RESET, response,
sizeof(response));
if (read(vhci_fd, &vhci_pkt, sizeof(vhci_pkt)) != sizeof(vhci_pkt)) exit(1);
}
if (vhci_pkt.type != HCI_VENDOR_PKT) exit(1);
int dev_id = vhci_pkt.vendor_pkt.id;
pthread_t th;
if (pthread_create(&th, NULL, event_thread, NULL)) exit(1);
int ret = ioctl(hci_sock, HCIDEVUP, dev_id);
if (ret) {
if (errno == ERFKILL) {
rfkill_unblock_all();
ret = ioctl(hci_sock, HCIDEVUP, dev_id);
}
if (ret && errno != EALREADY) exit(1);
}
struct hci_dev_req dr = {0};
dr.dev_id = dev_id;
dr.dev_opt = SCAN_PAGE;
if (ioctl(hci_sock, HCISETSCAN, &dr)) exit(1);
struct hci_ev_conn_request request;
memset(&request, 0, sizeof(request));
memset(&request.bdaddr, 0xaa, 6);
*(uint8_t*)&request.bdaddr.b[5] = 0x10;
request.link_type = ACL_LINK;
hci_send_event_packet(vhci_fd, HCI_EV_CONN_REQUEST, &request,
sizeof(request));
struct hci_ev_conn_complete complete;
memset(&complete, 0, sizeof(complete));
complete.status = 0;
complete.handle = HCI_HANDLE_1;
memset(&complete.bdaddr, 0xaa, 6);
*(uint8_t*)&complete.bdaddr.b[5] = 0x10;
complete.link_type = ACL_LINK;
complete.encr_mode = 0;
hci_send_event_packet(vhci_fd, HCI_EV_CONN_COMPLETE, &complete,
sizeof(complete));
struct hci_ev_remote_features features;
memset(&features, 0, sizeof(features));
features.status = 0;
features.handle = HCI_HANDLE_1;
hci_send_event_packet(vhci_fd, HCI_EV_REMOTE_FEATURES, &features,
sizeof(features));
struct {
struct hci_ev_le_meta le_meta;
struct hci_ev_le_conn_complete le_conn;
} le_conn;
memset(&le_conn, 0, sizeof(le_conn));
le_conn.le_meta.subevent = HCI_EV_LE_CONN_COMPLETE;
memset(&le_conn.le_conn.bdaddr, 0xaa, 6);
*(uint8_t*)&le_conn.le_conn.bdaddr.b[5] = 0x11;
le_conn.le_conn.role = 1;
le_conn.le_conn.handle = HCI_HANDLE_2;
hci_send_event_packet(vhci_fd, HCI_EV_LE_META, &le_conn, sizeof(le_conn));
pthread_join(th, NULL);
close(hci_sock);
}
static void setup_common() {
if (mount(0, "/sys/fs/fuse/connections", "fusectl", 0, 0)) {
}
}
static void setup_binderfs() {
if (mkdir("/dev/binderfs", 0777)) {
}
if (mount("binder", "/dev/binderfs", "binder", 0, NULL)) {
}
if (symlink("/dev/binderfs", "./binderfs")) {
}
}
static void loop();
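// Common per-process sandbox setup: pins the init network namespace to
// fd 201, applies resource limits, unshares several namespaces, and tunes
// SysV IPC sysctls.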
static void sandbox_common() {
prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
setsid();
int netns = open("/proc/self/ns/net", O_RDONLY);
if (netns == -1) exit(1);
if (dup2(netns, kInitNetNsFd) < 0) exit(1);
close(netns);
struct rlimit rlim;
rlim.rlim_cur = rlim.rlim_max = (200 << 20);
setrlimit(RLIMIT_AS, &rlim);
rlim.rlim_cur = rlim.rlim_max = 32 << 20;
setrlimit(RLIMIT_MEMLOCK, &rlim);
rlim.rlim_cur = rlim.rlim_max = 136 << 20;
setrlimit(RLIMIT_FSIZE, &rlim);
rlim.rlim_cur = rlim.rlim_max = 1 << 20;
setrlimit(RLIMIT_STACK, &rlim);
rlim.rlim_cur = rlim.rlim_max = 128 << 20;
setrlimit(RLIMIT_CORE, &rlim);
rlim.rlim_cur = rlim.rlim_max = 256;
setrlimit(RLIMIT_NOFILE, &rlim);
if (unshare(CLONE_NEWNS)) {
}
if (mount(NULL, "/", NULL, MS_REC | MS_PRIVATE, NULL)) {
}
if (unshare(CLONE_NEWIPC)) {
}
if (unshare(0x02000000)) {
}
if (unshare(CLONE_NEWUTS)) {
}
if (unshare(CLONE_SYSVSEM)) {
}
typedef struct {
const char* name;
const char* value;
} sysctl_t;
static const sysctl_t sysctls[] = {
{"/proc/sys/kernel/shmmax", "16777216"},
{"/proc/sys/kernel/shmall", "536870912"},
{"/proc/sys/kernel/shmmni", "1024"},
{"/proc/sys/kernel/msgmax", "8192"},
{"/proc/sys/kernel/msgmni", "1024"},
{"/proc/sys/kernel/msgmnb", "1024"},
{"/proc/sys/kernel/sem", "1024 1048576 500 1024"},
};
unsigned i;
for (i = 0; i < sizeof(sysctls) / sizeof(sysctls[0]); i++)
write_file(sysctls[i].name, sysctls[i].value);
}
static int wait_for_loop(int pid) {
if (pid < 0) exit(1);
int status = 0;
while (waitpid(-1, &status, __WALL) != pid) {
}
return WEXITSTATUS(status);
}
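// Drops CAP_SYS_PTRACE and CAP_SYS_NICE from the process's capability sets.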
static void drop_caps(void) {
struct __user_cap_header_struct cap_hdr = {};
struct __user_cap_data_struct cap_data[2] = {};
cap_hdr.version = _LINUX_CAPABILITY_VERSION_3;
cap_hdr.pid = getpid();
if (syscall(SYS_capget, &cap_hdr, &cap_data)) exit(1);
const int drop = (1 << CAP_SYS_PTRACE) | (1 << CAP_SYS_NICE);
cap_data[0].effective &= ~drop;
cap_data[0].permitted &= ~drop;
cap_data[0].inheritable &= ~drop;
if (syscall(SYS_capset, &cap_hdr, &cap_data)) exit(1);
}
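// "none" sandbox: fork a child that sets up the virtual HCI device and the
// sandbox, then runs the fuzzing loop.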
static int do_sandbox_none(void) {
if (unshare(CLONE_NEWPID)) {
}
int pid = fork();
if (pid != 0) return wait_for_loop(pid);
setup_common();
initialize_vhci();
sandbox_common();
drop_caps();
if (unshare(CLONE_NEWNET)) {
}
write_file("/proc/sys/net/ipv4/ping_group_range", "0 65535");
setup_binderfs();
loop();
exit(1);
}
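// Kills the test child and its process group, aborting any stuck FUSE
// connections so waitpid() can reap it.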
static void kill_and_wait(int pid, int* status) {
kill(-pid, SIGKILL);
kill(pid, SIGKILL);
for (int i = 0; i < 100; i++) {
if (waitpid(-1, status, WNOHANG | __WALL) == pid) return;
usleep(1000);
}
DIR* dir = opendir("/sys/fs/fuse/connections");
if (dir) {
for (;;) {
struct dirent* ent = readdir(dir);
if (!ent) break;
if (strcmp(ent->d_name, ".") == 0 || strcmp(ent->d_name, "..") == 0)
continue;
char abort[300];
snprintf(abort, sizeof(abort), "/sys/fs/fuse/connections/%s/abort",
ent->d_name);
int fd = open(abort, O_WRONLY);
if (fd == -1) {
continue;
}
if (write(fd, abort, 1) < 0) {
}
close(fd);
}
closedir(dir);
} else {
}
while (waitpid(-1, status, __WALL) != pid) {
}
}
static void setup_test() {
prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
setpgrp();
write_file("/proc/self/oom_score_adj", "1000");
}
static void close_fds() {
for (int fd = 3; fd < MAX_FDS; fd++) close(fd);
}
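// Per-call worker threads: execute_one() dispatches each syscall of the
// program to its own thread so the calls can race (call 1 is issued
// asynchronously, without waiting for completion).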
struct thread_t {
int created, call;
event_t ready, done;
};
static struct thread_t threads[16];
static void execute_call(int call);
static int running;
static void* thr(void* arg) {
struct thread_t* th = (struct thread_t*)arg;
for (;;) {
event_wait(&th->ready);
event_reset(&th->ready);
execute_call(th->call);
__atomic_fetch_sub(&running, 1, __ATOMIC_RELAXED);
event_set(&th->done);
}
return 0;
}
static void execute_one(void) {
int i, call, thread;
for (call = 0; call < 3; call++) {
for (thread = 0; thread < (int)(sizeof(threads) / sizeof(threads[0]));
thread++) {
struct thread_t* th = &threads[thread];
if (!th->created) {
th->created = 1;
event_init(&th->ready);
event_init(&th->done);
event_set(&th->done);
thread_start(thr, th);
}
if (!event_isset(&th->done)) continue;
event_reset(&th->done);
th->call = call;
__atomic_fetch_add(&running, 1, __ATOMIC_RELAXED);
event_set(&th->ready);
if (call == 1) break;
event_timedwait(&th->done, 50);
break;
}
}
for (i = 0; i < 100 && __atomic_load_n(&running, __ATOMIC_RELAXED); i++)
sleep_ms(1);
close_fds();
}
static void execute_one(void);
#define WAIT_FLAGS __WALL
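// Top-level loop: fork a fresh child for each iteration, run the program in
// it, and kill the child after a 5-second deadline.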
static void loop(void) {
int iter = 0;
for (;; iter++) {
int pid = fork();
if (pid < 0) exit(1);
if (pid == 0) {
setup_test();
execute_one();
exit(0);
}
int status = 0;
uint64_t start = current_time_ms();
for (;;) {
if (waitpid(-1, &status, WNOHANG | WAIT_FLAGS) == pid) break;
sleep_ms(1);
if (current_time_ms() - start < 5000) continue;
kill_and_wait(pid, &status);
break;
}
}
}
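// The reproducer program itself: call 0 opens an AF_BLUETOOTH SOCK_SEQPACKET
// BTPROTO_SCO socket in the init netns, call 1 connects it to the fake
// controller's address aa:aa:aa:aa:aa:10 (issued asynchronously), and call 2
// connects again to an all-zero address; this sequence appears to race
// socket teardown against the sco_sock_timeout work item shown in the KASAN
// report above.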
uint64_t r[1] = {0xffffffffffffffff};
void execute_call(int call) {
intptr_t res = 0;
switch (call) {
case 0:
res = -1;
res = syz_init_net_socket(/*fam=*/0x1f, /*type=*/5, /*proto=*/2);
if (res != -1) r[0] = res;
break;
case 1:
*(uint16_t*)0x20000040 = 0x1f;
memset((void*)0x20000042, 170, 5);
*(uint8_t*)0x20000047 = 0x10;
syscall(__NR_connect, /*fd=*/r[0], /*addr=*/0x20000040ul,
/*addrlen=*/8ul);
break;
case 2:
*(uint16_t*)0x20000000 = 0x1f;
memset((void*)0x20000002, 0, 6);
syscall(__NR_connect, /*fd=*/r[0], /*addr=*/0x20000000ul,
/*addrlen=*/8ul);
break;
}
}
int main(void) {
syscall(__NR_mmap, /*addr=*/0x1ffff000ul, /*len=*/0x1000ul, /*prot=*/0ul,
/*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
syscall(__NR_mmap, /*addr=*/0x20000000ul, /*len=*/0x1000000ul, /*prot=*/7ul,
/*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
syscall(__NR_mmap, /*addr=*/0x21000000ul, /*len=*/0x1000ul, /*prot=*/0ul,
/*flags=*/0x32ul, /*fd=*/-1, /*offset=*/0ul);
do_sandbox_none();
return 0;
}
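Syzkaller program corresponding to the reproducer above: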
r0 = syz_init_net_socket$bt_sco(0x1f, 0x5, 0x2)
connect$bt_sco(r0, &(0x7f0000000040)={0x1f, @fixed={'\xaa\xaa\xaa\xaa\xaa', 0x10}}, 0x8) (async)
connect$bt_sco(r0, &(0x7f0000000000), 0x8)