@benquike (Gist, created October 3, 2018)
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt
#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock_types.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>
/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4
/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time is allowed).
 */
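/*
 * For illustration, a minimal user-space sequence that drives this state
 * machine (error handling omitted; COVER_SIZE is a caller-chosen buffer
 * size in unsigned longs):
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	... issue the syscalls of interest ...
 *	ioctl(fd, KCOV_DISABLE, 0);
 */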
struct kcov {
/*
* Reference counter. We keep one for:
* - opened file descriptor
* - task with enabled coverage (we can't unwire it from another task)
*/
atomic_t refcount;
/* The lock protects mode, size, area and t. */
spinlock_t lock;
enum kcov_mode mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
unsigned size;
/* Coverage buffer shared with user space. */
unsigned char *area;
/* Task for which we collect coverage, or NULL. */
struct task_struct *t;
};
/*
 * AFL-style edge coverage state, used to collect coverage even in
 * non-task contexts. The bitmap lives in the ivshmem BAR2 region
 * shared with the host.
 */
static volatile unsigned long prev_loc;
static volatile unsigned char * volatile area;
static volatile int afl_style_inst;
static volatile int afl_inst_start;

extern unsigned char *ivshmem_bar2_map_base(void);
extern bool is_ivshmem_enabled(void);
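/*
 * The AFL-style path in __sanitizer_cov_trace_pc() follows classic AFL
 * edge coverage: each executed edge (prev_loc, cur_loc) selects one byte
 * of a 64 KiB shared bitmap via XOR, and that byte counts how often the
 * edge was taken. Classic AFL shifts prev_loc right by one so that the
 * edges A->B and B->A land on different bytes; here prev_loc instead
 * stores a hash of the previous PC, which likewise keeps the mapping
 * asymmetric.
 */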
void start_afl_inst(void)
{
	/* Seed the previous-location hash and map the shared bitmap. */
	WRITE_ONCE(prev_loc, hash_long(0, BITS_PER_LONG));
	WRITE_ONCE(area, ivshmem_bar2_map_base());
	/* Publish the enable flags only after the state above is ready. */
	WRITE_ONCE(afl_style_inst, 1);
	WRITE_ONCE(afl_inst_start, 1);
	barrier();
}
EXPORT_SYMBOL(start_afl_inst);
void stop_afl_inst(void)
{
	afl_style_inst = 0;
	afl_inst_start = 0;
	prev_loc = 0;
	area = NULL;
	barrier();
}
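/*
 * Intended pairing (a sketch; assumes the fuzzing harness on the host
 * toggles these around each input, e.g. via a hypercall or device write):
 *
 *	start_afl_inst();
 *	... run one input under test ...
 *	stop_afl_inst();
 *	... host reads edge hit counts from the ivshmem BAR2 bitmap ...
 */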
static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
unsigned int mode;
	/*
	 * We are interested in code coverage as a function of a syscall's
	 * inputs, so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts; it is paired with barrier()/WRITE_ONCE() in
	 * kcov_ioctl_locked().
	 */
barrier();
return mode == needed_mode;
}
static unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
ip -= kaslr_offset();
#endif
return ip;
}
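/*
 * Example (hypothetical numbers): with CONFIG_RANDOMIZE_BASE, a PC
 * observed at runtime includes the per-boot KASLR offset; subtracting
 * kaslr_offset() maps it back to the link-time address so that coverage
 * is comparable across reboots:
 *
 *	runtime ip:       0xffffffff9e2a1f30
 *	kaslr_offset():   0x000000001e000000
 *	canonical ip:     0xffffffff802a1f30
 */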
/*
* Entry point from instrumented code.
* This is called once per basic-block/edge.
*/
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned char *afl_area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;
	int inst = READ_ONCE(afl_style_inst);
	int start = READ_ONCE(afl_inst_start);

	barrier();
	if (inst) {
		unsigned int afl_pos;

		if (!start)
			return;
		/*
		 * AFL-style path: hash the (prev, cur) edge into one byte
		 * of the shared bitmap and bump its hit counter. The mask
		 * keeps the index inside the 64 KiB bitmap.
		 */
		afl_area = ivshmem_bar2_map_base();
		afl_pos = (prev_loc ^ ip) & 0xFFFF;
		++afl_area[afl_pos];
		prev_loc = hash_long(ip, BITS_PER_LONG);
	} else {
		t = current;
		if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
			return;
		area = t->kcov_area;
		/* The first 64-bit word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = ip;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
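/*
 * For reference, a sketch of how user space consumes the KCOV_TRACE_PC
 * buffer written above (cover is the mmap()ed buffer from the sequence
 * sketched near struct kcov; per Documentation/dev-tools/kcov.rst the
 * counter should be read atomically):
 *
 *	unsigned long i, n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (i = 0; i < n; i++)
 *		printf("0x%lx\n", cover[i + 1]);
 */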
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
struct task_struct *t;
u64 *area;
u64 count, start_index, end_pos, max_pos;
t = current;
if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
return;
ip = canonicalize_ip(ip);
/*
* We write all comparison arguments and types as u64.
* The buffer was allocated for t->kcov_size unsigned longs.
*/
area = (u64 *)t->kcov_area;
max_pos = t->kcov_size * sizeof(unsigned long);
count = READ_ONCE(area[0]);
/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
start_index = 1 + count * KCOV_WORDS_PER_CMP;
end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
if (likely(end_pos <= max_pos)) {
area[start_index] = type;
area[start_index + 1] = arg1;
area[start_index + 2] = arg2;
area[start_index + 3] = ip;
WRITE_ONCE(area[0], count + 1);
}
}
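/*
 * Resulting buffer layout, with n = number of records written so far
 * (each record is KCOV_WORDS_PER_CMP u64 words):
 *
 *	area[0]              n
 *	area[1 + 4*n + 0]    type (KCOV_CMP_SIZE(...) | optional KCOV_CMP_CONST)
 *	area[1 + 4*n + 1]    arg1
 *	area[1 + 4*n + 2]    arg2
 *	area[1 + 4*n + 3]    caller ip
 */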
void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);
void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);
void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);
void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);
void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);
void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
u64 i;
u64 count = cases[0];
u64 size = cases[1];
u64 type = KCOV_CMP_CONST;
switch (size) {
case 8:
type |= KCOV_CMP_SIZE(0);
break;
case 16:
type |= KCOV_CMP_SIZE(1);
break;
case 32:
type |= KCOV_CMP_SIZE(2);
break;
case 64:
type |= KCOV_CMP_SIZE(3);
break;
default:
return;
}
for (i = 0; i < count; i++)
write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
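/*
 * Note on the cases table: for __sanitizer_cov_trace_switch() the
 * compiler emits one constant table per switch statement, where
 * cases[0] is the number of case values, cases[1] is the bit width of
 * the switched-on value, and the case values follow. For example,
 * switch (x) { case 1: ...; case 7: ...; } on a u32 would produce
 * { 2, 32, 1, 7 } (an illustrative example of the sancov trace-cmp ABI).
 */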
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
static void kcov_get(struct kcov *kcov)
{
atomic_inc(&kcov->refcount);
}
static void kcov_put(struct kcov *kcov)
{
if (atomic_dec_and_test(&kcov->refcount)) {
vfree(kcov->area);
kfree(kcov);
}
}