From 59134acc5d1ee9fd3072947625eb30d76db71971 Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf <sultan@openresty.com>
Date: Wed, 3 Feb 2021 17:02:30 -0800
Subject: [PATCH v7] stp_utrace: protect utrace structs with a reference
counter
---
runtime/stp_utrace.c | 520 +++++++++++++++++++++++--------------------
1 file changed, 275 insertions(+), 245 deletions(-)
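
The hunks below apply one lookup/teardown pattern throughout: every path that
used to call task_utrace_struct() now takes a reference under RCU and drops it
when done, and the final put performs the teardown that utrace_free() used to
do. A condensed sketch of that pattern, assembled from the hunks below — not
the patch itself: bodies are simplified, and the plain hlist_for_each_entry_rcu()
stands in for the stap_ wrapper used in the real code.

static struct utrace *get_utrace_struct(struct utrace_bucket *bucket,
					struct task_struct *task)
{
	struct utrace *utrace, *found = NULL;

	/* RCU protects the bucket walk; the refcount protects the object.
	 * atomic_add_unless(&refcount, 1, 0) refuses to bump a count that
	 * has already hit zero, so a dying utrace is never handed out. */
	rcu_read_lock();
	hlist_for_each_entry_rcu(utrace, &bucket->head, hlist) {
		if (utrace->task == task) {
			if (atomic_add_unless(&utrace->refcount, 1, 0))
				found = utrace;
			break;
		}
	}
	rcu_read_unlock();
	return found;
}

static void put_utrace_struct(struct utrace_bucket *bucket,
			      struct utrace *utrace, int count)
{
	/* Drop 'count' references at once; only the final put cleans up. */
	if (!utrace || atomic_sub_return(count, &utrace->refcount))
		return;
	utrace_cleanup(bucket, utrace);	/* unhashes and frees via RCU */
}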
diff --git a/runtime/stp_utrace.c b/runtime/stp_utrace.c
index 9cc910222..6bddfce37 100644
--- a/runtime/stp_utrace.c
+++ b/runtime/stp_utrace.c
@@ -121,7 +121,8 @@ struct utrace {
atomic_t report_work_added; /* called task_work_add() on
* 'report_work' */
- unsigned long utrace_flags;
+ atomic_t refcount;
+ unsigned int utrace_flags;
struct hlist_node hlist; /* task_utrace_table linkage */
struct task_struct *task;
@@ -446,7 +447,7 @@ error:
return rc;
}
-static void utrace_cleanup(struct utrace *utrace);
+static void utrace_cleanup(struct utrace_bucket *bucket, struct utrace *utrace);
static int utrace_exit(void)
{
@@ -480,11 +481,8 @@ static int utrace_exit(void)
rcu_read_lock();
stap_hlist_for_each_entry_rcu(utrace, node, &bucket->head, hlist) {
- stp_spin_lock_irqsave(&bucket->lock, flags);
- hlist_del_rcu(&utrace->hlist);
- stp_spin_unlock_irqrestore(&bucket->lock, flags);
-
- utrace_cleanup(utrace);
+ if (atomic_read(&utrace->refcount))
+ utrace_cleanup(bucket, utrace);
}
rcu_read_unlock();
}
@@ -513,8 +511,13 @@ static void
stp_task_notify_resume(struct task_struct *target, struct utrace *utrace)
{
if (atomic_add_unless(&utrace->resume_work_added, 1, 1)) {
- int rc = stp_task_work_add(target, &utrace->resume_work);
+ int rc;
+
+ /* Don't need to use utrace get/put because we have a ref */
+ atomic_inc(&utrace->refcount);
+ rc = stp_task_work_add(target, &utrace->resume_work);
if (rc != 0) {
+ atomic_dec(&utrace->refcount);
atomic_set(&utrace->resume_work_added, 0);
/* stp_task_work_add() returns -ESRCH if the
@@ -532,12 +535,19 @@ stp_task_notify_resume(struct task_struct *target, struct utrace *utrace)
/*
* Clean up everything associated with @task.utrace.
- *
- * This routine must be called under the utrace_cleanup_lock.
*/
-static void utrace_cleanup(struct utrace *utrace)
+static void utrace_cleanup(struct utrace_bucket *bucket, struct utrace *utrace)
{
struct utrace_engine *engine, *next;
+ unsigned long flags;
+
+ /* Put the reference on the task struct */
+ put_task_struct(utrace->task);
+
+ /* Remove this utrace from the mapping list of tasks to struct utrace */
+ stp_spin_lock_irqsave(&bucket->lock, flags);
+ hlist_del_rcu(&utrace->hlist);
+ stp_spin_unlock_irqrestore(&bucket->lock, flags);
/* Free engines associated with the struct utrace, starting
* with the 'attached' list then doing the 'attaching' list. */
@@ -575,28 +585,29 @@ static void utrace_syscall_exit_work(struct task_work *work);
static void utrace_exec_work(struct task_work *work);
static void utrace_clone_work(struct task_work *work);
-/* MACROS for utrace_cancel_all_task_work(): */
-#ifdef STP_TF_DEBUG
-#define CANCEL_IF_WORK(added_field, task_work_func) \
- if (atomic_add_unless(&added_field, \
- -1, 0)) { \
- if (stp_task_work_cancel(utrace->task, \
- &task_work_func) == NULL) \
- printk(KERN_ERR "%s:%d - task_work_cancel() failed for %s? task %p, %d, %s\n", \
- __FUNCTION__, __LINE__, #task_work_func, \
- utrace->task, utrace->task->tgid, \
- (utrace->task->comm \
- ? utrace->task->comm \
- : "UNKNOWN")); \
- }
-#else
-#define CANCEL_IF_WORK(added_field, task_work_func) \
- if (atomic_add_unless(&added_field, \
- -1, 0)) { \
- stp_task_work_cancel(utrace->task, \
- &task_work_func); \
- }
-#endif
+static void utrace_cancel_workers(struct utrace_bucket *bucket,
+ struct utrace *utrace)
+{
+ static const task_work_func_t task_workers[] = {
+ utrace_resume,
+ utrace_report_work,
+ utrace_exec_work,
+ utrace_syscall_entry_work,
+ utrace_syscall_exit_work,
+ utrace_clone_work
+ };
+ int i;
+
+ /*
+ * Cancel every task worker instance. stp_task_work_cancel() only
+ * cancels one worker at a time, so we need to run it multiple times to
+ * ensure all of the workers are really gone. We don't bother putting
+ * the references on the utrace struct because the structs will be freed
+ * by force later, after task workers can no longer run or be queued.
+ */
+ for (i = 0; i < ARRAY_SIZE(task_workers); i++)
+ while (stp_task_work_cancel(utrace->task, task_workers[i]));
+}
static void utrace_cancel_all_task_work(void)
{
@@ -605,7 +616,6 @@ static void utrace_cancel_all_task_work(void)
struct hlist_head *head;
struct hlist_node *utrace_node;
struct __stp_utrace_task_work *task_node;
- unsigned long flags;
/* Cancel any pending utrace task_work item(s). */
for (i = 0; i < TASK_UTRACE_TABLE_SIZE; i++) {
@@ -613,18 +623,11 @@ static void utrace_cancel_all_task_work(void)
rcu_read_lock();
stap_hlist_for_each_entry_rcu(utrace, utrace_node, &bucket->head, hlist) {
- CANCEL_IF_WORK(utrace->resume_work_added, utrace_resume);
- CANCEL_IF_WORK(utrace->report_work_added, utrace_report_work);
+ if (atomic_read(&utrace->refcount))
+ utrace_cancel_workers(bucket, utrace);
}
rcu_read_unlock();
}
-
- /* Cancel any pending task_work_list item(s). */
- stp_spin_lock_irqsave(&__stp_utrace_task_work_list_lock, flags);
- list_for_each_entry(task_node, &__stp_utrace_task_work_list, list) {
- stp_task_work_cancel(task_node->task, task_node->work.func);
- }
- stp_spin_unlock_irqrestore(&__stp_utrace_task_work_list_lock, flags);
}
static void utrace_shutdown(void)
@@ -658,13 +661,13 @@ static void utrace_shutdown(void)
utrace_cancel_all_task_work();
}
-static struct utrace_bucket *get_utrace_bucket(struct task_struct *task)
+static struct utrace_bucket *find_utrace_bucket(struct task_struct *task)
{
return &task_utrace_table[hash_ptr(task, TASK_UTRACE_HASH_BITS)];
}
-static struct utrace *task_utrace_struct(struct utrace_bucket *bucket,
- struct task_struct *task)
+static struct utrace *get_utrace_struct(struct utrace_bucket *bucket,
+ struct task_struct *task)
{
struct utrace *utrace, *found = NULL;
struct hlist_node *node;
@@ -672,7 +675,8 @@ static struct utrace *task_utrace_struct(struct utrace_bucket *bucket,
rcu_read_lock();
stap_hlist_for_each_entry_rcu(utrace, node, &bucket->head, hlist) {
if (utrace->task == task) {
- found = utrace;
+ if (atomic_add_unless(&utrace->refcount, 1, 0))
+ found = utrace;
break;
}
}
@@ -699,6 +703,9 @@ static struct utrace *utrace_task_alloc(struct utrace_bucket *bucket,
if (unlikely(!utrace))
return NULL;
+ /* Acquire a reference to the task struct so it remains valid */
+ get_task_struct(task);
+
spin_lock_init(&utrace->lock);
INIT_LIST_HEAD(&utrace->attached);
INIT_LIST_HEAD(&utrace->attaching);
@@ -708,6 +715,7 @@ static struct utrace *utrace_task_alloc(struct utrace_bucket *bucket,
stp_init_task_work(&utrace->report_work, &utrace_report_work);
atomic_set(&utrace->resume_work_added, 0);
atomic_set(&utrace->report_work_added, 0);
+ atomic_set(&utrace->refcount, 1);
stp_spin_lock_irqsave(&bucket->lock, flags);
hlist_add_head_rcu(&utrace->hlist, &bucket->head);
@@ -715,20 +723,6 @@ static struct utrace *utrace_task_alloc(struct utrace_bucket *bucket,
return utrace;
}
-/* macros for utrace_free(): */
-#define FREE_IF_WORK(added_field, task_work_func) \
- if (atomic_add_unless(&added_field, -1, 0)) { \
- if ((stp_task_work_cancel(utrace->task, &task_work_func) \
- == NULL) \
- && (utrace->task->flags & ~PF_EXITING) \
- && (utrace->task->exit_state == 0)) \
- printk(KERN_ERR "%s:%d * task_work_cancel() failed for %s? task %p, %d, %s, 0x%lx 0x%x\n", \
- __FUNCTION__, __LINE__, #task_work_func, \
- utrace->task, utrace->task->tgid, \
- (utrace->task->comm ? utrace->task->comm \
- : "UNKNOWN"), utrace->task->state, utrace->task->exit_state); \
- }
-
/*
* Correctly free a @utrace structure.
*
@@ -736,36 +730,13 @@ static struct utrace *utrace_task_alloc(struct utrace_bucket *bucket,
* free_task() when @task is being deallocated. But free_task() has no
* tracepoint we can easily hook.
*/
-static void utrace_free(struct utrace_bucket *bucket, struct utrace *utrace)
+static void put_utrace_struct(struct utrace_bucket *bucket,
+ struct utrace *utrace, int count)
{
- unsigned long flags;
-
- if (unlikely(!utrace))
+ if (!utrace || atomic_sub_return(count, &utrace->refcount))
return;
- /* Remove this utrace from the mapping list of tasks to
- * struct utrace. */
- stp_spin_lock_irqsave(&bucket->lock, flags);
- hlist_del_rcu(&utrace->hlist);
- stp_spin_unlock_irqrestore(&bucket->lock, flags);
-
- /* Free the utrace struct. */
-#ifdef STP_TF_DEBUG
- spin_lock(&utrace->lock);
- if (unlikely(utrace->reporting)
- || unlikely(!list_empty(&utrace->attached))
- || unlikely(!list_empty(&utrace->attaching)))
- printk(KERN_ERR "%s:%d - reporting? %p, attached empty %d, attaching empty %d\n",
- __FUNCTION__, __LINE__, utrace->reporting,
- list_empty(&utrace->attached),
- list_empty(&utrace->attaching));
- spin_unlock(&utrace->lock);
-#endif
-
- FREE_IF_WORK(utrace->resume_work_added, utrace_resume);
- FREE_IF_WORK(utrace->report_work_added, utrace_report_work);
-
- kfree_rcu(utrace, rcu);
+ utrace_cleanup(bucket, utrace);
}
/*
@@ -841,7 +812,7 @@ static int utrace_add_engine(struct task_struct *target,
* utrace_flags is not zero. Since we did unlock+lock
* at least once after utrace_task_alloc() installed
* ->utrace, we have the necessary barrier which pairs
- * with rmb() in task_utrace_struct().
+ * with rmb() in get_utrace_struct().
*/
ret = -ESRCH;
/* FIXME: Hmm, no reap in the brave new world... */
@@ -914,9 +885,10 @@ static struct utrace_engine *utrace_attach_task(
struct task_struct *target, int flags,
const struct utrace_engine_ops *ops, void *data)
{
- struct utrace_bucket *bucket = get_utrace_bucket(target);
- struct utrace *utrace = task_utrace_struct(bucket, target);
+ struct utrace_bucket *bucket = find_utrace_bucket(target);
+ struct utrace *utrace = get_utrace_struct(bucket, target);
struct utrace_engine *engine;
+ bool utrace_found = utrace;
int ret;
#ifdef STP_TF_DEBUG
@@ -925,6 +897,7 @@ static struct utrace_engine *utrace_attach_task(
#endif
if (!(flags & UTRACE_ATTACH_CREATE)) {
+ /* Don't need to put utrace struct if it's NULL */
if (unlikely(!utrace))
return ERR_PTR(-ENOENT);
spin_lock(&utrace->lock);
@@ -932,27 +905,35 @@ static struct utrace_engine *utrace_attach_task(
if (engine)
utrace_engine_get(engine);
spin_unlock(&utrace->lock);
+ put_utrace_struct(bucket, utrace, 1);
return engine ?: ERR_PTR(-ENOENT);
}
- if (unlikely(!ops) || unlikely(ops == &utrace_detached_ops))
+ if (unlikely(!ops) || unlikely(ops == &utrace_detached_ops)) {
+ put_utrace_struct(bucket, utrace, 1);
return ERR_PTR(-EINVAL);
+ }
- if (unlikely(target->flags & PF_KTHREAD))
+ if (unlikely(target->flags & PF_KTHREAD)) {
/*
* Silly kernel, utrace is for users!
*/
+ put_utrace_struct(bucket, utrace, 1);
return ERR_PTR(-EPERM);
+ }
if (!utrace) {
utrace = utrace_task_alloc(bucket, target);
+ /* Don't need to put utrace struct if it's NULL */
if (unlikely(!utrace))
return ERR_PTR(-ENOMEM);
}
engine = _stp_kmalloc(sizeof(*engine));
- if (unlikely(!engine))
+ if (unlikely(!engine)) {
+ put_utrace_struct(bucket, utrace, 1);
return ERR_PTR(-ENOMEM);
+ }
/*
* Initialize the new engine structure. It starts out with one ref
@@ -971,6 +952,14 @@ static struct utrace_engine *utrace_attach_task(
engine = ERR_PTR(ret);
}
+ /*
+ * Don't put the utrace struct if it was just made and successfully
+ * attached to, otherwise it'll disappear. We only put the new utrace
+ * struct if the engine couldn't be attached. If the utrace struct was
+ * acquired via get_utrace_struct() then we must put our reference.
+ */
+ if (ret || utrace_found)
+ put_utrace_struct(bucket, utrace, 1);
return engine;
}
@@ -1016,15 +1005,11 @@ static const struct utrace_engine_ops utrace_detached_ops = {
* utrace_report_death
* utrace_release_task
*/
-static struct utrace *get_utrace_lock(struct task_struct *target,
- struct utrace_engine *engine,
- bool attached)
+static int get_utrace_lock(struct utrace *utrace, struct utrace_engine *engine,
+ bool attached)
__acquires(utrace->lock)
{
- struct utrace_bucket *bucket;
- struct utrace *utrace;
-
- rcu_read_lock();
+ int ret = 0;
/*
* If this engine was already detached, bail out before we look at
@@ -1036,22 +1021,11 @@ static struct utrace *get_utrace_lock(struct task_struct *target,
* on the list. In the latter case, utrace_barrier() still works,
* since the target might be in the middle of an old callback.
*/
- if (unlikely(!engine->ops)) {
- rcu_read_unlock();
- return ERR_PTR(-ESRCH);
- }
-
- if (unlikely(engine->ops == &utrace_detached_ops)) {
- rcu_read_unlock();
- return attached ? ERR_PTR(-ESRCH) : ERR_PTR(-ERESTARTSYS);
- }
+ if (unlikely(!engine->ops))
+ return -ESRCH;
- bucket = get_utrace_bucket(target);
- utrace = task_utrace_struct(bucket, target);
- if (unlikely(!utrace)) {
- rcu_read_unlock();
- return ERR_PTR(-ESRCH);
- }
+ if (unlikely(engine->ops == &utrace_detached_ops))
+ return attached ? -ESRCH : -ERESTARTSYS;
spin_lock(&utrace->lock);
if (unlikely(utrace->reap) || unlikely(!engine->ops) ||
@@ -1061,13 +1035,13 @@ static struct utrace *get_utrace_lock(struct task_struct *target,
* it had been reaped or detached already.
*/
spin_unlock(&utrace->lock);
- utrace = ERR_PTR(-ESRCH);
if (!attached && engine->ops == &utrace_detached_ops)
- utrace = ERR_PTR(-ERESTARTSYS);
+ ret = -ERESTARTSYS;
+ else
+ ret = -ESRCH;
}
- rcu_read_unlock();
- return utrace;
+ return ret;
}
/*
@@ -1154,9 +1128,10 @@ static int utrace_set_events(struct task_struct *target,
struct utrace_engine *engine,
unsigned long events)
{
+ struct utrace_bucket *bucket;
struct utrace *utrace;
unsigned long old_flags, old_utrace_flags;
- int ret = -EALREADY;
+ int ret;
/*
* We just ignore the internal bit, so callers can use
@@ -1164,9 +1139,14 @@ static int utrace_set_events(struct task_struct *target,
*/
events &= ~ENGINE_STOP;
- utrace = get_utrace_lock(target, engine, true);
- if (unlikely(IS_ERR(utrace)))
- return PTR_ERR(utrace);
+ bucket = find_utrace_bucket(target);
+ utrace = get_utrace_struct(bucket, target);
+ if (!utrace)
+ return -ESRCH;
+
+ ret = get_utrace_lock(utrace, engine, true);
+ if (ret)
+ goto put_utrace;
old_utrace_flags = utrace->utrace_flags;
old_flags = engine->flags & ~ENGINE_STOP;
@@ -1179,8 +1159,10 @@ static int utrace_set_events(struct task_struct *target,
(((events & ~old_flags) & _UTRACE_DEATH_EVENTS) ||
(utrace->death &&
((old_flags & ~events) & _UTRACE_DEATH_EVENTS)) ||
- (utrace->reap && ((old_flags & ~events) & UTRACE_EVENT(REAP)))))
+ (utrace->reap && ((old_flags & ~events) & UTRACE_EVENT(REAP))))) {
+ ret = -EALREADY;
goto unlock;
+ }
/*
* When setting these flags, it's essential that we really
@@ -1201,6 +1183,7 @@ static int utrace_set_events(struct task_struct *target,
//read_lock(&tasklist_lock);
if (unlikely(target->exit_state)) {
//read_unlock(&tasklist_lock);
+ ret = -EALREADY;
goto unlock;
}
utrace->utrace_flags |= events;
@@ -1227,7 +1210,8 @@ static int utrace_set_events(struct task_struct *target,
}
unlock:
spin_unlock(&utrace->lock);
-
+put_utrace:
+ put_utrace_struct(bucket, utrace, 1);
return ret;
}
@@ -1348,15 +1332,7 @@ static bool utrace_reset(struct task_struct *task, struct utrace *utrace)
if (task_is_traced(task) && !(flags & ENGINE_STOP))
utrace_wakeup(task, utrace);
- /*
- * In theory spin_lock() doesn't imply rcu_read_lock().
- * Once we clear ->utrace_flags this task_struct can go away
- * because tracehook_prepare_release_task() path does not take
- * utrace->lock when ->utrace_flags == 0.
- */
- rcu_read_lock();
utrace->utrace_flags = flags;
- rcu_read_unlock();
spin_unlock(&utrace->lock);
put_detached_list(&detached);
@@ -1371,10 +1347,14 @@ static void utrace_finish_stop(void)
* sure we do nothing until the tracer drops utrace->lock.
*/
if (unlikely(__fatal_signal_pending(current))) {
- struct utrace_bucket *bucket = get_utrace_bucket(current);
- struct utrace *utrace = task_utrace_struct(bucket, current);
- spin_lock(&utrace->lock);
- spin_unlock(&utrace->lock);
+ struct utrace_bucket *bucket = find_utrace_bucket(current);
+ struct utrace *utrace = get_utrace_struct(bucket, current);
+
+ if (utrace) {
+ spin_lock(&utrace->lock);
+ spin_unlock(&utrace->lock);
+ put_utrace_struct(bucket, utrace, 1);
+ }
}
}
@@ -1663,6 +1643,7 @@ static int utrace_control(struct task_struct *target,
struct utrace_engine *engine,
enum utrace_resume_action action)
{
+ struct utrace_bucket *bucket;
struct utrace *utrace;
bool reset;
int ret;
@@ -1686,9 +1667,14 @@ static int utrace_control(struct task_struct *target,
return -EINVAL;
}
- utrace = get_utrace_lock(target, engine, true);
- if (unlikely(IS_ERR(utrace)))
- return PTR_ERR(utrace);
+ bucket = find_utrace_bucket(target);
+ utrace = get_utrace_struct(bucket, target);
+ if (!utrace)
+ return -ESRCH;
+
+ ret = get_utrace_lock(utrace, engine, true);
+ if (ret)
+ goto put_utrace;
reset = task_is_traced(target);
ret = 0;
@@ -1703,7 +1689,7 @@ static int utrace_control(struct task_struct *target,
ret = utrace_control_dead(target, utrace, action);
if (ret) {
spin_unlock(&utrace->lock);
- return ret;
+ goto put_utrace;
}
reset = true;
}
@@ -1802,6 +1788,8 @@ static int utrace_control(struct task_struct *target,
else
spin_unlock(&utrace->lock);
+put_utrace:
+ put_utrace_struct(bucket, utrace, 1);
return ret;
}
@@ -1829,8 +1817,9 @@ static int utrace_control(struct task_struct *target,
static int utrace_barrier(struct task_struct *target,
struct utrace_engine *engine)
{
+ struct utrace_bucket *bucket;
struct utrace *utrace;
- int ret = -ERESTARTSYS;
+ int ret;
if (unlikely(target == current))
return 0;
@@ -1838,10 +1827,15 @@ static int utrace_barrier(struct task_struct *target,
/* If we get here, we might call
* schedule_timeout_interruptible(), which sleeps. */
might_sleep();
+
+ bucket = find_utrace_bucket(target);
+ utrace = get_utrace_struct(bucket, target);
+ if (!utrace)
+ return -ESRCH;
+
do {
- utrace = get_utrace_lock(target, engine, false);
- if (unlikely(IS_ERR(utrace))) {
- ret = PTR_ERR(utrace);
+ ret = get_utrace_lock(utrace, engine, false);
+ if (ret) {
if (ret != -ERESTARTSYS)
break;
} else {
@@ -1859,6 +1853,8 @@ static int utrace_barrier(struct task_struct *target,
*/
if (utrace->reporting != engine)
ret = 0;
+ else
+ ret = -ERESTARTSYS;
spin_unlock(&utrace->lock);
if (!ret)
break;
@@ -1866,6 +1862,7 @@ static int utrace_barrier(struct task_struct *target,
schedule_timeout_interruptible(1);
} while (!signal_pending(current));
+ put_utrace_struct(bucket, utrace, 1);
return ret;
}
@@ -2163,11 +2160,14 @@ static void utrace_report_exec(void *cb_data __attribute__ ((unused)),
if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
return;
- bucket = get_utrace_bucket(task);
- utrace = task_utrace_struct(bucket, task);
- if (!utrace || !(utrace->utrace_flags & UTRACE_EVENT(EXEC)))
+ bucket = find_utrace_bucket(task);
+ utrace = get_utrace_struct(bucket, task);
+ if (!utrace)
return;
+ if (!(utrace->utrace_flags & UTRACE_EVENT(EXEC)))
+ goto put_utrace;
+
#if 0
if (!in_atomic() && !irqs_disabled())
printk(KERN_ERR
@@ -2179,16 +2179,20 @@ static void utrace_report_exec(void *cb_data __attribute__ ((unused)),
work = __stp_utrace_alloc_task_work(utrace, (void *)NULL);
if (work == NULL) {
_stp_error("Unable to allocate space for task_work");
- return;
+ goto put_utrace;
}
stp_init_task_work(work, &utrace_exec_work);
rc = stp_task_work_add(task, work);
+ /* Pass our utrace struct reference onto the worker */
+ if (!rc)
+ return;
// stp_task_work_add() returns -ESRCH if the task has already
// passed exit_task_work(). Just ignore this error.
- if (rc != 0 && rc != -ESRCH) {
+ if (rc != -ESRCH)
printk(KERN_ERR "%s:%d - stp_task_work_add() returned %d\n",
__FUNCTION__, __LINE__, rc);
- }
+put_utrace:
+ put_utrace_struct(bucket, utrace, 1);
#else
// This is the original version.
struct utrace_bucket *bucket;
@@ -2197,15 +2201,19 @@ static void utrace_report_exec(void *cb_data __attribute__ ((unused)),
if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
return;
- bucket = get_utrace_bucket(task);
- utrace = task_utrace_struct(bucket, task);
- if (utrace && utrace->utrace_flags & UTRACE_EVENT(EXEC)) {
+ bucket = find_utrace_bucket(task);
+ utrace = get_utrace_struct(bucket, task);
+ if (!utrace)
+ return;
+
+ if (utrace->utrace_flags & UTRACE_EVENT(EXEC)) {
INIT_REPORT(report);
/* FIXME: Hmm, can we get regs another way? */
REPORT(task, utrace, &report, UTRACE_EVENT(EXEC),
report_exec, NULL, NULL, NULL /* regs */);
}
+ put_utrace_struct(bucket, utrace, 1);
#endif
}
@@ -2215,6 +2223,7 @@ static void utrace_exec_work(struct task_work *work)
container_of(work, struct __stp_utrace_task_work, work);
struct utrace *utrace = utrace_work->utrace;
struct task_struct *task = current;
+ struct utrace_bucket *bucket = find_utrace_bucket(task);
INIT_REPORT(report);
@@ -2225,6 +2234,8 @@ static void utrace_exec_work(struct task_work *work)
report_exec, NULL, NULL, NULL /* regs */);
__stp_utrace_free_task_work(work);
+ put_utrace_struct(bucket, utrace, 1);
+
/* Remember that this task_work_func is finished. */
stp_task_work_func_done();
}
@@ -2293,14 +2304,16 @@ static void utrace_report_syscall_entry(void *cb_data __attribute__ ((unused)),
if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
return;
- bucket = get_utrace_bucket(task);
- utrace = task_utrace_struct(bucket, task);
+ bucket = find_utrace_bucket(task);
+ utrace = get_utrace_struct(bucket, task);
- /* FIXME: Is this 100% correct? */
- if (!utrace
- || !(utrace->utrace_flags & (UTRACE_EVENT(SYSCALL_ENTRY)|ENGINE_STOP)))
+ if (!utrace)
return;
+ /* FIXME: Is this 100% correct? */
+ if (!(utrace->utrace_flags & (UTRACE_EVENT(SYSCALL_ENTRY)|ENGINE_STOP)))
+ goto put_utrace;
+
#if 0
if (!in_atomic() && !irqs_disabled())
printk(KERN_ERR
@@ -2312,17 +2325,21 @@ static void utrace_report_syscall_entry(void *cb_data __attribute__ ((unused)),
work = __stp_utrace_alloc_task_work(utrace, NULL);
if (work == NULL) {
_stp_error("Unable to allocate space for task_work");
- return;
+ goto put_utrace;
}
__stp_utrace_save_regs(work, regs);
stp_init_task_work(work, &utrace_syscall_entry_work);
rc = stp_task_work_add(task, work);
+ /* Pass our utrace struct reference onto the worker */
+ if (!rc)
+ return;
// stp_task_work_add() returns -ESRCH if the task has already
// passed exit_task_work(). Just ignore this error.
- if (rc != 0 && rc != -ESRCH) {
+ if (rc != -ESRCH)
printk(KERN_ERR "%s:%d - stp_task_work_add() returned %d\n",
__FUNCTION__, __LINE__, rc);
- }
+put_utrace:
+ put_utrace_struct(bucket, utrace, 1);
#else
// This is the original version.
struct task_struct *task = current;
@@ -2332,12 +2349,13 @@ static void utrace_report_syscall_entry(void *cb_data __attribute__ ((unused)),
if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
return;
- bucket = get_utrace_bucket(task);
- utrace = task_utrace_struct(bucket, task);
+ bucket = find_utrace_bucket(task);
+ utrace = get_utrace_struct(bucket, task);
+ if (!utrace)
+ return;
/* FIXME: Is this 100% correct? */
- if (utrace
- && utrace->utrace_flags & (UTRACE_EVENT(SYSCALL_ENTRY)|ENGINE_STOP)) {
+ if (utrace->utrace_flags & (UTRACE_EVENT(SYSCALL_ENTRY)|ENGINE_STOP)) {
INIT_REPORT(report);
@@ -2346,6 +2364,7 @@ static void utrace_report_syscall_entry(void *cb_data __attribute__ ((unused)),
REPORT(task, utrace, &report, UTRACE_EVENT(SYSCALL_ENTRY),
report_syscall_entry, regs);
}
+ put_utrace_struct(bucket, utrace, 1);
#if 0
INIT_REPORT(report);
@@ -2367,6 +2386,7 @@ static void utrace_syscall_entry_work(struct task_work *work)
container_of(work, struct __stp_utrace_task_work, work);
struct utrace *utrace = utrace_work->utrace;
struct task_struct *task = current;
+ struct utrace_bucket *bucket = find_utrace_bucket(task);
struct pt_regs *regs = &utrace_work->regs;
INIT_REPORT(report);
@@ -2379,6 +2399,8 @@ static void utrace_syscall_entry_work(struct task_work *work)
report_syscall_entry, regs);
__stp_utrace_free_task_work(work);
+ put_utrace_struct(bucket, utrace, 1);
+
/* Remember that this task_work_func is finished. */
stp_task_work_func_done();
}
@@ -2400,13 +2422,14 @@ static void utrace_report_syscall_exit(void *cb_data __attribute__ ((unused)),
if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
return;
- bucket = get_utrace_bucket(task);
- utrace = task_utrace_struct(bucket, task);
+ bucket = find_utrace_bucket(task);
+ utrace = get_utrace_struct(bucket, task);
+ if (!utrace)
+ return;
/* FIXME: Is this 100% correct? */
- if (!utrace
- || !(utrace->utrace_flags & (UTRACE_EVENT(SYSCALL_EXIT)|ENGINE_STOP)))
- return;
+ if (!(utrace->utrace_flags & (UTRACE_EVENT(SYSCALL_EXIT)|ENGINE_STOP)))
+ goto put_utrace;
#if 0
if (!in_atomic() && !irqs_disabled())
@@ -2419,17 +2442,21 @@ static void utrace_report_syscall_exit(void *cb_data __attribute__ ((unused)),
work = __stp_utrace_alloc_task_work(utrace, NULL);
if (work == NULL) {
_stp_error("Unable to allocate space for task_work");
- return;
+ goto put_utrace;
}
__stp_utrace_save_regs(work, regs);
stp_init_task_work(work, &utrace_syscall_exit_work);
rc = stp_task_work_add(task, work);
+ /* Pass our utrace struct reference onto the worker */
+ if (!rc)
+ return;
// stp_task_work_add() returns -ESRCH if the task has already
// passed exit_task_work(). Just ignore this error.
- if (rc != 0 && rc != -ESRCH) {
+ if (rc != -ESRCH)
printk(KERN_ERR "%s:%d - stp_task_work_add() returned %d\n",
__FUNCTION__, __LINE__, rc);
- }
+put_utrace:
+ put_utrace_struct(bucket, utrace, 1);
#else
// This is the original version.
struct task_struct *task = current;
@@ -2439,12 +2466,13 @@ static void utrace_report_syscall_exit(void *cb_data __attribute__ ((unused)),
if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
return;
- bucket = get_utrace_bucket(task);
- utrace = task_utrace_struct(bucket, task);
+ bucket = find_utrace_bucket(task);
+ utrace = get_utrace_struct(bucket, task);
+ if (!utrace)
+ return;
/* FIXME: Is this 100% correct? */
- if (utrace
- && utrace->utrace_flags & (UTRACE_EVENT(SYSCALL_EXIT)|ENGINE_STOP)) {
+ if (utrace->utrace_flags & (UTRACE_EVENT(SYSCALL_EXIT)|ENGINE_STOP)) {
INIT_REPORT(report);
#ifdef STP_TF_DEBUG
@@ -2455,6 +2483,7 @@ static void utrace_report_syscall_exit(void *cb_data __attribute__ ((unused)),
REPORT(task, utrace, &report, UTRACE_EVENT(SYSCALL_EXIT),
report_syscall_exit, regs);
}
+ put_utrace_struct(bucket, utrace, 1);
#endif
}
@@ -2464,6 +2493,7 @@ static void utrace_syscall_exit_work(struct task_work *work)
container_of(work, struct __stp_utrace_task_work, work);
struct utrace *utrace = utrace_work->utrace;
struct task_struct *task = current;
+ struct utrace_bucket *bucket = find_utrace_bucket(task);
struct pt_regs *regs = &utrace_work->regs;
INIT_REPORT(report);
@@ -2485,6 +2515,8 @@ static void utrace_syscall_exit_work(struct task_work *work)
REPORT(task, utrace, &report, UTRACE_EVENT(SYSCALL_EXIT),
report_syscall_exit, regs);
+ put_utrace_struct(bucket, utrace, 1);
+
/* Remember that this task_work_func is finished. */
stp_task_work_func_done();
}
@@ -2512,17 +2544,18 @@ static void utrace_report_clone(void *cb_data __attribute__ ((unused)),
if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
return;
- bucket = get_utrace_bucket(task);
- utrace = task_utrace_struct(bucket, task);
+ bucket = find_utrace_bucket(task);
+ utrace = get_utrace_struct(bucket, task);
+ if (!utrace)
+ return;
#ifdef STP_TF_DEBUG
printk(KERN_ERR "%s:%d - parent %p, child %p, current %p\n",
__FUNCTION__, __LINE__, task, child, current);
#endif
- if (!utrace
- || !(utrace->utrace_flags & UTRACE_EVENT(CLONE)))
- return;
+ if (!(utrace->utrace_flags & UTRACE_EVENT(CLONE)))
+ goto put_utrace;
// TODO: Need to double-check the lifetime of struct child.
@@ -2530,16 +2563,20 @@ static void utrace_report_clone(void *cb_data __attribute__ ((unused)),
work = __stp_utrace_alloc_task_work(utrace, (void *)child);
if (work == NULL) {
_stp_error("Unable to allocate space for task_work");
- return;
+ goto put_utrace;
}
stp_init_task_work(work, &utrace_clone_work);
rc = stp_task_work_add(task, work);
+ /* Pass our utrace struct reference onto the worker */
+ if (!rc)
+ return;
// stp_task_work_add() returns -ESRCH if the task has already
// passed exit_task_work(). Just ignore this error.
- if (rc != 0 && rc != -ESRCH) {
+ if (rc != -ESRCH)
printk(KERN_ERR "%s:%d - stp_task_work_add() returned %d\n",
__FUNCTION__, __LINE__, rc);
- }
+put_utrace:
+ put_utrace_struct(bucket, utrace, 1);
#else
// This is the original version.
struct utrace_bucket *bucket;
@@ -2548,15 +2585,17 @@ static void utrace_report_clone(void *cb_data __attribute__ ((unused)),
if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
return;
- bucket = get_utrace_bucket(task);
- utrace = task_utrace_struct(bucket, task);
+ bucket = find_utrace_bucket(task);
+ utrace = get_utrace_struct(bucket, task);
+ if (!utrace)
+ return;
#ifdef STP_TF_DEBUG
printk(KERN_ERR "%s:%d - parent %p, child %p, current %p\n",
__FUNCTION__, __LINE__, task, child, current);
#endif
- if (utrace && utrace->utrace_flags & UTRACE_EVENT(CLONE)) {
+ if (utrace->utrace_flags & UTRACE_EVENT(CLONE)) {
unsigned long clone_flags = 0;
INIT_REPORT(report);
@@ -2618,6 +2657,7 @@ static void utrace_report_clone(void *cb_data __attribute__ ((unused)),
}
#endif
}
+ put_utrace_struct(bucket, utrace, 1);
#endif
}
@@ -2627,6 +2667,7 @@ static void utrace_clone_work(struct task_work *work)
container_of(work, struct __stp_utrace_task_work, work);
struct utrace *utrace = utrace_work->utrace;
struct task_struct *task = current;
+ struct utrace_bucket *bucket = find_utrace_bucket(task);
struct task_struct *child = (struct task_struct *)utrace_work->data;
unsigned long clone_flags = 0;
@@ -2693,6 +2734,8 @@ static void utrace_clone_work(struct task_work *work)
}
#endif
+ put_utrace_struct(bucket, utrace, 1);
+
/* Remember that this task_work_func is finished. */
stp_task_work_func_done();
}
@@ -2709,8 +2752,8 @@ static void utrace_clone_work(struct task_work *work)
*/
static void utrace_finish_vfork(struct task_struct *task)
{
- struct utrace_bucket *bucket = get_utrace_bucket(task);
- struct utrace *utrace = task_utrace_struct(bucket, task);
+ struct utrace_bucket *bucket = find_utrace_bucket(task);
+ struct utrace *utrace = get_utrace_struct(bucket, task);
if (utrace->vfork_stop) {
spin_lock(&utrace->lock);
@@ -2732,19 +2775,20 @@ static void utrace_report_death(void *cb_data __attribute__ ((unused)),
{
struct utrace_bucket *bucket;
struct utrace *utrace;
- INIT_REPORT(report);
if (atomic_read(&utrace_state) != __UTRACE_REGISTERED)
return;
- bucket = get_utrace_bucket(task);
- utrace = task_utrace_struct(bucket, task);
+ bucket = find_utrace_bucket(task);
+ utrace = get_utrace_struct(bucket, task);
+ if (!utrace)
+ return;
#ifdef STP_TF_DEBUG
printk(KERN_ERR "%s:%d - task %p, utrace %p, flags %lx\n", __FUNCTION__, __LINE__, task, utrace, utrace ? utrace->utrace_flags : 0);
#endif
- if (!utrace || !(utrace->utrace_flags & UTRACE_EVENT(DEATH)))
- return;
+ if (!(utrace->utrace_flags & UTRACE_EVENT(DEATH)))
+ goto put_utrace;
/* This code is called from the 'sched_process_exit'
* tracepoint, which really corresponds more to UTRACE_EXIT
@@ -2769,44 +2813,29 @@ static void utrace_report_death(void *cb_data __attribute__ ((unused)),
* flag and know that we are not yet fully quiescent for purposes
* of detach bookkeeping.
*/
- if (in_atomic() || irqs_disabled()) {
- if (atomic_add_unless(&utrace->report_work_added, 1, 1)) {
- int rc;
+ if (atomic_add_unless(&utrace->report_work_added, 1, 1)) {
+ int rc;
#ifdef STP_TF_DEBUG
- printk(KERN_ERR "%s:%d - adding task_work\n",
- __FUNCTION__, __LINE__);
+ printk(KERN_ERR "%s:%d - adding task_work\n",
+ __FUNCTION__, __LINE__);
#endif
- rc = stp_task_work_add(task,
- &utrace->report_work);
- if (rc != 0) {
- atomic_set(&utrace->report_work_added, 0);
- /* stp_task_work_add() returns -ESRCH
- * if the task has already passed
- * exit_task_work(). Just ignore this
- * error. */
- if (rc != -ESRCH) {
- printk(KERN_ERR
- "%s:%d - task_work_add() returned %d\n",
- __FUNCTION__, __LINE__, rc);
- }
- }
+ rc = stp_task_work_add(task, &utrace->report_work);
+ /* Pass our utrace struct reference onto the worker */
+ if (!rc)
+ return;
+ atomic_set(&utrace->report_work_added, 0);
+ /* stp_task_work_add() returns -ESRCH
+ * if the task has already passed
+ * exit_task_work(). Just ignore this
+ * error. */
+ if (rc != -ESRCH) {
+ printk(KERN_ERR
+ "%s:%d - task_work_add() returned %d\n",
+ __FUNCTION__, __LINE__, rc);
}
}
- else {
- spin_lock(&utrace->lock);
- BUG_ON(utrace->death);
- utrace->death = 1;
- utrace->resume = UTRACE_RESUME;
- splice_attaching(utrace);
- spin_unlock(&utrace->lock);
-
- REPORT_CALLBACKS(, task, utrace, &report, UTRACE_EVENT(DEATH),
- report_death, engine, -1/*group_dead*/,
- -1/*signal*/);
-
- utrace_maybe_reap(task, utrace, false);
- utrace_free(bucket, utrace);
- }
+put_utrace:
+ put_utrace_struct(bucket, utrace, 1);
}
/*
@@ -2845,13 +2874,14 @@ static void finish_resume_report(struct task_struct *task,
static void utrace_resume(struct task_work *work)
{
/*
- * We could also do 'task_utrace_struct()' here to find the
+ * We could also do 'get_utrace_struct()' here to find the
* task's 'struct utrace', but 'container_of()' should be
- * instantaneous (where 'task_utrace_struct()' has to do a
+ * instantaneous (where 'get_utrace_struct()' has to do a
* hash lookup).
*/
struct utrace *utrace = container_of(work, struct utrace, resume_work);
struct task_struct *task = current;
+ struct utrace_bucket *bucket = find_utrace_bucket(task);
INIT_REPORT(report);
struct utrace_engine *engine;
@@ -2859,11 +2889,8 @@ static void utrace_resume(struct task_work *work)
atomic_set(&utrace->resume_work_added, 0);
/* Make sure the task isn't exiting. */
- if (task->flags & PF_EXITING) {
- /* Remember that this task_work_func is finished. */
- stp_task_work_func_done();
- return;
- }
+ if (task->flags & PF_EXITING)
+ goto exit;
/*
* Some machines get here with interrupts disabled. The same arch
@@ -2886,9 +2913,7 @@ static void utrace_resume(struct task_work *work)
* purposes as well as calling us.)
*/
- /* Remember that this task_work_func is finished. */
- stp_task_work_func_done();
- return;
+ goto exit;
case UTRACE_INTERRUPT:
/*
* Note that UTRACE_INTERRUPT reporting was handled by
@@ -2925,6 +2950,9 @@ static void utrace_resume(struct task_work *work)
*/
finish_resume_report(task, utrace, &report);
+exit:
+ put_utrace_struct(bucket, utrace, 1);
+
/* Remember that this task_work_func is finished. */
stp_task_work_func_done();
}
@@ -2933,14 +2961,14 @@ static void utrace_resume(struct task_work *work)
static void utrace_report_work(struct task_work *work)
{
/*
- * We could also do 'task_utrace_struct()' here to find the
+ * We could also do 'get_utrace_struct()' here to find the
* task's 'struct utrace', but 'container_of()' should be
- * instantaneous (where 'task_utrace_struct()' has to do a
+ * instantaneous (where 'get_utrace_struct()' has to do a
* hash lookup).
*/
struct utrace *utrace = container_of(work, struct utrace, report_work);
struct task_struct *task = current;
- struct utrace_bucket *bucket = get_utrace_bucket(task);
+ struct utrace_bucket *bucket = find_utrace_bucket(task);
INIT_REPORT(report);
struct utrace_engine *engine;
unsigned long clone_flags;
@@ -2964,7 +2992,9 @@ static void utrace_report_work(struct task_work *work)
-1/*signal*/);
utrace_maybe_reap(task, utrace, false);
- utrace_free(bucket, utrace);
+
+ /* Put two references on the utrace struct to free it */
+ put_utrace_struct(bucket, utrace, 2);
/* Remember that this task_work_func is finished. */
stp_task_work_func_done();
--
2.30.0
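
A second pattern recurs in the report-path hunks above: when a task worker is
queued successfully, the reference taken by get_utrace_struct() is handed off
to the worker instead of being dropped, and the worker drops it when it runs.
A minimal sketch under those assumptions — alloc_work() and work_to_utrace()
are hypothetical stand-ins for the __stp_utrace_alloc_task_work() and
container_of() plumbing in the patch:

static void utrace_report_event(struct task_struct *task)
{
	struct utrace_bucket *bucket = find_utrace_bucket(task);
	struct utrace *utrace = get_utrace_struct(bucket, task);
	struct task_work *work;
	int rc;

	if (!utrace)
		return;

	work = alloc_work(utrace);	/* hypothetical helper */
	if (!work)
		goto put_utrace;

	rc = stp_task_work_add(task, work);
	if (!rc)
		return;	/* success: our reference now belongs to the worker */

	/* -ESRCH means the task already passed exit_task_work(); the
	 * handoff failed either way, so drop the reference ourselves. */
put_utrace:
	put_utrace_struct(bucket, utrace, 1);
}

static void utrace_event_work(struct task_work *work)
{
	struct utrace *utrace = work_to_utrace(work);	/* hypothetical */
	struct utrace_bucket *bucket = find_utrace_bucket(current);

	/* ... run the report callbacks ... */

	put_utrace_struct(bucket, utrace, 1);	/* drop the handed-off ref */
	stp_task_work_func_done();
}

The exception is utrace_report_work(), which puts two references at once: its
own handed-off reference plus the base reference set in utrace_task_alloc(),
which is what finally frees the struct when the task dies.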