From 6dc9bcc54cf839afeceef4210822aa56e5a1c73a Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf <sultan@openresty.com>
Date: Tue, 1 Dec 2020 18:47:04 -0800
Subject: [PATCH] runtime_context: replace _stp_context_lock with an atomic
 variable

We can't use any lock primitives here, such as spin locks or rw locks,
because lock_acquire() has tracepoints inside of it. If a probe attached
to one of those tracepoints fires while the context lock is being
acquired, its handler tries to take the same lock again and deadlocks,
so we have to roll our own synchronization mechanism using an atomic
variable.
---
runtime/linux/runtime_context.h | 31 +++++++++++--------------------
1 file changed, 11 insertions(+), 20 deletions(-)
diff --git a/runtime/linux/runtime_context.h b/runtime/linux/runtime_context.h
index e716e6d39..94597ebb6 100644
--- a/runtime/linux/runtime_context.h
+++ b/runtime/linux/runtime_context.h
@@ -11,10 +11,9 @@
 #ifndef _LINUX_RUNTIME_CONTEXT_H_
 #define _LINUX_RUNTIME_CONTEXT_H_
 
-/* Can't use STP_DEFINE_RWLOCK() or this might be replaced with a spin lock */
-static DEFINE_RWLOCK(_stp_context_lock);
+/* Can't use a lock primitive for this because lock_acquire() has tracepoints */
+static atomic_t _stp_context_lock = ATOMIC_INIT(0);
 static DEFINE_PER_CPU(struct context *, contexts);
-static bool _stp_context_stop;
 
 static int _stp_runtime_contexts_alloc(void)
 {
@@ -37,15 +36,14 @@ static int _stp_runtime_contexts_alloc(void)
 
 /* We should be free of all probes by this time, but for example the timer for
  * _stp_ctl_work_callback may still be running and looking for contexts. We
- * use _stp_context_stop and a write lock to be sure its safe to free them. */
+ * use _stp_context_lock to be sure it's safe to free them. */
 static void _stp_runtime_contexts_free(void)
 {
 	unsigned int cpu;
 
 	/* Sync to make sure existing readers are done */
-	write_lock(&_stp_context_lock);
-	_stp_context_stop = true;
-	write_unlock(&_stp_context_lock);
+	while (atomic_cmpxchg(&_stp_context_lock, 0, INT_MAX))
+		cpu_relax();
 
 	/* Now we can actually free the contexts */
 	for_each_possible_cpu(cpu)
@@ -54,18 +52,14 @@ static void _stp_runtime_contexts_free(void)
 
 static inline struct context * _stp_runtime_get_context(void)
 {
-	if (_stp_context_stop)
-		return NULL;
-
 	return per_cpu(contexts, smp_processor_id());
 }
 
 static struct context * _stp_runtime_entryfn_get_context(void)
-	__acquires(&_stp_context_lock)
 {
 	struct context* __restrict__ c = NULL;
 
-	if (!read_trylock(&_stp_context_lock))
+	if (!atomic_add_unless(&_stp_context_lock, 1, INT_MAX))
 		return NULL;
 
 	c = _stp_runtime_get_context();
@@ -79,16 +73,15 @@ static struct context * _stp_runtime_entryfn_get_context(void)
 			return c;
 		}
 	}
-	read_unlock(&_stp_context_lock);
+	atomic_dec(&_stp_context_lock);
 	return NULL;
 }
 
 static inline void _stp_runtime_entryfn_put_context(struct context *c)
-	__releases(&_stp_context_lock)
 {
 	if (c) {
 		atomic_set(&c->busy, 0);
-		read_unlock(&_stp_context_lock);
+		atomic_dec(&_stp_context_lock);
 	}
 }
 
@@ -104,11 +97,9 @@ static void _stp_runtime_context_wait(void)
 		int i;
 
 		holdon = 0;
-		read_lock(&_stp_context_lock);
-		if (_stp_context_stop) {
-			read_unlock(&_stp_context_lock);
+		if (!atomic_add_unless(&_stp_context_lock, 1, INT_MAX))
 			break;
-		}
+
 		for_each_possible_cpu(i) {
 			struct context *c = per_cpu(contexts, i);
 			if (c != NULL
@@ -124,7 +115,7 @@ static void _stp_runtime_context_wait(void)
 				}
 			}
 		}
-		read_unlock(&_stp_context_lock);
+		atomic_dec(&_stp_context_lock);
 
 		/*
 		 * Just in case things are really really stuck, a
--
2.29.2
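
For readers who want the locking scheme in isolation: the atomic_t doubles as
a reader count, and the lone writer (module teardown) claims the lock by
swinging the count from 0 to INT_MAX, the same sentinel that
atomic_add_unless() checks on the reader side. Below is a minimal sketch of
that pattern, not code from the patch; the ctx_* names are hypothetical.

#include <linux/atomic.h>
#include <linux/kernel.h>	/* INT_MAX */
#include <asm/processor.h>	/* cpu_relax() */

static atomic_t ctx_lock = ATOMIC_INIT(0);

/* Reader side (probe path): take a reference unless a writer already
 * holds the INT_MAX sentinel. Fails instead of blocking, mirroring the
 * read_trylock() it replaces. */
static bool ctx_read_trylock(void)
{
	return atomic_add_unless(&ctx_lock, 1, INT_MAX);
}

static void ctx_read_unlock(void)
{
	atomic_dec(&ctx_lock);
}

/* Writer side (teardown): spin until the reader count drains to 0, then
 * publish INT_MAX so no new reader can enter. The patch never releases
 * this, since the contexts are freed immediately afterward. */
static void ctx_write_lock(void)
{
	while (atomic_cmpxchg(&ctx_lock, 0, INT_MAX) != 0)
		cpu_relax();
}

Because none of these operations go through lock_acquire(), a probe firing on
a locking tracepoint can safely call ctx_read_trylock() without recursing into
a lock; the trade-off is the loss of lockdep coverage and writer fairness,
which is acceptable for a one-shot teardown gate.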