@kerneltoast
Created November 6, 2020 21:55
From 17739c97357a43001e7498f6a226c3e90af12626 Mon Sep 17 00:00:00 2001
From: Sultan Alsawaf <sultan@openresty.com>
Date: Fri, 6 Nov 2020 13:54:05 -0800
Subject: [PATCH] runtime_context: no rcu

---
runtime/linux/runtime_context.h | 66 +++------------------------------
1 file changed, 5 insertions(+), 61 deletions(-)
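
In short: the per-CPU context structs become static per-CPU data instead of
an NR_CPUS array of vzalloc'd, RCU-protected pointers. That makes allocation
a no-op, removes the publish/synchronize/free dance on shutdown, drops the
rcu-idle guard from the lookup path, and turns the busy counter into a
cmpxchg-claimed flag.
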
diff --git a/runtime/linux/runtime_context.h b/runtime/linux/runtime_context.h
index 41fecba81..2dddfbbff 100644
--- a/runtime/linux/runtime_context.h
+++ b/runtime/linux/runtime_context.h
@@ -15,24 +15,10 @@
#define __rcu
#endif
-static struct context __rcu *contexts[NR_CPUS] = { NULL };
+static DEFINE_PER_CPU(struct context, contexts);
static int _stp_runtime_contexts_alloc(void)
{
- int cpu;
-
- for_each_possible_cpu(cpu) {
- /* Module init, so in user context, safe to use
- * "sleeping" allocation. */
- struct context *c = _stp_vzalloc_node(sizeof (struct context),
- cpu_to_node(cpu));
- if (c == NULL) {
- _stp_error ("context (size %lu per cpu) allocation failed",
- (unsigned long) sizeof (struct context));
- return -ENOMEM;
- }
- rcu_assign_pointer(contexts[cpu], c);
- }
return 0;
}
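
The hunk above swaps the NR_CPUS array of RCU-protected pointers for
statically allocated per-CPU storage, which is why
_stp_runtime_contexts_alloc() collapses to "return 0". A minimal sketch of
the idiom, assuming a much-simplified struct context (SystemTap's real
struct is far larger):

    #include <linux/percpu.h>
    #include <linux/atomic.h>

    struct context {
            atomic_t busy;  /* nonzero while a probe owns this CPU's slot */
            /* ... per-probe scratch state ... */
    };

    /* One copy is reserved per possible CPU when the module loads, and
     * the per-CPU allocator already places each copy in node-local
     * memory, so the _stp_vzalloc_node() loop and its failure path are
     * no longer needed. */
    static DEFINE_PER_CPU(struct context, contexts);
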
@@ -41,50 +27,11 @@ static int _stp_runtime_contexts_alloc(void)
* use RCU-sched synchronization to be sure it's safe to free them. */
static void _stp_runtime_contexts_free(void)
{
- // Note that 'free_contexts' is static because it is
- // (probably) too big to fit on a kernel function's stack.
- static struct context *free_contexts[NR_CPUS] = { NULL };
- int cpu;
-
- /* First, save all the pointers. */
- rcu_read_lock_sched();
- for_each_possible_cpu(cpu) {
- free_contexts[cpu] = rcu_dereference_sched(contexts[cpu]);
- }
- rcu_read_unlock_sched();
-
- /* Now clear all pointers to prevent new readers. */
- for_each_possible_cpu(cpu) {
- rcu_assign_pointer(contexts[cpu], NULL);
- }
-
- /* Sync to make sure existing readers are done. */
- stp_synchronize_sched();
-
- /* Now we can actually free the contexts. */
- for_each_possible_cpu(cpu) {
- struct context *c = free_contexts[cpu];
- if (c != NULL) {
- free_contexts[cpu] = NULL;
- _stp_vfree(c);
- }
- }
}
static inline struct context * _stp_runtime_get_context(void)
{
- // RHBZ1788662 rcu operations are rejected in idle-cpu contexts
- // in effect: skip probe if it's in rcu-idle state
-#if defined(STAPCONF_RCU_IS_WATCHING) || LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0) // linux commit #5c173eb8
- if (! rcu_is_watching())
- return 0;
-#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0) // linux commit #9b2e4f18
- if (rcu_is_cpu_idle())
- return 0;
-#else
- ; // XXX older kernels didn't put tracepoints in idle-cpu
-#endif
- return rcu_dereference_sched(contexts[smp_processor_id()]);
+ return &per_cpu(contexts, smp_processor_id());
}
static struct context * _stp_runtime_entryfn_get_context(void)
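
With static storage there is nothing to unpublish or free, so the teardown
above loses the classic RCU removal sequence (snapshot the pointers, clear
them, wait out readers, free), and _stp_runtime_get_context() no longer
enters an RCU read-side critical section at all, which is what makes the
RHBZ1788662 rcu_is_watching()/rcu_is_cpu_idle() guard unnecessary too. For
reference, a generic sketch of the pattern being deleted, using a
hypothetical single slot and kfree() in place of SystemTap's per-CPU array
and _stp_vfree():

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo { int data; };

    static void remove_and_free(struct foo __rcu **slot)
    {
            /* update side holds the only reference; plain load is safe */
            struct foo *victim = rcu_dereference_protected(*slot, 1);

            rcu_assign_pointer(*slot, NULL); /* new readers see NULL   */
            synchronize_rcu();               /* existing readers drain */
            kfree(victim);                   /* now unreachable: free  */
    }

(The deleted code used the RCU-sched flavor via stp_synchronize_sched();
plain RCU is shown here for brevity.)
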
@@ -93,7 +40,7 @@ static struct context * _stp_runtime_entryfn_get_context(void)
preempt_disable ();
c = _stp_runtime_get_context();
if (c != NULL) {
- if (atomic_inc_return(&c->busy) == 1) {
+ if (!atomic_cmpxchg(&c->busy, 0, 1)) {
// NB: Notice we're not re-enabling preemption
// here. We expect the calling code to call
// _stp_runtime_entryfn_get_context() and
@@ -101,7 +48,6 @@ static struct context * _stp_runtime_entryfn_get_context(void)
// pair.
return c;
}
- atomic_dec(&c->busy);
}
preempt_enable_no_resched();
return NULL;
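
The two hunks above change how a probe claims its CPU's context.
atomic_cmpxchg() returns the value the flag held before the exchange, so a
zero return means the slot was free and now belongs to the caller; a losing
attempt stores nothing, which is why the compensating atomic_dec() on the
contended path disappears. As a sketch, continuing the simplified struct
from earlier:

    #include <linux/types.h>

    /* Claim c for the current probe; returns true on success. Unlike
     * the old atomic_inc_return() == 1 test, losing the race leaves
     * c->busy untouched, so there is nothing to undo. */
    static bool context_tryclaim(struct context *c)
    {
            return atomic_cmpxchg(&c->busy, 0, 1) == 0;
    }
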
@@ -110,9 +56,7 @@ static struct context * _stp_runtime_entryfn_get_context(void)
static inline void _stp_runtime_entryfn_put_context(struct context *c)
{
if (c) {
- if (c == _stp_runtime_get_context())
- atomic_dec(&c->busy);
- /* else, warn about bad state? */
+ atomic_set(&c->busy, 0);
preempt_enable_no_resched();
}
return;
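
Release is the mirror image. Preemption stays disabled for the whole
get/put window, so the context being put back can only be the current
CPU's own; the old cross-check against _stp_runtime_get_context() is
therefore redundant and a plain store of zero suffices:

    /* Pairs with the winning cmpxchg in context_tryclaim() above. */
    static void context_release(struct context *c)
    {
            atomic_set(&c->busy, 0);
    }
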
@@ -132,7 +76,7 @@ static void _stp_runtime_context_wait(void)
holdon = 0;
rcu_read_lock_sched();
for_each_possible_cpu(i) {
- struct context *c = rcu_dereference_sched(contexts[i]);
+ struct context *c = &per_cpu(contexts, i);
if (c != NULL
&& atomic_read (& c->busy)) {
holdon = 1;
--
2.29.2
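
After the final hunk, _stp_runtime_context_wait() simply polls every CPU's
busy flag until in-flight probes drain. The rcu_read_lock_sched() around
the loop survives the patch but no longer protects anything, since the
per-CPU contexts can never disappear out from under it. A standalone sketch
of the resulting wait (the one-jiffy back-off is an assumption for the
sketch, not taken from the patch):

    #include <linux/cpumask.h>
    #include <linux/sched.h>

    static void wait_for_all_contexts(void)
    {
            int cpu, busy;

            do {
                    busy = 0;
                    for_each_possible_cpu(cpu) {
                            if (atomic_read(&per_cpu(contexts, cpu).busy))
                                    busy = 1;
                    }
                    if (busy)  /* sleep one jiffy, then re-scan */
                            schedule_timeout_uninterruptible(1);
            } while (busy);
    }
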