Created: February 25, 2020, 20:48

Save vineethrp/887743608f42a6ce96bf7847b5b119ae to your computer and use it in GitHub Desktop.
Proposed fix for coresched migration issue with CFS shares
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c | |
index f42ceecb749f..215c6d546947 100644 | |
--- a/kernel/sched/fair.c | |
+++ b/kernel/sched/fair.c | |
@@ -1767,6 +1767,8 @@ static void task_numa_compare(struct task_numa_env *env, | |
rcu_read_unlock(); | |
} | |
+static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p); | |
+ | |
static void task_numa_find_cpu(struct task_numa_env *env, | |
long taskimp, long groupimp) | |
{ | |
@@ -5650,6 +5652,39 @@ static struct sched_group * | |
find_idlest_group(struct sched_domain *sd, struct task_struct *p, | |
int this_cpu, int sd_flag); | |
+#ifdef CONFIG_SCHED_CORE | |
+static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) | |
+{ | |
+ bool idle_core = true; | |
+ int cpu; | |
+ | |
+ /* Ignore cookie match if core scheduler is not enabled on the CPU. */ | |
+ if (!sched_core_enabled(rq)) | |
+ return true; | |
+ | |
+ if (rq->core->core_forceidle) | |
+ return true; | |
+ | |
+ if (task_h_load(p) > task_h_load(rq->curr)) | |
+ return true; | |
+ if (task_util_est(p) > task_util_est(rq->curr)) | |
+ return true; | |
+ | |
+ for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { | |
+ if (!available_idle_cpu(cpu)) { | |
+ idle_core = false; | |
+ break; | |
+ } | |
+ } | |
+ | |
+ /* | |
+ * A CPU in an idle core is always the best choice for tasks with | |
+ * cookies. | |
+ */ | |
+ return idle_core || rq->core->core_cookie == p->core_cookie; | |
+} | |
+#endif | |
+ | |
/* | |
* find_idlest_group_cpu - find the idlest CPU among the CPUs in the group. | |
*/ | |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h | |
index 262442c13e99..8c607e9e2bb5 100644 | |
--- a/kernel/sched/sched.h | |
+++ b/kernel/sched/sched.h | |
@@ -1061,29 +1061,6 @@ static inline raw_spinlock_t *rq_lockp(struct rq *rq) | |
return &rq->__lock; | |
} | |
-static inline bool sched_core_cookie_match(struct rq *rq, struct task_struct *p) | |
-{ | |
- bool idle_core = true; | |
- int cpu; | |
- | |
- /* Ignore cookie match if core scheduler is not enabled on the CPU. */ | |
- if (!sched_core_enabled(rq)) | |
- return true; | |
- | |
- for_each_cpu(cpu, cpu_smt_mask(cpu_of(rq))) { | |
- if (!available_idle_cpu(cpu)) { | |
- idle_core = false; | |
- break; | |
- } | |
- } | |
- | |
- /* | |
- * A CPU in an idle core is always the best choice for tasks with | |
- * cookies. | |
- */ | |
- return idle_core || rq->core->core_cookie == p->core_cookie; | |
-} | |
- | |
extern void queue_core_balance(struct rq *rq); | |
void sched_core_add(struct rq *rq, struct task_struct *p); |
Sign up for free to join this conversation on GitHub.
Already have an account? Sign in to comment.