sched/rt: Optimize cpupri_find() on non-heterogenous systems
author    Qais Yousef <qais.yousef@arm.com>
          Mon, 2 Mar 2020 13:27:18 +0000 (13:27 +0000)
committer Ingo Molnar <mingo@kernel.org>
          Fri, 6 Mar 2020 11:57:27 +0000 (12:57 +0100)
Optimize cpupri_find() by introducing a new cpupri_find_fitness() function
that takes the fitness_fn as an argument and is only called when the
sched_asym_cpucapacity static key is enabled.

cpupri_find() is now a wrapper function that calls cpupri_find_fitness()
passing NULL as a fitness_fn, hence disabling the logic that handles
fitness by default.

LINK: https://lore.kernel.org/lkml/c0772fca-0a4b-c88d-fdf2-5715fcf8447b@arm.com/
Reported-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Qais Yousef <qais.yousef@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Fixes: 804d402fb6f6 ("sched/rt: Make RT capacity-aware")
Link: https://lkml.kernel.org/r/20200302132721.8353-4-qais.yousef@arm.com
kernel/sched/cpupri.c
kernel/sched/cpupri.h
kernel/sched/rt.c
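
For orientation before the diffs, the sketch below illustrates the shape of
the change in plain, self-contained C: a search routine that takes an
optional fitness callback, a thin wrapper that passes NULL to skip the
fitness handling, and a caller that only reaches for the callback-taking
variant behind a static-key-style check. The types and names used here
(struct task, cpu_fits(), find(), find_fitness(), the asym_cpucapacity flag)
are hypothetical stand-ins for illustration, not the kernel's cpupri or
task_struct code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the task being placed. */
struct task { int util; };

/* Stand-in fitness test, analogous in spirit to rt_task_fits_capacity(). */
static bool cpu_fits(const struct task *p, int cpu)
{
        /* pretend even-numbered CPUs have enough capacity for heavy tasks */
        return p->util < 50 || (cpu % 2 == 0);
}

/*
 * Full search: candidate CPUs are filtered through fitness_fn when one is
 * given; a NULL fitness_fn skips the filtering entirely, which is how the
 * wrapper below degenerates to a plain search.
 */
static int find_fitness(const struct task *p, int nr_cpus,
                        bool (*fitness_fn)(const struct task *p, int cpu))
{
        for (int cpu = 0; cpu < nr_cpus; cpu++) {
                if (fitness_fn && !fitness_fn(p, cpu))
                        continue;       /* CPU does not fit, keep looking */
                return cpu;             /* first acceptable CPU */
        }
        return -1;                      /* no targets found */
}

/* Thin wrapper: the common, non-heterogeneous case pays no fitness cost. */
static int find(const struct task *p, int nr_cpus)
{
        return find_fitness(p, nr_cpus, NULL);
}

int main(void)
{
        /* Stand-in for the sched_asym_cpucapacity static key. */
        bool asym_cpucapacity = true;
        struct task heavy = { .util = 80 };

        int cpu = asym_cpucapacity
                ? find_fitness(&heavy, 4, cpu_fits)
                : find(&heavy, 4);

        printf("selected CPU: %d\n", cpu);
        return 0;
}

On a symmetric system the flag stays false, so the wrapper path never
touches the callback; that is the overhead the actual patch keeps off the
common path via static_branch_unlikely(), as the rt.c hunk below shows.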

kernel/sched/cpupri.c
index 1bcfa19..dd3f16d 100644
@@ -94,8 +94,14 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
        return 1;
 }
 
+int cpupri_find(struct cpupri *cp, struct task_struct *p,
+               struct cpumask *lowest_mask)
+{
+       return cpupri_find_fitness(cp, p, lowest_mask, NULL);
+}
+
 /**
- * cpupri_find - find the best (lowest-pri) CPU in the system
+ * cpupri_find_fitness - find the best (lowest-pri) CPU in the system
  * @cp: The cpupri context
  * @p: The task
  * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
@@ -111,7 +117,7 @@ static inline int __cpupri_find(struct cpupri *cp, struct task_struct *p,
  *
  * Return: (int)bool - CPUs were found
  */
-int cpupri_find(struct cpupri *cp, struct task_struct *p,
+int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
                struct cpumask *lowest_mask,
                bool (*fitness_fn)(struct task_struct *p, int cpu))
 {
kernel/sched/cpupri.h
index 32dd520..efbb492 100644
@@ -19,8 +19,10 @@ struct cpupri {
 
 #ifdef CONFIG_SMP
 int  cpupri_find(struct cpupri *cp, struct task_struct *p,
-                struct cpumask *lowest_mask,
-                bool (*fitness_fn)(struct task_struct *p, int cpu));
+                struct cpumask *lowest_mask);
+int  cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
+                        struct cpumask *lowest_mask,
+                        bool (*fitness_fn)(struct task_struct *p, int cpu));
 void cpupri_set(struct cpupri *cp, int cpu, int pri);
 int  cpupri_init(struct cpupri *cp);
 void cpupri_cleanup(struct cpupri *cp);
kernel/sched/rt.c
index f0071fa..29a8695 100644
@@ -1504,7 +1504,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
         * let's hope p can move out.
         */
        if (rq->curr->nr_cpus_allowed == 1 ||
-           !cpupri_find(&rq->rd->cpupri, rq->curr, NULL, NULL))
+           !cpupri_find(&rq->rd->cpupri, rq->curr, NULL))
                return;
 
        /*
@@ -1512,7 +1512,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
         * see if it is pushed or pulled somewhere else.
         */
        if (p->nr_cpus_allowed != 1 &&
-           cpupri_find(&rq->rd->cpupri, p, NULL, NULL))
+           cpupri_find(&rq->rd->cpupri, p, NULL))
                return;
 
        /*
@@ -1691,6 +1691,7 @@ static int find_lowest_rq(struct task_struct *task)
        struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
        int this_cpu = smp_processor_id();
        int cpu      = task_cpu(task);
+       int ret;
 
        /* Make sure the mask is initialized first */
        if (unlikely(!lowest_mask))
@@ -1699,8 +1700,22 @@ static int find_lowest_rq(struct task_struct *task)
        if (task->nr_cpus_allowed == 1)
                return -1; /* No other targets possible */
 
-       if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask,
-                        rt_task_fits_capacity))
+       /*
+        * If we're on asym system ensure we consider the different capacities
+        * of the CPUs when searching for the lowest_mask.
+        */
+       if (static_branch_unlikely(&sched_asym_cpucapacity)) {
+
+               ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
+                                         task, lowest_mask,
+                                         rt_task_fits_capacity);
+       } else {
+
+               ret = cpupri_find(&task_rq(task)->rd->cpupri,
+                                 task, lowest_mask);
+       }
+
+       if (!ret)
                return -1; /* No targets found */
 
        /*