
sched/fair: Code !is_big_little path into select_energy_cpu_brute()
author    Dietmar Eggemann <dietmar.eggemann@arm.com>
          Wed, 22 Mar 2017 18:16:03 +0000 (18:16 +0000)
committer Andres Oportus <andresoportus@google.com>
          Fri, 2 Jun 2017 15:01:53 +0000 (08:01 -0700)
This patch replaces the existing EAS upstream implementation of
select_energy_cpu_brute() with the find_best_target()-based one
previously used in Android.

It also removes the cpumask 'and' with cpu_online_mask from the
sync-wakeup path of select_energy_cpu_brute(); compare the existing
use of 'cpu = smp_processor_id()' in select_task_rq_fair().

Change-Id: If678c002efaa87d1ba3ec9989a4e9f8df98b83ec
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
[ added guarding for non-schedtune builds ]
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
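
For readers skimming the hunk below, here is a minimal userspace sketch of
the resulting decision flow. It is illustrative only: the model_* helpers
are stand-ins for the kernel's find_best_target(), idle_cpu(),
cpu_overutilized() and energy_diff(), and the RCU locking, sched-domain
lookup and sync-wakeup shortcut are omitted.

    /* Illustrative sketch -- not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for the kernel helpers, so the sketch compiles. */
    static int  model_find_best_target(bool boosted, bool prefer_idle) { return 2; }
    static bool model_idle_cpu(int cpu)             { return cpu == 2; }
    static bool model_cpu_overutilized(int cpu)     { return false; }
    static int  model_energy_diff(int src, int dst) { return -10; } /* <0: dst saves energy */

    static int model_select_energy_cpu(int prev_cpu, bool boosted, bool prefer_idle)
    {
            int target_cpu = prev_cpu;
            int tmp_target = model_find_best_target(boosted, prefer_idle);

            if (tmp_target >= 0) {
                    target_cpu = tmp_target;
                    /* Boosted/prefer_idle tasks take an idle target immediately. */
                    if ((boosted || prefer_idle) && model_idle_cpu(target_cpu))
                            return target_cpu;
            }

            if (target_cpu != prev_cpu) {
                    /* No spare capacity on the previous cpu: keep the new target. */
                    if (model_cpu_overutilized(prev_cpu))
                            return target_cpu;
                    /* Otherwise migrate only if it actually saves energy. */
                    if (model_energy_diff(prev_cpu, target_cpu) >= 0)
                            target_cpu = prev_cpu;
            }
            return target_cpu;
    }

    int main(void)
    {
            printf("chosen cpu: %d\n", model_select_energy_cpu(0, false, false));
            return 0;
    }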
kernel/sched/fair.c

index 5e4bf1e..387950f 100644
@@ -5891,65 +5891,57 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 
 static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync)
 {
-       int i;
-       int min_diff = 0, energy_cpu = prev_cpu, spare_cpu = prev_cpu;
-       unsigned long max_spare = 0;
        struct sched_domain *sd;
+       int target_cpu = prev_cpu, tmp_target;
+       bool boosted, prefer_idle;
 
        if (sysctl_sched_sync_hint_enable && sync) {
                int cpu = smp_processor_id();
-               cpumask_t search_cpus;
-               cpumask_and(&search_cpus, tsk_cpus_allowed(p), cpu_online_mask);
-               if (cpumask_test_cpu(cpu, &search_cpus))
+
+               if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
                        return cpu;
        }
 
        rcu_read_lock();
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+       boosted = schedtune_task_boost(p) > 0;
+       prefer_idle = schedtune_prefer_idle(p) > 0;
+#else
+       boosted = get_sysctl_sched_cfs_boost() > 0;
+       prefer_idle = 0;
+#endif
 
        sd = rcu_dereference(per_cpu(sd_ea, prev_cpu));
+       /* Find a cpu with sufficient capacity */
+       tmp_target = find_best_target(p, boosted, prefer_idle);
 
        if (!sd)
                goto unlock;
+       if (tmp_target >= 0) {
+               target_cpu = tmp_target;
+               if ((boosted || prefer_idle) && idle_cpu(target_cpu))
+                       goto unlock;
+       }
 
-       for_each_cpu_and(i, tsk_cpus_allowed(p), sched_domain_span(sd)) {
-               int diff;
-               unsigned long spare;
-
+       if (target_cpu != prev_cpu) {
                struct energy_env eenv = {
-                       .util_delta     = task_util(p),
-                       .src_cpu        = prev_cpu,
-                       .dst_cpu        = i,
-                       .task           = p,
+                       .util_delta     = task_util(p),
+                       .src_cpu        = prev_cpu,
+                       .dst_cpu        = target_cpu,
+                       .task           = p,
                };
 
-               spare = capacity_spare_wake(i, p);
-
-               if (i == prev_cpu)
-                       continue;
-
-               if (spare > max_spare) {
-                       max_spare = spare;
-                       spare_cpu = i;
-               }
-
-               if (spare * 1024 < capacity_margin * task_util(p))
-                       continue;
-
-               diff = energy_diff(&eenv);
+               /* Not enough spare capacity on previous cpu */
+               if (cpu_overutilized(prev_cpu))
+                       goto unlock;
 
-               if (diff < min_diff) {
-                       min_diff = diff;
-                       energy_cpu = i;
-               }
+               if (energy_diff(&eenv) >= 0)
+                       target_cpu = prev_cpu;
        }
 
 unlock:
        rcu_read_unlock();
-
-       if (energy_cpu == prev_cpu && !cpu_overutilized(prev_cpu))
-               return prev_cpu;
-
-       return energy_cpu != prev_cpu ? energy_cpu : spare_cpu;
+       return target_cpu;
 }
 
 /*