OSDN Git Service

sched: EAS/WALT: take into account the waking task's load
authorJoonwoo Park <joonwoop@codeaurora.org>
Thu, 26 Jan 2017 01:45:56 +0000 (17:45 -0800)
committerJoonwoo Park <joonwoop@codeaurora.org>
Sat, 2 Sep 2017 00:23:47 +0000 (17:23 -0700)
WALT's function cpu_util(cpu) reports the CPU's load without taking into
account the waking task's load.  Thus cpu_overutilized() currently
underestimates the load on the waking task's previous CPU.

Take the task's load into account when determining whether the previous
CPU is overutilized, so we can bail out early without running
energy_diff(), which is expensive.

Change-Id: I30f146984a880ad2cc1b8a4ce35bd239a8c9a607
Signed-off-by: Joonwoo Park <joonwoop@codeaurora.org>
(minor rebase conflicts)
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
kernel/sched/fair.c

index 3641dad..94bb578 100644 (file)
@@ -4656,6 +4656,7 @@ static inline void hrtick_update(struct rq *rq)
 #endif
 
 #ifdef CONFIG_SMP
+static bool __cpu_overutilized(int cpu, int delta);
 static bool cpu_overutilized(int cpu);
 unsigned long boosted_cpu_util(int cpu);
 #else
@@ -5856,9 +5857,14 @@ static inline bool task_fits_max(struct task_struct *p, int cpu)
        return __task_fits(p, cpu, 0);
 }
 
+static bool __cpu_overutilized(int cpu, int delta)
+{
+       return (capacity_of(cpu) * 1024) < ((cpu_util(cpu) + delta) * capacity_margin);
+}
+
 static bool cpu_overutilized(int cpu)
 {
-       return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * capacity_margin);
+       return __cpu_overutilized(cpu, 0);
 }
 
 #ifdef CONFIG_SCHED_TUNE
@@ -6577,6 +6583,7 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
        }
 
        if (target_cpu != prev_cpu) {
+               int delta = 0;
                struct energy_env eenv = {
                        .util_delta     = task_util(p),
                        .src_cpu        = prev_cpu,
@@ -6584,8 +6591,13 @@ static int select_energy_cpu_brute(struct task_struct *p, int prev_cpu, int sync
                        .task           = p,
                };
 
+
+#ifdef CONFIG_SCHED_WALT
+               if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+                       delta = task_util(p);
+#endif
                /* Not enough spare capacity on previous cpu */
-               if (cpu_overutilized(prev_cpu)) {
+               if (__cpu_overutilized(prev_cpu, delta)) {
                        schedstat_inc(p, se.statistics.nr_wakeups_secb_insuff_cap);
                        schedstat_inc(this_rq(), eas_stats.secb_insuff_cap);
                        goto unlock;