sched/fair: Merge for each idle cpu loop of ILB
author     Vincent Guittot <vincent.guittot@linaro.org>
Wed, 24 Feb 2021 13:30:04 +0000 (14:30 +0100)
committer  Ingo Molnar <mingo@kernel.org>
Sat, 6 Mar 2021 11:40:21 +0000 (12:40 +0100)
Remove the special case that handled this_cpu outside the for_each_cpu()
loop when running the ILB. Instead, use for_each_cpu_wrap() and start with
the next CPU after this_cpu, so the loop finishes with this_cpu.

update_nohz_stats() is now used for this_cpu too and prevents unnecessary
updates. We no longer need a special case for updating nohz.next_balance
for this_cpu because it is now handled by the loop like the other CPUs.
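
To illustrate the resulting iteration order, here is a minimal userspace C
sketch (not kernel code; NR_CPUS, the mask value, and the choice of this_cpu
are invented) emulating for_each_cpu_wrap(cpu, mask, this_cpu + 1): every
idle CPU is visited once, and this_cpu comes last:

    #include <stdio.h>

    #define NR_CPUS 8

    int main(void)
    {
            /* Hypothetical nohz.idle_cpus_mask: CPUs 0, 2, 3, 5 and 7 are idle. */
            unsigned int idle_mask = 0xad;
            int this_cpu = 3;       /* hypothetical ILB CPU, also set in the mask */
            int i;

            /* Walk all CPUs starting at this_cpu + 1 and wrapping around. */
            for (i = 1; i <= NR_CPUS; i++) {
                    int cpu = (this_cpu + i) % NR_CPUS;

                    if (!(idle_mask & (1u << cpu)))
                            continue;       /* not in the idle mask, skip it */

                    printf("balance cpu %d\n", cpu);
            }
            /* Prints 5, 7, 0, 2 and finally this_cpu (3). */
            return 0;
    }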

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210224133007.28644-5-vincent.guittot@linaro.org
kernel/sched/fair.c

index 1b91030..3c00918 100644
@@ -10043,22 +10043,9 @@ out:
         * When the cpu is attached to null domain for ex, it will not be
         * updated.
         */
-       if (likely(update_next_balance)) {
+       if (likely(update_next_balance))
                rq->next_balance = next_balance;
 
-#ifdef CONFIG_NO_HZ_COMMON
-               /*
-                * If this CPU has been elected to perform the nohz idle
-                * balance. Other idle CPUs have already rebalanced with
-                * nohz_idle_balance() and nohz.next_balance has been
-                * updated accordingly. This CPU is now running the idle load
-                * balance for itself and we need to update the
-                * nohz.next_balance accordingly.
-                */
-               if ((idle == CPU_IDLE) && time_after(nohz.next_balance, rq->next_balance))
-                       nohz.next_balance = rq->next_balance;
-#endif
-       }
 }
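
The special case removed above relied on the kernel's wrap-safe time_after()
comparison to pull nohz.next_balance earlier. A standalone sketch of that
check (the jiffies values are invented, and time_after() is given here in a
simplified form without the kernel's typecheck()):

    #include <stdio.h>

    #define time_after(a, b)        ((long)((b) - (a)) < 0)

    int main(void)
    {
            unsigned long nohz_next_balance = 1000;  /* hypothetical jiffies */
            unsigned long rq_next_balance = 900;     /* hypothetical jiffies */

            /* The removed block moved nohz.next_balance earlier if needed. */
            if (time_after(nohz_next_balance, rq_next_balance))
                    nohz_next_balance = rq_next_balance;

            printf("nohz.next_balance = %lu\n", nohz_next_balance); /* 900 */
            return 0;
    }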
 
 static inline int on_null_domain(struct rq *rq)
@@ -10385,8 +10372,12 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
         */
        smp_mb();
 
-       for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
-               if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
+       /*
+        * Start with the next CPU after this_cpu so we will end with
+        * this_cpu and give the other idle CPUs a chance to pull load.
+        */
+       for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu + 1) {
+               if (!idle_cpu(balance_cpu))
                        continue;
 
                /*
@@ -10432,15 +10423,6 @@ static void _nohz_idle_balance(struct rq *this_rq, unsigned int flags,
        if (likely(update_next_balance))
                nohz.next_balance = next_balance;
 
-       /* Newly idle CPU doesn't need an update */
-       if (idle != CPU_NEWLY_IDLE) {
-               update_blocked_averages(this_cpu);
-               has_blocked_load |= this_rq->has_blocked_load;
-       }
-
-       if (flags & NOHZ_BALANCE_KICK)
-               rebalance_domains(this_rq, CPU_IDLE);
-
        WRITE_ONCE(nohz.next_blocked,
                now + msecs_to_jiffies(LOAD_AVG_PERIOD));
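
Pieced together from the hunks above, the post-patch loop in
_nohz_idle_balance() reduces to roughly the following shape (a condensed
sketch with helper bodies and surrounding code elided, not a compilable
excerpt):

    smp_mb();

    /*
     * Start after this_cpu so it is balanced last, giving the other idle
     * CPUs a chance to pull load first; this_cpu no longer needs any
     * special casing before or after the loop.
     */
    for_each_cpu_wrap(balance_cpu, nohz.idle_cpus_mask, this_cpu + 1) {
            if (!idle_cpu(balance_cpu))
                    continue;
            /* update stats and rebalance balance_cpu, this_cpu included */
    }

    if (likely(update_next_balance))
            nohz.next_balance = next_balance;

    WRITE_ONCE(nohz.next_blocked,
            now + msecs_to_jiffies(LOAD_AVG_PERIOD));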