OSDN Git Service

sched: Move rq->idle_stamp up to the core
author: Daniel Lezcano <daniel.lezcano@linaro.org>
Fri, 17 Jan 2014 09:04:03 +0000 (10:04 +0100)
committer: Ingo Molnar <mingo@kernel.org>
Mon, 10 Feb 2014 15:17:07 +0000 (16:17 +0100)
idle_balance() modifies the rq->idle_stamp field, making this information
shared across core.c and fair.c.

As we know if the cpu is going to idle or not with the previous patch, let's
encapsulate the rq->idle_stamp information in core.c by moving it up to the
caller.

The idle_balance() function returns true in case a balancing occurred and the
cpu won't be idle, false if no balance happened and the cpu is going idle.

Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
Cc: alex.shi@linaro.org
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1389949444-14821-3-git-send-email-daniel.lezcano@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/sched.h

index 74dd565..417cf65 100644 (file)
@@ -2704,8 +2704,15 @@ need_resched:
 
        pre_schedule(rq, prev);
 
-       if (unlikely(!rq->nr_running))
-               idle_balance(rq);
+       if (unlikely(!rq->nr_running)) {
+               /*
+                * We must set idle_stamp _before_ calling idle_balance(), such
+                * that we measure the duration of idle_balance() as idle time.
+                */
+               rq->idle_stamp = rq_clock(rq);
+               if (idle_balance(rq))
+                       rq->idle_stamp = 0;
+       }
 
        put_prev_task(rq, prev);
        next = pick_next_task(rq);
index 5ebc681..04fea77 100644 (file)
@@ -6531,7 +6531,7 @@ out:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-void idle_balance(struct rq *this_rq)
+int idle_balance(struct rq *this_rq)
 {
        struct sched_domain *sd;
        int pulled_task = 0;
@@ -6539,10 +6539,8 @@ void idle_balance(struct rq *this_rq)
        u64 curr_cost = 0;
        int this_cpu = this_rq->cpu;
 
-       this_rq->idle_stamp = rq_clock(this_rq);
-
        if (this_rq->avg_idle < sysctl_sched_migration_cost)
-               return;
+               return 0;
 
        /*
         * Drop the rq->lock, but keep IRQ/preempt disabled.
@@ -6580,10 +6578,8 @@ void idle_balance(struct rq *this_rq)
                interval = msecs_to_jiffies(sd->balance_interval);
                if (time_after(next_balance, sd->last_balance + interval))
                        next_balance = sd->last_balance + interval;
-               if (pulled_task) {
-                       this_rq->idle_stamp = 0;
+               if (pulled_task)
                        break;
-               }
        }
        rcu_read_unlock();
 
@@ -6594,7 +6590,7 @@ void idle_balance(struct rq *this_rq)
         * A task could have be enqueued in the meantime
         */
        if (this_rq->nr_running && !pulled_task)
-               return;
+               return 1;
 
        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
@@ -6606,6 +6602,8 @@ void idle_balance(struct rq *this_rq)
 
        if (curr_cost > this_rq->max_idle_balance_cost)
                this_rq->max_idle_balance_cost = curr_cost;
+
+       return pulled_task;
 }
 
 /*
index 82c0e02..bb89991 100644 (file)
@@ -1158,7 +1158,7 @@ extern const struct sched_class idle_sched_class;
 extern void update_group_power(struct sched_domain *sd, int cpu);
 
 extern void trigger_load_balance(struct rq *rq);
-extern void idle_balance(struct rq *this_rq);
+extern int idle_balance(struct rq *this_rq);
 
 extern void idle_enter_fair(struct rq *this_rq);
 extern void idle_exit_fair(struct rq *this_rq);