
sched/core: Unify p->on_rq updates
author     Peter Zijlstra <peterz@infradead.org>
           Tue, 9 Apr 2019 07:59:05 +0000 (09:59 +0200)
committer  Ingo Molnar <mingo@kernel.org>
           Tue, 16 Apr 2019 14:55:17 +0000 (16:55 +0200)
Almost all {,de}activate_task() invocations pair with p->on_rq
updates; the exception is the usage in rt/deadline, which holds both
rq locks and therefore doesn't strictly need to set
TASK_ON_RQ_MIGRATING, but doing so anyway is harmless.

Put the updates in {,de}activate_task() and cut down on repetition.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
kernel/sched/fair.c
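
The net effect, reconstructed from the first hunk below plus the
surrounding kernel source of that era (a sketch of the post-patch
helpers, abridged, not a verbatim quote of the tree):

	/* kernel/sched/core.c, post-patch shape: the on_rq transition now
	 * lives inside the helpers instead of being repeated at each caller. */
	void activate_task(struct rq *rq, struct task_struct *p, int flags)
	{
		if (task_contributes_to_load(p))
			rq->nr_uninterruptible--;

		enqueue_task(rq, p, flags);

		p->on_rq = TASK_ON_RQ_QUEUED;	/* queued only once enqueue is done */
	}

	void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
	{
		/* A sleeping dequeue clears on_rq; any other dequeue is a migration. */
		p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

		if (task_contributes_to_load(p))
			rq->nr_uninterruptible++;

		dequeue_task(rq, p, flags);
	}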

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3feb83d..f4838b7 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -792,10 +792,14 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
                rq->nr_uninterruptible--;
 
        enqueue_task(rq, p, flags);
+
+       p->on_rq = TASK_ON_RQ_QUEUED;
 }
 
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
+       p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;
+
        if (task_contributes_to_load(p))
                rq->nr_uninterruptible++;
 
@@ -1237,11 +1241,9 @@ static void __migrate_swap_task(struct task_struct *p, int cpu)
                rq_pin_lock(src_rq, &srf);
                rq_pin_lock(dst_rq, &drf);
 
-               p->on_rq = TASK_ON_RQ_MIGRATING;
                deactivate_task(src_rq, p, 0);
                set_task_cpu(p, cpu);
                activate_task(dst_rq, p, 0);
-               p->on_rq = TASK_ON_RQ_QUEUED;
                check_preempt_curr(dst_rq, p, 0);
 
                rq_unpin_lock(dst_rq, &drf);
@@ -1733,7 +1735,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 #endif
 
        activate_task(rq, p, en_flags);
-       p->on_rq = TASK_ON_RQ_QUEUED;
        ttwu_do_wakeup(rq, p, wake_flags, rf);
 }
 
@@ -2408,7 +2409,6 @@ void wake_up_new_task(struct task_struct *p)
        post_init_entity_util_avg(p);
 
        activate_task(rq, p, ENQUEUE_NOCLOCK);
-       p->on_rq = TASK_ON_RQ_QUEUED;
        trace_sched_wakeup_new(p);
        check_preempt_curr(rq, p, WF_FORK);
 #ifdef CONFIG_SMP
@@ -3407,7 +3407,6 @@ static void __sched notrace __schedule(bool preempt)
                        prev->state = TASK_RUNNING;
                } else {
                        deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK);
-                       prev->on_rq = 0;
 
                        if (prev->in_iowait) {
                                atomic_inc(&rq->nr_iowait);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index b6cc070..e5b100b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7491,7 +7491,6 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
 {
        lockdep_assert_held(&env->src_rq->lock);
 
-       p->on_rq = TASK_ON_RQ_MIGRATING;
        deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
        set_task_cpu(p, env->dst_cpu);
 }
@@ -7627,7 +7626,6 @@ static void attach_task(struct rq *rq, struct task_struct *p)
 
        BUG_ON(task_rq(p) != rq);
        activate_task(rq, p, ENQUEUE_NOCLOCK);
-       p->on_rq = TASK_ON_RQ_QUEUED;
        check_preempt_curr(rq, p, 0);
 }
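
Why the QUEUED/MIGRATING distinction matters at all: p->on_rq is
inspected from other contexts, sometimes without the task's rq lock
held, through helpers of roughly this shape (kernel/sched/sched.h
around v5.1; shown for orientation only, not part of this patch):

	static inline int task_on_rq_queued(struct task_struct *p)
	{
		return p->on_rq == TASK_ON_RQ_QUEUED;
	}

	static inline int task_on_rq_migrating(struct task_struct *p)
	{
		return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
	}

This is also why the rt/deadline exception noted in the changelog is
harmless: those push/pull paths hold both rq locks across the
deactivate/activate pair, so no reader can act on the transient
TASK_ON_RQ_MIGRATING value there.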