
sched/core: Simplify helpers for rq clock update skip requests
author    Davidlohr Bueso <dave@stgolabs.net>
          Wed, 4 Apr 2018 16:15:39 +0000 (09:15 -0700)
committer Ingo Molnar <mingo@kernel.org>
          Thu, 5 Apr 2018 07:20:46 +0000 (09:20 +0200)
By renaming the functions we can get rid of the skip parameter
and improve code readability. It makes zero sense to have
things such as:

  rq_clock_skip_update(rq, false)

When the skip request is in fact not going to happen. Ever. Rename
things such that we end up with:

  rq_clock_skip_update(rq)
  rq_clock_cancel_skipupdate(rq)
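
The call-site conversion is mechanical; summarizing the pattern applied
by the hunks below, an unconditional skip request keeps the old name and
the lone cancellation in the RT throttling path gets the new one:

  -       rq_clock_skip_update(rq, true);
  +       rq_clock_skip_update(rq);

  -       rq_clock_skip_update(rq, false);
  +       rq_clock_cancel_skipupdate(rq);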

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Cc: matt@codeblueprint.co.uk
Cc: rostedt@goodmis.org
Link: http://lkml.kernel.org/r/20180404161539.nhadkff2aats74jh@linux-n805
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/sched/sched.h

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 28b6899..550a07f 100644
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
         * this case, we can save a useless back to back clock update.
         */
        if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-               rq_clock_skip_update(rq, true);
+               rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d1c7bf7..e7b3008 100644
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
         * so we don't do microscopic update in schedule()
         * and double the fastpath cost.
         */
-       rq_clock_skip_update(rq, true);
+       rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0951d1c..54dc31e 100644
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
                 * so we don't do microscopic update in schedule()
                 * and double the fastpath cost.
                 */
-               rq_clock_skip_update(rq, true);
+               rq_clock_skip_update(rq);
        }
 
        set_skip_buddy(se);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ad13e62..7aef6b4 100644
@@ -861,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
                                 * 'runtime'.
                                 */
                                if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-                                       rq_clock_skip_update(rq, false);
+                                       rq_clock_cancel_skipupdate(rq);
                        }
                        if (rt_rq->rt_time || rt_rq->rt_nr_running)
                                idle = 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c3deaee..15750c2 100644
@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq)
        return rq->clock_task;
 }
 
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
 {
        lockdep_assert_held(&rq->lock);
-       if (skip)
-               rq->clock_update_flags |= RQCF_REQ_SKIP;
-       else
-               rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+       rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+       lockdep_assert_held(&rq->lock);
+       rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }
 
 struct rq_flags {