OSDN Git Service

sched: Fix a division by zero bug in scale_exec_time()
author: Pavankumar Kondeti <pkondeti@codeaurora.org>
Fri, 30 Sep 2016 14:18:22 +0000 (19:48 +0530)
committer: Pavankumar Kondeti <pkondeti@codeaurora.org>
Sat, 1 Oct 2016 04:35:08 +0000 (10:05 +0530)
When cycle_counter is used to estimate the frequency, calling
update_task_ravg() twice on the same task without refreshing
the wallclock results in a division by zero bug. Add a safety
check in update_task_ravg() to prevent this.

The above bug is hit from __schedule() when next == prev. There
is no need to call update_task_ravg() twice for PUT_PREV_TASK
and PICK_NEXT_TASK events for the same task. Calling
update_task_ravg() with TASK_UPDATE event is sufficient.

Change-Id: Ib3af9004f2462618c535b8195377bedb584d0261
Signed-off-by: Pavankumar Kondeti <pkondeti@codeaurora.org>
kernel/sched/core.c
kernel/sched/hmp.c

index 609aa2e..8f7a452 100644 (file)
@@ -3384,16 +3384,17 @@ static void __sched notrace __schedule(bool preempt)
                update_rq_clock(rq);
 
        next = pick_next_task(rq, prev);
-       wallclock = sched_ktime_clock();
-       update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
-       update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
        clear_tsk_need_resched(prev);
        clear_preempt_need_resched();
        rq->clock_skip_update = 0;
 
        BUG_ON(task_cpu(next) != cpu_of(rq));
 
+       wallclock = sched_ktime_clock();
        if (likely(prev != next)) {
+               update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
+               update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+
                rq->nr_switches++;
                rq->curr = next;
                ++*switch_count;
@@ -3404,6 +3405,7 @@ static void __sched notrace __schedule(bool preempt)
                rq = context_switch(rq, prev, next); /* unlocks the rq */
                cpu = cpu_of(rq);
        } else {
+               update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
                lockdep_unpin_lock(&rq->lock);
                raw_spin_unlock_irq(&rq->lock);
        }
index 6e1757a..9e6fe21 100644 (file)
@@ -2683,7 +2683,8 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
 void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
                                                u64 wallclock, u64 irqtime)
 {
-       if (!rq->window_start || sched_disable_window_stats)
+       if (!rq->window_start || sched_disable_window_stats ||
+           p->ravg.mark_start == wallclock)
                return;
 
        lockdep_assert_held(&rq->lock);