From 7c3461a6acf2b2a36657215fd410f407e33862f2 Mon Sep 17 00:00:00 2001
From: Pavankumar Kondeti
Date: Fri, 30 Sep 2016 19:48:22 +0530
Subject: [PATCH] sched: Fix a division by zero bug in scale_exec_time()

When cycle_counter is used to estimate the frequency, calling
update_task_ravg() twice on the same task without refreshing the
wallclock results in a division by zero bug. Add a safety check in
update_task_ravg() to prevent this.

The above bug is hit from __schedule() when next == prev. There is no
need to call update_task_ravg() twice for PUT_PREV_TASK and
PICK_NEXT_TASK events for the same task. Calling update_task_ravg()
with a TASK_UPDATE event is sufficient.

Change-Id: Ib3af9004f2462618c535b8195377bedb584d0261
Signed-off-by: Pavankumar Kondeti
---
 kernel/sched/core.c | 8 +++++---
 kernel/sched/hmp.c  | 3 ++-
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 609aa2e588d7..8f7a452967b0 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3384,16 +3384,17 @@ static void __sched notrace __schedule(bool preempt)
 	update_rq_clock(rq);
 
 	next = pick_next_task(rq, prev);
-	wallclock = sched_ktime_clock();
-	update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
-	update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
 	clear_tsk_need_resched(prev);
 	clear_preempt_need_resched();
 	rq->clock_skip_update = 0;
 
 	BUG_ON(task_cpu(next) != cpu_of(rq));
 
+	wallclock = sched_ktime_clock();
 	if (likely(prev != next)) {
+		update_task_ravg(prev, rq, PUT_PREV_TASK, wallclock, 0);
+		update_task_ravg(next, rq, PICK_NEXT_TASK, wallclock, 0);
+
 		rq->nr_switches++;
 		rq->curr = next;
 		++*switch_count;
@@ -3404,6 +3405,7 @@ static void __sched notrace __schedule(bool preempt)
 		rq = context_switch(rq, prev, next); /* unlocks the rq */
 		cpu = cpu_of(rq);
 	} else {
+		update_task_ravg(prev, rq, TASK_UPDATE, wallclock, 0);
 		lockdep_unpin_lock(&rq->lock);
 		raw_spin_unlock_irq(&rq->lock);
 	}
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 6e1757aa1541..9e6fe21f05b6 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -2683,7 +2683,8 @@ static void update_task_demand(struct task_struct *p, struct rq *rq,
 void update_task_ravg(struct task_struct *p, struct rq *rq, int event,
 						u64 wallclock, u64 irqtime)
 {
-	if (!rq->window_start || sched_disable_window_stats)
+	if (!rq->window_start || sched_disable_window_stats ||
+	    p->ravg.mark_start == wallclock)
 		return;
 
 	lockdep_assert_held(&rq->lock);
-- 
2.11.0
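
For context, below is a minimal user-space sketch of the failure mode the
commit message describes; it is not the HMP scheduler's actual
scale_exec_time() implementation, and the names estimate_freq,
scale_exec_time_sketch, cycles_delta and max_freq are hypothetical stand-ins.
It only illustrates why an unchanged wallclock makes the elapsed-time delta
zero, so a cycle-counter-based frequency estimate would divide by zero, and
how a mark_start == wallclock guard short-circuits that case.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical frequency estimate from a cycle counter: elapsed cycles
 * divided by elapsed time.  Faults when time_delta is zero. */
static uint64_t estimate_freq(uint64_t cycles_delta, uint64_t time_delta)
{
	return cycles_delta / time_delta;
}

/* Rough analogue of scaling execution time by the estimated frequency.
 * The time delta is wallclock - mark_start, which is zero when the
 * wallclock was not refreshed between two updates of the same task. */
static uint64_t scale_exec_time_sketch(uint64_t delta, uint64_t cycles_delta,
				       uint64_t wallclock, uint64_t mark_start,
				       uint64_t max_freq)
{
	uint64_t freq = estimate_freq(cycles_delta, wallclock - mark_start);

	return delta * freq / max_freq;
}

int main(void)
{
	/* Second back-to-back update: mark_start was already set to this
	 * wallclock by the first update, so the time delta is zero. */
	uint64_t mark_start = 1000, wallclock = 1000;

	/* Same idea as the patch's p->ravg.mark_start == wallclock check:
	 * bail out before the division is ever reached. */
	if (wallclock == mark_start) {
		printf("wallclock unchanged, skipping update\n");
		return 0;
	}

	printf("scaled: %llu\n", (unsigned long long)
	       scale_exec_time_sketch(500, 1000000, wallclock, mark_start, 2000000));
	return 0;
}

The guard added in update_task_ravg() plays the same role as the early return
above: once PUT_PREV_TASK/PICK_NEXT_TASK are only issued when prev != next,
any remaining repeated call with an unchanged wallclock is rejected before the
window accounting can reach the division.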