
trace:sched: Make util_avg in load_avg trace reflect PELT/WALT as used
author     Chris Redpath <chris.redpath@arm.com>
           Wed, 8 Feb 2017 14:25:35 +0000 (14:25 +0000)
committer  Andres Oportus <andresoportus@google.com>
           Fri, 2 Jun 2017 15:01:54 +0000 (08:01 -0700)
With the ability to choose between WALT and PELT for utilisation tracking,
we can end up using WALT to make all the decisions while the
sched_load_avg_(cpu|task) trace points report PELT figures. This is not a
major problem, but when analysing a trace it is nicer to see numbers that
represent what the scheduler is actually using than to have to add extra
sched_walt_* trace points to work it out.

Add reporting for both types, and make the util_avg member reflect what
will be seen from the cpu_util() or task_util() functions in the scheduler.
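
For reference, the WALT figure reported below is derived as
util = (demand << SCHED_LOAD_SHIFT) / walt_ravg_window. A minimal
standalone sketch of that arithmetic (userspace C, not kernel code;
SCHED_LOAD_SHIFT is assumed to be 10 here, and both input values are
purely illustrative):

    #include <stdio.h>

    #define SCHED_LOAD_SHIFT 10 /* assumption: 1024-based load scale */

    int main(void)
    {
            /* illustrative numbers, not real trace data */
            unsigned long long demand = 10000000ULL;           /* task demand, ns */
            unsigned long long walt_ravg_window = 20000000ULL; /* window size, ns */

            /* same arithmetic as the TP_fast_assign in the diff below */
            unsigned long long util_avg_walt =
                    (demand << SCHED_LOAD_SHIFT) / walt_ravg_window;

            printf("util_avg_walt=%llu of %d\n",
                   util_avg_walt, 1 << SCHED_LOAD_SHIFT);
            return 0;
    }

With these values a task demanding half the window reports 512 of 1024,
i.e. half of full capacity on the 1024-based scale.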

Change-Id: I2abbd2c5fa70822096d0f3372b4c12b1c6af1590
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
include/trace/events/sched.h
kernel/sched/fair.c

diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 1d758d1..433d391 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -638,14 +638,21 @@ TRACE_EVENT(sched_contrib_scale_f,
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_SCHED_WALT
+extern unsigned int sysctl_sched_use_walt_cpu_util;
+extern unsigned int sysctl_sched_use_walt_task_util;
+extern unsigned int walt_ravg_window;
+extern unsigned int walt_disabled;
+#endif
+
 /*
  * Tracepoint for accounting sched averages for tasks.
  */
 TRACE_EVENT(sched_load_avg_task,
 
-       TP_PROTO(struct task_struct *tsk, struct sched_avg *avg),
+       TP_PROTO(struct task_struct *tsk, struct sched_avg *avg, void *_ravg),
 
-       TP_ARGS(tsk, avg),
+       TP_ARGS(tsk, avg, _ravg),
 
        TP_STRUCT__entry(
                __array( char,  comm,   TASK_COMM_LEN           )
@@ -653,6 +660,8 @@ TRACE_EVENT(sched_load_avg_task,
                __field( int,   cpu                             )
                __field( unsigned long, load_avg                )
                __field( unsigned long, util_avg                )
+               __field( unsigned long, util_avg_pelt   )
+               __field( unsigned long, util_avg_walt   )
                __field( u64,           load_sum                )
                __field( u32,           util_sum                )
                __field( u32,           period_contrib          )
@@ -667,15 +676,25 @@ TRACE_EVENT(sched_load_avg_task,
                __entry->load_sum               = avg->load_sum;
                __entry->util_sum               = avg->util_sum;
                __entry->period_contrib         = avg->period_contrib;
-       ),
-
-       TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu load_sum=%llu"
+               __entry->util_avg_pelt  = avg->util_avg;
+               __entry->util_avg_walt  = 0;
+#ifdef CONFIG_SCHED_WALT
+               __entry->util_avg_walt = (((unsigned long)((struct ravg*)_ravg)->demand) << SCHED_LOAD_SHIFT);
+               do_div(__entry->util_avg_walt, walt_ravg_window);
+               if (!walt_disabled && sysctl_sched_use_walt_task_util)
+                       __entry->util_avg = __entry->util_avg_walt;
+#endif
+       ),
+       TP_printk("comm=%s pid=%d cpu=%d load_avg=%lu util_avg=%lu "
+                       "util_avg_pelt=%lu util_avg_walt=%lu load_sum=%llu"
                  " util_sum=%u period_contrib=%u",
                  __entry->comm,
                  __entry->pid,
                  __entry->cpu,
                  __entry->load_avg,
                  __entry->util_avg,
+                 __entry->util_avg_pelt,
+                 __entry->util_avg_walt,
                  (u64)__entry->load_sum,
                  (u32)__entry->util_sum,
                  (u32)__entry->period_contrib)
@@ -694,16 +713,29 @@ TRACE_EVENT(sched_load_avg_cpu,
                __field( int,   cpu                             )
                __field( unsigned long, load_avg                )
                __field( unsigned long, util_avg                )
+               __field( unsigned long, util_avg_pelt   )
+               __field( unsigned long, util_avg_walt   )
        ),
 
        TP_fast_assign(
                __entry->cpu                    = cpu;
                __entry->load_avg               = cfs_rq->avg.load_avg;
                __entry->util_avg               = cfs_rq->avg.util_avg;
-       ),
-
-       TP_printk("cpu=%d load_avg=%lu util_avg=%lu",
-                 __entry->cpu, __entry->load_avg, __entry->util_avg)
+               __entry->util_avg_pelt  = cfs_rq->avg.util_avg;
+               __entry->util_avg_walt  = 0;
+#ifdef CONFIG_SCHED_WALT
+               __entry->util_avg_walt  =
+                               cpu_rq(cpu)->prev_runnable_sum << SCHED_LOAD_SHIFT;
+               do_div(__entry->util_avg_walt, walt_ravg_window);
+               if (!walt_disabled && sysctl_sched_use_walt_cpu_util)
+                       __entry->util_avg               = __entry->util_avg_walt;
+#endif
+       ),
+
+       TP_printk("cpu=%d load_avg=%lu util_avg=%lu "
+                         "util_avg_pelt=%lu util_avg_walt=%lu",
+                 __entry->cpu, __entry->load_avg, __entry->util_avg,
+                 __entry->util_avg_pelt, __entry->util_avg_walt)
 );
 
 /*
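
Both tracepoints gate the final util_avg on the same three conditions:
WALT compiled in, not disabled at runtime, and the relevant sysctl set.
A small userspace sketch of that selection (all names and values here
are illustrative stand-ins for kernel state, not the kernel code itself):

    #include <stdio.h>

    /* illustrative stand-ins for the kernel's gating state */
    static unsigned int walt_disabled;                       /* 0: WALT active */
    static unsigned int sysctl_sched_use_walt_task_util = 1; /* sysctl enabled */

    int main(void)
    {
            unsigned long util_avg_pelt = 300;      /* PELT figure, illustrative */
            unsigned long util_avg_walt = 512;      /* WALT figure, illustrative */
            unsigned long util_avg = util_avg_pelt; /* default: report PELT */

            /* mirrors the #ifdef CONFIG_SCHED_WALT branch in TP_fast_assign */
            if (!walt_disabled && sysctl_sched_use_walt_task_util)
                    util_avg = util_avg_walt;

            printf("util_avg=%lu (pelt=%lu walt=%lu)\n",
                   util_avg, util_avg_pelt, util_avg_walt);
            return 0;
    }
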
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dfd150b..f972df2 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3178,6 +3178,7 @@ static inline void update_load_avg(struct sched_entity *se, int flags)
        u64 now = cfs_rq_clock_task(cfs_rq);
        int cpu = cpu_of(rq_of(cfs_rq));
        int decayed;
+       void *ptr = NULL;
 
        /*
         * Track task load average for carrying it to new CPU after migrated, and
@@ -3195,8 +3196,12 @@ static inline void update_load_avg(struct sched_entity *se, int flags)
        if (decayed && (flags & UPDATE_TG))
                update_tg_load_avg(cfs_rq, 0);
 
-       if (entity_is_task(se))
-               trace_sched_load_avg_task(task_of(se), &se->avg);
+       if (entity_is_task(se)) {
+#ifdef CONFIG_SCHED_WALT
+               ptr = (void *)&(task_of(se)->ravg);
+#endif
+               trace_sched_load_avg_task(task_of(se), &se->avg, ptr);
+       }
 }
 
 /**
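
The fair.c side hands the WALT ravg to the tracepoint as an opaque
void * so that the trace header never needs struct ravg's definition;
the TP_fast_assign above casts it back. A minimal sketch of that
handoff pattern (stand-in types and values, not the kernel's):

    #include <stdio.h>

    /* stand-in definitions; the real struct ravg lives in WALT's headers */
    struct ravg { unsigned long long demand; };
    struct task { struct ravg ravg; };

    /* plays the role of the tracepoint consumer, which casts the
     * opaque pointer back to struct ravg * */
    static void consume(void *_ravg)
    {
            struct ravg *r = _ravg;
            printf("demand=%llu\n", r->demand);
    }

    int main(void)
    {
            struct task t = { .ravg = { .demand = 10000000ULL } };
            void *ptr = NULL;

            /* as in update_load_avg(): only set under CONFIG_SCHED_WALT */
            ptr = (void *)&t.ravg;
            consume(ptr);
            return 0;
    }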