
perf stat: Split process_counters() to share it with process_stat_round_event()
author Namhyung Kim <namhyung@kernel.org>
Tue, 18 Oct 2022 02:02:21 +0000 (19:02 -0700)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Thu, 27 Oct 2022 19:37:25 +0000 (16:37 -0300)
The counter processing will do more work with aggregation.  Let's split it out
of read_counters() so that it can be shared by process_stat_round_event() too.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20221018020227.85905-15-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
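
To make the shape of the change easier to follow before reading the diff, here is a
minimal, self-contained sketch of the pattern this patch applies: the old read-and-process
function is split so that the processing half can be called from a second entry point.
This is illustrative C only, not the perf code; the counters[] array and the stub
read/process bodies are made up for the example, while the real functions operate on
evsel_list and stat_config as shown in the hunks below.

#include <stdio.h>

static int counters[3];		/* stand-in for the real per-event counters */

/* Reading can fail, so the split version reports that via its return value. */
static int read_counters(void)
{
	for (int i = 0; i < 3; i++) {
		counters[i] = 100 * (i + 1);	/* pretend we read hardware */
		if (counters[i] < 0)
			return -1;		/* e.g. a read helper failed */
	}
	return 0;
}

/* The processing half now stands alone, so another entry point
 * (process_stat_round_event() in the real patch) can reuse it. */
static void process_counters(void)
{
	for (int i = 0; i < 3; i++)
		printf("counter %d = %d\n", i, counters[i]);
}

int main(void)
{
	/* interval/exit paths: only process what was successfully read */
	if (read_counters() == 0)
		process_counters();

	/* stat-round replay path: values already present, just process them */
	process_counters();
	return 0;
}

The key point mirrored by the patch is the changed return type: callers that read live
counters check the result before processing, while the replay path skips reading entirely.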
tools/perf/builtin-stat.c

index bff28a1..838d295 100644
@@ -465,15 +465,19 @@ static int read_bpf_map_counters(void)
        return 0;
 }
 
-static void read_counters(struct timespec *rs)
+static int read_counters(struct timespec *rs)
 {
-       struct evsel *counter;
-
        if (!stat_config.stop_read_counter) {
                if (read_bpf_map_counters() ||
                    read_affinity_counters(rs))
-                       return;
+                       return -1;
        }
+       return 0;
+}
+
+static void process_counters(void)
+{
+       struct evsel *counter;
 
        evlist__for_each_entry(evsel_list, counter) {
                if (counter->err)
@@ -494,7 +498,8 @@ static void process_interval(void)
        perf_stat__reset_shadow_per_stat(&rt_stat);
        evlist__reset_aggr_stats(evsel_list);
 
-       read_counters(&rs);
+       if (read_counters(&rs) == 0)
+               process_counters();
 
        if (STAT_RECORD) {
                if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL))
@@ -980,7 +985,8 @@ try_again_reset:
         * avoid arbitrary skew, we must read all counters before closing any
         * group leaders.
         */
-       read_counters(&(struct timespec) { .tv_nsec = t1-t0 });
+       if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0)
+               process_counters();
 
        /*
         * We need to keep evsel_list alive, because it's processed
@@ -2099,13 +2105,11 @@ static int process_stat_round_event(struct perf_session *session,
                                    union perf_event *event)
 {
        struct perf_record_stat_round *stat_round = &event->stat_round;
-       struct evsel *counter;
        struct timespec tsh, *ts = NULL;
        const char **argv = session->header.env.cmdline_argv;
        int argc = session->header.env.nr_cmdline;
 
-       evlist__for_each_entry(evsel_list, counter)
-               perf_stat_process_counter(&stat_config, counter);
+       process_counters();
 
        if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL)
                update_stats(&walltime_nsecs_stats, stat_round->time);