perf lock contention: Skip stack trace from BPF
author    Namhyung Kim <namhyung@kernel.org>
Mon, 12 Sep 2022 05:53:14 +0000 (22:53 -0700)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Tue, 4 Oct 2022 11:55:22 +0000 (08:55 -0300)
Currently it collects stack traces up to the max size and then skips
entries afterwards, because we have no control over how perf callchains
are skipped.  But BPF can do the skipping itself with a flag passed to
bpf_get_stackid().

Say we have max-stack=4 and stack-skip=2; we get these stack traces:

Before:                    After:

     .---> +---+ <--.           .---> +---+ <--.
     |     |   |    |           |     |   |    |
     |     +---+  usable        |     +---+    |
    max    |   |    |          max    |   |    |
   stack   +---+ <--'         stack   +---+  usable
     |     | X |                |     |   |    |
     |     +---+   skip         |     +---+    |
     |     | X |                |     |   |    |
     `---> +---+                `---> +---+ <--'   <=== collection
                                      | X |
                                      +---+   skip
                                      | X |
                                      +---+
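
For illustration only (a sketch, not part of this patch): a minimal BPF
program using the same trick, where the number of frames to drop is OR'ed
into the low bits of the bpf_get_stackid() flags.  Map size, section name
and function names here are placeholders, not the perf code:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    #define MAX_STACK  4             /* matches "max-stack=4" above */

    struct {
            __uint(type, BPF_MAP_TYPE_STACK_TRACE);
            __uint(max_entries, 1024);
            __uint(key_size, sizeof(__u32));
            __uint(value_size, MAX_STACK * sizeof(__u64));
    } stacks SEC(".maps");

    int stack_skip;                   /* filled in from userspace, e.g. 2 */

    SEC("tracepoint/lock/contention_begin")
    int dummy_contention(void *ctx)
    {
            /* the helper drops 'stack_skip' frames before filling the map */
            int id = bpf_get_stackid(ctx, &stacks,
                                     BPF_F_FAST_STACK_CMP | stack_skip);

            return id < 0 ? 1 : 0;
    }

    char LICENSE[] SEC("license") = "GPL";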

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20220912055314.744552-5-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/util/bpf_lock_contention.c
tools/perf/util/bpf_skel/lock_contention.bpf.c

tools/perf/util/bpf_lock_contention.c
index ef5323c..efe5b99 100644
@@ -93,6 +93,8 @@ int lock_contention_prepare(struct lock_contention *con)
                bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
        }
 
+       skel->bss->stack_skip = con->stack_skip;
+
        lock_contention_bpf__attach(skel);
        return 0;
 }
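
The userspace side relies on the usual libbpf skeleton pattern: stack_skip
is a global in the BPF program, backed by a map that the skeleton
memory-maps, so it can be written from userspace before the program is
attached.  A hedged sketch (simplified, not the actual perf code; error
handling omitted, and the skeleton header path and function names follow
bpftool's generated naming for lock_contention.bpf.c):

    #include "bpf_skel/lock_contention.skel.h"

    static int prepare(int stack_skip)
    {
            struct lock_contention_bpf *skel;

            skel = lock_contention_bpf__open_and_load();
            if (!skel)
                    return -1;

            /* written before attach, so the program sees it from the start */
            skel->bss->stack_skip = stack_skip;

            return lock_contention_bpf__attach(skel);
    }
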
@@ -127,7 +129,7 @@ int lock_contention_read(struct lock_contention *con)
        while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
                struct map *kmap;
                struct symbol *sym;
-               int idx;
+               int idx = 0;
 
                bpf_map_lookup_elem(fd, &key, &data);
                st = zalloc(sizeof(*st));
@@ -146,8 +148,7 @@ int lock_contention_read(struct lock_contention *con)
 
                bpf_map_lookup_elem(stack, &key, stack_trace);
 
-               /* skip BPF + lock internal functions */
-               idx = con->stack_skip;
+               /* skip lock internal functions */
                while (is_lock_function(machine, stack_trace[idx]) &&
                       idx < con->max_stack - 1)
                        idx++;
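
With the skip now applied inside BPF, the trace read back from the stack
map no longer contains the BPF/perf internal frames, which is why idx can
start at 0 and the loop above only steps over the lock implementation
itself.  Conceptually (frame names are made up for illustration):

    /*
     * stack_trace[] as read from the stack map, stack-skip already applied:
     *
     *   [0] __mutex_lock      <- lock internal, skipped by the loop above
     *   [1] mutex_lock        <- lock internal, skipped by the loop above
     *   [2] do_something      <- first caller frame, idx stops here
     *   [3] ...
     */
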
tools/perf/util/bpf_skel/lock_contention.bpf.c
index 9e8b94e..e107d71 100644
@@ -72,6 +72,7 @@ struct {
 int enabled;
 int has_cpu;
 int has_task;
+int stack_skip;
 
 /* error stat */
 unsigned long lost;
@@ -117,7 +118,7 @@ int contention_begin(u64 *ctx)
        pelem->timestamp = bpf_ktime_get_ns();
        pelem->lock = (__u64)ctx[0];
        pelem->flags = (__u32)ctx[1];
-       pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP);
+       pelem->stack_id = bpf_get_stackid(ctx, &stacks, BPF_F_FAST_STACK_CMP | stack_skip);
 
        if (pelem->stack_id < 0)
                lost++;
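
The OR is valid because of how the bpf_get_stackid() flags are laid out in
the UAPI: the low byte is reserved for the skip count and the behavioral
flags start at bit 8.  As a reference (values from include/uapi/linux/bpf.h):

    /*
     * BPF_FUNC_get_stackid flags:
     *
     *   BPF_F_SKIP_FIELD_MASK   0xffULL       - number of frames to skip
     *   BPF_F_USER_STACK        (1ULL << 8)
     *   BPF_F_FAST_STACK_CMP    (1ULL << 9)
     *   BPF_F_REUSE_STACKID     (1ULL << 10)
     *
     * So "BPF_F_FAST_STACK_CMP | stack_skip" stays well-formed as long as
     * stack_skip fits in the low byte.
     */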