OSDN Git Service

perf bpf: Deprecate bpf_map__resize() in favor of bpf_map__set_max_entries()
authorMuhammad Falak R Wani <falakreyaz@gmail.com>
Sun, 15 Aug 2021 10:36:10 +0000 (16:06 +0530)
committerArnaldo Carvalho de Melo <acme@redhat.com>
Wed, 15 Sep 2021 20:57:30 +0000 (17:57 -0300)
As a part of libbpf 1.0 plan[0], this patch deprecates use of
bpf_map__resize in favour of bpf_map__set_max_entries.

Reference: https://github.com/libbpf/libbpf/issues/304
[0]: https://github.com/libbpf/libbpf/wiki/Libbpf:-the-road-to-v1.0#libbpfh-high-level-apis

Signed-off-by: Muhammad Falak R Wani <falakreyaz@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Muhammad Falak R Wani <falakreyaz@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Monnet <quentin@isovalent.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Yu Kuai <yukuai3@huawei.com>
Link: http://lore.kernel.org/lkml/20210815103610.27887-1-falakreyaz@gmail.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/util/bpf_counter.c
tools/perf/util/bpf_counter_cgroup.c

index ba0f208..ced2dac 100644 (file)
@@ -127,9 +127,9 @@ static int bpf_program_profiler_load_one(struct evsel *evsel, u32 prog_id)
 
        skel->rodata->num_cpu = evsel__nr_cpus(evsel);
 
-       bpf_map__resize(skel->maps.events, evsel__nr_cpus(evsel));
-       bpf_map__resize(skel->maps.fentry_readings, 1);
-       bpf_map__resize(skel->maps.accum_readings, 1);
+       bpf_map__set_max_entries(skel->maps.events, evsel__nr_cpus(evsel));
+       bpf_map__set_max_entries(skel->maps.fentry_readings, 1);
+       bpf_map__set_max_entries(skel->maps.accum_readings, 1);
 
        prog_name = bpf_target_prog_name(prog_fd);
        if (!prog_name) {
@@ -399,7 +399,7 @@ static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
                return -1;
        }
 
-       bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
+       bpf_map__set_max_entries(skel->maps.events, libbpf_num_possible_cpus());
        err = bperf_leader_bpf__load(skel);
        if (err) {
                pr_err("Failed to load leader skeleton\n");
index 89aa5e7..cbc6c2b 100644 (file)
@@ -65,14 +65,14 @@ static int bperf_load_program(struct evlist *evlist)
 
        /* we need one copy of events per cpu for reading */
        map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
-       bpf_map__resize(skel->maps.events, map_size);
-       bpf_map__resize(skel->maps.cgrp_idx, nr_cgroups);
+       bpf_map__set_max_entries(skel->maps.events, map_size);
+       bpf_map__set_max_entries(skel->maps.cgrp_idx, nr_cgroups);
        /* previous result is saved in a per-cpu array */
        map_size = evlist->core.nr_entries / nr_cgroups;
-       bpf_map__resize(skel->maps.prev_readings, map_size);
+       bpf_map__set_max_entries(skel->maps.prev_readings, map_size);
        /* cgroup result needs all events (per-cpu) */
        map_size = evlist->core.nr_entries;
-       bpf_map__resize(skel->maps.cgrp_readings, map_size);
+       bpf_map__set_max_entries(skel->maps.cgrp_readings, map_size);
 
        set_max_rlimit();