
perf tools: Save bpf_prog_info and BTF of new BPF programs
tools/perf/util/bpf-event.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include "bpf-event.h"
#include "debug.h"
#include "symbol.h"
#include "machine.h"
#include "env.h"
#include "session.h"
#include "map.h"
#include "evlist.h"

#define ptr_to_u64(ptr)    ((__u64)(unsigned long)(ptr))

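/* format @len bytes of @data as hex digits into @buf, return the number of characters written */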
static int snprintf_hex(char *buf, size_t size, unsigned char *data, size_t len)
{
        int ret = 0;
        size_t i;

        for (i = 0; i < len; i++)
                ret += snprintf(buf + ret, size - ret, "%02x", data[i]);
        return ret;
}

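/*
 * Handle PERF_BPF_EVENT_PROG_LOAD at report time: look up the bpf_prog_info
 * saved in perf_env for this program id and mark the kernel map of each
 * jited sub program as a BPF_PROG_INFO dso, so annotation can find the
 * program info later.
 */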
static int machine__process_bpf_event_load(struct machine *machine,
                                           union perf_event *event,
                                           struct perf_sample *sample __maybe_unused)
{
        struct bpf_prog_info_linear *info_linear;
        struct bpf_prog_info_node *info_node;
        struct perf_env *env = machine->env;
        int id = event->bpf_event.id;
        unsigned int i;

        /* perf-record, no need to handle bpf-event */
        if (env == NULL)
                return 0;

        info_node = perf_env__find_bpf_prog_info(env, id);
        if (!info_node)
                return 0;
        info_linear = info_node->info_linear;

        for (i = 0; i < info_linear->info.nr_jited_ksyms; i++) {
                u64 *addrs = (u64 *)(uintptr_t)(info_linear->info.jited_ksyms);
                u64 addr = addrs[i];
                struct map *map;

                map = map_groups__find(&machine->kmaps, addr);

                if (map) {
                        map->dso->binary_type = DSO_BINARY_TYPE__BPF_PROG_INFO;
                        map->dso->bpf_prog.id = id;
                        map->dso->bpf_prog.sub_id = i;
                        map->dso->bpf_prog.env = env;
                }
        }
        return 0;
}

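/* dispatch a PERF_RECORD_BPF_EVENT to the handler for its bpf_event type */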
int machine__process_bpf_event(struct machine *machine __maybe_unused,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_bpf_event(event, stdout);

        switch (event->bpf_event.type) {
        case PERF_BPF_EVENT_PROG_LOAD:
                return machine__process_bpf_event_load(machine, event, sample);

        case PERF_BPF_EVENT_PROG_UNLOAD:
                /*
                 * Do not free bpf_prog_info and btf of the program here,
                 * as annotation still needs them. They will be freed at
                 * the end of the session.
                 */
                break;
        default:
                pr_debug("unexpected bpf_event type of %d\n",
                         event->bpf_event.type);
                break;
        }
        return 0;
}

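/*
 * Copy the raw BTF data for @btf_id into a newly allocated btf_node and
 * store it in the perf_env.
 */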
static int perf_env__fetch_btf(struct perf_env *env,
                               u32 btf_id,
                               struct btf *btf)
{
        struct btf_node *node;
        u32 data_size;
        const void *data;

        data = btf__get_raw_data(btf, &data_size);

        node = malloc(data_size + sizeof(struct btf_node));
        if (!node)
                return -1;

        node->id = btf_id;
        node->data_size = data_size;
        memcpy(node->data, data, data_size);

        perf_env__insert_btf(env, node);
        return 0;
}

/*
 * Synthesize PERF_RECORD_KSYMBOL and PERF_RECORD_BPF_EVENT for one bpf
 * program. One PERF_RECORD_BPF_EVENT is generated for the program, and
 * one PERF_RECORD_KSYMBOL is generated for each sub program.
 *
 * Returns:
 *    0 for success;
 *   -1 for failures;
 *   -2 for lack of kernel support.
 */
static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
                                               perf_event__handler_t process,
                                               struct machine *machine,
                                               int fd,
                                               union perf_event *event,
                                               struct record_opts *opts)
{
        struct ksymbol_event *ksymbol_event = &event->ksymbol_event;
        struct bpf_event *bpf_event = &event->bpf_event;
        struct bpf_prog_info_linear *info_linear;
        struct perf_tool *tool = session->tool;
        struct bpf_prog_info_node *info_node;
        struct bpf_prog_info *info;
        struct btf *btf = NULL;
        bool has_btf = false;
        struct perf_env *env;
        u32 sub_prog_cnt, i;
        int err = 0;
        u64 arrays;

        /*
         * for perf-record and perf-report use header.env;
         * otherwise, use global perf_env.
         */
        env = session->data ? &session->header.env : &perf_env;

        arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
        arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
        arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
        arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
        arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

        info_linear = bpf_program__get_prog_info_linear(fd, arrays);
        if (IS_ERR_OR_NULL(info_linear)) {
                info_linear = NULL;
                pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
                return -1;
        }

        if (info_linear->info_len < offsetof(struct bpf_prog_info, prog_tags)) {
                pr_debug("%s: the kernel is too old, aborting\n", __func__);
                free(info_linear);
                return -2;
        }

        info = &info_linear->info;

        /* number of ksyms, func_lengths, and tags should match */
        sub_prog_cnt = info->nr_jited_ksyms;
        if (sub_prog_cnt != info->nr_prog_tags ||
            sub_prog_cnt != info->nr_jited_func_lens) {
                free(info_linear);
                return -1;
        }

        /* check BTF func info support */
        if (info->btf_id && info->nr_func_info && info->func_info_rec_size) {
                /* btf func info number should be the same as sub_prog_cnt */
                if (sub_prog_cnt != info->nr_func_info) {
                        pr_debug("%s: mismatch in BPF sub program count and BTF function info count, aborting\n", __func__);
                        err = -1;
                        goto out;
                }
                if (btf__get_from_id(info->btf_id, &btf)) {
                        pr_debug("%s: failed to get BTF of id %u, aborting\n", __func__, info->btf_id);
                        err = -1;
                        btf = NULL;
                        goto out;
                }
                has_btf = true;
                perf_env__fetch_btf(env, info->btf_id, btf);
        }

        /* Synthesize PERF_RECORD_KSYMBOL */
        for (i = 0; i < sub_prog_cnt; i++) {
                u8 (*prog_tags)[BPF_TAG_SIZE] = (void *)(uintptr_t)(info->prog_tags);
                __u32 *prog_lens  = (__u32 *)(uintptr_t)(info->jited_func_lens);
                __u64 *prog_addrs = (__u64 *)(uintptr_t)(info->jited_ksyms);
                void *func_infos  = (void *)(uintptr_t)(info->func_info);
                const struct bpf_func_info *finfo;
                const char *short_name = NULL;
                const struct btf_type *t;
                int name_len;

                *ksymbol_event = (struct ksymbol_event){
                        .header = {
                                .type = PERF_RECORD_KSYMBOL,
                                .size = offsetof(struct ksymbol_event, name),
                        },
                        .addr = prog_addrs[i],
                        .len = prog_lens[i],
                        .ksym_type = PERF_RECORD_KSYMBOL_TYPE_BPF,
                        .flags = 0,
                };
                name_len = snprintf(ksymbol_event->name, KSYM_NAME_LEN,
                                    "bpf_prog_");
                name_len += snprintf_hex(ksymbol_event->name + name_len,
                                         KSYM_NAME_LEN - name_len,
                                         prog_tags[i], BPF_TAG_SIZE);
                if (has_btf) {
                        finfo = func_infos + i * info->func_info_rec_size;
                        t = btf__type_by_id(btf, finfo->type_id);
                        short_name = btf__name_by_offset(btf, t->name_off);
                } else if (i == 0 && sub_prog_cnt == 1) {
                        /* no subprog */
                        if (info->name[0])
                                short_name = info->name;
                } else
                        short_name = "F";
                if (short_name)
                        name_len += snprintf(ksymbol_event->name + name_len,
                                             KSYM_NAME_LEN - name_len,
                                             "_%s", short_name);

                ksymbol_event->header.size += PERF_ALIGN(name_len + 1,
                                                         sizeof(u64));

                memset((void *)event + event->header.size, 0, machine->id_hdr_size);
                event->header.size += machine->id_hdr_size;
                err = perf_tool__process_synth_event(tool, event,
                                                     machine, process);
        }

        if (!opts->no_bpf_event) {
                /* Synthesize PERF_RECORD_BPF_EVENT */
                *bpf_event = (struct bpf_event){
                        .header = {
                                .type = PERF_RECORD_BPF_EVENT,
                                .size = sizeof(struct bpf_event),
                        },
                        .type = PERF_BPF_EVENT_PROG_LOAD,
                        .flags = 0,
                        .id = info->id,
                };
                memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
                memset((void *)event + event->header.size, 0, machine->id_hdr_size);
                event->header.size += machine->id_hdr_size;

                /* save bpf_prog_info to env */
                info_node = malloc(sizeof(struct bpf_prog_info_node));
                if (!info_node) {
                        err = -1;
                        goto out;
                }

                info_node->info_linear = info_linear;
                perf_env__insert_bpf_prog_info(env, info_node);
                info_linear = NULL;

                /*
                 * process after saving bpf_prog_info to env, so that
                 * required information is ready for lookup
                 */
                err = perf_tool__process_synth_event(tool, event,
                                                     machine, process);
        }

out:
        free(info_linear);
        free(btf);
        return err ? -1 : 0;
}

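/*
 * Walk all BPF programs currently loaded in the kernel (via
 * bpf_prog_get_next_id) and synthesize events for each of them.
 */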
int perf_event__synthesize_bpf_events(struct perf_session *session,
                                      perf_event__handler_t process,
                                      struct machine *machine,
                                      struct record_opts *opts)
{
        union perf_event *event;
        __u32 id = 0;
        int err;
        int fd;

        event = malloc(sizeof(event->bpf_event) + KSYM_NAME_LEN + machine->id_hdr_size);
        if (!event)
                return -1;
        while (true) {
                err = bpf_prog_get_next_id(id, &id);
                if (err) {
                        if (errno == ENOENT) {
                                err = 0;
                                break;
                        }
                        pr_debug("%s: can't get next program: %s%s\n",
                                 __func__, strerror(errno),
                                 errno == EINVAL ? " -- kernel too old?" : "");
                        /* don't report error on old kernel or EPERM */
                        err = (errno == EINVAL || errno == EPERM) ? 0 : -1;
                        break;
                }
                fd = bpf_prog_get_fd_by_id(id);
                if (fd < 0) {
                        pr_debug("%s: failed to get fd for prog_id %u\n",
                                 __func__, id);
                        continue;
                }

                err = perf_event__synthesize_one_bpf_prog(session, process,
                                                          machine, fd,
                                                          event, opts);
                close(fd);
                if (err) {
                        /* do not return error for old kernel */
                        if (err == -2)
                                err = 0;
                        break;
                }
        }
        free(event);
        return err;
}

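/*
 * Fetch bpf_prog_info (and its BTF, if any) for the program with this id
 * and save both in the perf_env, so they remain available after the
 * program is unloaded.
 */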
static void perf_env__add_bpf_info(struct perf_env *env, u32 id)
{
        struct bpf_prog_info_linear *info_linear;
        struct bpf_prog_info_node *info_node;
        struct btf *btf = NULL;
        u64 arrays;
        u32 btf_id;
        int fd;

        fd = bpf_prog_get_fd_by_id(id);
        if (fd < 0)
                return;

        arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
        arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
        arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
        arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
        arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
        arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;

        info_linear = bpf_program__get_prog_info_linear(fd, arrays);
        if (IS_ERR_OR_NULL(info_linear)) {
                pr_debug("%s: failed to get BPF program info. aborting\n", __func__);
                goto out;
        }

        btf_id = info_linear->info.btf_id;

        info_node = malloc(sizeof(struct bpf_prog_info_node));
        if (info_node) {
                info_node->info_linear = info_linear;
                perf_env__insert_bpf_prog_info(env, info_node);
        } else
                free(info_linear);

        if (btf_id == 0)
                goto out;

        if (btf__get_from_id(btf_id, &btf)) {
                pr_debug("%s: failed to get BTF of id %u, aborting\n",
                         __func__, btf_id);
                goto out;
        }
        perf_env__fetch_btf(env, btf_id, btf);

out:
        free(btf);
        close(fd);
}

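/*
 * Side-band event callback: save bpf_prog_info and BTF of programs that
 * are loaded while perf is running.
 */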
static int bpf_event__sb_cb(union perf_event *event, void *data)
{
        struct perf_env *env = data;

        if (event->header.type != PERF_RECORD_BPF_EVENT)
                return -1;

        switch (event->bpf_event.type) {
        case PERF_BPF_EVENT_PROG_LOAD:
                perf_env__add_bpf_info(env, event->bpf_event.id);
                /* fall through */
        case PERF_BPF_EVENT_PROG_UNLOAD:
                /*
                 * Do not free bpf_prog_info and btf of the program here,
                 * as annotation still needs them. They will be freed at
                 * the end of the session.
                 */
                break;
        default:
                pr_debug("unexpected bpf_event type of %d\n",
                         event->bpf_event.type);
                break;
        }

        return 0;
}

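/*
 * Set up a software dummy event with attr.bpf_event set on the side-band
 * evlist, so that PERF_RECORD_BPF_EVENT records are routed to
 * bpf_event__sb_cb() while the session runs.
 */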
int bpf_event__add_sb_event(struct perf_evlist **evlist,
                            struct perf_env *env)
{
        struct perf_event_attr attr = {
                .type             = PERF_TYPE_SOFTWARE,
                .config           = PERF_COUNT_SW_DUMMY,
                .sample_id_all    = 1,
                .watermark        = 1,
                .bpf_event        = 1,
                .size      = sizeof(attr), /* to capture ABI version */
        };

        /*
         * Older gcc versions don't support designated initializers, like above,
         * for unnamed union members, such as the following:
         */
        attr.wakeup_watermark = 1;

        return perf_evlist__add_sb_event(evlist, &attr, bpf_event__sb_cb, env);
}