/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/debugfs.h>
#include <api/fs/fs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	fdarray__init(&evlist->pollfd, 64);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

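/*
 * Illustrative sketch (not from the original file): the typical lifetime
 * of an evlist pairs one of the constructors above with
 * perf_evlist__delete():
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	...
 *	perf_evlist__delete(evlist);
 */
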
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos.  For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	fdarray__exit(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

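/*
 * Illustrative sketch (not from the original file): after parsing
 * something like 'perf record -e "{cycles,instructions}"', calling
 * perf_evlist__set_leader(evlist) makes the first evsel the group
 * leader, with leader->nr_members == 2.
 */
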
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

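/*
 * Illustrative sketch (not from the original file): tools that were given
 * no -e option typically fall back to this default event:
 *
 *	if (evlist->nr_entries == 0 && perf_evlist__add_default(evlist) < 0) {
 *		pr_err("Not enough memory for event selector list\n");
 *		return -ENOMEM;
 *	}
 */
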
static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

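/*
 * Illustrative sketch (not from the original file): callers pass an array
 * of attrs, e.g. a builtin-stat style set of defaults:
 *
 *	struct perf_event_attr default_attrs[] = {
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
 *	};
 *
 *	__perf_evlist__add_default_attrs(evlist, default_attrs,
 *					 ARRAY_SIZE(default_attrs));
 */
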
struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (evsel == NULL)
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

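/*
 * Illustrative sketch (not from the original file): trace tools add
 * tracepoints together with a handler that is fetched back when samples
 * are demuxed; process_sched_switch here is a hypothetical callback:
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch) < 0)
 *		return -1;
 */
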
static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
	    fdarray__grow(&evlist->pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
{
	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out fds POLLHUP'ed we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->pollfd.priv[pos].idx = idx;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, -1);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
{
	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);

	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered);
}

int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->pollfd, timeout);
}

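/*
 * Illustrative sketch (not from the original file): an event loop
 * combining the two helpers above, dropping fds that got POLLHUP'ed;
 * 'done' and 'timeout' are assumed caller state:
 *
 *	while (!done) {
 *		if (perf_evlist__poll(evlist, timeout) > 0)
 *			...consume the mmaps...
 *
 *		if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
 *			break;	// nothing left to poll
 *	}
 */
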
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct perf_evlist *evlist,
				     struct perf_evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);
	sid->idx = idx;
	if (evlist->cpus && cpu >= 0)
		sid->cpu = evlist->cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->system_wide && evlist->threads && thread >= 0)
		sid->tid = evlist->threads->map[thread];
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}

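/*
 * Illustrative sketch (not from the original file): the canonical
 * consumer loop pairs each perf_evlist__mmap_read() with
 * perf_evlist__mmap_consume() so the kernel can reuse the space:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		...process event...
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */
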
static bool perf_mmap__empty(struct perf_mmap *md)
{
	return perf_mmap__read_head(md) == md->prev;
}

static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
{
	++evlist->mmap[idx].refcnt;
}

static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
{
	BUG_ON(evlist->mmap[idx].refcnt == 0);

	if (--evlist->mmap[idx].refcnt == 0)
		__perf_evlist__munmap(evlist, idx);
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];

	if (!evlist->overwrite) {
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}

	if (md->refcnt == 1 && perf_mmap__empty(md))
		perf_evlist__mmap_put(evlist, idx);
}

static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
		evlist->mmap[idx].refcnt = 0;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

struct mmap_params {
	int prot;
	int mask;
};

static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	/*
	 * The last one will be done at perf_evlist__mmap_consume(), so that we
	 * make sure we don't prevent tools from consuming every last event in
	 * the ring buffer.
	 *
	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
	 * anymore, but the last events for it are still in the ring buffer,
	 * waiting to be consumed.
	 *
	 * Tools can choose to ignore this at their own discretion, but the
	 * evlist layer can't just drop it when filtering events in
	 * perf_evlist__filter_pollfd().
	 */
	evlist->mmap[idx].refcnt = 2;
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd;

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_evlist__mmap_get(evlist, idx);
		}

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
			perf_evlist__mmap_put(evlist, idx);
			return -1;
		}

		if (evsel->attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

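/*
 * Worked example (not from the original file, assuming 4 kiB pages): the
 * UINT_MAX default yields (512 * 1024) / 4096 = 128 data pages, so the
 * map spans (128 + 1) * 4096 = 528384 bytes -- the extra page is the
 * kernel's control page holding the ring buffer head/tail pointers.
 */
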
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = next_pow2_l(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int *mmap_pages = opt->value;
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}

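/*
 * Illustrative sketch (not from the original file): the usual call
 * sequence in a tool, error handling elided; a record_opts-style 'opts'
 * is assumed:
 *
 *	perf_evlist__create_maps(evlist, &opts->target);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__enable(evlist);
 */
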
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (target__uses_dummy_map(target))
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	evlist->threads = NULL;
	return -1;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

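/*
 * Worked example (not from the original file): with sample_id_all set and
 * sample_type = TID | TIME | ID | CPU, the trailer appended to every
 * non-sample event is 8 (pid+tid) + 8 (time) + 8 (id) + 8 (cpu+res) = 32
 * bytes.
 */
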
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	int n;

	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}

static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
{
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	evlist->cpus = cpu_map__new(NULL);
	if (evlist->cpus == NULL)
		goto out;

	evlist->threads = thread_map__new_dummy();
	if (evlist->threads == NULL)
		goto out_free_cpus;

	err = 0;
out:
	return err;
out_free_cpus:
	cpu_map__delete(evlist->cpus);
	evlist->cpus = NULL;
	goto out;
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->threads == NULL && evlist->cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		evlist->threads->map[0] = evlist->workload.pid;
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

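/*
 * Illustrative sketch (not from the original file): the cork pattern lets
 * a tool open, mmap and enable its events on the forked child before the
 * child execs the workload; a record_opts-style 'opts' is assumed:
 *
 *	perf_evlist__prepare_workload(evlist, &opts->target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__start_workload(evlist);	// child runs execvp() now
 */
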
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
			     int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];

	switch (err) {
	case ENOENT:
		scnprintf(buf, size, "%s",
			  "Error:\tUnable to find debugfs\n"
			  "Hint:\tWas your kernel compiled with debugfs support?\n"
			  "Hint:\tIs the debugfs filesystem mounted?\n"
			  "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
		break;
	case EACCES:
		scnprintf(buf, size,
			  "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
			  "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
			  debugfs_mountpoint, debugfs_mountpoint);
		break;
	default:
		scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
		break;
	}

	return 0;
}

int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct perf_evlist *evlist,
			   struct perf_evsel *move_evsel)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->node, &move);
	}

	list_splice(&move, &evlist->entries);
}

void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
				     struct perf_evsel *tracking_evsel)
{
	struct perf_evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}