// SPDX-License-Identifier: GPL-2.0
#include "callchain.h"
#include "debug.h"
#include "dso.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
#include "mem-events.h"
#include "session.h"
#include "namespaces.h"
#include "cgroup.h"
#include "sort.h"
#include "units.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "srcline.h"
#include "symbol.h"
#include "thread.h"
#include "block-info.h"
#include "ui/progress.h"
#include <errno.h>
#include <math.h>
#include <inttypes.h>
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

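/*
 * Make sure the "unresolved" DSO column is wide enough to hold a raw
 * address (BITS_PER_LONG / 4 hex digits), unless the user fixed the
 * column widths or is filtering/separating fields explicitly.
 */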
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

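/*
 * Grow the per-column maximum widths so that every field of @h fits.
 * Called once per visible entry; the accumulated widths are later used
 * to align the output columns.
 */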
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        if (h->block_info)
                return;
        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose > 0)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 8);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.ms.sym) {
                        symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.ms.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.ms.sym) {
                        symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
                        if (verbose > 0)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.ms.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }

                if (h->branch_info->srcline_from)
                        hists__new_col_len(hists, HISTC_SRCLINE_FROM,
                                        strlen(h->branch_info->srcline_from));
                if (h->branch_info->srcline_to)
                        hists__new_col_len(hists, HISTC_SRCLINE_TO,
                                        strlen(h->branch_info->srcline_to));
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.ms.sym) {
                        symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.ms.sym) {
                        symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.ms.map) {
                        symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }

                hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
                                   unresolved_col_width + 4 + 2);

                hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
                                   unresolved_col_width + 4 + 2);

        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CGROUP, 6);
        hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
        hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
        hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
        hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
        hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);

        if (symbol_conf.nanosecs)
                hists__new_col_len(hists, HISTC_TIME, 16);
        else
                hists__new_col_len(hists, HISTC_TIME, 12);
        hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);

        if (h->srcline) {
                len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
                hists__new_col_len(hists, HISTC_SRCLINE, len);
        }

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

        if (h->cgroup) {
                const char *cgrp_name = "unknown";
                struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
                                                   h->cgroup);
                if (cgrp != NULL)
                        cgrp_name = cgrp->name;

                hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
        }
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

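/*
 * Quantize a sample timestamp down to the start of its time bucket
 * (symbol_conf.time_quantum, settable with --time-quantum), so that
 * entries sorted on time group into fixed-size intervals.
 */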
static long hist_time(unsigned long htime)
{
        unsigned long time_quantum = symbol_conf.time_quantum;
        if (time_quantum)
                return (htime / time_quantum) * time_quantum;
        return htime;
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
        he_stat->period         += period;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
}

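/*
 * Exponentially decay the period and event counts: each pass keeps
 * 7/8 of the previous value.  Used when aging entries in live-update
 * tools such as 'perf top', so stale samples gradually fade out.
 */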
static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first_cached(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root_cached *root_in;
        struct rb_root_cached *root_out;

        if (he->parent_he) {
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                if (hists__has(hists, need_collapse))
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase_cached(&he->rb_node_in, root_in);
        rb_erase_cached(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
        struct rb_node *next = rb_first_cached(&hists->entries);
        struct hist_entry *n;
        int i = 0;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (i == idx)
                        return n;

                next = rb_next(&n->rb_node);
                i++;
        }

        return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */

static int hist_entry__init(struct hist_entry *he,
                            struct hist_entry *template,
                            bool sample_self,
                            size_t callchain_size)
{
        *he = *template;
        he->callchain_size = callchain_size;

        if (symbol_conf.cumulate_callchain) {
                he->stat_acc = malloc(sizeof(he->stat));
                if (he->stat_acc == NULL)
                        return -ENOMEM;
                memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                if (!sample_self)
                        memset(&he->stat, 0, sizeof(he->stat));
        }

        map__get(he->ms.map);

        if (he->branch_info) {
                /*
                 * This branch info is (part of) an array allocated by
                 * sample__resolve_bstack() and will be freed after the
                 * new entries have been added, so save our own copy.
                 */
                he->branch_info = malloc(sizeof(*he->branch_info));
                if (he->branch_info == NULL)
                        goto err;

                memcpy(he->branch_info, template->branch_info,
                       sizeof(*he->branch_info));

                map__get(he->branch_info->from.ms.map);
                map__get(he->branch_info->to.ms.map);
        }

        if (he->mem_info) {
                map__get(he->mem_info->iaddr.ms.map);
                map__get(he->mem_info->daddr.ms.map);
        }

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_init(he->callchain);

        if (he->raw_data) {
                he->raw_data = memdup(he->raw_data, he->raw_size);
                if (he->raw_data == NULL)
                        goto err_infos;
        }

        if (he->srcline) {
                he->srcline = strdup(he->srcline);
                if (he->srcline == NULL)
                        goto err_rawdata;
        }

        if (symbol_conf.res_sample) {
                he->res_samples = calloc(sizeof(struct res_sample),
                                        symbol_conf.res_sample);
                if (!he->res_samples)
                        goto err_srcline;
        }

        INIT_LIST_HEAD(&he->pairs.node);
        thread__get(he->thread);
        he->hroot_in  = RB_ROOT_CACHED;
        he->hroot_out = RB_ROOT_CACHED;

        if (!symbol_conf.report_hierarchy)
                he->leaf = true;

        return 0;

err_srcline:
        zfree(&he->srcline);

err_rawdata:
        zfree(&he->raw_data);

err_infos:
        if (he->branch_info) {
                map__put(he->branch_info->from.ms.map);
                map__put(he->branch_info->to.ms.map);
                zfree(&he->branch_info);
        }
        if (he->mem_info) {
                map__put(he->mem_info->iaddr.ms.map);
                map__put(he->mem_info->daddr.ms.map);
        }
err:
        map__zput(he->ms.map);
        zfree(&he->stat_acc);
        return -ENOMEM;
}

static void *hist_entry__zalloc(size_t size)
{
        return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
        free(ptr);
}

static struct hist_entry_ops default_ops = {
        .new    = hist_entry__zalloc,
        .free   = hist_entry__free,
};

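/*
 * Allocate a new hist_entry using template->ops (falling back to the
 * zalloc/free default_ops above), with room for a callchain_root
 * appended when callchains are in use, then deep-copy the template
 * via hist_entry__init().
 */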
static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        struct hist_entry_ops *ops = template->ops;
        size_t callchain_size = 0;
        struct hist_entry *he;
        int err = 0;

        if (!ops)
                ops = template->ops = &default_ops;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = ops->new(callchain_size);
        if (he) {
                err = hist_entry__init(he, template, sample_self, callchain_size);
                if (err) {
                        ops->free(he);
                        he = NULL;
                }
        }

        return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
        if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
                return;

        he->hists->callchain_period += period;
        if (!he->filtered)
                he->hists->callchain_non_filtered_period += period;
}

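/*
 * Find the hist_entry matching @entry in the input rbtree, adding
 * @entry's period to it if found; otherwise allocate a new entry and
 * insert it at the position discovered during the walk.
 */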
static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        bool leftmost = true;

        p = &hists->entries_in->rb_root.rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        mem_info__zput(entry->mem_info);

                        block_info__zput(entry->block_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else {
                        p = &(*p)->rb_right;
                        leftmost = false;
                }
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}

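/*
 * Return a uniformly distributed value in [0, high).  A plain
 * random() % high would be biased whenever the generator's range is
 * not a multiple of high, so reject values below the bias threshold
 * and retry.
 */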
static unsigned random_max(unsigned high)
{
        unsigned thresh = -high % high;
        for (;;) {
                unsigned r = random();
                if (r >= thresh)
                        return r % high;
        }
}

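/*
 * Keep up to symbol_conf.res_sample (time, cpu, tid) tuples per entry
 * as representative raw samples.  The first res_sample samples fill
 * the array; after that each new sample overwrites a randomly chosen
 * slot.
 */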
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
        struct res_sample *r;
        int j;

        if (he->num_res < symbol_conf.res_sample) {
                j = he->num_res++;
        } else {
                j = random_max(symbol_conf.res_sample);
        }
        r = &he->res_samples[j];
        r->time = sample->time;
        r->cpu = sample->cpu;
        r->tid = sample->tid;
}

static struct hist_entry*
__hists__add_entry(struct hists *hists,
                   struct addr_location *al,
                   struct symbol *sym_parent,
                   struct branch_info *bi,
                   struct mem_info *mi,
                   struct block_info *block_info,
                   struct perf_sample *sample,
                   bool sample_self,
                   struct hist_entry_ops *ops)
{
        struct namespaces *ns = thread__namespaces(al->thread);
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .cgroup_id = {
                        .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
                        .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
                },
                .cgroup = sample->cgroup,
                .ms = {
                        .maps   = al->maps,
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .srcline = (char *) al->srcline,
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .code_page_size = sample->code_page_size,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .block_info = block_info,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
                .ops = ops,
                .time = hist_time(sample->time),
                .weight = sample->weight,
                .ins_lat = sample->ins_lat,
                .p_stage_cyc = sample->p_stage_cyc,
        }, *he = hists__findnew_entry(hists, &entry, al, sample_self);

        if (!hists->has_callchains && he && he->callchain_size != 0)
                hists->has_callchains = true;
        if (he && symbol_conf.res_sample)
                hists__res_sample(he, sample);
        return he;
}

struct hist_entry *hists__add_entry(struct hists *hists,
                                    struct addr_location *al,
                                    struct symbol *sym_parent,
                                    struct branch_info *bi,
                                    struct mem_info *mi,
                                    struct perf_sample *sample,
                                    bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                                  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
                                        struct hist_entry_ops *ops,
                                        struct addr_location *al,
                                        struct symbol *sym_parent,
                                        struct branch_info *bi,
                                        struct mem_info *mi,
                                        struct perf_sample *sample,
                                        bool sample_self)
{
        return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
                                  sample, sample_self, ops);
}

struct hist_entry *hists__add_entry_block(struct hists *hists,
                                          struct addr_location *al,
                                          struct block_info *block_info)
{
        struct hist_entry entry = {
                .block_info = block_info,
                .hists = hists,
                .ms = {
                        .maps = al->maps,
                        .map = al->map,
                        .sym = al->sym,
                },
        }, *he = hists__findnew_entry(hists, &entry, al, false);

        return he;
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort(), which is based solely
         * on periods.  We want the sorting to be done on
         * nr_events * weight, and that is achieved indirectly by
         * passing period=weight here and in he_stat__add_period().
         */
        sample->period = cost;

        he = hists__add_entry(hists, al, iter->parent, NULL, mi,
                              sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
                             struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->maps = bi[i].to.ms.maps;
        al->map = bi[i].to.ms.map;
        al->sym = bi[i].to.ms.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

        he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

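/*
 * Cumulative ("children") mode: besides the sampled location itself,
 * every caller on the callchain gets the period accumulated into its
 * own entry.  The he_cache array below remembers which entries were
 * already updated for this sample, so cycles and recursion are only
 * counted once.
 */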
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursion so that they're
         * accumulated only once, preventing entries from showing more
         * than 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
                              sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
                           struct hist_entry *right)
{
        struct symbol *sym_l = left->ms.sym;
        struct symbol *sym_r = right->ms.sym;

        if (!sym_l && !sym_r)
                return left->ip != right->ip;

        return !!_sort__sym_cmp(sym_l, sym_r);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .maps = al->maps,
                        .map = al->map,
                        .sym = al->sym,
                },
                .srcline = (char *) al->srcline,
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;
        bool fast = hists__has(he_tmp.hists, sym);

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                /*
                 * In most cases there are no duplicate entries in the
                 * callchain.  The symbols are usually different, so do
                 * a quick check on the symbols first.
                 */
                if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
                        continue;

                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling the callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                              sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

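/*
 * The ops tables below drive hist_entry_iter__add(): prepare_entry()
 * resolves per-sample data, add_single_entry() adds the entry for the
 * sampled location, then next_entry()/add_next_entry() loop over any
 * additional locations (branch stack entries or callchain parents),
 * and finish_entry() releases the iterator state.
 */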
const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;
        struct map *alm = NULL;

        if (al)
                alm = map__get(al->map);

        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err) {
                map__put(alm);
                return err;
        }

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        map__put(alm);

        return err;
}

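/*
 * Compare two entries using the sort keys' cmp callbacks, as opposed
 * to hist_entry__collapse() below, which uses the collapse callbacks
 * to decide whether two entries should be merged into one.
 */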
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

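/*
 * Drop all references taken in hist_entry__init() and free the owned
 * copies (branch/mem/block info, srcline, raw data, callchain) before
 * handing the entry back to its allocator via ops->free().
 */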
void hist_entry__delete(struct hist_entry *he)
{
        struct hist_entry_ops *ops = he->ops;

        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.ms.map);
                map__zput(he->branch_info->to.ms.map);
                free_srcline(he->branch_info->srcline_from);
                free_srcline(he->branch_info->srcline_to);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.ms.map);
                map__zput(he->mem_info->daddr.ms.map);
                mem_info__zput(he->mem_info);
        }

        if (he->block_info)
                block_info__zput(he->block_info);

        zfree(&he->res_samples);
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                zfree(&he->srcfile);
        free_callchain(he->callchain);
        zfree(&he->trace_output);
        zfree(&he->raw_data);
        ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces, because that would break viewing this with, for instance, 'less':
 * it would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                                   struct perf_hpp_fmt *fmt, int printed)
{
        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
                const int width = fmt->width(fmt, hpp, he->hists);
                if (printed < width) {
                        advance_hpp(hpp, printed);
                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
                }
        }

        return printed;
}

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
                                       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
        return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                                                enum hist_filter type,
                                                fmt_chk_fn check)
{
        struct perf_hpp_fmt *fmt;
        bool type_match = false;
        struct hist_entry *parent = he->parent_he;

        switch (type) {
        case HIST_FILTER__THREAD:
                if (symbol_conf.comm_list == NULL &&
                    symbol_conf.pid_list == NULL &&
                    symbol_conf.tid_list == NULL)
                        return;
                break;
        case HIST_FILTER__DSO:
                if (symbol_conf.dso_list == NULL)
                        return;
                break;
        case HIST_FILTER__SYMBOL:
                if (symbol_conf.sym_list == NULL)
                        return;
                break;
        case HIST_FILTER__PARENT:
        case HIST_FILTER__GUEST:
        case HIST_FILTER__HOST:
        case HIST_FILTER__SOCKET:
        case HIST_FILTER__C2C:
        default:
                return;
        }

        /* if it's filtered by its own fmt, it has to have filter bits */
        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
                if (check(fmt)) {
                        type_match = true;
                        break;
                }
        }

        if (type_match) {
                /*
                 * If the filter is for the current level's entry,
                 * propagate the filter marker to the parents.  The
                 * marker bit was already set by default, so we only
                 * need to clear it for non-filtered entries.
                 */
                if (!(he->filtered & (1 << type))) {
                        while (parent) {
                                parent->filtered &= ~(1 << type);
                                parent = parent->parent_he;
                        }
                }
        } else {
                /*
                 * If the current entry doesn't have matching formats, set
                 * the filter marker for upper level entries.  It will be
                 * cleared if its lower level entries are not filtered.
                 *
                 * For lower-level entries, it inherits the parent's
                 * filter bit so that lower level entries of a
                 * non-filtered entry won't set the filter marker.
                 */
                if (parent == NULL)
                        he->filtered |= (1 << type);
                else
                        he->filtered |= (parent->filtered & (1 << type));
        }
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
        hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
                                            check_thread_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
                                            perf_hpp__is_dso_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
                                            perf_hpp__is_sym_entry);

        hists__apply_filters(he->hists, he);
}

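/*
 * Insert @he into one level of the hierarchy tree, merging it into an
 * existing sibling when all the sort keys of this level compare equal.
 * @hpp_list holds only the formats belonging to this level.
 */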
1451 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1452                                                  struct rb_root_cached *root,
1453                                                  struct hist_entry *he,
1454                                                  struct hist_entry *parent_he,
1455                                                  struct perf_hpp_list *hpp_list)
1456 {
1457         struct rb_node **p = &root->rb_root.rb_node;
1458         struct rb_node *parent = NULL;
1459         struct hist_entry *iter, *new;
1460         struct perf_hpp_fmt *fmt;
1461         int64_t cmp;
1462         bool leftmost = true;
1463
1464         while (*p != NULL) {
1465                 parent = *p;
1466                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1467
1468                 cmp = 0;
1469                 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1470                         cmp = fmt->collapse(fmt, iter, he);
1471                         if (cmp)
1472                                 break;
1473                 }
1474
1475                 if (!cmp) {
1476                         he_stat__add_stat(&iter->stat, &he->stat);
1477                         return iter;
1478                 }
1479
1480                 if (cmp < 0)
1481                         p = &parent->rb_left;
1482                 else {
1483                         p = &parent->rb_right;
1484                         leftmost = false;
1485                 }
1486         }
1487
1488         new = hist_entry__new(he, true);
1489         if (new == NULL)
1490                 return NULL;
1491
1492         hists->nr_entries++;
1493
1494         /* save related format list for output */
1495         new->hpp_list = hpp_list;
1496         new->parent_he = parent_he;
1497
1498         hist_entry__apply_hierarchy_filters(new);
1499
1500         /* some fields are now passed to 'new' */
1501         perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1502                 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1503                         he->trace_output = NULL;
1504                 else
1505                         new->trace_output = NULL;
1506
1507                 if (perf_hpp__is_srcline_entry(fmt))
1508                         he->srcline = NULL;
1509                 else
1510                         new->srcline = NULL;
1511
1512                 if (perf_hpp__is_srcfile_entry(fmt))
1513                         he->srcfile = NULL;
1514                 else
1515                         new->srcfile = NULL;
1516         }
1517
1518         rb_link_node(&new->rb_node_in, parent, p);
1519         rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1520         return new;
1521 }
1522
1523 static int hists__hierarchy_insert_entry(struct hists *hists,
1524                                          struct rb_root_cached *root,
1525                                          struct hist_entry *he)
1526 {
1527         struct perf_hpp_list_node *node;
1528         struct hist_entry *new_he = NULL;
1529         struct hist_entry *parent = NULL;
1530         int depth = 0;
1531         int ret = 0;
1532
1533         list_for_each_entry(node, &hists->hpp_formats, list) {
1534                 /* skip period (overhead) and elided columns */
1535                 if (node->level == 0 || node->skip)
1536                         continue;
1537
1538                 /* insert copy of 'he' for each fmt into the hierarchy */
1539                 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1540                 if (new_he == NULL) {
1541                         ret = -1;
1542                         break;
1543                 }
1544
1545                 root = &new_he->hroot_in;
1546                 new_he->depth = depth++;
1547                 parent = new_he;
1548         }
1549
1550         if (new_he) {
1551                 new_he->leaf = true;
1552
1553                 if (hist_entry__has_callchains(new_he) &&
1554                     symbol_conf.use_callchain) {
1555                         callchain_cursor_reset(&callchain_cursor);
1556                         if (callchain_merge(&callchain_cursor,
1557                                             new_he->callchain,
1558                                             he->callchain) < 0)
1559                                 ret = -1;
1560                 }
1561         }
1562
1563         /* 'he' is no longer used */
1564         hist_entry__delete(he);
1565
1566         /* return 0 (or -1 on error) since the filters were already applied */
1567         return ret;
1568 }
1569
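/*
 * Merge 'he' into the collapsed tree.  Returns 1 when a new entry was
 * linked in, 0 when it was merged into an existing entry (freeing 'he')
 * and -1 on error.
 */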
1570 static int hists__collapse_insert_entry(struct hists *hists,
1571                                         struct rb_root_cached *root,
1572                                         struct hist_entry *he)
1573 {
1574         struct rb_node **p = &root->rb_root.rb_node;
1575         struct rb_node *parent = NULL;
1576         struct hist_entry *iter;
1577         int64_t cmp;
1578         bool leftmost = true;
1579
1580         if (symbol_conf.report_hierarchy)
1581                 return hists__hierarchy_insert_entry(hists, root, he);
1582
1583         while (*p != NULL) {
1584                 parent = *p;
1585                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1586
1587                 cmp = hist_entry__collapse(iter, he);
1588
1589                 if (!cmp) {
1590                         int ret = 0;
1591
1592                         he_stat__add_stat(&iter->stat, &he->stat);
1593                         if (symbol_conf.cumulate_callchain)
1594                                 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1595
1596                         if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1597                                 callchain_cursor_reset(&callchain_cursor);
1598                                 if (callchain_merge(&callchain_cursor,
1599                                                     iter->callchain,
1600                                                     he->callchain) < 0)
1601                                         ret = -1;
1602                         }
1603                         hist_entry__delete(he);
1604                         return ret;
1605                 }
1606
1607                 if (cmp < 0)
1608                         p = &(*p)->rb_left;
1609                 else {
1610                         p = &(*p)->rb_right;
1611                         leftmost = false;
1612                 }
1613         }
1614         hists->nr_entries++;
1615
1616         rb_link_node(&he->rb_node_in, parent, p);
1617         rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1618         return 1;
1619 }
1620
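/*
 * Rotate to the other input tree under hists->lock so that new samples
 * can keep being added while the previous batch is collapsed, and
 * return the tree used until now.
 */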
1621 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1622 {
1623         struct rb_root_cached *root;
1624
1625         pthread_mutex_lock(&hists->lock);
1626
1627         root = hists->entries_in;
1628         if (++hists->entries_in > &hists->entries_in_array[1])
1629                 hists->entries_in = &hists->entries_in_array[0];
1630
1631         pthread_mutex_unlock(&hists->lock);
1632
1633         return root;
1634 }
1635
1636 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1637 {
1638         hists__filter_entry_by_dso(hists, he);
1639         hists__filter_entry_by_thread(hists, he);
1640         hists__filter_entry_by_symbol(hists, he);
1641         hists__filter_entry_by_socket(hists, he);
1642 }
1643
1644 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1645 {
1646         struct rb_root_cached *root;
1647         struct rb_node *next;
1648         struct hist_entry *n;
1649         int ret;
1650
1651         if (!hists__has(hists, need_collapse))
1652                 return 0;
1653
1654         hists->nr_entries = 0;
1655
1656         root = hists__get_rotate_entries_in(hists);
1657
1658         next = rb_first_cached(root);
1659
1660         while (next) {
1661                 if (session_done())
1662                         break;
1663                 n = rb_entry(next, struct hist_entry, rb_node_in);
1664                 next = rb_next(&n->rb_node_in);
1665
1666                 rb_erase_cached(&n->rb_node_in, root);
1667                 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1668                 if (ret < 0)
1669                         return -1;
1670
1671                 if (ret) {
1672                         /*
1673                          * If it wasn't combined with one of the entries already
1674                          * collapsed, we need to apply the filters that may have
1675                          * been set by, say, the hist_browser.
1676                          */
1677                         hists__apply_filters(hists, n);
1678                 }
1679                 if (prog)
1680                         ui_progress__update(prog, 1);
1681         }
1682         return 0;
1683 }
1684
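/*
 * Compare two entries for output ordering, walking the configured sort
 * keys until one of them differentiates the entries.
 */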
1685 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1686 {
1687         struct hists *hists = a->hists;
1688         struct perf_hpp_fmt *fmt;
1689         int64_t cmp = 0;
1690
1691         hists__for_each_sort_list(hists, fmt) {
1692                 if (perf_hpp__should_skip(fmt, a->hists))
1693                         continue;
1694
1695                 cmp = fmt->sort(fmt, a, b);
1696                 if (cmp)
1697                         break;
1698         }
1699
1700         return cmp;
1701 }
1702
1703 static void hists__reset_filter_stats(struct hists *hists)
1704 {
1705         hists->nr_non_filtered_entries = 0;
1706         hists->stats.total_non_filtered_period = 0;
1707 }
1708
1709 void hists__reset_stats(struct hists *hists)
1710 {
1711         hists->nr_entries = 0;
1712         hists->stats.total_period = 0;
1713
1714         hists__reset_filter_stats(hists);
1715 }
1716
1717 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1718 {
1719         hists->nr_non_filtered_entries++;
1720         hists->stats.total_non_filtered_period += h->stat.period;
1721 }
1722
1723 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1724 {
1725         if (!h->filtered)
1726                 hists__inc_filter_stats(hists, h);
1727
1728         hists->nr_entries++;
1729         hists->stats.total_period += h->stat.period;
1730 }
1731
1732 static void hierarchy_recalc_total_periods(struct hists *hists)
1733 {
1734         struct rb_node *node;
1735         struct hist_entry *he;
1736
1737         node = rb_first_cached(&hists->entries);
1738
1739         hists->stats.total_period = 0;
1740         hists->stats.total_non_filtered_period = 0;
1741
1742         /*
1743          * Recalculate the total period using top-level entries only,
1744          * since lower-level entries only see non-filtered entries
1745          * while upper-level entries have the sum of both.
1746          */
1747         while (node) {
1748                 he = rb_entry(node, struct hist_entry, rb_node);
1749                 node = rb_next(node);
1750
1751                 hists->stats.total_period += he->stat.period;
1752                 if (!he->filtered)
1753                         hists->stats.total_non_filtered_period += he->stat.period;
1754         }
1755 }
1756
1757 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1758                                           struct hist_entry *he)
1759 {
1760         struct rb_node **p = &root->rb_root.rb_node;
1761         struct rb_node *parent = NULL;
1762         struct hist_entry *iter;
1763         struct perf_hpp_fmt *fmt;
1764         bool leftmost = true;
1765
1766         while (*p != NULL) {
1767                 parent = *p;
1768                 iter = rb_entry(parent, struct hist_entry, rb_node);
1769
1770                 if (hist_entry__sort(he, iter) > 0)
1771                         p = &parent->rb_left;
1772                 else {
1773                         p = &parent->rb_right;
1774                         leftmost = false;
1775                 }
1776         }
1777
1778         rb_link_node(&he->rb_node, parent, p);
1779         rb_insert_color_cached(&he->rb_node, root, leftmost);
1780
1781         /* update column widths of dynamic entries */
1782         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1783                 if (perf_hpp__is_dynamic_entry(fmt))
1784                         fmt->sort(fmt, he, NULL);
1785         }
1786 }
1787
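/*
 * Recursively move one level of the hierarchy from 'root_in' to the
 * sorted 'root_out', updating entry counts and column widths and, for
 * leaf entries, sorting their callchains.
 */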
1788 static void hists__hierarchy_output_resort(struct hists *hists,
1789                                            struct ui_progress *prog,
1790                                            struct rb_root_cached *root_in,
1791                                            struct rb_root_cached *root_out,
1792                                            u64 min_callchain_hits,
1793                                            bool use_callchain)
1794 {
1795         struct rb_node *node;
1796         struct hist_entry *he;
1797
1798         *root_out = RB_ROOT_CACHED;
1799         node = rb_first_cached(root_in);
1800
1801         while (node) {
1802                 he = rb_entry(node, struct hist_entry, rb_node_in);
1803                 node = rb_next(node);
1804
1805                 hierarchy_insert_output_entry(root_out, he);
1806
1807                 if (prog)
1808                         ui_progress__update(prog, 1);
1809
1810                 hists->nr_entries++;
1811                 if (!he->filtered) {
1812                         hists->nr_non_filtered_entries++;
1813                         hists__calc_col_len(hists, he);
1814                 }
1815
1816                 if (!he->leaf) {
1817                         hists__hierarchy_output_resort(hists, prog,
1818                                                        &he->hroot_in,
1819                                                        &he->hroot_out,
1820                                                        min_callchain_hits,
1821                                                        use_callchain);
1822                         continue;
1823                 }
1824
1825                 if (!use_callchain)
1826                         continue;
1827
1828                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1829                         u64 total = he->stat.period;
1830
1831                         if (symbol_conf.cumulate_callchain)
1832                                 total = he->stat_acc->period;
1833
1834                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1835                 }
1836
1837                 callchain_param.sort(&he->sorted_chain, he->callchain,
1838                                      min_callchain_hits, &callchain_param);
1839         }
1840 }
1841
1842 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1843                                          struct hist_entry *he,
1844                                          u64 min_callchain_hits,
1845                                          bool use_callchain)
1846 {
1847         struct rb_node **p = &entries->rb_root.rb_node;
1848         struct rb_node *parent = NULL;
1849         struct hist_entry *iter;
1850         struct perf_hpp_fmt *fmt;
1851         bool leftmost = true;
1852
1853         if (use_callchain) {
1854                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1855                         u64 total = he->stat.period;
1856
1857                         if (symbol_conf.cumulate_callchain)
1858                                 total = he->stat_acc->period;
1859
1860                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1861                 }
1862                 callchain_param.sort(&he->sorted_chain, he->callchain,
1863                                       min_callchain_hits, &callchain_param);
1864         }
1865
1866         while (*p != NULL) {
1867                 parent = *p;
1868                 iter = rb_entry(parent, struct hist_entry, rb_node);
1869
1870                 if (hist_entry__sort(he, iter) > 0)
1871                         p = &(*p)->rb_left;
1872                 else {
1873                         p = &(*p)->rb_right;
1874                         leftmost = false;
1875                 }
1876         }
1877
1878         rb_link_node(&he->rb_node, parent, p);
1879         rb_insert_color_cached(&he->rb_node, entries, leftmost);
1880
1881         perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1882                 if (perf_hpp__is_dynamic_entry(fmt) &&
1883                     perf_hpp__defined_dynamic_entry(fmt, he->hists))
1884                         fmt->sort(fmt, he, NULL);  /* update column width */
1885         }
1886 }
1887
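/*
 * Rebuild hists->entries in output (sort) order from the collapsed or
 * input tree.  An optional callback may skip entries; callchains are
 * pruned against min_percent of the relevant total.
 */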
1888 static void output_resort(struct hists *hists, struct ui_progress *prog,
1889                           bool use_callchain, hists__resort_cb_t cb,
1890                           void *cb_arg)
1891 {
1892         struct rb_root_cached *root;
1893         struct rb_node *next;
1894         struct hist_entry *n;
1895         u64 callchain_total;
1896         u64 min_callchain_hits;
1897
1898         callchain_total = hists->callchain_period;
1899         if (symbol_conf.filter_relative)
1900                 callchain_total = hists->callchain_non_filtered_period;
1901
1902         min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1903
1904         hists__reset_stats(hists);
1905         hists__reset_col_len(hists);
1906
1907         if (symbol_conf.report_hierarchy) {
1908                 hists__hierarchy_output_resort(hists, prog,
1909                                                &hists->entries_collapsed,
1910                                                &hists->entries,
1911                                                min_callchain_hits,
1912                                                use_callchain);
1913                 hierarchy_recalc_total_periods(hists);
1914                 return;
1915         }
1916
1917         if (hists__has(hists, need_collapse))
1918                 root = &hists->entries_collapsed;
1919         else
1920                 root = hists->entries_in;
1921
1922         next = rb_first_cached(root);
1923         hists->entries = RB_ROOT_CACHED;
1924
1925         while (next) {
1926                 n = rb_entry(next, struct hist_entry, rb_node_in);
1927                 next = rb_next(&n->rb_node_in);
1928
1929                 if (cb && cb(n, cb_arg))
1930                         continue;
1931
1932                 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1933                 hists__inc_stats(hists, n);
1934
1935                 if (!n->filtered)
1936                         hists__calc_col_len(hists, n);
1937
1938                 if (prog)
1939                         ui_progress__update(prog, 1);
1940         }
1941 }
1942
1943 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1944                              hists__resort_cb_t cb, void *cb_arg)
1945 {
1946         bool use_callchain;
1947
1948         if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1949                 use_callchain = evsel__has_callchain(evsel);
1950         else
1951                 use_callchain = symbol_conf.use_callchain;
1952
1953         use_callchain |= symbol_conf.show_branchflag_count;
1954
1955         output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1956 }
1957
1958 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1959 {
1960         evsel__output_resort_cb(evsel, prog, NULL, NULL);
1961 }
1962
1963 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1964 {
1965         output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1966 }
1967
1968 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
1969                              hists__resort_cb_t cb)
1970 {
1971         output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
1972 }
1973
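/*
 * Whether hierarchy traversal may descend into this entry's children.
 * HMD_FORCE_SIBLING/HMD_FORCE_CHILD override the entry's folding state.
 */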
1974 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1975 {
1976         if (he->leaf || hmd == HMD_FORCE_SIBLING)
1977                 return false;
1978
1979         if (he->unfolded || hmd == HMD_FORCE_CHILD)
1980                 return true;
1981
1982         return false;
1983 }
1984
1985 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1986 {
1987         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1988
1989         while (can_goto_child(he, HMD_NORMAL)) {
1990                 node = rb_last(&he->hroot_out.rb_root);
1991                 he = rb_entry(node, struct hist_entry, rb_node);
1992         }
1993         return node;
1994 }
1995
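/*
 * Advance to the next entry in the hierarchy: descend into the children
 * when allowed, otherwise take the next sibling, climbing back up to
 * the parents once a subtree is exhausted.
 */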
1996 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1997 {
1998         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1999
2000         if (can_goto_child(he, hmd))
2001                 node = rb_first_cached(&he->hroot_out);
2002         else
2003                 node = rb_next(node);
2004
2005         while (node == NULL) {
2006                 he = he->parent_he;
2007                 if (he == NULL)
2008                         break;
2009
2010                 node = rb_next(&he->rb_node);
2011         }
2012         return node;
2013 }
2014
2015 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2016 {
2017         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2018
2019         node = rb_prev(node);
2020         if (node)
2021                 return rb_hierarchy_last(node);
2022
2023         he = he->parent_he;
2024         if (he == NULL)
2025                 return NULL;
2026
2027         return &he->rb_node;
2028 }
2029
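/*
 * True if the entry has at least one unfiltered child at or above the
 * given percent limit.  Children are in sort order, so checking the
 * first unfiltered one is enough.
 */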
2030 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
2031 {
2032         struct rb_node *node;
2033         struct hist_entry *child;
2034         float percent;
2035
2036         if (he->leaf)
2037                 return false;
2038
2039         node = rb_first_cached(&he->hroot_out);
2040         child = rb_entry(node, struct hist_entry, rb_node);
2041
2042         while (node && child->filtered) {
2043                 node = rb_next(node);
2044                 child = rb_entry(node, struct hist_entry, rb_node);
2045         }
2046
2047         if (node)
2048                 percent = hist_entry__get_percent_limit(child);
2049         else
2050                 percent = 0;
2051
2052         return node && percent >= limit;
2053 }
2054
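/*
 * Clear one filter bit on 'h' (and, in hierarchy mode, on its parents,
 * propagating the stats upwards) and put the entry back into the
 * non-filtered statistics once no filter bits remain.
 */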
2055 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2056                                        enum hist_filter filter)
2057 {
2058         h->filtered &= ~(1 << filter);
2059
2060         if (symbol_conf.report_hierarchy) {
2061                 struct hist_entry *parent = h->parent_he;
2062
2063                 while (parent) {
2064                         he_stat__add_stat(&parent->stat, &h->stat);
2065
2066                         parent->filtered &= ~(1 << filter);
2067
2068                         if (parent->filtered)
2069                                 goto next;
2070
2071                         /* force fold unfiltered entry for simplicity */
2072                         parent->unfolded = false;
2073                         parent->has_no_entry = false;
2074                         parent->row_offset = 0;
2075                         parent->nr_rows = 0;
2076 next:
2077                         parent = parent->parent_he;
2078                 }
2079         }
2080
2081         if (h->filtered)
2082                 return;
2083
2084         /* force fold unfiltered entry for simplicity */
2085         h->unfolded = false;
2086         h->has_no_entry = false;
2087         h->row_offset = 0;
2088         h->nr_rows = 0;
2089
2090         hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2091
2092         hists__inc_filter_stats(hists, h);
2093         hists__calc_col_len(hists, h);
2094 }
2095
2097 static bool hists__filter_entry_by_dso(struct hists *hists,
2098                                        struct hist_entry *he)
2099 {
2100         if (hists->dso_filter != NULL &&
2101             (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
2102                 he->filtered |= (1 << HIST_FILTER__DSO);
2103                 return true;
2104         }
2105
2106         return false;
2107 }
2108
2109 static bool hists__filter_entry_by_thread(struct hists *hists,
2110                                           struct hist_entry *he)
2111 {
2112         if (hists->thread_filter != NULL &&
2113             he->thread != hists->thread_filter) {
2114                 he->filtered |= (1 << HIST_FILTER__THREAD);
2115                 return true;
2116         }
2117
2118         return false;
2119 }
2120
2121 static bool hists__filter_entry_by_symbol(struct hists *hists,
2122                                           struct hist_entry *he)
2123 {
2124         if (hists->symbol_filter_str != NULL &&
2125             (!he->ms.sym || strstr(he->ms.sym->name,
2126                                    hists->symbol_filter_str) == NULL)) {
2127                 he->filtered |= (1 << HIST_FILTER__SYMBOL);
2128                 return true;
2129         }
2130
2131         return false;
2132 }
2133
2134 static bool hists__filter_entry_by_socket(struct hists *hists,
2135                                           struct hist_entry *he)
2136 {
2137         if ((hists->socket_filter > -1) &&
2138             (he->socket != hists->socket_filter)) {
2139                 he->filtered |= (1 << HIST_FILTER__SOCKET);
2140                 return true;
2141         }
2142
2143         return false;
2144 }
2145
2146 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2147
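/*
 * Flat (non-hierarchy) filtering: reset the filter stats and re-check
 * every entry against 'filter', unfiltering those that pass.
 */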
2148 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2149 {
2150         struct rb_node *nd;
2151
2152         hists->stats.nr_non_filtered_samples = 0;
2153
2154         hists__reset_filter_stats(hists);
2155         hists__reset_col_len(hists);
2156
2157         for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2158                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2159
2160                 if (filter(hists, h))
2161                         continue;
2162
2163                 hists__remove_entry_filter(hists, h, type);
2164         }
2165 }
2166
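/*
 * Re-insert 'he' into 'root' in sort order, then recursively rebuild
 * the output trees of its children unless it is a leaf or filtered.
 */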
2167 static void resort_filtered_entry(struct rb_root_cached *root,
2168                                   struct hist_entry *he)
2169 {
2170         struct rb_node **p = &root->rb_root.rb_node;
2171         struct rb_node *parent = NULL;
2172         struct hist_entry *iter;
2173         struct rb_root_cached new_root = RB_ROOT_CACHED;
2174         struct rb_node *nd;
2175         bool leftmost = true;
2176
2177         while (*p != NULL) {
2178                 parent = *p;
2179                 iter = rb_entry(parent, struct hist_entry, rb_node);
2180
2181                 if (hist_entry__sort(he, iter) > 0)
2182                         p = &(*p)->rb_left;
2183                 else {
2184                         p = &(*p)->rb_right;
2185                         leftmost = false;
2186                 }
2187         }
2188
2189         rb_link_node(&he->rb_node, parent, p);
2190         rb_insert_color_cached(&he->rb_node, root, leftmost);
2191
2192         if (he->leaf || he->filtered)
2193                 return;
2194
2195         nd = rb_first_cached(&he->hroot_out);
2196         while (nd) {
2197                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2198
2199                 nd = rb_next(nd);
2200                 rb_erase_cached(&h->rb_node, &he->hroot_out);
2201
2202                 resort_filtered_entry(&new_root, h);
2203         }
2204
2205         he->hroot_out = new_root;
2206 }
2207
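/*
 * Apply a filter to every entry of the hierarchy, then resort the
 * output trees because filtering a lower level changes the periods
 * accumulated at the upper levels.
 */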
2208 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2209 {
2210         struct rb_node *nd;
2211         struct rb_root_cached new_root = RB_ROOT_CACHED;
2212
2213         hists->stats.nr_non_filtered_samples = 0;
2214
2215         hists__reset_filter_stats(hists);
2216         hists__reset_col_len(hists);
2217
2218         nd = rb_first_cached(&hists->entries);
2219         while (nd) {
2220                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2221                 int ret;
2222
2223                 ret = hist_entry__filter(h, type, arg);
2224
2225                 /*
2226                  * case 1. non-matching type
2227                  * zero out the period, set filter marker and move to child
2228                  */
2229                 if (ret < 0) {
2230                         memset(&h->stat, 0, sizeof(h->stat));
2231                         h->filtered |= (1 << type);
2232
2233                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2234                 }
2235                 /*
2236                  * case 2. matched type (filter out)
2237                  * set filter marker and move to next
2238                  */
2239                 else if (ret == 1) {
2240                         h->filtered |= (1 << type);
2241
2242                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2243                 }
2244                 /*
2245                  * case 3. ok (not filtered)
2246                  * add period to hists and parents, erase the filter marker
2247                  * and move to next sibling
2248                  */
2249                 else {
2250                         hists__remove_entry_filter(hists, h, type);
2251
2252                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2253                 }
2254         }
2255
2256         hierarchy_recalc_total_periods(hists);
2257
2258         /*
2259          * Resort the output after applying a new filter, since a filter at a
2260          * lower level of the hierarchy can change the periods at an upper level.
2261          */
2262         nd = rb_first_cached(&hists->entries);
2263         while (nd) {
2264                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2265
2266                 nd = rb_next(nd);
2267                 rb_erase_cached(&h->rb_node, &hists->entries);
2268
2269                 resort_filtered_entry(&new_root, h);
2270         }
2271
2272         hists->entries = new_root;
2273 }
2274
2275 void hists__filter_by_thread(struct hists *hists)
2276 {
2277         if (symbol_conf.report_hierarchy)
2278                 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2279                                         hists->thread_filter);
2280         else
2281                 hists__filter_by_type(hists, HIST_FILTER__THREAD,
2282                                       hists__filter_entry_by_thread);
2283 }
2284
2285 void hists__filter_by_dso(struct hists *hists)
2286 {
2287         if (symbol_conf.report_hierarchy)
2288                 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2289                                         hists->dso_filter);
2290         else
2291                 hists__filter_by_type(hists, HIST_FILTER__DSO,
2292                                       hists__filter_entry_by_dso);
2293 }
2294
2295 void hists__filter_by_symbol(struct hists *hists)
2296 {
2297         if (symbol_conf.report_hierarchy)
2298                 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2299                                         hists->symbol_filter_str);
2300         else
2301                 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2302                                       hists__filter_entry_by_symbol);
2303 }
2304
2305 void hists__filter_by_socket(struct hists *hists)
2306 {
2307         if (symbol_conf.report_hierarchy)
2308                 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2309                                         &hists->socket_filter);
2310         else
2311                 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2312                                       hists__filter_entry_by_socket);
2313 }
2314
2315 void events_stats__inc(struct events_stats *stats, u32 type)
2316 {
2317         ++stats->nr_events[0];
2318         ++stats->nr_events[type];
2319 }
2320
2321 static void hists_stats__inc(struct hists_stats *stats)
2322 {
2323         ++stats->nr_samples;
2324 }
2325
2326 void hists__inc_nr_events(struct hists *hists)
2327 {
2328         hists_stats__inc(&hists->stats);
2329 }
2330
2331 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2332 {
2333         hists_stats__inc(&hists->stats);
2334         if (!filtered)
2335                 hists->stats.nr_non_filtered_samples++;
2336 }
2337
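/*
 * Find the entry matching 'pair' in the (collapsed) input tree, or
 * insert a dummy copy with zeroed stats so both hists have an entry
 * for the same key.
 */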
2338 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2339                                                  struct hist_entry *pair)
2340 {
2341         struct rb_root_cached *root;
2342         struct rb_node **p;
2343         struct rb_node *parent = NULL;
2344         struct hist_entry *he;
2345         int64_t cmp;
2346         bool leftmost = true;
2347
2348         if (hists__has(hists, need_collapse))
2349                 root = &hists->entries_collapsed;
2350         else
2351                 root = hists->entries_in;
2352
2353         p = &root->rb_root.rb_node;
2354
2355         while (*p != NULL) {
2356                 parent = *p;
2357                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2358
2359                 cmp = hist_entry__collapse(he, pair);
2360
2361                 if (!cmp)
2362                         goto out;
2363
2364                 if (cmp < 0)
2365                         p = &(*p)->rb_left;
2366                 else {
2367                         p = &(*p)->rb_right;
2368                         leftmost = false;
2369                 }
2370         }
2371
2372         he = hist_entry__new(pair, true);
2373         if (he) {
2374                 memset(&he->stat, 0, sizeof(he->stat));
2375                 he->hists = hists;
2376                 if (symbol_conf.cumulate_callchain)
2377                         memset(he->stat_acc, 0, sizeof(*he->stat_acc));
2378                 rb_link_node(&he->rb_node_in, parent, p);
2379                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2380                 hists__inc_stats(hists, he);
2381                 he->dummy = true;
2382         }
2383 out:
2384         return he;
2385 }
2386
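/* Hierarchy-mode counterpart of hists__add_dummy_entry() above. */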
2387 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2388                                                     struct rb_root_cached *root,
2389                                                     struct hist_entry *pair)
2390 {
2391         struct rb_node **p;
2392         struct rb_node *parent = NULL;
2393         struct hist_entry *he;
2394         struct perf_hpp_fmt *fmt;
2395         bool leftmost = true;
2396
2397         p = &root->rb_root.rb_node;
2398         while (*p != NULL) {
2399                 int64_t cmp = 0;
2400
2401                 parent = *p;
2402                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2403
2404                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2405                         cmp = fmt->collapse(fmt, he, pair);
2406                         if (cmp)
2407                                 break;
2408                 }
2409                 if (!cmp)
2410                         goto out;
2411
2412                 if (cmp < 0)
2413                         p = &parent->rb_left;
2414                 else {
2415                         p = &parent->rb_right;
2416                         leftmost = false;
2417                 }
2418         }
2419
2420         he = hist_entry__new(pair, true);
2421         if (he) {
2422                 rb_link_node(&he->rb_node_in, parent, p);
2423                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2424
2425                 he->dummy = true;
2426                 he->hists = hists;
2427                 memset(&he->stat, 0, sizeof(he->stat));
2428                 hists__inc_stats(hists, he);
2429         }
2430 out:
2431         return he;
2432 }
2433
2434 static struct hist_entry *hists__find_entry(struct hists *hists,
2435                                             struct hist_entry *he)
2436 {
2437         struct rb_node *n;
2438
2439         if (hists__has(hists, need_collapse))
2440                 n = hists->entries_collapsed.rb_root.rb_node;
2441         else
2442                 n = hists->entries_in->rb_root.rb_node;
2443
2444         while (n) {
2445                 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2446                 int64_t cmp = hist_entry__collapse(iter, he);
2447
2448                 if (cmp < 0)
2449                         n = n->rb_left;
2450                 else if (cmp > 0)
2451                         n = n->rb_right;
2452                 else
2453                         return iter;
2454         }
2455
2456         return NULL;
2457 }
2458
2459 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2460                                                       struct hist_entry *he)
2461 {
2462         struct rb_node *n = root->rb_root.rb_node;
2463
2464         while (n) {
2465                 struct hist_entry *iter;
2466                 struct perf_hpp_fmt *fmt;
2467                 int64_t cmp = 0;
2468
2469                 iter = rb_entry(n, struct hist_entry, rb_node_in);
2470                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2471                         cmp = fmt->collapse(fmt, iter, he);
2472                         if (cmp)
2473                                 break;
2474                 }
2475
2476                 if (cmp < 0)
2477                         n = n->rb_left;
2478                 else if (cmp > 0)
2479                         n = n->rb_right;
2480                 else
2481                         return iter;
2482         }
2483
2484         return NULL;
2485 }
2486
2487 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2488                                    struct rb_root_cached *other_root)
2489 {
2490         struct rb_node *nd;
2491         struct hist_entry *pos, *pair;
2492
2493         for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2494                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2495                 pair = hists__find_hierarchy_entry(other_root, pos);
2496
2497                 if (pair) {
2498                         hist_entry__add_pair(pair, pos);
2499                         hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2500                 }
2501         }
2502 }
2503
2504 /*
2505  * Look for pairs to link to the leader buckets (hist_entries):
2506  */
2507 void hists__match(struct hists *leader, struct hists *other)
2508 {
2509         struct rb_root_cached *root;
2510         struct rb_node *nd;
2511         struct hist_entry *pos, *pair;
2512
2513         if (symbol_conf.report_hierarchy) {
2514                 /* hierarchy report always collapses entries */
2515                 return hists__match_hierarchy(&leader->entries_collapsed,
2516                                               &other->entries_collapsed);
2517         }
2518
2519         if (hists__has(leader, need_collapse))
2520                 root = &leader->entries_collapsed;
2521         else
2522                 root = leader->entries_in;
2523
2524         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2525                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2526                 pair = hists__find_entry(other, pos);
2527
2528                 if (pair)
2529                         hist_entry__add_pair(pair, pos);
2530         }
2531 }
2532
2533 static int hists__link_hierarchy(struct hists *leader_hists,
2534                                  struct hist_entry *parent,
2535                                  struct rb_root_cached *leader_root,
2536                                  struct rb_root_cached *other_root)
2537 {
2538         struct rb_node *nd;
2539         struct hist_entry *pos, *leader;
2540
2541         for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2542                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2543
2544                 if (hist_entry__has_pairs(pos)) {
2545                         bool found = false;
2546
2547                         list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2548                                 if (leader->hists == leader_hists) {
2549                                         found = true;
2550                                         break;
2551                                 }
2552                         }
2553                         if (!found)
2554                                 return -1;
2555                 } else {
2556                         leader = add_dummy_hierarchy_entry(leader_hists,
2557                                                            leader_root, pos);
2558                         if (leader == NULL)
2559                                 return -1;
2560
2561                         /* set the parent on the leader entry only, not on 'pos' */
2562                         leader->parent_he = parent;
2563
2564                         hist_entry__add_pair(pos, leader);
2565                 }
2566
2567                 if (!pos->leaf) {
2568                         if (hists__link_hierarchy(leader_hists, leader,
2569                                                   &leader->hroot_in,
2570                                                   &pos->hroot_in) < 0)
2571                                 return -1;
2572                 }
2573         }
2574         return 0;
2575 }
2576
2577 /*
2578  * Look for entries in the other hists that are not present in the leader;
2579  * if we find them, just add a dummy entry on the leader hists, with
2580  * period=0 and nr_events=0, to serve as the list header.
2581  */
2582 int hists__link(struct hists *leader, struct hists *other)
2583 {
2584         struct rb_root_cached *root;
2585         struct rb_node *nd;
2586         struct hist_entry *pos, *pair;
2587
2588         if (symbol_conf.report_hierarchy) {
2589                 /* hierarchy report always collapses entries */
2590                 return hists__link_hierarchy(leader, NULL,
2591                                              &leader->entries_collapsed,
2592                                              &other->entries_collapsed);
2593         }
2594
2595         if (hists__has(other, need_collapse))
2596                 root = &other->entries_collapsed;
2597         else
2598                 root = other->entries_in;
2599
2600         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2601                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2602
2603                 if (!hist_entry__has_pairs(pos)) {
2604                         pair = hists__add_dummy_entry(leader, pos);
2605                         if (pair == NULL)
2606                                 return -1;
2607                         hist_entry__add_pair(pos, pair);
2608                 }
2609         }
2610
2611         return 0;
2612 }
2613
2614 int hists__unlink(struct hists *hists)
2615 {
2616         struct rb_root_cached *root;
2617         struct rb_node *nd;
2618         struct hist_entry *pos;
2619
2620         if (hists__has(hists, need_collapse))
2621                 root = &hists->entries_collapsed;
2622         else
2623                 root = hists->entries_in;
2624
2625         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2626                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2627                 list_del_init(&pos->pairs.node);
2628         }
2629
2630         return 0;
2631 }
2632
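/*
 * Walk the branch stack of a sample and account the per-branch cycle
 * counts to the annotation data of each branch source, optionally
 * summing them into *total_cycles.
 */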
2633 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2634                           struct perf_sample *sample, bool nonany_branch_mode,
2635                           u64 *total_cycles)
2636 {
2637         struct branch_info *bi;
2638         struct branch_entry *entries = perf_sample__branch_entries(sample);
2639
2640         /* If we have branch cycles, always annotate them. */
2641         if (bs && bs->nr && entries[0].flags.cycles) {
2642                 int i;
2643
2644                 bi = sample__resolve_bstack(sample, al);
2645                 if (bi) {
2646                         struct addr_map_symbol *prev = NULL;
2647
2648                         /*
2649                          * Ignore errors; we still want to process the
2650                          * other entries.
2651                          *
2652                          * For non-standard branch modes always
2653                          * force no IPC (prev == NULL).
2654                          *
2655                          * Note that perf stores branches reversed from
2656                          * program order!
2657                          */
2658                         for (i = bs->nr - 1; i >= 0; i--) {
2659                                 addr_map_symbol__account_cycles(&bi[i].from,
2660                                         nonany_branch_mode ? NULL : prev,
2661                                         bi[i].flags.cycles);
2662                                 prev = &bi[i].to;
2663
2664                                 if (total_cycles)
2665                                         *total_cycles += bi[i].flags.cycles;
2666                         }
2667                         free(bi);
2668                 }
2669         }
2670 }
2671
2672 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
2673                                  bool skip_empty)
2674 {
2675         struct evsel *pos;
2676         size_t ret = 0;
2677
2678         evlist__for_each_entry(evlist, pos) {
2679                 struct hists *hists = evsel__hists(pos);
2680
2681                 if (skip_empty && !hists->stats.nr_samples)
2682                         continue;
2683
2684                 ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
2685                 ret += fprintf(fp, "%16s events: %10d\n",
2686                                "SAMPLE", hists->stats.nr_samples);
2687         }
2688
2689         return ret;
2690 }
2691
2693 u64 hists__total_period(struct hists *hists)
2694 {
2695         return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2696                 hists->stats.total_period;
2697 }
2698
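/*
 * Format the title line shown above the hists browser, e.g.:
 *   "Samples: 4K of event 'cycles', Event count (approx.): 1234567890"
 * plus any active UID/thread/DSO/socket filters.
 */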
2699 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2700 {
2701         char unit;
2702         int printed;
2703         const struct dso *dso = hists->dso_filter;
2704         struct thread *thread = hists->thread_filter;
2705         int socket_id = hists->socket_filter;
2706         unsigned long nr_samples = hists->stats.nr_samples;
2707         u64 nr_events = hists->stats.total_period;
2708         struct evsel *evsel = hists_to_evsel(hists);
2709         const char *ev_name = evsel__name(evsel);
2710         char buf[512], sample_freq_str[64] = "";
2711         size_t buflen = sizeof(buf);
2712         char ref[30] = " show reference callgraph, ";
2713         bool enable_ref = false;
2714
2715         if (symbol_conf.filter_relative) {
2716                 nr_samples = hists->stats.nr_non_filtered_samples;
2717                 nr_events = hists->stats.total_non_filtered_period;
2718         }
2719
2720         if (evsel__is_group_event(evsel)) {
2721                 struct evsel *pos;
2722
2723                 evsel__group_desc(evsel, buf, buflen);
2724                 ev_name = buf;
2725
2726                 for_each_group_member(pos, evsel) {
2727                         struct hists *pos_hists = evsel__hists(pos);
2728
2729                         if (symbol_conf.filter_relative) {
2730                                 nr_samples += pos_hists->stats.nr_non_filtered_samples;
2731                                 nr_events += pos_hists->stats.total_non_filtered_period;
2732                         } else {
2733                                 nr_samples += pos_hists->stats.nr_samples;
2734                                 nr_events += pos_hists->stats.total_period;
2735                         }
2736                 }
2737         }
2738
2739         if (symbol_conf.show_ref_callgraph &&
2740             strstr(ev_name, "call-graph=no"))
2741                 enable_ref = true;
2742
2743         if (show_freq)
2744                 scnprintf(sample_freq_str, sizeof(sample_freq_str), " %" PRIu64 " Hz,", evsel->core.attr.sample_freq);
2745
2746         nr_samples = convert_unit(nr_samples, &unit);
2747         printed = scnprintf(bf, size,
2748                            "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2749                            nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2750                            ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2751
2753         if (hists->uid_filter_str)
2754                 printed += scnprintf(bf + printed, size - printed,
2755                                     ", UID: %s", hists->uid_filter_str);
2756         if (thread) {
2757                 if (hists__has(hists, thread)) {
2758                         printed += scnprintf(bf + printed, size - printed,
2759                                     ", Thread: %s(%d)",
2760                                      (thread->comm_set ? thread__comm_str(thread) : ""),
2761                                     thread->tid);
2762                 } else {
2763                         printed += scnprintf(bf + printed, size - printed,
2764                                     ", Thread: %s",
2765                                      (thread->comm_set ? thread__comm_str(thread) : ""));
2766                 }
2767         }
2768         if (dso)
2769                 printed += scnprintf(bf + printed, size - printed,
2770                                     ", DSO: %s", dso->short_name);
2771         if (socket_id > -1)
2772                 printed += scnprintf(bf + printed, size - printed,
2773                                     ", Processor Socket: %d", socket_id);
2774
2775         return printed;
2776 }
2777
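/*
 * Parse the percentage filter mode, e.g. from 'perf report --percentage
 * relative|absolute' or the hist.percentage config key handled by
 * perf_hist_config() below.
 */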
2778 int parse_filter_percentage(const struct option *opt __maybe_unused,
2779                             const char *arg, int unset __maybe_unused)
2780 {
2781         if (!strcmp(arg, "relative"))
2782                 symbol_conf.filter_relative = true;
2783         else if (!strcmp(arg, "absolute"))
2784                 symbol_conf.filter_relative = false;
2785         else {
2786                 pr_debug("Invalid percentage: %s\n", arg);
2787                 return -1;
2788         }
2789
2790         return 0;
2791 }
2792
2793 int perf_hist_config(const char *var, const char *value)
2794 {
2795         if (!strcmp(var, "hist.percentage"))
2796                 return parse_filter_percentage(NULL, value, 0);
2797
2798         return 0;
2799 }
2800
2801 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2802 {
2803         memset(hists, 0, sizeof(*hists));
2804         hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2805         hists->entries_in = &hists->entries_in_array[0];
2806         hists->entries_collapsed = RB_ROOT_CACHED;
2807         hists->entries = RB_ROOT_CACHED;
2808         pthread_mutex_init(&hists->lock, NULL);
2809         hists->socket_filter = -1;
2810         hists->hpp_list = hpp_list;
2811         INIT_LIST_HEAD(&hists->hpp_formats);
2812         return 0;
2813 }
2814
2815 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2816 {
2817         struct rb_node *node;
2818         struct hist_entry *he;
2819
2820         while (!RB_EMPTY_ROOT(&root->rb_root)) {
2821                 node = rb_first_cached(root);
2822                 rb_erase_cached(node, root);
2823
2824                 he = rb_entry(node, struct hist_entry, rb_node_in);
2825                 hist_entry__delete(he);
2826         }
2827 }
2828
2829 static void hists__delete_all_entries(struct hists *hists)
2830 {
2831         hists__delete_entries(hists);
2832         hists__delete_remaining_entries(&hists->entries_in_array[0]);
2833         hists__delete_remaining_entries(&hists->entries_in_array[1]);
2834         hists__delete_remaining_entries(&hists->entries_collapsed);
2835 }
2836
2837 static void hists_evsel__exit(struct evsel *evsel)
2838 {
2839         struct hists *hists = evsel__hists(evsel);
2840         struct perf_hpp_fmt *fmt, *pos;
2841         struct perf_hpp_list_node *node, *tmp;
2842
2843         hists__delete_all_entries(hists);
2844
2845         list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2846                 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2847                         list_del_init(&fmt->list);
2848                         free(fmt);
2849                 }
2850                 list_del_init(&node->list);
2851                 free(node);
2852         }
2853 }
2854
2855 static int hists_evsel__init(struct evsel *evsel)
2856 {
2857         struct hists *hists = evsel__hists(evsel);
2858
2859         __hists__init(hists, &perf_hpp_list);
2860         return 0;
2861 }
2862
2868 int hists__init(void)
2869 {
2870         int err = evsel__object_config(sizeof(struct hists_evsel),
2871                                        hists_evsel__init, hists_evsel__exit);
2872         if (err)
2873                 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2874
2875         return err;
2876 }
2877
2878 void perf_hpp_list__init(struct perf_hpp_list *list)
2879 {
2880         INIT_LIST_HEAD(&list->fields);
2881         INIT_LIST_HEAD(&list->sorts);
2882 }