2 # SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3 """Convert directories of JSON events to C code."""
6 from functools import lru_cache
11 from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
14 # Global command line arguments.
16 # List of regular event tables.
18 # List of event tables generated from "/sys" directories.
19 _sys_event_tables = []
20 # List of regular metric tables.
22 # List of metric tables generated from "/sys" directories.
23 _sys_metric_tables = []
24 # Mapping between sys event table names and sys metric table names.
25 _sys_event_table_to_metric_table_mapping = {}
26 # Map from an event name to an architecture standard
27 # JsonEvent. Architecture standard events are in json files in the top
28 # f'{_args.starting_dir}/{_args.arch}' directory.
30 # Events to write out when the table is closed
32 # Name of events table to be written out
33 _pending_events_tblname = None
34 # Metrics to write out when the table is closed
36 # Name of metrics table to be written out
37 _pending_metrics_tblname = None
38 # Global BigCString shared by all structures.
40 # Map from the name of a metric group to a description of the group.
42 # Order specific JsonEvent attributes will be visited.
43 _json_event_attributes = [
44 # cmp_sevent related attributes.
45 'name', 'topic', 'desc',
46 # Seems useful, put it early.
48 # Short things in alphabetical order.
49 'compat', 'deprecated', 'perpkg', 'unit',
50 # Longer things (the last won't be iterated over during decompress).
54 # Attributes that are in pmu_metric rather than pmu_event.
55 _json_metric_attributes = [
56 'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
57 'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
58 'default_metricgroup_name', 'aggr_mode', 'event_grouping'
60 # Attributes that are bools or enum int values, encoded as '0', '1',...
61 _json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.

  Fix: guard against an empty suffix. Previously s[0:-len(suffix)] with
  len(suffix) == 0 produced s[0:0] == '', silently dropping the whole
  string; str.removesuffix treats an empty suffix as a no-op.
  """
  return s[0:len(s) - len(suffix)] if suffix and s.endswith(suffix) else s
def file_name_to_table_name(prefix: str, parents: Sequence[str],
                            dirname: str) -> str:
  """Generate a C table name from directory names.

  Joins prefix, each parent directory and the final directory with '_'
  separators, then maps '-' (illegal in C identifiers) to '_'.
  """
  parts = list(parents) + [dirname]
  tblname = prefix + ''.join('_' + part for part in parts)
  return tblname.replace('-', '_')
def c_len(s: str) -> int:
  """Return the length of s as a C string.

  This doesn't handle all escape characters properly. It first assumes
  every backslash starts an escape (each saves one byte), then adjusts
  for the over-count on '\\\\' (an escaped backslash is 2 source bytes
  but 1 C byte, not 0). The code uses \\000 rather than \\0 as a
  terminator, as an adjacent digit would be folded into the escape
  (ie. "\\0" + "5" reads as the single escape \\05); each \\000 is a
  further 2-byte adjustment. Octal/hex escapes in general are not
  handled.
  """
  try:
    utf = s.encode(encoding='utf-8', errors='strict')
  except UnicodeError:
    # Surface which string failed to encode before propagating.
    print(f'broken string {s}')
    raise
  backslashes = utf.count(b'\\')
  escaped_backslashes = utf.count(b'\\\\')
  terminators = utf.count(b'\\000')
  return len(utf) - backslashes + escaped_backslashes - 2 * terminators
101 """A class to hold many strings concatenated together.
103 Generating a large number of stand-alone C strings creates a large
104 number of relocations in position independent code. The BigCString
105 is a helper for this case. It builds a single string which within it
106 are all the other C strings (to avoid memory issues the string
107 itself is held as a list of strings). The offsets within the big
108 string are recorded and when stored to disk these don't need
109 relocation. To reduce the size of the string further, identical
110 strings are merged. If a longer string ends-with the same value as a
111 shorter string, these entries are also merged.
114 big_string: Sequence[str]
115 offsets: Dict[str, int]
117 insert_point: Dict[str, int]
122 self.insert_number = 0;
123 self.insert_point = {}
126 def add(self, s: str, metric: bool) -> None:
127 """Called to add to the big string."""
128 if s not in self.strings:
130 self.insert_point[s] = self.insert_number
131 self.insert_number += 1
135 def compute(self) -> None:
136 """Called once all strings are added to compute the string and offsets."""
139 # Determine if two strings can be folded, ie. let 1 string use the
140 # end of another. First reverse all strings and sort them.
141 sorted_reversed_strings = sorted([x[::-1] for x in self.strings])
143 # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
144 # for each string to see if there is a better candidate to fold it
145 # into, in the example rather than using 'yz' we can use'xyz' at
146 # an offset of 1. We record which string can be folded into which
147 # in folded_strings, we don't need to record the offset as it is
148 # trivially computed from the string lengths.
149 for pos,s in enumerate(sorted_reversed_strings):
151 for check_pos in range(pos + 1, len(sorted_reversed_strings)):
152 if sorted_reversed_strings[check_pos].startswith(s):
157 folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]
159 # Compute reverse mappings for debugging.
160 fold_into_strings = collections.defaultdict(set)
161 for key, val in folded_strings.items():
163 fold_into_strings[val].add(key)
165 # big_string_offset is the current location within the C string
166 # being appended to - comments, etc. don't count. big_string is
167 # the string contents represented as a list. Strings are immutable
168 # in Python and so appending to one causes memory issues, while
170 big_string_offset = 0
174 def string_cmp_key(s: str) -> Tuple[bool, int, str]:
175 return (s in self.metrics, self.insert_point[s], s)
177 # Emit all strings that aren't folded in a sorted manner.
178 for s in sorted(self.strings, key=string_cmp_key):
179 if s not in folded_strings:
180 self.offsets[s] = big_string_offset
181 self.big_string.append(f'/* offset={big_string_offset} */ "')
182 self.big_string.append(s)
183 self.big_string.append('"')
184 if s in fold_into_strings:
185 self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
186 self.big_string.append('\n')
187 big_string_offset += c_len(s)
190 # Compute the offsets of the folded strings.
191 for s in folded_strings.keys():
192 assert s not in self.offsets
193 folded_s = folded_strings[s]
194 self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
199 """Representation of an event loaded from a json file dictionary."""
201 def __init__(self, jd: dict):
202 """Constructor passed the dictionary of parsed json values."""
def llx(x: int) -> str:
  """Convert an int to a string similar to a printf modifier of %#llx."""
  # hex(0) would give '0x0'; the tables use a bare '0' for zero.
  return hex(x) if x else '0'
def fixdesc(s: str) -> str:
  """Fix formatting issue for the desc string.

  Strips up to two trailing '. ' runs and a trailing '.', then escapes
  newlines, double quotes and carriage returns so the text can be
  embedded in a C string literal. Returns falsy input unchanged so
  callers may pass jd.get(...) of a missing key (None) safely.
  """
  if not s:
    return s
  return removesuffix(removesuffix(removesuffix(s, '. '),
                                   '. '), '.').replace('\n', '\\n').replace(
                                       '\"', '\\"').replace('\r', '\\r')
216 def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
217 """Returns the aggr_mode_class enum value associated with the JSON string."""
220 aggr_mode_to_enum = {
224 return aggr_mode_to_enum[aggr_mode]
def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
  """Returns the metric_event_groups enum value associated with the JSON string.

  Unknown non-empty constraints raise KeyError so new JSON values are
  noticed rather than silently dropped.
  """
  mapping = {
      'NO_GROUP_EVENTS': '1',
      'NO_GROUP_EVENTS_NMI': '2',
      'NO_NMI_WATCHDOG': '2',
      'NO_GROUP_EVENTS_SMT': '3',
  }
  return mapping[metric_constraint] if metric_constraint else None
238 def lookup_msr(num: str) -> Optional[str]:
239 """Converts the msr number, or first in a list to the appropriate event field."""
244 0x1A6: 'offcore_rsp=',
245 0x1A7: 'offcore_rsp=',
248 return msrmap[int(num.split(',', 1)[0], 0)]
def real_event(name: str, event: str) -> Optional[str]:
  """Convert well known event names to an event string otherwise use the event argument."""
  fixed = {
      'inst_retired.any': 'event=0xc0,period=2000003',
      'inst_retired.any_p': 'event=0xc0,period=2000003',
      'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
      'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
      'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
      'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
  }
  if not name:
    # An event with no name can't be a well known one.
    return None
  # Case-insensitive lookup; fall back to the caller-supplied encoding.
  return fixed.get(name.lower(), event)
266 def unit_to_pmu(unit: str) -> Optional[str]:
267 """Convert a JSON Unit to Linux PMU name."""
269 return 'default_core'
270 # Comment brought over from jevents.c:
271 # it's not realistic to keep adding these, we need something more scalable ...
273 'CBO': 'uncore_cbox',
274 'QPI LL': 'uncore_qpi',
275 'SBO': 'uncore_sbox',
276 'iMPH-U': 'uncore_arb',
277 'CPU-M-CF': 'cpum_cf',
278 'CPU-M-SF': 'cpum_sf',
279 'PAI-CRYPTO' : 'pai_crypto',
280 'PAI-EXT' : 'pai_ext',
281 'UPI LL': 'uncore_upi',
282 'hisi_sicl,cpa': 'hisi_sicl,cpa',
283 'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
284 'hisi_sccl,hha': 'hisi_sccl,hha',
285 'hisi_sccl,l3c': 'hisi_sccl,l3c',
286 'imx8_ddr': 'imx8_ddr',
289 'cpu_core': 'cpu_core',
290 'cpu_atom': 'cpu_atom',
291 'ali_drw': 'ali_drw',
293 return table[unit] if unit in table else f'uncore_{unit.lower()}'
296 if 'EventCode' in jd:
297 eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
299 eventcode |= int(jd['ExtSel']) << 8
300 configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
301 self.name = jd['EventName'].lower() if 'EventName' in jd else None
303 self.compat = jd.get('Compat')
304 self.desc = fixdesc(jd.get('BriefDescription'))
305 self.long_desc = fixdesc(jd.get('PublicDescription'))
306 precise = jd.get('PEBS')
307 msr = lookup_msr(jd.get('MSRIndex'))
308 msrval = jd.get('MSRValue')
311 extra_desc += ' Supports address when precise'
315 extra_desc += ' Spec update: ' + jd['Errata']
316 self.pmu = unit_to_pmu(jd.get('Unit'))
317 filter = jd.get('Filter')
318 self.unit = jd.get('ScaleUnit')
319 self.perpkg = jd.get('PerPkg')
320 self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
321 self.deprecated = jd.get('Deprecated')
322 self.metric_name = jd.get('MetricName')
323 self.metric_group = jd.get('MetricGroup')
324 self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
325 self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
326 self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
327 self.metric_expr = None
328 if 'MetricExpr' in jd:
329 self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
330 # Note, the metric formula for the threshold isn't parsed as the &
331 # and > have incorrect precedence.
332 self.metric_threshold = jd.get('MetricThreshold')
334 arch_std = jd.get('ArchStdEvent')
335 if precise and self.desc and '(Precise Event)' not in self.desc:
336 extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
338 event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
340 ('AnyThread', 'any='),
341 ('PortMask', 'ch_mask='),
342 ('CounterMask', 'cmask='),
343 ('EdgeDetect', 'edge='),
344 ('FCMask', 'fc_mask='),
346 ('SampleAfterValue', 'period='),
349 for key, value in event_fields:
350 if key in jd and jd[key] != '0':
351 event += ',' + value + jd[key]
353 event += f',{filter}'
355 event += f',{msr}{msrval}'
356 if self.desc and extra_desc:
357 self.desc += extra_desc
358 if self.long_desc and extra_desc:
359 self.long_desc += extra_desc
361 if arch_std.lower() in _arch_std_events:
362 event = _arch_std_events[arch_std.lower()].event
363 # Copy from the architecture standard event to self for undefined fields.
364 for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
365 if hasattr(self, attr) and not getattr(self, attr):
366 setattr(self, attr, value)
368 raise argparse.ArgumentTypeError('Cannot find arch std event:', arch_std)
370 self.event = real_event(self.name, event)
372 def __repr__(self) -> str:
373 """String representation primarily for debugging."""
375 for attr, value in self.__dict__.items():
377 s += f'\t{attr} = {value},\n'
380 def build_c_string(self, metric: bool) -> str:
382 for attr in _json_metric_attributes if metric else _json_event_attributes:
383 x = getattr(self, attr)
384 if metric and x and attr == 'metric_expr':
385 # Convert parsed metric expressions into a string. Slashes
386 # must be doubled in the file.
387 x = x.ToPerfJson().replace('\\', '\\\\')
388 if metric and x and attr == 'metric_threshold':
389 x = x.replace('\\', '\\\\')
390 if attr in _json_enum_attributes:
393 s += f'{x}\\000' if x else '\\000'
def to_c_string(self, metric: bool) -> str:
  """Representation of the event as a C struct initializer.

  The entry holds only the offset of this event's compressed string
  within the shared big_c_string; the string itself is echoed in a
  trailing comment for readability of the generated C.
  """
  key = self.build_c_string(metric)
  return f'{{ { _bcs.offsets[key] } }}, /* {key} */\n'
403 @lru_cache(maxsize=None)
404 def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
405 """Read json events from the specified file."""
407 events = json.load(open(path), object_hook=JsonEvent)
408 except BaseException as err:
409 print(f"Exception processing {path}")
411 metrics: list[Tuple[str, str, metric.Expression]] = []
414 if event.metric_name and '-' not in event.metric_name:
415 metrics.append((event.pmu, event.metric_name, event.metric_expr))
416 updates = metric.RewriteMetricsInTermsOfOthers(metrics)
419 if event.metric_name in updates:
420 # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
421 # f'to\n"{updates[event.metric_name]}"')
422 event.metric_expr = updates[event.metric_name]
426 def preprocess_arch_std_files(archpath: str) -> None:
427 """Read in all architecture standard events."""
428 global _arch_std_events
429 for item in os.scandir(archpath):
430 if item.is_file() and item.name.endswith('.json'):
431 for event in read_json_events(item.path, topic=''):
433 _arch_std_events[event.name.lower()] = event
434 if event.metric_name:
435 _arch_std_events[event.metric_name.lower()] = event
438 def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
439 """Add contents of file to _pending_events table."""
440 for e in read_json_events(item.path, topic):
442 _pending_events.append(e)
444 _pending_metrics.append(e)
447 def print_pending_events() -> None:
448 """Optionally close events table."""
450 def event_cmp_key(j: JsonEvent) -> Tuple[str, str, bool, str, str]:
451 def fix_none(s: Optional[str]) -> str:
456 return (fix_none(j.pmu).replace(',','_'), fix_none(j.name), j.desc is not None, fix_none(j.topic),
457 fix_none(j.metric_name))
459 global _pending_events
460 if not _pending_events:
463 global _pending_events_tblname
464 if _pending_events_tblname.endswith('_sys'):
465 global _sys_event_tables
466 _sys_event_tables.append(_pending_events_tblname)
469 _event_tables.append(_pending_events_tblname)
474 for event in sorted(_pending_events, key=event_cmp_key):
475 if event.pmu != last_pmu:
477 _args.output_file.write('};\n')
478 pmu_name = event.pmu.replace(',', '_')
479 _args.output_file.write(
480 f'static const struct compact_pmu_event {_pending_events_tblname}_{pmu_name}[] = {{\n')
483 pmus.add((event.pmu, pmu_name))
485 _args.output_file.write(event.to_c_string(metric=False))
488 _args.output_file.write(f"""
491 const struct pmu_table_entry {_pending_events_tblname}[] = {{
493 for (pmu, tbl_pmu) in sorted(pmus):
494 pmu_name = f"{pmu}\\000"
495 _args.output_file.write(f"""{{
496 .entries = {_pending_events_tblname}_{tbl_pmu},
497 .num_entries = ARRAY_SIZE({_pending_events_tblname}_{tbl_pmu}),
498 .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
501 _args.output_file.write('};\n\n')
503 def print_pending_metrics() -> None:
504 """Optionally close metrics table."""
506 def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
507 def fix_none(s: Optional[str]) -> str:
512 return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))
514 global _pending_metrics
515 if not _pending_metrics:
518 global _pending_metrics_tblname
519 if _pending_metrics_tblname.endswith('_sys'):
520 global _sys_metric_tables
521 _sys_metric_tables.append(_pending_metrics_tblname)
524 _metric_tables.append(_pending_metrics_tblname)
529 for metric in sorted(_pending_metrics, key=metric_cmp_key):
530 if metric.pmu != last_pmu:
532 _args.output_file.write('};\n')
533 pmu_name = metric.pmu.replace(',', '_')
534 _args.output_file.write(
535 f'static const struct compact_pmu_event {_pending_metrics_tblname}_{pmu_name}[] = {{\n')
537 last_pmu = metric.pmu
538 pmus.add((metric.pmu, pmu_name))
540 _args.output_file.write(metric.to_c_string(metric=True))
541 _pending_metrics = []
543 _args.output_file.write(f"""
546 const struct pmu_table_entry {_pending_metrics_tblname}[] = {{
548 for (pmu, tbl_pmu) in sorted(pmus):
549 pmu_name = f"{pmu}\\000"
550 _args.output_file.write(f"""{{
551 .entries = {_pending_metrics_tblname}_{tbl_pmu},
552 .num_entries = ARRAY_SIZE({_pending_metrics_tblname}_{tbl_pmu}),
553 .pmu_name = {{ {_bcs.offsets[pmu_name]} /* {pmu_name} */ }},
556 _args.output_file.write('};\n\n')
def get_topic(topic: str) -> str:
  """Derive a topic string from a JSON file name.

  All metrics files collapse to the single 'metrics' topic; other file
  names drop the '.json' extension and use spaces instead of dashes.
  """
  if topic.endswith('metrics.json'):
    return 'metrics'
  return removesuffix(topic, '.json').replace('-', ' ')
563 def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
568 # base dir or too deep
570 if level == 0 or level > 4:
573 # Ignore other directories. If the file name does not have a .json
574 # extension, ignore it. It could be a readme.txt for instance.
575 if not item.is_file() or not item.name.endswith('.json'):
578 if item.name == 'metricgroups.json':
579 metricgroup_descriptions = json.load(open(item.path))
580 for mgroup in metricgroup_descriptions:
581 assert len(mgroup) > 1, parents
582 description = f"{metricgroup_descriptions[mgroup]}\\000"
583 mgroup = f"{mgroup}\\000"
584 _bcs.add(mgroup, metric=True)
585 _bcs.add(description, metric=True)
586 _metricgroups[mgroup] = description
589 topic = get_topic(item.name)
590 for event in read_json_events(item.path, topic):
591 pmu_name = f"{event.pmu}\\000"
593 _bcs.add(pmu_name, metric=False)
594 _bcs.add(event.build_c_string(metric=False), metric=False)
595 if event.metric_name:
596 _bcs.add(pmu_name, metric=True)
597 _bcs.add(event.build_c_string(metric=True), metric=True)
599 def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
600 """Process a JSON file during the main walk."""
601 def is_leaf_dir(path: str) -> bool:
602 for item in os.scandir(path):
607 # model directory, reset topic
608 if item.is_dir() and is_leaf_dir(item.path):
609 print_pending_events()
610 print_pending_metrics()
612 global _pending_events_tblname
613 _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
614 global _pending_metrics_tblname
615 _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)
617 if item.name == 'sys':
618 _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
621 # base dir or too deep
623 if level == 0 or level > 4:
626 # Ignore other directories. If the file name does not have a .json
627 # extension, ignore it. It could be a readme.txt for instance.
628 if not item.is_file() or not item.name.endswith('.json') or item.name == 'metricgroups.json':
631 add_events_table_entries(item, get_topic(item.name))
634 def print_mapping_table(archs: Sequence[str]) -> None:
635 """Read the mapfile and generate the struct from cpuid string to event table."""
636 _args.output_file.write("""
637 /* Struct used to make the PMU event table implementation opaque to callers. */
638 struct pmu_events_table {
639 const struct pmu_table_entry *pmus;
643 /* Struct used to make the PMU metric table implementation opaque to callers. */
644 struct pmu_metrics_table {
645 const struct pmu_table_entry *pmus;
650 * Map a CPU to its table of PMU events. The CPU is identified by the
651 * cpuid field, which is an arch-specific identifier for the CPU.
652 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
653 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
655 * The cpuid can contain any character other than the comma.
657 struct pmu_events_map {
660 struct pmu_events_table event_table;
661 struct pmu_metrics_table metric_table;
665 * Global table mapping each known CPU for the architecture to its
666 * table of PMU events.
668 const struct pmu_events_map pmu_events_map[] = {
672 _args.output_file.write("""{
673 \t.arch = "testarch",
674 \t.cpuid = "testcpu",
676 \t\t.pmus = pmu_events__test_soc_cpu,
677 \t\t.num_pmus = ARRAY_SIZE(pmu_events__test_soc_cpu),
680 \t\t.pmus = pmu_metrics__test_soc_cpu,
681 \t\t.num_pmus = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
686 with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
687 table = csv.reader(csvfile)
690 # Skip the first row or any row beginning with #.
691 if not first and len(row) > 0 and not row[0].startswith('#'):
692 event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
693 if event_tblname in _event_tables:
694 event_size = f'ARRAY_SIZE({event_tblname})'
696 event_tblname = 'NULL'
698 metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
699 if metric_tblname in _metric_tables:
700 metric_size = f'ARRAY_SIZE({metric_tblname})'
702 metric_tblname = 'NULL'
704 if event_size == '0' and metric_size == '0':
706 cpuid = row[0].replace('\\', '\\\\')
707 _args.output_file.write(f"""{{
709 \t.cpuid = "{cpuid}",
711 \t\t.pmus = {event_tblname},
712 \t\t.num_pmus = {event_size}
715 \t\t.pmus = {metric_tblname},
716 \t\t.num_pmus = {metric_size}
722 _args.output_file.write("""{
725 \t.event_table = { 0, 0 },
726 \t.metric_table = { 0, 0 },
732 def print_system_mapping_table() -> None:
733 """C struct mapping table array for tables from /sys directories."""
734 _args.output_file.write("""
735 struct pmu_sys_events {
737 \tstruct pmu_events_table event_table;
738 \tstruct pmu_metrics_table metric_table;
741 static const struct pmu_sys_events pmu_sys_event_tables[] = {
743 printed_metric_tables = []
744 for tblname in _sys_event_tables:
745 _args.output_file.write(f"""\t{{
746 \t\t.event_table = {{
747 \t\t\t.pmus = {tblname},
748 \t\t\t.num_pmus = ARRAY_SIZE({tblname})
750 metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
751 if metric_tblname in _sys_metric_tables:
752 _args.output_file.write(f"""
753 \t\t.metric_table = {{
754 \t\t\t.pmus = {metric_tblname},
755 \t\t\t.num_pmus = ARRAY_SIZE({metric_tblname})
757 printed_metric_tables.append(metric_tblname)
758 _args.output_file.write(f"""
759 \t\t.name = \"{tblname}\",
762 for tblname in _sys_metric_tables:
763 if tblname in printed_metric_tables:
765 _args.output_file.write(f"""\t{{
766 \t\t.metric_table = {{
767 \t\t\t.entries = {tblname},
768 \t\t\t.length = ARRAY_SIZE({tblname})
770 \t\t.name = \"{tblname}\",
773 _args.output_file.write("""\t{
774 \t\t.event_table = { 0, 0 },
775 \t\t.metric_table = { 0, 0 },
779 static void decompress_event(int offset, struct pmu_event *pe)
781 \tconst char *p = &big_c_string[offset];
783 for attr in _json_event_attributes:
784 _args.output_file.write(f'\n\tpe->{attr} = ')
785 if attr in _json_enum_attributes:
786 _args.output_file.write("*p - '0';\n")
788 _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
789 if attr == _json_event_attributes[-1]:
791 if attr in _json_enum_attributes:
792 _args.output_file.write('\tp++;')
794 _args.output_file.write('\twhile (*p++);')
795 _args.output_file.write("""}
797 static void decompress_metric(int offset, struct pmu_metric *pm)
799 \tconst char *p = &big_c_string[offset];
801 for attr in _json_metric_attributes:
802 _args.output_file.write(f'\n\tpm->{attr} = ')
803 if attr in _json_enum_attributes:
804 _args.output_file.write("*p - '0';\n")
806 _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
807 if attr == _json_metric_attributes[-1]:
809 if attr in _json_enum_attributes:
810 _args.output_file.write('\tp++;')
812 _args.output_file.write('\twhile (*p++);')
813 _args.output_file.write("""}
815 static int pmu_events_table__for_each_event_pmu(const struct pmu_events_table *table,
816 const struct pmu_table_entry *pmu,
817 pmu_event_iter_fn fn,
821 struct pmu_event pe = {
822 .pmu = &big_c_string[pmu->pmu_name.offset],
825 for (uint32_t i = 0; i < pmu->num_entries; i++) {
826 decompress_event(pmu->entries[i].offset, &pe);
829 ret = fn(&pe, table, data);
836 static int pmu_events_table__find_event_pmu(const struct pmu_events_table *table,
837 const struct pmu_table_entry *pmu,
839 pmu_event_iter_fn fn,
842 struct pmu_event pe = {
843 .pmu = &big_c_string[pmu->pmu_name.offset],
845 int low = 0, high = pmu->num_entries - 1;
847 while (low <= high) {
848 int cmp, mid = (low + high) / 2;
850 decompress_event(pmu->entries[mid].offset, &pe);
852 if (!pe.name && !name)
855 if (!pe.name && name) {
859 if (pe.name && !name) {
864 cmp = strcasecmp(pe.name, name);
874 return fn ? fn(&pe, table, data) : 0;
879 int pmu_events_table__for_each_event(const struct pmu_events_table *table,
880 struct perf_pmu *pmu,
881 pmu_event_iter_fn fn,
884 for (size_t i = 0; i < table->num_pmus; i++) {
885 const struct pmu_table_entry *table_pmu = &table->pmus[i];
886 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
889 if (pmu && !pmu__name_match(pmu, pmu_name))
892 ret = pmu_events_table__for_each_event_pmu(table, table_pmu, fn, data);
899 int pmu_events_table__find_event(const struct pmu_events_table *table,
900 struct perf_pmu *pmu,
902 pmu_event_iter_fn fn,
905 for (size_t i = 0; i < table->num_pmus; i++) {
906 const struct pmu_table_entry *table_pmu = &table->pmus[i];
907 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
910 if (!pmu__name_match(pmu, pmu_name))
913 ret = pmu_events_table__find_event_pmu(table, table_pmu, name, fn, data);
920 size_t pmu_events_table__num_events(const struct pmu_events_table *table,
921 struct perf_pmu *pmu)
925 for (size_t i = 0; i < table->num_pmus; i++) {
926 const struct pmu_table_entry *table_pmu = &table->pmus[i];
927 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
929 if (pmu__name_match(pmu, pmu_name))
930 count += table_pmu->num_entries;
935 static int pmu_metrics_table__for_each_metric_pmu(const struct pmu_metrics_table *table,
936 const struct pmu_table_entry *pmu,
937 pmu_metric_iter_fn fn,
941 struct pmu_metric pm = {
942 .pmu = &big_c_string[pmu->pmu_name.offset],
945 for (uint32_t i = 0; i < pmu->num_entries; i++) {
946 decompress_metric(pmu->entries[i].offset, &pm);
949 ret = fn(&pm, table, data);
956 int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
957 pmu_metric_iter_fn fn,
960 for (size_t i = 0; i < table->num_pmus; i++) {
961 int ret = pmu_metrics_table__for_each_metric_pmu(table, &table->pmus[i],
970 const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
972 const struct pmu_events_table *table = NULL;
973 char *cpuid = perf_pmu__getcpuid(pmu);
976 /* on some platforms which uses cpus map, cpuid can be NULL for
977 * PMUs other than CORE PMUs.
984 const struct pmu_events_map *map = &pmu_events_map[i++];
988 if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
989 table = &map->event_table;
997 for (i = 0; i < table->num_pmus; i++) {
998 const struct pmu_table_entry *table_pmu = &table->pmus[i];
999 const char *pmu_name = &big_c_string[table_pmu->pmu_name.offset];
1001 if (pmu__name_match(pmu, pmu_name))
1007 const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
1009 const struct pmu_metrics_table *table = NULL;
1010 char *cpuid = perf_pmu__getcpuid(pmu);
1013 /* on some platforms which uses cpus map, cpuid can be NULL for
1014 * PMUs other than CORE PMUs.
1021 const struct pmu_events_map *map = &pmu_events_map[i++];
1025 if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
1026 table = &map->metric_table;
1034 const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
1036 for (const struct pmu_events_map *tables = &pmu_events_map[0];
1039 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
1040 return &tables->event_table;
1045 const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
1047 for (const struct pmu_events_map *tables = &pmu_events_map[0];
1050 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
1051 return &tables->metric_table;
1056 int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
1058 for (const struct pmu_events_map *tables = &pmu_events_map[0];
1061 int ret = pmu_events_table__for_each_event(&tables->event_table,
1062 /*pmu=*/ NULL, fn, data);
1070 int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
1072 for (const struct pmu_events_map *tables = &pmu_events_map[0];
1075 int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
1083 const struct pmu_events_table *find_sys_events_table(const char *name)
1085 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1088 if (!strcmp(tables->name, name))
1089 return &tables->event_table;
1094 int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
1096 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1099 int ret = pmu_events_table__for_each_event(&tables->event_table,
1100 /*pmu=*/ NULL, fn, data);
1108 int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
1110 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
1113 int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
1122 def print_metricgroups() -> None:
1123 _args.output_file.write("""
1124 static const int metricgroups[][2] = {
1126 for mgroup in sorted(_metricgroups):
1127 description = _metricgroups[mgroup]
1128 _args.output_file.write(
1129 f'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n'
1131 _args.output_file.write("""
1134 const char *describe_metricgroup(const char *group)
1136 int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;
1138 while (low <= high) {
1139 int mid = (low + high) / 2;
1140 const char *mgroup = &big_c_string[metricgroups[mid][0]];
1141 int cmp = strcmp(mgroup, group);
1144 return &big_c_string[metricgroups[mid][1]];
1145 } else if (cmp < 0) {
def dir_path(path: str) -> str:
  """Validate path is a directory for argparse."""
  # Guard-clause form: reject non-directories, otherwise echo the path
  # back so argparse stores it unchanged.
  if not os.path.isdir(path):
    raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
  return path
1164 def ftw(path: str, parents: Sequence[str],
1165 action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
1166 """Replicate the directory/file walking behavior of C's file tree walk."""
1167 for item in sorted(os.scandir(path), key=lambda e: e.name):
1168 if _args.model != 'all' and item.is_dir():
1169 # Check if the model matches one in _args.model.
1170 if len(parents) == _args.model.split(',')[0].count('/'):
1171 # We're testing the correct directory.
1172 item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
1173 if 'test' not in item_path and item_path not in _args.model.split(','):
1175 action(parents, item)
1177 ftw(item.path, parents + [item.name], action)
1179 ap = argparse.ArgumentParser()
1180 ap.add_argument('arch', help='Architecture name like x86')
1181 ap.add_argument('model', help='''Select a model such as skylake to
1182 reduce the code size. Normally set to "all". For architectures like
1183 ARM64 with an implementor/model, the model must include the implementor
1184 such as "arm/cortex-a34".''',
1189 help='Root of tree containing architecture directories containing json files'
1192 'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
1193 _args = ap.parse_args()
1195 _args.output_file.write("""
1196 #include <pmu-events/pmu-events.h>
1197 #include "util/header.h"
1198 #include "util/pmu.h"
1202 struct compact_pmu_event {
1206 struct pmu_table_entry {
1207 const struct compact_pmu_event *entries;
1208 uint32_t num_entries;
1209 struct compact_pmu_event pmu_name;
1214 for item in os.scandir(_args.starting_dir):
1215 if not item.is_dir():
1217 if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
1218 archs.append(item.name)
1221 raise IOError(f'Missing architecture directory \'{_args.arch}\'')
1225 arch_path = f'{_args.starting_dir}/{arch}'
1226 preprocess_arch_std_files(arch_path)
1227 ftw(arch_path, [], preprocess_one_file)
1230 _args.output_file.write('static const char *const big_c_string =\n')
1231 for s in _bcs.big_string:
1232 _args.output_file.write(s)
1233 _args.output_file.write(';\n\n')
1235 arch_path = f'{_args.starting_dir}/{arch}'
1236 ftw(arch_path, [], process_one_file)
1237 print_pending_events()
1238 print_pending_metrics()
1240 print_mapping_table(archs)
1241 print_system_mapping_table()
1242 print_metricgroups()
1244 if __name__ == '__main__':