2 # SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
3 """Convert directories of JSON events to C code."""
6 from functools import lru_cache
11 from typing import (Callable, Dict, Optional, Sequence, Set, Tuple)
# NOTE(review): this is a numbered listing with gaps — the embedded original
# line numbers (14, 16, 18, ...) are not contiguous, so several source lines
# (e.g. the actual assignments for _args, _event_tables, _metric_tables,
# _arch_std_events, _pending_events, _pending_metrics, _bcs, _metricgroups)
# appear to be missing here. Verify against the full file before editing.
14 # Global command line arguments.
16 # List of regular event tables.
18 # List of event tables generated from "/sys" directories.
19 _sys_event_tables = []
20 # List of regular metric tables.
22 # List of metric tables generated from "/sys" directories.
23 _sys_metric_tables = []
24 # Mapping between sys event table names and sys metric table names.
25 _sys_event_table_to_metric_table_mapping = {}
26 # Map from an event name to an architecture standard
27 # JsonEvent. Architecture standard events are in json files in the top
28 # f'{_args.starting_dir}/{_args.arch}' directory.
30 # Events to write out when the table is closed
32 # Name of events table to be written out
33 _pending_events_tblname = None
34 # Metrics to write out when the table is closed
36 # Name of metrics table to be written out
37 _pending_metrics_tblname = None
38 # Global BigCString shared by all structures.
40 # Map from the name of a metric group to a description of the group.
42 # Order specific JsonEvent attributes will be visited.
43 _json_event_attributes = [
44 # cmp_sevent related attributes.
45 'name', 'pmu', 'topic', 'desc',
46 # Seems useful, put it early.
48 # Short things in alphabetical order.
49 'compat', 'deprecated', 'perpkg', 'unit',
50 # Longer things (the last won't be iterated over during decompress).
54 # Attributes that are in pmu_metric rather than pmu_event.
55 _json_metric_attributes = [
56 'pmu', 'metric_name', 'metric_group', 'metric_expr', 'metric_threshold',
57 'desc', 'long_desc', 'unit', 'compat', 'metricgroup_no_group',
58 'default_metricgroup_name', 'aggr_mode', 'event_grouping'
# NOTE(review): the ordering of the attribute lists above must match the
# decompress_event()/decompress_metric() C emitters later in this file —
# the generated C walks the packed string in exactly this order.
60 # Attributes that are bools or enum int values, encoded as '0', '1',...
61 _json_enum_attributes = ['aggr_mode', 'deprecated', 'event_grouping', 'perpkg']
def removesuffix(s: str, suffix: str) -> str:
  """Remove the suffix from a string

  The removesuffix function is added to str in Python 3.9. We aim for 3.6
  compatibility and so provide our own function here.
  """
  # Guard against an empty suffix: without it, s[0:-len('')] is s[0:0]
  # which would wrongly return '' instead of s (str.removesuffix returns
  # s unchanged for an empty suffix).
  return s[0:-len(suffix)] if suffix and s.endswith(suffix) else s
def file_name_to_table_name(prefix: str, parents: Sequence[str],
                            dirname: str) -> str:
  """Generate a C table name from directory names."""
  # Join the prefix and every path component with '_', then make the
  # result a valid C identifier by mapping '-' to '_'.
  components = [prefix] + list(parents) + [dirname]
  return '_'.join(components).replace('-', '_')
def c_len(s: str) -> int:
  r"""Return the length of s as a C string

  This doesn't handle all escape characters properly. It first assumes
  all \ are for escaping, it then adjusts as it will have over counted
  \\. The code uses \000 rather than \0 as a terminator as an adjacent
  number would be folded into a string of \0 (ie. "\0" + "5" doesn't
  equal a terminator followed by the number 5 but the escape of
  \05). The code adjusts for \000 but not properly for all octal, hex
  or unicode values.
  """
  try:
    utf = s.encode(encoding='utf-8', errors='strict')
  except UnicodeError:
    # Narrowed from a bare 'except:' — only encoding failures are
    # expected here; report which string is broken, then propagate.
    print(f'broken string {s}')
    raise
  # len(utf) counts every byte; each '\' escape consumes its next char
  # (subtract count of '\'), '\\' was then over-subtracted (add back the
  # pair count), and '\000' packs 4 bytes into a single NUL (subtract 2
  # more per occurrence).
  return len(utf) - utf.count(b'\\') + utf.count(b'\\\\') - (utf.count(b'\\000') * 2)
# NOTE(review): the enclosing 'class BigCString:' header and several body
# lines (the strings-set initialiser, add()'s body, the folded_strings
# setup, best_pos tracking and the big_string/offsets initialisation) fall
# in the gaps of this numbered listing — confirm against the full file.
101 """A class to hold many strings concatenated together.
103 Generating a large number of stand-alone C strings creates a large
104 number of relocations in position independent code. The BigCString
105 is a helper for this case. It builds a single string which within it
106 are all the other C strings (to avoid memory issues the string
107 itself is held as a list of strings). The offsets within the big
108 string are recorded and when stored to disk these don't need
109 relocation. To reduce the size of the string further, identical
110 strings are merged. If a longer string ends-with the same value as a
111 shorter string, these entries are also merged.
114 big_string: Sequence[str]
115 offsets: Dict[str, int]
120 def add(self, s: str) -> None:
121 """Called to add to the big string."""
124 def compute(self) -> None:
125 """Called once all strings are added to compute the string and offsets."""
128 # Determine if two strings can be folded, ie. let 1 string use the
129 # end of another. First reverse all strings and sort them.
130 sorted_reversed_strings = sorted([x[::-1] for x in self.strings])
132 # Strings 'xyz' and 'yz' will now be [ 'zy', 'zyx' ]. Scan forward
133 # for each string to see if there is a better candidate to fold it
134 # into, in the example rather than using 'yz' we can use'xyz' at
135 # an offset of 1. We record which string can be folded into which
136 # in folded_strings, we don't need to record the offset as it is
137 # trivially computed from the string lengths.
138 for pos,s in enumerate(sorted_reversed_strings):
140 for check_pos in range(pos + 1, len(sorted_reversed_strings)):
141 if sorted_reversed_strings[check_pos].startswith(s):
146 folded_strings[s[::-1]] = sorted_reversed_strings[best_pos][::-1]
148 # Compute reverse mappings for debugging.
149 fold_into_strings = collections.defaultdict(set)
150 for key, val in folded_strings.items():
152 fold_into_strings[val].add(key)
154 # big_string_offset is the current location within the C string
155 # being appended to - comments, etc. don't count. big_string is
156 # the string contents represented as a list. Strings are immutable
157 # in Python and so appending to one causes memory issues, while
159 big_string_offset = 0
163 # Emit all strings that aren't folded in a sorted manner.
164 for s in sorted(self.strings):
165 if s not in folded_strings:
166 self.offsets[s] = big_string_offset
167 self.big_string.append(f'/* offset={big_string_offset} */ "')
168 self.big_string.append(s)
169 self.big_string.append('"')
170 if s in fold_into_strings:
171 self.big_string.append(' /* also: ' + ', '.join(fold_into_strings[s]) + ' */')
172 self.big_string.append('\n')
173 big_string_offset += c_len(s)
176 # Compute the offsets of the folded strings.
177 for s in folded_strings.keys():
178 assert s not in self.offsets
# A folded (shorter) string shares the tail of the longer string it was
# folded into, so its offset is the longer string's offset plus the
# length difference (lengths measured with c_len, i.e. C-string bytes).
179 folded_s = folded_strings[s]
180 self.offsets[s] = self.offsets[folded_s] + c_len(folded_s) - c_len(s)
# NOTE(review): the 'class JsonEvent:' header and many __init__ body lines
# (dict initialisers, else branches, extra_desc setup, the event_fields
# list head/tail, and the Filter/MSR conditionals) are missing from this
# numbered listing — do not edit without the full file.
185 """Representation of an event loaded from a json file dictionary."""
187 def __init__(self, jd: dict):
188 """Constructor passed the dictionary of parsed json values."""
190 def llx(x: int) -> str:
191 """Convert an int to a string similar to a printf modifier of %#llx."""
192 return '0' if x == 0 else hex(x)
194 def fixdesc(s: str) -> str:
195 """Fix formatting issue for the desc string."""
198 return removesuffix(removesuffix(removesuffix(s, '. '),
199 '. '), '.').replace('\n', '\\n').replace(
200 '\"', '\\"').replace('\r', '\\r')
202 def convert_aggr_mode(aggr_mode: str) -> Optional[str]:
203 """Returns the aggr_mode_class enum value associated with the JSON string."""
206 aggr_mode_to_enum = {
210 return aggr_mode_to_enum[aggr_mode]
212 def convert_metric_constraint(metric_constraint: str) -> Optional[str]:
213 """Returns the metric_event_groups enum value associated with the JSON string."""
214 if not metric_constraint:
216 metric_constraint_to_enum = {
217 'NO_GROUP_EVENTS': '1',
218 'NO_GROUP_EVENTS_NMI': '2',
219 'NO_NMI_WATCHDOG': '2',
220 'NO_GROUP_EVENTS_SMT': '3',
222 return metric_constraint_to_enum[metric_constraint]
224 def lookup_msr(num: str) -> Optional[str]:
225 """Converts the msr number, or first in a list to the appropriate event field."""
230 0x1A6: 'offcore_rsp=',
231 0x1A7: 'offcore_rsp=',
234 return msrmap[int(num.split(',', 1)[0], 0)]
236 def real_event(name: str, event: str) -> Optional[str]:
237 """Convert well known event names to an event string otherwise use the event argument."""
239 'inst_retired.any': 'event=0xc0,period=2000003',
240 'inst_retired.any_p': 'event=0xc0,period=2000003',
241 'cpu_clk_unhalted.ref': 'event=0x0,umask=0x03,period=2000003',
242 'cpu_clk_unhalted.thread': 'event=0x3c,period=2000003',
243 'cpu_clk_unhalted.core': 'event=0x3c,period=2000003',
244 'cpu_clk_unhalted.thread_any': 'event=0x3c,any=1,period=2000003',
248 if name.lower() in fixed:
249 return fixed[name.lower()]
252 def unit_to_pmu(unit: str) -> Optional[str]:
253 """Convert a JSON Unit to Linux PMU name."""
256 # Comment brought over from jevents.c:
257 # it's not realistic to keep adding these, we need something more scalable ...
259 'CBO': 'uncore_cbox',
260 'QPI LL': 'uncore_qpi',
261 'SBO': 'uncore_sbox',
262 'iMPH-U': 'uncore_arb',
263 'CPU-M-CF': 'cpum_cf',
264 'CPU-M-SF': 'cpum_sf',
265 'PAI-CRYPTO' : 'pai_crypto',
266 'PAI-EXT' : 'pai_ext',
267 'UPI LL': 'uncore_upi',
268 'hisi_sicl,cpa': 'hisi_sicl,cpa',
269 'hisi_sccl,ddrc': 'hisi_sccl,ddrc',
270 'hisi_sccl,hha': 'hisi_sccl,hha',
271 'hisi_sccl,l3c': 'hisi_sccl,l3c',
272 'imx8_ddr': 'imx8_ddr',
275 'cpu_core': 'cpu_core',
276 'cpu_atom': 'cpu_atom',
277 'ali_drw': 'ali_drw',
# Units not present in the table default to an uncore PMU name.
279 return table[unit] if unit in table else f'uncore_{unit.lower()}'
282 if 'EventCode' in jd:
283 eventcode = int(jd['EventCode'].split(',', 1)[0], 0)
285 eventcode |= int(jd['ExtSel']) << 8
286 configcode = int(jd['ConfigCode'], 0) if 'ConfigCode' in jd else None
287 self.name = jd['EventName'].lower() if 'EventName' in jd else None
289 self.compat = jd.get('Compat')
290 self.desc = fixdesc(jd.get('BriefDescription'))
291 self.long_desc = fixdesc(jd.get('PublicDescription'))
292 precise = jd.get('PEBS')
293 msr = lookup_msr(jd.get('MSRIndex'))
294 msrval = jd.get('MSRValue')
297 extra_desc += ' Supports address when precise'
301 extra_desc += ' Spec update: ' + jd['Errata']
302 self.pmu = unit_to_pmu(jd.get('Unit'))
303 filter = jd.get('Filter')
304 self.unit = jd.get('ScaleUnit')
305 self.perpkg = jd.get('PerPkg')
306 self.aggr_mode = convert_aggr_mode(jd.get('AggregationMode'))
307 self.deprecated = jd.get('Deprecated')
308 self.metric_name = jd.get('MetricName')
309 self.metric_group = jd.get('MetricGroup')
310 self.metricgroup_no_group = jd.get('MetricgroupNoGroup')
311 self.default_metricgroup_name = jd.get('DefaultMetricgroupName')
312 self.event_grouping = convert_metric_constraint(jd.get('MetricConstraint'))
313 self.metric_expr = None
314 if 'MetricExpr' in jd:
315 self.metric_expr = metric.ParsePerfJson(jd['MetricExpr']).Simplify()
316 # Note, the metric formula for the threshold isn't parsed as the &
317 # and > have incorrect precedence.
318 self.metric_threshold = jd.get('MetricThreshold')
320 arch_std = jd.get('ArchStdEvent')
321 if precise and self.desc and '(Precise Event)' not in self.desc:
322 extra_desc += ' (Must be precise)' if precise == '2' else (' (Precise '
# Build the perf event-string: config/event code first, then any JSON
# fields that map to perf format terms.
324 event = f'config={llx(configcode)}' if configcode is not None else f'event={llx(eventcode)}'
326 ('AnyThread', 'any='),
327 ('PortMask', 'ch_mask='),
328 ('CounterMask', 'cmask='),
329 ('EdgeDetect', 'edge='),
330 ('FCMask', 'fc_mask='),
332 ('SampleAfterValue', 'period='),
335 for key, value in event_fields:
336 if key in jd and jd[key] != '0':
337 event += ',' + value + jd[key]
339 event += f',{filter}'
341 event += f',{msr}{msrval}'
342 if self.desc and extra_desc:
343 self.desc += extra_desc
344 if self.long_desc and extra_desc:
345 self.long_desc += extra_desc
347 if self.desc and not self.desc.endswith('. '):
349 self.desc = (self.desc if self.desc else '') + ('Unit: ' + self.pmu + ' ')
# ArchStdEvent lets a model file inherit from an architecture standard
# event; only fields the model leaves unset are copied over.
351 if arch_std.lower() in _arch_std_events:
352 event = _arch_std_events[arch_std.lower()].event
353 # Copy from the architecture standard event to self for undefined fields.
354 for attr, value in _arch_std_events[arch_std.lower()].__dict__.items():
355 if hasattr(self, attr) and not getattr(self, attr):
356 setattr(self, attr, value)
358 raise argparse.ArgumentTypeError('Cannot find arch std event:', arch_std)
360 self.event = real_event(self.name, event)
def __repr__(self) -> str:
  """String representation primarily for debugging."""
  # Render every truthy attribute as a '\tname = value,' line wrapped in
  # braces; falsy attributes are omitted to keep the dump compact.
  lines = ['{\n']
  for field, field_value in self.__dict__.items():
    if field_value:
      lines.append(f'\t{field} = {field_value},\n')
  return ''.join(lines) + '}'
# NOTE(review): listing gaps — the accumulator initialisation, the enum
# branch body and the final return are missing lines here; confirm against
# the full file.
370 def build_c_string(self, metric: bool) -> str:
# Pack the event/metric attributes into one string, visiting them in the
# exact order of _json_metric_attributes / _json_event_attributes so the
# generated C decompress functions can walk the string positionally.
372 for attr in _json_metric_attributes if metric else _json_event_attributes:
373 x = getattr(self, attr)
374 if metric and x and attr == 'metric_expr':
375 # Convert parsed metric expressions into a string. Slashes
376 # must be doubled in the file.
377 x = x.ToPerfJson().replace('\\', '\\\\')
378 if metric and x and attr == 'metric_threshold':
379 x = x.replace('\\', '\\\\')
380 if attr in _json_enum_attributes:
# Non-enum attributes are NUL-terminated ('\000') sub-strings; an unset
# attribute contributes just the terminator.
383 s += f'{x}\\000' if x else '\\000'
def to_c_string(self, metric: bool) -> str:
  """Representation of the event as a C struct initializer."""
  # The packed string lives in the shared big C string; the struct only
  # stores its offset. Keep the original string in a trailing comment so
  # the generated C stays readable.
  key = self.build_c_string(metric)
  return f'{{ { _bcs.offsets[key] } }}, /* {key} */\n'
# NOTE(review): listing gaps — the try:, the re-raise, the events loop
# header, the 'if updates:' guard and the final 'return events' are among
# the missing lines; confirm against the full file.
393 @lru_cache(maxsize=None)
394 def read_json_events(path: str, topic: str) -> Sequence[JsonEvent]:
395 """Read json events from the specified file."""
397 events = json.load(open(path), object_hook=JsonEvent)
398 except BaseException as err:
399 print(f"Exception processing {path}")
# Collect simple (no '-' in the name) metrics so that metrics defined in
# terms of other metrics can be rewritten into base events.
401 metrics: list[Tuple[str, str, metric.Expression]] = []
404 if event.metric_name and '-' not in event.metric_name:
405 metrics.append((event.pmu, event.metric_name, event.metric_expr))
406 updates = metric.RewriteMetricsInTermsOfOthers(metrics)
409 if event.metric_name in updates:
410 # print(f'Updated {event.metric_name} from\n"{event.metric_expr}"\n'
411 # f'to\n"{updates[event.metric_name]}"')
412 event.metric_expr = updates[event.metric_name]
416 def preprocess_arch_std_files(archpath: str) -> None:
417 """Read in all architecture standard events."""
418 global _arch_std_events
# Index standard events by lower-cased event name and metric name so
# model files can reference them via 'ArchStdEvent' case-insensitively.
419 for item in os.scandir(archpath):
420 if item.is_file() and item.name.endswith('.json'):
421 for event in read_json_events(item.path, topic=''):
# NOTE(review): a guard line (presumably 'if event.name:') is missing from
# this listing between the loop and the assignment below.
423 _arch_std_events[event.name.lower()] = event
424 if event.metric_name:
425 _arch_std_events[event.metric_name.lower()] = event
428 def add_events_table_entries(item: os.DirEntry, topic: str) -> None:
429 """Add contents of file to _pending_events table."""
# Entries are buffered in the module-level pending lists and flushed by
# print_pending_events()/print_pending_metrics() when a table closes.
430 for e in read_json_events(item.path, topic):
# NOTE(review): the guard lines selecting events vs. metrics (presumably
# checks on e.name / e.metric_name) are missing from this listing.
432 _pending_events.append(e)
434 _pending_metrics.append(e)
437 def print_pending_events() -> None:
438 """Optionally close events table."""
# Sort key: events with a description first, then by topic, name, pmu and
# metric name so the emitted C table has a stable, deterministic order.
440 def event_cmp_key(j: JsonEvent) -> Tuple[bool, str, str, str, str]:
441 def fix_none(s: Optional[str]) -> str:
446 return (j.desc is not None, fix_none(j.topic), fix_none(j.name), fix_none(j.pmu),
447 fix_none(j.metric_name))
449 global _pending_events
450 if not _pending_events:
# Tables from /sys directories are tracked separately from regular ones.
453 global _pending_events_tblname
454 if _pending_events_tblname.endswith('_sys'):
455 global _sys_event_tables
456 _sys_event_tables.append(_pending_events_tblname)
459 _event_tables.append(_pending_events_tblname)
461 _args.output_file.write(
462 f'static const struct compact_pmu_event {_pending_events_tblname}[] = {{\n')
464 for event in sorted(_pending_events, key=event_cmp_key):
465 _args.output_file.write(event.to_c_string(metric=False))
468 _args.output_file.write('};\n\n')
470 def print_pending_metrics() -> None:
471 """Optionally close metrics table."""
# Mirrors print_pending_events() but for the metrics table; the sort key
# only needs pmu and metric name.
473 def metric_cmp_key(j: JsonEvent) -> Tuple[bool, str, str]:
474 def fix_none(s: Optional[str]) -> str:
479 return (j.desc is not None, fix_none(j.pmu), fix_none(j.metric_name))
481 global _pending_metrics
482 if not _pending_metrics:
485 global _pending_metrics_tblname
486 if _pending_metrics_tblname.endswith('_sys'):
487 global _sys_metric_tables
488 _sys_metric_tables.append(_pending_metrics_tblname)
491 _metric_tables.append(_pending_metrics_tblname)
493 _args.output_file.write(
494 f'static const struct compact_pmu_event {_pending_metrics_tblname}[] = {{\n')
496 for metric in sorted(_pending_metrics, key=metric_cmp_key):
497 _args.output_file.write(metric.to_c_string(metric=True))
498 _pending_metrics = []
500 _args.output_file.write('};\n\n')
def get_topic(topic: str) -> str:
  """Convert a JSON file name to a topic string.

  'pipeline.json' becomes 'pipeline' and 'uncore-memory.json' becomes
  'uncore memory'. Any file name ending in 'metrics.json' is assigned
  the 'metrics' topic.
  """
  # Restored: this early-return branch's body was lost in the listing —
  # without a return value the function would fall through incorrectly.
  if topic.endswith('metrics.json'):
    return 'metrics'
  return removesuffix(topic, '.json').replace('-', ' ')
# NOTE(review): listing gaps — the docstring, the 'level' computation and
# several 'return' lines of this function are missing; confirm against the
# full file.
507 def preprocess_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
512 # base dir or too deep
514 if level == 0 or level > 4:
517 # Ignore other directories. If the file name does not have a .json
518 # extension, ignore it. It could be a readme.txt for instance.
519 if not item.is_file() or not item.name.endswith('.json'):
# metricgroups.json maps group names to descriptions rather than holding
# events; its strings still go into the big C string.
522 if item.name == 'metricgroups.json':
523 metricgroup_descriptions = json.load(open(item.path))
524 for mgroup in metricgroup_descriptions:
525 assert len(mgroup) > 1, parents
526 description = f"{metricgroup_descriptions[mgroup]}\\000"
527 mgroup = f"{mgroup}\\000"
529 _bcs.add(description)
530 _metricgroups[mgroup] = description
# Pre-register every event/metric string with the BigCString so offsets
# exist before the main processing pass emits table entries.
533 topic = get_topic(item.name)
534 for event in read_json_events(item.path, topic):
536 _bcs.add(event.build_c_string(metric=False))
537 if event.metric_name:
538 _bcs.add(event.build_c_string(metric=True))
540 def process_one_file(parents: Sequence[str], item: os.DirEntry) -> None:
541 """Process a JSON file during the main walk."""
# NOTE(review): is_leaf_dir's body and several guard/return lines are
# missing from this listing; confirm against the full file.
542 def is_leaf_dir(path: str) -> bool:
543 for item in os.scandir(path):
# Entering a new leaf (model) directory closes any tables in progress and
# starts fresh pending event/metric tables named after the path.
548 # model directory, reset topic
549 if item.is_dir() and is_leaf_dir(item.path):
550 print_pending_events()
551 print_pending_metrics()
553 global _pending_events_tblname
554 _pending_events_tblname = file_name_to_table_name('pmu_events_', parents, item.name)
555 global _pending_metrics_tblname
556 _pending_metrics_tblname = file_name_to_table_name('pmu_metrics_', parents, item.name)
558 if item.name == 'sys':
559 _sys_event_table_to_metric_table_mapping[_pending_events_tblname] = _pending_metrics_tblname
562 # base dir or too deep
564 if level == 0 or level > 4:
567 # Ignore other directories. If the file name does not have a .json
568 # extension, ignore it. It could be a readme.txt for instance.
569 if not item.is_file() or not item.name.endswith('.json') or item.name == 'metricgroups.json':
572 add_events_table_entries(item, get_topic(item.name))
# NOTE(review): this emitter mixes Python with embedded C templates; many
# template lines (struct field tails, closing braces/terminators, the CSV
# row loop header and 'first' handling) are missing from this listing.
# Confirm against the full file before editing.
575 def print_mapping_table(archs: Sequence[str]) -> None:
576 """Read the mapfile and generate the struct from cpuid string to event table."""
577 _args.output_file.write("""
578 /* Struct used to make the PMU event table implementation opaque to callers. */
579 struct pmu_events_table {
580 const struct compact_pmu_event *entries;
584 /* Struct used to make the PMU metric table implementation opaque to callers. */
585 struct pmu_metrics_table {
586 const struct compact_pmu_event *entries;
591 * Map a CPU to its table of PMU events. The CPU is identified by the
592 * cpuid field, which is an arch-specific identifier for the CPU.
593 * The identifier specified in tools/perf/pmu-events/arch/xxx/mapfile
594 * must match the get_cpuid_str() in tools/perf/arch/xxx/util/header.c)
596 * The cpuid can contain any character other than the comma.
598 struct pmu_events_map {
601 struct pmu_events_table event_table;
602 struct pmu_metrics_table metric_table;
606 * Global table mapping each known CPU for the architecture to its
607 * table of PMU events.
609 const struct pmu_events_map pmu_events_map[] = {
# A fixed 'testarch/testcpu' entry is emitted first for perf's self tests.
613 _args.output_file.write("""{
614 \t.arch = "testarch",
615 \t.cpuid = "testcpu",
617 \t\t.entries = pmu_events__test_soc_cpu,
618 \t\t.length = ARRAY_SIZE(pmu_events__test_soc_cpu),
621 \t\t.entries = pmu_metrics__test_soc_cpu,
622 \t\t.length = ARRAY_SIZE(pmu_metrics__test_soc_cpu),
627 with open(f'{_args.starting_dir}/{arch}/mapfile.csv') as csvfile:
628 table = csv.reader(csvfile)
631 # Skip the first row or any row beginning with #.
632 if not first and len(row) > 0 and not row[0].startswith('#'):
633 event_tblname = file_name_to_table_name('pmu_events_', [], row[2].replace('/', '_'))
634 if event_tblname in _event_tables:
635 event_size = f'ARRAY_SIZE({event_tblname})'
637 event_tblname = 'NULL'
639 metric_tblname = file_name_to_table_name('pmu_metrics_', [], row[2].replace('/', '_'))
640 if metric_tblname in _metric_tables:
641 metric_size = f'ARRAY_SIZE({metric_tblname})'
643 metric_tblname = 'NULL'
645 if event_size == '0' and metric_size == '0':
# cpuid strings may contain backslashes (regex escapes); double them for C.
647 cpuid = row[0].replace('\\', '\\\\')
648 _args.output_file.write(f"""{{
650 \t.cpuid = "{cpuid}",
652 \t\t.entries = {event_tblname},
653 \t\t.length = {event_size}
656 \t\t.entries = {metric_tblname},
657 \t\t.length = {metric_size}
# Sentinel entry with zeroed tables terminates the map for C iteration.
663 _args.output_file.write("""{
666 \t.event_table = { 0, 0 },
667 \t.metric_table = { 0, 0 },
# NOTE(review): heavy listing gaps throughout — closing template strings,
# loop terminators and many C body lines are missing. This function emits
# the /sys tables plus the C decompress and iteration helpers; confirm
# against the full file before editing.
673 def print_system_mapping_table() -> None:
674 """C struct mapping table array for tables from /sys directories."""
675 _args.output_file.write("""
676 struct pmu_sys_events {
678 \tstruct pmu_events_table event_table;
679 \tstruct pmu_metrics_table metric_table;
682 static const struct pmu_sys_events pmu_sys_event_tables[] = {
684 printed_metric_tables = []
685 for tblname in _sys_event_tables:
686 _args.output_file.write(f"""\t{{
687 \t\t.event_table = {{
688 \t\t\t.entries = {tblname},
689 \t\t\t.length = ARRAY_SIZE({tblname})
691 metric_tblname = _sys_event_table_to_metric_table_mapping[tblname]
692 if metric_tblname in _sys_metric_tables:
693 _args.output_file.write(f"""
694 \t\t.metric_table = {{
695 \t\t\t.entries = {metric_tblname},
696 \t\t\t.length = ARRAY_SIZE({metric_tblname})
698 printed_metric_tables.append(metric_tblname)
699 _args.output_file.write(f"""
700 \t\t.name = \"{tblname}\",
# Metric-only /sys tables (no paired event table) are emitted afterwards.
703 for tblname in _sys_metric_tables:
704 if tblname in printed_metric_tables:
706 _args.output_file.write(f"""\t{{
707 \t\t.metric_table = {{
708 \t\t\t.entries = {tblname},
709 \t\t\t.length = ARRAY_SIZE({tblname})
711 \t\t.name = \"{tblname}\",
714 _args.output_file.write("""\t{
715 \t\t.event_table = { 0, 0 },
716 \t\t.metric_table = { 0, 0 },
# decompress_event()/decompress_metric() walk the packed big_c_string in
# the same attribute order as the _json_*_attributes lists above: enum
# attributes are single '0'..'9' chars, others are NUL-terminated strings.
720 static void decompress_event(int offset, struct pmu_event *pe)
722 \tconst char *p = &big_c_string[offset];
724 for attr in _json_event_attributes:
725 _args.output_file.write(f'\n\tpe->{attr} = ')
726 if attr in _json_enum_attributes:
727 _args.output_file.write("*p - '0';\n")
729 _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
730 if attr == _json_event_attributes[-1]:
732 if attr in _json_enum_attributes:
733 _args.output_file.write('\tp++;')
735 _args.output_file.write('\twhile (*p++);')
736 _args.output_file.write("""}
738 static void decompress_metric(int offset, struct pmu_metric *pm)
740 \tconst char *p = &big_c_string[offset];
742 for attr in _json_metric_attributes:
743 _args.output_file.write(f'\n\tpm->{attr} = ')
744 if attr in _json_enum_attributes:
745 _args.output_file.write("*p - '0';\n")
747 _args.output_file.write("(*p == '\\0' ? NULL : p);\n")
748 if attr == _json_metric_attributes[-1]:
750 if attr in _json_enum_attributes:
751 _args.output_file.write('\tp++;')
753 _args.output_file.write('\twhile (*p++);')
754 _args.output_file.write("""}
756 int pmu_events_table__for_each_event(const struct pmu_events_table *table,
757 pmu_event_iter_fn fn,
760 for (size_t i = 0; i < table->length; i++) {
764 decompress_event(table->entries[i].offset, &pe);
767 ret = fn(&pe, table, data);
774 int pmu_metrics_table__for_each_metric(const struct pmu_metrics_table *table,
775 pmu_metric_iter_fn fn,
778 for (size_t i = 0; i < table->length; i++) {
779 struct pmu_metric pm;
782 decompress_metric(table->entries[i].offset, &pm);
785 ret = fn(&pm, table, data);
792 const struct pmu_events_table *perf_pmu__find_events_table(struct perf_pmu *pmu)
794 const struct pmu_events_table *table = NULL;
795 char *cpuid = perf_pmu__getcpuid(pmu);
798 /* on some platforms which uses cpus map, cpuid can be NULL for
799 * PMUs other than CORE PMUs.
806 const struct pmu_events_map *map = &pmu_events_map[i++];
810 if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
811 table = &map->event_table;
819 const struct pmu_metrics_table *perf_pmu__find_metrics_table(struct perf_pmu *pmu)
821 const struct pmu_metrics_table *table = NULL;
822 char *cpuid = perf_pmu__getcpuid(pmu);
825 /* on some platforms which uses cpus map, cpuid can be NULL for
826 * PMUs other than CORE PMUs.
833 const struct pmu_events_map *map = &pmu_events_map[i++];
837 if (!strcmp_cpuid_str(map->cpuid, cpuid)) {
838 table = &map->metric_table;
846 const struct pmu_events_table *find_core_events_table(const char *arch, const char *cpuid)
848 for (const struct pmu_events_map *tables = &pmu_events_map[0];
851 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
852 return &tables->event_table;
857 const struct pmu_metrics_table *find_core_metrics_table(const char *arch, const char *cpuid)
859 for (const struct pmu_events_map *tables = &pmu_events_map[0];
862 if (!strcmp(tables->arch, arch) && !strcmp_cpuid_str(tables->cpuid, cpuid))
863 return &tables->metric_table;
868 int pmu_for_each_core_event(pmu_event_iter_fn fn, void *data)
870 for (const struct pmu_events_map *tables = &pmu_events_map[0];
873 int ret = pmu_events_table__for_each_event(&tables->event_table, fn, data);
881 int pmu_for_each_core_metric(pmu_metric_iter_fn fn, void *data)
883 for (const struct pmu_events_map *tables = &pmu_events_map[0];
886 int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
894 const struct pmu_events_table *find_sys_events_table(const char *name)
896 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
899 if (!strcmp(tables->name, name))
900 return &tables->event_table;
905 int pmu_for_each_sys_event(pmu_event_iter_fn fn, void *data)
907 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
910 int ret = pmu_events_table__for_each_event(&tables->event_table, fn, data);
918 int pmu_for_each_sys_metric(pmu_metric_iter_fn fn, void *data)
920 for (const struct pmu_sys_events *tables = &pmu_sys_event_tables[0];
923 int ret = pmu_metrics_table__for_each_metric(&tables->metric_table, fn, data);
932 def print_metricgroups() -> None:
# Emit a sorted (name-offset, description-offset) pair table plus a C
# binary search over it. Sorting here is what makes the emitted bsearch
# in describe_metricgroup() valid.
933 _args.output_file.write("""
934 static const int metricgroups[][2] = {
936 for mgroup in sorted(_metricgroups):
937 description = _metricgroups[mgroup]
938 _args.output_file.write(
939 f'\t{{ {_bcs.offsets[mgroup]}, {_bcs.offsets[description]} }}, /* {mgroup} => {description} */\n'
941 _args.output_file.write("""
944 const char *describe_metricgroup(const char *group)
946 int low = 0, high = (int)ARRAY_SIZE(metricgroups) - 1;
948 while (low <= high) {
949 int mid = (low + high) / 2;
950 const char *mgroup = &big_c_string[metricgroups[mid][0]];
951 int cmp = strcmp(mgroup, group);
954 return &big_c_string[metricgroups[mid][1]];
955 } else if (cmp < 0) {
# NOTE(review): the tail of the binary search (low/high updates, the
# not-found return and the closing template) is missing from this listing.
def dir_path(path: str) -> str:
  """Validate path is a directory for argparse.

  Returns the path unchanged when it names an existing directory,
  otherwise raises argparse.ArgumentTypeError so argparse reports a
  clean usage error.
  """
  if os.path.isdir(path):
    # Restored: the success branch's 'return path' was lost in the
    # listing — without it argparse would receive None for valid paths.
    return path
  raise argparse.ArgumentTypeError(f'\'{path}\' is not a valid directory')
974 def ftw(path: str, parents: Sequence[str],
975 action: Callable[[Sequence[str], os.DirEntry], None]) -> None:
976 """Replicate the directory/file walking behavior of C's file tree walk."""
# Entries are visited in sorted name order so generated output is stable
# across runs and file systems.
977 for item in sorted(os.scandir(path), key=lambda e: e.name):
978 if _args.model != 'all' and item.is_dir():
979 # Check if the model matches one in _args.model.
980 if len(parents) == _args.model.split(',')[0].count('/'):
981 # We're testing the correct directory.
982 item_path = '/'.join(parents) + ('/' if len(parents) > 0 else '') + item.name
983 if 'test' not in item_path and item_path not in _args.model.split(','):
# NOTE(review): the skip statement (presumably 'continue') and the
# directory-recursion guard are missing lines in this listing.
985 action(parents, item)
987 ftw(item.path, parents + [item.name], action)
# NOTE(review): the main driver below has many missing lines in this
# listing (remaining add_argument calls, the compact_pmu_event struct
# body, the archs list initialisation and sort, loop headers, and the
# main() wrapper/call). Verify against the full file before editing.
989 ap = argparse.ArgumentParser()
990 ap.add_argument('arch', help='Architecture name like x86')
991 ap.add_argument('model', help='''Select a model such as skylake to
992 reduce the code size. Normally set to "all". For architectures like
993 ARM64 with an implementor/model, the model must include the implementor
994 such as "arm/cortex-a34".''',
999 help='Root of tree containing architecture directories containing json files'
1002 'output_file', type=argparse.FileType('w', encoding='utf-8'), nargs='?', default=sys.stdout)
1003 _args = ap.parse_args()
# Emit the C prologue: headers plus the compact event struct holding only
# an offset into the big C string.
1005 _args.output_file.write("""
1006 #include <pmu-events/pmu-events.h>
1007 #include "util/header.h"
1008 #include "util/pmu.h"
1012 struct compact_pmu_event {
# Select which architecture directories to process ('all' keeps every
# one; 'test' is always included for perf's self tests).
1018 for item in os.scandir(_args.starting_dir):
1019 if not item.is_dir():
1021 if item.name == _args.arch or _args.arch == 'all' or item.name == 'test':
1022 archs.append(item.name)
1025 raise IOError(f'Missing architecture directory \'{_args.arch}\'')
# Pass 1: collect all strings (and arch standard events) so the big C
# string and its offsets are complete before any table is emitted.
1029 arch_path = f'{_args.starting_dir}/{arch}'
1030 preprocess_arch_std_files(arch_path)
1031 ftw(arch_path, [], preprocess_one_file)
1034 _args.output_file.write('static const char *const big_c_string =\n')
1035 for s in _bcs.big_string:
1036 _args.output_file.write(s)
1037 _args.output_file.write(';\n\n')
# Pass 2: walk again and emit the per-model tables, the cpuid mapping
# table, the /sys mapping table and the metric group table.
1039 arch_path = f'{_args.starting_dir}/{arch}'
1040 ftw(arch_path, [], process_one_file)
1041 print_pending_events()
1042 print_pending_metrics()
1044 print_mapping_table(archs)
1045 print_system_mapping_table()
1046 print_metricgroups()
1048 if __name__ == '__main__':