// SPDX-License-Identifier: GPL-2.0
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "metricgroup.h"
#include "cpumap.h"
#include "cputopo.h"
#include "debug.h"
#include "expr.h"
#include "expr-bison.h"
#include "expr-flex.h"
#include "util/hashmap.h"
#include "smt.h"
#include "tsc.h"
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>

#ifdef PARSER_DEBUG
extern int expr_debug;
#endif

struct expr_id_data {
	union {
		struct {
			double val;
			int source_count;
		} val;
		struct {
			double val;
			const char *metric_name;
			const char *metric_expr;
		} ref;
	};

	enum {
		/* Holding a double value. */
		EXPR_ID_DATA__VALUE,
		/* Reference to another metric. */
		EXPR_ID_DATA__REF,
		/* A reference but the value has been computed. */
		EXPR_ID_DATA__REF_VALUE,
	} kind;
};

/* Simple multiplicative string hash over the bytes of the id. */
static size_t key_hash(long key, void *ctx __maybe_unused)
{
	const char *str = (const char *)key;
	size_t hash = 0;

	while (*str != '\0') {
		hash *= 31;
		hash += *str;
		str++;
	}
	return hash;
}

static bool key_equal(long key1, long key2, void *ctx __maybe_unused)
{
	return !strcmp((const char *)key1, (const char *)key2);
}

struct hashmap *ids__new(void)
{
	struct hashmap *hash;

	hash = hashmap__new(key_hash, key_equal, NULL);
	return IS_ERR(hash) ? NULL : hash;
}

void ids__free(struct hashmap *ids)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (ids == NULL)
		return;

	/* The map owns both the id strings and their data. */
	hashmap__for_each_entry(ids, cur, bkt) {
		free((void *)cur->pkey);
		free((void *)cur->pvalue);
	}

	hashmap__free(ids);
}

/* Insert an id into the map with no data attached, replacing any older entry. */
int ids__insert(struct hashmap *ids, const char *id)
{
	struct expr_id_data *data_ptr = NULL, *old_data = NULL;
	char *old_key = NULL;
	int ret;

	ret = hashmap__set(ids, id, data_ptr, &old_key, &old_data);
	free(old_key);
	free(old_data);
	return ret;
}

/* Union of two id maps: the smaller is merged into the larger and freed. */
struct hashmap *ids__union(struct hashmap *ids1, struct hashmap *ids2)
{
	size_t bkt;
	struct hashmap_entry *cur;
	int ret;
	struct expr_id_data *old_data = NULL;
	char *old_key = NULL;

	if (!ids1)
		return ids2;
	if (!ids2)
		return ids1;

	if (hashmap__size(ids1) < hashmap__size(ids2)) {
		struct hashmap *tmp = ids1;

		ids1 = ids2;
		ids2 = tmp;
	}
	hashmap__for_each_entry(ids2, cur, bkt) {
		ret = hashmap__set(ids1, cur->key, cur->value, &old_key, &old_data);
		free(old_key);
		free(old_data);
		if (ret) {
			hashmap__free(ids1);
			hashmap__free(ids2);
			return NULL;
		}
	}
	hashmap__free(ids2);
	return ids1;
}

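/*
 * Illustrative sketch (not part of the original file): combining the id sets
 * of two sub-expressions. The event names are made up; ids__insert() takes
 * ownership of the strings and ids__union() consumes both maps:
 *
 *	struct hashmap *a = ids__new(), *b = ids__new();
 *
 *	ids__insert(a, strdup("instructions"));
 *	ids__insert(b, strdup("cycles"));
 *	a = ids__union(a, b);	// 'b' must not be used afterwards
 *	ids__free(a);
 */
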
/* Caller must make sure id is allocated */
int expr__add_id(struct expr_parse_ctx *ctx, const char *id)
{
	return ids__insert(ctx->ids, id);
}

/* Caller must make sure id is allocated */
int expr__add_id_val(struct expr_parse_ctx *ctx, const char *id, double val)
{
	return expr__add_id_val_source_count(ctx, id, val, /*source_count=*/1);
}

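/*
 * Illustrative sketch (not part of the original file): the context takes
 * ownership of the id string and frees it on clear/free, so callers pass an
 * allocated copy. "instructions" here is just a made-up id:
 *
 *	struct expr_parse_ctx *ctx = expr__ctx_new();
 *
 *	if (ctx)
 *		expr__add_id_val(ctx, strdup("instructions"), 1000000.0);
 */
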
/* Caller must make sure id is allocated */
int expr__add_id_val_source_count(struct expr_parse_ctx *ctx, const char *id,
				  double val, int source_count)
{
	struct expr_id_data *data_ptr = NULL, *old_data = NULL;
	char *old_key = NULL;
	int ret;

	data_ptr = malloc(sizeof(*data_ptr));
	if (!data_ptr)
		return -ENOMEM;
	data_ptr->val.val = val;
	data_ptr->val.source_count = source_count;
	data_ptr->kind = EXPR_ID_DATA__VALUE;

	ret = hashmap__set(ctx->ids, id, data_ptr, &old_key, &old_data);
	if (ret)
		free(data_ptr);
	free(old_key);
	free(old_data);
	return ret;
}

int expr__add_ref(struct expr_parse_ctx *ctx, struct metric_ref *ref)
{
	struct expr_id_data *data_ptr = NULL, *old_data = NULL;
	char *old_key = NULL;
	char *name;
	int ret;

	data_ptr = zalloc(sizeof(*data_ptr));
	if (!data_ptr)
		return -ENOMEM;

	name = strdup(ref->metric_name);
	if (!name) {
		free(data_ptr);
		return -ENOMEM;
	}

	/*
	 * Intentionally passing just const char pointers,
	 * originally from 'struct pmu_event' object.
	 * We don't need to change them, so there's no
	 * need to create our own copy.
	 */
	data_ptr->ref.metric_name = ref->metric_name;
	data_ptr->ref.metric_expr = ref->metric_expr;
	data_ptr->kind = EXPR_ID_DATA__REF;

	ret = hashmap__set(ctx->ids, name, data_ptr, &old_key, &old_data);
	if (ret)
		free(data_ptr);

	pr_debug2("adding ref metric %s: %s\n",
		  ref->metric_name, ref->metric_expr);

	free(old_key);
	free(old_data);
	return ret;
}

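/*
 * Illustrative sketch (not part of the original file): registering a metric
 * reference so an expression can use another metric by name. The metric name,
 * expression and event values below are made up; the reference is only
 * evaluated when expr__resolve_id() first encounters it:
 *
 *	struct metric_ref ref = {
 *		.metric_name = "ipc",
 *		.metric_expr = "instructions / cycles",
 *	};
 *	struct expr_parse_ctx *ctx = expr__ctx_new();
 *	double ratio;
 *
 *	expr__add_ref(ctx, &ref);
 *	expr__add_id_val(ctx, strdup("instructions"), 3000.0);
 *	expr__add_id_val(ctx, strdup("cycles"), 1500.0);
 *	if (!expr__parse(&ratio, ctx, "ipc"))
 *		pr_debug("ipc = %f\n", ratio);	// 2.0
 *	expr__ctx_free(ctx);
 */
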
int expr__get_id(struct expr_parse_ctx *ctx, const char *id,
		 struct expr_id_data **data)
{
	return hashmap__find(ctx->ids, id, data) ? 0 : -1;
}

/* Are all the ids in 'needles' also present in 'haystack'? */
bool expr__subset_of_ids(struct expr_parse_ctx *haystack,
			 struct expr_parse_ctx *needles)
{
	struct hashmap_entry *cur;
	size_t bkt;
	struct expr_id_data *data;

	hashmap__for_each_entry(needles->ids, cur, bkt) {
		if (expr__get_id(haystack, cur->pkey, &data))
			return false;
	}
	return true;
}

int expr__resolve_id(struct expr_parse_ctx *ctx, const char *id,
		     struct expr_id_data **datap)
{
	struct expr_id_data *data;

	if (expr__get_id(ctx, id, datap) || !*datap) {
		pr_debug("%s not found\n", id);
		return -1;
	}

	data = *datap;

	switch (data->kind) {
	case EXPR_ID_DATA__VALUE:
		pr_debug2("lookup(%s): val %f\n", id, data->val.val);
		break;
	case EXPR_ID_DATA__REF:
		pr_debug2("lookup(%s): ref metric name %s\n", id,
			  data->ref.metric_name);
		pr_debug("processing metric: %s ENTRY\n", id);
		/*
		 * Switch the kind before evaluating the referenced expression,
		 * so a lookup of this id during that evaluation reads ref.val
		 * instead of re-entering this case.
		 */
		data->kind = EXPR_ID_DATA__REF_VALUE;
		if (expr__parse(&data->ref.val, ctx, data->ref.metric_expr)) {
			pr_debug("%s failed to count\n", id);
			return -1;
		}
		pr_debug("processing metric: %s EXIT: %f\n", id, data->ref.val);
		break;
	case EXPR_ID_DATA__REF_VALUE:
		pr_debug2("lookup(%s): ref val %f metric name %s\n", id,
			  data->ref.val, data->ref.metric_name);
		break;
	default:
		assert(0); /* Unreachable. */
	}

	return 0;
}

void expr__del_id(struct expr_parse_ctx *ctx, const char *id)
{
	struct expr_id_data *old_val = NULL;
	char *old_key = NULL;

	hashmap__delete(ctx->ids, id, &old_key, &old_val);
	free(old_key);
	free(old_val);
}

struct expr_parse_ctx *expr__ctx_new(void)
{
	struct expr_parse_ctx *ctx;

	ctx = malloc(sizeof(struct expr_parse_ctx));
	if (!ctx)
		return NULL;

	ctx->ids = hashmap__new(key_hash, key_equal, NULL);
	if (IS_ERR(ctx->ids)) {
		free(ctx);
		return NULL;
	}
	ctx->sctx.user_requested_cpu_list = NULL;
	ctx->sctx.runtime = 0;
	ctx->sctx.system_wide = false;

	return ctx;
}

void expr__ctx_clear(struct expr_parse_ctx *ctx)
{
	struct hashmap_entry *cur;
	size_t bkt;

	hashmap__for_each_entry(ctx->ids, cur, bkt) {
		free((void *)cur->pkey);
		free(cur->pvalue);
	}
	hashmap__clear(ctx->ids);
}

void expr__ctx_free(struct expr_parse_ctx *ctx)
{
	struct hashmap_entry *cur;
	size_t bkt;

	if (!ctx)
		return;

	free(ctx->sctx.user_requested_cpu_list);
	hashmap__for_each_entry(ctx->ids, cur, bkt) {
		free((void *)cur->pkey);
		free(cur->pvalue);
	}
	hashmap__free(ctx->ids);
	free(ctx);
}

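/*
 * Illustrative sketch (not part of the original file): the context can be
 * reused across expressions. expr__ctx_clear() drops the accumulated ids but
 * keeps the context; expr__ctx_free() releases the ids and the context:
 *
 *	struct expr_parse_ctx *ctx = expr__ctx_new();
 *
 *	// ... add ids, parse a first expression ...
 *	expr__ctx_clear(ctx);
 *	// ... add different ids, parse another expression ...
 *	expr__ctx_free(ctx);
 */
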
static int
__expr__parse(double *val, struct expr_parse_ctx *ctx, const char *expr,
	      bool compute_ids)
{
	YY_BUFFER_STATE buffer;
	void *scanner;
	int ret;

	pr_debug2("parsing metric: %s\n", expr);

	ret = expr_lex_init_extra(&ctx->sctx, &scanner);
	if (ret)
		return ret;

	buffer = expr__scan_string(expr, scanner);

#ifdef PARSER_DEBUG
	expr_debug = 1;
	expr_set_debug(1, scanner);
#endif

	ret = expr_parse(val, ctx, compute_ids, scanner);

	expr__flush_buffer(buffer, scanner);
	expr__delete_buffer(buffer, scanner);
	expr_lex_destroy(scanner);
	return ret;
}

int expr__parse(double *final_val, struct expr_parse_ctx *ctx,
		const char *expr)
{
	return __expr__parse(final_val, ctx, expr, /*compute_ids=*/false) ? -1 : 0;
}

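/*
 * Illustrative sketch (not part of the original file): evaluating a simple
 * expression once values for its ids are known. The ids "a" and "b" are
 * made up:
 *
 *	struct expr_parse_ctx *ctx = expr__ctx_new();
 *	double result;
 *
 *	expr__add_id_val(ctx, strdup("a"), 6.0);
 *	expr__add_id_val(ctx, strdup("b"), 2.0);
 *	if (expr__parse(&result, ctx, "a + b / 2") == 0)
 *		pr_debug("result = %f\n", result);	// 7.0
 *	expr__ctx_free(ctx);
 */
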
/*
 * Parse 'expr' only to collect the ids it references into ctx->ids; 'one',
 * if non-NULL, is removed from the collected ids.
 */
int expr__find_ids(const char *expr, const char *one,
		   struct expr_parse_ctx *ctx)
{
	int ret = __expr__parse(NULL, ctx, expr, /*compute_ids=*/true);

	if (one)
		expr__del_id(ctx, one);

	return ret;
}

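/*
 * Illustrative sketch (not part of the original file): discovering which ids
 * a metric expression needs before any values exist. The expression is made
 * up; after the call ctx->ids holds "instructions" and "cycles":
 *
 *	struct expr_parse_ctx *ctx = expr__ctx_new();
 *	struct hashmap_entry *cur;
 *	size_t bkt;
 *
 *	if (!expr__find_ids("instructions / cycles", NULL, ctx)) {
 *		hashmap__for_each_entry(ctx->ids, cur, bkt)
 *			pr_debug("needs event: %s\n", (const char *)cur->pkey);
 *	}
 *	expr__ctx_free(ctx);
 */
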
double expr_id_data__value(const struct expr_id_data *data)
{
	if (data->kind == EXPR_ID_DATA__VALUE)
		return data->val.val;
	assert(data->kind == EXPR_ID_DATA__REF_VALUE);
	return data->ref.val;
}

double expr_id_data__source_count(const struct expr_id_data *data)
{
	assert(data->kind == EXPR_ID_DATA__VALUE);
	return data->val.source_count;
}

#if !defined(__i386__) && !defined(__x86_64__)
/* Fallback so the #system_tsc_freq literal evaluates to 0 on other arches. */
double arch_get_tsc_freq(void)
{
	return 0.0;
}
#endif

double expr__get_literal(const char *literal, const struct expr_scanner_ctx *ctx)
{
	static struct cpu_topology *topology;
	double result = NAN;

	if (!strcmp("#num_cpus", literal)) {
		result = cpu__max_present_cpu().cpu;
		goto out;
	}
	if (!strcasecmp("#system_tsc_freq", literal)) {
		result = arch_get_tsc_freq();
		goto out;
	}
	/*
	 * Assume that topology strings are consistent, such as CPUs "0-1"
	 * wouldn't be listed as "0,1", and so after deduplication the number of
	 * these strings gives an indication of the number of packages, dies,
	 * etc.
	 */
	if (!topology) {
		topology = cpu_topology__new();
		if (!topology) {
			pr_err("Error creating CPU topology");
			goto out;
		}
	}
	if (!strcasecmp("#smt_on", literal)) {
		result = smt_on(topology) ? 1.0 : 0.0;
		goto out;
	}
	if (!strcmp("#core_wide", literal)) {
		result = core_wide(ctx->system_wide, ctx->user_requested_cpu_list, topology)
			? 1.0 : 0.0;
		goto out;
	}
	if (!strcmp("#num_packages", literal)) {
		result = topology->package_cpus_lists;
		goto out;
	}
	if (!strcmp("#num_dies", literal)) {
		result = topology->die_cpus_lists;
		goto out;
	}
	if (!strcmp("#num_cores", literal)) {
		result = topology->core_cpus_lists;
		goto out;
	}

	pr_err("Unrecognized literal '%s'", literal);
out:
	pr_debug2("literal: %s = %f\n", literal, result);
	return result;
}
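
/*
 * Illustrative sketch (not part of the original file): literals are written
 * directly inside expressions with a leading '#' and are resolved through
 * expr__get_literal() while the expression is scanned, so no id values need
 * to be added beforehand:
 *
 *	struct expr_parse_ctx *ctx = expr__ctx_new();
 *	double result;
 *
 *	if (expr__parse(&result, ctx, "#num_cpus * (1 - #smt_on)") == 0)
 *		pr_debug("result = %f\n", result);
 *	expr__ctx_free(ctx);
 */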