2 * trace_events_hist - trace event hist triggers
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
17 #include <linux/module.h>
18 #include <linux/kallsyms.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/stacktrace.h>
22 #include <linux/rculist.h>
23 #include <linux/tracefs.h>
25 #include "tracing_map.h"
28 #define SYNTH_SYSTEM "synthetic"
29 #define SYNTH_FIELDS_MAX 16
31 #define STR_VAR_LEN_MAX 32 /* must be multiple of sizeof(u64) */
35 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
36 struct tracing_map_elt *elt,
37 struct ring_buffer_event *rbe,
40 #define HIST_FIELD_OPERANDS_MAX 2
41 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
42 #define HIST_ACTIONS_MAX 8
53 struct hist_trigger_data *hist_data;
58 struct ftrace_event_field *field;
63 unsigned int is_signed;
65 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
66 struct hist_trigger_data *hist_data;
68 enum field_op_id operator;
73 unsigned int var_ref_idx;
77 static u64 hist_field_none(struct hist_field *field,
78 struct tracing_map_elt *elt,
79 struct ring_buffer_event *rbe,
85 static u64 hist_field_counter(struct hist_field *field,
86 struct tracing_map_elt *elt,
87 struct ring_buffer_event *rbe,
93 static u64 hist_field_string(struct hist_field *hist_field,
94 struct tracing_map_elt *elt,
95 struct ring_buffer_event *rbe,
98 char *addr = (char *)(event + hist_field->field->offset);
100 return (u64)(unsigned long)addr;
103 static u64 hist_field_dynstring(struct hist_field *hist_field,
104 struct tracing_map_elt *elt,
105 struct ring_buffer_event *rbe,
108 u32 str_item = *(u32 *)(event + hist_field->field->offset);
109 int str_loc = str_item & 0xffff;
110 char *addr = (char *)(event + str_loc);
112 return (u64)(unsigned long)addr;
115 static u64 hist_field_pstring(struct hist_field *hist_field,
116 struct tracing_map_elt *elt,
117 struct ring_buffer_event *rbe,
120 char **addr = (char **)(event + hist_field->field->offset);
122 return (u64)(unsigned long)*addr;
125 static u64 hist_field_log2(struct hist_field *hist_field,
126 struct tracing_map_elt *elt,
127 struct ring_buffer_event *rbe,
130 struct hist_field *operand = hist_field->operands[0];
132 u64 val = operand->fn(operand, elt, rbe, event);
134 return (u64) ilog2(roundup_pow_of_two(val));
137 static u64 hist_field_plus(struct hist_field *hist_field,
138 struct tracing_map_elt *elt,
139 struct ring_buffer_event *rbe,
142 struct hist_field *operand1 = hist_field->operands[0];
143 struct hist_field *operand2 = hist_field->operands[1];
145 u64 val1 = operand1->fn(operand1, elt, rbe, event);
146 u64 val2 = operand2->fn(operand2, elt, rbe, event);
151 static u64 hist_field_minus(struct hist_field *hist_field,
152 struct tracing_map_elt *elt,
153 struct ring_buffer_event *rbe,
156 struct hist_field *operand1 = hist_field->operands[0];
157 struct hist_field *operand2 = hist_field->operands[1];
159 u64 val1 = operand1->fn(operand1, elt, rbe, event);
160 u64 val2 = operand2->fn(operand2, elt, rbe, event);
165 static u64 hist_field_unary_minus(struct hist_field *hist_field,
166 struct tracing_map_elt *elt,
167 struct ring_buffer_event *rbe,
170 struct hist_field *operand = hist_field->operands[0];
172 s64 sval = (s64)operand->fn(operand, elt, rbe, event);
173 u64 val = (u64)-sval;
178 #define DEFINE_HIST_FIELD_FN(type) \
179 static u64 hist_field_##type(struct hist_field *hist_field, \
180 struct tracing_map_elt *elt, \
181 struct ring_buffer_event *rbe, \
184 type *addr = (type *)(event + hist_field->field->offset); \
186 return (u64)(unsigned long)*addr; \
189 DEFINE_HIST_FIELD_FN(s64);
190 DEFINE_HIST_FIELD_FN(u64);
191 DEFINE_HIST_FIELD_FN(s32);
192 DEFINE_HIST_FIELD_FN(u32);
193 DEFINE_HIST_FIELD_FN(s16);
194 DEFINE_HIST_FIELD_FN(u16);
195 DEFINE_HIST_FIELD_FN(s8);
196 DEFINE_HIST_FIELD_FN(u8);
198 #define for_each_hist_field(i, hist_data) \
199 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
201 #define for_each_hist_val_field(i, hist_data) \
202 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
204 #define for_each_hist_key_field(i, hist_data) \
205 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
207 #define HIST_STACKTRACE_DEPTH 16
208 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
209 #define HIST_STACKTRACE_SKIP 5
211 #define HITCOUNT_IDX 0
212 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
214 enum hist_field_flags {
215 HIST_FIELD_FL_HITCOUNT = 1 << 0,
216 HIST_FIELD_FL_KEY = 1 << 1,
217 HIST_FIELD_FL_STRING = 1 << 2,
218 HIST_FIELD_FL_HEX = 1 << 3,
219 HIST_FIELD_FL_SYM = 1 << 4,
220 HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
221 HIST_FIELD_FL_EXECNAME = 1 << 6,
222 HIST_FIELD_FL_SYSCALL = 1 << 7,
223 HIST_FIELD_FL_STACKTRACE = 1 << 8,
224 HIST_FIELD_FL_LOG2 = 1 << 9,
225 HIST_FIELD_FL_TIMESTAMP = 1 << 10,
226 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11,
227 HIST_FIELD_FL_VAR = 1 << 12,
228 HIST_FIELD_FL_EXPR = 1 << 13,
229 HIST_FIELD_FL_VAR_REF = 1 << 14,
230 HIST_FIELD_FL_CPU = 1 << 15,
231 HIST_FIELD_FL_ALIAS = 1 << 16,
236 char *name[TRACING_MAP_VARS_MAX];
237 char *expr[TRACING_MAP_VARS_MAX];
240 struct hist_trigger_attrs {
249 unsigned int map_bits;
251 char *assignment_str[TRACING_MAP_VARS_MAX];
252 unsigned int n_assignments;
254 char *action_str[HIST_ACTIONS_MAX];
255 unsigned int n_actions;
257 struct var_defs var_defs;
261 struct hist_field *var;
262 struct hist_field *val;
265 struct field_var_hist {
266 struct hist_trigger_data *hist_data;
270 struct hist_trigger_data {
271 struct hist_field *fields[HIST_FIELDS_MAX];
274 unsigned int n_fields;
276 unsigned int key_size;
277 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
278 unsigned int n_sort_keys;
279 struct trace_event_file *event_file;
280 struct hist_trigger_attrs *attrs;
281 struct tracing_map *map;
282 bool enable_timestamps;
284 struct hist_field *var_refs[TRACING_MAP_VARS_MAX];
285 unsigned int n_var_refs;
287 struct action_data *actions[HIST_ACTIONS_MAX];
288 unsigned int n_actions;
290 struct hist_field *synth_var_refs[SYNTH_FIELDS_MAX];
291 unsigned int n_synth_var_refs;
292 struct field_var *field_vars[SYNTH_FIELDS_MAX];
293 unsigned int n_field_vars;
294 unsigned int n_field_var_str;
295 struct field_var_hist *field_var_hists[SYNTH_FIELDS_MAX];
296 unsigned int n_field_var_hists;
298 struct field_var *max_vars[SYNTH_FIELDS_MAX];
299 unsigned int n_max_vars;
300 unsigned int n_max_var_str;
312 struct list_head list;
315 struct synth_field **fields;
316 unsigned int n_fields;
318 struct trace_event_class class;
319 struct trace_event_call call;
320 struct tracepoint *tp;
325 typedef void (*action_fn_t) (struct hist_trigger_data *hist_data,
326 struct tracing_map_elt *elt, void *rec,
327 struct ring_buffer_event *rbe,
328 struct action_data *data, u64 *var_ref_vals);
332 unsigned int n_params;
333 char *params[SYNTH_FIELDS_MAX];
337 unsigned int var_ref_idx;
339 char *match_event_system;
340 char *synth_event_name;
341 struct synth_event *synth_event;
347 unsigned int max_var_ref_idx;
348 struct hist_field *max_var;
349 struct hist_field *var;
355 static char last_hist_cmd[MAX_FILTER_STR_VAL];
356 static char hist_err_str[MAX_FILTER_STR_VAL];
358 static void last_cmd_set(char *str)
363 strncpy(last_hist_cmd, str, MAX_FILTER_STR_VAL - 1);
366 static void hist_err(char *str, char *var)
368 int maxlen = MAX_FILTER_STR_VAL - 1;
373 if (strlen(hist_err_str))
379 if (strlen(hist_err_str) + strlen(str) + strlen(var) > maxlen)
382 strcat(hist_err_str, str);
383 strcat(hist_err_str, var);
386 static void hist_err_event(char *str, char *system, char *event, char *var)
388 char err[MAX_FILTER_STR_VAL];
391 snprintf(err, MAX_FILTER_STR_VAL, "%s.%s.%s", system, event, var);
393 snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event);
395 strncpy(err, var, MAX_FILTER_STR_VAL);
400 static void hist_err_clear(void)
402 hist_err_str[0] = '\0';
405 static bool have_hist_err(void)
407 if (strlen(hist_err_str))
413 static LIST_HEAD(synth_event_list);
414 static DEFINE_MUTEX(synth_event_mutex);
416 struct synth_trace_event {
417 struct trace_entry ent;
421 static int synth_event_define_fields(struct trace_event_call *call)
423 struct synth_trace_event trace;
424 int offset = offsetof(typeof(trace), fields);
425 struct synth_event *event = call->data;
426 unsigned int i, size, n_u64;
431 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
432 size = event->fields[i]->size;
433 is_signed = event->fields[i]->is_signed;
434 type = event->fields[i]->type;
435 name = event->fields[i]->name;
436 ret = trace_define_field(call, type, name, offset, size,
437 is_signed, FILTER_OTHER);
441 if (event->fields[i]->is_string) {
442 offset += STR_VAR_LEN_MAX;
443 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
445 offset += sizeof(u64);
450 event->n_u64 = n_u64;
455 static bool synth_field_signed(char *type)
457 if (strncmp(type, "u", 1) == 0)
463 static int synth_field_is_string(char *type)
465 if (strstr(type, "char[") != NULL)
471 static int synth_field_string_size(char *type)
473 char buf[4], *end, *start;
477 start = strstr(type, "char[");
480 start += strlen("char[");
482 end = strchr(type, ']');
483 if (!end || end < start)
490 strncpy(buf, start, len);
493 err = kstrtouint(buf, 0, &size);
497 if (size > STR_VAR_LEN_MAX)
503 static int synth_field_size(char *type)
507 if (strcmp(type, "s64") == 0)
509 else if (strcmp(type, "u64") == 0)
511 else if (strcmp(type, "s32") == 0)
513 else if (strcmp(type, "u32") == 0)
515 else if (strcmp(type, "s16") == 0)
517 else if (strcmp(type, "u16") == 0)
519 else if (strcmp(type, "s8") == 0)
521 else if (strcmp(type, "u8") == 0)
523 else if (strcmp(type, "char") == 0)
525 else if (strcmp(type, "unsigned char") == 0)
526 size = sizeof(unsigned char);
527 else if (strcmp(type, "int") == 0)
529 else if (strcmp(type, "unsigned int") == 0)
530 size = sizeof(unsigned int);
531 else if (strcmp(type, "long") == 0)
533 else if (strcmp(type, "unsigned long") == 0)
534 size = sizeof(unsigned long);
535 else if (strcmp(type, "pid_t") == 0)
536 size = sizeof(pid_t);
537 else if (synth_field_is_string(type))
538 size = synth_field_string_size(type);
543 static const char *synth_field_fmt(char *type)
545 const char *fmt = "%llu";
547 if (strcmp(type, "s64") == 0)
549 else if (strcmp(type, "u64") == 0)
551 else if (strcmp(type, "s32") == 0)
553 else if (strcmp(type, "u32") == 0)
555 else if (strcmp(type, "s16") == 0)
557 else if (strcmp(type, "u16") == 0)
559 else if (strcmp(type, "s8") == 0)
561 else if (strcmp(type, "u8") == 0)
563 else if (strcmp(type, "char") == 0)
565 else if (strcmp(type, "unsigned char") == 0)
567 else if (strcmp(type, "int") == 0)
569 else if (strcmp(type, "unsigned int") == 0)
571 else if (strcmp(type, "long") == 0)
573 else if (strcmp(type, "unsigned long") == 0)
575 else if (strcmp(type, "pid_t") == 0)
577 else if (synth_field_is_string(type))
583 static enum print_line_t print_synth_event(struct trace_iterator *iter,
585 struct trace_event *event)
587 struct trace_array *tr = iter->tr;
588 struct trace_seq *s = &iter->seq;
589 struct synth_trace_event *entry;
590 struct synth_event *se;
591 unsigned int i, n_u64;
595 entry = (struct synth_trace_event *)iter->ent;
596 se = container_of(event, struct synth_event, call.event);
598 trace_seq_printf(s, "%s: ", se->name);
600 for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
601 if (trace_seq_has_overflowed(s))
604 fmt = synth_field_fmt(se->fields[i]->type);
606 /* parameter types */
607 if (tr->trace_flags & TRACE_ITER_VERBOSE)
608 trace_seq_printf(s, "%s ", fmt);
610 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
612 /* parameter values */
613 if (se->fields[i]->is_string) {
614 trace_seq_printf(s, print_fmt, se->fields[i]->name,
615 (char *)&entry->fields[n_u64],
616 i == se->n_fields - 1 ? "" : " ");
617 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
619 trace_seq_printf(s, print_fmt, se->fields[i]->name,
620 entry->fields[n_u64],
621 i == se->n_fields - 1 ? "" : " ");
626 trace_seq_putc(s, '\n');
628 return trace_handle_return(s);
631 static struct trace_event_functions synth_event_funcs = {
632 .trace = print_synth_event
635 static notrace void trace_event_raw_event_synth(void *__data,
637 unsigned int var_ref_idx)
639 struct trace_event_file *trace_file = __data;
640 struct synth_trace_event *entry;
641 struct trace_event_buffer fbuffer;
642 struct synth_event *event;
643 unsigned int i, n_u64;
646 event = trace_file->event_call->data;
648 if (trace_trigger_soft_disabled(trace_file))
651 fields_size = event->n_u64 * sizeof(u64);
653 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
654 sizeof(*entry) + fields_size);
658 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
659 if (event->fields[i]->is_string) {
660 char *str_val = (char *)(long)var_ref_vals[var_ref_idx + i];
661 char *str_field = (char *)&entry->fields[n_u64];
663 strncpy(str_field, str_val, STR_VAR_LEN_MAX);
664 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
666 entry->fields[n_u64] = var_ref_vals[var_ref_idx + i];
671 trace_event_buffer_commit(&fbuffer);
674 static void free_synth_event_print_fmt(struct trace_event_call *call)
677 kfree(call->print_fmt);
678 call->print_fmt = NULL;
682 static int __set_synth_event_print_fmt(struct synth_event *event,
689 /* When len=0, we just calculate the needed length */
690 #define LEN_OR_ZERO (len ? len - pos : 0)
692 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
693 for (i = 0; i < event->n_fields; i++) {
694 fmt = synth_field_fmt(event->fields[i]->type);
695 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
696 event->fields[i]->name, fmt,
697 i == event->n_fields - 1 ? "" : ", ");
699 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
701 for (i = 0; i < event->n_fields; i++) {
702 pos += snprintf(buf + pos, LEN_OR_ZERO,
703 ", REC->%s", event->fields[i]->name);
708 /* return the length of print_fmt */
712 static int set_synth_event_print_fmt(struct trace_event_call *call)
714 struct synth_event *event = call->data;
718 /* First: called with 0 length to calculate the needed length */
719 len = __set_synth_event_print_fmt(event, NULL, 0);
721 print_fmt = kmalloc(len + 1, GFP_KERNEL);
725 /* Second: actually write the @print_fmt */
726 __set_synth_event_print_fmt(event, print_fmt, len + 1);
727 call->print_fmt = print_fmt;
732 static void free_synth_field(struct synth_field *field)
739 static struct synth_field *parse_synth_field(char *field_type,
742 struct synth_field *field;
746 if (field_type[0] == ';')
749 len = strlen(field_name);
750 if (field_name[len - 1] == ';')
751 field_name[len - 1] = '\0';
753 field = kzalloc(sizeof(*field), GFP_KERNEL);
755 return ERR_PTR(-ENOMEM);
757 len = strlen(field_type) + 1;
758 array = strchr(field_name, '[');
760 len += strlen(array);
761 field->type = kzalloc(len, GFP_KERNEL);
766 strcat(field->type, field_type);
768 strcat(field->type, array);
772 field->size = synth_field_size(field->type);
778 if (synth_field_is_string(field->type))
779 field->is_string = true;
781 field->is_signed = synth_field_signed(field->type);
783 field->name = kstrdup(field_name, GFP_KERNEL);
791 free_synth_field(field);
792 field = ERR_PTR(ret);
796 static void free_synth_tracepoint(struct tracepoint *tp)
805 static struct tracepoint *alloc_synth_tracepoint(char *name)
807 struct tracepoint *tp;
809 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
811 return ERR_PTR(-ENOMEM);
813 tp->name = kstrdup(name, GFP_KERNEL);
816 return ERR_PTR(-ENOMEM);
822 typedef void (*synth_probe_func_t) (void *__data, u64 *var_ref_vals,
823 unsigned int var_ref_idx);
825 static inline void trace_synth(struct synth_event *event, u64 *var_ref_vals,
826 unsigned int var_ref_idx)
828 struct tracepoint *tp = event->tp;
830 if (unlikely(atomic_read(&tp->key.enabled) > 0)) {
831 struct tracepoint_func *probe_func_ptr;
832 synth_probe_func_t probe_func;
835 if (!(cpu_online(raw_smp_processor_id())))
838 probe_func_ptr = rcu_dereference_sched((tp)->funcs);
839 if (probe_func_ptr) {
841 probe_func = probe_func_ptr->func;
842 __data = probe_func_ptr->data;
843 probe_func(__data, var_ref_vals, var_ref_idx);
844 } while ((++probe_func_ptr)->func);
849 static struct synth_event *find_synth_event(const char *name)
851 struct synth_event *event;
853 list_for_each_entry(event, &synth_event_list, list) {
854 if (strcmp(event->name, name) == 0)
861 static int register_synth_event(struct synth_event *event)
863 struct trace_event_call *call = &event->call;
866 event->call.class = &event->class;
867 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
868 if (!event->class.system) {
873 event->tp = alloc_synth_tracepoint(event->name);
874 if (IS_ERR(event->tp)) {
875 ret = PTR_ERR(event->tp);
880 INIT_LIST_HEAD(&call->class->fields);
881 call->event.funcs = &synth_event_funcs;
882 call->class->define_fields = synth_event_define_fields;
884 ret = register_trace_event(&call->event);
889 call->flags = TRACE_EVENT_FL_TRACEPOINT;
890 call->class->reg = trace_event_reg;
891 call->class->probe = trace_event_raw_event_synth;
893 call->tp = event->tp;
895 ret = trace_add_event_call(call);
897 pr_warn("Failed to register synthetic event: %s\n",
898 trace_event_name(call));
902 ret = set_synth_event_print_fmt(call);
904 trace_remove_event_call(call);
910 unregister_trace_event(&call->event);
914 static int unregister_synth_event(struct synth_event *event)
916 struct trace_event_call *call = &event->call;
919 ret = trace_remove_event_call(call);
924 static void free_synth_event(struct synth_event *event)
931 for (i = 0; i < event->n_fields; i++)
932 free_synth_field(event->fields[i]);
934 kfree(event->fields);
936 kfree(event->class.system);
937 free_synth_tracepoint(event->tp);
938 free_synth_event_print_fmt(&event->call);
942 static struct synth_event *alloc_synth_event(char *event_name, int n_fields,
943 struct synth_field **fields)
945 struct synth_event *event;
948 event = kzalloc(sizeof(*event), GFP_KERNEL);
950 event = ERR_PTR(-ENOMEM);
954 event->name = kstrdup(event_name, GFP_KERNEL);
957 event = ERR_PTR(-ENOMEM);
961 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
962 if (!event->fields) {
963 free_synth_event(event);
964 event = ERR_PTR(-ENOMEM);
968 for (i = 0; i < n_fields; i++)
969 event->fields[i] = fields[i];
971 event->n_fields = n_fields;
976 static void action_trace(struct hist_trigger_data *hist_data,
977 struct tracing_map_elt *elt, void *rec,
978 struct ring_buffer_event *rbe,
979 struct action_data *data, u64 *var_ref_vals)
981 struct synth_event *event = data->onmatch.synth_event;
983 trace_synth(event, var_ref_vals, data->onmatch.var_ref_idx);
986 struct hist_var_data {
987 struct list_head list;
988 struct hist_trigger_data *hist_data;
991 static void add_or_delete_synth_event(struct synth_event *event, int delete)
994 free_synth_event(event);
996 mutex_lock(&synth_event_mutex);
997 if (!find_synth_event(event->name))
998 list_add(&event->list, &synth_event_list);
1000 free_synth_event(event);
1001 mutex_unlock(&synth_event_mutex);
1005 static int create_synth_event(int argc, char **argv)
1007 struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1008 struct synth_event *event = NULL;
1009 bool delete_event = false;
1010 int i, n_fields = 0, ret = 0;
1013 mutex_lock(&synth_event_mutex);
1017 * - Add synthetic event: <event_name> field[;field] ...
1018 * - Remove synthetic event: !<event_name> field[;field] ...
1019 * where 'field' = type field_name
1027 if (name[0] == '!') {
1028 delete_event = true;
1032 event = find_synth_event(name);
1040 list_del(&event->list);
1046 } else if (delete_event)
1054 for (i = 1; i < argc - 1; i++) {
1055 if (strcmp(argv[i], ";") == 0)
1057 if (n_fields == SYNTH_FIELDS_MAX) {
1062 field = parse_synth_field(argv[i], argv[i + 1]);
1063 if (IS_ERR(field)) {
1064 ret = PTR_ERR(field);
1067 fields[n_fields] = field;
1076 event = alloc_synth_event(name, n_fields, fields);
1077 if (IS_ERR(event)) {
1078 ret = PTR_ERR(event);
1083 mutex_unlock(&synth_event_mutex);
1087 ret = unregister_synth_event(event);
1088 add_or_delete_synth_event(event, !ret);
1090 ret = register_synth_event(event);
1091 add_or_delete_synth_event(event, ret);
1097 mutex_unlock(&synth_event_mutex);
1099 for (i = 0; i < n_fields; i++)
1100 free_synth_field(fields[i]);
1101 free_synth_event(event);
1106 static int release_all_synth_events(void)
1108 struct list_head release_events;
1109 struct synth_event *event, *e;
1112 INIT_LIST_HEAD(&release_events);
1114 mutex_lock(&synth_event_mutex);
1116 list_for_each_entry(event, &synth_event_list, list) {
1118 mutex_unlock(&synth_event_mutex);
1123 list_splice_init(&event->list, &release_events);
1125 mutex_unlock(&synth_event_mutex);
1127 list_for_each_entry_safe(event, e, &release_events, list) {
1128 list_del(&event->list);
1130 ret = unregister_synth_event(event);
1131 add_or_delete_synth_event(event, !ret);
1138 static void *synth_events_seq_start(struct seq_file *m, loff_t *pos)
1140 mutex_lock(&synth_event_mutex);
1142 return seq_list_start(&synth_event_list, *pos);
1145 static void *synth_events_seq_next(struct seq_file *m, void *v, loff_t *pos)
1147 return seq_list_next(v, &synth_event_list, pos);
1150 static void synth_events_seq_stop(struct seq_file *m, void *v)
1152 mutex_unlock(&synth_event_mutex);
1155 static int synth_events_seq_show(struct seq_file *m, void *v)
1157 struct synth_field *field;
1158 struct synth_event *event = v;
1161 seq_printf(m, "%s\t", event->name);
1163 for (i = 0; i < event->n_fields; i++) {
1164 field = event->fields[i];
1166 /* parameter values */
1167 seq_printf(m, "%s %s%s", field->type, field->name,
1168 i == event->n_fields - 1 ? "" : "; ");
1176 static const struct seq_operations synth_events_seq_op = {
1177 .start = synth_events_seq_start,
1178 .next = synth_events_seq_next,
1179 .stop = synth_events_seq_stop,
1180 .show = synth_events_seq_show
1183 static int synth_events_open(struct inode *inode, struct file *file)
1187 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
1188 ret = release_all_synth_events();
1193 return seq_open(file, &synth_events_seq_op);
1196 static ssize_t synth_events_write(struct file *file,
1197 const char __user *buffer,
1198 size_t count, loff_t *ppos)
1200 return trace_parse_run_command(file, buffer, count, ppos,
1201 create_synth_event);
1204 static const struct file_operations synth_events_fops = {
1205 .open = synth_events_open,
1206 .write = synth_events_write,
1208 .llseek = seq_lseek,
1209 .release = seq_release,
1212 static u64 hist_field_timestamp(struct hist_field *hist_field,
1213 struct tracing_map_elt *elt,
1214 struct ring_buffer_event *rbe,
1217 struct hist_trigger_data *hist_data = hist_field->hist_data;
1218 struct trace_array *tr = hist_data->event_file->tr;
1220 u64 ts = ring_buffer_event_time_stamp(rbe);
1222 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
1228 static u64 hist_field_cpu(struct hist_field *hist_field,
1229 struct tracing_map_elt *elt,
1230 struct ring_buffer_event *rbe,
1233 int cpu = smp_processor_id();
1238 static struct hist_field *
1239 check_field_for_var_ref(struct hist_field *hist_field,
1240 struct hist_trigger_data *var_data,
1241 unsigned int var_idx)
1243 struct hist_field *found = NULL;
1245 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR_REF) {
1246 if (hist_field->var.idx == var_idx &&
1247 hist_field->var.hist_data == var_data) {
1255 static struct hist_field *
1256 check_field_for_var_refs(struct hist_trigger_data *hist_data,
1257 struct hist_field *hist_field,
1258 struct hist_trigger_data *var_data,
1259 unsigned int var_idx,
1262 struct hist_field *found = NULL;
1271 found = check_field_for_var_ref(hist_field, var_data, var_idx);
1275 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1276 struct hist_field *operand;
1278 operand = hist_field->operands[i];
1279 found = check_field_for_var_refs(hist_data, operand, var_data,
1280 var_idx, level + 1);
1288 static struct hist_field *find_var_ref(struct hist_trigger_data *hist_data,
1289 struct hist_trigger_data *var_data,
1290 unsigned int var_idx)
1292 struct hist_field *hist_field, *found = NULL;
1295 for_each_hist_field(i, hist_data) {
1296 hist_field = hist_data->fields[i];
1297 found = check_field_for_var_refs(hist_data, hist_field,
1298 var_data, var_idx, 0);
1303 for (i = 0; i < hist_data->n_synth_var_refs; i++) {
1304 hist_field = hist_data->synth_var_refs[i];
1305 found = check_field_for_var_refs(hist_data, hist_field,
1306 var_data, var_idx, 0);
1314 static struct hist_field *find_any_var_ref(struct hist_trigger_data *hist_data,
1315 unsigned int var_idx)
1317 struct trace_array *tr = hist_data->event_file->tr;
1318 struct hist_field *found = NULL;
1319 struct hist_var_data *var_data;
1321 list_for_each_entry(var_data, &tr->hist_vars, list) {
1322 if (var_data->hist_data == hist_data)
1324 found = find_var_ref(var_data->hist_data, hist_data, var_idx);
1332 static bool check_var_refs(struct hist_trigger_data *hist_data)
1334 struct hist_field *field;
1338 for_each_hist_field(i, hist_data) {
1339 field = hist_data->fields[i];
1340 if (field && field->flags & HIST_FIELD_FL_VAR) {
1341 if (find_any_var_ref(hist_data, field->var.idx)) {
1351 static struct hist_var_data *find_hist_vars(struct hist_trigger_data *hist_data)
1353 struct trace_array *tr = hist_data->event_file->tr;
1354 struct hist_var_data *var_data, *found = NULL;
1356 list_for_each_entry(var_data, &tr->hist_vars, list) {
1357 if (var_data->hist_data == hist_data) {
1366 static bool field_has_hist_vars(struct hist_field *hist_field,
1377 if (hist_field->flags & HIST_FIELD_FL_VAR ||
1378 hist_field->flags & HIST_FIELD_FL_VAR_REF)
1381 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++) {
1382 struct hist_field *operand;
1384 operand = hist_field->operands[i];
1385 if (field_has_hist_vars(operand, level + 1))
1392 static bool has_hist_vars(struct hist_trigger_data *hist_data)
1394 struct hist_field *hist_field;
1397 for_each_hist_field(i, hist_data) {
1398 hist_field = hist_data->fields[i];
1399 if (field_has_hist_vars(hist_field, 0))
1406 static int save_hist_vars(struct hist_trigger_data *hist_data)
1408 struct trace_array *tr = hist_data->event_file->tr;
1409 struct hist_var_data *var_data;
1411 var_data = find_hist_vars(hist_data);
1415 if (trace_array_get(tr) < 0)
1418 var_data = kzalloc(sizeof(*var_data), GFP_KERNEL);
1420 trace_array_put(tr);
1424 var_data->hist_data = hist_data;
1425 list_add(&var_data->list, &tr->hist_vars);
1430 static void remove_hist_vars(struct hist_trigger_data *hist_data)
1432 struct trace_array *tr = hist_data->event_file->tr;
1433 struct hist_var_data *var_data;
1435 var_data = find_hist_vars(hist_data);
1439 if (WARN_ON(check_var_refs(hist_data)))
1442 list_del(&var_data->list);
1446 trace_array_put(tr);
1449 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
1450 const char *var_name)
1452 struct hist_field *hist_field, *found = NULL;
1455 for_each_hist_field(i, hist_data) {
1456 hist_field = hist_data->fields[i];
1457 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
1458 strcmp(hist_field->var.name, var_name) == 0) {
1467 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
1468 struct trace_event_file *file,
1469 const char *var_name)
1471 struct hist_trigger_data *test_data;
1472 struct event_trigger_data *test;
1473 struct hist_field *hist_field;
1475 hist_field = find_var_field(hist_data, var_name);
1479 list_for_each_entry_rcu(test, &file->triggers, list) {
1480 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1481 test_data = test->private_data;
1482 hist_field = find_var_field(test_data, var_name);
1491 static struct trace_event_file *find_var_file(struct trace_array *tr,
1496 struct hist_trigger_data *var_hist_data;
1497 struct hist_var_data *var_data;
1498 struct trace_event_file *file, *found = NULL;
1501 return find_event_file(tr, system, event_name);
1503 list_for_each_entry(var_data, &tr->hist_vars, list) {
1504 var_hist_data = var_data->hist_data;
1505 file = var_hist_data->event_file;
1509 if (find_var_field(var_hist_data, var_name)) {
1511 hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system, event_name, var_name);
1522 static struct hist_field *find_file_var(struct trace_event_file *file,
1523 const char *var_name)
1525 struct hist_trigger_data *test_data;
1526 struct event_trigger_data *test;
1527 struct hist_field *hist_field;
1529 list_for_each_entry_rcu(test, &file->triggers, list) {
1530 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1531 test_data = test->private_data;
1532 hist_field = find_var_field(test_data, var_name);
1541 static struct hist_field *
1542 find_match_var(struct hist_trigger_data *hist_data, char *var_name)
1544 struct trace_array *tr = hist_data->event_file->tr;
1545 struct hist_field *hist_field, *found = NULL;
1546 struct trace_event_file *file;
1549 for (i = 0; i < hist_data->n_actions; i++) {
1550 struct action_data *data = hist_data->actions[i];
1552 if (data->fn == action_trace) {
1553 char *system = data->onmatch.match_event_system;
1554 char *event_name = data->onmatch.match_event;
1556 file = find_var_file(tr, system, event_name, var_name);
1559 hist_field = find_file_var(file, var_name);
1562 hist_err_event("Variable name not unique, need to use fully qualified name (subsys.event.var) for variable: ", system, event_name, var_name);
1563 return ERR_PTR(-EINVAL);
1573 static struct hist_field *find_event_var(struct hist_trigger_data *hist_data,
1578 struct trace_array *tr = hist_data->event_file->tr;
1579 struct hist_field *hist_field = NULL;
1580 struct trace_event_file *file;
1582 if (!system || !event_name) {
1583 hist_field = find_match_var(hist_data, var_name);
1584 if (IS_ERR(hist_field))
1590 file = find_var_file(tr, system, event_name, var_name);
1594 hist_field = find_file_var(file, var_name);
1599 struct hist_elt_data {
1602 char *field_var_str[SYNTH_FIELDS_MAX];
1605 static u64 hist_field_var_ref(struct hist_field *hist_field,
1606 struct tracing_map_elt *elt,
1607 struct ring_buffer_event *rbe,
1610 struct hist_elt_data *elt_data;
1613 elt_data = elt->private_data;
1614 var_val = elt_data->var_ref_vals[hist_field->var_ref_idx];
1619 static bool resolve_var_refs(struct hist_trigger_data *hist_data, void *key,
1620 u64 *var_ref_vals, bool self)
1622 struct hist_trigger_data *var_data;
1623 struct tracing_map_elt *var_elt;
1624 struct hist_field *hist_field;
1625 unsigned int i, var_idx;
1626 bool resolved = true;
1629 for (i = 0; i < hist_data->n_var_refs; i++) {
1630 hist_field = hist_data->var_refs[i];
1631 var_idx = hist_field->var.idx;
1632 var_data = hist_field->var.hist_data;
1634 if (var_data == NULL) {
1639 if ((self && var_data != hist_data) ||
1640 (!self && var_data == hist_data))
1643 var_elt = tracing_map_lookup(var_data->map, key);
1649 if (!tracing_map_var_set(var_elt, var_idx)) {
1654 if (self || !hist_field->read_once)
1655 var_val = tracing_map_read_var(var_elt, var_idx);
1657 var_val = tracing_map_read_var_once(var_elt, var_idx);
1659 var_ref_vals[i] = var_val;
/*
 * Return a display name for @field.  Plain fields use the event field
 * name; log2/alias fields recurse into operand 0; timestamp/cpu use
 * fixed names; expression and variable-reference fields build a
 * "system.event.name" string when fully qualified.
 * NOTE(review): full_name is a function-static buffer and strcat()
 * appends without a visible reset — possible stale/overflowing
 * contents across calls; verify against the complete source.
 */
1665 static const char *hist_field_name(struct hist_field *field,
1668 const char *field_name = "";
1674 field_name = field->field->name;
1675 else if (field->flags & HIST_FIELD_FL_LOG2 ||
1676 field->flags & HIST_FIELD_FL_ALIAS)
1677 field_name = hist_field_name(field->operands[0], ++level);
1678 else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
1679 field_name = "common_timestamp";
1680 else if (field->flags & HIST_FIELD_FL_CPU)
1682 else if (field->flags & HIST_FIELD_FL_EXPR ||
1683 field->flags & HIST_FIELD_FL_VAR_REF) {
1684 if (field->system) {
1685 static char full_name[MAX_FILTER_STR_VAL];
1687 strcat(full_name, field->system);
1688 strcat(full_name, ".");
1689 strcat(full_name, field->event_name);
1690 strcat(full_name, ".");
1691 strcat(full_name, field->name);
1692 field_name = full_name;
1694 field_name = field->name;
1697 if (field_name == NULL)
/*
 * Pick the fetch function for a numeric field based on its byte size
 * and signedness (8/4/2-byte cases visible; 1-byte tail elided).
 * Returns NULL for unsupported sizes.
 */
1703 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
1705 hist_field_fn_t fn = NULL;
1707 switch (field_size) {
1709 if (field_is_signed)
1710 fn = hist_field_s64;
1712 fn = hist_field_u64;
1715 if (field_is_signed)
1716 fn = hist_field_s32;
1718 fn = hist_field_u32;
1721 if (field_is_signed)
1722 fn = hist_field_s16;
1724 fn = hist_field_u16;
1727 if (field_is_signed)
/*
 * Parse a "size=" value: convert @str to an entry count, round up to a
 * power of two, and validate the resulting number of map bits against
 * TRACING_MAP_BITS_MIN/MAX.  Returns the bit count (error paths elided).
 */
1737 static int parse_map_size(char *str)
1739 unsigned long size, map_bits;
1748 ret = kstrtoul(str, 0, &size);
1752 map_bits = ilog2(roundup_pow_of_two(size));
1753 if (map_bits < TRACING_MAP_BITS_MIN ||
1754 map_bits > TRACING_MAP_BITS_MAX)
/*
 * Free all strings duplicated while parsing a hist trigger command
 * (assignments, actions, sort/keys/vals strings), then the attrs
 * struct itself (final kfree elided from this excerpt).
 */
1762 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
1769 for (i = 0; i < attrs->n_assignments; i++)
1770 kfree(attrs->assignment_str[i]);
1772 for (i = 0; i < attrs->n_actions; i++)
1773 kfree(attrs->action_str[i]);
1776 kfree(attrs->sort_key_str);
1777 kfree(attrs->keys_str);
1778 kfree(attrs->vals_str);
/*
 * Record one action clause.  Only "onmatch(" and "onmax(" prefixes are
 * recognized; the raw string is duplicated into action_str[] for later
 * parsing.  Caps the count at HIST_ACTIONS_MAX.
 */
1782 static int parse_action(char *str, struct hist_trigger_attrs *attrs)
1786 if (attrs->n_actions >= HIST_ACTIONS_MAX)
1789 if ((strncmp(str, "onmatch(", strlen("onmatch(")) == 0) ||
1790 (strncmp(str, "onmax(", strlen("onmax(")) == 0) {
1791 attrs->action_str[attrs->n_actions] = kstrdup(str, GFP_KERNEL);
1792 if (!attrs->action_str[attrs->n_actions]) {
/*
 * Dispatch one "name=value" token from the trigger command by prefix:
 * key(s)=, val(s)/values=, sort=, name=, size= each populate the
 * corresponding attrs member; anything else is treated as a variable
 * definition and stored in assignment_str[] (bounded by
 * TRACING_MAP_VARS_MAX).
 */
1803 static int parse_assignment(char *str, struct hist_trigger_attrs *attrs)
1807 if ((strncmp(str, "key=", strlen("key=")) == 0) ||
1808 (strncmp(str, "keys=", strlen("keys=")) == 0)) {
1809 attrs->keys_str = kstrdup(str, GFP_KERNEL);
1810 if (!attrs->keys_str) {
1814 } else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
1815 (strncmp(str, "vals=", strlen("vals=")) == 0) ||
1816 (strncmp(str, "values=", strlen("values=")) == 0)) {
1817 attrs->vals_str = kstrdup(str, GFP_KERNEL);
1818 if (!attrs->vals_str) {
1822 } else if (strncmp(str, "sort=", strlen("sort=")) == 0) {
1823 attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
1824 if (!attrs->sort_key_str) {
1828 } else if (strncmp(str, "name=", strlen("name=")) == 0) {
1829 attrs->name = kstrdup(str, GFP_KERNEL);
1834 } else if (strncmp(str, "size=", strlen("size=")) == 0) {
1835 int map_bits = parse_map_size(str);
1841 attrs->map_bits = map_bits;
/* not a recognized keyword: treat as a variable definition */
1845 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
1846 hist_err("Too many variables defined: ", str);
1851 assignment = kstrdup(str, GFP_KERNEL);
1857 attrs->assignment_str[attrs->n_assignments++] = assignment;
/*
 * Parse the whole trigger command string into a hist_trigger_attrs.
 * Tokens are ':'-separated; tokens containing '=' go through
 * parse_assignment(), the keywords pause/cont(inue)/clear set flags,
 * and everything else is treated as an action.  "keys" is mandatory.
 * On any error the partially-built attrs is destroyed and an ERR_PTR
 * returned.
 */
1863 static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
1865 struct hist_trigger_attrs *attrs;
1868 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
1870 return ERR_PTR(-ENOMEM);
1872 while (trigger_str) {
1873 char *str = strsep(&trigger_str, ":");
1875 if (strchr(str, '=')) {
1876 ret = parse_assignment(str, attrs);
1879 } else if (strcmp(str, "pause") == 0)
1880 attrs->pause = true;
1881 else if ((strcmp(str, "cont") == 0) ||
1882 (strcmp(str, "continue") == 0))
1884 else if (strcmp(str, "clear") == 0)
1885 attrs->clear = true;
1887 ret = parse_action(str, attrs);
1893 if (!attrs->keys_str) {
1900 destroy_hist_trigger_attrs(attrs);
1902 return ERR_PTR(ret);
/*
 * Copy @task's comm into @comm (TASK_COMM_LEN bytes).  The idle task
 * and tasks with a (never expected) negative pid get placeholder
 * strings instead.
 */
1905 static inline void save_comm(char *comm, struct task_struct *task)
1908 strcpy(comm, "<idle>");
1912 if (WARN_ON_ONCE(task->pid < 0)) {
1913 strcpy(comm, "<XXX>");
1917 memcpy(comm, task->comm, TASK_COMM_LEN);
/*
 * Free a hist_elt_data: all field-variable string buffers, the comm
 * buffer, then the struct itself (final kfree elided).
 * kfree(NULL) is a no-op, so unallocated slots are safe.
 */
1920 static void hist_elt_data_free(struct hist_elt_data *elt_data)
1924 for (i = 0; i < SYNTH_FIELDS_MAX; i++)
1925 kfree(elt_data->field_var_str[i]);
1927 kfree(elt_data->comm);
/* tracing_map elt_free callback: release the element's private data */
1931 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
1933 struct hist_elt_data *elt_data = elt->private_data;
1935 hist_elt_data_free(elt_data);
/*
 * tracing_map elt_alloc callback: allocate the per-element private
 * data.  A comm buffer is allocated only if some key uses the
 * .execname modifier; one STR_VAR_LEN_MAX buffer is allocated per
 * string field/max variable.  On failure the partial allocation is
 * released via hist_elt_data_free().
 */
1938 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
1940 struct hist_trigger_data *hist_data = elt->map->private_data;
1941 unsigned int size = TASK_COMM_LEN;
1942 struct hist_elt_data *elt_data;
1943 struct hist_field *key_field;
1944 unsigned int i, n_str;
1946 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
1950 for_each_hist_key_field(i, hist_data) {
1951 key_field = hist_data->fields[i];
1953 if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
1954 elt_data->comm = kzalloc(size, GFP_KERNEL);
1955 if (!elt_data->comm) {
1963 n_str = hist_data->n_field_var_str + hist_data->n_max_var_str;
1965 size = STR_VAR_LEN_MAX;
1967 for (i = 0; i < n_str; i++) {
1968 elt_data->field_var_str[i] = kzalloc(size, GFP_KERNEL);
1969 if (!elt_data->field_var_str[i]) {
1970 hist_elt_data_free(elt_data);
1975 elt->private_data = elt_data;
/*
 * tracing_map elt_init callback: snapshot current's comm into the
 * element (only when a comm buffer was allocated — guard elided).
 */
1980 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
1982 struct hist_elt_data *elt_data = elt->private_data;
1985 save_comm(elt_data->comm, current);
/* tracing_map callbacks wiring the three element-lifecycle hooks above */
1988 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
1989 .elt_alloc = hist_trigger_elt_data_alloc,
1990 .elt_free = hist_trigger_elt_data_free,
1991 .elt_init = hist_trigger_elt_data_init,
/*
 * Map the first matching modifier flag on @hist_field to its
 * command-string suffix (".hex", ".sym", ...), or NULL if none.
 * Order matters: only one modifier string is ever reported.
 */
1994 static const char *get_hist_field_flags(struct hist_field *hist_field)
1996 const char *flags_str = NULL;
1998 if (hist_field->flags & HIST_FIELD_FL_HEX)
2000 else if (hist_field->flags & HIST_FIELD_FL_SYM)
2002 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
2003 flags_str = "sym-offset";
2004 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
2005 flags_str = "execname";
2006 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
2007 flags_str = "syscall";
2008 else if (hist_field->flags & HIST_FIELD_FL_LOG2)
2010 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2011 flags_str = "usecs";
/*
 * Append @field's printable name (plus its ".modifier" suffix, if any)
 * to the expression string being built in @expr.  Variable references
 * get special treatment first (elided "$" handling).
 */
2016 static void expr_field_str(struct hist_field *field, char *expr)
2018 if (field->flags & HIST_FIELD_FL_VAR_REF)
2021 strcat(expr, hist_field_name(field, 0));
2024 const char *flags_str = get_hist_field_flags(field);
2028 strcat(expr, flags_str);
/*
 * Build a freshly-allocated textual representation of expression
 * @field: a lone atom prints directly; unary minus recurses into its
 * subexpression; binary +/- prints "operand1 <op> operand2".
 * Caller frees the returned buffer (MAX_FILTER_STR_VAL bytes).
 */
2033 static char *expr_str(struct hist_field *field, unsigned int level)
2040 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2044 if (!field->operands[0]) {
2045 expr_field_str(field, expr);
2049 if (field->operator == FIELD_OP_UNARY_MINUS) {
2053 subexpr = expr_str(field->operands[0], ++level);
2058 strcat(expr, subexpr);
2066 expr_field_str(field->operands[0], expr);
2068 switch (field->operator) {
2069 case FIELD_OP_MINUS:
2080 expr_field_str(field->operands[1], expr);
/*
 * Classify @str: FIELD_OP_NONE if it holds no '+'/'-', otherwise
 * whether the operator found is a unary minus (leading '-'), a binary
 * minus, or a plus.  Used to decide how parse_expr() should split it.
 */
2085 static int contains_operator(char *str)
2087 enum field_op_id field_op = FIELD_OP_NONE;
2090 op = strpbrk(str, "+-");
2092 return FIELD_OP_NONE;
2097 field_op = FIELD_OP_UNARY_MINUS;
2099 field_op = FIELD_OP_MINUS;
2102 field_op = FIELD_OP_PLUS;
/*
 * Recursively free a hist_field and its operands, then its owned
 * strings.  The level parameter bounds recursion depth (guard check
 * elided from this excerpt).  Safe on NULL operand slots via the
 * recursive call's own NULL handling (elided).
 */
2111 static void destroy_hist_field(struct hist_field *hist_field,
2122 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
2123 destroy_hist_field(hist_field->operands[i], level + 1);
2125 kfree(hist_field->var.name);
2126 kfree(hist_field->name);
2127 kfree(hist_field->type);
/*
 * Allocate and initialize a hist_field according to @flags:
 * expression/alias shells are returned empty for the caller to fill;
 * var-ref, hitcount, stacktrace, log2, timestamp and cpu fields get
 * fixed fn/size/type; otherwise the field is sized and typed from the
 * underlying event @field (string vs numeric).  If @var_name is set
 * the field also becomes a variable.  Returns NULL on failure after
 * tearing down partial state (some error paths elided).
 */
2132 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
2133 struct ftrace_event_field *field,
2134 unsigned long flags,
2137 struct hist_field *hist_field;
/* function-call "fields" can't be histogrammed */
2139 if (field && is_function_field(field))
2142 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
2146 hist_field->hist_data = hist_data;
2148 if (flags & HIST_FIELD_FL_EXPR || flags & HIST_FIELD_FL_ALIAS)
2149 goto out; /* caller will populate */
2151 if (flags & HIST_FIELD_FL_VAR_REF) {
2152 hist_field->fn = hist_field_var_ref;
2156 if (flags & HIST_FIELD_FL_HITCOUNT) {
2157 hist_field->fn = hist_field_counter;
2158 hist_field->size = sizeof(u64);
2159 hist_field->type = kstrdup("u64", GFP_KERNEL);
2160 if (!hist_field->type)
2165 if (flags & HIST_FIELD_FL_STACKTRACE) {
2166 hist_field->fn = hist_field_none;
/* log2 wraps an inner field created without the LOG2 flag */
2170 if (flags & HIST_FIELD_FL_LOG2) {
2171 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
2172 hist_field->fn = hist_field_log2;
2173 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
2174 hist_field->size = hist_field->operands[0]->size;
2175 hist_field->type = kstrdup(hist_field->operands[0]->type, GFP_KERNEL);
2176 if (!hist_field->type)
2181 if (flags & HIST_FIELD_FL_TIMESTAMP) {
2182 hist_field->fn = hist_field_timestamp;
2183 hist_field->size = sizeof(u64);
2184 hist_field->type = kstrdup("u64", GFP_KERNEL);
2185 if (!hist_field->type)
2190 if (flags & HIST_FIELD_FL_CPU) {
2191 hist_field->fn = hist_field_cpu;
2192 hist_field->size = sizeof(int);
2193 hist_field->type = kstrdup("unsigned int", GFP_KERNEL);
2194 if (!hist_field->type)
/* past this point a real event field is required */
2199 if (WARN_ON_ONCE(!field))
2202 if (is_string_field(field)) {
2203 flags |= HIST_FIELD_FL_STRING;
2205 hist_field->size = MAX_FILTER_STR_VAL;
2206 hist_field->type = kstrdup(field->type, GFP_KERNEL);
2207 if (!hist_field->type)
2210 if (field->filter_type == FILTER_STATIC_STRING)
2211 hist_field->fn = hist_field_string;
2212 else if (field->filter_type == FILTER_DYN_STRING)
2213 hist_field->fn = hist_field_dynstring;
2215 hist_field->fn = hist_field_pstring;
2217 hist_field->size = field->size;
2218 hist_field->is_signed = field->is_signed;
2219 hist_field->type = kstrdup(field->type, GFP_KERNEL);
2220 if (!hist_field->type)
2223 hist_field->fn = select_value_fn(field->size,
2225 if (!hist_field->fn) {
2226 destroy_hist_field(hist_field, 0);
2231 hist_field->field = field;
2232 hist_field->flags = flags;
2235 hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
2236 if (!hist_field->var.name)
2242 destroy_hist_field(hist_field, 0);
/* Tear down and NULL every field slot owned by @hist_data. */
2246 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
2250 for (i = 0; i < HIST_FIELDS_MAX; i++) {
2251 if (hist_data->fields[i]) {
2252 destroy_hist_field(hist_data->fields[i], 0);
2253 hist_data->fields[i] = NULL;
/*
 * Initialize @ref_field as a reference to @var_field: copy the
 * variable's index/owner/size/signedness, inherit its timestamp
 * flags, and duplicate the optional system/event qualifiers, the
 * name (variable name preferred over field name), and the type.
 * On allocation failure all duplicated strings are freed (the "free"
 * label structure is partially elided).
 */
2258 static int init_var_ref(struct hist_field *ref_field,
2259 struct hist_field *var_field,
2260 char *system, char *event_name)
2264 ref_field->var.idx = var_field->var.idx;
2265 ref_field->var.hist_data = var_field->hist_data;
2266 ref_field->size = var_field->size;
2267 ref_field->is_signed = var_field->is_signed;
2268 ref_field->flags |= var_field->flags &
2269 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2272 ref_field->system = kstrdup(system, GFP_KERNEL);
2273 if (!ref_field->system)
2278 ref_field->event_name = kstrdup(event_name, GFP_KERNEL);
2279 if (!ref_field->event_name) {
2285 if (var_field->var.name) {
2286 ref_field->name = kstrdup(var_field->var.name, GFP_KERNEL);
2287 if (!ref_field->name) {
2291 } else if (var_field->name) {
2292 ref_field->name = kstrdup(var_field->name, GFP_KERNEL);
2293 if (!ref_field->name) {
2299 ref_field->type = kstrdup(var_field->type, GFP_KERNEL);
2300 if (!ref_field->type) {
/* error unwind: release everything duplicated above */
2307 kfree(ref_field->system);
2308 kfree(ref_field->event_name);
2309 kfree(ref_field->name);
/*
 * Create a HIST_FIELD_FL_VAR_REF field referring to @var_field,
 * destroying the shell if init_var_ref() fails.  Returns the new
 * reference (or NULL — elided paths).
 */
2314 static struct hist_field *create_var_ref(struct hist_field *var_field,
2315 char *system, char *event_name)
2317 unsigned long flags = HIST_FIELD_FL_VAR_REF;
2318 struct hist_field *ref_field;
2320 ref_field = create_hist_field(var_field->hist_data, NULL, flags, NULL);
2322 if (init_var_ref(ref_field, var_field, system, event_name)) {
2323 destroy_hist_field(ref_field, 0);
/* A variable reference is "$" followed by at least one character. */
2331 static bool is_var_ref(char *var_name)
2333 if (!var_name || strlen(var_name) < 2 || var_name[0] != '$')
/*
 * If @var_name names a variable defined as a plain event field (not
 * an expression or another ref), return that field's name so the
 * caller can reference the field directly.
 */
2339 static char *field_name_from_var(struct hist_trigger_data *hist_data,
2345 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
2346 name = hist_data->attrs->var_defs.name[i];
2348 if (strcmp(var_name, name) == 0) {
2349 field = hist_data->attrs->var_defs.expr[i];
2350 if (contains_operator(field) || is_var_ref(field))
/*
 * For a "$var" that (optionally qualified) refers to this trigger's
 * own event, return the underlying field name if the variable is a
 * simple field alias.  Mismatched qualifiers or non-refs return early
 * (NULL paths elided).  Both or neither of system/event must be given.
 */
2359 static char *local_field_var_ref(struct hist_trigger_data *hist_data,
2360 char *system, char *event_name,
2363 struct trace_event_call *call;
2365 if (system && event_name) {
2366 call = hist_data->event_file->event_call;
2368 if (strcmp(system, call->class->system) != 0)
2371 if (strcmp(event_name, trace_event_name(call)) != 0)
2375 if (!!system != !!event_name)
2378 if (!is_var_ref(var_name))
2383 return field_name_from_var(hist_data, var_name);
/*
 * Turn a "$name" token into a variable-reference hist_field by
 * locating the defining variable and wrapping it with create_var_ref().
 * Logs an error if the variable can't be found.
 */
2386 static struct hist_field *parse_var_ref(struct hist_trigger_data *hist_data,
2387 char *system, char *event_name,
2390 struct hist_field *var_field = NULL, *ref_field = NULL;
2392 if (!is_var_ref(var_name))
2397 var_field = find_event_var(hist_data, system, event_name, var_name);
2399 ref_field = create_var_ref(var_field, system, event_name);
2402 hist_err_event("Couldn't find variable: $",
2403 system, event_name, var_name);
/*
 * Parse "field.modifier" into the event's ftrace_event_field plus
 * modifier bits in *@flags.  "common_timestamp" and "cpu" are
 * synthesized (no event field; may return NULL for those — elided),
 * "execname" is only valid on common_pid.  Unknown modifiers or
 * missing/zero-size fields yield ERR_PTR(-EINVAL); the duplicated
 * work string is freed on exit (elided).
 */
2408 static struct ftrace_event_field *
2409 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
2410 char *field_str, unsigned long *flags)
2412 struct ftrace_event_field *field = NULL;
2413 char *field_name, *modifier, *str;
2415 modifier = str = kstrdup(field_str, GFP_KERNEL);
2417 return ERR_PTR(-ENOMEM);
2419 field_name = strsep(&modifier, ".");
2421 if (strcmp(modifier, "hex") == 0)
2422 *flags |= HIST_FIELD_FL_HEX;
2423 else if (strcmp(modifier, "sym") == 0)
2424 *flags |= HIST_FIELD_FL_SYM;
2425 else if (strcmp(modifier, "sym-offset") == 0)
2426 *flags |= HIST_FIELD_FL_SYM_OFFSET;
2427 else if ((strcmp(modifier, "execname") == 0) &&
2428 (strcmp(field_name, "common_pid") == 0))
2429 *flags |= HIST_FIELD_FL_EXECNAME;
2430 else if (strcmp(modifier, "syscall") == 0)
2431 *flags |= HIST_FIELD_FL_SYSCALL;
2432 else if (strcmp(modifier, "log2") == 0)
2433 *flags |= HIST_FIELD_FL_LOG2;
2434 else if (strcmp(modifier, "usecs") == 0)
2435 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
2437 field = ERR_PTR(-EINVAL);
2442 if (strcmp(field_name, "common_timestamp") == 0) {
2443 *flags |= HIST_FIELD_FL_TIMESTAMP;
2444 hist_data->enable_timestamps = true;
2445 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
2446 hist_data->attrs->ts_in_usecs = true;
2447 } else if (strcmp(field_name, "cpu") == 0)
2448 *flags |= HIST_FIELD_FL_CPU;
2450 field = trace_find_event_field(file->event_call, field_name);
2451 if (!field || !field->size) {
2452 field = ERR_PTR(-EINVAL);
/*
 * Create a named alias variable wrapping @var_ref: the alias reuses
 * the ref's fetch fn and inherits its attributes via init_var_ref().
 * The shell is destroyed on init failure.
 */
2462 static struct hist_field *create_alias(struct hist_trigger_data *hist_data,
2463 struct hist_field *var_ref,
2466 struct hist_field *alias = NULL;
2467 unsigned long flags = HIST_FIELD_FL_ALIAS | HIST_FIELD_FL_VAR;
2469 alias = create_hist_field(hist_data, NULL, flags, var_name);
2473 alias->fn = var_ref->fn;
2474 alias->operands[0] = var_ref;
2476 if (init_var_ref(alias, var_ref, var_ref->system, var_ref->event_name)) {
2477 destroy_hist_field(alias, 0);
/*
 * Parse an atomic (non-expression) term: split optional
 * "system.event.$var" qualifiers, try a local field alias, then a
 * variable reference (registering it in var_refs[] and wrapping it in
 * an alias when a @var_name is being defined), and finally fall back
 * to a plain event field via parse_field()/create_hist_field().
 * Returns the hist_field or ERR_PTR.
 */
2484 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
2485 struct trace_event_file *file, char *str,
2486 unsigned long *flags, char *var_name)
2488 char *s, *ref_system = NULL, *ref_event = NULL, *ref_var = str;
2489 struct ftrace_event_field *field = NULL;
2490 struct hist_field *hist_field = NULL;
2493 s = strchr(str, '.');
2495 s = strchr(++s, '.');
2497 ref_system = strsep(&str, ".");
2502 ref_event = strsep(&str, ".");
2511 s = local_field_var_ref(hist_data, ref_system, ref_event, ref_var);
2513 hist_field = parse_var_ref(hist_data, ref_system, ref_event, ref_var);
2515 hist_data->var_refs[hist_data->n_var_refs] = hist_field;
2516 hist_field->var_ref_idx = hist_data->n_var_refs++;
2518 hist_field = create_alias(hist_data, hist_field, var_name);
2529 field = parse_field(hist_data, file, str, flags);
2530 if (IS_ERR(field)) {
2531 ret = PTR_ERR(field);
2535 hist_field = create_hist_field(hist_data, field, *flags, var_name);
2543 return ERR_PTR(ret);
/* Forward declaration: parse_unary() and parse_expr() are mutually recursive. */
2546 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2547 struct trace_event_file *file,
2548 char *str, unsigned long flags,
2549 char *var_name, unsigned int level);
/*
 * Parse a unary-minus expression.  Only the explicit "-(expr)" form is
 * accepted; recursion depth is bounded by @level.  Builds an EXPR
 * hist_field whose single operand is the parenthesized subexpression,
 * inheriting its timestamp flags and type.  The shell is destroyed on
 * any failure.
 */
2551 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
2552 struct trace_event_file *file,
2553 char *str, unsigned long flags,
2554 char *var_name, unsigned int level)
2556 struct hist_field *operand1, *expr = NULL;
2557 unsigned long operand_flags;
2561 /* we support only -(xxx) i.e. explicit parens required */
2564 hist_err("Too many subexpressions (3 max): ", str);
2569 str++; /* skip leading '-' */
2571 s = strchr(str, '(');
2579 s = strrchr(str, ')');
2583 ret = -EINVAL; /* no closing ')' */
2587 flags |= HIST_FIELD_FL_EXPR;
2588 expr = create_hist_field(hist_data, NULL, flags, var_name);
2595 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2596 if (IS_ERR(operand1)) {
2597 ret = PTR_ERR(operand1);
2601 expr->flags |= operand1->flags &
2602 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2603 expr->fn = hist_field_unary_minus;
2604 expr->operands[0] = operand1;
2605 expr->operator = FIELD_OP_UNARY_MINUS;
2606 expr->name = expr_str(expr, 0);
2607 expr->type = kstrdup(operand1->type, GFP_KERNEL);
2615 destroy_hist_field(expr, 0);
2616 return ERR_PTR(ret);
/*
 * Reject expressions mixing timestamp units: resolve each operand's
 * effective flags (following var refs/aliases back to the defining
 * variable) and fail if exactly one side is in usecs.
 */
2619 static int check_expr_operands(struct hist_field *operand1,
2620 struct hist_field *operand2)
2622 unsigned long operand1_flags = operand1->flags;
2623 unsigned long operand2_flags = operand2->flags;
2625 if ((operand1_flags & HIST_FIELD_FL_VAR_REF) ||
2626 (operand1_flags & HIST_FIELD_FL_ALIAS)) {
2627 struct hist_field *var;
2629 var = find_var_field(operand1->var.hist_data, operand1->name);
2632 operand1_flags = var->flags;
2635 if ((operand2_flags & HIST_FIELD_FL_VAR_REF) ||
2636 (operand2_flags & HIST_FIELD_FL_ALIAS)) {
2637 struct hist_field *var;
2639 var = find_var_field(operand2->var.hist_data, operand2->name);
2642 operand2_flags = var->flags;
2645 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
2646 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS)) {
2647 hist_err("Timestamp units in expression don't match", NULL);
/*
 * Recursive-descent parser for +/- expressions.  An operator-free
 * string is an atom; a leading '-' is handled by parse_unary().
 * Otherwise the string is split at the first operator: the left side
 * must be an atom, the right side may itself be an expression (so
 * "a+b+c" associates as a+(b+c)).  Operand units are checked, operands
 * are marked read_once, and an EXPR hist_field is assembled with the
 * matching fn.  On failure all three pieces are destroyed
 * (destroy_hist_field tolerates NULL/ERR state per elided guards).
 */
2654 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
2655 struct trace_event_file *file,
2656 char *str, unsigned long flags,
2657 char *var_name, unsigned int level)
2659 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
2660 unsigned long operand_flags;
2661 int field_op, ret = -EINVAL;
2662 char *sep, *operand1_str;
2665 hist_err("Too many subexpressions (3 max): ", str);
2666 return ERR_PTR(-EINVAL);
2669 field_op = contains_operator(str);
2671 if (field_op == FIELD_OP_NONE)
2672 return parse_atom(hist_data, file, str, &flags, var_name);
2674 if (field_op == FIELD_OP_UNARY_MINUS)
2675 return parse_unary(hist_data, file, str, flags, var_name, ++level);
2678 case FIELD_OP_MINUS:
2688 operand1_str = strsep(&str, sep);
2689 if (!operand1_str || !str)
2693 operand1 = parse_atom(hist_data, file, operand1_str,
2694 &operand_flags, NULL);
2695 if (IS_ERR(operand1)) {
2696 ret = PTR_ERR(operand1);
2701 /* rest of string could be another expression e.g. b+c in a+b+c */
2703 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
2704 if (IS_ERR(operand2)) {
2705 ret = PTR_ERR(operand2);
2710 ret = check_expr_operands(operand1, operand2);
2714 flags |= HIST_FIELD_FL_EXPR;
2716 flags |= operand1->flags &
2717 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
2719 expr = create_hist_field(hist_data, NULL, flags, var_name);
2725 operand1->read_once = true;
2726 operand2->read_once = true;
2728 expr->operands[0] = operand1;
2729 expr->operands[1] = operand2;
2730 expr->operator = field_op;
2731 expr->name = expr_str(expr, 0);
2732 expr->type = kstrdup(operand1->type, GFP_KERNEL);
2739 case FIELD_OP_MINUS:
2740 expr->fn = hist_field_minus;
2743 expr->fn = hist_field_plus;
2751 destroy_hist_field(operand1, 0);
2752 destroy_hist_field(operand2, 0);
2753 destroy_hist_field(expr, 0);
2755 return ERR_PTR(ret);
/*
 * Return the filter string of the hist trigger on @file whose private
 * data is @hist_data, or NULL if not registered there.
 */
2758 static char *find_trigger_filter(struct hist_trigger_data *hist_data,
2759 struct trace_event_file *file)
2761 struct event_trigger_data *test;
2763 list_for_each_entry_rcu(test, &file->triggers, list) {
2764 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2765 if (test->private_data == hist_data)
2766 return test->filter_str;
/* Forward declarations needed by create_field_var_hist() below. */
2773 static struct event_command trigger_hist_cmd;
2774 static int event_hist_trigger_func(struct event_command *cmd_ops,
2775 struct trace_event_file *file,
2776 char *glob, char *cmd, char *param);
/*
 * Two histograms are key-compatible when they have the same number of
 * keys and each corresponding key field matches in type string, size
 * and signedness.  Key fields start right after the value fields.
 */
2778 static bool compatible_keys(struct hist_trigger_data *target_hist_data,
2779 struct hist_trigger_data *hist_data,
2780 unsigned int n_keys)
2782 struct hist_field *target_hist_field, *hist_field;
2783 unsigned int n, i, j;
2785 if (hist_data->n_fields - hist_data->n_vals != n_keys)
2788 i = hist_data->n_vals;
2789 j = target_hist_data->n_vals;
2791 for (n = 0; n < n_keys; n++) {
2792 hist_field = hist_data->fields[i + n];
2793 target_hist_field = target_hist_data->fields[j + n];
2795 if (strcmp(hist_field->type, target_hist_field->type) != 0)
2797 if (hist_field->size != target_hist_field->size)
2799 if (hist_field->is_signed != target_hist_field->is_signed)
/*
 * Scan @file's registered hist triggers for one whose keys are
 * compatible with @target_hist_data (see compatible_keys()); return
 * its hist_trigger_data or NULL (elided).
 */
2806 static struct hist_trigger_data *
2807 find_compatible_hist(struct hist_trigger_data *target_hist_data,
2808 struct trace_event_file *file)
2810 struct hist_trigger_data *hist_data;
2811 struct event_trigger_data *test;
2812 unsigned int n_keys;
2814 n_keys = target_hist_data->n_fields - target_hist_data->n_vals;
2816 list_for_each_entry_rcu(test, &file->triggers, list) {
2817 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2818 hist_data = test->private_data;
2820 if (compatible_keys(target_hist_data, hist_data, n_keys))
/*
 * Resolve system/event_name to its trace_event_file, or
 * ERR_PTR(-EINVAL) if no such event exists.
 */
2828 static struct trace_event_file *event_file(struct trace_array *tr,
2829 char *system, char *event_name)
2831 struct trace_event_file *file;
2833 file = find_event_file(tr, system, event_name);
2835 return ERR_PTR(-EINVAL);
/*
 * Look up the auto-created "synthetic_<field_name>" variable for
 * @field_name on the given event.  The temporary name buffer is freed
 * before returning the result.
 */
2840 static struct hist_field *
2841 find_synthetic_field_var(struct hist_trigger_data *target_hist_data,
2842 char *system, char *event_name, char *field_name)
2844 struct hist_field *event_var;
2845 char *synthetic_name;
2847 synthetic_name = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2848 if (!synthetic_name)
2849 return ERR_PTR(-ENOMEM);
2851 strcpy(synthetic_name, "synthetic_");
2852 strcat(synthetic_name, field_name);
2854 event_var = find_event_var(target_hist_data, system, event_name, synthetic_name);
2856 kfree(synthetic_name);
 * create_field_var_hist - Automatically create a histogram and var for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @field_name: The name of the field (and the resulting variable)
 * Hist trigger actions fetch data from variables, not directly from
 * events. However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 * If a user specifies a field on an event that isn't the event the
 * histogram currently being defined (the target event histogram), the
 * only way that can be accomplished is if a new hist trigger is
 * created and the field variable defined on that.
 * This function creates a new histogram compatible with the target
 * event (meaning a histogram with the same key as the target
 * histogram), and creates a variable for the specified field, but
 * with 'synthetic_' prepended to the variable name in order to avoid
 * collision with normal field variables.
 * Return: The variable created for the field.
2886 static struct hist_field *
2887 create_field_var_hist(struct hist_trigger_data *target_hist_data,
2888 char *subsys_name, char *event_name, char *field_name)
2890 struct trace_array *tr = target_hist_data->event_file->tr;
2891 struct hist_field *event_var = ERR_PTR(-EINVAL);
2892 struct hist_trigger_data *hist_data;
2893 unsigned int i, n, first = true;
2894 struct field_var_hist *var_hist;
2895 struct trace_event_file *file;
2896 struct hist_field *key_field;
2901 if (target_hist_data->n_field_var_hists >= SYNTH_FIELDS_MAX) {
2902 hist_err_event("onmatch: Too many field variables defined: ",
2903 subsys_name, event_name, field_name);
2904 return ERR_PTR(-EINVAL);
2907 file = event_file(tr, subsys_name, event_name);
2910 hist_err_event("onmatch: Event file not found: ",
2911 subsys_name, event_name, field_name);
2912 ret = PTR_ERR(file);
2913 return ERR_PTR(ret);
 * Look for a histogram compatible with target. We'll use the
 * found histogram specification to create a new matching
 * histogram with our variable on it. target_hist_data is not
 * yet a registered histogram so we can't use that.
2922 hist_data = find_compatible_hist(target_hist_data, file);
2924 hist_err_event("onmatch: Matching event histogram not found: ",
2925 subsys_name, event_name, field_name);
2926 return ERR_PTR(-EINVAL);
2929 /* See if a synthetic field variable has already been created */
2930 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
2931 event_name, field_name);
2932 if (!IS_ERR_OR_NULL(event_var))
2935 var_hist = kzalloc(sizeof(*var_hist), GFP_KERNEL);
2937 return ERR_PTR(-ENOMEM);
2939 cmd = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
2942 return ERR_PTR(-ENOMEM);
2945 /* Use the same keys as the compatible histogram */
2946 strcat(cmd, "keys=");
2948 for_each_hist_key_field(i, hist_data) {
2949 key_field = hist_data->fields[i];
2952 strcat(cmd, key_field->field->name);
2956 /* Create the synthetic field variable specification */
2957 strcat(cmd, ":synthetic_");
2958 strcat(cmd, field_name);
2960 strcat(cmd, field_name);
2962 /* Use the same filter as the compatible histogram */
2963 saved_filter = find_trigger_filter(hist_data, file);
2965 strcat(cmd, " if ");
2966 strcat(cmd, saved_filter);
2969 var_hist->cmd = kstrdup(cmd, GFP_KERNEL);
2970 if (!var_hist->cmd) {
2973 return ERR_PTR(-ENOMEM);
2976 /* Save the compatible histogram information */
2977 var_hist->hist_data = hist_data;
2979 /* Create the new histogram with our variable */
2980 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
2984 kfree(var_hist->cmd);
2986 hist_err_event("onmatch: Couldn't create histogram for field: ",
2987 subsys_name, event_name, field_name);
2988 return ERR_PTR(ret);
2993 /* If we can't find the variable, something went wrong */
2994 event_var = find_synthetic_field_var(target_hist_data, subsys_name,
2995 event_name, field_name);
2996 if (IS_ERR_OR_NULL(event_var)) {
2997 kfree(var_hist->cmd);
2999 hist_err_event("onmatch: Couldn't find synthetic variable: ",
3000 subsys_name, event_name, field_name);
3001 return ERR_PTR(-EINVAL);
/* register the auxiliary histogram so it can be torn down later */
3004 n = target_hist_data->n_field_var_hists;
3005 target_hist_data->field_var_hists[n] = var_hist;
3006 target_hist_data->n_field_var_hists++;
/*
 * Find @var_name among the target trigger's own variables, but only
 * after verifying that any supplied subsys/event qualifiers actually
 * name the target event (mismatch returns NULL via elided paths).
 */
3011 static struct hist_field *
3012 find_target_event_var(struct hist_trigger_data *hist_data,
3013 char *subsys_name, char *event_name, char *var_name)
3015 struct trace_event_file *file = hist_data->event_file;
3016 struct hist_field *hist_field = NULL;
3019 struct trace_event_call *call;
3024 call = file->event_call;
3026 if (strcmp(subsys_name, call->class->system) != 0)
3029 if (strcmp(event_name, trace_event_name(call)) != 0)
3033 hist_field = find_var_field(hist_data, var_name);
/*
 * Evaluate each field variable's value fn against the current record
 * and store the result in the element's variable slot.  String values
 * are copied into the element's preallocated buffer (indexed from
 * @field_var_str_start) and the buffer pointer is stored instead.
 * NOTE(review): strncpy() may leave the destination unterminated when
 * the source is exactly STR_VAR_LEN_MAX — verify termination is
 * guaranteed elsewhere.
 */
3038 static inline void __update_field_vars(struct tracing_map_elt *elt,
3039 struct ring_buffer_event *rbe,
3041 struct field_var **field_vars,
3042 unsigned int n_field_vars,
3043 unsigned int field_var_str_start)
3045 struct hist_elt_data *elt_data = elt->private_data;
3046 unsigned int i, j, var_idx;
3049 for (i = 0, j = field_var_str_start; i < n_field_vars; i++) {
3050 struct field_var *field_var = field_vars[i];
3051 struct hist_field *var = field_var->var;
3052 struct hist_field *val = field_var->val;
3054 var_val = val->fn(val, elt, rbe, rec);
3055 var_idx = var->var.idx;
3057 if (val->flags & HIST_FIELD_FL_STRING) {
3058 char *str = elt_data->field_var_str[j++];
3059 char *val_str = (char *)(uintptr_t)var_val;
3061 strncpy(str, val_str, STR_VAR_LEN_MAX);
3062 var_val = (u64)(uintptr_t)str;
3064 tracing_map_set_var(elt, var_idx, var_val);
/* Update normal field variables; their string slots start at index 0. */
3068 static void update_field_vars(struct hist_trigger_data *hist_data,
3069 struct tracing_map_elt *elt,
3070 struct ring_buffer_event *rbe,
3073 __update_field_vars(elt, rbe, rec, hist_data->field_vars,
3074 hist_data->n_field_vars, 0);
/*
 * Update onmax save variables; their string slots follow the field
 * variables' slots (offset n_field_var_str).
 */
3077 static void update_max_vars(struct hist_trigger_data *hist_data,
3078 struct tracing_map_elt *elt,
3079 struct ring_buffer_event *rbe,
3082 __update_field_vars(elt, rbe, rec, hist_data->max_vars,
3083 hist_data->n_max_vars, hist_data->n_field_var_str);
/*
 * Allocate a standalone variable hist_field named @name with the given
 * size/type, reserving a slot in the tracing map.  Duplicate names are
 * rejected (unless the trigger is being removed).  Returns ERR_PTR on
 * failure, freeing partial state.
 */
3086 static struct hist_field *create_var(struct hist_trigger_data *hist_data,
3087 struct trace_event_file *file,
3088 char *name, int size, const char *type)
3090 struct hist_field *var;
3093 if (find_var(hist_data, file, name) && !hist_data->remove) {
3094 var = ERR_PTR(-EINVAL);
3098 var = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
3100 var = ERR_PTR(-ENOMEM);
3104 idx = tracing_map_add_var(hist_data->map);
3107 var = ERR_PTR(-EINVAL);
3111 var->flags = HIST_FIELD_FL_VAR;
3113 var->var.hist_data = var->hist_data = hist_data;
3115 var->var.name = kstrdup(name, GFP_KERNEL);
3116 var->type = kstrdup(type, GFP_KERNEL);
3117 if (!var->var.name || !var->type) {
3118 kfree(var->var.name);
3121 var = ERR_PTR(-ENOMEM);
/*
 * Create a field variable for @field_name on this trigger: parse the
 * field as the value, create a matching variable, and pair them in a
 * field_var.  Bounded by SYNTH_FIELDS_MAX; returns ERR_PTR on failure
 * (cleanup paths elided).
 */
3127 static struct field_var *create_field_var(struct hist_trigger_data *hist_data,
3128 struct trace_event_file *file,
3131 struct hist_field *val = NULL, *var = NULL;
3132 unsigned long flags = HIST_FIELD_FL_VAR;
3133 struct field_var *field_var;
3136 if (hist_data->n_field_vars >= SYNTH_FIELDS_MAX) {
3137 hist_err("Too many field variables defined: ", field_name);
3142 val = parse_atom(hist_data, file, field_name, &flags, NULL);
3144 hist_err("Couldn't parse field variable: ", field_name);
3149 var = create_var(hist_data, file, field_name, val->size, val->type);
3151 hist_err("Couldn't create or find variable: ", field_name);
3157 field_var = kzalloc(sizeof(struct field_var), GFP_KERNEL);
3165 field_var->var = var;
3166 field_var->val = val;
3170 field_var = ERR_PTR(ret);
 * create_target_field_var - Automatically create a variable for a field
 * @target_hist_data: The target hist trigger
 * @subsys_name: Optional subsystem name
 * @event_name: Optional event name
 * @var_name: The name of the field (and the resulting variable)
 * Hist trigger actions fetch data from variables, not directly from
 * events. However, for convenience, users are allowed to directly
 * specify an event field in an action, which will be automatically
 * converted into a variable on their behalf.
 * This function creates a field variable with the name var_name on
 * the hist trigger currently being defined on the target event. If
 * subsys_name and event_name are specified, this function simply
 * verifies that they do in fact match the target event subsystem and
 * event name.
 * Return: The variable created for the field.
3194 static struct field_var *
3195 create_target_field_var(struct hist_trigger_data *target_hist_data,
3196 char *subsys_name, char *event_name, char *var_name)
3198 struct trace_event_file *file = target_hist_data->event_file;
3201 struct trace_event_call *call;
3206 call = file->event_call;
3208 if (strcmp(subsys_name, call->class->system) != 0)
3211 if (strcmp(event_name, trace_event_name(call)) != 0)
3215 return create_field_var(target_hist_data, file, var_name);
/*
 * Print the tracked max value and every saved onmax variable for one
 * map element; string variables print the saved buffer, numeric ones
 * print as unsigned decimal.
 */
3218 static void onmax_print(struct seq_file *m,
3219 struct hist_trigger_data *hist_data,
3220 struct tracing_map_elt *elt,
3221 struct action_data *data)
3223 unsigned int i, save_var_idx, max_idx = data->onmax.max_var->var.idx;
3225 seq_printf(m, "\n\tmax: %10llu", tracing_map_read_var(elt, max_idx));
3227 for (i = 0; i < hist_data->n_max_vars; i++) {
3228 struct hist_field *save_val = hist_data->max_vars[i]->val;
3229 struct hist_field *save_var = hist_data->max_vars[i]->var;
3232 save_var_idx = save_var->var.idx;
3234 val = tracing_map_read_var(elt, save_var_idx);
3236 if (save_val->flags & HIST_FIELD_FL_STRING) {
3237 seq_printf(m, " %s: %-32s", save_var->var.name,
3238 (char *)(uintptr_t)(val));
3240 seq_printf(m, " %s: %10llu", save_var->var.name, val);
/*
 * Action function for onmax(): if the tracked variable's current value
 * exceeds the stored max, update the max and snapshot the save vars.
 */
3244 static void onmax_save(struct hist_trigger_data *hist_data,
3245 struct tracing_map_elt *elt, void *rec,
3246 struct ring_buffer_event *rbe,
3247 struct action_data *data, u64 *var_ref_vals)
3249 unsigned int max_idx = data->onmax.max_var->var.idx;
3250 unsigned int max_var_ref_idx = data->onmax.max_var_ref_idx;
3252 u64 var_val, max_val;
3254 var_val = var_ref_vals[max_var_ref_idx];
3255 max_val = tracing_map_read_var(elt, max_idx);
/* nothing to do unless we have a new maximum */
3257 if (var_val <= max_val)
3260 tracing_map_set_var(elt, max_idx, var_val);
3262 update_max_vars(hist_data, elt, rbe, rec);
/* Free everything owned by an onmax() action_data. */
3265 static void onmax_destroy(struct action_data *data)
3269 destroy_hist_field(data->onmax.max_var, 0);
3270 destroy_hist_field(data->onmax.var, 0);
3272 kfree(data->onmax.var_str);
3273 kfree(data->onmax.fn_name);
/* also free the duplicated save() parameter strings */
3275 for (i = 0; i < data->n_params; i++)
3276 kfree(data->params[i]);
/*
 * Wire up an already-parsed onmax() action: resolve the tracked
 * variable, create a reference to it, create the internal "max"
 * variable, and create field variables for each save() parameter.
 * NOTE(review): error-path lines (gotos/returns) appear missing from
 * this extraction - confirm against the original.
 */
3281 static int onmax_create(struct hist_trigger_data *hist_data,
3282 struct action_data *data)
3284 struct trace_event_file *file = hist_data->event_file;
3285 struct hist_field *var_field, *ref_field, *max_var;
3286 unsigned int var_ref_idx = hist_data->n_var_refs;
3287 struct field_var *field_var;
3288 char *onmax_var_str, *param;
3289 unsigned long flags;
/* the onmax() argument must name a variable, i.e. start with '$' */
3293 onmax_var_str = data->onmax.var_str;
3294 if (onmax_var_str[0] != '$') {
3295 hist_err("onmax: For onmax(x), x must be a variable: ", onmax_var_str);
3300 var_field = find_target_event_var(hist_data, NULL, NULL, onmax_var_str);
3302 hist_err("onmax: Couldn't find onmax variable: ", onmax_var_str);
/* create and register a var ref so the value is resolved per event */
3306 flags = HIST_FIELD_FL_VAR_REF;
3307 ref_field = create_hist_field(hist_data, NULL, flags, NULL);
3311 if (init_var_ref(ref_field, var_field, NULL, NULL)) {
3312 destroy_hist_field(ref_field, 0);
3316 hist_data->var_refs[hist_data->n_var_refs] = ref_field;
3317 ref_field->var_ref_idx = hist_data->n_var_refs++;
3318 data->onmax.var = ref_field;
3320 data->fn = onmax_save;
3321 data->onmax.max_var_ref_idx = var_ref_idx;
/* per-element "max" variable holding the running maximum */
3322 max_var = create_var(hist_data, file, "max", sizeof(u64), "u64");
3323 if (IS_ERR(max_var)) {
3324 hist_err("onmax: Couldn't create onmax variable: ", "max");
3325 ret = PTR_ERR(max_var);
3328 data->onmax.max_var = max_var;
/* one field variable per save() parameter */
3330 for (i = 0; i < data->n_params; i++) {
3331 param = kstrdup(data->params[i], GFP_KERNEL);
3337 field_var = create_target_field_var(hist_data, NULL, NULL, param);
3338 if (IS_ERR(field_var)) {
3339 hist_err("onmax: Couldn't create field variable: ", param);
3340 ret = PTR_ERR(field_var);
3345 hist_data->max_vars[hist_data->n_max_vars++] = field_var;
/* string save vars need extra per-element storage */
3346 if (field_var->val->flags & HIST_FIELD_FL_STRING)
3347 hist_data->n_max_var_str++;
/*
 * Split a comma-separated action parameter list and stash duplicated
 * copies of each param into data->params[], bounded by SYNTH_FIELDS_MAX.
 */
3355 static int parse_action_params(char *params, struct action_data *data)
3357 char *param, *saved_param;
3361 if (data->n_params >= SYNTH_FIELDS_MAX)
3364 param = strsep(&params, ",");
3370 param = strstrip(param);
/* a param shorter than 2 chars can't be valid (e.g. "$x" minimum) */
3371 if (strlen(param) < 2) {
3372 hist_err("Invalid action param: ", param);
3377 saved_param = kstrdup(param, GFP_KERNEL);
3383 data->params[data->n_params++] = saved_param;
/*
 * Parse "onmax(var).fn(params)" into a newly-allocated action_data.
 * Returns ERR_PTR on failure; onmax_destroy() cleans up partial state.
 */
3389 static struct action_data *onmax_parse(char *str)
3391 char *onmax_fn_name, *onmax_var_str;
3392 struct action_data *data;
3395 data = kzalloc(sizeof(*data), GFP_KERNEL);
3397 return ERR_PTR(-ENOMEM);
/* strip "var)" - the variable being tracked for a max */
3399 onmax_var_str = strsep(&str, ")");
3400 if (!onmax_var_str || !str) {
3405 data->onmax.var_str = kstrdup(onmax_var_str, GFP_KERNEL);
3406 if (!data->onmax.var_str) {
3415 onmax_fn_name = strsep(&str, "(");
3416 if (!onmax_fn_name || !str)
/* currently only the save(...) handler takes params */
3419 if (strncmp(onmax_fn_name, "save", strlen("save")) == 0) {
3420 char *params = strsep(&str, ")");
3427 ret = parse_action_params(params, data);
3433 data->onmax.fn_name = kstrdup(onmax_fn_name, GFP_KERNEL);
3434 if (!data->onmax.fn_name) {
/* error path: undo everything and return the error as a pointer */
3441 onmax_destroy(data);
3442 data = ERR_PTR(ret);
/*
 * Free everything owned by an onmatch() action_data and drop the
 * reference held on the synthetic event (under synth_event_mutex).
 */
3446 static void onmatch_destroy(struct action_data *data)
3450 mutex_lock(&synth_event_mutex);
3452 kfree(data->onmatch.match_event);
3453 kfree(data->onmatch.match_event_system);
3454 kfree(data->onmatch.synth_event_name);
3456 for (i = 0; i < data->n_params; i++)
3457 kfree(data->params[i]);
/* release the ref taken when the synth event was resolved */
3459 if (data->onmatch.synth_event)
3460 data->onmatch.synth_event->ref--;
3464 mutex_unlock(&synth_event_mutex);
/* Free the var/val hist_field pair making up one field variable. */
3467 static void destroy_field_var(struct field_var *field_var)
3472 destroy_hist_field(field_var->var, 0);
3473 destroy_hist_field(field_var->val, 0);
/* Free all field variables owned by this hist trigger. */
3478 static void destroy_field_vars(struct hist_trigger_data *hist_data)
3482 for (i = 0; i < hist_data->n_field_vars; i++)
3483 destroy_field_var(hist_data->field_vars[i]);
/*
 * Record a newly created field variable on the hist trigger; string
 * field vars also bump n_field_var_str for per-element string storage.
 */
3486 static void save_field_var(struct hist_trigger_data *hist_data,
3487 struct field_var *field_var)
3489 hist_data->field_vars[hist_data->n_field_vars++] = field_var;
3491 if (field_var->val->flags & HIST_FIELD_FL_STRING)
3492 hist_data->n_field_var_str++;
/* Free all synthetic-event variable references held by this trigger. */
3496 static void destroy_synth_var_refs(struct hist_trigger_data *hist_data)
3500 for (i = 0; i < hist_data->n_synth_var_refs; i++)
3501 destroy_hist_field(hist_data->synth_var_refs[i], 0);
/*
 * Record a variable reference used as a synthetic event parameter,
 * registering it in both synth_var_refs[] and the global var_refs[].
 */
3504 static void save_synth_var_ref(struct hist_trigger_data *hist_data,
3505 struct hist_field *var_ref)
3507 hist_data->synth_var_refs[hist_data->n_synth_var_refs++] = var_ref;
3509 hist_data->var_refs[hist_data->n_var_refs] = var_ref;
3510 var_ref->var_ref_idx = hist_data->n_var_refs++;
/*
 * Check that the hist field supplying param #field_pos exists on the
 * synthetic event and that its type string matches exactly.
 * NOTE(review): the return statements appear missing from this
 * extraction - confirm against the original.
 */
3513 static int check_synth_field(struct synth_event *event,
3514 struct hist_field *hist_field,
3515 unsigned int field_pos)
3517 struct synth_field *field;
3519 if (field_pos >= event->n_fields)
3522 field = event->fields[field_pos];
3524 if (strcmp(field->type, hist_field->type) != 0)
/*
 * Resolve a "$var" onmatch() param: look on the target event first,
 * then fall back to the onmatch() match event.
 */
3530 static struct hist_field *
3531 onmatch_find_var(struct hist_trigger_data *hist_data, struct action_data *data,
3532 char *system, char *event, char *var)
3534 struct hist_field *hist_field;
3536 var++; /* skip '$' */
3538 hist_field = find_target_event_var(hist_data, system, event, var);
/* not on the target event: retry against the match event */
3541 system = data->onmatch.match_event_system;
3542 event = data->onmatch.match_event;
3545 hist_field = find_event_var(hist_data, system, event, var);
3549 hist_err_event("onmatch: Couldn't find onmatch param: $", system, event, var);
/*
 * Turn a bare field name used as an onmatch() param into a variable:
 * first try the target event, then create a field-var hist trigger on
 * the (possibly different) match event.
 */
3554 static struct hist_field *
3555 onmatch_create_field_var(struct hist_trigger_data *hist_data,
3556 struct action_data *data, char *system,
3557 char *event, char *var)
3559 struct hist_field *hist_field = NULL;
3560 struct field_var *field_var;
3563 * First try to create a field var on the target event (the
3564 * currently being defined). This will create a variable for
3565 * unqualified fields on the target event, or if qualified,
3566 * target fields that have qualified names matching the target.
3568 field_var = create_target_field_var(hist_data, system, event, var);
3570 if (field_var && !IS_ERR(field_var)) {
3571 save_field_var(hist_data, field_var);
3572 hist_field = field_var->var;
3576 * If no explicit system.event is specfied, default to
3577 * looking for fields on the onmatch(system.event.xxx)
3581 system = data->onmatch.match_event_system;
3582 event = data->onmatch.match_event;
3586 * At this point, we're looking at a field on another
3587 * event. Because we can't modify a hist trigger on
3588 * another event to add a variable for a field, we need
3589 * to create a new trigger on that event and create the
3590 * variable at the same time.
3592 hist_field = create_field_var_hist(hist_data, system, event, var);
3593 if (IS_ERR(hist_field))
/* error path: free the half-built field variable */
3599 destroy_field_var(field_var);
/*
 * Wire up an already-parsed onmatch() action: look up the synthetic
 * event, resolve each param to a variable reference, and type-check
 * params against the synthetic event's fields.
 * NOTE(review): several error-path lines appear missing from this
 * extraction - confirm against the original.
 */
3604 static int onmatch_create(struct hist_trigger_data *hist_data,
3605 struct trace_event_file *file,
3606 struct action_data *data)
3608 char *event_name, *param, *system = NULL;
3609 struct hist_field *hist_field, *var_ref;
3610 unsigned int i, var_ref_idx;
3611 unsigned int field_pos = 0;
3612 struct synth_event *event;
/* synth event lookup must be serialized against event creation */
3615 mutex_lock(&synth_event_mutex);
3616 event = find_synth_event(data->onmatch.synth_event_name);
3618 hist_err("onmatch: Couldn't find synthetic event: ", data->onmatch.synth_event_name);
3619 mutex_unlock(&synth_event_mutex);
3623 mutex_unlock(&synth_event_mutex);
3625 var_ref_idx = hist_data->n_var_refs;
3627 for (i = 0; i < data->n_params; i++) {
3630 p = param = kstrdup(data->params[i], GFP_KERNEL);
/* split an optional "system.event." qualifier off the param */
3636 system = strsep(&param, ".");
3638 param = (char *)system;
3639 system = event_name = NULL;
3641 event_name = strsep(&param, ".");
/* "$var" refs an existing variable; otherwise auto-create one */
3649 if (param[0] == '$')
3650 hist_field = onmatch_find_var(hist_data, data, system,
3653 hist_field = onmatch_create_field_var(hist_data, data,
3664 if (check_synth_field(event, hist_field, field_pos) == 0) {
3665 var_ref = create_var_ref(hist_field, system, event_name);
3672 save_synth_var_ref(hist_data, var_ref);
3678 hist_err_event("onmatch: Param type doesn't match synthetic event field type: ",
3679 system, event_name, param);
/* every synthetic event field must be supplied by a param */
3685 if (field_pos != event->n_fields) {
3686 hist_err("onmatch: Param count doesn't match synthetic event field count: ", event->name);
3691 data->fn = action_trace;
3692 data->onmatch.synth_event = event;
3693 data->onmatch.var_ref_idx = var_ref_idx;
3697 mutex_lock(&synth_event_mutex);
3699 mutex_unlock(&synth_event_mutex);
/*
 * Parse "onmatch(system.event).synth_event(params)" into a new
 * action_data.  Returns ERR_PTR on failure; onmatch_destroy() cleans
 * up partial state.
 */
3704 static struct action_data *onmatch_parse(struct trace_array *tr, char *str)
3706 char *match_event, *match_event_system;
3707 char *synth_event_name, *params;
3708 struct action_data *data;
3711 data = kzalloc(sizeof(*data), GFP_KERNEL);
3713 return ERR_PTR(-ENOMEM);
3715 match_event = strsep(&str, ")");
3716 if (!match_event || !str) {
3717 hist_err("onmatch: Missing closing paren: ", match_event);
3721 match_event_system = strsep(&match_event, ".");
3723 hist_err("onmatch: Missing subsystem for match event: ", match_event_system);
/* the system.event pair must name a real trace event */
3727 if (IS_ERR(event_file(tr, match_event_system, match_event))) {
3728 hist_err_event("onmatch: Invalid subsystem or event name: ",
3729 match_event_system, match_event, NULL);
3733 data->onmatch.match_event = kstrdup(match_event, GFP_KERNEL);
3734 if (!data->onmatch.match_event) {
3739 data->onmatch.match_event_system = kstrdup(match_event_system, GFP_KERNEL);
3740 if (!data->onmatch.match_event_system) {
3747 hist_err("onmatch: Missing . after onmatch(): ", str);
3751 synth_event_name = strsep(&str, "(");
3752 if (!synth_event_name || !str) {
3753 hist_err("onmatch: Missing opening paramlist paren: ", synth_event_name);
3757 data->onmatch.synth_event_name = kstrdup(synth_event_name, GFP_KERNEL);
3758 if (!data->onmatch.synth_event_name) {
/* nothing may follow the closing param-list paren */
3763 params = strsep(&str, ")");
3764 if (!params || !str || (str && strlen(str))) {
3765 hist_err("onmatch: Missing closing paramlist paren: ", params);
3769 ret = parse_action_params(params, data);
/* error path: undo everything and return the error as a pointer */
3775 onmatch_destroy(data);
3776 data = ERR_PTR(ret);
/*
 * Create the implicit "hitcount" value field every hist trigger has,
 * at fixed slot HITCOUNT_IDX.
 */
3780 static int create_hitcount_val(struct hist_trigger_data *hist_data)
3782 hist_data->fields[HITCOUNT_IDX] =
3783 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
3784 if (!hist_data->fields[HITCOUNT_IDX])
3787 hist_data->n_vals++;
3788 hist_data->n_fields++;
3790 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
/*
 * Common helper for value and variable fields: parse field_str as an
 * expression and install the result at fields[val_idx].
 */
3796 static int __create_val_field(struct hist_trigger_data *hist_data,
3797 unsigned int val_idx,
3798 struct trace_event_file *file,
3799 char *var_name, char *field_str,
3800 unsigned long flags)
3802 struct hist_field *hist_field;
3805 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
3806 if (IS_ERR(hist_field)) {
3807 ret = PTR_ERR(hist_field);
3811 hist_data->fields[val_idx] = hist_field;
3813 ++hist_data->n_vals;
3814 ++hist_data->n_fields;
/* vals and vars share the same index space, hence the combined bound */
3816 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
/* Create a plain (non-variable) value field at val_idx. */
3822 static int create_val_field(struct hist_trigger_data *hist_data,
3823 unsigned int val_idx,
3824 struct trace_event_file *file,
3827 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
3830 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
/*
 * Create a named variable field (var_name=expr), rejecting duplicate
 * variable names on the same event unless the trigger is being removed.
 */
3833 static int create_var_field(struct hist_trigger_data *hist_data,
3834 unsigned int val_idx,
3835 struct trace_event_file *file,
3836 char *var_name, char *expr_str)
3838 unsigned long flags = 0;
3840 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
3843 if (find_var(hist_data, file, var_name) && !hist_data->remove) {
3844 hist_err("Variable already defined: ", var_name);
3848 flags |= HIST_FIELD_FL_VAR;
3849 hist_data->n_vars++;
3850 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
3853 return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
/*
 * Create all value fields from the trigger's "vals=" string; hitcount
 * is always created first and an explicit "hitcount" entry is skipped.
 */
3856 static int create_val_fields(struct hist_trigger_data *hist_data,
3857 struct trace_event_file *file)
3859 char *fields_str, *field_str;
3860 unsigned int i, j = 1;
3863 ret = create_hitcount_val(hist_data);
3867 fields_str = hist_data->attrs->vals_str;
/* skip the "vals=" / "values=" prefix */
3871 strsep(&fields_str, "=");
3875 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
3876 j < TRACING_MAP_VALS_MAX; i++) {
3877 field_str = strsep(&fields_str, ",");
/* "hitcount" already exists at slot 0 */
3881 if (strcmp(field_str, "hitcount") == 0)
3884 ret = create_val_field(hist_data, j++, file, field_str);
/* leftover input means too many values were specified */
3889 if (fields_str && (strcmp(fields_str, "hitcount") != 0))
/*
 * Create one key field at key_idx/key_offset.  "stacktrace" becomes a
 * fixed-size stacktrace key; anything else is parsed as an expression.
 */
3895 static int create_key_field(struct hist_trigger_data *hist_data,
3896 unsigned int key_idx,
3897 unsigned int key_offset,
3898 struct trace_event_file *file,
3901 struct hist_field *hist_field = NULL;
3903 unsigned long flags = 0;
3904 unsigned int key_size;
3907 if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
3910 flags |= HIST_FIELD_FL_KEY;
3912 if (strcmp(field_str, "stacktrace") == 0) {
3913 flags |= HIST_FIELD_FL_STACKTRACE;
3914 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
3915 hist_field = create_hist_field(hist_data, NULL, flags, NULL);
3917 hist_field = parse_expr(hist_data, file, field_str, flags,
3919 if (IS_ERR(hist_field)) {
3920 ret = PTR_ERR(hist_field);
/* a key must be a concrete value, not a reference to a variable */
3924 if (hist_field->flags & HIST_FIELD_FL_VAR_REF) {
3925 hist_err("Using variable references as keys not supported: ", field_str);
3926 destroy_hist_field(hist_field, 0);
3931 key_size = hist_field->size;
3934 hist_data->fields[key_idx] = hist_field;
/* keys are packed at u64 granularity within the compound key */
3936 key_size = ALIGN(key_size, sizeof(u64));
3937 hist_data->fields[key_idx]->size = key_size;
3938 hist_data->fields[key_idx]->offset = key_offset;
3940 hist_data->key_size += key_size;
3942 if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
3947 hist_data->n_keys++;
3948 hist_data->n_fields++;
3950 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
/*
 * Create all key fields from the trigger's "keys=" string; key fields
 * occupy the index slots immediately after the value fields.
 */
3958 static int create_key_fields(struct hist_trigger_data *hist_data,
3959 struct trace_event_file *file)
3961 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
3962 char *fields_str, *field_str;
3965 fields_str = hist_data->attrs->keys_str;
/* skip the "key=" / "keys=" prefix */
3969 strsep(&fields_str, "=");
3973 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
3974 field_str = strsep(&fields_str, ",");
3977 ret = create_key_field(hist_data, i, key_offset,
/*
 * Create a variable field for each parsed var definition, placed after
 * the value fields.
 */
3992 static int create_var_fields(struct hist_trigger_data *hist_data,
3993 struct trace_event_file *file)
3995 unsigned int i, j = hist_data->n_vals;
3998 unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
4000 for (i = 0; i < n_vars; i++) {
4001 char *var_name = hist_data->attrs->var_defs.name[i];
4002 char *expr = hist_data->attrs->var_defs.expr[i];
4004 ret = create_var_field(hist_data, j++, file, var_name, expr);
/* Free the parsed name/expr strings of all variable definitions. */
4012 static void free_var_defs(struct hist_trigger_data *hist_data)
4016 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
4017 kfree(hist_data->attrs->var_defs.name[i]);
4018 kfree(hist_data->attrs->var_defs.expr[i]);
4021 hist_data->attrs->var_defs.n_vars = 0;
/*
 * Parse the trigger's assignment strings ("name=expr,name=expr,...")
 * into the attrs->var_defs name/expr arrays.  On error, all partial
 * definitions are freed via free_var_defs().
 */
4024 static int parse_var_defs(struct hist_trigger_data *hist_data)
4026 char *s, *str, *var_name, *field_str;
4027 unsigned int i, j, n_vars = 0;
4030 for (i = 0; i < hist_data->attrs->n_assignments; i++) {
4031 str = hist_data->attrs->assignment_str[i];
4032 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
4033 field_str = strsep(&str, ",");
4037 var_name = strsep(&field_str, "=");
4038 if (!var_name || !field_str) {
4039 hist_err("Malformed assignment: ", var_name);
4044 if (n_vars == TRACING_MAP_VARS_MAX) {
4045 hist_err("Too many variables defined: ", var_name);
4050 s = kstrdup(var_name, GFP_KERNEL);
4055 hist_data->attrs->var_defs.name[n_vars] = s;
4057 s = kstrdup(field_str, GFP_KERNEL);
/* expr dup failed: free the name we just saved for this slot */
4059 kfree(hist_data->attrs->var_defs.name[n_vars]);
4063 hist_data->attrs->var_defs.expr[n_vars++] = s;
4065 hist_data->attrs->var_defs.n_vars = n_vars;
4071 free_var_defs(hist_data);
/*
 * Top-level field setup: parse var defs, then create value, variable
 * and key fields; the parsed var-def strings are freed afterwards.
 */
4076 static int create_hist_fields(struct hist_trigger_data *hist_data,
4077 struct trace_event_file *file)
4081 ret = parse_var_defs(hist_data);
4085 ret = create_val_fields(hist_data, file);
4089 ret = create_var_fields(hist_data, file);
4093 ret = create_key_fields(hist_data, file);
4097 free_var_defs(hist_data);
/*
 * Interpret a sort-direction suffix: "descending"/"ascending".
 * NOTE(review): the return statements are missing from this
 * extraction; the original returns 1/0/default/-EINVAL.
 */
4102 static int is_descending(const char *str)
4107 if (strcmp(str, "descending") == 0)
4110 if (strcmp(str, "ascending") == 0)
/*
 * Parse the trigger's "sort=" string into sort_keys[].  There is
 * always at least one sort key (hitcount).  Variable fields are not
 * sortable and are skipped when matching field names.
 */
4116 static int create_sort_keys(struct hist_trigger_data *hist_data)
4118 char *fields_str = hist_data->attrs->sort_key_str;
4119 struct tracing_map_sort_key *sort_key;
4120 int descending, ret = 0;
4121 unsigned int i, j, k;
4123 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
/* skip the "sort=" prefix */
4128 strsep(&fields_str, "=");
4134 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
4135 struct hist_field *hist_field;
4136 char *field_str, *field_name;
4137 const char *test_name;
4139 sort_key = &hist_data->sort_keys[i];
4141 field_str = strsep(&fields_str, ",");
/* leftover input after the last slot means too many sort keys */
4148 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
4153 field_name = strsep(&field_str, ".");
4159 if (strcmp(field_name, "hitcount") == 0) {
4160 descending = is_descending(field_str);
4161 if (descending < 0) {
4165 sort_key->descending = descending;
/* match the name against non-variable fields, tracking map index k */
4169 for (j = 1, k = 1; j < hist_data->n_fields; j++) {
4172 hist_field = hist_data->fields[j];
4173 if (hist_field->flags & HIST_FIELD_FL_VAR)
4178 test_name = hist_field_name(hist_field, 0);
4180 if (strcmp(field_name, test_name) == 0) {
4181 sort_key->field_idx = idx;
4182 descending = is_descending(field_str);
4183 if (descending < 0) {
4187 sort_key->descending = descending;
/* fell off the end of the loop: no field with that name */
4191 if (j == hist_data->n_fields) {
4197 hist_data->n_sort_keys = i;
/*
 * Free all actions, dispatching on the action function pointer to the
 * matching destructor (onmatch vs onmax).
 */
4202 static void destroy_actions(struct hist_trigger_data *hist_data)
4206 for (i = 0; i < hist_data->n_actions; i++) {
4207 struct action_data *data = hist_data->actions[i];
4209 if (data->fn == action_trace)
4210 onmatch_destroy(data);
4211 else if (data->fn == onmax_save)
4212 onmax_destroy(data);
/*
 * Parse each action string ("onmatch(...)" / "onmax(...)") into an
 * action_data and collect them in hist_data->actions[].
 */
4218 static int parse_actions(struct hist_trigger_data *hist_data)
4220 struct trace_array *tr = hist_data->event_file->tr;
4221 struct action_data *data;
4226 for (i = 0; i < hist_data->attrs->n_actions; i++) {
4227 str = hist_data->attrs->action_str[i];
4229 if (strncmp(str, "onmatch(", strlen("onmatch(")) == 0) {
4230 char *action_str = str + strlen("onmatch(");
4232 data = onmatch_parse(tr, action_str);
4234 ret = PTR_ERR(data);
/* fn doubles as the action-type tag used elsewhere for dispatch */
4237 data->fn = action_trace;
4238 } else if (strncmp(str, "onmax(", strlen("onmax(")) == 0) {
4239 char *action_str = str + strlen("onmax(");
4241 data = onmax_parse(action_str);
4243 ret = PTR_ERR(data);
4246 data->fn = onmax_save;
4252 hist_data->actions[hist_data->n_actions++] = data;
/*
 * Second-phase action setup: for each parsed action, create the
 * variables/references it needs (onmatch_create / onmax_create).
 */
4258 static int create_actions(struct hist_trigger_data *hist_data,
4259 struct trace_event_file *file)
4261 struct action_data *data;
4265 for (i = 0; i < hist_data->attrs->n_actions; i++) {
4266 data = hist_data->actions[i];
4268 if (data->fn == action_trace) {
4269 ret = onmatch_create(hist_data, file, data);
4272 } else if (data->fn == onmax_save) {
4273 ret = onmax_create(hist_data, data);
/* Print per-element action output; only onmax has anything to show. */
4282 static void print_actions(struct seq_file *m,
4283 struct hist_trigger_data *hist_data,
4284 struct tracing_map_elt *elt)
4288 for (i = 0; i < hist_data->n_actions; i++) {
4289 struct action_data *data = hist_data->actions[i];
4291 if (data->fn == onmax_save)
4292 onmax_print(m, hist_data, elt, data);
/* Reconstruct the ":onmax(var).save(...)" spec for trigger display. */
4296 static void print_onmax_spec(struct seq_file *m,
4297 struct hist_trigger_data *hist_data,
4298 struct action_data *data)
4302 seq_puts(m, ":onmax(");
4303 seq_printf(m, "%s", data->onmax.var_str);
4304 seq_printf(m, ").%s(", data->onmax.fn_name);
4306 for (i = 0; i < hist_data->n_max_vars; i++) {
4307 seq_printf(m, "%s", hist_data->max_vars[i]->var->var.name);
/* comma-separate all but the last save var (separator line missing) */
4308 if (i < hist_data->n_max_vars - 1)
/* Reconstruct the ":onmatch(sys.event).synth(...)" spec for display. */
4314 static void print_onmatch_spec(struct seq_file *m,
4315 struct hist_trigger_data *hist_data,
4316 struct action_data *data)
4320 seq_printf(m, ":onmatch(%s.%s).", data->onmatch.match_event_system,
4321 data->onmatch.match_event)
4323 seq_printf(m, "%s(", data->onmatch.synth_event->name);
4325 for (i = 0; i < data->n_params; i++) {
4328 seq_printf(m, "%s", data->params[i]);
/* Print the spec of every action, dispatching on the fn pointer tag. */
4334 static void print_actions_spec(struct seq_file *m,
4335 struct hist_trigger_data *hist_data)
4339 for (i = 0; i < hist_data->n_actions; i++) {
4340 struct action_data *data = hist_data->actions[i];
4342 if (data->fn == action_trace)
4343 print_onmatch_spec(m, hist_data, data);
4344 else if (data->fn == onmax_save)
4345 print_onmax_spec(m, hist_data, data);
/* Free the saved command strings and records of field-var hist triggers. */
4349 static void destroy_field_var_hists(struct hist_trigger_data *hist_data)
4353 for (i = 0; i < hist_data->n_field_var_hists; i++) {
4354 kfree(hist_data->field_var_hists[i]->cmd);
4355 kfree(hist_data->field_var_hists[i]);
/* Tear down a hist trigger: attrs, fields, map, actions and var refs. */
4359 static void destroy_hist_data(struct hist_trigger_data *hist_data)
4364 destroy_hist_trigger_attrs(hist_data->attrs);
4365 destroy_hist_fields(hist_data);
4366 tracing_map_destroy(hist_data->map);
4368 destroy_actions(hist_data);
4369 destroy_field_vars(hist_data);
4370 destroy_field_var_hists(hist_data);
4371 destroy_synth_var_refs(hist_data);
/*
 * Register each hist field with the tracing_map: keys get a compare
 * function, values become sum fields, and variables get var slots.
 */
4376 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
4378 struct tracing_map *map = hist_data->map;
4379 struct ftrace_event_field *field;
4380 struct hist_field *hist_field;
4383 for_each_hist_field(i, hist_data) {
4384 hist_field = hist_data->fields[i];
4385 if (hist_field->flags & HIST_FIELD_FL_KEY) {
4386 tracing_map_cmp_fn_t cmp_fn;
4388 field = hist_field->field;
/* stacktrace keys are compared by raw memcmp of the whole key */
4390 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
4391 cmp_fn = tracing_map_cmp_none;
4393 cmp_fn = tracing_map_cmp_num(hist_field->size,
4394 hist_field->is_signed);
4395 else if (is_string_field(field))
4396 cmp_fn = tracing_map_cmp_string;
4398 cmp_fn = tracing_map_cmp_num(field->size,
4400 idx = tracing_map_add_key_field(map,
/* non-key, non-var fields accumulate as sums */
4403 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
4404 idx = tracing_map_add_sum_field(map);
4409 if (hist_field->flags & HIST_FIELD_FL_VAR) {
4410 idx = tracing_map_add_var(map);
4413 hist_field->var.idx = idx;
4414 hist_field->var.hist_data = hist_data;
/*
 * Allocate and fully initialize a hist_trigger_data: parse actions,
 * create fields and sort keys, then create the backing tracing_map.
 * On error the attrs pointer is cleared before teardown so the caller
 * keeps ownership of attrs.
 */
4421 static struct hist_trigger_data *
4422 create_hist_data(unsigned int map_bits,
4423 struct hist_trigger_attrs *attrs,
4424 struct trace_event_file *file,
4427 const struct tracing_map_ops *map_ops = NULL;
4428 struct hist_trigger_data *hist_data;
4431 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
4433 return ERR_PTR(-ENOMEM);
4435 hist_data->attrs = attrs;
4436 hist_data->remove = remove;
4437 hist_data->event_file = file;
4439 ret = parse_actions(hist_data);
4443 ret = create_hist_fields(hist_data, file);
4447 ret = create_sort_keys(hist_data);
4451 map_ops = &hist_trigger_elt_data_ops;
4453 hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
4454 map_ops, hist_data);
4455 if (IS_ERR(hist_data->map)) {
4456 ret = PTR_ERR(hist_data->map);
4457 hist_data->map = NULL;
4461 ret = create_tracing_map_fields(hist_data);
/* error path: don't let destroy_hist_data() free the caller's attrs */
4467 hist_data->attrs = NULL;
4469 destroy_hist_data(hist_data);
4471 hist_data = ERR_PTR(ret);
/*
 * Per-hit element update: evaluate each val field (updating sums or
 * setting variables), set variables attached to key fields, and update
 * any field variables.
 */
4476 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
4477 struct tracing_map_elt *elt, void *rec,
4478 struct ring_buffer_event *rbe,
4481 struct hist_elt_data *elt_data;
4482 struct hist_field *hist_field;
4483 unsigned int i, var_idx;
4486 elt_data = elt->private_data;
4487 elt_data->var_ref_vals = var_ref_vals;
4489 for_each_hist_val_field(i, hist_data) {
4490 hist_field = hist_data->fields[i];
4491 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
/* variable fields store the value; plain vals accumulate a sum */
4492 if (hist_field->flags & HIST_FIELD_FL_VAR) {
4493 var_idx = hist_field->var.idx;
4494 tracing_map_set_var(elt, var_idx, hist_val);
4497 tracing_map_update_sum(elt, i, hist_val);
4500 for_each_hist_key_field(i, hist_data) {
4501 hist_field = hist_data->fields[i];
4502 if (hist_field->flags & HIST_FIELD_FL_VAR) {
4503 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
4504 var_idx = hist_field->var.idx;
4505 tracing_map_set_var(elt, var_idx, hist_val);
4509 update_field_vars(hist_data, elt, rbe, rec);
/*
 * Copy one key field's value into the compound key at its offset.
 * For string keys the copy length is derived from the string kind
 * (dynamic/ptr/static) and clamped to leave room for a NUL.
 */
4512 static inline void add_to_key(char *compound_key, void *key,
4513 struct hist_field *key_field, void *rec)
4515 size_t size = key_field->size;
4517 if (key_field->flags & HIST_FIELD_FL_STRING) {
4518 struct ftrace_event_field *field;
4520 field = key_field->field;
/* dyn strings: length lives in the high 16 bits of the meta word */
4521 if (field->filter_type == FILTER_DYN_STRING)
4522 size = *(u32 *)(rec + field->offset) >> 16;
4523 else if (field->filter_type == FILTER_PTR_STRING)
4525 else if (field->filter_type == FILTER_STATIC_STRING)
4528 /* ensure NULL-termination */
4529 if (size > key_field->size - 1)
4530 size = key_field->size - 1;
4533 memcpy(compound_key + key_field->offset, key, size);
/* Invoke every registered action for this hist element. */
4537 hist_trigger_actions(struct hist_trigger_data *hist_data,
4538 struct tracing_map_elt *elt, void *rec,
4539 struct ring_buffer_event *rbe, u64 *var_ref_vals)
4541 struct action_data *data;
4544 for (i = 0; i < hist_data->n_actions; i++) {
4545 data = hist_data->actions[i];
4546 data->fn(hist_data, elt, rec, rbe, data, var_ref_vals);
/*
 * Hot-path trigger handler: build the (possibly compound) key from the
 * record, resolve variable references, insert/update the map element,
 * and run actions.  Runs in trace-event context.
 */
4550 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
4551 struct ring_buffer_event *rbe)
4553 struct hist_trigger_data *hist_data = data->private_data;
4554 bool use_compound_key = (hist_data->n_keys > 1);
4555 unsigned long entries[HIST_STACKTRACE_DEPTH];
4556 u64 var_ref_vals[TRACING_MAP_VARS_MAX];
4557 char compound_key[HIST_KEY_SIZE_MAX];
4558 struct tracing_map_elt *elt = NULL;
4559 struct stack_trace stacktrace;
4560 struct hist_field *key_field;
4565 memset(compound_key, 0, hist_data->key_size);
4567 for_each_hist_key_field(i, hist_data) {
4568 key_field = hist_data->fields[i];
/* stacktrace keys: capture the stack into the on-stack buffer */
4570 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
4571 stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
4572 stacktrace.entries = entries;
4573 stacktrace.nr_entries = 0;
4574 stacktrace.skip = HIST_STACKTRACE_SKIP;
4576 memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
4577 save_stack_trace(&stacktrace);
4581 field_contents = key_field->fn(key_field, elt, rbe, rec);
/* string keys force the compound-key path (value is a pointer) */
4582 if (key_field->flags & HIST_FIELD_FL_STRING) {
4583 key = (void *)(unsigned long)field_contents;
4584 use_compound_key = true;
4586 key = (void *)&field_contents;
4589 if (use_compound_key)
4590 add_to_key(compound_key, key, key_field, rec);
4593 if (use_compound_key)
/* bail if any referenced variable isn't set yet (self refs excluded) */
4596 if (hist_data->n_var_refs &&
4597 !resolve_var_refs(hist_data, key, var_ref_vals, false))
4600 elt = tracing_map_insert(hist_data->map, key);
4604 hist_trigger_elt_update(hist_data, elt, rec, rbe, var_ref_vals);
4606 if (resolve_var_refs(hist_data, key, var_ref_vals, true))
4607 hist_trigger_actions(hist_data, elt, rec, rbe, var_ref_vals);
/*
 * Print a saved stacktrace key, one symbolized frame per line,
 * stopping at the ULONG_MAX end-of-trace marker.
 */
4610 static void hist_trigger_stacktrace_print(struct seq_file *m,
4611 unsigned long *stacktrace_entries,
4612 unsigned int max_entries)
4614 char str[KSYM_SYMBOL_LEN];
4615 unsigned int spaces = 8;
4618 for (i = 0; i < max_entries; i++) {
4619 if (stacktrace_entries[i] == ULONG_MAX)
4622 seq_printf(m, "%*c", 1 + spaces, ' ');
4623 sprint_symbol(str, stacktrace_entries[i]);
4624 seq_printf(m, "%s\n", str);
/*
 * Print one histogram entry: each key field formatted per its modifier
 * flags (.hex/.sym/.sym-offset/.execname/.syscall/stacktrace/.log2/
 * string/plain), then hitcount and the remaining sum values.
 */
4629 hist_trigger_entry_print(struct seq_file *m,
4630 struct hist_trigger_data *hist_data, void *key,
4631 struct tracing_map_elt *elt)
4633 struct hist_field *key_field;
4634 char str[KSYM_SYMBOL_LEN];
4635 bool multiline = false;
4636 const char *field_name;
4642 for_each_hist_key_field(i, hist_data) {
4643 key_field = hist_data->fields[i];
4645 if (i > hist_data->n_vals)
4648 field_name = hist_field_name(key_field, 0);
4650 if (key_field->flags & HIST_FIELD_FL_HEX) {
4651 uval = *(u64 *)(key + key_field->offset);
4652 seq_printf(m, "%s: %llx", field_name, uval);
4653 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
4654 uval = *(u64 *)(key + key_field->offset);
4655 sprint_symbol_no_offset(str, uval);
4656 seq_printf(m, "%s: [%llx] %-45s", field_name,
4658 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
4659 uval = *(u64 *)(key + key_field->offset);
4660 sprint_symbol(str, uval);
4661 seq_printf(m, "%s: [%llx] %-55s", field_name,
4663 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
/* comm is cached per-element alongside the pid key */
4664 struct hist_elt_data *elt_data = elt->private_data;
4667 if (WARN_ON_ONCE(!elt_data))
4670 comm = elt_data->comm;
4672 uval = *(u64 *)(key + key_field->offset);
4673 seq_printf(m, "%s: %-16s[%10llu]", field_name,
4675 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
4676 const char *syscall_name;
4678 uval = *(u64 *)(key + key_field->offset);
4679 syscall_name = get_syscall_name(uval);
4681 syscall_name = "unknown_syscall";
4683 seq_printf(m, "%s: %-30s[%3llu]", field_name,
4684 syscall_name, uval);
4685 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
4686 seq_puts(m, "stacktrace:\n");
4687 hist_trigger_stacktrace_print(m,
4688 key + key_field->offset,
4689 HIST_STACKTRACE_DEPTH);
4691 } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
4692 seq_printf(m, "%s: ~ 2^%-2llu", field_name,
4693 *(u64 *)(key + key_field->offset));
4694 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
4695 seq_printf(m, "%s: %-50s", field_name,
4696 (char *)(key + key_field->offset));
4698 uval = *(u64 *)(key + key_field->offset);
4699 seq_printf(m, "%s: %10llu", field_name, uval);
4708 seq_printf(m, " hitcount: %10llu",
4709 tracing_map_read_sum(elt, HITCOUNT_IDX));
4711 for (i = 1; i < hist_data->n_vals; i++) {
4712 field_name = hist_field_name(hist_data->fields[i], 0);
/* variables and bare expressions aren't displayed as values */
4714 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
4715 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
4718 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
4719 seq_printf(m, " %s: %10llx", field_name,
4720 tracing_map_read_sum(elt, i));
4722 seq_printf(m, " %s: %10llu", field_name,
4723 tracing_map_read_sum(elt, i));
4727 print_actions(m, hist_data, elt);
/*
 * Sort the map's entries by the configured sort keys, print each one,
 * then free the sort entries.  Returns the sorted-entry count (or a
 * negative error from the sort, per the original).
 */
4732 static int print_entries(struct seq_file *m,
4733 struct hist_trigger_data *hist_data)
4735 struct tracing_map_sort_entry **sort_entries = NULL;
4736 struct tracing_map *map = hist_data->map;
4739 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
4740 hist_data->n_sort_keys,
4745 for (i = 0; i < n_entries; i++)
4746 hist_trigger_entry_print(m, hist_data,
4747 sort_entries[i]->key,
4748 sort_entries[i]->elt);
4750 tracing_map_destroy_sort_entries(sort_entries, n_entries);
/*
 * Print one trigger's histogram: the trigger-info header, all sorted
 * entries, and the hits/entries/drops totals.
 */
4755 static void hist_trigger_show(struct seq_file *m,
4756 struct event_trigger_data *data, int n)
4758 struct hist_trigger_data *hist_data;
4762 seq_puts(m, "\n\n");
4764 seq_puts(m, "# event histogram\n#\n# trigger info: ");
4765 data->ops->print(m, data->ops, data);
4766 seq_puts(m, "#\n\n");
4768 hist_data = data->private_data;
4769 n_entries = print_entries(m, hist_data);
4773 seq_printf(m, "\nTotals:\n    Hits: %llu\n    Entries: %u\n    Dropped: %llu\n",
4774 (u64)atomic64_read(&hist_data->map->hits),
4775 n_entries, (u64)atomic64_read(&hist_data->map->drops));
/*
 * seq_file show handler for the per-event "hist" file: print every
 * hist trigger attached to the event, then any recorded hist error.
 * Runs under event_mutex so the trigger list and file stay stable.
 */
4778 static int hist_show(struct seq_file *m, void *v)
4780 struct event_trigger_data *data;
4781 struct trace_event_file *event_file;
4784 mutex_lock(&event_mutex);
4786 event_file = event_file_data(m->private);
4787 if (unlikely(!event_file)) {
4792 list_for_each_entry_rcu(data, &event_file->triggers, list) {
4793 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
4794 hist_trigger_show(m, data, n++);
4797 if (have_hist_err()) {
4798 seq_printf(m, "\nERROR: %s\n", hist_err_str);
4799 seq_printf(m, "  Last command: %s\n", last_hist_cmd);
4803 mutex_unlock(&event_mutex);
/* open() for the 'hist' file: plain single_open() around hist_show(). */
4808 static int event_hist_open(struct inode *inode, struct file *file)
4810 return single_open(file, hist_show, file);
/* file_operations for the per-event tracefs 'hist' file */
4813 const struct file_operations event_hist_fops = {
4814 .open = event_hist_open,
4816 .llseek = seq_lseek,
4817 .release = single_release,
/*
 * hist_field_print - print one hist field in trigger-definition syntax:
 * optional "var=" prefix, the field name (or common_timestamp/cpu), and
 * any ".flag" modifier suffix.
 */
4820 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
4822 const char *field_name = hist_field_name(hist_field, 0);
/* variable definitions are printed as "name=..." */
4824 if (hist_field->var.name)
4825 seq_printf(m, "%s=", hist_field->var.name);
4827 if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
4828 seq_puts(m, "common_timestamp");
4829 else if (hist_field->flags & HIST_FIELD_FL_CPU)
4831 else if (field_name) {
/* NOTE(review): the VAR_REF/ALIAS branch's body is elided in this view */
4832 if (hist_field->flags & HIST_FIELD_FL_VAR_REF ||
4833 hist_field->flags & HIST_FIELD_FL_ALIAS)
4835 seq_printf(m, "%s", field_name);
/* append ".hex", ".sym", etc. when a modifier flag is set */
4838 if (hist_field->flags) {
4839 const char *flags_str = get_hist_field_flags(hist_field);
4842 seq_printf(m, ".%s", flags_str);
/*
 * event_hist_trigger_print - reconstruct and print the trigger's command
 * string: "hist:[name:]keys=...:vals=...:sort=...:size=...[ if filter]"
 * followed by the paused/active state.
 */
4846 static int event_hist_trigger_print(struct seq_file *m,
4847 struct event_trigger_ops *ops,
4848 struct event_trigger_data *data)
4850 struct hist_trigger_data *hist_data = data->private_data;
4851 struct hist_field *field;
4852 bool have_var = false;
4855 seq_puts(m, "hist:");
/* named triggers carry a "name:" prefix */
4858 seq_printf(m, "%s:", data->name);
4860 seq_puts(m, "keys=");
4862 for_each_hist_key_field(i, hist_data) {
4863 field = hist_data->fields[i];
/* NOTE(review): separator printing between keys is elided in this view */
4865 if (i > hist_data->n_vals)
4868 if (field->flags & HIST_FIELD_FL_STACKTRACE)
4869 seq_puts(m, "stacktrace")
4871 hist_field_print(m, field);
4874 seq_puts(m, ":vals=");
/* print value fields; variables are skipped here and printed below */
4876 for_each_hist_val_field(i, hist_data) {
4877 field = hist_data->fields[i];
4878 if (field->flags & HIST_FIELD_FL_VAR) {
/* hitcount is implicit and printed by name rather than as a field */
4883 if (i == HITCOUNT_IDX)
4884 seq_puts(m, "hitcount");
4887 hist_field_print(m, field);
/* second pass: print only the variable (HIST_FIELD_FL_VAR) fields */
4896 for_each_hist_val_field(i, hist_data) {
4897 field = hist_data->fields[i];
4899 if (field->flags & HIST_FIELD_FL_VAR) {
4902 hist_field_print(m, field);
4907 seq_puts(m, ":sort=");
4909 for (i = 0; i < hist_data->n_sort_keys; i++) {
4910 struct tracing_map_sort_key *sort_key;
4911 unsigned int idx, first_key_idx;
/* index of the first key field: values minus (internal) variables */
4914 first_key_idx = hist_data->n_vals - hist_data->n_vars;
4916 sort_key = &hist_data->sort_keys[i];
4917 idx = sort_key->field_idx;
4919 if (WARN_ON(idx >= HIST_FIELDS_MAX))
4925 if (idx == HITCOUNT_IDX)
4926 seq_puts(m, "hitcount");
/* skip over the variable slots to reach the actual key field */
4928 if (idx >= first_key_idx)
4929 idx += hist_data->n_vars;
4930 hist_field_print(m, hist_data->fields[idx]);
4933 if (sort_key->descending)
4934 seq_puts(m, ".descending");
4936 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
4938 print_actions_spec(m, hist_data);
4940 if (data->filter_str)
4941 seq_printf(m, " if %s", data->filter_str);
4944 seq_puts(m, " [paused]");
4946 seq_puts(m, " [active]");
/*
 * event_hist_trigger_init - init op for an (unnamed-ops) hist trigger.
 * On first reference of a named trigger, register it in the global
 * named-trigger list so later triggers can attach to it by name.
 */
4953 static int event_hist_trigger_init(struct event_trigger_ops *ops,
4954 struct event_trigger_data *data)
4956 struct hist_trigger_data *hist_data = data->private_data;
4958 if (!data->ref && hist_data->attrs->name)
4959 save_named_trigger(hist_data->attrs->name, data);
/*
 * unregister_field_var_hists - tear down the auxiliary hist triggers that
 * were auto-created to capture field variables, by replaying each saved
 * command string with a "!hist" (removal) glob.
 */
4966 static void unregister_field_var_hists(struct hist_trigger_data *hist_data)
4968 struct trace_event_file *file;
4973 for (i = 0; i < hist_data->n_field_var_hists; i++) {
4974 file = hist_data->field_var_hists[i]->hist_data->event_file;
4975 cmd = hist_data->field_var_hists[i]->cmd;
/* "!hist" asks event_hist_trigger_func() to remove, not create */
4976 ret = event_hist_trigger_func(&trigger_hist_cmd, file,
4977 "!hist", "hist", cmd);
/*
 * event_hist_trigger_free - drop one reference to a hist trigger and,
 * on last reference, remove it from the named-trigger list and free the
 * trigger data, its variables, its field-var hists, and the hist data.
 */
4981 static void event_hist_trigger_free(struct event_trigger_ops *ops,
4982 struct event_trigger_data *data)
4984 struct hist_trigger_data *hist_data = data->private_data;
4986 if (WARN_ON_ONCE(data->ref <= 0))
/* NOTE(review): the refcount decrement/last-ref test is elided here */
4992 del_named_trigger(data);
4994 trigger_data_free(data);
4996 remove_hist_vars(hist_data);
4998 unregister_field_var_hists(hist_data);
5000 destroy_hist_data(hist_data);
/* ops for a normal (non-name-attached) hist trigger */
5004 static struct event_trigger_ops event_hist_trigger_ops = {
5005 .func = event_hist_trigger,
5006 .print = event_hist_trigger_print,
5007 .init = event_hist_trigger_init,
5008 .free = event_hist_trigger_free,
/*
 * event_hist_trigger_named_init - init op for a trigger attached to an
 * existing named trigger: register under the shared name and forward the
 * init to the underlying named trigger's data.
 */
5011 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
5012 struct event_trigger_data *data)
5016 save_named_trigger(data->named_data->name, data);
5018 event_hist_trigger_init(ops, data->named_data);
/*
 * event_hist_trigger_named_free - free op for a name-attached trigger:
 * release the shared named data first, then unlink and free this
 * trigger's own bookkeeping on last reference.
 */
5023 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
5024 struct event_trigger_data *data)
5026 if (WARN_ON_ONCE(data->ref <= 0))
5029 event_hist_trigger_free(ops, data->named_data);
/* NOTE(review): the refcount decrement/last-ref test is elided here */
5033 del_named_trigger(data);
5034 trigger_data_free(data);
/* ops for a trigger that shares the map of an existing named trigger */
5038 static struct event_trigger_ops event_hist_trigger_named_ops = {
5039 .func = event_hist_trigger,
5040 .print = event_hist_trigger_print,
5041 .init = event_hist_trigger_named_init,
5042 .free = event_hist_trigger_named_free,
/* always the plain ops; named ops are swapped in by hist_register_trigger() */
5045 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
5048 return &event_hist_trigger_ops;
/*
 * hist_clear - reset a trigger's accumulated data.  Pause the trigger,
 * wait an RCU grace period so in-flight event handlers finish, clear the
 * map, then unpause.
 */
5051 static void hist_clear(struct event_trigger_data *data)
5053 struct hist_trigger_data *hist_data = data->private_data;
5056 pause_named_trigger(data);
/* ensure no event handler still writes the map while it's cleared */
5058 synchronize_sched();
5060 tracing_map_clear(hist_data->map);
5063 unpause_named_trigger(data);
/*
 * compatible_field - two ftrace event fields are compatible when they are
 * the same object, or match in name, type, size, and signedness.
 * NOTE(review): the return-value lines between checks are elided here.
 */
5066 static bool compatible_field(struct ftrace_event_field *field,
5067 struct ftrace_event_field *test_field)
5069 if (field == test_field)
5071 if (field == NULL || test_field == NULL)
5073 if (strcmp(field->name, test_field->name) != 0)
5075 if (strcmp(field->type, test_field->type) != 0)
5077 if (field->size != test_field->size)
5079 if (field->is_signed != test_field->is_signed)
/*
 * hist_trigger_match - decide whether two hist triggers describe the same
 * histogram: same field/variable layout, same sort keys, and (unless
 * ignore_filter) the same filter string.  Used for dedup, removal lookup,
 * and named-trigger compatibility checks.
 */
5085 static bool hist_trigger_match(struct event_trigger_data *data,
5086 struct event_trigger_data *data_test,
5087 struct event_trigger_data *named_data,
5090 struct tracing_map_sort_key *sort_key, *sort_key_test;
5091 struct hist_trigger_data *hist_data, *hist_data_test;
5092 struct hist_field *key_field, *key_field_test;
/* a named trigger only matches itself or triggers attached to it */
5095 if (named_data && (named_data != data_test) &&
5096 (named_data != data_test->named_data))
5099 if (!named_data && is_named_trigger(data_test))
5102 hist_data = data->private_data;
5103 hist_data_test = data_test->private_data;
/* cheap structural comparison before the per-field walk */
5105 if (hist_data->n_vals != hist_data_test->n_vals ||
5106 hist_data->n_fields != hist_data_test->n_fields ||
5107 hist_data->n_sort_keys != hist_data_test->n_sort_keys)
5110 if (!ignore_filter) {
/* one has a filter and the other doesn't -> no match */
5111 if ((data->filter_str && !data_test->filter_str) ||
5112 (!data->filter_str && data_test->filter_str))
/* every field must agree in flags, event field, offset, size,
 * signedness, and variable name */
5116 for_each_hist_field(i, hist_data) {
5117 key_field = hist_data->fields[i];
5118 key_field_test = hist_data_test->fields[i];
5120 if (key_field->flags != key_field_test->flags)
5122 if (!compatible_field(key_field->field, key_field_test->field))
5124 if (key_field->offset != key_field_test->offset)
5126 if (key_field->size != key_field_test->size)
5128 if (key_field->is_signed != key_field_test->is_signed)
5130 if (!!key_field->var.name != !!key_field_test->var.name)
5132 if (key_field->var.name &&
5133 strcmp(key_field->var.name, key_field_test->var.name) != 0)
5137 for (i = 0; i < hist_data->n_sort_keys; i++) {
5138 sort_key = &hist_data->sort_keys[i];
5139 sort_key_test = &hist_data_test->sort_keys[i];
5141 if (sort_key->field_idx != sort_key_test->field_idx ||
5142 sort_key->descending != sort_key_test->descending)
/* both have filters at this point: they must be identical */
5146 if (!ignore_filter && data->filter_str &&
5147 (strcmp(data->filter_str, data_test->filter_str) != 0))
/*
 * hist_register_trigger - event_command .reg op for hist triggers.
 *
 * Handles named-trigger attachment, duplicate detection, and the
 * pause/cont/clear sub-commands applied to an already-existing trigger;
 * otherwise initializes the new trigger and enables absolute timestamps
 * if the histogram needs them.
 */
5153 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
5154 struct event_trigger_data *data,
5155 struct trace_event_file *file)
5157 struct hist_trigger_data *hist_data = data->private_data;
5158 struct event_trigger_data *test, *named_data = NULL;
/* a named trigger must be fully compatible with its namesake */
5161 if (hist_data->attrs->name) {
5162 named_data = find_named_trigger(hist_data->attrs->name);
5164 if (!hist_trigger_match(data, named_data, named_data,
5166 hist_err("Named hist trigger doesn't match existing named trigger (includes variables): ", hist_data->attrs->name);
5173 if (hist_data->attrs->name && !named_data)
/* look for an existing matching trigger on this event */
5176 list_for_each_entry_rcu(test, &file->triggers, list) {
5177 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5178 if (!hist_trigger_match(data, test, named_data, false))
/* pause/cont/clear modify the existing trigger in place */
5180 if (hist_data->attrs->pause)
5181 test->paused = true;
5182 else if (hist_data->attrs->cont)
5183 test->paused = false;
5184 else if (hist_data->attrs->clear)
5187 hist_err("Hist trigger already exists", NULL);
/* cont/clear only make sense against an existing trigger */
5194 if (hist_data->attrs->cont || hist_data->attrs->clear) {
5195 hist_err("Can't clear or continue a nonexistent hist trigger", NULL);
5200 if (hist_data->attrs->pause)
5201 data->paused = true;
/* attach to the named trigger: share its hist_data and use named ops */
5204 destroy_hist_data(data->private_data);
5205 data->private_data = named_data->private_data;
5206 set_named_trigger_data(data, named_data);
5207 data->ops = &event_hist_trigger_named_ops;
5210 if (data->ops->init) {
5211 ret = data->ops->init(data->ops, data);
/* common_timestamp usage requires the abs-timestamp ring buffer mode */
5218 if (hist_data->enable_timestamps)
5219 tracing_set_time_stamp_abs(file->tr, true);
/*
 * hist_trigger_enable - add the trigger to the event file's list and turn
 * the event on; roll back the list insertion if enabling fails.
 */
5224 static int hist_trigger_enable(struct event_trigger_data *data,
5225 struct trace_event_file *file)
5229 list_add_tail_rcu(&data->list, &file->triggers);
5231 update_cond_flag(file);
5233 if (trace_event_trigger_enable_disable(file, 1) < 0) {
/* enabling failed: undo the insertion */
5234 list_del_rcu(&data->list);
5235 update_cond_flag(file);
/*
 * have_hist_trigger_match - true if an existing hist trigger on this
 * event file matches @data (used on removal to find the target).
 */
5242 static bool have_hist_trigger_match(struct event_trigger_data *data,
5243 struct trace_event_file *file)
5245 struct hist_trigger_data *hist_data = data->private_data;
5246 struct event_trigger_data *test, *named_data = NULL;
5249 if (hist_data->attrs->name)
5250 named_data = find_named_trigger(hist_data->attrs->name);
5252 list_for_each_entry_rcu(test, &file->triggers, list) {
5253 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5254 if (hist_trigger_match(data, test, named_data, false)) {
/*
 * hist_trigger_check_refs - true if the matching existing trigger's
 * variables are still referenced by other triggers, in which case it
 * must not be removed.
 */
5264 static bool hist_trigger_check_refs(struct event_trigger_data *data,
5265 struct trace_event_file *file)
5267 struct hist_trigger_data *hist_data = data->private_data;
5268 struct event_trigger_data *test, *named_data = NULL;
5270 if (hist_data->attrs->name)
5271 named_data = find_named_trigger(hist_data->attrs->name);
5273 list_for_each_entry_rcu(test, &file->triggers, list) {
5274 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5275 if (!hist_trigger_match(data, test, named_data, false))
/* check the *existing* trigger's vars for outstanding references */
5277 hist_data = test->private_data;
5278 if (check_var_refs(hist_data))
/*
 * hist_unregister_trigger - event_command .unreg op: find the matching
 * trigger on the event file, unlink it, disable the event, free it, and
 * drop the abs-timestamp requirement if this histogram had enabled it.
 */
5287 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
5288 struct event_trigger_data *data,
5289 struct trace_event_file *file)
5291 struct hist_trigger_data *hist_data = data->private_data;
5292 struct event_trigger_data *test, *named_data = NULL;
5293 bool unregistered = false;
5295 if (hist_data->attrs->name)
5296 named_data = find_named_trigger(hist_data->attrs->name);
5298 list_for_each_entry_rcu(test, &file->triggers, list) {
5299 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5300 if (!hist_trigger_match(data, test, named_data, false))
5302 unregistered = true;
5303 list_del_rcu(&test->list);
5304 trace_event_trigger_enable_disable(file, 0);
5305 update_cond_flag(file);
/* free only the trigger actually removed from the list */
5310 if (unregistered && test->ops->free)
5311 test->ops->free(test->ops, test);
5313 if (hist_data->enable_timestamps) {
5314 if (!hist_data->remove || unregistered)
5315 tracing_set_time_stamp_abs(file->tr, false);
/*
 * hist_file_check_refs - true if any hist trigger on the event file has
 * variables still referenced elsewhere (blocks bulk removal).
 */
5319 static bool hist_file_check_refs(struct trace_event_file *file)
5321 struct hist_trigger_data *hist_data;
5322 struct event_trigger_data *test;
5324 list_for_each_entry_rcu(test, &file->triggers, list) {
5325 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5326 hist_data = test->private_data;
5327 if (check_var_refs(hist_data))
/*
 * hist_unreg_all - event_command .unreg_all op: remove every hist trigger
 * from the event file, releasing synth-event references and the
 * abs-timestamp requirement as each one goes.  Refuses to run if any
 * trigger's variables are still referenced.
 */
5335 static void hist_unreg_all(struct trace_event_file *file)
5337 struct event_trigger_data *test, *n;
5338 struct hist_trigger_data *hist_data;
5339 struct synth_event *se;
5340 const char *se_name;
5342 if (hist_file_check_refs(file))
5345 list_for_each_entry_safe(test, n, &file->triggers, list) {
5346 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
5347 hist_data = test->private_data;
5348 list_del_rcu(&test->list);
5349 trace_event_trigger_enable_disable(file, 0);
/* drop the synth event reference held for this event, if any */
5351 mutex_lock(&synth_event_mutex);
5352 se_name = trace_event_name(file->event_call);
5353 se = find_synth_event(se_name);
5356 mutex_unlock(&synth_event_mutex);
5358 update_cond_flag(file);
5359 if (hist_data->enable_timestamps)
5360 tracing_set_time_stamp_abs(file->tr, false);
5361 if (test->ops->free)
5362 test->ops->free(test->ops, test);
/*
 * event_hist_trigger_func - event_command .func op: parse and execute a
 * complete hist trigger command ("hist:keys=...[:...] [if filter]" or
 * "!hist..." for removal).
 *
 * Splits the command into trigger spec and filter, parses the attrs,
 * builds the hist_trigger_data and trigger_data, then either removes a
 * matching trigger or registers, creates actions for, and enables the
 * new one.  Errors unwind through the elided goto-cleanup labels.
 */
5367 static int event_hist_trigger_func(struct event_command *cmd_ops,
5368 struct trace_event_file *file,
5369 char *glob, char *cmd, char *param)
5371 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
5372 struct event_trigger_data *trigger_data;
5373 struct hist_trigger_attrs *attrs;
5374 struct event_trigger_ops *trigger_ops;
5375 struct hist_trigger_data *hist_data;
5376 struct synth_event *se;
5377 const char *se_name;
5378 bool remove = false;
/* remember the raw command for the hist error log */
5382 if (glob && strlen(glob)) {
5383 last_cmd_set(param);
5394 * separate the trigger from the filter (k:v [if filter])
5395 * allowing for whitespace in the trigger
5397 p = trigger = param;
5399 p = strstr(p, "if");
/* "if" must be a standalone word: whitespace before and after */
5404 if (*(p - 1) != ' ' && *(p - 1) != '\t') {
5408 if (p >= param + strlen(param) - strlen("if") - 1)
5410 if (*(p + strlen("if")) != ' ' && *(p + strlen("if")) != '\t') {
5421 param = strstrip(p);
5422 trigger = strstrip(trigger);
5425 attrs = parse_hist_trigger_attrs(trigger);
5427 return PTR_ERR(attrs);
/* user-requested map size overrides the default */
5429 if (attrs->map_bits)
5430 hist_trigger_bits = attrs->map_bits;
5432 hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
5433 if (IS_ERR(hist_data)) {
/* hist_data creation failed: attrs ownership stays with us */
5434 destroy_hist_trigger_attrs(attrs);
5435 return PTR_ERR(hist_data);
5438 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
5440 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
5441 if (!trigger_data) {
5446 trigger_data->count = -1;
5447 trigger_data->ops = trigger_ops;
5448 trigger_data->cmd_ops = cmd_ops;
5450 INIT_LIST_HEAD(&trigger_data->list);
5451 RCU_INIT_POINTER(trigger_data->filter, NULL);
5453 trigger_data->private_data = hist_data;
5455 /* if param is non-empty, it's supposed to be a filter */
5456 if (param && cmd_ops->set_filter) {
5457 ret = cmd_ops->set_filter(param, trigger_data, file);
/* removal path ("!hist"): find and unregister the matching trigger */
5463 if (!have_hist_trigger_match(trigger_data, file))
5466 if (hist_trigger_check_refs(trigger_data, file)) {
5471 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
/* release the synth event reference on removal */
5473 mutex_lock(&synth_event_mutex);
5474 se_name = trace_event_name(file->event_call);
5475 se = find_synth_event(se_name);
5478 mutex_unlock(&synth_event_mutex);
/* creation path: register, then set up actions, map, and enable */
5484 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
5486 * The above returns on success the # of triggers registered,
5487 * but if it didn't register any it returns zero. Consider no
5488 * triggers registered a failure too.
5491 if (!(attrs->pause || attrs->cont || attrs->clear))
/* a trigger attached to a named trigger is complete at this point */
5497 if (get_named_trigger_data(trigger_data))
5500 if (has_hist_vars(hist_data))
5501 save_hist_vars(hist_data);
5503 ret = create_actions(hist_data, file);
5507 ret = tracing_map_init(hist_data->map);
5511 ret = hist_trigger_enable(trigger_data, file);
/* take a synth event reference while this trigger exists */
5515 mutex_lock(&synth_event_mutex);
5516 se_name = trace_event_name(file->event_call);
5517 se = find_synth_event(se_name);
5520 mutex_unlock(&synth_event_mutex);
5522 /* Just return zero, not the number of registered triggers */
/* error unwind: unregister, clear filter, drop vars, free everything */
5530 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
5532 if (cmd_ops->set_filter)
5533 cmd_ops->set_filter(NULL, trigger_data, NULL);
5535 remove_hist_vars(hist_data);
5537 kfree(trigger_data);
5539 destroy_hist_data(hist_data);
/* event_command implementing the per-event 'hist' trigger */
5543 static struct event_command trigger_hist_cmd = {
5545 .trigger_type = ETT_EVENT_HIST,
5546 .flags = EVENT_CMD_FL_NEEDS_REC,
5547 .func = event_hist_trigger_func,
5548 .reg = hist_register_trigger,
5549 .unreg = hist_unregister_trigger,
5550 .unreg_all = hist_unreg_all,
5551 .get_trigger_ops = event_hist_get_trigger_ops,
5552 .set_filter = set_trigger_filter,
/* boot-time registration of the 'hist' event command */
5555 __init int register_trigger_hist_cmd(void)
5559 ret = register_event_command(&trigger_hist_cmd);
/*
 * hist_enable_trigger - trigger func for enable_hist/disable_hist:
 * pause or unpause every hist trigger on the target event file.
 */
5566 hist_enable_trigger(struct event_trigger_data *data, void *rec,
5567 struct ring_buffer_event *event)
5569 struct enable_trigger_data *enable_data = data->private_data;
5570 struct event_trigger_data *test;
5572 list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
5573 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
/* enable clears paused, disable sets it */
5574 if (enable_data->enable)
5575 test->paused = false;
5577 test->paused = true;
/*
 * hist_enable_count_trigger - counted variant: fires hist_enable_trigger
 * at most 'count' times (a count of -1 means unlimited).
 * NOTE(review): the count==0 check and decrement are elided in this view.
 */
5583 hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
5584 struct ring_buffer_event *event)
5589 if (data->count != -1)
5592 hist_enable_trigger(data, rec, event);
/* ops for an uncounted enable_hist trigger */
5595 static struct event_trigger_ops hist_enable_trigger_ops = {
5596 .func = hist_enable_trigger,
5597 .print = event_enable_trigger_print,
5598 .init = event_trigger_init,
5599 .free = event_enable_trigger_free,
/* ops for an enable_hist trigger with a :count limit */
5602 static struct event_trigger_ops hist_enable_count_trigger_ops = {
5603 .func = hist_enable_count_trigger,
5604 .print = event_enable_trigger_print,
5605 .init = event_trigger_init,
5606 .free = event_enable_trigger_free,
/* ops for an uncounted disable_hist trigger (same func; enable flag differs) */
5609 static struct event_trigger_ops hist_disable_trigger_ops = {
5610 .func = hist_enable_trigger,
5611 .print = event_enable_trigger_print,
5612 .init = event_trigger_init,
5613 .free = event_enable_trigger_free,
/* ops for a disable_hist trigger with a :count limit */
5616 static struct event_trigger_ops hist_disable_count_trigger_ops = {
5617 .func = hist_enable_count_trigger,
5618 .print = event_enable_trigger_print,
5619 .init = event_trigger_init,
5620 .free = event_enable_trigger_free,
/*
 * hist_enable_get_trigger_ops - choose among the four enable/disable
 * (counted/uncounted) ops based on the command name and whether a
 * ":count" parameter was supplied.
 */
5623 static struct event_trigger_ops *
5624 hist_enable_get_trigger_ops(char *cmd, char *param)
5626 struct event_trigger_ops *ops;
/* command name distinguishes enable_hist from disable_hist */
5629 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
5632 ops = param ? &hist_enable_count_trigger_ops :
5633 &hist_enable_trigger_ops;
5635 ops = param ? &hist_disable_count_trigger_ops :
5636 &hist_disable_trigger_ops;
/*
 * hist_enable_unreg_all - remove every enable_hist/disable_hist trigger
 * from the event file, disabling the event and freeing each one.
 */
5641 static void hist_enable_unreg_all(struct trace_event_file *file)
5643 struct event_trigger_data *test, *n;
5645 list_for_each_entry_safe(test, n, &file->triggers, list) {
5646 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
5647 list_del_rcu(&test->list);
5648 update_cond_flag(file);
5649 trace_event_trigger_enable_disable(file, 0);
5650 if (test->ops->free)
5651 test->ops->free(test->ops, test);
/* event_command implementing the 'enable_hist' trigger */
5656 static struct event_command trigger_hist_enable_cmd = {
5657 .name = ENABLE_HIST_STR,
5658 .trigger_type = ETT_HIST_ENABLE,
5659 .func = event_enable_trigger_func,
5660 .reg = event_enable_register_trigger,
5661 .unreg = event_enable_unregister_trigger,
5662 .unreg_all = hist_enable_unreg_all,
5663 .get_trigger_ops = hist_enable_get_trigger_ops,
5664 .set_filter = set_trigger_filter,
/* event_command implementing the 'disable_hist' trigger */
5667 static struct event_command trigger_hist_disable_cmd = {
5668 .name = DISABLE_HIST_STR,
5669 .trigger_type = ETT_HIST_ENABLE,
5670 .func = event_enable_trigger_func,
5671 .reg = event_enable_register_trigger,
5672 .unreg = event_enable_unregister_trigger,
5673 .unreg_all = hist_enable_unreg_all,
5674 .get_trigger_ops = hist_enable_get_trigger_ops,
5675 .set_filter = set_trigger_filter,
/* partial-failure rollback helper for the registration below */
5678 static __init void unregister_trigger_hist_enable_disable_cmds(void)
5680 unregister_event_command(&trigger_hist_enable_cmd);
5681 unregister_event_command(&trigger_hist_disable_cmd);
/*
 * register_trigger_hist_enable_disable_cmds - boot-time registration of
 * enable_hist/disable_hist; unregisters both if the second one fails.
 */
5684 __init int register_trigger_hist_enable_disable_cmds(void)
5688 ret = register_event_command(&trigger_hist_enable_cmd);
5689 if (WARN_ON(ret < 0))
5691 ret = register_event_command(&trigger_hist_disable_cmd);
5692 if (WARN_ON(ret < 0))
5693 unregister_trigger_hist_enable_disable_cmds();
/*
 * trace_events_hist_init - create the tracefs 'synthetic_events' control
 * file at boot; only warns (does not fail boot) if creation fails.
 */
5698 static __init int trace_events_hist_init(void)
5700 struct dentry *entry = NULL;
5701 struct dentry *d_tracer;
5704 d_tracer = tracing_init_dentry();
5705 if (IS_ERR(d_tracer)) {
5706 err = PTR_ERR(d_tracer);
5710 entry = tracefs_create_file("synthetic_events", 0644, d_tracer,
5711 NULL, &synth_events_fops);
5719 pr_warn("Could not create tracefs 'synthetic_events' entry\n");
/* run after tracefs is available */
5724 fs_initcall(trace_events_hist_init);