2 * trace_events_hist - trace event hist triggers
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
17 #include <linux/module.h>
18 #include <linux/kallsyms.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/stacktrace.h>
22 #include <linux/rculist.h>
24 #include "tracing_map.h"
/*
 * hist_field_fn_t: per-event accessor that produces the u64 value of one
 * histogram field (key or value) from the trace record / ring-buffer event.
 * NOTE(review): the extraction dropped the typedef's trailing parameter
 * line(s) (presumably "void *event);") -- confirm against the full file.
 */
29 typedef u64 (*hist_field_fn_t) (struct hist_field *field,
30 struct tracing_map_elt *elt,
31 struct ring_buffer_event *rbe,
/* Max operands of an expression field (binary ops use both slots). */
34 #define HIST_FIELD_OPERANDS_MAX 2
/* Upper bound on fields per trigger: map values plus map variables. */
35 #define HIST_FIELDS_MAX (TRACING_MAP_FIELDS_MAX + TRACING_MAP_VARS_MAX)
/*
 * NOTE(review): fragment of struct member declarations (the enclosing
 * struct definitions -- apparently hist_var and hist_field -- start on
 * lines not captured by this extraction).
 */
46 struct hist_trigger_data *hist_data;
/* Backing ftrace event field; NULL for synthetic fields (hitcount, expr). */
51 struct ftrace_event_field *field;
56 unsigned int is_signed;
/* Sub-fields for compound expressions (a+b, -a, log2(a)). */
57 struct hist_field *operands[HIST_FIELD_OPERANDS_MAX];
58 struct hist_trigger_data *hist_data;
/* Which arithmetic operator this expression field applies, if any. */
60 enum field_op_id operator;
64 static u64 hist_field_none(struct hist_field *field,
65 struct tracing_map_elt *elt,
66 struct ring_buffer_event *rbe,
72 static u64 hist_field_counter(struct hist_field *field,
73 struct tracing_map_elt *elt,
74 struct ring_buffer_event *rbe,
80 static u64 hist_field_string(struct hist_field *hist_field,
81 struct tracing_map_elt *elt,
82 struct ring_buffer_event *rbe,
85 char *addr = (char *)(event + hist_field->field->offset);
87 return (u64)(unsigned long)addr;
90 static u64 hist_field_dynstring(struct hist_field *hist_field,
91 struct tracing_map_elt *elt,
92 struct ring_buffer_event *rbe,
95 u32 str_item = *(u32 *)(event + hist_field->field->offset);
96 int str_loc = str_item & 0xffff;
97 char *addr = (char *)(event + str_loc);
99 return (u64)(unsigned long)addr;
102 static u64 hist_field_pstring(struct hist_field *hist_field,
103 struct tracing_map_elt *elt,
104 struct ring_buffer_event *rbe,
107 char **addr = (char **)(event + hist_field->field->offset);
109 return (u64)(unsigned long)*addr;
112 static u64 hist_field_log2(struct hist_field *hist_field,
113 struct tracing_map_elt *elt,
114 struct ring_buffer_event *rbe,
117 struct hist_field *operand = hist_field->operands[0];
119 u64 val = operand->fn(operand, elt, rbe, event);
121 return (u64) ilog2(roundup_pow_of_two(val));
124 static u64 hist_field_plus(struct hist_field *hist_field,
125 struct tracing_map_elt *elt,
126 struct ring_buffer_event *rbe,
129 struct hist_field *operand1 = hist_field->operands[0];
130 struct hist_field *operand2 = hist_field->operands[1];
132 u64 val1 = operand1->fn(operand1, elt, rbe, event);
133 u64 val2 = operand2->fn(operand2, elt, rbe, event);
138 static u64 hist_field_minus(struct hist_field *hist_field,
139 struct tracing_map_elt *elt,
140 struct ring_buffer_event *rbe,
143 struct hist_field *operand1 = hist_field->operands[0];
144 struct hist_field *operand2 = hist_field->operands[1];
146 u64 val1 = operand1->fn(operand1, elt, rbe, event);
147 u64 val2 = operand2->fn(operand2, elt, rbe, event);
152 static u64 hist_field_unary_minus(struct hist_field *hist_field,
153 struct tracing_map_elt *elt,
154 struct ring_buffer_event *rbe,
157 struct hist_field *operand = hist_field->operands[0];
159 s64 sval = (s64)operand->fn(operand, elt, rbe, event);
160 u64 val = (u64)-sval;
165 #define DEFINE_HIST_FIELD_FN(type) \
166 static u64 hist_field_##type(struct hist_field *hist_field, \
167 struct tracing_map_elt *elt, \
168 struct ring_buffer_event *rbe, \
171 type *addr = (type *)(event + hist_field->field->offset); \
173 return (u64)(unsigned long)*addr; \
176 DEFINE_HIST_FIELD_FN(s64);
177 DEFINE_HIST_FIELD_FN(u64);
178 DEFINE_HIST_FIELD_FN(s32);
179 DEFINE_HIST_FIELD_FN(u32);
180 DEFINE_HIST_FIELD_FN(s16);
181 DEFINE_HIST_FIELD_FN(u16);
182 DEFINE_HIST_FIELD_FN(s8);
183 DEFINE_HIST_FIELD_FN(u8);
/* Iterate over all fields (values first, then keys). */
185 #define for_each_hist_field(i, hist_data) \
186 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
/* Iterate over value fields only ([0, n_vals)). */
188 #define for_each_hist_val_field(i, hist_data) \
189 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
/* Iterate over key fields only ([n_vals, n_fields)). */
191 #define for_each_hist_key_field(i, hist_data) \
192 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
/* Stacktrace keys capture up to 16 frames, skipping the innermost 5. */
194 #define HIST_STACKTRACE_DEPTH 16
195 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
196 #define HIST_STACKTRACE_SKIP 5
/* The implicit hitcount value always occupies fields[0]. */
198 #define HITCOUNT_IDX 0
199 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
/*
 * Per-field behavior flags, mostly set from trigger-string modifiers
 * (".hex", ".sym", ".log2", ...).
 * NOTE(review): the extraction dropped the enum's closing lines; the
 * original may define further flags after FL_EXPR -- confirm.
 */
201 enum hist_field_flags {
202 HIST_FIELD_FL_HITCOUNT = 1 << 0,
203 HIST_FIELD_FL_KEY = 1 << 1,
204 HIST_FIELD_FL_STRING = 1 << 2,
205 HIST_FIELD_FL_HEX = 1 << 3,
206 HIST_FIELD_FL_SYM = 1 << 4,
207 HIST_FIELD_FL_SYM_OFFSET = 1 << 5,
208 HIST_FIELD_FL_EXECNAME = 1 << 6,
209 HIST_FIELD_FL_SYSCALL = 1 << 7,
210 HIST_FIELD_FL_STACKTRACE = 1 << 8,
211 HIST_FIELD_FL_LOG2 = 1 << 9,
212 HIST_FIELD_FL_TIMESTAMP = 1 << 10,
213 HIST_FIELD_FL_TIMESTAMP_USECS = 1 << 11,
214 HIST_FIELD_FL_VAR = 1 << 12,
215 HIST_FIELD_FL_EXPR = 1 << 13,
/*
 * NOTE(review): fragment of struct var_defs (parsed "var=expr"
 * assignments) followed by the start of struct hist_trigger_attrs;
 * intermediate members were dropped by the extraction.
 */
220 char *name[TRACING_MAP_VARS_MAX];
221 char *expr[TRACING_MAP_VARS_MAX];
/* Parsed trigger-command attributes (keys/vals/sort strings, etc.). */
224 struct hist_trigger_attrs {
233 unsigned int map_bits;
/* Raw "var=expr" strings, split into var_defs by parse_var_defs(). */
235 char *assignment_str[TRACING_MAP_VARS_MAX];
236 unsigned int n_assignments;
238 struct var_defs var_defs;
/*
 * Top-level state for one hist trigger: its fields, sort keys, the
 * backing tracing_map, and the event file it is attached to.
 * NOTE(review): several members (n_vals, n_keys, n_vars, remove, ...)
 * referenced elsewhere in this file were dropped by the extraction.
 */
241 struct hist_trigger_data {
242 struct hist_field *fields[HIST_FIELDS_MAX];
245 unsigned int n_fields;
247 unsigned int key_size;
248 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
249 unsigned int n_sort_keys;
250 struct trace_event_file *event_file;
251 struct hist_trigger_attrs *attrs;
252 struct tracing_map *map;
253 bool enable_timestamps;
257 static u64 hist_field_timestamp(struct hist_field *hist_field,
258 struct tracing_map_elt *elt,
259 struct ring_buffer_event *rbe,
262 struct hist_trigger_data *hist_data = hist_field->hist_data;
263 struct trace_array *tr = hist_data->event_file->tr;
265 u64 ts = ring_buffer_event_time_stamp(rbe);
267 if (hist_data->attrs->ts_in_usecs && trace_clock_in_ns(tr))
273 static struct hist_field *find_var_field(struct hist_trigger_data *hist_data,
274 const char *var_name)
276 struct hist_field *hist_field, *found = NULL;
279 for_each_hist_field(i, hist_data) {
280 hist_field = hist_data->fields[i];
281 if (hist_field && hist_field->flags & HIST_FIELD_FL_VAR &&
282 strcmp(hist_field->var.name, var_name) == 0) {
291 static struct hist_field *find_var(struct hist_trigger_data *hist_data,
292 struct trace_event_file *file,
293 const char *var_name)
295 struct hist_trigger_data *test_data;
296 struct event_trigger_data *test;
297 struct hist_field *hist_field;
299 hist_field = find_var_field(hist_data, var_name);
303 list_for_each_entry_rcu(test, &file->triggers, list) {
304 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
305 test_data = test->private_data;
306 hist_field = find_var_field(test_data, var_name);
/*
 * Per-map-element private data (the saved comm for .execname keys).
 * NOTE(review): the struct body (presumably "char *comm;") was dropped
 * by the extraction.
 */
315 struct hist_elt_data {
/*
 * hist_field_name - return the display name of a field: the ftrace field
 * name, the wrapped operand's name (log2), "common_timestamp", or the
 * expression string.  'level' bounds recursion through operands.
 * NOTE(review): the level-guard, first branch condition and return were
 * dropped by the extraction.
 */
319 static const char *hist_field_name(struct hist_field *field,
322 const char *field_name = "";
328 field_name = field->field->name;
329 else if (field->flags & HIST_FIELD_FL_LOG2)
330 field_name = hist_field_name(field->operands[0], ++level);
331 else if (field->flags & HIST_FIELD_FL_TIMESTAMP)
332 field_name = "common_timestamp";
333 else if (field->flags & HIST_FIELD_FL_EXPR)
334 field_name = field->name;
/* Normalize a NULL name to the empty string for printing. */
336 if (field_name == NULL)
/*
 * select_value_fn - pick the hist_field_<type> accessor matching a
 * field's size and signedness; returns NULL for unsupported sizes.
 * NOTE(review): the switch body (cases 8/4/2/1) was dropped by the
 * extraction.
 */
342 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
344 hist_field_fn_t fn = NULL;
346 switch (field_size) {
376 static int parse_map_size(char *str)
378 unsigned long size, map_bits;
387 ret = kstrtoul(str, 0, &size);
391 map_bits = ilog2(roundup_pow_of_two(size));
392 if (map_bits < TRACING_MAP_BITS_MIN ||
393 map_bits > TRACING_MAP_BITS_MAX)
401 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
408 for (i = 0; i < attrs->n_assignments; i++)
409 kfree(attrs->assignment_str[i]);
412 kfree(attrs->sort_key_str);
413 kfree(attrs->keys_str);
414 kfree(attrs->vals_str);
/*
 * parse_assignment - classify one "name=value" token from the trigger
 * string: the well-known attributes (keys/vals/sort/name/size) are
 * stored in their dedicated attrs slots; anything else is treated as a
 * variable assignment and appended to assignment_str[].
 * NOTE(review): the -ENOMEM error paths and return were dropped by the
 * extraction.
 */
418 static int parse_assignment(char *str, struct hist_trigger_attrs *attrs)
422 if ((strncmp(str, "key=", strlen("key=")) == 0) ||
423 (strncmp(str, "keys=", strlen("keys=")) == 0)) {
424 attrs->keys_str = kstrdup(str, GFP_KERNEL);
425 if (!attrs->keys_str) {
429 } else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
430 (strncmp(str, "vals=", strlen("vals=")) == 0) ||
431 (strncmp(str, "values=", strlen("values=")) == 0)) {
432 attrs->vals_str = kstrdup(str, GFP_KERNEL);
433 if (!attrs->vals_str) {
437 } else if (strncmp(str, "sort=", strlen("sort=")) == 0) {
438 attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
439 if (!attrs->sort_key_str) {
443 } else if (strncmp(str, "name=", strlen("name=")) == 0) {
444 attrs->name = kstrdup(str, GFP_KERNEL);
449 } else if (strncmp(str, "size=", strlen("size=")) == 0) {
450 int map_bits = parse_map_size(str);
456 attrs->map_bits = map_bits;
/* Unrecognized "a=b": record it as a variable definition. */
460 if (attrs->n_assignments == TRACING_MAP_VARS_MAX) {
465 assignment = kstrdup(str, GFP_KERNEL);
471 attrs->assignment_str[attrs->n_assignments++] = assignment;
/*
 * parse_hist_trigger_attrs - split the colon-separated trigger string
 * into attributes: "a=b" tokens go through parse_assignment(); bare
 * keywords (pause/cont/clear) set the corresponding flags.  Requires a
 * keys= attribute.  Returns the attrs or an ERR_PTR; frees everything
 * on failure.
 */
477 static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
479 struct hist_trigger_attrs *attrs;
482 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
484 return ERR_PTR(-ENOMEM);
486 while (trigger_str) {
487 char *str = strsep(&trigger_str, ":");
489 if (strchr(str, '=')) {
490 ret = parse_assignment(str, attrs);
493 } else if (strcmp(str, "pause") == 0)
495 else if ((strcmp(str, "cont") == 0) ||
496 (strcmp(str, "continue") == 0))
498 else if (strcmp(str, "clear") == 0)
/* A hist trigger without keys= is meaningless. */
506 if (!attrs->keys_str) {
513 destroy_hist_trigger_attrs(attrs);
518 static inline void save_comm(char *comm, struct task_struct *task)
521 strcpy(comm, "<idle>");
525 if (WARN_ON_ONCE(task->pid < 0)) {
526 strcpy(comm, "<XXX>");
530 memcpy(comm, task->comm, TASK_COMM_LEN);
533 static void hist_elt_data_free(struct hist_elt_data *elt_data)
535 kfree(elt_data->comm);
539 static void hist_trigger_elt_data_free(struct tracing_map_elt *elt)
541 struct hist_elt_data *elt_data = elt->private_data;
543 hist_elt_data_free(elt_data);
546 static int hist_trigger_elt_data_alloc(struct tracing_map_elt *elt)
548 struct hist_trigger_data *hist_data = elt->map->private_data;
549 unsigned int size = TASK_COMM_LEN;
550 struct hist_elt_data *elt_data;
551 struct hist_field *key_field;
554 elt_data = kzalloc(sizeof(*elt_data), GFP_KERNEL);
558 for_each_hist_key_field(i, hist_data) {
559 key_field = hist_data->fields[i];
561 if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
562 elt_data->comm = kzalloc(size, GFP_KERNEL);
563 if (!elt_data->comm) {
571 elt->private_data = elt_data;
576 static void hist_trigger_elt_data_init(struct tracing_map_elt *elt)
578 struct hist_elt_data *elt_data = elt->private_data;
581 save_comm(elt_data->comm, current);
584 static const struct tracing_map_ops hist_trigger_elt_data_ops = {
585 .elt_alloc = hist_trigger_elt_data_alloc,
586 .elt_free = hist_trigger_elt_data_free,
587 .elt_init = hist_trigger_elt_data_init,
590 static const char *get_hist_field_flags(struct hist_field *hist_field)
592 const char *flags_str = NULL;
594 if (hist_field->flags & HIST_FIELD_FL_HEX)
596 else if (hist_field->flags & HIST_FIELD_FL_SYM)
598 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
599 flags_str = "sym-offset";
600 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
601 flags_str = "execname";
602 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
603 flags_str = "syscall";
604 else if (hist_field->flags & HIST_FIELD_FL_LOG2)
606 else if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP_USECS)
612 static void expr_field_str(struct hist_field *field, char *expr)
614 strcat(expr, hist_field_name(field, 0));
617 const char *flags_str = get_hist_field_flags(field);
621 strcat(expr, flags_str);
/*
 * expr_str - build a human-readable string for an expression field
 * (e.g. "-(a)" or "a+b") into a freshly-allocated buffer; used as the
 * field's display name.  'level' bounds recursion.
 * NOTE(review): the level guard, unary-minus formatting details, the
 * operator-to-char cases and the return were dropped by the extraction.
 */
626 static char *expr_str(struct hist_field *field, unsigned int level)
633 expr = kzalloc(MAX_FILTER_STR_VAL, GFP_KERNEL);
/* No operand[0] means a simple (non-compound) field. */
637 if (!field->operands[0]) {
638 expr_field_str(field, expr);
642 if (field->operator == FIELD_OP_UNARY_MINUS) {
646 subexpr = expr_str(field->operands[0], ++level);
651 strcat(expr, subexpr);
659 expr_field_str(field->operands[0], expr);
661 switch (field->operator) {
673 expr_field_str(field->operands[1], expr);
/*
 * contains_operator - scan a trigger expression for '+' or '-' and
 * classify it: FIELD_OP_NONE, FIELD_OP_PLUS, FIELD_OP_MINUS, or
 * FIELD_OP_UNARY_MINUS (a leading '-').
 * NOTE(review): the position checks distinguishing unary from binary
 * minus were dropped by the extraction.
 */
678 static int contains_operator(char *str)
680 enum field_op_id field_op = FIELD_OP_NONE;
683 op = strpbrk(str, "+-");
685 return FIELD_OP_NONE;
690 field_op = FIELD_OP_UNARY_MINUS;
692 field_op = FIELD_OP_MINUS;
695 field_op = FIELD_OP_PLUS;
704 static void destroy_hist_field(struct hist_field *hist_field,
715 for (i = 0; i < HIST_FIELD_OPERANDS_MAX; i++)
716 destroy_hist_field(hist_field->operands[i], level + 1);
718 kfree(hist_field->var.name);
719 kfree(hist_field->name);
/*
 * create_hist_field - allocate and initialize one hist_field from an
 * ftrace field plus flags: selects the accessor fn, size, and (for VAR
 * fields) duplicates the variable name.  EXPR fields are returned
 * skeletal for the caller to fill in.  Returns NULL on failure.
 * NOTE(review): many intermediate lines (size/flags assignments, gotos,
 * labels, the final return) were dropped by the extraction; left as-is
 * because a rewrite cannot be verified against the missing text.
 */
724 static struct hist_field *create_hist_field(struct hist_trigger_data *hist_data,
725 struct ftrace_event_field *field,
/* Function-call "fields" can't be histogrammed. */
729 struct hist_field *hist_field;
731 if (field && is_function_field(field))
734 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
738 hist_field->hist_data = hist_data;
740 if (flags & HIST_FIELD_FL_EXPR)
741 goto out; /* caller will populate */
743 if (flags & HIST_FIELD_FL_HITCOUNT) {
744 hist_field->fn = hist_field_counter;
748 if (flags & HIST_FIELD_FL_STACKTRACE) {
749 hist_field->fn = hist_field_none;
/* log2 wraps a plain accessor for the same field as operand[0]. */
753 if (flags & HIST_FIELD_FL_LOG2) {
754 unsigned long fl = flags & ~HIST_FIELD_FL_LOG2;
755 hist_field->fn = hist_field_log2;
756 hist_field->operands[0] = create_hist_field(hist_data, field, fl, NULL);
757 hist_field->size = hist_field->operands[0]->size;
761 if (flags & HIST_FIELD_FL_TIMESTAMP) {
762 hist_field->fn = hist_field_timestamp;
763 hist_field->size = sizeof(u64);
767 if (WARN_ON_ONCE(!field))
770 if (is_string_field(field)) {
771 flags |= HIST_FIELD_FL_STRING;
773 if (field->filter_type == FILTER_STATIC_STRING)
774 hist_field->fn = hist_field_string;
775 else if (field->filter_type == FILTER_DYN_STRING)
776 hist_field->fn = hist_field_dynstring;
778 hist_field->fn = hist_field_pstring;
780 hist_field->fn = select_value_fn(field->size,
782 if (!hist_field->fn) {
783 destroy_hist_field(hist_field, 0);
788 hist_field->field = field;
789 hist_field->flags = flags;
792 hist_field->var.name = kstrdup(var_name, GFP_KERNEL);
793 if (!hist_field->var.name)
799 destroy_hist_field(hist_field, 0);
803 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
807 for (i = 0; i < HIST_FIELDS_MAX; i++) {
808 if (hist_data->fields[i]) {
809 destroy_hist_field(hist_data->fields[i], 0);
810 hist_data->fields[i] = NULL;
/*
 * parse_field - resolve "name[.modifier]" into an ftrace_event_field and
 * set the matching flag bits.  "common_timestamp" is synthetic (no
 * backing field; enables per-event timestamps instead).  Returns the
 * field, NULL (timestamp), or an ERR_PTR.
 * NOTE(review): cleanup (kfree of the strdup'd copy) and the final
 * return were dropped by the extraction.
 */
815 static struct ftrace_event_field *
816 parse_field(struct hist_trigger_data *hist_data, struct trace_event_file *file,
817 char *field_str, unsigned long *flags)
819 struct ftrace_event_field *field = NULL;
820 char *field_name, *modifier, *str;
/* Work on a copy so strsep() doesn't mutate the caller's string. */
822 modifier = str = kstrdup(field_str, GFP_KERNEL);
824 return ERR_PTR(-ENOMEM);
826 field_name = strsep(&modifier, ".");
828 if (strcmp(modifier, "hex") == 0)
829 *flags |= HIST_FIELD_FL_HEX;
830 else if (strcmp(modifier, "sym") == 0)
831 *flags |= HIST_FIELD_FL_SYM;
832 else if (strcmp(modifier, "sym-offset") == 0)
833 *flags |= HIST_FIELD_FL_SYM_OFFSET;
834 else if ((strcmp(modifier, "execname") == 0) &&
835 (strcmp(field_name, "common_pid") == 0))
836 *flags |= HIST_FIELD_FL_EXECNAME;
837 else if (strcmp(modifier, "syscall") == 0)
838 *flags |= HIST_FIELD_FL_SYSCALL;
839 else if (strcmp(modifier, "log2") == 0)
840 *flags |= HIST_FIELD_FL_LOG2;
841 else if (strcmp(modifier, "usecs") == 0)
842 *flags |= HIST_FIELD_FL_TIMESTAMP_USECS;
844 field = ERR_PTR(-EINVAL);
849 if (strcmp(field_name, "common_timestamp") == 0) {
850 *flags |= HIST_FIELD_FL_TIMESTAMP;
851 hist_data->enable_timestamps = true;
852 if (*flags & HIST_FIELD_FL_TIMESTAMP_USECS)
853 hist_data->attrs->ts_in_usecs = true;
855 field = trace_find_event_field(file->event_call, field_name);
856 if (!field || !field->size) {
857 field = ERR_PTR(-EINVAL);
/*
 * parse_atom - parse a non-compound operand: resolve the field via
 * parse_field() and wrap it in a hist_field.  Returns the field or an
 * ERR_PTR.
 * NOTE(review): the -ENOMEM check on create_hist_field() and the error
 * label were dropped by the extraction.
 */
867 static struct hist_field *parse_atom(struct hist_trigger_data *hist_data,
868 struct trace_event_file *file, char *str,
869 unsigned long *flags, char *var_name)
871 struct ftrace_event_field *field = NULL;
872 struct hist_field *hist_field = NULL;
875 field = parse_field(hist_data, file, str, flags);
877 ret = PTR_ERR(field);
881 hist_field = create_hist_field(hist_data, field, *flags, var_name);
/* Forward declaration: parse_expr() and parse_unary() are mutually recursive. */
892 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
893 struct trace_event_file *file,
894 char *str, unsigned long flags,
895 char *var_name, unsigned int level);
/*
 * parse_unary - parse a unary-minus expression of the form "-(expr)":
 * strip the '-' and parens, recursively parse the inner expression, and
 * wrap it in an EXPR field using hist_field_unary_minus.  Returns the
 * expression field or an ERR_PTR.
 * NOTE(review): the level guard, paren-stripping details, size/flag
 * copies and error labels were dropped by the extraction.
 */
897 static struct hist_field *parse_unary(struct hist_trigger_data *hist_data,
898 struct trace_event_file *file,
899 char *str, unsigned long flags,
900 char *var_name, unsigned int level)
902 struct hist_field *operand1, *expr = NULL;
903 unsigned long operand_flags;
907 /* we support only -(xxx) i.e. explicit parens required */
914 str++; /* skip leading '-' */
916 s = strchr(str, '(');
924 s = strrchr(str, ')');
928 ret = -EINVAL; /* no closing ')' */
932 flags |= HIST_FIELD_FL_EXPR;
933 expr = create_hist_field(hist_data, NULL, flags, var_name);
940 operand1 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
941 if (IS_ERR(operand1)) {
942 ret = PTR_ERR(operand1);
/* Propagate timestamp-ness so usec conversion survives negation. */
946 expr->flags |= operand1->flags &
947 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
948 expr->fn = hist_field_unary_minus;
949 expr->operands[0] = operand1;
950 expr->operator = FIELD_OP_UNARY_MINUS;
951 expr->name = expr_str(expr, 0);
955 destroy_hist_field(expr, 0);
959 static int check_expr_operands(struct hist_field *operand1,
960 struct hist_field *operand2)
962 unsigned long operand1_flags = operand1->flags;
963 unsigned long operand2_flags = operand2->flags;
965 if ((operand1_flags & HIST_FIELD_FL_TIMESTAMP_USECS) !=
966 (operand2_flags & HIST_FIELD_FL_TIMESTAMP_USECS))
/*
 * parse_expr - parse a (possibly compound) expression string: an atom,
 * a unary minus, or "a OP b" where OP is + or -.  Splits on the first
 * operator, parses the left side as an atom and the right side
 * recursively (so a+b+c associates as a+(b+c)), validates unit
 * compatibility, and builds an EXPR field.  Returns the field or an
 * ERR_PTR; frees partial results on failure.
 * NOTE(review): the level guard, sep selection, size assignment and
 * several error labels were dropped by the extraction.
 */
972 static struct hist_field *parse_expr(struct hist_trigger_data *hist_data,
973 struct trace_event_file *file,
974 char *str, unsigned long flags,
975 char *var_name, unsigned int level)
977 struct hist_field *operand1 = NULL, *operand2 = NULL, *expr = NULL;
978 unsigned long operand_flags;
979 int field_op, ret = -EINVAL;
980 char *sep, *operand1_str;
983 return ERR_PTR(-EINVAL);
985 field_op = contains_operator(str);
987 if (field_op == FIELD_OP_NONE)
988 return parse_atom(hist_data, file, str, &flags, var_name);
990 if (field_op == FIELD_OP_UNARY_MINUS)
991 return parse_unary(hist_data, file, str, flags, var_name, ++level);
1004 operand1_str = strsep(&str, sep);
1005 if (!operand1_str || !str)
1009 operand1 = parse_atom(hist_data, file, operand1_str,
1010 &operand_flags, NULL);
1011 if (IS_ERR(operand1)) {
1012 ret = PTR_ERR(operand1);
1017 /* rest of string could be another expression e.g. b+c in a+b+c */
1019 operand2 = parse_expr(hist_data, file, str, operand_flags, NULL, ++level);
1020 if (IS_ERR(operand2)) {
1021 ret = PTR_ERR(operand2);
1026 ret = check_expr_operands(operand1, operand2);
1030 flags |= HIST_FIELD_FL_EXPR;
1032 flags |= operand1->flags &
1033 (HIST_FIELD_FL_TIMESTAMP | HIST_FIELD_FL_TIMESTAMP_USECS);
1035 expr = create_hist_field(hist_data, NULL, flags, var_name);
1041 expr->operands[0] = operand1;
1042 expr->operands[1] = operand2;
1043 expr->operator = field_op;
1044 expr->name = expr_str(expr, 0);
1047 case FIELD_OP_MINUS:
1048 expr->fn = hist_field_minus;
1051 expr->fn = hist_field_plus;
1059 destroy_hist_field(operand1, 0);
1060 destroy_hist_field(operand2, 0);
1061 destroy_hist_field(expr, 0);
1063 return ERR_PTR(ret);
1066 static int create_hitcount_val(struct hist_trigger_data *hist_data)
1068 hist_data->fields[HITCOUNT_IDX] =
1069 create_hist_field(hist_data, NULL, HIST_FIELD_FL_HITCOUNT, NULL);
1070 if (!hist_data->fields[HITCOUNT_IDX])
1073 hist_data->n_vals++;
1074 hist_data->n_fields++;
1076 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
1082 static int __create_val_field(struct hist_trigger_data *hist_data,
1083 unsigned int val_idx,
1084 struct trace_event_file *file,
1085 char *var_name, char *field_str,
1086 unsigned long flags)
1088 struct hist_field *hist_field;
1091 hist_field = parse_expr(hist_data, file, field_str, flags, var_name, 0);
1092 if (IS_ERR(hist_field)) {
1093 ret = PTR_ERR(hist_field);
1097 hist_data->fields[val_idx] = hist_field;
1099 ++hist_data->n_vals;
1100 ++hist_data->n_fields;
1102 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
1108 static int create_val_field(struct hist_trigger_data *hist_data,
1109 unsigned int val_idx,
1110 struct trace_event_file *file,
1113 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
1116 return __create_val_field(hist_data, val_idx, file, NULL, field_str, 0);
1119 static int create_var_field(struct hist_trigger_data *hist_data,
1120 unsigned int val_idx,
1121 struct trace_event_file *file,
1122 char *var_name, char *expr_str)
1124 unsigned long flags = 0;
1126 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX + TRACING_MAP_VARS_MAX))
1128 if (find_var(hist_data, file, var_name) && !hist_data->remove) {
1132 flags |= HIST_FIELD_FL_VAR;
1133 hist_data->n_vars++;
1134 if (WARN_ON(hist_data->n_vars > TRACING_MAP_VARS_MAX))
1137 return __create_val_field(hist_data, val_idx, file, var_name, expr_str, flags);
/*
 * create_val_fields - create the implicit hitcount plus every field
 * listed in "vals=...": strip the "vals=" prefix, split on commas, and
 * create one value field per token (skipping explicit "hitcount").
 * Returns 0 or -errno.
 * NOTE(review): early-exit checks and the return were dropped by the
 * extraction.
 */
1140 static int create_val_fields(struct hist_trigger_data *hist_data,
1141 struct trace_event_file *file)
1143 char *fields_str, *field_str;
1144 unsigned int i, j = 1;
1147 ret = create_hitcount_val(hist_data);
1151 fields_str = hist_data->attrs->vals_str;
/* Skip past the "vals=" prefix. */
1155 strsep(&fields_str, "=");
1159 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
1160 j < TRACING_MAP_VALS_MAX; i++) {
1161 field_str = strsep(&fields_str, ",");
1165 if (strcmp(field_str, "hitcount") == 0)
1168 ret = create_val_field(hist_data, j++, file, field_str);
/* Leftover tokens (other than "hitcount") mean too many values. */
1173 if (fields_str && (strcmp(fields_str, "hitcount") != 0))
/*
 * create_key_field - create one key field at key_idx/key_offset: either
 * the special "stacktrace" key or a parsed field expression.  Key sizes
 * are u64-aligned and accumulated into hist_data->key_size, which is
 * bounded by HIST_KEY_SIZE_MAX.  Returns the key size consumed or
 * -errno.
 * NOTE(review): error labels and the final return were dropped by the
 * extraction.
 */
1179 static int create_key_field(struct hist_trigger_data *hist_data,
1180 unsigned int key_idx,
1181 unsigned int key_offset,
1182 struct trace_event_file *file,
1185 struct hist_field *hist_field = NULL;
1187 unsigned long flags = 0;
1188 unsigned int key_size;
1191 if (WARN_ON(key_idx >= HIST_FIELDS_MAX))
1194 flags |= HIST_FIELD_FL_KEY;
1196 if (strcmp(field_str, "stacktrace") == 0) {
1197 flags |= HIST_FIELD_FL_STACKTRACE;
1198 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
1199 hist_field = create_hist_field(hist_data, NULL, flags, NULL);
1201 hist_field = parse_expr(hist_data, file, field_str, flags,
1203 if (IS_ERR(hist_field)) {
1204 ret = PTR_ERR(hist_field);
1208 key_size = hist_field->size;
1211 hist_data->fields[key_idx] = hist_field;
/* Keys are laid out contiguously at u64-aligned offsets. */
1213 key_size = ALIGN(key_size, sizeof(u64));
1214 hist_data->fields[key_idx]->size = key_size;
1215 hist_data->fields[key_idx]->offset = key_offset;
1217 hist_data->key_size += key_size;
1219 if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
1224 hist_data->n_keys++;
1225 hist_data->n_fields++;
1227 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
/*
 * create_key_fields - split "keys=..." on commas and create one key
 * field per token, packing them at increasing key offsets after the
 * value fields.  Returns 0 or -errno.
 * NOTE(review): the loop's offset accumulation and return were dropped
 * by the extraction.
 */
1235 static int create_key_fields(struct hist_trigger_data *hist_data,
1236 struct trace_event_file *file)
1238 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
1239 char *fields_str, *field_str;
1242 fields_str = hist_data->attrs->keys_str;
/* Skip past the "keys=" prefix. */
1246 strsep(&fields_str, "=");
1250 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
1251 field_str = strsep(&fields_str, ",");
1254 ret = create_key_field(hist_data, i, key_offset,
1269 static int create_var_fields(struct hist_trigger_data *hist_data,
1270 struct trace_event_file *file)
1272 unsigned int i, j = hist_data->n_vals;
1275 unsigned int n_vars = hist_data->attrs->var_defs.n_vars;
1277 for (i = 0; i < n_vars; i++) {
1278 char *var_name = hist_data->attrs->var_defs.name[i];
1279 char *expr = hist_data->attrs->var_defs.expr[i];
1281 ret = create_var_field(hist_data, j++, file, var_name, expr);
1289 static void free_var_defs(struct hist_trigger_data *hist_data)
1293 for (i = 0; i < hist_data->attrs->var_defs.n_vars; i++) {
1294 kfree(hist_data->attrs->var_defs.name[i]);
1295 kfree(hist_data->attrs->var_defs.expr[i]);
1298 hist_data->attrs->var_defs.n_vars = 0;
/*
 * parse_var_defs - split each recorded assignment string into
 * comma-separated "var=expr" pairs and store duplicated name/expr
 * strings in attrs->var_defs.  On any failure, frees everything parsed
 * so far via free_var_defs().  Returns 0 or -errno.
 * NOTE(review): several error-path lines and the returns were dropped
 * by the extraction.
 */
1301 static int parse_var_defs(struct hist_trigger_data *hist_data)
1303 char *s, *str, *var_name, *field_str;
1304 unsigned int i, j, n_vars = 0;
1307 for (i = 0; i < hist_data->attrs->n_assignments; i++) {
1308 str = hist_data->attrs->assignment_str[i];
1309 for (j = 0; j < TRACING_MAP_VARS_MAX; j++) {
1310 field_str = strsep(&str, ",");
1314 var_name = strsep(&field_str, "=");
1315 if (!var_name || !field_str) {
1320 if (n_vars == TRACING_MAP_VARS_MAX) {
1325 s = kstrdup(var_name, GFP_KERNEL);
1330 hist_data->attrs->var_defs.name[n_vars] = s;
1332 s = kstrdup(field_str, GFP_KERNEL);
/* Don't leak the name if duplicating the expr failed. */
1334 kfree(hist_data->attrs->var_defs.name[n_vars]);
1338 hist_data->attrs->var_defs.expr[n_vars++] = s;
1340 hist_data->attrs->var_defs.n_vars = n_vars;
1346 free_var_defs(hist_data);
/*
 * create_hist_fields - build all of the trigger's fields in order:
 * variable definitions, values (incl. hitcount), variables, then keys.
 * The temporary var_defs strings are freed on both success and failure.
 * Returns 0 or -errno.
 */
static int create_hist_fields(struct hist_trigger_data *hist_data,
			      struct trace_event_file *file)
{
	int ret;

	ret = parse_var_defs(hist_data);
	if (ret)
		goto out;

	ret = create_val_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_var_fields(hist_data, file);
	if (ret)
		goto out;

	ret = create_key_fields(hist_data, file);
	if (ret)
		goto out;
 out:
	free_var_defs(hist_data);

	return ret;
}
/*
 * is_descending - interpret an optional sort-direction token: 1 for
 * "descending", 0 for "ascending" or no token, -EINVAL for anything
 * else.
 */
static int is_descending(const char *str)
{
	if (!str)
		return 0;

	if (strcmp(str, "descending") == 0)
		return 1;

	if (strcmp(str, "ascending") == 0)
		return 0;

	return -EINVAL;
}
/*
 * create_sort_keys - parse "sort=field[.direction],..." into
 * tracing_map_sort_key entries.  "hitcount" maps to field index 0;
 * other names are matched against the trigger's fields (variables are
 * skipped and don't consume a sort index).  Defaults to one hitcount
 * sort key.  Returns 0 or -errno.
 * NOTE(review): break/continue statements, error labels, idx tracking
 * and the return were dropped by the extraction.
 */
1391 static int create_sort_keys(struct hist_trigger_data *hist_data)
1393 char *fields_str = hist_data->attrs->sort_key_str;
1394 struct tracing_map_sort_key *sort_key;
1395 int descending, ret = 0;
1396 unsigned int i, j, k;
1398 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
/* Skip past the "sort=" prefix. */
1403 strsep(&fields_str, "=");
1409 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
1410 struct hist_field *hist_field;
1411 char *field_str, *field_name;
1412 const char *test_name;
1414 sort_key = &hist_data->sort_keys[i];
1416 field_str = strsep(&fields_str, ",");
/* More tokens than sort slots is an error. */
1423 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
1428 field_name = strsep(&field_str, ".");
1434 if (strcmp(field_name, "hitcount") == 0) {
1435 descending = is_descending(field_str);
1436 if (descending < 0) {
1440 sort_key->descending = descending;
1444 for (j = 1, k = 1; j < hist_data->n_fields; j++) {
1447 hist_field = hist_data->fields[j];
/* Variables aren't displayed columns, so can't be sorted on. */
1448 if (hist_field->flags & HIST_FIELD_FL_VAR)
1453 test_name = hist_field_name(hist_field, 0);
1455 if (strcmp(field_name, test_name) == 0) {
1456 sort_key->field_idx = idx;
1457 descending = is_descending(field_str);
1458 if (descending < 0) {
1462 sort_key->descending = descending;
1466 if (j == hist_data->n_fields) {
1472 hist_data->n_sort_keys = i;
1477 static void destroy_hist_data(struct hist_trigger_data *hist_data)
1479 destroy_hist_trigger_attrs(hist_data->attrs);
1480 destroy_hist_fields(hist_data);
1481 tracing_map_destroy(hist_data->map);
/*
 * create_tracing_map_fields - register each hist field with the
 * tracing_map: keys get a comparator chosen by type (none for
 * stacktraces, string compare for strings, numeric otherwise);
 * non-variable values become sum fields; variables get var slots and
 * remember their map index.  Returns 0 or -errno.
 * NOTE(review): an intermediate condition (likely the is_signed key
 * case), idx error checks and the return were dropped by the
 * extraction.
 */
1485 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
1487 struct tracing_map *map = hist_data->map;
1488 struct ftrace_event_field *field;
1489 struct hist_field *hist_field;
1492 for_each_hist_field(i, hist_data) {
1493 hist_field = hist_data->fields[i];
1494 if (hist_field->flags & HIST_FIELD_FL_KEY) {
1495 tracing_map_cmp_fn_t cmp_fn;
1497 field = hist_field->field;
1499 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
1500 cmp_fn = tracing_map_cmp_none;
1502 cmp_fn = tracing_map_cmp_num(hist_field->size,
1503 hist_field->is_signed);
1504 else if (is_string_field(field))
1505 cmp_fn = tracing_map_cmp_string;
1507 cmp_fn = tracing_map_cmp_num(field->size,
1509 idx = tracing_map_add_key_field(map,
1512 } else if (!(hist_field->flags & HIST_FIELD_FL_VAR))
1513 idx = tracing_map_add_sum_field(map);
1518 if (hist_field->flags & HIST_FIELD_FL_VAR) {
1519 idx = tracing_map_add_var(map);
1522 hist_field->var.idx = idx;
1523 hist_field->var.hist_data = hist_data;
/*
 * create_hist_data - allocate and fully initialize a trigger: parse
 * fields and sort keys, create the tracing_map with the element-data
 * ops, register map fields, and init the map.  Takes ownership of
 * 'attrs' (but relinquishes it -- attrs set back to NULL -- before
 * destroy on the error path so the caller can free it).  Returns the
 * hist_data or an ERR_PTR.
 * NOTE(review): goto labels and some error checks were dropped by the
 * extraction.
 */
1530 static struct hist_trigger_data *
1531 create_hist_data(unsigned int map_bits,
1532 struct hist_trigger_attrs *attrs,
1533 struct trace_event_file *file,
1536 const struct tracing_map_ops *map_ops = NULL;
1537 struct hist_trigger_data *hist_data;
1540 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
1542 return ERR_PTR(-ENOMEM);
1544 hist_data->attrs = attrs;
1545 hist_data->remove = remove;
1547 ret = create_hist_fields(hist_data, file);
1551 ret = create_sort_keys(hist_data);
1555 map_ops = &hist_trigger_elt_data_ops;
1557 hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
1558 map_ops, hist_data);
1559 if (IS_ERR(hist_data->map)) {
1560 ret = PTR_ERR(hist_data->map);
1561 hist_data->map = NULL;
1565 ret = create_tracing_map_fields(hist_data);
1569 ret = tracing_map_init(hist_data->map);
1573 hist_data->event_file = file;
/* Error path: don't free caller-owned attrs with the rest. */
1577 hist_data->attrs = NULL;
1579 destroy_hist_data(hist_data);
1581 hist_data = ERR_PTR(ret);
/*
 * hist_trigger_elt_update - per-event update of one map element:
 * evaluate each value field and either store it in its variable slot
 * (FL_VAR) or add it to the running sum; key fields that are also
 * variables get their values stored too.
 */
1586 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
1587 struct tracing_map_elt *elt, void *rec,
1588 struct ring_buffer_event *rbe)
1590 struct hist_field *hist_field;
1591 unsigned int i, var_idx;
1594 for_each_hist_val_field(i, hist_data) {
1595 hist_field = hist_data->fields[i];
1596 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
1597 if (hist_field->flags & HIST_FIELD_FL_VAR) {
1598 var_idx = hist_field->var.idx;
1599 tracing_map_set_var(elt, var_idx, hist_val);
1602 tracing_map_update_sum(elt, i, hist_val);
1605 for_each_hist_key_field(i, hist_data) {
1606 hist_field = hist_data->fields[i];
1607 if (hist_field->flags & HIST_FIELD_FL_VAR) {
1608 hist_val = hist_field->fn(hist_field, elt, rbe, rec);
1609 var_idx = hist_field->var.idx;
1610 tracing_map_set_var(elt, var_idx, hist_val);
/*
 * add_to_key - copy one key field's bytes into the compound key at the
 * field's offset.  For string keys, compute the actual string length
 * by filter type (dyn strings encode it in the record's high 16 bits)
 * and clamp to leave room for a NUL.
 * NOTE(review): the FILTER_PTR_STRING / FILTER_STATIC_STRING size lines
 * were dropped by the extraction.
 */
1615 static inline void add_to_key(char *compound_key, void *key,
1616 struct hist_field *key_field, void *rec)
1618 size_t size = key_field->size;
1620 if (key_field->flags & HIST_FIELD_FL_STRING) {
1621 struct ftrace_event_field *field;
1623 field = key_field->field;
/* Dyn string: length lives in the high half of the loc/len item. */
1624 if (field->filter_type == FILTER_DYN_STRING)
1625 size = *(u32 *)(rec + field->offset) >> 16;
1626 else if (field->filter_type == FILTER_PTR_STRING)
1628 else if (field->filter_type == FILTER_STATIC_STRING)
1631 /* ensure NULL-termination */
1632 if (size > key_field->size - 1)
1633 size = key_field->size - 1;
1636 memcpy(compound_key + key_field->offset, key, size);
/*
 * event_hist_trigger - hot-path trigger callback, invoked for every
 * matching event: build the key (single field, compound, or captured
 * stacktrace), insert/find the map element, and update its values.
 * String keys always force the compound-key path so lengths are
 * handled by add_to_key().
 * NOTE(review): the stacktrace key assignment, the compound-key
 * selection and the elt NULL-check were dropped by the extraction.
 */
1639 static void event_hist_trigger(struct event_trigger_data *data, void *rec,
1640 struct ring_buffer_event *rbe)
1642 struct hist_trigger_data *hist_data = data->private_data;
1643 bool use_compound_key = (hist_data->n_keys > 1);
1644 unsigned long entries[HIST_STACKTRACE_DEPTH];
1645 char compound_key[HIST_KEY_SIZE_MAX];
1646 struct tracing_map_elt *elt = NULL;
1647 struct stack_trace stacktrace;
1648 struct hist_field *key_field;
1653 memset(compound_key, 0, hist_data->key_size);
1655 for_each_hist_key_field(i, hist_data) {
1656 key_field = hist_data->fields[i];
1658 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
1659 stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
1660 stacktrace.entries = entries;
1661 stacktrace.nr_entries = 0;
1662 stacktrace.skip = HIST_STACKTRACE_SKIP;
/* Zero first so short traces compare equal in the map. */
1664 memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
1665 save_stack_trace(&stacktrace);
1669 field_contents = key_field->fn(key_field, elt, rbe, rec);
1670 if (key_field->flags & HIST_FIELD_FL_STRING) {
1671 key = (void *)(unsigned long)field_contents;
1672 use_compound_key = true;
1674 key = (void *)&field_contents;
1677 if (use_compound_key)
1678 add_to_key(compound_key, key, key_field, rec);
1681 if (use_compound_key)
1684 elt = tracing_map_insert(hist_data->map, key);
1686 hist_trigger_elt_update(hist_data, elt, rec, rbe);
/*
 * Print a saved stacktrace key: one symbolized frame per line,
 * indented (spaces) so the frames line up under "stacktrace:".
 * ULONG_MAX is the end-of-trace sentinel; the elided line after the
 * check presumably terminates the loop early (TODO confirm - listing
 * is missing original lines 1699-1700).
 */
1689 static void hist_trigger_stacktrace_print(struct seq_file *m,
1690 unsigned long *stacktrace_entries,
1691 unsigned int max_entries)
1693 char str[KSYM_SYMBOL_LEN];
1694 unsigned int spaces = 8;
1697 for (i = 0; i < max_entries; i++) {
1698 if (stacktrace_entries[i] == ULONG_MAX)
1701 seq_printf(m, "%*c", 1 + spaces, ' ');
1702 sprint_symbol(str, stacktrace_entries[i]);
1703 seq_printf(m, "%s\n", str);
/*
 * hist_trigger_entry_print - print one histogram entry (key + values).
 *
 * Walks the key fields, dispatching on the field's modifier flag
 * (.hex, .sym, .sym-offset, .execname, .syscall, stacktrace, .log2,
 * string) to choose the output format, then prints the hitcount and
 * each remaining value field's sum.
 *
 * NOTE(review): elided listing - the separators printed between keys,
 * several closing braces and the trailing newline lines are missing;
 * code kept byte-identical, comments only.
 */
1708 hist_trigger_entry_print(struct seq_file *m,
1709 struct hist_trigger_data *hist_data, void *key,
1710 struct tracing_map_elt *elt)
1712 struct hist_field *key_field;
1713 char str[KSYM_SYMBOL_LEN];
1714 bool multiline = false;
1715 const char *field_name;
1721 for_each_hist_key_field(i, hist_data) {
1722 key_field = hist_data->fields[i];
/* skip the separator for the first key field (elided branch body) */
1724 if (i > hist_data->n_vals)
1727 field_name = hist_field_name(key_field, 0);
1729 if (key_field->flags & HIST_FIELD_FL_HEX) {
1730 uval = *(u64 *)(key + key_field->offset);
1731 seq_printf(m, "%s: %llx", field_name, uval);
1732 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
1733 uval = *(u64 *)(key + key_field->offset);
1734 sprint_symbol_no_offset(str, uval);
1735 seq_printf(m, "%s: [%llx] %-45s", field_name,
1737 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
1738 uval = *(u64 *)(key + key_field->offset);
1739 sprint_symbol(str, uval);
1740 seq_printf(m, "%s: [%llx] %-55s", field_name,
1742 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
/* comm was cached on the element when it was created */
1743 struct hist_elt_data *elt_data = elt->private_data;
1746 if (WARN_ON_ONCE(!elt_data))
1749 comm = elt_data->comm;
1751 uval = *(u64 *)(key + key_field->offset);
1752 seq_printf(m, "%s: %-16s[%10llu]", field_name,
1754 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
1755 const char *syscall_name;
1757 uval = *(u64 *)(key + key_field->offset);
1758 syscall_name = get_syscall_name(uval);
1760 syscall_name = "unknown_syscall";
1762 seq_printf(m, "%s: %-30s[%3llu]", field_name,
1763 syscall_name, uval);
1764 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
1765 seq_puts(m, "stacktrace:\n");
1766 hist_trigger_stacktrace_print(m,
1767 key + key_field->offset,
1768 HIST_STACKTRACE_DEPTH);
1770 } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
1771 seq_printf(m, "%s: ~ 2^%-2llu", field_name,
1772 *(u64 *)(key + key_field->offset));
1773 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
1774 seq_printf(m, "%s: %-50s", field_name,
1775 (char *)(key + key_field->offset));
/* default: plain unsigned decimal */
1777 uval = *(u64 *)(key + key_field->offset);
1778 seq_printf(m, "%s: %10llu", field_name, uval);
1787 seq_printf(m, " hitcount: %10llu",
1788 tracing_map_read_sum(elt, HITCOUNT_IDX));
/* value fields start at index 1; index 0 is the hitcount */
1790 for (i = 1; i < hist_data->n_vals; i++) {
1791 field_name = hist_field_name(hist_data->fields[i], 0);
/* variables and expressions are not printed as sums (elided body) */
1793 if (hist_data->fields[i]->flags & HIST_FIELD_FL_VAR ||
1794 hist_data->fields[i]->flags & HIST_FIELD_FL_EXPR)
1797 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
1798 seq_printf(m, " %s: %10llx", field_name,
1799 tracing_map_read_sum(elt, i));
1801 seq_printf(m, " %s: %10llu", field_name,
1802 tracing_map_read_sum(elt, i));
/*
 * print_entries - sort all map entries by the trigger's sort keys and
 * print each one. Presumably returns n_entries (the sort result) to
 * the caller - TODO confirm, the return statement is elided.
 */
1809 static int print_entries(struct seq_file *m,
1810 struct hist_trigger_data *hist_data)
1812 struct tracing_map_sort_entry **sort_entries = NULL;
1813 struct tracing_map *map = hist_data->map;
1816 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
1817 hist_data->n_sort_keys,
1822 for (i = 0; i < n_entries; i++)
1823 hist_trigger_entry_print(m, hist_data,
1824 sort_entries[i]->key,
1825 sort_entries[i]->elt);
/* free the sorted snapshot produced by tracing_map_sort_entries() */
1827 tracing_map_destroy_sort_entries(sort_entries, n_entries);
/*
 * hist_trigger_show - emit one trigger's histogram to the seq_file:
 * header with the trigger info line, the sorted entries, then totals
 * (hits / entries / dropped) read from the map's atomic counters.
 */
1832 static void hist_trigger_show(struct seq_file *m,
1833 struct event_trigger_data *data, int n)
1835 struct hist_trigger_data *hist_data;
1839 seq_puts(m, "\n\n");
1841 seq_puts(m, "# event histogram\n#\n# trigger info: ");
1842 data->ops->print(m, data->ops, data);
1843 seq_puts(m, "#\n\n");
1845 hist_data = data->private_data;
1846 n_entries = print_entries(m, hist_data);
1850 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
1851 (u64)atomic64_read(&hist_data->map->hits),
1852 n_entries, (u64)atomic64_read(&hist_data->map->drops));
/*
 * hist_show - seq_file show callback for the per-event "hist" file.
 * Holds event_mutex while walking the event file's trigger list and
 * printing every hist trigger attached to it.
 */
1855 static int hist_show(struct seq_file *m, void *v)
1857 struct event_trigger_data *data;
1858 struct trace_event_file *event_file;
1861 mutex_lock(&event_mutex);
1863 event_file = event_file_data(m->private);
/* file may have been removed while we waited for the mutex */
1864 if (unlikely(!event_file)) {
1869 list_for_each_entry_rcu(data, &event_file->triggers, list) {
1870 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
1871 hist_trigger_show(m, data, n++);
1875 mutex_unlock(&event_mutex);
/* open callback for the "hist" file: single_open with hist_show */
1880 static int event_hist_open(struct inode *inode, struct file *file)
1882 return single_open(file, hist_show, file);
/* file_operations for the per-event "hist" file (read-only seq_file) */
1885 const struct file_operations event_hist_fops = {
1886 .open = event_hist_open,
1888 .llseek = seq_lseek,
1889 .release = single_release,
/*
 * hist_field_print - print one field spec as it appears in a trigger
 * string: optional "var=" prefix, the field name (or the special
 * "common_timestamp"), and any ".modifier" suffix.
 */
1892 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
1894 const char *field_name = hist_field_name(hist_field, 0);
1896 if (hist_field->var.name)
1897 seq_printf(m, "%s=", hist_field->var.name);
1899 if (hist_field->flags & HIST_FIELD_FL_TIMESTAMP)
1900 seq_puts(m, "common_timestamp");
1901 else if (field_name)
1902 seq_printf(m, "%s", field_name);
1904 if (hist_field->flags) {
1905 const char *flags_str = get_hist_field_flags(hist_field);
1908 seq_printf(m, ".%s", flags_str);
/*
 * event_hist_trigger_print - reconstruct the trigger command string
 * ("hist:keys=...:vals=...:sort=...:size=... [if filter] [state]")
 * for display in the "hist"/"trigger" files.
 *
 * NOTE(review): elided listing - separators between items, the vars
 * section header and several closing braces are missing; code kept
 * byte-identical, comments only.
 */
1912 static int event_hist_trigger_print(struct seq_file *m,
1913 struct event_trigger_ops *ops,
1914 struct event_trigger_data *data)
1916 struct hist_trigger_data *hist_data = data->private_data;
1917 struct hist_field *field;
1918 bool have_var = false;
1921 seq_puts(m, "hist:");
/* named triggers print their name as a "name:" prefix */
1924 seq_printf(m, "%s:", data->name);
1926 seq_puts(m, "keys=");
1928 for_each_hist_key_field(i, hist_data) {
1929 field = hist_data->fields[i];
1931 if (i > hist_data->n_vals)
1934 if (field->flags & HIST_FIELD_FL_STACKTRACE)
1935 seq_puts(m, "stacktrace");
1937 hist_field_print(m, field);
1940 seq_puts(m, ":vals=");
1942 for_each_hist_val_field(i, hist_data) {
1943 field = hist_data->fields[i];
/* variables are listed separately, not under vals= (elided body) */
1944 if (field->flags & HIST_FIELD_FL_VAR) {
1949 if (i == HITCOUNT_IDX)
1950 seq_puts(m, "hitcount");
1953 hist_field_print(m, field);
/* second pass: print the variable assignments */
1962 for_each_hist_val_field(i, hist_data) {
1963 field = hist_data->fields[i];
1965 if (field->flags & HIST_FIELD_FL_VAR) {
1968 hist_field_print(m, field);
1973 seq_puts(m, ":sort=");
1975 for (i = 0; i < hist_data->n_sort_keys; i++) {
1976 struct tracing_map_sort_key *sort_key;
1977 unsigned int idx, first_key_idx;
/* sort key indices skip over the variable slots */
1980 first_key_idx = hist_data->n_vals - hist_data->n_vars;
1982 sort_key = &hist_data->sort_keys[i];
1983 idx = sort_key->field_idx;
1985 if (WARN_ON(idx >= HIST_FIELDS_MAX))
1991 if (idx == HITCOUNT_IDX)
1992 seq_puts(m, "hitcount");
1994 if (idx >= first_key_idx)
1995 idx += hist_data->n_vars;
1996 hist_field_print(m, hist_data->fields[idx]);
1999 if (sort_key->descending)
2000 seq_puts(m, ".descending");
2002 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
2004 if (data->filter_str)
2005 seq_printf(m, " if %s", data->filter_str);
2008 seq_puts(m, " [paused]");
2010 seq_puts(m, " [active]");
/*
 * event_hist_trigger_init - trigger init callback; on the first
 * reference, register a named trigger so others can share it.
 */
2017 static int event_hist_trigger_init(struct event_trigger_ops *ops,
2018 struct event_trigger_data *data)
2020 struct hist_trigger_data *hist_data = data->private_data;
2022 if (!data->ref && hist_data->attrs->name)
2023 save_named_trigger(hist_data->attrs->name, data);
/*
 * event_hist_trigger_free - drop a reference; when the last reference
 * goes (elided decrement/check), unregister the name and free both the
 * trigger data and the histogram data.
 */
2030 static void event_hist_trigger_free(struct event_trigger_ops *ops,
2031 struct event_trigger_data *data)
2033 struct hist_trigger_data *hist_data = data->private_data;
2035 if (WARN_ON_ONCE(data->ref <= 0))
2041 del_named_trigger(data);
2042 trigger_data_free(data);
2043 destroy_hist_data(hist_data);
/* ops for an ordinary (unnamed) hist trigger */
2047 static struct event_trigger_ops event_hist_trigger_ops = {
2048 .func = event_hist_trigger,
2049 .print = event_hist_trigger_print,
2050 .init = event_hist_trigger_init,
2051 .free = event_hist_trigger_free,
/*
 * init for a trigger sharing a named histogram: register under the
 * shared name, then delegate to the named_data's normal init.
 */
2054 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
2055 struct event_trigger_data *data)
2059 save_named_trigger(data->named_data->name, data);
2061 event_hist_trigger_init(ops, data->named_data);
/*
 * free for a trigger sharing a named histogram: release the shared
 * named_data's reference, then (on last local ref - elided check)
 * deregister and free this instance's own trigger data.
 */
2066 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
2067 struct event_trigger_data *data)
2069 if (WARN_ON_ONCE(data->ref <= 0))
2072 event_hist_trigger_free(ops, data->named_data);
2076 del_named_trigger(data);
2077 trigger_data_free(data);
/* ops for a hist trigger that shares a named histogram */
2081 static struct event_trigger_ops event_hist_trigger_named_ops = {
2082 .func = event_hist_trigger,
2083 .print = event_hist_trigger_print,
2084 .init = event_hist_trigger_named_init,
2085 .free = event_hist_trigger_named_free,
/* hist triggers always start with the unnamed ops */
2088 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
2091 return &event_hist_trigger_ops;
/*
 * hist_clear - reset a histogram's accumulated data. Pauses the
 * trigger (named triggers pause all sharers), waits an RCU grace
 * period so in-flight updates drain, clears the map, then unpauses.
 */
2094 static void hist_clear(struct event_trigger_data *data)
2096 struct hist_trigger_data *hist_data = data->private_data;
2099 pause_named_trigger(data);
/* ensure no CPU is still updating the map before clearing it */
2101 synchronize_sched();
2103 tracing_map_clear(hist_data->map);
2106 unpause_named_trigger(data);
/*
 * compatible_field - two ftrace event fields are compatible if they
 * are the same field, or match in name, type, size and signedness
 * (the elided lines are the per-check return statements).
 */
2109 static bool compatible_field(struct ftrace_event_field *field,
2110 struct ftrace_event_field *test_field)
2112 if (field == test_field)
2114 if (field == NULL || test_field == NULL)
2116 if (strcmp(field->name, test_field->name) != 0)
2118 if (strcmp(field->type, test_field->type) != 0)
2120 if (field->size != test_field->size)
2122 if (field->is_signed != test_field->is_signed)
/*
 * hist_trigger_match - decide whether two hist triggers describe the
 * same histogram: same field counts, per-field flags/offset/size/
 * signedness/variable names, same sort keys, and (unless
 * ignore_filter) the same filter string. Used to find an existing
 * trigger to pause/continue/clear/remove or a named trigger to share.
 *
 * NOTE(review): elided listing - the individual "return false" lines
 * are missing; code kept byte-identical, comments only.
 */
2128 static bool hist_trigger_match(struct event_trigger_data *data,
2129 struct event_trigger_data *data_test,
2130 struct event_trigger_data *named_data,
2133 struct tracing_map_sort_key *sort_key, *sort_key_test;
2134 struct hist_trigger_data *hist_data, *hist_data_test;
2135 struct hist_field *key_field, *key_field_test;
/* a named trigger only matches itself or its shared named_data */
2138 if (named_data && (named_data != data_test) &&
2139 (named_data != data_test->named_data))
2142 if (!named_data && is_named_trigger(data_test))
2145 hist_data = data->private_data;
2146 hist_data_test = data_test->private_data;
2148 if (hist_data->n_vals != hist_data_test->n_vals ||
2149 hist_data->n_fields != hist_data_test->n_fields ||
2150 hist_data->n_sort_keys != hist_data_test->n_sort_keys)
2153 if (!ignore_filter) {
/* one has a filter and the other doesn't -> no match */
2154 if ((data->filter_str && !data_test->filter_str) ||
2155 (!data->filter_str && data_test->filter_str))
2159 for_each_hist_field(i, hist_data) {
2160 key_field = hist_data->fields[i];
2161 key_field_test = hist_data_test->fields[i];
2163 if (key_field->flags != key_field_test->flags)
2165 if (!compatible_field(key_field->field, key_field_test->field))
2167 if (key_field->offset != key_field_test->offset)
2169 if (key_field->size != key_field_test->size)
2171 if (key_field->is_signed != key_field_test->is_signed)
2173 if (!!key_field->var.name != !!key_field_test->var.name)
2175 if (key_field->var.name &&
2176 strcmp(key_field->var.name, key_field_test->var.name) != 0)
2180 for (i = 0; i < hist_data->n_sort_keys; i++) {
2181 sort_key = &hist_data->sort_keys[i];
2182 sort_key_test = &hist_data_test->sort_keys[i];
2184 if (sort_key->field_idx != sort_key_test->field_idx ||
2185 sort_key->descending != sort_key_test->descending)
2189 if (!ignore_filter && data->filter_str &&
2190 (strcmp(data->filter_str, data_test->filter_str) != 0))
/*
 * hist_register_trigger - event_command reg() callback. Handles:
 *  - resolving/validating a named trigger to share,
 *  - pause/cont/clear applied to an already-registered match,
 *  - attaching a new trigger to the file's list, enabling the event,
 *    and switching on absolute timestamps when the hist needs them.
 *
 * NOTE(review): elided listing - error paths, the matching-trigger
 * "exists" handling and several returns are missing; code kept
 * byte-identical, comments only.
 */
2196 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
2197 struct event_trigger_data *data,
2198 struct trace_event_file *file)
2200 struct hist_trigger_data *hist_data = data->private_data;
2201 struct event_trigger_data *test, *named_data = NULL;
2204 if (hist_data->attrs->name) {
2205 named_data = find_named_trigger(hist_data->attrs->name);
/* same name but different definition is an error (elided body) */
2207 if (!hist_trigger_match(data, named_data, named_data,
2215 if (hist_data->attrs->name && !named_data)
2218 list_for_each_entry_rcu(test, &file->triggers, list) {
2219 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2220 if (!hist_trigger_match(data, test, named_data, false))
/* matching trigger already registered: apply pause/cont/clear */
2222 if (hist_data->attrs->pause)
2223 test->paused = true;
2224 else if (hist_data->attrs->cont)
2225 test->paused = false;
2226 else if (hist_data->attrs->clear)
2234 if (hist_data->attrs->cont || hist_data->attrs->clear) {
2239 if (hist_data->attrs->pause)
2240 data->paused = true;
/* share the named trigger's hist_data instead of our own copy */
2243 destroy_hist_data(data->private_data);
2244 data->private_data = named_data->private_data;
2245 set_named_trigger_data(data, named_data);
2246 data->ops = &event_hist_trigger_named_ops;
2249 if (data->ops->init) {
2250 ret = data->ops->init(data->ops, data);
2255 list_add_rcu(&data->list, &file->triggers);
2258 update_cond_flag(file);
2260 if (hist_data->enable_timestamps)
2261 tracing_set_time_stamp_abs(file->tr, true);
/* enabling failed: undo the list insertion */
2263 if (trace_event_trigger_enable_disable(file, 1) < 0) {
2264 list_del_rcu(&data->list);
2265 update_cond_flag(file);
/*
 * hist_unregister_trigger - event_command unreg() callback: find the
 * matching hist trigger on the file, unlink it, disable the event,
 * free it, and turn absolute timestamps back off if this hist had
 * enabled them.
 */
2272 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
2273 struct event_trigger_data *data,
2274 struct trace_event_file *file)
2276 struct hist_trigger_data *hist_data = data->private_data;
2277 struct event_trigger_data *test, *named_data = NULL;
2278 bool unregistered = false;
2280 if (hist_data->attrs->name)
2281 named_data = find_named_trigger(hist_data->attrs->name);
2283 list_for_each_entry_rcu(test, &file->triggers, list) {
2284 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2285 if (!hist_trigger_match(data, test, named_data, false))
2287 unregistered = true;
2288 list_del_rcu(&test->list);
2289 trace_event_trigger_enable_disable(file, 0);
2290 update_cond_flag(file);
/* free only after the trigger was actually unlinked */
2295 if (unregistered && test->ops->free)
2296 test->ops->free(test->ops, test);
2298 if (hist_data->enable_timestamps) {
2299 if (!hist_data->remove || unregistered)
2300 tracing_set_time_stamp_abs(file->tr, false);
/*
 * hist_unreg_all - remove every hist trigger from an event file
 * (used when the file goes away). Safe-iteration because each
 * matching entry is unlinked and freed in place.
 */
2304 static void hist_unreg_all(struct trace_event_file *file)
2306 struct event_trigger_data *test, *n;
2307 struct hist_trigger_data *hist_data;
2309 list_for_each_entry_safe(test, n, &file->triggers, list) {
2310 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2311 hist_data = test->private_data;
2312 list_del_rcu(&test->list);
2313 trace_event_trigger_enable_disable(file, 0);
2314 update_cond_flag(file);
2315 if (hist_data->enable_timestamps)
2316 tracing_set_time_stamp_abs(file->tr, false);
2317 if (test->ops->free)
2318 test->ops->free(test->ops, test);
/*
 * event_hist_trigger_func - parse and install/remove a "hist" trigger
 * command ("hist:keys=...[:vals=...][:sort=...][ if filter]").
 * Parses the attrs, builds the hist_trigger_data, allocates the
 * trigger_data, sets an optional filter, then registers (or, for a
 * '!'-prefixed glob, unregisters) the trigger.
 *
 * NOTE(review): elided listing - the remove-detection, error labels
 * and returns between the visible lines are missing; code kept
 * byte-identical, comments only.
 */
2323 static int event_hist_trigger_func(struct event_command *cmd_ops,
2324 struct trace_event_file *file,
2325 char *glob, char *cmd, char *param)
2327 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
2328 struct event_trigger_data *trigger_data;
2329 struct hist_trigger_attrs *attrs;
2330 struct event_trigger_ops *trigger_ops;
2331 struct hist_trigger_data *hist_data;
2332 bool remove = false;
2342 /* separate the trigger from the filter (k:v [if filter]) */
2343 trigger = strsep(&param, " \t");
2347 attrs = parse_hist_trigger_attrs(trigger);
2349 return PTR_ERR(attrs);
2351 if (attrs->map_bits)
2352 hist_trigger_bits = attrs->map_bits;
2354 hist_data = create_hist_data(hist_trigger_bits, attrs, file, remove);
2355 if (IS_ERR(hist_data)) {
/* attrs ownership hasn't transferred yet on this error path */
2356 destroy_hist_trigger_attrs(attrs);
2357 return PTR_ERR(hist_data);
2360 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
2363 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
2367 trigger_data->count = -1;
2368 trigger_data->ops = trigger_ops;
2369 trigger_data->cmd_ops = cmd_ops;
2371 INIT_LIST_HEAD(&trigger_data->list);
2372 RCU_INIT_POINTER(trigger_data->filter, NULL);
2374 trigger_data->private_data = hist_data;
2376 /* if param is non-empty, it's supposed to be a filter */
2377 if (param && cmd_ops->set_filter) {
2378 ret = cmd_ops->set_filter(param, trigger_data, file);
/* removal path: glob starts with '!', skip it for unreg */
2384 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
2389 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
2391 * The above returns on success the # of triggers registered,
2392 * but if it didn't register any it returns zero. Consider no
2393 * triggers registered a failure too.
2396 if (!(attrs->pause || attrs->cont || attrs->clear))
2401 /* Just return zero, not the number of registered triggers */
/* error unwinding: undo filter, free trigger_data, then hist_data */
2406 if (cmd_ops->set_filter)
2407 cmd_ops->set_filter(NULL, trigger_data, NULL);
2409 kfree(trigger_data);
2411 destroy_hist_data(hist_data);
/* the "hist" event command; NEEDS_REC because keys come from the record */
2415 static struct event_command trigger_hist_cmd = {
2417 .trigger_type = ETT_EVENT_HIST,
2418 .flags = EVENT_CMD_FL_NEEDS_REC,
2419 .func = event_hist_trigger_func,
2420 .reg = hist_register_trigger,
2421 .unreg = hist_unregister_trigger,
2422 .unreg_all = hist_unreg_all,
2423 .get_trigger_ops = event_hist_get_trigger_ops,
2424 .set_filter = set_trigger_filter,
/* boot-time registration of the "hist" command */
2427 __init int register_trigger_hist_cmd(void)
2431 ret = register_event_command(&trigger_hist_cmd);
/*
 * hist_enable_trigger - enable_hist/disable_hist trigger function:
 * pause or unpause every hist trigger on the target event file,
 * depending on enable_data->enable.
 */
2438 hist_enable_trigger(struct event_trigger_data *data, void *rec,
2439 struct ring_buffer_event *event)
2441 struct enable_trigger_data *enable_data = data->private_data;
2442 struct event_trigger_data *test;
2444 list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
2445 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
2446 if (enable_data->enable)
2447 test->paused = false;
2449 test->paused = true;
/*
 * Counted variant: only fires while data->count is nonzero
 * (the elided lines handle the zero check and decrement).
 */
2455 hist_enable_count_trigger(struct event_trigger_data *data, void *rec,
2456 struct ring_buffer_event *event)
2461 if (data->count != -1)
2464 hist_enable_trigger(data, rec, event);
/* ops: enable_hist without a count */
2467 static struct event_trigger_ops hist_enable_trigger_ops = {
2468 .func = hist_enable_trigger,
2469 .print = event_enable_trigger_print,
2470 .init = event_trigger_init,
2471 .free = event_enable_trigger_free,
/* ops: enable_hist with a :count limit */
2474 static struct event_trigger_ops hist_enable_count_trigger_ops = {
2475 .func = hist_enable_count_trigger,
2476 .print = event_enable_trigger_print,
2477 .init = event_trigger_init,
2478 .free = event_enable_trigger_free,
/* ops: disable_hist without a count (same func; enable flag differs) */
2481 static struct event_trigger_ops hist_disable_trigger_ops = {
2482 .func = hist_enable_trigger,
2483 .print = event_enable_trigger_print,
2484 .init = event_trigger_init,
2485 .free = event_enable_trigger_free,
/* ops: disable_hist with a :count limit */
2488 static struct event_trigger_ops hist_disable_count_trigger_ops = {
2489 .func = hist_enable_count_trigger,
2490 .print = event_enable_trigger_print,
2491 .init = event_trigger_init,
2492 .free = event_enable_trigger_free,
/*
 * Pick the right ops table from the command name (enable vs disable)
 * and whether a :count parameter was given.
 */
2495 static struct event_trigger_ops *
2496 hist_enable_get_trigger_ops(char *cmd, char *param)
2498 struct event_trigger_ops *ops;
2501 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
2504 ops = param ? &hist_enable_count_trigger_ops :
2505 &hist_enable_trigger_ops;
2507 ops = param ? &hist_disable_count_trigger_ops :
2508 &hist_disable_trigger_ops;
/*
 * Remove every enable_hist/disable_hist trigger from an event file
 * (file teardown path); mirrors hist_unreg_all for ETT_HIST_ENABLE.
 */
2513 static void hist_enable_unreg_all(struct trace_event_file *file)
2515 struct event_trigger_data *test, *n;
2517 list_for_each_entry_safe(test, n, &file->triggers, list) {
2518 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
2519 list_del_rcu(&test->list);
2520 update_cond_flag(file);
2521 trace_event_trigger_enable_disable(file, 0);
2522 if (test->ops->free)
2523 test->ops->free(test->ops, test);
/* "enable_hist" command: reuses the generic event-enable machinery */
2528 static struct event_command trigger_hist_enable_cmd = {
2529 .name = ENABLE_HIST_STR,
2530 .trigger_type = ETT_HIST_ENABLE,
2531 .func = event_enable_trigger_func,
2532 .reg = event_enable_register_trigger,
2533 .unreg = event_enable_unregister_trigger,
2534 .unreg_all = hist_enable_unreg_all,
2535 .get_trigger_ops = hist_enable_get_trigger_ops,
2536 .set_filter = set_trigger_filter,
/* "disable_hist" command: same plumbing, opposite effect */
2539 static struct event_command trigger_hist_disable_cmd = {
2540 .name = DISABLE_HIST_STR,
2541 .trigger_type = ETT_HIST_ENABLE,
2542 .func = event_enable_trigger_func,
2543 .reg = event_enable_register_trigger,
2544 .unreg = event_enable_unregister_trigger,
2545 .unreg_all = hist_enable_unreg_all,
2546 .get_trigger_ops = hist_enable_get_trigger_ops,
2547 .set_filter = set_trigger_filter,
/* undo registration of both commands (error-path helper) */
2550 static __init void unregister_trigger_hist_enable_disable_cmds(void)
2552 unregister_event_command(&trigger_hist_enable_cmd);
2553 unregister_event_command(&trigger_hist_disable_cmd);
2556 __init int register_trigger_hist_enable_disable_cmds(void)
2560 ret = register_event_command(&trigger_hist_enable_cmd);
2561 if (WARN_ON(ret < 0))
2563 ret = register_event_command(&trigger_hist_disable_cmd);
2564 if (WARN_ON(ret < 0))
2565 unregister_trigger_hist_enable_disable_cmds();