2 * kp_tab.c - ktap table data structure manipulation
4 * This file is part of ktap by Jovi Zhangwei.
6 * Copyright (C) 2012-2013 Jovi Zhangwei <jovi.zhangwei@gmail.com>.
8 * Copyright (C) 1994-2013 Lua.org, PUC-Rio.
9 * - The part of code in this file is copied from lua initially.
10 * - lua's MIT license is compatible with GPL.
12 * ktap is free software; you can redistribute it and/or modify it
13 * under the terms and conditions of the GNU General Public License,
14 * version 2, as published by the Free Software Foundation.
16 * ktap is distributed in the hope it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
21 * You should have received a copy of the GNU General Public License along with
22 * this program; if not, write to the Free Software Foundation, Inc.,
23 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
26 #include "../include/ktap_types.h"
29 #include <linux/spinlock.h>
30 #include <linux/module.h>
31 #include <linux/kallsyms.h>
32 #include <linux/sort.h>
/*
 * Compatibility shim that adapts this file's sort() calls (cmp-only) to
 * the kernel's lib/sort.c signature, which also takes a swap callback.
 * NOTE(review): the function body is not visible in this extract
 * (embedded line numbers jump from 38 to 46) -- confirm against upstream.
 */
36 static inline void sort(void *base, size_t num, size_t size,
37 int (*cmp_func)(const void *, const void *),
38 void (*swap_func)(void *, void *, int size))
46 #define kp_tab_lock_init(t) \
48 (t)->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
50 #define kp_tab_lock(t) \
52 local_irq_save(flags); \
53 arch_spin_lock(&(t)->lock); \
55 #define kp_tab_unlock(t) \
57 arch_spin_unlock(&(t)->lock); \
58 local_irq_restore(flags); \
62 #define kp_tab_lock_init(t)
63 #define kp_tab_lock(t)
64 #define kp_tab_unlock(t)
/* largest array-part size: 2^MAXBITS entries (MAXBITS defined above, not visible here) */
68 #define MAXASIZE (1 << MAXBITS)
/* the canonical shared nil value; returned by lookups that find nothing */
71 #define NILCONSTANT {NULL}, KTAP_TNIL
72 const struct ktap_value ktap_nilobjectv = {NILCONSTANT};
73 #define ktap_nilobject (&ktap_nilobjectv)
/*
 * Shared sentinel node used as the hash part of tables with no hash
 * entries (same trick as Lua's dummynode).
 * NOTE(review): closing brace/initializer tail not visible in extract.
 */
75 static const ktap_tnode dummynode_ = {
76 {NILCONSTANT}, /* value */
77 {NULL, {NILCONSTANT}}, /* key */
/* node accessors: i-th node, its key ktap_value, its value, its chain link */
80 #define gnode(t,i) (&(t)->node[i])
81 #define gkey(n) (&(n)->i_key.tvk)
82 #define gval(n) (&(n)->i_val)
83 #define gnext(n) ((n)->i_key.next)
/* hash-part size is always a power of two: 2^lsizenode */
85 #define twoto(x) (1<<(x))
86 #define sizenode(t) (twoto((t)->lsizenode))
/* hash for power-of-2 table sizes (mask) vs. modulo an odd divisor */
88 #define hashpow2(t,n) (gnode(t, lmod((n), sizenode(t))))
90 #define hashmod(t,n) (gnode(t, ((n) % ((sizenode(t)-1)|1))))
/* per-key-type hash dispatch helpers */
92 #define hashstr(t,str) hashpow2(t, (str)->tsv.hash)
93 #define hashboolean(t,p) hashpow2(t, p)
94 #define hashnum(t, n) hashmod(t, (unsigned int)n)
95 #define hashpointer(t,p) hashmod(t, (unsigned long)(p))
97 #define dummynode (&dummynode_)
98 #define isdummy(n) ((n) == dummynode)
/* forward declarations: set/lookup helpers defined later in this file */
100 static void table_setint(ktap_state *ks, ktap_tab *t, int key, ktap_value *v);
101 static ktap_value *table_set(ktap_state *ks, ktap_tab *t,
102 const ktap_value *key);
103 static void setnodevector(ktap_state *ks, ktap_tab *t, int size);
/*
 * ceillog2 - ceil(log2(x)) using a 256-entry byte lookup table, with the
 * while loop reducing x below 256 one byte at a time.
 * NOTE(review): lines between the table and the loop (accumulator init,
 * final return) are not visible in this extract -- confirm upstream.
 */
105 static int ceillog2(unsigned int x)
107 static const u8 log_2[256] = {
108 0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
109 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
110 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
111 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
112 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
113 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
114 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,
115 8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
121 while (x >= 256) { l += 8; x >>= 8; }
/*
 * _read_sd - map a hash-node value slot to its parallel stat_data slot:
 * the node index (node - hnode) indexes the hsd array one-for-one.
 */
126 static inline ktap_stat_data *_read_sd(const ktap_value *v,
127 ktap_tnode *hnode, ktap_stat_data *hsd)
129 ktap_tnode *node = container_of(v, ktap_tnode, i_val);
130 return hsd + (node - hnode);
/*
 * read_sd - find the stat_data that shadows value slot @v: array-part
 * values map to t->sd_arr by index, anything else to t->sd_rec via
 * _read_sd().  Relies on t->sd_arr/sd_rec being allocated in parallel
 * with t->array/t->node.
 */
133 static inline ktap_stat_data *read_sd(ktap_tab *t, const ktap_value *v)
135 if (v >= &t->array[0] && v < &t->array[t->sizearray])
136 return &t->sd_arr[v - &t->array[0]];
138 return _read_sd(v, t->node, t->sd_rec);
/*
 * NOTE(review): second pair below is presumably the no-stats stub variant
 * under an #else branch not visible in this extract -- confirm upstream.
 */
142 static inline ktap_stat_data *_read_sd(const ktap_value *v,
143 ktap_tnode *hnode, ktap_stat_data *hsd)
148 static inline ktap_stat_data *read_sd(ktap_tab *t, const ktap_value *v)
/*
 * kp_tab_new - allocate a fresh table object: GC-registered via
 * kp_newobject(), array part empty, hash part pointed at the shared
 * dummynode, then initialized with an empty node vector.
 * NOTE(review): field-zeroing lines between these statements are not
 * visible in this extract.
 */
155 ktap_tab *kp_tab_new(ktap_state *ks)
157 ktap_tab *t = &kp_newobject(ks, KTAP_TTABLE, sizeof(ktap_tab),
162 t->node = (ktap_tnode *)dummynode;
167 setnodevector(ks, t, 0);
/*
 * table_getint - lookup for integer keys.  Keys 1..sizearray hit the
 * array part directly (the unsigned compare of key-1 also rejects
 * key <= 0); otherwise the hash chain is walked for a numeric match.
 * Returns ktap_nilobject when absent.
 * NOTE(review): the chain-walk loop around line 185 is truncated here.
 */
176 static const ktap_value *table_getint(ktap_tab *t, int key)
180 if ((unsigned int)(key - 1) < (unsigned int)t->sizearray)
181 return &t->array[key - 1];
185 if (is_number(gkey(n)) && nvalue(gkey(n)) == key)
191 return ktap_nilobject;
/*
 * kp_tab_getint - locked public wrapper around table_getint().
 * (lock/unlock lines are among those missing from this extract.)
 */
194 const ktap_value *kp_tab_getint(ktap_tab *t, int key)
196 const ktap_value *val;
197 unsigned long __maybe_unused flags;
200 val = table_getint(t, key);
/*
 * mainposition - return the hash-part node where @key belongs, keyed by
 * type: numbers via hashnum, strings via their (lazily computed) hash,
 * booleans via hashpow2, pointers/functions via hashmod on the address.
 * Backtrace objects hash on their first entry word because the gcvalue
 * pointer differs between otherwise-equal clones.
 */
206 static ktap_tnode *mainposition (const ktap_tab *t, const ktap_value *key)
208 switch (ttype(key)) {
210 return hashnum(t, nvalue(key));
212 ktap_string *s = rawtsvalue(key);
213 if (s->tsv.extra == 0) { /* no hash? */
214 s->tsv.hash = kp_string_hash(getstr(s), s->tsv.len,
216 s->tsv.extra = 1; /* now it has its hash */
218 return hashstr(t, rawtsvalue(key));
221 return hashstr(t, rawtsvalue(key));
223 return hashboolean(t, bvalue(key));
224 case KTAP_TLIGHTUSERDATA:
225 return hashpointer(t, pvalue(key));
226 case KTAP_TCFUNCTION:
227 return hashpointer(t, fvalue(key));
229 /* use first entry as hash key, cannot use gcvalue as key */
230 unsigned long *entries = (unsigned long *)(btvalue(key) + 1);
231 return hashpointer(t, entries[0]);
/* default: everything else hashes on its GC pointer */
234 return hashpointer(t, gcvalue(key));
/*
 * arrayindex - if @key is a number representing an exact integer, return
 * that integer (candidate array index); otherwise signal "not an array
 * key".  NOTE(review): the declaration of k and the return statements
 * are among the lines missing from this extract.
 */
238 static int arrayindex(const ktap_value *key)
240 if (is_number(key)) {
241 ktap_number n = nvalue(key);
243 if ((ktap_number)k == n)
247 /* `key' did not match some condition */
252 * returns the index of a `key' for table traversals. First goes all
253 * elements in the array part, then elements in the hash part. The
254 * beginning of a traversal is signaled by -1.
/*
 * findindex: nil key => -1 (fresh traversal); integer keys inside the
 * array part map to their 0-based slot; otherwise the key's hash chain
 * is searched and hash indices are numbered after the array part.
 * Reports an error through kp_error() if the key is not found.
 */
256 static int findindex(ktap_state *ks, ktap_tab *t, StkId key)
261 return -1; /* first iteration */
264 if (i > 0 && i <= t->sizearray) /* is `key' inside array part? */
265 return i - 1; /* yes; that's the index (corrected to C) */
267 ktap_tnode *n = mainposition(t, key);
268 for (;;) { /* check whether `key' is somewhere in the chain */
269 /* key may be dead already, but it is ok to use it in `next' */
270 if (kp_equalobjv(ks, gkey(n), key)) {
271 i = n - gnode(t, 0); /* key index in hash table */
272 /* hash elements are numbered after array ones */
273 return i + t->sizearray;
/* falling off the chain means the key was never in this table */
279 kp_error(ks, "invalid table key to next");
/*
 * kp_tab_next - stateless iterator step (pairs()): given the previous
 * key on the stack, write the next key at key[0] and its value at
 * key[1].  Scans the array part first, then the hash part; returns 0
 * when exhausted (non-zero returns are among the missing lines).
 */
284 int kp_tab_next(ktap_state *ks, ktap_tab *t, StkId key)
286 unsigned long __maybe_unused flags;
291 i = findindex(ks, t, key); /* find original element */
293 for (i++; i < t->sizearray; i++) { /* try first array part */
294 if (!is_nil(&t->array[i])) { /* a non-nil value? */
295 set_number(key, i+1);
296 set_obj(key+1, &t->array[i]);
302 for (i -= t->sizearray; i < sizenode(t); i++) { /* then hash part */
303 if (!is_nil(gval(gnode(t, i)))) { /* a non-nil value? */
304 set_obj(key, gkey(gnode(t, i)));
305 set_obj(key+1, gval(gnode(t, i)));
312 return 0; /* no more elements */
/*
 * kp_tab_sort_next - iterator step over the sorted snapshot built by
 * kp_tab_sort(): walks the t->sort_head linked list instead of the live
 * array/hash parts.  Nil previous key => emit the list head; otherwise
 * find the previous key in the list and emit its successor.  Returns 0
 * when exhausted.
 * NOTE(review): the nil-key branch and list-advance lines are partially
 * missing from this extract.
 */
316 int kp_tab_sort_next(ktap_state *ks, ktap_tab *t, StkId key)
318 unsigned long __maybe_unused flags;
319 ktap_tnode *node = t->sort_head;
324 /* first iteration */
325 set_obj(key, gkey(node));
326 set_obj(key + 1, gval(node));
331 while (node && !is_nil(gval(node))) {
332 if (kp_equalobjv(ks, gkey(node), key)) {
337 set_obj(key, gkey(node));
338 set_obj(key + 1, gval(node));
347 return 0; /* no more elements */
/*
 * default_compare - sort comparator used when the script supplies no
 * comparison closure: plain numeric less-than on the values.
 * (@ks and @cmp_func are unused; kept so both comparators share one
 * function-pointer signature.)
 */
351 static int default_compare(ktap_state *ks, ktap_closure *cmp_func,
352 ktap_value *v1, ktap_value *v2)
354 return nvalue(v1) < nvalue(v2);
/*
 * closure_compare - comparator that calls the user-supplied ktap
 * closure: pushes closure + both values, kp_call()s it, and treats any
 * non-false result as "v1 sorts before v2".  Restores ks->top before
 * returning.  NOTE(review): declaration of func/res not visible here.
 */
357 static int closure_compare(ktap_state *ks, ktap_closure *cmp_func,
358 ktap_value *v1, ktap_value *v2)
364 set_closure(ks->top++, cmp_func);
365 set_obj(ks->top++, v1);
366 set_obj(ks->top++, v2);
368 kp_call(ks, func, 1);
370 res = !is_false(ks->top - 1);
372 ks->top = func; /* restore ks->top */
/*
 * insert_sorted_list - insert one key/value pair into the sorted linked
 * list rooted at t->sort_head (storage backing: the t->sorted node
 * array).  Walks the list with the chosen comparator to find the
 * insertion point, grabs the first free slot in t->sorted, copies the
 * pair in, and splices it via gnext().  A new list head updates
 * t->sort_head.
 * NOTE(review): the empty-list fast path and the walk/advance lines are
 * partially missing from this extract.
 */
377 static void insert_sorted_list(ktap_state *ks, ktap_tab *t,
378 ktap_closure *cmp_func,
379 ktap_value *key, ktap_value *val)
381 ktap_tnode *node = t->sort_head;
382 ktap_tnode *newnode, *prevnode = NULL;
383 int (*compare)(ktap_state *ks, ktap_closure *cmp_func,
384 ktap_value *v1, ktap_value *v2);
387 if (is_nil(gval(node))) {
/* no user comparator => numeric default; otherwise call the closure */
394 compare = default_compare;
396 compare = closure_compare;
399 //if (nvalue(gval(node)) < nvalue(val)) {
400 if (compare(ks, cmp_func, gval(node), val)) {
408 /* find free position */
409 while (!is_nil(gval(&t->sorted[i]))) {
413 newnode = &t->sorted[i];
414 *gkey(newnode) = *key;
415 *gval(newnode) = *val;
416 gnext(newnode) = node;
/* splice: either after prevnode, or become the new list head */
418 gnext(prevnode) = newnode;
420 t->sort_head = newnode;
/*
 * kp_tab_sort - build a sorted snapshot of the whole table: allocate a
 * t->sorted node array sized for every possible entry (array part +
 * hash part), zero it, then insertion-sort each live pair into the
 * linked list via insert_sorted_list().  Iterated afterwards with
 * kp_tab_sort_next().
 */
423 void kp_tab_sort(ktap_state *ks, ktap_tab *t, ktap_closure *cmp_func)
425 unsigned long __maybe_unused flags;
426 int size = t->sizearray + sizenode(t);
431 kp_realloc(ks, t->sorted, 0, size, ktap_tnode);
432 memset(t->sorted, 0, size * sizeof(ktap_tnode));
433 t->sort_head = t->sorted;
/* array part: key is the 1-based index */
435 for (i = 0; i < t->sizearray; i++) {
436 ktap_value *v = &t->array[i];
440 set_number(&key, i + 1);
441 insert_sorted_list(ks, t, cmp_func, &key, v);
/* hash part: skip empty nodes (nil key) */
445 for (i = 0; i < sizenode(t); i++) {
446 ktap_tnode *node = &t->node[i];
448 if (is_nil(gkey(node)))
451 insert_sorted_list(ks, t, cmp_func, gkey(node), gval(node));
/*
 * computesizes - Lua's array-size heuristic: given nums[i] = count of
 * integer keys in (2^(i-1), 2^i], pick the largest n = 2^i such that
 * more than half of the slots 1..n would be used.  *narray is updated
 * to the optimal array size; the return value is the number of keys
 * that will live in the array part.
 * NOTE(review): loop-body accumulation and return lines are missing
 * from this extract.
 */
458 static int computesizes (int nums[], int *narray)
461 int twotoi; /* 2^i */
462 int a = 0; /* number of elements smaller than 2^i */
463 int na = 0; /* number of elements to go to array part */
464 int n = 0; /* optimal size for array part */
466 for (i = 0, twotoi = 1; twotoi/2 < *narray; i++, twotoi *= 2) {
469 /* more than half elements present? */
471 /* optimal size (till now) */
474 * all elements smaller than n will go to
481 break; /* all elements already counted */
/*
 * countint - if @key is a usable array index (1..MAXASIZE), bump the
 * nums[] bucket for its power-of-two range and count it; otherwise the
 * key contributes nothing to the array-part sizing.
 * NOTE(review): return statements not visible in this extract.
 */
488 static int countint(const ktap_value *key, int *nums)
490 int k = arrayindex(key);
492 /* is `key' an appropriate array index? */
493 if (0 < k && k <= MAXASIZE) {
494 nums[ceillog2(k)]++; /* count as such */
/*
 * numusearray - count the non-nil entries of the array part, bucketed
 * into nums[lg] per power-of-two range (2^(lg-1), 2^lg], clamped to the
 * actual array size.  Returns the total count ("ause").
 * NOTE(review): the per-bucket accumulation after the inner loop and
 * the final return are among the missing lines.
 */
501 static int numusearray(const ktap_tab *t, int *nums)
505 int ause = 0; /* summation of `nums' */
506 int i = 1; /* count to traverse all array keys */
509 for (lg=0, ttlg=1; lg <= MAXBITS; lg++, ttlg *= 2) {
510 int lc = 0; /* counter */
513 if (lim > t->sizearray) {
514 lim = t->sizearray; /* adjust upper limit */
516 break; /* no more elements to count */
519 /* count elements in range (2^(lg-1), 2^lg] */
520 for (; i <= lim; i++) {
521 if (!is_nil(&t->array[i-1]))
/*
 * numusehash - count live entries of the hash part; integer-like keys
 * are additionally folded into nums[]/ause via countint() so they can
 * be considered for migration into the array part.  Returns the total
 * number of live hash entries; *pnasize is bumped by ause (that line is
 * among the ones missing from this extract).
 */
530 static int numusehash(const ktap_tab *t, int *nums, int *pnasize)
532 int totaluse = 0; /* total number of elements */
533 int ause = 0; /* summation of `nums' */
537 ktap_tnode *n = &t->node[i];
538 if (!is_nil(gval(n))) {
539 ause += countint(gkey(n), nums);
/*
 * update_array_sd - after t->array/t->sd_arr have been reallocated,
 * re-point every statdata value in the array part at its (possibly
 * moved) t->sd_arr slot.  (The is_statdata() guard is among the lines
 * missing from this extract.)
 */
548 static void update_array_sd(ktap_tab *t)
552 for (i = 0; i < t->sizearray; i++) {
553 ktap_value *v = &t->array[i];
558 set_statdata(v, &t->sd_arr[i]);
/*
 * setarrayvector - grow the array part (and its parallel sd_arr when
 * stats are enabled) to @size, nil-initializing the new slots.
 */
562 static void setarrayvector(ktap_state *ks, ktap_tab *t, int size)
566 kp_realloc(ks, t->array, t->sizearray, size, ktap_value);
568 kp_realloc(ks, t->sd_arr, t->sizearray, size,
573 for (i = t->sizearray; i < size; i++)
574 set_nil(&t->array[i]);
/*
 * setnodevector - (re)create the hash part with room for @size entries,
 * rounded up to the next power of two.  size == 0 installs the shared
 * dummynode instead of allocating.  Errors out past MAXBITS.  Also
 * allocates the parallel sd_rec stat array (stats mode) and resets
 * lastfree to one past the end, meaning "all positions free".
 * NOTE(review): node initialization inside the i-loop is truncated here.
 */
579 static void setnodevector(ktap_state *ks, ktap_tab *t, int size)
583 if (size == 0) { /* no elements to hash part? */
584 t->node = (ktap_tnode *)dummynode; /* use common `dummynode' */
588 lsize = ceillog2(size);
589 if (lsize > MAXBITS) {
590 kp_error(ks, "table overflow\n");
595 t->node = kp_malloc(ks, size * sizeof(ktap_tnode));
597 t->sd_rec = kp_malloc(ks, size *
598 sizeof(ktap_stat_data));
599 for (i = 0; i < size; i++) {
600 ktap_tnode *n = gnode(t, i);
607 t->lsizenode = (u8)lsize;
608 t->lastfree = gnode(t, size); /* all positions are free */
/*
 * table_resize - resize both parts of the table to (@nasize, @nhsize).
 * Saves the old hash vector, grows the array part if needed, builds the
 * new hash part, re-inserts array entries from a shrinking slice and
 * every old hash entry (carrying their stat_data along via read_sd),
 * then frees the old vectors unless they were the shared dummynode.
 */
611 static void table_resize(ktap_state *ks, ktap_tab *t, int nasize, int nhsize)
613 int oldasize = t->sizearray;
614 int oldhsize = t->lsizenode;
615 ktap_tnode *nold = t->node; /* save old hash */
616 ktap_stat_data *sd_rec_old = t->sd_rec; /* save stat_data */
620 kp_verbose_printf(ks, "table resize, nasize: %d, nhsize: %d\n",
624 if (nasize > oldasize) /* array part must grow? */
625 setarrayvector(ks, t, nasize);
627 /* create new hash part with appropriate size */
628 setnodevector(ks, t, nhsize);
630 if (nasize < oldasize) { /* array part must shrink? */
631 t->sizearray = nasize;
632 /* re-insert elements from vanishing slice */
633 for (i = nasize; i < oldasize; i++) {
634 if (!is_nil(&t->array[i])) {
/* re-inserts through table_getint so the value lands in the hash part */
636 v = (ktap_value *)table_getint(t, i + 1);
637 set_obj(v, &t->array[i]);
/* migrate the matching stat_data and re-point the value at it */
640 *read_sd(t, v) = t->sd_arr[i];
641 set_statdata(v, read_sd(t, v));
647 kp_realloc(ks, t->array, oldasize, nasize, ktap_value);
649 kp_realloc(ks, t->sd_arr, oldasize, nasize,
655 /* re-insert elements from hash part */
656 for (i = twoto(oldhsize) - 1; i >= 0; i--) {
657 ktap_tnode *old = nold + i;
658 if (!is_nil(gval(old))) {
659 ktap_value *v = table_set(ks, t, gkey(old));
661 * doesn't need barrier/invalidate cache, as entry was
662 * already present in the table
664 set_obj(v, gval(old));
/* stat_data lived in the old sd_rec array; copy into the new slot */
670 *sd = *_read_sd(gval(old), nold, sd_rec_old);
676 if (!isdummy(nold)) {
677 kp_free(ks, nold); /* free old array */
678 kp_free(ks, sd_rec_old);
/*
 * kp_tab_resize - locked public wrapper around table_resize().
 * (lock/unlock lines are among those missing from this extract.)
 */
682 void kp_tab_resize(ktap_state *ks, ktap_tab *t, int nasize, int nhsize)
684 unsigned long __maybe_unused flags;
687 table_resize(ks, t, nasize, nhsize);
/*
 * kp_tab_resizearray - resize only the array part, preserving the
 * current hash-part size (0 when the hash part is still the dummynode).
 */
691 void kp_tab_resizearray(ktap_state *ks, ktap_tab *t, int nasize)
693 unsigned long __maybe_unused flags;
698 nsize = isdummy(t->node) ? 0 : sizenode(t);
699 table_resize(ks, t, nasize, nsize);
/*
 * rehash - grow the table to fit one extra key @ek: census the integer
 * keys of both parts into nums[] (nums[i] = keys in (2^(i-1), 2^i]),
 * include the pending key, let computesizes() choose the optimal array
 * size, and resize -- hash part gets whatever doesn't fit the array.
 */
704 static void rehash(ktap_state *ks, ktap_tab *t, const ktap_value *ek)
707 /* nums[i] = number of keys with 2^(i-1) < k <= 2^i */
712 for (i = 0; i <= MAXBITS; i++)
713 nums[i] = 0; /* reset counts */
715 nasize = numusearray(t, nums); /* count keys in array part */
716 totaluse = nasize; /* all those keys are integer keys */
717 totaluse += numusehash(t, nums, &nasize); /* count keys in hash part */
718 /* count extra key */
719 nasize += countint(ek, nums);
721 /* compute new size for array part */
722 na = computesizes(nums, &nasize);
723 /* resize the table to new computed sizes */
724 table_resize(ks, t, nasize, totaluse - na);
/*
 * getfreepos - scan lastfree backwards for a node with a nil key (free
 * slot); NULL when the hash part is full (caller must rehash).
 * NOTE(review): the lastfree decrement / return-of-slot lines are
 * missing from this extract.
 */
728 static ktap_tnode *getfreepos(ktap_tab *t)
730 while (t->lastfree > t->node) {
732 if (is_nil(gkey(t->lastfree)))
735 return NULL; /* could not find a free place */
/*
 * table_newkey - insert a key that is not yet in the table (Lua's
 * luaH_newkey collision algorithm).  If the key's main position is
 * occupied, take a free slot; when none exists, rehash (grow) and
 * retry.  If the occupant is itself displaced (its own main position is
 * elsewhere), evict it into the free slot and claim the main position;
 * otherwise chain the new node off the main position.  Cloneable keys
 * (e.g. backtrace objects) are deep-copied onto t->gclist first.
 * Returns the value slot for the new key.
 */
739 static ktap_value *table_newkey(ktap_state *ks, ktap_tab *t,
740 const ktap_value *key)
745 mp = mainposition(t, key);
746 if (!is_nil(gval(mp)) || isdummy(mp)) { /* main position is taken? */
748 ktap_tnode *n = getfreepos(t); /* get a free place */
749 if (n == NULL) { /* cannot find a free place? */
750 rehash(ks, t, key); /* grow table */
751 /* insert key into grown table */
752 return table_set(ks, t, key);
755 othern = mainposition(t, gkey(mp));
757 /* is colliding node out of its main position? */
759 /* move colliding node into free position */
760 while (gnext(othern) != mp)
761 othern = gnext(othern); /* find previous */
763 /* redo the chain with `n' in place of `mp' */
766 /* copy colliding node into free pos */
/* the evicted value keeps its stat_data by copying it to n's slot */
770 ktap_stat_data *sd = read_sd(t, gval(n));
771 *sd = *read_sd(t, gval(mp));
772 set_statdata(gval(n), sd);
775 gnext(mp) = NULL; /* now `mp' is free */
778 /* colliding node is in its own main position */
780 /* new node will go into free position */
781 gnext(n) = gnext(mp); /* chain new position */
787 /* special handling for cloneable object, maily for btrace object */
788 if (is_needclone(key))
789 kp_objclone(ks, key, &newkey, &t->gclist);
793 set_obj(gkey(mp), &newkey);
799 * search function for short strings
/*
 * table_getstr: walk the key's hash chain comparing interned short
 * strings by pointer (eqshrstr); ktap_nilobject when absent.
 */
801 static const ktap_value *table_getstr(ktap_tab *t, ktap_string *key)
803 ktap_tnode *n = hashstr(t, key);
805 do { /* check whether `key' is somewhere in the chain */
806 if (is_shrstring(gkey(n)) && eqshrstr(rawtsvalue(gkey(n)),
808 return gval(n); /* that's it */
813 return ktap_nilobject;
818 * main search function
/*
 * table_get: dispatch on key type -- nil => nil, short string =>
 * table_getstr, numbers with exact integer value => table_getint,
 * everything else walks the mainposition hash chain with full
 * rawequalobj comparison.
 */
820 static const ktap_value *table_get(ktap_tab *t, const ktap_value *key)
822 switch (ttype(key)) {
824 return ktap_nilobject;
826 return table_getstr(t, rawtsvalue(key));
828 ktap_number n = nvalue(key);
830 if ((ktap_number)k == nvalue(key)) /* index is int? */
831 return table_getint(t, k); /* use specialized version */
832 /* else go through */
835 ktap_tnode *n = mainposition(t, key);
836 do { /* check whether `key' is somewhere in the chain */
837 if (rawequalobj(gkey(n), key))
838 return gval(n); /* that's it */
843 return ktap_nilobject;
/*
 * kp_tab_get - locked public wrapper around table_get().
 * (lock/unlock lines are among those missing from this extract.)
 */
848 const ktap_value *kp_tab_get(ktap_tab *t, const ktap_value *key)
850 const ktap_value *val;
851 unsigned long __maybe_unused flags;
854 val = table_get(t, key);
/*
 * table_set - return a writable value slot for @key, creating the key
 * via table_newkey() when it is not present yet.
 */
860 static ktap_value *table_set(ktap_state *ks, ktap_tab *t,
861 const ktap_value *key)
863 const ktap_value *p = table_get(t, key);
865 if (p != ktap_nilobject)
866 return (ktap_value *)p;
868 return table_newkey(ks, t, key);
/*
 * kp_tab_setvalue - locked public setter: rejects nil keys with a
 * message, otherwise stores @val in the slot table_set() provides.
 */
871 void kp_tab_setvalue(ktap_state *ks, ktap_tab *t,
872 const ktap_value *key, ktap_value *val)
874 unsigned long __maybe_unused flags;
877 kp_printf(ks, "table index is nil\n");
883 set_obj(table_set(ks, t, key), val);
/*
 * table_setint - integer-key setter: reuse the existing slot from
 * table_getint() or create one with a number key via table_newkey().
 * (The set_number(&k, key) and final store lines are among the ones
 * missing from this extract.)
 */
887 static void table_setint(ktap_state *ks, ktap_tab *t, int key, ktap_value *v)
892 p = table_getint(t, key);
894 if (p != ktap_nilobject)
895 cell = (ktap_value *)p;
899 cell = table_newkey(ks, t, &k);
/* kp_tab_setint - locked public wrapper around table_setint(). */
905 void kp_tab_setint(ktap_state *ks, ktap_tab *t, int key, ktap_value *val)
907 unsigned long __maybe_unused flags;
910 table_setint(ks, t, key, val);
/*
 * kp_tab_atomic_inc - add @n to the numeric value stored under @key
 * (creating the slot when absent), under the table lock so concurrent
 * probes don't lose updates.  Nil keys are rejected with a message.
 * NOTE(review): the first-time initialization branch between lines 927
 * and 931 is missing from this extract.
 */
914 void kp_tab_atomic_inc(ktap_state *ks, ktap_tab *t, ktap_value *key, int n)
916 unsigned long __maybe_unused flags;
920 kp_printf(ks, "table index is nil\n");
927 v = table_set(ks, t, key);
931 set_number(v, nvalue(v) + n);
/*
 * kp_tab_length - count live entries (non-nil values) across both the
 * array part and the hash part, under the table lock.
 * NOTE(review): the accumulation and return lines are missing from
 * this extract.
 */
936 int kp_tab_length(ktap_state *ks, ktap_tab *t)
938 unsigned long __maybe_unused flags;
943 for (i = 0; i < t->sizearray; i++) {
944 ktap_value *v = &t->array[i];
951 for (i = 0; i < sizenode(t); i++) {
952 ktap_tnode *n = &t->node[i];
/*
 * kp_tab_free - release everything the table owns: array part (+sd_arr),
 * hash part (+sd_rec) unless it is the shared dummynode, the sort
 * snapshot, and the gclist of cloned keys.  (Freeing of the table
 * object itself is among the lines missing from this extract.)
 */
964 void kp_tab_free(ktap_state *ks, ktap_tab *t)
966 if (t->sizearray > 0) {
967 kp_free(ks, t->array);
968 kp_free(ks, t->sd_arr);
971 if (!isdummy(t->node)) {
972 kp_free(ks, t->node);
973 kp_free(ks, t->sd_rec);
976 kp_free(ks, t->sorted);
977 kp_free_gclist(ks, t->gclist);
/*
 * kp_tab_dump - print every live entry: array slots as "index:\tvalue",
 * hash entries as key/value via kp_showobj().  (nil-skip guards and the
 * separators between key and value are among the missing lines.)
 */
981 void kp_tab_dump(ktap_state *ks, ktap_tab *t)
985 for (i = 0; i < t->sizearray; i++) {
986 ktap_value *v = &t->array[i];
991 kp_printf(ks, "%d:\t", i + 1);
996 for (i = 0; i < sizenode(t); i++) {
997 ktap_tnode *n = &t->node[i];
1002 kp_showobj(ks, gkey(n));
1004 kp_showobj(ks, gval(n));
1010 * table-clear only set nil of all elements, not free t->array and nodes.
1011 * we assume user will reuse table soon after clear table, so reserve array
1012 * and nodes will avoid memory allocation when insert key-value again.
/*
 * NOTE(review): memset of the dummynode would be a bug when the hash
 * part is empty -- an isdummy() guard is presumably among the lines
 * missing between 1016 and 1020; confirm against upstream.
 */
1014 void kp_tab_clear(ktap_state *ks, ktap_tab *t)
1016 unsigned long __maybe_unused flags;
1020 memset(t->array, 0, t->sizearray * sizeof(ktap_value));
1021 memset(t->node, 0, sizenode(t) * sizeof(ktap_tnode));
/*
 * string_convert - copy @input into @output for fixed-width histogram
 * columns, truncating long strings.
 * @output: destination buffer; callers pass a zero-filled char[33], and
 *          truncation relies on those pre-zeroed bytes for the NUL
 *          terminator (memcpy/strncpy below never write one themselves).
 * @input:  NUL-terminated source string.
 *
 * Strings longer than 32 characters are cut to the first 28 characters
 * followed by "..."; shorter strings are copied verbatim.
 * (strlen() is computed once instead of twice as in the original.)
 */
static void string_convert(char *output, const char *input)
{
	size_t len = strlen(input);

	if (len > 32) {
		/* keep the first 32-4 chars, mark the cut with "..." */
		strncpy(output, input, 32 - 4);
		memset(output + 32 - 4, '.', 3);
	} else
		memcpy(output, input, len);
}
/* one histogram row: a table key paired with its numeric weight
 * (member lines not visible in this extract) */
1036 struct table_hist_record {
/*
 * hist_record_cmp - qsort comparator over table_hist_record by numeric
 * value.  NOTE(review): the return statements for the equal/less
 * branches are missing here, so the sort direction cannot be confirmed
 * from this extract (upstream sorts descending) -- verify.
 */
1041 static int hist_record_cmp(const void *r1, const void *r2)
1043 const struct table_hist_record *i = r1;
1044 const struct table_hist_record *j = r2;
1046 if ((nvalue(&i->val) == nvalue(&j->val))) {
1048 } else if ((nvalue(&i->val) < nvalue(&j->val))) {
1054 /* todo: make histdump to be faster */
1056 /* histogram: key should be number or string, value must be number */
/*
 * table_histdump - snapshot the table into a flat array of
 * {key, numeric-weight} records, sort by weight, and print the top
 * @shownums rows as an ASCII '@' distribution bar chart.  Numeric keys
 * that resolve to kernel text symbols are printed symbolically.
 * Non-number/string keys or non-number values abort with an error.
 */
1057 static void table_histdump(ktap_state *ks, ktap_tab *t, int shownums)
1059 struct table_hist_record *thr;
1060 unsigned long __maybe_unused flags;
1062 int i, ratio, total = 0, count = 0, top_num, is_kernel_address = 0;
/* worst case: every array slot and every hash node is live */
1065 size = sizeof(*thr) * (t->sizearray + sizenode(t));
1066 thr = kp_malloc(ks, size);
1068 kp_error(ks, "Cannot allocate %d of histogram memory", size);
/* collect array part: key is the 1-based index, weight is the number
 * value or the statdata count */
1074 for (i = 0; i < t->sizearray; i++) {
1075 ktap_value *v = &t->array[i];
1082 else if (is_statdata(v))
1083 num = sdvalue(v)->count;
1089 set_number(&thr[count].key, i + 1);
1090 set_number(&thr[count].val, num);
/* collect hash part: skip empty nodes, same weight extraction */
1095 for (i = 0; i < sizenode(t); i++) {
1096 ktap_tnode *n = &t->node[i];
1097 ktap_value *v = gval(n);
1099 if (is_nil(gkey(n)))
1104 else if (is_statdata(v))
1105 num = sdvalue(v)->count;
1111 set_obj(&thr[count].key, gkey(n));
1112 set_number(&thr[count].val, num);
/* sort rows by weight (direction per hist_record_cmp) */
1119 sort(thr, count, sizeof(struct table_hist_record),
1120 hist_record_cmp, NULL);
1122 dist_str[sizeof(dist_str) - 1] = '\0';
1124 /* check the first key is a kernel text symbol or not */
1125 if (is_number(&thr[0].key)) {
1126 char str[KSYM_SYMBOL_LEN];
1128 SPRINT_SYMBOL(str, nvalue(&thr[0].key));
/* SPRINT_SYMBOL falls back to "0x..." when no symbol matches */
1129 if (str[0] != '0' || str[1] != 'x')
1130 is_kernel_address = 1;
1133 top_num = min(shownums, count);
1134 for (i = 0; i < top_num; i++) {
1135 ktap_value *key = &thr[i].key;
1136 ktap_value *val = &thr[i].val;
/* bar length proportional to this row's share of the total weight */
1138 memset(dist_str, ' ', sizeof(dist_str) - 1);
1139 ratio = (nvalue(val) * (sizeof(dist_str) - 1)) / total;
1140 memset(dist_str, '@', ratio);
1142 if (is_string(key)) {
1143 char buf[32 + 1] = {0};
1145 string_convert(buf, svalue(key));
1146 kp_printf(ks, "%32s |%s%-7d\n", buf, dist_str,
1148 } else if (is_number(key)) {
1149 char str[KSYM_SYMBOL_LEN];
1150 char buf[32 + 1] = {0};
1152 if (is_kernel_address) {
1153 /* suppose it's a symbol, fix it in future */
1154 SPRINT_SYMBOL(str, nvalue(key));
1155 string_convert(buf, str);
1156 kp_printf(ks, "%32s |%s%-7d\n", buf, dist_str,
1159 kp_printf(ks, "%32d |%s%-7d\n", nvalue(key),
1160 dist_str, nvalue(val));
1165 if (count > shownums)
1166 kp_printf(ks, "%32s |\n", "...");
/* error path for unsupported key/value types */
1171 kp_puts(ks, "error: table histogram only handle "
1172 " (key: string/number val: number)\n");
/* number of rows shown by default, plus the header's bar-column label */
1177 #define HISTOGRAM_DEFAULT_TOP_NUM 20
1179 #define DISTRIBUTION_STR "------------- Distribution -------------"
/*
 * kp_tab_histogram - print the histogram header line, then the top-N
 * rows via table_histdump().
 */
1180 void kp_tab_histogram(ktap_state *ks, ktap_tab *t)
1182 kp_printf(ks, "%32s%s%s\n", "value ", DISTRIBUTION_STR, " count");
1183 table_histdump(ks, t, HISTOGRAM_DEFAULT_TOP_NUM);
/*
 * kp_statdata_dump - print one aggregation record in a single line.
 * NOTE(review): sd->sum/sd->count divides by zero if count is 0 --
 * presumably callers only pass records with count >= 1; confirm.
 */
1190 void kp_statdata_dump(ktap_state *ks, ktap_stat_data *sd)
1192 kp_printf(ks, "[count: %6d sum: %6d max: %6d min: %6d avg: %6d]",
1193 sd->count, sd->sum, sd->max, sd->min, sd->sum/sd->count);
1196 static void statdata_add(ktap_stat_data *sd1, ktap_stat_data *sd2)
1198 sd2->count += sd1->count;
1199 sd2->sum += sd1->sum;
1200 if (sd1->max > sd2->max)
1201 sd2->max = sd1->max;
1202 if (sd1->min < sd2->min)
1203 sd2->min = sd1->min;
/*
 * merge_table - fold the per-cpu table @t1 into aggregate table @t2:
 * for each live entry of t1's array and hash parts, create/find the
 * slot in t2, then either copy the stat_data (first contribution) or
 * statdata_add() it into the existing record.
 * NOTE(review): the first-vs-subsequent-contribution branch conditions
 * are among the missing lines -- confirm against upstream.
 */
1206 static void merge_table(ktap_state *ks, ktap_tab *t1, ktap_tab *t2)
1208 unsigned long __maybe_unused flags;
/* array part of t1: reconstruct the 1-based integer key */
1216 for (i = 0; i < t1->sizearray; i++) {
1217 ktap_value *v = &t1->array[i];
1225 newv = table_set(ks, t2, &n);
1226 sd = read_sd(t2, newv);
1228 *sd = *read_sd(t1, v);
1229 set_statdata(newv, sd);
1231 statdata_add(read_sd(t1, v), sd);
/* hash part of t1: skip empty nodes (nil key) */
1234 for (i = 0; i < sizenode(t1); i++) {
1235 ktap_tnode *node = &t1->node[i];
1237 if (is_nil(gkey(node)))
1240 newv = table_set(ks, t2, gkey(node));
1242 *read_sd(t2, newv) = *read_sd(t1, gval(node));
1243 set_statdata(newv, read_sd(t2, newv));
1245 statdata_add(read_sd(t1, gval(node)),
/*
 * kp_ptab_synthesis - rebuild the aggregate view of a per-cpu table:
 * clear the agg table, then merge every CPU's private table into it.
 * (Return of the agg table is among the lines missing from this
 * extract.)
 */
1253 ktap_tab *kp_ptab_synthesis(ktap_state *ks, ktap_ptab *ph)
1260 /* clear the table content before store new elements */
1261 kp_tab_clear(ks, agg);
1263 for_each_possible_cpu(cpu) {
1264 ktap_tab **t = per_cpu_ptr(ph->tbl, cpu);
1265 merge_table(ks, *t, agg);
/* kp_ptab_dump - synthesize the aggregate, then dump it like a table */
1271 void kp_ptab_dump(ktap_state *ks, ktap_ptab *ph)
1273 kp_tab_dump(ks, kp_ptab_synthesis(ks, ph));
/*
 * kp_ptab_new - allocate a per-cpu aggregation table: one stats-enabled
 * ktap_tab per possible CPU plus one aggregate table, each pre-sized
 * with a 2000-entry hash part to avoid growth in probe context.
 */
1276 ktap_ptab *kp_ptab_new(ktap_state *ks)
1281 ph = &kp_newobject(ks, KTAP_TPTABLE, sizeof(ktap_ptab),
1283 ph->tbl = alloc_percpu(ktap_tab *);
1285 for_each_possible_cpu(cpu) {
1286 ktap_tab **t = per_cpu_ptr(ph->tbl, cpu);
1287 *t = kp_tab_new(ks);
1289 (*t)->with_stats = 1;
1291 /* todo: make this value to be configuable, MAXENTRIES? */
1292 table_resize(ks, *t, 0, 2000);
1295 ph->agg = kp_tab_new(ks);
1296 ph->agg->with_stats = 1;
1297 table_resize(ks, ph->agg, 0, 2000);
/*
 * kp_ptab_free - release the per-cpu pointer array.  NOTE(review): the
 * per-cpu tables and ph->agg appear to be freed elsewhere (presumably
 * via the GC list) -- the surrounding lines are missing; confirm.
 */
1302 void kp_ptab_free(ktap_state *ks, ktap_ptab *ph)
1304 free_percpu(ph->tbl);
/*
 * kp_ptab_set - record one numeric sample @val under @key into the
 * current CPU's private table: first sample initializes the stat_data
 * (sum/min/max = value), later samples update max/min (and presumably
 * count/sum on the missing lines).  Rejects non-number values.
 */
1308 void kp_ptab_set(ktap_state *ks, ktap_ptab *ph,
1309 ktap_value *key, ktap_value *val)
1311 ktap_tab *t = *__this_cpu_ptr(ph->tbl);
1312 unsigned long __maybe_unused flags;
1317 if (unlikely(!is_number(val))) {
1318 kp_error(ks, "add non number value to aggregation table\n");
1322 aggval = nvalue(val);
1326 v = table_set(ks, t, key);
1331 sd->sum = sd->min = sd->max = aggval;
1332 set_statdata(v, sd);
1339 if (aggval > sd->max)
1341 if (aggval < sd->min)
/*
 * kp_ptab_get - read the aggregated statistics for @key: walk every
 * CPU's private table, seeding the local accumulator from the first hit
 * (sd.count == -1 sentinel means "nothing found yet") and
 * statdata_add()-ing later hits.  The combined record is stored into
 * ph->agg under the agg lock and @val is pointed at it.
 * NOTE(review): the no-hit early return around line 1378 is truncated.
 */
1347 void kp_ptab_get(ktap_state *ks, ktap_ptab *ph,
1348 ktap_value *key, ktap_value *val)
1350 unsigned long __maybe_unused flags;
1351 ktap_stat_data sd, *aggsd;
1352 const ktap_value *v;
1356 sd.count = sd.sum = sd.max = sd.min = -1;
1358 for_each_possible_cpu(cpu) {
1359 ktap_tab **t = per_cpu_ptr(ph->tbl, cpu);
1362 v = table_get(*t, key);
1368 if (sd.count == -1) {
1369 sd = *read_sd(*t, v);
1374 statdata_add(read_sd(*t, v), &sd);
1378 if (sd.count == -1) {
/* publish the merged record into the aggregate table */
1383 kp_tab_lock(ph->agg);
1384 aggval = table_set(ks, ph->agg, key);
1385 aggsd = read_sd(ph->agg, aggval);
1387 set_statdata(aggval, aggsd);
1388 set_statdata(val, aggsd);
1389 kp_tab_unlock(ph->agg);
/* kp_ptab_histogram - synthesize the aggregate, then render it as a
 * histogram like a plain table */
1392 void kp_ptab_histogram(ktap_state *ks, ktap_ptab *ph)
1394 kp_tab_histogram(ks, kp_ptab_synthesis(ks, ph));