1 ////////////////////////////////////////////////////////////////////////////////////
6 // This file is C source for SWAP.
9 // AUTHOR: L.Komkov, S.Dianov, A.Gerenkov, S.Andreev
10 // COMPANY NAME: Samsung Research Center in Moscow
11 // DEPT NAME: Advanced Software Group
12 // CREATED: 2008.02.15
14 // REVISION DATE: 2008.12.03
16 ////////////////////////////////////////////////////////////////////////////////////
18 #include <linux/types.h>
19 #include <linux/hash.h>
20 #include <linux/list.h>
21 #include <linux/unistd.h>
22 #include <linux/spinlock.h>
23 #include <linux/kernel.h>
24 #include <linux/time.h>
26 #include <dbi_kprobes_deps.h>
29 #include "handlers_core.h"
31 #include "sspt/sspt.h"
32 #include "sspt/sspt_debug.h"
33 #include "ks_def_handler.h"
34 #include "../us_manager/us_manager.h"
/* --- Module-global state ------------------------------------------------ */
36 #define after_buffer ec_info.buffer_size
/* Trace event buffer; allocated with vmalloc_user() in AllocateSingleBuffer(). */
38 char *p_buffer = NULL;
/* User-space / DEX instrumentation descriptors (exported below). */
39 inst_us_proc_t us_proc_info;
40 inst_dex_proc_t dex_proc_info;
43 unsigned int inst_pid = 0;
/* Head of the start/stop/ignore condition list consumed by pack_task_event_info(). */
45 struct cond cond_list;
46 int paused = 0; /* a state after a stop condition (events are not collected) */
47 struct timeval last_attach_time = {0, 0};
/* Registry of externally registered handler modules; guarded by dbi_mh.lock. */
49 static struct dbi_modules_handlers dbi_mh;
/*
 * Accessor for the module-wide handler registry dbi_mh.
 * NOTE(review): body not visible in this view — presumably returns &dbi_mh;
 * confirm against the full source.
 */
51 struct dbi_modules_handlers *get_dbi_modules_handlers(void)
55 EXPORT_SYMBOL_GPL(get_dbi_modules_handlers);
/*
 * Linear scan of mhi->dbi_handlers for an entry whose func_addr equals
 * p_addr; on a hit returns that entry's jp_handler_addr.
 * Callers (dbi_find_and_set_handler_for_probe) treat 0 as "not found";
 * the fallthrough return is outside this view.
 */
57 inline unsigned long find_dbi_jp_handler(unsigned long p_addr, struct dbi_modules_handlers_info *mhi)
61 	/* Possibly we can find less expensive way */
62 	for (i = 0; i < mhi->dbi_nr_handlers; i++) {
63 		if (mhi->dbi_handlers[i].func_addr == p_addr) {
64 			printk("Found jp_handler for %0lX address of %s module\n", p_addr, mhi->dbi_module->name);
65 			return mhi->dbi_handlers[i].jp_handler_addr;
/*
 * Retprobe counterpart of find_dbi_jp_handler(): same linear scan on
 * func_addr, returning rp_handler_addr on a hit (0 means "not found"
 * to the callers; fallthrough return is outside this view).
 */
71 inline unsigned long find_dbi_rp_handler(unsigned long p_addr, struct dbi_modules_handlers_info *mhi)
75 	/* Possibly we can find less expensive way */
76 	for (i = 0; i < mhi->dbi_nr_handlers; i++) {
77 		if (mhi->dbi_handlers[i].func_addr == p_addr) {
78 			printk("Found rp_handler for %0lX address of %s module\n", p_addr, mhi->dbi_module->name);
79 			return mhi->dbi_handlers[i].rp_handler_addr;
86  * Search of handler in global list of modules for defined probe
/*
 * Walks every registered handler module looking for jprobe/retprobe
 * handlers matching `addr`.  On a match, pins the providing module with
 * try_module_get() (only when its refcount is currently 0) and writes
 * the handler address into *jp_handler / *rp_handler.  Any output slot
 * still 0 at the end is filled with the corresponding default handler.
 * All of this runs under dbi_mh.lock with IRQs disabled.
 */
88 void dbi_find_and_set_handler_for_probe(unsigned long addr,
89 	unsigned long *pre_entry,
90 	unsigned long *jp_handler,
91 	unsigned long *rp_handler)
93 	unsigned long jp_handler_addr, rp_handler_addr;
94 	struct dbi_modules_handlers_info *local_mhi;
95 	unsigned long dbi_flags;
96 	unsigned int local_module_refcount = 0;
98 	spin_lock_irqsave(&dbi_mh.lock, dbi_flags);
99 	list_for_each_entry_rcu(local_mhi, &dbi_mh.modules_handlers, dbi_list_head) {
100 		printk("Searching handlers in %s module for %0lX address\n",
101 				(local_mhi->dbi_module)->name, addr);
102 		// XXX: absent code for pre_handlers because we suppose that they are not used
103 		if ((jp_handler_addr = find_dbi_jp_handler(addr, local_mhi)) != 0) {
105 				printk("Skipping jp_handler for %s module (address %0lX)\n",
106 						(local_mhi->dbi_module)->name, addr);
/* Pin the module so its handler code cannot be unloaded while in use. */
108 				local_module_refcount = module_refcount(local_mhi->dbi_module);
109 				if (local_module_refcount == 0) {
110 					if (!try_module_get(local_mhi->dbi_module))
111 						printk("Error of try_module_get() for module %s\n",
112 								(local_mhi->dbi_module)->name);
114 						printk("Module %s in use now\n",
115 								(local_mhi->dbi_module)->name);
118 				*jp_handler = jp_handler_addr;
119 				printk("Set jp_handler for %s module (address %0lX)\n",
120 						(local_mhi->dbi_module)->name, addr);
/* Same lookup/pin dance for the return-probe handler. */
124 		if ((rp_handler_addr = find_dbi_rp_handler(addr, local_mhi)) != 0) {
126 				printk("Skipping kretprobe_handler for %s module (address %0lX)\n",
127 						(local_mhi->dbi_module)->name, addr);
129 				local_module_refcount = module_refcount(local_mhi->dbi_module);
130 				if (local_module_refcount == 0) {
131 					if (!try_module_get(local_mhi->dbi_module))
132 						printk("Error of try_module_get() for module %s\n",
133 								(local_mhi->dbi_module)->name);
135 						printk("Module %s in use now\n",
136 								(local_mhi->dbi_module)->name);
139 				*rp_handler = rp_handler_addr;
140 				printk("Set rp_handler for %s module (address %0lX)\n",
141 						(local_mhi->dbi_module)->name, addr);
/* Fall back to the built-in default handlers for anything not resolved above. */
146 	// not found pre_handler - set default (always true for now since pre_handlers not used)
147 	if (*pre_entry == 0) {
148 		*pre_entry = (unsigned long)def_jprobe_event_pre_handler;
149 		printk("Set default pre_handler (address %0lX)\n", addr);
152 	// not found jp_handler - set default
153 	if (*jp_handler == 0) {
154 		*jp_handler = (unsigned long)def_jprobe_event_handler;
155 		printk("Set default jp_handler (address %0lX)\n", addr);
158 	// not found kretprobe_handler - set default
159 	if (*rp_handler == 0) {
160 		*rp_handler = (unsigned long)def_retprobe_event_handler;
161 		printk("Set default rp_handler (address %0lX)\n", addr);
163 	spin_unlock_irqrestore(&dbi_mh.lock, dbi_flags);
166 // XXX TODO: possible mess when start-register/unregister-stop operation
167 // so we should refuse register/unregister operation while we are in unsafe state
/*
 * Registers an external handler module: resolves each handler's function
 * name to an address via swap_ksyms(), then links the descriptor into
 * dbi_mh.modules_handlers under dbi_mh.lock (RCU list add).
 */
168 int dbi_register_handlers_module(struct dbi_modules_handlers_info *dbi_mhi)
170 	unsigned long dbi_flags;
171 //	struct dbi_modules_handlers_info *local_mhi;
173 	int nr_handlers=dbi_mhi->dbi_nr_handlers;
/* Resolve symbol names once at registration time, not at probe time. */
175 	for (i = 0; i < nr_handlers; ++i) {
176 		dbi_mhi->dbi_handlers[i].func_addr = swap_ksyms(dbi_mhi->dbi_handlers[i].func_name);
177 		printk("[0x%08lx]-%s\n", dbi_mhi->dbi_handlers[i].func_addr, dbi_mhi->dbi_handlers[i].func_name);
180 	spin_lock_irqsave(&dbi_mh.lock, dbi_flags);
181 //	local_mhi = container_of(&dbi_mhi->dbi_list_head, struct dbi_modules_handlers_info, dbi_list_head);
182 	list_add_rcu(&dbi_mhi->dbi_list_head, &dbi_mh.modules_handlers);
183 	printk("Added module %s (head is %p)\n", (dbi_mhi->dbi_module)->name, &dbi_mhi->dbi_list_head);
184 	spin_unlock_irqrestore(&dbi_mh.lock, dbi_flags);
187 EXPORT_SYMBOL_GPL(dbi_register_handlers_module);
189 // XXX TODO: possible mess when start-register/unregister-stop operation
190 // so we should refuse register/unregister operation while we are in unsafe state
/*
 * Unregisters a handler module: removes its descriptor from the RCU list
 * under dbi_mh.lock.  The large commented block below is a sketch of
 * future support for re-pointing live probes back to default handlers
 * when unregistering in the "unsafe" attached state.
 */
191 int dbi_unregister_handlers_module(struct dbi_modules_handlers_info *dbi_mhi)
193 	unsigned long dbi_flags;
194 	// Next code block is for far future possible usage in case when removing will be implemented for unsafe state
195 	// (i.e. between attach and stop)
197 	struct hlist_node *node;
198 	unsigned long jp_handler_addr, rp_handler_addr, pre_handler_addr;*/
200 	spin_lock_irqsave(&dbi_mh.lock, dbi_flags);
201 	list_del_rcu(&dbi_mhi->dbi_list_head);
202 	// Next code block is for far future possible usage in case when removing will be implemented for unsafe state
203 	// (i.e. between attach and stop)
204 	/*swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist) {
205 		// XXX: absent code for pre_handlers because we suppose that they are not used
206 		if ((p->jprobe.entry != ((kprobe_pre_entry_handler_t )def_jprobe_event_pre_handler)) ||
207 			(p->retprobe.handler != ((kretprobe_handler_t )def_retprobe_event_handler))) {
208 			printk("Searching handlers for %p address for removing in %s registered module...\n",
209 					p->addr, (dbi_mhi->dbi_module)->name);
210 			jp_handler_addr = find_dbi_jp_handler(p->addr, dbi_mhi);
211 			rp_handler_addr = find_dbi_rp_handler(p->addr, dbi_mhi);
212 			if ((jp_handler_addr != 0) || (rp_handler_addr != 0)) {
213 				// search and set to another handlers or default
214 				dbi_find_and_set_handler_for_probe(p);
215 				printk("Removed handler(s) for %s module (address %p)\n",
216 						(dbi_mhi->dbi_module)->name, p->addr);
220 	printk("Removed module %s (head was %p)\n", (dbi_mhi->dbi_module)->name, &dbi_mhi->dbi_list_head);
221 	spin_unlock_irqrestore(&dbi_mh.lock, dbi_flags);
224 EXPORT_SYMBOL_GPL(dbi_unregister_handlers_module);
/* Fallback descriptor returned when no registered module supplies uprobes. */
226 static inst_us_proc_t empty_uprobes_info =
/*
 * Returns the uprobes descriptor of the last registered module that
 * provides a get_uprobes callback, or empty_uprobes_info if none does.
 * Scans the registry under dbi_mh.lock.
 */
232 static inst_us_proc_t *get_uprobes(void)
234 	unsigned long dbi_flags;
235 	inst_us_proc_t *ret = &empty_uprobes_info;
236 	struct dbi_modules_handlers_info *mhi;
237 	struct list_head *head = &dbi_mh.modules_handlers;
239 	spin_lock_irqsave(&dbi_mh.lock, dbi_flags);
240 	list_for_each_entry_rcu(mhi, head, dbi_list_head) {
241 		if (mhi->get_uprobes) {
242 			ret = mhi->get_uprobes();
246 	spin_unlock_irqrestore(&dbi_mh.lock, dbi_flags);
251 EXPORT_SYMBOL_GPL(us_proc_info);
252 EXPORT_SYMBOL_GPL(dex_proc_info);
253 typedef void *(*get_my_uprobes_info_t)(void);
/* Optional Memory Error Checker hook, resolved lazily via swap_ksyms(). */
254 #ifdef MEMORY_CHECKER
255 typedef int (*mec_post_event_pointer)(char *data, unsigned long len);
256 static mec_post_event_pointer mec_post_event = NULL;
/*
 * Copy `size` bytes from `src` into `buffer` at `dst_offset` and return
 * the offset just past the copied data (the new write position).
 * No wrapping happens here despite the name: callers in
 * WriteEventIntoSingleBuffer() check free space before calling.
 */
static unsigned copy_into_cyclic_buffer (char *buffer, unsigned dst_offset,
					 char *src, unsigned size)
{
	char *dst = buffer + dst_offset;

	memcpy(dst, src, size);
	return dst_offset + size;
}
/*
 * Validates a requested buffer size against the EC_BUFFER_SIZE_MIN/MAX
 * bounds.  Error returns (-1, per the SetBufferSize() caller) are
 * outside this view.
 */
266 static int CheckBufferSize (unsigned int nSize)
268 	if (nSize < EC_BUFFER_SIZE_MIN) {
269 		EPRINTF("Too small buffer size! [Size=%u KB]", nSize / 1024);
272 	if (nSize > EC_BUFFER_SIZE_MAX) {
273 		EPRINTF("Too big buffer size! [Size=%u KB]", nSize / 1024);
/*
 * Allocates the trace buffer (vmalloc_user, so it can be mmap'ed to
 * user space) and records its size in ec_info under ec_spinlock.
 * The NULL check between lines 283 and 285 is outside this view.
 */
279 static int AllocateSingleBuffer(unsigned int nSize)
281 	unsigned long spinlock_flags = 0L;
283 	p_buffer = vmalloc_user(nSize);
285 		EPRINTF("Memory allocation error! [Size=%u KB]", nSize / 1024);
289 	spin_lock_irqsave (&ec_spinlock, spinlock_flags);
290 	ec_info.buffer_effect = ec_info.buffer_size = nSize;
291 	spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/* Releases the trace buffer allocated by AllocateSingleBuffer(). */
296 static void FreeSingleBuffer (void)
298 	VFREE_USER(p_buffer, ec_info.buffer_size);
302 //////////////////////////////////////////////////////////////////////////////////////////////////
/* Sets the continuous-retrieval mode bit in ec_info under ec_spinlock. */
304 int EnableContinuousRetrieval(void)
306 	unsigned long spinlock_flags = 0L;
308 	spin_lock_irqsave (&ec_spinlock, spinlock_flags);
309 	ec_info.m_nMode |= MODEMASK_CONTINUOUS_RETRIEVAL;
310 	spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/* Clears the continuous-retrieval mode bit in ec_info under ec_spinlock. */
315 int DisableContinuousRetrieval(void)
317 	unsigned long spinlock_flags = 0L;
319 	spin_lock_irqsave (&ec_spinlock, spinlock_flags);
320 	ec_info.m_nMode &= ~MODEMASK_CONTINUOUS_RETRIEVAL;
321 	spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
326 //////////////////////////////////////////////////////////////////////////////////////////////////
/* Thin wrappers kept for symmetry; only the single-buffer mode exists here. */
328 static int InitializeBuffer(unsigned int nSize) {
329 	return AllocateSingleBuffer(nSize);
/* Body not visible; presumably calls FreeSingleBuffer() — confirm in full source. */
332 static int UninitializeBuffer(void) {
337 unsigned int GetBufferSize(void) { return ec_info.buffer_size; };
/*
 * Resizes the trace buffer.  Only legal in the IDLE EC state; no-ops when
 * the size is unchanged; validates bounds via CheckBufferSize(); then
 * drops kernel probes and re-creates the buffer.  Error-return lines are
 * outside this view.
 */
339 int SetBufferSize(unsigned int nSize) {
340 	if (GetECState() != EC_STATE_IDLE) {
341 		EPRINTF("Buffer changes are allowed in IDLE state only (%d)!", GetECState());
344 	if(GetBufferSize() == nSize)
346 	if(CheckBufferSize(nSize) == -1) {
347 		EPRINTF("Invalid buffer size!");
350 	unset_kernel_probes();
351 	if(UninitializeBuffer() == -1)
352 		EPRINTF("Cannot uninitialize buffer!");
353 	if(InitializeBuffer(nSize) == -1) {
354 		EPRINTF("Cannot initialize buffer! [Size=%u KB]", nSize / 1024);
/* Records the instrumented PID; only legal in the IDLE EC state. */
360 int SetPid(unsigned int pid)
362 	if (GetECState() != EC_STATE_IDLE)
364 		EPRINTF("PID changes are allowed in IDLE state only (%d)!", GetECState());
369 	DPRINTF("SetPid pid:%d\n", pid);
/* Resets single-buffer bookkeeping; body not visible in this view. */
373 static void ResetSingleBuffer(void) {
/*
 * Resets the trace buffer to an empty state.  Only legal in the IDLE EC
 * state; drops kernel probes and restores buffer_effect to the full
 * buffer size under ec_spinlock.
 */
376 int ResetBuffer(void) {
377 	unsigned long spinlock_flags = 0L;
379 	if (GetECState() != EC_STATE_IDLE) {
380 		EPRINTF("Buffer changes are allowed in IDLE state only!");
386 	unset_kernel_probes();
388 	spin_lock_irqsave (&ec_spinlock, spinlock_flags);
389 	ec_info.buffer_effect = ec_info.buffer_size;
390 	spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/*
 * Appends one packed event to the cyclic trace buffer.
 * Layout invariants (inferred from the visible code — confirm in full
 * source): `first` is the read offset, `after_last` the write offset,
 * `buffer_effect` the logical end of valid data when the write position
 * has wrapped.  Three cases: write at the tail, wrap to the front when
 * the tail is full, or write into the gap between after_last and first.
 * Events that do not fit are counted as discarded.
 */
397 static int WriteEventIntoSingleBuffer(char* pEvent, unsigned long nEventSize) {
398 	unsigned int unused_space;
401 		EPRINTF("Invalid pointer to buffer!");
402 		++ec_info.lost_events_count;
405 	if (ec_info.trace_size == 0 || ec_info.after_last > ec_info.first) {
/* Case 1: unwrapped — free space is from after_last to the end of the buffer. */
406 		unused_space = ec_info.buffer_size - ec_info.after_last;
407 		if (unused_space > nEventSize) {
408 			ec_info.after_last = copy_into_cyclic_buffer(p_buffer,
412 			ec_info.saved_events_count++;
413 			ec_info.buffer_effect = ec_info.buffer_size;
414 			ec_info.trace_size = ec_info.after_last - ec_info.first;
/* Case 2: tail full — wrap to offset 0 if the head has room before `first`. */
416 			if (ec_info.first > nEventSize) {
417 				ec_info.buffer_effect = ec_info.after_last;
418 				ec_info.after_last = copy_into_cyclic_buffer(p_buffer,
422 				ec_info.saved_events_count++;
423 				ec_info.trace_size = ec_info.buffer_effect
425 					+ ec_info.after_last;
427 				// TODO: consider two variants!
429 				ec_info.discarded_events_count++;
/* Case 3: wrapped — free space is the gap between after_last and first. */
433 		unused_space = ec_info.first - ec_info.after_last;
434 		if (unused_space > nEventSize) {
435 			ec_info.after_last = copy_into_cyclic_buffer(p_buffer,
439 			ec_info.saved_events_count++;
440 			ec_info.trace_size = ec_info.buffer_effect
442 				+ ec_info.after_last;
445 			ec_info.discarded_events_count++;
/*
 * Dispatch wrapper around WriteEventIntoSingleBuffer(); the hex dump
 * loop appears to be debug output (likely conditionally compiled —
 * surrounding #ifdef not visible in this view).
 */
451 static int WriteEventIntoBuffer(char* pEvent, unsigned long nEventSize) {
454 	for(i = 0; i < nEventSize; i++)
455 		printk("%02X ", pEvent[i]);
458 	return WriteEventIntoSingleBuffer(pEvent, nEventSize);
461 //////////////////////////////////////////////////////////////////////////////////////////////////
/* Replaces the global event filter mask under ec_spinlock. */
463 int set_event_mask (int new_mask)
465 	unsigned long spinlock_flags = 0L;
466 	spin_lock_irqsave (&ec_spinlock, spinlock_flags);
467 	event_mask = new_mask;
468 	spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/* Reads the global event mask into *mask; return type and body not visible here. */
473 get_event_mask (int *mask)
/*
 * Byte-wise swap of two `size`-byte regions, used as the swap callback
 * for sort() below.  Return type and the temporary-byte line are outside
 * this view.
 */
480 generic_swap (void *a, void *b, int size)
485 		*(char *) a++ = *(char *) b;
487 	} while (--size > 0);
/*
 * In-place heapsort over `num` elements of `size` bytes: first phase
 * builds the heap (sift-down from the middle), second phase repeatedly
 * swaps the root to the end and re-heapifies.  Appears modeled on the
 * kernel's lib/sort.c.  Comparator semantics: elements for which cmp
 * returns < 0 sift upward.
 */
490 static void sort (void *base, size_t num, size_t size, int (*cmp) (const void *, const void *), void (*fswap) (void *, void *, int size))
492 	/* pre-scale counters for performance */
493 	int i = (num / 2) * size, n = num * size, c, r;
/* Phase 1: heapify. */
496 	for (; i >= 0; i -= size)
498 		for (r = i; r * 2 < n; r = c)
501 			if (c < n - size && cmp (base + c, base + c + size) < 0)
503 			if (cmp (base + r, base + c) >= 0)
505 			fswap (base + r, base + c, size);
/* Phase 2: pop the root to the end of the shrinking heap, then sift down. */
510 	for (i = n - size; i >= 0; i -= size)
512 		fswap (base, base + i, size);
513 		for (r = 0; r * 2 < i; r = c)
516 			if (c < i - size && cmp (base + c, base + c + size) < 0)
518 			if (cmp (base + r, base + c) >= 0)
520 			fswap (base + r, base + c, size);
/*
 * Comparator over unsigned long addresses for sort() above (used to
 * order VTP (addr, index) pairs).  Returns -1 when *a > *b, otherwise 1;
 * equal elements compare as 1 and 0 is never returned.
 */
static int addr_cmp (const void *a, const void *b)
{
	unsigned long lhs = *(const unsigned long *) a;
	unsigned long rhs = *(const unsigned long *) b;

	if (lhs > rhs)
		return -1;
	return 1;
}
/*
 * Scans the `deps` blob (a size_t header followed by path strings) for a
 * path containing lib_name and returns the matching path string.
 * The advance/termination logic between the visible lines is outside
 * this view.
 */
530 static char *find_lib_path(const char *lib_name)
532 	char *p = deps + sizeof(size_t);
537 		DPRINTF("p is at %s", p);
539 		match = strstr(p, lib_name);
541 			len = strlen(p) + 1;	/* we are at path now */
545 			DPRINTF("Found match: %s", match);
553 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27)
554 #define list_for_each_rcu(pos, head) __list_for_each_rcu(pos, head)
/*
 * Tears down us_proc_info built by link_bundle(): for each library,
 * zeroes the counts first (so concurrent readers stop using the arrays)
 * and only then frees the IP, VTP and PLT storage; finally frees the
 * library array itself and clears the target tgid.
 */
557 void unlink_bundle(void)
560 	us_proc_lib_t *d_lib;
562 	struct list_head *pos;	//, *tmp;
564 	path = us_proc_info.path;
565 	us_proc_info.path = NULL;
567 	// first make sure "d_lib" is not used any more and only
568 	// then release storage
569 	if (us_proc_info.p_libs)
571 		int count1 = us_proc_info.libs_count;
572 		us_proc_info.libs_count = 0;
573 		for (i = 0; i < count1; i++)
575 			d_lib = &us_proc_info.p_libs[i];
578 				// first make sure "d_lib->p_ips" is not used any more and only
579 				// then release storage
580 				//int count2 = d_lib->ips_count;
581 				d_lib->ips_count = 0;
582 				/*for (k = 0; k < count2; k++)
583 					kfree ((void *) d_lib->p_ips[k].name);*/
584 				vfree ((void *) d_lib->p_ips);
588 				// first make sure "d_lib->p_vtps" is not used any more and only
589 				// then release storage
590 				int count2 = d_lib->vtps_count;
591 				d_lib->vtps_count = 0;
592 				for (k = 0; k < count2; k++)
594 					//list_for_each_safe_rcu(pos, tmp, &d_lib->p_vtps[k].list) {
/* Free every slave VTP hanging off this master VTP's list. */
595 					list_for_each (pos, &d_lib->p_vtps[k].list)
597 						us_proc_vtp_data_t *vtp = list_entry (pos, us_proc_vtp_data_t, list);
603 				kfree ((void *) d_lib->p_vtps);
605 			d_lib->plt_count = 0;
606 			kfree((void*) d_lib->p_plt);
607 			us_proc_info.is_plt = 0;
609 		kfree ((void *) us_proc_info.p_libs);
610 		us_proc_info.p_libs = NULL;
614 	/* kfree ((void *) path); */
615 	/* //putname(path); */
618 	us_proc_info.tgid = 0;
621 extern struct dentry *dentry_by_path(const char *path);
/*
 * Parses the user-supplied `bundle` blob and populates the global
 * instrumentation state.  Sequential wire format (all fields u_int32_t
 * unless noted): total size, EC mode, buffer size, PID, kernel probe
 * list, app path, per-library probe descriptions (IPs and PLT entries),
 * VTP descriptions for the app binary, condition templates, event mask.
 * Error-return lines between the visible statements are outside this
 * view.  Returns nonzero status; also builds us_proc_info.pp at the end.
 */
623 int link_bundle(void)
625 	inst_us_proc_t *my_uprobes_info = get_uprobes();
626 	char *p = bundle;	/* read pointer for bundle */
630 	us_proc_lib_t *d_lib, *pd_lib;
631 	ioctl_usr_space_lib_t s_lib;
632 	ioctl_usr_space_vtp_t *s_vtp;
637 	struct cond *c, *c_tmp, *p_cond;
/* Release any previously linked bundle before parsing the new one. */
642 	DPRINTF("Going to release us_proc_info");
643 	if (us_proc_info.path)
646 	/* Skip size - it has been used before */
647 	p += sizeof(u_int32_t);
650 	if (SetECMode(*(u_int32_t *)p) == -1)
652 		EPRINTF("Cannot set mode!\n");
656 	p += sizeof(u_int32_t);
659 	if (SetBufferSize(*(u_int32_t *)p) == -1)
661 		EPRINTF("Cannot set buffer size!\n");
665 	p += sizeof(u_int32_t);
668 	if (SetPid(*(u_int32_t *)p) == -1)
670 		EPRINTF("Cannot set pid!\n");
674 	p += sizeof(u_int32_t);
/* Kernel probes: resolve handlers per address and register each probe. */
677 	nr_kern_probes = *(u_int32_t *)p;
678 	p += sizeof(u_int32_t);
679 	for (i = 0; i < nr_kern_probes; i++)
681 		unsigned long addr = *(u_int32_t *)p;
682 		unsigned long pre_handler = 0, jp_handler = 0, rp_handler = 0;
684 		dbi_find_and_set_handler_for_probe(addr, &pre_handler, &jp_handler, &rp_handler);
686 		if (add_probe(addr, pre_handler, jp_handler, rp_handler)) {
687 			EPRINTF("Cannot add kernel probe at 0x%x!\n", addr);
690 		p += sizeof(u_int32_t);
/* Application path; "*" means "any application" (no dentry lookup). */
694 	len = *(u_int32_t *)p;	/* App path len */
695 	p += sizeof(u_int32_t);
697 	us_proc_info.is_plt = 0;
700 		us_proc_info.path = NULL;
707 		us_proc_info.path = (char *)p;
708 		DPRINTF("app path = %s", us_proc_info.path);
711 		if (strcmp(us_proc_info.path, "*")) {
712 			us_proc_info.m_f_dentry = dentry_by_path(us_proc_info.path);
713 			if (us_proc_info.m_f_dentry == NULL) {
714 				update_errno_buffer(us_proc_info.path, IS_APP);
720 			us_proc_info.m_f_dentry = NULL;
/* Library descriptions follow. */
723 	us_proc_info.libs_count = *(u_int32_t *)p;
724 	DPRINTF("nr of libs = %d", us_proc_info.libs_count);
725 	p += sizeof(u_int32_t);
726 	us_proc_info.p_libs =
727 		kmalloc(us_proc_info.libs_count * sizeof(us_proc_lib_t), GFP_KERNEL);
729 	if (!us_proc_info.p_libs)
731 		EPRINTF("Cannot alloc p_libs!");
734 	memset(us_proc_info.p_libs, 0,
735 		   us_proc_info.libs_count * sizeof(us_proc_lib_t));
737 	for (i = 0; i < us_proc_info.libs_count; i++)
739 		int abs_handler_idx = 0;
741 		d_lib = &us_proc_info.p_libs[i];
743 		lib_name_len = *(u_int32_t *)p;
744 		p += sizeof(u_int32_t);
745 		d_lib->path = (char *)p;
746 		DPRINTF("d_lib->path = %s", d_lib->path);
750 		lib_name_len = *(u_int32_t *)p;
751 		p += sizeof(u_int32_t);
752 		d_lib->path_dyn = (char *)p;
753 		DPRINTF("d_lib->path_dyn = %s", d_lib->path_dyn);
757 		d_lib->ips_count = *(u_int32_t *)p;
758 		DPRINTF("d_lib->ips_count = %d", d_lib->ips_count);
759 		p += sizeof(u_int32_t);
761 		/* If there are any probes for "*" app we have to drop them */
762 		if (strcmp(d_lib->path, "*") == 0)
764 			p += d_lib->ips_count * 3 * sizeof(u_int32_t);
765 			d_lib->ips_count = 0;
766 			d_lib->plt_count = *(u_int32_t*)p;
767 			p += sizeof(u_int32_t);
768 			p += d_lib->plt_count * 2 * sizeof(u_int32_t);
769 			d_lib->plt_count = 0;
773 		if (strcmp(us_proc_info.path, d_lib->path) == 0)
/* Resolve the library's on-disk path; fall back to path_dyn if set. */
778 			DPRINTF("Searching path for lib %s", d_lib->path);
779 			d_lib->path = find_lib_path(d_lib->path);
782 				if (strcmp(d_lib->path_dyn, "") == 0) {
783 					EPRINTF("Cannot find path for lib %s!", d_lib->path);
784 					if (update_errno_buffer(d_lib->path, IS_LIB) == -1) {
787 					/* Just skip all the IPs and go to next lib */
788 					p += d_lib->ips_count * 3 * sizeof(u_int32_t);
789 					d_lib->ips_count = 0;
790 					d_lib->plt_count = *(u_int32_t*)p;
791 					p += sizeof(u_int32_t);
792 					p += d_lib->plt_count * 2 * sizeof(u_int32_t);
793 					d_lib->plt_count = 0;
797 				d_lib->path = d_lib->path_dyn;
798 				DPRINTF("Assign path for lib as %s (in suggestion of dyn lib)", d_lib->path);
803 		d_lib->m_f_dentry = dentry_by_path(d_lib->path);
804 		if (d_lib->m_f_dentry == NULL) {
805 			EPRINTF ("failed to lookup dentry for path %s!", d_lib->path);
806 			if (update_errno_buffer(d_lib->path, IS_LIB) == -1) {
809 			/* Just skip all the IPs and go to next lib */
810 			p += d_lib->ips_count * 3 * sizeof(u_int32_t);
811 			d_lib->ips_count = 0;
812 			d_lib->plt_count = *(u_int32_t*)p;
813 			p += sizeof(u_int32_t);
814 			p += d_lib->plt_count * 2 * sizeof(u_int32_t);
815 			d_lib->plt_count = 0;
/* Match this library against the built-in handler tables by basename. */
820 		ptr = strrchr(d_lib->path, '/');
826 		for (l = 0; l < my_uprobes_info->libs_count; l++)
828 			if ((strcmp(ptr, my_uprobes_info->p_libs[l].path) == 0) ||
829 				(is_app && *(my_uprobes_info->p_libs[l].path) == '\0'))
831 				pd_lib = &my_uprobes_info->p_libs[l];
834 			abs_handler_idx += my_uprobes_info->p_libs[l].ips_count;
/* Parse the instrumentation points (IPs) for this library. */
837 		if (d_lib->ips_count > 0)
839 			us_proc_info.unres_ips_count += d_lib->ips_count;
840 			d_lib->p_ips = vmalloc(d_lib->ips_count * sizeof(us_proc_ip_t));
841 			DPRINTF("d_lib[%i]->p_ips=%p/%u [%s]", i, d_lib->p_ips,
842 					us_proc_info.unres_ips_count, d_lib->path);
846 				EPRINTF("Cannot alloc p_ips!\n");
850 			memset (d_lib->p_ips, 0, d_lib->ips_count * sizeof(us_proc_ip_t));
851 			for (k = 0; k < d_lib->ips_count; k++)
853 				d_ip = &d_lib->p_ips[k];
854 				d_ip->offset = *(u_int32_t *)p;
855 				p += sizeof(u_int32_t);
856 				p += sizeof(u_int32_t);	/* Skip inst type */
857 				handler_index = *(u_int32_t *)p;
858 				p += sizeof(u_int32_t);
862 					DPRINTF("pd_lib->ips_count = 0x%x", pd_lib->ips_count);
863 					if (handler_index != -1)
865 						DPRINTF("found handler for 0x%x", d_ip->offset);
866 						d_ip->jprobe.pre_entry =
867 							pd_lib->p_ips[handler_index - abs_handler_idx].jprobe.pre_entry;
869 							pd_lib->p_ips[handler_index - abs_handler_idx].jprobe.entry;
870 						d_ip->retprobe.handler =
871 							pd_lib->p_ips[handler_index - abs_handler_idx].retprobe.handler;
/* Parse PLT (func addr, GOT addr) pairs for this library. */
877 		d_lib->plt_count = *(u_int32_t*)p;
878 		p += sizeof(u_int32_t);
879 		if (d_lib->plt_count > 0)
882 			us_proc_info.is_plt = 1;
883 			d_lib->p_plt = kmalloc(d_lib->plt_count * sizeof(us_proc_plt_t), GFP_KERNEL);
886 				EPRINTF("Cannot alloc p_plt!");
889 			memset(d_lib->p_plt, 0, d_lib->plt_count * sizeof(us_proc_plt_t));
890 			for (j = 0; j < d_lib->plt_count; j++)
892 				d_lib->p_plt[j].func_addr = *(u_int32_t*)p;
893 				p += sizeof(u_int32_t);
894 				d_lib->p_plt[j].got_addr = *(u_int32_t*)p;
895 				p += sizeof(u_int32_t);
896 				d_lib->p_plt[j].real_func_addr = 0;
/* VTPs (variable trace points) are attached to the app binary (lib 0). */
902 	lib_path_len = *(u_int32_t *)p;
903 	DPRINTF("lib_path_len = %d", lib_path_len);
904 	p += sizeof(u_int32_t);
906 	DPRINTF("lib_path = %s", lib_path);
910 	d_lib = &us_proc_info.p_libs[0];
911 	s_lib.vtps_count = *(u_int32_t *)p;
912 	DPRINTF("s_lib.vtps_count = %d", s_lib.vtps_count);
913 	p += sizeof(u_int32_t);
914 	if (s_lib.vtps_count > 0)
916 		unsigned long ucount = 1, pre_addr;
917 		unsigned long *addrs;
919 		s_lib.p_vtps = kmalloc(s_lib.vtps_count
920 							   * sizeof(ioctl_usr_space_vtp_t), GFP_KERNEL);
927 		for (i = 0; i < s_lib.vtps_count; i++)
929 			int var_name_len = *(u_int32_t *)p;
930 			p += sizeof(u_int32_t);
931 			s_lib.p_vtps[i].name = p;
933 			s_lib.p_vtps[i].addr = *(u_int32_t *)p;
934 			p += sizeof(u_int32_t);
935 			s_lib.p_vtps[i].type = *(u_int32_t *)p;
936 			p += sizeof(u_int32_t);
937 			s_lib.p_vtps[i].size = *(u_int32_t *)p;
938 			p += sizeof(u_int32_t);
939 			s_lib.p_vtps[i].reg = *(u_int32_t *)p;
940 			p += sizeof(u_int32_t);
941 			s_lib.p_vtps[i].off = *(u_int32_t *)p;
942 			p += sizeof(u_int32_t);
945 		// array containing elements like (addr, index)
946 		addrs = kmalloc (s_lib.vtps_count * 2 * sizeof (unsigned long), GFP_KERNEL);
947 		//	DPRINTF ("addrs=%p/%u", addrs, s_lib.vtps_count);
950 			//note: storage will released next time or at clean-up moment
953 		memset (addrs, 0, s_lib.vtps_count * 2 * sizeof (unsigned long));
955 		for (k = 0; k < s_lib.vtps_count; k++)
957 			s_vtp = &s_lib.p_vtps[k];
958 			addrs[2 * k] = s_vtp->addr;
959 			addrs[2 * k + 1] = k;
961 		// sort by VTP addresses, i.e. make VTPs with the same addresses adjacent;
962 		// organize them into bundles
963 		sort (addrs, s_lib.vtps_count, 2 * sizeof (unsigned long), addr_cmp, generic_swap);
965 		// calc number of VTPs with unique addresses
966 		for (k = 1, pre_addr = addrs[0]; k < s_lib.vtps_count; k++)
968 			if (addrs[2 * k] != pre_addr)
969 				ucount++;	// count different only
970 			pre_addr = addrs[2 * k];
972 		us_proc_info.unres_vtps_count += ucount;
973 		d_lib->vtps_count = ucount;
974 		d_lib->p_vtps = kmalloc (ucount * sizeof (us_proc_vtp_t), GFP_KERNEL);
975 		DPRINTF ("d_lib[%i]->p_vtps=%p/%lu", i, d_lib->p_vtps, ucount);	//, d_lib->path);
978 			//note: storage will released next time or at clean-up moment
982 		memset (d_lib->p_vtps, 0, d_lib->vtps_count * sizeof (us_proc_vtp_t));
983 		// go through sorted VTPS.
984 		for (k = 0, j = 0, pre_addr = 0, mvtp = NULL; k < s_lib.vtps_count; k++)
986 			us_proc_vtp_data_t *vtp_data;
988 			s_vtp = &s_lib.p_vtps[addrs[2 * k + 1]];
989 			// if this is the first VTP in bundle (master VTP)
990 			if (addrs[2 * k] != pre_addr)
992 				// data are in the array of master VTPs
993 				mvtp = &d_lib->p_vtps[j++];
994 				mvtp->addr = s_vtp->addr;
995 				INIT_LIST_HEAD (&mvtp->list);
997 			// data are in the list of slave VTPs
998 			vtp_data = kmalloc (sizeof (us_proc_vtp_data_t), GFP_KERNEL);
1001 				//note: storage will released next time or at clean-up moment
1006 			/*len = strlen_user (s_vtp->name);
1007 			vtp_data->name = kmalloc (len, GFP_KERNEL);
1008 			if (!vtp_data->name)
1010 				//note: storage will released next time or at clean-up moment
1015 			if (strncpy_from_user (vtp_data->name, s_vtp->name, len) != (len-1))
1017 				//note: storage will released next time or at clean-up moment
1018 				EPRINTF ("strncpy_from_user VTP name failed %p (%ld)", vtp_data->name, len);
1019 				kfree (vtp_data->name);
1024 			//vtp_data->name[len] = 0;*/
1025 			vtp_data->name = s_vtp->name;
1026 			vtp_data->type = s_vtp->type;
1027 			vtp_data->size = s_vtp->size;
1028 			vtp_data->reg = s_vtp->reg;
1029 			vtp_data->off = s_vtp->off;
1030 			list_add_tail_rcu (&vtp_data->list, &mvtp->list);
1031 			pre_addr = addrs[2 * k];
1034 		kfree(s_lib.p_vtps);
/* Replace the condition list with the templates supplied in the bundle. */
1039 	/* first, delete all the conds */
1040 	list_for_each_entry_safe(c, c_tmp, &cond_list.list, list) {
1044 	/* second, add new conds */
1045 	/* This can be improved (by placing conds into array) */
1046 	nr_conds = *(u_int32_t *)p;
1047 	DPRINTF("nr_conds = %d", nr_conds);
1048 	p += sizeof(u_int32_t);
1049 	for (i = 0; i < nr_conds; i++) {
1050 		p_cond = kmalloc(sizeof(struct cond), GFP_KERNEL);
1052 			EPRINTF("Cannot alloc cond!\n");
1056 		memcpy(&p_cond->tmpl, p, sizeof(struct event_tmpl));
1057 		p_cond->applied = 0;
1058 		list_add(&(p_cond->list), &(cond_list.list));
1059 		p += sizeof(struct event_tmpl);
1063 	if (set_event_mask(*(u_int32_t *)p)) {
1064 		EPRINTF("Cannot set event mask!");
1068 	p += sizeof(u_int32_t);
1071 	//	print_inst_us_proc(&us_proc_info);
1073 	us_proc_info.pp = get_file_probes(&us_proc_info);
1078 //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
/*
 * One-time initialization: clears the EC mode mask, allocates the
 * default trace buffer, initializes the handler-module registry, and
 * publishes the pack_task_event_info entry point.
 */
1079 int storage_init (void)
1081 	unsigned long spinlock_flags = 0L;
1083 	spin_lock_irqsave (&ec_spinlock, spinlock_flags);
1084 	ec_info.m_nMode = 0; // MASK IS CLEAR (SINGLE NON_CONTINUOUS BUFFER)
1085 	spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
1087 	if(InitializeBuffer(EC_BUFFER_SIZE_DEFAULT) == -1) {
1088 		EPRINTF("Cannot initialize buffer! [Size=%u KB]", EC_BUFFER_SIZE_DEFAULT / 1024 );
1092 	spin_lock_init(&dbi_mh.lock);
1093 	INIT_LIST_HEAD(&dbi_mh.modules_handlers);
1095 	ptr_pack_task_event_info = pack_task_event_info;
1101   Shuts down "storage".
1102   Assumes that all probes are already deactivated.
/* Frees the trace buffer and reports collision/lost-event counters. */
1104 void storage_down (void)
1106 	if(UninitializeBuffer() == -1)
1107 		EPRINTF("Cannot uninitialize buffer!");
1109 	if (ec_info.collision_count)
1110 		EPRINTF ("ec_info.collision_count=%d", ec_info.collision_count);
1111 	if (ec_info.lost_events_count)
1112 		EPRINTF ("ec_info.lost_events_count=%d", ec_info.lost_events_count);
/*
 * Extracts the probed function address from a varargs event payload.
 * The fmt-inspection lines (1116-1119) are outside this view; the
 * visible path reads the next u_int32_t argument.
 */
1115 static u_int32_t get_probe_func_addr(const char *fmt, va_list args)
1120 	return va_arg(args, u_int32_t);
/*
 * Central event sink: packs one trace event for `task` and writes it
 * into the cyclic buffer, honoring the condition list.
 * Flow: timestamp -> extract probe address (kernel/user probes only) ->
 * evaluate START and IGNORE conditions -> pack + store the event under
 * ec_spinlock (unless masked or paused) -> evaluate STOP conditions,
 * which pause collection right after the triggering event is stored.
 */
1123 void pack_task_event_info(struct task_struct *task, probe_id_t probe_id,
1124 		record_type_t record_type, const char *fmt, ...)
1126 	unsigned long spinlock_flags = 0L;
/* NOTE(review): `static` buf is shared across callers — presumably all
 * writers serialize on ec_spinlock; confirm in full source. */
1127 	static char buf[EVENT_MAX_SIZE] = "";
1128 	TYPEOF_EVENT_LENGTH event_len = 0L;
1129 	struct timeval tv = { 0, 0 };
1130 	TYPEOF_THREAD_ID current_pid = task->pid;
1131 	TYPEOF_PROCESS_ID current_tgid = task->tgid;
1132 	unsigned current_cpu = task_cpu(task);
1134 	unsigned long addr = 0;
1135 	struct cond *p_cond;
1136 	struct event_tmpl *p_tmpl;
1138 	do_gettimeofday (&tv);
1140 	if (probe_id == KS_PROBE_ID) {
1141 		va_start(args, fmt);
1142 		addr = get_probe_func_addr(fmt, args);
1145 	if (probe_id == US_PROBE_ID) {
1146 		va_start(args, fmt);
1147 		addr = get_probe_func_addr(fmt, args);
1151 	/* Checking for all the conditions
1152 	 * except stop condition that we process after saving the event */
1153 	list_for_each_entry(p_cond, &cond_list.list, list) {
1154 		p_tmpl = &p_cond->tmpl;
1155 		switch (p_tmpl->type) {
1156 		case ET_TYPE_START_COND:
/* Each ET_MATCH_* flag that is set must match; unset flags are wildcards. */
1157 			if ((!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_ADDR) ||
1158 				 (addr == p_tmpl->addr)) &&
1159 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_PID) ||
1160 				 (current_tgid == p_tmpl->pid)) &&
1161 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TID) ||
1162 				 (current_pid == p_tmpl->tid)) &&
1163 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_CPU_NUM) ||
1164 				 (current_cpu == p_tmpl->cpu_num)) &&
1165 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_BIN_NAME) ||
1166 				 (strcmp(task->comm, p_tmpl->bin_name) == 0)) &&
1167 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TIME) ||
1168 				 (tv.tv_sec > last_attach_time.tv_sec + p_tmpl->sec) ||
1169 				 (tv.tv_sec == last_attach_time.tv_sec + p_tmpl->sec &&
1170 				  tv.tv_usec >= last_attach_time.tv_usec + p_tmpl->usec)) &&
1172 				spin_lock_irqsave(&ec_spinlock, spinlock_flags);
1174 				p_cond->applied = 1;
1175 				spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
1178 		case ET_TYPE_IGNORE_COND:
1179 			/* if (probe_id == PROBE_SCHEDULE) */
1181 			if ((!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_ADDR) ||
1182 				 (addr == p_tmpl->addr)) &&
1183 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_PID) ||
1184 				 (current_tgid == p_tmpl->pid)) &&
1185 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TID) ||
1186 				 (current_pid == p_tmpl->tid)) &&
1187 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_CPU_NUM) ||
1188 				 (current_cpu == p_tmpl->cpu_num)) &&
1189 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_BIN_NAME) ||
1190 				 (strcmp(task->comm, p_tmpl->bin_name) == 0))) {
1191 				spin_lock_irqsave(&ec_spinlock, spinlock_flags);
1192 				ec_info.ignored_events_count++;
1193 				spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
1200 	/* Save only not masked entry or return kernel and user space events */
1201 	if (likely(!((probe_id == KS_PROBE_ID || probe_id == US_PROBE_ID)
1202 				 && ((record_type == RECORD_ENTRY && (event_mask & IOCTL_EMASK_ENTRY))
1203 					 || (record_type == RECORD_RET && (event_mask & IOCTL_EMASK_EXIT)))))) {
1205 		spin_lock_irqsave (&ec_spinlock, spinlock_flags);
/* While paused, only format/dynamic-library bookkeeping events get through. */
1207 		if (paused && (!(probe_id == EVENT_FMT_PROBE_ID || probe_id == DYN_LIB_PROBE_ID))) {
1208 			ec_info.ignored_events_count++;
1209 			spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
1213 		va_start (args, fmt);
1214 		event_len = VPackEvent(buf, sizeof(buf), event_mask, probe_id, record_type, (TYPEOF_TIME *)&tv,
1215 							   current_tgid, current_pid, current_cpu, fmt, args);
1218 		if(event_len == 0) {
1219 			EPRINTF ("ERROR: failed to pack event!");
1220 			++ec_info.lost_events_count;
1222 		} else if(WriteEventIntoBuffer(buf, event_len) == -1) {
1223 			EPRINTF("Cannot write event into buffer!");
1224 			++ec_info.lost_events_count;
1226 		spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
1230 	/* Check for stop condition. We pause collecting the trace right after
1231 	 * storing this event */
1232 	list_for_each_entry(p_cond, &cond_list.list, list) {
1233 		p_tmpl = &p_cond->tmpl;
1234 		switch (p_tmpl->type) {
1235 		case ET_TYPE_STOP_COND:
1236 			if ((!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_ADDR) ||
1237 				 (addr == p_tmpl->addr)) &&
1238 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_PID) ||
1239 				 (current_tgid == p_tmpl->pid)) &&
1240 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TID) ||
1241 				 (current_pid == p_tmpl->tid)) &&
1242 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_CPU_NUM) ||
1243 				 (current_cpu == p_tmpl->cpu_num)) &&
1244 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_BIN_NAME) ||
1245 				 (strcmp(task->comm, p_tmpl->bin_name) == 0)) &&
1246 				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TIME) ||
1247 				 (tv.tv_sec > last_attach_time.tv_sec + p_tmpl->sec) ||
1248 				 (tv.tv_sec == last_attach_time.tv_sec + p_tmpl->sec &&
1249 				  tv.tv_usec >= last_attach_time.tv_usec + p_tmpl->usec)) &&
1251 				spin_lock_irqsave(&ec_spinlock, spinlock_flags);
1253 				p_cond->applied = 1;
1254 				spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
1260 EXPORT_SYMBOL_GPL(pack_task_event_info);
/*
 * put_us_event - post-process an event that was pre-packed in user space
 * and store it in the trace buffer.
 *
 * @data: buffer holding the packed event; begins with SWAP_TYPE_EVENT_HEADER
 * @len:  size of the packed event in bytes
 *
 * Event layout, as implied by the sizeof() arithmetic below:
 *   [EVENT_LENGTH][EVENT_TYPE][PROBE_ID][time][pid][tid][cpu][nargs][args...]
 * with a duplicate EVENT_LENGTH appended at the very end of the record.
 *
 * The kernel overwrites the time/pid/tid/cpu fields with trustworthy values
 * of its own (user space cannot be relied on for them), honoring the global
 * 'event_mask': a field is kept only when its IOCTL_EMASK_* bit is clear
 * (format events, EVENT_FMT_PROBE_ID, always keep every field). Skipped
 * fields are compacted out of the record in place before it is written.
 */
1262 int put_us_event (char *data, unsigned long len)
1264 unsigned long spinlock_flags = 0L;
/* Header overlays the start of the caller-supplied buffer. */
1266 SWAP_TYPE_EVENT_HEADER *pEventHeader = (SWAP_TYPE_EVENT_HEADER *)data;
/* 'cur' starts just past the fixed [length][type][probe_id] prefix and is
 * the write cursor for the rebuilt (possibly compacted) record. */
1267 char *cur = data + sizeof(TYPEOF_EVENT_LENGTH) + sizeof(TYPEOF_EVENT_TYPE)
1268 + sizeof(TYPEOF_PROBE_ID);
1269 TYPEOF_NUMBER_OF_ARGS nArgs = pEventHeader->m_nNumberOfArgs;
1270 TYPEOF_PROBE_ID probe_id = pEventHeader->m_nProbeID;
/* Old hex-dump debug aid, intentionally left disabled. */
1273 /*if(probe_id == US_PROBE_ID){
1274 printk("esrc %p/%d[", data, len);
1275 for(i = 0; i < len; i++)
1276 printk("%02x ", data[i]);
1280 // set pid/tid/cpu/time i
1281 //pEventHeader->m_time.tv_sec = tv.tv_sec;
1282 //pEventHeader->m_time.tv_usec = tv.tv_usec;
/* Memory Error Checker events are diverted to mec_handlers.ko. The
 * 'mec_post_event' function pointer is resolved lazily via swap_ksyms()
 * on first use; NOTE(review): declaration/reset of that pointer is
 * outside this view — confirm it starts out NULL. */
1284 #ifdef MEMORY_CHECKER
1285 //TODO: move this part to special MEC event posting routine, new IOCTL is needed
1286 if((probe_id >= MEC_PROBE_ID_MIN) && (probe_id <= MEC_PROBE_ID_MAX))
1288 if(mec_post_event != NULL)
1290 int res = mec_post_event(data, len);
1298 // FIXME: 'mec_post_event' - not found
1299 mec_post_event = (mec_post_event_pointer) swap_ksyms("mec_post_event");
1300 if(mec_post_event == NULL)
1302 EPRINTF ("Failed to find function 'mec_post_event' from mec_handlers.ko. Memory Error Checker will work incorrectly.");
1306 int res = mec_post_event(data, len);
/* Timestamp: taken here in the kernel, kept only when unmasked (or for
 * format events, which always carry all fields). */
1316 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_TIME)){
1317 struct timeval tv = { 0, 0 };
1318 do_gettimeofday (&tv);
1319 memcpy(cur, &tv, sizeof(TYPEOF_TIME));
1320 cur += sizeof(TYPEOF_TIME);
1322 //pEventHeader->m_nProcessID = current_tgid;
/* Process id (tgid) — same keep-or-skip rule as the timestamp. */
1323 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_PID)){
1324 //TYPEOF_PROCESS_ID current_tgid = current->tgid;
1325 (*(TYPEOF_PROCESS_ID *)cur) = current->tgid;
1326 cur += sizeof(TYPEOF_PROCESS_ID);
1328 //pEventHeader->m_nThreadID = current_pid;
/* Thread id (pid). */
1329 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_TID)){
1330 //TYPEOF_THREAD_ID current_pid = current->pid;
1331 (*(TYPEOF_THREAD_ID *)cur) = current->pid;
1332 cur += sizeof(TYPEOF_THREAD_ID);
1334 //pEventHeader->m_nCPU = current_cpu;
/* CPU number the current task runs on. */
1335 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_CPU)){
1336 //TYPEOF_CPU_NUMBER current_cpu = task_cpu(current);
1337 (*(TYPEOF_CPU_NUMBER *)cur) = task_cpu(current);
1338 cur += sizeof(TYPEOF_CPU_NUMBER);
1340 //printk("%d %x", probe_id, event_mask);
1341 // dyn lib event should have all args, it is for internal use and not visible to user
/* Keep the original argument block: slide [nargs][args...] down over any
 * header fields that were skipped above, so the record stays contiguous. */
1342 if((probe_id == EVENT_FMT_PROBE_ID) || (probe_id == DYN_LIB_PROBE_ID) || !(event_mask & IOCTL_EMASK_ARGS)){
1343 // move only if any of prev fields has been skipped
1344 if(event_mask & (IOCTL_EMASK_TIME|IOCTL_EMASK_PID|IOCTL_EMASK_TID|IOCTL_EMASK_CPU)){
/* Source: start of [nargs] in the original layout; size: everything from
 * there to the end of the record minus the trailing length field. */
1345 memmove(cur, data+sizeof(SWAP_TYPE_EVENT_HEADER)-sizeof(TYPEOF_NUMBER_OF_ARGS),
1346 len-sizeof(SWAP_TYPE_EVENT_HEADER)+sizeof(TYPEOF_NUMBER_OF_ARGS)
1347 -sizeof(TYPEOF_EVENT_LENGTH));
1349 cur += len-sizeof(SWAP_TYPE_EVENT_HEADER)+sizeof(TYPEOF_NUMBER_OF_ARGS)
1350 -sizeof(TYPEOF_EVENT_LENGTH);
1353 // user space probes should have at least one argument to identify them
/* Args were masked out, but US/VTP probes still need one identifying
 * argument: synthesize a single 'p' (pointer) arg from the original
 * first argument. */
1354 if((probe_id == US_PROBE_ID) || (probe_id == VTP_PROBE_ID)){
1356 (*(TYPEOF_NUMBER_OF_ARGS *)cur) = 1;
1357 cur += sizeof(TYPEOF_NUMBER_OF_ARGS);
1358 // pack args using format string for the 1st arg only
/* Format string "p\0", padded to the alignment unit. */
1359 memset(cur, 0, ALIGN_VALUE(2));
1360 cur[0] = 'p'; cur[1] = '\0';
1361 cur += ALIGN_VALUE(2);
/* First arg value sits after the header and the aligned original format
 * string of nArgs characters (+1 for its NUL). NOTE(review): pArg1 is
 * declared in a line elided from this view. */
1362 pArg1 = data + sizeof(SWAP_TYPE_EVENT_HEADER)+ALIGN_VALUE(nArgs+1);
1363 memmove(cur, pArg1, sizeof(unsigned long));
1364 cur += sizeof(unsigned long);
/* Any other probe type: record carries zero arguments. */
1367 (*(TYPEOF_NUMBER_OF_ARGS *)cur) = 0;
1368 cur += sizeof(TYPEOF_NUMBER_OF_ARGS);
/* Fix up the total length in the header and duplicate it at the tail
 * (the trailing copy is the extra sizeof(TYPEOF_EVENT_LENGTH)). */
1371 pEventHeader->m_nLength = cur - data + sizeof(TYPEOF_EVENT_LENGTH);
1372 *((TYPEOF_EVENT_LENGTH *)cur) = pEventHeader->m_nLength;
1373 len = pEventHeader->m_nLength;
/* Hand the finished record to the collector; on failure count it as lost
 * under the event-collector spinlock. */
1375 if(WriteEventIntoBuffer(data, len) == -1) {
1376 EPRINTF("Cannot write event into buffer!");
1378 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
1379 ++ec_info.lost_events_count;
1380 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/*
 * get_predef_uprobes_size - compute how many bytes are needed to serialize
 * the full list of predefined user-space probes.
 *
 * @size: out-parameter accumulating the total byte count
 *
 * Each probe contributes strlen(lib_path) + 1 separator +
 * strlen(func_name) + 2 separators, matching the "libc.so.6:printf:"
 * layout that get_predef_uprobes() copies to user space.
 *
 * NOTE(review): the declarations of i/k and the initialization of *size
 * are in lines elided from this view — confirm *size is zeroed before
 * the accumulation loop.
 */
1387 int get_predef_uprobes_size(int *size)
1390 inst_us_proc_t *my_uprobes_info = get_uprobes();
/* Walk every instrumented library ... */
1393 for(i = 0; i < my_uprobes_info->libs_count; i++)
/* Library path length is invariant across its probes; hoisted here. */
1395 int lib_size = strlen(my_uprobes_info->p_libs[i].path);
/* ... and every instrumentation point inside it. */
1396 for(k = 0; k < my_uprobes_info->p_libs[i].ips_count; k++)
1398 // libc.so.6:printf:
1399 *size += lib_size + 1 + strlen(my_uprobes_info->p_libs[i].p_ips[k].name) + 2;
1406 int get_predef_uprobes(ioctl_predef_uprobes_info_t *udata)
1408 ioctl_predef_uprobes_info_t data;
1409 int i, k, size, lib_size, func_size, result;
1412 inst_us_proc_t *my_uprobes_info = get_uprobes();
1414 // get addr of array
1415 if (copy_from_user ((void *)&data, (void __user *) udata, sizeof (data)))
1417 EPRINTF("failed to copy from user!");
1422 for(i = 0; i < my_uprobes_info->libs_count; i++)
1424 lib_size = strlen(my_uprobes_info->p_libs[i].path);
1425 for(k = 0; k < my_uprobes_info->p_libs[i].ips_count; k++)
1428 result = copy_to_user ((void __user *)(data.p_probes+size),
1429 (void *) my_uprobes_info->p_libs[i].path, lib_size);
1432 EPRINTF("failed to copy to user!");
1437 result = copy_to_user ((void __user *)(data.p_probes+size), sep, 1);
1440 EPRINTF("failed to copy to user!");
1445 //DPRINTF("'%s'", my_uprobes_info->p_libs[i].p_ips[k].name);
1446 func_size = strlen(my_uprobes_info->p_libs[i].p_ips[k].name);
1447 result = copy_to_user ((void __user *)(data.p_probes+size), my_uprobes_info->p_libs[i].p_ips[k].name, func_size);
1450 EPRINTF("failed to copy to user!");
1455 result = copy_to_user ((void __user *)(data.p_probes+size), sep, 2);
1458 EPRINTF("failed to copy to user!");
1467 result = copy_to_user ((void __user *)&(udata->probes_count), &count, sizeof(count));
1470 EPRINTF("failed to copy to user!");