1 ////////////////////////////////////////////////////////////////////////////////////
6 // This file is C source for SWAP.
9 // AUTHOR: L.Komkov, S.Dianov, A.Gerenkov, S.Andreev
10 // COMPANY NAME: Samsung Research Center in Moscow
11 // DEPT NAME: Advanced Software Group
12 // CREATED: 2008.02.15
14 // REVISION DATE: 2008.12.03
16 ////////////////////////////////////////////////////////////////////////////////////
18 #include <linux/types.h>
19 #include <linux/hash.h>
20 #include <linux/list.h>
21 #include <linux/unistd.h>
22 #include <linux/spinlock.h>
23 #include <linux/kernel.h>
24 #include <linux/time.h>
27 #include "handlers_core.h"
#define after_buffer ec_info.buffer_size

/* Single (possibly cyclic) trace buffer; allocated by AllocateSingleBuffer() */
char *p_buffer = NULL;
/* Instrumentation description of the target user-space process */
inst_us_proc_t us_proc_info;
struct list_head otg_us_proc_info;	/* "on-the-go" user-space probes */
inst_dex_proc_t dex_proc_info;		/* DEX (Dalvik) instrumentation info */

unsigned int inst_pid = 0;		/* pid of the instrumented process */
struct hlist_head kernel_probes;	/* registered kernel probes (hash list) */
struct hlist_head otg_kernel_probes;	/* "on-the-go" kernel probes */

struct cond cond_list;			/* start/ignore/stop condition list head */
int paused = 0; /* a state after a stop condition (events are not collected) */
struct timeval last_attach_time = {0, 0};

/* Registry of external handler modules; guarded by dbi_mh.lock */
struct dbi_modules_handlers dbi_mh;
/*
 * Accessor for the global handler-module registry.
 * NOTE(review): the body is truncated in this view — presumably it just
 * returns &dbi_mh; confirm against the full source.
 */
struct dbi_modules_handlers *get_dbi_modules_handlers(void)
EXPORT_SYMBOL_GPL(get_dbi_modules_handlers);
/*
 * Linear search of module info 'mhi' for a jprobe handler registered for
 * probed address p_addr.  Returns the handler address when found.
 * NOTE(review): the function tail (not-found return, presumably 0, and
 * closing braces) is truncated in this view.
 */
inline unsigned long find_dbi_jp_handler(unsigned long p_addr, struct dbi_modules_handlers_info *mhi)
	/* Possibly we can find less expensive way */
	for (i = 0; i < mhi->dbi_nr_handlers; i++) {
		if (mhi->dbi_handlers[i].func_addr == p_addr) {
			printk("Found jp_handler for %0lX address of %s module\n", p_addr, mhi->dbi_module->name);
			return mhi->dbi_handlers[i].jp_handler_addr;
/*
 * Same search as find_dbi_jp_handler() but for the kretprobe handler of
 * probed address p_addr.
 * NOTE(review): the function tail (not-found return, presumably 0, and
 * closing braces) is truncated in this view.
 */
inline unsigned long find_dbi_rp_handler(unsigned long p_addr, struct dbi_modules_handlers_info *mhi)
	/* Possibly we can find less expensive way */
	for (i = 0; i < mhi->dbi_nr_handlers; i++) {
		if (mhi->dbi_handlers[i].func_addr == p_addr) {
			printk("Found rp_handler for %0lX address of %s module\n", p_addr, mhi->dbi_module->name);
			return mhi->dbi_handlers[i].rp_handler_addr;
/*
 * Search of handler in global list of modules for defined probe
 *
 * Walks every registered handler module under dbi_mh.lock; the first
 * module providing a jprobe/kretprobe handler for p->addr wins, and the
 * providing module is pinned with try_module_get().  Probes without a
 * module-provided handler fall back to the def_*_event_* defaults.
 * NOTE(review): several brace/else lines are truncated in this view.
 */
void dbi_find_and_set_handler_for_probe(kernel_probe_t *p)
	unsigned long jp_handler_addr, rp_handler_addr;
	struct dbi_modules_handlers_info *local_mhi;
	unsigned long dbi_flags;
	unsigned int local_module_refcount = 0;

	spin_lock_irqsave(&dbi_mh.lock, dbi_flags);
	list_for_each_entry_rcu(local_mhi, &dbi_mh.modules_handlers, dbi_list_head) {
		printk("Searching handlers in %s module for %0lX address\n",
				(local_mhi->dbi_module)->name, p->addr);
		// XXX: absent code for pre_handlers because we suppose that they are not used
		if ((jp_handler_addr = find_dbi_jp_handler(p->addr, local_mhi)) != 0) {
			/* a jprobe handler was assigned on an earlier iteration - keep it */
			if (p->jprobe.entry != 0) {
				printk("Skipping jp_handler for %s module (address %0lX)\n",
						(local_mhi->dbi_module)->name, p->addr);
			/* pin the providing module so its handler code cannot be unloaded */
			local_module_refcount = module_refcount(local_mhi->dbi_module);
			if (local_module_refcount == 0) {
				if (!try_module_get(local_mhi->dbi_module))
					printk("Error of try_module_get() for module %s\n",
							(local_mhi->dbi_module)->name);
				printk("Module %s in use now\n",
						(local_mhi->dbi_module)->name);
			p->jprobe.entry = (kprobe_opcode_t *)jp_handler_addr;
			printk("Set jp_handler for %s module (address %0lX)\n",
					(local_mhi->dbi_module)->name, p->addr);
		/* same procedure for the return-probe handler */
		if ((rp_handler_addr = find_dbi_rp_handler(p->addr, local_mhi)) != 0) {
			if (p->retprobe.handler != 0) {
				printk("Skipping kretprobe_handler for %s module (address %0lX)\n",
						(local_mhi->dbi_module)->name, p->addr);
			local_module_refcount = module_refcount(local_mhi->dbi_module);
			if (local_module_refcount == 0) {
				if (!try_module_get(local_mhi->dbi_module))
					printk("Error of try_module_get() for module %s\n",
							(local_mhi->dbi_module)->name);
				printk("Module %s in use now\n",
						(local_mhi->dbi_module)->name);
			p->retprobe.handler = (kretprobe_handler_t)rp_handler_addr;
			printk("Set rp_handler for %s module (address %0lX)\n",
					(local_mhi->dbi_module)->name, p->addr);
	// not found pre_handler - set default (always true for now since pre_handlers not used)
	if (p->jprobe.pre_entry == 0) {
		p->jprobe.pre_entry = (kprobe_pre_entry_handler_t) def_jprobe_event_pre_handler;
		printk("Set default pre_handler (address %0lX)\n", p->addr);
	// not found jp_handler - set default
	if (p->jprobe.entry == 0) {
		p->jprobe.entry = (kprobe_opcode_t *) def_jprobe_event_handler;
		printk("Set default jp_handler (address %0lX)\n", p->addr);
	// not found kretprobe_handler - set default
	if (p->retprobe.handler == 0) {
		p->retprobe.handler = (kretprobe_handler_t) def_retprobe_event_handler;
		printk("Set default rp_handler (address %0lX)\n", p->addr);
	spin_unlock_irqrestore(&dbi_mh.lock, dbi_flags);
// XXX TODO: possible mess when start-register/unregister-stop operation
// so we should refuse register/unregister operation while we are in unsafe state
/*
 * dbi_register_handlers_module - resolve the handler function names of a
 * newly loaded handler module to kernel addresses (via lookup_name) and
 * publish the module in the dbi_mh registry.
 * NOTE(review): brace/return lines are truncated in this view.
 */
int dbi_register_handlers_module(struct dbi_modules_handlers_info *dbi_mhi)
	unsigned long dbi_flags;
	// struct dbi_modules_handlers_info *local_mhi;
	int nr_handlers=dbi_mhi->dbi_nr_handlers;
	printk ("lookup_name=0x%08x\n", lookup_name);
	if ( lookup_name != NULL){
		/* resolve each handler's target symbol name to an address */
		for (i=0;i<nr_handlers;i++){
			//handlers[i].func_addr = (void (*)(pte_t) ) lookup_name (handlers[i].func_name);
			dbi_mhi->dbi_handlers[i].func_addr = (void (*)(pte_t) ) lookup_name (dbi_mhi->dbi_handlers[i].func_name);
			printk("[0x%08x]-%s\n",dbi_mhi->dbi_handlers[i].func_addr,dbi_mhi->dbi_handlers[i].func_name);
	printk("[ERROR] lookup_name is NULL\n");
	/* publish the module in the registry under the lock */
	spin_lock_irqsave(&dbi_mh.lock, dbi_flags);
	// local_mhi = container_of(&dbi_mhi->dbi_list_head, struct dbi_modules_handlers_info, dbi_list_head);
	list_add_rcu(&dbi_mhi->dbi_list_head, &dbi_mh.modules_handlers);
	printk("Added module %s (head is %p)\n", (dbi_mhi->dbi_module)->name, &dbi_mhi->dbi_list_head);
	spin_unlock_irqrestore(&dbi_mh.lock, dbi_flags);
EXPORT_SYMBOL_GPL(dbi_register_handlers_module);
// XXX TODO: possible mess when start-register/unregister-stop operation
// so we should refuse register/unregister operation while we are in unsafe state
/*
 * dbi_unregister_handlers_module - remove a handler module from the
 * dbi_mh registry under the registry lock.  The large commented-out
 * block would (in the future) re-route probes that still point at this
 * module's handlers back to other modules or the defaults.
 */
int dbi_unregister_handlers_module(struct dbi_modules_handlers_info *dbi_mhi)
	unsigned long dbi_flags;
	// Next code block is for far future possible usage in case when removing will be implemented for unsafe state
	// (i.e. between attach and stop)
	// NOTE(review): the "/*" opening this commented-out declaration pair is not visible in this view
	struct hlist_node *node;
	unsigned long jp_handler_addr, rp_handler_addr, pre_handler_addr;*/

	spin_lock_irqsave(&dbi_mh.lock, dbi_flags);
	list_del_rcu(&dbi_mhi->dbi_list_head);
	// Next code block is for far future possible usage in case when removing will be implemented for unsafe state
	// (i.e. between attach and stop)
	/*hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist) {
		// XXX: absent code for pre_handlers because we suppose that they are not used
		if ((p->jprobe.entry != ((kprobe_pre_entry_handler_t )def_jprobe_event_pre_handler)) ||
			(p->retprobe.handler != ((kretprobe_handler_t )def_retprobe_event_handler))) {
			printk("Searching handlers for %p address for removing in %s registered module...\n",
					p->addr, (dbi_mhi->dbi_module)->name);
			jp_handler_addr = find_dbi_jp_handler(p->addr, dbi_mhi);
			rp_handler_addr = find_dbi_rp_handler(p->addr, dbi_mhi);
			if ((jp_handler_addr != 0) || (rp_handler_addr != 0)) {
				// search and set to another handlers or default
				dbi_find_and_set_handler_for_probe(p);
				printk("Removed handler(s) for %s module (address %p)\n",
						(dbi_mhi->dbi_module)->name, p->addr);
	printk("Removed module %s (head was %p)\n", (dbi_mhi->dbi_module)->name, &dbi_mhi->dbi_list_head);
	spin_unlock_irqrestore(&dbi_mh.lock, dbi_flags);
EXPORT_SYMBOL_GPL(dbi_unregister_handlers_module);
EXPORT_SYMBOL_GPL(us_proc_info);
EXPORT_SYMBOL_GPL(dex_proc_info);
/* Signature of the optional "get_my_uprobes_info" symbol looked up at bundle-parse time */
typedef void *(*get_my_uprobes_info_t)(void);
/* Optional hook for posting user-space events; NULL until a consumer installs it */
int (*mec_post_event)(char *data, unsigned long len) = NULL;
/*
 * copy_into_cyclic_buffer - copy 'size' bytes from src into the cyclic
 * trace buffer starting at dst_offset; returns the new write offset.
 * NOTE(review): the body is truncated in this view — the declaration of
 * pSource, the copy loop header and any wrap-around handling are not
 * visible; confirm against the full source.
 */
unsigned copy_into_cyclic_buffer (char *buffer, unsigned dst_offset, char *src, unsigned size)
	unsigned nOffset = dst_offset;
		buffer[nOffset++] = *pSource++;
/*
 * copy_from_cyclic_buffer - copy 'size' bytes out of the cyclic trace
 * buffer starting at src_offset into dst; returns the new read offset.
 * NOTE(review): loop header / wrap-around handling / return statement
 * are truncated in this view.
 */
unsigned copy_from_cyclic_buffer (char *dst, char *buffer, unsigned src_offset, unsigned size)
	unsigned nOffset = src_offset;
	char* pDestination = dst;
		*pDestination++ = buffer[nOffset++];
247 int CheckBufferSize (unsigned int nSize)
249 if (nSize < EC_BUFFER_SIZE_MIN) {
250 EPRINTF("Too small buffer size! [Size=%u KB]", nSize / 1024);
253 if (nSize > EC_BUFFER_SIZE_MAX) {
254 EPRINTF("Too big buffer size! [Size=%u KB]", nSize / 1024);
260 int AllocateSingleBuffer(unsigned int nSize)
262 unsigned long spinlock_flags = 0L;
264 p_buffer = vmalloc_user(nSize);
266 EPRINTF("Memory allocation error! [Size=%lu KB]", nSize / 1024);
270 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
271 ec_info.buffer_effect = ec_info.buffer_size = nSize;
272 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
277 void FreeSingleBuffer (void)
279 VFREE_USER(p_buffer, ec_info.buffer_size);
283 //////////////////////////////////////////////////////////////////////////////////////////////////
285 int EnableContinuousRetrieval() {
286 unsigned long spinlock_flags = 0L;
288 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
289 ec_info.m_nMode |= MODEMASK_CONTINUOUS_RETRIEVAL;
290 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
295 int DisableContinuousRetrieval() {
296 unsigned long spinlock_flags = 0L;
298 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
299 ec_info.m_nMode &= ~MODEMASK_CONTINUOUS_RETRIEVAL;
300 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
305 //////////////////////////////////////////////////////////////////////////////////////////////////
/*
 * InitializeBuffer - allocate the trace buffer of nSize bytes.
 * Thin wrapper around AllocateSingleBuffer(); returns its 0/-1 result.
 * (Completed: the closing brace was truncated in this view.)
 */
int InitializeBuffer(unsigned int nSize) {
	return AllocateSingleBuffer(nSize);
}
/*
 * UninitializeBuffer - tear down the trace buffer.
 * NOTE(review): the body is truncated in this view — presumably it frees
 * the buffer (FreeSingleBuffer) and returns 0/-1; confirm against the
 * full source.
 */
int UninitializeBuffer(void) {
316 unsigned int GetBufferSize(void) { return ec_info.buffer_size; };
/*
 * SetBufferSize - resize the trace buffer to nSize bytes.  Allowed only
 * in EC_STATE_IDLE; validates the size, detaches probes and re-creates
 * the buffer.  Returns -1 on any failure (by the visible call pattern).
 * NOTE(review): early-return/brace lines are truncated in this view.
 */
int SetBufferSize(unsigned int nSize) {
	if (GetECState() != EC_STATE_IDLE) {
		EPRINTF("Buffer changes are allowed in IDLE state only (%d)!", GetECState());
	/* requesting the current size is a no-op */
	if(GetBufferSize() == nSize)
	if(CheckBufferSize(nSize) == -1) {
		EPRINTF("Invalid buffer size!");
	detach_selected_probes ();
	if(UninitializeBuffer() == -1)
		EPRINTF("Cannot uninitialize buffer!");
	if(InitializeBuffer(nSize) == -1) {
		EPRINTF("Cannot initialize buffer! [Size=%u KB]", nSize / 1024);
/*
 * SetPid - set the pid of the process to instrument.  Allowed only in
 * EC_STATE_IDLE.
 * NOTE(review): braces, the assignment to the pid storage and the return
 * statement are truncated in this view.
 */
int SetPid(unsigned int pid)
	if (GetECState() != EC_STATE_IDLE)
		EPRINTF("PID changes are allowed in IDLE state only (%d)!", GetECState());
	DPRINTF("SetPid pid:%d\n", pid);
/* ResetSingleBuffer - reset single-buffer bookkeeping (body truncated in this view). */
void ResetSingleBuffer(void) {
/*
 * ResetBuffer - discard collected trace data and restore buffer_effect
 * to the full buffer size.  Allowed only in EC_STATE_IDLE.
 * NOTE(review): the early-return body and function tail are truncated in
 * this view.
 */
int ResetBuffer(void) {
	unsigned long spinlock_flags = 0L;

	if (GetECState() != EC_STATE_IDLE) {
		EPRINTF("Buffer changes are allowed in IDLE state only!");

	detach_selected_probes ();

	spin_lock_irqsave (&ec_spinlock, spinlock_flags);
	ec_info.buffer_effect = ec_info.buffer_size;
	spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/*
 * WriteEventIntoSingleBuffer - append one packed event to the cyclic
 * trace buffer, updating the ec_info bookkeeping (first / after_last /
 * trace_size and the saved/discarded/lost counters).
 * NOTE(review): many brace/else/argument lines are truncated in this
 * view, so the exact branch structure cannot be read here.
 */
int WriteEventIntoSingleBuffer(char* pEvent, unsigned long nEventSize) {
	unsigned int unused_space;

	/* NOTE(review): presumably guarded by a !p_buffer check — the guard
	 * line is not visible in this view */
	EPRINTF("Invalid pointer to buffer!");
	++ec_info.lost_events_count;
	/* case 1: free space is the tail [after_last, buffer_size) */
	if (ec_info.trace_size == 0 || ec_info.after_last > ec_info.first) {
		unused_space = ec_info.buffer_size - ec_info.after_last;
		if (unused_space > nEventSize) {
			ec_info.after_last = copy_into_cyclic_buffer(p_buffer,
			ec_info.saved_events_count++;
			ec_info.buffer_effect = ec_info.buffer_size;
			ec_info.trace_size = ec_info.after_last - ec_info.first;
		/* tail full: wrap to the head if the event fits before 'first' */
		if (ec_info.first > nEventSize) {
			ec_info.buffer_effect = ec_info.after_last;
			ec_info.after_last = copy_into_cyclic_buffer(p_buffer,
			ec_info.saved_events_count++;
			ec_info.trace_size = ec_info.buffer_effect
				+ ec_info.after_last;
		// TODO: consider two variants!
		ec_info.discarded_events_count++;
	/* case 2: free space is the gap [after_last, first) */
	unused_space = ec_info.first - ec_info.after_last;
	if (unused_space > nEventSize) {
		ec_info.after_last = copy_into_cyclic_buffer(p_buffer,
		ec_info.saved_events_count++;
		ec_info.trace_size = ec_info.buffer_effect
			+ ec_info.after_last;
	ec_info.discarded_events_count++;
/*
 * WriteEventIntoBuffer - optionally hex-dump the raw event bytes (the
 * enclosing debug conditional is truncated in this view) and delegate
 * the actual store to WriteEventIntoSingleBuffer().
 */
int WriteEventIntoBuffer(char* pEvent, unsigned long nEventSize) {
		for(i = 0; i < nEventSize; i++)
			printk("%02X ", pEvent[i]);
	return WriteEventIntoSingleBuffer(pEvent, nEventSize);
440 //////////////////////////////////////////////////////////////////////////////////////////////////
442 int set_event_mask (int new_mask)
444 unsigned long spinlock_flags = 0L;
445 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
446 event_mask = new_mask;
447 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/*
 * get_event_mask - copy the current global event mask into *mask.
 * NOTE(review): the return type line and body are truncated in this view.
 */
get_event_mask (int *mask)
/*
 * generic_swap - byte-wise in-place swap of two 'size'-byte objects,
 * used as the swap callback for sort().
 * NOTE(review): the return-type line, temporary declaration and loop
 * opening are truncated in this view.
 */
generic_swap (void *a, void *b, int size)
		*(char *) a++ = *(char *) b;
	} while (--size > 0);
/*
 * sort - generic in-place heapsort over num elements of 'size' bytes,
 * using caller-supplied compare and swap callbacks (same shape as the
 * Linux kernel's lib/sort.c heapsort).
 * NOTE(review): the child-offset computations (c = ...), the break
 * statements and braces are truncated in this view.
 */
static void sort (void *base, size_t num, size_t size, int (*cmp) (const void *, const void *), void (*fswap) (void *, void *, int size))
	/* pre-scale counters for performance */
	int i = (num / 2) * size, n = num * size, c, r;

	/* phase 1: heapify (sift down from the last internal node) */
	for (; i >= 0; i -= size)
		for (r = i; r * 2 < n; r = c)
			if (c < n - size && cmp (base + c, base + c + size) < 0)
			if (cmp (base + r, base + c) >= 0)
			fswap (base + r, base + c, size);

	/* phase 2: repeatedly pop the root and re-heapify the shrunk heap */
	for (i = n - size; i >= 0; i -= size)
		fswap (base, base + i, size);
		for (r = 0; r * 2 < i; r = c)
			if (c < i - size && cmp (base + c, base + c + size) < 0)
			if (cmp (base + r, base + c) >= 0)
			fswap (base + r, base + c, size);
/*
 * addr_cmp - comparator for sort() over (addr, index) pairs: orders by
 * the leading unsigned long (the VTP address).  Returns -1 when *a > *b
 * and 1 otherwise; equal keys deliberately compare as 1, which is fine
 * for the heapsort above (ties keep an arbitrary order).
 * (Completed: the braces were truncated in this view.)
 */
static int addr_cmp (const void *a, const void *b)
{
	return *(unsigned long *) a > *(unsigned long *) b ? -1 : 1;
}
/*
 * find_lib_path - look up lib_name inside the "deps" blob (which starts
 * with a size_t header that is skipped) and return the matching
 * filesystem path.
 * NOTE(review): the search loop structure and return statements are
 * truncated in this view.
 */
char *find_lib_path(const char *lib_name)
	char *p = deps + sizeof(size_t);
	DPRINTF("p is at %s", p);
	match = strstr(p, lib_name);
	len = strlen(p) + 1;	/* we are at path now */
	DPRINTF("Found match: %s", match);
532 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27)
533 #define list_for_each_rcu(pos, head) __list_for_each_rcu(pos, head)
/*
 * unlink_bundle - free everything attached to us_proc_info (per-library
 * IP arrays, VTP bundles, PLT tables, the library array itself) and
 * drain the OTG user-space probe list.  Counts are zeroed before the
 * storage is released so concurrent readers stop using it first.
 * NOTE(review): braces and several statements are truncated in this view.
 */
void unlink_bundle(void)
	us_proc_lib_t *d_lib;
	struct list_head *pos;	//, *tmp;

	path = us_proc_info.path;
	us_proc_info.path = 0;

	// first make sure "d_lib" is not used any more and only
	// then release storage
	if (us_proc_info.p_libs)
		int count1 = us_proc_info.libs_count;
		us_proc_info.libs_count = 0;
		for (i = 0; i < count1; i++)
			d_lib = &us_proc_info.p_libs[i];

			// first make sure "d_lib->p_ips" is not used any more and only
			// then release storage
			//int count2 = d_lib->ips_count;
			d_lib->ips_count = 0;
			/*for (k = 0; k < count2; k++)
				kfree ((void *) d_lib->p_ips[k].name);*/
			vfree ((void *) d_lib->p_ips);

			// first make sure "d_lib->p_vtps" is not used any more and only
			// then release storage
			int count2 = d_lib->vtps_count;
			d_lib->vtps_count = 0;
			for (k = 0; k < count2; k++)
				//list_for_each_safe_rcu(pos, tmp, &d_lib->p_vtps[k].list) {
				list_for_each (pos, &d_lib->p_vtps[k].list)
					us_proc_vtp_data_t *vtp = list_entry (pos, us_proc_vtp_data_t, list);
			kfree ((void *) d_lib->p_vtps);

			d_lib->plt_count = 0;
			kfree((void*) d_lib->p_plt);
		us_proc_info.is_plt = 0;
		kfree ((void *) us_proc_info.p_libs);
		us_proc_info.p_libs = 0;
	/* kfree ((void *) path); */
	/* //putname(path); */

	us_proc_info.tgid = 0;

	/* OTG probes list cleaning */
	list_for_each_entry_rcu (p, &otg_us_proc_info, list) {
		list_del_rcu(&p->list);
606 extern struct dentry *dentry_by_path(const char *path);
/*
 * NOTE(review): the opening line of this function (name/signature) is
 * not visible in this view.  From the body it is the ioctl "bundle"
 * parser: it walks the packed byte stream 'bundle' with read pointer p,
 * configures the EC mode / buffer size / target pid, registers kernel
 * probes, fills us_proc_info with libraries, instrumentation points
 * (IPs) and PLT entries, builds sorted VTP bundles, and finally installs
 * the condition list and event mask.  Many brace/else/error-path lines
 * are truncated below.
 */
	get_my_uprobes_info_t get_uprobes = NULL;
	inst_us_proc_t *my_uprobes_info = 0;
	inst_us_proc_t empty_uprobes_info =
	char *p = bundle;	/* read pointer for bundle */
	us_proc_lib_t *d_lib, *pd_lib;
	dex_proc_ip_t *dex_proc;
	ioctl_usr_space_lib_t s_lib;
	ioctl_usr_space_vtp_t *s_vtp;
	struct cond *c, *c_tmp, *p_cond;

	/* Get user-defined us handlers (if they are provided) */
	get_uprobes = (get_my_uprobes_info_t)lookup_name("get_my_uprobes_info");
	my_uprobes_info = (inst_us_proc_t *)get_uprobes();
	if (my_uprobes_info == 0)
		my_uprobes_info = &empty_uprobes_info;

	DPRINTF("Going to release us_proc_info");
	if (us_proc_info.path)

	/* Skip size - it has been used before */
	p += sizeof(u_int32_t);

	/* EC mode, buffer size and target pid come first in the stream */
	if (SetECMode(*(u_int32_t *)p) == -1)
		EPRINTF("Cannot set mode!\n");
	p += sizeof(u_int32_t);

	if (SetBufferSize(*(u_int32_t *)p) == -1)
		EPRINTF("Cannot set buffer size!\n");
	p += sizeof(u_int32_t);

	if (SetPid(*(u_int32_t *)p) == -1)
		EPRINTF("Cannot set pid!\n");
	p += sizeof(u_int32_t);

	/* kernel probe addresses */
	nr_kern_probes = *(u_int32_t *)p;
	p += sizeof(u_int32_t);
	for (i = 0; i < nr_kern_probes; i++)
		if (add_probe(*(u_int32_t *)p))
			EPRINTF("Cannot add kernel probe at 0x%x!\n", *(u_int32_t *)p);
		p += sizeof(u_int32_t);

	/* target application path ("*" means "any application") */
	len = *(u_int32_t *)p;	/* App path len */
	p += sizeof(u_int32_t);

	us_proc_info.is_plt = 0;
		us_proc_info.path = NULL;
		us_proc_info.path = (char *)p;
		DPRINTF("app path = %s", us_proc_info.path);
		if (strcmp(us_proc_info.path, "*")) {
			us_proc_info.m_f_dentry = dentry_by_path(us_proc_info.path);
			if (us_proc_info.m_f_dentry == NULL) {
			us_proc_info.m_f_dentry = NULL;

		/* per-library probe descriptions */
		us_proc_info.libs_count = *(u_int32_t *)p;
		DPRINTF("nr of libs = %d", us_proc_info.libs_count);
		p += sizeof(u_int32_t);
		us_proc_info.p_libs =
			kmalloc(us_proc_info.libs_count * sizeof(us_proc_lib_t), GFP_KERNEL);
		if (!us_proc_info.p_libs)
			EPRINTF("Cannot alloc p_libs!");
		memset(us_proc_info.p_libs, 0,
			us_proc_info.libs_count * sizeof(us_proc_lib_t));

		for (i = 0; i < us_proc_info.libs_count; i++)
			int abs_handler_idx = 0;
			d_lib = &us_proc_info.p_libs[i];

			lib_name_len = *(u_int32_t *)p;
			p += sizeof(u_int32_t);
			d_lib->path = (char *)p;
			DPRINTF("d_lib->path = %s", d_lib->path);

			lib_name_len = *(u_int32_t *)p;
			p += sizeof(u_int32_t);
			d_lib->path_dyn = (char *)p;
			DPRINTF("d_lib->path_dyn = %s", d_lib->path_dyn);

			d_lib->ips_count = *(u_int32_t *)p;
			DPRINTF("d_lib->ips_count = %d", d_lib->ips_count);
			p += sizeof(u_int32_t);

			/* If there are any probes for "*" app we have to drop them */
			if (strcmp(d_lib->path, "*") == 0)
				p += d_lib->ips_count * 3 * sizeof(u_int32_t);
				d_lib->ips_count = 0;
				d_lib->plt_count = *(u_int32_t*)p;
				p += sizeof(u_int32_t);
				p += d_lib->plt_count * 2 * sizeof(u_int32_t);
				d_lib->plt_count = 0;

			if (strcmp(us_proc_info.path, d_lib->path) == 0)

			DPRINTF("Searching path for lib %s", d_lib->path);
			d_lib->path = find_lib_path(d_lib->path);
				/* no static path found: fall back to the dynamic path */
				if (strcmp(d_lib->path_dyn, "") == 0) {
					EPRINTF("Cannot find path for lib %s!", d_lib->path);
					/* Just skip all the IPs and go to next lib */
					p += d_lib->ips_count * 3 * sizeof(u_int32_t);
					d_lib->ips_count = 0;
					d_lib->plt_count = *(u_int32_t*)p;
					p += sizeof(u_int32_t);
					p += d_lib->plt_count * 2 * sizeof(u_int32_t);
					d_lib->plt_count = 0;
				d_lib->path = d_lib->path_dyn;
				DPRINTF("Assign path for lib as %s (in suggestion of dyn lib)", d_lib->path);

			d_lib->m_f_dentry = dentry_by_path(d_lib->path);
			if (d_lib->m_f_dentry == NULL) {
				EPRINTF ("failed to lookup dentry for path %s!", d_lib->path);
				/* Just skip all the IPs and go to next lib */
				p += d_lib->ips_count * 3 * sizeof(u_int32_t);
				d_lib->ips_count = 0;
				d_lib->plt_count = *(u_int32_t*)p;
				p += sizeof(u_int32_t);
				p += d_lib->plt_count * 2 * sizeof(u_int32_t);
				d_lib->plt_count = 0;

			/* match this lib against the built-in handler table by basename */
			ptr = strrchr(d_lib->path, '/');
			for (l = 0; l < my_uprobes_info->libs_count; l++)
				if ((strcmp(ptr, my_uprobes_info->p_libs[l].path) == 0) ||
					(is_app && *(my_uprobes_info->p_libs[l].path) == '\0'))
					pd_lib = &my_uprobes_info->p_libs[l];
				abs_handler_idx += my_uprobes_info->p_libs[l].ips_count;

			if (d_lib->ips_count > 0)
				us_proc_info.unres_ips_count += d_lib->ips_count;
				d_lib->p_ips = vmalloc(d_lib->ips_count * sizeof(us_proc_ip_t));
				DPRINTF("d_lib[%i]->p_ips=%p/%u [%s]", i, d_lib->p_ips,
					us_proc_info.unres_ips_count, d_lib->path);
				/* NOTE(review): presumably under a !p_ips check (line not visible) */
				EPRINTF("Cannot alloc p_ips!\n");
				memset (d_lib->p_ips, 0, d_lib->ips_count * sizeof(us_proc_ip_t));
				for (k = 0; k < d_lib->ips_count; k++)
					d_ip = &d_lib->p_ips[k];
					d_ip->offset = *(u_int32_t *)p;
					p += sizeof(u_int32_t);
					p += sizeof(u_int32_t);	/* Skip inst type */
					handler_index = *(u_int32_t *)p;
					p += sizeof(u_int32_t);

					DPRINTF("pd_lib->ips_count = 0x%x", pd_lib->ips_count);
					if (handler_index != -1)
						DPRINTF("found handler for 0x%x", d_ip->offset);
						/* handler_index is global; rebase into this lib's table */
						d_ip->jprobe.pre_entry =
							pd_lib->p_ips[handler_index - abs_handler_idx].jprobe.pre_entry;
							pd_lib->p_ips[handler_index - abs_handler_idx].jprobe.entry;
						d_ip->retprobe.handler =
							pd_lib->p_ips[handler_index - abs_handler_idx].retprobe.handler;

			/* PLT entries: (function address, GOT address) pairs */
			d_lib->plt_count = *(u_int32_t*)p;
			p += sizeof(u_int32_t);
			if (d_lib->plt_count > 0)
				us_proc_info.is_plt = 1;
				d_lib->p_plt = kmalloc(d_lib->plt_count * sizeof(us_proc_plt_t), GFP_KERNEL);
				/* NOTE(review): presumably under a !p_plt check (line not visible) */
				EPRINTF("Cannot alloc p_plt!");
				memset(d_lib->p_plt, 0, d_lib->plt_count * sizeof(us_proc_plt_t));
				for (j = 0; j < d_lib->plt_count; j++)
					d_lib->p_plt[j].func_addr = *(u_int32_t*)p;
					p += sizeof(u_int32_t);
					d_lib->p_plt[j].got_addr = *(u_int32_t*)p;
					p += sizeof(u_int32_t);
					d_lib->p_plt[j].real_func_addr = 0;

		/* VTPs (variable trace points), attributed to the first library */
		lib_path_len = *(u_int32_t *)p;
		DPRINTF("lib_path_len = %d", lib_path_len);
		p += sizeof(u_int32_t);
		DPRINTF("lib_path = %s", lib_path);
		d_lib = &us_proc_info.p_libs[0];
		s_lib.vtps_count = *(u_int32_t *)p;
		DPRINTF("s_lib.vtps_count = %d", s_lib.vtps_count);
		p += sizeof(u_int32_t);

		if (s_lib.vtps_count > 0)
			unsigned long ucount = 1, pre_addr;
			unsigned long *addrs;
			s_lib.p_vtps = kmalloc(s_lib.vtps_count
				* sizeof(ioctl_usr_space_vtp_t), GFP_KERNEL);
			for (i = 0; i < s_lib.vtps_count; i++)
				int var_name_len = *(u_int32_t *)p;
				p += sizeof(u_int32_t);
				s_lib.p_vtps[i].name = p;
				s_lib.p_vtps[i].addr = *(u_int32_t *)p;
				p += sizeof(u_int32_t);
				s_lib.p_vtps[i].type = *(u_int32_t *)p;
				p += sizeof(u_int32_t);
				s_lib.p_vtps[i].size = *(u_int32_t *)p;
				p += sizeof(u_int32_t);
				s_lib.p_vtps[i].reg = *(u_int32_t *)p;
				p += sizeof(u_int32_t);
				s_lib.p_vtps[i].off = *(u_int32_t *)p;
				p += sizeof(u_int32_t);

			// array containing elements like (addr, index)
			addrs = kmalloc (s_lib.vtps_count * 2 * sizeof (unsigned long), GFP_KERNEL);
			// DPRINTF ("addrs=%p/%u", addrs, s_lib.vtps_count);
				//note: storage will released next time or at clean-up moment
			memset (addrs, 0, s_lib.vtps_count * 2 * sizeof (unsigned long));
			for (k = 0; k < s_lib.vtps_count; k++)
				s_vtp = &s_lib.p_vtps[k];
				addrs[2 * k] = s_vtp->addr;
				addrs[2 * k + 1] = k;
			// sort by VTP addresses, i.e. make VTPs with the same addresses adjacent;
			// organize them into bundles
			sort (addrs, s_lib.vtps_count, 2 * sizeof (unsigned long), addr_cmp, generic_swap);

			// calc number of VTPs with unique addresses
			for (k = 1, pre_addr = addrs[0]; k < s_lib.vtps_count; k++)
				if (addrs[2 * k] != pre_addr)
					ucount++;	// count different only
				pre_addr = addrs[2 * k];
			us_proc_info.unres_vtps_count += ucount;
			d_lib->vtps_count = ucount;
			d_lib->p_vtps = kmalloc (ucount * sizeof (us_proc_vtp_t), GFP_KERNEL);
			DPRINTF ("d_lib[%i]->p_vtps=%p/%lu", i, d_lib->p_vtps, ucount);	//, d_lib->path);
				//note: storage will released next time or at clean-up moment
			memset (d_lib->p_vtps, 0, d_lib->vtps_count * sizeof (us_proc_vtp_t));
			// go through sorted VTPS.
			for (k = 0, j = 0, pre_addr = 0, mvtp = NULL; k < s_lib.vtps_count; k++)
				us_proc_vtp_data_t *vtp_data;
				s_vtp = &s_lib.p_vtps[addrs[2 * k + 1]];
				// if this is the first VTP in bundle (master VTP)
				if (addrs[2 * k] != pre_addr)
					// data are in the array of master VTPs
					mvtp = &d_lib->p_vtps[j++];
					mvtp->addr = s_vtp->addr;
					INIT_LIST_HEAD (&mvtp->list);
				// data are in the list of slave VTPs
				vtp_data = kmalloc (sizeof (us_proc_vtp_data_t), GFP_KERNEL);
					//note: storage will released next time or at clean-up moment
				/*len = strlen_user (s_vtp->name);
				vtp_data->name = kmalloc (len, GFP_KERNEL);
					//note: storage will released next time or at clean-up moment
				if (strncpy_from_user (vtp_data->name, s_vtp->name, len) != (len-1))
					//note: storage will released next time or at clean-up moment
					EPRINTF ("strncpy_from_user VTP name failed %p (%ld)", vtp_data->name, len);
					kfree (vtp_data->name);
				//vtp_data->name[len] = 0;*/
				vtp_data->name = s_vtp->name;
				vtp_data->type = s_vtp->type;
				vtp_data->size = s_vtp->size;
				vtp_data->reg = s_vtp->reg;
				vtp_data->off = s_vtp->off;
				list_add_tail_rcu (&vtp_data->list, &mvtp->list);
				pre_addr = addrs[2 * k];
			kfree(s_lib.p_vtps);

	/* first, delete all the conds */
	list_for_each_entry_safe(c, c_tmp, &cond_list.list, list) {
	/* second, add new conds */
	/* This can be improved (by placing conds into array) */
	nr_conds = *(u_int32_t *)p;
	DPRINTF("nr_conds = %d", nr_conds);
	p += sizeof(u_int32_t);
	for (i = 0; i < nr_conds; i++) {
		p_cond = kmalloc(sizeof(struct cond), GFP_KERNEL);
		/* NOTE(review): presumably under a !p_cond check (line not visible) */
		EPRINTF("Cannot alloc cond!\n");
		memcpy(&p_cond->tmpl, p, sizeof(struct event_tmpl));
		p_cond->applied = 0;
		list_add(&(p_cond->list), &(cond_list.list));
		p += sizeof(struct event_tmpl);

	/* finally, the event mask */
	if (set_event_mask(*(u_int32_t *)p)) {
		EPRINTF("Cannot set event mask!");
	p += sizeof(u_int32_t);
1063 //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
/*
 * storage_init - one-time "storage" initialization: clear the mode mask,
 * allocate the default-size trace buffer and init all probe/handler
 * list heads and the registry lock.
 * NOTE(review): the error-branch body and final return are truncated in
 * this view.
 */
int storage_init (void)
	unsigned long spinlock_flags = 0L;

	spin_lock_irqsave (&ec_spinlock, spinlock_flags);
	ec_info.m_nMode = 0; // MASK IS CLEAR (SINGLE NON_CONTINUOUS BUFFER)
	spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);

	if(InitializeBuffer(EC_BUFFER_SIZE_DEFAULT) == -1) {
		EPRINTF("Cannot initialize buffer! [Size=%u KB]", EC_BUFFER_SIZE_DEFAULT / 1024 );

	INIT_HLIST_HEAD(&kernel_probes);
	INIT_HLIST_HEAD(&otg_kernel_probes);
	INIT_LIST_HEAD(&otg_us_proc_info);
	spin_lock_init(&dbi_mh.lock);
	INIT_LIST_HEAD(&dbi_mh.modules_handlers);
/*
 * Shuts down "storage".
 * Assumes that all probes are already deactivated.
 */
void storage_down (void)
	if(UninitializeBuffer() == -1)
		EPRINTF("Cannot uninitialize buffer!");

	/* report bookkeeping anomalies accumulated during the session */
	if (ec_info.collision_count)
		EPRINTF ("ec_info.collision_count=%d", ec_info.collision_count);
	if (ec_info.lost_events_count)
		EPRINTF ("ec_info.lost_events_count=%d", ec_info.lost_events_count);
/*
 * get_probe_func_addr - pull the probed function's address (the first
 * vararg) from an event's argument list.
 * NOTE(review): the opening brace and any fmt validation are truncated
 * in this view.
 */
u_int32_t get_probe_func_addr(const char *fmt, va_list args)
	return va_arg(args, u_int32_t);
/*
 * pack_task_event_info - pack one trace event for 'task' into the shared
 * static buffer and store it in the trace, honouring start/ignore
 * conditions before the store, the event mask, the paused flag, and stop
 * conditions after the store.
 * NOTE(review): numerous brace/else lines and the tails of the long
 * condition expressions are truncated in this view.
 */
void pack_task_event_info(struct task_struct *task, probe_id_t probe_id,
		record_type_t record_type, const char *fmt, ...)
	unsigned long spinlock_flags = 0L;
	/* shared scratch buffer; all users serialize on ec_spinlock */
	static char buf[EVENT_MAX_SIZE] = "";
	TYPEOF_EVENT_LENGTH event_len = 0L;
	struct timeval tv = { 0, 0 };
	TYPEOF_THREAD_ID current_pid = task->pid;
	TYPEOF_PROCESS_ID current_tgid = task->tgid;
	unsigned current_cpu = task_cpu(task);
	unsigned long addr = 0;
	struct cond *p_cond;
	struct event_tmpl *p_tmpl;

	spin_lock_irqsave(&ec_spinlock, spinlock_flags);
	memset(buf, 0, EVENT_MAX_SIZE);
	spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);

	do_gettimeofday (&tv);

	/* kernel-space probes: drop events whose probe is not marked installed */
	if (probe_id == KS_PROBE_ID) {
		va_start(args, fmt);
		addr = get_probe_func_addr(fmt, args);
		if( ((addr == pf_addr) && !(probes_flags & PROBE_FLAG_PF_INSTLD)) ||
			((addr == cp_addr) && !(probes_flags & PROBE_FLAG_CP_INSTLD)) ||
			((addr == mr_addr) && !(probes_flags & PROBE_FLAG_MR_INSTLD)) ||
			((addr == unmap_addr) && !(probes_flags & PROBE_FLAG_UNMAP_INSTLD)) ||
			((addr == exit_addr) && !(probes_flags & PROBE_FLAG_EXIT_INSTLD)) ) {
	if (probe_id == US_PROBE_ID) {
		va_start(args, fmt);
		addr = get_probe_func_addr(fmt, args);

	/* Checking for all the conditions
	 * except stop condition that we process after saving the event */
	list_for_each_entry(p_cond, &cond_list.list, list) {
		p_tmpl = &p_cond->tmpl;
		switch (p_tmpl->type) {
		case ET_TYPE_START_COND:
			if ((!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_ADDR) ||
				(addr == p_tmpl->addr)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_PID) ||
				(current_tgid == p_tmpl->pid)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TID) ||
				(current_pid == p_tmpl->tid)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_CPU_NUM) ||
				(current_cpu == p_tmpl->cpu_num)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_BIN_NAME) ||
				(strcmp(task->comm, p_tmpl->bin_name) == 0)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TIME) ||
				(tv.tv_sec > last_attach_time.tv_sec + p_tmpl->sec) ||
				(tv.tv_sec == last_attach_time.tv_sec + p_tmpl->sec &&
				tv.tv_usec >= last_attach_time.tv_usec + p_tmpl->usec)) &&
				/* NOTE(review): remainder of the condition is truncated in this view */
				spin_lock_irqsave(&ec_spinlock, spinlock_flags);
				p_cond->applied = 1;
				spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
		case ET_TYPE_IGNORE_COND:
			/* if (probe_id == PROBE_SCHEDULE) */
			if ((!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_ADDR) ||
				(addr == p_tmpl->addr)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_PID) ||
				(current_tgid == p_tmpl->pid)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TID) ||
				(current_pid == p_tmpl->tid)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_CPU_NUM) ||
				(current_cpu == p_tmpl->cpu_num)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_BIN_NAME) ||
				(strcmp(task->comm, p_tmpl->bin_name) == 0))) {
				/* matching events are counted but not stored */
				spin_lock_irqsave(&ec_spinlock, spinlock_flags);
				ec_info.ignored_events_count++;
				spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);

	/* Save only not masked entry or return kernel and user space events */
	if (likely(!((probe_id == KS_PROBE_ID || probe_id == US_PROBE_ID)
		&& ((record_type == RECORD_ENTRY && (event_mask & IOCTL_EMASK_ENTRY))
		|| (record_type == RECORD_RET && (event_mask & IOCTL_EMASK_EXIT)))))) {

		spin_lock_irqsave (&ec_spinlock, spinlock_flags);

		/* while paused, only format/dynamic-library events pass through */
		if (paused && (!(probe_id == EVENT_FMT_PROBE_ID || probe_id == DYN_LIB_PROBE_ID))) {
			ec_info.ignored_events_count++;
			spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);

		va_start (args, fmt);
		event_len = VPackEvent(buf, sizeof(buf), event_mask, probe_id, record_type, (TYPEOF_TIME *)&tv,
			current_tgid, current_pid, current_cpu, fmt, args);

		if(event_len == 0) {
			EPRINTF ("ERROR: failed to pack event!");
			++ec_info.lost_events_count;
		} else if(WriteEventIntoBuffer(buf, event_len) == -1) {
			EPRINTF("Cannot write event into buffer!");
			++ec_info.lost_events_count;
		spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);

	/* Check for stop condition. We pause collecting the trace right after
	 * storing this event */
	list_for_each_entry(p_cond, &cond_list.list, list) {
		p_tmpl = &p_cond->tmpl;
		switch (p_tmpl->type) {
		case ET_TYPE_STOP_COND:
			if ((!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_ADDR) ||
				(addr == p_tmpl->addr)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_PID) ||
				(current_tgid == p_tmpl->pid)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TID) ||
				(current_pid == p_tmpl->tid)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_CPU_NUM) ||
				(current_cpu == p_tmpl->cpu_num)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_BIN_NAME) ||
				(strcmp(task->comm, p_tmpl->bin_name) == 0)) &&
				(!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TIME) ||
				(tv.tv_sec > last_attach_time.tv_sec + p_tmpl->sec) ||
				(tv.tv_sec == last_attach_time.tv_sec + p_tmpl->sec &&
				tv.tv_usec >= last_attach_time.tv_usec + p_tmpl->usec)) &&
				/* NOTE(review): remainder of the condition is truncated in this view */
				spin_lock_irqsave(&ec_spinlock, spinlock_flags);
				p_cond->applied = 1;
				spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
EXPORT_SYMBOL_GPL(pack_task_event_info);
/* Look up an already-registered kernel probe by its target address.
 * Returns the matching kernel_probe_t, or NULL if no probe is registered
 * at 'addr'. Traversal is RCU-protected; callers elsewhere use the result
 * under the same discipline (see add_probe_to_list / remove_probe_from_list).
 * NOTE(review): several lines are elided in this view (declaration of 'p',
 * the loop 'break', closing brace) — confirm against the full file. */
1258 kernel_probe_t* find_probe (unsigned long addr)
1261 struct hlist_node *node;
1263 //check if such probe does exist
/* Walk the global RCU hash-list of registered kernel probes. */
1264 hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
1265 if (p->addr == addr)
/* 'node' is non-NULL only if the walk stopped early on a match,
 * so this distinguishes "found p" from "ran off the end". */
1268 return node ? p : NULL;
/* Register a new kernel probe at 'addr' and link it into the global
 * RCU probe list. On success the new probe is returned through *pprobe.
 * Re-registering an address that already has a probe is treated as benign
 * (see comment below). Allocation uses GFP_KERNEL, so this must be called
 * from sleepable context.
 * NOTE(review): the error-return lines (duplicate-found path, kmalloc
 * failure return, final return) are elided in this view — verify the
 * return-code convention against the full file. */
1272 int add_probe_to_list (unsigned long addr, kernel_probe_t ** pprobe)
1274 kernel_probe_t *new_probe;
1275 kernel_probe_t *probe;
1279 //check if such probe does already exist
1280 probe = find_probe(addr);
1282 /* It is not a problem if we have already registered
1283 this probe before */
/* Fresh probe descriptor; zeroed below so all kprobe fields start clean. */
1286 new_probe = kmalloc (sizeof (kernel_probe_t), GFP_KERNEL);
1289 EPRINTF ("no memory for new probe!");
1292 memset (new_probe, 0, sizeof (kernel_probe_t));
1293 new_probe->addr = addr;
/* Both the jprobe and the retprobe target the same instruction address. */
1294 new_probe->jprobe.kp.addr = new_probe->retprobe.kp.addr = (kprobe_opcode_t *)addr;
/* Back-pointer so the probe handlers can recover this descriptor. */
1295 new_probe->jprobe.priv_arg = new_probe->retprobe.priv_arg = new_probe;
1296 //new_probe->jprobe.pre_entry = (kprobe_pre_entry_handler_t) def_jprobe_event_pre_handler;
/* Attach module-supplied jp/rp handlers if a loaded handler module
 * declares this address (see find_dbi_jp_handler/find_dbi_rp_handler). */
1297 dbi_find_and_set_handler_for_probe(new_probe);
1298 INIT_HLIST_NODE (&new_probe->hlist);
/* Publish to readers; find_probe() walks this list under RCU. */
1299 hlist_add_head_rcu (&new_probe->hlist, &kernel_probes);
1301 *pprobe = new_probe;
/* Unlink the kernel probe registered at 'addr' from the global RCU list.
 * Removing an address with no registered probe is not an error (see
 * comment below).
 * NOTE(review): the not-found return, the freeing of the probe after the
 * RCU grace period (if any), and the final return are elided in this
 * view — confirm the memory-reclaim path against the full file. */
1305 int remove_probe_from_list (unsigned long addr)
1309 //check if such probe does exist
1310 p = find_probe (addr);
1312 /* We do not care about it. Nothing bad. */
/* Unpublish; concurrent RCU readers may still hold a reference until
 * the grace period ends. */
1316 hlist_del_rcu (&p->hlist);
/* Post a pre-packed user-space event into the trace buffer.
 *
 * 'data' holds a SWAP event record whose header was filled by the caller;
 * this function rewrites the record in place: it fills in the fields the
 * caller left for the kernel (time, pid, tid, cpu) according to the global
 * 'event_mask', compacts the record if fields are masked out, fixes up the
 * argument block for US/VTP probes, then recomputes the total length and
 * writes the record into the shared event buffer.
 *
 * Layout invariant: 'cur' always points at the next byte to be written
 * inside 'data'; every conditional below appends its field and advances
 * 'cur' by exactly that field's size.
 *
 * NOTE(review): many lines (closing braces, 'else' branches, returns) are
 * elided in this view; the buffer passed in is assumed large enough for
 * the rewritten record — confirm with the callers. */
1324 int put_us_event (char *data, unsigned long len)
1326 unsigned long spinlock_flags = 0L;
/* Overlay the event header on the raw record. */
1328 SWAP_TYPE_EVENT_HEADER *pEventHeader = (SWAP_TYPE_EVENT_HEADER *)data;
/* Skip the fixed prefix (length + type + probe id); kernel-filled fields
 * start here. */
1329 char *cur = data + sizeof(TYPEOF_EVENT_LENGTH) + sizeof(TYPEOF_EVENT_TYPE)
1330 + sizeof(TYPEOF_PROBE_ID);
1331 TYPEOF_NUMBER_OF_ARGS nArgs = pEventHeader->m_nNumberOfArgs;
1332 TYPEOF_PROBE_ID probe_id = pEventHeader->m_nProbeID;
/* (debug dump, kept disabled) */
1335 /*if(probe_id == US_PROBE_ID){
1336 printk("esrc %p/%d[", data, len);
1337 for(i = 0; i < len; i++)
1338 printk("%02x ", data[i]);
1342 // set pid/tid/cpu/time i
1343 //pEventHeader->m_time.tv_sec = tv.tv_sec;
1344 //pEventHeader->m_time.tv_usec = tv.tv_usec;
/* Memory Error Checker integration: MEC-range probe ids are diverted to
 * the mec_handlers.ko module instead of the normal trace buffer. */
1346 #ifdef MEMORY_CHECKER
1347 //TODO: move this part to special MEC event posting routine, new IOCTL is needed
1348 if((probe_id >= MEC_PROBE_ID_MIN) && (probe_id <= MEC_PROBE_ID_MAX))
1350 if(mec_post_event != NULL)
1352 int res = mec_post_event(data, len);
/* Lazy symbol lookup: resolve mec_post_event on first use. */
1360 mec_post_event = lookup_name("mec_post_event");
1361 if(mec_post_event == NULL)
1363 EPRINTF ("Failed to find function 'mec_post_event' from mec_handlers.ko. Memory Error Checker will work incorrectly.");
1367 int res = mec_post_event(data, len);
/* Timestamp: always present for format events, otherwise only when not
 * masked out by IOCTL_EMASK_TIME. */
1377 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_TIME)){
1378 struct timeval tv = { 0, 0 };
1379 do_gettimeofday (&tv);
1380 memcpy(cur, &tv, sizeof(TYPEOF_TIME));
1381 cur += sizeof(TYPEOF_TIME);
1383 //pEventHeader->m_nProcessID = current_tgid;
/* Process id (tgid), unless masked. */
1384 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_PID)){
1385 //TYPEOF_PROCESS_ID current_tgid = current->tgid;
1386 (*(TYPEOF_PROCESS_ID *)cur) = current->tgid;
1387 cur += sizeof(TYPEOF_PROCESS_ID);
1389 //pEventHeader->m_nThreadID = current_pid;
/* Thread id (pid), unless masked. */
1390 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_TID)){
1391 //TYPEOF_THREAD_ID current_pid = current->pid;
1392 (*(TYPEOF_THREAD_ID *)cur) = current->pid;
1393 cur += sizeof(TYPEOF_THREAD_ID);
1395 //pEventHeader->m_nCPU = current_cpu;
/* CPU number, unless masked. */
1396 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_CPU)){
1397 //TYPEOF_CPU_NUMBER current_cpu = task_cpu(current);
1398 (*(TYPEOF_CPU_NUMBER *)cur) = task_cpu(current);
1399 cur += sizeof(TYPEOF_CPU_NUMBER);
1401 //printk("%d %x", probe_id, event_mask);
1402 // dyn lib event should have all args, it is for internal use and not visible to user
/* Argument block: kept for format/dyn-lib events and when args are not
 * masked out. */
1403 if((probe_id == EVENT_FMT_PROBE_ID) || (probe_id == DYN_LIB_PROBE_ID) || !(event_mask & IOCTL_EMASK_ARGS)){
1404 // move only if any of prev fields has been skipped
/* If any header field above was skipped, the args are still at their
 * original offset — slide them down to close the gap (regions overlap,
 * hence memmove). */
1405 if(event_mask & (IOCTL_EMASK_TIME|IOCTL_EMASK_PID|IOCTL_EMASK_TID|IOCTL_EMASK_CPU)){
1406 memmove(cur, data+sizeof(SWAP_TYPE_EVENT_HEADER)-sizeof(TYPEOF_NUMBER_OF_ARGS),
1407 len-sizeof(SWAP_TYPE_EVENT_HEADER)+sizeof(TYPEOF_NUMBER_OF_ARGS)
1408 -sizeof(TYPEOF_EVENT_LENGTH));
1410 cur += len-sizeof(SWAP_TYPE_EVENT_HEADER)+sizeof(TYPEOF_NUMBER_OF_ARGS)
1411 -sizeof(TYPEOF_EVENT_LENGTH);
1414 // user space probes should have at least one argument to identify them
/* Args were masked out: US/VTP probes still need one pointer arg so the
 * consumer can identify the probe site. */
1415 if((probe_id == US_PROBE_ID) || (probe_id == VTP_PROBE_ID)){
1417 (*(TYPEOF_NUMBER_OF_ARGS *)cur) = 1;
1418 cur += sizeof(TYPEOF_NUMBER_OF_ARGS);
1419 // pack args using format string for the 1st arg only
/* Write a one-character format string "p" (pointer), padded to the
 * record's alignment. */
1420 memset(cur, 0, ALIGN_VALUE(2));
1421 cur[0] = 'p'; cur[1] = '\0';
1422 cur += ALIGN_VALUE(2);
/* Copy the first original argument value from its old position
 * (header + aligned original format string of nArgs chars + NUL). */
1423 pArg1 = data + sizeof(SWAP_TYPE_EVENT_HEADER)+ALIGN_VALUE(nArgs+1);
1424 memmove(cur, pArg1, sizeof(unsigned long));
1425 cur += sizeof(unsigned long);
/* Non-US/VTP event with masked args: record zero arguments. */
1428 (*(TYPEOF_NUMBER_OF_ARGS *)cur) = 0;
1429 cur += sizeof(TYPEOF_NUMBER_OF_ARGS);
/* Finalize: total length is stored both in the header and as a trailing
 * length field after the payload (the '+ sizeof' accounts for that
 * trailer). */
1432 pEventHeader->m_nLength = cur - data + sizeof(TYPEOF_EVENT_LENGTH);
1433 *((TYPEOF_EVENT_LENGTH *)cur) = pEventHeader->m_nLength;
1434 len = pEventHeader->m_nLength;
/* Ship the rewritten record; on failure only bump the lost-event counter
 * under the stats spinlock. */
1436 if(WriteEventIntoBuffer(data, len) == -1) {
1437 EPRINTF("Cannot write event into buffer!");
1439 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
1440 ++ec_info.lost_events_count;
1441 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/* Apply user-supplied offsets to the predefined user-space probes.
 *
 * 'data->p_probes' is a user-space array of 'probes_count' consecutive
 * NUL-terminated strings of the form "lib_path:func_name:hex_offset"
 * (e.g. "libc.so.6:printf:1a2b0"). For each string, the matching entry in
 * the handler module's uprobes table (get_my_uprobes_info) gets its
 * 'offset' field set from the hex value.
 *
 * NOTE(review): loop braces, 'continue' statements on the strncmp guards,
 * kfree of 'buf', and the returns are elided in this view — confirm the
 * per-iteration cleanup against the full file. */
1447 int set_predef_uprobes (ioctl_predef_uprobes_info_t *data)
1449 int i, k, size = 0, probe_size, result, j;
1450 char *buf, *sep1, *sep2;
1451 get_my_uprobes_info_t get_uprobes = NULL;
1452 inst_us_proc_t *my_uprobes_info = NULL;
/* Fallback table used when no handler module exports its uprobes info. */
1454 inst_us_proc_t empty_uprobes_info =
/* Resolve the handler module's table at runtime by symbol name. */
1460 get_uprobes = (get_my_uprobes_info_t)lookup_name("get_my_uprobes_info");
1462 my_uprobes_info = (inst_us_proc_t *)get_uprobes();
1464 DPRINTF("my_uprobes_info lookup result: 0x%p", my_uprobes_info);
1465 if (my_uprobes_info == 0)
1466 my_uprobes_info = &empty_uprobes_info;
/* 'size' accumulates the offset of the current string within p_probes. */
1468 for(j = 0; j < data->probes_count; j++)
/* strlen_user includes the terminating NUL in its count. */
1470 probe_size = strlen_user(data->p_probes+size);
1471 buf = kmalloc(probe_size, GFP_KERNEL);
1475 EPRINTF("failed to alloc mem!");
/* strncpy_from_user returns the copied length excluding the NUL, so a
 * successful full copy yields probe_size-1. */
1479 result = strncpy_from_user(buf, data->p_probes+size, probe_size);
1480 if (result != (probe_size-1))
1482 EPRINTF("failed to copy from user!");
1486 //DPRINTF("%s", buf);
/* Split "lib:func:offset" — sep1 at the first ':', sep2 at the second. */
1487 sep1 = strchr(buf, ':');
1490 EPRINTF("skipping invalid predefined uprobe string '%s'!", buf);
1495 sep2 = strchr(sep1+1, ':');
/* Reject a missing second ':', an empty function name, or an empty
 * offset field. */
1496 if(!sep2 || (sep2 == sep1) || (sep2+2 == buf+probe_size))
1498 EPRINTF("skipping invalid predefined uprobe string '%s'!", buf);
/* Find the library by path prefix, then the function by name, and store
 * the parsed hex offset. */
1503 for(i = 0; i < my_uprobes_info->libs_count; i++)
1505 if(strncmp(buf, my_uprobes_info->p_libs[i].path, sep1-buf) != 0)
1507 for(k = 0; k < my_uprobes_info->p_libs[i].ips_count; k++)
1509 if(strncmp(sep1+1, my_uprobes_info->p_libs[i].p_ips[k].name, sep2-sep1-1) != 0)
1511 my_uprobes_info->p_libs[i].p_ips[k].offset = simple_strtoul(sep2+1, NULL, 16);
/* Compute the total buffer size (in bytes) needed to serialize every
 * predefined uprobe as a "lib_path:func_name:" string (see the example
 * comment below); the result is accumulated into *size. Used by callers
 * to size the buffer passed to get_predef_uprobes().
 * NOTE(review): the initialization of *size, loop braces, and the return
 * are elided in this view — confirm whether *size starts at 0 here or at
 * the caller. */
1521 int get_predef_uprobes_size(int *size)
1524 get_my_uprobes_info_t get_uprobes = NULL;
1525 inst_us_proc_t *my_uprobes_info = NULL;
/* Fallback table used when no handler module exports its uprobes info. */
1527 inst_us_proc_t empty_uprobes_info =
/* Resolve the handler module's table at runtime by symbol name. */
1533 get_uprobes = (get_my_uprobes_info_t)lookup_name("get_my_uprobes_info");
1535 my_uprobes_info = (inst_us_proc_t *)get_uprobes();
1537 if (my_uprobes_info == 0)
1538 my_uprobes_info = &empty_uprobes_info;
1541 for(i = 0; i < my_uprobes_info->libs_count; i++)
/* Library path length is invariant across the inner loop — hoisted. */
1543 int lib_size = strlen(my_uprobes_info->p_libs[i].path);
1544 for(k = 0; k < my_uprobes_info->p_libs[i].ips_count; k++)
1546 // libc.so.6:printf:
/* path + ':' + name + ':' + separator byte = lib_size + 1 + name + 2. */
1547 *size += lib_size + 1 + strlen(my_uprobes_info->p_libs[i].p_ips[k].name) + 2;
1554 int get_predef_uprobes(ioctl_predef_uprobes_info_t *udata)
1556 ioctl_predef_uprobes_info_t data;
1557 int i, k, size, lib_size, func_size, result;
1561 inst_us_proc_t empty_uprobes_info =
1567 get_my_uprobes_info_t get_uprobes = NULL;
1568 inst_us_proc_t *my_uprobes_info = NULL;
1570 get_uprobes = (get_my_uprobes_info_t)lookup_name("get_my_uprobes_info");
1572 my_uprobes_info = (inst_us_proc_t *)get_uprobes();
1574 if (my_uprobes_info == 0)
1575 my_uprobes_info = &empty_uprobes_info;
1577 // get addr of array
1578 if (copy_from_user ((void *)&data, udata, sizeof (data)))
1580 EPRINTF("failed to copy from user!");
1585 for(i = 0; i < my_uprobes_info->libs_count; i++)
1587 lib_size = strlen(my_uprobes_info->p_libs[i].path);
1588 for(k = 0; k < my_uprobes_info->p_libs[i].ips_count; k++)
1591 result = copy_to_user ((void *)(data.p_probes+size), my_uprobes_info->p_libs[i].path, lib_size);
1594 EPRINTF("failed to copy to user!");
1599 result = copy_to_user ((void *)(data.p_probes+size), sep, 1);
1602 EPRINTF("failed to copy to user!");
1607 //DPRINTF("'%s'", my_uprobes_info->p_libs[i].p_ips[k].name);
1608 func_size = strlen(my_uprobes_info->p_libs[i].p_ips[k].name);
1609 result = copy_to_user ((void *)(data.p_probes+size), my_uprobes_info->p_libs[i].p_ips[k].name, func_size);
1612 EPRINTF("failed to copy to user!");
1617 result = copy_to_user ((void *)(data.p_probes+size), sep, 2);
1620 EPRINTF("failed to copy to user!");
1629 result = copy_to_user ((void *)&(udata->probes_count), &count, sizeof(count));
1632 EPRINTF("failed to copy to user!");