1 ////////////////////////////////////////////////////////////////////////////////////
6 // This file is C source for SWAP.
9 // AUTHOR: L.Komkov, S.Dianov, A.Gerenkov
10 // COMPANY NAME: Samsung Research Center in Moscow
11 // DEPT NAME: Advanced Software Group
12 // CREATED: 2008.02.15
14 // REVISION DATE: 2008.12.03
16 ////////////////////////////////////////////////////////////////////////////////////
18 #include <linux/types.h>
19 #include <linux/hash.h>
20 #include <linux/list.h>
21 #include <linux/unistd.h>
/* Alias: "after_buffer" is the end offset of the cyclic buffer, i.e. its size. */
26 #define after_buffer ec_info.buffer_size
/* Backing storage for the single (non-relay) trace buffer; NULL until allocated. */
29 char *p_buffer = NULL;
/* Descriptor of the instrumented user-space process (filled by bundle parsing below). */
30 inst_us_proc_t us_proc_info;
/* Hash list of installed kernel probes (see find_probe()). */
33 struct hlist_head kernel_probes;
/* Head of the list of start/stop/ignore conditions applied to events. */
35 struct cond cond_list;
36 int paused = 0; /* a state after a stop condition (events are not collected) */
/* Time of the last probe attach; used by time-based start/stop conditions. */
37 struct timeval last_attach_time = {0, 0};
39 EXPORT_SYMBOL_GPL(us_proc_info);
/* Optional external hook for posting events; NULL when unused. */
40 int (*mec_post_event)(char *data, unsigned long len) = NULL;
/*
 * Copy "size" bytes from "src" into the cyclic trace buffer starting at
 * "dst_offset" and return the advanced offset.
 * NOTE(review): the wrap-around handling and the initialization of
 * pSource are not visible in this fragment -- confirm against the full
 * implementation.
 */
42 unsigned copy_into_cyclic_buffer (char *buffer, unsigned dst_offset, char *src, unsigned size)
44 unsigned nOffset = dst_offset;
/* Byte-wise copy into the buffer. */
47 buffer[nOffset++] = *pSource++;
/*
 * Copy "size" bytes out of the cyclic buffer at "src_offset" into "dst";
 * mirror of copy_into_cyclic_buffer() above.
 * NOTE(review): return value and wrap handling are outside this fragment.
 */
51 unsigned copy_from_cyclic_buffer (char *dst, char *buffer, unsigned src_offset, unsigned size)
53 unsigned nOffset = src_offset;
54 char* pDestination = dst;
/* Byte-wise copy from the buffer. */
56 *pDestination++ = buffer[nOffset++];
/*
 * Validate a requested trace-buffer size against the compile-time bounds
 * EC_BUFFER_SIZE_MIN / EC_BUFFER_SIZE_MAX, printing an error when out of
 * range.  NOTE(review): exact return values are outside this fragment;
 * callers (SetBufferSize) compare the result against -1.
 */
60 int CheckBufferSize (unsigned int nSize)
62 if (nSize < EC_BUFFER_SIZE_MIN) {
63 EPRINTF("Too small buffer size! [Size=%u KB]", nSize / 1024);
66 if (nSize > EC_BUFFER_SIZE_MAX) {
67 EPRINTF("Too big buffer size! [Size=%u KB]", nSize / 1024);
/*
 * Allocate the single (non-relay) trace buffer with vmalloc_user() and
 * publish its geometry in ec_info under ec_spinlock.
 * The actual allocation size is subbuffer-size * number-of-subbuffers,
 * which may differ from the requested nSize after rounding.
 */
73 int AllocateSingleBuffer(unsigned int nSize)
75 unsigned long spinlock_flags = 0L;
77 unsigned int nSubbufferSize = ec_info.m_nSubbufSize;
78 unsigned int nNumOfSubbufers = GetNumOfSubbuffers(nSize);
79 unsigned long nAllocatedSize = nSubbufferSize * nNumOfSubbufers;
/* vmalloc_user() provides zeroed memory suitable for mmap to user space. */
81 p_buffer = vmalloc_user(nAllocatedSize);
83 EPRINTF("Memory allocation error! [Size=%lu KB]", nAllocatedSize / 1024);
/* Publish the buffer geometry atomically w.r.t. other ec_info users. */
87 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
88 ec_info.m_nNumOfSubbuffers = nNumOfSubbufers;
89 ec_info.buffer_effect = ec_info.buffer_size = nAllocatedSize;
90 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/* Release the single trace buffer allocated by AllocateSingleBuffer(). */
95 void FreeSingleBuffer (void)
97 VFREE_USER(p_buffer, ec_info.buffer_size);
101 //////////////////////////////////////////////////////////////////////////////////////////////////
/* Set the continuous-retrieval mode bit in ec_info.m_nMode (under ec_spinlock). */
103 int EnableContinuousRetrieval() {
104 unsigned long spinlock_flags = 0L;
106 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
107 ec_info.m_nMode |= MODEMASK_CONTINUOUS_RETRIEVAL;
108 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/* Clear the continuous-retrieval mode bit in ec_info.m_nMode (under ec_spinlock). */
113 int DisableContinuousRetrieval() {
114 unsigned long spinlock_flags = 0L;
116 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
117 ec_info.m_nMode &= ~MODEMASK_CONTINUOUS_RETRIEVAL;
118 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
123 //////////////////////////////////////////////////////////////////////////////////////////////////
125 #ifndef __DISABLE_RELAYFS
/* Relay channel used in multiple-buffer mode; NULL until AllocateMultipleBuffer()
 * opens it and after FreeMultipleBuffer() closes it. */
struct rchan* gl_pRelayChannel = NULL;

/* Accessor for the global relay channel pointer.
 * (The stray ';' after the function body was removed: a semicolon after a
 * function definition at file scope is invalid in strict ISO C.) */
struct rchan* GetRelayChannel(void) { return gl_pRelayChannel; }
/* Directory (procfs or debugfs, depending on build) that hosts the relay
 * buffer files; created by storage_init(). */
struct dentry* gl_pdirRelay = NULL;

/* Accessor for the relay base directory dentry.
 * (Stray ';' after the function body removed -- invalid in strict ISO C.) */
struct dentry* GetRelayDir(void) { return gl_pdirRelay; }
/* procfs entry backing the relay base directory when procfs is used. */
135 struct proc_dir_entry* alt_pde = NULL;
/*
 * Create directory "dirname" under "parent" in procfs and return its VFS
 * dentry (resolved with lookup_one_len).  On lookup failure the proc
 * entry is removed again.  NOTE(review): the success/error return paths
 * and the use of p2pde are not fully visible in this fragment.
 */
137 static inline struct dentry *_dir_create (const char *dirname, struct dentry *parent, struct proc_dir_entry **p2pde)
140 struct proc_dir_entry *pde;
142 pde = proc_mkdir (dirname, PDE (parent->d_inode));
/* i_mutex must be held across lookup_one_len(). */
149 mutex_lock (&parent->d_inode->i_mutex);
150 dir = lookup_one_len (dirname, parent, strlen (dirname));
151 mutex_unlock (&parent->d_inode->i_mutex);
/* Lookup failed: undo the proc_mkdir() above. */
156 remove_proc_entry (dirname, PDE (parent->d_inode));
/*
 * Return the root dentry of the mounted procfs by walking the "proc"
 * file_system_type's superblock list.  Bails out early when procfs is
 * not registered/mounted (the early-return value is outside this view).
 */
165 static inline struct dentry *_get_proc_root (void)
167 struct file_system_type *procfs_type;
168 struct super_block *procfs_sb;
170 procfs_type = get_fs_type ("proc");
172 if (!procfs_type || list_empty (&procfs_type->fs_supers))
/* Take the first (and normally only) proc superblock. */
175 procfs_sb = list_entry (procfs_type->fs_supers.next, \
176 struct super_block, s_instances);
178 return procfs_sb->s_root;
/*
 * relay callback helper: create a procfs file to represent a relay
 * channel buffer and return its dentry.  The file is served by
 * relay_file_operations; the relay buffer pointer is stashed in the
 * inode's private field.
 */
182 static struct dentry *create_buf (const char *filename, struct dentry *parent, int mode, struct rchan_buf *buf, int *is_global)
184 struct proc_dir_entry *pde;
185 struct proc_dir_entry *parent_pde = NULL;
186 struct dentry *dentry;
189 parent_pde = PDE (parent->d_inode);
/* No parent given: fall back to the procfs root. */
191 parent = _get_proc_root ();
193 pde = create_proc_entry (filename, S_IFREG|S_IRUSR, parent_pde);
/* Serve reads through the relay file operations. */
198 pde->proc_fops = &relay_file_operations;
200 mutex_lock (&parent->d_inode->i_mutex);
201 dentry = lookup_one_len (filename, parent, strlen (filename));
202 mutex_unlock (&parent->d_inode->i_mutex);
204 if (IS_ERR(dentry)) {
/* Lookup failed: undo create_proc_entry(). */
205 remove_proc_entry (filename, parent_pde);
/* Inode private-data field moved in 2.6.19; handle both layouts. */
208 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
209 dentry->d_inode->i_private = buf;
211 dentry->d_inode->u.generic_ip = buf;
/* relay callback helper: remove the procfs file created by create_buf(). */
218 static int remove_buf (struct dentry *dentry)
222 struct proc_dir_entry *pde = PDE (dentry->d_inode);
224 remove_proc_entry (pde->name, pde->parent);
230 #endif // __USE_PROCFS
232 * subbuf_start - called on buffer-switch to a new sub-buffer
233 * @buf: the channel buffer containing the new sub-buffer
234 * @subbuf: the start of the new sub-buffer
235 * @prev_subbuf: the start of the previous sub-buffer
236 * @prev_padding: unused space at the end of previous sub-buffer
238 * The client should return 1 to continue logging, 0 to stop
241 * NOTE: subbuf_start will also be invoked when the buffer is
242 * created, so that the first sub-buffer can be initialized
243 * if necessary. In this case, prev_subbuf will be NULL.
245 * NOTE: the client can reserve bytes at the beginning of the new
246 * sub-buffer by calling subbuf_start_reserve() in this callback.
/*
 * relay subbuf_start callback: invoked on every switch to a new
 * sub-buffer (and once at buffer creation, with prev_subbuf == NULL).
 * Reserves a per-subbuffer header, finalizes the previous sub-buffer's
 * header (padding length + number of events saved in it), and when the
 * channel is full consumes the oldest sub-buffer so logging continues
 * (overwrite mode).  All ec_info updates are done under ec_spinlock.
 */
248 int RelayCallbackSubbufStart(struct rchan_buf *buf,
253 struct rchan* pRelayChannel = NULL;
254 unsigned int nNumOfSubbufs = 0;
256 unsigned long spinlock_flags = 0L;
257 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
/* Reserve room for this sub-buffer's header (filled in when it closes). */
259 subbuf_start_reserve(buf, RELAY_SUBBUF_HEADER_SIZE);
260 ec_info.buffer_effect += RELAY_SUBBUF_HEADER_SIZE;
261 ec_info.m_nEndOffset = RELAY_SUBBUF_HEADER_SIZE;
/* First call (buffer creation): nothing to finalize yet. */
263 if(prev_subbuf == NULL) {
264 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/* Write the previous sub-buffer's header: padding, then saved-event count. */
267 memcpy(prev_subbuf, &prev_padding, sizeof(unsigned int));
268 memcpy(prev_subbuf + sizeof(unsigned int), &ec_info.m_nSubbufSavedEvents, sizeof(unsigned int));
269 ec_info.m_nSubbufSavedEvents = 0;
270 pRelayChannel = GetRelayChannel();
271 if(pRelayChannel == NULL) {
272 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
273 EPRINTF("Null pointer to relay channel!");
276 nNumOfSubbufs = pRelayChannel->n_subbufs;
277 ec_info.m_nBeginSubbufNum = buf->subbufs_consumed % nNumOfSubbufs;
278 ec_info.m_nEndSubbufNum = buf->subbufs_produced % nNumOfSubbufs;
/* Channel full: drop (consume) the oldest sub-buffer to keep logging. */
279 if(relay_buf_full(buf)) {
280 void* pConsume = NULL;
281 unsigned int nPaddingLength = 0;
282 unsigned int nSubbufSize = 0;
283 unsigned int nDataSize = 0;
284 unsigned int nEffectSize = 0;
285 unsigned int nSubbufDiscardedCount = 0;
286 nSubbufSize = pRelayChannel->subbuf_size;
287 pConsume = buf->start + buf->subbufs_consumed % nNumOfSubbufs * nSubbufSize;
/* Read back the header of the sub-buffer being discarded. */
288 memcpy(&nPaddingLength, pConsume, sizeof(unsigned int));
289 memcpy(&nSubbufDiscardedCount, pConsume + sizeof(unsigned int), sizeof(unsigned int));
290 nEffectSize = nSubbufSize - nPaddingLength;
291 nDataSize = nEffectSize - RELAY_SUBBUF_HEADER_SIZE;
292 ec_info.discarded_events_count += nSubbufDiscardedCount;
293 relay_subbufs_consumed(pRelayChannel, 0, 1);
294 ec_info.m_nBeginSubbufNum = buf->subbufs_consumed % nNumOfSubbufs;
295 ec_info.m_nEndSubbufNum = buf->subbufs_produced % nNumOfSubbufs;
296 ec_info.buffer_effect -= nEffectSize;
297 ec_info.trace_size -= nDataSize;
/* Keep the trace file size in sync with the data actually retained. */
298 buf->dentry->d_inode->i_size = ec_info.trace_size;
299 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
300 return 1; // Overwrite mode
302 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
307 * buf_mapped - relay buffer mmap notification
308 * @buf: the channel buffer
309 * @filp: relay file pointer
311 * Called when a relay file is successfully mmapped
313 void RelayCallbackBufMapped(struct rchan_buf *buf,
319 * buf_unmapped - relay buffer unmap notification
320 * @buf: the channel buffer
321 * @filp: relay file pointer
323 * Called when a relay file is successfully unmapped
325 void RelayCallbackBufUnmapped(struct rchan_buf *buf,
330 * create_buf_file - create file to represent a relay channel buffer
331 * @filename: the name of the file to create
332 * @parent: the parent of the file to create
333 * @mode: the mode of the file to create
334 * @buf: the channel buffer
335 * @is_global: outparam - set non-zero if the buffer should be global
337 * Called during relay_open(), once for each per-cpu buffer,
338 * to allow the client to create a file to be used to
339 * represent the corresponding channel buffer. If the file is
340 * created outside of relay, the parent must also exist in
343 * The callback should return the dentry of the file created
344 * to represent the relay buffer.
346 * Setting the is_global outparam to a non-zero value will
347 * cause relay_open() to create a single global buffer rather
348 * than the default set of per-cpu buffers.
350 * See Documentation/filesystems/relayfs.txt for more info.
/*
 * relay create_buf_file callback: create the file representing a channel
 * buffer either in procfs (via create_buf()) or in debugfs, depending on
 * the __USE_PROCFS build option.
 */
352 struct dentry * RelayCallbackCreateBufFile(const char *filename,
353 struct dentry *parent,
355 struct rchan_buf *buf,
360 DPRINTF("\"%s\" is creating in procfs...!", filename);
361 return create_buf(filename, parent, mode, buf, is_global);
363 DPRINTF("\"%s\" is creating in debugfs...!", filename);
364 return debugfs_create_file(filename, (mode_t)mode, parent, buf, &relay_file_operations);
365 #endif // __USE_PROCFS
369 * remove_buf_file - remove file representing a relay channel buffer
370 * @dentry: the dentry of the file to remove
372 * Called during relay_close(), once for each per-cpu buffer,
373 * to allow the client to remove a file used to represent a
376 * The callback should return 0 if successful, negative if not.
/* relay remove_buf_file callback: remove the per-buffer file
 * (debugfs path shown; the procfs branch is outside this view). */
378 int RelayCallbackRemoveBufFile(struct dentry *dentry)
383 debugfs_remove(dentry);
384 #endif // __USE_PROCFS
/* relay client callbacks wiring the handlers above into relay_open(). */
388 struct rchan_callbacks gl_RelayCallbacks = {
389 .subbuf_start = RelayCallbackSubbufStart,
390 .buf_mapped = RelayCallbackBufMapped,
391 .buf_unmapped = RelayCallbackBufUnmapped,
392 .create_buf_file = RelayCallbackCreateBufFile,
393 .remove_buf_file = RelayCallbackRemoveBufFile
395 #endif //__DISABLE_RELAYFS
/*
 * Allocate the multiple (relay) buffer: open a relay channel with the
 * configured sub-buffer geometry and publish the resulting sizes in
 * ec_info under ec_spinlock.  Reports an error when relayfs support is
 * compiled out.
 */
397 int AllocateMultipleBuffer(unsigned int nSize) {
398 #ifndef __DISABLE_RELAYFS
399 unsigned long spinlock_flags = 0L;
401 unsigned int nSubbufferSize = ec_info.m_nSubbufSize;
402 unsigned int nNumOfSubbufers = GetNumOfSubbuffers(nSize);
404 gl_pRelayChannel = relay_open(DEFAULT_RELAY_BASE_FILENAME,
/* relay_open() gained extra arguments after 2.6.18; both forms handled. */
409 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
413 if(gl_pRelayChannel == NULL) {
414 EPRINTF("Cannot create relay buffer channel! [%d subbufers by %u Kb = %u Kb]",
415 nNumOfSubbufers, nSubbufferSize / 1024, nSize / 1024);
419 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
420 ec_info.m_nNumOfSubbuffers = nNumOfSubbufers;
421 ec_info.buffer_effect = ec_info.buffer_size = nSubbufferSize * nNumOfSubbufers;
422 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
426 EPRINTF("RelayFS not supported!");
428 #endif //__DISABLE_RELAYFS
/* Close the relay channel opened by AllocateMultipleBuffer(). */
431 void FreeMultipleBuffer(void) {
432 #ifndef __DISABLE_RELAYFS
433 relay_close(gl_pRelayChannel);
436 EPRINTF("RelayFS not supported!");
437 #endif //__DISABLE_RELAYFS
/* Allocate whichever buffer kind the current mode selects (relay vs single). */
440 int InitializeBuffer(unsigned int nSize) {
441 if(IsMultipleBuffer())
442 return AllocateMultipleBuffer(nSize);
443 return AllocateSingleBuffer(nSize);
/* Free the current buffer (single or relay) -- counterpart of InitializeBuffer(). */
446 int UninitializeBuffer(void) {
447 if(IsMultipleBuffer())
448 FreeMultipleBuffer();
/*
 * Switch to multiple-buffer (relay) mode: tear down the current buffer,
 * set the mode bit under ec_spinlock, then re-create a buffer of the
 * same size.  Early-out (not visible here) when already in this mode.
 */
453 int EnableMultipleBuffer() {
454 unsigned long spinlock_flags = 0L;
456 if(IsMultipleBuffer())
459 if(UninitializeBuffer() == -1)
460 EPRINTF("Cannot uninitialize buffer!");
462 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
463 ec_info.m_nMode |= MODEMASK_MULTIPLE_BUFFER;
464 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
466 if(InitializeBuffer(GetBufferSize()) == -1) {
467 EPRINTF("Cannot initialize buffer!");
/* Switch back to single-buffer mode -- mirror of EnableMultipleBuffer(). */
473 int DisableMultipleBuffer() {
474 unsigned long spinlock_flags = 0L;
476 if(!IsMultipleBuffer())
479 if(UninitializeBuffer() == -1)
480 EPRINTF("Cannot uninitialize buffer!");
482 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
483 ec_info.m_nMode &= ~MODEMASK_MULTIPLE_BUFFER;
484 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
486 if(InitializeBuffer(GetBufferSize()) == -1) {
487 EPRINTF("Cannot initialize buffer!");
493 unsigned int GetBufferSize(void) { return ec_info.buffer_size; };
/*
 * Change the trace-buffer size: allowed only in IDLE state, validated by
 * CheckBufferSize(), implemented as detach + reallocate.  A request for
 * the current size is a no-op.
 */
495 int SetBufferSize(unsigned int nSize) {
496 if (GetECState() != EC_STATE_IDLE) {
497 EPRINTF("Buffer changes are allowed in IDLE state only (%d)!", GetECState());
500 if(GetBufferSize() == nSize)
502 if(CheckBufferSize(nSize) == -1) {
503 EPRINTF("Invalid buffer size!");
/* Probes must be detached before the buffer is swapped out underneath them. */
506 detach_selected_probes ();
507 if(UninitializeBuffer() == -1)
508 EPRINTF("Cannot uninitialize buffer!");
509 if(InitializeBuffer(nSize) == -1) {
510 EPRINTF("Cannot initialize buffer! [Size=%u KB]", nSize / 1024);
/* Reset the single buffer (body not visible in this fragment). */
516 void ResetSingleBuffer(void) {
/* Reset the relay channel, discarding all buffered data. */
519 void ResetMultipleBuffer(void) {
520 #ifndef __DISABLE_RELAYFS
521 relay_reset(gl_pRelayChannel);
523 EPRINTF("RelayFS not supported!");
524 #endif //__DISABLE_RELAYFS
/*
 * Reset the trace buffer (IDLE state only): dispatch to the mode-specific
 * reset, detach probes, and restore buffer_effect to the full size.
 */
527 int ResetBuffer(void) {
528 unsigned long spinlock_flags = 0L;
530 if (GetECState() != EC_STATE_IDLE) {
531 EPRINTF("Buffer changes are allowed in IDLE state only!");
535 if(IsMultipleBuffer())
536 ResetMultipleBuffer();
540 detach_selected_probes ();
542 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
543 ec_info.buffer_effect = ec_info.buffer_size;
544 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
/*
 * Append one packed event to the single cyclic buffer, maintaining the
 * first/after_last/trace_size bookkeeping in ec_info.  Three placements:
 * in-place append at the tail, wrap-around to the buffer head, or the
 * gap between after_last and first.  Events that do not fit bump
 * discarded_events_count.  NOTE(review): spinlock_flags is declared but
 * no locking is visible in this fragment; pack_event_info() holds
 * ec_spinlock around the call -- confirm in the full source.
 */
551 int WriteEventIntoSingleBuffer(char* pEvent, unsigned long nEventSize) {
552 unsigned long spinlock_flags = 0L;
556 EPRINTF("Invalid pointer to buffer!");
557 ++ec_info.lost_events_count;
560 unsigned int unused_space;
/* Tail is ahead of head (or buffer empty): try to append at the tail. */
561 if (ec_info.trace_size == 0 || ec_info.after_last > ec_info.first) {
562 unused_space = ec_info.buffer_size - ec_info.after_last;
563 if (unused_space > nEventSize) {
564 ec_info.after_last = copy_into_cyclic_buffer(p_buffer,
568 ec_info.saved_events_count++;
569 ec_info.buffer_effect = ec_info.buffer_size;
570 ec_info.trace_size = ec_info.after_last - ec_info.first;
/* No room at the tail: wrap to the start if the head leaves space. */
572 if (ec_info.first > nEventSize) {
573 ec_info.buffer_effect = ec_info.after_last;
574 ec_info.after_last = copy_into_cyclic_buffer(p_buffer,
578 ec_info.saved_events_count++;
579 ec_info.trace_size = ec_info.buffer_effect
581 + ec_info.after_last;
583 // TODO: consider two variants!
585 ec_info.discarded_events_count++;
/* Head is ahead of tail: only the gap between them is usable. */
589 unused_space = ec_info.first - ec_info.after_last;
590 if (unused_space > nEventSize) {
591 ec_info.after_last = copy_into_cyclic_buffer(p_buffer,
595 ec_info.saved_events_count++;
596 ec_info.trace_size = ec_info.buffer_effect
598 + ec_info.after_last;
601 ec_info.discarded_events_count++;
/*
 * Append one event to the relay channel and update the ec_info counters.
 * NOTE(review): spinlock_flags is declared but no locking is visible in
 * this fragment; pack_event_info() takes ec_spinlock around the call.
 */
607 int WriteEventIntoMultipleBuffer(char* pEvent, unsigned long nEventSize) {
608 #ifndef __DISABLE_RELAYFS
609 unsigned long spinlock_flags = 0L;
610 __relay_write(GetRelayChannel(), pEvent, nEventSize);
611 ec_info.buffer_effect += nEventSize;
612 ec_info.trace_size += nEventSize;
613 ec_info.saved_events_count++;
614 ec_info.m_nEndOffset += nEventSize;
615 ec_info.m_nSubbufSavedEvents++;
618 EPRINTF("RelayFS not supported!");
620 #endif //__DISABLE_RELAYFS
/* Dispatch an event to the active buffer kind; the hex-dump loop is
 * apparently debug output (its guard is outside this view). */
623 int WriteEventIntoBuffer(char* pEvent, unsigned long nEventSize) {
626 for(i = 0; i < nEventSize; i++)
627 printk("%02X ", pEvent[i]);
630 if(IsMultipleBuffer())
631 return WriteEventIntoMultipleBuffer(pEvent, nEventSize);
632 return WriteEventIntoSingleBuffer(pEvent, nEventSize);
635 //////////////////////////////////////////////////////////////////////////////////////////////////
/* Replace the global event mask under ec_spinlock. */
637 int set_event_mask (int new_mask)
639 unsigned long spinlock_flags = 0L;
640 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
641 event_mask = new_mask;
642 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
647 get_event_mask (int *mask)
654 generic_swap (void *a, void *b, int size)
659 *(char *) a++ = *(char *) b;
661 } while (--size > 0);
/*
 * In-place heapsort over an array of "num" elements of "size" bytes,
 * using caller-supplied compare and swap callbacks (same shape as the
 * kernel's lib/sort.c).  First loop heapifies; second repeatedly swaps
 * the heap root into the shrinking sorted tail and sifts down.
 */
664 static void sort (void *base, size_t num, size_t size, int (*cmp) (const void *, const void *), void (*fswap) (void *, void *, int size))
666 /* pre-scale counters for performance */
667 int i = (num / 2) * size, n = num * size, c, r;
/* Phase 1: build the heap. */
670 for (; i >= 0; i -= size)
672 for (r = i; r * 2 < n; r = c)
675 if (c < n - size && cmp (base + c, base + c + size) < 0)
677 if (cmp (base + r, base + c) >= 0)
679 fswap (base + r, base + c, size);
/* Phase 2: pop the root into the tail, then restore the heap. */
684 for (i = n - size; i >= 0; i -= size)
686 fswap (base, base + i, size);
687 for (r = 0; r * 2 < i; r = c)
690 if (c < i - size && cmp (base + c, base + c + size) < 0)
692 if (cmp (base + r, base + c) >= 0)
694 fswap (base + r, base + c, size);
/* Compare callback for sort(): orders (addr, index) pairs by address.
 * Note it never returns 0 -- equal addresses get an arbitrary relative
 * order, which suffices for the adjacency-based bundling below. */
699 static int addr_cmp (const void *a, const void *b)
701 return *(unsigned long *) a > *(unsigned long *) b ? -1 : 1;
/*
 * Search the "deps" blob (size-prefixed list of path strings) for
 * lib_name via strstr and return a pointer to the matching path.
 * NOTE(review): the iteration and not-found return are outside this
 * fragment -- confirm it returns NULL when no match exists.
 */
704 char *find_lib_path(const char *lib_name)
706 char *p = deps + sizeof(size_t);
711 DPRINTF("p is at %s", p);
713 match = strstr(p, lib_name);
715 len = strlen(p) + 1; /* we are at path now */
719 DPRINTF("Found match: %s", match);
727 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 27)
728 #define list_for_each_rcu(pos, head) __list_for_each_rcu(pos, head)
/*
 * Release all storage referenced by us_proc_info (the parsed probe
 * bundle): per-library IP arrays, VTP bundles with their RCU lists of
 * per-VTP data records, and the library array itself.  Counters are
 * zeroed before freeing so readers stop using the arrays first.
 */
731 void unlink_bundle(void)
734 us_proc_lib_t *d_lib;
736 struct list_head *pos; //, *tmp;
738 path = us_proc_info.path;
739 us_proc_info.path = 0;
741 // first make sure "d_lib" is not used any more and only
742 // then release storage
743 if (us_proc_info.p_libs)
745 int count1 = us_proc_info.libs_count;
746 us_proc_info.libs_count = 0;
747 for (i = 0; i < count1; i++)
749 d_lib = &us_proc_info.p_libs[i];
752 // first make sure "d_lib->p_ips" is not used any more and only
753 // then release storage
754 //int count2 = d_lib->ips_count;
755 d_lib->ips_count = 0;
756 /*for (k = 0; k < count2; k++)
757 kfree ((void *) d_lib->p_ips[k].name);*/
758 vfree ((void *) d_lib->p_ips);
762 // first make sure "d_lib->p_vtps" is not used any more and only
763 // then release storage
764 int count2 = d_lib->vtps_count;
765 d_lib->vtps_count = 0;
766 for (k = 0; k < count2; k++)
768 //list_for_each_safe_rcu(pos, tmp, &d_lib->p_vtps[k].list) {
769 list_for_each_rcu (pos, &d_lib->p_vtps[k].list)
771 us_proc_vtp_data_t *vtp = list_entry (pos, us_proc_vtp_data_t, list);
777 kfree ((void *) d_lib->p_vtps);
780 kfree ((void *) us_proc_info.p_libs);
781 us_proc_info.p_libs = 0;
785 /* kfree ((void *) path); */
786 /* //putname(path); */
789 us_proc_info.tgid = 0;
/* Handler tables: my_uprobes_info is resolved at run time via
 * lookup_name(); empty_uprobes_info is the all-empty fallback. */
794 inst_us_proc_t *my_uprobes_info;
795 inst_us_proc_t empty_uprobes_info =
/*
 * NOTE(review): from here on is the body of the bundle-parsing routine;
 * its signature precedes this fragment.  It walks the packed ioctl
 * "bundle" image through read pointer "p": mode word, buffer size,
 * kernel probe addresses, then the user-space probe description.
 */
800 char *p = bundle; /* read pointer for bundle */
804 us_proc_lib_t *d_lib, *pd_lib;
805 ioctl_usr_space_lib_t s_lib;
806 ioctl_usr_space_vtp_t *s_vtp;
812 struct cond *c, *c_tmp, *p_cond;
817 /* Get user-defined us handlers (if they are provided) */
818 my_uprobes_info = (inst_us_proc_t *)lookup_name("my_uprobes_info");
819 if (my_uprobes_info == 0)
820 my_uprobes_info = &empty_uprobes_info;
/* Drop any previously parsed bundle before loading the new one. */
822 DPRINTF("Going to release us_proc_info");
823 if (us_proc_info.path)
826 /* Skip size - it has been used before */
827 p += sizeof(u_int32_t);
/* Mode word. */
830 if (SetECMode(*(u_int32_t *)p) == -1) {
831 EPRINTF("Cannot set mode!\n");
834 p += sizeof(u_int32_t);
/* Requested buffer size. */
837 if (SetBufferSize(*(u_int32_t *)p) == -1) {
838 EPRINTF("Cannot set buffer size!\n");
841 p += sizeof(u_int32_t);
/* Kernel probe addresses: count, then one address per probe. */
844 nr_kern_probes = *(u_int32_t *)p;
845 p += sizeof(u_int32_t);
846 for (i = 0; i < nr_kern_probes; i++) {
847 if (add_probe(*(u_int32_t *)p)) {
848 EPRINTF("Cannot add kernel probe at 0x%x!\n", *(u_int32_t *)p);
851 p += sizeof(u_int32_t);
/* Target application path (length-prefixed, stored inline in the bundle). */
855 len = *(u_int32_t *)p; /* App path len */
856 p += sizeof(u_int32_t);
857 us_proc_info.path = (char *)p;
858 DPRINTF("app path = %s", us_proc_info.path);
/* "*" means "any application": skip the dentry lookup in that case. */
860 if (strcmp(us_proc_info.path, "*")) {
861 if (path_lookup(us_proc_info.path, LOOKUP_FOLLOW, &nd) != 0) {
862 EPRINTF("failed to lookup dentry for path %s!", us_proc_info.path);
/* nameidata layout changed in 2.6.25 (nd.dentry -> nd.path.dentry). */
865 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
866 us_proc_info.m_f_dentry = nd.dentry;
869 us_proc_info.m_f_dentry = nd.path.dentry;
873 us_proc_info.m_f_dentry = NULL;
/* Library table: count, then the per-library records parsed below. */
875 us_proc_info.libs_count = *(u_int32_t *)p;
876 DPRINTF("nr of libs = %d", us_proc_info.libs_count);
877 p += sizeof(u_int32_t);
878 us_proc_info.p_libs =
879 kmalloc(us_proc_info.libs_count * sizeof(us_proc_lib_t), GFP_KERNEL);
880 if (!us_proc_info.p_libs) {
881 EPRINTF("Cannot alloc p_libs!");
884 memset(us_proc_info.p_libs, 0,
885 us_proc_info.libs_count * sizeof(us_proc_lib_t));
/* Per-library records: path, dentry, and instrumentation points (IPs). */
887 for (i = 0; i < us_proc_info.libs_count; i++) {
888 d_lib = &us_proc_info.p_libs[i];
889 lib_name_len = *(u_int32_t *)p;
890 p += sizeof(u_int32_t);
891 d_lib->path = (char *)p;
892 DPRINTF("d_lib->path = %s", d_lib->path);
/* A "library" whose path equals the app path is the app binary itself. */
894 if (strcmp(us_proc_info.path, d_lib->path) == 0)
898 DPRINTF("Searching path for lib %s", d_lib->path);
899 d_lib->path = find_lib_path(d_lib->path);
901 EPRINTF("Cannot find path!");
906 if (path_lookup(d_lib->path, LOOKUP_FOLLOW, &nd) != 0) {
907 EPRINTF ("failed to lookup dentry for path %s!", d_lib->path);
909 p += sizeof(u_int32_t);
/* Same 2.6.25 nameidata layout split as for the app path above. */
912 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
913 d_lib->m_f_dentry = nd.dentry;
916 d_lib->m_f_dentry = nd.path.dentry;
920 d_lib->ips_count = *(u_int32_t *)p;
921 DPRINTF("d_lib->ips_count = %d", d_lib->ips_count);
922 p += sizeof(u_int32_t);
/* Match this library against the user handler table by basename;
 * an empty handler-table path matches the application entry. */
925 ptr = strrchr(d_lib->path, '/');
930 for (l = 0; l < my_uprobes_info->libs_count; l++) {
931 if ((strcmp(ptr, my_uprobes_info->p_libs[l].path) == 0) ||
932 (is_app && *(my_uprobes_info->p_libs[l].path) == '\0')) {
933 pd_lib = &my_uprobes_info->p_libs[l];
938 if (d_lib->ips_count > 0) {
939 us_proc_info.unres_ips_count += d_lib->ips_count;
940 d_lib->p_ips = vmalloc(d_lib->ips_count * sizeof(us_proc_ip_t));
941 DPRINTF("d_lib[%i]->p_ips=%p/%u [%s]", i, d_lib->p_ips,
942 us_proc_info.unres_ips_count, d_lib->path);
944 EPRINTF("Cannot alloc p_ips!\n");
947 memset (d_lib->p_ips, 0, d_lib->ips_count * sizeof(us_proc_ip_t));
/* Each IP record: offset, (skipped) inst type, handler-table index. */
948 for (k = 0; k < d_lib->ips_count; k++) {
949 d_ip = &d_lib->p_ips[k];
950 d_ip->offset = *(u_int32_t *)p;
951 p += sizeof(u_int32_t);
952 p += sizeof(u_int32_t); /* Skip inst type */
953 handler_index = *(u_int32_t *)p;
954 p += sizeof(u_int32_t);
956 DPRINTF("pd_lib = 0x%x", pd_lib);
958 DPRINTF("pd_lib->ips_count = 0x%x", pd_lib->ips_count);
/* Wire up user-supplied jprobe/retprobe handlers when an index is given. */
959 if (handler_index != -1) {
960 DPRINTF("found handler for 0x%x", d_ip->offset);
961 d_ip->jprobe.pre_entry =
962 pd_lib->p_ips[handler_index].jprobe.pre_entry;
964 pd_lib->p_ips[handler_index].jprobe.entry;
965 d_ip->retprobe.handler =
966 pd_lib->p_ips[handler_index].retprobe.handler;
/* VTP (variable trace point) section: library path followed by VTP records. */
974 int lib_path_len = *(u_int32_t *)p;
975 DPRINTF("lib_path_len = %d", lib_path_len);
976 p += sizeof(u_int32_t);
978 DPRINTF("lib_path = %s", lib_path);
982 d_lib = &us_proc_info.p_libs[0];
983 s_lib.vtps_count = *(u_int32_t *)p;
984 DPRINTF("s_lib.vtps_count = %d", s_lib.vtps_count);
985 p += sizeof(u_int32_t);
986 s_lib.p_vtps = kmalloc(s_lib.vtps_count
987 * sizeof(ioctl_usr_space_vtp_t), GFP_KERNEL);
/* Unpack each VTP record: name, address, type, size, register, offset. */
992 for (i = 0; i < s_lib.vtps_count; i++) {
993 int var_name_len = *(u_int32_t *)p;
994 p += sizeof(u_int32_t);
995 s_lib.p_vtps[i].name = p;
997 s_lib.p_vtps[i].addr = *(u_int32_t *)p;
998 p += sizeof(u_int32_t);
999 s_lib.p_vtps[i].type = *(u_int32_t *)p;
1000 p += sizeof(u_int32_t);
1001 s_lib.p_vtps[i].size = *(u_int32_t *)p;
1002 p += sizeof(u_int32_t);
1003 s_lib.p_vtps[i].reg = *(u_int32_t *)p;
1004 p += sizeof(u_int32_t);
1005 s_lib.p_vtps[i].off = *(u_int32_t *)p;
1006 p += sizeof(u_int32_t);
/*
 * Bundle VTPs that share an address: sort (addr, original-index) pairs,
 * allocate one master VTP per unique address, and hang the per-VTP data
 * records off the master's RCU list.
 */
1009 if (s_lib.vtps_count > 0)
1011 unsigned long ucount = 1, pre_addr;
1012 // array containing elements like (addr, index)
1013 unsigned long *addrs = kmalloc (s_lib.vtps_count * 2 * sizeof (unsigned long), GFP_KERNEL);
1014 // DPRINTF ("addrs=%p/%u", addrs, s_lib.vtps_count);
1017 //note: storage will released next time or at clean-up moment
1020 memset (addrs, 0, s_lib.vtps_count * 2 * sizeof (unsigned long));
1021 // fill the array in
1022 for (k = 0; k < s_lib.vtps_count; k++)
1024 s_vtp = &s_lib.p_vtps[k];
1025 addrs[2 * k] = s_vtp->addr;
1026 addrs[2 * k + 1] = k;
1028 // sort by VTP addresses, i.e. make VTPs with the same addresses adjacent;
1029 // organize them into bundles
1030 sort (addrs, s_lib.vtps_count, 2 * sizeof (unsigned long), addr_cmp, generic_swap);
1032 // calc number of VTPs with unique addresses
1033 for (k = 1, pre_addr = addrs[0]; k < s_lib.vtps_count; k++)
1035 if (addrs[2 * k] != pre_addr)
1036 ucount++; // count different only
1037 pre_addr = addrs[2 * k];
1039 us_proc_info.unres_vtps_count += ucount;
1040 d_lib->vtps_count = ucount;
1041 d_lib->p_vtps = kmalloc (ucount * sizeof (us_proc_vtp_t), GFP_KERNEL);
1042 DPRINTF ("d_lib[%i]->p_vtps=%p/%lu", i, d_lib->p_vtps, ucount); //, d_lib->path);
1045 //note: storage will released next time or at clean-up moment
1049 memset (d_lib->p_vtps, 0, d_lib->vtps_count * sizeof (us_proc_vtp_t));
1050 // go through sorted VTPS.
1051 for (k = 0, j = 0, pre_addr = 0, mvtp = NULL; k < s_lib.vtps_count; k++)
1053 us_proc_vtp_data_t *vtp_data;
/* addrs[2k+1] holds the original index of the k-th sorted entry. */
1055 s_vtp = &s_lib.p_vtps[addrs[2 * k + 1]];
1056 // if this is the first VTP in bundle (master VTP)
1057 if (addrs[2 * k] != pre_addr)
1059 // data are in the array of master VTPs
1060 mvtp = &d_lib->p_vtps[j++];
1061 mvtp->addr = s_vtp->addr;
1062 INIT_LIST_HEAD (&mvtp->list);
1064 // data are in the list of slave VTPs
1065 vtp_data = kmalloc (sizeof (us_proc_vtp_data_t), GFP_KERNEL);
1068 //note: storage will released next time or at clean-up moment
1073 /*len = strlen_user (s_vtp->name);
1074 vtp_data->name = kmalloc (len, GFP_KERNEL);
1075 if (!vtp_data->name)
1077 //note: storage will released next time or at clean-up moment
1082 if (strncpy_from_user (vtp_data->name, s_vtp->name, len) != (len-1))
1084 //note: storage will released next time or at clean-up moment
1085 EPRINTF ("strncpy_from_user VTP name failed %p (%ld)", vtp_data->name, len);
1086 kfree (vtp_data->name);
1091 //vtp_data->name[len] = 0;*/
1092 vtp_data->type = s_vtp->type;
1093 vtp_data->size = s_vtp->size;
1094 vtp_data->reg = s_vtp->reg;
1095 vtp_data->off = s_vtp->off;
1096 list_add_tail_rcu (&vtp_data->list, &mvtp->list);
1097 pre_addr = addrs[2 * k];
1101 kfree(s_lib.p_vtps);
/* Rebuild the condition list: drop the old conds, then read the new
 * ones (event_tmpl structs) straight out of the bundle image. */
1104 /* first, delete all the conds */
1105 list_for_each_entry_safe(c, c_tmp, &cond_list.list, list) {
1109 /* second, add new conds */
1110 /* This can be improved (by placing conds into array) */
1111 nr_conds = *(u_int32_t *)p;
1112 DPRINTF("nr_conds = %d", nr_conds);
1113 p += sizeof(u_int32_t);
1114 for (i = 0; i < nr_conds; i++) {
1115 p_cond = kmalloc(sizeof(struct cond), GFP_KERNEL);
1117 EPRINTF("Cannot alloc cond!\n");
1121 memcpy(&p_cond->tmpl, p, sizeof(struct event_tmpl));
1122 p_cond->applied = 0;
1123 list_add(&(p_cond->list), &(cond_list.list));
1124 p += sizeof(struct event_tmpl);
/* Finally, the event mask closes the bundle. */
1128 if (set_event_mask(*(u_int32_t *)p)) {
1129 EPRINTF("Cannot set event mask!");
1133 p += sizeof(u_int32_t);
1138 //++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++//
/*
 * Initialize the storage layer: clear the mode word, create the relay
 * base directory (procfs or debugfs build), allocate the default-size
 * buffer and initialize the kernel-probe hash list.
 */
1139 int storage_init (void)
1141 unsigned long spinlock_flags = 0L;
1143 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
1144 ec_info.m_nMode = 0; // MASK IS CLEAR (SINGLE NON_CONTINUOUS BUFFER)
1145 // ec_info.m_nMode |= ECMODEMASK_MULTIPLE_BUFFER;
1146 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
1148 #ifndef __DISABLE_RELAYFS
/* procfs build: hang the relay dir off the procfs root. */
1151 gl_pdirRelay = _dir_create (DEFAULT_RELAY_BASE_DIR, _get_proc_root(), &alt_pde);
1152 if(gl_pdirRelay == NULL) {
1153 EPRINTF("Cannot create procfs directory for relay buffer!");
/* debugfs build: create the relay dir at the debugfs root. */
1157 gl_pdirRelay = debugfs_create_dir(DEFAULT_RELAY_BASE_DIR, NULL);
1158 if(gl_pdirRelay == NULL) {
1159 EPRINTF("Cannot create directory for relay buffer!");
1163 #endif // __USE_PROCFS
1165 #endif //__DISABLE_RELAYFS
1167 if(InitializeBuffer(EC_BUFFER_SIZE_DEFAULT) == -1) {
1168 EPRINTF("Cannot initialize buffer! [Size=%u KB]", EC_BUFFER_SIZE_DEFAULT / 1024 );
1172 INIT_HLIST_HEAD (&kernel_probes);
1178 Shuts down "storage".
1179 Assumes that all probes are already deactivated.
/*
 * Shut down the storage layer: free the buffer, remove the relay
 * directory, and report collision / lost-event statistics.  Per the
 * comment above, assumes all probes are already deactivated.
 */
1181 void storage_down (void)
1183 if(UninitializeBuffer() == -1)
1184 EPRINTF("Cannot uninitialize buffer!");
1186 #ifndef __DISABLE_RELAYFS
1189 // remove_buf(gl_pdirRelay);
1191 debugfs_remove(gl_pdirRelay);
1192 #endif // __USE_PROCFS
1194 #endif //__DISABLE_RELAYFS
1196 if (ec_info.collision_count)
1197 EPRINTF ("ec_info.collision_count=%d", ec_info.collision_count);
1198 if (ec_info.lost_events_count)
1199 EPRINTF ("ec_info.lost_events_count=%d", ec_info.lost_events_count);
/* Extract the probed function address from the event varargs as a
 * u_int32_t.  NOTE(review): the fmt handling that positions va_arg is
 * outside this fragment -- confirm which argument holds the address. */
1202 static u_int32_t get_probe_func_addr(const char *fmt, va_list args)
1207 return va_arg(args, u_int32_t);
/*
 * Pack one trace event and store it into the active buffer.
 * Evaluates start/ignore conditions before saving and stop conditions
 * after, honours the event mask and the global "paused" flag, and
 * accounts lost/ignored events in ec_info.
 * NOTE(review): "buf" is static; its memset here and the packing below
 * both happen under ec_spinlock, which serializes concurrent callers.
 */
1210 void pack_event_info (probe_id_t probe_id, record_type_t record_type, const char *fmt, ...)
1212 unsigned long spinlock_flags = 0L;
1213 static char buf[EVENT_MAX_SIZE] = "";
1214 TYPEOF_EVENT_LENGTH event_len = 0L;
1215 TYPEOF_TIME tv = { 0, 0 };
1216 TYPEOF_THREAD_ID current_pid = current->pid;
1217 TYPEOF_PROCESS_ID current_tgid = current->tgid;
1218 unsigned current_cpu = task_cpu(current);
1220 unsigned long addr = 0;
1221 struct cond *p_cond;
1222 struct event_tmpl *p_tmpl;
1224 spin_lock_irqsave(&ec_spinlock, spinlock_flags);
1225 memset(buf, 0, EVENT_MAX_SIZE);
1226 spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
1228 do_gettimeofday (&tv);
/* Kernel-space probe: resolve the probe address and drop events for
 * probes that are not (or no longer) installed. */
1230 if (probe_id == KS_PROBE_ID) {
1231 va_start(args, fmt);
1232 addr = get_probe_func_addr(fmt, args);
1234 if (!find_probe(addr))
1236 if (((addr == pf_addr) && !(probes_flags & PROBE_FLAG_PF_INSTLD)) ||
1237 ((addr == exit_addr) && !(probes_flags & PROBE_FLAG_EXIT_INSTLD)))
/* User-space probe: address is needed for condition matching below. */
1240 if (probe_id == US_PROBE_ID) {
1241 va_start(args, fmt);
1242 addr = get_probe_func_addr(fmt, args);
1246 /* Checking for all the conditions
1247 * except stop condition that we process after saving the event */
1248 list_for_each_entry(p_cond, &cond_list.list, list) {
1249 p_tmpl = &p_cond->tmpl;
1250 switch (p_tmpl->type) {
/* Start condition: every field that is set in the template must match;
 * time fields compare against last_attach_time + template offset. */
1251 case ET_TYPE_START_COND:
1252 if ((!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_ADDR) ||
1253 (addr == p_tmpl->addr)) &&
1254 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_PID) ||
1255 (current_tgid == p_tmpl->pid)) &&
1256 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TID) ||
1257 (current_pid == p_tmpl->tid)) &&
1258 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_CPU_NUM) ||
1259 (current_cpu == p_tmpl->cpu_num)) &&
1260 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_BIN_NAME) ||
1261 (strcmp(current->comm, p_tmpl->bin_name) == 0)) &&
1262 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TIME) ||
1263 (tv.tv_sec > last_attach_time.tv_sec + p_tmpl->sec) ||
1264 (tv.tv_sec == last_attach_time.tv_sec + p_tmpl->sec &&
1265 tv.tv_usec >= last_attach_time.tv_usec + p_tmpl->usec)) &&
1267 spin_lock_irqsave(&ec_spinlock, spinlock_flags);
1269 p_cond->applied = 1;
1270 spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
/* Ignore condition: matching events are counted but never stored. */
1273 case ET_TYPE_IGNORE_COND:
1274 /* if (probe_id == PROBE_SCHEDULE) */
1276 if ((!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_ADDR) ||
1277 (addr == p_tmpl->addr)) &&
1278 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_PID) ||
1279 (current_tgid == p_tmpl->pid)) &&
1280 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TID) ||
1281 (current_pid == p_tmpl->tid)) &&
1282 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_CPU_NUM) ||
1283 (current_cpu == p_tmpl->cpu_num)) &&
1284 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_BIN_NAME) ||
1285 (strcmp(current->comm, p_tmpl->bin_name) == 0))) {
1286 spin_lock_irqsave(&ec_spinlock, spinlock_flags);
1287 ec_info.ignored_events_count++;
1288 spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
1295 /* Save only not masked entry or return kernel and user space events */
1296 if (likely(!((probe_id == KS_PROBE_ID || probe_id == US_PROBE_ID)
1297 && ((record_type == RECORD_ENTRY && (event_mask & IOCTL_EMASK_ENTRY))
1298 || (record_type == RECORD_RET && (event_mask & IOCTL_EMASK_EXIT)))))) {
1300 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
/* While paused, only service events (format / dyn-lib) get through. */
1302 if (paused && (!(probe_id == EVENT_FMT_PROBE_ID || probe_id == DYN_LIB_PROBE_ID))) {
1303 ec_info.ignored_events_count++;
1304 spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
/* Pack the event into the static buffer and push it into storage. */
1308 va_start (args, fmt);
1309 event_len = VPackEvent(buf, sizeof(buf), event_mask, probe_id, record_type, &tv,
1310 current_tgid, current_pid, current_cpu, fmt, args);
1313 if(event_len == 0) {
1314 EPRINTF ("ERROR: failed to pack event!");
1315 ++ec_info.lost_events_count;
1317 } else if(WriteEventIntoBuffer(buf, event_len) == -1) {
1318 EPRINTF("Cannot write event into buffer!");
1319 ++ec_info.lost_events_count;
1321 spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
1325 /* Check for stop condition. We pause collecting the trace right after
1326 * storing this event */
1327 list_for_each_entry(p_cond, &cond_list.list, list) {
1328 p_tmpl = &p_cond->tmpl;
1329 switch (p_tmpl->type) {
1330 case ET_TYPE_STOP_COND:
1331 if ((!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_ADDR) ||
1332 (addr == p_tmpl->addr)) &&
1333 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_PID) ||
1334 (current_tgid == p_tmpl->pid)) &&
1335 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TID) ||
1336 (current_pid == p_tmpl->tid)) &&
1337 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_CPU_NUM) ||
1338 (current_cpu == p_tmpl->cpu_num)) &&
1339 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_BIN_NAME) ||
1340 (strcmp(current->comm, p_tmpl->bin_name) == 0)) &&
1341 (!ET_FIELD_ISSET(p_tmpl->flags, ET_MATCH_TIME) ||
1342 (tv.tv_sec > last_attach_time.tv_sec + p_tmpl->sec) ||
1343 (tv.tv_sec == last_attach_time.tv_sec + p_tmpl->sec &&
1344 tv.tv_usec >= last_attach_time.tv_usec + p_tmpl->usec)) &&
1346 spin_lock_irqsave(&ec_spinlock, spinlock_flags);
1348 p_cond->applied = 1;
1349 spin_unlock_irqrestore(&ec_spinlock, spinlock_flags);
1355 EXPORT_SYMBOL_GPL(pack_event_info);
1357 kernel_probe_t* find_probe (unsigned long addr)
// Look up a registered kernel probe by its address in the RCU-protected
// 'kernel_probes' hash list (declared at file scope, see HEAD).
// Returns the matching kernel_probe_t, or NULL if none is registered.
// NOTE(review): this extraction is missing lines (embedded numbering jumps
// 1357 -> 1360): the declaration of 'p' and presumably a 'break' inside the
// loop are not visible here -- confirm against the full file.
1360 struct hlist_node *node;
1362 //check if such probe does exist
1363 hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
1364 if (p->addr == addr)
// 'node' is non-NULL only when the walk stopped on a match (presumably via
// a 'break' not visible here); otherwise the list was exhausted -> NULL.
1367 return node ? p : NULL;
// Register a new kernel probe at 'addr' and hand it back via '*pprobe'.
// Handler resolution: for each of the pre-, jprobe- and retprobe-handlers,
// try to resolve a custom handler through the dynamically looked-up
// find_*_handler() functions; fall back to the generic def_*_event_handler
// when the lookup function is absent or returns 0 for this address.
// NOTE(review): lines are missing from this extraction (numbering jumps,
// e.g. 1389 -> 1393, 1393 -> 1396): the early-return for an already
// registered probe, the NULL-check branch around the EPRINTF, the 'else'
// keywords pairing with the fallback assignments, and the final return are
// not visible -- confirm against the full file.
1370 int add_probe_to_list (unsigned long addr, kernel_probe_t ** pprobe)
1372 kernel_probe_t *new_probe;
1373 unsigned long jp_handler_addr, rp_handler_addr, pre_handler_addr;
// Resolve optional helper lookups by symbol name at call time; each may be
// NULL when the providing module is not loaded.
1374 unsigned long (*find_jp_handler)(unsigned long) =
1375 (unsigned long (*)(unsigned long))lookup_name("find_jp_handler");
1376 unsigned long (*find_rp_handler)(unsigned long) =
1377 (unsigned long (*)(unsigned long))lookup_name("find_rp_handler");
1378 unsigned long (*find_pre_handler)(unsigned long) =
1379 (unsigned long (*)(unsigned long))lookup_name("find_pre_handler");
1381 kernel_probe_t *probe;
1385 //check if such probe does already exist
1386 probe = find_probe(addr);
1388 /* It is not a problem if we have already registered
1389 this probe before */
1393 new_probe = kmalloc (sizeof (kernel_probe_t), GFP_KERNEL);
// Allocation-failure path (the surrounding NULL-check is not visible here).
1396 EPRINTF ("no memory for new probe!");
1399 memset (new_probe, 0, sizeof (kernel_probe_t));
// Both the jprobe and the retprobe target the same address, and both carry
// a back-pointer to the owning kernel_probe_t in priv_arg.
1401 new_probe->addr = addr;
1402 new_probe->jprobe.kp.addr = new_probe->retprobe.kp.addr = (kprobe_opcode_t *)addr;
1403 new_probe->jprobe.priv_arg = new_probe->retprobe.priv_arg = new_probe;
1405 //new_probe->jprobe.pre_entry = (kprobe_pre_entry_handler_t) def_jprobe_event_pre_handler;
// Pre-entry handler: custom if resolvable, else the default.
1406 if (find_pre_handler == 0 ||
1407 (pre_handler_addr = find_pre_handler(new_probe->addr)) == 0)
1408 new_probe->jprobe.pre_entry = (kprobe_pre_entry_handler_t) def_jprobe_event_pre_handler;
1410 new_probe->jprobe.pre_entry = (kprobe_pre_entry_handler_t)pre_handler_addr;
// Jprobe entry handler: custom if resolvable, else the default.
1412 if (find_jp_handler == 0 ||
1413 (jp_handler_addr = find_jp_handler(new_probe->addr)) == 0)
1414 new_probe->jprobe.entry = (kprobe_opcode_t *) def_jprobe_event_handler;
1416 new_probe->jprobe.entry = (kprobe_opcode_t *)jp_handler_addr;
// Return-probe handler: custom if resolvable, else the default.
1418 if (find_rp_handler == 0 ||
1419 (rp_handler_addr = find_rp_handler(new_probe->addr)) == 0)
1420 new_probe->retprobe.handler =
1421 (kretprobe_handler_t) def_retprobe_event_handler;
1423 new_probe->retprobe.handler = (kretprobe_handler_t)rp_handler_addr;
// Publish the fully initialized probe on the RCU list, then hand it back.
1425 INIT_HLIST_NODE (&new_probe->hlist);
1426 hlist_add_head_rcu (&new_probe->hlist, &kernel_probes);
1429 *pprobe = new_probe;
// Unregister the kernel probe at 'addr', if present. A missing probe is
// not treated as an error (see comment below).
// NOTE(review): missing lines in this extraction (numbering jumps 1439 ->
// 1441, 1445 -> end): the declaration of 'p', the NULL-check branch, any
// freeing of the entry, and the return statement are not visible --
// confirm against the full file. In particular, whether hlist_del_rcu is
// followed by an RCU grace period before kfree cannot be judged from here.
1434 int remove_probe_from_list (unsigned long addr)
1438 //check if such probe does exist
1439 p = find_probe (addr);
1441 /* We do not care about it. Nothing bad. */
1445 hlist_del_rcu (&p->hlist);
// Accept a pre-packed user-space event buffer ('data', 'len' bytes),
// rewrite it in place according to the current 'event_mask' (dropping the
// time/pid/tid/cpu/args fields the user asked to mask out), patch the
// length fields, and append the result to the cyclic trace buffer.
// MEC-range probe IDs are instead forwarded to the mec_post_event hook.
// Layout assumed (from the offset arithmetic below): SWAP_TYPE_EVENT_HEADER
// { length, type, probe_id, time, pid, tid, cpu, n_args } followed by an
// argument format string and packed argument values.
// NOTE(review): many lines are missing from this extraction (brace lines,
// 'else' branches, the declaration of 'pArg1', returns) -- the commentary
// below is best-effort from what is visible; confirm against the full file.
1453 int put_us_event (char *data, unsigned long len)
1455 unsigned long spinlock_flags = 0L;
1457 SWAP_TYPE_EVENT_HEADER *pEventHeader = (SWAP_TYPE_EVENT_HEADER *)data;
// 'cur' starts just past the fixed length/type/probe-id prefix; the
// optional fields below are rewritten (or skipped) from this point on.
1458 char *cur = data + sizeof(TYPEOF_EVENT_LENGTH) + sizeof(TYPEOF_EVENT_TYPE)
1459 + sizeof(TYPEOF_PROBE_ID);
1460 TYPEOF_NUMBER_OF_ARGS nArgs = pEventHeader->m_nNumberOfArgs;
1461 TYPEOF_PROBE_ID probe_id = pEventHeader->m_nProbeID;
1464 /*if(probe_id == US_PROBE_ID){
1465 printk("esrc %p/%d[", data, len);
1466 for(i = 0; i < len; i++)
1467 printk("%02x ", data[i]);
1471 // set pid/tid/cpu/time i
1472 //pEventHeader->m_time.tv_sec = tv.tv_sec;
1473 //pEventHeader->m_time.tv_usec = tv.tv_usec;
// MEC (Memory Error Checker) probes bypass the normal buffer: hand the raw
// event to mec_post_event, resolving it lazily by symbol name on first use.
1475 #ifdef MEMORY_CHECKER
1476 //TODO: move this part to special MEC event posting routine, new IOCTL is needed
1477 if((probe_id >= MEC_PROBE_ID_MIN) && (probe_id <= MEC_PROBE_ID_MAX))
1479 if(mec_post_event != NULL)
1481 int res = mec_post_event(data, len);
1489 mec_post_event = lookup_name("mec_post_event");
1490 if(mec_post_event == NULL)
1492 EPRINTF ("Failed to find function 'mec_post_event' from mec_handlers.ko. Memory Error Checker will work incorrectly.");
1496 int res = mec_post_event(data, len);
// Optional timestamp field: always kept for format events, otherwise only
// when IOCTL_EMASK_TIME is not masking it out.
1506 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_TIME)){
1507 TYPEOF_TIME tv = { 0, 0 };
1508 do_gettimeofday (&tv);
1509 memcpy(cur, &tv, sizeof(TYPEOF_TIME));
1510 cur += sizeof(TYPEOF_TIME);
1512 //pEventHeader->m_nProcessID = current_tgid;
// Optional PID field (process = tgid in kernel terms).
1513 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_PID)){
1514 //TYPEOF_PROCESS_ID current_tgid = current->tgid;
1515 (*(TYPEOF_PROCESS_ID *)cur) = current->tgid;
1516 cur += sizeof(TYPEOF_PROCESS_ID);
1518 //pEventHeader->m_nThreadID = current_pid;
// Optional TID field (thread = pid in kernel terms).
1519 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_TID)){
1520 //TYPEOF_THREAD_ID current_pid = current->pid;
1521 (*(TYPEOF_THREAD_ID *)cur) = current->pid;
1522 cur += sizeof(TYPEOF_THREAD_ID);
1524 //pEventHeader->m_nCPU = current_cpu;
// Optional CPU-number field.
1525 if((probe_id == EVENT_FMT_PROBE_ID) || !(event_mask & IOCTL_EMASK_CPU)){
1526 //TYPEOF_CPU_NUMBER current_cpu = task_cpu(current);
1527 (*(TYPEOF_CPU_NUMBER *)cur) = task_cpu(current);
1528 cur += sizeof(TYPEOF_CPU_NUMBER);
1530 //printk("%d %x", probe_id, event_mask);
1531 // dyn lib event should have all args, it is for internal use and not visible to user
1532 if((probe_id == EVENT_FMT_PROBE_ID) || (probe_id == DYN_LIB_PROBE_ID) || !(event_mask & IOCTL_EMASK_ARGS)){
1533 // move only if any of prev fields has been skipped
// memmove (not memcpy) because source and destination overlap when
// earlier fields were dropped and the args block slides left.
1534 if(event_mask & (IOCTL_EMASK_TIME|IOCTL_EMASK_PID|IOCTL_EMASK_TID|IOCTL_EMASK_CPU)){
1535 memmove(cur, data+sizeof(SWAP_TYPE_EVENT_HEADER)-sizeof(TYPEOF_NUMBER_OF_ARGS),
1536 len-sizeof(SWAP_TYPE_EVENT_HEADER)+sizeof(TYPEOF_NUMBER_OF_ARGS)
1537 -sizeof(TYPEOF_EVENT_LENGTH));
1539 cur += len-sizeof(SWAP_TYPE_EVENT_HEADER)+sizeof(TYPEOF_NUMBER_OF_ARGS)
1540 -sizeof(TYPEOF_EVENT_LENGTH);
1543 // user space probes should have at least one argument to identify them
// Args masked out, but US/VTP probes need one identifying arg: emit a
// synthetic arg count of 1, a "p" (pointer) format string, and the first
// original argument value.
1544 if((probe_id == US_PROBE_ID) || (probe_id == VTP_PROBE_ID)){
1546 (*(TYPEOF_NUMBER_OF_ARGS *)cur) = 1;
1547 cur += sizeof(TYPEOF_NUMBER_OF_ARGS);
1548 // pack args using format string for the 1st arg only
1549 memset(cur, 0, ALIGN_VALUE(2));
1550 cur[0] = 'p'; cur[1] = '\0';
1551 cur += ALIGN_VALUE(2);
// NOTE(review): 'pArg1' is used but its declaration is not visible here.
1552 pArg1 = data + sizeof(SWAP_TYPE_EVENT_HEADER)+ALIGN_VALUE(nArgs+1);
1553 memmove(cur, pArg1, sizeof(unsigned long));
1554 cur += sizeof(unsigned long);
// Any other masked-args probe carries zero arguments.
1557 (*(TYPEOF_NUMBER_OF_ARGS *)cur) = 0;
1558 cur += sizeof(TYPEOF_NUMBER_OF_ARGS);
// Recompute the total length and duplicate it in the trailing length slot
// (events carry the length both at the head and at the tail).
1561 pEventHeader->m_nLength = cur - data + sizeof(TYPEOF_EVENT_LENGTH);
1562 *((TYPEOF_EVENT_LENGTH *)cur) = pEventHeader->m_nLength;
1563 len = pEventHeader->m_nLength;
1565 if(WriteEventIntoBuffer(data, len) == -1) {
1566 EPRINTF("Cannot write event into buffer!");
// Lost-event accounting is protected by the same spinlock that guards the
// shared ec_info state elsewhere in this file.
1568 spin_lock_irqsave (&ec_spinlock, spinlock_flags);
1569 ++ec_info.lost_events_count;
1570 spin_unlock_irqrestore (&ec_spinlock, spinlock_flags);
// Parse the user-supplied list of predefined user-space probes -- each a
// string of the form "library:function:hex_offset" -- and store the parsed
// offset into the matching entry of the module-provided 'my_uprobes_info'
// table (resolved by symbol name; an empty table is used when the symbol
// is absent).
// NOTE(review): lines are missing from this extraction (loop braces,
// kmalloc NULL-check branch, 'continue'/cleanup paths, kfree of 'buf', the
// 'size' advance per iteration, and the return) -- whether 'buf' is freed
// on every path cannot be judged from here; confirm against the full file.
1576 int set_predef_uprobes (ioctl_predef_uprobes_info_t *data)
1578 int i, k, size = 0, probe_size, result, j;
1579 char *buf, *sep1, *sep2;
1581 inst_us_proc_t *my_uprobes_info = (inst_us_proc_t *)lookup_name("my_uprobes_info");
1582 DPRINTF("my_uprobes_info lookup result: 0x%p", my_uprobes_info);
1583 inst_us_proc_t empty_uprobes_info =
1588 if (my_uprobes_info == 0)
1589 my_uprobes_info = &empty_uprobes_info;
// One probe string per iteration; 'size' is the running offset into the
// user buffer (advance not visible in this extraction).
1591 for(j = 0; j < data->probes_count; j++){
// strlen_user counts the terminating NUL, so probe_size includes it.
1592 probe_size = strlen_user(data->p_probes+size);
1593 buf = kmalloc(probe_size, GFP_KERNEL);
1595 EPRINTF("failed to alloc mem!");
// strncpy_from_user returns the copied length excluding the NUL; anything
// other than probe_size-1 means the string changed under us or a fault.
1598 result = strncpy_from_user(buf, data->p_probes+size, probe_size);
1599 if (result != (probe_size-1))
1601 EPRINTF("failed to copy from user!");
1605 //DPRINTF("%s", buf);
// Split "lib:func:offset" on the two ':' separators; malformed strings are
// reported and skipped.
1606 sep1 = strchr(buf, ':');
1608 EPRINTF("skipping invalid predefined uprobe string '%s'!", buf);
1613 sep2 = strchr(sep1+1, ':');
1614 if(!sep2 || (sep2 == sep1) || (sep2+2 == buf+probe_size)){
1615 EPRINTF("skipping invalid predefined uprobe string '%s'!", buf);
// Find the matching library path, then the matching function name, and
// record the parsed hex offset on that instrumentation point.
1620 for(i = 0; i < my_uprobes_info->libs_count; i++){
1621 if(strncmp(buf, my_uprobes_info->p_libs[i].path, sep1-buf) != 0)
1623 for(k = 0; k < my_uprobes_info->p_libs[i].ips_count; k++){
1624 if(strncmp(sep1+1, my_uprobes_info->p_libs[i].p_ips[k].name, sep2-sep1-1) != 0)
1626 my_uprobes_info->p_libs[i].p_ips[k].offset = simple_strtoul(sep2+1, NULL, 16);
// Compute the total byte size needed to serialize every predefined uprobe
// as "lib_path:func_name:" entries (lib + ':' + name + ':' + terminator,
// hence the "+ 1 ... + 2" per entry), accumulating into '*size'.
// NOTE(review): missing lines in this extraction: the declarations of
// 'i'/'k', the '*size = 0' initialization (presumed), the
// empty_uprobes_info initializer body, and the return -- confirm against
// the full file.
1635 int get_predef_uprobes_size(int *size)
1639 inst_us_proc_t *my_uprobes_info = (inst_us_proc_t *)lookup_name("my_uprobes_info");
1640 inst_us_proc_t empty_uprobes_info =
// Fall back to an empty table when the providing module is not loaded.
1646 if (my_uprobes_info == 0)
1647 my_uprobes_info = &empty_uprobes_info;
1650 for(i = 0; i < my_uprobes_info->libs_count; i++){
// Hoist the library-path length out of the inner loop.
1651 int lib_size = strlen(my_uprobes_info->p_libs[i].path);
1652 for(k = 0; k < my_uprobes_info->p_libs[i].ips_count; k++){
1653 // libc.so.6:printf:
1654 *size += lib_size + 1 + strlen(my_uprobes_info->p_libs[i].p_ips[k].name) + 2;
1660 int get_predef_uprobes(ioctl_predef_uprobes_info_t *udata)
1662 ioctl_predef_uprobes_info_t data;
1663 int i, k, size, lib_size, func_size, result;
1667 inst_us_proc_t *my_uprobes_info = (inst_us_proc_t *)lookup_name("my_uprobes_info");
1668 inst_us_proc_t empty_uprobes_info =
1673 if (my_uprobes_info == 0)
1674 my_uprobes_info = &empty_uprobes_info;
1676 // get addr of array
1677 if (copy_from_user ((void *)&data, udata, sizeof (data)))
1679 EPRINTF("failed to copy from user!");
1684 for(i = 0; i < my_uprobes_info->libs_count; i++){
1685 lib_size = strlen(my_uprobes_info->p_libs[i].path);
1686 for(k = 0; k < my_uprobes_info->p_libs[i].ips_count; k++){
1688 result = copy_to_user ((void *)(data.p_probes+size), my_uprobes_info->p_libs[i].path, lib_size);
1691 EPRINTF("failed to copy to user!");
1696 result = copy_to_user ((void *)(data.p_probes+size), sep, 1);
1699 EPRINTF("failed to copy to user!");
1704 //DPRINTF("'%s'", my_uprobes_info->p_libs[i].p_ips[k].name);
1705 func_size = strlen(my_uprobes_info->p_libs[i].p_ips[k].name);
1706 result = copy_to_user ((void *)(data.p_probes+size), my_uprobes_info->p_libs[i].p_ips[k].name, func_size);
1709 EPRINTF("failed to copy to user!");
1714 result = copy_to_user ((void *)(data.p_probes+size), sep, 2);
1717 EPRINTF("failed to copy to user!");
1726 result = copy_to_user ((void *)&(udata->probes_count), &count, sizeof(count));
1729 EPRINTF("failed to copy to user!");