#include <linux/percpu.h>
#include <ksyms.h>
+#include <dbi_kprobes_deps.h>
#include "module.h"
#include "probes_manager.h"
kernel_probe_t *p;
struct hlist_node *node;
- hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
+ swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
{
partial_result = register_kernel_probe (p);
if (partial_result)
kernel_probe_t *p;
struct hlist_node *node;
- hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
+ swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
unregister_kernel_probe (p);
- hlist_for_each_entry_rcu (p, node, &otg_kernel_probes, hlist) {
+ swap_hlist_for_each_entry_rcu (p, node, &otg_kernel_probes, hlist) {
unregister_kernel_probe(p);
}
struct hlist_node *node, *tnode;
kernel_probe_t *p;
- hlist_for_each_entry_safe (p, node, tnode, &kernel_probes, hlist) {
+ swap_hlist_for_each_entry_safe (p, node, tnode, &kernel_probes, hlist) {
hlist_del(node);
kfree(p);
}
- hlist_for_each_entry_safe (p, node, tnode, &otg_kernel_probes, hlist) {
+ swap_hlist_for_each_entry_safe (p, node, tnode, &otg_kernel_probes, hlist) {
hlist_del(node);
kfree(p);
}
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry_rcu(page, node, head, hlist) {
+ swap_hlist_for_each_entry_rcu(page, node, head, hlist) {
print_page_probes(page);
}
}
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/hash.h>
+#include <dbi_kprobes_deps.h>
static int calculation_hash_bits(int cnt)
{
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry_safe(page, p, n, head, hlist) {
+ swap_hlist_for_each_entry_safe(page, p, n, head, hlist) {
hlist_del(&page->hlist);
sspt_page_free(page);
}
// copy pages
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry(page, node, head, hlist) {
+ swap_hlist_for_each_entry(page, node, head, hlist) {
sspt_add_page(file_out, sspt_page_copy(page));
}
}
struct sspt_page *page;
head = &file->page_probes_table[hash_ptr((void *)offset, file->page_probes_hash_bits)];
- hlist_for_each_entry(page, node, head, hlist) {
+ swap_hlist_for_each_entry(page, node, head, hlist) {
if (page->offset == offset) {
return page;
}
#include <linux/kernel.h>
#include <linux/time.h>
#include <ksyms.h>
+#include <dbi_kprobes_deps.h>
#include "module.h"
#include "storage.h"
#include "handlers_core.h"
list_del_rcu(&dbi_mhi->dbi_list_head);
// Next code block is for far future possible usage in case when removing will be implemented for unsafe state
// (i.e. between attach and stop)
- /*hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist) {
+ /*swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist) {
// XXX: absent code for pre_handlers because we suppose that they are not used
if ((p->jprobe.entry != ((kprobe_pre_entry_handler_t )def_jprobe_event_pre_handler)) ||
(p->retprobe.handler != ((kretprobe_handler_t )def_retprobe_event_handler))) {
struct hlist_node *node;
//check if such probe does exist
- hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
+ swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
if (p->addr == addr)
break;
struct task_struct *task)
{
int err = 0;
- struct us_ip *ip;
+ struct us_ip *ip, *n;
spin_lock(&page->lock);
sspt_page_assert_install(page);
sspt_set_all_ip_addr(page, file);
- list_for_each_entry(ip, &page->ip_list, list) {
+ list_for_each_entry_safe(ip, n, &page->ip_list, list) {
err = register_usprobe_my(task, ip);
- if (err != 0) {
- //TODO: ERROR
- goto unlock;
+ if (err == -ENOEXEC) {
+ list_del(&ip->list);
+ free_ip(ip);
+ continue;
+ } else if (err) {
+ EPRINTF("Failed to install probe");
}
}
-
- sspt_page_installed(page);
-
unlock:
+ sspt_page_installed(page);
spin_unlock(&page->lock);
- return err;
+ return 0;
}
static int unregister_us_page_probe(struct task_struct *task,
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry_rcu(page, node, head, hlist) {
+ swap_hlist_for_each_entry_rcu(page, node, head, hlist) {
register_us_page_probe(page, file, task);
}
}
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
+ swap_hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
if (page->install) {
return 1;
}
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
+ swap_hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
err = unregister_us_page_probe(task, page, flag);
if (err != 0) {
// TODO: ERROR
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <dbi_kprobes_deps.h>
struct chunk {
unsigned long *data;
struct fixed_alloc *fa;
struct hlist_node *pos;
- hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
+ swap_hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
free_slot = chunk_allocate(&fa->chunk, sm->slot_size);
if (free_slot)
return free_slot;
struct fixed_alloc *fa;
struct hlist_node *pos;
- hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
+ swap_hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
if (!chunk_check_ptr(&fa->chunk, slot, PAGE_SIZE))
continue;
struct kprobe *p;
head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
- hlist_for_each_entry_rcu(p, node, head, hlist) {
+ swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
if (p->addr == addr) {
return p;
}
struct hlist_node *node;
struct kretprobe_instance *ri;
- hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+ swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
return ri;
}
if (!alloc_nodes_kretprobe(rp)) {
- hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+ swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
return ri;
}
}
struct hlist_node *node;
struct kretprobe_instance *ri;
- hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+ swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
return ri;
}
struct hlist_node *node;
struct kretprobe_instance *ri;
- hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
+ swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
return ri;
}
}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36) */
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+DECLARE_MOD_FUNC_DEP(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate);
+DECLARE_MOD_DEP_WRAPPER(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate)
+IMP_MOD_DEP_WRAPPER(do_mmap_pgoff, file, addr, len, prot, flags, pgoff, populate)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
DECLARE_MOD_FUNC_DEP(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff);
DECLARE_MOD_DEP_WRAPPER(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff)
IMP_MOD_DEP_WRAPPER(do_mmap_pgoff, file, addr, len, prot, flags, pgoff)
static DECLARE_MOD_FUNC_DEP(in_gate_area_no_task, int, unsigned long addr);
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+static DECLARE_MOD_FUNC_DEP(follow_page_mask, \
+ struct page *, struct vm_area_struct * vma, \
+ unsigned long address, unsigned int foll_flags, \
+ unsigned int *page_mask);
+DECLARE_MOD_DEP_WRAPPER(follow_page_mask, struct page *, \
+ struct vm_area_struct * vma, \
+ unsigned long address, \
+ unsigned int foll_flags, \
+ unsigned int *page_mask)
+IMP_MOD_DEP_WRAPPER (follow_page_mask, vma, address, foll_flags, page_mask)
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
static DECLARE_MOD_FUNC_DEP(follow_page, \
struct page *, struct vm_area_struct * vma, \
unsigned long address, unsigned int foll_flags);
+DECLARE_MOD_DEP_WRAPPER(follow_page, struct page *, \
+ struct vm_area_struct * vma, \
+ unsigned long address, \
+ unsigned int foll_flags)
+IMP_MOD_DEP_WRAPPER (follow_page, vma, address, foll_flags)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
static DECLARE_MOD_FUNC_DEP(__flush_anon_page, \
void, struct vm_area_struct *vma, struct page *page, \
unsigned long vmaddr);
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
}
-
-#if (LINUX_VERSION_CODE != KERNEL_VERSION(2, 6, 11))
-DECLARE_MOD_DEP_WRAPPER (follow_page, \
- struct page *, struct vm_area_struct * vma, \
- unsigned long address, unsigned int foll_flags)
-IMP_MOD_DEP_WRAPPER (follow_page, vma, address, foll_flags)
-#endif
DECLARE_MOD_DEP_WRAPPER (__flush_anon_page, \
void, struct vm_area_struct *vma, \
struct page *page, unsigned long vmaddr)
int init_module_dependencies(void)
{
+
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
INIT_MOD_DEP_VAR(handle_mm_fault, handle_mm_fault);
#endif
INIT_MOD_DEP_VAR(in_gate_area, in_gate_area);
#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ INIT_MOD_DEP_VAR(follow_page_mask, follow_page_mask);
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+ INIT_MOD_DEP_VAR(follow_page, follow_page);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
#ifndef is_zero_pfn
swap_zero_pfn = page_to_pfn(ZERO_PAGE(0));
INIT_MOD_DEP_VAR(in_gate_area_no_task, in_gate_area_no_task);
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)) */
- INIT_MOD_DEP_VAR(follow_page, follow_page);
-
INIT_MOD_DEP_VAR(__flush_anon_page, __flush_anon_page);
INIT_MOD_DEP_VAR(vm_normal_page, vm_normal_page);
INIT_MOD_DEP_VAR(access_process_vm, access_process_vm);
#else /*2.6.16 */
INIT_MOD_DEP_VAR(put_task_struct, __put_task_struct_cb);
#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
INIT_MOD_DEP_VAR(do_mmap_pgoff, do_mmap_pgoff);
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
return 0;
}
return !vma->vm_ops || !vma->vm_ops->fault;
}
-
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
stack_guard_page_end(vma, addr+PAGE_SIZE);
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+
+/*
+ * follow_page_uprobe - compatibility wrapper around follow_page_mask().
+ *
+ * On kernels >= 3.8 (see the surrounding version guard) follow_page()
+ * takes an extra output argument for the page mask.  This wrapper keeps
+ * the old three-argument calling convention for callers that do not
+ * care about the mask: the mask result is computed into a local and
+ * deliberately discarded.
+ */
+static inline struct page *follow_page_uprobe(struct vm_area_struct *vma,
+	unsigned long address, unsigned int foll_flags)
+{
+	unsigned int unused_page_mask;
+	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
+}
+
+/*
+ * __get_user_pages_uprobe - pin user pages for uprobe access
+ * (variant for kernels >= 3.8, where follow_page_mask() replaced
+ * follow_page()).
+ *
+ * NOTE(review): this appears to be an almost line-for-line local copy
+ * of the kernel's __get_user_pages() with the cond_resched() calls
+ * commented out — presumably because it may be called from a context
+ * that must not sleep for rescheduling; confirm against the callers.
+ * Keep it in sync with upstream mm/memory.c when porting to newer
+ * kernels.
+ *
+ * @tsk:         task whose fault counters are updated (may be NULL)
+ * @mm:          mm to walk
+ * @start:       first user address to pin
+ * @nr_pages:    number of pages to pin
+ * @gup_flags:   FOLL_* flags controlling lookup/fault behaviour
+ * @pages:       if non-NULL, receives the pinned struct page pointers
+ * @vmas:        if non-NULL, receives the vma covering each page
+ * @nonblocking: if non-NULL, allows the fault handler to drop mmap_sem
+ *
+ * Returns the number of pages processed, or a negative errno when no
+ * page could be pinned.
+ */
+long __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, unsigned long nr_pages,
+		unsigned int gup_flags, struct page **pages,
+		struct vm_area_struct **vmas, int *nonblocking)
+{
+	long i;
+	unsigned long vm_flags;
+	unsigned int page_mask;
+
+	if (!nr_pages)
+		return 0;
+
+	/* Callers asking for pages back must also take references. */
+	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
+	/*
+	 * Require read or write permissions.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
+	 */
+	vm_flags = (gup_flags & FOLL_WRITE) ?
+			(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (gup_flags & FOLL_FORCE) ?
+			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+
+	/*
+	 * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
+	 * would be called on PROT_NONE ranges. We must never invoke
+	 * handle_mm_fault on PROT_NONE ranges or the NUMA hinting
+	 * page faults would unprotect the PROT_NONE ranges if
+	 * _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd
+	 * bitflag. So to avoid that, don't set FOLL_NUMA if
+	 * FOLL_FORCE is set.
+	 */
+	if (!(gup_flags & FOLL_FORCE))
+		gup_flags |= FOLL_NUMA;
+
+	i = 0;
+
+	do {
+		struct vm_area_struct *vma;
+
+		vma = find_extend_vma(mm, start);
+		if (!vma && dbi_in_gate_area(tsk, start)) {
+			/*
+			 * No vma, but the address lies in the gate area
+			 * (e.g. the vsyscall page): walk the page tables
+			 * by hand instead of faulting.
+			 */
+			unsigned long pg = start & PAGE_MASK;
+			pgd_t *pgd;
+			pud_t *pud;
+			pmd_t *pmd;
+			pte_t *pte;
+
+			/* user gate pages are read-only */
+			if (gup_flags & FOLL_WRITE)
+				return i ? : -EFAULT;
+			if (pg > TASK_SIZE)
+				pgd = pgd_offset_k(pg);
+			else
+				pgd = pgd_offset_gate(mm, pg);
+			BUG_ON(pgd_none(*pgd));
+			pud = pud_offset(pgd, pg);
+			BUG_ON(pud_none(*pud));
+			pmd = pmd_offset(pud, pg);
+			if (pmd_none(*pmd))
+				return i ? : -EFAULT;
+			VM_BUG_ON(pmd_trans_huge(*pmd));
+			pte = pte_offset_map(pmd, pg);
+			if (pte_none(*pte)) {
+				pte_unmap(pte);
+				return i ? : -EFAULT;
+			}
+			vma = get_gate_vma(mm);
+			if (pages) {
+				struct page *page;
+
+				page = vm_normal_page(vma, start, *pte);
+				if (!page) {
+					/* Accept the shared zero page unless
+					 * the caller wants real data only
+					 * (FOLL_DUMP). */
+					if (!(gup_flags & FOLL_DUMP) &&
+					     swap_is_zero_pfn(pte_pfn(*pte)))
+						page = pte_page(*pte);
+					else {
+						pte_unmap(pte);
+						return i ? : -EFAULT;
+					}
+				}
+				pages[i] = page;
+				get_page(page);
+			}
+			pte_unmap(pte);
+			page_mask = 0;
+			goto next_page;
+		}
+
+		if (!vma ||
+		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+		    !(vm_flags & vma->vm_flags))
+			return i ? : -EFAULT;
+
+		if (is_vm_hugetlb_page(vma)) {
+			/* Hugetlb pages are pinned by their own helper,
+			 * which also advances start/nr_pages/i. */
+			i = follow_hugetlb_page(mm, vma, pages, vmas,
+					&start, &nr_pages, i, gup_flags);
+			continue;
+		}
+
+		do {
+			struct page *page;
+			unsigned int foll_flags = gup_flags;
+			unsigned int page_increm;
+
+			/*
+			 * If we have a pending SIGKILL, don't keep faulting
+			 * pages and potentially allocating memory.
+			 */
+			if (unlikely(fatal_signal_pending(current)))
+				return i ? i : -ERESTARTSYS;
+
+			/* cond_resched(); */
+			/* Fault pages in until the lookup succeeds (or
+			 * fails permanently). */
+			while (!(page = follow_page_mask(vma, start,
+						foll_flags, &page_mask))) {
+				int ret;
+				unsigned int fault_flags = 0;
+
+				/* For mlock, just skip the stack guard page. */
+				if (foll_flags & FOLL_MLOCK) {
+					if (stack_guard_page(vma, start))
+						goto next_page;
+				}
+				if (foll_flags & FOLL_WRITE)
+					fault_flags |= FAULT_FLAG_WRITE;
+				if (nonblocking)
+					fault_flags |= FAULT_FLAG_ALLOW_RETRY;
+				if (foll_flags & FOLL_NOWAIT)
+					fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);
+
+				ret = handle_mm_fault(mm, vma, start,
+							fault_flags);
+
+				if (ret & VM_FAULT_ERROR) {
+					if (ret & VM_FAULT_OOM)
+						return i ? i : -ENOMEM;
+					if (ret & (VM_FAULT_HWPOISON |
+						   VM_FAULT_HWPOISON_LARGE)) {
+						if (i)
+							return i;
+						else if (gup_flags & FOLL_HWPOISON)
+							return -EHWPOISON;
+						else
+							return -EFAULT;
+					}
+					if (ret & VM_FAULT_SIGBUS)
+						return i ? i : -EFAULT;
+					BUG();
+				}
+
+				/* Account the fault against the task, when known. */
+				if (tsk) {
+					if (ret & VM_FAULT_MAJOR)
+						tsk->maj_flt++;
+					else
+						tsk->min_flt++;
+				}
+
+				if (ret & VM_FAULT_RETRY) {
+					/* mmap_sem was dropped by the fault
+					 * handler; report partial progress. */
+					if (nonblocking)
+						*nonblocking = 0;
+					return i;
+				}
+
+				/*
+				 * The VM_FAULT_WRITE bit tells us that
+				 * do_wp_page has broken COW when necessary,
+				 * even if maybe_mkwrite decided not to set
+				 * pte_write. We can thus safely do subsequent
+				 * page lookups as if they were reads. But only
+				 * do so when looping for pte_write is futile:
+				 * in some cases userspace may also be wanting
+				 * to write to the gotten user page, which a
+				 * read fault here might prevent (a readonly
+				 * page might get reCOWed by userspace write).
+				 */
+				if ((ret & VM_FAULT_WRITE) &&
+				    !(vma->vm_flags & VM_WRITE))
+					foll_flags &= ~FOLL_WRITE;
+
+				/* cond_resched(); */
+			}
+			if (IS_ERR(page))
+				return i ? i : PTR_ERR(page);
+			if (pages) {
+				pages[i] = page;
+
+				flush_anon_page(vma, page, start);
+				flush_dcache_page(page);
+				page_mask = 0;
+			}
+next_page:
+			if (vmas) {
+				vmas[i] = vma;
+				page_mask = 0;
+			}
+			/* A non-zero page_mask (presumably set by
+			 * follow_page_mask() for compound pages) lets us
+			 * step over the whole compound page at once. */
+			page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+			if (page_increm > nr_pages)
+				page_increm = nr_pages;
+			i += page_increm;
+			start += page_increm * PAGE_SIZE;
+			nr_pages -= page_increm;
+		} while (nr_pages && start < vma->vm_end);
+	} while (nr_pages);
+	return i;
+}
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
static int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, unsigned int gup_flags,
struct page **pages, struct vm_area_struct **vmas,
return i;
}
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
static int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
#include <linux/pagemap.h>
#include "../ksyms/ksyms.h"
+/*
+ * Kernel 3.9 removed the separate "pos" (struct hlist_node *) cursor
+ * argument from the hlist_for_each_entry*() iterators.  These swap_*
+ * wrappers accept the old argument list on every kernel version:
+ * callers always pass "pos", and on >= 3.9 it is simply ignored by the
+ * macro expansion.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+#define swap_hlist_for_each_entry_rcu(tpos, pos, head, member) hlist_for_each_entry_rcu(tpos, head, member)
+#define swap_hlist_for_each_entry_safe(tpos, pos, n, head, member) hlist_for_each_entry_safe(tpos, n, head, member)
+#define swap_hlist_for_each_entry(tpos, pos, head, member) hlist_for_each_entry(tpos, head, member)
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+#define swap_hlist_for_each_entry_rcu(tpos, pos, head, member) hlist_for_each_entry_rcu(tpos, pos, head, member)
+#define swap_hlist_for_each_entry_safe(tpos, pos, n, head, member) hlist_for_each_entry_safe(tpos, pos, n, head, member)
+#define swap_hlist_for_each_entry(tpos, pos, head, member) hlist_for_each_entry(tpos, pos, head, member)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12))
#define synchronize_sched synchronize_kernel
// print uprobe table
for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
printk("####### find K tgid=%u, addr=%x\n",
p->tgid, p->addr);
}
// print uprobe table
for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
head = &kretprobe_inst_table[i];
- hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
printk("####### find KR tgid=%u, addr=%x\n",
p->tgid, p->addr);
}
// print uprobe table
for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
head = &uprobe_insn_slot_table[i];
- hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
printk("####### find U tgid=%u, addr=%x\n",
p->tgid, p->addr);
}
struct kprobe *p;
head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
- hlist_for_each_entry_rcu(p, node, head, hlist) {
+ swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
if (p->addr == addr && kp2up(p)->task->tgid == tgid) {
return p;
}
/* TODO: test - two processes invokes instrumented function */
head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
- hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
return p;
}
/* TODO: test - two processes invokes instrumented function */
head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
- hlist_for_each_entry_rcu(p, node, head, is_hlist_thumb) {
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_thumb) {
if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
return p;
}
/* TODO: test - two processes invokes instrumented function */
head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
- hlist_for_each_entry_rcu(p, node, head, is_hlist) {
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
return p;
}
struct hlist_node *node;
struct uretprobe_instance *ri;
- hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
+ swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
return ri;
}
struct hlist_node *node;
struct uretprobe_instance *ri;
- hlist_for_each_entry (ri, node, &rp->free_instances, uflist) {
+ swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
return ri;
}
struct hlist_node *node;
struct uretprobe_instance *ri;
- hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+ swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
return ri;
}
if (!alloc_nodes_uretprobe(rp)) {
- hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+ swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
return ri;
}
}
* real return address, and all the rest will point to
* uretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current) {
/* another task is sharing our hash bucket */
continue;
struct hlist_node *node, *tmp;
struct hlist_head *head = uretprobe_inst_table_head(parent->mm);
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (parent == ri->task) {
dbi_disarm_urp_inst(ri, task);
}
for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
head = &uprobe_table[i];
- hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
+ swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
if (kp2up(p)->task->tgid == task->tgid) {
struct uprobe *up = container_of(p, struct uprobe, kp);
printk("dbi_unregister_all_uprobes: delete uprobe at %p[%lx] for %s/%d\n",