kernel_probe_t *p;
struct hlist_node *node;
- hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
+ swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
{
partial_result = register_kernel_probe (p);
if (partial_result)
kernel_probe_t *p;
struct hlist_node *node;
- hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
+ swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
unregister_kernel_probe (p);
- hlist_for_each_entry_rcu (p, node, &otg_kernel_probes, hlist) {
+ swap_hlist_for_each_entry_rcu (p, node, &otg_kernel_probes, hlist) {
unregister_kernel_probe(p);
}
struct hlist_node *node, *tnode;
kernel_probe_t *p;
- hlist_for_each_entry_safe (p, node, tnode, &kernel_probes, hlist) {
+ swap_hlist_for_each_entry_safe (p, node, tnode, &kernel_probes, hlist) {
if (p->addr == pf_addr) {
probes_flags &= ~PROBE_FLAG_PF_INSTLD;
pf_probe = NULL;
kfree(p);
}
- hlist_for_each_entry_safe (p, node, tnode, &otg_kernel_probes, hlist) {
+ swap_hlist_for_each_entry_safe (p, node, tnode, &otg_kernel_probes, hlist) {
if (p->addr == pf_addr) {
probes_flags &= ~PROBE_FLAG_PF_INSTLD;
pf_probe = NULL;
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry_rcu(page, node, head, hlist) {
+ swap_hlist_for_each_entry_rcu(page, node, head, hlist) {
print_page_probes(page);
}
}
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry_safe(page, p, n, head, hlist) {
+ swap_hlist_for_each_entry_safe(page, p, n, head, hlist) {
hlist_del(&page->hlist);
sspt_page_free(page);
}
// copy pages
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry(page, node, head, hlist) {
+ swap_hlist_for_each_entry(page, node, head, hlist) {
sspt_add_page(file_out, sspt_page_copy(page));
}
}
struct sspt_page *page;
head = &file->page_probes_table[hash_ptr((void *)offset, file->page_probes_hash_bits)];
- hlist_for_each_entry(page, node, head, hlist) {
+ swap_hlist_for_each_entry(page, node, head, hlist) {
if (page->offset == offset) {
return page;
}
list_del_rcu(&dbi_mhi->dbi_list_head);
// Next code block is for far future possible usage in case when removing will be implemented for unsafe state
// (i.e. between attach and stop)
- /*hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist) {
+ /*swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist) {
// XXX: absent code for pre_handlers because we suppose that they are not used
if ((p->jprobe.entry != ((kprobe_pre_entry_handler_t )def_jprobe_event_pre_handler)) ||
(p->retprobe.handler != ((kretprobe_handler_t )def_retprobe_event_handler))) {
struct hlist_node *node;
//check if such probe does exist
- hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
+ swap_hlist_for_each_entry_rcu (p, node, &kernel_probes, hlist)
if (p->addr == addr)
break;
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry_rcu(page, node, head, hlist) {
+ swap_hlist_for_each_entry_rcu(page, node, head, hlist) {
if (page_present(mm, page->offset)) {
register_us_page_probe(page, file, task);
}
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
+ swap_hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
if (page->install) {
return 1;
}
for (i = 0; i < table_size; ++i) {
head = &file->page_probes_table[i];
- hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
+ swap_hlist_for_each_entry_safe (page, node, tmp, head, hlist) {
err = unregister_us_page_probe(task, page, flag);
if (err != 0) {
// TODO: ERROR
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
- hlist_for_each_entry_safe (ri, node, tmp, head, hlist)
+ swap_hlist_for_each_entry_safe (ri, node, tmp, head, hlist)
{
if (ri->task != current)
/* another task is sharing our hash bucket */
crp->kp.addr, *kaddrs[0], *kaddrs[1], *kaddrs[2]);
DIE(die_msg, regs); */
// look for other instances for the same retprobe
- hlist_for_each_entry_safe (ri, node, tmp, head, hlist)
+ swap_hlist_for_each_entry_safe (ri, node, tmp, head, hlist)
{
/*
* Trying to find another retprobe instance associated with
#include <linux/slab.h>
#include <linux/spinlock.h>
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long pgoff, unsigned long *populate);
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot,
unsigned long flags, unsigned long pgoff);
-
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
struct hlist_head kprobe_insn_pages;
struct hlist_head uprobe_insn_pages;
}
// FIXME: its seems to be bad decision to replace 'current' pointer temporarily
current_thread_info()->task = task;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+	{
+		/*
+		 * Since 3.9, do_mmap_pgoff() takes an 'unsigned long *populate'
+		 * out-parameter and writes through it unconditionally, so a
+		 * NULL (0) argument would oops the kernel.  Pass a throwaway
+		 * local; the populate count is not needed here.
+		 */
+		unsigned long populate;
+
+		ret = do_mmap_pgoff(NULL, 0, len, prot, flags, 0, &populate);
+	}
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
	ret = do_mmap_pgoff(NULL, 0, len, prot, flags, 0);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+
current_thread_info()->task = otask;
if (!atomic) {
downgrade_write (&mm->mmap_sem);
struct hlist_node *pos;
struct hlist_head *page_list = task ? &uprobe_insn_pages : &kprobe_insn_pages;
- hlist_for_each_entry_rcu(kip, pos, page_list, hlist) {
+ swap_hlist_for_each_entry_rcu(kip, pos, page_list, hlist) {
if (!task || (kip->task->tgid == task->tgid)) {
free_slot = chunk_allocate(&kip->chunk, slot_size(task));
if (free_slot == NULL) {
struct kprobe_insn_page *kip;
struct hlist_node *pos;
- hlist_for_each_entry_rcu(kip, pos, page_list, hlist) {
+ swap_hlist_for_each_entry_rcu(kip, pos, page_list, hlist) {
if (!(!task || (kip->task->tgid == task->tgid)))
continue;
//TODO: test - two processes invokes instrumented function
head = &uprobe_insn_slot_table[hash_ptr (addr, KPROBE_HASH_BITS)];
- hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
+ swap_hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
if (p->ainsn.insn == addr && tgid == p->tgid) {
retVal = p;
break;
//TODO: test - two processes invokes instrumented function
head = &uprobe_insn_slot_table[hash_ptr (addr, KPROBE_HASH_BITS)];
- hlist_for_each_entry_rcu (p, node, head, is_hlist_thumb) {
+ swap_hlist_for_each_entry_rcu (p, node, head, is_hlist_thumb) {
if (p->ainsn.insn == addr && tgid == p->tgid) {
retVal = p;
break;
struct kprobe *p, *retVal = NULL;
head = &kprobe_table[hash_ptr (addr, KPROBE_HASH_BITS)];
- hlist_for_each_entry_rcu(p, node, head, hlist) {
+ swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
if (p->addr == addr && p->tgid == tgid) {
retVal = p;
break;
{
struct hlist_node *node;
struct kretprobe_instance *ri;
- hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
+ swap_hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
return ri;
if(!alloc_nodes_kretprobe(rp)){
- hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
+ swap_hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
return ri;
}
return NULL;
{
struct hlist_node *node;
struct kretprobe_instance *ri;
- hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
+ swap_hlist_for_each_entry (ri, node, &rp->free_instances, uflist)
return ri;
return NULL;
}
{
struct hlist_node *node;
struct kretprobe_instance *ri;
- hlist_for_each_entry (ri, node, &rp->used_instances, uflist) return ri;
+ swap_hlist_for_each_entry (ri, node, &rp->used_instances, uflist) return ri;
return NULL;
}
struct hlist_node *node, *tmp;
struct hlist_head *head = kretprobe_inst_table_head(parent->mm);
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (parent == ri->task && ri->rp->kp.tgid) {
dbi_disarm_urp_inst(ri, task);
}
}
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36) */
-
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+DECLARE_MOD_FUNC_DEP(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate);
+DECLARE_MOD_DEP_WRAPPER(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff, unsigned long *populate)
+IMP_MOD_DEP_WRAPPER(do_mmap_pgoff, file, addr, len, prot, flags, pgoff, populate)
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
DECLARE_MOD_FUNC_DEP(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff);
DECLARE_MOD_DEP_WRAPPER(do_mmap_pgoff, unsigned long, struct file *file, unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long pgoff)
IMP_MOD_DEP_WRAPPER(do_mmap_pgoff, file, addr, len, prot, flags, pgoff)
static DECLARE_MOD_FUNC_DEP(in_gate_area_no_task, int, unsigned long addr);
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
-
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+static DECLARE_MOD_FUNC_DEP(follow_page_mask, \
+ struct page *, struct vm_area_struct * vma, \
+ unsigned long address, unsigned int foll_flags, \
+ unsigned int *page_mask);
+DECLARE_MOD_DEP_WRAPPER(follow_page_mask, struct page *, \
+ struct vm_area_struct * vma, \
+ unsigned long address, \
+ unsigned int foll_flags, \
+ unsigned int *page_mask)
+IMP_MOD_DEP_WRAPPER (follow_page_mask, vma, address, foll_flags, page_mask)
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
static DECLARE_MOD_FUNC_DEP(follow_page, \
struct page *, struct vm_area_struct * vma, \
unsigned long address, unsigned int foll_flags);
+DECLARE_MOD_DEP_WRAPPER(follow_page, struct page *, \
+ struct vm_area_struct * vma, \
+ unsigned long address, \
+ unsigned int foll_flags)
+IMP_MOD_DEP_WRAPPER (follow_page, vma, address, foll_flags)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
static DECLARE_MOD_FUNC_DEP(__flush_anon_page, \
void, struct vm_area_struct *vma, struct page *page, \
unsigned long vmaddr);
#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
}
-
-#if (LINUX_VERSION_CODE != KERNEL_VERSION(2, 6, 11))
-DECLARE_MOD_DEP_WRAPPER (follow_page, \
- struct page *, struct vm_area_struct * vma, \
- unsigned long address, unsigned int foll_flags)
-IMP_MOD_DEP_WRAPPER (follow_page, vma, address, foll_flags)
-#endif
DECLARE_MOD_DEP_WRAPPER (__flush_anon_page, \
void, struct vm_area_struct *vma, \
struct page *page, unsigned long vmaddr)
int init_module_dependencies(void)
{
+
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
INIT_MOD_DEP_VAR(handle_mm_fault, handle_mm_fault);
#endif
INIT_MOD_DEP_VAR(in_gate_area, in_gate_area);
#endif
-#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+ INIT_MOD_DEP_VAR(follow_page_mask, follow_page_mask);
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+ INIT_MOD_DEP_VAR(follow_page, follow_page);
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
#ifndef is_zero_pfn
swap_zero_pfn = page_to_pfn(ZERO_PAGE(0));
INIT_MOD_DEP_VAR(in_gate_area_no_task, in_gate_area_no_task);
#endif /* (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)) */
- INIT_MOD_DEP_VAR(follow_page, follow_page);
-
INIT_MOD_DEP_VAR(__flush_anon_page, __flush_anon_page);
INIT_MOD_DEP_VAR(vm_normal_page, vm_normal_page);
INIT_MOD_DEP_VAR(access_process_vm, access_process_vm);
#else /*2.6.16 */
INIT_MOD_DEP_VAR(put_task_struct, __put_task_struct_cb);
#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0))
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0)
INIT_MOD_DEP_VAR(do_mmap_pgoff, do_mmap_pgoff);
-#endif
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) */
return 0;
}
return !vma->vm_ops || !vma->vm_ops->fault;
}
-
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
stack_guard_page_end(vma, addr+PAGE_SIZE);
}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0)
+
+/*
+ * __get_user_pages_uprobe() - fault in and pin user pages (>= 3.8 kernels).
+ *
+ * NOTE(review): appears to be a local copy of the 3.9 __get_user_pages()
+ * from mm/memory.c with the cond_resched() calls commented out (presumably
+ * so it is safe to call from atomic context -- TODO confirm).
+ * follow_page_mask() / handle_mm_fault() here resolve to SWAP's
+ * module-dependency wrappers for the non-exported kernel symbols.
+ *
+ * Returns the number of entries filled into @pages/@vmas (when non-NULL),
+ * or a negative errno if no page could be obtained.
+ */
+long __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long start, unsigned long nr_pages,
+		unsigned int gup_flags, struct page **pages,
+		struct vm_area_struct **vmas, int *nonblocking)
+{
+	long i;
+	unsigned long vm_flags;
+	unsigned int page_mask;
+
+	if (!nr_pages)
+		return 0;
+
+	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
+
+	/*
+	 * Require read or write permissions.
+	 * If FOLL_FORCE is set, we only require the "MAY" flags.
+	 */
+	vm_flags = (gup_flags & FOLL_WRITE) ?
+		(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
+	vm_flags &= (gup_flags & FOLL_FORCE) ?
+		(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
+
+	/*
+	 * If FOLL_FORCE and FOLL_NUMA are both set, handle_mm_fault
+	 * would be called on PROT_NONE ranges. We must never invoke
+	 * handle_mm_fault on PROT_NONE ranges or the NUMA hinting
+	 * page faults would unprotect the PROT_NONE ranges if
+	 * _PAGE_NUMA and _PAGE_PROTNONE are sharing the same pte/pmd
+	 * bitflag. So to avoid that, don't set FOLL_NUMA if
+	 * FOLL_FORCE is set.
+	 */
+	if (!(gup_flags & FOLL_FORCE))
+		gup_flags |= FOLL_NUMA;
+
+	i = 0;
+
+	do {
+		struct vm_area_struct *vma;
+
+		vma = find_extend_vma(mm, start);
+		if (!vma && dbi_in_gate_area(tsk, start)) {
+			/* Gate area: no VMA, so resolve the page by walking
+			 * the page tables directly. */
+			unsigned long pg = start & PAGE_MASK;
+			pgd_t *pgd;
+			pud_t *pud;
+			pmd_t *pmd;
+			pte_t *pte;
+
+			/* user gate pages are read-only */
+			if (gup_flags & FOLL_WRITE)
+				return i ? : -EFAULT;
+			if (pg > TASK_SIZE)
+				pgd = pgd_offset_k(pg);
+			else
+				pgd = pgd_offset_gate(mm, pg);
+			BUG_ON(pgd_none(*pgd));
+			pud = pud_offset(pgd, pg);
+			BUG_ON(pud_none(*pud));
+			pmd = pmd_offset(pud, pg);
+			if (pmd_none(*pmd))
+				return i ? : -EFAULT;
+			VM_BUG_ON(pmd_trans_huge(*pmd));
+			pte = pte_offset_map(pmd, pg);
+			if (pte_none(*pte)) {
+				pte_unmap(pte);
+				return i ? : -EFAULT;
+			}
+			vma = get_gate_vma(mm);
+			if (pages) {
+				struct page *page;
+
+				page = vm_normal_page(vma, start, *pte);
+				if (!page) {
+					if (!(gup_flags & FOLL_DUMP) &&
+					     swap_is_zero_pfn(pte_pfn(*pte)))
+						page = pte_page(*pte);
+					else {
+						pte_unmap(pte);
+						return i ? : -EFAULT;
+					}
+				}
+				pages[i] = page;
+				get_page(page);
+			}
+			pte_unmap(pte);
+			page_mask = 0;
+			goto next_page;
+		}
+
+		if (!vma ||
+		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
+		    !(vm_flags & vma->vm_flags))
+			return i ? : -EFAULT;
+
+		if (is_vm_hugetlb_page(vma)) {
+			i = follow_hugetlb_page(mm, vma, pages, vmas,
+					&start, &nr_pages, i, gup_flags);
+			continue;
+		}
+
+		do {
+			struct page *page;
+			unsigned int foll_flags = gup_flags;
+			unsigned int page_increm;
+
+			/*
+			 * If we have a pending SIGKILL, don't keep faulting
+			 * pages and potentially allocating memory.
+			 */
+			if (unlikely(fatal_signal_pending(current)))
+				return i ? i : -ERESTARTSYS;
+
+			/* cond_resched(); */
+			while (!(page = follow_page_mask(vma, start,
+						foll_flags, &page_mask))) {
+				int ret;
+				unsigned int fault_flags = 0;
+
+				/* For mlock, just skip the stack guard page. */
+				if (foll_flags & FOLL_MLOCK) {
+					if (stack_guard_page(vma, start))
+						goto next_page;
+				}
+				if (foll_flags & FOLL_WRITE)
+					fault_flags |= FAULT_FLAG_WRITE;
+				if (nonblocking)
+					fault_flags |= FAULT_FLAG_ALLOW_RETRY;
+				if (foll_flags & FOLL_NOWAIT)
+					fault_flags |= (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT);
+
+				ret = handle_mm_fault(mm, vma, start,
+							fault_flags);
+
+				if (ret & VM_FAULT_ERROR) {
+					if (ret & VM_FAULT_OOM)
+						return i ? i : -ENOMEM;
+					if (ret & (VM_FAULT_HWPOISON |
+						   VM_FAULT_HWPOISON_LARGE)) {
+						if (i)
+							return i;
+						else if (gup_flags & FOLL_HWPOISON)
+							return -EHWPOISON;
+						else
+							return -EFAULT;
+					}
+					if (ret & VM_FAULT_SIGBUS)
+						return i ? i : -EFAULT;
+					BUG();
+				}
+
+				if (tsk) {
+					if (ret & VM_FAULT_MAJOR)
+						tsk->maj_flt++;
+					else
+						tsk->min_flt++;
+				}
+
+				if (ret & VM_FAULT_RETRY) {
+					if (nonblocking)
+						*nonblocking = 0;
+					return i;
+				}
+
+				/*
+				 * The VM_FAULT_WRITE bit tells us that
+				 * do_wp_page has broken COW when necessary,
+				 * even if maybe_mkwrite decided not to set
+				 * pte_write. We can thus safely do subsequent
+				 * page lookups as if they were reads. But only
+				 * do so when looping for pte_write is futile:
+				 * in some cases userspace may also be wanting
+				 * to write to the gotten user page, which a
+				 * read fault here might prevent (a readonly
+				 * page might get reCOWed by userspace write).
+				 */
+				if ((ret & VM_FAULT_WRITE) &&
+				    !(vma->vm_flags & VM_WRITE))
+					foll_flags &= ~FOLL_WRITE;
+
+				/* cond_resched(); */
+			}
+			if (IS_ERR(page))
+				return i ? i : PTR_ERR(page);
+			if (pages) {
+				pages[i] = page;
+
+				flush_anon_page(vma, page, start);
+				flush_dcache_page(page);
+				page_mask = 0;
+			}
+next_page:
+			if (vmas) {
+				vmas[i] = vma;
+				page_mask = 0;
+			}
+			/*
+			 * NOTE(review): page_mask is filled in by
+			 * follow_page_mask(); a non-zero value lets the loop
+			 * step over the remaining subpages of a large page in
+			 * one go, as in the upstream 3.9 __get_user_pages().
+			 */
+			page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+			if (page_increm > nr_pages)
+				page_increm = nr_pages;
+			i += page_increm;
+			start += page_increm * PAGE_SIZE;
+			nr_pages -= page_increm;
+		} while (nr_pages && start < vma->vm_end);
+	} while (nr_pages);
+	return i;
+}
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
static int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int nr_pages, unsigned int gup_flags,
struct page **pages, struct vm_area_struct **vmas,
return i;
}
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 8, 0) */
+
#else /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38) */
+/*
+ * Compatibility shim so the old-kernel __get_user_pages_uprobe() below can
+ * keep a 3-argument follow_page()-style call while the underlying helper
+ * takes the 4-argument follow_page_mask() signature.
+ *
+ * NOTE(review): this sits in the #else branch of the
+ * "LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 38)" guard, yet it calls
+ * follow_page_mask(), whose module-dependency wrapper is only declared for
+ * kernels >= 3.8 elsewhere in this patch -- on genuinely old kernels this
+ * looks like it cannot compile/link; confirm, or call the plain follow_page()
+ * wrapper here instead.
+ */
+static inline struct page *follow_page_uprobe(struct vm_area_struct *vma,
+	unsigned long address, unsigned int foll_flags)
+{
+	unsigned int unused_page_mask;
+	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
+}
+
static int __get_user_pages_uprobe(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, int len, int flags,
struct page **pages, struct vm_area_struct **vmas)
//cond_resched();
DBPRINTF ("pages = %p vma = %p\n", pages, vma);
- while (!(page = follow_page(vma, start, foll_flags))) {
+ while (!(page = follow_page_uprobe(vma, start, foll_flags))) {
int ret;
ret = handle_mm_fault(mm, vma, start,
foll_flags & FOLL_WRITE);
#include <linux/pagemap.h>
#include "../ksyms/ksyms.h"
+/*
+ * Kernel 3.9 removed the 'struct hlist_node *' cursor argument from the
+ * hlist_for_each_entry*() iterator macros.  These shims give the module one
+ * spelling that works on both sides of that change: on >= 3.9 the 'pos'
+ * cursor argument is accepted but ignored.
+ *
+ * NOTE(review): on >= 3.9 callers' now-unused 'struct hlist_node *' locals
+ * may trigger unused-variable warnings -- verify the build is clean.
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
+#define swap_hlist_for_each_entry_rcu(tpos, pos, head, member) hlist_for_each_entry_rcu(tpos, head, member)
+#define swap_hlist_for_each_entry_safe(tpos, pos, n, head, member) hlist_for_each_entry_safe(tpos, n, head, member)
+#define swap_hlist_for_each_entry(tpos, pos, head, member) hlist_for_each_entry(tpos, head, member)
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
+#define swap_hlist_for_each_entry_rcu(tpos, pos, head, member) hlist_for_each_entry_rcu(tpos, pos, head, member)
+#define swap_hlist_for_each_entry_safe(tpos, pos, n, head, member) hlist_for_each_entry_safe(tpos, pos, n, head, member)
+#define swap_hlist_for_each_entry(tpos, pos, head, member) hlist_for_each_entry(tpos, pos, head, member)
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0) */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12))
#define synchronize_sched synchronize_kernel
// print uprobe table
for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
head = &kprobe_table[i];
- hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
+ swap_hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
printk("####### find K tgid=%u, addr=%x\n",
p->tgid, p->addr);
}
// print uprobe table
for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
head = &kretprobe_inst_table[i];
- hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
+ swap_hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
printk("####### find KR tgid=%u, addr=%x\n",
p->tgid, p->addr);
}
// print uprobe table
for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
head = &uprobe_insn_slot_table[i];
- hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
+ swap_hlist_for_each_entry_rcu (p, node, head, is_hlist_arm) {
printk("####### find U tgid=%u, addr=%x\n",
p->tgid, p->addr);
}
for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
head = &kprobe_table[i];
- hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
+ swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
if (p->tgid == task->tgid) {
printk("dbi_unregister_all_uprobes: delete uprobe at %p[%lx] for %s/%d\n",
p->addr, (unsigned long)p->opcode, task->comm, task->pid);