INIT_LIST_HEAD(&procs->list);
procs->tgid = tgid;
procs->dentry = dentry;
+ procs->sm = NULL;
INIT_LIST_HEAD(&procs->file_list);
}
#include <linux/types.h>
#include "sspt_file.h"
+struct slot_manager;
+
struct sspt_procs {
struct list_head list;
pid_t tgid;
struct dentry *dentry;
+ struct slot_manager *sm; /* manager for this process's probe instruction slots */
struct list_head file_list;
};
#include "sspt/sspt.h"
#include "java_inst.h"
+#include <dbi_insn_slots.h>
+
#define mm_read_lock(task, mm, atomic, lock) \
mm = atomic ? task->active_mm : get_task_mm(task); \
if (mm == NULL) { \
return !!us_proc_info.path;
}
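+/*
+ * Allocate a region in the target task's address space by temporarily
+ * substituting 'task' for 'current' around do_mmap_pgoff(). In atomic
+ * context the task's active_mm is used and the mmap_sem juggling is
+ * skipped.
+ */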
+static unsigned long alloc_user_pages(struct task_struct *task, unsigned long len, unsigned long prot, unsigned long flags)
+{
+ unsigned long ret = 0;
+ struct task_struct *otask = current;
+ struct mm_struct *mm;
+ int atomic = in_atomic();
+
+ mm = atomic ? task->active_mm : get_task_mm(task);
+ if (mm) {
+ if (!atomic) {
+ if (!down_write_trylock(&mm->mmap_sem)) {
+ rcu_read_lock();
+
+ up_read(&mm->mmap_sem);
+ down_write(&mm->mmap_sem);
+
+ rcu_read_unlock();
+ }
+ }
+ /* FIXME: it seems a bad idea to temporarily replace the 'current' pointer */
+ current_thread_info()->task = task;
+ ret = do_mmap_pgoff(NULL, 0, len, prot, flags, 0);
+ current_thread_info()->task = otask;
+ if (!atomic) {
+ downgrade_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+ } else {
+ printk("proc %d has no mm", task->tgid);
+ }
+
+ return ret;
+}
+
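+/*
+ * slot_manager backend for user-space probes: each page is an anonymous
+ * RWX mapping created inside the instrumented task.
+ */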
+static void *sm_alloc_us(struct slot_manager *sm)
+{
+ struct task_struct *task = sm->data;
+
+ return (void *)alloc_user_pages(task, PAGE_SIZE,
+ PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_ANONYMOUS|MAP_PRIVATE);
+}
+
+static void sm_free_us(struct slot_manager *sm, void *ptr)
+{
+ struct task_struct *task = sm->data;
+
+ /*
+ * This code caused a kernel oops ("scheduling while atomic"), so it is
+ * commented out as a workaround. The workaround leaks memory in the
+ * instrumented process, but instrumentation itself keeps functioning
+ * correctly. A proper fix is planned as part of the KProbe redesign
+ * aimed at better supportability and performance.
+ */
+#if 0
+ struct mm_struct *mm = get_task_mm(task);
+ if (mm) {
+ down_write(&mm->mmap_sem);
+ do_munmap(mm, (unsigned long)(ptr), PAGE_SIZE);
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ }
+#endif
+ /* FIXME: implement the removal of memory for task */
+}
+
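+/*
+ * Create a slot_manager that serves UPROBES_TRAMP_LEN-sized slots from
+ * pages mapped into 'task'.
+ */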
+struct slot_manager *create_sm_us(struct task_struct *task)
+{
+ struct slot_manager *sm = kmalloc(sizeof(*sm), GFP_ATOMIC);
+
+ if (sm == NULL)
+ return NULL;
+
+ sm->slot_size = UPROBES_TRAMP_LEN;
+ sm->alloc = sm_alloc_us;
+ sm->free = sm_free_us;
+ INIT_HLIST_HEAD(&sm->page_list);
+ sm->data = task;
+
+ return sm;
+}
+
+void free_sm_us(struct slot_manager *sm)
+{
+ /* FIXME: free */
+}
+
static struct sspt_procs *get_proc_probes_by_task(struct task_struct *task)
{
struct sspt_procs *procs, *tmp;
struct sspt_procs *procs = get_proc_probes_by_task(task);
if (procs == NULL) {
procs = sspt_procs_copy(us_proc_info.pp, task);
+ procs->sm = create_sm_us(task);
add_proc_probes(task, procs);
}
{
DPRINTF("task found. installing probes");
us_proc_info.tgid = task->pid;
+ us_proc_info.pp->sm = create_sm_us(task);
install_proc_probes(task, us_proc_info.pp, 0);
put_task_struct (task);
}
if (tgid) {
us_proc_info.tgid = gl_nNotifyTgid = tgid;
+ us_proc_info.pp->sm = create_sm_us(task);
/* install probes in already mapped memory */
install_proc_probes(task, us_proc_info.pp, 1);
}
ip->jprobe.priv_arg = ip;
ip->jprobe.up.task = task;
+ ip->jprobe.up.sm = ip->page->file->procs->sm;
ret = dbi_register_ujprobe(&ip->jprobe, atomic);
if (ret) {
if (ret == -ENOEXEC) {
ip->retprobe.priv_arg = ip;
ip->retprobe.up.task = task;
+ ip->retprobe.up.sm = ip->page->file->procs->sm;
ret = dbi_register_uretprobe(&ip->retprobe, atomic);
if (ret) {
EPRINTF ("dbi_register_uretprobe() failure %d", ret);
}
EXPORT_SYMBOL_GPL(arch_check_insn_arm);
-int arch_prepare_kprobe(struct kprobe *p, struct hlist_head *page_list)
+int arch_prepare_kprobe(struct kprobe *p, struct slot_manager *sm)
{
kprobe_opcode_t insns[KPROBES_TRAMP_LEN];
int uregs, pc_dep, ret = 0;
struct arch_specific_insn ainsn;
/* insn: must be on special executable page on i386. */
- p->ainsn.insn = get_insn_slot(NULL, page_list, 0);
+ p->ainsn.insn = alloc_insn_slot(sm);
if (!p->ainsn.insn)
return -ENOMEM;
// check instructions that write their result to SP and use PC
if (pc_dep && (ARM_INSN_REG_RD(ainsn.insn[0]) == 13)) {
- free_insn_slot(page_list, NULL, p->ainsn.insn);
+ free_insn_slot(sm, p->ainsn.insn);
ret = -EFAULT;
} else {
if (uregs && pc_dep) {
memcpy(insns, pc_dep_insn_execbuf, sizeof(insns));
if (prep_pc_dep_insn_execbuf(insns, insn[0], uregs) != 0) {
DBPRINTF ("failed to prepare exec buffer for insn %lx!", insn[0]);
- free_insn_slot(page_list, NULL, p->ainsn.insn);
+ free_insn_slot(sm, p->ainsn.insn);
return -EINVAL;
}
insns[6] = (kprobe_opcode_t)(p->addr + 2);
#endif
}
} else {
- free_insn_slot(page_list, NULL, p->ainsn.insn);
+ free_insn_slot(sm, p->ainsn.insn);
printk("arch_prepare_kprobe: instruction 0x%lx not instrumentation, addr=0x%p\n", insn[0], p->addr);
}
int arch_check_insn_arm(struct arch_specific_insn *ainsn);
int prep_pc_dep_insn_execbuf(kprobe_opcode_t *insns, kprobe_opcode_t insn, int uregs);
+struct slot_manager;
struct kretprobe;
-int arch_prepare_kprobe(struct kprobe *p, struct hlist_head *page_list);
+int arch_prepare_kprobe(struct kprobe *p, struct slot_manager *sm);
void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs);
void arch_arm_kprobe(struct kprobe *p);
* 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
* Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
* 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
- * 2012 Vyacheslav Cherkashin <v.cherkashin@samsung.com> new memory allocator for slots
+ * 2012-2013 Vyacheslav Cherkashin <v.cherkashin@samsung.com> new memory allocator for slots
*/
#include "dbi_insn_slots.h"
-#include "dbi_kdebug.h"
-
-#include <linux/hash.h>
-#include <linux/mman.h>
-#include <linux/hugetlb.h>
-
+#include <linux/module.h>
+#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/module.h>
-
-
-extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
- unsigned long len, unsigned long prot,
- unsigned long flags, unsigned long pgoff);
struct chunk {
unsigned long *data;
unsigned long *index;
};
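+/*
+ * One page of slots plus its allocation index, chained on
+ * slot_manager.page_list. Unlike the old kprobe_insn_page there is no
+ * task pointer here: ownership is tracked by the slot_manager itself.
+ */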
-struct kprobe_insn_page
+struct fixed_alloc
{
struct hlist_node hlist;
-
struct chunk chunk;
- struct task_struct *task;
};
static void chunk_init(struct chunk *chunk, void *data, size_t size, size_t size_block)
return (chunk->count_available == chunk->size);
}
-static unsigned long alloc_user_pages(struct task_struct *task, unsigned long len, unsigned long prot, unsigned long flags, int atomic)
-{
- unsigned long ret = 0;
- struct task_struct *otask = current;
- struct mm_struct *mm;
-
- mm = atomic ? task->active_mm : get_task_mm (task);
- if (mm) {
- if (!atomic) {
- if (!down_write_trylock(&mm->mmap_sem)) {
- rcu_read_lock();
-
- up_read(&mm->mmap_sem);
- down_write(&mm->mmap_sem);
-
- rcu_read_unlock();
- }
- }
- // FIXME: its seems to be bad decision to replace 'current' pointer temporarily
- current_thread_info()->task = task;
- ret = do_mmap_pgoff(NULL, 0, len, prot, flags, 0);
- current_thread_info()->task = otask;
- if (!atomic) {
- downgrade_write (&mm->mmap_sem);
- mmput(mm);
- }
- } else {
- printk("proc %d has no mm", task->tgid);
- }
-
- return ret;
-}
-
-static void *page_new(struct task_struct *task, int atomic)
-{
- if (task) {
- return (void *)alloc_user_pages(task, PAGE_SIZE,
- PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_ANONYMOUS|MAP_PRIVATE/*MAP_SHARED*/, atomic);
- } else {
- return kmalloc(PAGE_SIZE, GFP_ATOMIC);
- }
-}
-
-static void page_free(void *data, struct task_struct *task)
-{
- if (task) {
- //E. G.: This code provides kernel dump because of rescheduling while atomic.
- //As workaround, this code was commented. In this case we will have memory leaks
- //for instrumented process, but instrumentation process should functionate correctly.
- //Planned that good solution for this problem will be done during redesigning KProbe
- //for improving supportability and performance.
-#if 0
- mm = get_task_mm (task);
- if (mm) {
- down_write (&mm->mmap_sem);
- do_munmap(mm, (unsigned long)(data), PAGE_SIZE);
- up_write (&mm->mmap_sem);
- mmput(mm);
- }
-#endif
- // FIXME: implement the removal of memory for task
- } else {
- kfree(data);
- }
-}
-
-static inline size_t slot_size(struct task_struct *task)
-{
- if (task) {
- return UPROBES_TRAMP_LEN;
- } else {
- return KPROBES_TRAMP_LEN;
- }
-}
-
-static struct kprobe_insn_page *kip_new(struct task_struct *task, int atomic)
+static struct fixed_alloc *create_fixed_alloc(struct slot_manager *sm)
{
void *data;
- struct kprobe_insn_page *kip;
+ struct fixed_alloc *fa;
- kip = kmalloc(sizeof(*kip), GFP_ATOMIC);
- if (kip == NULL) {
+ fa = kmalloc(sizeof(*fa), GFP_ATOMIC);
+ if (fa == NULL) {
return NULL;
}
- data = page_new(task, atomic);
+ data = sm->alloc(sm);
if(data == NULL) {
- kfree(kip);
+ kfree(fa);
return NULL;
}
- chunk_init(&kip->chunk, data, PAGE_SIZE/sizeof(unsigned long), slot_size(task));
- kip->task = task;
+ chunk_init(&fa->chunk, data, PAGE_SIZE/sizeof(unsigned long), sm->slot_size);
- return kip;
+ return fa;
}
-static void kip_free(struct kprobe_insn_page * kip)
+static void free_fixed_alloc(struct slot_manager *sm, struct fixed_alloc *fa)
{
- chunk_uninit(&kip->chunk);
- page_free(kip->chunk.data, kip->task);
- kfree(kip);
+ chunk_uninit(&fa->chunk);
+ sm->free(sm, fa->chunk.data);
+ kfree(fa);
}
-/**
- * get_us_insn_slot() - Find a slot on an executable page for an instruction.
- * We allocate an executable page if there's no room on existing ones.
- */
-kprobe_opcode_t *get_insn_slot(struct task_struct *task, struct hlist_head *page_list, int atomic)
+
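+/*
+ * alloc_insn_slot() - take a free slot from an existing page, or pull a
+ * fresh page from the manager's backend when all pages are full.
+ */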
+void *alloc_insn_slot(struct slot_manager *sm)
{
- kprobe_opcode_t * free_slot;
- struct kprobe_insn_page *kip;
+ void *free_slot;
+ struct fixed_alloc *fa;
struct hlist_node *pos;
- hlist_for_each_entry_rcu(kip, pos, page_list, hlist) {
- if (!task || (kip->task->tgid == task->tgid)) {
- free_slot = chunk_allocate(&kip->chunk, slot_size(task));
- if (free_slot == NULL) {
- break;
- }
-
+ hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
+ free_slot = chunk_allocate(&fa->chunk, sm->slot_size);
+ if (free_slot)
return free_slot;
- }
}
- kip = kip_new(task, atomic);
- if(kip == NULL)
+ fa = create_fixed_alloc(sm);
+ if (fa == NULL)
return NULL;
- INIT_HLIST_NODE (&kip->hlist);
- hlist_add_head_rcu(&kip->hlist, page_list);
+ INIT_HLIST_NODE(&fa->hlist);
+ hlist_add_head_rcu(&fa->hlist, &sm->page_list);
- return chunk_allocate(&kip->chunk, slot_size(task));
+ return chunk_allocate(&fa->chunk, sm->slot_size);
}
-EXPORT_SYMBOL_GPL(get_insn_slot);
+EXPORT_SYMBOL_GPL(alloc_insn_slot);
-void free_insn_slot(struct hlist_head *page_list, struct task_struct *task, kprobe_opcode_t *slot)
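+/*
+ * free_insn_slot() - return a slot to its page; once the page is
+ * completely free it is unhooked from the list and handed back to the
+ * backend.
+ */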
+void free_insn_slot(struct slot_manager *sm, void *slot)
{
- struct kprobe_insn_page *kip;
+ struct fixed_alloc *fa;
struct hlist_node *pos;
- hlist_for_each_entry_rcu(kip, pos, page_list, hlist) {
- if (!(!task || (kip->task->tgid == task->tgid)))
- continue;
-
- if (!chunk_check_ptr(&kip->chunk, slot, PAGE_SIZE))
+ hlist_for_each_entry_rcu(fa, pos, &sm->page_list, hlist) {
+ if (!chunk_check_ptr(&fa->chunk, slot, PAGE_SIZE))
continue;
- chunk_deallocate(&kip->chunk, slot, slot_size(task));
+ chunk_deallocate(&fa->chunk, slot, sm->slot_size);
- if (chunk_free(&kip->chunk)) {
- hlist_del_rcu(&kip->hlist);
- kip_free(kip);
+ if (chunk_free(&fa->chunk)) {
+ hlist_del_rcu(&fa->hlist);
+ free_fixed_alloc(sm, fa);
}
return;
-#ifndef _SRC_INSNS_SLOTS_H
-#define _SRC_INSNS_SLOTS_H
+#ifndef _DBI_INSNS_SLOTS_H
+#define _DBI_INSNS_SLOTS_H
/*
* Kernel Probes (KProbes)
* 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
* Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
* 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
- *
+ * 2012-2013 Vyacheslav Cherkashin <v.cherkashin@samsung.com> new memory allocator for slots
*/
-#include "dbi_kprobes.h"
+#include <linux/types.h>
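+
+/*
+ * A slot_manager hands out fixed-size instruction slots carved from whole
+ * pages. Backends differ only in the callbacks: the kernel-space manager
+ * uses kmalloc/kfree, the user-space one maps pages into the target task
+ * ('data' then holds the task_struct). Typical use:
+ *
+ *   slot = alloc_insn_slot(sm);
+ *   ...
+ *   free_insn_slot(sm, slot);
+ */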
+struct slot_manager {
+ unsigned long slot_size;
+ void *(*alloc)(struct slot_manager *sm);
+ void (*free)(struct slot_manager *sm, void *ptr);
+ struct hlist_head page_list;
+ void *data;
+};
-kprobe_opcode_t *get_insn_slot(struct task_struct *task, struct hlist_head *page_list, int atomic);
-void free_insn_slot(struct hlist_head *page_list, struct task_struct *task, kprobe_opcode_t *slot);
+void *alloc_insn_slot(struct slot_manager *sm);
+void free_insn_slot(struct slot_manager *sm, void *slot);
-#endif /* _SRC_INSNS_SLOTS_H */
+#endif /* _DBI_INSNS_SLOTS_H */
#include <linux/mm.h>
#include <linux/pagemap.h>
-struct hlist_head kprobe_insn_pages;
+struct slot_manager sm; /* slot manager for kernel-space probe slots */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
static DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
atomic_t kprobe_count;
EXPORT_SYMBOL_GPL(kprobe_count);
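+/* Kernel-space slot_manager backend: slots live in kmalloc'ed pages. */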
+static void *sm_alloc(struct slot_manager *sm)
+{
+ return kmalloc(PAGE_SIZE, GFP_ATOMIC);
+}
+
+static void sm_free(struct slot_manager *sm, void *ptr)
+{
+ kfree(ptr);
+}
+
+static void init_sm(void)
+{
+ sm.slot_size = KPROBES_TRAMP_LEN;
+ sm.alloc = sm_alloc;
+ sm.free = sm_free;
+ INIT_HLIST_HEAD(&sm.page_list);
+}
+
+static void exit_sm(void)
+{
+ /* FIXME: free */
+}
+
void kretprobe_assert(struct kretprobe_instance *ri, unsigned long orig_ret_address, unsigned long trampoline_address)
{
if (!orig_ret_address || (orig_ret_address == trampoline_address)) {
static void remove_kprobe(struct kprobe *p)
{
/* TODO: check boostable for x86 and MIPS */
- free_insn_slot(&kprobe_insn_pages, NULL, p->ainsn.insn);
+ free_insn_slot(&sm, p->ainsn.insn);
}
int dbi_register_kprobe(struct kprobe *p)
goto out;
}
- if ((ret = arch_prepare_kprobe(p, &kprobe_insn_pages)) != 0)
+ if ((ret = arch_prepare_kprobe(p, &sm)) != 0)
goto out;
DBPRINTF ("before out ret = 0x%x\n", ret);
{
int i, err = 0;
+ init_sm();
+
/* FIXME allocate the probe table, currently defined statically */
/* initialize all list heads */
for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
static void __exit exit_kprobes(void)
{
arch_exit_kprobes();
+ exit_sm();
}
module_init(init_kprobes);
}
p->opcode = insn[0];
- p->ainsn.insn_arm = get_insn_slot(task, page_list, atomic);
+ p->ainsn.insn_arm = alloc_insn_slot(up->sm);
if (!p->ainsn.insn_arm) {
printk("Error in %s at %d: kprobe slot allocation error (arm)\n", __FILE__, __LINE__);
return -ENOMEM;
ret = arch_copy_trampoline_arm_uprobe(p, task, 1);
if (ret) {
- free_insn_slot(page_list, task, p->ainsn.insn_arm);
+ free_insn_slot(up->sm, p->ainsn.insn_arm);
return -EFAULT;
}
- p->ainsn.insn_thumb = get_insn_slot(task, page_list, atomic);
+ p->ainsn.insn_thumb = alloc_insn_slot(up->sm);
if (!p->ainsn.insn_thumb) {
printk("Error in %s at %d: kprobe slot allocation error (thumb)\n", __FILE__, __LINE__);
return -ENOMEM;
ret = arch_copy_trampoline_thumb_uprobe(p, task, 1);
if (ret) {
- free_insn_slot(page_list, task, p->ainsn.insn_arm);
- free_insn_slot(page_list, task, p->ainsn.insn_thumb);
+ free_insn_slot(up->sm, p->ainsn.insn_arm);
+ free_insn_slot(up->sm, p->ainsn.insn_thumb);
return -EFAULT;
}
panic("Failed to write memory %p!\n", p->addr);
}
- free_insn_slot(page_list, task, p->ainsn.insn_arm);
- free_insn_slot(page_list, task, p->ainsn.insn_thumb);
+ free_insn_slot(up->sm, p->ainsn.insn_arm);
+ free_insn_slot(up->sm, p->ainsn.insn_thumb);
return -EFAULT;
}
struct task_struct *task = up->task;
#ifdef CONFIG_ARM
- free_insn_slot(&uprobe_insn_pages, task, p->ainsn.insn_arm);
- free_insn_slot(&uprobe_insn_pages, task, p->ainsn.insn_thumb);
+ free_insn_slot(up->sm, p->ainsn.insn_arm);
+ free_insn_slot(up->sm, p->ainsn.insn_thumb);
#else /* CONFIG_ARM */
- free_insn_slot(&uprobe_insn_pages, task, p->ainsn.insn);
+ free_insn_slot(up->sm, p->ainsn.insn);
#endif /* CONFIG_ARM */
}
struct uprobe {
struct kprobe kp;
struct task_struct *task;
+ struct slot_manager *sm; /* where this probe's instruction slots are allocated */
};
typedef unsigned long (*uprobe_pre_entry_handler_t)(void *priv_arg, struct pt_regs * regs);