static struct hlist_head slot_table[UPROBE_TABLE_SIZE];
struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];
-DEFINE_SPINLOCK(uretprobe_lock); /* Protects uretprobe_inst_table */
+static DEFINE_MUTEX(urp_mtx); /* Protects uretprobe_inst_table */
static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];
#define DEBUG_PRINT_HASH_TABLE 0
return &uretprobe_inst_table[hash_ptr(hash_key, UPROBE_HASH_BITS)];
}
-/* Called with uretprobe_lock held */
+/* Called with urp_mtx held */
static void add_urp_inst(struct uretprobe_instance *ri)
{
/*
hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}
-/* Called with uretprobe_lock held */
+/* Called with urp_mtx held */
static void recycle_urp_inst(struct uretprobe_instance *ri)
{
if (ri->rp) {
}
}
-/* Called with uretprobe_lock held */
+/* Called with urp_mtx held */
static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
{
struct uretprobe_instance *ri;
/**
 * @brief Gets a free uretprobe instance for the specified uretprobe without
- * allocation. Called with uretprobe_lock held.
+ * allocation. Called with urp_mtx held.
*
* @param rp Pointer to the uretprobe.
* @return Pointer to the uretprobe_instance on success,\n
return NULL;
}
-/* Called with uretprobe_lock held */
+/* Called with urp_mtx held */
static void free_urp_inst(struct uretprobe *rp)
{
struct uretprobe_instance *ri;
return 0;
}
-/* Called with uretprobe_lock held */
+/* Called with urp_mtx held */
static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
{
struct uretprobe_instance *ri;
{
struct uretprobe_instance *ri = NULL;
struct uprobe *up;
- struct hlist_head *head;
- unsigned long flags, tramp_addr, orig_ret_addr = 0;
+ struct hlist_head *head = uretprobe_inst_table_head(current->mm);
+ unsigned long tramp_addr = arch_get_trampoline_addr(p, regs);
+ unsigned long orig_ret_addr = 0;
struct hlist_node *tmp;
DECLARE_NODE_PTR_FOR_HLIST(node);
- tramp_addr = arch_get_trampoline_addr(p, regs);
- spin_lock_irqsave(&uretprobe_lock, flags);
-
- head = uretprobe_inst_table_head(current->mm);
-
/*
* It is possible to have multiple instances associated with a given
 * task either because multiple functions in the call path
* real return address, and all the rest will point to
* uretprobe_trampoline
*/
+ mutex_lock(&urp_mtx);
swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
if (ri->task != current) {
/* another task is sharing our hash bucket */
break;
}
}
-	spin_unlock_irqrestore(&uretprobe_lock, flags);
+	mutex_unlock(&urp_mtx);
/* orig_ret_addr is NULL when there is no need to restore anything
* (all the magic is performed inside handler) */
if (likely(orig_ret_addr))
int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
#endif
struct uretprobe_instance *ri;
- unsigned long flags;
int ret = 0;
#ifdef CONFIG_ARM
	/* TODO: consider swapping the
	 * RA only after the last pre_handler has fired */
- spin_lock_irqsave(&uretprobe_lock, flags);
/* TODO: test - remove retprobe after func entry but before its exit */
+ mutex_lock(&urp_mtx);
ri = get_free_urp_inst(rp);
if (ri != NULL) {
int err;
#ifdef CONFIG_ARM
ri->preload.use = false;
#endif
-
if (rp->entry_handler)
ret = rp->entry_handler(ri, regs);
- err = arch_prepare_uretprobe(ri, regs);
add_urp_inst(ri);
+
+ err = arch_prepare_uretprobe(ri, regs);
if (err) {
recycle_urp_inst(ri);
++rp->nmissed;
} else {
++rp->nmissed;
}
-
- spin_unlock_irqrestore(&uretprobe_lock, flags);
+ mutex_unlock(&urp_mtx);
return ret;
}
*/
void __swap_unregister_uretprobe(struct uretprobe *rp, int disarm)
{
- unsigned long flags;
struct uretprobe_instance *ri;
__swap_unregister_uprobe(&rp->up, disarm);
- spin_lock_irqsave(&uretprobe_lock, flags);
+ mutex_lock(&urp_mtx);
while ((ri = get_used_urp_inst(rp)) != NULL) {
- bool is_current = ri->task == current;
-
- if (is_current)
- spin_unlock_irqrestore(&uretprobe_lock, flags);
-
		/* FIXME: arch_disarm_urp_inst() for non-current context */
if (arch_disarm_urp_inst(ri, ri->task, 0) != 0)
printk(KERN_INFO "%s (%d/%d): "
ri->task->comm, ri->task->tgid, ri->task->pid,
(unsigned long)rp->up.addr);
- if (is_current)
- spin_lock_irqsave(&uretprobe_lock, flags);
-
recycle_urp_inst(ri);
}
ri->rp = NULL;
hlist_del(&ri->uflist);
}
- spin_unlock_irqrestore(&uretprobe_lock, flags);
+ mutex_unlock(&urp_mtx);
free_urp_inst(rp);
}
arch_disarm_urp_inst(&ri, task, tramp);
}
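+
+/**
+ * @brief Disarms and/or recycles all uretprobe instances of a task.
+ *
+ * @param armed Task whose uretprobe instances are looked up.
+ * @param will_disarm Task to pass to arch_disarm_urp_inst(), or NULL to
+ * skip disarming.
+ * @param recycle Whether to return each instance to its uretprobe's free list.
+ * @return Void.
+ */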
+void swap_uretprobe_free_task(struct task_struct *armed,
+ struct task_struct *will_disarm, bool recycle)
+{
+ struct uretprobe_instance *ri;
+ struct hlist_head *hhead = uretprobe_inst_table_head(armed->mm);
+ struct hlist_node *n;
+ DECLARE_NODE_PTR_FOR_HLIST(node);
+
+ mutex_lock(&urp_mtx);
+ swap_hlist_for_each_entry_safe(ri, node, n, hhead, hlist) {
+ if (armed != ri->task)
+ continue;
+
+ if (will_disarm)
+ arch_disarm_urp_inst(ri, will_disarm, 0);
+
+ if (recycle)
+ recycle_urp_inst(ri);
+ }
+ mutex_unlock(&urp_mtx);
+}
+EXPORT_SYMBOL_GPL(swap_uretprobe_free_task);
+
void urinst_info_get_current_hlist(struct hlist_head *head, bool recycle)
{
- unsigned long flags;
struct task_struct *task = current;
struct uretprobe_instance *ri;
struct hlist_head *hhead;
struct hlist_node *last = NULL;
DECLARE_NODE_PTR_FOR_HLIST(node);
- spin_lock_irqsave(&uretprobe_lock, flags);
+ mutex_lock(&urp_mtx);
hhead = uretprobe_inst_table_head(task->mm);
swap_hlist_for_each_entry_safe(ri, node, n, hhead, hlist) {
if (task == ri->task) {
recycle_urp_inst(ri);
}
}
- spin_unlock_irqrestore(&uretprobe_lock, flags);
+ mutex_unlock(&urp_mtx);
}
EXPORT_SYMBOL_GPL(urinst_info_get_current_hlist);
* copy_process() *
******************************************************************************
*/
-static void func_uinst_creare(struct sspt_ip *ip, void *data)
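+/* disarm the uprobe behind @ip for the child task passed in @data */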
+static void disarm_ip(struct sspt_ip *ip, void *data)
{
- struct hlist_head *head = (struct hlist_head *)data;
+ struct task_struct *child = (struct task_struct *)data;
struct uprobe *up;
up = probe_info_get_uprobe(ip->desc->type, ip);
- if (up) {
- struct uinst_info *uinst;
- unsigned long vaddr = (unsigned long)up->addr;
-
- uinst = uinst_info_create(vaddr, up->opcode);
- if (uinst)
- hlist_add_head(&uinst->hlist, head);
- }
-}
-
-static void disarm_for_task(struct task_struct *child, struct hlist_head *head)
-{
- struct uinst_info *uinst;
- struct hlist_node *tmp;
- DECLARE_NODE_PTR_FOR_HLIST(node);
-
- swap_hlist_for_each_entry_safe(uinst, node, tmp, head, hlist) {
- uinst_info_disarm(uinst, child);
- hlist_del(&uinst->hlist);
- uinst_info_destroy(uinst);
- }
+ if (up)
+ disarm_uprobe(up, child);
}
-struct clean_data {
- struct task_struct *task;
-
- struct hlist_head head;
- struct hlist_head rhead;
-};
-
static atomic_t rm_uprobes_child_cnt = ATOMIC_INIT(0);
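+/* jumper callback: runs when the probed copy_process() returns in the parent */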
static unsigned long cb_clean_child(void *data)
{
- struct clean_data *cdata = (struct clean_data *)data;
- struct task_struct *child = cdata->task;
+ struct task_struct *parent = current;
+ struct sspt_proc *proc;
- /* disarm up for child */
- disarm_for_task(child, &cdata->head);
+ proc = sspt_proc_by_task(parent);
+ if (proc) {
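+		/* @data points to a copy of the child pointer saved by set_jump_cb() */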
+ struct task_struct *child = *(struct task_struct **)data;
+
+ /* disarm up for child */
+ sspt_proc_on_each_ip(proc, disarm_ip, (void *)child);
- /* disarm urp for child */
- urinst_info_put_current_hlist(&cdata->rhead, child);
+ /* disarm urp for child */
+ swap_uretprobe_free_task(parent, child, false);
+ }
atomic_dec(&rm_uprobes_child_cnt);
return 0;
}
-
static void rm_uprobes_child(struct kretprobe_instance *ri,
struct pt_regs *regs, struct task_struct *child)
{
- struct sspt_proc *proc;
- struct clean_data cdata = {
- .task = child,
- .head = HLIST_HEAD_INIT,
- .rhead = HLIST_HEAD_INIT
- };
+ int ret;
- sspt_proc_write_lock();
- proc = sspt_proc_by_task(current);
- if (proc) {
- sspt_proc_on_each_ip(proc, func_uinst_creare, (void *)&cdata.head);
- urinst_info_get_current_hlist(&cdata.rhead, false);
- }
- sspt_proc_write_unlock();
+ if (!sspt_proc_by_task(current))
+ return;
- if (proc) {
- int ret;
-
- /* set jumper */
- ret = set_jump_cb((unsigned long)ri->ret_addr, regs,
- cb_clean_child, &cdata, sizeof(cdata));
- if (ret == 0) {
- atomic_inc(&rm_uprobes_child_cnt);
- ri->ret_addr = (unsigned long *)get_jump_addr();
- }
+ /* set jumper */
+ ret = set_jump_cb((unsigned long)ri->ret_addr, regs,
+ cb_clean_child, &child, sizeof(child));
+ if (ret == 0) {
+ atomic_inc(&rm_uprobes_child_cnt);
+ ri->ret_addr = (unsigned long *)get_jump_addr();
+ } else {
+ WARN_ON(1);
}
}
down_write(&mm->mmap_sem);
if (task != task->group_leader) {
struct sspt_proc *proc;
- struct hlist_head head = HLIST_HEAD_INIT;
if (task != current) {
pr_err("call mm_release in isn't current context\n");
/* if the thread is killed we need to discard pending
* uretprobe instances which have not triggered yet */
- sspt_proc_write_lock();
proc = sspt_proc_by_task(task);
- if (proc) {
- urinst_info_get_current_hlist(&head, true);
- }
- sspt_proc_write_unlock();
-
- if (proc) {
- /* disarm urp for task */
- urinst_info_put_current_hlist(&head, task);
- }
+ if (proc)
+ swap_uretprobe_free_task(task, task, true);
} else {
call_mm_release(task);
}