return 0; // ok - life is life
}
-void patch_suspended_task_ret_addr(struct task_struct *p, struct kretprobe *rp)
-{
- struct kretprobe_instance *ri = NULL;
- struct hlist_node *node, *tmp;
- struct hlist_head *head;
- unsigned long flags;
- int found = 0;
-
- spin_lock_irqsave (&kretprobe_lock, flags);
- head = kretprobe_inst_table_head (p);
- hlist_for_each_entry_safe (ri, node, tmp, head, hlist){
- if ((ri->rp == rp) && (p == ri->task)){
- found = 1;
- break;
- }
- }
- spin_unlock_irqrestore (&kretprobe_lock, flags);
-
-#ifndef task_thread_info
-#define task_thread_info(task) (task)->thread_info
-#endif // task_thread_info
-
- if (found){
- // update PC
- if(thread_saved_pc(p) != (unsigned long)&kretprobe_trampoline){
- ri->ret_addr = (kprobe_opcode_t *)thread_saved_pc(p);
- task_thread_info(p)->cpu_context.pc = (unsigned long) &kretprobe_trampoline;
- }
- return;
- }
-
- spin_lock_irqsave (&kretprobe_lock, flags);
- if ((ri = get_free_rp_inst(rp)) != NULL)
- {
- ri->rp = rp;
- ri->rp2 = NULL;
- ri->task = p;
- ri->ret_addr = (kprobe_opcode_t *)thread_saved_pc(p);
- task_thread_info(p)->cpu_context.pc = (unsigned long) &kretprobe_trampoline;
- add_rp_inst (ri);
- // printk("change2 saved pc %p->%p for %d/%d/%p\n", ri->ret_addr, &kretprobe_trampoline, p->tgid, p->pid, p);
- }
- else{
- printk("no ri for %d\n", p->pid);
- BUG();
- }
- spin_unlock_irqrestore (&kretprobe_lock, flags);
-}
-
int setjmp_pre_handler (struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of (p, struct jprobe, kp);
//call handler for all kernel probes and user space ones which belong to current tgid
if (!p->tgid || (p->tgid == current->tgid))
{
- if(!p->tgid && ((unsigned int)p->addr == sched_addr) && sched_rp){
- struct task_struct *p, *g;
- rcu_read_lock();
- //swapper task
- if(current != &init_task)
- patch_suspended_task_ret_addr(&init_task, sched_rp);
- // other tasks
- do_each_thread(g, p){
- if(p == current)
- continue;
- patch_suspended_task_ret_addr(p, sched_rp);
- } while_each_thread(g, p);
- rcu_read_unlock();
+ if(!p->tgid && ((unsigned int)p->addr == sched_addr) && sched_rp) {
+ patch_suspended_all_task_ret_addr(sched_rp);
}
if (pre_entry)
p->ss_addr = (void *)pre_entry (jp->priv_arg, regs);
}
orig_ret_address = (unsigned long) ri->ret_addr;
- recycle_rp_inst (ri, &empty_rp);
+ recycle_rp_inst (ri);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
#ifdef CONFIG_CPU_S3C2443
#define BREAKPOINT_INSTRUCTION 0xe1200070
-#else
+#else
#define BREAKPOINT_INSTRUCTION 0xffffdeff
#endif /* CONFIG_CPU_S3C2443 */
#ifdef CONFIG_CPU_S3C2443
#define UNDEF_INSTRUCTION 0xe1200071
-#else
+#else
#define UNDEF_INSTRUCTION 0xfffffffe
#endif /* CONFIG_CPU_S3C2443 */
# define KPROBES_TRAMP_SS_BREAK_IDX UPROBES_TRAMP_SS_BREAK_IDX
# define KPROBES_TRAMP_RET_BREAK_IDX UPROBES_TRAMP_RET_BREAK_IDX
+/* Read the saved PC of a not-running task (ARM: cpu_context.pc). */
+static inline unsigned long arch_get_task_pc(struct task_struct *p)
+{
+ return task_thread_info(p)->cpu_context.pc;
+}
+
+/* Overwrite the saved PC of a not-running task (ARM: cpu_context.pc). */
+static inline void arch_set_task_pc(struct task_struct *p, unsigned long val)
+{
+ task_thread_info(p)->cpu_context.pc = val;
+}
+
static inline unsigned long dbi_get_stack_ptr(struct pt_regs *regs)
{
return regs->ARM_sp;
//void gen_insn_execbuf_holder (void);
//void pc_dep_insn_execbuf_holder (void);
-void patch_suspended_task_ret_addr(struct task_struct *p, struct kretprobe *rp);
-
#endif /* _DBI_ASM_ARM_KPROBES_H */
}
orig_ret_address = (unsigned long) ri->ret_addr;
- recycle_rp_inst (ri, &empty_rp);
+ recycle_rp_inst (ri);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
return ret;
}
-void patch_suspended_task_ret_addr(struct task_struct *p, struct kretprobe *rp)
-{
- struct kretprobe_instance *ri = NULL;
- struct hlist_node *node, *tmp;
- struct hlist_head *head;
- unsigned long flags;
- int found = 0;
-
- spin_lock_irqsave(&kretprobe_lock, flags);
- head = kretprobe_inst_table_head(p);
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
- if ((ri->rp == rp) && (p == ri->task)) {
- found = 1;
- break;
- }
- }
- spin_unlock_irqrestore(&kretprobe_lock, flags);
-
- if (found) {
- /* update PC */
- if (p->thread.ip != &kretprobe_trampoline) {
- ri->ret_addr = (kprobe_opcode_t *)p->thread.ip;
- p->thread.ip = &kretprobe_trampoline;
- }
- return;
- }
-
- spin_lock_irqsave(&kretprobe_lock, flags);
- if ((ri = get_free_rp_inst(rp)) != NULL) {
- ri->rp = rp;
- ri->rp2 = NULL;
- ri->task = p;
- ri->ret_addr = (kprobe_opcode_t *)p->thread.ip;
- p->thread.ip = &kretprobe_trampoline;
- add_rp_inst(ri);
- } else {
- printk("no ri for %d\n", p->pid);
- BUG();
- }
- spin_unlock_irqrestore(&kretprobe_lock, flags);
-}
-
int setjmp_pre_handler (struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of (p, struct jprobe, kp);
if (!p->tgid || (p->tgid == current->tgid)) {
/* handle __switch_to probe */
if(!p->tgid && (p->addr == sched_addr) && sched_rp) {
- struct task_struct *p, *g;
- rcu_read_lock();
- //swapper task
- if(current != &init_task)
- patch_suspended_task_ret_addr(&init_task, sched_rp);
- // other tasks
- do_each_thread(g, p){
- if(current != p)
- patch_suspended_task_ret_addr(p, sched_rp);
- } while_each_thread(g, p);
- /* workaround for do_exit probe on x86 targets */
- if ((current->flags & PF_EXITING) ||
- (current->flags & PF_EXITPIDONE))
- patch_suspended_task_ret_addr(current, sched_rp);
- rcu_read_unlock();
+ patch_suspended_all_task_ret_addr(sched_rp);
}
}
}
orig_ret_address = (unsigned long) ri->ret_addr;
- recycle_rp_inst (ri, &empty_rp);
+ recycle_rp_inst (ri);
if (orig_ret_address != trampoline_address)
/*
* This is the real return address. Any other
*
* 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
* Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
- * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
+ * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
*
*/
#define XREG(rg) x##rg
#define ORIG_EAX_REG orig_eax
-#else
+#else
#define EREG(rg) rg
#define XREG(rg) rg
#define KPROBES_TRAMP_LEN MAX_INSN_SIZE
#define KPROBES_TRAMP_INSN_IDX 0
+/* Read the saved PC of a not-running task (x86: thread.ip). */
+static inline unsigned long arch_get_task_pc(struct task_struct *p)
+{
+ return p->thread.ip;
+}
+
+/* Overwrite the saved PC of a not-running task (x86: thread.ip). */
+static inline void arch_set_task_pc(struct task_struct *p, unsigned long val)
+{
+ p->thread.ip = val;
+}
+
static inline unsigned long dbi_get_stack_ptr(struct pt_regs *regs)
{
return regs->EREG(sp);
* 2006-2007 Ekaterina Gorelkina <e.gorelkina@samsung.com>: initial implementation for ARM and MIPS
* 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
* Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
- * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
+ * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
*
*/
extern void arch_disarm_uretprobe (struct kretprobe *p, struct task_struct *tsk);
extern int arch_init_kprobes (void);
extern void dbi_arch_exit_kprobes (void);
+extern void patch_suspended_all_task_ret_addr(struct kretprobe *rp);
void dbi_arch_uprobe_return (void);
int arch_init_module_dependencies(void);
int asm_init_module_dependencies(void);
-
-
#endif /* _DBI_ARCH_KPROBES_H */
}
/* Called with kretprobe_lock held */
-void recycle_rp_inst (struct kretprobe_instance *ri, struct hlist_head *head)
+void recycle_rp_inst (struct kretprobe_instance *ri)
{
if (ri->rp)
{
return ret;
}
+static void unpatch_suspended_all_task_ret_addr(struct kretprobe *rp);
+
void dbi_unregister_kretprobe (struct kretprobe *rp)
{
unsigned long flags;
dbi_unregister_kprobe (&rp->kp, 0);
- if((unsigned int)rp->kp.addr == sched_addr)
+ if((unsigned int)rp->kp.addr == sched_addr) {
+ unpatch_suspended_all_task_ret_addr(rp);
sched_rp = NULL;
+ }
/* No race here */
spin_lock_irqsave (&kretprobe_lock, flags);
}
+/*
+ * Save the task's current saved PC into @ri->ret_addr and redirect the
+ * task to return through @tramp_addr instead.
+ */
+static inline void set_task_trampoline(struct task_struct *p, struct kretprobe_instance *ri, unsigned long tramp_addr)
+{
+ /* ret_addr is kprobe_opcode_t* while the saved PC is unsigned long
+ * (the pre-refactor code cast thread_saved_pc() the same way) */
+ ri->ret_addr = (kprobe_opcode_t *)arch_get_task_pc(p);
+ arch_set_task_pc(p, tramp_addr);
+}
+
+/* Undo set_task_trampoline(): restore the original return address as PC. */
+static inline void rm_task_trampoline(struct task_struct *p, struct kretprobe_instance *ri)
+{
+ arch_set_task_pc(p, (unsigned long)ri->ret_addr);
+}
+
+/*
+ * Look up the kretprobe instance registered for task @p and kretprobe @rp
+ * in the per-task instance hash bucket.  Returns the instance, or NULL if
+ * none is registered.
+ *
+ * NOTE(review): the instance is returned after kretprobe_lock has been
+ * dropped, so the caller must guarantee it cannot be recycled concurrently.
+ */
+static struct kretprobe_instance* find_ri_pc_mod(struct task_struct *p, struct kretprobe *rp)
+{
+ struct kretprobe_instance *ri;
+ struct hlist_node *node, *tmp;
+ struct hlist_head *head;
+ unsigned long flags;
+
+ spin_lock_irqsave (&kretprobe_lock, flags);
+ head = kretprobe_inst_table_head (p);
+ hlist_for_each_entry_safe (ri, node, tmp, head, hlist) {
+ if ((ri->rp == rp) && (p == ri->task)) {
+ spin_unlock_irqrestore (&kretprobe_lock, flags);
+ return ri;
+ }
+ }
+ spin_unlock_irqrestore (&kretprobe_lock, flags);
+
+ return NULL;
+}
+
+/*
+ * Allocate a free kretprobe instance for @rp, bind it to task @p, save the
+ * task's current PC into it and redirect the PC to @tramp_addr.
+ * BUG()s out if @rp has no free instances left.
+ */
+static void add_ri_pc_mod(struct task_struct *p, struct kretprobe *rp, unsigned long tramp_addr)
+{
+ struct kretprobe_instance *ri;
+ unsigned long flags;
+
+ spin_lock_irqsave(&kretprobe_lock, flags);
+ if ((ri = get_free_rp_inst(rp)) != NULL) {
+ ri->rp = rp;
+ ri->rp2 = NULL;
+ ri->task = p;
+ // set PC
+ set_task_trampoline(p, ri, tramp_addr);
+ add_rp_inst(ri);
+ } else {
+ printk("no ri for %d\n", p->pid);
+ BUG();
+ }
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+}
+
+/*
+ * Make suspended task @p return through kretprobe_trampoline: reuse the
+ * instance already registered for (@p, @rp) if there is one, otherwise
+ * allocate a fresh instance and redirect the task's saved PC.
+ */
+static void patch_suspended_task_ret_addr(struct task_struct *p, struct kretprobe *rp)
+{
+ struct kretprobe_instance *ri = find_ri_pc_mod(p, rp);
+
+ if(ri) {
+ // update PC only if it is not already pointing at the trampoline
+ if( arch_get_task_pc(p) != (unsigned long) &kretprobe_trampoline)
+ set_task_trampoline(p, ri, (unsigned long) &kretprobe_trampoline);
+ } else {
+ add_ri_pc_mod(p, rp, (unsigned long) &kretprobe_trampoline);
+ }
+}
+
+/*
+ * Undo patch_suspended_task_ret_addr() for task @p: if the task's saved PC
+ * points at kretprobe_trampoline, restore the original return address and
+ * recycle the instance.
+ */
+static void unpatch_suspended_task_ret_addr(struct task_struct *p, struct kretprobe *rp)
+{
+ struct kretprobe_instance *ri;
+ unsigned long flags;
+
+ if( arch_get_task_pc(p) == (unsigned long)&kretprobe_trampoline )
+ {
+ ri = find_ri_pc_mod(p, rp);
+ if(ri) {
+ rm_task_trampoline(p, ri);
+ /* recycle_rp_inst() must be called with kretprobe_lock held;
+ * the lock cannot be taken around find_ri_pc_mod(), which
+ * acquires it internally */
+ spin_lock_irqsave(&kretprobe_lock, flags);
+ recycle_rp_inst(ri);
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+ }
+ }
+}
+
+/*
+ * Redirect the saved PC of every task except current (including the
+ * swapper/init_task) through kretprobe_trampoline for kretprobe @rp.
+ * Called when the __switch_to probe fires (see setjmp_pre_handler).
+ *
+ * NOTE(review): the old x86-only code additionally patched current when
+ * PF_EXITING/PF_EXITPIDONE was set ("workaround for do_exit probe"); that
+ * case is not handled here - confirm it is no longer needed.
+ */
+void patch_suspended_all_task_ret_addr(struct kretprobe *rp)
+{
+ struct task_struct *p, *g;
+
+ rcu_read_lock();
+ // swapper task
+ if(current != &init_task)
+ patch_suspended_task_ret_addr(&init_task, rp);
+
+ // other tasks
+ do_each_thread(g, p) {
+ if(p == current)
+ continue;
+ patch_suspended_task_ret_addr(p, rp);
+ } while_each_thread(g, p);
+ rcu_read_unlock();
+}
+
+/*
+ * Restore the original saved PC of every task (swapper included) that was
+ * redirected through kretprobe_trampoline for @rp.  Called from
+ * dbi_unregister_kretprobe() before sched_rp is cleared; tasks whose PC
+ * does not point at the trampoline are skipped inside the per-task helper.
+ */
+static void unpatch_suspended_all_task_ret_addr(struct kretprobe *rp)
+{
+ struct task_struct *p, *g;
+
+ rcu_read_lock();
+ // swapper task
+ unpatch_suspended_task_ret_addr(&init_task, rp);
+
+ // other tasks
+ do_each_thread(g, p) {
+ unpatch_suspended_task_ret_addr(p, rp);
+ } while_each_thread(g, p);
+ rcu_read_unlock();
+}
+
int __init init_kprobes (void)
{
int i, err = 0;
struct kretprobe_instance *get_free_rp_inst_no_alloc (struct kretprobe *rp);
void free_rp_inst (struct kretprobe *rp);
void add_rp_inst (struct kretprobe_instance *ri);
-void recycle_rp_inst (struct kretprobe_instance *ri, struct hlist_head *head);
+void recycle_rp_inst (struct kretprobe_instance *ri);
//void kretprobe_trampoline_holder (void);
int trampoline_probe_handler (struct kprobe *p, struct pt_regs *regs);