#include <linux/kdebug.h>
#include <asm/dbi_kprobes.h>
#include <swap_uprobes.h>
#include <asm/swap_uprobes.h>
#include <dbi_insn_slots.h>

/* per-CPU control block: probe and saved flags between the int3 hit and the
 * debug trap (layout follows the { 0, NULL } initializer and ucb accesses below) */
struct uprobe_ctlblk {
	unsigned long flags;
	struct kprobe *p;
};

static DEFINE_PER_CPU(struct uprobe_ctlblk, ucb) = { 0, NULL };
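
/*
 * Build the probe for a user-space address: read the original instruction
 * from the target process, validate it, and write a trampoline (instruction
 * copy plus a trailing breakpoint) into an instruction slot of that process.
 */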
int arch_prepare_uprobe(struct uprobe *up, struct hlist_head *page_list)
{
	int ret = 0;
	struct kprobe *p = &up->kp;
	struct task_struct *task = up->task;
	kprobe_opcode_t insns[UPROBES_TRAMP_LEN];
	kprobe_opcode_t insn[MAX_INSN_SIZE];
	struct arch_specific_insn ainsn;

	if (!read_proc_vm_atomic(task, (unsigned long)p->addr, &insn, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
		panic("failed to read memory %p!\n", p->addr);

	ainsn.insn = insn;
	ret = arch_check_insn(&ainsn);
	if (!ret) {
		p->ainsn.insn = alloc_insn_slot(up->sm);
		/* boostable only if the copied insn can run out of line (can_boost(), as in x86 kprobes) */
		p->ainsn.boostable = can_boost(insn) ? 0 : -1;

		memcpy(&insns[UPROBES_TRAMP_INSN_IDX], insn, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		insns[UPROBES_TRAMP_RET_BREAK_IDX] = BREAKPOINT_INSTRUCTION;

		if (!write_proc_vm_atomic(task, (unsigned long)p->ainsn.insn, insns, sizeof(insns))) {
			free_insn_slot(up->sm, p->ainsn.insn);
			panic("failed to write memory %p!\n", p->ainsn.insn);
		}
	}

	return ret;
}
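
/*
 * Pre-handler for a user-space jprobe: grab the first six arguments of the
 * probed function from the user stack and pass them to the registered entry
 * handler before execution continues.
 */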
int setjmp_upre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct uprobe *up = container_of(p, struct uprobe, kp);
	struct ujprobe *jp = container_of(up, struct ujprobe, up);
	kprobe_pre_entry_handler_t pre_entry = (kprobe_pre_entry_handler_t)jp->pre_entry;
	entry_point_t entry = (entry_point_t)jp->entry;
	unsigned long args[6];

	/* FIXME: some user space apps crash if we clear the interrupt bit */
	//regs->EREG(flags) &= ~IF_MASK;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
	/* version-dependent flags handling not shown */
#endif

	/* read first 6 args from stack */
	if (!read_proc_vm_atomic(current, regs->EREG(sp) + 4, args, sizeof(args)))
		panic("failed to read user space func arguments %lx!\n", regs->EREG(sp) + 4);

	if (pre_entry)
		p->ss_addr = pre_entry(jp->priv_arg, regs);

	if (entry)
		entry(args[0], args[1], args[2], args[3], args[4], args[5]);
	else
		arch_ujprobe_return();

	return 0;
}
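
/*
 * Hook a user-space return probe: save the original return address found on
 * the user stack and replace it with the breakpoint inside the trampoline
 * slot, so that the function return traps back into the probe machinery.
 */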
void arch_prepare_uretprobe(struct uretprobe_instance *ri, struct pt_regs *regs)
{
	/* Replace the return addr with trampoline addr */
	unsigned long ra = (unsigned long)(ri->rp->up.kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);

	if (!read_proc_vm_atomic(current, regs->EREG(sp), &(ri->ret_addr), sizeof(ri->ret_addr)))
		panic("failed to read user space func ra %lx!\n", regs->EREG(sp));

	if (!write_proc_vm_atomic(current, regs->EREG(sp), &ra, sizeof(ra)))
		panic("failed to write user space func ra %lx!\n", regs->EREG(sp));
}
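
/*
 * Helpers used by the common uretprobe code: report where the trampoline
 * breakpoint lives and redirect the user EIP back to the original return
 * address once the probe has fired.
 */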
unsigned long arch_get_trampoline_addr(struct kprobe *p, struct pt_regs *regs)
{
	return (unsigned long)(p->ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
}

void arch_set_orig_ret_addr(unsigned long orig_ret_addr, struct pt_regs *regs)
{
	regs->EREG(ip) = orig_ret_addr;
}
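
/*
 * Write a 5-byte relative jmp into the probed process at 'from', targeting
 * 'to'; used to "boost" a single-stepped instruction copy by jumping straight
 * back to the original code.
 */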
static void set_user_jmp_op(void *from, void *to)
{
	struct __arch_jmp_op {
		char op;
		long raddr;
	} __attribute__ ((packed)) jop;

	/* displacement is relative to the end of the 5-byte jmp */
	jop.raddr = (long)(to) - ((long)(from) + 5);
	jop.op = RELATIVEJUMP_INSTRUCTION;

	if (!write_proc_vm_atomic(current, (unsigned long)from, &jop, sizeof(jop)))
		panic("failed to write jump opcode to user space %p!\n", from);
}
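
/*
 * Fix up the user context after single-stepping the instruction copy:
 * rewrite EIP (and, for pushf/call, the dword on top of the user stack) from
 * the slot address back to the original address, and record whether this
 * probe can be boosted next time.
 */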
static void resume_execution(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
	unsigned long *tos, tos_dword = 0;
	unsigned long copy_eip = (unsigned long)p->ainsn.insn;
	unsigned long orig_eip = (unsigned long)p->addr;
	kprobe_opcode_t insns[2];

	regs->EREG(flags) &= ~TF_MASK;

	/* top of the user stack is read into a local dword and patched in place */
	tos = (unsigned long *)&tos_dword;
	if (!read_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
		panic("failed to read dword from top of the user space stack %lx!\n", regs->EREG(sp));

	if (!read_proc_vm_atomic(current, (unsigned long)p->ainsn.insn, insns, 2 * sizeof(kprobe_opcode_t)))
		panic("failed to read first 2 opcodes of instruction copy from user space %p!\n", p->ainsn.insn);

	switch (insns[0]) {
	case 0x9c: /* pushfl */
		*tos &= ~(TF_MASK | IF_MASK);
		*tos |= flags & (TF_MASK | IF_MASK);
		break;
	case 0xc2: /* iret/ret/lret */
	case 0xc3:
	case 0xca:
	case 0xcb:
	case 0xcf:
	case 0xea: /* jmp absolute -- eip is correct */
		/* eip is already adjusted, no more changes required */
		p->ainsn.boostable = 1;
		goto no_change;
	case 0xe8: /* call relative - Fix return addr */
		*tos = orig_eip + (*tos - copy_eip);
		break;
	case 0x9a: /* call absolute -- same as call absolute, indirect */
		*tos = orig_eip + (*tos - copy_eip);
		if (!write_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
			panic("failed to write dword to top of the user space stack %lx!\n", regs->EREG(sp));
		goto no_change;
	case 0xff:
		if ((insns[1] & 0x30) == 0x10) {
			/*
			 * call absolute, indirect
			 * Fix return addr; eip is correct.
			 * But this is not boostable
			 */
			*tos = orig_eip + (*tos - copy_eip);
			if (!write_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
				panic("failed to write dword to top of the user space stack %lx!\n", regs->EREG(sp));
			goto no_change;
		} else if (((insns[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
			   ((insns[1] & 0x31) == 0x21)) {
			/* jmp far, absolute indirect */
			/* eip is correct. And this is boostable */
			p->ainsn.boostable = 1;
			goto no_change;
		}
	default:
		break;
	}

	if (!write_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
		panic("failed to write dword to top of the user space stack %lx!\n", regs->EREG(sp));

	if (p->ainsn.boostable == 0) {
		if ((regs->EREG(ip) > copy_eip) && (regs->EREG(ip) - copy_eip) + 5 < MAX_INSN_SIZE) {
			/*
			 * These instructions can be executed directly if it
			 * jumps back to correct address.
			 */
			set_user_jmp_op((void *)regs->EREG(ip), (void *)(orig_eip + (regs->EREG(ip) - copy_eip)));
			p->ainsn.boostable = 1;
		} else {
			p->ainsn.boostable = -1;
		}
	}

	regs->EREG(ip) = orig_eip + (regs->EREG(ip) - copy_eip);
no_change:
	return;
}
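
/*
 * int3 hit in user space: find the probe by breakpoint address (or by
 * trampoline slot for return probes), run its pre-handler and start
 * single-stepping the instruction copy.
 */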
static int uprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	kprobe_opcode_t *addr;
	struct task_struct *task = current;
	pid_t tgid = task->tgid;

	addr = (kprobe_opcode_t *)(regs->EREG(ip) - sizeof(kprobe_opcode_t));
	p = get_ukprobe(addr, tgid);
	if (p == NULL) {
		p = get_ukprobe_by_insn_slot(addr, tgid, regs);
		if (p == NULL) {
			printk("no_uprobe\n");
			return 0;
		}
		trampoline_uprobe_handler(p, regs);
	} else {
		if (!p->pre_handler || !p->pre_handler(p, regs))
			prepare_singlestep(p, regs);
	}

	__get_cpu_var(ucb).p = p;
	__get_cpu_var(ucb).flags = (regs->EREG(flags) & (TF_MASK | IF_MASK));

	return 1;
}
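
/*
 * Debug exception after the single-step: fetch the probe and saved flags from
 * the per-CPU control block and let resume_execution() adjust the user
 * context back to the original code.
 */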
static int post_uprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p = __get_cpu_var(ucb).p;
	unsigned long flags = __get_cpu_var(ucb).flags;

	resume_execution(p, regs, flags);
	return 1;
}
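
/*
 * die-notifier entry point: route user-mode breakpoint and debug exceptions
 * to the handlers above and leave everything else untouched.
 */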
static int uprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && !user_mode_vm(args->regs))
		return ret;

#ifdef CONFIG_KPROBES
	if (val == DIE_INT3) {
#else
	if (val == DIE_TRAP) {
#endif
		if (uprobe_handler(args->regs))
			ret = NOTIFY_STOP;
	} else if (val == DIE_DEBUG && post_uprobe_handler(args->regs)) {
		ret = NOTIFY_STOP;
	}

	return ret;
}

static struct notifier_block uprobe_exceptions_nb = {
	.notifier_call = uprobe_exceptions_notify,
};
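
/*
 * Module-level init/exit: hook into and out of the kernel die-notifier chain
 * so that int3/debug traps from probed processes reach uprobe_exceptions_notify().
 */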
int swap_arch_init_uprobes(void)
{
	return register_die_notifier(&uprobe_exceptions_nb);
}

void swap_arch_exit_uprobes(void)
{
	unregister_die_notifier(&uprobe_exceptions_nb);
}