/*
 *  Dynamic Binary Instrumentation Module based on KProbes
 *  modules/uprobe/arch/asm-x86/swap_uprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) Samsung Electronics, 2006-2010
 *
 * 2008-2009    Alexey Gerenkov <a.gerenkov@samsung.com>: initial implementation of
 *              User-Space Probes; support for x86/ARM/MIPS in both user and kernel space.
 * 2010         Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign of the module to
 *              separate the core and architecture-specific parts.
 *
 */

#include <linux/kdebug.h>
#include <asm/dbi_kprobes.h>
#include <swap_uprobes.h>
#include <asm/swap_uprobes.h>
#include <dbi_insn_slots.h>

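/*
 * Per-CPU control block: remembers the uprobe currently being
 * single-stepped on this CPU and the eflags saved at breakpoint time,
 * so that post_uprobe_handler() can restore them after the debug trap.
 */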
struct uprobe_ctlblk {
        unsigned long flags;
        struct kprobe *p;
};

static DEFINE_PER_CPU(struct uprobe_ctlblk, ucb) = { 0, NULL };

static struct kprobe *get_current_probe(void)
{
        return __get_cpu_var(ucb).p;
}

static void set_current_probe(struct kprobe *p)
{
        __get_cpu_var(ucb).p = p;
}

static void reset_current_probe(void)
{
        set_current_probe(NULL);
}

static void save_current_flags(struct pt_regs *regs)
{
        __get_cpu_var(ucb).flags = regs->EREG(flags);
}

static void restore_current_flags(struct pt_regs *regs)
{
        regs->EREG(flags) &= ~IF_MASK;
        regs->EREG(flags) |= __get_cpu_var(ucb).flags & IF_MASK;
}

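/*
 * Prepare an uprobe: read the original instruction from the probed
 * task's memory, validate it, and build the out-of-line trampoline
 * (instruction copy followed by a breakpoint) in an instruction slot
 * allocated in the task's address space.
 */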
int arch_prepare_uprobe(struct uprobe *up, struct hlist_head *page_list)
{
        int ret;
        struct kprobe *p = &up->kp;
        struct task_struct *task = up->task;
        kprobe_opcode_t insns[UPROBES_TRAMP_LEN];
        kprobe_opcode_t insn[MAX_INSN_SIZE];
        struct arch_specific_insn ainsn;

        if (!read_proc_vm_atomic(task, (unsigned long)p->addr, &insn, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
                panic("failed to read memory %p!\n", p->addr);

        ainsn.insn = insn;
        ret = arch_check_insn(&ainsn);
        if (!ret) {
                p->opcode = insn[0];
                p->ainsn.insn = alloc_insn_slot(up->sm);
                if (!p->ainsn.insn)
                        return -ENOMEM;

                /* 0: may be boosted later (jump back instead of single-step), -1: never */
                if (can_boost(insn))
                        p->ainsn.boostable = 0;
                else
                        p->ainsn.boostable = -1;

                /* instruction copy followed by the return breakpoint forms the trampoline */
                memcpy(&insns[UPROBES_TRAMP_INSN_IDX], insn, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
                insns[UPROBES_TRAMP_RET_BREAK_IDX] = BREAKPOINT_INSTRUCTION;

                if (!write_proc_vm_atomic(task, (unsigned long)p->ainsn.insn, insns, sizeof(insns))) {
                        free_insn_slot(up->sm, p->ainsn.insn);
                        panic("failed to write memory %p!\n", p->ainsn.insn);
                        return -EINVAL;
                }
        }

        return ret;
}

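/*
 * Pre-handler used by ujprobes: fetches the first six argument words
 * from the user stack, lets the optional pre_entry callback choose the
 * single-step address, and then calls the user-supplied entry handler
 * with those arguments.
 */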
int setjmp_upre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct uprobe *up = container_of(p, struct uprobe, kp);
        struct ujprobe *jp = container_of(up, struct ujprobe, up);
        kprobe_pre_entry_handler_t pre_entry = (kprobe_pre_entry_handler_t)jp->pre_entry;
        entry_point_t entry = (entry_point_t)jp->entry;
        unsigned long args[6];

        /* FIXME some user space apps crash if we clean interrupt bit */
        //regs->EREG(flags) &= ~IF_MASK;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
        trace_hardirqs_off();
#endif

        /* read the first 6 args from the user stack, skipping the return address */
        if (!read_proc_vm_atomic(current, regs->EREG(sp) + 4, args, sizeof(args)))
                panic("failed to read user space func arguments %lx!\n", regs->EREG(sp) + 4);

        if (pre_entry)
                p->ss_addr = pre_entry(jp->priv_arg, regs);

        if (entry)
                entry(args[0], args[1], args[2], args[3], args[4], args[5]);
        else
                arch_ujprobe_return();

        return 0;
}

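/*
 * Hijack the return address of the probed user function: save it in
 * the uretprobe instance and overwrite the stack slot with the address
 * of the return-trampoline breakpoint inside the instruction slot.
 */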
void arch_prepare_uretprobe(struct uretprobe_instance *ri, struct pt_regs *regs)
{
        /* Replace the return addr with trampoline addr */
        unsigned long ra = (unsigned long)(ri->rp->up.kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);

        if (!read_proc_vm_atomic(current, regs->EREG(sp), &(ri->ret_addr), sizeof(ri->ret_addr)))
                panic("failed to read user space func ra %lx!\n", regs->EREG(sp));

        if (!write_proc_vm_atomic(current, regs->EREG(sp), &ra, sizeof(ra)))
                panic("failed to write user space func ra %lx!\n", regs->EREG(sp));
}

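/*
 * arch_get_trampoline_addr() reports where the return trampoline of a
 * probe lives; arch_set_orig_ret_addr() redirects execution back to
 * the original return address once the trampoline breakpoint is hit.
 */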
unsigned long arch_get_trampoline_addr(struct kprobe *p, struct pt_regs *regs)
{
        return (unsigned long)(p->ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
}

void arch_set_orig_ret_addr(unsigned long orig_ret_addr, struct pt_regs *regs)
{
        regs->EREG(ip) = orig_ret_addr;
}

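/*
 * Write a 5-byte relative jump from 'from' to 'to' into the probed
 * task's address space; used to "boost" a probe by jumping straight
 * back from the instruction copy to the original code.
 */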
static void set_user_jmp_op(void *from, void *to)
{
        struct __arch_jmp_op {
                char op;
                long raddr;
        } __attribute__ ((packed)) jop;

        jop.raddr = (long)(to) - ((long)(from) + 5);
        jop.op = RELATIVEJUMP_INSTRUCTION;

        if (!write_proc_vm_atomic(current, (unsigned long)from, &jop, sizeof(jop)))
                panic("failed to write jump opcode to user space %p!\n", from);
}

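/*
 * Called after the instruction copy has been executed out of line:
 * fix up the top of the user stack (return address of calls, flags
 * pushed by pushf) and EIP so that execution continues at the correct
 * place in the original code, and record whether the probe can be
 * boosted.
 */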
static void resume_execution(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
        unsigned long *tos, tos_dword = 0;
        unsigned long copy_eip = (unsigned long)p->ainsn.insn;
        unsigned long orig_eip = (unsigned long)p->addr;
        kprobe_opcode_t insns[2];

        regs->EREG(flags) &= ~TF_MASK;

        tos = (unsigned long *)&tos_dword;
        if (!read_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
                panic("failed to read dword from top of the user space stack %lx!\n", regs->EREG(sp));

        if (!read_proc_vm_atomic(current, (unsigned long)p->ainsn.insn, insns, 2 * sizeof(kprobe_opcode_t)))
                panic("failed to read first 2 opcodes of instruction copy from user space %p!\n", p->ainsn.insn);

        switch (insns[0]) {
                case 0x9c:              /* pushfl */
                        *tos &= ~(TF_MASK | IF_MASK);
                        *tos |= flags & (TF_MASK | IF_MASK);
                        break;
                case 0xc2:              /* iret/ret/lret */
                case 0xc3:
                case 0xca:
                case 0xcb:
                case 0xcf:
                case 0xea:              /* jmp absolute -- eip is correct */
                        /* eip is already adjusted, no more changes required */
                        p->ainsn.boostable = 1;
                        goto no_change;
                case 0xe8:              /* call relative - Fix return addr */
                        *tos = orig_eip + (*tos - copy_eip);
                        break;
                case 0x9a:              /* call absolute -- same as call absolute, indirect */
                        *tos = orig_eip + (*tos - copy_eip);

                        if (!write_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
                                panic("failed to write dword to top of the user space stack %lx!\n", regs->EREG(sp));

                        goto no_change;
                case 0xff:
                        if ((insns[1] & 0x30) == 0x10) {
                                /*
                                 * call absolute, indirect
                                 * Fix return addr; eip is correct.
                                 * But this is not boostable
                                 */
                                *tos = orig_eip + (*tos - copy_eip);

                                if (!write_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
                                        panic("failed to write dword to top of the user space stack %lx!\n", regs->EREG(sp));

                                goto no_change;
                        } else if (((insns[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
                                   ((insns[1] & 0x31) == 0x21)) {
                                /* jmp far, absolute indirect */
                                /* eip is correct. And this is boostable */
                                p->ainsn.boostable = 1;
                                goto no_change;
                        }
                default:
                        break;
        }

        if (!write_proc_vm_atomic(current, regs->EREG(sp), &tos_dword, sizeof(tos_dword)))
                panic("failed to write dword to top of the user space stack %lx!\n", regs->EREG(sp));

        if (p->ainsn.boostable == 0) {
                if ((regs->EREG(ip) > copy_eip) && (regs->EREG(ip) - copy_eip) + 5 < MAX_INSN_SIZE) {
                        /*
                         * This instruction can be executed directly if it
                         * jumps back to the correct address.
                         */
                        set_user_jmp_op((void *)regs->EREG(ip), (void *)(orig_eip + (regs->EREG(ip) - copy_eip)));
                        p->ainsn.boostable = 1;
                } else {
                        p->ainsn.boostable = -1;
                }
        }

        regs->EREG(ip) = orig_eip + (regs->EREG(ip) - copy_eip);

no_change:
        return;
}

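/*
 * Breakpoint (int3) handler. The trap address is either a registered
 * probe or the return-trampoline breakpoint inside an instruction
 * slot; in the latter case the uretprobe trampoline handler runs.
 * For an ordinary probe the pre-handler is called and the probed
 * instruction is then either boosted or single-stepped out of line.
 */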
static int uprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p;
        kprobe_opcode_t *addr;
        struct task_struct *task = current;
        pid_t tgid = task->tgid;

        save_current_flags(regs);

        addr = (kprobe_opcode_t *)(regs->EREG(ip) - sizeof(kprobe_opcode_t));
        p = get_ukprobe(addr, tgid);

        if (p == NULL) {
                p = get_ukprobe_by_insn_slot(addr, tgid, regs);

                if (p == NULL) {
                        printk("no_uprobe\n");
                        return 0;
                }

                trampoline_uprobe_handler(p, regs);
                return 1;
        } else {
                if (!p->pre_handler || !p->pre_handler(p, regs)) {
                        if (p->ainsn.boostable == 1 && !p->post_handler) {
                                regs->EREG(ip) = (unsigned long)p->ainsn.insn;
                                return 1;
                        }

                        prepare_singlestep(p, regs);
                }
        }

        set_current_probe(p);

        return 1;
}

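/*
 * Debug (single-step) exception handler: finish the probe that was
 * single-stepped on this CPU, restore the saved interrupt flag and
 * clear the per-CPU state.
 */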
static int post_uprobe_handler(struct pt_regs *regs)
{
        struct kprobe *p = get_current_probe();
        unsigned long flags = __get_cpu_var(ucb).flags;

        if (p == NULL)
                return 0;

        resume_execution(p, regs, flags);
        restore_current_flags(regs);

        reset_current_probe();

        return 1;
}

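/*
 * Die-notifier callback: route user-mode breakpoint and debug
 * exceptions to the uprobe handlers and swallow them (NOTIFY_STOP)
 * when they belong to us.
 */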
static int uprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data)
{
        struct die_args *args = (struct die_args *)data;
        int ret = NOTIFY_DONE;

        if (args->regs && !user_mode_vm(args->regs))
                return ret;

        switch (val) {
#ifdef CONFIG_KPROBES
                case DIE_INT3:
#else
                case DIE_TRAP:
#endif
                        if (uprobe_handler(args->regs))
                                ret = NOTIFY_STOP;
                        break;
                case DIE_DEBUG:
                        if (post_uprobe_handler(args->regs))
                                ret = NOTIFY_STOP;
                        break;
                default:
                        break;
        }

        return ret;
}

static struct notifier_block uprobe_exceptions_nb = {
        .notifier_call = uprobe_exceptions_notify,
        .priority = INT_MAX
};

int swap_arch_init_uprobes(void)
{
        return register_die_notifier(&uprobe_exceptions_nb);
}

void swap_arch_exit_uprobes(void)
{
        unregister_die_notifier(&uprobe_exceptions_nb);
}