2 * arch/asm-x86/swap_kprobes.c
3 * @author Alexey Gerenkov <a.gerenkov@samsung.com> User-Space Probes initial implementation;
4 * Support x86/ARM/MIPS for both user and kernel spaces.
5 * @author Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
6 * @author Stanislav Andreev <s.andreev@samsung.com>: added time debug profiling support; BUG() message fix
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
26 * Copyright (C) IBM Corporation, 2002, 2004
28 * @section DESCRIPTION
30 * SWAP kprobes arch-dependent part for x86.
33 #include<linux/module.h>
34 #include <linux/kdebug.h>
36 #include "swap_kprobes.h"
37 #include <kprobe/swap_kprobes.h>
39 #include <kprobe/swap_kdebug.h>
40 #include <kprobe/swap_slots.h>
41 #include <kprobe/swap_kprobes_deps.h>
42 #define SUPRESS_BUG_MESSAGES /**< Debug-off definition. */
/*
 * Kernel symbols resolved at runtime via swap_ksyms() in
 * arch_init_module_deps(); they stay NULL until that init succeeds,
 * so callers must only run after successful module init.
 *
 * NOTE(review): this extract appears to be missing lines throughout
 * (braces, labels, struct fields) — comments below describe only the
 * visible code; verify details against the full source.
 */
45 static int (*swap_fixup_exception)(struct pt_regs *regs);
46 static void *(*swap_text_poke)(void *addr, const void *opcode, size_t len);
47 static void (*swap_show_registers)(struct pt_regs *regs);
/* Kernel stack pointer of the trapped context, as an unsigned long *. */
51 #define stack_addr(regs) ((unsigned long *)kernel_stack_pointer(regs))
/*
 * Inline-asm fragments that build/unwind a pt_regs-like frame, plus the
 * kretprobe trampoline.  The 52/56(%esp) offsets below index into the
 * frame laid down by SWAP_SAVE_REGS_STRING: eflags is shuffled into the
 * cs slot so that the saved-flags slot can hold the real return address
 * produced by trampoline_probe_handler_x86() (in %eax).
 * NOTE(review): the macro bodies and the asm() wrapper are truncated in
 * this extract — confirm the frame layout against the full source.
 */
54 #define SWAP_SAVE_REGS_STRING \
55 /* Skip cs, ip, orig_ax and gs. */ \
67 #define SWAP_RESTORE_REGS_STRING \
75 /* Skip ds, es, fs, gs, orig_ax, and ip. Note: don't pop cs here*/\
80 * Function return probe trampoline:
81 * - init_kprobes() establishes a probepoint here
82 * - When the probed function returns, this probe
83 * causes the handlers to fire
86 ".global swap_kretprobe_trampoline\n"
87 "swap_kretprobe_trampoline:\n"
91 "call trampoline_probe_handler_x86\n"
92 /* move eflags to cs */
93 "movl 56(%esp), %edx\n"
94 "movl %edx, 52(%esp)\n"
95 /* replace saved flags with true return address. */
96 "movl %eax, 56(%esp)\n"
97 SWAP_RESTORE_REGS_STRING
102 /* insert a jmp code */
/*
 * Patch a 5-byte relative jmp at @from that jumps to @to.
 * raddr is the rel32 displacement: target minus the address of the
 * instruction following the 5-byte jmp (from + 5).
 * NOTE(review): the struct field declarations and function braces are
 * missing from this extract.
 */
103 static __always_inline void set_jmp_op(void *from, void *to)
105 struct __arch_jmp_op {
109 jop = (struct __arch_jmp_op *) from;
110 jop->raddr = (long) (to) - ((long) (from) + 5);
111 jop->op = RELATIVEJUMP_INSTRUCTION;
115 * @brief Check if opcode can be boosted.
117 * @param opcodes Opcode to check.
118 * @return Non-zero if opcode can be boosted.
/*
 * Decide whether the instruction at @opcodes can be "boosted", i.e.
 * executed out-of-line followed by a direct jmp back, skipping the
 * single-step trap.  Prefix bytes are consumed via the retry loop
 * (goto retry); 0x0f two-byte opcodes are looked up in the bitmap.
 * NOTE(review): the retry: label, switch-case labels and closing braces
 * are missing from this extract — control flow inferred from the
 * visible goto/return statements.
 */
120 int swap_can_boost(kprobe_opcode_t *opcodes)
/* W() packs 16 per-opcode boostable bits into one word of the bitmap. */
122 #define W(row, b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, ba, bb, bc, bd, be, bf) \
123 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
124 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
125 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
126 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
129 * Undefined/reserved opcodes, conditional jump, Opcode Extension
130 * Groups, and some special opcodes can not be boost.
132 static const unsigned long twobyte_is_boostable[256 / 32] = {
133 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
134 W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) |
135 W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
136 W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) |
137 W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
138 W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) |
139 W(0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
140 W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) |
141 W(0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1),
142 W(0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) |
143 W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1),
144 W(0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) |
145 W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1),
146 W(0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) |
147 W(0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1),
148 W(0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) |
149 W(0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0)
150 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
154 kprobe_opcode_t opcode;
155 kprobe_opcode_t *orig_opcodes = opcodes;
/* Bail out if prefix scanning ran past the maximum instruction size. */
157 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
159 opcode = *(opcodes++);
161 /* 2nd-byte opcode */
162 if (opcode == 0x0f) {
163 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
165 return test_bit(*opcodes, twobyte_is_boostable);
/* One-byte opcodes: classify by high nibble. */
168 switch (opcode & 0xf0) {
170 if (0x63 < opcode && opcode < 0x67)
171 goto retry; /* prefixes */
172 /* can't boost Address-size override and bound */
173 return (opcode != 0x62 && opcode != 0x67);
175 return 0; /* can't boost conditional jump */
177 /* can't boost software-interruptions */
178 return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
180 /* can boost AA* and XLAT */
181 return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
183 /* can boost in/out and absolute jmps */
184 return ((opcode & 0x04) || opcode == 0xea);
186 if ((opcode & 0x0c) == 0 && opcode != 0xf1)
187 goto retry; /* lock/rep(ne) prefix */
188 /* clear and set flags can be boost */
189 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
191 if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
192 goto retry; /* prefixes */
193 /* can't boost CS override and call */
194 return (opcode != 0x2e && opcode != 0x9a);
197 EXPORT_SYMBOL_GPL(swap_can_boost);
200 * returns non-zero if opcode modifies the interrupt flag.
/*
 * Return non-zero when @opcode modifies the interrupt flag (IF),
 * e.g. iret/popf below; used to decide whether saved eflags must
 * drop IF in set_current_kprobe().
 */
202 static int is_IF_modifier(kprobe_opcode_t opcode)
207 case 0xcf: /* iret/iretd */
208 case 0x9d: /* popf/popfd */
215 * @brief Creates trampoline for kprobe.
217 * @param p Pointer to kprobe.
218 * @param sm Pointer to slot manager
219 * @return 0 on success, error code on error.
/*
 * Allocate an executable slot from @sm, copy the original instruction
 * bytes there for out-of-line single-stepping, remember the original
 * first byte in p->opcode, and mark the probe boostable (0) or not (-1).
 */
221 int swap_arch_prepare_kprobe(struct kprobe *p, struct slot_manager *sm)
223 /* insn: must be on special executable page on i386. */
224 p->ainsn.insn = swap_slot_alloc(sm);
225 if (p->ainsn.insn == NULL)
228 memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
230 p->opcode = *p->addr;
/* boostable: 0 = candidate (decided later), -1 = never boost. */
231 p->ainsn.boostable = swap_can_boost(p->addr) ? 0 : -1;
237 * @brief Prepares singlestep for current CPU.
239 * @param p Pointer to kprobe.
240 * @param regs Pointer to CPU registers data.
/*
 * Arm the CPU for single-stepping the probed instruction: set TF,
 * clear IF, and point ip at either a per-CPU override address
 * (p->ss_addr), the original address (when the probed insn is itself
 * an int3), or the out-of-line copy p->ainsn.insn.
 */
243 void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
245 int cpu = smp_processor_id();
/* Per-CPU single-step target override; consumed once, then cleared. */
247 if (p->ss_addr[cpu]) {
248 regs->EREG(ip) = (unsigned long)p->ss_addr[cpu];
249 p->ss_addr[cpu] = NULL;
/* Trap after one instruction, with interrupts masked meanwhile. */
251 regs->EREG(flags) |= TF_MASK;
252 regs->EREG(flags) &= ~IF_MASK;
253 /* single step inline if the instruction is an int3 */
254 if (p->opcode == BREAKPOINT_INSTRUCTION) {
255 regs->EREG(ip) = (unsigned long) p->addr;
256 /* printk(KERN_INFO "break_insn!!!\n"); */
258 regs->EREG(ip) = (unsigned long) p->ainsn.insn;
261 EXPORT_SYMBOL_GPL(prepare_singlestep);
264 * @brief Saves previous kprobe.
266 * @param kcb Pointer to kprobe_ctlblk struct whereto save current kprobe.
267 * @param p_run Pointer to kprobe.
/*
 * Stash the currently-running kprobe (and its status) into @kcb before
 * handling a reentrant hit.  Only one level of nesting is supported:
 * if a previous kprobe is already saved, this is fatal (panic).
 */
270 void save_previous_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *cur_p)
272 if (kcb->prev_kprobe.kp != NULL) {
273 panic("no space to save new probe[]: "
274 "task = %d/%s, prev %p, current %p, new %p,",
275 current->pid, current->comm, kcb->prev_kprobe.kp->addr,
276 swap_kprobe_running()->addr, cur_p->addr);
280 kcb->prev_kprobe.kp = swap_kprobe_running();
281 kcb->prev_kprobe.status = kcb->kprobe_status;
286 * @brief Restores previous kprobe.
288 * @param kcb Pointer to kprobe_ctlblk which contains previous kprobe.
/*
 * Undo save_previous_kprobe(): reinstate the saved kprobe as the
 * per-CPU current one, restore its status, and clear the save slot.
 */
291 void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
293 __get_cpu_var(swap_current_kprobe) = kcb->prev_kprobe.kp;
294 kcb->kprobe_status = kcb->prev_kprobe.status;
295 kcb->prev_kprobe.kp = NULL;
296 kcb->prev_kprobe.status = 0;
300 * @brief Sets currently running kprobe.
302 * @param p Pointer to currently running kprobe.
303 * @param regs Pointer to CPU registers data.
304 * @param kcb Pointer to kprobe_ctlblk.
/*
 * Record @p as the per-CPU running kprobe and snapshot the TF/IF bits
 * of eflags so they can be restored after single-stepping.  If the
 * probed opcode itself modifies IF (iret/popf), drop IF from the saved
 * copy so we don't re-enable interrupts behind the instruction's back.
 */
307 void set_current_kprobe(struct kprobe *p,
308 struct pt_regs *regs,
309 struct kprobe_ctlblk *kcb)
311 __get_cpu_var(swap_current_kprobe) = p;
312 DBPRINTF("set_current_kprobe[]: p=%p addr=%p\n", p, p->addr);
313 kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags =
314 (regs->EREG(flags) & (TF_MASK | IF_MASK));
315 if (is_IF_modifier(p->opcode))
316 kcb->kprobe_saved_eflags &= ~IF_MASK;
/*
 * Run the probed instruction.  Fast path ("boost"): when the insn copy
 * is known-boostable and there is no post_handler, jump straight to the
 * out-of-line copy with no single-step trap.  Otherwise fall back to
 * prepare_singlestep() and mark the control block KPROBE_HIT_SS.
 */
319 static int setup_singlestep(struct kprobe *p, struct pt_regs *regs,
320 struct kprobe_ctlblk *kcb)
322 #if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
323 if (p->ainsn.boostable == 1 && !p->post_handler) {
324 /* Boost up -- we can execute copied instructions directly */
325 swap_reset_current_kprobe();
326 regs->ip = (unsigned long)p->ainsn.insn;
327 swap_preempt_enable_no_resched();
331 #endif /* !CONFIG_PREEMPT */
333 prepare_singlestep(p, regs);
334 kcb->kprobe_status = KPROBE_HIT_SS;
/*
 * int3 (breakpoint) entry point.  Computes the probed address (ip has
 * already advanced past the 1-byte int3), looks up the kprobe, handles
 * reentrancy (probe hit inside a probe handler), races where another
 * CPU removed the breakpoint, jprobe break_handler dispatch, and
 * finally runs pre_handler + setup_singlestep for a normal hit.
 * NOTE(review): many branch bodies/braces are missing from this
 * extract; the flow notes below follow the visible statements only.
 */
339 static int __kprobe_handler(struct pt_regs *regs)
341 struct kprobe *p = 0;
342 int ret = 0, reenter = 0;
343 kprobe_opcode_t *addr = NULL;
344 struct kprobe_ctlblk *kcb;
/* ip points just past the int3; step back to the probed byte. */
346 addr = (kprobe_opcode_t *) (regs->EREG(ip) - sizeof(kprobe_opcode_t));
350 kcb = swap_get_kprobe_ctlblk();
351 p = swap_get_kprobe(addr);
353 /* Check we're not actually recursing */
354 if (swap_kprobe_running()) {
/* Single-stepping an insn that is itself an int3: restore flags. */
356 if (kcb->kprobe_status == KPROBE_HIT_SS &&
357 *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
358 regs->EREG(flags) &= ~TF_MASK;
359 regs->EREG(flags) |= kcb->kprobe_saved_eflags;
364 /* We have reentered the kprobe_handler(), since
365 * another probe was hit while within the handler.
366 * We here save the original kprobes variables and
367 * just single step on the instruction of the new probe
368 * without calling any user handlers.
370 save_previous_kprobe(kcb, p);
371 set_current_kprobe(p, regs, kcb);
372 swap_kprobes_inc_nmissed_count(p);
373 prepare_singlestep(p, regs);
374 kcb->kprobe_status = KPROBE_REENTER;
378 if (*addr != BREAKPOINT_INSTRUCTION) {
379 /* The breakpoint instruction was removed by
380 * another cpu right after we hit, no further
381 * handling of this interrupt is appropriate
383 regs->EREG(ip) -= sizeof(kprobe_opcode_t);
/* Not a new probe: let the running probe's break_handler decide. */
388 p = __get_cpu_var(swap_current_kprobe);
389 if (p->break_handler && p->break_handler(p, regs))
397 if (*addr != BREAKPOINT_INSTRUCTION) {
399 * The breakpoint instruction was removed right
400 * after we hit it. Another cpu has removed
401 * either a probepoint or a debugger breakpoint
402 * at this address. In either case, no further
403 * handling of this interrupt is appropriate.
404 * Back up over the (now missing) int3 and run
405 * the original instruction.
407 regs->EREG(ip) -= sizeof(kprobe_opcode_t);
412 /* Not one of ours: let kernel handle it */
413 DBPRINTF("no_kprobe");
/* Normal hit: record the probe, fire pre_handler, then step. */
418 set_current_kprobe(p, regs, kcb);
421 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
423 if (p->pre_handler) {
424 ret = p->pre_handler(p, regs);
430 setup_singlestep(p, regs, kcb);
435 swap_preempt_enable_no_resched();
/*
 * Thin wrapper around __kprobe_handler() that temporarily sets
 * oops_in_progress to suppress slow BUG() console output while the
 * probe is being handled, restoring the previous value afterwards.
 */
440 static int kprobe_handler(struct pt_regs *regs)
443 #ifdef SUPRESS_BUG_MESSAGES
444 int swap_oops_in_progress;
446 * oops_in_progress used to avoid BUG() messages
447 * that slow down kprobe_handler() execution
449 swap_oops_in_progress = oops_in_progress;
450 oops_in_progress = 1;
453 ret = __kprobe_handler(regs);
455 #ifdef SUPRESS_BUG_MESSAGES
456 oops_in_progress = swap_oops_in_progress;
463 * @brief Probe pre handler.
465 * @param p Pointer to fired kprobe.
466 * @param regs Pointer to CPU registers data.
/*
 * Jprobe pre-handler: snapshot regs and enough of the stack (argument
 * area) into the control block, disable interrupts, run the optional
 * pre_entry callback, then redirect ip to the jprobe's entry function.
 * The stashed state is restored later by swap_longjmp_break_handler().
 */
469 int swap_setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
471 struct jprobe *jp = container_of(p, struct jprobe, kp);
472 kprobe_pre_entry_handler_t pre_entry;
476 struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
478 pre_entry = (kprobe_pre_entry_handler_t) jp->pre_entry;
479 entry = (entry_point_t) jp->entry;
481 kcb->jprobe_saved_regs = *regs;
482 kcb->jprobe_saved_esp = stack_addr(regs);
483 addr = (unsigned long)(kcb->jprobe_saved_esp);
485 /* TBD: As Linus pointed out, gcc assumes that the callee
486 * owns the argument space and could overwrite it, e.g.
487 * tailcall optimization. So, to be absolutely safe
488 * we also save and restore enough stack bytes to cover
489 * the argument area. */
490 memcpy(kcb->jprobes_stack, (kprobe_opcode_t *)addr,
491 MIN_STACK_SIZE(addr));
492 regs->EREG(flags) &= ~IF_MASK;
493 trace_hardirqs_off();
/* pre_entry may supply a per-CPU single-step override address. */
495 p->ss_addr[smp_processor_id()] = (kprobe_opcode_t *)
496 pre_entry(jp->priv_arg, regs);
498 regs->EREG(ip) = (unsigned long)(jp->entry);
504 * @brief Jprobe return end.
508 void swap_jprobe_return(void);
511 * @brief Jprobe return code.
/*
 * Called by the jprobe handler to return control: switches back to the
 * saved stack pointer (passed in %ebx); swap_jprobe_return_end labels
 * the end of this sequence so swap_longjmp_break_handler() can
 * recognize a return that originated here.
 */
515 void swap_jprobe_return(void)
517 struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
519 asm volatile(" xchgl %%ebx,%%esp\n"
521 " .globl swap_jprobe_return_end\n"
522 " swap_jprobe_return_end:\n"
524 : : "b" (kcb->jprobe_saved_esp) : "memory");
526 EXPORT_SYMBOL_GPL(swap_jprobe_return);
/* User-space jprobe return hook; body not visible in this extract. */
528 void arch_ujprobe_return(void)
533 * Called after single-stepping. p->addr is the address of the
534 * instruction whose first byte has been replaced by the "int 3"
535 * instruction. To avoid the SMP problems that can occur when we
536 * temporarily put back the original opcode to single-step, we
537 * single-stepped a copy of the instruction. The address of this
538 * copy is p->ainsn.insn.
540 * This function prepares to return from the post-single-step
541 * interrupt. We have to fix up the stack as follows:
543 * 0) Except in the case of absolute or indirect jump or call instructions,
544 * the new eip is relative to the copied instruction. We need to make
545 * it relative to the original instruction.
547 * 1) If the single-stepped instruction was pushfl, then the TF and IF
548 * flags are set in the just-pushed eflags, and may need to be cleared.
550 * 2) If the single-stepped instruction was a call, the return address
551 * that is atop the stack is the address following the copied instruction.
552 * We need to make it the address following the original instruction.
554 * This function also checks instruction size for preparing direct execution.
/*
 * Fix up state after single-stepping the out-of-line copy: clear TF,
 * repair pushfl'd flags and call return addresses on the stack, and
 * translate the copy-relative ip back to the original address.  Also
 * decides, per instruction kind, whether the probe can be boosted.
 * NOTE(review): switch statement, case labels and breaks are partly
 * missing from this extract — per-opcode notes follow visible lines.
 */
556 static void resume_execution(struct kprobe *p,
557 struct pt_regs *regs,
558 struct kprobe_ctlblk *kcb)
561 unsigned long copy_eip = (unsigned long) p->ainsn.insn;
562 unsigned long orig_eip = (unsigned long) p->addr;
563 kprobe_opcode_t insns[2];
565 regs->EREG(flags) &= ~TF_MASK;
/* tos = top of stack; first two bytes of the stepped instruction. */
567 tos = stack_addr(regs);
568 insns[0] = p->ainsn.insn[0];
569 insns[1] = p->ainsn.insn[1];
572 case 0x9c: /* pushfl */
/* The pushed eflags carry our TF/IF; substitute the pre-probe bits. */
573 *tos &= ~(TF_MASK | IF_MASK);
574 *tos |= kcb->kprobe_old_eflags;
576 case 0xc2: /* iret/ret/lret */
581 case 0xea: /* jmp absolute -- eip is correct */
582 /* eip is already adjusted, no more changes required */
583 p->ainsn.boostable = 1;
585 case 0xe8: /* call relative - Fix return addr */
586 *tos = orig_eip + (*tos - copy_eip);
588 case 0x9a: /* call absolute -- same as call absolute, indirect */
589 *tos = orig_eip + (*tos - copy_eip);
/* 0xff group: indirect call/jmp distinguished by ModRM reg field. */
592 if ((insns[1] & 0x30) == 0x10) {
594 * call absolute, indirect
595 * Fix return addr; eip is correct.
596 * But this is not boostable
598 *tos = orig_eip + (*tos - copy_eip);
600 } else if (((insns[1] & 0x31) == 0x20) || /* jmp near, absolute
602 ((insns[1] & 0x31) == 0x21)) {
603 /* jmp far, absolute indirect */
604 /* eip is correct. And this is boostable */
605 p->ainsn.boostable = 1;
/* Undecided (boostable == 0): boost iff a 5-byte jmp back fits. */
612 if (p->ainsn.boostable == 0) {
613 if ((regs->EREG(ip) > copy_eip) &&
614 (regs->EREG(ip) - copy_eip) + 5 < MAX_INSN_SIZE) {
616 * These instructions can be executed directly if it
617 * jumps back to correct address.
619 set_jmp_op((void *)regs->EREG(ip),
621 (regs->EREG(ip) - copy_eip));
622 p->ainsn.boostable = 1;
624 p->ainsn.boostable = -1;
/* Translate ip from the out-of-line copy back to the original. */
628 regs->EREG(ip) = orig_eip + (regs->EREG(ip) - copy_eip);
635 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
636 * remain disabled throughout this function.
/*
 * debug-trap (single-step completed) handler: run the user post_handler
 * (unless reentered), fix up ip/stack via resume_execution(), restore
 * the saved eflags bits, and pop back to the previous kprobe if nested.
 * If someone else's TF is still set, let do_debug continue processing.
 */
638 static int post_kprobe_handler(struct pt_regs *regs)
640 struct kprobe *cur = swap_kprobe_running();
641 struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
645 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
646 kcb->kprobe_status = KPROBE_HIT_SSDONE;
647 cur->post_handler(cur, regs, 0);
650 resume_execution(cur, regs, kcb);
651 regs->EREG(flags) |= kcb->kprobe_saved_eflags;
653 trace_hardirqs_fixup_flags(regs->EREG(flags));
654 #endif /* CONFIG_X86 */
655 /* Restore back the original saved kprobes variables and continue. */
656 if (kcb->kprobe_status == KPROBE_REENTER) {
657 restore_previous_kprobe(kcb);
660 swap_reset_current_kprobe();
662 swap_preempt_enable_no_resched();
665 * if somebody else is singlestepping across a probe point, eflags
666 * will have TF set, in which case, continue the remaining processing
667 * of do_debug, as if this is not a probe hit.
669 if (regs->EREG(flags) & TF_MASK)
/*
 * Page-fault-while-probing handler.  During single-step (SS/REENTER):
 * rewind ip to the probe address, restore eflags, unwind kprobe state
 * and let the fault handler retry.  During handler execution
 * (HIT_ACTIVE/SSDONE): account the miss, give the probe's own
 * fault_handler first shot, then try the kernel's fixup tables.
 */
675 static int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
677 struct kprobe *cur = swap_kprobe_running();
678 struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
680 switch (kcb->kprobe_status) {
684 * We are here because the instruction being single
685 * stepped caused a page fault. We reset the current
686 * kprobe and the eip points back to the probe address
687 * and allow the page fault handler to continue as a
690 regs->EREG(ip) = (unsigned long) cur->addr;
691 regs->EREG(flags) |= kcb->kprobe_old_eflags;
692 if (kcb->kprobe_status == KPROBE_REENTER)
693 restore_previous_kprobe(kcb);
695 swap_reset_current_kprobe();
696 swap_preempt_enable_no_resched();
698 case KPROBE_HIT_ACTIVE:
699 case KPROBE_HIT_SSDONE:
701 * We increment the nmissed count for accounting,
702 * we can also use npre/npostfault count for accouting
703 * these specific fault cases.
705 swap_kprobes_inc_nmissed_count(cur);
708 * We come here because instructions in the pre/post
709 * handler caused the page_fault, this could happen
710 * if handler tries to access user space by
711 * copy_from_user(), get_user() etc. Let the
712 * user-specified handler try to fix it first.
714 if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
718 * In case the user-specified fault handler returned
719 * zero, try to fix up.
721 if (swap_fixup_exception(regs))
725 * fixup_exception() could not handle it,
726 * Let do_page_fault() fix it.
/*
 * die-notifier entry point: dispatches kernel-mode int3 hits to
 * kprobe_handler(), debug traps to post_kprobe_handler(), and faults
 * raised while a probe is running to kprobe_fault_handler().
 * User-mode traps and NULL regs are ignored (NOTIFY_DONE).
 */
735 static int kprobe_exceptions_notify(struct notifier_block *self,
736 unsigned long val, void *data)
738 struct die_args *args = (struct die_args *) data;
739 int ret = NOTIFY_DONE;
741 DBPRINTF("val = %ld, data = 0x%X", val, (unsigned int) data);
743 if (args->regs == NULL || user_mode_vm(args->regs))
746 DBPRINTF("switch (val) %lu %d %d", val, DIE_INT3, DIE_TRAP);
748 #ifdef CONFIG_KPROBES
753 DBPRINTF("before kprobe_handler ret=%d %p",
755 if (kprobe_handler (args->regs))
757 DBPRINTF("after kprobe_handler ret=%d %p",
761 if (post_kprobe_handler(args->regs))
765 /* swap_kprobe_running() needs smp_processor_id() */
767 if (swap_kprobe_running() &&
768 kprobe_fault_handler(args->regs, args->trapnr))
775 DBPRINTF("ret=%d", ret);
776 /* if(ret == NOTIFY_STOP) */
777 /* handled_exceptions++; */
/* Registered/unregistered in swap_arch_init/exit_kprobes(). */
782 static struct notifier_block kprobe_exceptions_nb = {
783 .notifier_call = kprobe_exceptions_notify,
788 * @brief Longjump break handler.
790 * @param p Pointer to fired kprobe.
791 * @param regs Pointer to CPU registers data.
792 * @return 0 on success.
/*
 * Jprobe break_handler: fires when execution returns through the int3
 * inside swap_jprobe_return()..swap_jprobe_return_end.  Verifies the
 * stack pointer matches the one saved at jprobe entry (dumping both
 * register sets on mismatch), then restores the saved regs and stack
 * bytes stashed by swap_setjmp_pre_handler().
 */
794 int swap_longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
796 struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
797 u8 *addr = (u8 *) (regs->EREG(ip) - 1);
798 unsigned long stack_addr = (unsigned long) (kcb->jprobe_saved_esp);
799 struct jprobe *jp = container_of(p, struct jprobe, kp);
801 DBPRINTF("p = %p\n", p);
803 if ((addr > (u8 *)swap_jprobe_return) &&
804 (addr < (u8 *)swap_jprobe_return_end)) {
805 if (stack_addr(regs) != kcb->jprobe_saved_esp) {
806 struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
807 printk(KERN_INFO "current esp %p does not match saved esp %p\n",
808 stack_addr(regs), kcb->jprobe_saved_esp);
809 printk(KERN_INFO "Saved registers for jprobe %p\n", jp);
810 swap_show_registers(saved_regs);
811 printk(KERN_INFO "Current registers\n");
812 swap_show_registers(regs);
/* Roll back regs and the saved argument-area stack bytes. */
816 *regs = kcb->jprobe_saved_regs;
817 memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
818 MIN_STACK_SIZE(stack_addr));
819 swap_preempt_enable_no_resched();
827 * @brief Arms kprobe.
829 * @param p Pointer to target kprobe.
/*
 * Arm: patch a one-byte int3 (BREAKPOINT_INSTRUCTION) over the probed
 * address via the kernel's text_poke (resolved at module init).
 */
832 void swap_arch_arm_kprobe(struct kprobe *p)
834 swap_text_poke(p->addr,
835 ((unsigned char[]){BREAKPOINT_INSTRUCTION}), 1);
839 * @brief Disarms kprobe.
841 * @param p Pointer to target kprobe.
/* Disarm: write back the original first byte saved in p->opcode. */
844 void swap_arch_disarm_kprobe(struct kprobe *p)
846 swap_text_poke(p->addr, &p->opcode, 1);
/*
 * Called from the kretprobe trampoline asm; returns the real return
 * address (as void *) which the asm stores into the saved-flags slot.
 */
849 static __used void *trampoline_probe_handler_x86(struct pt_regs *regs)
851 return (void *)trampoline_probe_handler(NULL, regs);
855 * @brief Prepares kretprobes, saves ret address, makes function return to
858 * @param ri Pointer to kretprobe_instance.
859 * @param regs Pointer to CPU registers data.
/*
 * Hijack the probed function's return: save the real return address
 * (top of stack) into @ri and overwrite the stack slot with the
 * kretprobe trampoline.  For the special __switch_to probe the next
 * task is taken from regs->dx (2nd fastcall argument on x86-32 —
 * NOTE(review): confirm calling convention against the full source).
 */
862 void swap_arch_prepare_kretprobe(struct kretprobe_instance *ri,
863 struct pt_regs *regs)
865 unsigned long *ptr_ret_addr = stack_addr(regs);
867 /* for __switch_to probe */
868 if ((unsigned long)ri->rp->kp.addr == sched_addr) {
870 ri->task = (struct task_struct *)regs->dx;
872 ri->sp = ptr_ret_addr;
875 /* Save the return address */
876 ri->ret_addr = (unsigned long *)*ptr_ret_addr;
878 /* Replace the return addr with trampoline addr */
879 *ptr_ret_addr = (unsigned long)&swap_kretprobe_trampoline;
887 ******************************************************************************
889 ******************************************************************************
/*
 * kjump support: kjump_handler runs the user callback with its payload;
 * kjump_trampoline calls it and then hits the int3 at
 * kjump_trampoline_int3, where restore_regs_kp resumes the original
 * probe.  NOTE(review): the asm() wrapper around the trampoline is
 * truncated in this extract.
 */
899 static struct kj_cb_data * __used kjump_handler(struct kj_cb_data *data)
902 data->cb(data->data);
907 void kjump_trampoline(void);
908 void kjump_trampoline_int3(void);
910 "kjump_trampoline:\n"
911 "call kjump_handler\n"
912 "kjump_trampoline_int3:\n"
913 "nop\n" /* for restore_regs_kp */
/*
 * Divert execution from a running kprobe handler into @cb: allocate a
 * kj_cb_data holding the saved regs, the running kprobe and a copy of
 * @data (@size bytes), pass it via regs->ax, and point ip at
 * kjump_trampoline.  The current kprobe is released before the jump;
 * restore_regs_pre_handler() re-establishes it afterwards.
 */
916 int set_kjump_cb(struct pt_regs *regs, jumper_cb_t cb, void *data, size_t size)
918 struct kj_cb_data *cb_data;
920 cb_data = kmalloc(sizeof(*cb_data) + size, GFP_ATOMIC);
925 cb_data->regs = *regs;
927 cb_data->p = swap_kprobe_running();
932 memcpy(cb_data->data, data, size);
934 /* save pointer cb_data at ax */
935 regs->ax = (long)cb_data;
937 /* jump to kjump_trampoline */
938 regs->ip = (unsigned long)&kjump_trampoline;
940 swap_reset_current_kprobe();
941 swap_preempt_enable_no_resched();
945 EXPORT_SYMBOL_GPL(set_kjump_cb);
/*
 * Pre-handler of restore_regs_kp (the int3 after kjump_trampoline):
 * recovers the kj_cb_data pointer from regs->ax, re-installs the
 * original kprobe as current and resumes its single-step.
 */
947 static int restore_regs_pre_handler(struct kprobe *p, struct pt_regs *regs)
949 struct kj_cb_data *data = (struct kj_cb_data *)regs->ax;
950 struct kprobe *kp = data->p;
951 struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
956 /* FIXME: potential memory leak, when process kill */
959 kcb = swap_get_kprobe_ctlblk();
961 set_current_kprobe(kp, regs, kcb);
962 setup_singlestep(kp, regs, kcb);
/* Internal kprobe planted on the nop at kjump_trampoline_int3. */
967 static struct kprobe restore_regs_kp = {
968 .pre_handler = restore_regs_pre_handler,
969 .addr = (kprobe_opcode_t *)&kjump_trampoline_int3, /* nop */
/* Register the internal restore_regs_kp probe; logs on failure. */
972 static int kjump_init(void)
976 ret = swap_register_kprobe(&restore_regs_kp);
978 printk(KERN_INFO "ERROR: kjump_init(), ret=%d\n", ret);
/* Unregister the internal restore_regs_kp probe. */
983 static void kjump_exit(void)
985 swap_unregister_kprobe(&restore_regs_kp);
993 ******************************************************************************
995 ******************************************************************************
/*
 * Jumper support: cb_data carries the saved return address, saved %ebx
 * and the user payload.  jump_trampoline (entered in place of a return)
 * saves registers, restores the caller's %ebx, runs the callback via
 * jump_handler(), then returns to the original ret_addr using the same
 * flags-slot trick as the kretprobe trampoline.
 * NOTE(review): struct cb_data fields and the asm() wrapper are
 * truncated in this extract.
 */
998 unsigned long ret_addr;
/* Returns the saved %ebx so the trampoline can restore it. */
1005 static unsigned long __used get_bx(struct cb_data *data)
1010 static unsigned long __used jump_handler(struct cb_data *data)
1012 unsigned long ret_addr = data->ret_addr;
1015 data->cb(data->data);
1017 /* FIXME: potential memory leak, when process kill */
1023 void jump_trampoline(void);
1025 "jump_trampoline:\n"
1027 SWAP_SAVE_REGS_STRING
1028 "movl %ebx, %eax\n" /* data --> ax */
1030 "movl %eax, (%esp)\n" /* restore bx */
1031 "movl %ebx, %eax\n" /* data --> ax */
1032 "call jump_handler\n"
1033 /* move flags to cs */
1034 "movl 56(%esp), %edx\n"
1035 "movl %edx, 52(%esp)\n"
1036 /* replace saved flags with true return address. */
1037 "movl %eax, 56(%esp)\n"
1038 SWAP_RESTORE_REGS_STRING
/* Address of jump_trampoline, for callers installing the jumper. */
1043 unsigned long get_jump_addr(void)
1045 return (unsigned long)&jump_trampoline;
1047 EXPORT_SYMBOL_GPL(get_jump_addr);
/*
 * Prepare a jumper invocation: allocate cb_data with a copy of @data
 * (@size bytes), record @ret_addr and the caller's %ebx for later
 * restoration, and pass the cb_data pointer to jump_trampoline via
 * regs->bx.  Pairs with get_jump_addr()/jump_handler() above.
 */
1049 int set_jump_cb(unsigned long ret_addr, struct pt_regs *regs,
1050 jumper_cb_t cb, void *data, size_t size)
1052 struct cb_data *cb_data;
1054 cb_data = kmalloc(sizeof(*cb_data) + size, GFP_ATOMIC);
1055 if (cb_data == NULL)
1060 memcpy(cb_data->data, data, size);
1062 /* save info for restore */
1063 cb_data->ret_addr = ret_addr;
1065 cb_data->bx = regs->bx;
1067 /* save cb_data to bx */
1068 regs->bx = (long)cb_data;
1072 EXPORT_SYMBOL_GPL(set_jump_cb);
1079 * @brief Initializes x86 module deps.
1081 * @return 0 on success, negative error code on error.
/*
 * Resolve the unexported kernel symbols this module needs
 * (fixup_exception, text_poke, show_registers — the latter two symbol
 * names are set on lines missing from this extract) via swap_ksyms().
 * Any lookup failure logs the offending symbol and fails module init.
 */
1083 int arch_init_module_deps()
1087 sym = "fixup_exception";
1088 swap_fixup_exception = (void *)swap_ksyms(sym);
1089 if (swap_fixup_exception == NULL)
1093 swap_text_poke = (void *)swap_ksyms(sym);
1094 if (swap_text_poke == NULL)
1098 swap_show_registers = (void *)swap_ksyms(sym);
1099 if (swap_show_registers == NULL)
1105 printk(KERN_INFO "ERROR: symbol %s(...) not found\n", sym);
1110 * @brief Initializes kprobes module for ARM arch.
1112 * @return 0 on success, error code on error.
/*
 * Arch init: hook the die-notifier chain so int3/debug/fault events
 * reach our handlers; unregisters again on a later init failure
 * (error path lines are missing from this extract).
 */
1114 int swap_arch_init_kprobes(void)
1118 ret = register_die_notifier(&kprobe_exceptions_nb);
1124 unregister_die_notifier(&kprobe_exceptions_nb);
1130 * @brief Uninitializes kprobe module.
/* Arch teardown: detach from the die-notifier chain. */
1134 void swap_arch_exit_kprobes(void)
1137 unregister_die_notifier(&kprobe_exceptions_nb);