1 #include <linux/version.h> // LINUX_VERSION_CODE, KERNEL_VERSION()
2 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
3 #include <linux/config.h>
5 #include <linux/ptrace.h>
6 #include <linux/spinlock.h>
7 #include <linux/preempt.h>
8 #include <linux/module.h>
9 #include <linux/highmem.h> // kmap_atomic, kunmap_atomic, copy_from_user_page, copy_to_user_page
10 #include <linux/pagemap.h> // page_cache_release
11 #include <asm/system.h>
12 #include <asm/cacheflush.h>
13 #include <linux/kallsyms.h>
14 #include <linux/vmalloc.h>
15 #include <linux/syscalls.h>
16 #include <linux/security.h>
17 #include <linux/mount.h>
18 #include <linux/mman.h>
19 #include <linux/personality.h>
20 #include <linux/hugetlb.h>
21 #include <linux/file.h>
22 #include <linux/mempolicy.h>
23 #if defined(CONFIG_X86)
24 #include <linux/kdebug.h>
25 #include <linux/moduleloader.h>
26 #include <linux/freezer.h>
27 #include <linux/hardirq.h>
29 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 19))
30 #include <linux/freezer.h>
/*
 * Per-architecture trampoline buffer layout: total length (in
 * kprobe_opcode_t slots) plus the slot indices of the copied original
 * instruction, the single-step break and the return break.
 * NOTE(review): the matching #endif lines are not visible in this dump;
 * confirm structure against the full source.
 */
35 #if defined(CONFIG_X86)
36 # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
37 # define TF_MASK X86_EFLAGS_TF
38 # define IF_MASK X86_EFLAGS_IF
40 # define UPROBES_TRAMP_LEN (MAX_INSN_SIZE+sizeof(kprobe_opcode_t))
41 # define UPROBES_TRAMP_INSN_IDX 0
42 # define UPROBES_TRAMP_RET_BREAK_IDX MAX_INSN_SIZE
43 # define KPROBES_TRAMP_LEN MAX_INSN_SIZE
44 # define KPROBES_TRAMP_INSN_IDX 0
45 #elif defined(CONFIG_ARM)
46 # define UPROBES_TRAMP_LEN 8
47 # define UPROBES_TRAMP_INSN_IDX 2
48 # define UPROBES_TRAMP_SS_BREAK_IDX 4
49 # define UPROBES_TRAMP_RET_BREAK_IDX 5
50 # define KPROBES_TRAMP_LEN 8
51 # define KPROBES_TRAMP_INSN_IDX UPROBES_TRAMP_INSN_IDX
52 # define KPROBES_TRAMP_SS_BREAK_IDX UPROBES_TRAMP_SS_BREAK_IDX
53 # define KPROBES_TRAMP_RET_BREAK_IDX UPROBES_TRAMP_RET_BREAK_IDX
54 #elif defined(CONFIG_MIPS)
55 # define UPROBES_TRAMP_LEN 3
56 # define UPROBES_TRAMP_INSN_IDX 0
57 # define UPROBES_TRAMP_SS_BREAK_IDX 1
58 # define UPROBES_TRAMP_RET_BREAK_IDX 2
59 # define KPROBES_TRAMP_LEN UPROBES_TRAMP_LEN
60 # define KPROBES_TRAMP_INSN_IDX UPROBES_TRAMP_INSN_IDX
61 # define KPROBES_TRAMP_SS_BREAK_IDX UPROBES_TRAMP_SS_BREAK_IDX
62 # define KPROBES_TRAMP_RET_BREAK_IDX UPROBES_TRAMP_RET_BREAK_IDX
/* Per-CPU probe bookkeeping: the kprobe currently being handled on this
 * CPU (NULL when none) and its control block. */
65 DEFINE_PER_CPU (struct kprobe *, current_kprobe) = NULL;
66 DEFINE_PER_CPU (struct kprobe_ctlblk, kprobe_ctlblk);
68 /* kprobe_status settings */
69 #define KPROBE_HIT_ACTIVE 0x00000001
70 #define KPROBE_HIT_SS 0x00000002
/* Sentinels for "not set" integer / pointer values. */
72 #define INVALID_VALUE 0xFFFFFFFF
73 #define INVALID_POINTER (void*)INVALID_VALUE
/* Module parameter: address of the kernel symbol table helper
 * (passed in at insmod time; INVALID_VALUE when not supplied). */
75 static int ksyms = INVALID_VALUE;
76 module_param (ksyms, int, 0);
78 extern unsigned long handled_exceptions;
/* Pre-2.6.12 kernels named synchronize_sched() synchronize_kernel(). */
80 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12))
81 #define synchronize_sched synchronize_kernel
/* End-of-handler markers used to recognize returns from j/uprobe stubs. */
84 void jprobe_return_end (void);
85 void uprobe_return_end (void);
87 #if defined(CONFIG_X86)
88 /*fastcall*/ void *__kprobes trampoline_probe_handler_x86 (struct pt_regs *regs);
/*
 * Sanity check for kretprobe return-address bookkeeping: a NULL return
 * address, or one still pointing at the trampoline, means the saved
 * instance list is corrupted -- unrecoverable, so panic.
 * NOTE(review): the return type and braces are not visible in this
 * partial dump.
 */
92 kretprobe_assert (struct kretprobe_instance *ri, unsigned long orig_ret_address, unsigned long trampoline_address)
94 if (!orig_ret_address || (orig_ret_address == trampoline_address))
95 panic ("kretprobe BUG!: Processing kretprobe %p @ %p\n", ri->rp, ri->rp->kp.addr);
/* Extract the high / low 16-bit halves of a 32-bit value. */
98 #define HIWORD(x) (((x) & 0xFFFF0000) >> 16)
99 #define LOWORD(x) ((x) & 0x0000FFFF)
101 unsigned int gl_nNumberOfInstructions = 0;
102 unsigned int gl_nCodeSize = 0;
/* Template of machine instructions used when patching trap/branch
 * sequences; entries are raw opcodes (see per-line disassembly). */
104 unsigned int arrTrapsTemplate[] = {
105 #if defined(CONFIG_MIPS)
106 0x3c010000, // lui a1 [0]
107 0x24210000, // addiu a1, a1 [1]
108 0x00200008, // jr a1 [2]
111 #elif defined(CONFIG_ARM)
112 0xe1a0c00d, // mov ip, sp
113 0xe92dd800, // stmdb sp!, {fp, ip, lr, pc}
114 0xe24cb004, // sub fp, ip, #4 ; 0x4
116 0xe3500000, // cmp r0, #0 ; 0x0
117 0xe89da800, // ldmia sp, {fp, sp, pc}
123 unsigned long nCount;
/* Resolved addresses of the scheduler / fork entry points to probe. */
125 kprobe_opcode_t *sched_addr;
126 kprobe_opcode_t *fork_addr;
128 #if defined(CONFIG_MIPS)
129 #define REG_HI_INDEX 0
130 #define REG_LO_INDEX 1
131 #define NOTIFIER_CALL_CHAIN_INDEX 0
133 #elif defined(CONFIG_ARM)
134 #define NOTIFIER_CALL_CHAIN_INDEX 3
135 //#define NOTIFIER_CALL_CHAIN_INDEX1 6
136 //#define NOTIFIER_CALL_CHAIN_INDEX2 11
/*
 * Encode an ARM B/BL instruction located at 'addr' that branches to
 * 'base'; 'link' selects BL (0xEBxxxxxx) vs B (0xEAxxxxxx).  The -8
 * accounts for the ARM pipeline (PC reads as insn address + 8).
 * NOTE(review): as dumped, 'insn' is read at the range check before any
 * visible assignment (uninitialized read, UB), and
 * abs(insn & 0xffffff) > 0xffffff can never be true since the mask
 * bounds the value at 0xffffff.  A line deriving insn from bpi
 * (presumably insn = bpi >> 2) appears to be missing from this dump --
 * confirm against the full source.
 */
139 arch_construct_brunch (unsigned int base, unsigned int addr, int link)
141 kprobe_opcode_t insn;
142 unsigned int bpi = (unsigned int) base - (unsigned int) addr - 8;
144 DBPRINTF ("base=%x addr=%x base-addr-8=%x\n", base, addr, bpi);
145 if (abs (insn & 0xffffff) > 0xffffff)
147 DBPRINTF ("ERROR: kprobe address out of range\n");
150 insn = insn & 0xffffff;
151 insn = insn | ((link != 0) ? 0xeb000000 : 0xea000000);
152 DBPRINTF ("insn=%lX\n", insn);
153 return (unsigned int) insn;
/* Saved copy of the original (pre-patch) trap instructions. */
157 unsigned int *arrTrapsOriginal = NULL;
159 #ifndef KERNEL_HAS_ISPAGEPRESENT
/*
 * Walk the page tables of 'mm' (pgd -> pud -> pmd -> pte, with the pud
 * level only on >= 2.6.11 four-level kernels) to test whether the page
 * backing 'addr' is currently present in memory.  Takes mmap_sem for
 * read and page_table_lock for the duration of the walk.
 * NOTE(review): braces, the 'ret' variable and the return statement are
 * not visible in this partial dump.
 */
161 page_present (struct mm_struct *mm, unsigned long addr)
167 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
171 //printk("page_present\n");
172 //BUG_ON(down_read_trylock(&mm->mmap_sem) == 0);
173 down_read (&mm->mmap_sem);
174 spin_lock (&(mm->page_table_lock));
175 pgd = pgd_offset (mm, addr);
176 //printk("pgd %p\n", pgd);
177 if ((pgd != NULL) && pgd_present (*pgd))
179 //printk("pgd_present\n");
180 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11)
181 pud = pud_offset (pgd, addr);
182 //printk("pud %p\n", pud);
183 if ((pud != NULL) && pud_present (*pud))
185 pmd = pmd_offset (pud, addr);
188 pmd = pmd_offset (pgd, addr);
190 //printk("pmd %p\n", pmd);
191 if ((pmd != NULL) && pmd_present (*pmd))
194 //printk("pmd_present\n");
195 pte = pte_offset_map (pmd, addr);
196 //pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
197 //printk("pte %p/%lx\n", pte, addr);
198 if ((pte != NULL) && pte_present (*pte))
201 //printk("pte_present\n");
204 //pte_unmap_unlock(pte, ptl);
208 spin_unlock (&(mm->page_table_lock));
209 up_read (&mm->mmap_sem);
210 //printk("page_present %d\n", ret);
/*
 * Instruction decode tables used by arch_check_insn() and the exec-buf
 * preparation code: field masks/extractors plus opcode patterns for
 * every control-flow instruction that cannot safely be single-stepped
 * out of line.
 */
215 #if defined(CONFIG_MIPS)
216 #define MIPS_INSN_OPCODE_MASK 0xFC000000
217 #define MIPS_INSN_RS_MASK 0x03E00000
218 #define MIPS_INSN_RT_MASK 0x001F0000
219 //#define MIPS_INSN_UN_MASK 0x0000FFC0
220 #define MIPS_INSN_FUNC_MASK 0x0000003F
221 #define MIPS_INSN_OPCODE(insn) (insn & MIPS_INSN_OPCODE_MASK)
222 #define MIPS_INSN_RS(insn) (insn & MIPS_INSN_RS_MASK)
223 #define MIPS_INSN_RT(insn) (insn & MIPS_INSN_RT_MASK)
224 #define MIPS_INSN_FUNC(insn) (insn & MIPS_INSN_FUNC_MASK)
/* Branch/jump major opcodes. */
226 #define MIPS_BEQ_OPCODE 0x10000000
227 #define MIPS_BNE_OPCODE 0x14000000
228 #define MIPS_BLEZ_OPCODE 0x18000000
229 #define MIPS_BGTZ_OPCODE 0x1C000000
230 #define MIPS_BEQL_OPCODE 0x50000000
231 #define MIPS_BNEL_OPCODE 0x54000000
232 #define MIPS_BLEZL_OPCODE 0x58000000
233 #define MIPS_BGTZL_OPCODE 0x5C000000
234 #define MIPS_REGIMM_OPCODE 0x04000000
235 #define MIPS_SPECIAL_OPCODE 0x00000000
236 #define MIPS_COP1_OPCODE 0x44000000
237 #define MIPS_COP2_OPCODE 0x48000000
238 #define MIPS_J_OPCODE 0x08000000
239 #define MIPS_JAL_OPCODE 0x0C000000
240 #define MIPS_JALX_OPCODE 0x74000000
242 #define MIPS_BC_RS 0x01000000
/* REGIMM rt-field encodings (conditional branch variants). */
244 #define MIPS_BLTZ_RT 0x00000000
245 #define MIPS_BGEZ_RT 0x00010000
246 #define MIPS_BLTZL_RT 0x00020000
247 #define MIPS_BGEZL_RT 0x00030000
248 #define MIPS_BLTZAL_RT 0x00100000
249 #define MIPS_BGEZAL_RT 0x00110000
250 #define MIPS_BLTZALL_RT 0x00120000
251 #define MIPS_BGEZALL_RT 0x00130000
/* SPECIAL function-field encodings. */
254 #define MIPS_JR_FUNC 0x00000008
255 #define MIPS_JALR_FUNC 0x00000009
256 #define MIPS_BREAK_FUNC 0x0000000D
257 #define MIPS_SYSCALL_FUNC 0x0000000C
259 #elif defined(CONFIG_ARM)
261 #define MASK_ARM_INSN_UNDEF 0x0FF00000
262 #define PTRN_ARM_INSN_UNDEF 0x03000000
263 // architecturally undefined
264 #define MASK_ARM_INSN_AUNDEF 0x0FF000F0
265 #define PTRN_ARM_INSN_AUNDEF 0x07F000F0
/* Branch-family instructions. */
267 #define MASK_ARM_INSN_B 0x0E000000
268 #define PTRN_ARM_INSN_B 0x0A000000
269 #define MASK_ARM_INSN_BL 0x0E000000
270 #define PTRN_ARM_INSN_BL 0x0B000000
271 #define MASK_ARM_INSN_BLX1 0xFF000000
272 #define PTRN_ARM_INSN_BLX1 0xFA000000
273 #define MASK_ARM_INSN_BLX2 0x0FF000F0
274 #define PTRN_ARM_INSN_BLX2 0x01200030
275 #define MASK_ARM_INSN_BX 0x0FF000F0
276 #define PTRN_ARM_INSN_BX 0x01200010
277 #define MASK_ARM_INSN_BXJ 0x0FF000F0
278 #define PTRN_ARM_INSN_BXJ 0x01200020
279 // software interrupts
280 #define MASK_ARM_INSN_SWI 0x0F000000
281 #define PTRN_ARM_INSN_SWI 0x0F000000
/* BKPT breakpoint instruction. */
283 #define MASK_ARM_INSN_BREAK 0xFFF000F0
284 #define PTRN_ARM_INSN_BREAK 0xE1200070
285 // Data processing immediate shift
286 #define MASK_ARM_INSN_DPIS 0x0E000010
287 #define PTRN_ARM_INSN_DPIS 0x00000000
288 // Data processing register shift
289 #define MASK_ARM_INSN_DPRS 0x0E000090
290 #define PTRN_ARM_INSN_DPRS 0x00000010
291 // Data processing immediate
292 #define MASK_ARM_INSN_DPI 0x0E000000
293 #define PTRN_ARM_INSN_DPI 0x02000000
294 // Load immediate offset
295 #define MASK_ARM_INSN_LIO 0x0E100000
296 #define PTRN_ARM_INSN_LIO 0x04100000
297 // Store immediate offset
298 #define MASK_ARM_INSN_SIO MASK_ARM_INSN_LIO
299 #define PTRN_ARM_INSN_SIO 0x04000000
300 // Load register offset
301 #define MASK_ARM_INSN_LRO 0x0E100010
302 #define PTRN_ARM_INSN_LRO 0x06100000
303 // Store register offset
304 #define MASK_ARM_INSN_SRO MASK_ARM_INSN_LRO
305 #define PTRN_ARM_INSN_SRO 0x06000000
/* Load-multiple / store-multiple. */
307 #define MASK_ARM_INSN_LM 0x0E100000
308 #define PTRN_ARM_INSN_LM 0x08100000
310 #define MASK_ARM_INSN_SM MASK_ARM_INSN_LM
311 #define PTRN_ARM_INSN_SM 0x08000000
312 // Coprocessor load/store and double register transfers
313 #define MASK_ARM_INSN_CLS 0x0E000000
314 #define PTRN_ARM_INSN_CLS 0x0C000000
315 // Coprocessor register transfers
316 #define MASK_ARM_INSN_CRT 0x0F000010
317 #define PTRN_ARM_INSN_CRT 0x0E000010
/* True when 'insn' matches pattern 'name' under its mask. */
319 #define ARM_INSN_MATCH(name, insn) ((insn & MASK_ARM_INSN_##name) == PTRN_ARM_INSN_##name)
/* Register field get/set helpers (Rn/Rd/Rs/Rm) and register-list bit
 * helpers (MR) for LDM/STM-style instructions. */
321 #define ARM_INSN_REG_RN(insn) ((insn & 0x000F0000)>>16)
322 #define ARM_INSN_REG_SET_RN(insn, nreg) {insn &= ~0x000F0000; insn |= nreg<<16;}
323 #define ARM_INSN_REG_RD(insn) ((insn & 0x0000F000)>>12)
324 #define ARM_INSN_REG_SET_RD(insn, nreg) {insn &= ~0x0000F000; insn |= nreg<<12;}
325 #define ARM_INSN_REG_RS(insn) ((insn & 0x00000F00)>>8)
326 #define ARM_INSN_REG_SET_RS(insn, nreg) {insn &= ~0x00000F00; insn |= nreg<<8;}
327 #define ARM_INSN_REG_RM(insn) (insn & 0x0000000F)
328 #define ARM_INSN_REG_SET_RM(insn, nreg) {insn &= ~0x0000000F; insn |= nreg;}
329 #define ARM_INSN_REG_MR(insn, nreg) (insn & (1 << nreg))
330 #define ARM_INSN_REG_SET_MR(insn, nreg) {insn |= (1 << nreg);}
331 #define ARM_INSN_REG_CLEAR_MR(insn, nreg) {insn &= ~(1 << nreg);}
333 #elif defined(CONFIG_X86)
334 //# warning Branch instruction patterns are not defined for x86 arch!!!
337 #if defined(CONFIG_X86)
338 /* insert a jmp code */
/*
 * Write a 5-byte x86 relative JMP at 'from' targeting 'to' by aliasing
 * 'from' as a packed { op, raddr } struct.  The +5 accounts for the
 * length of the JMP instruction itself.
 */
339 static __always_inline void
340 set_jmp_op (void *from, void *to)
346 } __attribute__ ((packed)) * jop;
347 jop = (struct __arch_jmp_op *) from;
348 jop->raddr = (long) (to) - ((long) (from) + 5);
349 jop->op = RELATIVEJUMP_INSTRUCTION;
/*
 * User-space counterpart of set_jmp_op(): build the 5-byte relative JMP
 * in a local packed struct and copy it into the target process with
 * write_proc_vm_atomic(); panics if the user-space write fails.
 */
353 set_user_jmp_op (void *from, void *to)
359 } __attribute__ ((packed)) jop;
360 //jop = (struct __arch_jmp_op *) from;
361 jop.raddr = (long) (to) - ((long) (from) + 5);
362 jop.op = RELATIVEJUMP_INSTRUCTION;
363 if (!write_proc_vm_atomic (current, (unsigned long)from, &jop, sizeof(jop)))
364 panic ("failed to write jump opcode to user space %p!\n", from);
368 * returns non-zero if opcodes can be boosted.
/*
 * "Boosting" = executing the copied instruction directly followed by a
 * jump back, skipping the single-step trap.  This walks prefixes and
 * classifies the first real opcode against a 256-bit table (two-byte
 * opcodes) or a switch (one-byte opcodes).
 */
370 static __always_inline int
371 can_boost (kprobe_opcode_t * opcodes)
/* W(): pack 16 per-opcode booleans into one word of the bitmap. */
373 #define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \
374 (((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) | \
375 (b4##UL << 0x4)|(b5##UL << 0x5)|(b6##UL << 0x6)|(b7##UL << 0x7) | \
376 (b8##UL << 0x8)|(b9##UL << 0x9)|(ba##UL << 0xa)|(bb##UL << 0xb) | \
377 (bc##UL << 0xc)|(bd##UL << 0xd)|(be##UL << 0xe)|(bf##UL << 0xf)) \
380 * Undefined/reserved opcodes, conditional jump, Opcode Extension
381 * Groups, and some special opcodes cannot be boosted.
383 static const unsigned long twobyte_is_boostable[256 / 32] = {
384 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
385 /* ------------------------------- */
386 W (0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
387 W (0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), /* 10 */
388 W (0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
389 W (0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), /* 30 */
390 W (0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
391 W (0x50, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), /* 50 */
392 W (0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1) | /* 60 */
393 W (0x70, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1), /* 70 */
394 W (0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 80 */
395 W (0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1), /* 90 */
396 W (0xa0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* a0 */
397 W (0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1), /* b0 */
398 W (0xc0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
399 W (0xd0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1), /* d0 */
400 W (0xe0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1) | /* e0 */
401 W (0xf0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0) /* f0 */
402 /* ------------------------------- */
403 /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */
406 kprobe_opcode_t opcode;
407 kprobe_opcode_t *orig_opcodes = opcodes;
/* Refuse if prefix-walking already consumed the whole insn buffer. */
409 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
411 opcode = *(opcodes++);
413 /* 2nd-byte opcode */
416 if (opcodes - orig_opcodes > MAX_INSN_SIZE - 1)
418 return test_bit (*opcodes, twobyte_is_boostable);
421 switch (opcode & 0xf0)
424 if (0x63 < opcode && opcode < 0x67)
425 goto retry; /* prefixes */
426 /* can't boost Address-size override and bound */
427 return (opcode != 0x62 && opcode != 0x67);
429 return 0; /* can't boost conditional jump */
431 /* can't boost software-interruptions */
432 return (0xc1 < opcode && opcode < 0xcc) || opcode == 0xcf;
434 /* can boost AA* and XLAT */
435 return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7);
437 /* can boost in/out and absolute jmps */
438 return ((opcode & 0x04) || opcode == 0xea);
440 if ((opcode & 0x0c) == 0 && opcode != 0xf1)
441 goto retry; /* lock/rep(ne) prefix */
442 /* clear and set flags can be boosted */
443 return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe));
445 if (opcode == 0x26 || opcode == 0x36 || opcode == 0x3e)
446 goto retry; /* prefixes */
447 /* can't boost CS override and call */
448 return (opcode != 0x2e && opcode != 0x9a);
453 * returns non-zero if opcode modifies the interrupt flag.
/* Recognizes the x86 opcodes that change EFLAGS.IF (iret/popf visible
 * here; the cli/sti cases are on lines missing from this dump). */
456 is_IF_modifier (kprobe_opcode_t opcode)
462 case 0xcf: /* iret/iretd */
463 case 0x9d: /* popf/popfd */
/*
 * Reject instructions that cannot be copied and single-stepped out of
 * line: anything that reads or writes the PC / branches by nature.
 * MIPS: classified by opcode / rt / function fields.  ARM: classified
 * by the MASK/PTRN pattern tables above.  x86: no check (decoding is
 * handled by can_boost()).
 */
471 arch_check_insn (struct arch_specific_insn *ainsn)
475 #if defined(CONFIG_MIPS)
476 switch (MIPS_INSN_OPCODE (ainsn->insn[0]))
478 case MIPS_BEQ_OPCODE: //B, BEQ
479 case MIPS_BEQL_OPCODE: //BEQL
480 case MIPS_BNE_OPCODE: //BNE
481 case MIPS_BNEL_OPCODE: //BNEL
482 case MIPS_BGTZ_OPCODE: //BGTZ
483 case MIPS_BGTZL_OPCODE: //BGTZL
484 case MIPS_BLEZ_OPCODE: //BLEZ
485 case MIPS_BLEZL_OPCODE: //BLEZL
486 case MIPS_J_OPCODE: //J
487 case MIPS_JAL_OPCODE: //JAL
488 DBPRINTF ("arch_check_insn: opcode");
491 case MIPS_REGIMM_OPCODE:
492 //BAL, BGEZ, BGEZAL, BGEZALL, BGEZL, BLTZ, BLTZAL, BLTZALL, BLTZL
493 switch (MIPS_INSN_RT (ainsn->insn[0]))
501 case MIPS_BLTZALL_RT:
502 case MIPS_BGEZALL_RT:
503 DBPRINTF ("arch_check_insn: REGIMM opcode\n");
508 //BC1F, BC1FL, BC1T, BC1TL
509 case MIPS_COP1_OPCODE:
510 //BC2F, BC2FL, BC2T, BC2TL
511 case MIPS_COP2_OPCODE:
512 if (MIPS_INSN_RS (ainsn->insn[0]) == MIPS_BC_RS)
514 DBPRINTF ("arch_check_insn: COP1 opcode\n");
518 case MIPS_SPECIAL_OPCODE:
519 //BREAK, JALR, JALR.HB, JR, JR.HB
520 switch (MIPS_INSN_FUNC (ainsn->insn[0]))
524 case MIPS_BREAK_FUNC:
525 case MIPS_SYSCALL_FUNC:
526 DBPRINTF ("arch_check_insn: SPECIAL opcode\n");
532 #elif defined(CONFIG_ARM)
533 // check instructions that can change PC by nature
534 if (ARM_INSN_MATCH (UNDEF, ainsn->insn[0]) ||
535 ARM_INSN_MATCH (AUNDEF, ainsn->insn[0]) ||
536 ARM_INSN_MATCH (SWI, ainsn->insn[0]) ||
537 ARM_INSN_MATCH (BREAK, ainsn->insn[0]) ||
538 ARM_INSN_MATCH (B, ainsn->insn[0]) ||
539 ARM_INSN_MATCH (BL, ainsn->insn[0]) ||
540 ARM_INSN_MATCH (BLX1, ainsn->insn[0]) ||
541 ARM_INSN_MATCH (BLX2, ainsn->insn[0]) ||
542 ARM_INSN_MATCH (BX, ainsn->insn[0]) ||
543 ARM_INSN_MATCH (BXJ, ainsn->insn[0]))
545 DBPRINTF ("arch_check_insn: %lx\n", ainsn->insn[0]);
548 #ifndef CONFIG_CPU_V7
549 // check instructions that can write result to PC
550 else if ((ARM_INSN_MATCH (DPIS, ainsn->insn[0]) ||
551 ARM_INSN_MATCH (DPRS, ainsn->insn[0]) ||
552 ARM_INSN_MATCH (DPI, ainsn->insn[0]) ||
553 ARM_INSN_MATCH (LIO, ainsn->insn[0]) ||
554 ARM_INSN_MATCH (LRO, ainsn->insn[0])) &&
555 (ARM_INSN_REG_RD (ainsn->insn[0]) == 15))
557 DBPRINTF ("arch_check_insn: %lx\n", ainsn->insn[0]);
560 #endif // CONFIG_CPU_V7
561 // check load/store-multiple instructions that touch the PC
562 else if ((ARM_INSN_MATCH (LM, ainsn->insn[0]) || ARM_INSN_MATCH (SM, ainsn->insn[0])) &&
563 // store pc or load to pc
564 (ARM_INSN_REG_MR (ainsn->insn[0], 15) ||
565 // store/load with pc update
566 ((ARM_INSN_REG_RN (ainsn->insn[0]) == 15) && (ainsn->insn[0] & 0x200000))))
568 DBPRINTF ("arch_check_insn: %lx\n", ainsn->insn[0]);
571 #elif defined(CONFIG_X86)
572 //# warning arch_check_insn is not implemented for x86 arch!!!
579 * kprobe->ainsn.insn points to the copy of the instruction to be
580 * single-stepped. x86_64, POWER4 and above have no-exec support and
581 * stepping on the instruction on a vmalloced/kmalloced/data page
582 * is a recipe for disaster
/* Number of fixed-size instruction slots that fit in one page. */
584 #define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
/* One executable page carved into instruction slots, tracked on an
 * hlist; slot_used[] records each slot's allocation state. */
586 struct kprobe_insn_page
588 struct hlist_node hlist;
589 kprobe_opcode_t *insns; /* Page of instruction slots */
590 char *slot_used;//[INSNS_PER_PAGE];
596 enum kprobe_slot_state
/* Separate slot pools for kernel probes and user-space probes, each
 * with a count of freed-but-not-yet-collected (garbage) slots. */
603 static struct hlist_head kprobe_insn_pages;
604 static int kprobe_garbage_slots;
605 static struct hlist_head uprobe_insn_pages;
606 static int uprobe_garbage_slots;
607 static int collect_garbage_slots (struct hlist_head *page_list, struct task_struct *task);
/* Assembly trampoline templates defined in the holder functions below. */
609 void gen_insn_execbuf (void);
610 void pc_dep_insn_execbuf (void);
611 void gen_insn_execbuf_holder (void);
612 void pc_dep_insn_execbuf_holder (void);
/*
 * Container for the gen_insn_execbuf template: slot layout for
 * instructions with no PC dependency -- the probed instruction is
 * copied over the marked nop, followed by the single-step break and the
 * stored next-instruction address.
 */
615 gen_insn_execbuf_holder (void)
617 asm volatile (".global gen_insn_execbuf\n"
618 "gen_insn_execbuf:\n"
619 #if defined(CONFIG_ARM)
622 "nop\n" // original instruction
624 "ldr pc, [pc, #4]\n" //ssbreak
627 "nop\n"); //stored PC-4(next insn addr)
628 #elif defined(CONFIG_MIPS)
629 "nop\n" // original instruction
637 #if defined(CONFIG_ARM)
/*
 * User-space template for PC-dependent instructions: spill r0 into the
 * buffer itself (a user stack cannot be trusted here), load the
 * emulated PC value into r0, run the patched instruction, then restore.
 */
639 pc_dep_uinsn_execbuf_holder (void)
641 asm volatile (".global pc_dep_uinsn_execbuf\n"
642 "pc_dep_uinsn_execbuf:\n"
643 "str r0, [pc, #20]\n"
644 "ldr r0, [pc, #12]\n"
645 "nop\n" // instruction with replaced PC
650 "nop\n");// stored Rx
653 * 0. push Rx on stack
654 * 1. load address to Rx
655 * 2. do insn using Rx
656 * 3. pop Rx from stack
660 * 7. stored PC-4(next insn addr)
/*
 * Kernel-space template for PC-dependent instructions: spill r0 just
 * below the stack pointer, load the emulated PC value, execute the
 * patched instruction, restore r0, then branch to the stored
 * next-instruction address.
 */
663 pc_dep_insn_execbuf_holder (void)
665 asm volatile (".global pc_dep_insn_execbuf\n"
666 "pc_dep_insn_execbuf:\n"
667 "str r0, [sp, #-4]\n"
668 "ldr r0, [pc, #12]\n"
669 "nop\n" // instruction with replaced PC
670 "ldr r0, [sp, #-4]\n"
671 "ldr pc, [pc, #4]\n" //ssbreak
674 "nop\n");// stored PC-4 (next insn addr)
/*
 * Fill a pc_dep_*_execbuf template for 'insn', which uses the PC in one
 * or more register fields ('uregs' bitmask: 1=Rn, 2=Rd, 4=Rs, 8=Rm).
 * Picks a scratch register R0-R12 not otherwise used by the
 * instruction, patches the save/load/restore slots to use it, and
 * rewrites every PC reference in the instruction to that register.
 * Returns non-zero when no free scratch register exists.
 */
678 prep_pc_dep_insn_execbuf (kprobe_opcode_t * insns, kprobe_opcode_t insn, int uregs)
/* First pass (register-list form): find a register not in the list. */
686 for (i = 0; i < 13; i++, reg_mask <<= 1)
688 if (!(insn & reg_mask))
/* Second pass (field form): find a register not named by any of the
 * fields flagged in 'uregs'. */
694 for (i = 0; i < 13; i++)
696 // DBPRINTF("prep_pc_dep_insn_execbuf: check R%d/%d, changing regs %x in %x",
697 // i, ARM_INSN_REG_RN(insn), uregs, insn);
698 if ((uregs & 0x1) && (ARM_INSN_REG_RN (insn) == i))
700 if ((uregs & 0x2) && (ARM_INSN_REG_RD (insn) == i))
702 if ((uregs & 0x4) && (ARM_INSN_REG_RS (insn) == i))
704 if ((uregs & 0x8) && (ARM_INSN_REG_RM (insn) == i))
711 DBPRINTF ("there are no free register %x in insn %lx!", uregs, insn);
714 DBPRINTF ("prep_pc_dep_insn_execbuf: using R%d, changing regs %x", i, uregs);
716 // set register to save
717 ARM_INSN_REG_SET_RD (insns[0], i);
718 // set register to load address to
719 ARM_INSN_REG_SET_RD (insns[1], i);
720 // set instruction to execute and patch it
/* Register-list form: swap the PC bit for the scratch register bit. */
723 ARM_INSN_REG_CLEAR_MR (insn, 15);
724 ARM_INSN_REG_SET_MR (insn, i);
/* Field form: rewrite each PC-referencing field to the scratch reg. */
728 if ((uregs & 0x1) && (ARM_INSN_REG_RN (insn) == 15))
729 ARM_INSN_REG_SET_RN (insn, i);
730 if ((uregs & 0x2) && (ARM_INSN_REG_RD (insn) == 15))
731 ARM_INSN_REG_SET_RD (insn, i);
732 if ((uregs & 0x4) && (ARM_INSN_REG_RS (insn) == 15))
733 ARM_INSN_REG_SET_RS (insn, i);
734 if ((uregs & 0x8) && (ARM_INSN_REG_RM (insn) == 15))
735 ARM_INSN_REG_SET_RM (insn, i);
737 insns[UPROBES_TRAMP_INSN_IDX] = insn;
738 // set register to restore
739 ARM_INSN_REG_SET_RD (insns[3], i);
/*
 * Prepare a kernel probe: validate alignment and the instruction at
 * p->addr, allocate an out-of-line slot, decide boostability (ARM:
 * per-pattern with PC/SP special-casing; MIPS: never; x86: via
 * can_boost()), and build the execution buffer from the matching
 * template.  Error paths free the allocated slot.
 */
745 arch_prepare_kprobe (struct kprobe *p)
747 #if !defined(CONFIG_X86)
748 kprobe_opcode_t insns[KPROBES_TRAMP_LEN];
750 #if defined(CONFIG_ARM)
755 #if !defined(CONFIG_X86)
756 if ((unsigned long) p->addr & 0x01)
758 DBPRINTF ("Attempt to register kprobe at an unaligned address\n");
762 /* XXX: Might be a good idea to check if p->addr is a valid
763 * kernel address as well... */
767 kprobe_opcode_t insn[MAX_INSN_SIZE];
768 struct arch_specific_insn ainsn;
769 /* insn: must be on special executable page on i386. */
770 p->ainsn.insn = get_insn_slot (NULL, 0);
773 memcpy (insn, p->addr, MAX_INSN_SIZE * sizeof (kprobe_opcode_t));
775 ret = arch_check_insn (&ainsn);
778 p->opcode = *p->addr;
779 #if defined(CONFIG_ARM)
780 p->ainsn.boostable = 1;
/* DPIS/LRO/SRO: PC in Rn/Rm (or Rd of a store) needs emulation. */
783 if (ARM_INSN_MATCH (DPIS, insn[0]) || ARM_INSN_MATCH (LRO, insn[0]) ||
784 ARM_INSN_MATCH (SRO, insn[0]))
788 if ((ARM_INSN_REG_RN (insn[0]) == 15) || (ARM_INSN_REG_RM (insn[0]) == 15) ||
789 (ARM_INSN_MATCH (SRO, insn[0]) && (ARM_INSN_REG_RD (insn[0]) == 15)))
792 DBPRINTF ("Unboostable insn %lx, DPIS/LRO/SRO\n", insn[0]);
797 else if (ARM_INSN_MATCH (DPI, insn[0]) || ARM_INSN_MATCH (LIO, insn[0]) ||
798 ARM_INSN_MATCH (SIO, insn[0]))
802 if ((ARM_INSN_REG_RN (insn[0]) == 15) || (ARM_INSN_MATCH (SIO, insn[0]) &&
803 (ARM_INSN_REG_RD (insn[0]) == 15)))
807 DBPRINTF ("Unboostable insn %lx/%p/%d, DPI/LIO/SIO\n", insn[0], p, p->ainsn.boostable);
811 else if (ARM_INSN_MATCH (DPRS, insn[0]))
815 if ((ARM_INSN_REG_RN (insn[0]) == 15) || (ARM_INSN_REG_RM (insn[0]) == 15) ||
816 (ARM_INSN_REG_RS (insn[0]) == 15))
820 DBPRINTF ("Unboostable insn %lx, DPRS\n", insn[0]);
824 else if (ARM_INSN_MATCH (SM, insn[0]))
828 if (ARM_INSN_REG_MR (insn[0], 15))
831 DBPRINTF ("Unboostable insn %lx, SM\n", insn[0]);
835 // check instructions that can write result to SP and use PC
836 if (pc_dep && (ARM_INSN_REG_RD (ainsn.insn[0]) == 13))
840 //printk ("insn writes result to SP and uses PC: %lx/%d\n", ainsn.insn[0], count);
841 free_insn_slot (&kprobe_insn_pages, NULL, p->ainsn.insn, 0);
847 memcpy (insns, pc_dep_insn_execbuf, sizeof (insns));
848 if (prep_pc_dep_insn_execbuf (insns, insn[0], uregs) != 0)
850 DBPRINTF ("failed to prepare exec buffer for insn %lx!", insn[0]);
851 free_insn_slot (&kprobe_insn_pages, NULL, p->ainsn.insn, 0);
854 //insns[KPROBES_TRAMP_SS_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
/* Slot 6: emulated PC value (addr + 8 bytes, i.e. p->addr + 2 words). */
855 insns[6] = (kprobe_opcode_t) (p->addr + 2);
859 memcpy (insns, gen_insn_execbuf, sizeof (insns));
860 insns[KPROBES_TRAMP_INSN_IDX] = insn[0];
862 //insns[KPROBES_TRAMP_RET_BREAK_IDX] = UNDEF_INSTRUCTION;
/* Slot 7: resume address (instruction following the probe point). */
863 insns[7] = (kprobe_opcode_t) (p->addr + 1);
864 DBPRINTF ("arch_prepare_kprobe: insn %lx", insn[0]);
/* NOTE(review): insns[8] below reads one element past the end of
 * insns[KPROBES_TRAMP_LEN] (LEN is 8 on ARM) -- debug-only, but still
 * an out-of-bounds read; confirm against the full source. */
865 DBPRINTF ("arch_prepare_kprobe: to %p - %lx %lx %lx %lx %lx %lx %lx %lx %lx",
866 p->ainsn.insn, insns[0], insns[1], insns[2], insns[3], insns[4],
867 insns[5], insns[6], insns[7], insns[8]);
868 memcpy (p->ainsn.insn, insns, sizeof(insns));
870 #elif defined(CONFIG_MIPS)
871 p->ainsn.boostable = 0;
872 memcpy (insns, gen_insn_execbuf, sizeof (insns));
873 insns[KPROBES_TRAMP_INSN_IDX] = insn[0];
874 insns[KPROBES_TRAMP_SS_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
875 insns[KPROBES_TRAMP_RET_BREAK_IDX] = UNDEF_INSTRUCTION;
876 DBPRINTF ("arch_prepare_kprobe: insn %lx", insn[0]);
877 DBPRINTF ("arch_prepare_kprobe: to %p - %lx %lx %lx",
878 p->ainsn.insn, insns[0], insns[1], insns[2]);
879 memcpy (p->ainsn.insn, insns, sizeof(insns));
880 #elif defined(CONFIG_X86)
881 if (can_boost (p->addr))
882 p->ainsn.boostable = 0;
884 p->ainsn.boostable = -1;
885 memcpy (p->ainsn.insn, insn, MAX_INSN_SIZE * sizeof (kprobe_opcode_t));
890 free_insn_slot (&kprobe_insn_pages, NULL, p->ainsn.insn, 0);
/*
 * Prepare a kernel return-probe: same alignment and instruction checks
 * as arch_prepare_kprobe(), then save the original opcode (and on x86
 * the full instruction copy) for later restore.
 * NOTE(review): 'insn' is a single kprobe_opcode_t but the memcpy below
 * copies MAX_INSN_SIZE * sizeof(kprobe_opcode_t) bytes into it -- a
 * stack buffer overflow wherever MAX_INSN_SIZE > 1 (e.g. x86).  Confirm
 * against the full source; likely should be an insn[MAX_INSN_SIZE]
 * array as in arch_prepare_kprobe().
 */
898 arch_prepare_kretprobe (struct kretprobe *p)
902 if ((unsigned long) p->kp.addr & 0x01)
904 DBPRINTF ("Attempt to register kprobe at an unaligned address\n");
908 /* XXX: Might be a good idea to check if p->addr is a valid
909 * kernel address as well... */
913 kprobe_opcode_t insn;
914 struct arch_specific_insn ainsn;
915 memcpy (&insn, p->kp.addr, MAX_INSN_SIZE * sizeof (kprobe_opcode_t));
917 ret = arch_check_insn (&ainsn);
920 p->kp.opcode = *p->kp.addr;
921 #if defined(CONFIG_X86)
922 memcpy (p->kp.ainsn.insn, p->kp.addr, MAX_INSN_SIZE * sizeof (kprobe_opcode_t));
/*
 * Prepare a user-space probe in 'task': read the original instruction
 * from the target process, validate it, allocate a per-task slot,
 * classify boostability (same ARM pattern logic as
 * arch_prepare_kprobe()), build the execution buffer and write it back
 * into the target process's address space.
 */
931 arch_prepare_uprobe (struct kprobe *p, struct task_struct *task, int atomic)
934 kprobe_opcode_t insns[UPROBES_TRAMP_LEN];
935 #if defined(CONFIG_ARM)
939 #if !defined(CONFIG_X86)
940 if ((unsigned long) p->addr & 0x01)
942 DBPRINTF ("Attempt to register kprobe at an unaligned address");
949 kprobe_opcode_t insn[MAX_INSN_SIZE];
950 struct arch_specific_insn ainsn;
/* Fetch the probed instruction from the target process. */
952 if (!read_proc_vm_atomic (task, (unsigned long) p->addr, &insn, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
953 panic ("failed to read memory %p!\n", p->addr);
955 ret = arch_check_insn (&ainsn);
959 p->ainsn.insn = get_insn_slot(task, atomic);
962 #if defined(CONFIG_ARM)
963 p->ainsn.boostable = 1;
/* DPIS/LRO/SRO: PC in Rn/Rm (or Rd of a store) needs emulation. */
966 if (ARM_INSN_MATCH (DPIS, insn[0]) || ARM_INSN_MATCH (LRO, insn[0]) ||
967 ARM_INSN_MATCH (SRO, insn[0]))
971 if ((ARM_INSN_REG_RN (insn[0]) == 15) || (ARM_INSN_REG_RM (insn[0]) == 15) ||
972 (ARM_INSN_MATCH (SRO, insn[0]) && (ARM_INSN_REG_RD (insn[0]) == 15)))
975 DBPRINTF ("Unboostable insn %lx, DPIS/LRO/SRO\n", insn[0]);
980 else if (ARM_INSN_MATCH (DPI, insn[0]) || ARM_INSN_MATCH (LIO, insn[0]) ||
981 ARM_INSN_MATCH (SIO, insn[0]))
985 if ((ARM_INSN_REG_RN (insn[0]) == 15) || (ARM_INSN_MATCH (SIO, insn[0]) &&
986 (ARM_INSN_REG_RD (insn[0]) == 15)))
990 DBPRINTF ("Unboostable insn %lx/%p/%d, DPI/LIO/SIO\n", insn[0], p, p->ainsn.boostable);
994 else if (ARM_INSN_MATCH (DPRS, insn[0]))
998 if ((ARM_INSN_REG_RN (insn[0]) == 15) || (ARM_INSN_REG_RM (insn[0]) == 15) ||
999 (ARM_INSN_REG_RS (insn[0]) == 15))
1003 DBPRINTF ("Unboostable insn %lx, DPRS\n", insn[0]);
1007 else if (ARM_INSN_MATCH (SM, insn[0]))
1011 if (ARM_INSN_REG_MR (insn[0], 15))
1014 DBPRINTF ("Unboostable insn %lx, SM\n", insn[0]);
1018 // check instructions that can write result to SP and use PC
1019 if (pc_dep && (ARM_INSN_REG_RD (ainsn.insn[0]) == 13))
1023 //printk ("insn writes result to SP and uses PC: %lx/%d\n", ainsn.insn[0], count);
1024 free_insn_slot (&uprobe_insn_pages, task, p->ainsn.insn, 0);
/* PC-dependent path: use the rewriting template. */
1028 if (uregs && pc_dep)
1030 memcpy (insns, pc_dep_insn_execbuf, sizeof (insns));
1031 if (prep_pc_dep_insn_execbuf (insns, insn[0], uregs) != 0)
1033 DBPRINTF ("failed to prepare exec buffer for insn %lx!", insn[0]);
1034 free_insn_slot (&uprobe_insn_pages, task, p->ainsn.insn, 0);
1037 //insns[UPROBES_TRAMP_SS_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
/* Slot 6: emulated PC value (p->addr + 2 words). */
1038 insns[6] = (kprobe_opcode_t) (p->addr + 2);
1042 memcpy (insns, gen_insn_execbuf, sizeof (insns));
1043 insns[UPROBES_TRAMP_INSN_IDX] = insn[0];
1045 insns[UPROBES_TRAMP_RET_BREAK_IDX] = UNDEF_INSTRUCTION;
/* Slot 7: resume address (instruction following the probe point). */
1046 insns[7] = (kprobe_opcode_t) (p->addr + 1);
1047 DBPRINTF ("arch_prepare_uprobe: to %p - %lx %lx %lx %lx %lx %lx %lx %lx %lx",
1048 p->ainsn.insn, insns[0], insns[1], insns[2], insns[3], insns[4],
1049 insns[5], insns[6], insns[7], insns[8]);
1051 #elif defined(CONFIG_MIPS)
1052 p->ainsn.boostable = 0;
1053 memcpy (insns, gen_insn_execbuf, sizeof (insns));
1054 insns[UPROBES_TRAMP_INSN_IDX] = insn[0];
1055 insns[UPROBES_TRAMP_SS_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
1056 insns[UPROBES_TRAMP_RET_BREAK_IDX] = UNDEF_INSTRUCTION;
1057 DBPRINTF ("arch_prepare_uprobe: insn %lx", insn[0]);
1058 DBPRINTF ("arch_prepare_uprobe: to %p - %lx %lx %lx",
1059 p->ainsn.insn, insns[0], insns[1], insns[2]);
1060 #elif defined(CONFIG_X86)
1061 if (can_boost (insn))
1062 p->ainsn.boostable = 0;
1064 p->ainsn.boostable = -1;
1065 memcpy (&insns[UPROBES_TRAMP_INSN_IDX], insn, MAX_INSN_SIZE*sizeof(kprobe_opcode_t));
1066 insns[UPROBES_TRAMP_RET_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
1067 /*printk ("arch_prepare_uprobe: to %p - %02x %02x %02x %02x %02x %02x %02x %02x "
1068 "%02x %02x %02x %02x %02x %02x %02x %02x %02x", p->ainsn.insn
1069 , insns[0], insns[1], insns[2], insns[3]
1070 , insns[4], insns[5], insns[6], insns[7]
1071 , insns[8], insns[9], insns[10], insns[11]
1072 , insns[12], insns[13], insns[14], insns[15], insns[16]);*/
/* Publish the prepared buffer into the target process's slot.
 * NOTE(review): panic() does not return, so the DBPRINTF/free below it
 * look unreachable as dumped -- the intervening braces are missing from
 * this view; confirm the intended error path in the full source. */
1074 if (!write_proc_vm_atomic (task, (unsigned long) p->ainsn.insn, insns, sizeof (insns)))
1076 panic("failed to write memory %p!\n", p->ainsn.insn);
1077 DBPRINTF ("failed to write insn slot to process memory: insn %p, addr %p, probe %p!", insn, p->ainsn.insn, p->addr);
1078 /*printk ("failed to write insn slot to process memory: %p/%d insn %lx, addr %p, probe %p!\n",
1079 task, task->pid, insn, p->ainsn.insn, p->addr);*/
1080 free_insn_slot (&uprobe_insn_pages, task, p->ainsn.insn, 0);
1083 /*if(!read_proc_vm_atomic(task, (unsigned long)p->ainsn.insn, insns, 3*MAX_INSN_SIZE*sizeof(kprobe_opcode_t)))
1084 panic("failed to read memory %p!\n", p->addr);
1085 printk("arch_prepare_uprobe: from %p - %lx %lx %lx\n", p->ainsn.insn, insns[0], insns[1], insns[2]); */
/*
 * Prepare a user-space return-probe: only the alignment check is
 * visible here; the x86 branch deliberately emits a compile-time
 * warning that this path is unimplemented.
 */
1093 arch_prepare_uretprobe (struct kretprobe *p, struct task_struct *task)//, struct vm_area_struct **vma, struct page **page, unsigned long **kaddr)
1097 if ((unsigned long) p->kp.addr & 0x01)
1099 DBPRINTF ("Attempt to register kprobe at an unaligned address\n");
1102 #if defined(CONFIG_X86)
1103 #warning arch_prepare_uretprobe is not implemented for this arch!!!
/*
 * Release the probe's out-of-line slot: a non-NULL 'task' selects the
 * per-task uprobe pool, NULL the kernel kprobe pool.  The final flag
 * tells free_insn_slot() whether the slot was a boosted ("dirty") one.
 */
1110 arch_remove_kprobe (struct kprobe *p, struct task_struct *task)
1112 //mutex_lock(&kprobe_mutex);
1114 free_insn_slot (&uprobe_insn_pages, task, p->ainsn.insn, (p->ainsn.boostable == 1));
1116 free_insn_slot (&kprobe_insn_pages, NULL, p->ainsn.insn, (p->ainsn.boostable == 1));
1117 //mutex_unlock(&kprobe_mutex)
1120 static unsigned long alloc_user_pages(struct task_struct *task, unsigned long len,
1121 unsigned long prot, unsigned long flags, int atomic)
1125 struct task_struct *otask = current;
1126 struct mm_struct *mm;
1128 mm = atomic ? task->active_mm : get_task_mm (task);
1131 down_write (&mm->mmap_sem);
1132 // FIXME: its seems to be bad decision to replace 'current' pointer temporarily
1133 current_thread_info()->task = task;
1134 ret = (unsigned long)do_mmap_pgoff(0, 0, len, prot, flags, 0);
1135 current_thread_info()->task = otask;
1136 //printk ("mmap proc %p/%d %p/%d (%ld/%lx)\n", task, task->pid, current, current->pid, ret, ret);
1138 up_write (&mm->mmap_sem);
1142 printk ("failed to mmap page in proc %d (%ld)", task->pid, ret);
1147 printk ("proc %d has no mm", task->pid);
1148 return (unsigned long)ret;
1150 struct file * file = 0;
1151 unsigned long addr = 0, pgoff = 0;
1152 struct mm_struct * mm = task->mm;
1153 struct vm_area_struct * vma, * prev;
1154 struct inode *inode;
1155 unsigned int vm_flags;
1156 int correct_wcount = 0;
1158 struct rb_node ** rb_link, * rb_parent;
1159 int accountable = 1;
1160 unsigned long charged = 0, reqprot = prot;
1163 if (is_file_hugepages(file))
1166 if (!file->f_op || !file->f_op->mmap)
1169 if ((prot & PROT_EXEC) &&
1170 (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
1174 * Does the application expect PROT_READ to imply PROT_EXEC?
1176 * (the exception is when the underlying filesystem is noexec
1177 * mounted, in which case we dont add PROT_EXEC.)
1179 if ((prot & PROT_READ) && (task->personality & READ_IMPLIES_EXEC))
1180 if (!(file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC)))
1186 /* Careful about overflows.. */
1187 len = PAGE_ALIGN(len);
1188 if (!len || len > TASK_SIZE)
1191 /* offset overflow? */
1192 if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1195 /* Too many mappings? */
1196 if (mm->map_count > sysctl_max_map_count)
1199 /* Obtain the address to map to. we verify (or select) it and ensure
1200 * that it represents a valid section of the address space.
1202 addr = get_unmapped_area(file, addr, len, pgoff, flags);
1203 if (addr & ~PAGE_MASK)
1206 /* Do simple checking here so the lower-level routines won't have
1207 * to. we assume access permissions have been handled by the open
1208 * of the memory object, so we don't do any here.
1210 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
1211 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1213 if (flags & MAP_LOCKED) {
1214 if (!can_do_mlock())
1216 vm_flags |= VM_LOCKED;
1218 /* mlock MCL_FUTURE? */
1219 if (vm_flags & VM_LOCKED) {
1220 unsigned long locked, lock_limit;
1221 locked = len >> PAGE_SHIFT;
1222 locked += mm->locked_vm;
1223 lock_limit = task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
1224 lock_limit >>= PAGE_SHIFT;
1225 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1229 inode = file ? file->f_dentry->d_inode : NULL;
1232 switch (flags & MAP_TYPE) {
1234 if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
1238 * Make sure we don't allow writing to an append-only
1241 if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1245 * Make sure there are no mandatory locks on the file.
1247 if (locks_verify_locked(inode))
1250 vm_flags |= VM_SHARED | VM_MAYSHARE;
1251 if (!(file->f_mode & FMODE_WRITE))
1252 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1256 if (!(file->f_mode & FMODE_READ))
1264 switch (flags & MAP_TYPE) {
1266 vm_flags |= VM_SHARED | VM_MAYSHARE;
1270 * Set pgoff according to addr for anon_vma.
1272 pgoff = addr >> PAGE_SHIFT;
1280 error = security_file_mmap(file, reqprot, prot, flags);
1284 /* Clear old maps */
1287 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
1288 if (vma && vma->vm_start < addr + len) {
1289 if (do_munmap(mm, addr, len))
1294 /* Check against address space limit. */
1295 if (!may_expand_vm(mm, len >> PAGE_SHIFT))
1298 if (accountable && (!(flags & MAP_NORESERVE) ||
1299 sysctl_overcommit_memory == OVERCOMMIT_NEVER)) {
1300 if (vm_flags & VM_SHARED) {
1301 /* Check memory availability in shmem_file_setup? */
1302 vm_flags |= VM_ACCOUNT;
1303 } else if (vm_flags & VM_WRITE) {
1305 * Private writable mapping: check memory availability
1307 charged = len >> PAGE_SHIFT;
1308 if (security_vm_enough_memory(charged))
1310 vm_flags |= VM_ACCOUNT;
1315 * Can we just expand an old private anonymous mapping?
1316 * The VM_SHARED test is necessary because shmem_zero_setup
1317 * will create the file object for a shared anonymous map below.
1319 if (!file && !(vm_flags & VM_SHARED) &&
1320 vma_merge(mm, prev, addr, addr + len, vm_flags,
1321 NULL, NULL, pgoff, NULL))
1325 * Determine the object being mapped and call the appropriate
1326 * specific mapper. the address has already been validated, but
1327 * not unmapped, but the maps are removed from the list.
1329 vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
1334 memset(vma, 0, sizeof(*vma));
1337 vma->vm_start = addr;
1338 vma->vm_end = addr + len;
1339 vma->vm_flags = vm_flags;
1340 vma->vm_page_prot = protection_map[vm_flags & 0x0f];
1341 vma->vm_pgoff = pgoff;
1345 if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1347 if (vm_flags & VM_DENYWRITE) {
1348 error = deny_write_access(file);
1353 vma->vm_file = file;
1355 error = file->f_op->mmap(file, vma);
1357 goto unmap_and_free_vma;
1358 } else if (vm_flags & VM_SHARED) {
1359 error = shmem_zero_setup(vma);
1364 /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
1365 * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
1366 * that memory reservation must be checked; but that reservation
1367 * belongs to shared memory object, not to vma: so now clear it.
1369 if ((vm_flags & (VM_SHARED|VM_ACCOUNT)) == (VM_SHARED|VM_ACCOUNT))
1370 vma->vm_flags &= ~VM_ACCOUNT;
1372 /* Can addr have changed??
1374 * Answer: Yes, several device drivers can do it in their
1375 * f_op->mmap method. -DaveM
1377 addr = vma->vm_start;
1378 pgoff = vma->vm_pgoff;
1379 vm_flags = vma->vm_flags;
1381 if (!file || !vma_merge(mm, prev, addr, vma->vm_end,
1382 vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
1383 file = vma->vm_file;
1384 vma_link(mm, vma, prev, rb_link, rb_parent);
1386 atomic_inc(&inode->i_writecount);
1390 atomic_inc(&inode->i_writecount);
1393 mpol_free(vma_policy(vma));
1394 kmem_cache_free(vm_area_cachep, vma);
1397 mm->total_vm += len >> PAGE_SHIFT;
1398 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1399 if (vm_flags & VM_LOCKED) {
1400 mm->locked_vm += len >> PAGE_SHIFT;
1401 make_pages_present(addr, addr + len);
1403 if (flags & MAP_POPULATE) {
1404 up_write(&mm->mmap_sem);
1405 sys_remap_file_pages(addr, len, 0,
1406 pgoff, flags & MAP_NONBLOCK);
1407 down_write(&mm->mmap_sem);
1413 atomic_inc(&inode->i_writecount);
1414 vma->vm_file = NULL;
1417 /* Undo any partial mapping done by a device driver. */
1418 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1421 kmem_cache_free(vm_area_cachep, vma);
1424 vm_unacct_memory(charged);
/*
 * NOTE(review): the function-name line is elided from this view; judging by
 * the call site in collect_garbage_slots() this is check_safety().  Before
 * kernel insn-slot pages are freed it tries to ensure no task is preempted
 * while executing out of a slot: under CONFIG_PREEMPT+CONFIG_PM it freezes
 * processes and scans the thread list; otherwise (visible at the end) an
 * RCU-sched grace period via synchronize_sched() suffices.  Several
 * original lines (branches, return paths) are missing here.
 */
1429 static int __kprobes
1433 #if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
1434 ret = freeze_processes ();
1437 struct task_struct *p, *q;
1438 do_each_thread (p, q)
/* Any other runnable task may be preempted inside a slot page. */
1440 if (p != current && p->state == TASK_RUNNING && p->pid != 0)
1442 printk ("Check failed: %s is running\n", p->comm);
1447 while_each_thread (p, q);
1452 synchronize_sched ();
/*
 * get_insn_slot() - return a free trampoline slot on an executable page.
 * @task:   NULL for kernel probes (kprobe_insn_pages list); non-NULL for
 *          user-space probes (uprobe_insn_pages list, per-tgid pages)
 * @atomic: forwarded to alloc_user_pages() when a new user page is needed
 *
 * Slot size differs between kernel and user probes (KPROBES_TRAMP_LEN vs
 * UPROBES_TRAMP_LEN).  NOTE(review): the #if/#else structure selecting the
 * sizes and several branches/returns are elided from this view.
 */
1458 * get_us_insn_slot() - Find a slot on an executable page for an instruction.
1459 * We allocate an executable page if there's no room on existing ones.
1461 kprobe_opcode_t __kprobes *
1462 get_insn_slot (struct task_struct *task, int atomic)
1464 struct kprobe_insn_page *kip;
1465 struct hlist_node *pos;
1466 struct hlist_head *page_list = task ? &uprobe_insn_pages : &kprobe_insn_pages;
1467 unsigned slots_per_page = INSNS_PER_PAGE, slot_size = MAX_INSN_SIZE;
1470 slots_per_page = INSNS_PER_PAGE/UPROBES_TRAMP_LEN;
1471 slot_size = UPROBES_TRAMP_LEN;
1474 slots_per_page = INSNS_PER_PAGE/KPROBES_TRAMP_LEN;
1475 slot_size = KPROBES_TRAMP_LEN;
/* First pass: reuse a clean slot on an existing page (matching tgid for
 * user probes). */
1479 hlist_for_each_entry (kip, pos, page_list, hlist)
1481 if (kip->nused < slots_per_page)
1484 for (i = 0; i < slots_per_page; i++)
1486 if (kip->slot_used[i] == SLOT_CLEAN)
1488 if(!task || (kip->tgid == task->tgid)){
1489 kip->slot_used[i] = SLOT_USED;
1491 return kip->insns + (i * slot_size);
1495 /* Surprise! No unused slots. Fix kip->nused. */
1496 kip->nused = slots_per_page;
1500 /* If there are any garbage slots, collect it and try again. */
1502 if (uprobe_garbage_slots && collect_garbage_slots(page_list, task) == 0)
1506 if (kprobe_garbage_slots && collect_garbage_slots(page_list, task) == 0)
1510 /* All out of space. Need to allocate a new page. Use slot 0. */
1511 kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
1515 kip->slot_used = kmalloc(sizeof(char)*slots_per_page, GFP_KERNEL);
1516 if (!kip->slot_used){
/* User probes: map an executable page into the target process itself;
 * kernel probes: module_alloc() on x86 (executable), else kmalloc(). */
1522 kip->insns = (kprobe_opcode_t *)alloc_user_pages(task, PAGE_SIZE,
1523 PROT_EXEC|PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_SHARED, atomic);
1526 #if defined(CONFIG_X86)
1527 kip->insns = module_alloc (PAGE_SIZE);
1529 kip->insns = kmalloc(PAGE_SIZE, GFP_KERNEL);
1534 kfree (kip->slot_used);
1538 INIT_HLIST_NODE (&kip->hlist);
1539 hlist_add_head (&kip->hlist, page_list);
1540 memset(kip->slot_used, SLOT_CLEAN, slots_per_page);
1541 kip->slot_used[0] = SLOT_USED;
1544 kip->tgid = task ? task->tgid : 0;
/*
 * collect_one_slot() - reclaim slot @idx on page @kip; free the page once
 * it becomes empty (kernel pages via module_free()/kfree(); user pages
 * would be munmap'ed, but see the workaround note kept below).
 * NOTE(review): the if/else nesting around the free paths is elided from
 * this view.
 */
1548 /* Return 1 if all garbages are collected, otherwise 0. */
1549 static int __kprobes
1550 collect_one_slot (struct hlist_head *page_list, struct task_struct *task,
1551 struct kprobe_insn_page *kip, int idx)
1553 struct mm_struct *mm;
1555 kip->slot_used[idx] = SLOT_CLEAN;
1557 DBPRINTF("collect_one_slot: nused=%d", kip->nused);
1558 if (kip->nused == 0)
1561 * Page is no longer in use. Free it unless
1562 * it's the last one. We keep the last one
1563 * so as not to have to set it up again the
1564 * next time somebody inserts a probe.
1566 hlist_del (&kip->hlist);
1567 if (!task && hlist_empty (page_list))
1569 INIT_HLIST_NODE (&kip->hlist);
1570 hlist_add_head (&kip->hlist, page_list);
1575 //E. G.: This code provides kernel dump because of rescheduling while atomic.
1576 //As workaround, this code was commented. In this case we will have memory leaks
1577 //for instrumented process, but instrumentation process should functionate correctly.
1578 //Planned that good solution for this problem will be done during redesigning KProbe
1579 //for improving supportability and performance.
1581 //printk("collect_one_slot %p/%d\n", task, task->pid);
1582 mm = get_task_mm (task);
1584 down_write (&mm->mmap_sem);
1585 do_munmap(mm, (unsigned long)(kip->insns), PAGE_SIZE);
1586 up_write (&mm->mmap_sem);
/* NOTE(review): per the comment above, the munmap path is (partially)
 * disabled; insns is nulled instead of unmapped, leaking the user page. */
1590 kip->insns = NULL; //workaround
1594 #if defined(CONFIG_X86)
1595 module_free (NULL, kip->insns);
1600 kfree (kip->slot_used);
/*
 * collect_garbage_slots() - sweep @page_list and reclaim every SLOT_DIRTY
 * slot via collect_one_slot(); resets the matching garbage counter.
 * @task: NULL for kernel probes; non-NULL restricts the sweep to pages
 *        belonging to @task's tgid.
 */
1608 static int __kprobes
1609 collect_garbage_slots (struct hlist_head *page_list, struct task_struct *task)
1611 struct kprobe_insn_page *kip;
1612 struct hlist_node *pos, *next;
1613 unsigned slots_per_page = INSNS_PER_PAGE;
1615 /* Ensure no-one is preempted on the garbages */
1616 if (!task && check_safety() != 0)
1620 slots_per_page = INSNS_PER_PAGE/UPROBES_TRAMP_LEN;
1622 slots_per_page = INSNS_PER_PAGE/KPROBES_TRAMP_LEN;
1624 hlist_for_each_entry_safe (kip, pos, next, page_list, hlist)
/* Skip pages of other processes and pages with nothing to reclaim. */
1627 if ((task && (kip->tgid != task->tgid)) || (kip->ngarbage == 0))
1629 kip->ngarbage = 0; /* we will collect all garbages */
1630 for (i = 0; i < slots_per_page; i++)
1632 if (kip->slot_used[i] == SLOT_DIRTY && collect_one_slot (page_list, task, kip, i))
1636 if(task) uprobe_garbage_slots = 0;
1637 else kprobe_garbage_slots = 0;
/*
 * purge_garbage_uslots() - force reclamation of all dirty user-probe slots
 * belonging to @task; panics if the sweep fails, since leaked slots would
 * corrupt later probe placement.
 * @atomic: accepted for interface symmetry with callers; not used here.
 * Fix: panic message typo "slotsfo for" -> "slots for".
 */
1641 void purge_garbage_uslots(struct task_struct *task, int atomic)
1643 if(collect_garbage_slots(&uprobe_insn_pages, task))
1644 panic("failed to collect garbage slots for task %s/%d/%d", task->comm, task->tgid, task->pid);
/*
 * free_insn_slot() - release the trampoline slot at @slot.
 * @dirty: non-zero marks the slot SLOT_DIRTY for deferred garbage
 *         collection (it may still be executing); zero frees it
 *         immediately via collect_one_slot().
 * Once the per-list garbage counter exceeds one page's worth of slots, a
 * full sweep is triggered.  NOTE(review): the return-type line and the
 * dirty/else branch structure are elided from this view.
 */
1648 free_insn_slot (struct hlist_head *page_list, struct task_struct *task, kprobe_opcode_t *slot, int dirty)
1650 struct kprobe_insn_page *kip;
1651 struct hlist_node *pos;
1652 unsigned slots_per_page = INSNS_PER_PAGE, slot_size = MAX_INSN_SIZE;
1655 slots_per_page = INSNS_PER_PAGE/UPROBES_TRAMP_LEN;
1656 slot_size = UPROBES_TRAMP_LEN;
1659 slots_per_page = INSNS_PER_PAGE/KPROBES_TRAMP_LEN;
1660 slot_size = KPROBES_TRAMP_LEN;
1663 DBPRINTF("free_insn_slot: dirty %d, %p/%d", dirty, task, task?task->pid:0);
/* Locate the page whose address range contains @slot. */
1664 hlist_for_each_entry (kip, pos, page_list, hlist)
1666 DBPRINTF("free_insn_slot: kip->insns=%p slot=%p", kip->insns, slot);
1667 if ((kip->insns <= slot) && (slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)))
1669 int i = (slot - kip->insns) / slot_size;
1672 kip->slot_used[i] = SLOT_DIRTY;
1677 collect_one_slot (page_list, task, kip, i);
1685 if(++uprobe_garbage_slots > slots_per_page)
1686 collect_garbage_slots (page_list, task);
1688 else if(++kprobe_garbage_slots > slots_per_page)
1689 collect_garbage_slots (page_list, task);
/*
 * prepare_singlestep() - redirect the PC so the CPU executes the probed
 * instruction's copy (p->ainsn.insn), or p->ss_addr when a pre-handler
 * already chose a resume address.  On x86 the TF (trap) flag is set and IF
 * cleared so a debug exception follows the stepped instruction; ARM/MIPS
 * step through a trampoline instead.  NOTE(review): the if/else structure
 * around the ss_addr cases is elided from this view.
 */
1694 prepare_singlestep (struct kprobe *p, struct pt_regs *regs)
1696 #if defined(CONFIG_X86)
1699 regs->EREG (ip) = (unsigned long)p->ss_addr;
1704 regs->EREG (flags) |= TF_MASK;
1705 regs->EREG (flags) &= ~IF_MASK;
1706 /*single step inline if the instruction is an int3 */
1707 if (p->opcode == BREAKPOINT_INSTRUCTION){
1708 regs->EREG (ip) = (unsigned long) p->addr;
1709 //printk("break_insn!!!\n");
1712 regs->EREG (ip) = (unsigned long) p->ainsn.insn;
1714 //printk("singlestep %p/%lx\n", p->addr, p->ainsn.insn);
1715 #elif defined(CONFIG_ARM)
1718 regs->uregs[15] = (unsigned long) p->ss_addr;
1722 regs->uregs[15] = (unsigned long) p->ainsn.insn;
1723 //DBPRINTF("prepare_singlestep: %p/%p/%d\n", p, p->addr, p->ainsn.boostable);
1724 #elif defined(CONFIG_MIPS)
1727 regs->cp0_epc = (unsigned long) p->ss_addr;
1731 regs->cp0_epc = (unsigned long) p->ainsn.insn;
1734 //printk("prepare_singlestep: %p/%d to %lx\n", p->addr, p->ainsn.boostable, regs->EREG (ip));
1735 //printk("SS[%lx] to %lx/%lx/%lx\n", p->addr, regs->uregs[15], p->ss_addr, p);
/*
 * save_previous_kprobe() - stash the currently-active kprobe state into
 * kcb->prev_kprobe before handling a re-entrant hit.  Only one level of
 * nesting is supported: a second save while prev_kprobe is occupied is a
 * fatal inconsistency and panics with full context.
 */
1738 static void __kprobes
1739 save_previous_kprobe (struct kprobe_ctlblk *kcb, struct kprobe *cur_p)
1741 if (kcb->prev_kprobe.kp != NULL)
1743 panic ("no space to save new probe[%lu]: task = %d/%s, prev %d/%p, current %d/%p, new %d/%p,",
1744 nCount, current->pid, current->comm, kcb->prev_kprobe.kp->tgid, kcb->prev_kprobe.kp->addr,
1745 kprobe_running()->tgid, kprobe_running()->addr, cur_p->tgid, cur_p->addr);
1747 #if defined(CONFIG_X86)
/* x86 additionally preserves the saved eflags (TF/IF handling). */
1748 kcb->prev_kprobe.old_eflags = kcb->kprobe_old_eflags;
1749 kcb->prev_kprobe.saved_eflags = kcb->kprobe_saved_eflags;
1751 kcb->prev_kprobe.kp = kprobe_running ();
1752 kcb->prev_kprobe.status = kcb->kprobe_status;
/*
 * restore_previous_kprobe() - inverse of save_previous_kprobe(): reinstate
 * the saved kprobe as the per-CPU current one and clear the save area so
 * a later nested hit can use it again.
 */
1755 static void __kprobes
1756 restore_previous_kprobe (struct kprobe_ctlblk *kcb)
1758 __get_cpu_var (current_kprobe) = kcb->prev_kprobe.kp;
1759 kcb->kprobe_status = kcb->prev_kprobe.status;
1760 kcb->prev_kprobe.kp = NULL;
1761 kcb->prev_kprobe.status = 0;
1762 #if defined(CONFIG_X86)
1763 kcb->kprobe_old_eflags = kcb->prev_kprobe.old_eflags;
1764 kcb->kprobe_saved_eflags = kcb->prev_kprobe.saved_eflags;
/*
 * set_current_kprobe() - mark @p as the kprobe being handled on this CPU.
 * On x86, snapshot the TF/IF bits of the interrupted eflags; if the probed
 * opcode itself modifies IF, drop IF from the value to be restored later.
 */
1768 static void __kprobes
1769 set_current_kprobe (struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
1771 __get_cpu_var (current_kprobe) = p;
1772 DBPRINTF ("set_current_kprobe[%lu]: p=%p addr=%p\n", nCount, p, p->addr);
1773 #if defined(CONFIG_X86)
1774 kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags = (regs->EREG (flags) & (TF_MASK | IF_MASK));
1775 if (is_IF_modifier (p->opcode))
1776 kcb->kprobe_saved_eflags &= ~IF_MASK;
/*
 * kprobe_handler() - x86 int3 breakpoint entry point for both kernel
 * kprobes and user-space uprobes (pid != 0 selects user mode).  Looks up
 * the probe at the faulting address, handles re-entrancy (nested hits,
 * uretprobe trampolines), runs the pre-handler, and either boosts the
 * copied instruction or arms single-stepping.
 * NOTE(review): this chunk is heavily elided — labels (ss_probe/no_kprobe),
 * braces and several returns are missing, so control flow below is only
 * partially visible.
 */
1786 #if defined(CONFIG_X86)
1788 kprobe_handler (struct pt_regs *regs)
1790 struct kprobe *p = 0;
1791 int ret = 0, pid = 0, retprobe = 0, reenter = 0;
1792 kprobe_opcode_t *addr = NULL;
1793 struct kprobe_ctlblk *kcb;
1797 /* We're in an interrupt, but this is clear and BUG()-safe. */
/* int3 pushes the address after the breakpoint; back up to the probe. */
1802 addr = (kprobe_opcode_t *) (regs->EREG (ip) - sizeof (kprobe_opcode_t));
1803 DBPRINTF ("KPROBE[%lu]: regs->eip = 0x%lx addr = 0x%p\n", nCount, regs->EREG (ip), addr);
1807 kcb = get_kprobe_ctlblk ();
1809 if (user_mode_vm(regs))
1814 //printk("exception[%lu] from user mode %s/%u/%u addr %p.\n", nCount, current->comm, current->pid, current->tgid, addr);
1815 pid = current->tgid;
1818 /* Check we're not actually recursing */
1819 if (kprobe_running ())
1821 DBPRINTF ("lock???");
1822 p = get_kprobe (addr, pid, current);
1825 DBPRINTF ("reenter p = %p", p);
/* Stepping over our own int3 copy: restore eflags and fall through. */
1827 if (kcb->kprobe_status == KPROBE_HIT_SS && *p->ainsn.insn == BREAKPOINT_INSTRUCTION)
1829 regs->EREG (flags) &= ~TF_MASK;
1830 regs->EREG (flags) |= kcb->kprobe_saved_eflags;
1835 //#warning BREAKPOINT_INSTRUCTION user mode handling is missed!!!
1838 /* We have reentered the kprobe_handler(), since
1839 * another probe was hit while within the handler.
1840 * We here save the original kprobes variables and
1841 * just single step on the instruction of the new probe
1842 * without calling any user handlers.
1844 save_previous_kprobe (kcb, p);
1845 set_current_kprobe (p, regs, kcb);
1846 kprobes_inc_nmissed_count (p);
1847 prepare_singlestep (p, regs);
1848 kcb->kprobe_status = KPROBE_REENTER;
1857 if (*addr != BREAKPOINT_INSTRUCTION)
1859 /* The breakpoint instruction was removed by
1860 * another cpu right after we hit, no further
1861 * handling of this interrupt is appropriate
1863 regs->EREG (ip) -= sizeof (kprobe_opcode_t);
1869 //#warning BREAKPOINT_INSTRUCTION user mode handling is missed!!!
1870 //we can reenter probe upon uretprobe exception
1871 DBPRINTF ("check for UNDEF_INSTRUCTION %p\n", addr);
1872 // UNDEF_INSTRUCTION from user space
/* The hit may be the ret-break inside a uretprobe trampoline slot. */
1873 p = get_kprobe_by_insn_slot (addr-UPROBES_TRAMP_RET_BREAK_IDX, pid, current);
1875 save_previous_kprobe (kcb, p);
1876 kcb->kprobe_status = KPROBE_REENTER;
1879 DBPRINTF ("uretprobe %p\n", addr);
1883 p = __get_cpu_var (current_kprobe);
1885 panic("after uhandler");
1886 DBPRINTF ("kprobe_running !!! p = 0x%p p->break_handler = 0x%p", p, p->break_handler);
1887 if (p->break_handler && p->break_handler (p, regs))
1889 DBPRINTF ("kprobe_running !!! goto ss");
1892 DBPRINTF ("kprobe_running !!! goto no");
1893 DBPRINTF ("no_kprobe");
/* Non-reentrant path: normal probe lookup. */
1899 DBPRINTF ("get_kprobe %p", addr);
1901 p = get_kprobe (addr, pid, current);
1905 if (*addr != BREAKPOINT_INSTRUCTION)
1908 * The breakpoint instruction was removed right
1909 * after we hit it. Another cpu has removed
1910 * either a probepoint or a debugger breakpoint
1911 * at this address. In either case, no further
1912 * handling of this interrupt is appropriate.
1913 * Back up over the (now missing) int3 and run
1914 * the original instruction.
1916 regs->EREG (ip) -= sizeof (kprobe_opcode_t);
1921 //#warning BREAKPOINT_INSTRUCTION user mode handling is missed!!!
1922 DBPRINTF ("search UNDEF_INSTRUCTION %p\n", addr);
1923 // UNDEF_INSTRUCTION from user space
1924 p = get_kprobe_by_insn_slot (addr-UPROBES_TRAMP_RET_BREAK_IDX, pid, current);
1926 // Not one of ours: let kernel handle it
1927 DBPRINTF ("no_kprobe");
1928 //printk("no_kprobe2 ret = %d\n", ret);
1932 DBPRINTF ("uretprobe %p\n", addr);
1935 /* Not one of ours: let kernel handle it */
1936 DBPRINTF ("no_kprobe");
1940 set_current_kprobe (p, regs, kcb);
1942 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
1944 if (retprobe) //(einsn == UNDEF_INSTRUCTION)
1945 ret = trampoline_probe_handler (p, regs);
1946 else if (p->pre_handler)
1947 ret = p->pre_handler (p, regs);
1951 if (ret == 2) { // we have already called the handler, so just single step the instruction
1952 DBPRINTF ("p->pre_handler[%lu] 2", nCount);
1955 DBPRINTF ("p->pre_handler[%lu] 1", nCount);
1956 /* handler has already set things up, so skip ss setup */
1962 DBPRINTF ("p->pre_handler[%lu] 0", nCount);
1965 DBPRINTF ("p = %p\n", p);
1966 DBPRINTF ("p->opcode = 0x%lx *p->addr = 0x%lx p->addr = 0x%p\n", (unsigned long) p->opcode, p->tgid ? 0 : (unsigned long) (*p->addr), p->addr);
1968 #if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
1969 if (p->ainsn.boostable == 1 && !p->post_handler)
1971 /* Boost up -- we can execute copied instructions directly */
1972 reset_current_kprobe ();
1973 regs->EREG (ip) = (unsigned long) p->ainsn.insn;
1974 preempt_enable_no_resched ();
1980 #endif // !CONFIG_PREEMPT
1981 prepare_singlestep (p, regs);
1982 kcb->kprobe_status = KPROBE_HIT_SS;
1990 preempt_enable_no_resched ();
/*
 * kprobe_handler() - ARM/MIPS undefined-instruction / break entry point,
 * mirroring the x86 variant above: derive the probe address from the
 * arch-specific PC, handle re-entrancy and single-step completion
 * (ssaddr / trampoline break slots), then dispatch the pre-handler.
 * NOTE(review): heavily elided — labels, braces and returns are missing;
 * the visible lines show the main decision points only.
 */
1999 kprobe_handler (struct pt_regs *regs)
2001 struct kprobe *p = 0;
2002 int ret = 0, pid = 0, retprobe = 0, reenter = 0;
2003 kprobe_opcode_t *addr = NULL, *ssaddr = 0;
2004 struct kprobe_ctlblk *kcb;
2007 /* We're in an interrupt, but this is clear and BUG()-safe. */
2011 #if defined(CONFIG_MIPS)
2012 addr = (kprobe_opcode_t *) regs->cp0_epc;
2013 DBPRINTF ("regs->regs[ 31 ] = 0x%lx\n", regs->regs[31]);
2014 #elif defined(CONFIG_ARM)
/* ARM: PC (r15) points past the faulting instruction; rewind by 4. */
2015 addr = (kprobe_opcode_t *) (regs->uregs[15] - 4);
2016 DBPRINTF ("KPROBE[%lu]: regs->uregs[15] = 0x%lx addr = 0x%p\n", nCount, regs->uregs[15], addr);
2017 regs->uregs[15] -= 4;
2018 //DBPRINTF("regs->uregs[14] = 0x%lx\n", regs->uregs[14]);
2020 #error implement how to get exception address for this arch!!!
2025 kcb = get_kprobe_ctlblk ();
2027 if (user_mode (regs))
2032 //DBPRINTF("exception[%lu] from user mode %s/%u addr %p (%lx).", nCount, current->comm, current->pid, addr, regs->uregs[14]);
2033 pid = current->tgid;
2036 /* Check we're not actually recursing */
2037 if (kprobe_running ())
2039 DBPRINTF ("lock???");
2040 p = get_kprobe (addr, pid, current);
/* Kernel retprobe trampoline hit while another probe is active. */
2043 if(!pid && (addr == (kprobe_opcode_t *)kretprobe_trampoline)){
2044 save_previous_kprobe (kcb, p);
2045 kcb->kprobe_status = KPROBE_REENTER;
2049 /* We have reentered the kprobe_handler(), since
2050 * another probe was hit while within the handler.
2051 * We here save the original kprobes variables and
2052 * just single step on the instruction of the new probe
2053 * without calling any user handlers.
2055 if(!p->ainsn.boostable){
2056 save_previous_kprobe (kcb, p);
2057 set_current_kprobe (p, regs, kcb);
2059 kprobes_inc_nmissed_count (p);
2060 prepare_singlestep (p, regs);
2061 if(!p->ainsn.boostable)
2062 kcb->kprobe_status = KPROBE_REENTER;
2063 preempt_enable_no_resched ();
2069 if(pid) { //we can reenter probe upon uretprobe exception
2070 DBPRINTF ("check for UNDEF_INSTRUCTION %p\n", addr);
2071 // UNDEF_INSTRUCTION from user space
2072 p = get_kprobe_by_insn_slot (addr-UPROBES_TRAMP_RET_BREAK_IDX, pid, current);
2074 save_previous_kprobe (kcb, p);
2075 kcb->kprobe_status = KPROBE_REENTER;
2078 DBPRINTF ("uretprobe %p\n", addr);
2082 p = __get_cpu_var (current_kprobe);
2084 if (p->tgid) gSilent = 0;
2086 DBPRINTF ("kprobe_running !!! p = 0x%p p->break_handler = 0x%p", p, p->break_handler);
2087 /*if (p->break_handler && p->break_handler(p, regs)) {
2088 DBPRINTF("kprobe_running !!! goto ss");
2091 DBPRINTF ("unknown uprobe at %p cur at %p/%p\n", addr, p->addr, p->ainsn.insn);
/* Hit on the single-step break inside the trampoline: step finished,
 * redirect the PC back to the instruction after the probe point. */
2093 ssaddr = p->ainsn.insn + UPROBES_TRAMP_SS_BREAK_IDX;
2095 ssaddr = p->ainsn.insn + KPROBES_TRAMP_SS_BREAK_IDX;
2098 #if defined(CONFIG_ARM)
2099 regs->uregs[15] = (unsigned long) (p->addr + 1);
2100 DBPRINTF ("finish step at %p cur at %p/%p, redirect to %lx\n", addr, p->addr, p->ainsn.insn, regs->uregs[15]);
2101 #elif defined(CONFIG_MIPS)
2102 regs->cp0_epc = (unsigned long) (p->addr + 1);
2103 DBPRINTF ("finish step at %p cur at %p/%p, redirect to %lx\n", addr, p->addr, p->ainsn.insn, regs->cp0_epc);
2105 #warning uprobe single step is not implemented for this arch!!!
2107 if (kcb->kprobe_status == KPROBE_REENTER) {
2108 restore_previous_kprobe (kcb);
2111 reset_current_kprobe ();
2114 DBPRINTF ("kprobe_running !!! goto no");
2116 /* If it's not ours, can't be delete race, (we hold lock). */
2117 DBPRINTF ("no_kprobe");
2123 //if(einsn != UNDEF_INSTRUCTION) {
2124 DBPRINTF ("get_kprobe %p-%d", addr, pid);
2126 p = get_kprobe (addr, pid, current);
2130 DBPRINTF ("search UNDEF_INSTRUCTION %p\n", addr);
2131 // UNDEF_INSTRUCTION from user space
2132 p = get_kprobe_by_insn_slot (addr-UPROBES_TRAMP_RET_BREAK_IDX, pid, current);
2134 /* Not one of ours: let kernel handle it */
2135 DBPRINTF ("no_kprobe");
2136 //printk("no_kprobe2 ret = %d\n", ret);
2140 DBPRINTF ("uretprobe %p\n", addr);
2143 /* Not one of ours: let kernel handle it */
2144 DBPRINTF ("no_kprobe");
2145 //printk("no_kprobe2 ret = %d\n", ret);
2150 if (p->tgid) gSilent = 0;
2153 set_current_kprobe (p, regs, kcb);
2155 kcb->kprobe_status = KPROBE_HIT_ACTIVE;
2157 if (retprobe) //(einsn == UNDEF_INSTRUCTION)
2158 ret = trampoline_probe_handler (p, regs);
2159 else if (p->pre_handler)
2161 ret = p->pre_handler (p, regs);
2162 if(!p->ainsn.boostable)
2163 kcb->kprobe_status = KPROBE_HIT_SS;
2164 else if(p->pre_handler != trampoline_probe_handler)
2165 reset_current_kprobe ();
2170 DBPRINTF ("p->pre_handler[%lu] 1", nCount);
2171 /* handler has already set things up, so skip ss setup */
2174 DBPRINTF ("p->pre_handler 0");
2177 preempt_enable_no_resched ();
2185 extern struct kretprobe *sched_rp;
/*
 * patch_suspended_task_ret_addr() - for a task that is currently switched
 * out, save its scheduled-out PC into a kretprobe instance and overwrite
 * the saved PC with &kretprobe_trampoline, so the return will be caught
 * when the task is next scheduled in.  Used for probing __switch_to.
 * NOTE(review): the #ifdef CONFIG_ARM structure and several branches are
 * elided; the cpu_context.pc access below is ARM-specific.
 */
2187 static void patch_suspended_task_ret_addr(struct task_struct *p, struct kretprobe *rp)
2189 struct kretprobe_instance *ri = NULL;
2190 struct hlist_node *node, *tmp;
2191 struct hlist_head *head;
2192 unsigned long flags;
/* Reuse an existing instance for this (rp, task) pair if one is queued. */
2195 spin_lock_irqsave (&kretprobe_lock, flags);
2196 head = kretprobe_inst_table_head (p);
2197 hlist_for_each_entry_safe (ri, node, tmp, head, hlist){
2198 if ((ri->rp == rp) && (p == ri->task)){
2203 spin_unlock_irqrestore (&kretprobe_lock, flags);
2207 #ifndef task_thread_info
2208 #define task_thread_info(task) (task)->thread_info
2209 #endif // task_thread_info
/* Don't double-patch a task whose saved PC is already the trampoline. */
2213 if(thread_saved_pc(p) != (unsigned long)&kretprobe_trampoline){
2214 ri->ret_addr = (kprobe_opcode_t *)thread_saved_pc(p);
2215 task_thread_info(p)->cpu_context.pc = (unsigned long) &kretprobe_trampoline;
2220 if ((ri = get_free_rp_inst(rp)) != NULL)
2225 ri->ret_addr = (kprobe_opcode_t *)thread_saved_pc(p);
2226 task_thread_info(p)->cpu_context.pc = (unsigned long) &kretprobe_trampoline;
2228 // printk("change2 saved pc %p->%p for %d/%d/%p\n", ri->ret_addr, &kretprobe_trampoline, p->tgid, p->pid, p);
2231 printk("no ri for %d\n", p->pid);
2234 #endif // CONFIG_ARM
2237 typedef kprobe_opcode_t (*entry_point_t) (unsigned long, unsigned long, unsigned long, unsigned long, unsigned long, unsigned long);
/*
 * setjmp_pre_handler() - jprobe pre-handler: invoke the jprobe entry
 * function with the probed function's arguments.  For user-space probes
 * the first six arguments are read from the user stack (x86) or taken
 * from argument registers (ARM/MIPS); for kernel jprobes on x86 the regs
 * and a slice of stack are saved so jprobe_return() can restore them.
 *
 * Fix: "®s" was HTML-entity mojibake for "&regs" ("&reg" got rendered as
 * the registered-trademark sign); without the fix this line is not valid C.
 * NOTE(review): many original lines (braces, returns, #else arms) are
 * elided from this view; only the visible defect is corrected.
 */
2239 setjmp_pre_handler (struct kprobe *p, struct pt_regs *regs)
2241 struct jprobe *jp = container_of (p, struct jprobe, kp);
2242 kprobe_pre_entry_handler_t pre_entry;
2243 entry_point_t entry;
2245 #if defined(CONFIG_X86)
2246 unsigned long addr, args[6];
2247 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk ();
2249 DBPRINTF ("setjmp_pre_handler %p:%d", p->addr, p->tgid);
2250 pre_entry = (kprobe_pre_entry_handler_t) jp->pre_entry;
2251 entry = (entry_point_t) jp->entry;
2253 regs->EREG (flags) &= ~IF_MASK;
2254 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
2255 trace_hardirqs_off ();
2257 if (p->tgid == current->tgid)
2259 // read first 6 args from stack
2260 if (!read_proc_vm_atomic (current, regs->EREG(sp)+4, args, sizeof(args)))
2261 panic ("failed to read user space func arguments %lx!\n", regs->EREG(sp)+4);
2263 p->ss_addr = pre_entry (jp->priv_arg, regs);
2265 entry (args[0], args[1], args[2], args[3], args[4], args[5]);
/* Kernel jprobe: snapshot regs and the stack pointer for jprobe_return(). */
2273 kcb->jprobe_saved_regs = *regs;
2274 kcb->jprobe_saved_esp = &regs->EREG (sp);
2275 addr = (unsigned long) (kcb->jprobe_saved_esp);
2278 * TBD: As Linus pointed out, gcc assumes that the callee
2279 * owns the argument space and could overwrite it, e.g.
2280 * tailcall optimization. So, to be absolutely safe
2281 * we also save and restore enough stack bytes to cover
2282 * the argument area.
2284 memcpy (kcb->jprobes_stack, (kprobe_opcode_t *) addr, MIN_STACK_SIZE (addr));
2285 regs->EREG (flags) &= ~IF_MASK;
2286 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
2287 trace_hardirqs_off ();
2290 p->ss_addr = pre_entry (jp->priv_arg, regs);
2291 regs->EREG (ip) = (unsigned long) (jp->entry);
2297 p = __get_cpu_var (current_kprobe);
2300 DBPRINTF ("pjp = 0x%p jp->entry = 0x%p", jp, jp->entry);
2301 entry = (entry_point_t) jp->entry;
2302 pre_entry = (kprobe_pre_entry_handler_t) jp->pre_entry;
2304 // DIE("entry NULL", regs)
2305 DBPRINTF ("entry = 0x%p jp->entry = 0x%p", entry, jp->entry);
2307 //call handler for all kernel probes and user space ones which belong to current tgid
2308 if (!p->tgid || (p->tgid == current->tgid))
/* Probing the scheduler: patch every suspended task so its return is
 * routed through the kretprobe trampoline. */
2310 if(!p->tgid && (p->addr == sched_addr) && sched_rp){
2311 struct task_struct *p, *g;
2314 if(current != &init_task)
2315 patch_suspended_task_ret_addr(&init_task, sched_rp);
2317 do_each_thread(g, p){
2320 patch_suspended_task_ret_addr(p, sched_rp);
2321 } while_each_thread(g, p);
2325 p->ss_addr = (void *)pre_entry (jp->priv_arg, regs);
2327 # if defined(CONFIG_MIPS)
2328 entry (regs->regs[4], regs->regs[5], regs->regs[6], regs->regs[7], regs->regs[8], regs->regs[9]);
2329 # elif defined(CONFIG_ARM)
2330 entry (regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3, regs->ARM_r4, regs->ARM_r5);
2343 prepare_singlestep (p, regs);
2349 #endif //!CONFIG_X86
/*
 * jprobe_return() - called by a jprobe entry function to return control.
 * On x86 the inline asm restores the stack pointer saved in
 * jprobe_saved_esp by setjmp_pre_handler(); jprobe_return_end is the
 * label the breakpoint machinery recognizes to unwind the jprobe.
 * NOTE(review): lines between the asm and preempt_enable_no_resched()
 * (the int3, #else arm) are elided from this view.
 */
2353 jprobe_return (void)
2355 #if defined(CONFIG_X86)
2356 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk ();
2358 asm volatile(" xchgl %%ebx,%%esp \n"
2360 " .globl jprobe_return_end \n"
2361 " jprobe_return_end: \n"
2362 " nop \n"::"b" (kcb->jprobe_saved_esp):"memory");
2364 preempt_enable_no_resched();
/*
 * uprobe_return() - user-space counterpart of jprobe_return().  The x86
 * stack-restore asm is deliberately commented out (user probes do not
 * save a kernel stack frame); only the preempt count is rebalanced.
 */
2369 uprobe_return (void)
2371 #if defined(CONFIG_X86)
2372 /*struct kprobe_ctlblk *kcb = get_kprobe_ctlblk ();
2374 asm volatile(" xchgl %%ebx,%%esp \n"
2376 " .globl jprobe_return_end \n"
2377 " jprobe_return_end: \n"
2378 " nop \n"::"b" (kcb->jprobe_saved_esp):"memory");*/
2380 preempt_enable_no_resched ();
/*
 * Fix applied below: "®s" was HTML-entity mojibake for "&regs" — without
 * it the kernel-mode tos assignment is not valid C.  Everything else is
 * unchanged; many original lines (switch header, breaks, #else arms) are
 * elided from this view, so only the visible defect is corrected.
 *
 * For user-mode probes, the "top of stack" dword is read from / written
 * back to the probed process with read/write_proc_vm_atomic(); for kernel
 * probes tos points directly at the saved stack pointer.
 */
2384 #if defined(CONFIG_X86)
2386 * Called after single-stepping. p->addr is the address of the
2387 * instruction whose first byte has been replaced by the "int 3"
2388 * instruction. To avoid the SMP problems that can occur when we
2389 * temporarily put back the original opcode to single-step, we
2390 * single-stepped a copy of the instruction. The address of this
2391 * copy is p->ainsn.insn.
2393 * This function prepares to return from the post-single-step
2394 * interrupt. We have to fix up the stack as follows:
2396 * 0) Except in the case of absolute or indirect jump or call instructions,
2397 * the new eip is relative to the copied instruction. We need to make
2398 * it relative to the original instruction.
2400 * 1) If the single-stepped instruction was pushfl, then the TF and IF
2401 * flags are set in the just-pushed eflags, and may need to be cleared.
2403 * 2) If the single-stepped instruction was a call, the return address
2404 * that is atop the stack is the address following the copied instruction.
2405 * We need to make it the address following the original instruction.
2407 * This function also checks instruction size for preparing direct execution.
2409 static void __kprobes
2410 resume_execution (struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
2412 unsigned long *tos, tos_dword = 0;
2413 unsigned long copy_eip = (unsigned long) p->ainsn.insn;
2414 unsigned long orig_eip = (unsigned long) p->addr;
2415 kprobe_opcode_t insns[2];
2417 regs->EREG (flags) &= ~TF_MASK;
/* User-mode probe: fetch the stack top and the first two opcode bytes
 * of the instruction copy from the probed process's address space. */
2420 tos = (unsigned long *) &tos_dword;
2421 if (!read_proc_vm_atomic (current, regs->EREG (sp), &tos_dword, sizeof(tos_dword)))
2422 panic ("failed to read dword from top of the user space stack %lx!\n", regs->EREG (sp));
2423 if (!read_proc_vm_atomic (current, (unsigned long)p->ainsn.insn, insns, 2*sizeof(kprobe_opcode_t)))
2424 panic ("failed to read first 2 opcodes of instruction copy from user space %p!\n", p->ainsn.insn);
/* Kernel probe: operate on the saved stack pointer directly. */
2427 tos = (unsigned long *) &regs->EREG (sp);
2428 insns[0] = p->ainsn.insn[0];
2429 insns[1] = p->ainsn.insn[1];
2434 case 0x9c: /* pushfl */
2435 *tos &= ~(TF_MASK | IF_MASK);
2436 *tos |= kcb->kprobe_old_eflags;
2438 case 0xc2: /* iret/ret/lret */
2443 case 0xea: /* jmp absolute -- eip is correct */
2444 /* eip is already adjusted, no more changes required */
2445 p->ainsn.boostable = 1;
2447 case 0xe8: /* call relative - Fix return addr */
2448 *tos = orig_eip + (*tos - copy_eip);
2450 case 0x9a: /* call absolute -- same as call absolute, indirect */
2451 *tos = orig_eip + (*tos - copy_eip);
2453 if (!write_proc_vm_atomic (current, regs->EREG (sp), &tos_dword, sizeof(tos_dword)))
2454 panic ("failed to write dword to top of the user space stack %lx!\n", regs->EREG (sp));
2458 if ((insns[1] & 0x30) == 0x10)
2461 * call absolute, indirect
2462 * Fix return addr; eip is correct.
2463 * But this is not boostable
2465 *tos = orig_eip + (*tos - copy_eip);
2467 if (!write_proc_vm_atomic (current, regs->EREG (sp), &tos_dword, sizeof(tos_dword)))
2468 panic ("failed to write dword to top of the user space stack %lx!\n", regs->EREG (sp));
2472 else if (((insns[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */
2473 ((insns[1] & 0x31) == 0x21))
2474 { /* jmp far, absolute indirect */
2475 /* eip is correct. And this is boostable */
2476 p->ainsn.boostable = 1;
2484 if (!write_proc_vm_atomic (current, regs->EREG (sp), &tos_dword, sizeof(tos_dword)))
2485 panic ("failed to write dword to top of the user space stack %lx!\n", regs->EREG (sp));
2488 if (p->ainsn.boostable == 0)
2490 if ((regs->EREG (ip) > copy_eip) && (regs->EREG (ip) - copy_eip) + 5 < MAX_INSN_SIZE)
2493 * These instructions can be executed directly if it
2494 * jumps back to correct address.
2497 set_user_jmp_op ((void *) regs->EREG (ip), (void *) orig_eip + (regs->EREG (ip) - copy_eip));
2499 set_jmp_op ((void *) regs->EREG (ip), (void *) orig_eip + (regs->EREG (ip) - copy_eip));
2500 p->ainsn.boostable = 1;
2504 p->ainsn.boostable = -1;
/* Default: translate eip from the copy back into the original code. */
2508 regs->EREG (ip) = orig_eip + (regs->EREG (ip) - copy_eip);
/*
 * post_kprobe_handler() - x86 debug-exception (single-step completion)
 * handler: run the post-handler, fix up eip/stack via resume_execution(),
 * restore saved eflags, and pop any saved re-entrant kprobe state.
 * NOTE(review): several lines (the initial !cur check, returns, labels)
 * are elided from this view.
 */
2515 * Interrupts are disabled on entry as trap1 is an interrupt gate and they
2516 * remain disabled throughout this function.
2518 static int __kprobes
2519 post_kprobe_handler (struct pt_regs *regs)
2521 struct kprobe *cur = kprobe_running ();
2522 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk ();
/* Nested (REENTER) hits skip the user post-handler. */
2526 if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler)
2528 kcb->kprobe_status = KPROBE_HIT_SSDONE;
2529 cur->post_handler (cur, regs, 0);
2532 resume_execution (cur, regs, kcb);
2533 regs->EREG (flags) |= kcb->kprobe_saved_eflags;
2535 trace_hardirqs_fixup_flags (regs->EREG (flags));
2536 #endif // CONFIG_X86
2537 /*Restore back the original saved kprobes variables and continue. */
2538 if (kcb->kprobe_status == KPROBE_REENTER)
2540 restore_previous_kprobe (kcb);
2543 reset_current_kprobe ();
2545 preempt_enable_no_resched ();
2548 * if somebody else is singlestepping across a probe point, eflags
2549 * will have TF set, in which case, continue the remaining processing
2550 * of do_debug, as if this is not a probe hit.
2552 if (regs->EREG (flags) & TF_MASK)
/*
 * kprobe_fault_handler - called when a fault (e.g. page fault) occurs while
 * a kprobe is active.  Dispatches on the per-CPU kprobe state: aborts an
 * in-progress single-step, or gives the user fault_handler and the kernel
 * exception fixup table a chance to resolve faults raised inside pre/post
 * handlers.  NOTE(review): excerpt — some case labels/returns are missing.
 */
2558 static int __kprobes
2559 kprobe_fault_handler (struct pt_regs *regs, int trapnr)
2561 struct kprobe *cur = kprobe_running ();
2562 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk ();
2564 switch (kcb->kprobe_status)
2567 case KPROBE_REENTER:
2569 * We are here because the instruction being single
2570 * stepped caused a page fault. We reset the current
2571 * kprobe and the eip points back to the probe address
2572 * and allow the page fault handler to continue as a
2573 * normal page fault.
/* Rewind IP to the original probe address and restore pre-probe eflags. */
2575 regs->EREG (ip) = (unsigned long) cur->addr;
2576 regs->EREG (flags) |= kcb->kprobe_old_eflags;
2577 if (kcb->kprobe_status == KPROBE_REENTER)
2578 restore_previous_kprobe (kcb);
2580 reset_current_kprobe ();
2581 preempt_enable_no_resched ();
2583 case KPROBE_HIT_ACTIVE:
2584 case KPROBE_HIT_SSDONE:
2586 * We increment the nmissed count for accounting,
2587 * we can also use npre/npostfault count for accounting
2588 * these specific fault cases.
2590 kprobes_inc_nmissed_count (cur);
2593 * We come here because instructions in the pre/post
2594 * handler caused the page_fault, this could happen
2595 * if handler tries to access user space by
2596 * copy_from_user(), get_user() etc. Let the
2597 * user-specified handler try to fix it first.
2599 if (cur->fault_handler && cur->fault_handler (cur, regs, trapnr))
2603 * In case the user-specified fault handler returned
2604 * zero, try to fix up.
2606 if (fixup_exception (regs))
2610 * fixup_exception() could not handle it,
2611 * Let do_page_fault() fix it.
/*
 * kprobe_exceptions_notify - die-notifier entry point.  Routes breakpoint,
 * debug, and fault notifications to kprobe_handler(), post_kprobe_handler()
 * and kprobe_fault_handler() respectively; returns NOTIFY_STOP when the
 * event was consumed by a probe.  NOTE(review): excerpt — the return-type
 * line, switch statement and case labels are missing from this view.
 */
2621 kprobe_exceptions_notify (struct notifier_block *self, unsigned long val, void *data)
2623 struct die_args *args = (struct die_args *) data;
2624 int ret = NOTIFY_DONE;
2626 DBPRINTF ("val = %ld, data = 0x%X", val, (unsigned int) data);
2628 /*if (args->regs && user_mode_vm (args->regs))
2631 DBPRINTF ("switch (val) %lu %d %d", val, DIE_INT3, DIE_TRAP);
2634 //#ifdef CONFIG_KPROBES
2639 DBPRINTF ("before kprobe_handler ret=%d %p", ret, args->regs);
2640 if (kprobe_handler (args->regs))
2642 DBPRINTF ("after kprobe_handler ret=%d %p", ret, args->regs);
2645 if (post_kprobe_handler (args->regs))
2649 // kprobe_running() needs smp_processor_id()
2651 if (kprobe_running () && kprobe_fault_handler (args->regs, args->trapnr))
2658 DBPRINTF ("ret=%d", ret);
/* Bookkeeping: count exceptions that a probe actually consumed. */
2659 if(ret == NOTIFY_STOP)
2660 handled_exceptions++;
2664 #endif // CONFIG_X86
/*
 * longjmp_break_handler - break handler paired with jprobes: when the
 * breakpoint planted inside jprobe_return() fires, verify the stack pointer
 * still matches the one saved at jprobe entry, then restore the saved
 * registers and stack so execution resumes in the original function.
 * NOTE(review): excerpt — from the second "DBPRINTF (p = ...)" onward the
 * lines appear to belong to a DIFFERENT routine in the full file (one that
 * arms a user-space probe by swapping in a breakpoint via
 * read/write_proc_vm_atomic); boundaries were lost to missing lines.
 */
2667 longjmp_break_handler (struct kprobe *p, struct pt_regs *regs)
2669 #if defined(CONFIG_X86)
2670 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk ();
2671 u8 *addr = (u8 *) (regs->EREG (ip) - 1);
2672 unsigned long stack_addr = (unsigned long) (kcb->jprobe_saved_esp);
2673 struct jprobe *jp = container_of (p, struct jprobe, kp);
2675 DBPRINTF ("p = %p\n", p);
/* Only act if the trap address lies inside jprobe_return(). */
2677 if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end))
/* NOTE(review): the "®s" on the next two code lines is mojibake for
 * "&regs" (the HTML entity "&reg" was decoded to the (R) sign); the
 * source must be repaired to "&regs->EREG(sp)". */
2679 if ((unsigned long *)(®s->EREG(sp)) != kcb->jprobe_saved_esp)
2681 struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
2682 printk ("current esp %p does not match saved esp %p\n", ®s->EREG (sp), kcb->jprobe_saved_esp);
2683 printk ("Saved registers for jprobe %p\n", jp);
2684 show_registers (saved_regs);
2685 printk ("Current registers\n");
2686 show_registers (regs);
/* Roll the register file and the saved stack window back to jprobe entry. */
2690 *regs = kcb->jprobe_saved_regs;
2691 memcpy ((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack, MIN_STACK_SIZE (stack_addr));
2692 preempt_enable_no_resched ();
2696 DBPRINTF ("p = %p\n", p);
2697 //DBPRINTF("p->opcode = 0x%lx *p->addr = 0x%lx p->addr = 0x%p\n", p->opcode, p->pid?*kaddr[0]:*p->addr, p->pid?kaddr[0]:p->addr);
2699 //kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
2700 kprobe_opcode_t insns[2];
/* Build the two-slot image: breakpoint followed by the displaced opcode. */
2704 insns[0] = BREAKPOINT_INSTRUCTION;
2705 insns[1] = p->opcode;
2706 //p->opcode = *p->addr;
2707 if (read_proc_vm_atomic (current, (unsigned long) (p->addr), &(p->opcode), sizeof (p->opcode)) < sizeof (p->opcode))
2709 printk ("ERROR[%lu]: failed to read vm of proc %s/%u addr %p.", nCount, current->comm, current->pid, p->addr);
2712 //*p->addr = BREAKPOINT_INSTRUCTION;
2713 //*(p->addr+1) = p->opcode;
2714 if (write_proc_vm_atomic (current, (unsigned long) (p->addr), insns, sizeof (insns)) < sizeof (insns))
2716 printk ("ERROR[%lu]: failed to write vm of proc %s/%u addr %p.", nCount, current->comm, current->pid, p->addr);
2722 DBPRINTF ("p->opcode = 0x%lx *p->addr = 0x%lx p->addr = 0x%p\n", p->opcode, *p->addr, p->addr);
/* Kernel-space path: patch memory directly and flush the icache window. */
2723 *(p->addr + 1) = p->opcode;
2724 p->opcode = *p->addr;
2725 *p->addr = BREAKPOINT_INSTRUCTION;
2726 flush_icache_range ((unsigned int) p->addr, (unsigned int) (((unsigned int) p->addr) + (sizeof (kprobe_opcode_t) * 2)));
2729 reset_current_kprobe ();
/*
 * arch_arm_kprobe - plant the breakpoint opcode at the probe address.
 * On x86 this goes through text_poke() (safe kernel-text patching); other
 * architectures write the slot directly and flush the icache range.
 * NOTE(review): excerpt — the #else/#endif lines are missing from view.
 */
2737 arch_arm_kprobe (struct kprobe *p)
2739 #if defined(CONFIG_X86)
2740 text_poke (p->addr, ((unsigned char[])
2741 {BREAKPOINT_INSTRUCTION}), 1);
2743 *p->addr = BREAKPOINT_INSTRUCTION;
2744 flush_icache_range ((unsigned long) p->addr, (unsigned long) p->addr + sizeof (kprobe_opcode_t));
/*
 * arch_disarm_kprobe - restore the original opcode saved in p->opcode,
 * undoing arch_arm_kprobe().  x86 uses text_poke(); other architectures
 * write directly and flush the icache range.
 */
2749 arch_disarm_kprobe (struct kprobe *p)
2751 #if defined(CONFIG_X86)
2752 text_poke (p->addr, &p->opcode, 1);
2754 *p->addr = p->opcode;
2755 flush_icache_range ((unsigned long) p->addr, (unsigned long) p->addr + sizeof (kprobe_opcode_t))";
/*
 * arch_arm_uprobe - plant the breakpoint at a user-space probe address in
 * task tsk's address space via write_proc_vm_atomic().  Panics on failure
 * (a half-armed user probe would leave the target process undefined).
 */
2760 arch_arm_uprobe (struct kprobe *p, struct task_struct *tsk)
2762 kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
2764 if (!write_proc_vm_atomic (tsk, (unsigned long) p->addr, &insn, sizeof (insn)))
2765 panic ("failed to write memory %p!\n", p->addr);
2769 arch_arm_uretprobe (struct kretprobe *p, struct task_struct *tsk)
/*
 * arch_disarm_uprobe - write the saved original opcode back into the target
 * task's address space, undoing arch_arm_uprobe().  Panics on failure.
 */
2774 arch_disarm_uprobe (struct kprobe *p, struct task_struct *tsk)
2776 if (!write_proc_vm_atomic (tsk, (unsigned long) p->addr, &p->opcode, sizeof (p->opcode)))
2777 panic ("failed to write memory %p!\n", p->addr);
2781 arch_disarm_uretprobe (struct kretprobe *p, struct task_struct *tsk)//, struct vm_area_struct *vma, struct page *page, unsigned long *kaddr)
2785 #if defined(CONFIG_X86)
/*
 * trampoline_probe_handler_x86 - thin asm-callable shim invoked from the
 * kretprobe trampoline below; delegates to the generic handler with p=NULL
 * (NULL p marks the x86 kernel-space path) and returns the real return
 * address for the trampoline asm to jump to.
 */
2786 /*fastcall*/ void *__kprobes trampoline_probe_handler_x86 (struct pt_regs *regs)
2788 return (void *)trampoline_probe_handler(NULL, regs);
2793 * Function return probe trampoline:
2794 * - init_kprobes() establishes a probepoint here
2795 * - When the probed function returns, this probe
2796 * causes the handlers to fire
/*
 * kretprobe_trampoline_holder - container function whose only purpose is to
 * emit the global label "kretprobe_trampoline" plus per-arch asm.  On x86
 * it builds a pt_regs frame on the stack, calls
 * trampoline_probe_handler_x86, and stashes the real return address where
 * the final return will pick it up.  NOTE(review): excerpt — most of the
 * push/pop asm lines are missing from this view; do not treat the visible
 * stack offsets (48/52) as a complete picture of the frame layout.
 */
2799 kretprobe_trampoline_holder (void)
2801 asm volatile (".global kretprobe_trampoline\n"
2802 "kretprobe_trampoline:\n"
2803 #if defined(CONFIG_MIPS)
2806 #elif defined(CONFIG_ARM)
2810 #elif defined(CONFIG_X86)
2812 /* skip cs, eip, orig_eax */
2824 " movl %esp, %eax\n"
2825 " call trampoline_probe_handler_x86\n"
2826 /* move eflags to cs */
2827 " movl 52(%esp), %edx\n"
2828 " movl %edx, 48(%esp)\n"
2829 /* save true return address on eflags */
2830 " movl %eax, 52(%esp)\n"
2838 /* skip eip, orig_eax, es, ds, fs */
2843 # error kretprobe_trampoline_holder is not implemented for this arch!!!
2848 * Called when the probe at kretprobe trampoline is hit
/*
 * trampoline_probe_handler - fires when a probed function returns into the
 * kretprobe trampoline.  Walks this task's kretprobe-instance list, runs
 * each registered return handler, recovers the real return address, patches
 * it back into the arch register state, recycles the instances, and (for
 * user-space retprobes) performs deferred unregistration.  Called with p
 * NULL on the x86 kernel-space path and non-NULL for user-space probes.
 * NOTE(review): excerpt — braces, several returns and some loop bodies are
 * missing from this view.
 */
2850 int __kprobes trampoline_probe_handler (struct kprobe *p, struct pt_regs *regs)
2852 struct kretprobe_instance *ri = NULL;
2853 struct hlist_head *head, empty_rp;
2854 struct hlist_node *node, *tmp;
2855 unsigned long flags, orig_ret_address = 0;
2856 unsigned long trampoline_address = (unsigned long) &kretprobe_trampoline;
2857 struct kretprobe *crp = NULL;
2858 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk ();
2863 // in case of user space retprobe trampoline is at the Nth instruction of US tramp
2864 trampoline_address = (unsigned long)(p->ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
2867 INIT_HLIST_HEAD (&empty_rp);
2868 spin_lock_irqsave (&kretprobe_lock, flags);
2869 head = kretprobe_inst_table_head (current);
2870 #if defined(CONFIG_X86)
2871 if(!p){ // X86 kernel space
2872 DBPRINTF ("regs %p", regs);
2873 /* fixup registers */
2874 regs->XREG (cs) = __KERNEL_CS | get_kernel_rpl ();
2875 regs->EREG (ip) = trampoline_address;
2876 regs->ORIG_EAX_REG = 0xffffffff;
2880 * It is possible to have multiple instances associated with a given
2881 * task either because an multiple functions in the call path
2882 * have a return probe installed on them, and/or more then one
2883 * return probe was registered for a target function.
2885 * We can handle this because:
2886 * - instances are always inserted at the head of the list
2887 * - when multiple return probes are registered for the same
2888 * function, the first instance's ret_addr will point to the
2889 * real return address, and all the rest will point to
2890 * kretprobe_trampoline
2892 hlist_for_each_entry_safe (ri, node, tmp, head, hlist)
2894 if (ri->task != current)
2895 /* another task is sharing our hash bucket */
2897 if (ri->rp && ri->rp->handler){
2898 #if defined(CONFIG_X86)
2899 if(!p){ // X86 kernel space
/* Make the handler run with a valid current_kprobe/kprobe_status context. */
2900 __get_cpu_var (current_kprobe) = &ri->rp->kp;
2901 get_kprobe_ctlblk ()->kprobe_status = KPROBE_HIT_ACTIVE;
2904 ri->rp->handler (ri, regs, ri->rp->priv_arg);
2905 #if defined(CONFIG_X86)
2906 if(!p) // X86 kernel space
2907 __get_cpu_var (current_kprobe) = NULL;
2911 orig_ret_address = (unsigned long) ri->ret_addr;
2912 recycle_rp_inst (ri, &empty_rp);
2913 if (orig_ret_address != trampoline_address)
2915 * This is the real return address. Any other
2916 * instances associated with this task are for
2917 * other calls deeper on the call stack
2921 kretprobe_assert (ri, orig_ret_address, trampoline_address);
2922 //BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
/* Sanity checks: a user-space trampoline implies tgid != 0 and vice versa. */
2923 if (trampoline_address != (unsigned long) &kretprobe_trampoline){
2924 if (ri->rp2) BUG_ON (ri->rp2->kp.tgid == 0);
2925 if (ri->rp) BUG_ON (ri->rp->kp.tgid == 0);
2926 else if (ri->rp2) BUG_ON (ri->rp2->kp.tgid == 0);
2928 if ((ri->rp && ri->rp->kp.tgid) || (ri->rp2 && ri->rp2->kp.tgid))
2929 BUG_ON (trampoline_address == (unsigned long) &kretprobe_trampoline);
/* Arch-specific: put the recovered return address back into the task state. */
2930 #if defined(CONFIG_MIPS)
2931 regs->regs[31] = orig_ret_address;
2932 DBPRINTF ("regs->cp0_epc = 0x%lx", regs->cp0_epc);
2933 if (trampoline_address != (unsigned long) &kretprobe_trampoline)
2934 regs->cp0_epc = orig_ret_address;
2936 regs->cp0_epc = regs->cp0_epc + 4;
2937 DBPRINTF ("regs->cp0_epc = 0x%lx", regs->cp0_epc);
2938 DBPRINTF ("regs->cp0_status = 0x%lx", regs->cp0_status);
2939 #elif defined(CONFIG_ARM)
2940 regs->uregs[14] = orig_ret_address;
2941 DBPRINTF ("regs->uregs[14] = 0x%lx\n", regs->uregs[14]);
2942 DBPRINTF ("regs->uregs[15] = 0x%lx\n", regs->uregs[15]);
2943 if (trampoline_address != (unsigned long) &kretprobe_trampoline)
2944 regs->uregs[15] = orig_ret_address;
2946 regs->uregs[15] += 4;
2947 DBPRINTF ("regs->uregs[15] = 0x%lx\n", regs->uregs[15]);
2948 #elif defined(CONFIG_X86)
2949 if(p){ // X86 user space
2950 regs->EREG(ip) = orig_ret_address;
2951 //printk (" uretprobe regs->eip = 0x%lx\n", regs->EREG(ip));
2955 if(p){ // ARM, MIPS, X86 user space
2956 if (kcb->kprobe_status == KPROBE_REENTER)
2957 restore_previous_kprobe (kcb);
2959 reset_current_kprobe ();
2961 //TODO: test - enter function, delete us retprobe, exit function
2962 // for user space retprobes only - deferred deletion
2963 if (trampoline_address != (unsigned long) &kretprobe_trampoline)
2965 // if we are not at the end of the list and current retprobe should be disarmed
2966 if (node && ri->rp2)
2969 /*sprintf(die_msg, "deferred disarm p->addr = %p [%lx %lx %lx]\n",
2970 crp->kp.addr, *kaddrs[0], *kaddrs[1], *kaddrs[2]);
2971 DIE(die_msg, regs); */
2972 // look for other instances for the same retprobe
2973 hlist_for_each_entry_continue (ri, node, hlist)
2975 if (ri->task != current)
2976 continue; /* another task is sharing our hash bucket */
2977 if (ri->rp2 == crp) //if instance belong to the same retprobe
2981 { // if there are no more instances for this retprobe
2983 DBPRINTF ("defered retprobe deletion p->addr = %p", crp->kp.addr);
2984 unregister_uprobe (&crp->kp, current, 1);
2991 spin_unlock_irqrestore (&kretprobe_lock, flags);
/* Free the instances collected on empty_rp outside the lock. */
2992 hlist_for_each_entry_safe (ri, node, tmp, &empty_rp, hlist)
2994 hlist_del (&ri->hlist);
2997 #if defined(CONFIG_X86)
2998 if(!p) // X86 kernel space
2999 return (int)orig_ret_address;
3001 preempt_enable_no_resched ();
3003 * By returning a non-zero value, we are telling
3004 * kprobe_handler() that we don't want the post_handler
3005 * to run (and have re-enabled preemption)
3010 /* Called with kretprobe_lock held */
/*
 * __arch_prepare_kretprobe - at function entry, grab a free kretprobe
 * instance, record the real return address, and replace it with the
 * trampoline address (per-arch: MIPS ra/regs[31], ARM lr/uregs[14], x86
 * stack slot).  User-space probes (kp.tgid != 0, per the surrounding code)
 * use the per-probe instruction-slot trampoline instead of the kernel one.
 * Caller must hold kretprobe_lock (see comment above in the file).
 */
3011 void __kprobes __arch_prepare_kretprobe (struct kretprobe *rp, struct pt_regs *regs)
3013 struct kretprobe_instance *ri;
3015 DBPRINTF ("start\n");
3016 //TODO: test - remove retprobe after func entry but before its exit
3017 if ((ri = get_free_rp_inst (rp)) != NULL)
3022 #if defined(CONFIG_MIPS)
3023 ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
3025 regs->regs[31] = (unsigned long) (rp->kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
3026 else /* Replace the return addr with trampoline addr */
3027 regs->regs[31] = (unsigned long) &kretprobe_trampoline;
3028 #elif defined(CONFIG_ARM)
3029 ri->ret_addr = (kprobe_opcode_t *) regs->uregs[14];
3031 regs->uregs[14] = (unsigned long) (rp->kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
3032 else /* Replace the return addr with trampoline addr */
3033 regs->uregs[14] = (unsigned long) &kretprobe_trampoline;
3034 DBPRINTF ("ret addr set to %p->%lx\n", ri->ret_addr, regs->uregs[14]);
3035 #elif defined(CONFIG_X86)
3036 /* Replace the return addr with trampoline addr */
/* x86 user space: the return address lives on the user stack; swap it via
 * read/write_proc_vm_atomic. */
3038 unsigned long ra = (unsigned long) (rp->kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);/*, stack[6];
3039 if (!read_proc_vm_atomic (current, regs->EREG(sp), stack, sizeof(stack)))
3040 panic ("failed to read user space func stack %lx!\n", regs->EREG(sp));
3041 printk("stack: %lx %lx %lx %lx %lx %lx\n", stack[0], stack[1], stack[2], stack[3], stack[4], stack[5]);*/
3042 if (!read_proc_vm_atomic (current, regs->EREG(sp), &(ri->ret_addr), sizeof(ri->ret_addr)))
3043 panic ("failed to read user space func ra %lx!\n", regs->EREG(sp));
3044 if (!write_proc_vm_atomic (current, regs->EREG(sp), &ra, sizeof(ra)))
3045 panic ("failed to write user space func ra %lx!\n", regs->EREG(sp));
3046 //printk("__arch_prepare_kretprobe: ra %lx %p->%lx\n",regs->EREG(sp), ri->ret_addr, ra);
/* NOTE(review): the "®s" on the next code line is mojibake for "&regs"
 * (HTML entity decoding) — the source must be repaired to
 * "(unsigned long *)&regs->EREG(sp)". */
3049 unsigned long *sara = (unsigned long *)®s->EREG(sp);
3050 ri->ret_addr = (kprobe_opcode_t *)*sara;
3051 *sara = (unsigned long)&kretprobe_trampoline;
3052 DBPRINTF ("ra loc %p, origr_ra %p new ra %lx\n", sara, ri->ret_addr, *sara);
3055 #error __arch_prepare_kretprobe is not implemented for this arch!!!
3060 DBPRINTF ("WARNING: missed retprobe %p\n", rp->kp.addr);
3065 #if !defined(CONFIG_X86)
/* Kernel kprobe planted on the kretprobe trampoline itself (non-x86 only;
 * x86 reaches trampoline_probe_handler via the asm shim instead). */
3066 static struct kprobe trampoline_p =
3068 .addr = (kprobe_opcode_t *) & kretprobe_trampoline,
3069 .pre_handler = trampoline_probe_handler
3073 /*static void do_exit_probe_handler (void)
3075 printk("do_exit_probe_handler\n");
3076 unregister_all_uprobes(current, 1);
3080 static struct jprobe do_exit_p =
3082 .entry = (kprobe_pre_entry_handler_t)do_exit_probe_handler
3085 //--------------------- Declaration of module dependencies ------------------------//
/*
 * Module-dependency declarations: each DECLARE_MOD_FUNC_DEP(name, ...)
 * defines a function pointer __ref_<name> that is resolved at load time via
 * kallsyms (see INIT_MOD_DEP_VAR further down), letting the module call
 * kernel-internal, unexported functions.  DECLARE_MOD_CB_DEP declares a
 * plain named pointer.  Which declarations exist depends on kernel
 * version, config and architecture.
 */
3086 #define DECLARE_MOD_FUNC_DEP(name, ret, ...) ret(*__ref_##name)(__VA_ARGS__)
3087 #define DECLARE_MOD_CB_DEP(name, ret, ...) ret(*name)(__VA_ARGS__)
3089 DECLARE_MOD_CB_DEP(kallsyms_search, unsigned long, const char *name);
3090 DECLARE_MOD_FUNC_DEP(access_process_vm, int, struct task_struct * tsk, unsigned long addr, void *buf, int len, int write);
3092 DECLARE_MOD_FUNC_DEP(find_extend_vma, struct vm_area_struct *, struct mm_struct * mm, unsigned long addr);
3094 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
3095 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
/* handle_mm_fault's signature changed in 2.6.31 (write_access -> flags). */
3096 DECLARE_MOD_FUNC_DEP(handle_mm_fault, int, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access);
3098 DECLARE_MOD_FUNC_DEP(handle_mm_fault, int, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags);
3102 DECLARE_MOD_FUNC_DEP(get_gate_vma, struct vm_area_struct *, struct task_struct *tsk);
3103 DECLARE_MOD_FUNC_DEP(in_gate_area_no_task, int, unsigned long addr);
3104 DECLARE_MOD_FUNC_DEP(follow_page, struct page *, struct vm_area_struct * vma, unsigned long address, unsigned int foll_flags);
3105 DECLARE_MOD_FUNC_DEP(__flush_anon_page, void, struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
3106 DECLARE_MOD_FUNC_DEP(vm_normal_page, struct page *, struct vm_area_struct *vma, unsigned long addr, pte_t pte);
3107 DECLARE_MOD_FUNC_DEP(flush_ptrace_access, void, struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *kaddr, unsigned long len, int write);
/* Local copy of the kernel's init_mm (unexported); pointer resolved via
 * kallsyms in arch_init_kprobes(). */
3109 struct mm_struct* init_mm_ptr;
3110 struct mm_struct init_mm;
3112 // deps controled by config macros
3113 #ifdef KERNEL_HAS_ISPAGEPRESENT
3114 DECLARE_MOD_FUNC_DEP(is_page_present, int, struct mm_struct * mm, unsigned long address);
3116 #if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
3117 DECLARE_MOD_FUNC_DEP(freeze_processes, int, void);
3118 DECLARE_MOD_FUNC_DEP(thaw_processes, void, void);
3120 // deps controled by arch type
3121 #if defined(CONFIG_MIPS)
3122 DECLARE_MOD_CB_DEP(flush_icache_range, void, unsigned long __user start, unsigned long __user end);
3123 DECLARE_MOD_CB_DEP(flush_icache_page, void, struct vm_area_struct * vma, struct page * page);
3124 DECLARE_MOD_CB_DEP(flush_cache_page, void, struct vm_area_struct * vma, unsigned long page);
3125 #elif defined(CONFIG_X86)
3126 DECLARE_MOD_FUNC_DEP(module_alloc, void *, unsigned long size);
3127 DECLARE_MOD_FUNC_DEP(module_free, void, struct module *mod, void *module_region);
3128 DECLARE_MOD_FUNC_DEP(fixup_exception, int, struct pt_regs * regs);
/* text_poke's prototype changed in 2.6.26 (returns void * and takes const). */
3129 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
3130 DECLARE_MOD_FUNC_DEP(text_poke, void, void *addr, unsigned char *opcode, int len);
3132 DECLARE_MOD_FUNC_DEP(text_poke, void *, void *addr, const void *opcode, size_t len);
3134 DECLARE_MOD_FUNC_DEP(show_registers, void, struct pt_regs * regs);
3135 #elif defined(CONFIG_ARM)
3136 #if defined(CONFIG_CPU_CACHE_VIPT) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18))
3137 //DECLARE_MOD_FUNC_DEP(flush_ptrace_access, void, struct vm_area_struct * vma, struct page * page, unsigned long uaddr, void *kaddr, unsigned long len, int write);
3140 // deps controled by kernel version
/* put_task_struct moved to RCU callback form only in 2.6.16.x kernels. */
3141 #if (LINUX_VERSION_CODE != KERNEL_VERSION(2, 6, 16))
3142 DECLARE_MOD_FUNC_DEP(put_task_struct, void, struct task_struct *tsk);
3144 DECLARE_MOD_FUNC_DEP(put_task_struct, void, struct rcu_head * rhp);
3147 //----------------- Implementation of module dependencies wrappers -----------------//
/*
 * Wrapper implementations: DECLARE_MOD_DEP_WRAPPER emits a real function
 * with the kernel symbol's name, and IMP_MOD_DEP_WRAPPER forwards it to the
 * kallsyms-resolved __ref_<name> pointer.  This gives the rest of the
 * module normal call syntax for unexported kernel functions.
 */
3148 #define DECLARE_MOD_DEP_WRAPPER(name, ret, ...) ret name(__VA_ARGS__)
3149 #define IMP_MOD_DEP_WRAPPER(name, ...) \
3151 return __ref_##name(__VA_ARGS__); \
3153 /*#define IMP_MOD_DEP_WRAPPER_NORET(name, ...) \
3155 return __ref_##name(__VA_ARGS__); \
3158 DECLARE_MOD_DEP_WRAPPER(access_process_vm, int, struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
3159 IMP_MOD_DEP_WRAPPER(access_process_vm, tsk, addr, buf, len, write)
3161 DECLARE_MOD_DEP_WRAPPER (find_extend_vma, struct vm_area_struct *, struct mm_struct * mm, unsigned long addr)
3162 IMP_MOD_DEP_WRAPPER (find_extend_vma, mm, addr)
3164 #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18)
3165 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 30)
3166 DECLARE_MOD_DEP_WRAPPER (handle_mm_fault, int, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, int write_access)
3167 IMP_MOD_DEP_WRAPPER (handle_mm_fault, mm, vma, address, write_access)
3169 DECLARE_MOD_DEP_WRAPPER (handle_mm_fault, int, struct mm_struct *mm, struct vm_area_struct *vma, unsigned long address, unsigned int flags)
3170 IMP_MOD_DEP_WRAPPER (handle_mm_fault, mm, vma, address, flags)
3174 DECLARE_MOD_DEP_WRAPPER (get_gate_vma, struct vm_area_struct *, struct task_struct *tsk)
3175 IMP_MOD_DEP_WRAPPER (get_gate_vma, tsk)
3177 DECLARE_MOD_DEP_WRAPPER (in_gate_area_no_task, int, unsigned long addr)
3178 IMP_MOD_DEP_WRAPPER (in_gate_area_no_task, addr)
3180 DECLARE_MOD_DEP_WRAPPER (follow_page, struct page *, struct vm_area_struct * vma, unsigned long address, unsigned int foll_flags)
3181 IMP_MOD_DEP_WRAPPER (follow_page, vma, address, foll_flags)
3183 DECLARE_MOD_DEP_WRAPPER (__flush_anon_page, void, struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
3184 IMP_MOD_DEP_WRAPPER (__flush_anon_page, vma, page, vmaddr)
3186 DECLARE_MOD_DEP_WRAPPER(vm_normal_page, struct page *, struct vm_area_struct *vma, unsigned long addr, pte_t pte)
3187 IMP_MOD_DEP_WRAPPER (vm_normal_page, vma, addr, pte)
3189 DECLARE_MOD_DEP_WRAPPER (flush_ptrace_access, void, struct vm_area_struct *vma, struct page *page, unsigned long uaddr, void *kaddr, unsigned long len, int write)
3190 IMP_MOD_DEP_WRAPPER (flush_ptrace_access, vma, page, uaddr, kaddr, len, write)
3193 // deps controled by config macros
3194 #ifdef KERNEL_HAS_ISPAGEPRESENT
/* is_page_present: hand-written wrapper because it must take the
 * page_table_lock around the resolved call. */
3195 int is_page_present (struct mm_struct *mm, unsigned long address)
3199 spin_lock (&(mm->page_table_lock));
3200 ret = __ref_is_page_present (mm, address);
3201 spin_unlock (&(mm->page_table_lock));
3206 #if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
3207 DECLARE_MOD_DEP_WRAPPER(freeze_processes, int, void)
3208 IMP_MOD_DEP_WRAPPER(freeze_processes)
3209 DECLARE_MOD_DEP_WRAPPER(thaw_processes, void, void)
3210 IMP_MOD_DEP_WRAPPER(thaw_processes)
3213 // deps controled by arch type
3214 #if defined(CONFIG_MIPS)
3215 #elif defined(CONFIG_X86)
3216 DECLARE_MOD_DEP_WRAPPER(module_alloc, void *, unsigned long size)
3217 IMP_MOD_DEP_WRAPPER(module_alloc, size)
3218 DECLARE_MOD_DEP_WRAPPER(module_free, void, struct module *mod, void *module_region)
3219 IMP_MOD_DEP_WRAPPER(module_free, mod, module_region)
3220 DECLARE_MOD_DEP_WRAPPER(fixup_exception, int, struct pt_regs * regs)
3221 IMP_MOD_DEP_WRAPPER(fixup_exception, regs)
3222 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26))
3223 DECLARE_MOD_DEP_WRAPPER(text_poke, void, void *addr, unsigned char *opcode, int len)
3225 DECLARE_MOD_DEP_WRAPPER(text_poke, void *, void *addr, const void *opcode, size_t len)
3227 IMP_MOD_DEP_WRAPPER(text_poke, addr, opcode, len)
3228 DECLARE_MOD_DEP_WRAPPER(show_registers, void, struct pt_regs * regs)
3229 IMP_MOD_DEP_WRAPPER(show_registers, regs)
3230 #elif defined(CONFIG_ARM)
3232 // deps controled by kernel version
3233 #if (LINUX_VERSION_CODE != KERNEL_VERSION(2, 6, 16))
3234 //DECLARE_MOD_FUNC_DEP(put_task_struct, void, struct task_struct *tsk);
3235 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11))
3236 DECLARE_MOD_DEP_WRAPPER(put_task_struct, void, struct task_struct *tsk)
3237 IMP_MOD_DEP_WRAPPER(put_task_struct, tsk)
3238 #else // >= 2.6.11 and != 2.6.16.x
3239 DECLARE_MOD_DEP_WRAPPER(__put_task_struct, void, struct task_struct *tsk)
3240 IMP_MOD_DEP_WRAPPER(put_task_struct, tsk)
3243 DECLARE_MOD_DEP_WRAPPER(__put_task_struct_cb, void, struct rcu_head *rhp)
3244 IMP_MOD_DEP_WRAPPER(put_task_struct, rhp)
3247 //---------------------- Module dependencies initialization --------------------//
/*
 * Dependency initialization: resolve a symbol name through kallsyms_search
 * into either the __ref_<dep> pointer (INIT_MOD_DEP_VAR) or a plain named
 * pointer (INIT_MOD_DEP_CB), logging when the symbol cannot be found.
 * NOTE(review): excerpt — the do/while bodies and error returns between
 * these lines are missing from this view.
 */
3248 #define INIT_MOD_DEP_VAR(dep, name) \
3250 __ref_##dep = (void *) kallsyms_search (#name); \
3253 DBPRINTF (#name " is not found! Oops. Where is it?"); \
3258 #define INIT_MOD_DEP_CB(dep, name) \
3260 dep = (void *) kallsyms_search (#name); \
3263 DBPRINTF (#name " is not found! Oops. Where is it?"); \
/*
 * arch_init_kprobes - module init: resolve all unexported-symbol
 * dependencies via kallsyms, then (non-x86 only) patch the kernel's
 * breakpoint entry path (do_bp / do_undefinstr) so it branches into
 * kprobe_handler, saving the original instructions for restoration in
 * arch_exit_kprobes().  NOTE(review): excerpt — braces, some #if/#endif
 * lines and error-path returns are missing from this view.
 */
3268 int __init arch_init_kprobes (void)
3270 #if !defined(CONFIG_X86)
3271 unsigned int xDoBp; unsigned int xKProbeHandler;
3273 #if defined(CONFIG_MIPS)
3274 unsigned int xRegHi; unsigned int xRegLo;
3278 // Prepare to lookup names
3279 kallsyms_search = (void *) ksyms;
3280 DBPRINTF ("kallsyms=0x%08x\n", ksyms);
3282 sched_addr = (kprobe_opcode_t *)kallsyms_search("__switch_to");//"schedule");
3283 fork_addr = (kprobe_opcode_t *)kallsyms_search("do_fork");
3285 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 18))
3286 INIT_MOD_DEP_VAR(handle_mm_fault, handle_mm_fault);
3288 INIT_MOD_DEP_VAR(flush_ptrace_access, flush_ptrace_access);
3289 INIT_MOD_DEP_VAR(find_extend_vma, find_extend_vma);
3290 INIT_MOD_DEP_VAR(get_gate_vma, get_gate_vma);
3291 INIT_MOD_DEP_VAR(in_gate_area_no_task, in_gate_area_no_task);
3292 INIT_MOD_DEP_VAR(follow_page, follow_page);
3293 INIT_MOD_DEP_VAR(__flush_anon_page, __flush_anon_page);
3294 INIT_MOD_DEP_VAR(vm_normal_page, vm_normal_page);
3296 init_mm_ptr = (struct mm_struct*) kallsyms_search ("init_mm");
/* NOTE(review): memcmp's result is discarded here — this strongly looks
 * like it was meant to be memcpy(&init_mm, init_mm_ptr, sizeof(struct
 * mm_struct)) to clone the kernel's init_mm into the local copy declared
 * above; as written the local init_mm stays zero-filled.  Confirm intent
 * against the original project history before fixing. */
3297 memcmp(init_mm_ptr, &init_mm, sizeof(struct mm_struct));
3299 INIT_MOD_DEP_VAR(access_process_vm, access_process_vm);
3300 #ifdef KERNEL_HAS_ISPAGEPRESENT
3301 INIT_MOD_DEP_VAR(is_page_present, is_page_present);
3303 #if (LINUX_VERSION_CODE != KERNEL_VERSION(2, 6, 16))
3304 # if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11))
3305 INIT_MOD_DEP_VAR(put_task_struct, put_task_struct);
3307 INIT_MOD_DEP_VAR(put_task_struct, __put_task_struct);
3310 INIT_MOD_DEP_VAR(put_task_struct, __put_task_struct_cb);
3312 #if defined(CONFIG_MIPS)
3313 INIT_MOD_DEP_CB(flush_icache_range, r4k_flush_icache_range);
3314 INIT_MOD_DEP_CB(flush_icache_page, r4k_flush_icache_page);
3315 INIT_MOD_DEP_CB(flush_cache_page, r4k_flush_cache_page);
3316 #elif defined(CONFIG_X86)
3317 INIT_MOD_DEP_VAR(module_alloc, module_alloc);
3318 INIT_MOD_DEP_VAR(module_free, module_free);
3319 INIT_MOD_DEP_VAR(fixup_exception, fixup_exception);
3320 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
/* NOTE(review): typo in the diagnostic text below ("necessaryf") — it is
 * part of the #error directive, left untouched here. */
3321 # error this kernel version has no text_poke function which is necessaryf for x86 ach!!!
3323 INIT_MOD_DEP_VAR(text_poke, text_poke);
3325 INIT_MOD_DEP_VAR(show_registers, show_registers);
3326 #if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
3327 INIT_MOD_DEP_VAR(freeze_processes, freeze_processes);
3328 INIT_MOD_DEP_VAR(thaw_processes, thaw_processes);
3331 #endif // CONFIG_X86
3333 #if !defined(CONFIG_X86)
3334 // Get instruction addresses
3335 # if defined(CONFIG_MIPS)
3336 xDoBp = (unsigned int) kallsyms_search ("do_bp");
3337 # elif defined(CONFIG_ARM)
3338 xDoBp = (unsigned int) kallsyms_search ("do_undefinstr");
3340 xKProbeHandler = (unsigned int) &kprobe_handler;
3341 gl_nNumberOfInstructions = sizeof (arrTrapsTemplate) / sizeof (arrTrapsTemplate[0]);
3342 gl_nCodeSize = gl_nNumberOfInstructions * sizeof (unsigned int);
3343 DBPRINTF ("nNumberOfInstructions = %d\n", gl_nNumberOfInstructions);
3344 // Save original code
3345 arrTrapsOriginal = kmalloc (gl_nCodeSize /* + sizeof(unsigned int) */ , GFP_KERNEL);
3346 if (!arrTrapsOriginal)
3348 DBPRINTF ("Unable to allocate space for original code of <do_bp>!\n");
3351 memcpy (arrTrapsOriginal, (void *) xDoBp, gl_nCodeSize);
3353 #if defined(CONFIG_MIPS)
/* MIPS: splice the handler address into the lui/addiu template, adjusting
 * for sign extension of the low half-word. */
3354 xRegHi = HIWORD (xKProbeHandler);
3355 xRegLo = LOWORD (xKProbeHandler);
3356 if (xRegLo >= 0x8000)
3358 arrTrapsTemplate[REG_HI_INDEX] |= xRegHi;
3359 arrTrapsTemplate[REG_LO_INDEX] |= xRegLo;
3360 #elif defined(CONFIG_ARM)
3361 arrTrapsTemplate[NOTIFIER_CALL_CHAIN_INDEX] = arch_construct_brunch (xKProbeHandler, xDoBp + NOTIFIER_CALL_CHAIN_INDEX * 4, 1);
3362 //arrTrapsTemplate[NOTIFIER_CALL_CHAIN_INDEX1] = arch_construct_brunch(xKProbeHandler,
3363 // xDoBp + NOTIFIER_CALL_CHAIN_INDEX1 * 4, 1);
3364 //arrTrapsTemplate[NOTIFIER_CALL_CHAIN_INDEX2] = arch_construct_brunch((unsigned int)arrTrapsOriginal,
3365 // xDoBp + NOTIFIER_CALL_CHAIN_INDEX2 * 4, 1);
3366 //arrTrapsOriginal[gl_nNumberOfInstructions] = arch_construct_brunch(xDoBp + gl_nNumberOfInstructions * 4,
3367 // (unsigned int)(arrTrapsOriginal + gl_nNumberOfInstructions), 1);
3369 /*for(i = 0; i < gl_nNumberOfInstructions+1; i++)
3371 printk("%08x\n", arrTrapsOriginal[i]);
3373 /*do_exit_p.kp.addr = (kprobe_opcode_t *)kallsyms_search ("do_exit");
3374 if (!do_exit_p.kp.addr)
3376 DBPRINTF ("do_exit is not found! Oops. Where is it?");
3379 if((ret = register_jprobe (&do_exit_p, 0)) != 0)
/* Live-patch the trap entry with the prepared template, then register the
 * trampoline probe. */
3383 memcpy ((void *) xDoBp, arrTrapsTemplate, gl_nCodeSize);
3384 flush_icache_range (xDoBp, xDoBp + gl_nCodeSize);
3385 if((ret = register_kprobe (&trampoline_p, 0)) != 0){
3386 //unregister_jprobe(&do_exit_p, 0);
/*
 * arch_exit_kprobes - module exit (non-x86): locate the patched trap entry
 * again, copy the saved original instructions back over the template, flush
 * the icache, and free the backup buffer.  NOTE(review): excerpt — the
 * declaration of xDoBp and the function braces are on lines missing from
 * this view.
 */
3394 void __exit arch_exit_kprobes (void)
3396 #if !defined(CONFIG_X86)
3398 // Get instruction address
3399 #if defined(CONFIG_MIPS)
3400 xDoBp = (unsigned int) kallsyms_search ("do_bp");
3401 #elif defined(CONFIG_ARM)
3402 xDoBp = (unsigned int) kallsyms_search ("do_undefinstr");
3404 //unregister_jprobe(&do_exit_p, 0);
3405 // Replace back the original code
3406 memcpy ((void *) xDoBp, arrTrapsOriginal, gl_nCodeSize);
3407 flush_icache_range (xDoBp, xDoBp + gl_nCodeSize);
3408 kfree (arrTrapsOriginal);
/* Guard against double-free on a repeated exit path. */
3409 arrTrapsOriginal = NULL;
3413 MODULE_LICENSE ("Dual BSD/GPL");