--- /dev/null
- printk("no_uprobe\n");
+/*
+ * Dynamic Binary Instrumentation Module based on KProbes
+ * modules/uprobe/arch/asm-arm/swap_uprobes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Samsung Electronics, 2006-2010
+ *
+ * 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
+ * Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
+ * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
+ *
+ */
+
+#include <dbi_kprobes.h>
+#include <asm/dbi_kprobes.h>
+#include <asm/trampoline_arm.h>
+#include <asm/traps.h>
+#include <swap_uprobes.h>
+#include <asm/swap_uprobes.h>
+#include <dbi_insn_slots.h>
+#include <dbi_kprobes_deps.h>
+#include "trampoline_thumb.h"
+
+// FIXME:
+#include <dbi_kdebug.h>
+
+
/* Sync the I-cache with the D-cache after patching instructions at addr. */
#define flush_insns(addr, size) \
	flush_icache_range((unsigned long)(addr), \
			   (unsigned long)(addr) + (size))

/* Sign-extend x whose sign bit sits at position signbit (higher bits must be 0). */
#define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit)))))
/* Byte displacement of an ARM B/BL: 24-bit immediate << 2, sign bit at 25. */
#define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25)
+
+static inline long branch_t16_dest(kprobe_opcode_t insn, unsigned int insn_addr)
+{
+ long offset = insn & 0x3ff;
+ offset -= insn & 0x400;
+ return (insn_addr + 4 + offset * 2);
+}
+
+static inline long branch_cond_t16_dest(kprobe_opcode_t insn, unsigned int insn_addr)
+{
+ long offset = insn & 0x7f;
+ offset -= insn & 0x80;
+ return (insn_addr + 4 + offset * 2);
+}
+
+static inline long branch_t32_dest(kprobe_opcode_t insn, unsigned int insn_addr)
+{
+ unsigned int poff = insn & 0x3ff;
+ unsigned int offset = (insn & 0x07fe0000) >> 17;
+
+ poff -= (insn & 0x400);
+
+ if (insn & (1 << 12))
+ return ((insn_addr + 4 + (poff << 12) + offset * 4));
+ else
+ return ((insn_addr + 4 + (poff << 12) + offset * 4) & ~3);
+}
+
+static inline long cbz_t16_dest(kprobe_opcode_t insn, unsigned int insn_addr)
+{
+ unsigned int i = (insn & 0x200) >> 3;
+ unsigned int offset = (insn & 0xf8) >> 2;
+ return insn_addr + 4 + i + offset;
+}
+
+static kprobe_opcode_t get_addr_b(kprobe_opcode_t insn, kprobe_opcode_t *addr)
+{
+ // real position less then PC by 8
+ return (kprobe_opcode_t)((long)addr + 8 + branch_displacement(insn));
+}
+
+/* is instruction Thumb2 and NOT a branch, etc... */
+static int is_thumb2(kprobe_opcode_t insn)
+{
+ return ((insn & 0xf800) == 0xe800 ||
+ (insn & 0xf800) == 0xf000 ||
+ (insn & 0xf800) == 0xf800);
+}
+
/*
 * Build the ARM-mode single-step trampoline for probe @p and write it into
 * the probed task's instruction slot (p->ainsn.insn_arm).
 *
 * Plain instructions are copied into gen_insn_execbuf; PC-reading
 * instructions go into pc_dep_insn_execbuf (which materializes the PC value
 * for them); B/BL/BLX/BX get dedicated branch buffers holding the computed
 * target. p->safe_arm stays 1 whenever the instruction cannot be
 * single-stepped out of line in ARM mode.
 *
 * Returns 0 on success, -EINVAL for an unaligned probe address.
 * @atomic is currently unused.
 */
static int arch_copy_trampoline_arm_uprobe(struct kprobe *p, struct task_struct *task, int atomic)
{
	kprobe_opcode_t insns[UPROBES_TRAMP_LEN];
	int uregs, pc_dep;
	kprobe_opcode_t insn[MAX_INSN_SIZE];
	struct arch_specific_insn ainsn;

	/* Pessimistic default: assume not safe to execute out of line. */
	p->safe_arm = 1;
	if ((unsigned long)p->addr & 0x01) {
		printk("Error in %s at %d: attempt to register kprobe at an unaligned address\n", __FILE__, __LINE__);
		return -EINVAL;
	}

	insn[0] = p->opcode;
	ainsn.insn_arm = insn;
	/* NOTE(review): a zero return from arch_check_insn_arm() appears to
	 * mean "instruction is safe" — confirm against its definition. */
	if (!arch_check_insn_arm(&ainsn)) {
		p->safe_arm = 0;
	}

	/* Classify the instruction: which register fields it carries (uregs)
	 * and whether any of them refers to the PC (pc_dep). */
	uregs = pc_dep = 0;
	// Rn, Rm ,Rd
	if (ARM_INSN_MATCH(DPIS, insn[0]) || ARM_INSN_MATCH(LRO, insn[0]) ||
	    ARM_INSN_MATCH(SRO, insn[0])) {
		uregs = 0xb;
		if ((ARM_INSN_REG_RN(insn[0]) == 15) || (ARM_INSN_REG_RM(insn[0]) == 15) ||
		    (ARM_INSN_MATCH(SRO, insn[0]) && (ARM_INSN_REG_RD(insn[0]) == 15))) {
			DBPRINTF("Unboostable insn %lx, DPIS/LRO/SRO\n", insn[0]);
			pc_dep = 1;
		}

	// Rn ,Rd
	} else if (ARM_INSN_MATCH(DPI, insn[0]) || ARM_INSN_MATCH(LIO, insn[0]) ||
		   ARM_INSN_MATCH (SIO, insn[0])) {
		uregs = 0x3;
		if ((ARM_INSN_REG_RN(insn[0]) == 15) || (ARM_INSN_MATCH(SIO, insn[0]) &&
		    (ARM_INSN_REG_RD(insn[0]) == 15))) {
			pc_dep = 1;
			DBPRINTF("Unboostable insn %lx/%p, DPI/LIO/SIO\n", insn[0], p);
		}

	// Rn, Rm, Rs
	} else if (ARM_INSN_MATCH(DPRS, insn[0])) {
		uregs = 0xd;
		if ((ARM_INSN_REG_RN(insn[0]) == 15) || (ARM_INSN_REG_RM(insn[0]) == 15) ||
		    (ARM_INSN_REG_RS(insn[0]) == 15)) {
			pc_dep = 1;
			DBPRINTF("Unboostable insn %lx, DPRS\n", insn[0]);
		}

	// register list
	} else if (ARM_INSN_MATCH(SM, insn[0])) {
		uregs = 0x10;
		if (ARM_INSN_REG_MR (insn[0], 15))
		{
			DBPRINTF ("Unboostable insn %lx, SM\n", insn[0]);
			pc_dep = 1;
		}
	}

	// check instructions that can write result to SP andu uses PC
	if (pc_dep && (ARM_INSN_REG_RD (ainsn.insn_arm[0]) == 13)) {
		printk("Error in %s at %d: instruction check failed (arm)\n", __FILE__, __LINE__);
		p->safe_arm = 1;
		// TODO: move free to later phase
		//free_insn_slot (&uprobe_insn_pages, task, p->ainsn.insn_arm, 0);
		//ret = -EFAULT;
	}

	if (unlikely(uregs && pc_dep)) {
		/* PC-dependent: use the buffer that substitutes a PC value. */
		memcpy(insns, pc_dep_insn_execbuf, sizeof(insns));
		if (prep_pc_dep_insn_execbuf(insns, insn[0], uregs) != 0) {
			printk("Error in %s at %d: failed to prepare exec buffer for insn %lx!",
			       __FILE__, __LINE__, insn[0]);
			p->safe_arm = 1;
			// TODO: move free to later phase
			//free_insn_slot (&uprobe_insn_pages, task, p->ainsn.insn_arm, 0);
			//return -EINVAL;
		}

		/* Word 6 of the buffer: PC value the relocated insn should see. */
		insns[6] = (kprobe_opcode_t) (p->addr + 2);
	} else {
		memcpy(insns, gen_insn_execbuf, sizeof(insns));
		insns[UPROBES_TRAMP_INSN_IDX] = insn[0];
	}

	insns[UPROBES_TRAMP_RET_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
	/* Word 7: resume address (the instruction after the probed one). */
	insns[7] = (kprobe_opcode_t) (p->addr + 1);

	if (ARM_INSN_MATCH(B, ainsn.insn_arm[0]) && /* B */
	    !ARM_INSN_MATCH(BLX1, ainsn.insn_arm[0])) {
		/* B check can be false positive on BLX1 instruction */
		memcpy(insns, b_cond_insn_execbuf, sizeof(insns));
		insns[UPROBES_TRAMP_RET_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
		/* Carry the original condition code over to the buffer. */
		insns[0] |= insn[0] & 0xf0000000;
		insns[6] = get_addr_b(p->opcode, p->addr);
		insns[7] = (kprobe_opcode_t) (p->addr + 1);
	} else if (ARM_INSN_MATCH(BX, ainsn.insn_arm[0]) || // BX, BLX (Rm)
		   ARM_INSN_MATCH(BLX2, ainsn.insn_arm[0])) {
		memcpy(insns, b_r_insn_execbuf, sizeof (insns));
		insns[0] = insn[0];
		insns[UPROBES_TRAMP_RET_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
		insns[7] = (kprobe_opcode_t) (p->addr + 1);
	} else if (ARM_INSN_MATCH(BLX1, ainsn.insn_arm[0])) { // BL, BLX (Off)
		memcpy(insns, blx_off_insn_execbuf, sizeof(insns));
		/* BLX1 has no condition field; make the buffer pair AL. */
		insns[0] |= 0xe0000000;
		insns[1] |= 0xe0000000;
		insns[UPROBES_TRAMP_RET_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
		/* NOTE(review): "01000000" below is an OCTAL literal, not hex —
		 * confirm the intended H-bit extraction. */
		insns[6] = get_addr_b(p->opcode, p->addr) +
			   2 * (p->opcode & 01000000) + 1; /* jump to thumb */
		insns[7] = (kprobe_opcode_t) (p->addr + 1);
	} else if (ARM_INSN_MATCH(BL, ainsn.insn_arm[0])){ // BL
		memcpy(insns, blx_off_insn_execbuf, sizeof(insns));
		insns[0] |= insn[0] & 0xf0000000;
		insns[1] |= insn[0] & 0xf0000000;
		insns[UPROBES_TRAMP_RET_BREAK_IDX] = BREAKPOINT_INSTRUCTION;
		insns[6] = get_addr_b(p->opcode, p->addr);
		insns[7] = (kprobe_opcode_t) (p->addr + 1);
	}

	DBPRINTF("arch_prepare_uprobe: to %p - %lx %lx %lx %lx %lx %lx %lx %lx %lx",
		 p->ainsn.insn_arm, insns[0], insns[1], insns[2], insns[3], insns[4],
		 insns[5], insns[6], insns[7], insns[8]);
	if (!write_proc_vm_atomic(task, (unsigned long)p->ainsn.insn_arm, insns, sizeof(insns))) {
		panic("failed to write memory %p!\n", p->ainsn.insn_arm);
		// Mr_Nobody: we have to panic, really??...
		//free_insn_slot (&uprobe_insn_pages, task, p->ainsn.insn_arm, 0);
		//return -EINVAL;
	}

	return 0;
}
+
/*
 * Reject Thumb/Thumb-2 instructions that cannot be safely single-stepped
 * out of line: anything that reads or writes the PC (branches, BXJ,
 * PC-destination data processing and shifts, PC-relative loads/stores,
 * load-multiple) plus the LDRD/STRD forms noted below.
 *
 * Returns 0 when the instruction is safe, -EFAULT otherwise.
 */
static int arch_check_insn_thumb(struct arch_specific_insn *ainsn)
{
	int ret = 0;

	// check instructions that can change PC
	if (THUMB_INSN_MATCH(UNDEF, ainsn->insn_thumb[0]) ||
	    THUMB_INSN_MATCH(SWI, ainsn->insn_thumb[0]) ||
	    THUMB_INSN_MATCH(BREAK, ainsn->insn_thumb[0]) ||
	    THUMB2_INSN_MATCH(B1, ainsn->insn_thumb[0]) ||
	    THUMB2_INSN_MATCH(B2, ainsn->insn_thumb[0]) ||
	    THUMB2_INSN_MATCH(BXJ, ainsn->insn_thumb[0]) ||
	    (THUMB2_INSN_MATCH(ADR, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RD(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LDRW, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RT(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LDRW1, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RT(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LDRHW, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RT(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LDRHW1, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RT(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LDRWL, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RT(ainsn->insn_thumb[0]) == 15) ||
	    THUMB2_INSN_MATCH(LDMIA, ainsn->insn_thumb[0]) ||
	    THUMB2_INSN_MATCH(LDMDB, ainsn->insn_thumb[0]) ||
	    (THUMB2_INSN_MATCH(DP, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RD(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(RSBW, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RD(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(RORW, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RD(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(ROR, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RD(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LSLW1, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RD(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LSLW2, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RD(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LSRW1, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RD(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LSRW2, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RD(ainsn->insn_thumb[0]) == 15) ||
	    /* skip PC, #-imm12 -> SP, #-imm8 and Tegra-hanging instructions */
	    (THUMB2_INSN_MATCH(STRW1, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RN(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(STRBW1, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RN(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(STRHW1, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RN(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(STRW, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RN(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(STRHW, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RN(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LDRW, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RN(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LDRBW, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RN(ainsn->insn_thumb[0]) == 15) ||
	    (THUMB2_INSN_MATCH(LDRHW, ainsn->insn_thumb[0]) && THUMB2_INSN_REG_RN(ainsn->insn_thumb[0]) == 15) ||
	    /* skip STRDx/LDRDx Rt, Rt2, [Rd, ...] */
	    (THUMB2_INSN_MATCH(LDRD, ainsn->insn_thumb[0]) || THUMB2_INSN_MATCH(LDRD1, ainsn->insn_thumb[0]) || THUMB2_INSN_MATCH(STRD, ainsn->insn_thumb[0])) ) {
		DBPRINTF("Bad insn arch_check_insn_thumb: %lx\n", ainsn->insn_thumb[0]);
		ret = -EFAULT;
	}

	return ret;
}
+
/*
 * Patch a copy of pc_dep_insn_execbuf_thumb so that the probed PC-reading
 * Thumb instruction executes against a substituted PC value instead of the
 * real PC (the rewrites below replace the PC register field with SP, which
 * the buffer loads with the substitute value).
 *
 * @insns: trampoline buffer to patch (addressed as 16-bit halfwords below)
 * @insn:  the original 16/32-bit Thumb instruction
 * @uregs: mask selecting the register field(s) of @insn, as computed by
 *         arch_copy_trampoline_thumb_uprobe()
 *
 * NOTE(review): this function returns 0 on every path (including the early
 * "nothing to do" exits), so the caller's non-zero error check can never
 * fire — confirm whether a failure path was intended.
 * NOTE(review): mreg is never changed from its initial 0, so the
 * reg==6/7 remapping below always picks registers 0 and 1 as scratch —
 * confirm this is intended.
 */
static int prep_pc_dep_insn_execbuf_thumb(kprobe_opcode_t * insns, kprobe_opcode_t insn, int uregs)
{
	unsigned char mreg = 0;
	unsigned char reg = 0;

	/* Step 1: extract the register field of interest from @insn. */
	if (THUMB_INSN_MATCH(APC, insn) || THUMB_INSN_MATCH(LRO3, insn)) {
		reg = ((insn & 0xffff) & uregs) >> 8;
	} else {
		if (THUMB_INSN_MATCH(MOV3, insn)) {
			if (((((unsigned char) insn) & 0xff) >> 3) == 15) {
				reg = (insn & 0xffff) & uregs;
			} else {
				return 0;
			}
		} else {
			if (THUMB2_INSN_MATCH(ADR, insn)) {
				reg = ((insn >> 16) & uregs) >> 8;
				if (reg == 15) {
					return 0;
				}
			} else {
				if (THUMB2_INSN_MATCH(LDRW, insn) || THUMB2_INSN_MATCH(LDRW1, insn) ||
				    THUMB2_INSN_MATCH(LDRHW, insn) || THUMB2_INSN_MATCH(LDRHW1, insn) ||
				    THUMB2_INSN_MATCH(LDRWL, insn)) {
					reg = ((insn >> 16) & uregs) >> 12;
					if (reg == 15) {
						return 0;
					}
				} else {
					// LDRB.W PC, [PC, #immed] => PLD [PC, #immed], so Rt == PC is skipped
					if (THUMB2_INSN_MATCH(LDRBW, insn) || THUMB2_INSN_MATCH(LDRBW1, insn) ||
					    THUMB2_INSN_MATCH(LDREX, insn)) {
						reg = ((insn >> 16) & uregs) >> 12;
					} else {
						if (THUMB2_INSN_MATCH(DP, insn)) {
							reg = ((insn >> 16) & uregs) >> 12;
							if (reg == 15) {
								return 0;
							}
						} else {
							if (THUMB2_INSN_MATCH(RSBW, insn)) {
								reg = ((insn >> 12) & uregs) >> 8;
								if (reg == 15){
									return 0;
								}
							} else {
								if (THUMB2_INSN_MATCH(RORW, insn)) {
									reg = ((insn >> 12) & uregs) >> 8;
									if (reg == 15) {
										return 0;
									}
								} else {
									if (THUMB2_INSN_MATCH(ROR, insn) || THUMB2_INSN_MATCH(LSLW1, insn) ||
									    THUMB2_INSN_MATCH(LSLW2, insn) || THUMB2_INSN_MATCH(LSRW1, insn) ||
									    THUMB2_INSN_MATCH(LSRW2, insn)) {
										reg = ((insn >> 12) & uregs) >> 8;
										if (reg == 15) {
											return 0;
										}
									} else {
										if (THUMB2_INSN_MATCH(TEQ1, insn) || THUMB2_INSN_MATCH(TST1, insn)) {
											reg = 15;
										} else {
											if (THUMB2_INSN_MATCH(TEQ2, insn) || THUMB2_INSN_MATCH(TST2, insn)) {
												reg = THUMB2_INSN_REG_RM(insn);
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}

	/* Stores of PC itself: take Rt as the register of interest. */
	if ((THUMB2_INSN_MATCH(STRW, insn) || THUMB2_INSN_MATCH(STRBW, insn) ||
	     THUMB2_INSN_MATCH(STRD, insn) || THUMB2_INSN_MATCH(STRHT, insn) ||
	     THUMB2_INSN_MATCH(STRT, insn) || THUMB2_INSN_MATCH(STRHW1, insn) ||
	     THUMB2_INSN_MATCH(STRHW, insn)) && THUMB2_INSN_REG_RT(insn) == 15) {
		reg = THUMB2_INSN_REG_RT(insn);
	}

	/* Step 2: when the target register collides with the buffer's scratch
	 * registers (6/7), rewrite the buffer's push/pop/mov halfwords to use
	 * mreg/mreg+1 instead (mreg is 0 here — see NOTE above). */
	if (reg == 6 || reg == 7) {
		*((unsigned short*)insns + 0) = (*((unsigned short*)insns + 0) & 0x00ff) | ((1 << mreg) | (1 << (mreg + 1)));
		*((unsigned short*)insns + 1) = (*((unsigned short*)insns + 1) & 0xf8ff) | (mreg << 8);
		*((unsigned short*)insns + 2) = (*((unsigned short*)insns + 2) & 0xfff8) | (mreg + 1);
		*((unsigned short*)insns + 3) = (*((unsigned short*)insns + 3) & 0xffc7) | (mreg << 3);
		*((unsigned short*)insns + 7) = (*((unsigned short*)insns + 7) & 0xf8ff) | (mreg << 8);
		*((unsigned short*)insns + 8) = (*((unsigned short*)insns + 8) & 0xffc7) | (mreg << 3);
		*((unsigned short*)insns + 9) = (*((unsigned short*)insns + 9) & 0xffc7) | ((mreg + 1) << 3);
		*((unsigned short*)insns + 10) = (*((unsigned short*)insns + 10) & 0x00ff) | (( 1 << mreg) | (1 << (mreg + 1)));
	}

	/* Step 3: splice in the instruction itself, with PC replaced by SP. */
	if (THUMB_INSN_MATCH(APC, insn)) {
		// ADD Rd, PC, #immed_8*4 -> ADD Rd, SP, #immed_8*4
		*((unsigned short*)insns + 4) = ((insn & 0xffff) | 0x800);	// ADD Rd, SP, #immed_8*4
	} else {
		if (THUMB_INSN_MATCH(LRO3, insn)) {
			// LDR Rd, [PC, #immed_8*4] -> LDR Rd, [SP, #immed_8*4]
			*((unsigned short*)insns + 4) = ((insn & 0xffff) + 0x5000);	// LDR Rd, [SP, #immed_8*4]
		} else {
			if (THUMB_INSN_MATCH(MOV3, insn)) {
				// MOV Rd, PC -> MOV Rd, SP
				*((unsigned short*)insns + 4) = ((insn & 0xffff) ^ 0x10);	// MOV Rd, SP
			} else {
				if (THUMB2_INSN_MATCH(ADR, insn)) {
					// ADDW Rd, PC, #imm -> ADDW Rd, SP, #imm
					insns[2] = (insn & 0xfffffff0) | 0x0d;	// ADDW Rd, SP, #imm
				} else {
					if (THUMB2_INSN_MATCH(LDRW, insn) || THUMB2_INSN_MATCH(LDRBW, insn) ||
					    THUMB2_INSN_MATCH(LDRHW, insn)) {
						// LDR.W Rt, [PC, #-<imm_12>] -> LDR.W Rt, [SP, #-<imm_8>]
						// !!!!!!!!!!!!!!!!!!!!!!!!
						// !!! imm_12 vs. imm_8 !!!
						// !!!!!!!!!!!!!!!!!!!!!!!!
						insns[2] = (insn & 0xf0fffff0) | 0x0c00000d;	// LDR.W Rt, [SP, #-<imm_8>]
					} else {
						if (THUMB2_INSN_MATCH(LDRW1, insn) || THUMB2_INSN_MATCH(LDRBW1, insn) ||
						    THUMB2_INSN_MATCH(LDRHW1, insn) || THUMB2_INSN_MATCH(LDRD, insn) ||
						    THUMB2_INSN_MATCH(LDRD1, insn) || THUMB2_INSN_MATCH(LDREX, insn)) {
							// LDRx.W Rt, [PC, #+<imm_12>] -> LDRx.W Rt, [SP, #+<imm_12>] (+/-imm_8 for LDRD Rt, Rt2, [PC, #<imm_8>]
							insns[2] = (insn & 0xfffffff0) | 0xd;	// LDRx.W Rt, [SP, #+<imm_12>]
						} else {
							if (THUMB2_INSN_MATCH(MUL, insn)) {
								insns[2] = (insn & 0xfff0ffff) | 0x000d0000;	// MUL Rd, Rn, SP
							} else {
								if (THUMB2_INSN_MATCH(DP, insn)) {
									if (THUMB2_INSN_REG_RM(insn) == 15) {
										insns[2] = (insn & 0xfff0ffff) | 0x000d0000;	// DP Rd, Rn, PC
									} else if (THUMB2_INSN_REG_RN(insn) == 15) {
										insns[2] = (insn & 0xfffffff0) | 0xd;	// DP Rd, PC, Rm
									}
								} else {
									if (THUMB2_INSN_MATCH(LDRWL, insn)) {
										// LDRx.W Rt, [PC, #<imm_12>] -> LDRx.W Rt, [SP, #+<imm_12>] (+/-imm_8 for LDRD Rt, Rt2, [PC, #<imm_8>]
										insns[2] = (insn & 0xfffffff0) | 0xd;	// LDRx.W Rt, [SP, #+<imm_12>]
									} else {
										if (THUMB2_INSN_MATCH(RSBW, insn)) {
											insns[2] = (insn & 0xfffffff0) | 0xd;	// RSB{S}.W Rd, PC, #<const> -> RSB{S}.W Rd, SP, #<const>
										} else {
											if (THUMB2_INSN_MATCH(RORW, insn) || THUMB2_INSN_MATCH(LSLW1, insn) || THUMB2_INSN_MATCH(LSRW1, insn)) {
												if ((THUMB2_INSN_REG_RM(insn) == 15) && (THUMB2_INSN_REG_RN(insn) == 15)) {
													insns[2] = (insn & 0xfffdfffd);	// ROR.W Rd, PC, PC
												} else if (THUMB2_INSN_REG_RM(insn) == 15) {
													insns[2] = (insn & 0xfff0ffff) | 0xd0000;	// ROR.W Rd, Rn, PC
												} else if (THUMB2_INSN_REG_RN(insn) == 15) {
													insns[2] = (insn & 0xfffffff0) | 0xd;	// ROR.W Rd, PC, Rm
												}
											} else {
												if (THUMB2_INSN_MATCH(ROR, insn) || THUMB2_INSN_MATCH(LSLW2, insn) || THUMB2_INSN_MATCH(LSRW2, insn)) {
													insns[2] = (insn & 0xfff0ffff) | 0xd0000;	// ROR{S} Rd, PC, #<const> -> ROR{S} Rd, SP, #<const>
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}

	if (THUMB2_INSN_MATCH(STRW, insn) || THUMB2_INSN_MATCH(STRBW, insn)) {
		insns[2] = (insn & 0xfff0ffff) | 0x000d0000;	// STRx.W Rt, [Rn, SP]
	} else {
		if (THUMB2_INSN_MATCH(STRD, insn) || THUMB2_INSN_MATCH(STRHT, insn) ||
		    THUMB2_INSN_MATCH(STRT, insn) || THUMB2_INSN_MATCH(STRHW1, insn)) {
			if (THUMB2_INSN_REG_RN(insn) == 15) {
				insns[2] = (insn & 0xfffffff0) | 0xd;	// STRD/T/HT{.W} Rt, [SP, ...]
			} else {
				insns[2] = insn;
			}
		} else {
			/* NOTE(review): the outer condition already requires
			 * RN == 15, so the inner else branch is dead code. */
			if (THUMB2_INSN_MATCH(STRHW, insn) && (THUMB2_INSN_REG_RN(insn) == 15)) {
				if (THUMB2_INSN_REG_RN(insn) == 15) {
					insns[2] = (insn & 0xf0fffff0) | 0x0c00000d;	// STRH.W Rt, [SP, #-<imm_8>]
				} else {
					insns[2] = insn;
				}
			}
		}
	}

	// STRx PC, xxx
	if ((reg == 15) && (THUMB2_INSN_MATCH(STRW, insn)   ||
			    THUMB2_INSN_MATCH(STRBW, insn)  ||
			    THUMB2_INSN_MATCH(STRD, insn)   ||
			    THUMB2_INSN_MATCH(STRHT, insn)  ||
			    THUMB2_INSN_MATCH(STRT, insn)   ||
			    THUMB2_INSN_MATCH(STRHW1, insn) ||
			    THUMB2_INSN_MATCH(STRHW, insn) )) {
		insns[2] = (insns[2] & 0x0fffffff) | 0xd0000000;
	}

	if (THUMB2_INSN_MATCH(TEQ1, insn) || THUMB2_INSN_MATCH(TST1, insn)) {
		insns[2] = (insn & 0xfffffff0) | 0xd;	// TEQ SP, #<const>
	} else {
		if (THUMB2_INSN_MATCH(TEQ2, insn) || THUMB2_INSN_MATCH(TST2, insn)) {
			if ((THUMB2_INSN_REG_RN(insn) == 15) && (THUMB2_INSN_REG_RM(insn) == 15)) {
				insns[2] = (insn & 0xfffdfffd);	// TEQ/TST PC, PC
			} else if (THUMB2_INSN_REG_RM(insn) == 15) {
				insns[2] = (insn & 0xfff0ffff) | 0xd0000;	// TEQ/TST Rn, PC
			} else if (THUMB2_INSN_REG_RN(insn) == 15) {
				insns[2] = (insn & 0xfffffff0) | 0xd;	// TEQ/TST PC, Rm
			}
		}
	}

	return 0;
}
+
/*
 * Build the Thumb-mode trampoline for probe @p and write it into the probed
 * task's slot (p->ainsn.insn_thumb). Thumb counterpart of
 * arch_copy_trampoline_arm_uprobe(): plain instructions are copied into
 * gen_insn_execbuf_thumb, PC-reading instructions into the pc_dep buffer
 * (patched via prep_pc_dep_insn_execbuf_thumb()), and branch instructions
 * into dedicated buffers with precomputed targets. Halfwords 13-17 of the
 * buffer carry the break insn (0xdeff) and the return/resume addresses
 * (bit 0 set to stay in Thumb state).
 *
 * Returns 0 on success, -EINVAL for an unaligned probe address.
 * @atomic is currently unused.
 */
static int arch_copy_trampoline_thumb_uprobe(struct kprobe *p, struct task_struct *task, int atomic)
{
	int uregs, pc_dep;
	unsigned int addr;
	kprobe_opcode_t insn[MAX_INSN_SIZE];
	struct arch_specific_insn ainsn;
	kprobe_opcode_t insns[UPROBES_TRAMP_LEN * 2];

	/* Pessimistic default: assume not safe to execute out of line. */
	p->safe_thumb = 1;
	if ((unsigned long)p->addr & 0x01) {
		printk("Error in %s at %d: attempt to register kprobe at an unaligned address\n", __FILE__, __LINE__);
		return -EINVAL;
	}

	insn[0] = p->opcode;
	ainsn.insn_thumb = insn;
	if (!arch_check_insn_thumb(&ainsn)) {
		p->safe_thumb = 0;
	}

	/* Classify: uregs marks the register field(s) the insn uses,
	 * pc_dep is set when one of them is the PC. */
	uregs = 0;
	pc_dep = 0;

	if (THUMB_INSN_MATCH(APC, insn[0]) || THUMB_INSN_MATCH(LRO3, insn[0])) {
		uregs = 0x0700;		// 8-10
		pc_dep = 1;
	} else if (THUMB_INSN_MATCH(MOV3, insn[0]) && (((((unsigned char)insn[0]) & 0xff) >> 3) == 15)) {
		// MOV Rd, PC
		uregs = 0x07;
		pc_dep = 1;
	/* NOTE(review): no parentheses around the condition below — this only
	 * compiles because THUMB2_INSN_MATCH presumably expands to a fully
	 * parenthesized expression; consider adding them. */
	} else if THUMB2_INSN_MATCH(ADR, insn[0]) {
		uregs = 0x0f00;		// Rd 8-11
		pc_dep = 1;
	} else if (((THUMB2_INSN_MATCH(LDRW, insn[0]) || THUMB2_INSN_MATCH(LDRW1, insn[0]) ||
		     THUMB2_INSN_MATCH(LDRBW, insn[0]) || THUMB2_INSN_MATCH(LDRBW1, insn[0]) ||
		     THUMB2_INSN_MATCH(LDRHW, insn[0]) || THUMB2_INSN_MATCH(LDRHW1, insn[0]) ||
		     THUMB2_INSN_MATCH(LDRWL, insn[0])) && THUMB2_INSN_REG_RN(insn[0]) == 15) ||
		   THUMB2_INSN_MATCH(LDREX, insn[0]) ||
		   ((THUMB2_INSN_MATCH(STRW, insn[0]) || THUMB2_INSN_MATCH(STRBW, insn[0]) ||
		     THUMB2_INSN_MATCH(STRHW, insn[0]) || THUMB2_INSN_MATCH(STRHW1, insn[0])) &&
		    (THUMB2_INSN_REG_RN(insn[0]) == 15 || THUMB2_INSN_REG_RT(insn[0]) == 15)) ||
		   ((THUMB2_INSN_MATCH(STRT, insn[0]) || THUMB2_INSN_MATCH(STRHT, insn[0])) &&
		    (THUMB2_INSN_REG_RN(insn[0]) == 15 || THUMB2_INSN_REG_RT(insn[0]) == 15))) {
		uregs = 0xf000;		// Rt 12-15
		pc_dep = 1;
	} else if ((THUMB2_INSN_MATCH(LDRD, insn[0]) || THUMB2_INSN_MATCH(LDRD1, insn[0])) && (THUMB2_INSN_REG_RN(insn[0]) == 15)) {
		uregs = 0xff00;		// Rt 12-15, Rt2 8-11
		pc_dep = 1;
	} else if (THUMB2_INSN_MATCH(MUL, insn[0]) && THUMB2_INSN_REG_RM(insn[0]) == 15) {
		uregs = 0xf;
		pc_dep = 1;
	} else if (THUMB2_INSN_MATCH(DP, insn[0]) && (THUMB2_INSN_REG_RN(insn[0]) == 15 || THUMB2_INSN_REG_RM(insn[0]) == 15)) {
		uregs = 0xf000;		// Rd 12-15
		pc_dep = 1;
	} else if (THUMB2_INSN_MATCH(STRD, insn[0]) && ((THUMB2_INSN_REG_RN(insn[0]) == 15) || (THUMB2_INSN_REG_RT(insn[0]) == 15) || THUMB2_INSN_REG_RT2(insn[0]) == 15)) {
		uregs = 0xff00;		// Rt 12-15, Rt2 8-11
		pc_dep = 1;
	} else if (THUMB2_INSN_MATCH(RSBW, insn[0]) && THUMB2_INSN_REG_RN(insn[0]) == 15) {
		uregs = 0x0f00;		// Rd 8-11
		pc_dep = 1;
	} else if (THUMB2_INSN_MATCH (RORW, insn[0]) && (THUMB2_INSN_REG_RN(insn[0]) == 15 || THUMB2_INSN_REG_RM(insn[0]) == 15)) {
		uregs = 0x0f00;
		pc_dep = 1;
	} else if ((THUMB2_INSN_MATCH(ROR, insn[0]) || THUMB2_INSN_MATCH(LSLW2, insn[0]) || THUMB2_INSN_MATCH(LSRW2, insn[0])) && THUMB2_INSN_REG_RM(insn[0]) == 15) {
		uregs = 0x0f00;		// Rd 8-11
		pc_dep = 1;
	} else if ((THUMB2_INSN_MATCH(LSLW1, insn[0]) || THUMB2_INSN_MATCH(LSRW1, insn[0])) && (THUMB2_INSN_REG_RN(insn[0]) == 15 || THUMB2_INSN_REG_RM(insn[0]) == 15)) {
		uregs = 0x0f00;		// Rd 8-11
		pc_dep = 1;
	} else if ((THUMB2_INSN_MATCH(TEQ1, insn[0]) || THUMB2_INSN_MATCH(TST1, insn[0])) && THUMB2_INSN_REG_RN(insn[0]) == 15) {
		uregs = 0xf0000;	//Rn 0-3 (16-19)
		pc_dep = 1;
	} else if ((THUMB2_INSN_MATCH(TEQ2, insn[0]) || THUMB2_INSN_MATCH(TST2, insn[0])) &&
		   (THUMB2_INSN_REG_RN(insn[0]) == 15 || THUMB2_INSN_REG_RM(insn[0]) == 15)) {
		uregs = 0xf0000;	//Rn 0-3 (16-19)
		pc_dep = 1;
	}

	if (unlikely(uregs && pc_dep)) {
		/* PC-dependent path: patch the pc_dep buffer for this insn. */
		memcpy(insns, pc_dep_insn_execbuf_thumb, 18 * 2);
		if (prep_pc_dep_insn_execbuf_thumb(insns, insn[0], uregs) != 0) {
			printk("Error in %s at %d: failed to prepare exec buffer for insn %lx!",
			       __FILE__, __LINE__, insn[0]);
			p->safe_thumb = 1;
			//free_insn_slot (&uprobe_insn_pages, task, p->ainsn.insn_thumb, 0);
			//return -EINVAL;
		}

		/* Halfword 13: break insn; 14-15: substitute PC value;
		 * 16-17: resume address (bit 0 keeps Thumb state). */
		addr = ((unsigned int)p->addr) + 4;
		*((unsigned short*)insns + 13) = 0xdeff;
		*((unsigned short*)insns + 14) = addr & 0x0000ffff;
		*((unsigned short*)insns + 15) = addr >> 16;
		if (!is_thumb2(insn[0])) {
			addr = ((unsigned int)p->addr) + 2;
			*((unsigned short*)insns + 16) = (addr & 0x0000ffff) | 0x1;
			*((unsigned short*)insns + 17) = addr >> 16;
		} else {
			addr = ((unsigned int)p->addr) + 4;
			*((unsigned short*)insns + 16) = (addr & 0x0000ffff) | 0x1;
			*((unsigned short*)insns + 17) = addr >> 16;
		}
	} else {
		/* Generic path: copy the insn into the buffer verbatim. */
		memcpy(insns, gen_insn_execbuf_thumb, 18 * 2);
		*((unsigned short*)insns + 13) = 0xdeff;
		if (!is_thumb2(insn[0])) {
			addr = ((unsigned int)p->addr) + 2;
			*((unsigned short*)insns + 2) = insn[0];
			*((unsigned short*)insns + 16) = (addr & 0x0000ffff) | 0x1;
			*((unsigned short*)insns + 17) = addr >> 16;
		} else {
			addr = ((unsigned int)p->addr) + 4;
			insns[1] = insn[0];
			*((unsigned short*)insns + 16) = (addr & 0x0000ffff) | 0x1;
			*((unsigned short*)insns + 17) = addr >> 16;
		}
	}

	/* Branch instructions override the buffers chosen above. */
	if (THUMB_INSN_MATCH(B2, insn[0])) {
		memcpy(insns, b_off_insn_execbuf_thumb, sizeof(insns));
		*((unsigned short*)insns + 13) = 0xdeff;
		addr = branch_t16_dest(insn[0], (unsigned int)p->addr);
		*((unsigned short*)insns + 14) = (addr & 0x0000ffff) | 0x1;
		*((unsigned short*)insns + 15) = addr >> 16;
		*((unsigned short*)insns + 16) = 0;
		*((unsigned short*)insns + 17) = 0;

	} else if (THUMB_INSN_MATCH(B1, insn[0])) {
		memcpy(insns, b_cond_insn_execbuf_thumb, sizeof(insns));
		*((unsigned short*)insns + 13) = 0xdeff;
		/* Keep the original condition code on the buffer's branch. */
		*((unsigned short*)insns + 0) |= (insn[0] & 0xf00);
		addr = branch_cond_t16_dest(insn[0], (unsigned int)p->addr);
		*((unsigned short*)insns + 14) = (addr & 0x0000ffff) | 0x1;
		*((unsigned short*)insns + 15) = addr >> 16;
		addr = ((unsigned int)p->addr) + 2;
		*((unsigned short*)insns + 16) = (addr & 0x0000ffff) | 0x1;
		*((unsigned short*)insns + 17) = addr >> 16;

	} else if (THUMB_INSN_MATCH(BLX2, insn[0]) ||
		   THUMB_INSN_MATCH(BX, insn[0])) {
		memcpy(insns, b_r_insn_execbuf_thumb, sizeof(insns));
		*((unsigned short*)insns + 13) = 0xdeff;
		*((unsigned short*)insns + 4) = insn[0];
		addr = ((unsigned int)p->addr) + 2;
		*((unsigned short*)insns + 16) = (addr & 0x0000ffff) | 0x1;
		*((unsigned short*)insns + 17) = addr >> 16;

	} else if (THUMB2_INSN_MATCH(BLX1, insn[0]) ||
		   THUMB2_INSN_MATCH(BL, insn[0])) {
		memcpy(insns, blx_off_insn_execbuf_thumb, sizeof(insns));
		*((unsigned short*)insns + 13) = 0xdeff;
		addr = branch_t32_dest(insn[0], (unsigned int)p->addr);
		*((unsigned short*)insns + 14) = (addr & 0x0000ffff);
		*((unsigned short*)insns + 15) = addr >> 16;
		addr = ((unsigned int)p->addr) + 4;
		*((unsigned short*)insns + 16) = (addr & 0x0000ffff) | 0x1;
		*((unsigned short*)insns + 17) = addr >> 16;

	} else if (THUMB_INSN_MATCH(CBZ, insn[0])) {
		memcpy(insns, cbz_insn_execbuf_thumb, sizeof(insns));
		*((unsigned short*)insns + 13) = 0xdeff;
		/* NOTE(review): insn[0] & (~insn[0] & 0xf8) is identically 0,
		 * and the following &= 0x20 keeps it 0 — halfword 0 always
		 * ends up zero here. Confirm the intended CB{N}Z rewrite. */
		*((unsigned short*)insns + 0) = insn[0] & (~insn[0] & 0xf8);
		*((unsigned short*)insns + 0) &= 0x20;
		addr = cbz_t16_dest(insn[0], (unsigned int)p->addr);
		*((unsigned short*)insns + 14) = (addr & 0x0000ffff) | 0x1;
		*((unsigned short*)insns + 15) = addr >> 16;
		addr = ((unsigned int)p->addr) + 2;
		*((unsigned short*)insns + 16) = (addr & 0x0000ffff) | 0x1;
		*((unsigned short*)insns + 17) = addr >> 16;
	}

	if (!write_proc_vm_atomic (task, (unsigned long)p->ainsn.insn_thumb, insns, 18 * 2)) {
		panic("failed to write memory %p!\n", p->ainsn.insn_thumb);
		// Mr_Nobody: we have to panic, really??...
		//free_insn_slot (&uprobe_insn_pages, task, p->ainsn.insn_thumb, 0);
		//return -EINVAL;
	}

	return 0;
}
+
+int arch_prepare_uprobe(struct uprobe *up, struct hlist_head *page_list)
+{
+ int ret = 0;
+ struct kprobe *p = &up->kp;
+ struct task_struct *task = up->task;
+ kprobe_opcode_t insn[MAX_INSN_SIZE];
+
+ if ((unsigned long)p->addr & 0x01) {
+ printk("Error in %s at %d: attempt to register kprobe at an unaligned address\n", __FILE__, __LINE__);
+ return -EINVAL;
+ }
+
+ if (!read_proc_vm_atomic(task, (unsigned long)p->addr, &insn, MAX_INSN_SIZE * sizeof(kprobe_opcode_t))) {
+ panic("Failed to read memory task[tgid=%u, comm=%s] %p!\n", task->tgid, task->comm, p->addr);
+ }
+
+ p->opcode = insn[0];
+ p->ainsn.insn_arm = alloc_insn_slot(up->sm);
+ if (!p->ainsn.insn_arm) {
+ printk("Error in %s at %d: kprobe slot allocation error (arm)\n", __FILE__, __LINE__);
+ return -ENOMEM;
+ }
+
+ ret = arch_copy_trampoline_arm_uprobe(p, task, 1);
+ if (ret) {
+ free_insn_slot(up->sm, p->ainsn.insn_arm);
+ return -EFAULT;
+ }
+
+ p->ainsn.insn_thumb = alloc_insn_slot(up->sm);
+ if (!p->ainsn.insn_thumb) {
+ printk("Error in %s at %d: kprobe slot allocation error (thumb)\n", __FILE__, __LINE__);
+ return -ENOMEM;
+ }
+
+ ret = arch_copy_trampoline_thumb_uprobe(p, task, 1);
+ if (ret) {
+ free_insn_slot(up->sm, p->ainsn.insn_arm);
+ free_insn_slot(up->sm, p->ainsn.insn_thumb);
+ return -EFAULT;
+ }
+
+ if ((p->safe_arm) && (p->safe_thumb)) {
+ printk("Error in %s at %d: failed arch_copy_trampoline_*_uprobe() (both) [tgid=%u, addr=%lx, data=%lx]\n",
+ __FILE__, __LINE__, task->tgid, (unsigned long)p->addr, (unsigned long)p->opcode);
+ if (!write_proc_vm_atomic(task, (unsigned long)p->addr, &p->opcode, sizeof(p->opcode))) {
+ panic("Failed to write memory %p!\n", p->addr);
+ }
+
+ free_insn_slot(up->sm, p->ainsn.insn_arm);
+ free_insn_slot(up->sm, p->ainsn.insn_thumb);
+
+ return -EFAULT;
+ }
+
+ return ret;
+}
+
+void arch_opcode_analysis_uretprobe(struct uretprobe *rp)
+{
+ /* Remove retprobe if first insn overwrites lr */
+ rp->thumb_noret = !!(THUMB2_INSN_MATCH(BL, rp->up.kp.opcode) ||
+ THUMB2_INSN_MATCH(BLX1, rp->up.kp.opcode) ||
+ THUMB_INSN_MATCH(BLX2, rp->up.kp.opcode));
+
+ rp->arm_noret = !!(ARM_INSN_MATCH(BL, rp->up.kp.opcode) ||
+ ARM_INSN_MATCH(BLX1, rp->up.kp.opcode) ||
+ ARM_INSN_MATCH(BLX2, rp->up.kp.opcode));
+}
+
+void arch_prepare_uretprobe(struct uretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;
+ ri->sp = (kprobe_opcode_t *)regs->ARM_sp;
+
+ /* Set flag of current mode */
+ ri->sp = (kprobe_opcode_t *)((long)ri->sp | !!thumb_mode(regs));
+
+ if (thumb_mode(regs)) {
+ regs->ARM_lr = (unsigned long)(ri->rp->up.kp.ainsn.insn) + 0x1b;
+ } else {
+ regs->ARM_lr = (unsigned long)(ri->rp->up.kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
+ }
+}
+
+int setjmp_upre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct uprobe *up = container_of(p, struct uprobe, kp);
+ struct ujprobe *jp = container_of(up, struct ujprobe, up);
+
+ kprobe_pre_entry_handler_t pre_entry = (kprobe_pre_entry_handler_t)jp->pre_entry;
+ entry_point_t entry = (entry_point_t)jp->entry;
+
+ if (pre_entry) {
+ p->ss_addr = (kprobe_opcode_t *)pre_entry(jp->priv_arg, regs);
+ }
+
+ if (entry) {
+ entry(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2,
+ regs->ARM_r3, regs->ARM_r4, regs->ARM_r5);
+ } else {
+ arch_ujprobe_return();
+ }
+
+ return 0;
+}
+
+unsigned long arch_get_trampoline_addr(struct kprobe *p, struct pt_regs *regs)
+{
+ return thumb_mode(regs) ?
+ (unsigned long)(p->ainsn.insn) + 0x1b :
+ (unsigned long)(p->ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
+}
+
+void arch_set_orig_ret_addr(unsigned long orig_ret_addr, struct pt_regs *regs)
+{
+ regs->ARM_lr = orig_ret_addr;
+ regs->ARM_pc = orig_ret_addr & ~0x1;
+
+ if (regs->ARM_lr & 0x1)
+ regs->ARM_cpsr |= PSR_T_BIT;
+ else
+ regs->ARM_cpsr &= ~PSR_T_BIT;
+}
+
+static int check_validity_insn(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe *kp;
+
+ if (unlikely(thumb_mode(regs))) {
+ if (p->safe_thumb) {
+ goto disarm;
+ }
+
+ p->ainsn.insn = p->ainsn.insn_thumb;
+ list_for_each_entry_rcu(kp, &p->list, list) {
+ kp->ainsn.insn = p->ainsn.insn_thumb;
+ }
+ } else {
+ if (p->safe_arm) {
+ goto disarm;
+ }
+
+ p->ainsn.insn = p->ainsn.insn_arm;
+ list_for_each_entry_rcu(kp, &p->list, list) {
+ kp->ainsn.insn = p->ainsn.insn_arm;
+ }
+ }
+
+ return 0;
+
+disarm:
+ printk("Error in %s at %d: we are in arm mode (!) and check "
+ "instruction was fail (%0lX instruction at %p address)!\n",
+ __FILE__, __LINE__, p->opcode, p->addr);
+
+ /* Test case when we do our actions on already running application */
+ disarm_uprobe(p, kp2up(p)->task);
+ return -1;
+}
+
+static void restore_opcode_for_thumb(struct kprobe *p, struct pt_regs *regs)
+{
+ if (thumb_mode(regs) && !is_thumb2(p->opcode)) {
+ u16 tmp = p->opcode >> 16;
+ write_proc_vm_atomic(current,
+ (unsigned long)((u16*)p->addr + 1), &tmp, 2);
+ flush_insns(p->addr, 4);
+ }
+}
+
+static int uprobe_handler(struct pt_regs *regs)
+{
+ kprobe_opcode_t *addr = (kprobe_opcode_t *)(regs->ARM_pc);
+ struct task_struct *task = current;
+ pid_t tgid = task->tgid;
+ struct kprobe *p;
+
+ p = get_ukprobe(addr, tgid);
+
+ if (p == NULL) {
+ p = get_ukprobe_by_insn_slot(addr, tgid, regs);
+ if (p == NULL) {
++ printk("no_kprobe: Not one of ours: let "
++ "kernel handle it %p\n", addr);
+ return 1;
+ }
+
+ trampoline_uprobe_handler(p, regs);
+ } else if (check_validity_insn(p, regs) != 0) {
+ printk("no_uprobe live\n");
+ } else {
+ restore_opcode_for_thumb(p, regs);
+
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ prepare_singlestep(p, regs);
+ }
+ }
+
+ return 0;
+}
+
/*
 * undef_hook entry point: run the uprobe dispatcher with interrupts off
 * and preemption disabled. Returns uprobe_handler()'s verdict (0 =
 * consumed, 1 = not ours).
 */
int uprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
	unsigned long flags;
	int rc;

	local_irq_save(flags);
	preempt_disable();

	rc = uprobe_handler(regs);

	preempt_enable_no_resched();
	local_irq_restore(flags);

	return rc;
}
+
+/* userspace probes hook (arm) */
+/* Match the full 32-bit breakpoint encoding, user mode only. */
+static struct undef_hook undef_hook_for_us_arm = {
+ .instr_mask = 0xffffffff,
+ .instr_val = BREAKPOINT_INSTRUCTION,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = USR_MODE,
+ .fn = uprobe_trap_handler
+};
+
+/* userspace probes hook (thumb) */
+/* Thumb variant: only the low halfword of the breakpoint is matched. */
+static struct undef_hook undef_hook_for_us_thumb = {
+ .instr_mask = 0xffffffff,
+ .instr_val = BREAKPOINT_INSTRUCTION & 0x0000ffff,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = USR_MODE,
+ .fn = uprobe_trap_handler
+};
+
+/* Register both (ARM and Thumb) undefined-instruction hooks. */
+int swap_arch_init_uprobes(void)
+{
+ swap_register_undef_hook(&undef_hook_for_us_arm);
+ swap_register_undef_hook(&undef_hook_for_us_thumb);
+
+ return 0;
+}
+
+/* Unregister the hooks in reverse order of registration. */
+void swap_arch_exit_uprobes(void)
+{
+ swap_unregister_undef_hook(&undef_hook_for_us_thumb);
+ swap_unregister_undef_hook(&undef_hook_for_us_arm);
+}
--- /dev/null
- printk("---> %s (%d/%d): failed to read stack from %08lx",
+/*
+ * Dynamic Binary Instrumentation Module based on KProbes
+ * modules/uprobe/swap_uprobes.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) Samsung Electronics, 2006-2010
+ *
+ * 2008-2009 Alexey Gerenkov <a.gerenkov@samsung.com> User-Space
+ * Probes initial implementation; Support x86/ARM/MIPS for both user and kernel spaces.
+ * 2010 Ekaterina Gorelkina <e.gorelkina@samsung.com>: redesign module for separating core and arch parts
+ *
+ */
+
+
+#include "swap_uprobes.h"
+#include "dbi_kdebug.h"
+
+#include <asm/swap_uprobes.h>
+
+#include <linux/hash.h>
+#include <linux/mempolicy.h>
+#include <linux/module.h>
+#include <dbi_insn_slots.h>
+#include <dbi_kprobes_deps.h>
+
+/* Hash-table sizing: 2^10 buckets for all uprobe tables below. */
+enum {
+ UPROBE_HASH_BITS = 10,
+ UPROBE_TABLE_SIZE = (1 << UPROBE_HASH_BITS)
+};
+
+/* probes hashed by slot address / probe address; insn page list */
+struct hlist_head uprobe_insn_slot_table[UPROBE_TABLE_SIZE];
+struct hlist_head uprobe_table[UPROBE_TABLE_SIZE];
+struct hlist_head uprobe_insn_pages;
+
+DEFINE_SPINLOCK(uretprobe_lock); /* Protects uretprobe_inst_table */
+static struct hlist_head uretprobe_inst_table[UPROBE_TABLE_SIZE];
+
+#define DEBUG_PRINT_HASH_TABLE 0
+
+#if DEBUG_PRINT_HASH_TABLE
+/* Debug-only dump of the kernel kprobe table (compiled out by default).
+ * NOTE(review): references kprobe_table/KPROBE_TABLE_SIZE from the
+ * kprobes core — verify they are visible if this is ever enabled. */
+void print_kprobe_hash_table(void)
+{
+ int i;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct kprobe *p;
+
+ // print uprobe table
+ for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
+ head = &kprobe_table[i];
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
+ printk("####### find K tgid=%u, addr=%x\n",
+ p->tgid, p->addr);
+ }
+ }
+}
+
+/* Debug-only dump of the kretprobe instance table (compiled out).
+ * NOTE(review): iterates kretprobe_inst_table with a struct kprobe
+ * cursor — confirm the entry type before enabling. */
+void print_kretprobe_hash_table(void)
+{
+ int i;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct kprobe *p;
+
+ // print uprobe table
+ for (i = 0; i < KPROBE_TABLE_SIZE; ++i) {
+ head = &kretprobe_inst_table[i];
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
+ printk("####### find KR tgid=%u, addr=%x\n",
+ p->tgid, p->addr);
+ }
+ }
+}
+
+/* Debug-only dump of the uprobe insn-slot table (compiled out). */
+void print_uprobe_hash_table(void)
+{
+ int i;
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct kprobe *p;
+
+ // print uprobe table
+ for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
+ head = &uprobe_insn_slot_table[i];
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
+ printk("####### find U tgid=%u, addr=%x\n",
+ p->tgid, p->addr);
+ }
+ }
+}
+#endif
+
+/*
+ * Keep all fields in the uprobe consistent
+ */
+/*
+ * Keep all fields in the uprobe consistent
+ */
+/* Copy the execution-relevant state (opcode, arch insn, ss address and,
+ * on ARM, the safety flags) from old_p into p. */
+static inline void copy_uprobe(struct kprobe *old_p, struct kprobe *p)
+{
+ memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
+ memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
+ p->ss_addr = old_p->ss_addr;
+#ifdef CONFIG_ARM
+ p->safe_arm = old_p->safe_arm;
+ p->safe_thumb = old_p->safe_thumb;
+#endif
+}
+
+/*
+ * Aggregate handlers for multiple uprobes support - these handlers
+ * take care of invoking the individual uprobe handlers on p->list
+ */
+/*
+ * Aggregate pre-handler: run every individual pre-handler chained on
+ * p->list; the first non-zero return aborts the walk and is propagated.
+ */
+static int aggr_pre_uhandler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe *cur;
+
+ list_for_each_entry_rcu(cur, &p->list, list) {
+ int rv;
+
+ if (!cur->pre_handler)
+ continue;
+
+ rv = cur->pre_handler(cur, regs);
+ if (rv)
+ return rv;
+ }
+
+ return 0;
+}
+
+/*
+ * Aggregate post-handler: invoke every registered post-handler chained
+ * on p->list, in list order.
+ */
+static void aggr_post_uhandler(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
+{
+ struct kprobe *cur;
+
+ list_for_each_entry_rcu(cur, &p->list, list) {
+ if (cur->post_handler != NULL)
+ cur->post_handler(cur, regs, flags);
+ }
+}
+
+/* Aggregate fault handler: deliberately a no-op (fault not consumed). */
+static int aggr_fault_uhandler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+{
+ return 0;
+}
+
+/* Aggregate break handler: deliberately a no-op placeholder. */
+static int aggr_break_uhandler(struct kprobe *p, struct pt_regs *regs)
+{
+ return 0;
+}
+
+/*
+ * Add the new probe to old_p->list. Fail if this is the
+ * second ujprobe at the address - two ujprobes can't coexist
+ */
+/*
+ * Add the new probe to old_p->list. Fail if this is the
+ * second ujprobe at the address - two ujprobes can't coexist
+ */
+static int add_new_uprobe(struct kprobe *old_p, struct kprobe *p)
+{
+ if (p->break_handler) {
+ if (old_p->break_handler) {
+ /* only one break_handler (i.e. one ujprobe) allowed */
+ return -EEXIST;
+ }
+
+ list_add_tail_rcu(&p->list, &old_p->list);
+ old_p->break_handler = aggr_break_uhandler;
+ } else {
+ list_add_rcu (&p->list, &old_p->list);
+ }
+
+ /* promote the aggregate's post handler on first post user */
+ if (p->post_handler && !old_p->post_handler) {
+ old_p->post_handler = aggr_post_uhandler;
+ }
+
+ return 0;
+}
+
+/*
+ * Fill in the required fields of the "manager uprobe". Replace the
+ * earlier uprobe in the hlist with the manager uprobe
+ */
+/*
+ * Fill in the required fields of the "manager uprobe". Replace the
+ * earlier uprobe in the hlist with the manager uprobe
+ */
+static inline void add_aggr_uprobe(struct kprobe *ap, struct kprobe *p)
+{
+ copy_uprobe(p, ap);
+
+ /* the aggregate dispatches to members via the aggr_* handlers */
+ ap->addr = p->addr;
+ ap->pre_handler = aggr_pre_uhandler;
+ ap->fault_handler = aggr_fault_uhandler;
+
+ if (p->post_handler) {
+ ap->post_handler = aggr_post_uhandler;
+ }
+
+ if (p->break_handler) {
+ ap->break_handler = aggr_break_uhandler;
+ }
+
+ /* p becomes the first member; ap takes p's place in the hash */
+ INIT_LIST_HEAD(&ap->list);
+ list_add_rcu(&p->list, &ap->list);
+
+ hlist_replace_rcu(&p->hlist, &ap->hlist);
+}
+
+/*
+ * This is the second or subsequent uprobe at the address - handle
+ * the intricacies
+ */
+/*
+ * This is the second or subsequent uprobe at the address - handle
+ * the intricacies
+ */
+static int register_aggr_uprobe(struct kprobe *old_p, struct kprobe *p)
+{
+ int ret = 0;
+ struct kprobe *ap;
+
+ if (old_p->pre_handler == aggr_pre_uhandler) {
+ /* old_p is already an aggregate: just join it */
+ copy_uprobe(old_p, p);
+ ret = add_new_uprobe(old_p, p);
+ } else {
+ /* first collision: allocate a manager uprobe and demote
+ * old_p to a list member (freed later at unregister) */
+ struct uprobe *uap = kzalloc(sizeof(*uap), GFP_KERNEL);
+ if (!uap) {
+ return -ENOMEM;
+ }
+
+ uap->task = kp2up(p)->task;
+ ap = up2kp(uap);
+ add_aggr_uprobe(ap, old_p);
+ copy_uprobe(ap, p);
+ ret = add_new_uprobe(ap, p);
+ }
+
+ return ret;
+}
+
+/* Write the breakpoint opcode into the probed task's text.
+ * write_proc_vm_atomic returning 0 means nothing was written. */
+static void arm_uprobe(struct uprobe *p)
+{
+ kprobe_opcode_t insn = BREAKPOINT_INSTRUCTION;
+ int ret = write_proc_vm_atomic(p->task, (unsigned long)p->kp.addr,
+ &insn, sizeof(insn));
+ if (!ret) {
+ panic("arm_uprobe: failed to write memory "
+ "tgid=%u addr=%p!\n", p->task->tgid, p->kp.addr);
+ }
+}
+
+/* Restore the original opcode at the probed address, undoing
+ * arm_uprobe().  Panics if the write-back fails. */
+void disarm_uprobe(struct kprobe *p, struct task_struct *task)
+{
+ int ret = write_proc_vm_atomic(task, (unsigned long)p->addr,
+ &p->opcode, sizeof(p->opcode));
+ if (!ret) {
+ panic("disarm_uprobe: failed to write memory "
+ "tgid=%u, addr=%p!\n", task->tgid, p->addr);
+ }
+}
+EXPORT_SYMBOL_GPL(disarm_uprobe);
+
+/* Initialize every bucket of the insn-slot hash table. */
+static void init_uprobes_insn_slots(void)
+{
+ int i = 0;
+
+ while (i < UPROBE_TABLE_SIZE)
+ INIT_HLIST_HEAD(&uprobe_insn_slot_table[i++]);
+}
+
+/* Initialize every bucket of the uprobe hash table. */
+static void init_uprobe_table(void)
+{
+ int i = 0;
+
+ while (i < UPROBE_TABLE_SIZE)
+ INIT_HLIST_HEAD(&uprobe_table[i++]);
+}
+
+/* Initialize every bucket of the uretprobe instance hash table. */
+static void init_uretprobe_inst_table(void)
+{
+ int i = 0;
+
+ while (i < UPROBE_TABLE_SIZE)
+ INIT_HLIST_HEAD(&uretprobe_inst_table[i++]);
+}
+
+/* Look up the probe registered at 'addr' for process 'tgid', or NULL.
+ * Caller is expected to hold RCU read protection for the hlist walk. */
+struct kprobe *get_ukprobe(void *addr, pid_t tgid)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct kprobe *p;
+
+ head = &uprobe_table[hash_ptr(addr, UPROBE_HASH_BITS)];
+ swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
+ if (p->addr == addr && kp2up(p)->task->tgid == tgid) {
+ return p;
+ }
+ }
+
+ return NULL;
+}
+
+/* Index the probe by its single-step slot address(es) so a trap inside
+ * a slot can be mapped back to the probe (both ARM and Thumb slots on
+ * ARM builds). */
+static void add_uprobe_table(struct kprobe *p)
+{
+#ifdef CONFIG_ARM
+ INIT_HLIST_NODE(&p->is_hlist_arm);
+ hlist_add_head_rcu(&p->is_hlist_arm, &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn_arm, UPROBE_HASH_BITS)]);
+ INIT_HLIST_NODE(&p->is_hlist_thumb);
+ hlist_add_head_rcu(&p->is_hlist_thumb, &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn_thumb, UPROBE_HASH_BITS)]);
+#else /* CONFIG_ARM */
+ INIT_HLIST_NODE(&p->is_hlist);
+ hlist_add_head_rcu(&p->is_hlist, &uprobe_insn_slot_table[hash_ptr(p->ainsn.insn, UPROBE_HASH_BITS)]);
+#endif /* CONFIG_ARM */
+}
+
+#ifdef CONFIG_ARM
+/* Reverse lookup: map an address inside an ARM insn slot back to its
+ * probe for the given process, or NULL. */
+static struct kprobe *get_ukprobe_bis_arm(void *addr, pid_t tgid)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct kprobe *p;
+
+ /* TODO: test - two processes invokes instrumented function */
+ head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_arm) {
+ if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
+ return p;
+ }
+ }
+
+ return NULL;
+}
+
+/* Reverse lookup: map an address inside a Thumb insn slot back to its
+ * probe for the given process, or NULL. */
+static struct kprobe *get_ukprobe_bis_thumb(void *addr, pid_t tgid)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct kprobe *p;
+
+ /* TODO: test - two processes invokes instrumented function */
+ head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist_thumb) {
+ if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
+ return p;
+ }
+ }
+
+ return NULL;
+}
+
+/* Map a trap address inside a trampoline back to the owning probe.
+ * The fixed offsets rewind to the slot base: 4 bytes per ARM entry,
+ * 0x1a for Thumb — assumed to match the trampoline layouts in
+ * trampoline_arm.h / trampoline_thumb.h; TODO confirm. */
+struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *regs)
+{
+ return thumb_mode(regs) ?
+ get_ukprobe_bis_thumb(addr - 0x1a, tgid) :
+ get_ukprobe_bis_arm(addr - 4 * UPROBES_TRAMP_RET_BREAK_IDX, tgid);
+}
+#else /* CONFIG_ARM */
+/* Non-ARM variant: rewind the trap address to the slot base and do the
+ * reverse lookup.  NOTE(review): this subtracts the raw index from a
+ * void* (byte arithmetic), unlike the ARM path which scales by 4 —
+ * confirm the intended slot entry size on this architecture. */
+struct kprobe *get_ukprobe_by_insn_slot(void *addr, pid_t tgid, struct pt_regs *regs)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct kprobe *p;
+
+ addr -= UPROBES_TRAMP_RET_BREAK_IDX;
+
+ /* TODO: test - two processes invokes instrumented function */
+ head = &uprobe_insn_slot_table[hash_ptr(addr, UPROBE_HASH_BITS)];
+ swap_hlist_for_each_entry_rcu(p, node, head, is_hlist) {
+ if (p->ainsn.insn == addr && kp2up(p)->task->tgid == tgid) {
+ return p;
+ }
+ }
+
+ return NULL;
+}
+#endif /* CONFIG_ARM */
+
+
+/* Release the probe's single-step slot(s) back to the slot manager.
+ * Fix: dropped the unused 'task' local (set but never read — compiler
+ * warning). */
+static void remove_uprobe(struct uprobe *up)
+{
+ struct kprobe *p = &up->kp;
+
+#ifdef CONFIG_ARM
+ free_insn_slot(up->sm, p->ainsn.insn_arm);
+ free_insn_slot(up->sm, p->ainsn.insn_thumb);
+#else /* CONFIG_ARM */
+ free_insn_slot(up->sm, p->ainsn.insn);
+#endif /* CONFIG_ARM */
+}
+
+/* Map a hash key (the task's mm pointer) to its instance bucket. */
+static struct hlist_head *uretprobe_inst_table_head(void *hash_key)
+{
+ unsigned long idx = hash_ptr(hash_key, UPROBE_HASH_BITS);
+
+ return &uretprobe_inst_table[idx];
+}
+
+/* Called with uretprobe_lock held */
+static void add_urp_inst(struct uretprobe_instance *ri)
+{
+ /*
+ * Remove rp inst off the free list -
+ * Add it back when probed function returns
+ */
+ hlist_del(&ri->uflist);
+
+ /* Add rp inst onto table */
+ INIT_HLIST_NODE(&ri->hlist);
+ hlist_add_head(&ri->hlist, uretprobe_inst_table_head(ri->task->mm));
+
+ /* Also add this rp inst to the used list. */
+ INIT_HLIST_NODE(&ri->uflist);
+ hlist_add_head(&ri->uflist, &ri->rp->used_instances);
+}
+
+/* Called with uretprobe_lock held */
+static void recycle_urp_inst(struct uretprobe_instance *ri)
+{
+ if (ri->rp) {
+ hlist_del(&ri->hlist);
+ /* remove rp inst off the used list */
+ hlist_del(&ri->uflist);
+ /* put rp inst back onto the free list */
+ INIT_HLIST_NODE(&ri->uflist);
+ hlist_add_head(&ri->uflist, &ri->rp->free_instances);
+ }
+}
+
+/* Called with uretprobe_lock held */
+static struct uretprobe_instance *get_used_urp_inst(struct uretprobe *rp)
+{
+ struct hlist_node *node;
+ struct uretprobe_instance *ri;
+
+ swap_hlist_for_each_entry(ri, node, &rp->used_instances, uflist) {
+ return ri;
+ }
+
+ return NULL;
+}
+
+/* Called with uretprobe_lock held */
+struct uretprobe_instance *get_free_urp_inst_no_alloc(struct uretprobe *rp)
+{
+ struct hlist_node *node;
+ struct uretprobe_instance *ri;
+
+ swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+ return ri;
+ }
+
+ return NULL;
+}
+
+/* Called with uretprobe_lock held */
+static void free_urp_inst(struct uretprobe *rp)
+{
+ struct uretprobe_instance *ri;
+ while ((ri = get_free_urp_inst_no_alloc(rp)) != NULL) {
+ hlist_del(&ri->uflist);
+ kfree(ri);
+ }
+}
+
+#define COMMON_URP_NR 10
+
+/*
+ * Grow the instance pool when the free list runs dry: bump maxactive
+ * and pre-allocate COMMON_URP_NR fresh instances (GFP_ATOMIC — called
+ * under uretprobe_lock).  Returns 0 on success, -ENOMEM on failure
+ * (all free instances are released in that case).
+ *
+ * Fix: the dead #else branch misspelled the field ('maxacpptive'),
+ * which would not compile if the #if were ever flipped.
+ */
+static int alloc_nodes_uretprobe(struct uretprobe *rp)
+{
+ int alloc_nodes;
+ struct uretprobe_instance *inst;
+ int i;
+
+#if 1//def CONFIG_PREEMPT
+ rp->maxactive += max(COMMON_URP_NR, 2 * NR_CPUS);
+#else
+ rp->maxactive += NR_CPUS;
+#endif
+ alloc_nodes = COMMON_URP_NR;
+
+ for (i = 0; i < alloc_nodes; ++i) {
+ inst = kmalloc(sizeof(*inst), GFP_ATOMIC);
+ if (inst == NULL) {
+ free_urp_inst(rp);
+ return -ENOMEM;
+ }
+ INIT_HLIST_NODE(&inst->uflist);
+ hlist_add_head(&inst->uflist, &rp->free_instances);
+ }
+
+ return 0;
+}
+
+/* Called with uretprobe_lock held */
+static struct uretprobe_instance *get_free_urp_inst(struct uretprobe *rp)
+{
+ struct hlist_node *node;
+ struct uretprobe_instance *ri;
+
+ swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+ return ri;
+ }
+
+ if (!alloc_nodes_uretprobe(rp)) {
+ swap_hlist_for_each_entry(ri, node, &rp->free_instances, uflist) {
+ return ri;
+ }
+ }
+
+ return NULL;
+}
+// ===================================================================
+
+/*
+ * Register a userspace probe: either join an existing probe at the
+ * same address as an aggregate member, or prepare an insn slot, hash
+ * the probe and arm the breakpoint.  Returns 0 or a negative errno.
+ *
+ * Fix: two DBPRINTF("goto out\n", ret) calls passed 'ret' with no
+ * matching format specifier; the format now consumes the argument.
+ */
+int dbi_register_uprobe(struct uprobe *up)
+{
+ int ret = 0;
+ struct kprobe *p, *old_p;
+
+ p = &up->kp;
+ if (!p->addr) {
+ return -EINVAL;
+ }
+
+ DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);
+
+// thumb address = address-1;
+#if defined(CONFIG_ARM)
+ // TODO: must be corrected in 'bundle'
+ if ((unsigned long) p->addr & 0x01) {
+ p->addr = (kprobe_opcode_t *)((unsigned long)p->addr & 0xfffffffe);
+ }
+#endif
+
+ p->mod_refcounted = 0;
+ p->nmissed = 0;
+ INIT_LIST_HEAD(&p->list);
+#ifdef KPROBES_PROFILE
+ p->start_tm.tv_sec = p->start_tm.tv_usec = 0;
+ p->hnd_tm_sum.tv_sec = p->hnd_tm_sum.tv_usec = 0;
+ p->count = 0;
+#endif
+
+ // get the first item
+ old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
+ if (old_p) {
+#ifdef CONFIG_ARM
+ p->safe_arm = old_p->safe_arm;
+ p->safe_thumb = old_p->safe_thumb;
+#endif
+ /* second probe at this address: aggregate instead of arming */
+ ret = register_aggr_uprobe(old_p, p);
+ if (!ret) {
+// atomic_inc(&kprobe_count);
+ add_uprobe_table(p);
+ }
+ DBPRINTF("goto out, ret = 0x%x\n", ret);
+ goto out;
+ }
+
+ ret = arch_prepare_uprobe(up, &uprobe_insn_pages);
+ if (ret) {
+ DBPRINTF("goto out, ret = 0x%x\n", ret);
+ goto out;
+ }
+
+ DBPRINTF ("before out ret = 0x%x\n", ret);
+
+ // TODO: add uprobe (must be in function)
+ INIT_HLIST_NODE(&p->hlist);
+ hlist_add_head_rcu(&p->hlist, &uprobe_table[hash_ptr(p->addr, UPROBE_HASH_BITS)]);
+ add_uprobe_table(p);
+ arm_uprobe(up);
+
+out:
+ DBPRINTF("out ret = 0x%x\n", ret);
+ return ret;
+}
+
+/*
+ * Unregister a userspace probe.  Handles three cases: the probe is the
+ * only one at its address (disarm and tear down), it is a member of an
+ * aggregate (unlink only), or it is the last member left (tear down
+ * the aggregate as well and kfree the manager probe).
+ */
+void dbi_unregister_uprobe(struct uprobe *up)
+{
+ struct kprobe *p, *old_p, *list_p;
+ int cleanup_p;
+
+ p = &up->kp;
+ old_p = get_ukprobe(p->addr, kp2up(p)->task->tgid);
+ if (unlikely(!old_p)) {
+ return;
+ }
+
+ if (p != old_p) {
+ /* aggregate case: make sure p really is on old_p's list */
+ list_for_each_entry_rcu(list_p, &old_p->list, list) {
+ if (list_p == p) {
+ /* uprobe p is a valid probe */
+ goto valid_p;
+ }
+ }
+
+ return;
+ }
+
+ /* when p == old_p, control falls through here directly */
+valid_p:
+ if ((old_p == p) || ((old_p->pre_handler == aggr_pre_uhandler) &&
+ (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
+ /* Only probe on the hash list */
+ disarm_uprobe(&up->kp, up->task);
+ hlist_del_rcu(&old_p->hlist);
+ cleanup_p = 1;
+ } else {
+ list_del_rcu(&p->list);
+ cleanup_p = 0;
+ }
+
+ if (cleanup_p) {
+ if (p != old_p) {
+ /* old_p was the kzalloc'ed manager probe */
+ list_del_rcu(&p->list);
+ kfree(old_p);
+ }
+
+ if (!in_atomic()) {
+ synchronize_sched();
+ }
+
+ remove_uprobe(up);
+ } else {
+ if (p->break_handler) {
+ old_p->break_handler = NULL;
+ }
+
+ if (p->post_handler) {
+ /* drop the aggregate post handler only if no member
+ * still needs one (cleanup_p is 0 on entry here) */
+ list_for_each_entry_rcu (list_p, &old_p->list, list) {
+ if (list_p->post_handler) {
+ cleanup_p = 2;
+ break;
+ }
+ }
+
+ if (cleanup_p == 0) {
+ old_p->post_handler = NULL;
+ }
+ }
+ }
+}
+
+/*
+ * Register a userspace jprobe: install the setjmp/longjmp handler pair
+ * on the embedded uprobe, then register it as a normal uprobe.
+ */
+int dbi_register_ujprobe(struct ujprobe *jp)
+{
+ /* Todo: Verify probepoint is a function entry point */
+ jp->up.kp.pre_handler = setjmp_upre_handler;
+ jp->up.kp.break_handler = longjmp_break_uhandler;
+
+ return dbi_register_uprobe(&jp->up);
+}
+
+/* Unregister a userspace jprobe and unhash its insn-slot entries. */
+void dbi_unregister_ujprobe(struct ujprobe *jp)
+{
+ dbi_unregister_uprobe(&jp->up);
+ /*
+ * Here is an attempt to unregister even those probes that have not been
+ * installed (hence not added to the hlist).
+ * So if we try to delete them from the hlist we will get NULL pointer
+ * dereference error. That is why we check whether this node
+ * really belongs to the hlist.
+ */
+#ifdef CONFIG_ARM
+ if (!(hlist_unhashed(&jp->up.kp.is_hlist_arm))) {
+ hlist_del_rcu(&jp->up.kp.is_hlist_arm);
+ }
+ if (!(hlist_unhashed(&jp->up.kp.is_hlist_thumb))) {
+ hlist_del_rcu(&jp->up.kp.is_hlist_thumb);
+ }
+#else /* CONFIG_ARM */
+ if (!(hlist_unhashed(&jp->up.kp.is_hlist))) {
+ hlist_del_rcu(&jp->up.kp.is_hlist);
+ }
+#endif /* CONFIG_ARM */
+}
+
+/*
+ * Return-trampoline hit: fire the user handlers for every instance
+ * belonging to the current task, recycle them, and restore the real
+ * return address into the register state.  Always returns 1
+ * (event consumed).
+ */
+int trampoline_uprobe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct uretprobe_instance *ri = NULL;
+ struct hlist_head *head;
+ struct hlist_node *node, *tmp;
+ unsigned long flags, tramp_addr, orig_ret_addr = 0;
+
+ tramp_addr = arch_get_trampoline_addr(p, regs);
+ spin_lock_irqsave(&uretprobe_lock, flags);
+
+ head = uretprobe_inst_table_head(current->mm);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because an multiple functions in the call path
+ * have a return probe installed on them, and/or more then one
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * uretprobe_trampoline
+ */
+ swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current) {
+ /* another task is sharing our hash bucket */
+ continue;
+ }
+
+ if (ri->rp && ri->rp->handler) {
+ ri->rp->handler(ri, regs, ri->rp->priv_arg);
+ }
+
+ orig_ret_addr = (unsigned long)ri->ret_addr;
+ recycle_urp_inst(ri);
+
+ if (orig_ret_addr != tramp_addr) {
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+ }
+
+ spin_unlock_irqrestore(&uretprobe_lock, flags);
+ arch_set_orig_ret_addr(orig_ret_addr, regs);
+
+ return 1;
+}
+
+/*
+ * Entry-side handler for uretprobes: grab a free instance, let the
+ * arch code swap the return address for the trampoline, and record the
+ * instance.  Misses are counted in rp->nmissed.  Always returns 0 so
+ * single-stepping proceeds.
+ */
+static int pre_handler_uretprobe(struct kprobe *p, struct pt_regs *regs)
+{
+ struct uprobe *up = container_of(p, struct uprobe, kp);
+ struct uretprobe *rp = container_of(up, struct uretprobe, up);
+#ifdef CONFIG_ARM
+ int noret = thumb_mode(regs) ? rp->thumb_noret : rp->arm_noret;
+#endif
+ struct uretprobe_instance *ri;
+ unsigned long flags;
+
+#ifdef CONFIG_ARM
+ /* functions marked no-return are not worth instrumenting */
+ if (noret)
+ return 0;
+#endif
+
+ /* TODO: consider to only swap the RA after the last pre_handler fired */
+ spin_lock_irqsave(&uretprobe_lock, flags);
+
+ /* TODO: test - remove retprobe after func entry but before its exit */
+ if ((ri = get_free_urp_inst(rp)) != NULL) {
+ ri->rp = rp;
+ ri->task = current;
+
+ arch_prepare_uretprobe(ri, regs);
+
+ add_urp_inst(ri);
+ } else {
+ ++rp->nmissed;
+ }
+
+ spin_unlock_irqrestore(&uretprobe_lock, flags);
+
+ return 0;
+}
+
+/*
+ * Register a userspace return probe: pre-allocate maxactive instances
+ * and install the entry-side uprobe.  Returns 0 or a negative errno.
+ *
+ * Fix: previously the pre-allocated free_instances leaked when
+ * dbi_register_uprobe() failed; they are now released on that path.
+ */
+int dbi_register_uretprobe(struct uretprobe *rp)
+{
+ int i, ret = 0;
+ struct uretprobe_instance *inst;
+
+ DBPRINTF ("START\n");
+
+ rp->up.kp.pre_handler = pre_handler_uretprobe;
+ rp->up.kp.post_handler = NULL;
+ rp->up.kp.fault_handler = NULL;
+ rp->up.kp.break_handler = NULL;
+
+ /* Pre-allocate memory for max kretprobe instances */
+ if (rp->maxactive <= 0) {
+#if 1//def CONFIG_PREEMPT
+ rp->maxactive = max(10, 2 * NR_CPUS);
+#else
+ rp->maxactive = NR_CPUS;
+#endif
+ }
+
+ INIT_HLIST_HEAD(&rp->used_instances);
+ INIT_HLIST_HEAD(&rp->free_instances);
+
+ for (i = 0; i < rp->maxactive; i++) {
+ inst = kmalloc(sizeof(*inst), GFP_KERNEL);
+ if (inst == NULL) {
+ free_urp_inst(rp);
+ return -ENOMEM;
+ }
+
+ INIT_HLIST_NODE(&inst->uflist);
+ hlist_add_head(&inst->uflist, &rp->free_instances);
+ }
+
+ rp->nmissed = 0;
+
+ /* Establish function entry probe point */
+ ret = dbi_register_uprobe(&rp->up);
+ if (ret) {
+ /* don't leak the instances allocated above */
+ free_urp_inst(rp);
+ return ret;
+ }
+
+ arch_opcode_analysis_uretprobe(rp);
+
+ return 0;
+}
+
+/*
+ * Undo the return-address swap of one uretprobe instance: search the
+ * top of the task's user stack for the trampoline address and write
+ * the saved return address back; if not on the stack, check LR.
+ * Returns 0 on success, -EFAULT on read/write failure, -ENOENT when
+ * the trampoline cannot be located.
+ *
+ * Fix: normalized corrupted diff lines (stray '-' / '++' markers) in
+ * the read-failure and LR-restore paths — the restore must write
+ * ri->ret_addr, not the trampoline address — and added the missing
+ * newline to the write-failure printk.
+ */
+int dbi_disarm_urp_inst(struct uretprobe_instance *ri, struct task_struct *rm_task)
+{
+ struct task_struct *task = rm_task ? rm_task : ri->task;
+ unsigned long *tramp;
+ unsigned long *sp = (unsigned long *)((long)ri->sp & ~1);
+ unsigned long *stack = sp - RETPROBE_STACK_DEPTH + 1;
+ unsigned long *found = NULL;
+ unsigned long *buf[RETPROBE_STACK_DEPTH];
+ int i, retval;
+
+ /* Understand function mode */
+ if ((long)ri->sp & 1) {
+ /* Thumb: trampoline break sits at fixed offset 0x1b */
+ tramp = (unsigned long *)
+ ((unsigned long)ri->rp->up.kp.ainsn.insn + 0x1b);
+ } else {
+ tramp = (unsigned long *)
+ (ri->rp->up.kp.ainsn.insn + UPROBES_TRAMP_RET_BREAK_IDX);
+ }
+
+ retval = read_proc_vm_atomic(task, (unsigned long)stack, buf, sizeof(buf));
+ if (retval != sizeof(buf)) {
+ printk("---> %s (%d/%d): failed to read stack from %08lx\n",
+ task->comm, task->tgid, task->pid, (unsigned long)stack);
+ retval = -EFAULT;
+ goto out;
+ }
+
+ /* search the stack from the bottom */
+ for (i = RETPROBE_STACK_DEPTH - 1; i >= 0; i--) {
+ if (buf[i] == tramp) {
+ found = stack + i;
+ break;
+ }
+ }
+
+ if (found) {
+ printk("---> %s (%d/%d): trampoline found at %08lx (%08lx /%+d) - %p\n",
+ task->comm, task->tgid, task->pid,
+ (unsigned long)found, (unsigned long)sp,
+ found - sp, ri->rp->up.kp.addr);
+ retval = write_proc_vm_atomic(task, (unsigned long)found, &ri->ret_addr,
+ sizeof(ri->ret_addr));
+ if (retval != sizeof(ri->ret_addr)) {
+ printk("---> %s (%d/%d): failed to write value to %08lx\n",
+ task->comm, task->tgid, task->pid, (unsigned long)found);
+ retval = -EFAULT;
+ } else {
+ retval = 0;
+ }
+ } else {
+ struct pt_regs *uregs = task_pt_regs(ri->task);
+ unsigned long ra = dbi_get_ret_addr(uregs);
+ if (ra == (unsigned long)tramp) {
+ printk("---> %s (%d/%d): trampoline found at lr = %08lx - %p\n",
+ task->comm, task->tgid, task->pid, ra, ri->rp->up.kp.addr);
+ dbi_set_ret_addr(uregs, (unsigned long)ri->ret_addr);
+ retval = 0;
+ } else {
+ printk("---> %s (%d/%d): trampoline NOT found at sp = %08lx, lr = %08lx - %p\n",
+ task->comm, task->tgid, task->pid,
+ (unsigned long)sp, ra, ri->rp->up.kp.addr);
+ retval = -ENOENT;
+ }
+ }
+
+out:
+ return retval;
+}
+
+/* Called with uretprobe_lock held */
+/* Called with uretprobe_lock held */
+/* Disarm every instance registered for 'parent', writing the restored
+ * return addresses into 'task' (used e.g. across fork). */
+int dbi_disarm_urp_inst_for_task(struct task_struct *parent, struct task_struct *task)
+{
+ struct uretprobe_instance *ri;
+ struct hlist_node *node, *tmp;
+ struct hlist_head *head = uretprobe_inst_table_head(parent->mm);
+
+ swap_hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (parent == ri->task) {
+ dbi_disarm_urp_inst(ri, task);
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dbi_disarm_urp_inst_for_task);
+
+/*
+ * Unregister a userspace return probe: remove the entry breakpoint
+ * first (so no new instances appear), disarm and recycle all used
+ * instances, unhash the insn-slot entries, then free the pool.
+ *
+ * Fix: normalized corrupted diff lines (stray '-' / '++' markers)
+ * around the dbi_unregister_uprobe() call and the disarm-failure
+ * printk, and braced the multi-line if body.
+ */
+void dbi_unregister_uretprobe(struct uretprobe *rp)
+{
+ unsigned long flags;
+ struct uretprobe_instance *ri;
+
+ dbi_unregister_uprobe(&rp->up);
+ spin_lock_irqsave (&uretprobe_lock, flags);
+
+ while ((ri = get_used_urp_inst(rp)) != NULL) {
+ if (dbi_disarm_urp_inst(ri, NULL) != 0) {
+ printk("%s (%d/%d): cannot disarm urp instance (%08lx)\n",
+ ri->task->comm, ri->task->tgid, ri->task->pid,
+ (unsigned long)rp->up.kp.addr);
+ }
+ recycle_urp_inst(ri);
+ }
+
+ if (hlist_empty(&rp->used_instances)) {
+ struct kprobe *p = &rp->up.kp;
+#ifdef CONFIG_ARM
+ if (!(hlist_unhashed(&p->is_hlist_arm))) {
+ hlist_del_rcu(&p->is_hlist_arm);
+ }
+
+ if (!(hlist_unhashed(&p->is_hlist_thumb))) {
+ hlist_del_rcu(&p->is_hlist_thumb);
+ }
+#else /* CONFIG_ARM */
+ if (!(hlist_unhashed(&p->is_hlist))) {
+ hlist_del_rcu(&p->is_hlist);
+ }
+#endif /* CONFIG_ARM */
+ }
+
+ /* detach any instances that could not be recycled */
+ while ((ri = get_used_urp_inst(rp)) != NULL) {
+ ri->rp = NULL;
+ hlist_del(&ri->uflist);
+ }
+
+ spin_unlock_irqrestore(&uretprobe_lock, flags);
+ free_urp_inst(rp);
+}
+
+/* Remove every uprobe belonging to the given task's thread group
+ * (e.g. on process exit). */
+void dbi_unregister_all_uprobes(struct task_struct *task)
+{
+ struct hlist_head *head;
+ struct hlist_node *node, *tnode;
+ struct kprobe *p;
+ int i;
+
+ for (i = 0; i < UPROBE_TABLE_SIZE; ++i) {
+ head = &uprobe_table[i];
+ swap_hlist_for_each_entry_safe(p, node, tnode, head, hlist) {
+ if (kp2up(p)->task->tgid == task->tgid) {
+ struct uprobe *up = container_of(p, struct uprobe, kp);
+ printk("dbi_unregister_all_uprobes: delete uprobe at %p[%lx] for %s/%d\n",
+ p->addr, (unsigned long)p->opcode, task->comm, task->pid);
+ dbi_unregister_uprobe(up);
+ }
+ }
+ }
+}
+
+/* Public wrapper around the arch-specific ujprobe return path. */
+void swap_ujprobe_return(void)
+{
+ arch_ujprobe_return();
+}
+EXPORT_SYMBOL_GPL(swap_ujprobe_return);
+
+/* Module init: set up all hash tables, then install the arch hooks. */
+static int __init init_uprobes(void)
+{
+ init_uprobe_table();
+ init_uprobes_insn_slots();
+ init_uretprobe_inst_table();
+
+ return swap_arch_init_uprobes();
+}
+
+/* Module exit: remove the arch hooks installed by init_uprobes(). */
+static void __exit exit_uprobes(void)
+{
+ swap_arch_exit_uprobes();
+}
+
+/* Public API exported to other SWAP modules. */
+EXPORT_SYMBOL_GPL(dbi_register_ujprobe);
+EXPORT_SYMBOL_GPL(dbi_unregister_ujprobe);
+EXPORT_SYMBOL_GPL(dbi_register_uretprobe);
+EXPORT_SYMBOL_GPL(dbi_unregister_uretprobe);
+EXPORT_SYMBOL_GPL(dbi_unregister_all_uprobes);
+
+module_init(init_uprobes);
+module_exit(exit_uprobes);
+
+MODULE_LICENSE ("GPL");