struct kretprobe *rp = p[i].rp;
sym = p[i].name;
- rp->kp.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (rp->kp.addr == NULL)
+ rp->kp.addr = swap_ksyms(sym);
+ if (rp->kp.addr == 0)
goto not_found;
}
const char *sym;
sym = "__switch_to";
- switch_to_krp.kp.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (switch_to_krp.kp.addr == NULL)
+ switch_to_krp.kp.addr = swap_ksyms(sym);
+ if (switch_to_krp.kp.addr == 0)
goto not_found;
sym = "sys_read";
- sys_read_krp.kp.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (sys_read_krp.kp.addr == NULL)
+ sys_read_krp.kp.addr = swap_ksyms(sym);
+ if (sys_read_krp.kp.addr == 0)
goto not_found;
sym = "sys_write";
- sys_write_krp.kp.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (sys_write_krp.kp.addr == NULL)
+ sys_write_krp.kp.addr = swap_ksyms(sym);
+ if (sys_write_krp.kp.addr == 0)
goto not_found;
energy_xxx_once(bt_probes, bt_probes_cnt);
/**
* @brief Creates trampoline for kprobe.
*
- * @param p Pointer to kprobe.
+ * @param p Pointer to kp_core.
* @param sm Pointer to slot manager
* @return 0 on success, error code on error.
*/
-int swap_arch_prepare_kprobe(struct kprobe *p, struct slot_manager *sm)
+int arch_kp_core_prepare(struct kp_core *p, struct slot_manager *sm)
{
- unsigned long addr = (unsigned long)p->addr;
- unsigned long insn = p->opcode = *p->addr;
unsigned long *tramp;
int ret;
if (tramp == NULL)
return -ENOMEM;
- ret = arch_make_trampoline_arm(addr, insn, tramp);
+ p->opcode = *(unsigned long *)p->addr;
+ ret = arch_make_trampoline_arm(p->addr, p->opcode, tramp);
if (ret) {
swap_slot_free(sm, tramp);
return ret;
* @param regs Pointer to CPU registers data.
* @return Void.
*/
-static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void prepare_singlestep(struct kp_core *p, struct pt_regs *regs)
{
int cpu = smp_processor_id();
if (p->ss_addr[cpu]) {
- regs->ARM_pc = (unsigned long)p->ss_addr[cpu];
- p->ss_addr[cpu] = NULL;
+ regs->ARM_pc = p->ss_addr[cpu];
+ p->ss_addr[cpu] = 0;
} else {
regs->ARM_pc = (unsigned long)p->ainsn.insn;
}
}
-/**
- * @brief Saves previous kprobe.
- *
- * @param kcb Pointer to kprobe_ctlblk struct whereto save current kprobe.
- * @param p_run Pointer to kprobe.
- * @return Void.
- */
-void save_previous_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p_run)
-{
- kcb->prev_kprobe.kp = swap_kprobe_running();
- kcb->prev_kprobe.status = kcb->kprobe_status;
-}
-
-/**
- * @brief Restores previous kprobe.
- *
- * @param kcb Pointer to kprobe_ctlblk which contains previous kprobe.
- * @return Void.
- */
-void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void save_previous_kp_core(struct kp_core_ctlblk *kcb)
{
- swap_kprobe_running_set(kcb->prev_kprobe.kp);
- kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->prev_kp_core.p = kp_core_running();
+ kcb->prev_kp_core.status = kcb->kp_core_status;
}
/**
- * @brief Sets currently running kprobe.
+ * @brief Restores previous kp_core.
*
- * @param p Pointer to currently running kprobe.
- * @param regs Pointer to CPU registers data.
- * @param kcb Pointer to kprobe_ctlblk.
+ * @param kcb Pointer to kp_core_ctlblk which contains previous kp_core.
* @return Void.
*/
-void set_current_kprobe(struct kprobe *p,
- struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
+void restore_previous_kp_core(struct kp_core_ctlblk *kcb)
{
- swap_kprobe_running_set(p);
+ kp_core_running_set(kcb->prev_kp_core.p);
+ kcb->kp_core_status = kcb->prev_kp_core.status;
}
static int kprobe_handler(struct pt_regs *regs)
{
- struct kprobe *p, *cur;
- struct kprobe_ctlblk *kcb;
+ struct kp_core *p, *cur;
+ struct kp_core_ctlblk *kcb;
- kcb = swap_get_kprobe_ctlblk();
- cur = swap_kprobe_running();
- p = swap_get_kprobe((void *)regs->ARM_pc);
+ kcb = kp_core_ctlblk();
+ cur = kp_core_running();
+
+ rcu_read_lock();
+ p = kp_core_by_addr(regs->ARM_pc);
+ if (p)
+ kp_core_get(p);
+ rcu_read_unlock();
if (p) {
if (cur) {
/* Kprobe is pending, so we're recursing. */
- switch (kcb->kprobe_status) {
+ switch (kcb->kp_core_status) {
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/* A pre- or post-handler probe got us here. */
- swap_kprobes_inc_nmissed_count(p);
- save_previous_kprobe(kcb, NULL);
- set_current_kprobe(p, 0, 0);
- kcb->kprobe_status = KPROBE_REENTER;
+ save_previous_kp_core(kcb);
+ kp_core_running_set(p);
+ kcb->kp_core_status = KPROBE_REENTER;
prepare_singlestep(p, regs);
- restore_previous_kprobe(kcb);
+ restore_previous_kp_core(kcb);
break;
default:
/* impossible cases */
BUG();
}
} else {
- set_current_kprobe(p, 0, 0);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ kp_core_running_set(p);
+ kcb->kp_core_status = KPROBE_HIT_ACTIVE;
+
+ if (!(regs->ARM_cpsr & PSR_I_BIT))
+ local_irq_enable();
- if (!p->pre_handler || !p->pre_handler(p, regs)) {
- kcb->kprobe_status = KPROBE_HIT_SS;
+ if (!p->handlers.pre(p, regs)) {
+ kcb->kp_core_status = KPROBE_HIT_SS;
prepare_singlestep(p, regs);
- swap_kprobe_running_set(NULL);
+ kp_core_running_set(NULL);
}
}
+ kp_core_put(p);
} else {
goto no_kprobe;
}
int kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
{
int ret;
- unsigned long flags;
-
- local_irq_save(flags);
if (likely(instr == BREAKPOINT_INSTRUCTION)) {
ret = kprobe_handler(regs);
} else {
- struct kprobe *p = swap_get_kprobe((void *)regs->ARM_pc);
+ struct kp_core *p;
+
+ rcu_read_lock();
+ p = kp_core_by_addr(regs->ARM_pc);
/* skip false exeption */
ret = p && (p->opcode == instr) ? 0 : 1;
+ rcu_read_unlock();
}
- local_irq_restore(flags);
-
return ret;
}
int swap_setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
- kprobe_pre_entry_handler_t pre_entry =
- (kprobe_pre_entry_handler_t)jp->pre_entry;
entry_point_t entry = (entry_point_t)jp->entry;
- pre_entry = (kprobe_pre_entry_handler_t)jp->pre_entry;
-
- if (pre_entry) {
- p->ss_addr[smp_processor_id()] = (void *)
- pre_entry(jp->priv_arg, regs);
- }
if (entry) {
entry(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2,
* @param p Pointer to target kprobe.
* @return Void.
*/
-void swap_arch_arm_kprobe(struct kprobe *p)
+void arch_kp_core_arm(struct kp_core *core)
{
- write_u32((long)p->addr, BREAKPOINT_INSTRUCTION);
+ write_u32(core->addr, BREAKPOINT_INSTRUCTION);
}
/**
* @param p Pointer to target kprobe.
* @return Void.
*/
-void swap_arch_disarm_kprobe(struct kprobe *p)
+void arch_kp_core_disarm(struct kp_core *core)
{
- write_u32((long)p->addr, p->opcode);
+ write_u32(core->addr, core->opcode);
}
/**
*/
int set_kjump_cb(struct pt_regs *regs, jumper_cb_t cb, void *data, size_t size)
{
- struct kprobe *p;
+ struct kp_core *p;
struct kj_cb_data *cb_data;
cb_data = kmalloc(sizeof(*cb_data) + size, GFP_ATOMIC);
if (size)
memcpy(cb_data->data, data, size);
- p = swap_kprobe_running();
- p->ss_addr[smp_processor_id()] = (kprobe_opcode_t *)&kjump_trampoline;
+ p = kp_core_running();
+ p->ss_addr[smp_processor_id()] = (unsigned long)&kjump_trampoline;
cb_data->ret_addr = (unsigned long)p->ainsn.insn;
cb_data->cb = cb;
/* restore regs */
memcpy(regs, &data->regs, sizeof(*regs));
- p->ss_addr[smp_processor_id()] = (void *)data->ret_addr;
+ /* p->ss_addr[smp_processor_id()] = (unsigned long)data->ret_addr; */
/* FIXME: potential memory leak, when process kill */
kfree(data);
static struct kprobe kjump_kprobe = {
.pre_handler = kjump_pre_handler,
- .addr = (unsigned long *)&kjump_trampoline + 2, /* nop */
+ .addr = (unsigned long)&kjump_trampoline + 2 * 4, /* nop */
};
static int kjump_init(void)
#define UREGS_OFFSET 8
/**
- * @struct prev_kprobe
- * @brief Stores previous kprobe.
- * @var prev_kprobe::kp
- * Pointer to kprobe struct.
- * @var prev_kprobe::status
+ * @struct prev_kp_core
+ * @brief Stores previous kp_core.
+ * @var prev_kp_core::p
+ * Pointer to kp_core struct.
+ * @var prev_kp_core::status
* Kprobe status.
*/
-struct prev_kprobe {
- struct kprobe *kp;
+struct prev_kp_core {
+ struct kp_core *p;
unsigned long status;
};
/**
- * @struct kprobe_ctlblk
- * @brief Per-cpu kprobe control block.
- * @var kprobe_ctlblk::kprobe_status
+ * @struct kp_core_ctlblk
+ * @brief Per-cpu kp_core control block.
+ * @var kp_core_ctlblk::kp_core_status
* Kprobe status.
- * @var kprobe_ctlblk::prev_kprobe
- * Previous kprobe.
+ * @var kp_core_ctlblk::prev_kp_core
+ * Previous kp_core.
*/
-struct kprobe_ctlblk {
- unsigned long kprobe_status;
- struct prev_kprobe prev_kprobe;
+struct kp_core_ctlblk {
+ unsigned long kp_core_status;
+ struct prev_kp_core prev_kp_core;
};
/**
struct slot_manager;
struct kretprobe;
struct kretprobe_instance;
-int swap_arch_prepare_kprobe(struct kprobe *p, struct slot_manager *sm);
+struct kp_core;
+struct kprobe;
+
+int arch_kp_core_prepare(struct kp_core *p, struct slot_manager *sm);
void swap_arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs);
-void swap_arch_arm_kprobe(struct kprobe *p);
-void swap_arch_disarm_kprobe(struct kprobe *p);
+void arch_kp_core_arm(struct kp_core *p);
+void arch_kp_core_disarm(struct kp_core *p);
int swap_setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs);
int swap_longjmp_break_handler(struct kprobe *p, struct pt_regs *regs);
-void save_previous_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *cur_p);
-void restore_previous_kprobe(struct kprobe_ctlblk *kcb);
-void set_current_kprobe(struct kprobe *p,
- struct pt_regs *regs,
- struct kprobe_ctlblk *kcb);
+void restore_previous_kp_core(struct kp_core_ctlblk *kcb);
void __naked swap_kretprobe_trampoline(void);
* @param sm Pointer to slot manager
* @return 0 on success, error code on error.
*/
-int swap_arch_prepare_kprobe(struct kprobe *p, struct slot_manager *sm)
+int arch_kp_core_prepare(struct kp_core *p, struct slot_manager *sm)
{
/* insn: must be on special executable page on i386. */
p->ainsn.insn = swap_slot_alloc(sm);
if (p->ainsn.insn == NULL)
return -ENOMEM;
- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE);
+ memcpy(p->ainsn.insn, (void *)p->addr, MAX_INSN_SIZE);
- p->opcode = *p->addr;
- p->ainsn.boostable = swap_can_boost(p->addr) ? 0 : -1;
+ p->opcode = *(char *)p->addr;
+ p->ainsn.boostable = swap_can_boost((void *)p->addr) ? 0 : -1;
return 0;
}
* @param regs Pointer to CPU registers data.
* @return Void.
*/
-static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+static void prepare_singlestep(struct kp_core *p, struct pt_regs *regs)
{
regs->flags |= TF_MASK;
regs->flags &= ~IF_MASK;
/**
* @brief Saves previous kprobe.
*
- * @param kcb Pointer to kprobe_ctlblk struct whereto save current kprobe.
+ * @param kcb Pointer to kp_core_ctlblk struct where to save the current kp_core.
* @param p_run Pointer to kprobe.
* @return Void.
*/
-void save_previous_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *cur_p)
+static void save_previous_kp_core(struct kp_core_ctlblk *kcb, struct kp_core *cur)
{
- if (kcb->prev_kprobe.kp != NULL) {
+ if (kcb->prev_kp_core.p != NULL) {
panic("no space to save new probe[]: "
- "task = %d/%s, prev %p, current %p, new %p,",
- current->pid, current->comm, kcb->prev_kprobe.kp->addr,
- swap_kprobe_running()->addr, cur_p->addr);
+ "task = %d/%s, prev %08lx, current %08lx, new %08lx,",
+ current->pid, current->comm, kcb->prev_kp_core.p->addr,
+ kp_core_running()->addr, cur->addr);
}
- kcb->prev_kprobe.kp = swap_kprobe_running();
- kcb->prev_kprobe.status = kcb->kprobe_status;
-
+ kcb->prev_kp_core.p = kp_core_running();
+ kcb->prev_kp_core.status = kcb->kp_core_status;
}
/**
- * @brief Restores previous kprobe.
+ * @brief Restores previous kp_core.
*
- * @param kcb Pointer to kprobe_ctlblk which contains previous kprobe.
+ * @param kcb Pointer to kp_core_ctlblk which contains previous kp_core.
* @return Void.
*/
-void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+void restore_previous_kp_core(struct kp_core_ctlblk *kcb)
{
- swap_kprobe_running_set(kcb->prev_kprobe.kp);
- kcb->kprobe_status = kcb->prev_kprobe.status;
- kcb->prev_kprobe.kp = NULL;
- kcb->prev_kprobe.status = 0;
+ kp_core_running_set(kcb->prev_kp_core.p);
+ kcb->kp_core_status = kcb->prev_kp_core.status;
+ kcb->prev_kp_core.p = NULL;
+ kcb->prev_kp_core.status = 0;
}
-/**
- * @brief Sets currently running kprobe.
- *
- * @param p Pointer to currently running kprobe.
- * @param regs Pointer to CPU registers data.
- * @param kcb Pointer to kprobe_ctlblk.
- * @return Void.
- */
-void set_current_kprobe(struct kprobe *p,
- struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
+static void set_current_kp_core(struct kp_core *p, struct pt_regs *regs,
+ struct kp_core_ctlblk *kcb)
{
- swap_kprobe_running_set(p);
- kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags =
+ kp_core_running_set(p);
+ kcb->kp_core_saved_eflags = kcb->kp_core_old_eflags =
(regs->EREG(flags) & (TF_MASK | IF_MASK));
if (is_IF_modifier(p->opcode))
- kcb->kprobe_saved_eflags &= ~IF_MASK;
+ kcb->kp_core_saved_eflags &= ~IF_MASK;
}
-static int setup_singlestep(struct kprobe *p, struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
+static int setup_singlestep(struct kp_core *p, struct pt_regs *regs,
+ struct kp_core_ctlblk *kcb)
{
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
- if (p->ainsn.boostable == 1 && !p->post_handler) {
+ if (p->ainsn.boostable == 1) {
/* Boost up -- we can execute copied instructions directly */
- swap_kprobe_running_set(NULL);
+ kp_core_running_set(NULL);
regs->ip = (unsigned long)p->ainsn.insn;
return 1;
#endif /* !CONFIG_PREEMPT */
prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_HIT_SS;
+ kcb->kp_core_status = KPROBE_HIT_SS;
return 1;
}
}
-void restore_int3(void);
+void exec_trampoline(void);
+void exec_trampoline_int3(void);
__asm(
- "restore_int3:\n"
+ "exec_trampoline:\n"
+ "call exec_handler\n"
+ "exec_trampoline_int3:\n"
"int3\n"
);
+static int __used exec_handler(void)
+{
+ struct kp_core *p = kp_core_running();
+ struct pt_regs *regs = current_regs();
+
+ return p->handlers.pre(p, regs);
+}
+
+static int befor_exec_trampoline(struct pt_regs *regs)
+{
+ int ret = (int)regs->ax;
+ struct kp_core *p = kp_core_running();
+ struct kp_core_ctlblk *kcb = kp_core_ctlblk();
+
+ /* restore regs from stack */
+ *regs = *current_regs();
+
+ if (ret) {
+ kp_core_put(p);
+ return ret;
+ }
+
+ setup_singlestep(p, regs, kcb);
+ if (!(regs->flags & TF_MASK))
+ kp_core_put(p);
+
+ return 1;
+}
+
static int __kprobe_handler(struct pt_regs *regs)
{
- struct kprobe *p = 0;
+ struct kp_core *p;
int ret = 0;
- kprobe_opcode_t *addr = NULL;
- struct kprobe_ctlblk *kcb;
+ unsigned long addr = regs->ip - 1;
+ struct kp_core_ctlblk *kcb;
- addr = (kprobe_opcode_t *) (regs->EREG(ip) - sizeof(kprobe_opcode_t));
+ kcb = kp_core_ctlblk();
- kcb = swap_get_kprobe_ctlblk();
- p = swap_get_kprobe(addr);
+ rcu_read_lock();
+ p = kp_core_by_addr(addr);
+ kp_core_get(p);
+ rcu_read_unlock();
/* Check we're not actually recursing */
- if (swap_kprobe_running()) {
+ if (kp_core_running()) {
if (p) {
- if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ if (kcb->kp_core_status == KPROBE_HIT_SS &&
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
- regs->EREG(flags) &= ~TF_MASK;
- regs->EREG(flags) |= kcb->kprobe_saved_eflags;
+ regs->flags &= ~TF_MASK;
+ regs->flags |= kcb->kp_core_saved_eflags;
goto no_kprobe;
}
* just single step on the instruction of the new probe
* without calling any user handlers.
*/
- save_previous_kprobe(kcb, p);
- set_current_kprobe(p, regs, kcb);
- swap_kprobes_inc_nmissed_count(p);
+ save_previous_kp_core(kcb, p);
+ set_current_kp_core(p, regs, kcb);
prepare_singlestep(p, regs);
- kcb->kprobe_status = KPROBE_REENTER;
+ kcb->kp_core_status = KPROBE_REENTER;
goto out_get_kp_if_TF;
} else {
- if (*addr != BREAKPOINT_INSTRUCTION) {
+ if (*(char *)addr != BREAKPOINT_INSTRUCTION) {
/* The breakpoint instruction was removed by
* another cpu right after we hit, no further
* handling of this interrupt is appropriate
goto no_kprobe;
}
- p = swap_kprobe_running();
- if (p->break_handler && p->break_handler(p, regs))
- goto ss_probe;
-
goto no_kprobe;
}
}
if (!p) {
- if (*addr != BREAKPOINT_INSTRUCTION) {
+ if (*(char *)addr != BREAKPOINT_INSTRUCTION) {
/*
* The breakpoint instruction was removed right
* after we hit it. Another cpu has removed
ret = 1;
}
- if (!p) {
- /* Not one of ours: let kernel handle it */
- DBPRINTF("no_kprobe");
- goto no_kprobe;
- }
+ goto no_kprobe;
}
- set_current_kprobe(p, regs, kcb);
- kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ set_current_kp_core(p, regs, kcb);
+ kcb->kp_core_status = KPROBE_HIT_ACTIVE;
/* save regs to stack */
*current_regs() = *regs;
- regs->ip = (unsigned long)restore_int3;
- get_kp(p);
-
+ regs->ip = (unsigned long)exec_trampoline;
return 1;
-ss_probe:
- setup_singlestep(p, regs, kcb);
-
out_get_kp_if_TF:
- if ((regs->flags & TF_MASK))
- get_kp(p);
+ if (!(regs->flags & TF_MASK))
+ kp_core_put(p);
return 1;
return ret;
}
-static int restore_handler(struct pt_regs *regs)
-{
- struct kprobe *p = swap_kprobe_running();
- struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
-
- /* restore regs from stack */
- *regs = *current_regs();
-
- if (p->pre_handler) {
- int ret;
-
- ret = p->pre_handler(p, regs);
- if (ret) {
- put_kp(p);
- return ret;
- }
- }
-
- setup_singlestep(p, regs, kcb);
- if (!(regs->flags & TF_MASK))
- put_kp(p);
-
- return 1;
-}
-
static int kprobe_handler(struct pt_regs *regs)
{
int ret;
- if (regs->ip == (unsigned long)restore_int3 + 1)
- ret = restore_handler(regs);
+ if (regs->ip == (unsigned long)exec_trampoline_int3 + 1)
+ ret = befor_exec_trampoline(regs);
else
ret = __kprobe_handler(regs);
{
struct jprobe *jp = container_of(p, struct jprobe, kp);
kprobe_pre_entry_handler_t pre_entry;
- entry_point_t entry;
unsigned long addr;
- struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
+ struct kp_core_ctlblk *kcb = kp_core_ctlblk();
pre_entry = (kprobe_pre_entry_handler_t) jp->pre_entry;
- entry = (entry_point_t) jp->entry;
kcb->jprobe_saved_regs = *regs;
kcb->jprobe_saved_esp = stack_addr(regs);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
trace_hardirqs_off();
#endif
- if (pre_entry)
- p->ss_addr[smp_processor_id()] = (kprobe_opcode_t *)
- pre_entry(jp->priv_arg, regs);
regs->EREG(ip) = (unsigned long)(jp->entry);
*/
void swap_jprobe_return(void)
{
- struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
+ struct kp_core_ctlblk *kcb = kp_core_ctlblk();
asm volatile(" xchgl %%ebx,%%esp\n"
" int3\n"
*
* This function also checks instruction size for preparing direct execution.
*/
-static void resume_execution(struct kprobe *p,
+static void resume_execution(struct kp_core *p,
struct pt_regs *regs,
- struct kprobe_ctlblk *kcb)
+ struct kp_core_ctlblk *kcb)
{
unsigned long *tos;
unsigned long copy_eip = (unsigned long) p->ainsn.insn;
switch (insns[0]) {
case 0x9c: /* pushfl */
*tos &= ~(TF_MASK | IF_MASK);
- *tos |= kcb->kprobe_old_eflags;
+ *tos |= kcb->kp_core_old_eflags;
break;
case 0xc2: /* iret/ret/lret */
case 0xc3:
*/
static int post_kprobe_handler(struct pt_regs *regs)
{
- struct kprobe *cur = swap_kprobe_running();
- struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
+ struct kp_core *cur = kp_core_running();
+ struct kp_core_ctlblk *kcb = kp_core_ctlblk();
if (!cur)
return 0;
- put_kp(cur);
-
- if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
- kcb->kprobe_status = KPROBE_HIT_SSDONE;
- cur->post_handler(cur, regs, 0);
- }
+ kp_core_put(cur);
resume_execution(cur, regs, kcb);
- regs->EREG(flags) |= kcb->kprobe_saved_eflags;
+ regs->flags |= kcb->kp_core_saved_eflags;
#ifndef CONFIG_X86
trace_hardirqs_fixup_flags(regs->EREG(flags));
#endif /* CONFIG_X86 */
/* Restore back the original saved kprobes variables and continue. */
- if (kcb->kprobe_status == KPROBE_REENTER) {
- restore_previous_kprobe(kcb);
+ if (kcb->kp_core_status == KPROBE_REENTER) {
+ restore_previous_kp_core(kcb);
goto out;
}
- swap_kprobe_running_set(NULL);
+ kp_core_running_set(NULL);
out:
/*
* if somebody else is singlestepping across a probe point, eflags
static int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
- struct kprobe *cur = swap_kprobe_running();
- struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
+ struct kp_core *cur = kp_core_running();
+ struct kp_core_ctlblk *kcb = kp_core_ctlblk();
- switch (kcb->kprobe_status) {
+ switch (kcb->kp_core_status) {
case KPROBE_HIT_SS:
case KPROBE_REENTER:
/*
* and allow the page fault handler to continue as a
* normal page fault.
*/
- regs->EREG(ip) = (unsigned long) cur->addr;
- regs->EREG(flags) |= kcb->kprobe_old_eflags;
- if (kcb->kprobe_status == KPROBE_REENTER)
- restore_previous_kprobe(kcb);
+ regs->ip = cur->addr;
+ regs->flags |= kcb->kp_core_old_eflags;
+ if (kcb->kp_core_status == KPROBE_REENTER)
+ restore_previous_kp_core(kcb);
else
- swap_kprobe_running_set(NULL);
+ kp_core_running_set(NULL);
break;
case KPROBE_HIT_ACTIVE:
case KPROBE_HIT_SSDONE:
/*
- * We increment the nmissed count for accounting,
- * we can also use npre/npostfault count for accouting
- * these specific fault cases.
- */
- swap_kprobes_inc_nmissed_count(cur);
-
- /*
- * We come here because instructions in the pre/post
- * handler caused the page_fault, this could happen
- * if handler tries to access user space by
- * copy_from_user(), get_user() etc. Let the
- * user-specified handler try to fix it first.
- */
- if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
- return 1;
-
- /*
* In case the user-specified fault handler returned
* zero, try to fix up.
*/
ret = NOTIFY_STOP;
break;
case DIE_GPF:
- if (swap_kprobe_running() &&
+ if (kp_core_running() &&
kprobe_fault_handler(args->regs, args->trapnr))
ret = NOTIFY_STOP;
break;
};
/**
- * @brief Longjump break handler.
+ * @brief Arms kp_core.
*
- * @param p Pointer to fired kprobe.
- * @param regs Pointer to CPU registers data.
- * @return 0 on success.
- */
-int swap_longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
- u8 *addr = (u8 *) (regs->EREG(ip) - 1);
- unsigned long stack_addr = (unsigned long) (kcb->jprobe_saved_esp);
- struct jprobe *jp = container_of(p, struct jprobe, kp);
-
- DBPRINTF("p = %p\n", p);
-
- if ((addr > (u8 *)swap_jprobe_return) &&
- (addr < (u8 *)swap_jprobe_return_end)) {
- if (stack_addr(regs) != kcb->jprobe_saved_esp) {
- struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
- printk(KERN_INFO "current esp %p does not match saved esp %p\n",
- stack_addr(regs), kcb->jprobe_saved_esp);
- printk(KERN_INFO "Saved registers for jprobe %p\n", jp);
- swap_show_registers(saved_regs);
- printk(KERN_INFO "Current registers\n");
- swap_show_registers(regs);
- panic("BUG");
- /* BUG(); */
- }
- *regs = kcb->jprobe_saved_regs;
- memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
- MIN_STACK_SIZE(stack_addr));
- return 1;
- }
-
- return 0;
-}
-
-/**
- * @brief Arms kprobe.
- *
- * @param p Pointer to target kprobe.
+ * @param core Pointer to target kp_core.
* @return Void.
*/
-void swap_arch_arm_kprobe(struct kprobe *p)
+void arch_kp_core_arm(struct kp_core *p)
{
- swap_text_poke(p->addr,
+ swap_text_poke((void *)p->addr,
((unsigned char[]){BREAKPOINT_INSTRUCTION}), 1);
}
/**
- * @brief Disarms kprobe.
+ * @brief Disarms kp_core.
*
- * @param p Pointer to target kprobe.
+ * @param core Pointer to target kp_core.
* @return Void.
*/
-void swap_arch_disarm_kprobe(struct kprobe *p)
+void arch_kp_core_disarm(struct kp_core *p)
{
- swap_text_poke(p->addr, &p->opcode, 1);
+ swap_text_poke((void *)p->addr, &p->opcode, 1);
}
static __used void *trampoline_probe_handler_x86(struct pt_regs *regs)
*/
struct kj_cb_data {
struct pt_regs regs;
- struct kprobe *p;
+ struct kp_core *p;
jumper_cb_t cb;
char data[0];
/* save regs */
cb_data->regs = *regs;
- cb_data->p = swap_kprobe_running();
+ cb_data->p = kp_core_running();
cb_data->cb = cb;
/* save data */
/* jump to kjump_trampoline */
regs->ip = (unsigned long)&kjump_trampoline;
- swap_kprobe_running_set(NULL);
+ kp_core_running_set(NULL);
return 1;
}
EXPORT_SYMBOL_GPL(set_kjump_cb);
-static int restore_regs_pre_handler(struct kprobe *p, struct pt_regs *regs)
+static int restore_regs_pre_handler(struct kprobe *kp, struct pt_regs *regs)
{
struct kj_cb_data *data = (struct kj_cb_data *)regs->ax;
- struct kprobe *kp = data->p;
- struct kprobe_ctlblk *kcb = swap_get_kprobe_ctlblk();
+ struct kp_core *p = data->p;
+ struct kp_core_ctlblk *kcb = kp_core_ctlblk();
/* restore regs */
*regs = data->regs;
/* FIXME: potential memory leak, when process kill */
kfree(data);
- kcb = swap_get_kprobe_ctlblk();
+ kcb = kp_core_ctlblk();
- set_current_kprobe(kp, regs, kcb);
- setup_singlestep(kp, regs, kcb);
+ set_current_kp_core(p, regs, kcb);
+ setup_singlestep(p, regs, kcb);
return 1;
}
static struct kprobe restore_regs_kp = {
.pre_handler = restore_regs_pre_handler,
- .addr = (kprobe_opcode_t *)&kjump_trampoline_int3, /* nop */
+ .addr = (unsigned long)&kjump_trampoline_int3, /* nop */
};
static int kjump_init(void)
}
/**
- * @struct prev_kprobe
- * @brief Stores previous kprobe.
- * @var prev_kprobe::kp
- * Pointer to kprobe struct.
- * @var prev_kprobe::status
- * Kprobe status.
+ * @struct prev_kp_core
+ * @brief Stores previous kp_core.
+ * @var prev_kp_core::p
+ * Pointer to kp_core struct.
+ * @var prev_kp_core::status
+ * kp_core status.
*/
-struct prev_kprobe {
- struct kprobe *kp;
+struct prev_kp_core {
+ struct kp_core *p;
unsigned long status;
};
/**
- * @struct kprobe_ctlblk
- * @brief Per-cpu kprobe control block.
- * @var kprobe_ctlblk::kprobe_status
- * Kprobe status.
- * @var kprobe_ctlblk::prev_kprobe
- * Previous kprobe.
+ * @struct kp_core_ctlblk
+ * @brief Per-cpu kp_core control block.
+ * @var kp_core_ctlblk::kp_core_status
+ * kp_core status.
+ * @var kp_core_ctlblk::prev_kp_core
+ * Previous kp_core.
*/
-struct kprobe_ctlblk {
- unsigned long kprobe_status;
- struct prev_kprobe prev_kprobe;
+struct kp_core_ctlblk {
+ unsigned long kp_core_status;
+ struct prev_kp_core prev_kp_core;
struct pt_regs jprobe_saved_regs;
- unsigned long kprobe_old_eflags;
- unsigned long kprobe_saved_eflags;
+ unsigned long kp_core_old_eflags;
+ unsigned long kp_core_saved_eflags;
unsigned long *jprobe_saved_esp;
kprobe_opcode_t jprobes_stack[MAX_STACK_SIZE];
};
* @var arch_specific_insn::insn
* Copy of the original instruction.
* @var arch_specific_insn::boostable
- * If this flag is not 0, this kprobe can be boost when its
+ * If this flag is not 0, this kp_core can be boosted when its
* post_handler and break_handler is not set.
*/
struct arch_specific_insn {
int arch_init_module_deps(void);
+struct kprobe;
+struct kp_core;
struct slot_manager;
struct kretprobe_instance;
-int swap_arch_prepare_kprobe(struct kprobe *p, struct slot_manager *sm);
-void swap_arch_arm_kprobe(struct kprobe *p);
-void swap_arch_disarm_kprobe(struct kprobe *p);
+int arch_kp_core_prepare(struct kp_core *p, struct slot_manager *sm);
+void arch_kp_core_arm(struct kp_core *core);
+void arch_kp_core_disarm(struct kp_core *core);
+int swap_setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs);
void swap_arch_prepare_kretprobe(struct kretprobe_instance *ri,
struct pt_regs *regs);
void swap_kretprobe_trampoline(void);
-void restore_previous_kprobe(struct kprobe_ctlblk *kcb);
+void restore_previous_kp_core(struct kp_core_ctlblk *kcb);
int swap_can_boost(kprobe_opcode_t *opcodes);
static inline int arch_check_insn(struct arch_specific_insn *ainsn)
{
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/stop_machine.h>
-
+#include <linux/delay.h>
#include <ksyms/ksyms.h>
#include <master/swap_initializer.h>
#include <swap-asm/swap_kprobes.h>
static DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
-struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
+static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
/**
/* FIXME: free */
}
+static struct hlist_head *kpt_head_by_addr(unsigned long addr)
+{
+ return &kprobe_table[hash_ptr((void *)addr, KPROBE_HASH_BITS)];
+}
+
static void kretprobe_assert(struct kretprobe_instance *ri,
unsigned long orig_ret_address,
unsigned long trampoline_address)
panic("kretprobe BUG!: ri->rp = NULL\n");
panic("kretprobe BUG!: "
- "Processing kretprobe %p @ %p (%d/%d - %s)\n",
+ "Processing kretprobe %p @ %08lx (%d/%d - %s)\n",
ri->rp, ri->rp->kp.addr, ri->task->tgid,
ri->task->pid, ri->task->comm);
}
}
-struct kp_data {
- struct kprobe *running;
- struct kprobe *instance;
- struct kprobe_ctlblk ctlblk;
+struct kpc_data {
+ struct kp_core *running;
+ struct kp_core_ctlblk ctlblk;
};
static void ktd_cur_init(struct task_struct *task, void *data)
{
- struct kp_data *d = (struct kp_data *)data;
+ struct kpc_data *d = (struct kpc_data *)data;
memset(d, 0, sizeof(*d));
}
static void ktd_cur_exit(struct task_struct *task, void *data)
{
- struct kp_data *d = (struct kp_data *)data;
+ struct kpc_data *d = (struct kpc_data *)data;
WARN(d->running, "running probe is not NULL");
- WARN(d->instance, "instance probe is not NULL");
}
struct ktask_data ktd_cur = {
.init = ktd_cur_init,
.exit = ktd_cur_exit,
- .size = sizeof(struct kp_data),
+ .size = sizeof(struct kpc_data),
};
-static struct kp_data *kprobe_data(void)
+static DEFINE_PER_CPU(struct kpc_data, per_cpu_kpc_data);
+
+static struct kpc_data *kp_core_data(void)
{
- return (struct kp_data *)swap_ktd(&ktd_cur, current);
+ if (in_interrupt()) {
+ return &__get_cpu_var(per_cpu_kpc_data);
+ } else {
+ return (struct kpc_data *)swap_ktd(&ktd_cur, current);
+ }
}
static int kprobe_cur_reg(void)
swap_ktd_unreg(&ktd_cur);
}
-
-static struct kprobe *kprobe_instance(void)
-{
- return kprobe_data()->instance;
-}
-
-static void kprobe_instance_set(struct kprobe *p)
+struct kp_core *kp_core_running(void)
{
- kprobe_data()->instance = p;
+ return kp_core_data()->running;
}
-
-struct kprobe *swap_kprobe_running(void)
+void kp_core_running_set(struct kp_core *p)
{
- return kprobe_data()->running;
-}
-
-void swap_kprobe_running_set(struct kprobe *p)
-{
- kprobe_data()->running = p;
+ kp_core_data()->running = p;
}
/**
- * @brief Sets the current kprobe to NULL.
+ * @brief Gets kp_core_ctlblk for the current CPU.
*
- * @return Void.
+ * @return Current CPU struct kp_core_ctlblk.
*/
-void swap_reset_current_kprobe(void)
+struct kp_core_ctlblk *kp_core_ctlblk(void)
{
- swap_kprobe_running_set(NULL);
-}
-
-/**
- * @brief Gets kprobe_ctlblk for the current CPU.
- *
- * @return Current CPU struct kprobe_ctlblk.
- */
-struct kprobe_ctlblk *swap_get_kprobe_ctlblk(void)
-{
- return &kprobe_data()->ctlblk;
+ return &kp_core_data()->ctlblk;
}
/*
*/
/**
- * @brief Gets kprobe.
+ * @brief Gets kp_core.
*
* @param addr Probe address.
- * @return Kprobe for addr.
+ * @return kp_core for addr.
*/
-struct kprobe *swap_get_kprobe(void *addr)
+struct kp_core *kp_core_by_addr(unsigned long addr)
{
struct hlist_head *head;
- struct kprobe *p;
+ struct kp_core *core;
DECLARE_NODE_PTR_FOR_HLIST(node);
- head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
- swap_hlist_for_each_entry_rcu(p, node, head, hlist) {
- if (p->addr == addr)
- return p;
+ head = kpt_head_by_addr(addr);
+ swap_hlist_for_each_entry_rcu(core, node, head, hlist) {
+ if (core->addr == addr)
+ return core;
}
return NULL;
}
-/*
- * Aggregate handlers for multiple kprobes support - these handlers
- * take care of invoking the individual kprobe handlers on p->list
- */
-static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe *kp;
- int ret;
-
- list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->pre_handler) {
- kprobe_instance_set(kp);
- ret = kp->pre_handler(kp, regs);
- if (ret)
- return ret;
- }
- kprobe_instance_set(NULL);
- }
-
- return 0;
-}
-
-static void aggr_post_handler(struct kprobe *p,
- struct pt_regs *regs,
- unsigned long flags)
-{
- struct kprobe *kp;
-
- list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->post_handler) {
- kprobe_instance_set(kp);
- kp->post_handler(kp, regs, flags);
- kprobe_instance_set(NULL);
- }
- }
-}
-
-static int aggr_fault_handler(struct kprobe *p,
- struct pt_regs *regs,
- int trapnr)
-{
- struct kprobe *cur = kprobe_instance();
-
- /*
- * if we faulted "during" the execution of a user specified
- * probe handler, invoke just that probe's fault handler
- */
- if (cur && cur->fault_handler) {
- if (cur->fault_handler(cur, regs, trapnr))
- return 1;
- }
-
- return 0;
-}
-
-static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
-{
- struct kprobe *cur = kprobe_instance();
- int ret = 0;
- DBPRINTF("cur = 0x%p\n", cur);
- if (cur)
- DBPRINTF("cur = 0x%p cur->break_handler = 0x%p\n",
- cur, cur->break_handler);
-
- if (cur && cur->break_handler) {
- if (cur->break_handler(cur, regs))
- ret = 1;
- }
- kprobe_instance_set(NULL);
-
- return ret;
-}
-
-/**
- * @brief Walks the list and increments nmissed count for multiprobe case.
- *
- * @param p Pointer to the missed kprobe.
- * @return Void.
- */
-void swap_kprobes_inc_nmissed_count(struct kprobe *p)
-{
- struct kprobe *kp;
- if (p->pre_handler != aggr_pre_handler) {
- p->nmissed++;
- } else {
- list_for_each_entry_rcu(kp, &p->list, list) {
- ++kp->nmissed;
- }
- }
-}
static int alloc_nodes_kretprobe(struct kretprobe *rp);
}
}
-/*
- * Keep all fields in the kprobe consistent
- */
-static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
+static void kp_core_remove(struct kp_core *core)
{
- memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
- memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
+ /* TODO: check boostable for x86 and MIPS */
+ swap_slot_free(&sm, core->ainsn.insn);
}
-/*
- * Add the new probe to old_p->list. Fail if this is the
- * second jprobe at the address - two jprobes can't coexist
- */
-static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+static void kp_core_wait(struct kp_core *p)
{
- if (p->break_handler) {
- if (old_p->break_handler)
- return -EEXIST;
+ int ms = 1;
- list_add_tail_rcu(&p->list, &old_p->list);
- old_p->break_handler = aggr_break_handler;
- } else {
- list_add_rcu(&p->list, &old_p->list);
+ while (atomic_read(&p->usage)) {
+ msleep(ms);
+ ms += ms < 7 ? 1 : 0;
}
-
- if (p->post_handler && !old_p->post_handler)
- old_p->post_handler = aggr_post_handler;
-
- return 0;
}
-/**
- * hlist_replace_rcu - replace old entry by new one
- * @old : the element to be replaced
- * @new : the new element to insert
- *
- * The @old entry will be replaced with the @new entry atomically.
- */
-inline void swap_hlist_replace_rcu(struct hlist_node *old,
- struct hlist_node *new)
+static struct kp_core *kp_core_create(unsigned long addr)
{
- struct hlist_node *next = old->next;
-
- new->next = next;
- new->pprev = old->pprev;
- smp_wmb();
- if (next)
- new->next->pprev = &new->next;
- if (new->pprev)
- *new->pprev = new;
- old->pprev = LIST_POISON2;
+ struct kp_core *core;
+
+ core = kzalloc(sizeof(*core), GFP_KERNEL);
+ if (core) {
+ INIT_HLIST_NODE(&core->hlist);
+ core->addr = addr;
+ atomic_set(&core->usage, 0);
+ rwlock_init(&core->handlers.lock);
+ }
+
+ return core;
}
-/*
- * Fill in the required fields of the "manager kprobe". Replace the
- * earlier kprobe in the hlist with the manager kprobe
- */
-static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
+static void kp_core_free(struct kp_core *core)
{
- copy_kprobe(p, ap);
- ap->addr = p->addr;
- ap->pre_handler = aggr_pre_handler;
- ap->fault_handler = aggr_fault_handler;
- if (p->post_handler)
- ap->post_handler = aggr_post_handler;
- if (p->break_handler)
- ap->break_handler = aggr_break_handler;
-
- INIT_LIST_HEAD(&ap->list);
- list_add_rcu(&p->list, &ap->list);
-
- swap_hlist_replace_rcu(&p->hlist, &ap->hlist);
+ WARN_ON(atomic_read(&core->usage));
+ kfree(core);
}
-/*
- * This is the second or subsequent kprobe at the address - handle
- * the intricacies
- */
-static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
+static int pre_handler_one(struct kp_core *core, struct pt_regs *regs)
{
int ret = 0;
- struct kprobe *ap;
- DBPRINTF("start\n");
+ struct kprobe *p = core->handlers.kps[0];
- DBPRINTF("p = %p old_p = %p\n", p, old_p);
- if (old_p->pre_handler == aggr_pre_handler) {
- DBPRINTF("aggr_pre_handler\n");
+ if (p->pre_handler)
+ ret = p->pre_handler(p, regs);
- copy_kprobe(old_p, p);
- ret = add_new_kprobe(old_p, p);
- } else {
- DBPRINTF("kzalloc\n");
-#ifdef kzalloc
- ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
-#else
- ap = kmalloc(sizeof(struct kprobe), GFP_KERNEL);
- if (ap)
- memset(ap, 0, sizeof(struct kprobe));
-#endif
- if (!ap)
- return -ENOMEM;
+ return ret;
+}
+
+static int pre_handler_multi(struct kp_core *core, struct pt_regs *regs)
+{
+ int i, ret = 0;
- atomic_set(&ap->usage, 0);
- add_aggr_kprobe(ap, old_p);
- copy_kprobe(ap, p);
- DBPRINTF("ap = %p p = %p old_p = %p\n", ap, p, old_p);
- ret = add_new_kprobe(ap, p);
+ /* TODO: add synchronization for concurrent kprobe use */
+ for (i = 0; i < ARRAY_SIZE(core->handlers.kps); ++i) {
+ struct kprobe *p = core->handlers.kps[i];
+
+ if (p && p->pre_handler) {
+ ret = p->pre_handler(p, regs);
+ if (ret)
+ break;
+ }
}
return ret;
}
-static void remove_kprobe(struct kprobe *p)
+static int kp_core_add_kprobe(struct kp_core *core, struct kprobe *p)
{
- /* TODO: check boostable for x86 and MIPS */
- swap_slot_free(&sm, p->ainsn.insn);
+ int i, ret = 0;
+ unsigned long flags;
+ struct kp_handlers *h = &core->handlers;
+
+ write_lock_irqsave(&h->lock, flags);
+ if (h->pre == NULL) {
+ h->pre = pre_handler_one;
+ } else if (h->pre == pre_handler_one) {
+ h->pre = pre_handler_multi;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(core->handlers.kps); ++i) {
+ if (core->handlers.kps[i])
+ continue;
+
+ core->handlers.kps[i] = p;
+ goto unlock;
+ }
+
+ pr_err("all kps slots is busy\n");
+ ret = -EBUSY;
+unlock:
+ write_unlock_irqrestore(&h->lock, flags);
+ return ret;
}
-static void wait_kp(struct kprobe *p)
+static void kp_core_del_kprobe(struct kp_core *core, struct kprobe *p)
{
- while (atomic_read(&p->usage))
- schedule();
+ int i, cnt = 0;
+ unsigned long flags;
+ struct kp_handlers *h = &core->handlers;
+
+ write_lock_irqsave(&h->lock, flags);
+ for (i = 0; i < ARRAY_SIZE(h->kps); ++i) {
+ if (h->kps[i] == p)
+ h->kps[i] = NULL;
+
+ if (h->kps[i] == NULL)
+ ++cnt;
+ }
+ write_unlock_irqrestore(&h->lock, flags);
+
+ if (cnt == ARRAY_SIZE(h->kps)) {
+ arch_kp_core_disarm(core);
+ synchronize_sched();
+
+ hlist_del_rcu(&core->hlist);
+ synchronize_rcu();
+
+ kp_core_wait(core);
+ kp_core_remove(core);
+ kp_core_free(core);
+ }
}
+static DEFINE_MUTEX(kp_mtx);
/**
* @brief Registers kprobe.
*
*/
int swap_register_kprobe(struct kprobe *p)
{
- struct kprobe *old_p;
+ struct kp_core *core;
+ unsigned long addr;
int ret = 0;
/*
* If we have a symbol_name argument look it up,
if (p->symbol_name) {
if (p->addr)
return -EINVAL;
- p->addr = (kprobe_opcode_t *)swap_ksyms(p->symbol_name);
+ p->addr = swap_ksyms(p->symbol_name);
}
if (!p->addr)
return -EINVAL;
- DBPRINTF("p->addr = 0x%p\n", p->addr);
- p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);
- DBPRINTF("p->addr = 0x%p p = 0x%p\n", p->addr, p);
-
- p->nmissed = 0;
- INIT_LIST_HEAD(&p->list);
- atomic_set(&p->usage, 0);
-
- old_p = swap_get_kprobe(p->addr);
- if (old_p) {
- ret = register_aggr_kprobe(old_p, p);
- if (!ret)
- atomic_inc(&kprobe_count);
- goto out;
- }
- ret = swap_arch_prepare_kprobe(p, &sm);
- if (ret != 0)
- goto out;
-
- DBPRINTF("before out ret = 0x%x\n", ret);
- INIT_HLIST_NODE(&p->hlist);
- hlist_add_head_rcu(&p->hlist,
- &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
- swap_arch_arm_kprobe(p);
-
-out:
- DBPRINTF("out ret = 0x%x\n", ret);
- return ret;
-}
-EXPORT_SYMBOL_GPL(swap_register_kprobe);
-
-static void swap_unregister_valid_kprobe(struct kprobe *p, struct kprobe *old_p)
-{
- struct kprobe *list_p;
- BUG_ON(in_atomic());
+ addr = p->addr + p->offset;
- if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
- (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
- /* Only probe on the hash list */
- swap_arch_disarm_kprobe(p);
-
- synchronize_sched();
- wait_kp(old_p);
-
- hlist_del_rcu(&old_p->hlist);
- remove_kprobe(old_p);
-
- if (p != old_p) {
- list_del_rcu(&old_p->list);
- kfree(old_p);
+ mutex_lock(&kp_mtx);
+ core = kp_core_by_addr(addr);
+ if (core == NULL) {
+ core = kp_core_create(addr);
+ if (core == NULL) {
+ pr_err("Out of memory\n");
+ ret = -ENOMEM;
+ goto unlock;
}
- /* Synchronize and remove probe in bottom */
- } else {
- list_del_rcu(&p->list);
- if (p->break_handler)
- old_p->break_handler = NULL;
- if (p->post_handler) {
- list_for_each_entry_rcu(list_p, &old_p->list, list)
- if (list_p->post_handler)
- goto out;
+ ret = arch_kp_core_prepare(core, &sm);
+ if (ret)
+ goto unlock;
- old_p->post_handler = NULL;
+ ret = kp_core_add_kprobe(core, p);
+ if (ret) {
+ kp_core_free(core);
+ goto unlock;
}
+
+ hlist_add_head_rcu(&core->hlist, kpt_head_by_addr(core->addr));
+ arch_kp_core_arm(core);
+ } else {
+ ret = kp_core_add_kprobe(core, p);
}
-out:
- /* Set NULL addr for reusability if symbol_name is used */
- if (p->symbol_name)
- p->addr = NULL;
+unlock:
+ mutex_unlock(&kp_mtx);
+ return ret;
}
+EXPORT_SYMBOL_GPL(swap_register_kprobe);
/**
* @brief Unregistes kprobe.
* @param kp Pointer to the target kprobe.
* @return Void.
*/
-void swap_unregister_kprobe(struct kprobe *kp)
+void swap_unregister_kprobe(struct kprobe *p)
{
- struct kprobe *old_p, *list_p;
-
- old_p = swap_get_kprobe(kp->addr);
- if (unlikely(!old_p))
- return;
-
- if (kp != old_p) {
- list_for_each_entry_rcu(list_p, &old_p->list, list)
- if (list_p == kp)
- goto unreg_valid_kprobe;
- /* kprobe invalid */
- return;
- }
+ unsigned long addr = p->addr + p->offset;
+ struct kp_core *core;
+
+ mutex_lock(&kp_mtx);
+ core = kp_core_by_addr(addr);
+ BUG_ON(core == NULL);
-unreg_valid_kprobe:
- swap_unregister_valid_kprobe(kp, old_p);
+ kp_core_del_kprobe(core, p);
+ mutex_unlock(&kp_mtx);
+
+ /* Set 0 addr for reusability if symbol_name is used */
+ if (p->symbol_name)
+ p->addr = 0;
}
EXPORT_SYMBOL_GPL(swap_unregister_kprobe);
{
/* Todo: Verify probepoint is a function entry point */
jp->kp.pre_handler = swap_setjmp_pre_handler;
- jp->kp.break_handler = swap_longjmp_break_handler;
return swap_register_kprobe(&jp->kp);
}
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address;
- struct kprobe_ctlblk *kcb;
+ struct kp_core_ctlblk *kcb;
struct hlist_node *tmp;
DECLARE_NODE_PTR_FOR_HLIST(node);
trampoline_address = (unsigned long)&swap_kretprobe_trampoline;
- kcb = swap_get_kprobe_ctlblk();
+ kcb = kp_core_ctlblk();
spin_lock_irqsave(&kretprobe_lock, flags);
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler) {
- swap_kprobe_running_set(&ri->rp->kp);
- swap_get_kprobe_ctlblk()->kprobe_status =
- KPROBE_HIT_ACTIVE;
+ /*
+ * Set a fake current probe; we don't
+ * want to recurse into the handler
+ */
+ kp_core_running_set((struct kp_core *)0xfffff);
+ kcb->kp_core_status = KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
- swap_kprobe_running_set(NULL);
+ kp_core_running_set(NULL);
}
orig_ret_address = (unsigned long)ri->ret_addr;
}
kretprobe_assert(ri, orig_ret_address, trampoline_address);
- if (kcb->kprobe_status == KPROBE_REENTER)
- restore_previous_kprobe(kcb);
+ if (kcb->kp_core_status == KPROBE_REENTER)
+ restore_previous_kp_core(kcb);
else
- swap_kprobe_running_set(NULL);
+ kp_core_running_set(NULL);
spin_unlock_irqrestore(&kretprobe_lock, flags);
DBPRINTF("Alloc aditional mem for retprobes");
- if ((unsigned long)rp->kp.addr == sched_addr) {
+ if (rp->kp.addr == sched_addr) {
rp->maxactive += SCHED_RP_NR; /* max (100, 2 * NR_CPUS); */
alloc_nodes = SCHED_RP_NR;
} else {
DBPRINTF("START");
rp->kp.pre_handler = pre_handler_kretprobe;
- rp->kp.post_handler = NULL;
- rp->kp.fault_handler = NULL;
- rp->kp.break_handler = NULL;
/* Pre-allocate memory for max kretprobe instances */
- if ((unsigned long)rp->kp.addr == exit_addr) {
+ if (rp->kp.addr == exit_addr) {
rp->kp.pre_handler = NULL; /* not needed for do_exit */
rp->maxactive = 0;
- } else if ((unsigned long)rp->kp.addr == do_group_exit_addr) {
+ } else if (rp->kp.addr == do_group_exit_addr) {
rp->kp.pre_handler = NULL;
rp->maxactive = 0;
- } else if ((unsigned long)rp->kp.addr == sys_exit_group_addr) {
+ } else if (rp->kp.addr == sys_exit_group_addr) {
rp->kp.pre_handler = NULL;
rp->maxactive = 0;
- } else if ((unsigned long)rp->kp.addr == sys_exit_addr) {
+ } else if (rp->kp.addr == sys_exit_addr) {
rp->kp.pre_handler = NULL;
rp->maxactive = 0;
} else if (rp->maxactive <= 0) {
printk(KERN_INFO "%s (%d/%d): cannot disarm "
"krp instance (%08lx)\n",
ri->task->comm, ri->task->tgid, ri->task->pid,
- (unsigned long)rp->kp.addr);
+ rp->kp.addr);
}
}
}
ri->task->comm, ri->task->tgid, ri->task->pid,
pc, (long unsigned int)ri->ret_addr,
(long unsigned int)tramp,
- (long unsigned int)(ri->rp ? ri->rp->kp.addr : NULL));
+ (ri->rp ? ri->rp->kp.addr : 0));
/* __switch_to retprobe handling */
if (pc == (unsigned long)tramp) {
if (found) {
printk(KERN_INFO "---> [%d] %s (%d/%d): tramp (%08lx) "
- "found at %08lx (%08lx /%+d) - %p\n",
+ "found at %08lx (%08lx /%+d) - %08lx\n",
task_cpu(ri->task),
ri->task->comm, ri->task->tgid, ri->task->pid,
(long unsigned int)tramp,
(long unsigned int)found, (long unsigned int)ri->sp,
- found - ri->sp, ri->rp ? ri->rp->kp.addr : NULL);
+ found - ri->sp, ri->rp ? ri->rp->kp.addr : 0);
*found = (unsigned long)ri->ret_addr;
retval = 0;
} else {
printk(KERN_INFO "---> [%d] %s (%d/%d): tramp (%08lx) "
- "NOT found at sp = %08lx - %p\n",
+ "NOT found at sp = %08lx - %08lx\n",
task_cpu(ri->task),
ri->task->comm, ri->task->tgid, ri->task->pid,
(long unsigned int)tramp,
(long unsigned int)ri->sp,
- ri->rp ? ri->rp->kp.addr : NULL);
+ ri->rp ? ri->rp->kp.addr : 0);
}
return retval;
goto not_found;
sym = "__put_task_struct";
- put_task_kp.addr = (void *)swap_ksyms(sym);
- if (put_task_kp.addr == NULL)
+ put_task_kp.addr = swap_ksyms(sym);
+ if (put_task_kp.addr == 0)
goto not_found;
ret = init_module_deps();
#endif
-/* kprobe_status settings */
+/* kp_core_status settings */
/** Kprobe hit active */
#define KPROBE_HIT_ACTIVE 0x00000001
/** Kprobe hit ss */
typedef int (*kretprobe_handler_t) (struct kretprobe_instance *,
struct pt_regs *);
+struct kprobe;
+struct kp_core;
+
+struct kp_handlers {
+ int (*pre)(struct kp_core *, struct pt_regs *);
+
+ rwlock_t lock;
+ struct kprobe *kps[4];
+};
+
+struct kp_core {
+ struct hlist_node hlist;
+ atomic_t usage;
+
+ struct kp_handlers handlers;
+
+ unsigned long addr;
+ kprobe_opcode_t opcode;
+
+ struct arch_specific_insn ainsn;
+
+ unsigned long ss_addr[NR_CPUS];
+};
+
/**
* @struct kprobe
* @brief Main kprobe struct.
*/
struct kprobe {
- struct hlist_node hlist; /**< Hash list.*/
- /** List of probes to search by instruction slot.*/
- struct hlist_node is_hlist;
- /** List of kprobes for multi-handler support.*/
- struct list_head list;
- /** Count the number of times this probe was temporarily disarmed.*/
- unsigned long nmissed;
- /** Location of the probe point. */
- kprobe_opcode_t *addr;
- /** Allow user to indicate symbol name of the probe point.*/
- char *symbol_name;
- /** Offset into the symbol.*/
- unsigned int offset;
- /** Called before addr is executed.*/
- kprobe_pre_handler_t pre_handler;
- /** Called after addr is executed, unless...*/
- kprobe_post_handler_t post_handler;
- /** ... called if executing addr causes a fault (eg. page fault).*/
- kprobe_fault_handler_t fault_handler;
- /** Return 1 if it handled fault, otherwise kernel will see it.*/
- kprobe_break_handler_t break_handler;
- /** Saved opcode (which has been replaced with breakpoint).*/
- kprobe_opcode_t opcode;
- /** Copy of the original instruction.*/
- struct arch_specific_insn ainsn;
- /** Override single-step target address, may be used to redirect
- * control-flow to arbitrary address after probe point without
- * invocation of original instruction; useful for functions
- * replacement. If jprobe.entry should return address of function or
- * NULL if original function should be called.
- * Not supported for X86, not tested for MIPS. */
- kprobe_opcode_t *ss_addr[NR_CPUS];
- atomic_t usage;
+ unsigned long addr; /**< Location of the probe point. */
+ char *symbol_name; /**< Symbol name of the probe point. */
+ unsigned long offset; /**< Offset into the symbol.*/
+ /**< Called before addr is executed. */
+ kprobe_pre_handler_t pre_handler;
};
/**
};
-extern void swap_kprobes_inc_nmissed_count(struct kprobe *p);
-
/*
* Large value for fast but memory consuming implementation
* it is good when a lot of probes are instrumented
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
-static void inline get_kp(struct kprobe *p)
+static void inline kp_core_get(struct kp_core *p)
{
atomic_inc(&p->usage);
}
-static void inline put_kp(struct kprobe *p)
+static void inline kp_core_put(struct kp_core *p)
{
atomic_dec(&p->usage);
}
-/* Get the kprobe at this addr (if any) - called with preemption disabled */
-struct kprobe *swap_get_kprobe(void *addr);
-
+/* Get the kp_core at this addr (if any) - called with rcu_read_lock() */
+struct kp_core *kp_core_by_addr(unsigned long addr);
int swap_register_kprobe(struct kprobe *p);
void swap_unregister_kprobe(struct kprobe *p);
-int swap_setjmp_pre_handler(struct kprobe *, struct pt_regs *);
-int swap_longjmp_break_handler(struct kprobe *, struct pt_regs *);
-
int swap_register_jprobe(struct jprobe *p);
void swap_unregister_jprobe(struct jprobe *p);
void swap_jprobe_return(void);
extern atomic_t kprobe_count;
extern unsigned long sched_addr;
-struct kprobe *swap_kprobe_running(void);
-void swap_kprobe_running_set(struct kprobe *p);
-void swap_reset_current_kprobe(void);
-struct kprobe_ctlblk *swap_get_kprobe_ctlblk(void);
+struct kp_core *kp_core_running(void);
+void kp_core_running_set(struct kp_core *p);
+struct kp_core_ctlblk *kp_core_ctlblk(void);
#endif /* _SWAP_KPROBES_H */
if (rp && check_event(current)) {
struct ks_probe *ksp = container_of(rp, struct ks_probe, rp);
const char *fmt = ksp->args;
- const unsigned long addr = (unsigned long)ksp->rp.kp.addr;
+ const unsigned long addr = ksp->rp.kp.addr;
enum probe_t type = ksp->type;
ksf_msg_entry(regs, addr, type, fmt);
if (rp && check_event(current)) {
struct ks_probe *ksp = container_of(rp, struct ks_probe, rp);
- const unsigned long func_addr = (unsigned long)rp->kp.addr;
+ const unsigned long func_addr = rp->kp.addr;
const unsigned long ret_addr = (unsigned long)ri->ret_addr;
enum probe_t type = ksp->type;
*/
int init_switch_context(void)
{
- unsigned long addr;
-
- addr = swap_ksyms("__switch_to");
- if (addr == 0) {
+ switch_rp.kp.addr = swap_ksyms("__switch_to");
+ if (switch_rp.kp.addr == 0) {
printk(KERN_INFO "ERROR: not found '__switch_to'\n");
return -EINVAL;
}
- switch_rp.kp.addr = (kprobe_opcode_t *)addr;
-
return 0;
}
int ret;
printk(KERN_INFO "register_syscall: %s\n", get_sys_name(id));
- if (ksp[id].rp.kp.addr == NULL)
+ if (ksp[id].rp.kp.addr == 0)
return 0;
ksp[id].rp.entry_handler = entry_handler;
{
printk(KERN_INFO "unregister_syscall: %s\n", get_sys_name(id));
- if (ksp[id].rp.kp.addr == NULL)
+ if (ksp[id].rp.kp.addr == 0)
return 0;
swap_unregister_kretprobe(&ksp[id].rp);
for (; cnt != end; --cnt) {
id = id_p[cnt];
- if (ksp[id].rp.kp.addr != NULL) {
+ if (ksp[id].rp.kp.addr) {
rpp[i] = &ksp[id].rp;
++i;
}
addr = 0;
}
- ksp[i].rp.kp.addr = (kprobe_opcode_t *)addr;
+ ksp[i].rp.kp.addr = addr;
}
return 0;
if (p == NULL)
return NULL;
- p->p.jp.kp.addr = p->p.rp.kp.addr = (void *)addr;
+ p->p.jp.kp.addr = p->p.rp.kp.addr = addr;
p->p.jp.pre_entry = pre_handler;
p->p.jp.entry = jp_handler;
p->p.rp.handler = rp_handler;
EXPORT_SYMBOL_GPL(taskctx_run);
-static unsigned long cb_sig(void *data)
-{
- struct call_task *call = *(struct call_task **)data;
-
- complete(&call->comp0);
- call->func(call->data);
- complete(&call->comp1);
-
- return 0;
-}
-
static int sig_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
struct call_task *call = call_get(current);
call_set(current, NULL);
call->is_running = true;
- return set_kjump_cb(regs, cb_sig, &call, sizeof(call));
+ complete(&call->comp0);
+ call->func(call->data);
+ complete(&call->comp1);
}
return 0;
goto not_found;
sym = "get_signal_to_deliver";
- sig_kprobe.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (sig_kprobe.addr == NULL)
+ sig_kprobe.addr = swap_ksyms(sym);
+ if (sig_kprobe.addr == 0)
goto not_found;
return 0;
goto not_found;
sym = "do_exit";
- kp_do_exit.addr = (void *)swap_ksyms(sym);
- if (kp_do_exit.addr == NULL)
+ kp_do_exit.addr = swap_ksyms(sym);
+ if (kp_do_exit.addr == 0)
goto not_found;
ret = swap_td_raw_reg(&td_raw, sizeof(struct uprobe_ctlblk));
}
-static atomic_t pre_handler_cp_cnt = ATOMIC_INIT(0);
-
-static unsigned long cp_cb(void *data)
-{
- if (atomic_read(&stop_flag))
- call_mm_release(current);
-
- atomic_dec(&pre_handler_cp_cnt);
- return 0;
-}
-
static int pre_handler_cp(struct kprobe *p, struct pt_regs *regs)
{
- int ret = 0;
-
if (is_kthread(current))
goto out;
- if (!atomic_read(&stop_flag))
- goto out;
+ if (atomic_read(&stop_flag))
+ call_mm_release(current);
- ret = set_kjump_cb(regs, cp_cb, NULL, 0);
- if (ret < 0) {
- pr_err("set_kjump_cp, ret=%d\n", ret);
- ret = 0;
- } else {
- atomic_inc(&pre_handler_cp_cnt);
- }
out:
- return ret;
+ return 0;
}
do {
synchronize_sched();
- } while (atomic_read(&rm_uprobes_child_cnt)
- || atomic_read(&pre_handler_cp_cnt));
+ } while (atomic_read(&rm_uprobes_child_cnt));
}
* mm_release() *
******************************************************************************
*/
-
-static atomic_t mm_release_cnt = ATOMIC_INIT(0);
-
-static unsigned long mr_cb(void *data)
+static void mr_handler(struct task_struct *task)
{
- struct task_struct *task = *(struct task_struct **)data;
struct mm_struct *mm = task->mm;
if (mm == NULL) {
pr_err("mm is NULL\n");
- return 0;
+ return;
}
/* TODO: this lock for synchronizing to disarm urp */
if (task != current) {
pr_err("call mm_release in isn't current context\n");
- return 0;
+ return;
}
/* if the thread is killed we need to discard pending
call_mm_release(task);
}
up_write(&mm->mmap_sem);
-
- atomic_dec(&mm_release_cnt);
-
- return 0;
}
/* Detects when target process removes IPs. */
static int mr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
- int ret = 0;
struct task_struct *task = (struct task_struct *)swap_get_karg(regs, 0);
if (is_kthread(task))
goto out;
- ret = set_kjump_cb(regs, mr_cb, (void *)&task, sizeof(task));
- if (ret < 0) {
- printk("##### ERROR: mr_pre_handler, ret=%d\n", ret);
- ret = 0;
- } else {
- atomic_inc(&mm_release_cnt);
- }
+ mr_handler(task);
out:
- return ret;
+ return 0;
}
static struct kprobe mr_kprobe = {
static void unregister_mr(void)
{
swap_unregister_kprobe(&mr_kprobe);
- do {
- synchronize_sched();
- } while (atomic_read(&mm_release_cnt));
}
const char *sym;
sym = "do_page_fault";
- mf_kretprobe.kp.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (mf_kretprobe.kp.addr == NULL)
+ mf_kretprobe.kp.addr = swap_ksyms(sym);
+ if (mf_kretprobe.kp.addr == 0)
goto not_found;
sym = "copy_process";
- cp_kretprobe.kp.addr = (kprobe_opcode_t *)swap_ksyms_substr(sym);
- if (cp_kretprobe.kp.addr == NULL)
+ cp_kretprobe.kp.addr = swap_ksyms_substr(sym);
+ if (cp_kretprobe.kp.addr == 0)
goto not_found;
cp_kprobe.addr = cp_kretprobe.kp.addr;
sym = "mm_release";
- mr_kprobe.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (mr_kprobe.addr == NULL)
+ mr_kprobe.addr = swap_ksyms(sym);
+ if (mr_kprobe.addr == 0)
goto not_found;
sym = "do_munmap";
- unmap_kretprobe.kp.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (unmap_kretprobe.kp.addr == NULL)
+ unmap_kretprobe.kp.addr = swap_ksyms(sym);
+ if (unmap_kretprobe.kp.addr == 0)
goto not_found;
sym = "do_mmap_pgoff";
- mmap_kretprobe.kp.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (mmap_kretprobe.kp.addr == NULL)
+ mmap_kretprobe.kp.addr = swap_ksyms(sym);
+ if (mmap_kretprobe.kp.addr == 0)
goto not_found;
sym = "set_task_comm";
- comm_kretprobe.kp.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (comm_kretprobe.kp.addr == NULL)
+ comm_kretprobe.kp.addr = swap_ksyms(sym);
+ if (comm_kretprobe.kp.addr == 0)
goto not_found;
sym = "release_task";
- release_task_kp.addr = (kprobe_opcode_t *)swap_ksyms(sym);
- if (release_task_kp.addr == NULL)
+ release_task_kp.addr = swap_ksyms(sym);
+ if (release_task_kp.addr == 0)
goto not_found;
return 0;