*/
void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(swap_current_kprobe) = kcb->prev_kprobe.kp;
+ swap_kprobe_running_set(kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
}
struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(swap_current_kprobe) = p;
- DBPRINTF("set_current_kprobe: p=%p addr=%p\n", p, p->addr);
+ swap_kprobe_running_set(p);
}
static int kprobe_handler(struct pt_regs *regs)
if (!p->pre_handler || !p->pre_handler(p, regs)) {
kcb->kprobe_status = KPROBE_HIT_SS;
prepare_singlestep(p, regs);
- swap_reset_current_kprobe();
+ swap_kprobe_running_set(NULL);
}
}
} else {
*/
void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(swap_current_kprobe) = kcb->prev_kprobe.kp;
+ swap_kprobe_running_set(kcb->prev_kprobe.kp);
kcb->kprobe_status = kcb->prev_kprobe.status;
kcb->prev_kprobe.kp = NULL;
kcb->prev_kprobe.status = 0;
struct pt_regs *regs,
struct kprobe_ctlblk *kcb)
{
- __get_cpu_var(swap_current_kprobe) = p;
- DBPRINTF("set_current_kprobe[]: p=%p addr=%p\n", p, p->addr);
+ swap_kprobe_running_set(p);
kcb->kprobe_saved_eflags = kcb->kprobe_old_eflags =
(regs->EREG(flags) & (TF_MASK | IF_MASK));
if (is_IF_modifier(p->opcode))
#if !defined(CONFIG_PREEMPT) || defined(CONFIG_PM)
if (p->ainsn.boostable == 1 && !p->post_handler) {
/* Boost up -- we can execute copied instructions directly */
- swap_reset_current_kprobe();
+ swap_kprobe_running_set(NULL);
regs->ip = (unsigned long)p->ainsn.insn;
swap_preempt_enable_no_resched();
goto no_kprobe;
}
- p = __get_cpu_var(swap_current_kprobe);
+ p = swap_kprobe_running();
if (p->break_handler && p->break_handler(p, regs))
goto ss_probe;
restore_previous_kprobe(kcb);
goto out;
}
- swap_reset_current_kprobe();
+ swap_kprobe_running_set(NULL);
out:
swap_preempt_enable_no_resched();
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
- swap_reset_current_kprobe();
+ swap_kprobe_running_set(NULL);
swap_preempt_enable_no_resched();
break;
case KPROBE_HIT_ACTIVE:
/* jump to kjump_trampoline */
regs->ip = (unsigned long)&kjump_trampoline;
- swap_reset_current_kprobe();
+ swap_kprobe_running_set(NULL);
swap_preempt_enable_no_resched();
return 1;
#include <swap-asm/swap_kprobes.h>
#include "swap_ktd.h"
#include "swap_slots.h"
#include "swap_td_raw.h"
#include "swap_kdebug.h"
#include "swap_kprobes.h"
*/
struct slot_manager sm;
-DEFINE_PER_CPU(struct kprobe *, swap_current_kprobe);
-static DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
-
static DEFINE_SPINLOCK(kretprobe_lock); /* Protects kretprobe_inst_table */
-static DEFINE_PER_CPU(struct kprobe *, kprobe_instance);
struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
}
}
-/* We have preemption disabled.. so it is safe to use __ versions */
-static inline void set_kprobe_instance(struct kprobe *kp)
+struct kp_data {
+ struct kprobe *running;
+ struct kprobe *instance;
+ struct kprobe_ctlblk ctlblk;
+};
+
+static void ktd_cur_init(struct task_struct *task, void *data)
{
- __get_cpu_var(kprobe_instance) = kp;
+ struct kp_data *d = (struct kp_data *)data;
+
+ memset(d, 0, sizeof(*d));
}
-static inline void reset_kprobe_instance(void)
+static void ktd_cur_exit(struct task_struct *task, void *data)
{
- __get_cpu_var(kprobe_instance) = NULL;
+ struct kp_data *d = (struct kp_data *)data;
+
+ WARN(d->running, "running probe is not NULL");
+ WARN(d->instance, "instance probe is not NULL");
}
-/**
- * @brief Gets the current kprobe on this CPU.
- *
- * @return Pointer to the current kprobe.
- */
+struct ktask_data ktd_cur = {
+ .init = ktd_cur_init,
+ .exit = ktd_cur_exit,
+ .size = sizeof(struct kp_data),
+};
+
+static struct kp_data *kprobe_data(void)
+{
+ return (struct kp_data *)swap_ktd(&ktd_cur, current);
+}
+
+static int kprobe_cur_reg(void)
+{
+ return swap_ktd_reg(&ktd_cur);
+}
+
+static void kprobe_cur_unreg(void)
+{
+ swap_ktd_unreg(&ktd_cur);
+}
+
+
+static struct kprobe *kprobe_instance(void)
+{
+ return kprobe_data()->instance;
+}
+
+static void kprobe_instance_set(struct kprobe *p)
+{
+ kprobe_data()->instance = p;
+}
+
+
struct kprobe *swap_kprobe_running(void)
{
- return __get_cpu_var(swap_current_kprobe);
+ return kprobe_data()->running;
+}
+
+void swap_kprobe_running_set(struct kprobe *p)
+{
+ kprobe_data()->running = p;
}
/**
*/
void swap_reset_current_kprobe(void)
{
- __get_cpu_var(swap_current_kprobe) = NULL;
+ swap_kprobe_running_set(NULL);
}
/**
*/
struct kprobe_ctlblk *swap_get_kprobe_ctlblk(void)
{
- return &__get_cpu_var(kprobe_ctlblk);
+ return &kprobe_data()->ctlblk;
}
/*
list_for_each_entry_rcu(kp, &p->list, list) {
if (kp->pre_handler) {
- set_kprobe_instance(kp);
+ kprobe_instance_set(kp);
ret = kp->pre_handler(kp, regs);
if (ret)
return ret;
}
- reset_kprobe_instance();
+ kprobe_instance_set(NULL);
}
return 0;
list_for_each_entry_rcu(kp, &p->list, list) {
if (kp->post_handler) {
- set_kprobe_instance(kp);
+ kprobe_instance_set(kp);
kp->post_handler(kp, regs, flags);
- reset_kprobe_instance();
+ kprobe_instance_set(NULL);
}
}
}
struct pt_regs *regs,
int trapnr)
{
- struct kprobe *cur = __get_cpu_var(kprobe_instance);
+ struct kprobe *cur = kprobe_instance();
/*
* if we faulted "during" the execution of a user specified
static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
- struct kprobe *cur = __get_cpu_var(kprobe_instance);
+ struct kprobe *cur = kprobe_instance();
int ret = 0;
DBPRINTF("cur = 0x%p\n", cur);
if (cur)
if (cur->break_handler(cur, regs))
ret = 1;
}
- reset_kprobe_instance();
+ kprobe_instance_set(NULL);
return ret;
}
/* another task is sharing our hash bucket */
continue;
if (ri->rp && ri->rp->handler) {
- __get_cpu_var(swap_current_kprobe) = &ri->rp->kp;
+ swap_kprobe_running_set(&ri->rp->kp);
swap_get_kprobe_ctlblk()->kprobe_status =
KPROBE_HIT_ACTIVE;
ri->rp->handler(ri, regs);
- __get_cpu_var(swap_current_kprobe) = NULL;
+ swap_kprobe_running_set(NULL);
}
orig_ret_address = (unsigned long)ri->ret_addr;
if (kcb->kprobe_status == KPROBE_REENTER)
restore_previous_kprobe(kcb);
else
- swap_reset_current_kprobe();
+ swap_kprobe_running_set(NULL);
spin_unlock_irqrestore(&kretprobe_lock, flags);
swap_preempt_enable_no_resched();
spin_unlock_irqrestore(&kretprobe_lock, flags);
}
+/* Handler is called last because it is registered first */
static int put_task_handler(struct kprobe *p, struct pt_regs *regs)
{
struct task_struct *t = (struct task_struct *)swap_get_karg(regs, 0);
/* task has died */
krp_inst_flush(t);
+ swap_ktd_put_task(t);
return 0;
}
if (ret)
return ret;
- ret = swap_ktd_once();
- if (ret)
- return ret;
-
/*
* FIXME allocate the probe table, currently defined statically
* initialize all list heads
if (ret)
goto arch_kp_exit;
- ret = swap_register_kprobe(&put_task_kp);
+ ret = kprobe_cur_reg();
if (ret)
goto ktd_uninit;
+ ret = swap_register_kprobe(&put_task_kp);
+ if (ret)
+ goto cur_uninit;
+
return 0;
+cur_uninit:
+ kprobe_cur_unreg();
ktd_uninit:
- swap_ktd_uninit();
+ swap_ktd_uninit_top();
+ swap_ktd_uninit_bottom();
arch_kp_exit:
swap_arch_exit_kprobes();
td_raw_uninit:
static void exit_kprobes(void)
{
+ swap_ktd_uninit_top();
swap_unregister_kprobe(&put_task_kp);
- swap_ktd_uninit();
+ kprobe_cur_unreg();
+ swap_ktd_uninit_bottom();
swap_arch_exit_kprobes();
swap_td_raw_uninit();
exit_sm();
int trampoline_probe_handler (struct kprobe *p, struct pt_regs *regs);
-DECLARE_PER_CPU(struct kprobe *, swap_current_kprobe);
extern atomic_t kprobe_count;
extern unsigned long sched_addr;
struct kprobe *swap_kprobe_running(void);
+void swap_kprobe_running_set(struct kprobe *p);
void swap_reset_current_kprobe(void);
struct kprobe_ctlblk *swap_get_kprobe_ctlblk(void);
EXPORT_SYMBOL_GPL(swap_ktd_unreg);
-/*
- * void __put_task_struct(struct task_struct *tsk)
- */
-static int put_ts_handler(struct kprobe *p, struct pt_regs *regs)
+void swap_ktd_put_task(struct task_struct *task)
{
- struct task_struct *task;
-
- task = (struct task_struct *)swap_get_karg(regs, 0);
-
if (task_prepare_is(task))
td_prepare_clear(td_by_task(task), task);
-
- return 0;
-}
-
-static struct kprobe put_ts_kp = {
- .pre_handler = put_ts_handler
-};
-
-
-int swap_ktd_once(void)
-{
- const char *sym;
-
- sym = "__put_task_struct";
- put_ts_kp.addr = (void *)swap_ksyms(sym);
- if (put_ts_kp.addr == NULL)
- goto not_found;
-
- return 0;
-
-not_found:
- pr_err(KTD_PREFIX "ERROR: symbol %s(...) not found\n", sym);
- return -ESRCH;
}
int swap_ktd_init(void)
ret = swap_td_raw_reg(&td_raw, sizeof(struct td));
if (ret)
- goto fail;
+ pr_err(KTD_PREFIX "registration failed, ret=%d", ret);
- ret = swap_register_kprobe(&put_ts_kp);
- if (ret)
- goto td_raw_unreg;
+ return ret;
+}
- return 0;
+void swap_ktd_uninit_top(void)
+{
+ struct td *td;
+ unsigned long flags;
-td_raw_unreg:
- swap_td_raw_unreg(&td_raw);
-fail:
- pr_err(KTD_PREFIX "registration failed, ret=%d", ret);
- return ret;
+ /* get injected tasks */
+ write_lock_irqsave(&prepare_lock, flags);
+ list_for_each_entry(td, &prepare_list, list) {
+ get_task_struct(task_by_td(td));
+ }
+ write_unlock_irqrestore(&prepare_lock, flags);
}
-void swap_ktd_uninit(void)
+void swap_ktd_uninit_bottom(void)
{
struct td *td, *n;
unsigned long flags;
- /* remove td injection from tasks */
+ /* remove td injection from tasks and put tasks */
write_lock_irqsave(&prepare_lock, flags);
- list_for_each_entry_safe(td, n, &prepare_list, list)
- td_prepare_clear_no_lock(td, task_by_td(td));
+ list_for_each_entry_safe(td, n, &prepare_list, list) {
+ struct task_struct *task = task_by_td(td);
+
+ td_prepare_clear_no_lock(td, task);
+ put_task_struct(task);
+ }
write_unlock_irqrestore(&prepare_lock, flags);
- swap_unregister_kprobe(&put_ts_kp);
swap_td_raw_unreg(&td_raw);
WARN(preparing_cnt, KTD_PREFIX "preparing_cnt=%d", preparing_cnt);
void *swap_ktd(struct ktask_data *ktd, struct task_struct *task);
-int swap_ktd_once(void);
int swap_ktd_init(void);
-void swap_ktd_uninit(void);
+void swap_ktd_uninit_top(void);
+void swap_ktd_uninit_bottom(void);
+void swap_ktd_put_task(struct task_struct *task);
#endif /* _SWAP_TD_H */