static struct td_raw kp_tdraw;
-static DEFINE_PER_CPU(struct pt_regs, per_cpu_regs);
+static DEFINE_PER_CPU(struct pt_regs, per_cpu_regs_i);
+static DEFINE_PER_CPU(struct pt_regs, per_cpu_regs_st);
static struct pt_regs *current_regs(void)
{
- if (able2resched())
- return (struct pt_regs *)swap_td_raw(&kp_tdraw, current);
- return &__get_cpu_var(per_cpu_regs);
+ if (in_interrupt())
+ return &__get_cpu_var(per_cpu_regs_i);
+ else if (switch_to_bits_get(current_kctx, SWITCH_TO_ALL))
+ return &__get_cpu_var(per_cpu_regs_st);
+ return (struct pt_regs *)swap_td_raw(&kp_tdraw, current);
}
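
current_regs() now selects the register-save slot by execution context: interrupt
context gets its own per-CPU slot, a task flagged as inside __switch_to handling
(any SWITCH_TO_* bit set) gets a second per-CPU slot, and only plain task context
falls back to the per-task storage behind swap_td_raw(). A minimal caller sketch,
with hypothetical save/restore helpers around single-stepping:

    /* Hypothetical helpers: copy the trapped registers into whatever slot
     * current_regs() resolves to for this context, and back out again. */
    static void save_current_regs(struct pt_regs *regs)
    {
            *current_regs() = *regs;        /* struct copy into the slot */
    }

    static void restore_current_regs(struct pt_regs *regs)
    {
            *regs = *current_regs();
    }
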
struct kp_core *p;
struct kp_core_ctlblk *kcb;
unsigned long addr = regs->ip - 1;
+ struct kctx *ctx = current_kctx;
+
+ if (addr == sched_addr)
+ switch_to_bits_set(ctx, SWITCH_TO_KP);
kcb = kp_core_ctlblk();
kp_core_get(p);
rcu_read_unlock();
- if (able2resched()) {
+ if (able2resched(ctx)) {
ret = kprobe_pre_handler(p, regs, kcb);
if (ret == KSTAT_PREPARE_KCB) {
/* save regs to stack */
if (ret == KSTAT_PREPARE_KCB) {
int rr = p->handlers.pre(p, regs);
if (rr) {
+ switch_to_bits_reset(ctx, SWITCH_TO_KP);
kp_core_put(p);
return 1;
}
* If TF is enabled, processing of the instruction
* takes place in two stages.
*/
- if (regs->flags & TF_MASK)
+ if (regs->flags & TF_MASK) {
preempt_disable();
- else
+ } else {
+ switch_to_bits_reset(ctx, SWITCH_TO_KP);
kp_core_put(p);
+ }
}
return !!ret;
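
When TF is already set, the probed instruction still has to be single-stepped, so
the handler keeps preemption disabled and leaves the kp_core reference (and the
SWITCH_TO_KP bit) for post_kprobe_handler() to drop; otherwise handling is
complete and both are released on the spot. A sketch of stage one, modeled on
mainline x86 kprobes (the ainsn.insn field is an assumption borrowed from
mainline, not necessarily SWAP's layout):

    /* Point the task at the copied instruction and arm the trap flag so the
     * debug exception lands in post_kprobe_handler() (stage two). */
    static void prepare_singlestep_sketch(struct kp_core *p, struct pt_regs *regs)
    {
            regs->flags |= TF_MASK;                   /* trap after one insn */
            regs->ip = (unsigned long)p->ainsn.insn;  /* copied instruction */
    }
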
*/
static int post_kprobe_handler(struct pt_regs *regs)
{
+ struct kctx *ctx = current_kctx;
struct kp_core *cur = kp_core_running();
struct kp_core_ctlblk *kcb = kp_core_ctlblk();
kp_core_running_set(NULL);
out:
- kp_core_put(cur);
- if (!able2resched())
+ if (!able2resched(ctx))
swap_preempt_enable_no_resched();
+ switch_to_bits_reset(ctx, SWITCH_TO_KP);
+ kp_core_put(cur);
+
/*
* if somebody else is singlestepping across a probe point, eflags
* will have TF set, in which case, continue the remaining processing
/* for __switch_to probe */
if ((unsigned long)ri->rp->kp.addr == sched_addr) {
+ struct task_struct *next = (struct task_struct *)swap_get_karg(regs, 1);
ri->sp = NULL;
- ri->task = (struct task_struct *)regs->dx;
+ ri->task = next;
+ switch_to_bits_set(kctx_by_task(next), SWITCH_TO_RP);
} else {
ri->sp = ptr_ret_addr;
}
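
Fetching next through swap_get_karg() drops the hardcoded assumption that the
second argument of __switch_to(prev, next) lives in dx, which is only true on
x86-32 built with -mregparm=3 (arguments 0..2 in ax, dx, cx). Below is an
illustration of what such an accessor reduces to there; the real swap_get_karg()
is SWAP's arch abstraction and also covers other calling conventions:

    /* Illustration only: kernel-function argument lookup under -mregparm=3. */
    static unsigned long karg_x86_32(struct pt_regs *regs, unsigned long n)
    {
            switch (n) {
            case 0: return regs->ax;
            case 1: return regs->dx;   /* 'next' in __switch_to(prev, next) */
            case 2: return regs->cx;
            default: return 0;         /* remaining args spill to the stack */
            }
    }
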
struct kp_core_ctlblk ctlblk;
};
+struct kctx {
+ struct kpc_data kpc;
+ unsigned long st_flags;
+};
+
static void ktd_cur_init(struct task_struct *task, void *data)
{
- struct kpc_data *d = (struct kpc_data *)data;
- memset(d, 0, sizeof(*d));
+ struct kctx *ctx = (struct kctx *)data;
+ memset(ctx, 0, sizeof(*ctx));
}
static void ktd_cur_exit(struct task_struct *task, void *data)
{
- struct kpc_data *d = (struct kpc_data *)data;
- WARN(d->running, "running probe is not NULL");
+ struct kctx *ctx = (struct kctx *)data;
+ WARN(ctx->kpc.running, "running=%p\n", ctx->kpc.running);
}
struct ktask_data ktd_cur = {
.init = ktd_cur_init,
.exit = ktd_cur_exit,
- .size = sizeof(struct kpc_data),
+ .size = sizeof(struct kctx),
};
-static DEFINE_PER_CPU(struct kpc_data, per_cpu_kpc_data);
+struct kctx *kctx_by_task(struct task_struct *task)
+{
+ return (struct kctx *)swap_ktd(&ktd_cur, task);
+}
+
+void switch_to_bits_set(struct kctx *ctx, unsigned long mask)
+{
+ ctx->st_flags |= mask;
+}
+
+void switch_to_bits_reset(struct kctx *ctx, unsigned long mask)
+{
+ ctx->st_flags &= ~mask;
+}
+
+unsigned long switch_to_bits_get(struct kctx *ctx, unsigned long mask)
+{
+ return ctx->st_flags & mask;
+}
+
+static DEFINE_PER_CPU(struct kpc_data, per_cpu_kpc_data_i);
+static DEFINE_PER_CPU(struct kpc_data, per_cpu_kpc_data_st);
static struct kpc_data *kp_core_data(void)
{
- if (able2resched())
- return (struct kpc_data *)swap_ktd(&ktd_cur, current);
- return &__get_cpu_var(per_cpu_kpc_data);
+ struct kctx *ctx = current_kctx;
+
+ if (in_interrupt())
+ return &__get_cpu_var(per_cpu_kpc_data_i);
+ else if (switch_to_bits_get(ctx, SWITCH_TO_ALL))
+ return &__get_cpu_var(per_cpu_kpc_data_st);
+ return &ctx->kpc;
}
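
Note that switch_to_bits_set()/reset() are plain, non-atomic read-modify-write
operations on st_flags; this appears to rely on the callers' existing
serialization (the owning task's context, or kretprobe_lock on the SWITCH_TO_RP
paths) rather than on the helpers themselves. A usage sketch with a hypothetical
caller:

    /* Hypothetical caller: mark a task as being handled across __switch_to,
     * then clear the mark once the probe work is done. */
    static void mark_switch_to(struct task_struct *task)
    {
            struct kctx *ctx = kctx_by_task(task);

            switch_to_bits_set(ctx, SWITCH_TO_KP);
            /* ... kp_core_data()/current_regs() now resolve to the per-CPU
             * *_st slots instead of this task's own storage ... */
            switch_to_bits_reset(ctx, SWITCH_TO_KP);
    }
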
static int kprobe_cur_reg(void)
else
kp_core_running_set(NULL);
+ switch_to_bits_reset(current_kctx, SWITCH_TO_RP);
spin_unlock_irqrestore(&kretprobe_lock, flags);
/*
struct kp_core_ctlblk *kp_core_ctlblk(void);
-static inline int able2resched(void)
+struct kctx;
+
+/* for __switch_to support */
+#define SWITCH_TO_KP 0b0001
+#define SWITCH_TO_RP 0b0010
+#define SWITCH_TO_ALL (SWITCH_TO_KP | SWITCH_TO_RP)
+
+#define current_kctx kctx_by_task(current)
+struct kctx *kctx_by_task(struct task_struct *task);
+
+void switch_to_bits_set(struct kctx *ctx, unsigned long mask);
+void switch_to_bits_reset(struct kctx *ctx, unsigned long mask);
+unsigned long switch_to_bits_get(struct kctx *ctx, unsigned long mask);
+
+static inline int able2resched(struct kctx *ctx)
{
- if (in_interrupt())
+ if (in_interrupt() || switch_to_bits_get(ctx, SWITCH_TO_ALL))
return 0;
return 1;
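
able2resched() thus reports "cannot reschedule" both for interrupt context and
while any SWITCH_TO_* bit is set, since a task caught inside __switch_to must be
treated as atomic even though in_interrupt() is false there. A caller sketch
under that assumption:

    /* Sketch: branch on able2resched() the way the handlers above do. */
    static void handler_sketch(void)
    {
            struct kctx *ctx = current_kctx;

            if (able2resched(ctx)) {
                    /* plain task context: per-task data, preemption allowed */
            } else {
                    /* interrupt or __switch_to path: per-CPU data only */
                    preempt_disable();
                    /* ... probe work ... */
                    swap_preempt_enable_no_resched();
            }
    }
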