         return 0;
 }

-struct sched_in_data {
-        struct perf_event_context *ctx;
-        struct perf_cpu_context *cpuctx;
-        int can_add_hw;
-};
-
 static int merge_sched_in(struct perf_event *event, void *data)
 {
-        struct sched_in_data *sid = data;
-
-        WARN_ON_ONCE(event->ctx != sid->ctx);
+        struct perf_event_context *ctx = event->ctx;
+        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+        int *can_add_hw = data;

         if (event->state <= PERF_EVENT_STATE_OFF)
                 return 0;

         if (!event_filter_match(event))
                 return 0;

-        if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-                if (!group_sched_in(event, sid->cpuctx, sid->ctx))
+        if (group_can_go_on(event, cpuctx, *can_add_hw)) {
+                if (!group_sched_in(event, cpuctx, ctx))
                         list_add_tail(&event->active_list, get_event_list(event));
         }

         if (event->state == PERF_EVENT_STATE_INACTIVE) {
                 if (event->attr.pinned)
                         perf_event_set_state(event, PERF_EVENT_STATE_ERROR);

-                sid->can_add_hw = 0;
-                sid->ctx->rotate_necessary = 1;
+                *can_add_hw = 0;
+                ctx->rotate_necessary = 1;
         }

         return 0;
 }

 static void
 ctx_pinned_sched_in(struct perf_event_context *ctx,
                     struct perf_cpu_context *cpuctx)
 {
-        struct sched_in_data sid = {
-                .ctx = ctx,
-                .cpuctx = cpuctx,
-                .can_add_hw = 1,
-        };
+        int can_add_hw = 1;

         visit_groups_merge(&ctx->pinned_groups,
                            smp_processor_id(),
-                           merge_sched_in, &sid);
+                           merge_sched_in, &can_add_hw);
 }

 static void
 ctx_flexible_sched_in(struct perf_event_context *ctx,
                       struct perf_cpu_context *cpuctx)
 {
-        struct sched_in_data sid = {
-                .ctx = ctx,
-                .cpuctx = cpuctx,
-                .can_add_hw = 1,
-        };
+        int can_add_hw = 1;

         visit_groups_merge(&ctx->flexible_groups,
                            smp_processor_id(),
-                           merge_sched_in, &sid);
+                           merge_sched_in, &can_add_hw);
 }

 static void
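
For reference, here is the shape of this refactoring in isolation: a minimal,
self-contained sketch, not kernel code. The item struct and the
visit_items()/try_add() names are hypothetical stand-ins for the visitor and
callback. The point mirrors the patch: the callback derives its context from
the object it is handed, so the void *data cookie shrinks from a multi-field
struct to a single int * flag.

/* sketch.c -- illustrative stand-in, not the kernel's API */
#include <stdio.h>

struct item {
        int id;
        int hw_cost;            /* pretend a cost > 1 exhausts the hardware */
};

/* Generic visitor: forwards an opaque cookie to the callback. */
static void visit_items(struct item *items, int n,
                        void (*fn)(struct item *, void *), void *data)
{
        for (int i = 0; i < n; i++)
                fn(&items[i], data);
}

/*
 * Callback in the post-patch style: everything else is derived from the
 * item itself, so the cookie is just the one mutable flag.
 */
static void try_add(struct item *it, void *data)
{
        int *can_add_hw = data;

        if (!*can_add_hw)
                return;

        printf("added item %d\n", it->id);
        if (it->hw_cost > 1)
                *can_add_hw = 0;        /* like *can_add_hw = 0 in merge_sched_in() */
}

int main(void)
{
        struct item items[] = { {1, 1}, {2, 2}, {3, 1} };
        int can_add_hw = 1;             /* like ctx_pinned_sched_in()'s local */

        visit_items(items, 3, try_add, &can_add_hw);
        return 0;
}

Because the flag lives on the caller's stack and is threaded through by
pointer, each traversal gets its own independent budget, which is exactly what
the per-call can_add_hw local in ctx_pinned_sched_in() and
ctx_flexible_sched_in() provides.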