From: Namhyung Kim
Date: Wed, 18 Jan 2023 06:05:52 +0000 (-0800)
Subject: perf/core: Save the dynamic parts of sample data size
X-Git-Tag: v6.6.7~3527^2~15
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4cf7a136115e96241f9f1089d2b53c47accf3823;p=platform%2Fkernel%2Flinux-starfive.git

perf/core: Save the dynamic parts of sample data size

The perf sample data can be divided into two parts: event->header_size
and event->id_header_size hold the static part of the sample size,
which is determined by the sample_type flags alone.  Other parts such
as CALLCHAIN and BRANCH_STACK change dynamically, so their size can
only be determined by looking at the actual data.

In preparation for handling repeated calls to perf_prepare_sample(),
save the dynamic size in the perf sample data to avoid doing the work
twice.

Signed-off-by: Namhyung Kim
Signed-off-by: Ingo Molnar
Tested-by: Jiri Olsa
Acked-by: Jiri Olsa
Acked-by: Song Liu
Acked-by: Peter Zijlstra
Link: https://lore.kernel.org/r/20230118060559.615653-2-namhyung@kernel.org
---

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 03949d0..16b9800 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1103,6 +1103,7 @@ struct perf_sample_data {
 	 */
 	u64				sample_flags;
 	u64				period;
+	u64				dyn_size;
 
 	/*
 	 * Fields commonly set by __perf_event_header__init_id(),
@@ -1158,6 +1159,7 @@ static inline void perf_sample_data_init(struct perf_sample_data *data,
 	/* remaining struct members initialized in perf_prepare_sample() */
 	data->sample_flags = PERF_SAMPLE_PERIOD;
 	data->period = period;
+	data->dyn_size = 0;
 
 	if (addr) {
 		data->addr = addr;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d56328e..827082d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7586,7 +7586,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 
 		size += data->callchain->nr;
 
-		header->size += size * sizeof(u64);
+		data->dyn_size += size * sizeof(u64);
 	}
 
 	if (sample_type & PERF_SAMPLE_RAW) {
@@ -7612,7 +7612,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 			data->raw = NULL;
 		}
 
-		header->size += size;
+		data->dyn_size += size;
 	}
 
 	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
@@ -7624,7 +7624,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 			size += data->br_stack->nr
 			      * sizeof(struct perf_branch_entry);
 		}
-		header->size += size;
+		data->dyn_size += size;
 	}
 
 	if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
@@ -7639,7 +7639,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 			size += hweight64(mask) * sizeof(u64);
 		}
 
-		header->size += size;
+		data->dyn_size += size;
 	}
 
 	if (sample_type & PERF_SAMPLE_STACK_USER) {
@@ -7664,7 +7664,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 			size += sizeof(u64) + stack_size;
 
 		data->stack_user_size = stack_size;
-		header->size += size;
+		data->dyn_size += size;
 	}
 
 	if (filtered_sample_type & PERF_SAMPLE_WEIGHT_TYPE)
@@ -7693,7 +7693,7 @@ void perf_prepare_sample(struct perf_event_header *header,
 			size += hweight64(mask) * sizeof(u64);
 		}
 
-		header->size += size;
+		data->dyn_size += size;
 	}
 
 	if (sample_type & PERF_SAMPLE_PHYS_ADDR &&
@@ -7738,8 +7738,11 @@ void perf_prepare_sample(struct perf_event_header *header,
 		size = perf_prepare_sample_aux(event, data, size);
 
 		WARN_ON_ONCE(size + header->size > U16_MAX);
-		header->size += size;
+		data->dyn_size += size + sizeof(u64); /* size above */
 	}
+
+	header->size += data->dyn_size;
+
 	/*
 	 * If you're adding more sample types here, you likely need to do
 	 * something about the overflowing header::size, like repurpose the
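
To illustrate the bookkeeping the patch introduces, here is a minimal user-space C
sketch, not kernel code: sample_data, sample_header, prepare_sample(), the
"prepared" flag, callchain_nr/branch_nr and the 24-byte branch-entry size are all
made-up stand-ins for perf_sample_data, perf_event_header, perf_prepare_sample()
and struct perf_branch_entry. The point is only the pattern: the variable-length
part of the sample size is accumulated once into dyn_size, and each prepare call
then just adds the cached value to the static header size.

#include <stdint.h>
#include <stdio.h>

struct sample_data {            /* stand-in for struct perf_sample_data */
	uint64_t dyn_size;      /* dynamic part of the sample size */
	int prepared;           /* hypothetical "already computed" flag */
	uint64_t callchain_nr;  /* pretend callchain length */
	uint64_t branch_nr;     /* pretend branch-stack length */
};

struct sample_header {          /* stand-in for struct perf_event_header */
	uint16_t size;
};

static void prepare_sample(struct sample_header *hdr, struct sample_data *d,
			   uint16_t static_size)
{
	hdr->size = static_size;

	if (!d->prepared) {
		/* callchain: nr plus one u64 per entry */
		d->dyn_size += (1 + d->callchain_nr) * sizeof(uint64_t);
		/* branch stack: nr plus entries (entry size chosen arbitrarily) */
		d->dyn_size += sizeof(uint64_t) + d->branch_nr * 24;
		d->prepared = 1;
	}

	/* static part plus the cached dynamic part, as the patch does at the end */
	hdr->size += d->dyn_size;
}

int main(void)
{
	struct sample_data d = { .callchain_nr = 4, .branch_nr = 2 };
	struct sample_header h;

	prepare_sample(&h, &d, 64);     /* first call computes dyn_size */
	prepare_sample(&h, &d, 64);     /* second call reuses the cached value */
	printf("header size = %u, dyn_size = %llu\n",
	       (unsigned)h.size, (unsigned long long)d.dyn_size);
	return 0;
}

Note that this patch itself adds no "already prepared" flag; it only moves the
accumulation of the dynamic sizes into data->dyn_size and adds that total to
header->size in one place at the end, which is what lets a later caller reuse
the saved value instead of re-walking the data.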