 	unsigned long srcu_gp_seq_needed_exp;	/* Furthest future exp GP. */
 	unsigned long srcu_last_gp_end;		/* Last GP end timestamp (ns) */
 	struct srcu_data __percpu *sda;		/* Per-CPU srcu_data array. */
+	bool sda_is_static;			/* May ->sda be passed to free_percpu()? */
 	unsigned long srcu_barrier_seq;		/* srcu_barrier seq #. */
 	struct mutex srcu_barrier_mutex;	/* Serialize barrier ops. */
 	struct completion srcu_barrier_completion;
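
The hunk above adds the flag to struct srcu_struct in include/linux/srcutree.h, right next to the ->sda pointer it describes. The flag is needed because an srcu_struct comes into existence in one of two ways, and only one of them obtains ->sda from alloc_percpu(). A minimal sketch of the two paths (hypothetical module code, not part of the patch):

	#include <linux/init.h>
	#include <linux/srcu.h>

	/* Static: ->sda points at compile-time per-CPU data, which
	 * must never be handed to free_percpu(). */
	DEFINE_STATIC_SRCU(my_static_srcu);

	/* Dynamic: init_srcu_struct() allocates ->sda with alloc_percpu(). */
	static struct srcu_struct my_dynamic_srcu;

	static int __init my_init(void)
	{
		return init_srcu_struct(&my_dynamic_srcu);
	}

The next hunk, in init_srcu_struct_fields() in kernel/rcu/srcutree.c, records which of the two paths was taken:
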
 	mutex_init(&ssp->srcu_barrier_mutex);
 	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
 	INIT_DELAYED_WORK(&ssp->work, process_srcu);
+	ssp->sda_is_static = is_static;
 	if (!is_static)
 		ssp->sda = alloc_percpu(struct srcu_data);
 	if (!ssp->sda)
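
init_srcu_struct_fields() latches its is_static argument into the structure itself before the conditional alloc_percpu(), so that every later disposal site can decide from the srcu_struct alone; cleanup_srcu_struct() in particular has no is_static parameter to consult. The invariant this enables, expressed as a hypothetical helper (not in the patch):

	#include <linux/percpu.h>
	#include <linux/srcu.h>

	/* Free ->sda only if it came from alloc_percpu(). */
	static void srcu_free_sda_if_dynamic(struct srcu_struct *ssp)
	{
		if (ssp->sda_is_static)
			return;	/* Compile-time per-CPU data: leave it alone. */
		free_percpu(ssp->sda);
		ssp->sda = NULL;
	}

Both remaining hunks are open-coded instances of exactly this check. The first is the error path taken when the srcu_node tree cannot be allocated:
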
 	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
 	if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && convert_to_big == 1) {
 		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
-			if (!is_static) {
+			if (!ssp->sda_is_static) {
 				free_percpu(ssp->sda);
 				ssp->sda = NULL;
 				return -ENOMEM;
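
The hunk's trailing context stops inside the nested if, but the shape of the surrounding logic is worth spelling out: allocation failure is reported to the caller only when ->sda was dynamic; a statically defined srcu_struct has no way to report failure, so it keeps its compile-time ->sda and simply stays in SRCU_SIZE_SMALL mode. A condensed paraphrase (assuming the function continues as in the mainline source, not a verbatim quote):

	if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
		if (!ssp->sda_is_static) {
			free_percpu(ssp->sda);
			ssp->sda = NULL;
			return -ENOMEM;	/* Caller can observe the failure. */
		}
		/* Static srcu_struct: soldier on in SRCU_SIZE_SMALL mode. */
	}

Finally, cleanup_srcu_struct() gets the same guard:
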
 			rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
 		return; /* Caller forgot to stop doing call_srcu()? */
 	}
-	free_percpu(ssp->sda);
-	ssp->sda = NULL;
+	if (!ssp->sda_is_static) {
+		free_percpu(ssp->sda);
+		ssp->sda = NULL;
+	}
 	kfree(ssp->node);
 	ssp->node = NULL;
 	ssp->srcu_size_state = SRCU_SIZE_SMALL;
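
With this last hunk, only a dynamically allocated ->sda ever reaches free_percpu(); the kfree(ssp->node) that follows stays unconditional because the srcu_node array is always dynamically allocated, and kfree(NULL) is a no-op when it never was. A usage sketch building on the earlier hypothetical module (assuming both structures were initialized and used):

	static void __exit my_exit(void)
	{
		/* ->sda_is_static is false: ->sda is freed. */
		cleanup_srcu_struct(&my_dynamic_srcu);
		/* ->sda_is_static is true: ->sda is left intact. */
		cleanup_srcu_struct(&my_static_srcu);
	}
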