kzfree(cc->cipher_auth);
kzfree(cc->authenc_key);
+ mutex_destroy(&cc->bio_alloc_lock);
+
/* Must zero key material before freeing */
kzfree(cc);
}
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
+ mutex_destroy(&dc->timer_lock);
+
kfree(dc);
}
* If this is the master job, the sub jobs have already
* completed so we can free everything.
*/
- if (job->master_job == job)
+ if (job->master_job == job) {
+ mutex_destroy(&job->lock);
mempool_free(job, kc->job_pool);
+ }
fn(read_err, write_err, context);
if (atomic_dec_and_test(&kc->nr_jobs))
* followed by SPLIT_COUNT sub jobs.
*/
job = mempool_alloc(kc->job_pool, GFP_NOIO);
+ mutex_init(&job->lock);
/*
* set up for the read.
if (job->source.count <= SUB_JOB_SIZE)
dispatch_job(job);
else {
- mutex_init(&job->lock);
job->progress = 0;
split_job(job);
}
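
The two dm_kcopyd_copy() hunks work together with the run_complete_job() change above: mutex_destroy() is now called for every master job, so job->lock must be initialised for every job taken from the pool, not only for jobs large enough to be split into sub jobs; hence mutex_init() moves from the split-only branch to just after mempool_alloc(). A sketch of the resulting lifecycle, with everything except mutex_init/mutex_destroy/mempool_* reduced to illustrative stand-ins:

#include <linux/mutex.h>
#include <linux/mempool.h>

/* Illustrative stand-in for struct kcopyd_job. */
struct demo_job {
	struct mutex lock;
	/* ... source/dest regions, progress, ... */
};

static struct demo_job *demo_job_alloc(mempool_t *pool)
{
	struct demo_job *job = mempool_alloc(pool, GFP_NOIO);

	/* Initialise for every job, whether or not it is later split. */
	mutex_init(&job->lock);
	return job;
}

static void demo_job_complete(struct demo_job *job, mempool_t *pool)
{
	/* Safe unconditionally: the lock is always initialised at alloc time. */
	mutex_destroy(&job->lock);
	mempool_free(job, pool);
}
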
kfree(m->hw_handler_name);
kfree(m->hw_handler_params);
+ mutex_destroy(&m->work_mutex);
kfree(m);
}
dm_stat_free(&s->rcu_head);
}
free_percpu(stats->last);
+ mutex_destroy(&stats->mutex);
}
static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}
+static void pool_table_exit(void)
+{
+ mutex_destroy(&dm_thin_pool_table.mutex);
+}
+
static void __pool_table_insert(struct pool *pool)
{
BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
dm_unregister_target(&pool_target);
kmem_cache_destroy(_new_mapping_cache);
+
+ pool_table_exit();
}
module_init(dm_thin_init);
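
dm_thin_pool_table.mutex lives for the whole lifetime of the module, so the mutex_init() done in pool_table_init() from dm_thin_init() is paired with the new pool_table_exit() called from dm_thin_exit(). A stripped-down sketch of that module-scope pairing, using hypothetical names:

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/list.h>

/* Hypothetical module-scope table, mirroring dm_thin_pool_table. */
static struct {
	struct mutex mutex;
	struct list_head entries;
} demo_table;

static int __init demo_init(void)
{
	mutex_init(&demo_table.mutex);
	INIT_LIST_HEAD(&demo_table.entries);
	return 0;
}

static void __exit demo_exit(void)
{
	/* The lock dies with the module: destroy it on module unload. */
	mutex_destroy(&demo_table.mutex);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
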
/* Free the zone descriptors */
dmz_drop_zones(zmd);
+
+ mutex_destroy(&zmd->mblk_flush_lock);
+ mutex_destroy(&zmd->map_lock);
}
/*
err_cwq:
destroy_workqueue(dmz->chunk_wq);
err_bio:
+ mutex_destroy(&dmz->chunk_lock);
bioset_free(dmz->bio_set);
err_meta:
dmz_dtr_metadata(dmz->metadata);
dmz_put_zoned_device(ti);
+ mutex_destroy(&dmz->chunk_lock);
+
kfree(dmz);
}
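
In a constructor with a goto-based unwind ladder like dmz_ctr(), placement matters: the usual rule is that mutex_destroy() sits under a label that can only be reached after the corresponding mutex_init() has run, so that earlier failures never destroy an uninitialised lock. A generic sketch of that shape (the names here are illustrative, not dm-zoned's):

#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical context with a lock initialised partway through setup. */
struct demo_ctx {
	struct mutex lock;
	void *early_buf;
	void *late_buf;
};

static struct demo_ctx *demo_ctr(void)
{
	struct demo_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;

	ctx->early_buf = kmalloc(64, GFP_KERNEL);
	if (!ctx->early_buf)
		goto err_ctx;			/* reached before mutex_init() */

	mutex_init(&ctx->lock);

	ctx->late_buf = kmalloc(64, GFP_KERNEL);
	if (!ctx->late_buf)
		goto err_lock;			/* only reachable after mutex_init() */

	return ctx;

err_lock:
	mutex_destroy(&ctx->lock);		/* never runs for earlier failures */
	kfree(ctx->early_buf);
err_ctx:
	kfree(ctx);
	return NULL;
}
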
md->bdev = NULL;
}
+ mutex_destroy(&md->suspend_lock);
+ mutex_destroy(&md->type_lock);
+ mutex_destroy(&md->table_devices_lock);
+
dm_mq_cleanup_mapped_device(md);
}
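
None of these destroys change behaviour on a production build; mutex_destroy() only has teeth with CONFIG_DEBUG_MUTEXES=y, where it marks the lock as destroyed so that a later lock or unlock through the freed structure should be reported by the mutex debug checks instead of silently appearing to work. A tiny sketch of what that is meant to catch (demo_lock is a hypothetical static lock, not anything in dm):

#include <linux/mutex.h>

static struct mutex demo_lock;

static void demo(void)
{
	mutex_init(&demo_lock);
	mutex_destroy(&demo_lock);

	/*
	 * With CONFIG_DEBUG_MUTEXES=y this use-after-destroy is expected to
	 * trip the mutex debug checks; without it, mutex_destroy() is a
	 * no-op and the bug stays invisible.
	 */
	mutex_lock(&demo_lock);
	mutex_unlock(&demo_lock);
}
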