struct dm_bio_prison {
spinlock_t lock;
- mempool_t cell_pool;
struct rb_root cells;
+ mempool_t cell_pool;
};
static struct kmem_cache *_cell_cache;
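For orientation, a minimal sketch of how these three fields are typically set up, assuming the embedded cell_pool is backed by the _cell_cache slab shown above; the helper name is invented and this is not the actual dm_bio_prison_create():

#include <linux/mempool.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical helper: allocate a prison and initialise its members. */
static struct dm_bio_prison *prison_alloc_sketch(unsigned int min_cells)
{
	struct dm_bio_prison *prison = kzalloc(sizeof(*prison), GFP_KERNEL);

	if (!prison)
		return NULL;

	spin_lock_init(&prison->lock);

	/* Embedded mempool handing out cells from the _cell_cache slab. */
	if (mempool_init_slab_pool(&prison->cell_pool, min_cells, _cell_cache)) {
		kfree(prison);
		return NULL;
	}

	prison->cells = RB_ROOT;	/* empty rb-tree of held cells */
	return prison;
}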
struct workqueue_struct *wq;
spinlock_t lock;
- mempool_t cell_pool;
struct rb_root cells;
+ mempool_t cell_pool;
};
static struct kmem_cache *_cell_cache;
struct cache {
struct dm_target *ti;
- struct dm_target_callbacks callbacks;
+ spinlock_t lock;
+
+ /*
+ * Fields for converting from sectors to blocks.
+ */
+ int sectors_per_block_shift;
+ sector_t sectors_per_block;
struct dm_cache_metadata *cmd;
dm_cblock_t cache_size;
/*
- * Fields for converting from sectors to blocks.
+ * Invalidation fields.
*/
- sector_t sectors_per_block;
- int sectors_per_block_shift;
+ spinlock_t invalidation_lock;
+ struct list_head invalidation_requests;
- spinlock_t lock;
- struct bio_list deferred_bios;
sector_t migration_threshold;
wait_queue_head_t migration_wait;
atomic_t nr_allocated_migrations;
atomic_t nr_io_migrations;
+ struct bio_list deferred_bios;
+
struct rw_semaphore quiesce_lock;
- /*
- * cache_size entries, dirty if set
- */
- atomic_t nr_dirty;
- unsigned long *dirty_bitset;
+ struct dm_target_callbacks callbacks;
/*
 * origin_blocks entries, discarded if set.
 */
const char **ctr_args;
struct dm_kcopyd_client *copier;
- struct workqueue_struct *wq;
struct work_struct deferred_bio_worker;
struct work_struct migration_worker;
+ struct workqueue_struct *wq;
struct delayed_work waker;
struct dm_bio_prison_v2 *prison;
- struct bio_set bs;
- mempool_t migration_pool;
+ /*
+ * cache_size entries, dirty if set
+ */
+ unsigned long *dirty_bitset;
+ atomic_t nr_dirty;
- struct dm_cache_policy *policy;
unsigned policy_nr_args;
+ struct dm_cache_policy *policy;
+
+ /*
+ * Cache features such as write-through.
+ */
+ struct cache_features features;
+
+ struct cache_stats stats;
bool need_tick_bio:1;
bool sized:1;
bool loaded_mappings:1;
bool loaded_discards:1;
- /*
- * Cache features such as write-through.
- */
- struct cache_features features;
-
- struct cache_stats stats;
+ struct rw_semaphore background_work_lock;
- /*
- * Invalidation fields.
- */
- spinlock_t invalidation_lock;
- struct list_head invalidation_requests;
+ struct batcher committer;
+ struct work_struct commit_ws;
struct io_tracker tracker;
- struct work_struct commit_ws;
- struct batcher committer;
+ mempool_t migration_pool;
- struct rw_semaphore background_work_lock;
+ struct bio_set bs;
};
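To see why lock, deferred_bios, wq and deferred_bio_worker now sit in the same neighbourhood: the deferred-bio path touches exactly that group of fields. A minimal sketch using only fields from the struct above (the helper itself is hypothetical, not dm-cache's actual code):

#include <linux/bio.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Hypothetical: park a bio for later processing and kick the worker. */
static void cache_defer_bio_sketch(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	queue_work(cache->wq, &cache->deferred_bio_worker);
}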
struct per_bio_data {
struct mapped_device {
struct mutex suspend_lock;
+ struct mutex table_devices_lock;
+ struct list_head table_devices;
+
/*
* The current mapping (struct dm_table *).
 * Use dm_get_live_table{_fast} or take suspend_lock for
 * dereference.
 */
void __rcu *map;
- struct list_head table_devices;
- struct mutex table_devices_lock;
-
unsigned long flags;
- struct request_queue *queue;
- int numa_node_id;
-
- enum dm_queue_mode type;
/* Protect queue and type against concurrent access. */
struct mutex type_lock;
+ enum dm_queue_mode type;
+
+ int numa_node_id;
+ struct request_queue *queue;
atomic_t holders;
atomic_t open_count;
struct dm_target *immutable_target;
struct target_type *immutable_target_type;
+ char name[16];
struct gendisk *disk;
struct dax_device *dax_dev;
- char name[16];
-
- void *interface_ptr;
/*
* A list of ios that arrived while we were suspended.
*/
- atomic_t pending[2];
- wait_queue_head_t wait;
struct work_struct work;
+ wait_queue_head_t wait;
+ atomic_t pending[2];
spinlock_t deferred_lock;
struct bio_list deferred;
+ void *interface_ptr;
+
/*
* Event handling.
*/
unsigned internal_suspend_count;
/*
- * Processing queue (flush)
- */
- struct workqueue_struct *wq;
-
- /*
* io objects are allocated from here.
*/
struct bio_set io_bs;
struct bio_set bs;
/*
+ * Processing queue (flush)
+ */
+ struct workqueue_struct *wq;
+
+ /*
* freeze/thaw support require holding onto a super block
*/
struct super_block *frozen_sb;
/* forced geometry settings */
struct hd_geometry geometry;
- struct block_device *bdev;
-
/* kobject and completion */
struct dm_kobject_holder kobj_holder;
+ struct block_device *bdev;
+
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
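One way to read the type/type_lock/queue grouping in mapped_device above: the queue mode is only inspected or changed under type_lock. A hedged sketch of that pattern (the helper name is invented; dm.c has its own accessors):

#include <linux/errno.h>
#include <linux/mutex.h>

/* Hypothetical: choose the queue mode once, under the protecting mutex. */
static int md_set_type_sketch(struct mapped_device *md, enum dm_queue_mode type)
{
	int r = 0;

	mutex_lock(&md->type_lock);
	if (md->type == DM_TYPE_NONE)
		md->type = type;
	else if (md->type != type)
		r = -EINVAL;	/* a different mode was already chosen */
	mutex_unlock(&md->type_lock);

	return r;
}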
struct dm_dev *dev;
sector_t start;
- /*
- * pool for per bio private data, crypto requests,
- * encryption requeusts/buffer pages and integrity tags
- */
- mempool_t req_pool;
- mempool_t page_pool;
- mempool_t tag_pool;
- unsigned tag_pool_max_sectors;
-
struct percpu_counter n_allocated_pages;
- struct bio_set bs;
- struct mutex bio_alloc_lock;
-
struct workqueue_struct *io_queue;
struct workqueue_struct *crypt_queue;
- struct task_struct *write_thread;
wait_queue_head_t write_thread_wait;
+ struct task_struct *write_thread;
struct rb_root write_tree;
char *cipher;
unsigned int integrity_iv_size;
unsigned int on_disk_tag_size;
+ /*
+ * pool for per bio private data, crypto requests,
+ * encryption requests/buffer pages and integrity tags
+ */
+ unsigned tag_pool_max_sectors;
+ mempool_t tag_pool;
+ mempool_t req_pool;
+ mempool_t page_pool;
+
+ struct bio_set bs;
+ struct mutex bio_alloc_lock;
+
u8 *authenc_key; /* space for keys in authenc() format (if used) */
u8 key[0];
};
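The pools now grouped at the tail of crypt_config are all consumed per bio; for example, integrity tag space comes out of tag_pool, whose elements are assumed here to be sized for tag_pool_max_sectors sectors of on_disk_tag_size bytes each. A minimal, hypothetical sketch of that allocation:

#include <linux/mempool.h>

/* Hypothetical: grab one bio's worth of integrity tag space. */
static u8 *crypt_alloc_tag_space_sketch(struct crypt_config *cc)
{
	/* Element size assumed to be tag_pool_max_sectors * on_disk_tag_size. */
	return mempool_alloc(&cc->tag_pool, GFP_NOIO);
}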
struct dm_io_client *io_client;
wait_queue_head_t destroyq;
- atomic_t nr_jobs;
mempool_t job_pool;
struct dm_kcopyd_throttle *throttle;
+ atomic_t nr_jobs;
+
/*
* We maintain three lists of jobs:
*
/* hash table */
rwlock_t hash_lock;
- mempool_t region_pool;
unsigned mask;
unsigned nr_buckets;
unsigned prime;
unsigned shift;
struct list_head *buckets;
+ /*
+ * If there was a flush failure no regions can be marked clean.
+ */
+ int flush_failure;
+
unsigned max_recovery; /* Max # of regions to recover in parallel */
spinlock_t region_lock;
atomic_t recovery_in_flight;
- struct semaphore recovery_count;
struct list_head clean_regions;
struct list_head quiesced_regions;
struct list_head recovered_regions;
struct list_head failed_recovered_regions;
+ struct semaphore recovery_count;
- /*
- * If there was a flush failure no regions can be marked clean.
- */
- int flush_failure;
+ mempool_t region_pool;
void *context;
sector_t target_begin;
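flush_failure now sits with the comment explaining it: once a flush has failed, regions must not be reported clean. A hedged sketch of that guard under region_lock (the helper is invented, not dm-region-hash's actual state machine):

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hypothetical: move regions onto the clean list unless a flush failed. */
static void rh_mark_clean_sketch(struct dm_region_hash *rh,
				 struct list_head *done_regions)
{
	unsigned long flags;

	spin_lock_irqsave(&rh->region_lock, flags);
	if (!rh->flush_failure)
		list_splice_init(done_regions, &rh->clean_regions);
	spin_unlock_irqrestore(&rh->region_lock, flags);
}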
struct dm_bio_prison *prison;
struct dm_kcopyd_client *copier;
+ struct work_struct worker;
struct workqueue_struct *wq;
struct throttle throttle;
- struct work_struct worker;
struct delayed_work waker;
struct delayed_work no_space_timeout;
struct dm_deferred_set *all_io_ds;
struct dm_thin_new_mapping *next_mapping;
- mempool_t mapping_pool;
process_bio_fn process_bio;
process_bio_fn process_discard;
process_mapping_fn process_prepared_discard_pt2;
struct dm_bio_prison_cell **cell_sort_array;
+
+ mempool_t mapping_pool;
};
static enum pool_mode get_pool_mode(struct pool *pool);
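worker, wq and the delayed waker in struct pool follow the usual deferred-work pattern: queue the immediate worker, then rearm the periodic waker on the same workqueue. A small sketch, with an illustrative period constant:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define POOL_WAKER_PERIOD_SKETCH HZ	/* illustrative interval, one second */

/* Hypothetical: kick the worker now and rearm the periodic waker. */
static void pool_kick_worker_sketch(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
	queue_delayed_work(pool->wq, &pool->waker, POOL_WAKER_PERIOD_SKETCH);
}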
struct dmz_reclaim *reclaim;
/* For chunk work */
- struct mutex chunk_lock;
struct radix_tree_root chunk_rxtree;
struct workqueue_struct *chunk_wq;
+ struct mutex chunk_lock;
/* For cloned BIOs to zones */
struct bio_set bio_set;
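chunk_lock now sits next to the radix tree and workqueue it serialises: lookups into chunk_rxtree happen with the mutex held. A minimal sketch (the helper and the struct dm_chunk_work return type are assumptions about dm-zoned's internals):

#include <linux/mutex.h>
#include <linux/radix-tree.h>

/* Hypothetical: find the per-chunk work item for a chunk, under chunk_lock. */
static struct dm_chunk_work *dmz_lookup_chunk_work_sketch(struct dmz_target *dmz,
							  unsigned int chunk)
{
	struct dm_chunk_work *cw;

	mutex_lock(&dmz->chunk_lock);
	cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
	mutex_unlock(&dmz->chunk_lock);

	return cw;
}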