/*
 * Internal header file for device mapper
 *
 * Copyright (C) 2001, 2002 Sistina Software
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_INTERNAL_H
#define DM_INTERNAL_H

#include <linux/fs.h>
#include <linux/device-mapper.h>
#include <linux/list.h>
#include <linux/moduleparam.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/completion.h>
#include <linux/kobject.h>
#include <linux/refcount.h>

/*
 * Suspend feature flags
 */
#define DM_SUSPEND_LOCKFS_FLAG		(1 << 0)
#define DM_SUSPEND_NOFLUSH_FLAG		(1 << 1)
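
/*
 * Illustrative sketch (not part of the upstream header): the suspend
 * flags form a bitmask, so a hypothetical caller can OR them together
 * when suspending a device via dm_suspend() from <linux/device-mapper.h>:
 *
 *	int r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG |
 *			       DM_SUSPEND_NOFLUSH_FLAG);
 *	if (r)
 *		return r;
 */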

/*
 * Status feature flags
 */
#define DM_STATUS_NOFLUSH_FLAG		(1 << 0)

/*
 * List of devices that a metadevice uses and should open/close.
 */
struct dm_dev_internal {
	struct list_head list;
	refcount_t count;
	struct dm_dev *dm_dev;
};

struct dm_table;
struct dm_md_mempools;
struct dm_target_io;
struct dm_io;

/*-----------------------------------------------------------------
 * Internal table functions.
 *---------------------------------------------------------------*/
void dm_table_event_callback(struct dm_table *t,
			     void (*fn)(void *), void *context);
struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
bool dm_table_has_no_data_devices(struct dm_table *table);
int dm_calculate_queue_limits(struct dm_table *table,
			      struct queue_limits *limits);
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *limits);
struct list_head *dm_table_get_devices(struct dm_table *t);
void dm_table_presuspend_targets(struct dm_table *t);
void dm_table_presuspend_undo_targets(struct dm_table *t);
void dm_table_postsuspend_targets(struct dm_table *t);
int dm_table_resume_targets(struct dm_table *t);
enum dm_queue_mode dm_table_get_type(struct dm_table *t);
struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
struct dm_target *dm_table_get_wildcard_target(struct dm_table *t);
bool dm_table_bio_based(struct dm_table *t);
bool dm_table_request_based(struct dm_table *t);
void dm_table_free_md_mempools(struct dm_table *t);
struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t);
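
/*
 * Illustrative sketch (not upstream code): the bio mapping path routes a
 * bio to the owning target by its start sector; dm_table_find_target()
 * returns NULL when the sector lies beyond the end of the table:
 *
 *	struct dm_target *ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
 *	if (!ti)
 *		return DM_MAPIO_KILL;
 */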

void dm_lock_md_type(struct mapped_device *md);
void dm_unlock_md_type(struct mapped_device *md);
void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type);
enum dm_queue_mode dm_get_md_type(struct mapped_device *md);
struct target_type *dm_get_immutable_target_type(struct mapped_device *md);

int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t);

/*
 * To check whether the target type is bio-based or not (request-based).
 */
#define dm_target_bio_based(t) ((t)->type->map != NULL)

/*
 * To check whether the target type is request-based or not (bio-based).
 */
#define dm_target_request_based(t) ((t)->type->clone_and_map_rq != NULL)

/*
 * To check whether the target type is a hybrid (capable of being
 * either request-based or bio-based).
 */
#define dm_target_hybrid(t) (dm_target_bio_based(t) && dm_target_request_based(t))
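
/*
 * Illustrative sketch (not upstream code): a target type advertises its
 * I/O model through which map hook it implements, so table-type
 * validation can be written along these lines:
 *
 *	if (dm_target_hybrid(ti))
 *		... target works with either table type ...
 *	else if (dm_target_request_based(ti))
 *		... table must be request-based ...
 *	else if (dm_target_bio_based(ti))
 *		... table must be bio-based ...
 */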

/*
 * Zoned targets related functions.
 */
int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q);
void dm_zone_endio(struct dm_io *io, struct bio *clone);
#ifdef CONFIG_BLK_DEV_ZONED
void dm_cleanup_zoned_dev(struct mapped_device *md);
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones, report_zones_cb cb, void *data);
bool dm_is_zone_write(struct mapped_device *md, struct bio *bio);
int dm_zone_map_bio(struct dm_target_io *io);
#else
static inline void dm_cleanup_zoned_dev(struct mapped_device *md) {}
#define dm_blk_report_zones	NULL
static inline bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
	return false;
}
static inline int dm_zone_map_bio(struct dm_target_io *tio)
{
	return DM_MAPIO_KILL;
}
#endif
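
/*
 * Illustrative note (not upstream text): the #else branch above is the
 * usual kernel stub pattern, letting callers stay unconditional. For
 * example, a bio submission path can simply do:
 *
 *	if (dm_is_zone_write(md, bio))
 *		... track the zone write ...
 *
 * and when CONFIG_BLK_DEV_ZONED is off the branch compiles away, while
 * the dm_zone_map_bio() stub returns DM_MAPIO_KILL to fail a path that
 * should be unreachable without zoned support.
 */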

/*-----------------------------------------------------------------
 * A registry of target types.
 *---------------------------------------------------------------*/
int dm_target_init(void);
void dm_target_exit(void);
struct target_type *dm_get_target_type(const char *name);
void dm_put_target_type(struct target_type *tt);
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
					void *param), void *param);
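
/*
 * Illustrative sketch (not upstream code): dm_target_iterate() invokes the
 * callback once per registered target type, forwarding the opaque param.
 * The callback name below is made up for illustration:
 *
 *	static void example_count_targets(struct target_type *tt, void *param)
 *	{
 *		(*(unsigned int *)param)++;
 *	}
 *
 *	unsigned int n = 0;
 *	dm_target_iterate(example_count_targets, &n);
 */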

int dm_split_args(int *argc, char ***argvp, char *input);
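
/*
 * Illustrative sketch (not upstream code): dm_split_args() tokenizes a
 * table-line string in place (modifying input) and allocates the argv
 * array, which the caller must kfree(). Returns 0 on success:
 *
 *	int argc;
 *	char **argv;
 *	char input[] = "0 409600 linear /dev/sda 0";
 *
 *	if (!dm_split_args(&argc, &argv, input))
 *		... argc == 5, argv[2] is "linear" ...
 *	kfree(argv);
 */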

/*
 * Is this mapped_device being deleted?
 */
int dm_deleting_md(struct mapped_device *md);

/*
 * Is this mapped_device suspended?
 */
int dm_suspended_md(struct mapped_device *md);

/*
 * Internal suspend and resume methods.
 */
int dm_suspended_internally_md(struct mapped_device *md);
void dm_internal_suspend_fast(struct mapped_device *md);
void dm_internal_resume_fast(struct mapped_device *md);
void dm_internal_suspend_noflush(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);
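
/*
 * Illustrative sketch (not upstream code): internal suspend is
 * reference-counted and must be balanced by a resume, e.g. a target
 * such as dm-thin quiescing a device around metadata operations:
 *
 *	dm_internal_suspend_noflush(md);
 *	... operate on the quiesced device ...
 *	dm_internal_resume(md);
 */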

/*
 * Test if the device is scheduled for deferred remove.
 */
int dm_test_deferred_remove_flag(struct mapped_device *md);

/*
 * Try to remove devices marked for deferred removal.
 */
void dm_deferred_remove(void);
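
/*
 * Illustrative sketch (not upstream code): deferred removal marks a busy
 * device instead of failing the remove. dm_lock_for_deletion() (declared
 * below) sets the flag when the device is still open, and once the last
 * opener goes away dm_deferred_remove() sweeps the marked devices:
 *
 *	r = dm_lock_for_deletion(md, true, false);
 *	... returns -EBUSY, with the deferred flag set, if still open ...
 *	... later, on last close: dm_deferred_remove() ...
 */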

/*
 * The device-mapper can be driven through one of two interfaces:
 * ioctl or filesystem, depending on which patch you have applied.
 */
int dm_interface_init(void);
void dm_interface_exit(void);

/*
 * sysfs interface
 */
int dm_sysfs_init(struct mapped_device *md);
void dm_sysfs_exit(struct mapped_device *md);
struct kobject *dm_kobject(struct mapped_device *md);
struct mapped_device *dm_get_from_kobject(struct kobject *kobj);

/*
 * The kobject release method
 */
void dm_kobject_release(struct kobject *kobj);

/*
 * Targets for linear and striped mappings
 */
int dm_linear_init(void);
void dm_linear_exit(void);

int dm_stripe_init(void);
void dm_stripe_exit(void);

/*
 * mapped_device operations
 */
void dm_destroy(struct mapped_device *md);
void dm_destroy_immediate(struct mapped_device *md);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred);
int dm_cancel_deferred_remove(struct mapped_device *md);
int dm_request_based(struct mapped_device *md);
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result);
void dm_put_table_device(struct mapped_device *md, struct dm_dev *d);
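
/*
 * Illustrative sketch (not upstream code): dm_get_table_device() opens an
 * underlying block device on behalf of a table and caches it on the
 * mapped_device; every successful get must be paired with a put:
 *
 *	struct dm_dev *dev;
 *	dev_t devno = ...;
 *
 *	if (dm_get_table_device(md, devno, FMODE_READ | FMODE_WRITE, &dev))
 *		... open failed ...
 *	... use dev->bdev ...
 *	dm_put_table_device(md, dev);
 */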

int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie);

void dm_internal_suspend(struct mapped_device *md);
void dm_internal_resume(struct mapped_device *md);

int dm_io_init(void);
void dm_io_exit(void);

int dm_kcopyd_init(void);
void dm_kcopyd_exit(void);

/*
 * Mempool operations
 */
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type,
					    unsigned per_io_data_size, unsigned min_pool_size,
					    bool integrity, bool poll);
void dm_free_md_mempools(struct dm_md_mempools *pools);
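
/*
 * Illustrative sketch (not upstream code): the table-loading path sizes
 * the per-device pools from the table's targets before swapping the
 * pools onto the mapped_device. The last two arguments are the integrity
 * and poll booleans; the values here are made up:
 *
 *	struct dm_md_mempools *pools;
 *
 *	pools = dm_alloc_md_mempools(md, DM_TYPE_BIO_BASED,
 *				     per_io_data_size, min_pool_size,
 *				     false, false);
 *	if (!pools)
 *		return -ENOMEM;
 */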

/*
 * Various helpers
 */
unsigned dm_get_reserved_bio_based_ios(void);

#endif