3 rbd.c -- Export ceph rados objects as a Linux block device
6 based on drivers/block/osdblk.c:
8 Copyright 2009 Red Hat, Inc.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 For usage instructions, please refer to:
27 Documentation/ABI/testing/sysfs-bus-rbd
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/cls_lock_client.h>
35 #include <linux/ceph/striper.h>
36 #include <linux/ceph/decode.h>
37 #include <linux/fs_parser.h>
38 #include <linux/bsearch.h>
40 #include <linux/kernel.h>
41 #include <linux/device.h>
42 #include <linux/module.h>
43 #include <linux/blk-mq.h>
45 #include <linux/blkdev.h>
46 #include <linux/slab.h>
47 #include <linux/idr.h>
48 #include <linux/workqueue.h>
50 #include "rbd_types.h"
52 #define RBD_DEBUG /* Activate rbd_assert() calls */
55 * Increment the given counter and return its updated value.
56 * If the counter is already 0 it will not be incremented.
57 * If the counter is already at its maximum value returns
58 * -EINVAL without updating it.
60 static int atomic_inc_return_safe(atomic_t *v)
64 counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
65 if (counter <= (unsigned int)INT_MAX)
73 /* Decrement the counter. Return the resulting value, or -EINVAL */
74 static int atomic_dec_return_safe(atomic_t *v)
78 counter = atomic_dec_return(v);
87 #define RBD_DRV_NAME "rbd"
89 #define RBD_MINORS_PER_MAJOR 256
90 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
92 #define RBD_MAX_PARENT_CHAIN_LEN 16
94 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
95 #define RBD_MAX_SNAP_NAME_LEN \
96 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
98 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
100 #define RBD_SNAP_HEAD_NAME "-"
102 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
104 /* This allows a single page to hold an image name sent by OSD */
105 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
106 #define RBD_IMAGE_ID_LEN_MAX 64
108 #define RBD_OBJ_PREFIX_LEN_MAX 64
110 #define RBD_NOTIFY_TIMEOUT 5 /* seconds */
111 #define RBD_RETRY_DELAY msecs_to_jiffies(1000)
115 #define RBD_FEATURE_LAYERING (1ULL<<0)
116 #define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
117 #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
118 #define RBD_FEATURE_OBJECT_MAP (1ULL<<3)
119 #define RBD_FEATURE_FAST_DIFF (1ULL<<4)
120 #define RBD_FEATURE_DEEP_FLATTEN (1ULL<<5)
121 #define RBD_FEATURE_DATA_POOL (1ULL<<7)
122 #define RBD_FEATURE_OPERATIONS (1ULL<<8)
124 #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
125 RBD_FEATURE_STRIPINGV2 | \
126 RBD_FEATURE_EXCLUSIVE_LOCK | \
127 RBD_FEATURE_OBJECT_MAP | \
128 RBD_FEATURE_FAST_DIFF | \
129 RBD_FEATURE_DEEP_FLATTEN | \
130 RBD_FEATURE_DATA_POOL | \
131 RBD_FEATURE_OPERATIONS)
133 /* Features supported by this (client software) implementation. */
135 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
138 * An RBD device name will be "rbd#", where the "rbd" comes from
139 * RBD_DRV_NAME above, and # is a unique integer identifier.
141 #define DEV_NAME_LEN 32
144 * block device image metadata (in-memory version)
146 struct rbd_image_header {
147 /* These six fields never change for a given rbd image */
153 u64 features; /* Might be changeable someday? */
155 /* The remaining fields need to be updated occasionally */
157 struct ceph_snap_context *snapc;
158 char *snap_names; /* format 1 only */
159 u64 *snap_sizes; /* format 1 only */
163 * An rbd image specification.
165 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
166 * identify an image. Each rbd_dev structure includes a pointer to
167 * an rbd_spec structure that encapsulates this identity.
169 * Each of the id's in an rbd_spec has an associated name. For a
170 * user-mapped image, the names are supplied and the id's associated
171 * with them are looked up. For a layered image, a parent image is
172 * defined by the tuple, and the names are looked up.
174 * An rbd_dev structure contains a parent_spec pointer which is
175 * non-null if the image it represents is a child in a layered
176 * image. This pointer will refer to the rbd_spec structure used
177 * by the parent rbd_dev for its own identity (i.e., the structure
178 * is shared between the parent and child).
180 * Since these structures are populated once, during the discovery
181 * phase of image construction, they are effectively immutable so
182 * we make no effort to synchronize access to them.
184 * Note that code herein does not assume the image name is known (it
185 * could be a null pointer).
189 const char *pool_name;
190 const char *pool_ns; /* NULL if default, never "" */
192 const char *image_id;
193 const char *image_name;
196 const char *snap_name;
202 * an instance of the client. multiple devices may share an rbd client.
205 struct ceph_client *client;
207 struct list_head node;
210 struct pending_result {
211 int result; /* first nonzero result */
215 struct rbd_img_request;
217 enum obj_request_type {
218 OBJ_REQUEST_NODATA = 1,
219 OBJ_REQUEST_BIO, /* pointer into provided bio (list) */
220 OBJ_REQUEST_BVECS, /* pointer into provided bio_vec array */
221 OBJ_REQUEST_OWN_BVECS, /* private bio_vec array, doesn't own pages */
224 enum obj_operation_type {
231 #define RBD_OBJ_FLAG_DELETION (1U << 0)
232 #define RBD_OBJ_FLAG_COPYUP_ENABLED (1U << 1)
233 #define RBD_OBJ_FLAG_COPYUP_ZEROS (1U << 2)
234 #define RBD_OBJ_FLAG_MAY_EXIST (1U << 3)
235 #define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT (1U << 4)
237 enum rbd_obj_read_state {
238 RBD_OBJ_READ_START = 1,
244 * Writes go through the following state machine to deal with
247 * . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
250 * . RBD_OBJ_WRITE_READ_FROM_PARENT. . . .
252 * . v v (deep-copyup .
253 * (image . RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC . not needed) .
256 * . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . . (copyup .
259 * done . . . . . . . . . . . . . . . . . .
264 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
265 * assert_exists guard is needed or not (in some cases it's not needed
266 * even if there is a parent).
268 enum rbd_obj_write_state {
269 RBD_OBJ_WRITE_START = 1,
270 RBD_OBJ_WRITE_PRE_OBJECT_MAP,
271 RBD_OBJ_WRITE_OBJECT,
272 __RBD_OBJ_WRITE_COPYUP,
273 RBD_OBJ_WRITE_COPYUP,
274 RBD_OBJ_WRITE_POST_OBJECT_MAP,
277 enum rbd_obj_copyup_state {
278 RBD_OBJ_COPYUP_START = 1,
279 RBD_OBJ_COPYUP_READ_PARENT,
280 __RBD_OBJ_COPYUP_OBJECT_MAPS,
281 RBD_OBJ_COPYUP_OBJECT_MAPS,
282 __RBD_OBJ_COPYUP_WRITE_OBJECT,
283 RBD_OBJ_COPYUP_WRITE_OBJECT,
286 struct rbd_obj_request {
287 struct ceph_object_extent ex;
288 unsigned int flags; /* RBD_OBJ_FLAG_* */
290 enum rbd_obj_read_state read_state; /* for reads */
291 enum rbd_obj_write_state write_state; /* for writes */
294 struct rbd_img_request *img_request;
295 struct ceph_file_extent *img_extents;
299 struct ceph_bio_iter bio_pos;
301 struct ceph_bvec_iter bvec_pos;
307 enum rbd_obj_copyup_state copyup_state;
308 struct bio_vec *copyup_bvecs;
309 u32 copyup_bvec_count;
311 struct list_head osd_reqs; /* w/ r_private_item */
313 struct mutex state_mutex;
314 struct pending_result pending;
319 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
320 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
325 RBD_IMG_EXCLUSIVE_LOCK,
326 __RBD_IMG_OBJECT_REQUESTS,
327 RBD_IMG_OBJECT_REQUESTS,
330 struct rbd_img_request {
331 struct rbd_device *rbd_dev;
332 enum obj_operation_type op_type;
333 enum obj_request_type data_type;
335 enum rbd_img_state state;
337 u64 snap_id; /* for reads */
338 struct ceph_snap_context *snapc; /* for writes */
340 struct rbd_obj_request *obj_request; /* obj req initiator */
342 struct list_head lock_item;
343 struct list_head object_extents; /* obj_req.ex structs */
345 struct mutex state_mutex;
346 struct pending_result pending;
347 struct work_struct work;
351 #define for_each_obj_request(ireq, oreq) \
352 list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
353 #define for_each_obj_request_safe(ireq, oreq, n) \
354 list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
356 enum rbd_watch_state {
357 RBD_WATCH_STATE_UNREGISTERED,
358 RBD_WATCH_STATE_REGISTERED,
359 RBD_WATCH_STATE_ERROR,
362 enum rbd_lock_state {
363 RBD_LOCK_STATE_UNLOCKED,
364 RBD_LOCK_STATE_LOCKED,
365 RBD_LOCK_STATE_RELEASING,
368 /* WatchNotify::ClientId */
369 struct rbd_client_id {
382 int dev_id; /* blkdev unique id */
384 int major; /* blkdev assigned major */
386 struct gendisk *disk; /* blkdev's gendisk and rq */
388 u32 image_format; /* Either 1 or 2 */
389 struct rbd_client *rbd_client;
391 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
393 spinlock_t lock; /* queue, flags, open_count */
395 struct rbd_image_header header;
396 unsigned long flags; /* possibly lock protected */
397 struct rbd_spec *spec;
398 struct rbd_options *opts;
399 char *config_info; /* add{,_single_major} string */
401 struct ceph_object_id header_oid;
402 struct ceph_object_locator header_oloc;
404 struct ceph_file_layout layout; /* used for all rbd requests */
406 struct mutex watch_mutex;
407 enum rbd_watch_state watch_state;
408 struct ceph_osd_linger_request *watch_handle;
410 struct delayed_work watch_dwork;
412 struct rw_semaphore lock_rwsem;
413 enum rbd_lock_state lock_state;
414 char lock_cookie[32];
415 struct rbd_client_id owner_cid;
416 struct work_struct acquired_lock_work;
417 struct work_struct released_lock_work;
418 struct delayed_work lock_dwork;
419 struct work_struct unlock_work;
420 spinlock_t lock_lists_lock;
421 struct list_head acquiring_list;
422 struct list_head running_list;
423 struct completion acquire_wait;
425 struct completion releasing_wait;
427 spinlock_t object_map_lock;
429 u64 object_map_size; /* in objects */
430 u64 object_map_flags;
432 struct workqueue_struct *task_wq;
434 struct rbd_spec *parent_spec;
437 struct rbd_device *parent;
439 /* Block layer tags. */
440 struct blk_mq_tag_set tag_set;
442 /* protects updating the header */
443 struct rw_semaphore header_rwsem;
445 struct rbd_mapping mapping;
447 struct list_head node;
451 unsigned long open_count; /* protected by lock */
455 * Flag bits for rbd_dev->flags:
456 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
460 RBD_DEV_FLAG_EXISTS, /* rbd_dev_device_setup() ran */
461 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
462 RBD_DEV_FLAG_READONLY, /* -o ro or snapshot */
465 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
467 static LIST_HEAD(rbd_dev_list); /* devices */
468 static DEFINE_SPINLOCK(rbd_dev_list_lock);
470 static LIST_HEAD(rbd_client_list); /* clients */
471 static DEFINE_SPINLOCK(rbd_client_list_lock);
473 /* Slab caches for frequently-allocated structures */
475 static struct kmem_cache *rbd_img_request_cache;
476 static struct kmem_cache *rbd_obj_request_cache;
478 static int rbd_major;
479 static DEFINE_IDA(rbd_dev_id_ida);
481 static struct workqueue_struct *rbd_wq;
483 static struct ceph_snap_context rbd_empty_snapc = {
484 .nref = REFCOUNT_INIT(1),
488 * single-major requires >= 0.75 version of userspace rbd utility.
490 static bool single_major = true;
491 module_param(single_major, bool, 0444);
492 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
494 static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count);
495 static ssize_t remove_store(const struct bus_type *bus, const char *buf,
497 static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
499 static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
501 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
503 static int rbd_dev_id_to_minor(int dev_id)
505 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
508 static int minor_to_rbd_dev_id(int minor)
510 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
513 static bool rbd_is_ro(struct rbd_device *rbd_dev)
515 return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
518 static bool rbd_is_snap(struct rbd_device *rbd_dev)
520 return rbd_dev->spec->snap_id != CEPH_NOSNAP;
523 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
525 lockdep_assert_held(&rbd_dev->lock_rwsem);
527 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
528 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
531 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
535 down_read(&rbd_dev->lock_rwsem);
536 is_lock_owner = __rbd_is_lock_owner(rbd_dev);
537 up_read(&rbd_dev->lock_rwsem);
538 return is_lock_owner;
541 static ssize_t supported_features_show(const struct bus_type *bus, char *buf)
543 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
546 static BUS_ATTR_WO(add);
547 static BUS_ATTR_WO(remove);
548 static BUS_ATTR_WO(add_single_major);
549 static BUS_ATTR_WO(remove_single_major);
550 static BUS_ATTR_RO(supported_features);
552 static struct attribute *rbd_bus_attrs[] = {
554 &bus_attr_remove.attr,
555 &bus_attr_add_single_major.attr,
556 &bus_attr_remove_single_major.attr,
557 &bus_attr_supported_features.attr,
561 static umode_t rbd_bus_is_visible(struct kobject *kobj,
562 struct attribute *attr, int index)
565 (attr == &bus_attr_add_single_major.attr ||
566 attr == &bus_attr_remove_single_major.attr))
572 static const struct attribute_group rbd_bus_group = {
573 .attrs = rbd_bus_attrs,
574 .is_visible = rbd_bus_is_visible,
576 __ATTRIBUTE_GROUPS(rbd_bus);
578 static struct bus_type rbd_bus_type = {
580 .bus_groups = rbd_bus_groups,
583 static void rbd_root_dev_release(struct device *dev)
587 static struct device rbd_root_dev = {
589 .release = rbd_root_dev_release,
592 static __printf(2, 3)
593 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
595 struct va_format vaf;
603 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
604 else if (rbd_dev->disk)
605 printk(KERN_WARNING "%s: %s: %pV\n",
606 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
607 else if (rbd_dev->spec && rbd_dev->spec->image_name)
608 printk(KERN_WARNING "%s: image %s: %pV\n",
609 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
610 else if (rbd_dev->spec && rbd_dev->spec->image_id)
611 printk(KERN_WARNING "%s: id %s: %pV\n",
612 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
614 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
615 RBD_DRV_NAME, rbd_dev, &vaf);
620 #define rbd_assert(expr) \
621 if (unlikely(!(expr))) { \
622 printk(KERN_ERR "\nAssertion failure in %s() " \
624 "\trbd_assert(%s);\n\n", \
625 __func__, __LINE__, #expr); \
628 #else /* !RBD_DEBUG */
629 # define rbd_assert(expr) ((void) 0)
630 #endif /* !RBD_DEBUG */
632 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
634 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
635 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
636 static int rbd_dev_header_info(struct rbd_device *rbd_dev);
637 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
638 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
640 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
641 u8 *order, u64 *snap_size);
642 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);
644 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
645 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
648 * Return true if nothing else is pending.
650 static bool pending_result_dec(struct pending_result *pending, int *result)
652 rbd_assert(pending->num_pending > 0);
654 if (*result && !pending->result)
655 pending->result = *result;
656 if (--pending->num_pending)
659 *result = pending->result;
663 static int rbd_open(struct gendisk *disk, blk_mode_t mode)
665 struct rbd_device *rbd_dev = disk->private_data;
666 bool removing = false;
668 spin_lock_irq(&rbd_dev->lock);
669 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
672 rbd_dev->open_count++;
673 spin_unlock_irq(&rbd_dev->lock);
677 (void) get_device(&rbd_dev->dev);
682 static void rbd_release(struct gendisk *disk)
684 struct rbd_device *rbd_dev = disk->private_data;
685 unsigned long open_count_before;
687 spin_lock_irq(&rbd_dev->lock);
688 open_count_before = rbd_dev->open_count--;
689 spin_unlock_irq(&rbd_dev->lock);
690 rbd_assert(open_count_before > 0);
692 put_device(&rbd_dev->dev);
695 static const struct block_device_operations rbd_bd_ops = {
696 .owner = THIS_MODULE,
698 .release = rbd_release,
702 * Initialize an rbd client instance. Success or not, this function
703 * consumes ceph_opts. Caller holds client_mutex.
705 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
707 struct rbd_client *rbdc;
710 dout("%s:\n", __func__);
711 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
715 kref_init(&rbdc->kref);
716 INIT_LIST_HEAD(&rbdc->node);
718 rbdc->client = ceph_create_client(ceph_opts, rbdc);
719 if (IS_ERR(rbdc->client))
721 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
723 ret = ceph_open_session(rbdc->client);
727 spin_lock(&rbd_client_list_lock);
728 list_add_tail(&rbdc->node, &rbd_client_list);
729 spin_unlock(&rbd_client_list_lock);
731 dout("%s: rbdc %p\n", __func__, rbdc);
735 ceph_destroy_client(rbdc->client);
740 ceph_destroy_options(ceph_opts);
741 dout("%s: error %d\n", __func__, ret);
746 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
748 kref_get(&rbdc->kref);
754 * Find a ceph client with specific addr and configuration. If
755 * found, bump its reference count.
757 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
759 struct rbd_client *rbdc = NULL, *iter;
761 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
764 spin_lock(&rbd_client_list_lock);
765 list_for_each_entry(iter, &rbd_client_list, node) {
766 if (!ceph_compare_options(ceph_opts, iter->client)) {
767 __rbd_get_client(iter);
773 spin_unlock(&rbd_client_list_lock);
779 * (Per device) rbd map options
787 Opt_compression_hint,
788 /* string args above */
797 Opt_compression_hint_none,
798 Opt_compression_hint_compressible,
799 Opt_compression_hint_incompressible,
802 static const struct constant_table rbd_param_compression_hint[] = {
803 {"none", Opt_compression_hint_none},
804 {"compressible", Opt_compression_hint_compressible},
805 {"incompressible", Opt_compression_hint_incompressible},
809 static const struct fs_parameter_spec rbd_parameters[] = {
810 fsparam_u32 ("alloc_size", Opt_alloc_size),
811 fsparam_enum ("compression_hint", Opt_compression_hint,
812 rbd_param_compression_hint),
813 fsparam_flag ("exclusive", Opt_exclusive),
814 fsparam_flag ("lock_on_read", Opt_lock_on_read),
815 fsparam_u32 ("lock_timeout", Opt_lock_timeout),
816 fsparam_flag ("notrim", Opt_notrim),
817 fsparam_string ("_pool_ns", Opt_pool_ns),
818 fsparam_u32 ("queue_depth", Opt_queue_depth),
819 fsparam_flag ("read_only", Opt_read_only),
820 fsparam_flag ("read_write", Opt_read_write),
821 fsparam_flag ("ro", Opt_read_only),
822 fsparam_flag ("rw", Opt_read_write),
829 unsigned long lock_timeout;
835 u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
838 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_DEFAULT_RQ
839 #define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
840 #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
841 #define RBD_READ_ONLY_DEFAULT false
842 #define RBD_LOCK_ON_READ_DEFAULT false
843 #define RBD_EXCLUSIVE_DEFAULT false
844 #define RBD_TRIM_DEFAULT true
846 struct rbd_parse_opts_ctx {
847 struct rbd_spec *spec;
848 struct ceph_options *copts;
849 struct rbd_options *opts;
852 static char* obj_op_name(enum obj_operation_type op_type)
869 * Destroy ceph client
871 * Caller must hold rbd_client_list_lock.
873 static void rbd_client_release(struct kref *kref)
875 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
877 dout("%s: rbdc %p\n", __func__, rbdc);
878 spin_lock(&rbd_client_list_lock);
879 list_del(&rbdc->node);
880 spin_unlock(&rbd_client_list_lock);
882 ceph_destroy_client(rbdc->client);
887 * Drop reference to ceph client node. If it's not referenced anymore, release
890 static void rbd_put_client(struct rbd_client *rbdc)
893 kref_put(&rbdc->kref, rbd_client_release);
897 * Get a ceph client with specific addr and configuration, if one does
898 * not exist create it. Either way, ceph_opts is consumed by this
901 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
903 struct rbd_client *rbdc;
906 mutex_lock(&client_mutex);
907 rbdc = rbd_client_find(ceph_opts);
909 ceph_destroy_options(ceph_opts);
912 * Using an existing client. Make sure ->pg_pools is up to
913 * date before we look up the pool id in do_rbd_add().
915 ret = ceph_wait_for_latest_osdmap(rbdc->client,
916 rbdc->client->options->mount_timeout);
918 rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
919 rbd_put_client(rbdc);
923 rbdc = rbd_client_create(ceph_opts);
925 mutex_unlock(&client_mutex);
930 static bool rbd_image_format_valid(u32 image_format)
932 return image_format == 1 || image_format == 2;
935 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
940 /* The header has to start with the magic rbd header text */
941 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
944 /* The bio layer requires at least sector-sized I/O */
946 if (ondisk->options.order < SECTOR_SHIFT)
949 /* If we use u64 in a few spots we may be able to loosen this */
951 if (ondisk->options.order > 8 * sizeof (int) - 1)
955 * The size of a snapshot header has to fit in a size_t, and
956 * that limits the number of snapshots.
958 snap_count = le32_to_cpu(ondisk->snap_count);
959 size = SIZE_MAX - sizeof (struct ceph_snap_context);
960 if (snap_count > size / sizeof (__le64))
964 * Not only that, but the size of the entire the snapshot
965 * header must also be representable in a size_t.
967 size -= snap_count * sizeof (__le64);
968 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
975 * returns the size of an object in the image
977 static u32 rbd_obj_bytes(struct rbd_image_header *header)
979 return 1U << header->obj_order;
982 static void rbd_init_layout(struct rbd_device *rbd_dev)
984 if (rbd_dev->header.stripe_unit == 0 ||
985 rbd_dev->header.stripe_count == 0) {
986 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
987 rbd_dev->header.stripe_count = 1;
990 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
991 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
992 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
993 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
994 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
995 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
999 * Fill an rbd image header with information from the given format 1
1002 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
1003 struct rbd_image_header_ondisk *ondisk)
1005 struct rbd_image_header *header = &rbd_dev->header;
1006 bool first_time = header->object_prefix == NULL;
1007 struct ceph_snap_context *snapc;
1008 char *object_prefix = NULL;
1009 char *snap_names = NULL;
1010 u64 *snap_sizes = NULL;
1015 /* Allocate this now to avoid having to handle failure below */
1018 object_prefix = kstrndup(ondisk->object_prefix,
1019 sizeof(ondisk->object_prefix),
1025 /* Allocate the snapshot context and fill it in */
1027 snap_count = le32_to_cpu(ondisk->snap_count);
1028 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
1031 snapc->seq = le64_to_cpu(ondisk->snap_seq);
1033 struct rbd_image_snap_ondisk *snaps;
1034 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
1036 /* We'll keep a copy of the snapshot names... */
1038 if (snap_names_len > (u64)SIZE_MAX)
1040 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
1044 /* ...as well as the array of their sizes. */
1045 snap_sizes = kmalloc_array(snap_count,
1046 sizeof(*header->snap_sizes),
1052 * Copy the names, and fill in each snapshot's id
1055 * Note that rbd_dev_v1_header_info() guarantees the
1056 * ondisk buffer we're working with has
1057 * snap_names_len bytes beyond the end of the
1058 * snapshot id array, this memcpy() is safe.
1060 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
1061 snaps = ondisk->snaps;
1062 for (i = 0; i < snap_count; i++) {
1063 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
1064 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
1068 /* We won't fail any more, fill in the header */
1071 header->object_prefix = object_prefix;
1072 header->obj_order = ondisk->options.order;
1073 rbd_init_layout(rbd_dev);
1075 ceph_put_snap_context(header->snapc);
1076 kfree(header->snap_names);
1077 kfree(header->snap_sizes);
1080 /* The remaining fields always get updated (when we refresh) */
1082 header->image_size = le64_to_cpu(ondisk->image_size);
1083 header->snapc = snapc;
1084 header->snap_names = snap_names;
1085 header->snap_sizes = snap_sizes;
1093 ceph_put_snap_context(snapc);
1094 kfree(object_prefix);
1099 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1101 const char *snap_name;
1103 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1105 /* Skip over names until we find the one we are looking for */
1107 snap_name = rbd_dev->header.snap_names;
1109 snap_name += strlen(snap_name) + 1;
1111 return kstrdup(snap_name, GFP_KERNEL);
1115 * Snapshot id comparison function for use with qsort()/bsearch().
1116 * Note that result is for snapshots in *descending* order.
1118 static int snapid_compare_reverse(const void *s1, const void *s2)
1120 u64 snap_id1 = *(u64 *)s1;
1121 u64 snap_id2 = *(u64 *)s2;
1123 if (snap_id1 < snap_id2)
1125 return snap_id1 == snap_id2 ? 0 : -1;
1129 * Search a snapshot context to see if the given snapshot id is
1132 * Returns the position of the snapshot id in the array if it's found,
1133 * or BAD_SNAP_INDEX otherwise.
1135 * Note: The snapshot array is in kept sorted (by the osd) in
1136 * reverse order, highest snapshot id first.
1138 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1140 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1143 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1144 sizeof (snap_id), snapid_compare_reverse);
1146 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
1149 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1153 const char *snap_name;
1155 which = rbd_dev_snap_index(rbd_dev, snap_id);
1156 if (which == BAD_SNAP_INDEX)
1157 return ERR_PTR(-ENOENT);
1159 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1160 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1163 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1165 if (snap_id == CEPH_NOSNAP)
1166 return RBD_SNAP_HEAD_NAME;
1168 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1169 if (rbd_dev->image_format == 1)
1170 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1172 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1175 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1178 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1179 if (snap_id == CEPH_NOSNAP) {
1180 *snap_size = rbd_dev->header.image_size;
1181 } else if (rbd_dev->image_format == 1) {
1184 which = rbd_dev_snap_index(rbd_dev, snap_id);
1185 if (which == BAD_SNAP_INDEX)
1188 *snap_size = rbd_dev->header.snap_sizes[which];
1193 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1202 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1204 u64 snap_id = rbd_dev->spec->snap_id;
1208 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1212 rbd_dev->mapping.size = size;
1216 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1218 rbd_dev->mapping.size = 0;
1221 static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
1223 struct ceph_bio_iter it = *bio_pos;
1225 ceph_bio_iter_advance(&it, off);
1226 ceph_bio_iter_advance_step(&it, bytes, ({
1231 static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
1233 struct ceph_bvec_iter it = *bvec_pos;
1235 ceph_bvec_iter_advance(&it, off);
1236 ceph_bvec_iter_advance_step(&it, bytes, ({
1242 * Zero a range in @obj_req data buffer defined by a bio (list) or
1243 * (private) bio_vec array.
1245 * @off is relative to the start of the data buffer.
1247 static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
1250 dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);
1252 switch (obj_req->img_request->data_type) {
1253 case OBJ_REQUEST_BIO:
1254 zero_bios(&obj_req->bio_pos, off, bytes);
1256 case OBJ_REQUEST_BVECS:
1257 case OBJ_REQUEST_OWN_BVECS:
1258 zero_bvecs(&obj_req->bvec_pos, off, bytes);
1265 static void rbd_obj_request_destroy(struct kref *kref);
1266 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1268 rbd_assert(obj_request != NULL);
1269 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1270 kref_read(&obj_request->kref));
1271 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1274 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1275 struct rbd_obj_request *obj_request)
1277 rbd_assert(obj_request->img_request == NULL);
1279 /* Image request now owns object's original reference */
1280 obj_request->img_request = img_request;
1281 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1284 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1285 struct rbd_obj_request *obj_request)
1287 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1288 list_del(&obj_request->ex.oe_item);
1289 rbd_assert(obj_request->img_request == img_request);
1290 rbd_obj_request_put(obj_request);
1293 static void rbd_osd_submit(struct ceph_osd_request *osd_req)
1295 struct rbd_obj_request *obj_req = osd_req->r_priv;
1297 dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
1298 __func__, osd_req, obj_req, obj_req->ex.oe_objno,
1299 obj_req->ex.oe_off, obj_req->ex.oe_len);
1300 ceph_osdc_start_request(osd_req->r_osdc, osd_req);
1304 * The default/initial value for all image request flags is 0. Each
1305 * is conditionally set to 1 at image request initialization time
1306 * and currently never change thereafter.
1308 static void img_request_layered_set(struct rbd_img_request *img_request)
1310 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1313 static bool img_request_layered_test(struct rbd_img_request *img_request)
1315 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1318 static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
1320 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1322 return !obj_req->ex.oe_off &&
1323 obj_req->ex.oe_len == rbd_dev->layout.object_size;
1326 static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
1328 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1330 return obj_req->ex.oe_off + obj_req->ex.oe_len ==
1331 rbd_dev->layout.object_size;
1335 * Must be called after rbd_obj_calc_img_extents().
1337 static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
1339 rbd_assert(obj_req->img_request->snapc);
1341 if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
1342 dout("%s %p objno %llu discard\n", __func__, obj_req,
1343 obj_req->ex.oe_objno);
1347 if (!obj_req->num_img_extents) {
1348 dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
1349 obj_req->ex.oe_objno);
1353 if (rbd_obj_is_entire(obj_req) &&
1354 !obj_req->img_request->snapc->num_snaps) {
1355 dout("%s %p objno %llu entire\n", __func__, obj_req,
1356 obj_req->ex.oe_objno);
1360 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
1363 static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
1365 return ceph_file_extents_bytes(obj_req->img_extents,
1366 obj_req->num_img_extents);
1369 static bool rbd_img_is_write(struct rbd_img_request *img_req)
1371 switch (img_req->op_type) {
1375 case OBJ_OP_DISCARD:
1376 case OBJ_OP_ZEROOUT:
1383 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
1385 struct rbd_obj_request *obj_req = osd_req->r_priv;
1388 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1389 osd_req->r_result, obj_req);
1392 * Writes aren't allowed to return a data payload. In some
1393 * guarded write cases (e.g. stat + zero on an empty object)
1394 * a stat response makes it through, but we don't care.
1396 if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
1399 result = osd_req->r_result;
1401 rbd_obj_handle_request(obj_req, result);
1404 static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
1406 struct rbd_obj_request *obj_request = osd_req->r_priv;
1407 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1408 struct ceph_options *opt = rbd_dev->rbd_client->client->options;
1410 osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
1411 osd_req->r_snapid = obj_request->img_request->snap_id;
1414 static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
1416 struct rbd_obj_request *obj_request = osd_req->r_priv;
1418 osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
1419 ktime_get_real_ts64(&osd_req->r_mtime);
1420 osd_req->r_data_offset = obj_request->ex.oe_off;
1423 static struct ceph_osd_request *
1424 __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
1425 struct ceph_snap_context *snapc, int num_ops)
1427 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1428 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1429 struct ceph_osd_request *req;
1430 const char *name_format = rbd_dev->image_format == 1 ?
1431 RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
1434 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
1436 return ERR_PTR(-ENOMEM);
1438 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
1439 req->r_callback = rbd_osd_req_callback;
1440 req->r_priv = obj_req;
1443 * Data objects may be stored in a separate pool, but always in
1444 * the same namespace in that pool as the header in its pool.
1446 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
1447 req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1449 ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
1450 rbd_dev->header.object_prefix,
1451 obj_req->ex.oe_objno);
1453 return ERR_PTR(ret);
1458 static struct ceph_osd_request *
1459 rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
1461 rbd_assert(obj_req->img_request->snapc);
1462 return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
1466 static struct rbd_obj_request *rbd_obj_request_create(void)
1468 struct rbd_obj_request *obj_request;
1470 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
1474 ceph_object_extent_init(&obj_request->ex);
1475 INIT_LIST_HEAD(&obj_request->osd_reqs);
1476 mutex_init(&obj_request->state_mutex);
1477 kref_init(&obj_request->kref);
1479 dout("%s %p\n", __func__, obj_request);
1483 static void rbd_obj_request_destroy(struct kref *kref)
1485 struct rbd_obj_request *obj_request;
1486 struct ceph_osd_request *osd_req;
1489 obj_request = container_of(kref, struct rbd_obj_request, kref);
1491 dout("%s: obj %p\n", __func__, obj_request);
1493 while (!list_empty(&obj_request->osd_reqs)) {
1494 osd_req = list_first_entry(&obj_request->osd_reqs,
1495 struct ceph_osd_request, r_private_item);
1496 list_del_init(&osd_req->r_private_item);
1497 ceph_osdc_put_request(osd_req);
1500 switch (obj_request->img_request->data_type) {
1501 case OBJ_REQUEST_NODATA:
1502 case OBJ_REQUEST_BIO:
1503 case OBJ_REQUEST_BVECS:
1504 break; /* Nothing to do */
1505 case OBJ_REQUEST_OWN_BVECS:
1506 kfree(obj_request->bvec_pos.bvecs);
1512 kfree(obj_request->img_extents);
1513 if (obj_request->copyup_bvecs) {
1514 for (i = 0; i < obj_request->copyup_bvec_count; i++) {
1515 if (obj_request->copyup_bvecs[i].bv_page)
1516 __free_page(obj_request->copyup_bvecs[i].bv_page);
1518 kfree(obj_request->copyup_bvecs);
1521 kmem_cache_free(rbd_obj_request_cache, obj_request);
1524 /* It's OK to call this for a device with no parent */
1526 static void rbd_spec_put(struct rbd_spec *spec);
1527 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1529 rbd_dev_remove_parent(rbd_dev);
1530 rbd_spec_put(rbd_dev->parent_spec);
1531 rbd_dev->parent_spec = NULL;
1532 rbd_dev->parent_overlap = 0;
1536 * Parent image reference counting is used to determine when an
1537 * image's parent fields can be safely torn down--after there are no
1538 * more in-flight requests to the parent image. When the last
1539 * reference is dropped, cleaning them up is safe.
1541 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1545 if (!rbd_dev->parent_spec)
1548 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1552 /* Last reference; clean up parent data structures */
1555 rbd_dev_unparent(rbd_dev);
1557 rbd_warn(rbd_dev, "parent reference underflow");
1561 * If an image has a non-zero parent overlap, get a reference to its
1564 * Returns true if the rbd device has a parent with a non-zero
1565 * overlap and a reference for it was successfully taken, or
1568 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1572 if (!rbd_dev->parent_spec)
1575 if (rbd_dev->parent_overlap)
1576 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1579 rbd_warn(rbd_dev, "parent reference overflow");
1584 static void rbd_img_request_init(struct rbd_img_request *img_request,
1585 struct rbd_device *rbd_dev,
1586 enum obj_operation_type op_type)
1588 memset(img_request, 0, sizeof(*img_request));
1590 img_request->rbd_dev = rbd_dev;
1591 img_request->op_type = op_type;
1593 INIT_LIST_HEAD(&img_request->lock_item);
1594 INIT_LIST_HEAD(&img_request->object_extents);
1595 mutex_init(&img_request->state_mutex);
1599 * Only snap_id is captured here, for reads. For writes, snapshot
1600 * context is captured in rbd_img_object_requests() after exclusive
1601 * lock is ensured to be held.
1603 static void rbd_img_capture_header(struct rbd_img_request *img_req)
1605 struct rbd_device *rbd_dev = img_req->rbd_dev;
1607 lockdep_assert_held(&rbd_dev->header_rwsem);
1609 if (!rbd_img_is_write(img_req))
1610 img_req->snap_id = rbd_dev->spec->snap_id;
1612 if (rbd_dev_parent_get(rbd_dev))
1613 img_request_layered_set(img_req);
1616 static void rbd_img_request_destroy(struct rbd_img_request *img_request)
1618 struct rbd_obj_request *obj_request;
1619 struct rbd_obj_request *next_obj_request;
1621 dout("%s: img %p\n", __func__, img_request);
1623 WARN_ON(!list_empty(&img_request->lock_item));
1624 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1625 rbd_img_obj_request_del(img_request, obj_request);
1627 if (img_request_layered_test(img_request))
1628 rbd_dev_parent_put(img_request->rbd_dev);
1630 if (rbd_img_is_write(img_request))
1631 ceph_put_snap_context(img_request->snapc);
1633 if (test_bit(IMG_REQ_CHILD, &img_request->flags))
1634 kmem_cache_free(rbd_img_request_cache, img_request);
1637 #define BITS_PER_OBJ 2
1638 #define OBJS_PER_BYTE (BITS_PER_BYTE / BITS_PER_OBJ)
1639 #define OBJ_MASK ((1 << BITS_PER_OBJ) - 1)
1641 static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
1642 u64 *index, u8 *shift)
1646 rbd_assert(objno < rbd_dev->object_map_size);
1647 *index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
1648 *shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
1651 static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1656 lockdep_assert_held(&rbd_dev->object_map_lock);
1657 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1658 return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
1661 static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
1667 lockdep_assert_held(&rbd_dev->object_map_lock);
1668 rbd_assert(!(val & ~OBJ_MASK));
1670 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1671 p = &rbd_dev->object_map[index];
1672 *p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
1675 static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1679 spin_lock(&rbd_dev->object_map_lock);
1680 state = __rbd_object_map_get(rbd_dev, objno);
1681 spin_unlock(&rbd_dev->object_map_lock);
1685 static bool use_object_map(struct rbd_device *rbd_dev)
1688 * An image mapped read-only can't use the object map -- it isn't
1689 * loaded because the header lock isn't acquired. Someone else can
1690 * write to the image and update the object map behind our back.
1692 * A snapshot can't be written to, so using the object map is always
1695 if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
1698 return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
1699 !(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
1702 static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
1706 /* fall back to default logic if object map is disabled or invalid */
1707 if (!use_object_map(rbd_dev))
1710 state = rbd_object_map_get(rbd_dev, objno);
1711 return state != OBJECT_NONEXISTENT;
1714 static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
1715 struct ceph_object_id *oid)
1717 if (snap_id == CEPH_NOSNAP)
1718 ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
1719 rbd_dev->spec->image_id);
1721 ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
1722 rbd_dev->spec->image_id, snap_id);
1725 static int rbd_object_map_lock(struct rbd_device *rbd_dev)
1727 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1728 CEPH_DEFINE_OID_ONSTACK(oid);
1731 struct ceph_locker *lockers;
1733 bool broke_lock = false;
1736 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1739 ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1740 CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
1741 if (ret != -EBUSY || broke_lock) {
1743 ret = 0; /* already locked by myself */
1745 rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
1749 ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
1750 RBD_LOCK_NAME, &lock_type, &lock_tag,
1751 &lockers, &num_lockers);
1756 rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
1761 if (num_lockers == 0)
1764 rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
1765 ENTITY_NAME(lockers[0].id.name));
1767 ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
1768 RBD_LOCK_NAME, lockers[0].id.cookie,
1769 &lockers[0].id.name);
1770 ceph_free_lockers(lockers, num_lockers);
1775 rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
1783 static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
1785 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1786 CEPH_DEFINE_OID_ONSTACK(oid);
1789 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1791 ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1793 if (ret && ret != -ENOENT)
1794 rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
1797 static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
1805 ceph_decode_32_safe(p, end, header_len, e_inval);
1806 header_end = *p + header_len;
1808 ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
1813 ceph_decode_64_safe(p, end, *object_map_size, e_inval);
1822 static int __rbd_object_map_load(struct rbd_device *rbd_dev)
1824 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1825 CEPH_DEFINE_OID_ONSTACK(oid);
1826 struct page **pages;
1830 u64 object_map_bytes;
1831 u64 object_map_size;
1835 rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);
1837 num_objects = ceph_get_num_objects(&rbd_dev->layout,
1838 rbd_dev->mapping.size);
1839 object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
1841 num_pages = calc_pages_for(0, object_map_bytes) + 1;
1842 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1844 return PTR_ERR(pages);
1846 reply_len = num_pages * PAGE_SIZE;
1847 rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
1848 ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
1849 "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
1850 NULL, 0, pages, &reply_len);
1854 p = page_address(pages[0]);
1855 end = p + min(reply_len, (size_t)PAGE_SIZE);
1856 ret = decode_object_map_header(&p, end, &object_map_size);
1860 if (object_map_size != num_objects) {
1861 rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
1862 object_map_size, num_objects);
1867 if (offset_in_page(p) + object_map_bytes > reply_len) {
1872 rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
1873 if (!rbd_dev->object_map) {
1878 rbd_dev->object_map_size = object_map_size;
1879 ceph_copy_from_page_vector(pages, rbd_dev->object_map,
1880 offset_in_page(p), object_map_bytes);
1883 ceph_release_page_vector(pages, num_pages);
1887 static void rbd_object_map_free(struct rbd_device *rbd_dev)
1889 kvfree(rbd_dev->object_map);
1890 rbd_dev->object_map = NULL;
1891 rbd_dev->object_map_size = 0;
1894 static int rbd_object_map_load(struct rbd_device *rbd_dev)
1898 ret = __rbd_object_map_load(rbd_dev);
1902 ret = rbd_dev_v2_get_flags(rbd_dev);
1904 rbd_object_map_free(rbd_dev);
1908 if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
1909 rbd_warn(rbd_dev, "object map is invalid");
1914 static int rbd_object_map_open(struct rbd_device *rbd_dev)
1918 ret = rbd_object_map_lock(rbd_dev);
1922 ret = rbd_object_map_load(rbd_dev);
1924 rbd_object_map_unlock(rbd_dev);
1931 static void rbd_object_map_close(struct rbd_device *rbd_dev)
1933 rbd_object_map_free(rbd_dev);
1934 rbd_object_map_unlock(rbd_dev);
1938 * This function needs snap_id (or more precisely just something to
1939 * distinguish between HEAD and snapshot object maps), new_state and
1940 * current_state that were passed to rbd_object_map_update().
1942 * To avoid allocating and stashing a context we piggyback on the OSD
1943 * request. A HEAD update has two ops (assert_locked). For new_state
1944 * and current_state we decode our own object_map_update op, encoded in
1945 * rbd_cls_object_map_update().
1947 static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
1948 struct ceph_osd_request *osd_req)
1950 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1951 struct ceph_osd_data *osd_data;
1953 u8 state, new_state, current_state;
1954 bool has_current_state;
1957 if (osd_req->r_result)
1958 return osd_req->r_result;
1961 * Nothing to do for a snapshot object map.
1963 if (osd_req->r_num_ops == 1)
1967 * Update in-memory HEAD object map.
1969 rbd_assert(osd_req->r_num_ops == 2);
1970 osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
1971 rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);
1973 p = page_address(osd_data->pages[0]);
1974 objno = ceph_decode_64(&p);
1975 rbd_assert(objno == obj_req->ex.oe_objno);
1976 rbd_assert(ceph_decode_64(&p) == objno + 1);
1977 new_state = ceph_decode_8(&p);
1978 has_current_state = ceph_decode_8(&p);
1979 if (has_current_state)
1980 current_state = ceph_decode_8(&p);
1982 spin_lock(&rbd_dev->object_map_lock);
1983 state = __rbd_object_map_get(rbd_dev, objno);
1984 if (!has_current_state || current_state == state ||
1985 (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
1986 __rbd_object_map_set(rbd_dev, objno, new_state);
1987 spin_unlock(&rbd_dev->object_map_lock);
1992 static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
1994 struct rbd_obj_request *obj_req = osd_req->r_priv;
1997 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1998 osd_req->r_result, obj_req);
2000 result = rbd_object_map_update_finish(obj_req, osd_req);
2001 rbd_obj_handle_request(obj_req, result);
2004 static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
2006 u8 state = rbd_object_map_get(rbd_dev, objno);
2008 if (state == new_state ||
2009 (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
2010 (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
2016 static int rbd_cls_object_map_update(struct ceph_osd_request *req,
2017 int which, u64 objno, u8 new_state,
2018 const u8 *current_state)
2020 struct page **pages;
2024 ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
2028 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2030 return PTR_ERR(pages);
2032 p = start = page_address(pages[0]);
2033 ceph_encode_64(&p, objno);
2034 ceph_encode_64(&p, objno + 1);
2035 ceph_encode_8(&p, new_state);
2036 if (current_state) {
2037 ceph_encode_8(&p, 1);
2038 ceph_encode_8(&p, *current_state);
2040 ceph_encode_8(&p, 0);
2043 osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
2050 * 0 - object map update sent
2051 * 1 - object map update isn't needed
2054 static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
2055 u8 new_state, const u8 *current_state)
2057 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2058 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2059 struct ceph_osd_request *req;
2064 if (snap_id == CEPH_NOSNAP) {
2065 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
2068 num_ops++; /* assert_locked */
2071 req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
2075 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
2076 req->r_callback = rbd_object_map_callback;
2077 req->r_priv = obj_req;
2079 rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
2080 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
2081 req->r_flags = CEPH_OSD_FLAG_WRITE;
2082 ktime_get_real_ts64(&req->r_mtime);
2084 if (snap_id == CEPH_NOSNAP) {
2086 * Protect against possible race conditions during lock
2087 * ownership transitions.
2089 ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
2090 CEPH_CLS_LOCK_EXCLUSIVE, "", "");
2095 ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
2096 new_state, current_state);
2100 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
2104 ceph_osdc_start_request(osdc, req);
2108 static void prune_extents(struct ceph_file_extent *img_extents,
2109 u32 *num_img_extents, u64 overlap)
2111 u32 cnt = *num_img_extents;
2113 /* drop extents completely beyond the overlap */
2114 while (cnt && img_extents[cnt - 1].fe_off >= overlap)
2118 struct ceph_file_extent *ex = &img_extents[cnt - 1];
2120 /* trim final overlapping extent */
2121 if (ex->fe_off + ex->fe_len > overlap)
2122 ex->fe_len = overlap - ex->fe_off;
2125 *num_img_extents = cnt;
2129 * Determine the byte range(s) covered by either just the object extent
2130 * or the entire object in the parent image.
2132 static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
2135 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2138 if (!rbd_dev->parent_overlap)
2141 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
2142 entire ? 0 : obj_req->ex.oe_off,
2143 entire ? rbd_dev->layout.object_size :
2145 &obj_req->img_extents,
2146 &obj_req->num_img_extents);
2150 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2151 rbd_dev->parent_overlap);
2155 static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
2157 struct rbd_obj_request *obj_req = osd_req->r_priv;
2159 switch (obj_req->img_request->data_type) {
2160 case OBJ_REQUEST_BIO:
2161 osd_req_op_extent_osd_data_bio(osd_req, which,
2163 obj_req->ex.oe_len);
2165 case OBJ_REQUEST_BVECS:
2166 case OBJ_REQUEST_OWN_BVECS:
2167 rbd_assert(obj_req->bvec_pos.iter.bi_size ==
2168 obj_req->ex.oe_len);
2169 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
2170 osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
2171 &obj_req->bvec_pos);
2178 static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
2180 struct page **pages;
2183 * The response data for a STAT call consists of:
2190 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2192 return PTR_ERR(pages);
2194 osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
2195 osd_req_op_raw_data_in_pages(osd_req, which, pages,
2196 8 + sizeof(struct ceph_timespec),
2201 static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
2204 struct rbd_obj_request *obj_req = osd_req->r_priv;
2207 ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
2211 osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
2212 obj_req->copyup_bvec_count, bytes);
2216 static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
2218 obj_req->read_state = RBD_OBJ_READ_START;
2222 static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2225 struct rbd_obj_request *obj_req = osd_req->r_priv;
2226 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2229 if (!use_object_map(rbd_dev) ||
2230 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
2231 osd_req_op_alloc_hint_init(osd_req, which++,
2232 rbd_dev->layout.object_size,
2233 rbd_dev->layout.object_size,
2234 rbd_dev->opts->alloc_hint_flags);
2237 if (rbd_obj_is_entire(obj_req))
2238 opcode = CEPH_OSD_OP_WRITEFULL;
2240 opcode = CEPH_OSD_OP_WRITE;
2242 osd_req_op_extent_init(osd_req, which, opcode,
2243 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2244 rbd_osd_setup_data(osd_req, which);
2247 static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
2251 /* reverse map the entire object onto the parent */
2252 ret = rbd_obj_calc_img_extents(obj_req, true);
2256 obj_req->write_state = RBD_OBJ_WRITE_START;
2260 static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
2262 return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
2266 static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
2269 struct rbd_obj_request *obj_req = osd_req->r_priv;
2271 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
2272 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2273 osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
2275 osd_req_op_extent_init(osd_req, which,
2276 truncate_or_zero_opcode(obj_req),
2277 obj_req->ex.oe_off, obj_req->ex.oe_len,
2282 static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
2284 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2289 * Align the range to alloc_size boundary and punt on discards
2290 * that are too small to free up any space.
2292 * alloc_size == object_size && is_tail() is a special case for
2293 * filestore with filestore_punch_hole = false, needed to allow
2294 * truncate (in addition to delete).
2296 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
2297 !rbd_obj_is_tail(obj_req)) {
2298 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2299 next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
2300 rbd_dev->opts->alloc_size);
2301 if (off >= next_off)
2304 dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
2305 obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
2306 off, next_off - off);
2307 obj_req->ex.oe_off = off;
2308 obj_req->ex.oe_len = next_off - off;
2311 /* reverse map the entire object onto the parent */
2312 ret = rbd_obj_calc_img_extents(obj_req, true);
2316 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2317 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
2318 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2320 obj_req->write_state = RBD_OBJ_WRITE_START;
2324 static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
2327 struct rbd_obj_request *obj_req = osd_req->r_priv;
2330 if (rbd_obj_is_entire(obj_req)) {
2331 if (obj_req->num_img_extents) {
2332 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2333 osd_req_op_init(osd_req, which++,
2334 CEPH_OSD_OP_CREATE, 0);
2335 opcode = CEPH_OSD_OP_TRUNCATE;
2337 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2338 osd_req_op_init(osd_req, which++,
2339 CEPH_OSD_OP_DELETE, 0);
2343 opcode = truncate_or_zero_opcode(obj_req);
2347 osd_req_op_extent_init(osd_req, which, opcode,
2348 obj_req->ex.oe_off, obj_req->ex.oe_len,
2352 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
2356 /* reverse map the entire object onto the parent */
2357 ret = rbd_obj_calc_img_extents(obj_req, true);
2361 if (!obj_req->num_img_extents) {
2362 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2363 if (rbd_obj_is_entire(obj_req))
2364 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2367 obj_req->write_state = RBD_OBJ_WRITE_START;
2371 static int count_write_ops(struct rbd_obj_request *obj_req)
2373 struct rbd_img_request *img_req = obj_req->img_request;
2375 switch (img_req->op_type) {
2377 if (!use_object_map(img_req->rbd_dev) ||
2378 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2379 return 2; /* setallochint + write/writefull */
2381 return 1; /* write/writefull */
2382 case OBJ_OP_DISCARD:
2383 return 1; /* delete/truncate/zero */
2384 case OBJ_OP_ZEROOUT:
2385 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2386 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2387 return 2; /* create + truncate */
2389 return 1; /* delete/truncate/zero */
2395 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2398 struct rbd_obj_request *obj_req = osd_req->r_priv;
2400 switch (obj_req->img_request->op_type) {
2402 __rbd_osd_setup_write_ops(osd_req, which);
2404 case OBJ_OP_DISCARD:
2405 __rbd_osd_setup_discard_ops(osd_req, which);
2407 case OBJ_OP_ZEROOUT:
2408 __rbd_osd_setup_zeroout_ops(osd_req, which);
2416 * Prune the list of object requests (adjust offset and/or length, drop
2417 * redundant requests). Prepare object request state machines and image
2418 * request state machine for execution.
2420 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2422 struct rbd_obj_request *obj_req, *next_obj_req;
2425 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2426 switch (img_req->op_type) {
2428 ret = rbd_obj_init_read(obj_req);
2431 ret = rbd_obj_init_write(obj_req);
2433 case OBJ_OP_DISCARD:
2434 ret = rbd_obj_init_discard(obj_req);
2436 case OBJ_OP_ZEROOUT:
2437 ret = rbd_obj_init_zeroout(obj_req);
2445 rbd_img_obj_request_del(img_req, obj_req);
2450 img_req->state = RBD_IMG_START;
2454 union rbd_img_fill_iter {
2455 struct ceph_bio_iter bio_iter;
2456 struct ceph_bvec_iter bvec_iter;
2459 struct rbd_img_fill_ctx {
2460 enum obj_request_type pos_type;
2461 union rbd_img_fill_iter *pos;
2462 union rbd_img_fill_iter iter;
2463 ceph_object_extent_fn_t set_pos_fn;
2464 ceph_object_extent_fn_t count_fn;
2465 ceph_object_extent_fn_t copy_fn;
2468 static struct ceph_object_extent *alloc_object_extent(void *arg)
2470 struct rbd_img_request *img_req = arg;
2471 struct rbd_obj_request *obj_req;
2473 obj_req = rbd_obj_request_create();
2477 rbd_img_obj_request_add(img_req, obj_req);
2478 return &obj_req->ex;
2482 * While su != os && sc == 1 is technically not fancy (it's the same
2483 * layout as su == os && sc == 1), we can't use the nocopy path for it
2484 * because ->set_pos_fn() should be called only once per object.
2485 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
2486 * treat su != os && sc == 1 as fancy.
2488 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2490 return l->stripe_unit != l->object_size;
2493 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2494 struct ceph_file_extent *img_extents,
2495 u32 num_img_extents,
2496 struct rbd_img_fill_ctx *fctx)
2501 img_req->data_type = fctx->pos_type;
2504 * Create object requests and set each object request's starting
2505 * position in the provided bio (list) or bio_vec array.
2507 fctx->iter = *fctx->pos;
2508 for (i = 0; i < num_img_extents; i++) {
2509 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2510 img_extents[i].fe_off,
2511 img_extents[i].fe_len,
2512 &img_req->object_extents,
2513 alloc_object_extent, img_req,
2514 fctx->set_pos_fn, &fctx->iter);
2519 return __rbd_img_fill_request(img_req);
2523 * Map a list of image extents to a list of object extents, create the
2524 * corresponding object requests (normally each to a different object,
2525 * but not always) and add them to @img_req. For each object request,
2526 * set up its data descriptor to point to the corresponding chunk(s) of
2527 * @fctx->pos data buffer.
2529 * Because ceph_file_to_extents() will merge adjacent object extents
2530 * together, each object request's data descriptor may point to multiple
2531 * different chunks of @fctx->pos data buffer.
2533 * @fctx->pos data buffer is assumed to be large enough.
2535 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2536 struct ceph_file_extent *img_extents,
2537 u32 num_img_extents,
2538 struct rbd_img_fill_ctx *fctx)
2540 struct rbd_device *rbd_dev = img_req->rbd_dev;
2541 struct rbd_obj_request *obj_req;
2545 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2546 !rbd_layout_is_fancy(&rbd_dev->layout))
2547 return rbd_img_fill_request_nocopy(img_req, img_extents,
2548 num_img_extents, fctx);
2550 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2553 * Create object requests and determine ->bvec_count for each object
2554 * request. Note that ->bvec_count sum over all object requests may
2555 * be greater than the number of bio_vecs in the provided bio (list)
2556 * or bio_vec array because when mapped, those bio_vecs can straddle
2557 * stripe unit boundaries.
2559 fctx->iter = *fctx->pos;
2560 for (i = 0; i < num_img_extents; i++) {
2561 ret = ceph_file_to_extents(&rbd_dev->layout,
2562 img_extents[i].fe_off,
2563 img_extents[i].fe_len,
2564 &img_req->object_extents,
2565 alloc_object_extent, img_req,
2566 fctx->count_fn, &fctx->iter);
2571 for_each_obj_request(img_req, obj_req) {
2572 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2573 sizeof(*obj_req->bvec_pos.bvecs),
2575 if (!obj_req->bvec_pos.bvecs)
2580 * Fill in each object request's private bio_vec array, splitting and
2581 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2583 fctx->iter = *fctx->pos;
2584 for (i = 0; i < num_img_extents; i++) {
2585 ret = ceph_iterate_extents(&rbd_dev->layout,
2586 img_extents[i].fe_off,
2587 img_extents[i].fe_len,
2588 &img_req->object_extents,
2589 fctx->copy_fn, &fctx->iter);
2594 return __rbd_img_fill_request(img_req);
2597 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2600 struct ceph_file_extent ex = { off, len };
2601 union rbd_img_fill_iter dummy = {};
2602 struct rbd_img_fill_ctx fctx = {
2603 .pos_type = OBJ_REQUEST_NODATA,
2607 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2610 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2612 struct rbd_obj_request *obj_req =
2613 container_of(ex, struct rbd_obj_request, ex);
2614 struct ceph_bio_iter *it = arg;
2616 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2617 obj_req->bio_pos = *it;
2618 ceph_bio_iter_advance(it, bytes);
2621 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2623 struct rbd_obj_request *obj_req =
2624 container_of(ex, struct rbd_obj_request, ex);
2625 struct ceph_bio_iter *it = arg;
2627 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2628 ceph_bio_iter_advance_step(it, bytes, ({
2629 obj_req->bvec_count++;
2634 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2636 struct rbd_obj_request *obj_req =
2637 container_of(ex, struct rbd_obj_request, ex);
2638 struct ceph_bio_iter *it = arg;
2640 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2641 ceph_bio_iter_advance_step(it, bytes, ({
2642 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2643 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2647 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2648 struct ceph_file_extent *img_extents,
2649 u32 num_img_extents,
2650 struct ceph_bio_iter *bio_pos)
2652 struct rbd_img_fill_ctx fctx = {
2653 .pos_type = OBJ_REQUEST_BIO,
2654 .pos = (union rbd_img_fill_iter *)bio_pos,
2655 .set_pos_fn = set_bio_pos,
2656 .count_fn = count_bio_bvecs,
2657 .copy_fn = copy_bio_bvecs,
2660 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2664 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2665 u64 off, u64 len, struct bio *bio)
2667 struct ceph_file_extent ex = { off, len };
2668 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2670 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2673 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2675 struct rbd_obj_request *obj_req =
2676 container_of(ex, struct rbd_obj_request, ex);
2677 struct ceph_bvec_iter *it = arg;
2679 obj_req->bvec_pos = *it;
2680 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2681 ceph_bvec_iter_advance(it, bytes);
2684 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2686 struct rbd_obj_request *obj_req =
2687 container_of(ex, struct rbd_obj_request, ex);
2688 struct ceph_bvec_iter *it = arg;
2690 ceph_bvec_iter_advance_step(it, bytes, ({
2691 obj_req->bvec_count++;
2695 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2697 struct rbd_obj_request *obj_req =
2698 container_of(ex, struct rbd_obj_request, ex);
2699 struct ceph_bvec_iter *it = arg;
2701 ceph_bvec_iter_advance_step(it, bytes, ({
2702 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2703 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2707 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2708 struct ceph_file_extent *img_extents,
2709 u32 num_img_extents,
2710 struct ceph_bvec_iter *bvec_pos)
2712 struct rbd_img_fill_ctx fctx = {
2713 .pos_type = OBJ_REQUEST_BVECS,
2714 .pos = (union rbd_img_fill_iter *)bvec_pos,
2715 .set_pos_fn = set_bvec_pos,
2716 .count_fn = count_bvecs,
2717 .copy_fn = copy_bvecs,
2720 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2724 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2725 struct ceph_file_extent *img_extents,
2726 u32 num_img_extents,
2727 struct bio_vec *bvecs)
2729 struct ceph_bvec_iter it = {
2731 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2735 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2739 static void rbd_img_handle_request_work(struct work_struct *work)
2741 struct rbd_img_request *img_req =
2742 container_of(work, struct rbd_img_request, work);
2744 rbd_img_handle_request(img_req, img_req->work_result);
2747 static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2749 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2750 img_req->work_result = result;
2751 queue_work(rbd_wq, &img_req->work);
2754 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2756 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2758 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2759 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2763 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2764 obj_req->ex.oe_objno);
2768 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2770 struct ceph_osd_request *osd_req;
2773 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2774 if (IS_ERR(osd_req))
2775 return PTR_ERR(osd_req);
2777 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2778 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2779 rbd_osd_setup_data(osd_req, 0);
2780 rbd_osd_format_read(osd_req);
2782 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2786 rbd_osd_submit(osd_req);
2790 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2792 struct rbd_img_request *img_req = obj_req->img_request;
2793 struct rbd_device *parent = img_req->rbd_dev->parent;
2794 struct rbd_img_request *child_img_req;
2797 child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2801 rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
2802 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2803 child_img_req->obj_request = obj_req;
2805 down_read(&parent->header_rwsem);
2806 rbd_img_capture_header(child_img_req);
2807 up_read(&parent->header_rwsem);
2809 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2812 if (!rbd_img_is_write(img_req)) {
2813 switch (img_req->data_type) {
2814 case OBJ_REQUEST_BIO:
2815 ret = __rbd_img_fill_from_bio(child_img_req,
2816 obj_req->img_extents,
2817 obj_req->num_img_extents,
2820 case OBJ_REQUEST_BVECS:
2821 case OBJ_REQUEST_OWN_BVECS:
2822 ret = __rbd_img_fill_from_bvecs(child_img_req,
2823 obj_req->img_extents,
2824 obj_req->num_img_extents,
2825 &obj_req->bvec_pos);
2831 ret = rbd_img_fill_from_bvecs(child_img_req,
2832 obj_req->img_extents,
2833 obj_req->num_img_extents,
2834 obj_req->copyup_bvecs);
2837 rbd_img_request_destroy(child_img_req);
2841 /* avoid parent chain recursion */
2842 rbd_img_schedule(child_img_req, 0);
2846 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2848 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2852 switch (obj_req->read_state) {
2853 case RBD_OBJ_READ_START:
2854 rbd_assert(!*result);
2856 if (!rbd_obj_may_exist(obj_req)) {
2858 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2862 ret = rbd_obj_read_object(obj_req);
2867 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2869 case RBD_OBJ_READ_OBJECT:
2870 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2871 /* reverse map this object extent onto the parent */
2872 ret = rbd_obj_calc_img_extents(obj_req, false);
2877 if (obj_req->num_img_extents) {
2878 ret = rbd_obj_read_from_parent(obj_req);
2883 obj_req->read_state = RBD_OBJ_READ_PARENT;
2889 * -ENOENT means a hole in the image -- zero-fill the entire
2890 * length of the request. A short read also implies zero-fill
2891 * to the end of the request.
2893 if (*result == -ENOENT) {
2894 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2896 } else if (*result >= 0) {
2897 if (*result < obj_req->ex.oe_len)
2898 rbd_obj_zero_range(obj_req, *result,
2899 obj_req->ex.oe_len - *result);
2901 rbd_assert(*result == obj_req->ex.oe_len);
2905 case RBD_OBJ_READ_PARENT:
2907 * The parent image is read only up to the overlap -- zero-fill
2908 * from the overlap to the end of the request.
2911 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2913 if (obj_overlap < obj_req->ex.oe_len)
2914 rbd_obj_zero_range(obj_req, obj_overlap,
2915 obj_req->ex.oe_len - obj_overlap);
2923 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
2925 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2927 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2928 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2930 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
2931 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
2932 dout("%s %p noop for nonexistent\n", __func__, obj_req);
2941 * 0 - object map update sent
2942 * 1 - object map update isn't needed
2945 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
2947 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2950 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
2953 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
2954 new_state = OBJECT_PENDING;
2956 new_state = OBJECT_EXISTS;
2958 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
2961 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
2963 struct ceph_osd_request *osd_req;
2964 int num_ops = count_write_ops(obj_req);
2968 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
2969 num_ops++; /* stat */
2971 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
2972 if (IS_ERR(osd_req))
2973 return PTR_ERR(osd_req);
2975 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
2976 ret = rbd_osd_setup_stat(osd_req, which++);
2981 rbd_osd_setup_write_ops(osd_req, which);
2982 rbd_osd_format_write(osd_req);
2984 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2988 rbd_osd_submit(osd_req);
2993 * copyup_bvecs pages are never highmem pages
2995 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
2997 struct ceph_bvec_iter it = {
2999 .iter = { .bi_size = bytes },
3002 ceph_bvec_iter_advance_step(&it, bytes, ({
3003 if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len))
3009 #define MODS_ONLY U32_MAX
3011 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3014 struct ceph_osd_request *osd_req;
3017 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3018 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3020 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3021 if (IS_ERR(osd_req))
3022 return PTR_ERR(osd_req);
3024 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3028 rbd_osd_format_write(osd_req);
3030 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3034 rbd_osd_submit(osd_req);
3038 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3041 struct ceph_osd_request *osd_req;
3042 int num_ops = count_write_ops(obj_req);
3046 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3048 if (bytes != MODS_ONLY)
3049 num_ops++; /* copyup */
3051 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3052 if (IS_ERR(osd_req))
3053 return PTR_ERR(osd_req);
3055 if (bytes != MODS_ONLY) {
3056 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3061 rbd_osd_setup_write_ops(osd_req, which);
3062 rbd_osd_format_write(osd_req);
3064 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3068 rbd_osd_submit(osd_req);
3072 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3076 rbd_assert(!obj_req->copyup_bvecs);
3077 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3078 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3079 sizeof(*obj_req->copyup_bvecs),
3081 if (!obj_req->copyup_bvecs)
3084 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3085 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3086 struct page *page = alloc_page(GFP_NOIO);
3091 bvec_set_page(&obj_req->copyup_bvecs[i], page, len, 0);
3095 rbd_assert(!obj_overlap);
3100 * The target object doesn't exist. Read the data for the entire
3101 * target object up to the overlap point (if any) from the parent,
3102 * so we can use it for a copyup.
3104 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3106 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3109 rbd_assert(obj_req->num_img_extents);
3110 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3111 rbd_dev->parent_overlap);
3112 if (!obj_req->num_img_extents) {
3114 * The overlap has become 0 (most likely because the
3115 * image has been flattened). Re-submit the original write
3116 * request -- pass MODS_ONLY since the copyup isn't needed
3119 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3122 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3126 return rbd_obj_read_from_parent(obj_req);
3129 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3131 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3132 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3137 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3139 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3142 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3145 for (i = 0; i < snapc->num_snaps; i++) {
3146 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3147 i + 1 < snapc->num_snaps)
3148 new_state = OBJECT_EXISTS_CLEAN;
3150 new_state = OBJECT_EXISTS;
3152 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3155 obj_req->pending.result = ret;
3160 obj_req->pending.num_pending++;
3164 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3166 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3169 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3172 * Only send non-zero copyup data to save some I/O and network
3173 * bandwidth -- zero copyup data is equivalent to the object not
3176 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3179 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3181 * Send a copyup request with an empty snapshot context to
3182 * deep-copyup the object through all existing snapshots.
3183 * A second request with the current snapshot context will be
3184 * sent for the actual modification.
3186 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3188 obj_req->pending.result = ret;
3192 obj_req->pending.num_pending++;
3196 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3198 obj_req->pending.result = ret;
3202 obj_req->pending.num_pending++;
3205 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3207 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3211 switch (obj_req->copyup_state) {
3212 case RBD_OBJ_COPYUP_START:
3213 rbd_assert(!*result);
3215 ret = rbd_obj_copyup_read_parent(obj_req);
3220 if (obj_req->num_img_extents)
3221 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3223 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3225 case RBD_OBJ_COPYUP_READ_PARENT:
3229 if (is_zero_bvecs(obj_req->copyup_bvecs,
3230 rbd_obj_img_extents_bytes(obj_req))) {
3231 dout("%s %p detected zeros\n", __func__, obj_req);
3232 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3235 rbd_obj_copyup_object_maps(obj_req);
3236 if (!obj_req->pending.num_pending) {
3237 *result = obj_req->pending.result;
3238 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3241 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3243 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3244 if (!pending_result_dec(&obj_req->pending, result))
3247 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3249 rbd_warn(rbd_dev, "snap object map update failed: %d",
3254 rbd_obj_copyup_write_object(obj_req);
3255 if (!obj_req->pending.num_pending) {
3256 *result = obj_req->pending.result;
3257 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3260 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3262 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3263 if (!pending_result_dec(&obj_req->pending, result))
3266 case RBD_OBJ_COPYUP_WRITE_OBJECT:
3275 * 0 - object map update sent
3276 * 1 - object map update isn't needed
3279 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3281 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3282 u8 current_state = OBJECT_PENDING;
3284 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3287 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3290 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3294 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3296 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3300 switch (obj_req->write_state) {
3301 case RBD_OBJ_WRITE_START:
3302 rbd_assert(!*result);
3304 rbd_obj_set_copyup_enabled(obj_req);
3305 if (rbd_obj_write_is_noop(obj_req))
3308 ret = rbd_obj_write_pre_object_map(obj_req);
3313 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3317 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3319 rbd_warn(rbd_dev, "pre object map update failed: %d",
3323 ret = rbd_obj_write_object(obj_req);
3328 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3330 case RBD_OBJ_WRITE_OBJECT:
3331 if (*result == -ENOENT) {
3332 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3334 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3335 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3339 * On a non-existent object:
3340 * delete - -ENOENT, truncate/zero - 0
3342 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3348 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3350 case __RBD_OBJ_WRITE_COPYUP:
3351 if (!rbd_obj_advance_copyup(obj_req, result))
3354 case RBD_OBJ_WRITE_COPYUP:
3356 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3359 ret = rbd_obj_write_post_object_map(obj_req);
3364 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3368 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3370 rbd_warn(rbd_dev, "post object map update failed: %d",
3379 * Return true if @obj_req is completed.
3381 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3384 struct rbd_img_request *img_req = obj_req->img_request;
3385 struct rbd_device *rbd_dev = img_req->rbd_dev;
3388 mutex_lock(&obj_req->state_mutex);
3389 if (!rbd_img_is_write(img_req))
3390 done = rbd_obj_advance_read(obj_req, result);
3392 done = rbd_obj_advance_write(obj_req, result);
3393 mutex_unlock(&obj_req->state_mutex);
3395 if (done && *result) {
3396 rbd_assert(*result < 0);
3397 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3398 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3399 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3405 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3408 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3410 if (__rbd_obj_handle_request(obj_req, &result))
3411 rbd_img_handle_request(obj_req->img_request, result);
3414 static bool need_exclusive_lock(struct rbd_img_request *img_req)
3416 struct rbd_device *rbd_dev = img_req->rbd_dev;
3418 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3421 if (rbd_is_ro(rbd_dev))
3424 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3425 if (rbd_dev->opts->lock_on_read ||
3426 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3429 return rbd_img_is_write(img_req);
3432 static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3434 struct rbd_device *rbd_dev = img_req->rbd_dev;
3437 lockdep_assert_held(&rbd_dev->lock_rwsem);
3438 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3439 spin_lock(&rbd_dev->lock_lists_lock);
3440 rbd_assert(list_empty(&img_req->lock_item));
3442 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3444 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3445 spin_unlock(&rbd_dev->lock_lists_lock);
3449 static void rbd_lock_del_request(struct rbd_img_request *img_req)
3451 struct rbd_device *rbd_dev = img_req->rbd_dev;
3454 lockdep_assert_held(&rbd_dev->lock_rwsem);
3455 spin_lock(&rbd_dev->lock_lists_lock);
3456 rbd_assert(!list_empty(&img_req->lock_item));
3457 list_del_init(&img_req->lock_item);
3458 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3459 list_empty(&rbd_dev->running_list));
3460 spin_unlock(&rbd_dev->lock_lists_lock);
3462 complete(&rbd_dev->releasing_wait);
3465 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3467 struct rbd_device *rbd_dev = img_req->rbd_dev;
3469 if (!need_exclusive_lock(img_req))
3472 if (rbd_lock_add_request(img_req))
3475 if (rbd_dev->opts->exclusive) {
3476 WARN_ON(1); /* lock got released? */
3481 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3482 * and cancel_delayed_work() in wake_lock_waiters().
3484 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3485 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3489 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3491 struct rbd_device *rbd_dev = img_req->rbd_dev;
3492 struct rbd_obj_request *obj_req;
3494 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3495 rbd_assert(!need_exclusive_lock(img_req) ||
3496 __rbd_is_lock_owner(rbd_dev));
3498 if (rbd_img_is_write(img_req)) {
3499 rbd_assert(!img_req->snapc);
3500 down_read(&rbd_dev->header_rwsem);
3501 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
3502 up_read(&rbd_dev->header_rwsem);
3505 for_each_obj_request(img_req, obj_req) {
3508 if (__rbd_obj_handle_request(obj_req, &result)) {
3510 img_req->pending.result = result;
3514 img_req->pending.num_pending++;
3519 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3524 switch (img_req->state) {
3526 rbd_assert(!*result);
3528 ret = rbd_img_exclusive_lock(img_req);
3533 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3537 case RBD_IMG_EXCLUSIVE_LOCK:
3541 rbd_img_object_requests(img_req);
3542 if (!img_req->pending.num_pending) {
3543 *result = img_req->pending.result;
3544 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3547 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3549 case __RBD_IMG_OBJECT_REQUESTS:
3550 if (!pending_result_dec(&img_req->pending, result))
3553 case RBD_IMG_OBJECT_REQUESTS:
3561 * Return true if @img_req is completed.
3563 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3566 struct rbd_device *rbd_dev = img_req->rbd_dev;
3569 if (need_exclusive_lock(img_req)) {
3570 down_read(&rbd_dev->lock_rwsem);
3571 mutex_lock(&img_req->state_mutex);
3572 done = rbd_img_advance(img_req, result);
3574 rbd_lock_del_request(img_req);
3575 mutex_unlock(&img_req->state_mutex);
3576 up_read(&rbd_dev->lock_rwsem);
3578 mutex_lock(&img_req->state_mutex);
3579 done = rbd_img_advance(img_req, result);
3580 mutex_unlock(&img_req->state_mutex);
3583 if (done && *result) {
3584 rbd_assert(*result < 0);
3585 rbd_warn(rbd_dev, "%s%s result %d",
3586 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3587 obj_op_name(img_req->op_type), *result);
3592 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3595 if (!__rbd_img_handle_request(img_req, &result))
3598 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3599 struct rbd_obj_request *obj_req = img_req->obj_request;
3601 rbd_img_request_destroy(img_req);
3602 if (__rbd_obj_handle_request(obj_req, &result)) {
3603 img_req = obj_req->img_request;
3607 struct request *rq = blk_mq_rq_from_pdu(img_req);
3609 rbd_img_request_destroy(img_req);
3610 blk_mq_end_request(rq, errno_to_blk_status(result));
3614 static const struct rbd_client_id rbd_empty_cid;
3616 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3617 const struct rbd_client_id *rhs)
3619 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3622 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3624 struct rbd_client_id cid;
3626 mutex_lock(&rbd_dev->watch_mutex);
3627 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3628 cid.handle = rbd_dev->watch_cookie;
3629 mutex_unlock(&rbd_dev->watch_mutex);
3634 * lock_rwsem must be held for write
3636 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3637 const struct rbd_client_id *cid)
3639 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3640 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3641 cid->gid, cid->handle);
3642 rbd_dev->owner_cid = *cid; /* struct */
3645 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3647 mutex_lock(&rbd_dev->watch_mutex);
3648 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3649 mutex_unlock(&rbd_dev->watch_mutex);
3652 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3654 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3656 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3657 strcpy(rbd_dev->lock_cookie, cookie);
3658 rbd_set_owner_cid(rbd_dev, &cid);
3659 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3663 * lock_rwsem must be held for write
3665 static int rbd_lock(struct rbd_device *rbd_dev)
3667 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3671 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3672 rbd_dev->lock_cookie[0] != '\0');
3674 format_lock_cookie(rbd_dev, cookie);
3675 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3676 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3677 RBD_LOCK_TAG, "", 0);
3678 if (ret && ret != -EEXIST)
3681 __rbd_lock(rbd_dev, cookie);
3686 * lock_rwsem must be held for write
3688 static void rbd_unlock(struct rbd_device *rbd_dev)
3690 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3693 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3694 rbd_dev->lock_cookie[0] == '\0');
3696 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3697 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3698 if (ret && ret != -ENOENT)
3699 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3701 /* treat errors as the image is unlocked */
3702 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3703 rbd_dev->lock_cookie[0] = '\0';
3704 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3705 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3708 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3709 enum rbd_notify_op notify_op,
3710 struct page ***preply_pages,
3713 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3714 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3715 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3716 int buf_size = sizeof(buf);
3719 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3721 /* encode *LockPayload NotifyMessage (op + ClientId) */
3722 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3723 ceph_encode_32(&p, notify_op);
3724 ceph_encode_64(&p, cid.gid);
3725 ceph_encode_64(&p, cid.handle);
3727 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3728 &rbd_dev->header_oloc, buf, buf_size,
3729 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
3732 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3733 enum rbd_notify_op notify_op)
3735 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
3738 static void rbd_notify_acquired_lock(struct work_struct *work)
3740 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3741 acquired_lock_work);
3743 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3746 static void rbd_notify_released_lock(struct work_struct *work)
3748 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3749 released_lock_work);
3751 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3754 static int rbd_request_lock(struct rbd_device *rbd_dev)
3756 struct page **reply_pages;
3758 bool lock_owner_responded = false;
3761 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3763 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3764 &reply_pages, &reply_len);
3765 if (ret && ret != -ETIMEDOUT) {
3766 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3770 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3771 void *p = page_address(reply_pages[0]);
3772 void *const end = p + reply_len;
3775 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3780 ceph_decode_need(&p, end, 8 + 8, e_inval);
3781 p += 8 + 8; /* skip gid and cookie */
3783 ceph_decode_32_safe(&p, end, len, e_inval);
3787 if (lock_owner_responded) {
3789 "duplicate lock owners detected");
3794 lock_owner_responded = true;
3795 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3799 "failed to decode ResponseMessage: %d",
3804 ret = ceph_decode_32(&p);
3808 if (!lock_owner_responded) {
3809 rbd_warn(rbd_dev, "no lock owners detected");
3814 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3823 * Either image request state machine(s) or rbd_add_acquire_lock()
3826 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3828 struct rbd_img_request *img_req;
3830 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3831 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3833 cancel_delayed_work(&rbd_dev->lock_dwork);
3834 if (!completion_done(&rbd_dev->acquire_wait)) {
3835 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3836 list_empty(&rbd_dev->running_list));
3837 rbd_dev->acquire_err = result;
3838 complete_all(&rbd_dev->acquire_wait);
3842 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
3843 mutex_lock(&img_req->state_mutex);
3844 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3845 rbd_img_schedule(img_req, result);
3846 mutex_unlock(&img_req->state_mutex);
3849 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
3852 static bool locker_equal(const struct ceph_locker *lhs,
3853 const struct ceph_locker *rhs)
3855 return lhs->id.name.type == rhs->id.name.type &&
3856 lhs->id.name.num == rhs->id.name.num &&
3857 !strcmp(lhs->id.cookie, rhs->id.cookie) &&
3858 ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
3861 static void free_locker(struct ceph_locker *locker)
3864 ceph_free_lockers(locker, 1);
3867 static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
3869 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3870 struct ceph_locker *lockers;
3877 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3878 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3879 &lock_type, &lock_tag, &lockers, &num_lockers);
3881 rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
3882 return ERR_PTR(ret);
3885 if (num_lockers == 0) {
3886 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3891 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3892 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3897 if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) {
3898 rbd_warn(rbd_dev, "incompatible lock type detected");
3902 WARN_ON(num_lockers != 1);
3903 ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu",
3906 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3907 lockers[0].id.cookie);
3910 if (ceph_addr_is_blank(&lockers[0].info.addr)) {
3911 rbd_warn(rbd_dev, "locker has a blank address");
3915 dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n",
3916 __func__, rbd_dev, ENTITY_NAME(lockers[0].id.name),
3917 &lockers[0].info.addr.in_addr,
3918 le32_to_cpu(lockers[0].info.addr.nonce), handle);
3926 ceph_free_lockers(lockers, num_lockers);
3927 return ERR_PTR(-EBUSY);
3930 static int find_watcher(struct rbd_device *rbd_dev,
3931 const struct ceph_locker *locker)
3933 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3934 struct ceph_watch_item *watchers;
3940 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3941 &rbd_dev->header_oloc, &watchers,
3944 rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
3948 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3949 for (i = 0; i < num_watchers; i++) {
3951 * Ignore addr->type while comparing. This mimics
3952 * entity_addr_t::get_legacy_str() + strcmp().
3954 if (ceph_addr_equal_no_type(&watchers[i].addr,
3955 &locker->info.addr) &&
3956 watchers[i].cookie == cookie) {
3957 struct rbd_client_id cid = {
3958 .gid = le64_to_cpu(watchers[i].name.num),
3962 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3963 rbd_dev, cid.gid, cid.handle);
3964 rbd_set_owner_cid(rbd_dev, &cid);
3970 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3978 * lock_rwsem must be held for write
3980 static int rbd_try_lock(struct rbd_device *rbd_dev)
3982 struct ceph_client *client = rbd_dev->rbd_client->client;
3983 struct ceph_locker *locker, *refreshed_locker;
3987 locker = refreshed_locker = NULL;
3989 ret = rbd_lock(rbd_dev);
3992 if (ret != -EBUSY) {
3993 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
3997 /* determine if the current lock holder is still alive */
3998 locker = get_lock_owner_info(rbd_dev);
3999 if (IS_ERR(locker)) {
4000 ret = PTR_ERR(locker);
4007 ret = find_watcher(rbd_dev, locker);
4009 goto out; /* request lock or error */
4011 refreshed_locker = get_lock_owner_info(rbd_dev);
4012 if (IS_ERR(refreshed_locker)) {
4013 ret = PTR_ERR(refreshed_locker);
4014 refreshed_locker = NULL;
4017 if (!refreshed_locker ||
4018 !locker_equal(locker, refreshed_locker))
4021 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4022 ENTITY_NAME(locker->id.name));
4024 ret = ceph_monc_blocklist_add(&client->monc,
4025 &locker->info.addr);
4027 rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
4028 ENTITY_NAME(locker->id.name), ret);
4032 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4033 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4034 locker->id.cookie, &locker->id.name);
4035 if (ret && ret != -ENOENT) {
4036 rbd_warn(rbd_dev, "failed to break header lock: %d",
4042 free_locker(refreshed_locker);
4043 free_locker(locker);
4047 free_locker(refreshed_locker);
4048 free_locker(locker);
4052 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4056 ret = rbd_dev_refresh(rbd_dev);
4060 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4061 ret = rbd_object_map_open(rbd_dev);
4072 * 1 - caller should call rbd_request_lock()
4075 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4079 down_read(&rbd_dev->lock_rwsem);
4080 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4081 rbd_dev->lock_state);
4082 if (__rbd_is_lock_owner(rbd_dev)) {
4083 up_read(&rbd_dev->lock_rwsem);
4087 up_read(&rbd_dev->lock_rwsem);
4088 down_write(&rbd_dev->lock_rwsem);
4089 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4090 rbd_dev->lock_state);
4091 if (__rbd_is_lock_owner(rbd_dev)) {
4092 up_write(&rbd_dev->lock_rwsem);
4096 ret = rbd_try_lock(rbd_dev);
4098 rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
4102 up_write(&rbd_dev->lock_rwsem);
4106 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4107 rbd_assert(list_empty(&rbd_dev->running_list));
4109 ret = rbd_post_acquire_action(rbd_dev);
4111 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4113 * Can't stay in RBD_LOCK_STATE_LOCKED because
4114 * rbd_lock_add_request() would let the request through,
4115 * assuming that e.g. object map is locked and loaded.
4117 rbd_unlock(rbd_dev);
4121 wake_lock_waiters(rbd_dev, ret);
4122 up_write(&rbd_dev->lock_rwsem);
4126 static void rbd_acquire_lock(struct work_struct *work)
4128 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4129 struct rbd_device, lock_dwork);
4132 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4134 ret = rbd_try_acquire_lock(rbd_dev);
4136 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4140 ret = rbd_request_lock(rbd_dev);
4141 if (ret == -ETIMEDOUT) {
4142 goto again; /* treat this as a dead client */
4143 } else if (ret == -EROFS) {
4144 rbd_warn(rbd_dev, "peer will not release lock");
4145 down_write(&rbd_dev->lock_rwsem);
4146 wake_lock_waiters(rbd_dev, ret);
4147 up_write(&rbd_dev->lock_rwsem);
4148 } else if (ret < 0) {
4149 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4150 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4154 * lock owner acked, but resend if we don't see them
4157 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4159 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4160 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4164 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4166 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4167 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4169 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4173 * Ensure that all in-flight IO is flushed.
4175 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4176 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4177 if (list_empty(&rbd_dev->running_list))
4180 up_write(&rbd_dev->lock_rwsem);
4181 wait_for_completion(&rbd_dev->releasing_wait);
4183 down_write(&rbd_dev->lock_rwsem);
4184 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4187 rbd_assert(list_empty(&rbd_dev->running_list));
4191 static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4193 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4194 rbd_object_map_close(rbd_dev);
4197 static void __rbd_release_lock(struct rbd_device *rbd_dev)
4199 rbd_assert(list_empty(&rbd_dev->running_list));
4201 rbd_pre_release_action(rbd_dev);
4202 rbd_unlock(rbd_dev);
4206 * lock_rwsem must be held for write
4208 static void rbd_release_lock(struct rbd_device *rbd_dev)
4210 if (!rbd_quiesce_lock(rbd_dev))
4213 __rbd_release_lock(rbd_dev);
4216 * Give others a chance to grab the lock - we would re-acquire
4217 * almost immediately if we got new IO while draining the running
4218 * list otherwise. We need to ack our own notifications, so this
4219 * lock_dwork will be requeued from rbd_handle_released_lock() by
4220 * way of maybe_kick_acquire().
4222 cancel_delayed_work(&rbd_dev->lock_dwork);
4225 static void rbd_release_lock_work(struct work_struct *work)
4227 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4230 down_write(&rbd_dev->lock_rwsem);
4231 rbd_release_lock(rbd_dev);
4232 up_write(&rbd_dev->lock_rwsem);
4235 static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4239 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4240 if (__rbd_is_lock_owner(rbd_dev))
4243 spin_lock(&rbd_dev->lock_lists_lock);
4244 have_requests = !list_empty(&rbd_dev->acquiring_list);
4245 spin_unlock(&rbd_dev->lock_lists_lock);
4246 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4247 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4248 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4252 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4255 struct rbd_client_id cid = { 0 };
4257 if (struct_v >= 2) {
4258 cid.gid = ceph_decode_64(p);
4259 cid.handle = ceph_decode_64(p);
4262 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4264 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4265 down_write(&rbd_dev->lock_rwsem);
4266 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4267 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
4268 __func__, rbd_dev, cid.gid, cid.handle);
4270 rbd_set_owner_cid(rbd_dev, &cid);
4272 downgrade_write(&rbd_dev->lock_rwsem);
4274 down_read(&rbd_dev->lock_rwsem);
4277 maybe_kick_acquire(rbd_dev);
4278 up_read(&rbd_dev->lock_rwsem);
4281 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4284 struct rbd_client_id cid = { 0 };
4286 if (struct_v >= 2) {
4287 cid.gid = ceph_decode_64(p);
4288 cid.handle = ceph_decode_64(p);
4291 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4293 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4294 down_write(&rbd_dev->lock_rwsem);
4295 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4296 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
4297 __func__, rbd_dev, cid.gid, cid.handle,
4298 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4300 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4302 downgrade_write(&rbd_dev->lock_rwsem);
4304 down_read(&rbd_dev->lock_rwsem);
4307 maybe_kick_acquire(rbd_dev);
4308 up_read(&rbd_dev->lock_rwsem);
4312 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4313 * ResponseMessage is needed.
4315 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4318 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4319 struct rbd_client_id cid = { 0 };
4322 if (struct_v >= 2) {
4323 cid.gid = ceph_decode_64(p);
4324 cid.handle = ceph_decode_64(p);
4327 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4329 if (rbd_cid_equal(&cid, &my_cid))
4332 down_read(&rbd_dev->lock_rwsem);
4333 if (__rbd_is_lock_owner(rbd_dev)) {
4334 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4335 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4339 * encode ResponseMessage(0) so the peer can detect
4344 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4345 if (!rbd_dev->opts->exclusive) {
4346 dout("%s rbd_dev %p queueing unlock_work\n",
4348 queue_work(rbd_dev->task_wq,
4349 &rbd_dev->unlock_work);
4351 /* refuse to release the lock */
4358 up_read(&rbd_dev->lock_rwsem);
4362 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4363 u64 notify_id, u64 cookie, s32 *result)
4365 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4366 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4367 int buf_size = sizeof(buf);
4373 /* encode ResponseMessage */
4374 ceph_start_encoding(&p, 1, 1,
4375 buf_size - CEPH_ENCODING_START_BLK_LEN);
4376 ceph_encode_32(&p, *result);
4381 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4382 &rbd_dev->header_oloc, notify_id, cookie,
4385 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4388 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4391 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4392 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4395 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4396 u64 notify_id, u64 cookie, s32 result)
4398 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4399 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4402 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4403 u64 notifier_id, void *data, size_t data_len)
4405 struct rbd_device *rbd_dev = arg;
4407 void *const end = p + data_len;
4413 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4414 __func__, rbd_dev, cookie, notify_id, data_len);
4416 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4419 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4424 notify_op = ceph_decode_32(&p);
4426 /* legacy notification for header updates */
4427 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4431 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4432 switch (notify_op) {
4433 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4434 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4435 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4437 case RBD_NOTIFY_OP_RELEASED_LOCK:
4438 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4439 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4441 case RBD_NOTIFY_OP_REQUEST_LOCK:
4442 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4444 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4447 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4449 case RBD_NOTIFY_OP_HEADER_UPDATE:
4450 ret = rbd_dev_refresh(rbd_dev);
4452 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4454 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4457 if (rbd_is_lock_owner(rbd_dev))
4458 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4459 cookie, -EOPNOTSUPP);
4461 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4466 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4468 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4470 struct rbd_device *rbd_dev = arg;
4472 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4474 down_write(&rbd_dev->lock_rwsem);
4475 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4476 up_write(&rbd_dev->lock_rwsem);
4478 mutex_lock(&rbd_dev->watch_mutex);
4479 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4480 __rbd_unregister_watch(rbd_dev);
4481 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4483 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4485 mutex_unlock(&rbd_dev->watch_mutex);
4489 * watch_mutex must be locked
4491 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4493 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4494 struct ceph_osd_linger_request *handle;
4496 rbd_assert(!rbd_dev->watch_handle);
4497 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4499 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4500 &rbd_dev->header_oloc, rbd_watch_cb,
4501 rbd_watch_errcb, rbd_dev);
4503 return PTR_ERR(handle);
4505 rbd_dev->watch_handle = handle;
4510 * watch_mutex must be locked
4512 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4514 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4517 rbd_assert(rbd_dev->watch_handle);
4518 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4520 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4522 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4524 rbd_dev->watch_handle = NULL;
4527 static int rbd_register_watch(struct rbd_device *rbd_dev)
4531 mutex_lock(&rbd_dev->watch_mutex);
4532 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4533 ret = __rbd_register_watch(rbd_dev);
4537 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4538 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4541 mutex_unlock(&rbd_dev->watch_mutex);
4545 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4547 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4549 cancel_work_sync(&rbd_dev->acquired_lock_work);
4550 cancel_work_sync(&rbd_dev->released_lock_work);
4551 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4552 cancel_work_sync(&rbd_dev->unlock_work);
4556 * header_rwsem must not be held to avoid a deadlock with
4557 * rbd_dev_refresh() when flushing notifies.
4559 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4561 cancel_tasks_sync(rbd_dev);
4563 mutex_lock(&rbd_dev->watch_mutex);
4564 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4565 __rbd_unregister_watch(rbd_dev);
4566 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4567 mutex_unlock(&rbd_dev->watch_mutex);
4569 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4570 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4574 * lock_rwsem must be held for write
4576 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4578 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4582 if (!rbd_quiesce_lock(rbd_dev))
4585 format_lock_cookie(rbd_dev, cookie);
4586 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4587 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4588 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4589 RBD_LOCK_TAG, cookie);
4591 if (ret != -EOPNOTSUPP)
4592 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4596 * Lock cookie cannot be updated on older OSDs, so do
4597 * a manual release and queue an acquire.
4599 __rbd_release_lock(rbd_dev);
4600 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4602 __rbd_lock(rbd_dev, cookie);
4603 wake_lock_waiters(rbd_dev, 0);
4607 static void rbd_reregister_watch(struct work_struct *work)
4609 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4610 struct rbd_device, watch_dwork);
4613 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4615 mutex_lock(&rbd_dev->watch_mutex);
4616 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4617 mutex_unlock(&rbd_dev->watch_mutex);
4621 ret = __rbd_register_watch(rbd_dev);
4623 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4624 if (ret != -EBLOCKLISTED && ret != -ENOENT) {
4625 queue_delayed_work(rbd_dev->task_wq,
4626 &rbd_dev->watch_dwork,
4628 mutex_unlock(&rbd_dev->watch_mutex);
4632 mutex_unlock(&rbd_dev->watch_mutex);
4633 down_write(&rbd_dev->lock_rwsem);
4634 wake_lock_waiters(rbd_dev, ret);
4635 up_write(&rbd_dev->lock_rwsem);
4639 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4640 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4641 mutex_unlock(&rbd_dev->watch_mutex);
4643 down_write(&rbd_dev->lock_rwsem);
4644 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4645 rbd_reacquire_lock(rbd_dev);
4646 up_write(&rbd_dev->lock_rwsem);
4648 ret = rbd_dev_refresh(rbd_dev);
4650 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4654 * Synchronous osd object method call. Returns the number of bytes
4655 * returned in the outbound buffer, or a negative error code.
4657 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4658 struct ceph_object_id *oid,
4659 struct ceph_object_locator *oloc,
4660 const char *method_name,
4661 const void *outbound,
4662 size_t outbound_size,
4664 size_t inbound_size)
4666 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4667 struct page *req_page = NULL;
4668 struct page *reply_page;
4672 * Method calls are ultimately read operations. The result
4673 * should placed into the inbound buffer provided. They
4674 * also supply outbound data--parameters for the object
4675 * method. Currently if this is present it will be a
4679 if (outbound_size > PAGE_SIZE)
4682 req_page = alloc_page(GFP_KERNEL);
4686 memcpy(page_address(req_page), outbound, outbound_size);
4689 reply_page = alloc_page(GFP_KERNEL);
4692 __free_page(req_page);
4696 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4697 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4698 &reply_page, &inbound_size);
4700 memcpy(inbound, page_address(reply_page), inbound_size);
4705 __free_page(req_page);
4706 __free_page(reply_page);
4710 static void rbd_queue_workfn(struct work_struct *work)
4712 struct rbd_img_request *img_request =
4713 container_of(work, struct rbd_img_request, work);
4714 struct rbd_device *rbd_dev = img_request->rbd_dev;
4715 enum obj_operation_type op_type = img_request->op_type;
4716 struct request *rq = blk_mq_rq_from_pdu(img_request);
4717 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4718 u64 length = blk_rq_bytes(rq);
4722 /* Ignore/skip any zero-length requests */
4724 dout("%s: zero-length request\n", __func__);
4726 goto err_img_request;
4729 blk_mq_start_request(rq);
4731 down_read(&rbd_dev->header_rwsem);
4732 mapping_size = rbd_dev->mapping.size;
4733 rbd_img_capture_header(img_request);
4734 up_read(&rbd_dev->header_rwsem);
4736 if (offset + length > mapping_size) {
4737 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4738 length, mapping_size);
4740 goto err_img_request;
4743 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4744 img_request, obj_op_name(op_type), offset, length);
4746 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4747 result = rbd_img_fill_nodata(img_request, offset, length);
4749 result = rbd_img_fill_from_bio(img_request, offset, length,
4752 goto err_img_request;
4754 rbd_img_handle_request(img_request, 0);
4758 rbd_img_request_destroy(img_request);
4760 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4761 obj_op_name(op_type), length, offset, result);
4762 blk_mq_end_request(rq, errno_to_blk_status(result));
4765 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4766 const struct blk_mq_queue_data *bd)
4768 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4769 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4770 enum obj_operation_type op_type;
4772 switch (req_op(bd->rq)) {
4773 case REQ_OP_DISCARD:
4774 op_type = OBJ_OP_DISCARD;
4776 case REQ_OP_WRITE_ZEROES:
4777 op_type = OBJ_OP_ZEROOUT;
4780 op_type = OBJ_OP_WRITE;
4783 op_type = OBJ_OP_READ;
4786 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4787 return BLK_STS_IOERR;
4790 rbd_img_request_init(img_req, rbd_dev, op_type);
4792 if (rbd_img_is_write(img_req)) {
4793 if (rbd_is_ro(rbd_dev)) {
4794 rbd_warn(rbd_dev, "%s on read-only mapping",
4795 obj_op_name(img_req->op_type));
4796 return BLK_STS_IOERR;
4798 rbd_assert(!rbd_is_snap(rbd_dev));
4801 INIT_WORK(&img_req->work, rbd_queue_workfn);
4802 queue_work(rbd_wq, &img_req->work);
4806 static void rbd_free_disk(struct rbd_device *rbd_dev)
4808 put_disk(rbd_dev->disk);
4809 blk_mq_free_tag_set(&rbd_dev->tag_set);
4810 rbd_dev->disk = NULL;
4813 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4814 struct ceph_object_id *oid,
4815 struct ceph_object_locator *oloc,
4816 void *buf, int buf_len)
4819 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4820 struct ceph_osd_request *req;
4821 struct page **pages;
4822 int num_pages = calc_pages_for(0, buf_len);
4825 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4829 ceph_oid_copy(&req->r_base_oid, oid);
4830 ceph_oloc_copy(&req->r_base_oloc, oloc);
4831 req->r_flags = CEPH_OSD_FLAG_READ;
4833 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4834 if (IS_ERR(pages)) {
4835 ret = PTR_ERR(pages);
4839 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4840 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4843 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4847 ceph_osdc_start_request(osdc, req);
4848 ret = ceph_osdc_wait_request(osdc, req);
4850 ceph_copy_from_page_vector(pages, buf, 0, ret);
4853 ceph_osdc_put_request(req);
4858 * Read the complete header for the given rbd device. On successful
4859 * return, the rbd_dev->header field will contain up-to-date
4860 * information about the image.
4862 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4864 struct rbd_image_header_ondisk *ondisk = NULL;
4871 * The complete header will include an array of its 64-bit
4872 * snapshot ids, followed by the names of those snapshots as
4873 * a contiguous block of NUL-terminated strings. Note that
4874 * the number of snapshots could change by the time we read
4875 * it in, in which case we re-read it.
4882 size = sizeof (*ondisk);
4883 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4885 ondisk = kmalloc(size, GFP_KERNEL);
4889 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4890 &rbd_dev->header_oloc, ondisk, size);
4893 if ((size_t)ret < size) {
4895 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4899 if (!rbd_dev_ondisk_valid(ondisk)) {
4901 rbd_warn(rbd_dev, "invalid header");
4905 names_size = le64_to_cpu(ondisk->snap_names_len);
4906 want_count = snap_count;
4907 snap_count = le32_to_cpu(ondisk->snap_count);
4908 } while (snap_count != want_count);
4910 ret = rbd_header_from_disk(rbd_dev, ondisk);
4917 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4922 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4923 * try to update its size. If REMOVING is set, updating size
4924 * is just useless work since the device can't be opened.
4926 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4927 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4928 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4929 dout("setting size to %llu sectors", (unsigned long long)size);
4930 set_capacity_and_notify(rbd_dev->disk, size);
4934 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4939 down_write(&rbd_dev->header_rwsem);
4940 mapping_size = rbd_dev->mapping.size;
4942 ret = rbd_dev_header_info(rbd_dev);
4947 * If there is a parent, see if it has disappeared due to the
4948 * mapped image getting flattened.
4950 if (rbd_dev->parent) {
4951 ret = rbd_dev_v2_parent_info(rbd_dev);
4956 rbd_assert(!rbd_is_snap(rbd_dev));
4957 rbd_dev->mapping.size = rbd_dev->header.image_size;
4960 up_write(&rbd_dev->header_rwsem);
4961 if (!ret && mapping_size != rbd_dev->mapping.size)
4962 rbd_dev_update_size(rbd_dev);
4967 static const struct blk_mq_ops rbd_mq_ops = {
4968 .queue_rq = rbd_queue_rq,
4971 static int rbd_init_disk(struct rbd_device *rbd_dev)
4973 struct gendisk *disk;
4974 struct request_queue *q;
4975 unsigned int objset_bytes =
4976 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
4979 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4980 rbd_dev->tag_set.ops = &rbd_mq_ops;
4981 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4982 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4983 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
4984 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
4985 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
4987 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4991 disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev);
4993 err = PTR_ERR(disk);
4998 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
5000 disk->major = rbd_dev->major;
5001 disk->first_minor = rbd_dev->minor;
5003 disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT);
5005 disk->minors = RBD_MINORS_PER_MAJOR;
5006 disk->fops = &rbd_bd_ops;
5007 disk->private_data = rbd_dev;
5009 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
5010 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
5012 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
5013 q->limits.max_sectors = queue_max_hw_sectors(q);
5014 blk_queue_max_segments(q, USHRT_MAX);
5015 blk_queue_max_segment_size(q, UINT_MAX);
5016 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
5017 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
5019 if (rbd_dev->opts->trim) {
5020 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5021 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
5022 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
5025 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
5026 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
5028 rbd_dev->disk = disk;
5032 blk_mq_free_tag_set(&rbd_dev->tag_set);
5040 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5042 return container_of(dev, struct rbd_device, dev);
5045 static ssize_t rbd_size_show(struct device *dev,
5046 struct device_attribute *attr, char *buf)
5048 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5050 return sprintf(buf, "%llu\n",
5051 (unsigned long long)rbd_dev->mapping.size);
5054 static ssize_t rbd_features_show(struct device *dev,
5055 struct device_attribute *attr, char *buf)
5057 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5059 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5062 static ssize_t rbd_major_show(struct device *dev,
5063 struct device_attribute *attr, char *buf)
5065 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5068 return sprintf(buf, "%d\n", rbd_dev->major);
5070 return sprintf(buf, "(none)\n");
5073 static ssize_t rbd_minor_show(struct device *dev,
5074 struct device_attribute *attr, char *buf)
5076 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5078 return sprintf(buf, "%d\n", rbd_dev->minor);
5081 static ssize_t rbd_client_addr_show(struct device *dev,
5082 struct device_attribute *attr, char *buf)
5084 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5085 struct ceph_entity_addr *client_addr =
5086 ceph_client_addr(rbd_dev->rbd_client->client);
5088 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5089 le32_to_cpu(client_addr->nonce));
5092 static ssize_t rbd_client_id_show(struct device *dev,
5093 struct device_attribute *attr, char *buf)
5095 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5097 return sprintf(buf, "client%lld\n",
5098 ceph_client_gid(rbd_dev->rbd_client->client));
5101 static ssize_t rbd_cluster_fsid_show(struct device *dev,
5102 struct device_attribute *attr, char *buf)
5104 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5106 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5109 static ssize_t rbd_config_info_show(struct device *dev,
5110 struct device_attribute *attr, char *buf)
5112 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5114 if (!capable(CAP_SYS_ADMIN))
5117 return sprintf(buf, "%s\n", rbd_dev->config_info);
5120 static ssize_t rbd_pool_show(struct device *dev,
5121 struct device_attribute *attr, char *buf)
5123 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5125 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5128 static ssize_t rbd_pool_id_show(struct device *dev,
5129 struct device_attribute *attr, char *buf)
5131 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5133 return sprintf(buf, "%llu\n",
5134 (unsigned long long) rbd_dev->spec->pool_id);
5137 static ssize_t rbd_pool_ns_show(struct device *dev,
5138 struct device_attribute *attr, char *buf)
5140 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5142 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5145 static ssize_t rbd_name_show(struct device *dev,
5146 struct device_attribute *attr, char *buf)
5148 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5150 if (rbd_dev->spec->image_name)
5151 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5153 return sprintf(buf, "(unknown)\n");
5156 static ssize_t rbd_image_id_show(struct device *dev,
5157 struct device_attribute *attr, char *buf)
5159 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5161 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5165 * Shows the name of the currently-mapped snapshot (or
5166 * RBD_SNAP_HEAD_NAME for the base image).
5168 static ssize_t rbd_snap_show(struct device *dev,
5169 struct device_attribute *attr,
5172 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5174 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5177 static ssize_t rbd_snap_id_show(struct device *dev,
5178 struct device_attribute *attr, char *buf)
5180 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5182 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5186 * For a v2 image, shows the chain of parent images, separated by empty
5187 * lines. For v1 images or if there is no parent, shows "(no parent
5190 static ssize_t rbd_parent_show(struct device *dev,
5191 struct device_attribute *attr,
5194 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5197 if (!rbd_dev->parent)
5198 return sprintf(buf, "(no parent image)\n");
5200 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5201 struct rbd_spec *spec = rbd_dev->parent_spec;
5203 count += sprintf(&buf[count], "%s"
5204 "pool_id %llu\npool_name %s\n"
5206 "image_id %s\nimage_name %s\n"
5207 "snap_id %llu\nsnap_name %s\n"
5209 !count ? "" : "\n", /* first? */
5210 spec->pool_id, spec->pool_name,
5211 spec->pool_ns ?: "",
5212 spec->image_id, spec->image_name ?: "(unknown)",
5213 spec->snap_id, spec->snap_name,
5214 rbd_dev->parent_overlap);
5220 static ssize_t rbd_image_refresh(struct device *dev,
5221 struct device_attribute *attr,
5225 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5228 if (!capable(CAP_SYS_ADMIN))
5231 ret = rbd_dev_refresh(rbd_dev);
5238 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5239 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5240 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5241 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5242 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5243 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5244 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5245 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5246 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5247 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5248 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5249 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5250 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5251 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5252 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5253 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5254 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
5256 static struct attribute *rbd_attrs[] = {
5257 &dev_attr_size.attr,
5258 &dev_attr_features.attr,
5259 &dev_attr_major.attr,
5260 &dev_attr_minor.attr,
5261 &dev_attr_client_addr.attr,
5262 &dev_attr_client_id.attr,
5263 &dev_attr_cluster_fsid.attr,
5264 &dev_attr_config_info.attr,
5265 &dev_attr_pool.attr,
5266 &dev_attr_pool_id.attr,
5267 &dev_attr_pool_ns.attr,
5268 &dev_attr_name.attr,
5269 &dev_attr_image_id.attr,
5270 &dev_attr_current_snap.attr,
5271 &dev_attr_snap_id.attr,
5272 &dev_attr_parent.attr,
5273 &dev_attr_refresh.attr,
5277 static struct attribute_group rbd_attr_group = {
5281 static const struct attribute_group *rbd_attr_groups[] = {
5286 static void rbd_dev_release(struct device *dev);
5288 static const struct device_type rbd_device_type = {
5290 .groups = rbd_attr_groups,
5291 .release = rbd_dev_release,
5294 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5296 kref_get(&spec->kref);
5301 static void rbd_spec_free(struct kref *kref);
5302 static void rbd_spec_put(struct rbd_spec *spec)
5305 kref_put(&spec->kref, rbd_spec_free);
5308 static struct rbd_spec *rbd_spec_alloc(void)
5310 struct rbd_spec *spec;
5312 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5316 spec->pool_id = CEPH_NOPOOL;
5317 spec->snap_id = CEPH_NOSNAP;
5318 kref_init(&spec->kref);
5323 static void rbd_spec_free(struct kref *kref)
5325 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5327 kfree(spec->pool_name);
5328 kfree(spec->pool_ns);
5329 kfree(spec->image_id);
5330 kfree(spec->image_name);
5331 kfree(spec->snap_name);
5335 static void rbd_dev_free(struct rbd_device *rbd_dev)
5337 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5338 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5340 ceph_oid_destroy(&rbd_dev->header_oid);
5341 ceph_oloc_destroy(&rbd_dev->header_oloc);
5342 kfree(rbd_dev->config_info);
5344 rbd_put_client(rbd_dev->rbd_client);
5345 rbd_spec_put(rbd_dev->spec);
5346 kfree(rbd_dev->opts);
5350 static void rbd_dev_release(struct device *dev)
5352 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5353 bool need_put = !!rbd_dev->opts;
5356 destroy_workqueue(rbd_dev->task_wq);
5357 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5360 rbd_dev_free(rbd_dev);
5363 * This is racy, but way better than putting module outside of
5364 * the release callback. The race window is pretty small, so
5365 * doing something similar to dm (dm-builtin.c) is overkill.
5368 module_put(THIS_MODULE);
5371 static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
5373 struct rbd_device *rbd_dev;
5375 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5379 spin_lock_init(&rbd_dev->lock);
5380 INIT_LIST_HEAD(&rbd_dev->node);
5381 init_rwsem(&rbd_dev->header_rwsem);
5383 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5384 ceph_oid_init(&rbd_dev->header_oid);
5385 rbd_dev->header_oloc.pool = spec->pool_id;
5386 if (spec->pool_ns) {
5387 WARN_ON(!*spec->pool_ns);
5388 rbd_dev->header_oloc.pool_ns =
5389 ceph_find_or_create_string(spec->pool_ns,
5390 strlen(spec->pool_ns));
5393 mutex_init(&rbd_dev->watch_mutex);
5394 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5395 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5397 init_rwsem(&rbd_dev->lock_rwsem);
5398 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5399 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5400 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5401 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5402 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5403 spin_lock_init(&rbd_dev->lock_lists_lock);
5404 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5405 INIT_LIST_HEAD(&rbd_dev->running_list);
5406 init_completion(&rbd_dev->acquire_wait);
5407 init_completion(&rbd_dev->releasing_wait);
5409 spin_lock_init(&rbd_dev->object_map_lock);
5411 rbd_dev->dev.bus = &rbd_bus_type;
5412 rbd_dev->dev.type = &rbd_device_type;
5413 rbd_dev->dev.parent = &rbd_root_dev;
5414 device_initialize(&rbd_dev->dev);
5420 * Create a mapping rbd_dev.
5422 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5423 struct rbd_spec *spec,
5424 struct rbd_options *opts)
5426 struct rbd_device *rbd_dev;
5428 rbd_dev = __rbd_dev_create(spec);
5432 /* get an id and fill in device name */
5433 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5434 minor_to_rbd_dev_id(1 << MINORBITS),
5436 if (rbd_dev->dev_id < 0)
5439 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5440 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5442 if (!rbd_dev->task_wq)
5445 /* we have a ref from do_rbd_add() */
5446 __module_get(THIS_MODULE);
5448 rbd_dev->rbd_client = rbdc;
5449 rbd_dev->spec = spec;
5450 rbd_dev->opts = opts;
5452 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5456 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5458 rbd_dev_free(rbd_dev);
5462 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5465 put_device(&rbd_dev->dev);
5469 * Get the size and object order for an image snapshot, or if
5470 * snap_id is CEPH_NOSNAP, gets this information for the base
5473 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5474 u8 *order, u64 *snap_size)
5476 __le64 snapid = cpu_to_le64(snap_id);
5481 } __attribute__ ((packed)) size_buf = { 0 };
5483 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5484 &rbd_dev->header_oloc, "get_size",
5485 &snapid, sizeof(snapid),
5486 &size_buf, sizeof(size_buf));
5487 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5490 if (ret < sizeof (size_buf))
5494 *order = size_buf.order;
5495 dout(" order %u", (unsigned int)*order);
5497 *snap_size = le64_to_cpu(size_buf.size);
5499 dout(" snap_id 0x%016llx snap_size = %llu\n",
5500 (unsigned long long)snap_id,
5501 (unsigned long long)*snap_size);
5506 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5508 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5509 &rbd_dev->header.obj_order,
5510 &rbd_dev->header.image_size);
5513 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5520 /* Response will be an encoded string, which includes a length */
5521 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5522 reply_buf = kzalloc(size, GFP_KERNEL);
5526 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5527 &rbd_dev->header_oloc, "get_object_prefix",
5528 NULL, 0, reply_buf, size);
5529 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5534 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
5535 p + ret, NULL, GFP_NOIO);
5538 if (IS_ERR(rbd_dev->header.object_prefix)) {
5539 ret = PTR_ERR(rbd_dev->header.object_prefix);
5540 rbd_dev->header.object_prefix = NULL;
5542 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5550 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5551 bool read_only, u64 *snap_features)
5560 } __attribute__ ((packed)) features_buf = { 0 };
5564 features_in.snap_id = cpu_to_le64(snap_id);
5565 features_in.read_only = read_only;
5567 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5568 &rbd_dev->header_oloc, "get_features",
5569 &features_in, sizeof(features_in),
5570 &features_buf, sizeof(features_buf));
5571 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5574 if (ret < sizeof (features_buf))
5577 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5579 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5584 *snap_features = le64_to_cpu(features_buf.features);
5586 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5587 (unsigned long long)snap_id,
5588 (unsigned long long)*snap_features,
5589 (unsigned long long)le64_to_cpu(features_buf.incompat));
5594 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5596 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5598 &rbd_dev->header.features);
5602 * These are generic image flags, but since they are used only for
5603 * object map, store them in rbd_dev->object_map_flags.
5605 * For the same reason, this function is called only on object map
5606 * (re)load and not on header refresh.
5608 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5610 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5614 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5615 &rbd_dev->header_oloc, "get_flags",
5616 &snapid, sizeof(snapid),
5617 &flags, sizeof(flags));
5620 if (ret < sizeof(flags))
5623 rbd_dev->object_map_flags = le64_to_cpu(flags);
5627 struct parent_image_info {
5629 const char *pool_ns;
5630 const char *image_id;
5638 * The caller is responsible for @pii.
5640 static int decode_parent_image_spec(void **p, void *end,
5641 struct parent_image_info *pii)
5647 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5648 &struct_v, &struct_len);
5652 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5653 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5654 if (IS_ERR(pii->pool_ns)) {
5655 ret = PTR_ERR(pii->pool_ns);
5656 pii->pool_ns = NULL;
5659 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5660 if (IS_ERR(pii->image_id)) {
5661 ret = PTR_ERR(pii->image_id);
5662 pii->image_id = NULL;
5665 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5672 static int __get_parent_info(struct rbd_device *rbd_dev,
5673 struct page *req_page,
5674 struct page *reply_page,
5675 struct parent_image_info *pii)
5677 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5678 size_t reply_len = PAGE_SIZE;
5682 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5683 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5684 req_page, sizeof(u64), &reply_page, &reply_len);
5686 return ret == -EOPNOTSUPP ? 1 : ret;
5688 p = page_address(reply_page);
5689 end = p + reply_len;
5690 ret = decode_parent_image_spec(&p, end, pii);
5694 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5695 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5696 req_page, sizeof(u64), &reply_page, &reply_len);
5700 p = page_address(reply_page);
5701 end = p + reply_len;
5702 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5703 if (pii->has_overlap)
5704 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5713 * The caller is responsible for @pii.
5715 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5716 struct page *req_page,
5717 struct page *reply_page,
5718 struct parent_image_info *pii)
5720 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5721 size_t reply_len = PAGE_SIZE;
5725 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5726 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5727 req_page, sizeof(u64), &reply_page, &reply_len);
5731 p = page_address(reply_page);
5732 end = p + reply_len;
5733 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5734 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5735 if (IS_ERR(pii->image_id)) {
5736 ret = PTR_ERR(pii->image_id);
5737 pii->image_id = NULL;
5740 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5741 pii->has_overlap = true;
5742 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5750 static int get_parent_info(struct rbd_device *rbd_dev,
5751 struct parent_image_info *pii)
5753 struct page *req_page, *reply_page;
5757 req_page = alloc_page(GFP_KERNEL);
5761 reply_page = alloc_page(GFP_KERNEL);
5763 __free_page(req_page);
5767 p = page_address(req_page);
5768 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5769 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5771 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5774 __free_page(req_page);
5775 __free_page(reply_page);
5779 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5781 struct rbd_spec *parent_spec;
5782 struct parent_image_info pii = { 0 };
5785 parent_spec = rbd_spec_alloc();
5789 ret = get_parent_info(rbd_dev, &pii);
5793 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5794 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
5795 pii.has_overlap, pii.overlap);
5797 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
5799 * Either the parent never existed, or we have
5800 * record of it but the image got flattened so it no
5801 * longer has a parent. When the parent of a
5802 * layered image disappears we immediately set the
5803 * overlap to 0. The effect of this is that all new
5804 * requests will be treated as if the image had no
5807 * If !pii.has_overlap, the parent image spec is not
5808 * applicable. It's there to avoid duplication in each
5811 if (rbd_dev->parent_overlap) {
5812 rbd_dev->parent_overlap = 0;
5813 rbd_dev_parent_put(rbd_dev);
5814 pr_info("%s: clone image has been flattened\n",
5815 rbd_dev->disk->disk_name);
5818 goto out; /* No parent? No problem. */
5821 /* The ceph file layout needs to fit pool id in 32 bits */
5824 if (pii.pool_id > (u64)U32_MAX) {
5825 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5826 (unsigned long long)pii.pool_id, U32_MAX);
5831 * The parent won't change (except when the clone is
5832 * flattened, already handled that). So we only need to
5833 * record the parent spec we have not already done so.
5835 if (!rbd_dev->parent_spec) {
5836 parent_spec->pool_id = pii.pool_id;
5837 if (pii.pool_ns && *pii.pool_ns) {
5838 parent_spec->pool_ns = pii.pool_ns;
5841 parent_spec->image_id = pii.image_id;
5842 pii.image_id = NULL;
5843 parent_spec->snap_id = pii.snap_id;
5845 rbd_dev->parent_spec = parent_spec;
5846 parent_spec = NULL; /* rbd_dev now owns this */
5850 * We always update the parent overlap. If it's zero we issue
5851 * a warning, as we will proceed as if there was no parent.
5855 /* refresh, careful to warn just once */
5856 if (rbd_dev->parent_overlap)
5858 "clone now standalone (overlap became 0)");
5861 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5864 rbd_dev->parent_overlap = pii.overlap;
5870 kfree(pii.image_id);
5871 rbd_spec_put(parent_spec);
5875 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5879 __le64 stripe_count;
5880 } __attribute__ ((packed)) striping_info_buf = { 0 };
5881 size_t size = sizeof (striping_info_buf);
5885 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5886 &rbd_dev->header_oloc, "get_stripe_unit_count",
5887 NULL, 0, &striping_info_buf, size);
5888 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5894 p = &striping_info_buf;
5895 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
5896 rbd_dev->header.stripe_count = ceph_decode_64(&p);
5900 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
5902 __le64 data_pool_id;
5905 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5906 &rbd_dev->header_oloc, "get_data_pool",
5907 NULL, 0, &data_pool_id, sizeof(data_pool_id));
5910 if (ret < sizeof(data_pool_id))
5913 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
5914 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
5918 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5920 CEPH_DEFINE_OID_ONSTACK(oid);
5921 size_t image_id_size;
5926 void *reply_buf = NULL;
5928 char *image_name = NULL;
5931 rbd_assert(!rbd_dev->spec->image_name);
5933 len = strlen(rbd_dev->spec->image_id);
5934 image_id_size = sizeof (__le32) + len;
5935 image_id = kmalloc(image_id_size, GFP_KERNEL);
5940 end = image_id + image_id_size;
5941 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5943 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5944 reply_buf = kmalloc(size, GFP_KERNEL);
5948 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5949 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5950 "dir_get_name", image_id, image_id_size,
5955 end = reply_buf + ret;
5957 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5958 if (IS_ERR(image_name))
5961 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5969 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5971 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5972 const char *snap_name;
5975 /* Skip over names until we find the one we are looking for */
5977 snap_name = rbd_dev->header.snap_names;
5978 while (which < snapc->num_snaps) {
5979 if (!strcmp(name, snap_name))
5980 return snapc->snaps[which];
5981 snap_name += strlen(snap_name) + 1;
5987 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5989 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5994 for (which = 0; !found && which < snapc->num_snaps; which++) {
5995 const char *snap_name;
5997 snap_id = snapc->snaps[which];
5998 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
5999 if (IS_ERR(snap_name)) {
6000 /* ignore no-longer existing snapshots */
6001 if (PTR_ERR(snap_name) == -ENOENT)
6006 found = !strcmp(name, snap_name);
6009 return found ? snap_id : CEPH_NOSNAP;
6013 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
6014 * no snapshot by that name is found, or if an error occurs.
6016 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6018 if (rbd_dev->image_format == 1)
6019 return rbd_v1_snap_id_by_name(rbd_dev, name);
6021 return rbd_v2_snap_id_by_name(rbd_dev, name);
6025 * An image being mapped will have everything but the snap id.
6027 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6029 struct rbd_spec *spec = rbd_dev->spec;
6031 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
6032 rbd_assert(spec->image_id && spec->image_name);
6033 rbd_assert(spec->snap_name);
6035 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
6038 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6039 if (snap_id == CEPH_NOSNAP)
6042 spec->snap_id = snap_id;
6044 spec->snap_id = CEPH_NOSNAP;
6051 * A parent image will have all ids but none of the names.
6053 * All names in an rbd spec are dynamically allocated. It's OK if we
6054 * can't figure out the name for an image id.
6056 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6058 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6059 struct rbd_spec *spec = rbd_dev->spec;
6060 const char *pool_name;
6061 const char *image_name;
6062 const char *snap_name;
6065 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6066 rbd_assert(spec->image_id);
6067 rbd_assert(spec->snap_id != CEPH_NOSNAP);
6069 /* Get the pool name; we have to make our own copy of this */
6071 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6073 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6076 pool_name = kstrdup(pool_name, GFP_KERNEL);
6080 /* Fetch the image name; tolerate failure here */
6082 image_name = rbd_dev_image_name(rbd_dev);
6084 rbd_warn(rbd_dev, "unable to get image name");
6086 /* Fetch the snapshot name */
6088 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6089 if (IS_ERR(snap_name)) {
6090 ret = PTR_ERR(snap_name);
6094 spec->pool_name = pool_name;
6095 spec->image_name = image_name;
6096 spec->snap_name = snap_name;
6106 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
6115 struct ceph_snap_context *snapc;
6119 * We'll need room for the seq value (maximum snapshot id),
6120 * snapshot count, and array of that many snapshot ids.
6121 * For now we have a fixed upper limit on the number we're
6122 * prepared to receive.
6124 size = sizeof (__le64) + sizeof (__le32) +
6125 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6126 reply_buf = kzalloc(size, GFP_KERNEL);
6130 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6131 &rbd_dev->header_oloc, "get_snapcontext",
6132 NULL, 0, reply_buf, size);
6133 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6138 end = reply_buf + ret;
6140 ceph_decode_64_safe(&p, end, seq, out);
6141 ceph_decode_32_safe(&p, end, snap_count, out);
6144 * Make sure the reported number of snapshot ids wouldn't go
6145 * beyond the end of our buffer. But before checking that,
6146 * make sure the computed size of the snapshot context we
6147 * allocate is representable in a size_t.
6149 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6154 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6158 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6164 for (i = 0; i < snap_count; i++)
6165 snapc->snaps[i] = ceph_decode_64(&p);
6167 ceph_put_snap_context(rbd_dev->header.snapc);
6168 rbd_dev->header.snapc = snapc;
6170 dout(" snap context seq = %llu, snap_count = %u\n",
6171 (unsigned long long)seq, (unsigned int)snap_count);
6178 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6189 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6190 reply_buf = kmalloc(size, GFP_KERNEL);
6192 return ERR_PTR(-ENOMEM);
6194 snapid = cpu_to_le64(snap_id);
6195 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6196 &rbd_dev->header_oloc, "get_snapshot_name",
6197 &snapid, sizeof(snapid), reply_buf, size);
6198 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6200 snap_name = ERR_PTR(ret);
6205 end = reply_buf + ret;
6206 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6207 if (IS_ERR(snap_name))
6210 dout(" snap_id 0x%016llx snap_name = %s\n",
6211 (unsigned long long)snap_id, snap_name);
6218 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
6220 bool first_time = rbd_dev->header.object_prefix == NULL;
6223 ret = rbd_dev_v2_image_size(rbd_dev);
6228 ret = rbd_dev_v2_header_onetime(rbd_dev);
6233 ret = rbd_dev_v2_snap_context(rbd_dev);
6234 if (ret && first_time) {
6235 kfree(rbd_dev->header.object_prefix);
6236 rbd_dev->header.object_prefix = NULL;
6242 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
6244 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6246 if (rbd_dev->image_format == 1)
6247 return rbd_dev_v1_header_info(rbd_dev);
6249 return rbd_dev_v2_header_info(rbd_dev);
6253 * Skips over white space at *buf, and updates *buf to point to the
6254 * first found non-space character (if any). Returns the length of
6255 * the token (string of non-white space characters) found. Note
6256 * that *buf must be terminated with '\0'.
6258 static inline size_t next_token(const char **buf)
6261 * These are the characters that produce nonzero for
6262 * isspace() in the "C" and "POSIX" locales.
6264 static const char spaces[] = " \f\n\r\t\v";
6266 *buf += strspn(*buf, spaces); /* Find start of token */
6268 return strcspn(*buf, spaces); /* Return token length */
6272 * Finds the next token in *buf, dynamically allocates a buffer big
6273 * enough to hold a copy of it, and copies the token into the new
6274 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6275 * that a duplicate buffer is created even for a zero-length token.
6277 * Returns a pointer to the newly-allocated duplicate, or a null
6278 * pointer if memory for the duplicate was not available. If
6279 * the lenp argument is a non-null pointer, the length of the token
6280 * (not including the '\0') is returned in *lenp.
6282 * If successful, the *buf pointer will be updated to point beyond
6283 * the end of the found token.
6285 * Note: uses GFP_KERNEL for allocation.
6287 static inline char *dup_token(const char **buf, size_t *lenp)
6292 len = next_token(buf);
6293 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6296 *(dup + len) = '\0';
6305 static int rbd_parse_param(struct fs_parameter *param,
6306 struct rbd_parse_opts_ctx *pctx)
6308 struct rbd_options *opt = pctx->opts;
6309 struct fs_parse_result result;
6310 struct p_log log = {.prefix = "rbd"};
6313 ret = ceph_parse_param(param, pctx->copts, NULL);
6314 if (ret != -ENOPARAM)
6317 token = __fs_parse(&log, rbd_parameters, param, &result);
6318 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6320 if (token == -ENOPARAM)
6321 return inval_plog(&log, "Unknown parameter '%s'",
6327 case Opt_queue_depth:
6328 if (result.uint_32 < 1)
6330 opt->queue_depth = result.uint_32;
6332 case Opt_alloc_size:
6333 if (result.uint_32 < SECTOR_SIZE)
6335 if (!is_power_of_2(result.uint_32))
6336 return inval_plog(&log, "alloc_size must be a power of 2");
6337 opt->alloc_size = result.uint_32;
6339 case Opt_lock_timeout:
6340 /* 0 is "wait forever" (i.e. infinite timeout) */
6341 if (result.uint_32 > INT_MAX / 1000)
6343 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6346 kfree(pctx->spec->pool_ns);
6347 pctx->spec->pool_ns = param->string;
6348 param->string = NULL;
6350 case Opt_compression_hint:
6351 switch (result.uint_32) {
6352 case Opt_compression_hint_none:
6353 opt->alloc_hint_flags &=
6354 ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
6355 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
6357 case Opt_compression_hint_compressible:
6358 opt->alloc_hint_flags |=
6359 CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6360 opt->alloc_hint_flags &=
6361 ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6363 case Opt_compression_hint_incompressible:
6364 opt->alloc_hint_flags |=
6365 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6366 opt->alloc_hint_flags &=
6367 ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6374 opt->read_only = true;
6376 case Opt_read_write:
6377 opt->read_only = false;
6379 case Opt_lock_on_read:
6380 opt->lock_on_read = true;
6383 opt->exclusive = true;
6395 return inval_plog(&log, "%s out of range", param->key);
6399 * This duplicates most of generic_parse_monolithic(), untying it from
6400 * fs_context and skipping standard superblock and security options.
6402 static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6407 dout("%s '%s'\n", __func__, options);
6408 while ((key = strsep(&options, ",")) != NULL) {
6410 struct fs_parameter param = {
6412 .type = fs_value_is_flag,
6414 char *value = strchr(key, '=');
6421 v_len = strlen(value);
6422 param.string = kmemdup_nul(value, v_len,
6426 param.type = fs_value_is_string;
6430 ret = rbd_parse_param(¶m, pctx);
6431 kfree(param.string);
6441 * Parse the options provided for an "rbd add" (i.e., rbd image
6442 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6443 * and the data written is passed here via a NUL-terminated buffer.
6444 * Returns 0 if successful or an error code otherwise.
6446 * The information extracted from these options is recorded in
6447 * the other parameters which return dynamically-allocated
6450 * The address of a pointer that will refer to a ceph options
6451 * structure. Caller must release the returned pointer using
6452 * ceph_destroy_options() when it is no longer needed.
6454 * Address of an rbd options pointer. Fully initialized by
6455 * this function; caller must release with kfree().
6457 * Address of an rbd image specification pointer. Fully
6458 * initialized by this function based on parsed options.
6459 * Caller must release with rbd_spec_put().
6461 * The options passed take this form:
6462 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
6465 * A comma-separated list of one or more monitor addresses.
6466 * A monitor address is an ip address, optionally followed
6467 * by a port number (separated by a colon).
6468 * I.e.: ip1[:port1][,ip2[:port2]...]
6470 * A comma-separated list of ceph and/or rbd options.
6472 * The name of the rados pool containing the rbd image.
6474 * The name of the image in that pool to map.
6476 * An optional snapshot id. If provided, the mapping will
6477 * present data from the image at the time that snapshot was
6478 * created. The image head is used if no snapshot id is
6479 * provided. Snapshot mappings are always read-only.
6481 static int rbd_add_parse_args(const char *buf,
6482 struct ceph_options **ceph_opts,
6483 struct rbd_options **opts,
6484 struct rbd_spec **rbd_spec)
6488 const char *mon_addrs;
6490 size_t mon_addrs_size;
6491 struct rbd_parse_opts_ctx pctx = { 0 };
6494 /* The first four tokens are required */
6496 len = next_token(&buf);
6498 rbd_warn(NULL, "no monitor address(es) provided");
6502 mon_addrs_size = len;
6506 options = dup_token(&buf, NULL);
6510 rbd_warn(NULL, "no options provided");
6514 pctx.spec = rbd_spec_alloc();
6518 pctx.spec->pool_name = dup_token(&buf, NULL);
6519 if (!pctx.spec->pool_name)
6521 if (!*pctx.spec->pool_name) {
6522 rbd_warn(NULL, "no pool name provided");
6526 pctx.spec->image_name = dup_token(&buf, NULL);
6527 if (!pctx.spec->image_name)
6529 if (!*pctx.spec->image_name) {
6530 rbd_warn(NULL, "no image name provided");
6535 * Snapshot name is optional; default is to use "-"
6536 * (indicating the head/no snapshot).
6538 len = next_token(&buf);
6540 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6541 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6542 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
6543 ret = -ENAMETOOLONG;
6546 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6549 *(snap_name + len) = '\0';
6550 pctx.spec->snap_name = snap_name;
6552 pctx.copts = ceph_alloc_options();
6556 /* Initialize all rbd options to the defaults */
6558 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6562 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6563 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6564 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6565 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6566 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6567 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6568 pctx.opts->trim = RBD_TRIM_DEFAULT;
6570 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL,
6575 ret = rbd_parse_options(options, &pctx);
6579 *ceph_opts = pctx.copts;
6581 *rbd_spec = pctx.spec;
6589 ceph_destroy_options(pctx.copts);
6590 rbd_spec_put(pctx.spec);
6595 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6597 down_write(&rbd_dev->lock_rwsem);
6598 if (__rbd_is_lock_owner(rbd_dev))
6599 __rbd_release_lock(rbd_dev);
6600 up_write(&rbd_dev->lock_rwsem);
6604 * If the wait is interrupted, an error is returned even if the lock
6605 * was successfully acquired. rbd_dev_image_unlock() will release it
6608 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6612 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6613 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6616 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6620 if (rbd_is_ro(rbd_dev))
6623 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6624 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6625 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6626 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6628 ret = rbd_dev->acquire_err;
6630 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6634 rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
6640 * The lock may have been released by now, unless automatic lock
6641 * transitions are disabled.
6643 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
6648 * An rbd format 2 image has a unique identifier, distinct from the
6649 * name given to it by the user. Internally, that identifier is
6650 * what's used to specify the names of objects related to the image.
6652 * A special "rbd id" object is used to map an rbd image name to its
6653 * id. If that object doesn't exist, then there is no v2 rbd image
6654 * with the supplied name.
6656 * This function will record the given rbd_dev's image_id field if
6657 * it can be determined, and in that case will return 0. If any
6658 * errors occur a negative errno will be returned and the rbd_dev's
6659 * image_id field will be unchanged (and should be NULL).
6661 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6665 CEPH_DEFINE_OID_ONSTACK(oid);
6670 * When probing a parent image, the image id is already
6671 * known (and the image name likely is not). There's no
6672 * need to fetch the image id again in this case. We
6673 * do still need to set the image format though.
6675 if (rbd_dev->spec->image_id) {
6676 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6682 * First, see if the format 2 image id file exists, and if
6683 * so, get the image's persistent id from it.
6685 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6686 rbd_dev->spec->image_name);
6690 dout("rbd id object name is %s\n", oid.name);
6692 /* Response will be an encoded string, which includes a length */
6693 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6694 response = kzalloc(size, GFP_NOIO);
6700 /* If it doesn't exist we'll assume it's a format 1 image */
6702 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6705 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6706 if (ret == -ENOENT) {
6707 image_id = kstrdup("", GFP_KERNEL);
6708 ret = image_id ? 0 : -ENOMEM;
6710 rbd_dev->image_format = 1;
6711 } else if (ret >= 0) {
6714 image_id = ceph_extract_encoded_string(&p, p + ret,
6716 ret = PTR_ERR_OR_ZERO(image_id);
6718 rbd_dev->image_format = 2;
6722 rbd_dev->spec->image_id = image_id;
6723 dout("image_id is %s\n", image_id);
6727 ceph_oid_destroy(&oid);
6732 * Undo whatever state changes are made by v1 or v2 header info
6735 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6737 struct rbd_image_header *header;
6739 rbd_dev_parent_put(rbd_dev);
6740 rbd_object_map_free(rbd_dev);
6741 rbd_dev_mapping_clear(rbd_dev);
6743 /* Free dynamic fields from the header, then zero it out */
6745 header = &rbd_dev->header;
6746 ceph_put_snap_context(header->snapc);
6747 kfree(header->snap_sizes);
6748 kfree(header->snap_names);
6749 kfree(header->object_prefix);
6750 memset(header, 0, sizeof (*header));
6753 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
6757 ret = rbd_dev_v2_object_prefix(rbd_dev);
6762 * Get the and check features for the image. Currently the
6763 * features are assumed to never change.
6765 ret = rbd_dev_v2_features(rbd_dev);
6769 /* If the image supports fancy striping, get its parameters */
6771 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
6772 ret = rbd_dev_v2_striping_info(rbd_dev);
6777 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
6778 ret = rbd_dev_v2_data_pool(rbd_dev);
6783 rbd_init_layout(rbd_dev);
6787 rbd_dev->header.features = 0;
6788 kfree(rbd_dev->header.object_prefix);
6789 rbd_dev->header.object_prefix = NULL;
6794 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6795 * rbd_dev_image_probe() recursion depth, which means it's also the
6796 * length of the already discovered part of the parent chain.
6798 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6800 struct rbd_device *parent = NULL;
6803 if (!rbd_dev->parent_spec)
6806 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6807 pr_info("parent chain is too long (%d)\n", depth);
6812 parent = __rbd_dev_create(rbd_dev->parent_spec);
6819 * Images related by parent/child relationships always share
6820 * rbd_client and spec/parent_spec, so bump their refcounts.
6822 parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
6823 parent->spec = rbd_spec_get(rbd_dev->parent_spec);
6825 __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
6827 ret = rbd_dev_image_probe(parent, depth);
6831 rbd_dev->parent = parent;
6832 atomic_set(&rbd_dev->parent_ref, 1);
6836 rbd_dev_unparent(rbd_dev);
6837 rbd_dev_destroy(parent);
6841 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6843 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6844 rbd_free_disk(rbd_dev);
6846 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6850 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6853 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6857 /* Record our major and minor device numbers. */
6859 if (!single_major) {
6860 ret = register_blkdev(0, rbd_dev->name);
6862 goto err_out_unlock;
6864 rbd_dev->major = ret;
6867 rbd_dev->major = rbd_major;
6868 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6871 /* Set up the blkdev mapping. */
6873 ret = rbd_init_disk(rbd_dev);
6875 goto err_out_blkdev;
6877 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6878 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6880 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6884 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6885 up_write(&rbd_dev->header_rwsem);
6889 rbd_free_disk(rbd_dev);
6892 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6894 up_write(&rbd_dev->header_rwsem);
6898 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6900 struct rbd_spec *spec = rbd_dev->spec;
6903 /* Record the header object name for this rbd image. */
6905 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6906 if (rbd_dev->image_format == 1)
6907 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6908 spec->image_name, RBD_SUFFIX);
6910 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6911 RBD_HEADER_PREFIX, spec->image_id);
6916 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6919 pr_info("image %s/%s%s%s does not exist\n",
6920 rbd_dev->spec->pool_name,
6921 rbd_dev->spec->pool_ns ?: "",
6922 rbd_dev->spec->pool_ns ? "/" : "",
6923 rbd_dev->spec->image_name);
6925 pr_info("snap %s/%s%s%s@%s does not exist\n",
6926 rbd_dev->spec->pool_name,
6927 rbd_dev->spec->pool_ns ?: "",
6928 rbd_dev->spec->pool_ns ? "/" : "",
6929 rbd_dev->spec->image_name,
6930 rbd_dev->spec->snap_name);
6934 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6936 if (!rbd_is_ro(rbd_dev))
6937 rbd_unregister_watch(rbd_dev);
6939 rbd_dev_unprobe(rbd_dev);
6940 rbd_dev->image_format = 0;
6941 kfree(rbd_dev->spec->image_id);
6942 rbd_dev->spec->image_id = NULL;
6946 * Probe for the existence of the header object for the given rbd
6947 * device. If this image is the one being mapped (i.e., not a
6948 * parent), initiate a watch on its header object before using that
6949 * object to get detailed information about the rbd image.
6951 * On success, returns with header_rwsem held for write if called
6954 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6956 bool need_watch = !rbd_is_ro(rbd_dev);
6960 * Get the id from the image id object. Unless there's an
6961 * error, rbd_dev->spec->image_id will be filled in with
6962 * a dynamically-allocated string, and rbd_dev->image_format
6963 * will be set to either 1 or 2.
6965 ret = rbd_dev_image_id(rbd_dev);
6969 ret = rbd_dev_header_name(rbd_dev);
6971 goto err_out_format;
6974 ret = rbd_register_watch(rbd_dev);
6977 rbd_print_dne(rbd_dev, false);
6978 goto err_out_format;
6983 down_write(&rbd_dev->header_rwsem);
6985 ret = rbd_dev_header_info(rbd_dev);
6987 if (ret == -ENOENT && !need_watch)
6988 rbd_print_dne(rbd_dev, false);
6993 * If this image is the one being mapped, we have pool name and
6994 * id, image name and id, and snap name - need to fill snap id.
6995 * Otherwise this is a parent image, identified by pool, image
6996 * and snap ids - need to fill in names for those ids.
6999 ret = rbd_spec_fill_snap_id(rbd_dev);
7001 ret = rbd_spec_fill_names(rbd_dev);
7004 rbd_print_dne(rbd_dev, true);
7008 ret = rbd_dev_mapping_set(rbd_dev);
7012 if (rbd_is_snap(rbd_dev) &&
7013 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
7014 ret = rbd_object_map_load(rbd_dev);
7019 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
7020 ret = rbd_dev_v2_parent_info(rbd_dev);
7025 ret = rbd_dev_probe_parent(rbd_dev, depth);
7029 dout("discovered format %u image, header name is %s\n",
7030 rbd_dev->image_format, rbd_dev->header_oid.name);
7035 up_write(&rbd_dev->header_rwsem);
7037 rbd_unregister_watch(rbd_dev);
7038 rbd_dev_unprobe(rbd_dev);
7040 rbd_dev->image_format = 0;
7041 kfree(rbd_dev->spec->image_id);
7042 rbd_dev->spec->image_id = NULL;
7046 static ssize_t do_rbd_add(const char *buf, size_t count)
7048 struct rbd_device *rbd_dev = NULL;
7049 struct ceph_options *ceph_opts = NULL;
7050 struct rbd_options *rbd_opts = NULL;
7051 struct rbd_spec *spec = NULL;
7052 struct rbd_client *rbdc;
7055 if (!capable(CAP_SYS_ADMIN))
7058 if (!try_module_get(THIS_MODULE))
7061 /* parse add command */
7062 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
7066 rbdc = rbd_get_client(ceph_opts);
7073 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
7076 pr_info("pool %s does not exist\n", spec->pool_name);
7077 goto err_out_client;
7079 spec->pool_id = (u64)rc;
7081 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7084 goto err_out_client;
7086 rbdc = NULL; /* rbd_dev now owns this */
7087 spec = NULL; /* rbd_dev now owns this */
7088 rbd_opts = NULL; /* rbd_dev now owns this */
7090 /* if we are mapping a snapshot it will be a read-only mapping */
7091 if (rbd_dev->opts->read_only ||
7092 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7093 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7095 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7096 if (!rbd_dev->config_info) {
7098 goto err_out_rbd_dev;
7101 rc = rbd_dev_image_probe(rbd_dev, 0);
7103 goto err_out_rbd_dev;
7105 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7106 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7107 rbd_dev->layout.object_size);
7108 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7111 rc = rbd_dev_device_setup(rbd_dev);
7113 goto err_out_image_probe;
7115 rc = rbd_add_acquire_lock(rbd_dev);
7117 goto err_out_image_lock;
7119 /* Everything's ready. Announce the disk to the world. */
7121 rc = device_add(&rbd_dev->dev);
7123 goto err_out_image_lock;
7125 rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7127 goto err_out_cleanup_disk;
7129 spin_lock(&rbd_dev_list_lock);
7130 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7131 spin_unlock(&rbd_dev_list_lock);
7133 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7134 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7135 rbd_dev->header.features);
7138 module_put(THIS_MODULE);
7141 err_out_cleanup_disk:
7142 rbd_free_disk(rbd_dev);
7144 rbd_dev_image_unlock(rbd_dev);
7145 rbd_dev_device_release(rbd_dev);
7146 err_out_image_probe:
7147 rbd_dev_image_release(rbd_dev);
7149 rbd_dev_destroy(rbd_dev);
7151 rbd_put_client(rbdc);
7158 static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count)
7163 return do_rbd_add(buf, count);
7166 static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
7169 return do_rbd_add(buf, count);
7172 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7174 while (rbd_dev->parent) {
7175 struct rbd_device *first = rbd_dev;
7176 struct rbd_device *second = first->parent;
7177 struct rbd_device *third;
7180 * Follow to the parent with no grandparent and
7183 while (second && (third = second->parent)) {
7188 rbd_dev_image_release(second);
7189 rbd_dev_destroy(second);
7190 first->parent = NULL;
7191 first->parent_overlap = 0;
7193 rbd_assert(first->parent_spec);
7194 rbd_spec_put(first->parent_spec);
7195 first->parent_spec = NULL;
7199 static ssize_t do_rbd_remove(const char *buf, size_t count)
7201 struct rbd_device *rbd_dev = NULL;
7207 if (!capable(CAP_SYS_ADMIN))
7212 sscanf(buf, "%d %5s", &dev_id, opt_buf);
7214 pr_err("dev_id out of range\n");
7217 if (opt_buf[0] != '\0') {
7218 if (!strcmp(opt_buf, "force")) {
7221 pr_err("bad remove option at '%s'\n", opt_buf);
7227 spin_lock(&rbd_dev_list_lock);
7228 list_for_each_entry(rbd_dev, &rbd_dev_list, node) {
7229 if (rbd_dev->dev_id == dev_id) {
7235 spin_lock_irq(&rbd_dev->lock);
7236 if (rbd_dev->open_count && !force)
7238 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
7241 spin_unlock_irq(&rbd_dev->lock);
7243 spin_unlock(&rbd_dev_list_lock);
7249 * Prevent new IO from being queued and wait for existing
7250 * IO to complete/fail.
7252 blk_mq_freeze_queue(rbd_dev->disk->queue);
7253 blk_mark_disk_dead(rbd_dev->disk);
7256 del_gendisk(rbd_dev->disk);
7257 spin_lock(&rbd_dev_list_lock);
7258 list_del_init(&rbd_dev->node);
7259 spin_unlock(&rbd_dev_list_lock);
7260 device_del(&rbd_dev->dev);
7262 rbd_dev_image_unlock(rbd_dev);
7263 rbd_dev_device_release(rbd_dev);
7264 rbd_dev_image_release(rbd_dev);
7265 rbd_dev_destroy(rbd_dev);
7269 static ssize_t remove_store(const struct bus_type *bus, const char *buf, size_t count)
7274 return do_rbd_remove(buf, count);
7277 static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
7280 return do_rbd_remove(buf, count);
7284 * create control files in sysfs
7287 static int __init rbd_sysfs_init(void)
7291 ret = device_register(&rbd_root_dev);
7293 put_device(&rbd_root_dev);
7297 ret = bus_register(&rbd_bus_type);
7299 device_unregister(&rbd_root_dev);
7304 static void __exit rbd_sysfs_cleanup(void)
7306 bus_unregister(&rbd_bus_type);
7307 device_unregister(&rbd_root_dev);
7310 static int __init rbd_slab_init(void)
7312 rbd_assert(!rbd_img_request_cache);
7313 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
7314 if (!rbd_img_request_cache)
7317 rbd_assert(!rbd_obj_request_cache);
7318 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
7319 if (!rbd_obj_request_cache)
7325 kmem_cache_destroy(rbd_img_request_cache);
7326 rbd_img_request_cache = NULL;
7330 static void rbd_slab_exit(void)
7332 rbd_assert(rbd_obj_request_cache);
7333 kmem_cache_destroy(rbd_obj_request_cache);
7334 rbd_obj_request_cache = NULL;
7336 rbd_assert(rbd_img_request_cache);
7337 kmem_cache_destroy(rbd_img_request_cache);
7338 rbd_img_request_cache = NULL;
7341 static int __init rbd_init(void)
7345 if (!libceph_compatible(NULL)) {
7346 rbd_warn(NULL, "libceph incompatibility (quitting)");
7350 rc = rbd_slab_init();
7355 * The number of active work items is limited by the number of
7356 * rbd devices * queue depth, so leave @max_active at default.
7358 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
7365 rbd_major = register_blkdev(0, RBD_DRV_NAME);
7366 if (rbd_major < 0) {
7372 rc = rbd_sysfs_init();
7374 goto err_out_blkdev;
7377 pr_info("loaded (major %d)\n", rbd_major);
7379 pr_info("loaded\n");
7385 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7387 destroy_workqueue(rbd_wq);
7393 static void __exit rbd_exit(void)
7395 ida_destroy(&rbd_dev_id_ida);
7396 rbd_sysfs_cleanup();
7398 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7399 destroy_workqueue(rbd_wq);
7403 module_init(rbd_init);
7404 module_exit(rbd_exit);
7406 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
7407 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
7408 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
7409 /* following authorship retained from original osdblk.c */
7410 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
7412 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
7413 MODULE_LICENSE("GPL");