/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/fs_parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>

#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;
	atomic_dec(v);
	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL. */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter = atomic_dec_return(v);

	if (counter >= 0)
		return counter;
	atomic_inc(v);
	return -EINVAL;
}
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
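/*
 * 510 snapshot ids at 8 bytes each is 4080 bytes, which leaves just
 * enough room for the snapshot context's seq and count fields within
 * a single 4 KiB page.
 */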
#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
144 * block device image metadata (in-memory version)
146 struct rbd_image_header {
147 /* These six fields never change for a given rbd image */
153 u64 features; /* Might be changeable someday? */
155 /* The remaining fields need to be updated occasionally */
157 struct ceph_snap_context *snapc;
158 char *snap_names; /* format 1 only */
159 u64 *snap_sizes; /* format 1 only */
163 * An rbd image specification.
165 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
166 * identify an image. Each rbd_dev structure includes a pointer to
167 * an rbd_spec structure that encapsulates this identity.
169 * Each of the id's in an rbd_spec has an associated name. For a
170 * user-mapped image, the names are supplied and the id's associated
171 * with them are looked up. For a layered image, a parent image is
172 * defined by the tuple, and the names are looked up.
174 * An rbd_dev structure contains a parent_spec pointer which is
175 * non-null if the image it represents is a child in a layered
176 * image. This pointer will refer to the rbd_spec structure used
177 * by the parent rbd_dev for its own identity (i.e., the structure
178 * is shared between the parent and child).
180 * Since these structures are populated once, during the discovery
181 * phase of image construction, they are effectively immutable so
182 * we make no effort to synchronize access to them.
184 * Note that code herein does not assume the image name is known (it
185 * could be a null pointer).
189 const char *pool_name;
190 const char *pool_ns; /* NULL if default, never "" */
192 const char *image_id;
193 const char *image_name;
196 const char *snap_name;
202 * an instance of the client. multiple devices may share an rbd client.
205 struct ceph_client *client;
207 struct list_head node;
210 struct pending_result {
211 int result; /* first nonzero result */
215 struct rbd_img_request;
217 enum obj_request_type {
218 OBJ_REQUEST_NODATA = 1,
219 OBJ_REQUEST_BIO, /* pointer into provided bio (list) */
220 OBJ_REQUEST_BVECS, /* pointer into provided bio_vec array */
221 OBJ_REQUEST_OWN_BVECS, /* private bio_vec array, doesn't own pages */
224 enum obj_operation_type {
231 #define RBD_OBJ_FLAG_DELETION (1U << 0)
232 #define RBD_OBJ_FLAG_COPYUP_ENABLED (1U << 1)
233 #define RBD_OBJ_FLAG_COPYUP_ZEROS (1U << 2)
234 #define RBD_OBJ_FLAG_MAY_EXIST (1U << 3)
235 #define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT (1U << 4)
237 enum rbd_obj_read_state {
238 RBD_OBJ_READ_START = 1,
/*
 * Writes go through the following state machine to deal with
 * copyup: a guarded write that hits a nonexistent object goes
 * RBD_OBJ_WRITE_GUARD -> RBD_OBJ_WRITE_READ_FROM_PARENT ->
 * RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC (deep-copyup; skipped when not
 * needed) -> RBD_OBJ_WRITE_COPYUP_OPS -> done.
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
268 enum rbd_obj_write_state {
269 RBD_OBJ_WRITE_START = 1,
270 RBD_OBJ_WRITE_PRE_OBJECT_MAP,
271 RBD_OBJ_WRITE_OBJECT,
272 __RBD_OBJ_WRITE_COPYUP,
273 RBD_OBJ_WRITE_COPYUP,
274 RBD_OBJ_WRITE_POST_OBJECT_MAP,
277 enum rbd_obj_copyup_state {
278 RBD_OBJ_COPYUP_START = 1,
279 RBD_OBJ_COPYUP_READ_PARENT,
280 __RBD_OBJ_COPYUP_OBJECT_MAPS,
281 RBD_OBJ_COPYUP_OBJECT_MAPS,
282 __RBD_OBJ_COPYUP_WRITE_OBJECT,
283 RBD_OBJ_COPYUP_WRITE_OBJECT,
286 struct rbd_obj_request {
287 struct ceph_object_extent ex;
288 unsigned int flags; /* RBD_OBJ_FLAG_* */
290 enum rbd_obj_read_state read_state; /* for reads */
291 enum rbd_obj_write_state write_state; /* for writes */
294 struct rbd_img_request *img_request;
295 struct ceph_file_extent *img_extents;
299 struct ceph_bio_iter bio_pos;
301 struct ceph_bvec_iter bvec_pos;
307 enum rbd_obj_copyup_state copyup_state;
308 struct bio_vec *copyup_bvecs;
309 u32 copyup_bvec_count;
311 struct list_head osd_reqs; /* w/ r_private_item */
313 struct mutex state_mutex;
314 struct pending_result pending;
319 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
320 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
325 RBD_IMG_EXCLUSIVE_LOCK,
326 __RBD_IMG_OBJECT_REQUESTS,
327 RBD_IMG_OBJECT_REQUESTS,
330 struct rbd_img_request {
331 struct rbd_device *rbd_dev;
332 enum obj_operation_type op_type;
333 enum obj_request_type data_type;
335 enum rbd_img_state state;
337 u64 snap_id; /* for reads */
338 struct ceph_snap_context *snapc; /* for writes */
340 struct rbd_obj_request *obj_request; /* obj req initiator */
342 struct list_head lock_item;
343 struct list_head object_extents; /* obj_req.ex structs */
345 struct mutex state_mutex;
346 struct pending_result pending;
347 struct work_struct work;
351 #define for_each_obj_request(ireq, oreq) \
352 list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
353 #define for_each_obj_request_safe(ireq, oreq, n) \
354 list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
356 enum rbd_watch_state {
357 RBD_WATCH_STATE_UNREGISTERED,
358 RBD_WATCH_STATE_REGISTERED,
359 RBD_WATCH_STATE_ERROR,
362 enum rbd_lock_state {
363 RBD_LOCK_STATE_UNLOCKED,
364 RBD_LOCK_STATE_LOCKED,
365 RBD_LOCK_STATE_RELEASING,
368 /* WatchNotify::ClientId */
369 struct rbd_client_id {
382 int dev_id; /* blkdev unique id */
384 int major; /* blkdev assigned major */
386 struct gendisk *disk; /* blkdev's gendisk and rq */
388 u32 image_format; /* Either 1 or 2 */
389 struct rbd_client *rbd_client;
391 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
393 spinlock_t lock; /* queue, flags, open_count */
395 struct rbd_image_header header;
396 unsigned long flags; /* possibly lock protected */
397 struct rbd_spec *spec;
398 struct rbd_options *opts;
399 char *config_info; /* add{,_single_major} string */
401 struct ceph_object_id header_oid;
402 struct ceph_object_locator header_oloc;
404 struct ceph_file_layout layout; /* used for all rbd requests */
406 struct mutex watch_mutex;
407 enum rbd_watch_state watch_state;
408 struct ceph_osd_linger_request *watch_handle;
410 struct delayed_work watch_dwork;
412 struct rw_semaphore lock_rwsem;
413 enum rbd_lock_state lock_state;
414 char lock_cookie[32];
415 struct rbd_client_id owner_cid;
416 struct work_struct acquired_lock_work;
417 struct work_struct released_lock_work;
418 struct delayed_work lock_dwork;
419 struct work_struct unlock_work;
420 spinlock_t lock_lists_lock;
421 struct list_head acquiring_list;
422 struct list_head running_list;
423 struct completion acquire_wait;
425 struct completion releasing_wait;
427 spinlock_t object_map_lock;
429 u64 object_map_size; /* in objects */
430 u64 object_map_flags;
432 struct workqueue_struct *task_wq;
434 struct rbd_spec *parent_spec;
437 struct rbd_device *parent;
439 /* Block layer tags. */
440 struct blk_mq_tag_set tag_set;
442 /* protects updating the header */
443 struct rw_semaphore header_rwsem;
445 struct rbd_mapping mapping;
447 struct list_head node;
451 unsigned long open_count; /* protected by lock */
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
460 RBD_DEV_FLAG_EXISTS, /* rbd_dev_device_setup() ran */
461 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
462 RBD_DEV_FLAG_READONLY, /* -o ro or snapshot */
465 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
467 static LIST_HEAD(rbd_dev_list); /* devices */
468 static DEFINE_SPINLOCK(rbd_dev_list_lock);
470 static LIST_HEAD(rbd_client_list); /* clients */
471 static DEFINE_SPINLOCK(rbd_client_list_lock);
473 /* Slab caches for frequently-allocated structures */
475 static struct kmem_cache *rbd_img_request_cache;
476 static struct kmem_cache *rbd_obj_request_cache;
478 static int rbd_major;
479 static DEFINE_IDA(rbd_dev_id_ida);
481 static struct workqueue_struct *rbd_wq;
483 static struct ceph_snap_context rbd_empty_snapc = {
484 .nref = REFCOUNT_INIT(1),
488 * single-major requires >= 0.75 version of userspace rbd utility.
490 static bool single_major = true;
491 module_param(single_major, bool, 0444);
492 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
494 static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count);
495 static ssize_t remove_store(const struct bus_type *bus, const char *buf,
497 static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
499 static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
501 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
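/*
 * In single-major mode each mapping gets a block of
 * 1 << RBD_SINGLE_MAJOR_PART_SHIFT (i.e. 16) minors: one for the whole
 * device plus up to 15 partitions.  These helpers convert between a
 * device id and the first minor of its block.
 */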
503 static int rbd_dev_id_to_minor(int dev_id)
505 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
508 static int minor_to_rbd_dev_id(int minor)
510 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
513 static bool rbd_is_ro(struct rbd_device *rbd_dev)
515 return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
518 static bool rbd_is_snap(struct rbd_device *rbd_dev)
520 return rbd_dev->spec->snap_id != CEPH_NOSNAP;
523 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
525 lockdep_assert_held(&rbd_dev->lock_rwsem);
527 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
528 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
531 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
535 down_read(&rbd_dev->lock_rwsem);
536 is_lock_owner = __rbd_is_lock_owner(rbd_dev);
537 up_read(&rbd_dev->lock_rwsem);
538 return is_lock_owner;
541 static ssize_t supported_features_show(const struct bus_type *bus, char *buf)
543 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
546 static BUS_ATTR_WO(add);
547 static BUS_ATTR_WO(remove);
548 static BUS_ATTR_WO(add_single_major);
549 static BUS_ATTR_WO(remove_single_major);
550 static BUS_ATTR_RO(supported_features);
552 static struct attribute *rbd_bus_attrs[] = {
554 &bus_attr_remove.attr,
555 &bus_attr_add_single_major.attr,
556 &bus_attr_remove_single_major.attr,
557 &bus_attr_supported_features.attr,
561 static umode_t rbd_bus_is_visible(struct kobject *kobj,
562 struct attribute *attr, int index)
565 (attr == &bus_attr_add_single_major.attr ||
566 attr == &bus_attr_remove_single_major.attr))
572 static const struct attribute_group rbd_bus_group = {
573 .attrs = rbd_bus_attrs,
574 .is_visible = rbd_bus_is_visible,
576 __ATTRIBUTE_GROUPS(rbd_bus);
578 static struct bus_type rbd_bus_type = {
580 .bus_groups = rbd_bus_groups,
583 static void rbd_root_dev_release(struct device *dev)
587 static struct device rbd_root_dev = {
589 .release = rbd_root_dev_release,
592 static __printf(2, 3)
593 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
595 struct va_format vaf;
603 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
604 else if (rbd_dev->disk)
605 printk(KERN_WARNING "%s: %s: %pV\n",
606 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
607 else if (rbd_dev->spec && rbd_dev->spec->image_name)
608 printk(KERN_WARNING "%s: image %s: %pV\n",
609 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
610 else if (rbd_dev->spec && rbd_dev->spec->image_id)
611 printk(KERN_WARNING "%s: id %s: %pV\n",
612 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
614 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
615 RBD_DRV_NAME, rbd_dev, &vaf);
620 #define rbd_assert(expr) \
621 if (unlikely(!(expr))) { \
622 printk(KERN_ERR "\nAssertion failure in %s() " \
624 "\trbd_assert(%s);\n\n", \
625 __func__, __LINE__, #expr); \
628 #else /* !RBD_DEBUG */
629 # define rbd_assert(expr) ((void) 0)
630 #endif /* !RBD_DEBUG */
632 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
634 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
635 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
636 static int rbd_dev_header_info(struct rbd_device *rbd_dev);
637 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
638 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
640 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
641 u8 *order, u64 *snap_size);
642 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);
644 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
645 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
648 * Return true if nothing else is pending.
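 *
 * The first nonzero result is sticky: once pending->result is set,
 * later errors are ignored.  Completion is signalled only when
 * num_pending drops to zero.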
650 static bool pending_result_dec(struct pending_result *pending, int *result)
652 rbd_assert(pending->num_pending > 0);
654 if (*result && !pending->result)
655 pending->result = *result;
656 if (--pending->num_pending)
659 *result = pending->result;
663 static int rbd_open(struct block_device *bdev, fmode_t mode)
665 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
666 bool removing = false;
668 spin_lock_irq(&rbd_dev->lock);
669 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
672 rbd_dev->open_count++;
673 spin_unlock_irq(&rbd_dev->lock);
677 (void) get_device(&rbd_dev->dev);
682 static void rbd_release(struct gendisk *disk, fmode_t mode)
684 struct rbd_device *rbd_dev = disk->private_data;
685 unsigned long open_count_before;
687 spin_lock_irq(&rbd_dev->lock);
688 open_count_before = rbd_dev->open_count--;
689 spin_unlock_irq(&rbd_dev->lock);
690 rbd_assert(open_count_before > 0);
692 put_device(&rbd_dev->dev);
695 static const struct block_device_operations rbd_bd_ops = {
696 .owner = THIS_MODULE,
698 .release = rbd_release,
702 * Initialize an rbd client instance. Success or not, this function
703 * consumes ceph_opts. Caller holds client_mutex.
705 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
707 struct rbd_client *rbdc;
710 dout("%s:\n", __func__);
711 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
715 kref_init(&rbdc->kref);
716 INIT_LIST_HEAD(&rbdc->node);
718 rbdc->client = ceph_create_client(ceph_opts, rbdc);
719 if (IS_ERR(rbdc->client))
721 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
723 ret = ceph_open_session(rbdc->client);
727 spin_lock(&rbd_client_list_lock);
728 list_add_tail(&rbdc->node, &rbd_client_list);
729 spin_unlock(&rbd_client_list_lock);
731 dout("%s: rbdc %p\n", __func__, rbdc);
735 ceph_destroy_client(rbdc->client);
740 ceph_destroy_options(ceph_opts);
741 dout("%s: error %d\n", __func__, ret);
746 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
748 kref_get(&rbdc->kref);
754 * Find a ceph client with specific addr and configuration. If
755 * found, bump its reference count.
757 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
759 struct rbd_client *rbdc = NULL, *iter;
761 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
764 spin_lock(&rbd_client_list_lock);
765 list_for_each_entry(iter, &rbd_client_list, node) {
766 if (!ceph_compare_options(ceph_opts, iter->client)) {
767 __rbd_get_client(iter);
773 spin_unlock(&rbd_client_list_lock);
779 * (Per device) rbd map options
787 Opt_compression_hint,
788 /* string args above */
797 Opt_compression_hint_none,
798 Opt_compression_hint_compressible,
799 Opt_compression_hint_incompressible,
802 static const struct constant_table rbd_param_compression_hint[] = {
803 {"none", Opt_compression_hint_none},
804 {"compressible", Opt_compression_hint_compressible},
805 {"incompressible", Opt_compression_hint_incompressible},
809 static const struct fs_parameter_spec rbd_parameters[] = {
810 fsparam_u32 ("alloc_size", Opt_alloc_size),
811 fsparam_enum ("compression_hint", Opt_compression_hint,
812 rbd_param_compression_hint),
813 fsparam_flag ("exclusive", Opt_exclusive),
814 fsparam_flag ("lock_on_read", Opt_lock_on_read),
815 fsparam_u32 ("lock_timeout", Opt_lock_timeout),
816 fsparam_flag ("notrim", Opt_notrim),
817 fsparam_string ("_pool_ns", Opt_pool_ns),
818 fsparam_u32 ("queue_depth", Opt_queue_depth),
819 fsparam_flag ("read_only", Opt_read_only),
820 fsparam_flag ("read_write", Opt_read_write),
821 fsparam_flag ("ro", Opt_read_only),
822 fsparam_flag ("rw", Opt_read_write),
829 unsigned long lock_timeout;
835 u32 alloc_hint_flags; /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
838 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_DEFAULT_RQ
839 #define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
840 #define RBD_LOCK_TIMEOUT_DEFAULT 0 /* no timeout */
841 #define RBD_READ_ONLY_DEFAULT false
842 #define RBD_LOCK_ON_READ_DEFAULT false
843 #define RBD_EXCLUSIVE_DEFAULT false
844 #define RBD_TRIM_DEFAULT true
846 struct rbd_parse_opts_ctx {
847 struct rbd_spec *spec;
848 struct ceph_options *copts;
849 struct rbd_options *opts;
852 static char* obj_op_name(enum obj_operation_type op_type)
 * Destroy ceph client.
 *
 * rbd_client_list_lock is taken here, so the caller must not hold it.
873 static void rbd_client_release(struct kref *kref)
875 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
877 dout("%s: rbdc %p\n", __func__, rbdc);
878 spin_lock(&rbd_client_list_lock);
879 list_del(&rbdc->node);
880 spin_unlock(&rbd_client_list_lock);
882 ceph_destroy_client(rbdc->client);
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
890 static void rbd_put_client(struct rbd_client *rbdc)
893 kref_put(&rbdc->kref, rbd_client_release);
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
901 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
903 struct rbd_client *rbdc;
906 mutex_lock(&client_mutex);
907 rbdc = rbd_client_find(ceph_opts);
909 ceph_destroy_options(ceph_opts);
912 * Using an existing client. Make sure ->pg_pools is up to
913 * date before we look up the pool id in do_rbd_add().
915 ret = ceph_wait_for_latest_osdmap(rbdc->client,
916 rbdc->client->options->mount_timeout);
918 rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
919 rbd_put_client(rbdc);
923 rbdc = rbd_client_create(ceph_opts);
925 mutex_unlock(&client_mutex);
930 static bool rbd_image_format_valid(u32 image_format)
932 return image_format == 1 || image_format == 2;
935 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
940 /* The header has to start with the magic rbd header text */
941 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
944 /* The bio layer requires at least sector-sized I/O */
946 if (ondisk->options.order < SECTOR_SHIFT)
949 /* If we use u64 in a few spots we may be able to loosen this */
951 if (ondisk->options.order > 8 * sizeof (int) - 1)
955 * The size of a snapshot header has to fit in a size_t, and
956 * that limits the number of snapshots.
958 snap_count = le32_to_cpu(ondisk->snap_count);
959 size = SIZE_MAX - sizeof (struct ceph_snap_context);
960 if (snap_count > size / sizeof (__le64))
 * Not only that, but the size of the entire snapshot
 * header must also be representable in a size_t.
967 size -= snap_count * sizeof (__le64);
968 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
975 * returns the size of an object in the image
977 static u32 rbd_obj_bytes(struct rbd_image_header *header)
979 return 1U << header->obj_order;
982 static void rbd_init_layout(struct rbd_device *rbd_dev)
984 if (rbd_dev->header.stripe_unit == 0 ||
985 rbd_dev->header.stripe_count == 0) {
986 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
987 rbd_dev->header.stripe_count = 1;
990 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
991 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
992 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
993 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
994 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
995 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
999 * Fill an rbd image header with information from the given format 1
1002 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
1003 struct rbd_image_header_ondisk *ondisk)
1005 struct rbd_image_header *header = &rbd_dev->header;
1006 bool first_time = header->object_prefix == NULL;
1007 struct ceph_snap_context *snapc;
1008 char *object_prefix = NULL;
1009 char *snap_names = NULL;
1010 u64 *snap_sizes = NULL;
1015 /* Allocate this now to avoid having to handle failure below */
1018 object_prefix = kstrndup(ondisk->object_prefix,
1019 sizeof(ondisk->object_prefix),
1025 /* Allocate the snapshot context and fill it in */
1027 snap_count = le32_to_cpu(ondisk->snap_count);
1028 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
1031 snapc->seq = le64_to_cpu(ondisk->snap_seq);
1033 struct rbd_image_snap_ondisk *snaps;
1034 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
1036 /* We'll keep a copy of the snapshot names... */
1038 if (snap_names_len > (u64)SIZE_MAX)
1040 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
1044 /* ...as well as the array of their sizes. */
1045 snap_sizes = kmalloc_array(snap_count,
1046 sizeof(*header->snap_sizes),
1052 * Copy the names, and fill in each snapshot's id
 * Note that rbd_dev_v1_header_info() guarantees the
 * ondisk buffer we're working with has
 * snap_names_len bytes beyond the end of the
 * snapshot id array, so this memcpy() is safe.
1060 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
1061 snaps = ondisk->snaps;
1062 for (i = 0; i < snap_count; i++) {
1063 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
1064 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
1068 /* We won't fail any more, fill in the header */
1071 header->object_prefix = object_prefix;
1072 header->obj_order = ondisk->options.order;
1073 rbd_init_layout(rbd_dev);
1075 ceph_put_snap_context(header->snapc);
1076 kfree(header->snap_names);
1077 kfree(header->snap_sizes);
1080 /* The remaining fields always get updated (when we refresh) */
1082 header->image_size = le64_to_cpu(ondisk->image_size);
1083 header->snapc = snapc;
1084 header->snap_names = snap_names;
1085 header->snap_sizes = snap_sizes;
1093 ceph_put_snap_context(snapc);
1094 kfree(object_prefix);
1099 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1101 const char *snap_name;
1103 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1105 /* Skip over names until we find the one we are looking for */
1107 snap_name = rbd_dev->header.snap_names;
1109 snap_name += strlen(snap_name) + 1;
1111 return kstrdup(snap_name, GFP_KERNEL);
1115 * Snapshot id comparison function for use with qsort()/bsearch().
1116 * Note that result is for snapshots in *descending* order.
1118 static int snapid_compare_reverse(const void *s1, const void *s2)
1120 u64 snap_id1 = *(u64 *)s1;
1121 u64 snap_id2 = *(u64 *)s2;
1123 if (snap_id1 < snap_id2)
1125 return snap_id1 == snap_id2 ? 0 : -1;
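/*
 * Example: comparing snap ids 7 and 10 yields 1, so 10 sorts before 7,
 * matching the descending order in which the OSD keeps the array.
 */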
1129 * Search a snapshot context to see if the given snapshot id is
1132 * Returns the position of the snapshot id in the array if it's found,
1133 * or BAD_SNAP_INDEX otherwise.
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
1138 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1140 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1143 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1144 sizeof (snap_id), snapid_compare_reverse);
1146 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
1149 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1153 const char *snap_name;
1155 which = rbd_dev_snap_index(rbd_dev, snap_id);
1156 if (which == BAD_SNAP_INDEX)
1157 return ERR_PTR(-ENOENT);
1159 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1160 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1163 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1165 if (snap_id == CEPH_NOSNAP)
1166 return RBD_SNAP_HEAD_NAME;
1168 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1169 if (rbd_dev->image_format == 1)
1170 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1172 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1175 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1178 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1179 if (snap_id == CEPH_NOSNAP) {
1180 *snap_size = rbd_dev->header.image_size;
1181 } else if (rbd_dev->image_format == 1) {
1184 which = rbd_dev_snap_index(rbd_dev, snap_id);
1185 if (which == BAD_SNAP_INDEX)
1188 *snap_size = rbd_dev->header.snap_sizes[which];
1193 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1202 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1204 u64 snap_id = rbd_dev->spec->snap_id;
1208 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1212 rbd_dev->mapping.size = size;
1216 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1218 rbd_dev->mapping.size = 0;
1221 static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
1223 struct ceph_bio_iter it = *bio_pos;
1225 ceph_bio_iter_advance(&it, off);
1226 ceph_bio_iter_advance_step(&it, bytes, ({
1231 static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
1233 struct ceph_bvec_iter it = *bvec_pos;
1235 ceph_bvec_iter_advance(&it, off);
1236 ceph_bvec_iter_advance_step(&it, bytes, ({
1242 * Zero a range in @obj_req data buffer defined by a bio (list) or
1243 * (private) bio_vec array.
1245 * @off is relative to the start of the data buffer.
1247 static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
1250 dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);
1252 switch (obj_req->img_request->data_type) {
1253 case OBJ_REQUEST_BIO:
1254 zero_bios(&obj_req->bio_pos, off, bytes);
1256 case OBJ_REQUEST_BVECS:
1257 case OBJ_REQUEST_OWN_BVECS:
1258 zero_bvecs(&obj_req->bvec_pos, off, bytes);
1265 static void rbd_obj_request_destroy(struct kref *kref);
1266 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1268 rbd_assert(obj_request != NULL);
1269 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1270 kref_read(&obj_request->kref));
1271 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1274 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1275 struct rbd_obj_request *obj_request)
1277 rbd_assert(obj_request->img_request == NULL);
1279 /* Image request now owns object's original reference */
1280 obj_request->img_request = img_request;
1281 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1284 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1285 struct rbd_obj_request *obj_request)
1287 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1288 list_del(&obj_request->ex.oe_item);
1289 rbd_assert(obj_request->img_request == img_request);
1290 rbd_obj_request_put(obj_request);
1293 static void rbd_osd_submit(struct ceph_osd_request *osd_req)
1295 struct rbd_obj_request *obj_req = osd_req->r_priv;
1297 dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
1298 __func__, osd_req, obj_req, obj_req->ex.oe_objno,
1299 obj_req->ex.oe_off, obj_req->ex.oe_len);
1300 ceph_osdc_start_request(osd_req->r_osdc, osd_req);
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
1308 static void img_request_layered_set(struct rbd_img_request *img_request)
1310 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1313 static bool img_request_layered_test(struct rbd_img_request *img_request)
1315 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1318 static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
1320 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1322 return !obj_req->ex.oe_off &&
1323 obj_req->ex.oe_len == rbd_dev->layout.object_size;
1326 static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
1328 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1330 return obj_req->ex.oe_off + obj_req->ex.oe_len ==
1331 rbd_dev->layout.object_size;
1335 * Must be called after rbd_obj_calc_img_extents().
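 *
 * Copyup is needed only if the object maps onto the parent
 * (num_img_extents != 0) and either the write doesn't cover the
 * entire object or there are snapshots that must still see the
 * parent's data.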
1337 static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
1339 if (!obj_req->num_img_extents ||
1340 (rbd_obj_is_entire(obj_req) &&
1341 !obj_req->img_request->snapc->num_snaps))
1347 static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
1349 return ceph_file_extents_bytes(obj_req->img_extents,
1350 obj_req->num_img_extents);
1353 static bool rbd_img_is_write(struct rbd_img_request *img_req)
1355 switch (img_req->op_type) {
1359 case OBJ_OP_DISCARD:
1360 case OBJ_OP_ZEROOUT:
1367 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
1369 struct rbd_obj_request *obj_req = osd_req->r_priv;
1372 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1373 osd_req->r_result, obj_req);
1376 * Writes aren't allowed to return a data payload. In some
1377 * guarded write cases (e.g. stat + zero on an empty object)
1378 * a stat response makes it through, but we don't care.
1380 if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
1383 result = osd_req->r_result;
1385 rbd_obj_handle_request(obj_req, result);
1388 static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
1390 struct rbd_obj_request *obj_request = osd_req->r_priv;
1391 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1392 struct ceph_options *opt = rbd_dev->rbd_client->client->options;
1394 osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
1395 osd_req->r_snapid = obj_request->img_request->snap_id;
1398 static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
1400 struct rbd_obj_request *obj_request = osd_req->r_priv;
1402 osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
1403 ktime_get_real_ts64(&osd_req->r_mtime);
1404 osd_req->r_data_offset = obj_request->ex.oe_off;
1407 static struct ceph_osd_request *
1408 __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
1409 struct ceph_snap_context *snapc, int num_ops)
1411 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1412 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1413 struct ceph_osd_request *req;
1414 const char *name_format = rbd_dev->image_format == 1 ?
1415 RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
1418 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
1420 return ERR_PTR(-ENOMEM);
1422 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
1423 req->r_callback = rbd_osd_req_callback;
1424 req->r_priv = obj_req;
1427 * Data objects may be stored in a separate pool, but always in
1428 * the same namespace in that pool as the header in its pool.
1430 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
1431 req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1433 ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
1434 rbd_dev->header.object_prefix,
1435 obj_req->ex.oe_objno);
1437 return ERR_PTR(ret);
1442 static struct ceph_osd_request *
1443 rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
1445 return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
1449 static struct rbd_obj_request *rbd_obj_request_create(void)
1451 struct rbd_obj_request *obj_request;
1453 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
1457 ceph_object_extent_init(&obj_request->ex);
1458 INIT_LIST_HEAD(&obj_request->osd_reqs);
1459 mutex_init(&obj_request->state_mutex);
1460 kref_init(&obj_request->kref);
1462 dout("%s %p\n", __func__, obj_request);
1466 static void rbd_obj_request_destroy(struct kref *kref)
1468 struct rbd_obj_request *obj_request;
1469 struct ceph_osd_request *osd_req;
1472 obj_request = container_of(kref, struct rbd_obj_request, kref);
1474 dout("%s: obj %p\n", __func__, obj_request);
1476 while (!list_empty(&obj_request->osd_reqs)) {
1477 osd_req = list_first_entry(&obj_request->osd_reqs,
1478 struct ceph_osd_request, r_private_item);
1479 list_del_init(&osd_req->r_private_item);
1480 ceph_osdc_put_request(osd_req);
1483 switch (obj_request->img_request->data_type) {
1484 case OBJ_REQUEST_NODATA:
1485 case OBJ_REQUEST_BIO:
1486 case OBJ_REQUEST_BVECS:
1487 break; /* Nothing to do */
1488 case OBJ_REQUEST_OWN_BVECS:
1489 kfree(obj_request->bvec_pos.bvecs);
1495 kfree(obj_request->img_extents);
1496 if (obj_request->copyup_bvecs) {
1497 for (i = 0; i < obj_request->copyup_bvec_count; i++) {
1498 if (obj_request->copyup_bvecs[i].bv_page)
1499 __free_page(obj_request->copyup_bvecs[i].bv_page);
1501 kfree(obj_request->copyup_bvecs);
1504 kmem_cache_free(rbd_obj_request_cache, obj_request);
1507 /* It's OK to call this for a device with no parent */
1509 static void rbd_spec_put(struct rbd_spec *spec);
1510 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1512 rbd_dev_remove_parent(rbd_dev);
1513 rbd_spec_put(rbd_dev->parent_spec);
1514 rbd_dev->parent_spec = NULL;
1515 rbd_dev->parent_overlap = 0;
1519 * Parent image reference counting is used to determine when an
1520 * image's parent fields can be safely torn down--after there are no
1521 * more in-flight requests to the parent image. When the last
1522 * reference is dropped, cleaning them up is safe.
1524 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1528 if (!rbd_dev->parent_spec)
1531 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1535 /* Last reference; clean up parent data structures */
1538 rbd_dev_unparent(rbd_dev);
1540 rbd_warn(rbd_dev, "parent reference underflow");
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
1551 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1555 if (!rbd_dev->parent_spec)
1558 if (rbd_dev->parent_overlap)
1559 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1562 rbd_warn(rbd_dev, "parent reference overflow");
1567 static void rbd_img_request_init(struct rbd_img_request *img_request,
1568 struct rbd_device *rbd_dev,
1569 enum obj_operation_type op_type)
1571 memset(img_request, 0, sizeof(*img_request));
1573 img_request->rbd_dev = rbd_dev;
1574 img_request->op_type = op_type;
1576 INIT_LIST_HEAD(&img_request->lock_item);
1577 INIT_LIST_HEAD(&img_request->object_extents);
1578 mutex_init(&img_request->state_mutex);
1581 static void rbd_img_capture_header(struct rbd_img_request *img_req)
1583 struct rbd_device *rbd_dev = img_req->rbd_dev;
1585 lockdep_assert_held(&rbd_dev->header_rwsem);
1587 if (rbd_img_is_write(img_req))
1588 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
1590 img_req->snap_id = rbd_dev->spec->snap_id;
1592 if (rbd_dev_parent_get(rbd_dev))
1593 img_request_layered_set(img_req);
1596 static void rbd_img_request_destroy(struct rbd_img_request *img_request)
1598 struct rbd_obj_request *obj_request;
1599 struct rbd_obj_request *next_obj_request;
1601 dout("%s: img %p\n", __func__, img_request);
1603 WARN_ON(!list_empty(&img_request->lock_item));
1604 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1605 rbd_img_obj_request_del(img_request, obj_request);
1607 if (img_request_layered_test(img_request))
1608 rbd_dev_parent_put(img_request->rbd_dev);
1610 if (rbd_img_is_write(img_request))
1611 ceph_put_snap_context(img_request->snapc);
1613 if (test_bit(IMG_REQ_CHILD, &img_request->flags))
1614 kmem_cache_free(rbd_img_request_cache, img_request);
1617 #define BITS_PER_OBJ 2
1618 #define OBJS_PER_BYTE (BITS_PER_BYTE / BITS_PER_OBJ)
1619 #define OBJ_MASK ((1 << BITS_PER_OBJ) - 1)
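/*
 * The object map packs 2 bits of state per object, 4 objects per byte,
 * most significant bits first.  For example, objno 5 lives in byte 1
 * (index = 5 / 4) and, with off = 1, its state occupies bits 5:4
 * (shift = (4 - 1 - 1) * 2 = 4).
 */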
1621 static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
1622 u64 *index, u8 *shift)
1626 rbd_assert(objno < rbd_dev->object_map_size);
1627 *index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
1628 *shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
1631 static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1636 lockdep_assert_held(&rbd_dev->object_map_lock);
1637 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1638 return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
1641 static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
1647 lockdep_assert_held(&rbd_dev->object_map_lock);
1648 rbd_assert(!(val & ~OBJ_MASK));
1650 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1651 p = &rbd_dev->object_map[index];
1652 *p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
1655 static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1659 spin_lock(&rbd_dev->object_map_lock);
1660 state = __rbd_object_map_get(rbd_dev, objno);
1661 spin_unlock(&rbd_dev->object_map_lock);
1665 static bool use_object_map(struct rbd_device *rbd_dev)
1668 * An image mapped read-only can't use the object map -- it isn't
1669 * loaded because the header lock isn't acquired. Someone else can
1670 * write to the image and update the object map behind our back.
 * A snapshot can't be written to, so using the object map is always
 * allowed.
1675 if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
1678 return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
1679 !(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
1682 static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
1686 /* fall back to default logic if object map is disabled or invalid */
1687 if (!use_object_map(rbd_dev))
1690 state = rbd_object_map_get(rbd_dev, objno);
1691 return state != OBJECT_NONEXISTENT;
1694 static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
1695 struct ceph_object_id *oid)
1697 if (snap_id == CEPH_NOSNAP)
1698 ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
1699 rbd_dev->spec->image_id);
1701 ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
1702 rbd_dev->spec->image_id, snap_id);
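/*
 * Take the exclusive lock on the HEAD object map.  On -EBUSY the
 * current locker is queried and its (presumably stale) lock is broken,
 * after which the lock attempt is retried; a second -EBUSY is treated
 * as an error.
 */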
1705 static int rbd_object_map_lock(struct rbd_device *rbd_dev)
1707 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1708 CEPH_DEFINE_OID_ONSTACK(oid);
1711 struct ceph_locker *lockers;
1713 bool broke_lock = false;
1716 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1719 ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1720 CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
1721 if (ret != -EBUSY || broke_lock) {
1723 ret = 0; /* already locked by myself */
1725 rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
1729 ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
1730 RBD_LOCK_NAME, &lock_type, &lock_tag,
1731 &lockers, &num_lockers);
1736 rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
1741 if (num_lockers == 0)
1744 rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
1745 ENTITY_NAME(lockers[0].id.name));
1747 ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
1748 RBD_LOCK_NAME, lockers[0].id.cookie,
1749 &lockers[0].id.name);
1750 ceph_free_lockers(lockers, num_lockers);
1755 rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
1763 static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
1765 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1766 CEPH_DEFINE_OID_ONSTACK(oid);
1769 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1771 ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1773 if (ret && ret != -ENOENT)
1774 rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
1777 static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
1785 ceph_decode_32_safe(p, end, header_len, e_inval);
1786 header_end = *p + header_len;
1788 ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
1793 ceph_decode_64_safe(p, end, *object_map_size, e_inval);
1802 static int __rbd_object_map_load(struct rbd_device *rbd_dev)
1804 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1805 CEPH_DEFINE_OID_ONSTACK(oid);
1806 struct page **pages;
1810 u64 object_map_bytes;
1811 u64 object_map_size;
1815 rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);
1817 num_objects = ceph_get_num_objects(&rbd_dev->layout,
1818 rbd_dev->mapping.size);
1819 object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
1821 num_pages = calc_pages_for(0, object_map_bytes) + 1;
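	/*
	 * The extra page leaves room for the encoded header that
	 * precedes the map data in the reply (see
	 * decode_object_map_header()).
	 */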
1822 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1824 return PTR_ERR(pages);
1826 reply_len = num_pages * PAGE_SIZE;
1827 rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
1828 ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
1829 "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
1830 NULL, 0, pages, &reply_len);
1834 p = page_address(pages[0]);
1835 end = p + min(reply_len, (size_t)PAGE_SIZE);
1836 ret = decode_object_map_header(&p, end, &object_map_size);
1840 if (object_map_size != num_objects) {
1841 rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
1842 object_map_size, num_objects);
1847 if (offset_in_page(p) + object_map_bytes > reply_len) {
1852 rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
1853 if (!rbd_dev->object_map) {
1858 rbd_dev->object_map_size = object_map_size;
1859 ceph_copy_from_page_vector(pages, rbd_dev->object_map,
1860 offset_in_page(p), object_map_bytes);
1863 ceph_release_page_vector(pages, num_pages);
1867 static void rbd_object_map_free(struct rbd_device *rbd_dev)
1869 kvfree(rbd_dev->object_map);
1870 rbd_dev->object_map = NULL;
1871 rbd_dev->object_map_size = 0;
1874 static int rbd_object_map_load(struct rbd_device *rbd_dev)
1878 ret = __rbd_object_map_load(rbd_dev);
1882 ret = rbd_dev_v2_get_flags(rbd_dev);
1884 rbd_object_map_free(rbd_dev);
1888 if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
1889 rbd_warn(rbd_dev, "object map is invalid");
1894 static int rbd_object_map_open(struct rbd_device *rbd_dev)
1898 ret = rbd_object_map_lock(rbd_dev);
1902 ret = rbd_object_map_load(rbd_dev);
1904 rbd_object_map_unlock(rbd_dev);
1911 static void rbd_object_map_close(struct rbd_device *rbd_dev)
1913 rbd_object_map_free(rbd_dev);
1914 rbd_object_map_unlock(rbd_dev);
1918 * This function needs snap_id (or more precisely just something to
1919 * distinguish between HEAD and snapshot object maps), new_state and
1920 * current_state that were passed to rbd_object_map_update().
1922 * To avoid allocating and stashing a context we piggyback on the OSD
1923 * request. A HEAD update has two ops (assert_locked). For new_state
1924 * and current_state we decode our own object_map_update op, encoded in
1925 * rbd_cls_object_map_update().
1927 static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
1928 struct ceph_osd_request *osd_req)
1930 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1931 struct ceph_osd_data *osd_data;
1933 u8 state, new_state, current_state;
1934 bool has_current_state;
1937 if (osd_req->r_result)
1938 return osd_req->r_result;
1941 * Nothing to do for a snapshot object map.
1943 if (osd_req->r_num_ops == 1)
1947 * Update in-memory HEAD object map.
1949 rbd_assert(osd_req->r_num_ops == 2);
1950 osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
1951 rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);
1953 p = page_address(osd_data->pages[0]);
1954 objno = ceph_decode_64(&p);
1955 rbd_assert(objno == obj_req->ex.oe_objno);
1956 rbd_assert(ceph_decode_64(&p) == objno + 1);
1957 new_state = ceph_decode_8(&p);
1958 has_current_state = ceph_decode_8(&p);
1959 if (has_current_state)
1960 current_state = ceph_decode_8(&p);
1962 spin_lock(&rbd_dev->object_map_lock);
1963 state = __rbd_object_map_get(rbd_dev, objno);
1964 if (!has_current_state || current_state == state ||
1965 (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
1966 __rbd_object_map_set(rbd_dev, objno, new_state);
1967 spin_unlock(&rbd_dev->object_map_lock);
1972 static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
1974 struct rbd_obj_request *obj_req = osd_req->r_priv;
1977 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1978 osd_req->r_result, obj_req);
1980 result = rbd_object_map_update_finish(obj_req, osd_req);
1981 rbd_obj_handle_request(obj_req, result);
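/*
 * Skip the update if the in-memory map already reflects the target
 * state, if a nonexistent object would merely be marked PENDING, or
 * if NONEXISTENT is requested from any state other than PENDING.
 */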
1984 static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
1986 u8 state = rbd_object_map_get(rbd_dev, objno);
1988 if (state == new_state ||
1989 (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
1990 (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
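/*
 * Encode a single-object update: the range [objno, objno + 1), the new
 * state and an optional expected current state to be checked against
 * before the update is applied.
 */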
1996 static int rbd_cls_object_map_update(struct ceph_osd_request *req,
1997 int which, u64 objno, u8 new_state,
1998 const u8 *current_state)
2000 struct page **pages;
2004 ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
2008 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2010 return PTR_ERR(pages);
2012 p = start = page_address(pages[0]);
2013 ceph_encode_64(&p, objno);
2014 ceph_encode_64(&p, objno + 1);
2015 ceph_encode_8(&p, new_state);
2016 if (current_state) {
2017 ceph_encode_8(&p, 1);
2018 ceph_encode_8(&p, *current_state);
2020 ceph_encode_8(&p, 0);
2023 osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
2030 * 0 - object map update sent
2031 * 1 - object map update isn't needed
2034 static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
2035 u8 new_state, const u8 *current_state)
2037 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2038 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2039 struct ceph_osd_request *req;
2044 if (snap_id == CEPH_NOSNAP) {
2045 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
2048 num_ops++; /* assert_locked */
2051 req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
2055 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
2056 req->r_callback = rbd_object_map_callback;
2057 req->r_priv = obj_req;
2059 rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
2060 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
2061 req->r_flags = CEPH_OSD_FLAG_WRITE;
2062 ktime_get_real_ts64(&req->r_mtime);
2064 if (snap_id == CEPH_NOSNAP) {
2066 * Protect against possible race conditions during lock
2067 * ownership transitions.
2069 ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
2070 CEPH_CLS_LOCK_EXCLUSIVE, "", "");
2075 ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
2076 new_state, current_state);
2080 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
2084 ceph_osdc_start_request(osdc, req);
2088 static void prune_extents(struct ceph_file_extent *img_extents,
2089 u32 *num_img_extents, u64 overlap)
2091 u32 cnt = *num_img_extents;
2093 /* drop extents completely beyond the overlap */
2094 while (cnt && img_extents[cnt - 1].fe_off >= overlap)
2098 struct ceph_file_extent *ex = &img_extents[cnt - 1];
2100 /* trim final overlapping extent */
2101 if (ex->fe_off + ex->fe_len > overlap)
2102 ex->fe_len = overlap - ex->fe_off;
2105 *num_img_extents = cnt;
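/*
 * Example: with a 4M parent overlap, an extent 3M~2M is trimmed to
 * 3M~1M and any extent starting at or beyond 4M is dropped entirely.
 */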
2109 * Determine the byte range(s) covered by either just the object extent
2110 * or the entire object in the parent image.
2112 static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
2115 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2118 if (!rbd_dev->parent_overlap)
2121 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
2122 entire ? 0 : obj_req->ex.oe_off,
2123 entire ? rbd_dev->layout.object_size :
2125 &obj_req->img_extents,
2126 &obj_req->num_img_extents);
2130 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2131 rbd_dev->parent_overlap);
2135 static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
2137 struct rbd_obj_request *obj_req = osd_req->r_priv;
2139 switch (obj_req->img_request->data_type) {
2140 case OBJ_REQUEST_BIO:
2141 osd_req_op_extent_osd_data_bio(osd_req, which,
2143 obj_req->ex.oe_len);
2145 case OBJ_REQUEST_BVECS:
2146 case OBJ_REQUEST_OWN_BVECS:
2147 rbd_assert(obj_req->bvec_pos.iter.bi_size ==
2148 obj_req->ex.oe_len);
2149 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
2150 osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
2151 &obj_req->bvec_pos);
2158 static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
2160 struct page **pages;
2163 * The response data for a STAT call consists of:
2170 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2172 return PTR_ERR(pages);
2174 osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
2175 osd_req_op_raw_data_in_pages(osd_req, which, pages,
2176 8 + sizeof(struct ceph_timespec),
2181 static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
2184 struct rbd_obj_request *obj_req = osd_req->r_priv;
2187 ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
2191 osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
2192 obj_req->copyup_bvec_count, bytes);
2196 static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
2198 obj_req->read_state = RBD_OBJ_READ_START;
2202 static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2205 struct rbd_obj_request *obj_req = osd_req->r_priv;
2206 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2209 if (!use_object_map(rbd_dev) ||
2210 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
2211 osd_req_op_alloc_hint_init(osd_req, which++,
2212 rbd_dev->layout.object_size,
2213 rbd_dev->layout.object_size,
2214 rbd_dev->opts->alloc_hint_flags);
2217 if (rbd_obj_is_entire(obj_req))
2218 opcode = CEPH_OSD_OP_WRITEFULL;
2220 opcode = CEPH_OSD_OP_WRITE;
2222 osd_req_op_extent_init(osd_req, which, opcode,
2223 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2224 rbd_osd_setup_data(osd_req, which);
2227 static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
2231 /* reverse map the entire object onto the parent */
2232 ret = rbd_obj_calc_img_extents(obj_req, true);
2236 if (rbd_obj_copyup_enabled(obj_req))
2237 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
2239 obj_req->write_state = RBD_OBJ_WRITE_START;
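/*
 * A tail extent can simply truncate the object to free space; anything
 * else must be zeroed in place so that data beyond the range is
 * preserved.
 */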
2243 static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
2245 return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
2249 static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
2252 struct rbd_obj_request *obj_req = osd_req->r_priv;
2254 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
2255 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2256 osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
2258 osd_req_op_extent_init(osd_req, which,
2259 truncate_or_zero_opcode(obj_req),
2260 obj_req->ex.oe_off, obj_req->ex.oe_len,
2265 static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
2267 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2272 * Align the range to alloc_size boundary and punt on discards
2273 * that are too small to free up any space.
2275 * alloc_size == object_size && is_tail() is a special case for
2276 * filestore with filestore_punch_hole = false, needed to allow
2277 * truncate (in addition to delete).
2279 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
2280 !rbd_obj_is_tail(obj_req)) {
2281 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2282 next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
2283 rbd_dev->opts->alloc_size);
2284 if (off >= next_off)
2287 dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
2288 obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
2289 off, next_off - off);
2290 obj_req->ex.oe_off = off;
2291 obj_req->ex.oe_len = next_off - off;
2294 /* reverse map the entire object onto the parent */
2295 ret = rbd_obj_calc_img_extents(obj_req, true);
2299 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2300 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
2301 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2303 obj_req->write_state = RBD_OBJ_WRITE_START;
2307 static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
2310 struct rbd_obj_request *obj_req = osd_req->r_priv;
2313 if (rbd_obj_is_entire(obj_req)) {
2314 if (obj_req->num_img_extents) {
2315 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2316 osd_req_op_init(osd_req, which++,
2317 CEPH_OSD_OP_CREATE, 0);
2318 opcode = CEPH_OSD_OP_TRUNCATE;
2320 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2321 osd_req_op_init(osd_req, which++,
2322 CEPH_OSD_OP_DELETE, 0);
2326 opcode = truncate_or_zero_opcode(obj_req);
2330 osd_req_op_extent_init(osd_req, which, opcode,
2331 obj_req->ex.oe_off, obj_req->ex.oe_len,
2335 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
2339 /* reverse map the entire object onto the parent */
2340 ret = rbd_obj_calc_img_extents(obj_req, true);
2344 if (rbd_obj_copyup_enabled(obj_req))
2345 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
2346 if (!obj_req->num_img_extents) {
2347 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2348 if (rbd_obj_is_entire(obj_req))
2349 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2352 obj_req->write_state = RBD_OBJ_WRITE_START;
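/*
 * Must return exactly the number of write ops that
 * rbd_osd_setup_write_ops() will add for this object request.
 */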
2356 static int count_write_ops(struct rbd_obj_request *obj_req)
2358 struct rbd_img_request *img_req = obj_req->img_request;
2360 switch (img_req->op_type) {
2362 if (!use_object_map(img_req->rbd_dev) ||
2363 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2364 return 2; /* setallochint + write/writefull */
2366 return 1; /* write/writefull */
2367 case OBJ_OP_DISCARD:
2368 return 1; /* delete/truncate/zero */
2369 case OBJ_OP_ZEROOUT:
2370 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2371 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2372 return 2; /* create + truncate */
2374 return 1; /* delete/truncate/zero */
2380 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2383 struct rbd_obj_request *obj_req = osd_req->r_priv;
2385 switch (obj_req->img_request->op_type) {
2387 __rbd_osd_setup_write_ops(osd_req, which);
2389 case OBJ_OP_DISCARD:
2390 __rbd_osd_setup_discard_ops(osd_req, which);
2392 case OBJ_OP_ZEROOUT:
2393 __rbd_osd_setup_zeroout_ops(osd_req, which);
/*
 * Prune the list of object requests (adjust offset and/or length, drop
 * redundant requests).  Prepare object request state machines and image
 * request state machine for execution.
 */
2405 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2407 struct rbd_obj_request *obj_req, *next_obj_req;
2410 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2411 switch (img_req->op_type) {
2413 ret = rbd_obj_init_read(obj_req);
2416 ret = rbd_obj_init_write(obj_req);
2418 case OBJ_OP_DISCARD:
2419 ret = rbd_obj_init_discard(obj_req);
2421 case OBJ_OP_ZEROOUT:
2422 ret = rbd_obj_init_zeroout(obj_req);
2430 rbd_img_obj_request_del(img_req, obj_req);
2435 img_req->state = RBD_IMG_START;
2439 union rbd_img_fill_iter {
2440 struct ceph_bio_iter bio_iter;
2441 struct ceph_bvec_iter bvec_iter;
2444 struct rbd_img_fill_ctx {
2445 enum obj_request_type pos_type;
2446 union rbd_img_fill_iter *pos;
2447 union rbd_img_fill_iter iter;
2448 ceph_object_extent_fn_t set_pos_fn;
2449 ceph_object_extent_fn_t count_fn;
2450 ceph_object_extent_fn_t copy_fn;
2453 static struct ceph_object_extent *alloc_object_extent(void *arg)
2455 struct rbd_img_request *img_req = arg;
2456 struct rbd_obj_request *obj_req;
2458 obj_req = rbd_obj_request_create();
2462 rbd_img_obj_request_add(img_req, obj_req);
2463 return &obj_req->ex;
/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
2473 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2475 return l->stripe_unit != l->object_size;
2478 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2479 struct ceph_file_extent *img_extents,
2480 u32 num_img_extents,
2481 struct rbd_img_fill_ctx *fctx)
2486 img_req->data_type = fctx->pos_type;
2489 * Create object requests and set each object request's starting
2490 * position in the provided bio (list) or bio_vec array.
2492 fctx->iter = *fctx->pos;
2493 for (i = 0; i < num_img_extents; i++) {
2494 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2495 img_extents[i].fe_off,
2496 img_extents[i].fe_len,
2497 &img_req->object_extents,
2498 alloc_object_extent, img_req,
2499 fctx->set_pos_fn, &fctx->iter);
2504 return __rbd_img_fill_request(img_req);
/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
2520 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2521 struct ceph_file_extent *img_extents,
2522 u32 num_img_extents,
2523 struct rbd_img_fill_ctx *fctx)
2525 struct rbd_device *rbd_dev = img_req->rbd_dev;
2526 struct rbd_obj_request *obj_req;
2530 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2531 !rbd_layout_is_fancy(&rbd_dev->layout))
2532 return rbd_img_fill_request_nocopy(img_req, img_extents,
2533 num_img_extents, fctx);
2535 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2538 * Create object requests and determine ->bvec_count for each object
2539 * request. Note that ->bvec_count sum over all object requests may
2540 * be greater than the number of bio_vecs in the provided bio (list)
2541 * or bio_vec array because when mapped, those bio_vecs can straddle
2542 * stripe unit boundaries.
2544 fctx->iter = *fctx->pos;
2545 for (i = 0; i < num_img_extents; i++) {
2546 ret = ceph_file_to_extents(&rbd_dev->layout,
2547 img_extents[i].fe_off,
2548 img_extents[i].fe_len,
2549 &img_req->object_extents,
2550 alloc_object_extent, img_req,
2551 fctx->count_fn, &fctx->iter);
2556 for_each_obj_request(img_req, obj_req) {
2557 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2558 sizeof(*obj_req->bvec_pos.bvecs),
2560 if (!obj_req->bvec_pos.bvecs)
2565 * Fill in each object request's private bio_vec array, splitting and
2566 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2568 fctx->iter = *fctx->pos;
2569 for (i = 0; i < num_img_extents; i++) {
2570 ret = ceph_iterate_extents(&rbd_dev->layout,
2571 img_extents[i].fe_off,
2572 img_extents[i].fe_len,
2573 &img_req->object_extents,
2574 fctx->copy_fn, &fctx->iter);
2579 return __rbd_img_fill_request(img_req);
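/*
 * Illustration of the "fancy" case above: with stripe_unit 1M,
 * stripe_count 2 and object_size 4M (illustrative values), a contiguous
 * 4M image extent alternates 1M chunks between two objects, so each
 * object request ends up owning two non-adjacent 1M chunks of the
 * original bio -- hence the count pass to size the private bio_vec
 * array and the copy pass to fill it.
 */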
2582 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2585 struct ceph_file_extent ex = { off, len };
2586 union rbd_img_fill_iter dummy = {};
2587 struct rbd_img_fill_ctx fctx = {
2588 .pos_type = OBJ_REQUEST_NODATA,
2592 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2595 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2597 struct rbd_obj_request *obj_req =
2598 container_of(ex, struct rbd_obj_request, ex);
2599 struct ceph_bio_iter *it = arg;
2601 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2602 obj_req->bio_pos = *it;
2603 ceph_bio_iter_advance(it, bytes);
2606 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2608 struct rbd_obj_request *obj_req =
2609 container_of(ex, struct rbd_obj_request, ex);
2610 struct ceph_bio_iter *it = arg;
2612 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2613 ceph_bio_iter_advance_step(it, bytes, ({
2614 obj_req->bvec_count++;
2619 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2621 struct rbd_obj_request *obj_req =
2622 container_of(ex, struct rbd_obj_request, ex);
2623 struct ceph_bio_iter *it = arg;
2625 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2626 ceph_bio_iter_advance_step(it, bytes, ({
2627 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2628 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2632 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2633 struct ceph_file_extent *img_extents,
2634 u32 num_img_extents,
2635 struct ceph_bio_iter *bio_pos)
2637 struct rbd_img_fill_ctx fctx = {
2638 .pos_type = OBJ_REQUEST_BIO,
2639 .pos = (union rbd_img_fill_iter *)bio_pos,
2640 .set_pos_fn = set_bio_pos,
2641 .count_fn = count_bio_bvecs,
2642 .copy_fn = copy_bio_bvecs,
2645 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2649 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2650 u64 off, u64 len, struct bio *bio)
2652 struct ceph_file_extent ex = { off, len };
2653 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2655 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2658 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2660 struct rbd_obj_request *obj_req =
2661 container_of(ex, struct rbd_obj_request, ex);
2662 struct ceph_bvec_iter *it = arg;
2664 obj_req->bvec_pos = *it;
2665 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2666 ceph_bvec_iter_advance(it, bytes);
2669 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2671 struct rbd_obj_request *obj_req =
2672 container_of(ex, struct rbd_obj_request, ex);
2673 struct ceph_bvec_iter *it = arg;
2675 ceph_bvec_iter_advance_step(it, bytes, ({
2676 obj_req->bvec_count++;
2680 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2682 struct rbd_obj_request *obj_req =
2683 container_of(ex, struct rbd_obj_request, ex);
2684 struct ceph_bvec_iter *it = arg;
2686 ceph_bvec_iter_advance_step(it, bytes, ({
2687 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2688 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2692 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2693 struct ceph_file_extent *img_extents,
2694 u32 num_img_extents,
2695 struct ceph_bvec_iter *bvec_pos)
2697 struct rbd_img_fill_ctx fctx = {
2698 .pos_type = OBJ_REQUEST_BVECS,
2699 .pos = (union rbd_img_fill_iter *)bvec_pos,
2700 .set_pos_fn = set_bvec_pos,
2701 .count_fn = count_bvecs,
2702 .copy_fn = copy_bvecs,
2705 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2709 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2710 struct ceph_file_extent *img_extents,
2711 u32 num_img_extents,
2712 struct bio_vec *bvecs)
2714 struct ceph_bvec_iter it = {
2716 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2720 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
static void rbd_img_handle_request_work(struct work_struct *work)
{
        struct rbd_img_request *img_req =
            container_of(work, struct rbd_img_request, work);

        rbd_img_handle_request(img_req, img_req->work_result);
}

static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
{
        INIT_WORK(&img_req->work, rbd_img_handle_request_work);
        img_req->work_result = result;
        queue_work(rbd_wq, &img_req->work);
}
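/*
 * The read path below is driven by the state machine in
 * rbd_obj_advance_read(): RBD_OBJ_READ_START issues the OSD read (or
 * skips it when the object map says the object doesn't exist),
 * RBD_OBJ_READ_OBJECT turns -ENOENT into a read from the parent image
 * when there is an overlap, and RBD_OBJ_READ_PARENT zero-fills whatever
 * the parent couldn't provide.
 */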
static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
{
        struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

        if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
                obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
                return true;
        }

        dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
             obj_req->ex.oe_objno);
        return false;
}
2753 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2755 struct ceph_osd_request *osd_req;
2758 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2759 if (IS_ERR(osd_req))
2760 return PTR_ERR(osd_req);
2762 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2763 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2764 rbd_osd_setup_data(osd_req, 0);
2765 rbd_osd_format_read(osd_req);
2767 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2771 rbd_osd_submit(osd_req);
2775 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2777 struct rbd_img_request *img_req = obj_req->img_request;
2778 struct rbd_device *parent = img_req->rbd_dev->parent;
2779 struct rbd_img_request *child_img_req;
2782 child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2786 rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
2787 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2788 child_img_req->obj_request = obj_req;
2790 down_read(&parent->header_rwsem);
2791 rbd_img_capture_header(child_img_req);
2792 up_read(&parent->header_rwsem);
2794 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2797 if (!rbd_img_is_write(img_req)) {
2798 switch (img_req->data_type) {
2799 case OBJ_REQUEST_BIO:
2800 ret = __rbd_img_fill_from_bio(child_img_req,
2801 obj_req->img_extents,
2802 obj_req->num_img_extents,
2805 case OBJ_REQUEST_BVECS:
2806 case OBJ_REQUEST_OWN_BVECS:
2807 ret = __rbd_img_fill_from_bvecs(child_img_req,
2808 obj_req->img_extents,
2809 obj_req->num_img_extents,
2810 &obj_req->bvec_pos);
2816 ret = rbd_img_fill_from_bvecs(child_img_req,
2817 obj_req->img_extents,
2818 obj_req->num_img_extents,
2819 obj_req->copyup_bvecs);
2822 rbd_img_request_destroy(child_img_req);
2826 /* avoid parent chain recursion */
2827 rbd_img_schedule(child_img_req, 0);
2831 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2833 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2837 switch (obj_req->read_state) {
2838 case RBD_OBJ_READ_START:
2839 rbd_assert(!*result);
2841 if (!rbd_obj_may_exist(obj_req)) {
2843 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2847 ret = rbd_obj_read_object(obj_req);
2852 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2854 case RBD_OBJ_READ_OBJECT:
2855 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2856 /* reverse map this object extent onto the parent */
2857 ret = rbd_obj_calc_img_extents(obj_req, false);
2862 if (obj_req->num_img_extents) {
2863 ret = rbd_obj_read_from_parent(obj_req);
2868 obj_req->read_state = RBD_OBJ_READ_PARENT;
/*
 * -ENOENT means a hole in the image -- zero-fill the entire
 * length of the request.  A short read also implies zero-fill
 * to the end of the request.
 */
2878 if (*result == -ENOENT) {
2879 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2881 } else if (*result >= 0) {
2882 if (*result < obj_req->ex.oe_len)
2883 rbd_obj_zero_range(obj_req, *result,
2884 obj_req->ex.oe_len - *result);
2886 rbd_assert(*result == obj_req->ex.oe_len);
2890 case RBD_OBJ_READ_PARENT:
/*
 * The parent image is read only up to the overlap -- zero-fill
 * from the overlap to the end of the request.
 */
2896 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2898 if (obj_overlap < obj_req->ex.oe_len)
2899 rbd_obj_zero_range(obj_req, obj_overlap,
2900 obj_req->ex.oe_len - obj_overlap);
static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
{
        struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

        if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
                obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;

        if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
            (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
                dout("%s %p noop for nonexistent\n", __func__, obj_req);
                return true;
        }

        return false;
}
/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
{
        struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
        u8 new_state;

        if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
                return 1;

        if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
                new_state = OBJECT_PENDING;
        else
                new_state = OBJECT_EXISTS;

        return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
}
2946 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
2948 struct ceph_osd_request *osd_req;
2949 int num_ops = count_write_ops(obj_req);
2953 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
2954 num_ops++; /* stat */
2956 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
2957 if (IS_ERR(osd_req))
2958 return PTR_ERR(osd_req);
2960 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
2961 ret = rbd_osd_setup_stat(osd_req, which++);
2966 rbd_osd_setup_write_ops(osd_req, which);
2967 rbd_osd_format_write(osd_req);
2969 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2973 rbd_osd_submit(osd_req);
/*
 * copyup_bvecs pages are never highmem pages
 */
2980 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
2982 struct ceph_bvec_iter it = {
2984 .iter = { .bi_size = bytes },
2987 ceph_bvec_iter_advance_step(&it, bytes, ({
2988 if (memchr_inv(bvec_virt(&bv), 0, bv.bv_len))
2994 #define MODS_ONLY U32_MAX
2996 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
2999 struct ceph_osd_request *osd_req;
3002 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3003 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3005 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3006 if (IS_ERR(osd_req))
3007 return PTR_ERR(osd_req);
3009 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3013 rbd_osd_format_write(osd_req);
3015 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3019 rbd_osd_submit(osd_req);
3023 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3026 struct ceph_osd_request *osd_req;
3027 int num_ops = count_write_ops(obj_req);
3031 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3033 if (bytes != MODS_ONLY)
3034 num_ops++; /* copyup */
3036 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3037 if (IS_ERR(osd_req))
3038 return PTR_ERR(osd_req);
3040 if (bytes != MODS_ONLY) {
3041 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3046 rbd_osd_setup_write_ops(osd_req, which);
3047 rbd_osd_format_write(osd_req);
3049 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3053 rbd_osd_submit(osd_req);
3057 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3061 rbd_assert(!obj_req->copyup_bvecs);
3062 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3063 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3064 sizeof(*obj_req->copyup_bvecs),
3066 if (!obj_req->copyup_bvecs)
3069 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3070 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3071 struct page *page = alloc_page(GFP_NOIO);
3076 bvec_set_page(&obj_req->copyup_bvecs[i], page, len, 0);
3080 rbd_assert(!obj_overlap);
/*
 * The target object doesn't exist.  Read the data for the entire
 * target object up to the overlap point (if any) from the parent,
 * so we can use it for a copyup.
 */
3089 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3091 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3094 rbd_assert(obj_req->num_img_extents);
3095 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3096 rbd_dev->parent_overlap);
3097 if (!obj_req->num_img_extents) {
/*
 * The overlap has become 0 (most likely because the
 * image has been flattened).  Re-submit the original write
 * request -- pass MODS_ONLY since the copyup isn't needed
 * anymore.
 */
3104 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3107 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3111 return rbd_obj_read_from_parent(obj_req);
3114 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3116 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3117 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3122 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3124 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3127 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3130 for (i = 0; i < snapc->num_snaps; i++) {
3131 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3132 i + 1 < snapc->num_snaps)
3133 new_state = OBJECT_EXISTS_CLEAN;
3135 new_state = OBJECT_EXISTS;
3137 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3140 obj_req->pending.result = ret;
3145 obj_req->pending.num_pending++;
3149 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3151 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3154 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
/*
 * Only send non-zero copyup data to save some I/O and network
 * bandwidth -- zero copyup data is equivalent to the object not
 * existing.
 */
3161 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3164 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
/*
 * Send a copyup request with an empty snapshot context to
 * deep-copyup the object through all existing snapshots.
 * A second request with the current snapshot context will be
 * sent for the actual modification.
 */
3171 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3173 obj_req->pending.result = ret;
3177 obj_req->pending.num_pending++;
3181 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3183 obj_req->pending.result = ret;
3187 obj_req->pending.num_pending++;
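/*
 * Copyup is driven by the state machine in rbd_obj_advance_copyup():
 * RBD_OBJ_COPYUP_START reads the object's data from the parent,
 * RBD_OBJ_COPYUP_READ_PARENT checks for all-zero data, the OBJECT_MAPS
 * states update the object map of every affected snapshot, and the
 * WRITE_OBJECT states send the copyup together with the original
 * modification.
 */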
3190 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3192 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3196 switch (obj_req->copyup_state) {
3197 case RBD_OBJ_COPYUP_START:
3198 rbd_assert(!*result);
3200 ret = rbd_obj_copyup_read_parent(obj_req);
3205 if (obj_req->num_img_extents)
3206 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3208 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3210 case RBD_OBJ_COPYUP_READ_PARENT:
3214 if (is_zero_bvecs(obj_req->copyup_bvecs,
3215 rbd_obj_img_extents_bytes(obj_req))) {
3216 dout("%s %p detected zeros\n", __func__, obj_req);
3217 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3220 rbd_obj_copyup_object_maps(obj_req);
3221 if (!obj_req->pending.num_pending) {
3222 *result = obj_req->pending.result;
3223 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3226 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3228 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3229 if (!pending_result_dec(&obj_req->pending, result))
3232 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3234 rbd_warn(rbd_dev, "snap object map update failed: %d",
3239 rbd_obj_copyup_write_object(obj_req);
3240 if (!obj_req->pending.num_pending) {
3241 *result = obj_req->pending.result;
3242 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3245 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3247 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3248 if (!pending_result_dec(&obj_req->pending, result))
3251 case RBD_OBJ_COPYUP_WRITE_OBJECT:
/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
{
        struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
        u8 current_state = OBJECT_PENDING;

        if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
                return 1;

        if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
                return 1;

        return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
                                     &current_state);
}
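/*
 * Writes are driven by the state machine in rbd_obj_advance_write():
 * RBD_OBJ_WRITE_START -> RBD_OBJ_WRITE_PRE_OBJECT_MAP ->
 * RBD_OBJ_WRITE_OBJECT -> (RBD_OBJ_WRITE_COPYUP on -ENOENT with copyup
 * enabled) -> RBD_OBJ_WRITE_POST_OBJECT_MAP.  The object map steps are
 * skipped when the feature is disabled.
 */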
3279 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3281 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3285 switch (obj_req->write_state) {
3286 case RBD_OBJ_WRITE_START:
3287 rbd_assert(!*result);
3289 if (rbd_obj_write_is_noop(obj_req))
3292 ret = rbd_obj_write_pre_object_map(obj_req);
3297 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3301 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3303 rbd_warn(rbd_dev, "pre object map update failed: %d",
3307 ret = rbd_obj_write_object(obj_req);
3312 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3314 case RBD_OBJ_WRITE_OBJECT:
3315 if (*result == -ENOENT) {
3316 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3318 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3319 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3323 * On a non-existent object:
3324 * delete - -ENOENT, truncate/zero - 0
3326 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3332 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3334 case __RBD_OBJ_WRITE_COPYUP:
3335 if (!rbd_obj_advance_copyup(obj_req, result))
3338 case RBD_OBJ_WRITE_COPYUP:
3340 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3343 ret = rbd_obj_write_post_object_map(obj_req);
3348 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3352 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3354 rbd_warn(rbd_dev, "post object map update failed: %d",
/*
 * Return true if @obj_req is completed.
 */
static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
                                     int *result)
{
        struct rbd_img_request *img_req = obj_req->img_request;
        struct rbd_device *rbd_dev = img_req->rbd_dev;
        bool done;

        mutex_lock(&obj_req->state_mutex);
        if (!rbd_img_is_write(img_req))
                done = rbd_obj_advance_read(obj_req, result);
        else
                done = rbd_obj_advance_write(obj_req, result);
        mutex_unlock(&obj_req->state_mutex);

        if (done && *result) {
                rbd_assert(*result < 0);
                rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
                         obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
                         obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
        }
        return done;
}
/*
 * This is open-coded in rbd_img_handle_request() to avoid parent chain
 * recursion.
 */
static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
{
        if (__rbd_obj_handle_request(obj_req, &result))
                rbd_img_handle_request(obj_req->img_request, result);
}
static bool need_exclusive_lock(struct rbd_img_request *img_req)
{
        struct rbd_device *rbd_dev = img_req->rbd_dev;

        if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
                return false;

        if (rbd_is_ro(rbd_dev))
                return false;

        rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
        if (rbd_dev->opts->lock_on_read ||
            (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
                return true;

        return rbd_img_is_write(img_req);
}
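/*
 * In practice the exclusive lock is therefore taken lazily on the first
 * write, or on the first read if the object map feature is enabled or
 * the image was mapped with the lock_on_read option (option names here
 * refer to the userspace mapping options, e.g. something like
 * "rbd map -o lock_on_read"; treat the exact invocation as illustrative).
 */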
static bool rbd_lock_add_request(struct rbd_img_request *img_req)
{
        struct rbd_device *rbd_dev = img_req->rbd_dev;
        bool locked;

        lockdep_assert_held(&rbd_dev->lock_rwsem);
        locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
        spin_lock(&rbd_dev->lock_lists_lock);
        rbd_assert(list_empty(&img_req->lock_item));
        if (!locked)
                list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
        else
                list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
        spin_unlock(&rbd_dev->lock_lists_lock);

        return locked;
}
static void rbd_lock_del_request(struct rbd_img_request *img_req)
{
        struct rbd_device *rbd_dev = img_req->rbd_dev;
        bool need_wakeup;

        lockdep_assert_held(&rbd_dev->lock_rwsem);
        spin_lock(&rbd_dev->lock_lists_lock);
        rbd_assert(!list_empty(&img_req->lock_item));
        list_del_init(&img_req->lock_item);
        need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
                       list_empty(&rbd_dev->running_list));
        spin_unlock(&rbd_dev->lock_lists_lock);
        if (need_wakeup)
                complete(&rbd_dev->releasing_wait);
}
3449 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3451 struct rbd_device *rbd_dev = img_req->rbd_dev;
3453 if (!need_exclusive_lock(img_req))
3456 if (rbd_lock_add_request(img_req))
3459 if (rbd_dev->opts->exclusive) {
3460 WARN_ON(1); /* lock got released? */
3465 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3466 * and cancel_delayed_work() in wake_lock_waiters().
3468 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3469 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3473 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3475 struct rbd_obj_request *obj_req;
3477 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3479 for_each_obj_request(img_req, obj_req) {
3482 if (__rbd_obj_handle_request(obj_req, &result)) {
3484 img_req->pending.result = result;
3488 img_req->pending.num_pending++;
3493 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3495 struct rbd_device *rbd_dev = img_req->rbd_dev;
3499 switch (img_req->state) {
3501 rbd_assert(!*result);
3503 ret = rbd_img_exclusive_lock(img_req);
3508 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3512 case RBD_IMG_EXCLUSIVE_LOCK:
3516 rbd_assert(!need_exclusive_lock(img_req) ||
3517 __rbd_is_lock_owner(rbd_dev));
3519 rbd_img_object_requests(img_req);
3520 if (!img_req->pending.num_pending) {
3521 *result = img_req->pending.result;
3522 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3525 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3527 case __RBD_IMG_OBJECT_REQUESTS:
3528 if (!pending_result_dec(&img_req->pending, result))
3531 case RBD_IMG_OBJECT_REQUESTS:
3539 * Return true if @img_req is completed.
3541 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3544 struct rbd_device *rbd_dev = img_req->rbd_dev;
3547 if (need_exclusive_lock(img_req)) {
3548 down_read(&rbd_dev->lock_rwsem);
3549 mutex_lock(&img_req->state_mutex);
3550 done = rbd_img_advance(img_req, result);
3552 rbd_lock_del_request(img_req);
3553 mutex_unlock(&img_req->state_mutex);
3554 up_read(&rbd_dev->lock_rwsem);
3556 mutex_lock(&img_req->state_mutex);
3557 done = rbd_img_advance(img_req, result);
3558 mutex_unlock(&img_req->state_mutex);
3561 if (done && *result) {
3562 rbd_assert(*result < 0);
3563 rbd_warn(rbd_dev, "%s%s result %d",
3564 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3565 obj_op_name(img_req->op_type), *result);
3570 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3573 if (!__rbd_img_handle_request(img_req, &result))
3576 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3577 struct rbd_obj_request *obj_req = img_req->obj_request;
3579 rbd_img_request_destroy(img_req);
3580 if (__rbd_obj_handle_request(obj_req, &result)) {
3581 img_req = obj_req->img_request;
3585 struct request *rq = blk_mq_rq_from_pdu(img_req);
3587 rbd_img_request_destroy(img_req);
3588 blk_mq_end_request(rq, errno_to_blk_status(result));
static const struct rbd_client_id rbd_empty_cid;

static bool rbd_cid_equal(const struct rbd_client_id *lhs,
                          const struct rbd_client_id *rhs)
{
        return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
}

static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
{
        struct rbd_client_id cid;

        mutex_lock(&rbd_dev->watch_mutex);
        cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
        cid.handle = rbd_dev->watch_cookie;
        mutex_unlock(&rbd_dev->watch_mutex);
        return cid;
}
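/*
 * A client id is the (ceph global id, watch cookie) pair that
 * identifies this client's watch on the header object; it is what peers
 * see as the lock owner in acquired/released lock notifications.
 */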
/*
 * lock_rwsem must be held for write
 */
static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
                              const struct rbd_client_id *cid)
{
        dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
             rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
             cid->gid, cid->handle);
        rbd_dev->owner_cid = *cid; /* struct */
}

static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
{
        mutex_lock(&rbd_dev->watch_mutex);
        sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
        mutex_unlock(&rbd_dev->watch_mutex);
}

static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
{
        struct rbd_client_id cid = rbd_get_cid(rbd_dev);

        rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
        strcpy(rbd_dev->lock_cookie, cookie);
        rbd_set_owner_cid(rbd_dev, &cid);
        queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
}
/*
 * lock_rwsem must be held for write
 */
static int rbd_lock(struct rbd_device *rbd_dev)
{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        char cookie[32];
        int ret;

        WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
                rbd_dev->lock_cookie[0] != '\0');

        format_lock_cookie(rbd_dev, cookie);
        ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
                            RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
                            RBD_LOCK_TAG, "", 0);
        if (ret)
                return ret;

        __rbd_lock(rbd_dev, cookie);
        return 0;
}
/*
 * lock_rwsem must be held for write
 */
static void rbd_unlock(struct rbd_device *rbd_dev)
{
        struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
        int ret;

        WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
                rbd_dev->lock_cookie[0] == '\0');

        ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
                              RBD_LOCK_NAME, rbd_dev->lock_cookie);
        if (ret && ret != -ENOENT)
                rbd_warn(rbd_dev, "failed to unlock header: %d", ret);

        /* treat errors as the image is unlocked */
        rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
        rbd_dev->lock_cookie[0] = '\0';
        rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
        queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
}
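/*
 * The lock cookie is RBD_LOCK_COOKIE_PREFIX followed by the watch
 * cookie (see format_lock_cookie()), which is what lets
 * get_lock_owner_info() and find_watcher() tie a locker back to a live
 * watcher on the header object.
 */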
3686 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3687 enum rbd_notify_op notify_op,
3688 struct page ***preply_pages,
3691 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3692 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3693 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3694 int buf_size = sizeof(buf);
3697 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3699 /* encode *LockPayload NotifyMessage (op + ClientId) */
3700 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3701 ceph_encode_32(&p, notify_op);
3702 ceph_encode_64(&p, cid.gid);
3703 ceph_encode_64(&p, cid.handle);
3705 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3706 &rbd_dev->header_oloc, buf, buf_size,
3707 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
                               enum rbd_notify_op notify_op)
{
        __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
}

static void rbd_notify_acquired_lock(struct work_struct *work)
{
        struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
                                                  acquired_lock_work);

        rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
}

static void rbd_notify_released_lock(struct work_struct *work)
{
        struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
                                                  released_lock_work);

        rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
}
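/*
 * Lock transitions are broadcast to other clients via watch-notify on
 * the header object: ACQUIRED_LOCK and RELEASED_LOCK are informational,
 * while REQUEST_LOCK asks the current owner to release the lock and is
 * answered with an encoded ResponseMessage (see
 * rbd_handle_request_lock()).
 */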
3732 static int rbd_request_lock(struct rbd_device *rbd_dev)
3734 struct page **reply_pages;
3736 bool lock_owner_responded = false;
3739 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3741 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3742 &reply_pages, &reply_len);
3743 if (ret && ret != -ETIMEDOUT) {
3744 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3748 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3749 void *p = page_address(reply_pages[0]);
3750 void *const end = p + reply_len;
3753 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3758 ceph_decode_need(&p, end, 8 + 8, e_inval);
3759 p += 8 + 8; /* skip gid and cookie */
3761 ceph_decode_32_safe(&p, end, len, e_inval);
3765 if (lock_owner_responded) {
3767 "duplicate lock owners detected");
3772 lock_owner_responded = true;
3773 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3777 "failed to decode ResponseMessage: %d",
3782 ret = ceph_decode_32(&p);
3786 if (!lock_owner_responded) {
3787 rbd_warn(rbd_dev, "no lock owners detected");
3792 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
/*
 * Either image request state machine(s) or rbd_add_acquire_lock()
 * (i.e. "rbd map").
 */
3804 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3806 struct rbd_img_request *img_req;
3808 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3809 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3811 cancel_delayed_work(&rbd_dev->lock_dwork);
3812 if (!completion_done(&rbd_dev->acquire_wait)) {
3813 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3814 list_empty(&rbd_dev->running_list));
3815 rbd_dev->acquire_err = result;
3816 complete_all(&rbd_dev->acquire_wait);
3820 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
3821 mutex_lock(&img_req->state_mutex);
3822 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3823 rbd_img_schedule(img_req, result);
3824 mutex_unlock(&img_req->state_mutex);
3827 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
3830 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3831 struct ceph_locker **lockers, u32 *num_lockers)
3833 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3838 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3840 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3841 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3842 &lock_type, &lock_tag, lockers, num_lockers);
3846 if (*num_lockers == 0) {
3847 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3851 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3852 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3858 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3859 rbd_warn(rbd_dev, "shared lock type detected");
3864 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3865 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3866 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3867 (*lockers)[0].id.cookie);
3877 static int find_watcher(struct rbd_device *rbd_dev,
3878 const struct ceph_locker *locker)
3880 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3881 struct ceph_watch_item *watchers;
3887 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3888 &rbd_dev->header_oloc, &watchers,
3893 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3894 for (i = 0; i < num_watchers; i++) {
3896 * Ignore addr->type while comparing. This mimics
3897 * entity_addr_t::get_legacy_str() + strcmp().
3899 if (ceph_addr_equal_no_type(&watchers[i].addr,
3900 &locker->info.addr) &&
3901 watchers[i].cookie == cookie) {
3902 struct rbd_client_id cid = {
3903 .gid = le64_to_cpu(watchers[i].name.num),
3907 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3908 rbd_dev, cid.gid, cid.handle);
3909 rbd_set_owner_cid(rbd_dev, &cid);
3915 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
/*
 * lock_rwsem must be held for write
 */
3925 static int rbd_try_lock(struct rbd_device *rbd_dev)
3927 struct ceph_client *client = rbd_dev->rbd_client->client;
3928 struct ceph_locker *lockers;
3933 ret = rbd_lock(rbd_dev);
3937 /* determine if the current lock holder is still alive */
3938 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
3942 if (num_lockers == 0)
3945 ret = find_watcher(rbd_dev, lockers);
3947 goto out; /* request lock or error */
3949 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
3950 ENTITY_NAME(lockers[0].id.name));
3952 ret = ceph_monc_blocklist_add(&client->monc,
3953 &lockers[0].info.addr);
3955 rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
3956 ENTITY_NAME(lockers[0].id.name), ret);
3960 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
3961 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3962 lockers[0].id.cookie,
3963 &lockers[0].id.name);
3964 if (ret && ret != -ENOENT)
3968 ceph_free_lockers(lockers, num_lockers);
3972 ceph_free_lockers(lockers, num_lockers);
3976 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
3980 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
3981 ret = rbd_object_map_open(rbd_dev);
3992 * 1 - caller should call rbd_request_lock()
3995 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
3999 down_read(&rbd_dev->lock_rwsem);
4000 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4001 rbd_dev->lock_state);
4002 if (__rbd_is_lock_owner(rbd_dev)) {
4003 up_read(&rbd_dev->lock_rwsem);
4007 up_read(&rbd_dev->lock_rwsem);
4008 down_write(&rbd_dev->lock_rwsem);
4009 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4010 rbd_dev->lock_state);
4011 if (__rbd_is_lock_owner(rbd_dev)) {
4012 up_write(&rbd_dev->lock_rwsem);
4016 ret = rbd_try_lock(rbd_dev);
4018 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4019 if (ret == -EBLOCKLISTED)
4022 ret = 1; /* request lock anyway */
4025 up_write(&rbd_dev->lock_rwsem);
4029 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4030 rbd_assert(list_empty(&rbd_dev->running_list));
4032 ret = rbd_post_acquire_action(rbd_dev);
4034 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4036 * Can't stay in RBD_LOCK_STATE_LOCKED because
4037 * rbd_lock_add_request() would let the request through,
4038 * assuming that e.g. object map is locked and loaded.
4040 rbd_unlock(rbd_dev);
4044 wake_lock_waiters(rbd_dev, ret);
4045 up_write(&rbd_dev->lock_rwsem);
4049 static void rbd_acquire_lock(struct work_struct *work)
4051 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4052 struct rbd_device, lock_dwork);
4055 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4057 ret = rbd_try_acquire_lock(rbd_dev);
4059 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4063 ret = rbd_request_lock(rbd_dev);
4064 if (ret == -ETIMEDOUT) {
4065 goto again; /* treat this as a dead client */
4066 } else if (ret == -EROFS) {
4067 rbd_warn(rbd_dev, "peer will not release lock");
4068 down_write(&rbd_dev->lock_rwsem);
4069 wake_lock_waiters(rbd_dev, ret);
4070 up_write(&rbd_dev->lock_rwsem);
4071 } else if (ret < 0) {
4072 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4073 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4077 * lock owner acked, but resend if we don't see them
4080 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4082 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4083 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4087 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4089 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4090 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4092 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4096 * Ensure that all in-flight IO is flushed.
4098 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4099 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4100 if (list_empty(&rbd_dev->running_list))
4103 up_write(&rbd_dev->lock_rwsem);
4104 wait_for_completion(&rbd_dev->releasing_wait);
4106 down_write(&rbd_dev->lock_rwsem);
4107 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4110 rbd_assert(list_empty(&rbd_dev->running_list));
static void rbd_pre_release_action(struct rbd_device *rbd_dev)
{
        if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
                rbd_object_map_close(rbd_dev);
}

static void __rbd_release_lock(struct rbd_device *rbd_dev)
{
        rbd_assert(list_empty(&rbd_dev->running_list));

        rbd_pre_release_action(rbd_dev);
        rbd_unlock(rbd_dev);
}
/*
 * lock_rwsem must be held for write
 */
4131 static void rbd_release_lock(struct rbd_device *rbd_dev)
4133 if (!rbd_quiesce_lock(rbd_dev))
4136 __rbd_release_lock(rbd_dev);
4139 * Give others a chance to grab the lock - we would re-acquire
4140 * almost immediately if we got new IO while draining the running
4141 * list otherwise. We need to ack our own notifications, so this
4142 * lock_dwork will be requeued from rbd_handle_released_lock() by
4143 * way of maybe_kick_acquire().
4145 cancel_delayed_work(&rbd_dev->lock_dwork);
static void rbd_release_lock_work(struct work_struct *work)
{
        struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
                                                  unlock_work);

        down_write(&rbd_dev->lock_rwsem);
        rbd_release_lock(rbd_dev);
        up_write(&rbd_dev->lock_rwsem);
}
static void maybe_kick_acquire(struct rbd_device *rbd_dev)
{
        bool have_requests;

        dout("%s rbd_dev %p\n", __func__, rbd_dev);
        if (__rbd_is_lock_owner(rbd_dev))
                return;

        spin_lock(&rbd_dev->lock_lists_lock);
        have_requests = !list_empty(&rbd_dev->acquiring_list);
        spin_unlock(&rbd_dev->lock_lists_lock);
        if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
                dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
                mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
        }
}
4175 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4178 struct rbd_client_id cid = { 0 };
4180 if (struct_v >= 2) {
4181 cid.gid = ceph_decode_64(p);
4182 cid.handle = ceph_decode_64(p);
4185 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4187 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4188 down_write(&rbd_dev->lock_rwsem);
4189 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4190 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
4191 __func__, rbd_dev, cid.gid, cid.handle);
4193 rbd_set_owner_cid(rbd_dev, &cid);
4195 downgrade_write(&rbd_dev->lock_rwsem);
4197 down_read(&rbd_dev->lock_rwsem);
4200 maybe_kick_acquire(rbd_dev);
4201 up_read(&rbd_dev->lock_rwsem);
4204 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4207 struct rbd_client_id cid = { 0 };
4209 if (struct_v >= 2) {
4210 cid.gid = ceph_decode_64(p);
4211 cid.handle = ceph_decode_64(p);
4214 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4216 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4217 down_write(&rbd_dev->lock_rwsem);
4218 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4219 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
4220 __func__, rbd_dev, cid.gid, cid.handle,
4221 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4223 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4225 downgrade_write(&rbd_dev->lock_rwsem);
4227 down_read(&rbd_dev->lock_rwsem);
4230 maybe_kick_acquire(rbd_dev);
4231 up_read(&rbd_dev->lock_rwsem);
/*
 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
 * ResponseMessage is needed.
 */
4238 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4241 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4242 struct rbd_client_id cid = { 0 };
4245 if (struct_v >= 2) {
4246 cid.gid = ceph_decode_64(p);
4247 cid.handle = ceph_decode_64(p);
4250 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4252 if (rbd_cid_equal(&cid, &my_cid))
4255 down_read(&rbd_dev->lock_rwsem);
4256 if (__rbd_is_lock_owner(rbd_dev)) {
4257 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4258 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4262 * encode ResponseMessage(0) so the peer can detect
4267 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4268 if (!rbd_dev->opts->exclusive) {
4269 dout("%s rbd_dev %p queueing unlock_work\n",
4271 queue_work(rbd_dev->task_wq,
4272 &rbd_dev->unlock_work);
4274 /* refuse to release the lock */
4281 up_read(&rbd_dev->lock_rwsem);
4285 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4286 u64 notify_id, u64 cookie, s32 *result)
4288 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4289 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4290 int buf_size = sizeof(buf);
4296 /* encode ResponseMessage */
4297 ceph_start_encoding(&p, 1, 1,
4298 buf_size - CEPH_ENCODING_START_BLK_LEN);
4299 ceph_encode_32(&p, *result);
4304 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4305 &rbd_dev->header_oloc, notify_id, cookie,
4308 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
                                   u64 cookie)
{
        dout("%s rbd_dev %p\n", __func__, rbd_dev);
        __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
}

static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
                                          u64 notify_id, u64 cookie, s32 result)
{
        dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
        __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
}
4325 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4326 u64 notifier_id, void *data, size_t data_len)
4328 struct rbd_device *rbd_dev = arg;
4330 void *const end = p + data_len;
4336 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4337 __func__, rbd_dev, cookie, notify_id, data_len);
4339 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4342 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4347 notify_op = ceph_decode_32(&p);
4349 /* legacy notification for header updates */
4350 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4354 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4355 switch (notify_op) {
4356 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4357 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4358 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4360 case RBD_NOTIFY_OP_RELEASED_LOCK:
4361 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4362 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4364 case RBD_NOTIFY_OP_REQUEST_LOCK:
4365 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4367 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4370 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4372 case RBD_NOTIFY_OP_HEADER_UPDATE:
4373 ret = rbd_dev_refresh(rbd_dev);
4375 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4377 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4380 if (rbd_is_lock_owner(rbd_dev))
4381 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4382 cookie, -EOPNOTSUPP);
4384 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4389 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4391 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4393 struct rbd_device *rbd_dev = arg;
4395 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4397 down_write(&rbd_dev->lock_rwsem);
4398 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4399 up_write(&rbd_dev->lock_rwsem);
4401 mutex_lock(&rbd_dev->watch_mutex);
4402 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4403 __rbd_unregister_watch(rbd_dev);
4404 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4406 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4408 mutex_unlock(&rbd_dev->watch_mutex);
/*
 * watch_mutex must be locked
 */
4414 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4416 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4417 struct ceph_osd_linger_request *handle;
4419 rbd_assert(!rbd_dev->watch_handle);
4420 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4422 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4423 &rbd_dev->header_oloc, rbd_watch_cb,
4424 rbd_watch_errcb, rbd_dev);
4426 return PTR_ERR(handle);
4428 rbd_dev->watch_handle = handle;
/*
 * watch_mutex must be locked
 */
4435 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4437 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4440 rbd_assert(rbd_dev->watch_handle);
4441 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4443 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4445 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4447 rbd_dev->watch_handle = NULL;
4450 static int rbd_register_watch(struct rbd_device *rbd_dev)
4454 mutex_lock(&rbd_dev->watch_mutex);
4455 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4456 ret = __rbd_register_watch(rbd_dev);
4460 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4461 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4464 mutex_unlock(&rbd_dev->watch_mutex);
static void cancel_tasks_sync(struct rbd_device *rbd_dev)
{
        dout("%s rbd_dev %p\n", __func__, rbd_dev);

        cancel_work_sync(&rbd_dev->acquired_lock_work);
        cancel_work_sync(&rbd_dev->released_lock_work);
        cancel_delayed_work_sync(&rbd_dev->lock_dwork);
        cancel_work_sync(&rbd_dev->unlock_work);
}
/*
 * header_rwsem must not be held to avoid a deadlock with
 * rbd_dev_refresh() when flushing notifies.
 */
4482 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4484 cancel_tasks_sync(rbd_dev);
4486 mutex_lock(&rbd_dev->watch_mutex);
4487 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4488 __rbd_unregister_watch(rbd_dev);
4489 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4490 mutex_unlock(&rbd_dev->watch_mutex);
4492 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4493 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
/*
 * lock_rwsem must be held for write
 */
4499 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4501 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4505 if (!rbd_quiesce_lock(rbd_dev))
4508 format_lock_cookie(rbd_dev, cookie);
4509 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4510 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4511 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4512 RBD_LOCK_TAG, cookie);
4514 if (ret != -EOPNOTSUPP)
4515 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4519 * Lock cookie cannot be updated on older OSDs, so do
4520 * a manual release and queue an acquire.
4522 __rbd_release_lock(rbd_dev);
4523 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4525 __rbd_lock(rbd_dev, cookie);
4526 wake_lock_waiters(rbd_dev, 0);
4530 static void rbd_reregister_watch(struct work_struct *work)
4532 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4533 struct rbd_device, watch_dwork);
4536 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4538 mutex_lock(&rbd_dev->watch_mutex);
4539 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4540 mutex_unlock(&rbd_dev->watch_mutex);
4544 ret = __rbd_register_watch(rbd_dev);
4546 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4547 if (ret != -EBLOCKLISTED && ret != -ENOENT) {
4548 queue_delayed_work(rbd_dev->task_wq,
4549 &rbd_dev->watch_dwork,
4551 mutex_unlock(&rbd_dev->watch_mutex);
4555 mutex_unlock(&rbd_dev->watch_mutex);
4556 down_write(&rbd_dev->lock_rwsem);
4557 wake_lock_waiters(rbd_dev, ret);
4558 up_write(&rbd_dev->lock_rwsem);
4562 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4563 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4564 mutex_unlock(&rbd_dev->watch_mutex);
4566 down_write(&rbd_dev->lock_rwsem);
4567 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4568 rbd_reacquire_lock(rbd_dev);
4569 up_write(&rbd_dev->lock_rwsem);
4571 ret = rbd_dev_refresh(rbd_dev);
4573 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4577 * Synchronous osd object method call. Returns the number of bytes
4578 * returned in the outbound buffer, or a negative error code.
4580 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4581 struct ceph_object_id *oid,
4582 struct ceph_object_locator *oloc,
4583 const char *method_name,
4584 const void *outbound,
4585 size_t outbound_size,
4587 size_t inbound_size)
4589 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4590 struct page *req_page = NULL;
4591 struct page *reply_page;
4595 * Method calls are ultimately read operations. The result
4596 * should placed into the inbound buffer provided. They
4597 * also supply outbound data--parameters for the object
4598 * method. Currently if this is present it will be a
4602 if (outbound_size > PAGE_SIZE)
4605 req_page = alloc_page(GFP_KERNEL);
4609 memcpy(page_address(req_page), outbound, outbound_size);
4612 reply_page = alloc_page(GFP_KERNEL);
4615 __free_page(req_page);
4619 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4620 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4621 &reply_page, &inbound_size);
4623 memcpy(inbound, page_address(reply_page), inbound_size);
4628 __free_page(req_page);
4629 __free_page(reply_page);
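/*
 * rbd_obj_method_sync() is how the image probe talks to the rbd class
 * on the OSDs.  For instance, reading a v2 image's size looks roughly
 * like the sketch below (error handling omitted, buffer types
 * simplified):
 *
 *      ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *                                &rbd_dev->header_oloc, "get_size",
 *                                &snapid, sizeof(snapid),
 *                                &size_buf, sizeof(size_buf));
 */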
4633 static void rbd_queue_workfn(struct work_struct *work)
4635 struct rbd_img_request *img_request =
4636 container_of(work, struct rbd_img_request, work);
4637 struct rbd_device *rbd_dev = img_request->rbd_dev;
4638 enum obj_operation_type op_type = img_request->op_type;
4639 struct request *rq = blk_mq_rq_from_pdu(img_request);
4640 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4641 u64 length = blk_rq_bytes(rq);
4645 /* Ignore/skip any zero-length requests */
4647 dout("%s: zero-length request\n", __func__);
4649 goto err_img_request;
4652 blk_mq_start_request(rq);
4654 down_read(&rbd_dev->header_rwsem);
4655 mapping_size = rbd_dev->mapping.size;
4656 rbd_img_capture_header(img_request);
4657 up_read(&rbd_dev->header_rwsem);
4659 if (offset + length > mapping_size) {
4660 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4661 length, mapping_size);
4663 goto err_img_request;
4666 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4667 img_request, obj_op_name(op_type), offset, length);
4669 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4670 result = rbd_img_fill_nodata(img_request, offset, length);
4672 result = rbd_img_fill_from_bio(img_request, offset, length,
4675 goto err_img_request;
4677 rbd_img_handle_request(img_request, 0);
4681 rbd_img_request_destroy(img_request);
4683 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4684 obj_op_name(op_type), length, offset, result);
4685 blk_mq_end_request(rq, errno_to_blk_status(result));
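/*
 * Request flow: blk-mq calls rbd_queue_rq() with an rbd_img_request
 * embedded in the request pdu; the actual work (capturing the header,
 * filling the image request from the bio, or as nodata for
 * discard/zeroout, and kicking the image state machine) is deferred to
 * rbd_queue_workfn() on rbd_wq.
 */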
4688 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4689 const struct blk_mq_queue_data *bd)
4691 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4692 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4693 enum obj_operation_type op_type;
4695 switch (req_op(bd->rq)) {
4696 case REQ_OP_DISCARD:
4697 op_type = OBJ_OP_DISCARD;
4699 case REQ_OP_WRITE_ZEROES:
4700 op_type = OBJ_OP_ZEROOUT;
4703 op_type = OBJ_OP_WRITE;
4706 op_type = OBJ_OP_READ;
4709 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4710 return BLK_STS_IOERR;
4713 rbd_img_request_init(img_req, rbd_dev, op_type);
4715 if (rbd_img_is_write(img_req)) {
4716 if (rbd_is_ro(rbd_dev)) {
4717 rbd_warn(rbd_dev, "%s on read-only mapping",
4718 obj_op_name(img_req->op_type));
4719 return BLK_STS_IOERR;
4721 rbd_assert(!rbd_is_snap(rbd_dev));
4724 INIT_WORK(&img_req->work, rbd_queue_workfn);
4725 queue_work(rbd_wq, &img_req->work);
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
        put_disk(rbd_dev->disk);
        blk_mq_free_tag_set(&rbd_dev->tag_set);
        rbd_dev->disk = NULL;
}
4736 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4737 struct ceph_object_id *oid,
4738 struct ceph_object_locator *oloc,
4739 void *buf, int buf_len)
4742 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4743 struct ceph_osd_request *req;
4744 struct page **pages;
4745 int num_pages = calc_pages_for(0, buf_len);
4748 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4752 ceph_oid_copy(&req->r_base_oid, oid);
4753 ceph_oloc_copy(&req->r_base_oloc, oloc);
4754 req->r_flags = CEPH_OSD_FLAG_READ;
4756 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4757 if (IS_ERR(pages)) {
4758 ret = PTR_ERR(pages);
4762 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4763 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4766 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4770 ceph_osdc_start_request(osdc, req);
4771 ret = ceph_osdc_wait_request(osdc, req);
4773 ceph_copy_from_page_vector(pages, buf, 0, ret);
4776 ceph_osdc_put_request(req);
4781 * Read the complete header for the given rbd device. On successful
4782 * return, the rbd_dev->header field will contain up-to-date
4783 * information about the image.
4785 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4787 struct rbd_image_header_ondisk *ondisk = NULL;
4794 * The complete header will include an array of its 64-bit
4795 * snapshot ids, followed by the names of those snapshots as
4796 * a contiguous block of NUL-terminated strings. Note that
4797 * the number of snapshots could change by the time we read
4798 * it in, in which case we re-read it.
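/*
 * Illustrative sketch of the on-disk layout being read below (field
 * names follow struct rbd_image_header_ondisk; the drawing is only an
 * aid for the code that follows, not a formal definition):
 *
 *   +--------------------------------+
 *   | rbd_image_header_ondisk        |  fixed-size part, provides
 *   |   (snap_count, snap_names_len) |  the two lengths used below
 *   +--------------------------------+
 *   | snap_count entries of          |  per-snapshot ids and sizes
 *   |   struct rbd_image_snap_ondisk |
 *   +--------------------------------+
 *   | snap_names_len bytes of        |  one NUL-terminated name per
 *   |   snapshot names               |  snapshot, back to back
 *   +--------------------------------+
 */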
4805 size = sizeof (*ondisk);
4806 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4808 ondisk = kmalloc(size, GFP_KERNEL);
4812 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4813 &rbd_dev->header_oloc, ondisk, size);
4816 if ((size_t)ret < size) {
4818 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4822 if (!rbd_dev_ondisk_valid(ondisk)) {
4824 rbd_warn(rbd_dev, "invalid header");
4828 names_size = le64_to_cpu(ondisk->snap_names_len);
4829 want_count = snap_count;
4830 snap_count = le32_to_cpu(ondisk->snap_count);
4831 } while (snap_count != want_count);
4833 ret = rbd_header_from_disk(rbd_dev, ondisk);
4840 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4845 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4846 * try to update its size. If REMOVING is set, updating size
4847 * is just useless work since the device can't be opened.
4849 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4850 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4851 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4852 dout("setting size to %llu sectors", (unsigned long long)size);
4853 set_capacity_and_notify(rbd_dev->disk, size);
4857 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4862 down_write(&rbd_dev->header_rwsem);
4863 mapping_size = rbd_dev->mapping.size;
4865 ret = rbd_dev_header_info(rbd_dev);
4870 * If there is a parent, see if it has disappeared due to the
4871 * mapped image getting flattened.
4873 if (rbd_dev->parent) {
4874 ret = rbd_dev_v2_parent_info(rbd_dev);
4879 rbd_assert(!rbd_is_snap(rbd_dev));
4880 rbd_dev->mapping.size = rbd_dev->header.image_size;
4883 up_write(&rbd_dev->header_rwsem);
4884 if (!ret && mapping_size != rbd_dev->mapping.size)
4885 rbd_dev_update_size(rbd_dev);
4890 static const struct blk_mq_ops rbd_mq_ops = {
4891 .queue_rq = rbd_queue_rq,
4894 static int rbd_init_disk(struct rbd_device *rbd_dev)
4896 struct gendisk *disk;
4897 struct request_queue *q;
4898 unsigned int objset_bytes =
4899 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
4902 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4903 rbd_dev->tag_set.ops = &rbd_mq_ops;
4904 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4905 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4906 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
4907 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
4908 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
4910 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4914 disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev);
4916 err = PTR_ERR(disk);
4921 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4923 disk->major = rbd_dev->major;
4924 disk->first_minor = rbd_dev->minor;
4926 disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT);
4928 disk->minors = RBD_MINORS_PER_MAJOR;
4929 disk->fops = &rbd_bd_ops;
4930 disk->private_data = rbd_dev;
4932 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
4933 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4935 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
4936 q->limits.max_sectors = queue_max_hw_sectors(q);
4937 blk_queue_max_segments(q, USHRT_MAX);
4938 blk_queue_max_segment_size(q, UINT_MAX);
4939 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
4940 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
4942 if (rbd_dev->opts->trim) {
4943 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
4944 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
4945 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
4948 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4949 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
4951 rbd_dev->disk = disk;
4955 blk_mq_free_tag_set(&rbd_dev->tag_set);
4963 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4965 return container_of(dev, struct rbd_device, dev);
4968 static ssize_t rbd_size_show(struct device *dev,
4969 struct device_attribute *attr, char *buf)
4971 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4973 return sprintf(buf, "%llu\n",
4974 (unsigned long long)rbd_dev->mapping.size);
4977 static ssize_t rbd_features_show(struct device *dev,
4978 struct device_attribute *attr, char *buf)
4980 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4982 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
4985 static ssize_t rbd_major_show(struct device *dev,
4986 struct device_attribute *attr, char *buf)
4988 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4991 return sprintf(buf, "%d\n", rbd_dev->major);
4993 return sprintf(buf, "(none)\n");
4996 static ssize_t rbd_minor_show(struct device *dev,
4997 struct device_attribute *attr, char *buf)
4999 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5001 return sprintf(buf, "%d\n", rbd_dev->minor);
5004 static ssize_t rbd_client_addr_show(struct device *dev,
5005 struct device_attribute *attr, char *buf)
5007 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5008 struct ceph_entity_addr *client_addr =
5009 ceph_client_addr(rbd_dev->rbd_client->client);
5011 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5012 le32_to_cpu(client_addr->nonce));
5015 static ssize_t rbd_client_id_show(struct device *dev,
5016 struct device_attribute *attr, char *buf)
5018 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5020 return sprintf(buf, "client%lld\n",
5021 ceph_client_gid(rbd_dev->rbd_client->client));
5024 static ssize_t rbd_cluster_fsid_show(struct device *dev,
5025 struct device_attribute *attr, char *buf)
5027 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5029 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5032 static ssize_t rbd_config_info_show(struct device *dev,
5033 struct device_attribute *attr, char *buf)
5035 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5037 if (!capable(CAP_SYS_ADMIN))
5040 return sprintf(buf, "%s\n", rbd_dev->config_info);
5043 static ssize_t rbd_pool_show(struct device *dev,
5044 struct device_attribute *attr, char *buf)
5046 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5048 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5051 static ssize_t rbd_pool_id_show(struct device *dev,
5052 struct device_attribute *attr, char *buf)
5054 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5056 return sprintf(buf, "%llu\n",
5057 (unsigned long long) rbd_dev->spec->pool_id);
5060 static ssize_t rbd_pool_ns_show(struct device *dev,
5061 struct device_attribute *attr, char *buf)
5063 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5065 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5068 static ssize_t rbd_name_show(struct device *dev,
5069 struct device_attribute *attr, char *buf)
5071 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5073 if (rbd_dev->spec->image_name)
5074 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5076 return sprintf(buf, "(unknown)\n");
5079 static ssize_t rbd_image_id_show(struct device *dev,
5080 struct device_attribute *attr, char *buf)
5082 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5084 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5088 * Shows the name of the currently-mapped snapshot (or
5089 * RBD_SNAP_HEAD_NAME for the base image).
5091 static ssize_t rbd_snap_show(struct device *dev,
5092 struct device_attribute *attr,
5095 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5097 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5100 static ssize_t rbd_snap_id_show(struct device *dev,
5101 struct device_attribute *attr, char *buf)
5103 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5105 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5109 * For a v2 image, shows the chain of parent images, separated by empty
5110 * lines. For v1 images or if there is no parent, shows "(no parent image)".
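/*
 * Example of what reading the "parent" attribute of a mapped clone
 * might produce; every value below is made up, and the exact set of
 * lines follows the format string used in the loop below:
 *
 *   pool_id 2
 *   pool_name rbd
 *   pool_ns
 *   image_id 1f9874b0dc51
 *   image_name parent-image
 *   snap_id 4
 *   snap_name base
 *   overlap 10737418240
 */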
5113 static ssize_t rbd_parent_show(struct device *dev,
5114 struct device_attribute *attr,
5117 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5120 if (!rbd_dev->parent)
5121 return sprintf(buf, "(no parent image)\n");
5123 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5124 struct rbd_spec *spec = rbd_dev->parent_spec;
5126 count += sprintf(&buf[count], "%s"
5127 "pool_id %llu\npool_name %s\n"
5129 "image_id %s\nimage_name %s\n"
5130 "snap_id %llu\nsnap_name %s\n"
5132 !count ? "" : "\n", /* first? */
5133 spec->pool_id, spec->pool_name,
5134 spec->pool_ns ?: "",
5135 spec->image_id, spec->image_name ?: "(unknown)",
5136 spec->snap_id, spec->snap_name,
5137 rbd_dev->parent_overlap);
5143 static ssize_t rbd_image_refresh(struct device *dev,
5144 struct device_attribute *attr,
5148 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5151 if (!capable(CAP_SYS_ADMIN))
5154 ret = rbd_dev_refresh(rbd_dev);
5161 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5162 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5163 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5164 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5165 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5166 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5167 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5168 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5169 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5170 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5171 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5172 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5173 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5174 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5175 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5176 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5177 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
5179 static struct attribute *rbd_attrs[] = {
5180 &dev_attr_size.attr,
5181 &dev_attr_features.attr,
5182 &dev_attr_major.attr,
5183 &dev_attr_minor.attr,
5184 &dev_attr_client_addr.attr,
5185 &dev_attr_client_id.attr,
5186 &dev_attr_cluster_fsid.attr,
5187 &dev_attr_config_info.attr,
5188 &dev_attr_pool.attr,
5189 &dev_attr_pool_id.attr,
5190 &dev_attr_pool_ns.attr,
5191 &dev_attr_name.attr,
5192 &dev_attr_image_id.attr,
5193 &dev_attr_current_snap.attr,
5194 &dev_attr_snap_id.attr,
5195 &dev_attr_parent.attr,
5196 &dev_attr_refresh.attr,
5200 static struct attribute_group rbd_attr_group = {
5204 static const struct attribute_group *rbd_attr_groups[] = {
5209 static void rbd_dev_release(struct device *dev);
5211 static const struct device_type rbd_device_type = {
5213 .groups = rbd_attr_groups,
5214 .release = rbd_dev_release,
5217 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5219 kref_get(&spec->kref);
5224 static void rbd_spec_free(struct kref *kref);
5225 static void rbd_spec_put(struct rbd_spec *spec)
5228 kref_put(&spec->kref, rbd_spec_free);
5231 static struct rbd_spec *rbd_spec_alloc(void)
5233 struct rbd_spec *spec;
5235 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5239 spec->pool_id = CEPH_NOPOOL;
5240 spec->snap_id = CEPH_NOSNAP;
5241 kref_init(&spec->kref);
5246 static void rbd_spec_free(struct kref *kref)
5248 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5250 kfree(spec->pool_name);
5251 kfree(spec->pool_ns);
5252 kfree(spec->image_id);
5253 kfree(spec->image_name);
5254 kfree(spec->snap_name);
5258 static void rbd_dev_free(struct rbd_device *rbd_dev)
5260 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5261 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5263 ceph_oid_destroy(&rbd_dev->header_oid);
5264 ceph_oloc_destroy(&rbd_dev->header_oloc);
5265 kfree(rbd_dev->config_info);
5267 rbd_put_client(rbd_dev->rbd_client);
5268 rbd_spec_put(rbd_dev->spec);
5269 kfree(rbd_dev->opts);
5273 static void rbd_dev_release(struct device *dev)
5275 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5276 bool need_put = !!rbd_dev->opts;
5279 destroy_workqueue(rbd_dev->task_wq);
5280 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5283 rbd_dev_free(rbd_dev);
5286 * This is racy, but way better than putting module outside of
5287 * the release callback. The race window is pretty small, so
5288 * doing something similar to dm (dm-builtin.c) is overkill.
5291 module_put(THIS_MODULE);
5294 static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
5296 struct rbd_device *rbd_dev;
5298 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5302 spin_lock_init(&rbd_dev->lock);
5303 INIT_LIST_HEAD(&rbd_dev->node);
5304 init_rwsem(&rbd_dev->header_rwsem);
5306 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5307 ceph_oid_init(&rbd_dev->header_oid);
5308 rbd_dev->header_oloc.pool = spec->pool_id;
5309 if (spec->pool_ns) {
5310 WARN_ON(!*spec->pool_ns);
5311 rbd_dev->header_oloc.pool_ns =
5312 ceph_find_or_create_string(spec->pool_ns,
5313 strlen(spec->pool_ns));
5316 mutex_init(&rbd_dev->watch_mutex);
5317 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5318 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5320 init_rwsem(&rbd_dev->lock_rwsem);
5321 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5322 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5323 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5324 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5325 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5326 spin_lock_init(&rbd_dev->lock_lists_lock);
5327 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5328 INIT_LIST_HEAD(&rbd_dev->running_list);
5329 init_completion(&rbd_dev->acquire_wait);
5330 init_completion(&rbd_dev->releasing_wait);
5332 spin_lock_init(&rbd_dev->object_map_lock);
5334 rbd_dev->dev.bus = &rbd_bus_type;
5335 rbd_dev->dev.type = &rbd_device_type;
5336 rbd_dev->dev.parent = &rbd_root_dev;
5337 device_initialize(&rbd_dev->dev);
5343 * Create a mapping rbd_dev.
5345 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5346 struct rbd_spec *spec,
5347 struct rbd_options *opts)
5349 struct rbd_device *rbd_dev;
5351 rbd_dev = __rbd_dev_create(spec);
5355 /* get an id and fill in device name */
5356 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5357 minor_to_rbd_dev_id(1 << MINORBITS),
5359 if (rbd_dev->dev_id < 0)
5362 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5363 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5365 if (!rbd_dev->task_wq)
5368 /* we have a ref from do_rbd_add() */
5369 __module_get(THIS_MODULE);
5371 rbd_dev->rbd_client = rbdc;
5372 rbd_dev->spec = spec;
5373 rbd_dev->opts = opts;
5375 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5379 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5381 rbd_dev_free(rbd_dev);
5385 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5388 put_device(&rbd_dev->dev);
5392 * Get the size and object order for an image snapshot, or if
5393 * snap_id is CEPH_NOSNAP, gets this information for the base image.
5396 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5397 u8 *order, u64 *snap_size)
5399 __le64 snapid = cpu_to_le64(snap_id);
5404 } __attribute__ ((packed)) size_buf = { 0 };
5406 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5407 &rbd_dev->header_oloc, "get_size",
5408 &snapid, sizeof(snapid),
5409 &size_buf, sizeof(size_buf));
5410 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5413 if (ret < sizeof (size_buf))
5417 *order = size_buf.order;
5418 dout(" order %u", (unsigned int)*order);
5420 *snap_size = le64_to_cpu(size_buf.size);
5422 dout(" snap_id 0x%016llx snap_size = %llu\n",
5423 (unsigned long long)snap_id,
5424 (unsigned long long)*snap_size);
5429 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5431 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5432 &rbd_dev->header.obj_order,
5433 &rbd_dev->header.image_size);
5436 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5443 /* Response will be an encoded string, which includes a length */
5444 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5445 reply_buf = kzalloc(size, GFP_KERNEL);
5449 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5450 &rbd_dev->header_oloc, "get_object_prefix",
5451 NULL, 0, reply_buf, size);
5452 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5457 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
5458 p + ret, NULL, GFP_NOIO);
5461 if (IS_ERR(rbd_dev->header.object_prefix)) {
5462 ret = PTR_ERR(rbd_dev->header.object_prefix);
5463 rbd_dev->header.object_prefix = NULL;
5465 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5473 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5474 bool read_only, u64 *snap_features)
5483 } __attribute__ ((packed)) features_buf = { 0 };
5487 features_in.snap_id = cpu_to_le64(snap_id);
5488 features_in.read_only = read_only;
5490 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5491 &rbd_dev->header_oloc, "get_features",
5492 &features_in, sizeof(features_in),
5493 &features_buf, sizeof(features_buf));
5494 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5497 if (ret < sizeof (features_buf))
5500 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5502 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5507 *snap_features = le64_to_cpu(features_buf.features);
5509 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5510 (unsigned long long)snap_id,
5511 (unsigned long long)*snap_features,
5512 (unsigned long long)le64_to_cpu(features_buf.incompat));
5517 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5519 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5521 &rbd_dev->header.features);
5525 * These are generic image flags, but since they are used only for
5526 * object map, store them in rbd_dev->object_map_flags.
5528 * For the same reason, this function is called only on object map
5529 * (re)load and not on header refresh.
5531 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5533 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5537 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5538 &rbd_dev->header_oloc, "get_flags",
5539 &snapid, sizeof(snapid),
5540 &flags, sizeof(flags));
5543 if (ret < sizeof(flags))
5546 rbd_dev->object_map_flags = le64_to_cpu(flags);
5550 struct parent_image_info {
5551 u64 pool_id;
5552 const char *pool_ns;
5553 const char *image_id;
5554 u64 snap_id;
5556 bool has_overlap;
5557 u64 overlap;
5558 };
5561 * The caller is responsible for @pii.
5563 static int decode_parent_image_spec(void **p, void *end,
5564 struct parent_image_info *pii)
5570 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5571 &struct_v, &struct_len);
5575 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5576 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5577 if (IS_ERR(pii->pool_ns)) {
5578 ret = PTR_ERR(pii->pool_ns);
5579 pii->pool_ns = NULL;
5582 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5583 if (IS_ERR(pii->image_id)) {
5584 ret = PTR_ERR(pii->image_id);
5585 pii->image_id = NULL;
5588 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5595 static int __get_parent_info(struct rbd_device *rbd_dev,
5596 struct page *req_page,
5597 struct page *reply_page,
5598 struct parent_image_info *pii)
5600 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5601 size_t reply_len = PAGE_SIZE;
5605 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5606 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5607 req_page, sizeof(u64), &reply_page, &reply_len);
5609 return ret == -EOPNOTSUPP ? 1 : ret;
5611 p = page_address(reply_page);
5612 end = p + reply_len;
5613 ret = decode_parent_image_spec(&p, end, pii);
5617 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5618 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5619 req_page, sizeof(u64), &reply_page, &reply_len);
5623 p = page_address(reply_page);
5624 end = p + reply_len;
5625 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5626 if (pii->has_overlap)
5627 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5636 * The caller is responsible for @pii.
5638 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5639 struct page *req_page,
5640 struct page *reply_page,
5641 struct parent_image_info *pii)
5643 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5644 size_t reply_len = PAGE_SIZE;
5648 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5649 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5650 req_page, sizeof(u64), &reply_page, &reply_len);
5654 p = page_address(reply_page);
5655 end = p + reply_len;
5656 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5657 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5658 if (IS_ERR(pii->image_id)) {
5659 ret = PTR_ERR(pii->image_id);
5660 pii->image_id = NULL;
5663 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5664 pii->has_overlap = true;
5665 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5673 static int get_parent_info(struct rbd_device *rbd_dev,
5674 struct parent_image_info *pii)
5676 struct page *req_page, *reply_page;
5680 req_page = alloc_page(GFP_KERNEL);
5684 reply_page = alloc_page(GFP_KERNEL);
5686 __free_page(req_page);
5690 p = page_address(req_page);
5691 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5692 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5694 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5697 __free_page(req_page);
5698 __free_page(reply_page);
5702 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5704 struct rbd_spec *parent_spec;
5705 struct parent_image_info pii = { 0 };
5708 parent_spec = rbd_spec_alloc();
5712 ret = get_parent_info(rbd_dev, &pii);
5716 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5717 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
5718 pii.has_overlap, pii.overlap);
5720 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
5722 * Either the parent never existed, or we have
5723 * record of it but the image got flattened so it no
5724 * longer has a parent. When the parent of a
5725 * layered image disappears we immediately set the
5726 * overlap to 0. The effect of this is that all new
5727 * requests will be treated as if the image had no parent.
5730 * If !pii.has_overlap, the parent image spec is not
5731 * applicable. It's there to avoid duplication in each snapshot record.
5734 if (rbd_dev->parent_overlap) {
5735 rbd_dev->parent_overlap = 0;
5736 rbd_dev_parent_put(rbd_dev);
5737 pr_info("%s: clone image has been flattened\n",
5738 rbd_dev->disk->disk_name);
5741 goto out; /* No parent? No problem. */
5744 /* The ceph file layout needs to fit pool id in 32 bits */
5747 if (pii.pool_id > (u64)U32_MAX) {
5748 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5749 (unsigned long long)pii.pool_id, U32_MAX);
5754 * The parent won't change (except when the clone is
5755 * flattened, already handled that). So we only need to
5756 * record the parent spec if we have not already done so.
5758 if (!rbd_dev->parent_spec) {
5759 parent_spec->pool_id = pii.pool_id;
5760 if (pii.pool_ns && *pii.pool_ns) {
5761 parent_spec->pool_ns = pii.pool_ns;
5764 parent_spec->image_id = pii.image_id;
5765 pii.image_id = NULL;
5766 parent_spec->snap_id = pii.snap_id;
5768 rbd_dev->parent_spec = parent_spec;
5769 parent_spec = NULL; /* rbd_dev now owns this */
5773 * We always update the parent overlap. If it's zero we issue
5774 * a warning, as we will proceed as if there was no parent.
5778 /* refresh, careful to warn just once */
5779 if (rbd_dev->parent_overlap)
5781 "clone now standalone (overlap became 0)");
5784 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5787 rbd_dev->parent_overlap = pii.overlap;
5793 kfree(pii.image_id);
5794 rbd_spec_put(parent_spec);
5798 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5802 __le64 stripe_count;
5803 } __attribute__ ((packed)) striping_info_buf = { 0 };
5804 size_t size = sizeof (striping_info_buf);
5808 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5809 &rbd_dev->header_oloc, "get_stripe_unit_count",
5810 NULL, 0, &striping_info_buf, size);
5811 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5817 p = &striping_info_buf;
5818 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
5819 rbd_dev->header.stripe_count = ceph_decode_64(&p);
5823 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
5825 __le64 data_pool_id;
5828 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5829 &rbd_dev->header_oloc, "get_data_pool",
5830 NULL, 0, &data_pool_id, sizeof(data_pool_id));
5833 if (ret < sizeof(data_pool_id))
5836 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
5837 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
5841 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5843 CEPH_DEFINE_OID_ONSTACK(oid);
5844 size_t image_id_size;
5849 void *reply_buf = NULL;
5851 char *image_name = NULL;
5854 rbd_assert(!rbd_dev->spec->image_name);
5856 len = strlen(rbd_dev->spec->image_id);
5857 image_id_size = sizeof (__le32) + len;
5858 image_id = kmalloc(image_id_size, GFP_KERNEL);
5863 end = image_id + image_id_size;
5864 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5866 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5867 reply_buf = kmalloc(size, GFP_KERNEL);
5871 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5872 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5873 "dir_get_name", image_id, image_id_size,
5878 end = reply_buf + ret;
5880 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5881 if (IS_ERR(image_name))
5884 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5892 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5894 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5895 const char *snap_name;
5898 /* Skip over names until we find the one we are looking for */
5900 snap_name = rbd_dev->header.snap_names;
5901 while (which < snapc->num_snaps) {
5902 if (!strcmp(name, snap_name))
5903 return snapc->snaps[which];
5904 snap_name += strlen(snap_name) + 1;
5910 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5912 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5917 for (which = 0; !found && which < snapc->num_snaps; which++) {
5918 const char *snap_name;
5920 snap_id = snapc->snaps[which];
5921 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
5922 if (IS_ERR(snap_name)) {
5923 /* ignore no-longer existing snapshots */
5924 if (PTR_ERR(snap_name) == -ENOENT)
5929 found = !strcmp(name, snap_name);
5932 return found ? snap_id : CEPH_NOSNAP;
5936 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
5937 * no snapshot by that name is found, or if an error occurs.
5939 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5941 if (rbd_dev->image_format == 1)
5942 return rbd_v1_snap_id_by_name(rbd_dev, name);
5944 return rbd_v2_snap_id_by_name(rbd_dev, name);
5948 * An image being mapped will have everything but the snap id.
5950 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5952 struct rbd_spec *spec = rbd_dev->spec;
5954 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
5955 rbd_assert(spec->image_id && spec->image_name);
5956 rbd_assert(spec->snap_name);
5958 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5961 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5962 if (snap_id == CEPH_NOSNAP)
5965 spec->snap_id = snap_id;
5967 spec->snap_id = CEPH_NOSNAP;
5974 * A parent image will have all ids but none of the names.
5976 * All names in an rbd spec are dynamically allocated. It's OK if we
5977 * can't figure out the name for an image id.
5979 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
5981 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5982 struct rbd_spec *spec = rbd_dev->spec;
5983 const char *pool_name;
5984 const char *image_name;
5985 const char *snap_name;
5988 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5989 rbd_assert(spec->image_id);
5990 rbd_assert(spec->snap_id != CEPH_NOSNAP);
5992 /* Get the pool name; we have to make our own copy of this */
5994 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
5996 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
5999 pool_name = kstrdup(pool_name, GFP_KERNEL);
6003 /* Fetch the image name; tolerate failure here */
6005 image_name = rbd_dev_image_name(rbd_dev);
6007 rbd_warn(rbd_dev, "unable to get image name");
6009 /* Fetch the snapshot name */
6011 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6012 if (IS_ERR(snap_name)) {
6013 ret = PTR_ERR(snap_name);
6017 spec->pool_name = pool_name;
6018 spec->image_name = image_name;
6019 spec->snap_name = snap_name;
6029 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
6038 struct ceph_snap_context *snapc;
6042 * We'll need room for the seq value (maximum snapshot id),
6043 * snapshot count, and array of that many snapshot ids.
6044 * For now we have a fixed upper limit on the number we're
6045 * prepared to receive.
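/*
 * Minimal sketch of the "get_snapcontext" reply that the decoding
 * below expects (all fields little-endian on the wire):
 *
 *   __le64 seq;                  maximum snapshot id
 *   __le32 snap_count;           number of snapshot ids that follow
 *   __le64 snaps[snap_count];    the snapshot ids themselves
 */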
6047 size = sizeof (__le64) + sizeof (__le32) +
6048 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6049 reply_buf = kzalloc(size, GFP_KERNEL);
6053 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6054 &rbd_dev->header_oloc, "get_snapcontext",
6055 NULL, 0, reply_buf, size);
6056 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6061 end = reply_buf + ret;
6063 ceph_decode_64_safe(&p, end, seq, out);
6064 ceph_decode_32_safe(&p, end, snap_count, out);
6067 * Make sure the reported number of snapshot ids wouldn't go
6068 * beyond the end of our buffer. But before checking that,
6069 * make sure the computed size of the snapshot context we
6070 * allocate is representable in a size_t.
6072 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6077 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6081 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6087 for (i = 0; i < snap_count; i++)
6088 snapc->snaps[i] = ceph_decode_64(&p);
6090 ceph_put_snap_context(rbd_dev->header.snapc);
6091 rbd_dev->header.snapc = snapc;
6093 dout(" snap context seq = %llu, snap_count = %u\n",
6094 (unsigned long long)seq, (unsigned int)snap_count);
6101 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6112 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6113 reply_buf = kmalloc(size, GFP_KERNEL);
6115 return ERR_PTR(-ENOMEM);
6117 snapid = cpu_to_le64(snap_id);
6118 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6119 &rbd_dev->header_oloc, "get_snapshot_name",
6120 &snapid, sizeof(snapid), reply_buf, size);
6121 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6123 snap_name = ERR_PTR(ret);
6128 end = reply_buf + ret;
6129 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6130 if (IS_ERR(snap_name))
6133 dout(" snap_id 0x%016llx snap_name = %s\n",
6134 (unsigned long long)snap_id, snap_name);
6141 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
6143 bool first_time = rbd_dev->header.object_prefix == NULL;
6146 ret = rbd_dev_v2_image_size(rbd_dev);
6151 ret = rbd_dev_v2_header_onetime(rbd_dev);
6156 ret = rbd_dev_v2_snap_context(rbd_dev);
6157 if (ret && first_time) {
6158 kfree(rbd_dev->header.object_prefix);
6159 rbd_dev->header.object_prefix = NULL;
6165 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
6167 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6169 if (rbd_dev->image_format == 1)
6170 return rbd_dev_v1_header_info(rbd_dev);
6172 return rbd_dev_v2_header_info(rbd_dev);
6176 * Skips over white space at *buf, and updates *buf to point to the
6177 * first found non-space character (if any). Returns the length of
6178 * the token (string of non-white space characters) found. Note
6179 * that *buf must be terminated with '\0'.
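/*
 * For example (illustrative only): with *buf pointing at the string
 * "  pool image", next_token() updates *buf to point at "pool image"
 * and returns 4, the length of the token "pool". It does not advance
 * past the token itself; that is left to the caller.
 */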
6181 static inline size_t next_token(const char **buf)
6184 * These are the characters that produce nonzero for
6185 * isspace() in the "C" and "POSIX" locales.
6187 static const char spaces[] = " \f\n\r\t\v";
6189 *buf += strspn(*buf, spaces); /* Find start of token */
6191 return strcspn(*buf, spaces); /* Return token length */
6195 * Finds the next token in *buf, dynamically allocates a buffer big
6196 * enough to hold a copy of it, and copies the token into the new
6197 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6198 * that a duplicate buffer is created even for a zero-length token.
6200 * Returns a pointer to the newly-allocated duplicate, or a null
6201 * pointer if memory for the duplicate was not available. If
6202 * the lenp argument is a non-null pointer, the length of the token
6203 * (not including the '\0') is returned in *lenp.
6205 * If successful, the *buf pointer will be updated to point beyond
6206 * the end of the found token.
6208 * Note: uses GFP_KERNEL for allocation.
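/*
 * For example (illustrative only): with *buf pointing at "rbd myimage",
 * dup_token(&buf, &len) returns a freshly allocated copy of "rbd", sets
 * len to 3 and leaves *buf pointing at " myimage" for the next call.
 */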
6210 static inline char *dup_token(const char **buf, size_t *lenp)
6215 len = next_token(buf);
6216 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6219 *(dup + len) = '\0';
6228 static int rbd_parse_param(struct fs_parameter *param,
6229 struct rbd_parse_opts_ctx *pctx)
6231 struct rbd_options *opt = pctx->opts;
6232 struct fs_parse_result result;
6233 struct p_log log = {.prefix = "rbd"};
6236 ret = ceph_parse_param(param, pctx->copts, NULL);
6237 if (ret != -ENOPARAM)
6240 token = __fs_parse(&log, rbd_parameters, param, &result);
6241 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6243 if (token == -ENOPARAM)
6244 return inval_plog(&log, "Unknown parameter '%s'",
6250 case Opt_queue_depth:
6251 if (result.uint_32 < 1)
6253 opt->queue_depth = result.uint_32;
6255 case Opt_alloc_size:
6256 if (result.uint_32 < SECTOR_SIZE)
6258 if (!is_power_of_2(result.uint_32))
6259 return inval_plog(&log, "alloc_size must be a power of 2");
6260 opt->alloc_size = result.uint_32;
6262 case Opt_lock_timeout:
6263 /* 0 is "wait forever" (i.e. infinite timeout) */
6264 if (result.uint_32 > INT_MAX / 1000)
6266 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6269 kfree(pctx->spec->pool_ns);
6270 pctx->spec->pool_ns = param->string;
6271 param->string = NULL;
6273 case Opt_compression_hint:
6274 switch (result.uint_32) {
6275 case Opt_compression_hint_none:
6276 opt->alloc_hint_flags &=
6277 ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
6278 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
6280 case Opt_compression_hint_compressible:
6281 opt->alloc_hint_flags |=
6282 CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6283 opt->alloc_hint_flags &=
6284 ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6286 case Opt_compression_hint_incompressible:
6287 opt->alloc_hint_flags |=
6288 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6289 opt->alloc_hint_flags &=
6290 ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6297 opt->read_only = true;
6299 case Opt_read_write:
6300 opt->read_only = false;
6302 case Opt_lock_on_read:
6303 opt->lock_on_read = true;
6306 opt->exclusive = true;
6318 return inval_plog(&log, "%s out of range", param->key);
6322 * This duplicates most of generic_parse_monolithic(), untying it from
6323 * fs_context and skipping standard superblock and security options.
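/*
 * For example (option names shown only as an illustration), an
 * <options> string such as
 *
 *   "queue_depth=128,alloc_size=65536,read_only"
 *
 * is split on commas below; each piece becomes a single fs_parameter,
 * either a flag ("read_only") or a key with a string value
 * ("queue_depth" = "128"), and is handed to rbd_parse_param().
 */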
6325 static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6330 dout("%s '%s'\n", __func__, options);
6331 while ((key = strsep(&options, ",")) != NULL) {
6333 struct fs_parameter param = {
6335 .type = fs_value_is_flag,
6337 char *value = strchr(key, '=');
6344 v_len = strlen(value);
6345 param.string = kmemdup_nul(value, v_len,
6349 param.type = fs_value_is_string;
6353 ret = rbd_parse_param(¶m, pctx);
6354 kfree(param.string);
6364 * Parse the options provided for an "rbd add" (i.e., rbd image
6365 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6366 * and the data written is passed here via a NUL-terminated buffer.
6367 * Returns 0 if successful or an error code otherwise.
6369 * The information extracted from these options is recorded in
6370 * the other parameters which return dynamically-allocated
6371 * structures:
6372 *  ceph_opts
6373 * The address of a pointer that will refer to a ceph options
6374 * structure. Caller must release the returned pointer using
6375 * ceph_destroy_options() when it is no longer needed.
6376 *  rbd_opts
6377 * Address of an rbd options pointer. Fully initialized by
6378 * this function; caller must release with kfree().
6379 *  rbd_spec
6380 * Address of an rbd image specification pointer. Fully
6381 * initialized by this function based on parsed options.
6382 * Caller must release with rbd_spec_put().
6384 * The options passed take this form:
6385 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
6386 * where:
6387 *  <mon_addrs>
6388 * A comma-separated list of one or more monitor addresses.
6389 * A monitor address is an ip address, optionally followed
6390 * by a port number (separated by a colon).
6391 * I.e.: ip1[:port1][,ip2[:port2]...]
6392 *  <options>
6393 * A comma-separated list of ceph and/or rbd options.
6394 *  <pool_name>
6395 * The name of the rados pool containing the rbd image.
6396 *  <image_name>
6397 * The name of the image in that pool to map.
6398 *  <snap_id>
6399 * An optional snapshot id. If provided, the mapping will
6400 * present data from the image at the time that snapshot was
6401 * created. The image head is used if no snapshot id is
6402 * provided. Snapshot mappings are always read-only.
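/*
 * Example of a complete "rbd add" string in the form described above;
 * every value is made up and only illustrates the layout:
 *
 *   1.2.3.4:6789,1.2.3.5:6789 name=admin,read_only rbd myimage mysnap
 *
 * i.e. two monitor addresses, the options "name=admin" and
 * "read_only", pool "rbd", image "myimage", mapped at snapshot
 * "mysnap" (snapshot mappings are read-only regardless).
 */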
6404 static int rbd_add_parse_args(const char *buf,
6405 struct ceph_options **ceph_opts,
6406 struct rbd_options **opts,
6407 struct rbd_spec **rbd_spec)
6411 const char *mon_addrs;
6413 size_t mon_addrs_size;
6414 struct rbd_parse_opts_ctx pctx = { 0 };
6417 /* The first four tokens are required */
6419 len = next_token(&buf);
6421 rbd_warn(NULL, "no monitor address(es) provided");
6425 mon_addrs_size = len;
6429 options = dup_token(&buf, NULL);
6433 rbd_warn(NULL, "no options provided");
6437 pctx.spec = rbd_spec_alloc();
6441 pctx.spec->pool_name = dup_token(&buf, NULL);
6442 if (!pctx.spec->pool_name)
6444 if (!*pctx.spec->pool_name) {
6445 rbd_warn(NULL, "no pool name provided");
6449 pctx.spec->image_name = dup_token(&buf, NULL);
6450 if (!pctx.spec->image_name)
6452 if (!*pctx.spec->image_name) {
6453 rbd_warn(NULL, "no image name provided");
6458 * Snapshot name is optional; default is to use "-"
6459 * (indicating the head/no snapshot).
6461 len = next_token(&buf);
6463 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6464 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6465 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
6466 ret = -ENAMETOOLONG;
6469 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6472 *(snap_name + len) = '\0';
6473 pctx.spec->snap_name = snap_name;
6475 pctx.copts = ceph_alloc_options();
6479 /* Initialize all rbd options to the defaults */
6481 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6485 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6486 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6487 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6488 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6489 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6490 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6491 pctx.opts->trim = RBD_TRIM_DEFAULT;
6493 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL,
6498 ret = rbd_parse_options(options, &pctx);
6502 *ceph_opts = pctx.copts;
6504 *rbd_spec = pctx.spec;
6512 ceph_destroy_options(pctx.copts);
6513 rbd_spec_put(pctx.spec);
6518 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6520 down_write(&rbd_dev->lock_rwsem);
6521 if (__rbd_is_lock_owner(rbd_dev))
6522 __rbd_release_lock(rbd_dev);
6523 up_write(&rbd_dev->lock_rwsem);
6527 * If the wait is interrupted, an error is returned even if the lock
6528 * was successfully acquired. rbd_dev_image_unlock() will release it if needed.
6531 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6535 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6536 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6539 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6543 if (rbd_is_ro(rbd_dev))
6546 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6547 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6548 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6549 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6551 ret = rbd_dev->acquire_err;
6553 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6559 rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
6564 * The lock may have been released by now, unless automatic lock
6565 * transitions are disabled.
6567 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
6572 * An rbd format 2 image has a unique identifier, distinct from the
6573 * name given to it by the user. Internally, that identifier is
6574 * what's used to specify the names of objects related to the image.
6576 * A special "rbd id" object is used to map an rbd image name to its
6577 * id. If that object doesn't exist, then there is no v2 rbd image
6578 * with the supplied name.
6580 * This function will record the given rbd_dev's image_id field if
6581 * it can be determined, and in that case will return 0. If any
6582 * errors occur a negative errno will be returned and the rbd_dev's
6583 * image_id field will be unchanged (and should be NULL).
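/*
 * Illustration of the object naming this implies (the image name and
 * id below are made up; the object names assume the RBD_ID_PREFIX,
 * RBD_HEADER_PREFIX and RBD_SUFFIX strings from rbd_types.h):
 *
 *   format 2 image "foo":
 *     id object      RBD_ID_PREFIX "foo"; its contents, say "abc123",
 *                    become rbd_dev->spec->image_id
 *     header object  RBD_HEADER_PREFIX "abc123"
 *
 *   format 1 image "foo":
 *     no id object; the header object is simply "foo" RBD_SUFFIX
 */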
6585 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6589 CEPH_DEFINE_OID_ONSTACK(oid);
6594 * When probing a parent image, the image id is already
6595 * known (and the image name likely is not). There's no
6596 * need to fetch the image id again in this case. We
6597 * do still need to set the image format though.
6599 if (rbd_dev->spec->image_id) {
6600 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6606 * First, see if the format 2 image id file exists, and if
6607 * so, get the image's persistent id from it.
6609 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6610 rbd_dev->spec->image_name);
6614 dout("rbd id object name is %s\n", oid.name);
6616 /* Response will be an encoded string, which includes a length */
6617 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6618 response = kzalloc(size, GFP_NOIO);
6624 /* If it doesn't exist we'll assume it's a format 1 image */
6626 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6629 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6630 if (ret == -ENOENT) {
6631 image_id = kstrdup("", GFP_KERNEL);
6632 ret = image_id ? 0 : -ENOMEM;
6634 rbd_dev->image_format = 1;
6635 } else if (ret >= 0) {
6638 image_id = ceph_extract_encoded_string(&p, p + ret,
6640 ret = PTR_ERR_OR_ZERO(image_id);
6642 rbd_dev->image_format = 2;
6646 rbd_dev->spec->image_id = image_id;
6647 dout("image_id is %s\n", image_id);
6651 ceph_oid_destroy(&oid);
6656 * Undo whatever state changes are made by v1 or v2 header info call routines.
6659 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6661 struct rbd_image_header *header;
6663 rbd_dev_parent_put(rbd_dev);
6664 rbd_object_map_free(rbd_dev);
6665 rbd_dev_mapping_clear(rbd_dev);
6667 /* Free dynamic fields from the header, then zero it out */
6669 header = &rbd_dev->header;
6670 ceph_put_snap_context(header->snapc);
6671 kfree(header->snap_sizes);
6672 kfree(header->snap_names);
6673 kfree(header->object_prefix);
6674 memset(header, 0, sizeof (*header));
6677 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
6681 ret = rbd_dev_v2_object_prefix(rbd_dev);
6686 * Get and check the features for the image. Currently the
6687 * features are assumed to never change.
6689 ret = rbd_dev_v2_features(rbd_dev);
6693 /* If the image supports fancy striping, get its parameters */
6695 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
6696 ret = rbd_dev_v2_striping_info(rbd_dev);
6701 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
6702 ret = rbd_dev_v2_data_pool(rbd_dev);
6707 rbd_init_layout(rbd_dev);
6711 rbd_dev->header.features = 0;
6712 kfree(rbd_dev->header.object_prefix);
6713 rbd_dev->header.object_prefix = NULL;
6718 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6719 * rbd_dev_image_probe() recursion depth, which means it's also the
6720 * length of the already discovered part of the parent chain.
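/*
 * For example, mapping a clone C whose parent is B, whose parent in
 * turn is A, proceeds as rbd_dev_image_probe(C, 0) ->
 * rbd_dev_probe_parent(C, 0) -> rbd_dev_image_probe(B, 1) ->
 * rbd_dev_probe_parent(B, 1) -> rbd_dev_image_probe(A, 2); the chain
 * is rejected once the incremented depth exceeds
 * RBD_MAX_PARENT_CHAIN_LEN.
 */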
6722 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6724 struct rbd_device *parent = NULL;
6727 if (!rbd_dev->parent_spec)
6730 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6731 pr_info("parent chain is too long (%d)\n", depth);
6736 parent = __rbd_dev_create(rbd_dev->parent_spec);
6743 * Images related by parent/child relationships always share
6744 * rbd_client and spec/parent_spec, so bump their refcounts.
6746 parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
6747 parent->spec = rbd_spec_get(rbd_dev->parent_spec);
6749 __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
6751 ret = rbd_dev_image_probe(parent, depth);
6755 rbd_dev->parent = parent;
6756 atomic_set(&rbd_dev->parent_ref, 1);
6760 rbd_dev_unparent(rbd_dev);
6761 rbd_dev_destroy(parent);
6765 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6767 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6768 rbd_free_disk(rbd_dev);
6770 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6774 * rbd_dev->header_rwsem must be locked for write and will be unlocked upon return.
6777 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6781 /* Record our major and minor device numbers. */
6783 if (!single_major) {
6784 ret = register_blkdev(0, rbd_dev->name);
6786 goto err_out_unlock;
6788 rbd_dev->major = ret;
6791 rbd_dev->major = rbd_major;
6792 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6795 /* Set up the blkdev mapping. */
6797 ret = rbd_init_disk(rbd_dev);
6799 goto err_out_blkdev;
6801 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6802 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6804 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6808 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6809 up_write(&rbd_dev->header_rwsem);
6813 rbd_free_disk(rbd_dev);
6816 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6818 up_write(&rbd_dev->header_rwsem);
6822 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6824 struct rbd_spec *spec = rbd_dev->spec;
6827 /* Record the header object name for this rbd image. */
6829 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6830 if (rbd_dev->image_format == 1)
6831 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6832 spec->image_name, RBD_SUFFIX);
6834 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6835 RBD_HEADER_PREFIX, spec->image_id);
6840 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6843 pr_info("image %s/%s%s%s does not exist\n",
6844 rbd_dev->spec->pool_name,
6845 rbd_dev->spec->pool_ns ?: "",
6846 rbd_dev->spec->pool_ns ? "/" : "",
6847 rbd_dev->spec->image_name);
6849 pr_info("snap %s/%s%s%s@%s does not exist\n",
6850 rbd_dev->spec->pool_name,
6851 rbd_dev->spec->pool_ns ?: "",
6852 rbd_dev->spec->pool_ns ? "/" : "",
6853 rbd_dev->spec->image_name,
6854 rbd_dev->spec->snap_name);
6858 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6860 if (!rbd_is_ro(rbd_dev))
6861 rbd_unregister_watch(rbd_dev);
6863 rbd_dev_unprobe(rbd_dev);
6864 rbd_dev->image_format = 0;
6865 kfree(rbd_dev->spec->image_id);
6866 rbd_dev->spec->image_id = NULL;
6870 * Probe for the existence of the header object for the given rbd
6871 * device. If this image is the one being mapped (i.e., not a
6872 * parent), initiate a watch on its header object before using that
6873 * object to get detailed information about the rbd image.
6875 * On success, returns with header_rwsem held for write if called with @depth == 0.
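/*
 * Rough order of operations below: determine the image id and format,
 * derive the header object name, register a watch (unless the mapping
 * is read-only), read the header, fill in the missing half of the spec
 * (snap id for the image being mapped, names for a parent), set up the
 * mapping size, load the object map when mapping a snapshot that has
 * one, and finally fetch parent info and probe the parent chain.
 */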
6878 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6880 bool need_watch = !rbd_is_ro(rbd_dev);
6884 * Get the id from the image id object. Unless there's an
6885 * error, rbd_dev->spec->image_id will be filled in with
6886 * a dynamically-allocated string, and rbd_dev->image_format
6887 * will be set to either 1 or 2.
6889 ret = rbd_dev_image_id(rbd_dev);
6893 ret = rbd_dev_header_name(rbd_dev);
6895 goto err_out_format;
6898 ret = rbd_register_watch(rbd_dev);
6901 rbd_print_dne(rbd_dev, false);
6902 goto err_out_format;
6907 down_write(&rbd_dev->header_rwsem);
6909 ret = rbd_dev_header_info(rbd_dev);
6911 if (ret == -ENOENT && !need_watch)
6912 rbd_print_dne(rbd_dev, false);
6917 * If this image is the one being mapped, we have pool name and
6918 * id, image name and id, and snap name - need to fill snap id.
6919 * Otherwise this is a parent image, identified by pool, image
6920 * and snap ids - need to fill in names for those ids.
6923 ret = rbd_spec_fill_snap_id(rbd_dev);
6925 ret = rbd_spec_fill_names(rbd_dev);
6928 rbd_print_dne(rbd_dev, true);
6932 ret = rbd_dev_mapping_set(rbd_dev);
6936 if (rbd_is_snap(rbd_dev) &&
6937 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
6938 ret = rbd_object_map_load(rbd_dev);
6943 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
6944 ret = rbd_dev_v2_parent_info(rbd_dev);
6949 ret = rbd_dev_probe_parent(rbd_dev, depth);
6953 dout("discovered format %u image, header name is %s\n",
6954 rbd_dev->image_format, rbd_dev->header_oid.name);
6959 up_write(&rbd_dev->header_rwsem);
6961 rbd_unregister_watch(rbd_dev);
6962 rbd_dev_unprobe(rbd_dev);
6964 rbd_dev->image_format = 0;
6965 kfree(rbd_dev->spec->image_id);
6966 rbd_dev->spec->image_id = NULL;
6970 static ssize_t do_rbd_add(const char *buf, size_t count)
6972 struct rbd_device *rbd_dev = NULL;
6973 struct ceph_options *ceph_opts = NULL;
6974 struct rbd_options *rbd_opts = NULL;
6975 struct rbd_spec *spec = NULL;
6976 struct rbd_client *rbdc;
6979 if (!capable(CAP_SYS_ADMIN))
6982 if (!try_module_get(THIS_MODULE))
6985 /* parse add command */
6986 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
6990 rbdc = rbd_get_client(ceph_opts);
6997 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
7000 pr_info("pool %s does not exist\n", spec->pool_name);
7001 goto err_out_client;
7003 spec->pool_id = (u64)rc;
7005 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7008 goto err_out_client;
7010 rbdc = NULL; /* rbd_dev now owns this */
7011 spec = NULL; /* rbd_dev now owns this */
7012 rbd_opts = NULL; /* rbd_dev now owns this */
7014 /* if we are mapping a snapshot it will be a read-only mapping */
7015 if (rbd_dev->opts->read_only ||
7016 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7017 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7019 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7020 if (!rbd_dev->config_info) {
7022 goto err_out_rbd_dev;
7025 rc = rbd_dev_image_probe(rbd_dev, 0);
7027 goto err_out_rbd_dev;
7029 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7030 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7031 rbd_dev->layout.object_size);
7032 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7035 rc = rbd_dev_device_setup(rbd_dev);
7037 goto err_out_image_probe;
7039 rc = rbd_add_acquire_lock(rbd_dev);
7041 goto err_out_image_lock;
7043 /* Everything's ready. Announce the disk to the world. */
7045 rc = device_add(&rbd_dev->dev);
7047 goto err_out_image_lock;
7049 rc = device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7051 goto err_out_cleanup_disk;
7053 spin_lock(&rbd_dev_list_lock);
7054 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7055 spin_unlock(&rbd_dev_list_lock);
7057 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7058 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7059 rbd_dev->header.features);
7062 module_put(THIS_MODULE);
7065 err_out_cleanup_disk:
7066 rbd_free_disk(rbd_dev);
7068 rbd_dev_image_unlock(rbd_dev);
7069 rbd_dev_device_release(rbd_dev);
7070 err_out_image_probe:
7071 rbd_dev_image_release(rbd_dev);
7073 rbd_dev_destroy(rbd_dev);
7075 rbd_put_client(rbdc);
7082 static ssize_t add_store(const struct bus_type *bus, const char *buf, size_t count)
7087 return do_rbd_add(buf, count);
7090 static ssize_t add_single_major_store(const struct bus_type *bus, const char *buf,
7093 return do_rbd_add(buf, count);
7096 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7098 while (rbd_dev->parent) {
7099 struct rbd_device *first = rbd_dev;
7100 struct rbd_device *second = first->parent;
7101 struct rbd_device *third;
7104 * Follow to the parent with no grandparent and remove it.
7107 while (second && (third = second->parent)) {
7112 rbd_dev_image_release(second);
7113 rbd_dev_destroy(second);
7114 first->parent = NULL;
7115 first->parent_overlap = 0;
7117 rbd_assert(first->parent_spec);
7118 rbd_spec_put(first->parent_spec);
7119 first->parent_spec = NULL;
7123 static ssize_t do_rbd_remove(const char *buf, size_t count)
7125 struct rbd_device *rbd_dev = NULL;
7126 struct list_head *tmp;
7132 if (!capable(CAP_SYS_ADMIN))
7137 sscanf(buf, "%d %5s", &dev_id, opt_buf);
7139 pr_err("dev_id out of range\n");
7142 if (opt_buf[0] != '\0') {
7143 if (!strcmp(opt_buf, "force")) {
7146 pr_err("bad remove option at '%s'\n", opt_buf);
7152 spin_lock(&rbd_dev_list_lock);
7153 list_for_each(tmp, &rbd_dev_list) {
7154 rbd_dev = list_entry(tmp, struct rbd_device, node);
7155 if (rbd_dev->dev_id == dev_id) {
7161 spin_lock_irq(&rbd_dev->lock);
7162 if (rbd_dev->open_count && !force)
7164 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
7167 spin_unlock_irq(&rbd_dev->lock);
7169 spin_unlock(&rbd_dev_list_lock);
7175 * Prevent new IO from being queued and wait for existing
7176 * IO to complete/fail.
7178 blk_mq_freeze_queue(rbd_dev->disk->queue);
7179 blk_mark_disk_dead(rbd_dev->disk);
7182 del_gendisk(rbd_dev->disk);
7183 spin_lock(&rbd_dev_list_lock);
7184 list_del_init(&rbd_dev->node);
7185 spin_unlock(&rbd_dev_list_lock);
7186 device_del(&rbd_dev->dev);
7188 rbd_dev_image_unlock(rbd_dev);
7189 rbd_dev_device_release(rbd_dev);
7190 rbd_dev_image_release(rbd_dev);
7191 rbd_dev_destroy(rbd_dev);
7195 static ssize_t remove_store(const struct bus_type *bus, const char *buf, size_t count)
7200 return do_rbd_remove(buf, count);
7203 static ssize_t remove_single_major_store(const struct bus_type *bus, const char *buf,
7206 return do_rbd_remove(buf, count);
7210 * create control files in sysfs
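/*
 * The bus-level files created here, per the *_store attributes defined
 * above, live under /sys/bus/rbd/:
 *
 *   add                   map an image; input is parsed by do_rbd_add()
 *   remove                unmap a device; handled by do_rbd_remove()
 *   add_single_major      single-major counterparts of the two files
 *   remove_single_major   above, used with the single_major parameter
 *
 * Per-device attributes (size, features, parent, refresh, ...) appear
 * under /sys/bus/rbd/devices/<dev_id>/.
 */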
7213 static int __init rbd_sysfs_init(void)
7217 ret = device_register(&rbd_root_dev);
7219 put_device(&rbd_root_dev);
7223 ret = bus_register(&rbd_bus_type);
7225 device_unregister(&rbd_root_dev);
7230 static void __exit rbd_sysfs_cleanup(void)
7232 bus_unregister(&rbd_bus_type);
7233 device_unregister(&rbd_root_dev);
7236 static int __init rbd_slab_init(void)
7238 rbd_assert(!rbd_img_request_cache);
7239 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
7240 if (!rbd_img_request_cache)
7243 rbd_assert(!rbd_obj_request_cache);
7244 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
7245 if (!rbd_obj_request_cache)
7251 kmem_cache_destroy(rbd_img_request_cache);
7252 rbd_img_request_cache = NULL;
7256 static void rbd_slab_exit(void)
7258 rbd_assert(rbd_obj_request_cache);
7259 kmem_cache_destroy(rbd_obj_request_cache);
7260 rbd_obj_request_cache = NULL;
7262 rbd_assert(rbd_img_request_cache);
7263 kmem_cache_destroy(rbd_img_request_cache);
7264 rbd_img_request_cache = NULL;
7267 static int __init rbd_init(void)
7271 if (!libceph_compatible(NULL)) {
7272 rbd_warn(NULL, "libceph incompatibility (quitting)");
7276 rc = rbd_slab_init();
7281 * The number of active work items is limited by the number of
7282 * rbd devices * queue depth, so leave @max_active at default.
7284 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
7291 rbd_major = register_blkdev(0, RBD_DRV_NAME);
7292 if (rbd_major < 0) {
7298 rc = rbd_sysfs_init();
7300 goto err_out_blkdev;
7303 pr_info("loaded (major %d)\n", rbd_major);
7305 pr_info("loaded\n");
7311 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7313 destroy_workqueue(rbd_wq);
7319 static void __exit rbd_exit(void)
7321 ida_destroy(&rbd_dev_id_ida);
7322 rbd_sysfs_cleanup();
7324 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7325 destroy_workqueue(rbd_wq);
7329 module_init(rbd_init);
7330 module_exit(rbd_exit);
7332 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
7333 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
7334 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
7335 /* following authorship retained from original osdblk.c */
7336 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
7338 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
7339 MODULE_LICENSE("GPL");