/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
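/*
 * Illustrative arithmetic (not from the original source): a 1 GiB
 * image spans (1 << 30) >> SECTOR_SHIFT = 2097152 sectors, and
 * sector N begins at byte offset (u64)N << SECTOR_SHIFT.
 */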
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}
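/*
 * Illustrative pairing of the two helpers above (mirrored by
 * rbd_dev_parent_get()/rbd_dev_parent_put() below): a get that sees
 * 0 or -EINVAL must not proceed, and the matching put reports
 * underflow the same way.
 */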
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
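/*
 * Why the formula above is sufficient (illustrative): each byte of
 * an int contributes at most log10(256) < 2.5 decimal digits, so a
 * 4-byte int needs at most (5 * 4) / 2 = 10 digits, plus one extra
 * character of slack for a sign.
 */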
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};
enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
struct rbd_mapping {
	u64                     size;
	u64                     features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event   *watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
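/*
 * Illustrative use: passing the option "ro" (or "read_only") when
 * mapping makes parse_rbd_opts_token() below set
 * rbd_opts->read_only to true; "rw" restores the default
 * (RBD_READ_ONLY_DEFAULT is false).
 */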
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to remove
 * the client from the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}
/*
 * Drop reference to ceph client node. If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	down_write(&rbd_dev->header_rwsem);
	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	up_write(&rbd_dev->header_rwsem);

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
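/*
 * Illustrative lookup: with snapc->snaps = { 12, 7, 3 } (descending,
 * as the osd keeps it), searching for snap_id 7 returns index 1;
 * snapid_compare_reverse() is what lets bsearch() treat the
 * descending array as sorted.
 */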
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return NULL;

	return _rbd_dev_v1_snap_name(rbd_dev, which);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
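/*
 * Worked example (illustrative): with obj_order 22 (4 MiB objects),
 * image offset 5 MiB maps to segment offset 1 MiB, and a 3 MiB
 * length starting there fits within the object; the same length
 * starting at image offset 3 MiB is clipped to 1 MiB, leaving the
 * remainder for the next object.
 */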
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
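/*
 * Illustrative walk: cloning 6 KiB starting 2 KiB into a chain of
 * 4 KiB bios yields a clone of the 2 KiB tail of bio 0 followed by
 * all of bio 1; on return *bio_src points at bio 2 and *offset is 0.
 */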
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; it is not clear offhand which approach is
	 * better.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
		obj_request->xferred = length;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
		obj_request->xferred = length;
	}
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow\n");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow\n");

	return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length, false);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}
static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}
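/*
 * Ordering example (illustrative): if object requests 0..2 complete
 * in the order 2, 0, 1, the callback for 2 bails at the
 * next_completion check; the callback for 0 retires 0 and stops at
 * the not-yet-done 1; the callback for 1 then retires 1 and 2 and
 * completes the image request.
 */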
2157 * Split up an image request into one or more object requests, each
2158 * to a different object. The "type" parameter indicates whether
2159 * "data_desc" is the pointer to the head of a list of bio
2160 * structures, or the base of a page array. In either case this
2161 * function assumes data_desc describes memory sufficient to hold
2162 * all data described by the image request.
2164 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2165 enum obj_request_type type,
2168 struct rbd_device *rbd_dev = img_request->rbd_dev;
2169 struct rbd_obj_request *obj_request = NULL;
2170 struct rbd_obj_request *next_obj_request;
2171 bool write_request = img_request_write_test(img_request);
2172 struct bio *bio_list;
2173 unsigned int bio_offset = 0;
2174 struct page **pages;
2179 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2180 (int)type, data_desc);
2182 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2183 img_offset = img_request->offset;
2184 resid = img_request->length;
2185 rbd_assert(resid > 0);
2187 if (type == OBJ_REQUEST_BIO) {
2188 bio_list = data_desc;
2189 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2191 rbd_assert(type == OBJ_REQUEST_PAGES);
2196 struct ceph_osd_request *osd_req;
2197 const char *object_name;
2201 object_name = rbd_segment_name(rbd_dev, img_offset);
2204 offset = rbd_segment_offset(rbd_dev, img_offset);
2205 length = rbd_segment_length(rbd_dev, img_offset, resid);
2206 obj_request = rbd_obj_request_create(object_name,
2207 offset, length, type);
2208 /* object request has its own copy of the object name */
2209 rbd_segment_name_free(object_name);
2213 if (type == OBJ_REQUEST_BIO) {
2214 unsigned int clone_size;
2216 rbd_assert(length <= (u64)UINT_MAX);
2217 clone_size = (unsigned int)length;
2218 obj_request->bio_list =
2219 bio_chain_clone_range(&bio_list,
2223 if (!obj_request->bio_list)
2226 unsigned int page_count;
2228 obj_request->pages = pages;
2229 page_count = (u32)calc_pages_for(offset, length);
2230 obj_request->page_count = page_count;
2231 if ((offset + length) & ~PAGE_MASK)
2232 page_count--; /* more on last page */
2233 pages += page_count;
2236 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2240 obj_request->osd_req = osd_req;
2241 obj_request->callback = rbd_img_obj_callback;
2243 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2245 if (type == OBJ_REQUEST_BIO)
2246 osd_req_op_extent_osd_data_bio(osd_req, 0,
2247 obj_request->bio_list, length);
2249 osd_req_op_extent_osd_data_pages(osd_req, 0,
2250 obj_request->pages, length,
2251 offset & ~PAGE_MASK, false, false);
2254 rbd_osd_req_format_write(obj_request);
2256 rbd_osd_req_format_read(obj_request);
2258 obj_request->img_offset = img_offset;
2259 rbd_img_obj_request_add(img_request, obj_request);
2261 img_offset += length;
2268 rbd_obj_request_put(obj_request);
2270 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2271 rbd_obj_request_put(obj_request);
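/*
 * Worked example for rbd_img_request_fill() above (an illustrative
 * helper, never called): with the default object order of 22 (4 MiB
 * objects), an image extent at offset 3 MiB with length 2 MiB splits
 * into two object requests, 1 MiB at the tail of object 0 and 1 MiB
 * at the head of object 1.  The per-iteration length computed here
 * mirrors what rbd_segment_offset()/rbd_segment_length() produce.
 */
static u64 rbd_example_segment_length(u8 obj_order, u64 img_offset, u64 resid)
{
	u64 obj_size = (u64)1 << obj_order;
	u64 seg_off = img_offset & (obj_size - 1);	/* offset in object */

	return min_t(u64, resid, obj_size - seg_off);	/* bytes left in object */
}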
2277 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2279 struct rbd_img_request *img_request;
2280 struct rbd_device *rbd_dev;
2281 struct page **pages;
2284 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2285 rbd_assert(obj_request_img_data_test(obj_request));
2286 img_request = obj_request->img_request;
2287 rbd_assert(img_request);
2289 rbd_dev = img_request->rbd_dev;
2290 rbd_assert(rbd_dev);
2292 pages = obj_request->copyup_pages;
2293 rbd_assert(pages != NULL);
2294 obj_request->copyup_pages = NULL;
2295 page_count = obj_request->copyup_page_count;
2296 rbd_assert(page_count);
2297 obj_request->copyup_page_count = 0;
2298 ceph_release_page_vector(pages, page_count);
2301 * We want the transfer count to reflect the size of the
2302 * original write request. There is no such thing as a
2303 * successful short write, so if the request was successful
2304 * we can just set it to the originally-requested length.
2306 if (!obj_request->result)
2307 obj_request->xferred = obj_request->length;
2309 /* Finish up with the normal image object callback */
2311 rbd_img_obj_callback(obj_request);
2315 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2317 struct rbd_obj_request *orig_request;
2318 struct ceph_osd_request *osd_req;
2319 struct ceph_osd_client *osdc;
2320 struct rbd_device *rbd_dev;
2321 struct page **pages;
2328 rbd_assert(img_request_child_test(img_request));
2330 /* First get what we need from the image request */
2332 pages = img_request->copyup_pages;
2333 rbd_assert(pages != NULL);
2334 img_request->copyup_pages = NULL;
2335 page_count = img_request->copyup_page_count;
2336 rbd_assert(page_count);
2337 img_request->copyup_page_count = 0;
2339 orig_request = img_request->obj_request;
2340 rbd_assert(orig_request != NULL);
2341 rbd_assert(obj_request_type_valid(orig_request->type));
2342 img_result = img_request->result;
2343 parent_length = img_request->length;
2344 rbd_assert(parent_length == img_request->xferred);
2345 rbd_img_request_put(img_request);
2347 rbd_assert(orig_request->img_request);
2348 rbd_dev = orig_request->img_request->rbd_dev;
2349 rbd_assert(rbd_dev);
2352 * If the overlap has become 0 (most likely because the
2353 * image has been flattened) we need to free the pages
2354 * and re-submit the original write request.
2356 if (!rbd_dev->parent_overlap) {
2357 struct ceph_osd_client *osdc;
2359 ceph_release_page_vector(pages, page_count);
2360 osdc = &rbd_dev->rbd_client->client->osdc;
2361 img_result = rbd_obj_request_submit(osdc, orig_request);
2370 * The original osd request is of no use to us any more.
2371 * We need a new one that can hold the two ops in a copyup
2372 * request. Allocate the new copyup osd request for the
2373 * original request, and release the old one.
2375 img_result = -ENOMEM;
2376 osd_req = rbd_osd_req_create_copyup(orig_request);
2379 rbd_osd_req_destroy(orig_request->osd_req);
2380 orig_request->osd_req = osd_req;
2381 orig_request->copyup_pages = pages;
2382 orig_request->copyup_page_count = page_count;
2384 /* Initialize the copyup op */
2386 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2387 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2390 /* Then the original write request op */
2392 offset = orig_request->offset;
2393 length = orig_request->length;
2394 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2395 offset, length, 0, 0);
2396 if (orig_request->type == OBJ_REQUEST_BIO)
2397 osd_req_op_extent_osd_data_bio(osd_req, 1,
2398 orig_request->bio_list, length);
2400 osd_req_op_extent_osd_data_pages(osd_req, 1,
2401 orig_request->pages, length,
2402 offset & ~PAGE_MASK, false, false);
2404 rbd_osd_req_format_write(orig_request);
2406 /* All set, send it off. */
2408 orig_request->callback = rbd_img_obj_copyup_callback;
2409 osdc = &rbd_dev->rbd_client->client->osdc;
2410 img_result = rbd_obj_request_submit(osdc, orig_request);
2414 /* Record the error code and complete the request */
2416 orig_request->result = img_result;
2417 orig_request->xferred = 0;
2418 obj_request_done_set(orig_request);
2419 rbd_obj_request_complete(orig_request);
2423 * Read from the parent image the range of data that covers the
2424 * entire target of the given object request. This is used for
2425 * satisfying a layered image write request when the target of an
2426 * object request from the image request does not exist.
2428 * A page array big enough to hold the returned data is allocated
2429 * and supplied to rbd_img_request_fill() as the "data descriptor."
2430 * When the read completes, this page array will be transferred to
2431 * the original object request for the copyup operation.
2433 * If an error occurs, record it as the result of the original
2434 * object request and mark it done so it gets completed.
2436 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2438 struct rbd_img_request *img_request = NULL;
2439 struct rbd_img_request *parent_request = NULL;
2440 struct rbd_device *rbd_dev;
2443 struct page **pages = NULL;
2447 rbd_assert(obj_request_img_data_test(obj_request));
2448 rbd_assert(obj_request_type_valid(obj_request->type));
2450 img_request = obj_request->img_request;
2451 rbd_assert(img_request != NULL);
2452 rbd_dev = img_request->rbd_dev;
2453 rbd_assert(rbd_dev->parent != NULL);
2456 * Determine the byte range covered by the object in the
2457 * child image to which the original request was to be sent.
2459 img_offset = obj_request->img_offset - obj_request->offset;
2460 length = (u64)1 << rbd_dev->header.obj_order;
2463 * There is no defined parent data beyond the parent
2464 * overlap, so limit what we read at that boundary if necessary.
2467 if (img_offset + length > rbd_dev->parent_overlap) {
2468 rbd_assert(img_offset < rbd_dev->parent_overlap);
2469 length = rbd_dev->parent_overlap - img_offset;
2473 * Allocate a page array big enough to receive the data read from the parent.
2476 page_count = (u32)calc_pages_for(0, length);
2477 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2478 if (IS_ERR(pages)) {
2479 result = PTR_ERR(pages);
2485 parent_request = rbd_parent_request_create(obj_request,
2486 img_offset, length);
2487 if (!parent_request)
2490 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2493 parent_request->copyup_pages = pages;
2494 parent_request->copyup_page_count = page_count;
2496 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2497 result = rbd_img_request_submit(parent_request);
2501 parent_request->copyup_pages = NULL;
2502 parent_request->copyup_page_count = 0;
2503 parent_request->obj_request = NULL;
2504 rbd_obj_request_put(obj_request);
2507 ceph_release_page_vector(pages, page_count);
2509 rbd_img_request_put(parent_request);
2510 obj_request->result = result;
2511 obj_request->xferred = 0;
2512 obj_request_done_set(obj_request);
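/*
 * Minimal sketch of the overlap clamp performed above (numbers are
 * assumed for illustration): with 4 MiB objects and a parent_overlap
 * of 6 MiB, the object at image offset 4 MiB has only 2 MiB of
 * defined parent data, so only that much is read for the copyup.
 */
static u64 rbd_example_parent_read_length(u64 img_offset, u64 obj_size,
					  u64 parent_overlap)
{
	u64 length = obj_size;

	if (img_offset + length > parent_overlap)
		length = parent_overlap - img_offset;	/* clamp at overlap */
	return length;
}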
2517 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2519 struct rbd_obj_request *orig_request;
2520 struct rbd_device *rbd_dev;
2523 rbd_assert(!obj_request_img_data_test(obj_request));
2526 * All we need from the object request is the original
2527 * request and the result of the STAT op. Grab those, then
2528 * we're done with the request.
2530 orig_request = obj_request->obj_request;
2531 obj_request->obj_request = NULL;
2532 rbd_assert(orig_request);
2533 rbd_assert(orig_request->img_request);
2535 result = obj_request->result;
2536 obj_request->result = 0;
2538 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2539 obj_request, orig_request, result,
2540 obj_request->xferred, obj_request->length);
2541 rbd_obj_request_put(obj_request);
2544 * If the overlap has become 0 (most likely because the
2545 * image has been flattened) we need to free the pages
2546 * and re-submit the original write request.
2548 rbd_dev = orig_request->img_request->rbd_dev;
2549 if (!rbd_dev->parent_overlap) {
2550 struct ceph_osd_client *osdc;
2552 rbd_obj_request_put(orig_request);
2553 osdc = &rbd_dev->rbd_client->client->osdc;
2554 result = rbd_obj_request_submit(osdc, orig_request);
2560 * Our only purpose here is to determine whether the object
2561 * exists, and we don't want to treat the non-existence as
2562 * an error. If something else comes back, transfer the
2563 * error to the original request and complete it now.
2566 obj_request_existence_set(orig_request, true);
2567 } else if (result == -ENOENT) {
2568 obj_request_existence_set(orig_request, false);
2569 } else if (result) {
2570 orig_request->result = result;
2575 * Resubmit the original request now that we have recorded
2576 * whether the target object exists.
2578 orig_request->result = rbd_img_obj_request_submit(orig_request);
2580 if (orig_request->result)
2581 rbd_obj_request_complete(orig_request);
2582 rbd_obj_request_put(orig_request);
2585 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2587 struct rbd_obj_request *stat_request;
2588 struct rbd_device *rbd_dev;
2589 struct ceph_osd_client *osdc;
2590 struct page **pages = NULL;
2596 * The response data for a STAT call consists of:
2597 *     le64 length;
2598 *     struct {
2599 *         le32 tv_sec;
2600 *         le32 tv_nsec;
2601 *     } mtime;
2603 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2604 page_count = (u32)calc_pages_for(0, size);
2605 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2607 return PTR_ERR(pages);
2610 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2615 rbd_obj_request_get(obj_request);
2616 stat_request->obj_request = obj_request;
2617 stat_request->pages = pages;
2618 stat_request->page_count = page_count;
2620 rbd_assert(obj_request->img_request);
2621 rbd_dev = obj_request->img_request->rbd_dev;
2622 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2624 if (!stat_request->osd_req)
2626 stat_request->callback = rbd_img_obj_exists_callback;
2628 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2629 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2631 rbd_osd_req_format_read(stat_request);
2633 osdc = &rbd_dev->rbd_client->client->osdc;
2634 ret = rbd_obj_request_submit(osdc, stat_request);
2637 rbd_obj_request_put(obj_request);
2642 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2644 struct rbd_img_request *img_request;
2645 struct rbd_device *rbd_dev;
2648 rbd_assert(obj_request_img_data_test(obj_request));
2650 img_request = obj_request->img_request;
2651 rbd_assert(img_request);
2652 rbd_dev = img_request->rbd_dev;
2655 * Only writes to layered images need special handling.
2656 * Reads and non-layered writes are simple object requests.
2657 * Layered writes that start beyond the end of the overlap
2658 * with the parent have no parent data, so they too are
2659 * simple object requests. Finally, if the target object is
2660 * known to already exist, its parent data has already been
2661 * copied, so a write to the object can also be handled as a
2662 * simple object request.
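*
* Summary of the tests below (a restatement of the above):
*	read			 -> simple object request
*	non-layered write	 -> simple object request
*	write beyond overlap	 -> simple object request
*	write, known to exist	 -> simple object request
*	write, known missing	 -> copyup via parent read
*	write, existence unknown -> STAT the object first
*/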
2664 if (!img_request_write_test(img_request) ||
2665 !img_request_layered_test(img_request) ||
2666 rbd_dev->parent_overlap <= obj_request->img_offset ||
2667 ((known = obj_request_known_test(obj_request)) &&
2668 obj_request_exists_test(obj_request))) {
2670 struct rbd_device *rbd_dev;
2671 struct ceph_osd_client *osdc;
2673 rbd_dev = obj_request->img_request->rbd_dev;
2674 osdc = &rbd_dev->rbd_client->client->osdc;
2676 return rbd_obj_request_submit(osdc, obj_request);
2680 * It's a layered write. The target object might exist but
2681 * we may not know that yet. If we know it doesn't exist,
2682 * start by reading the data for the full target object from
2683 * the parent so we can use it for a copyup to the target.
2686 return rbd_img_obj_parent_read_full(obj_request);
2688 /* We don't know whether the target exists. Go find out. */
2690 return rbd_img_obj_exists_submit(obj_request);
2693 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2695 struct rbd_obj_request *obj_request;
2696 struct rbd_obj_request *next_obj_request;
2698 dout("%s: img %p\n", __func__, img_request);
2699 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2702 ret = rbd_img_obj_request_submit(obj_request);
2710 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2712 struct rbd_obj_request *obj_request;
2713 struct rbd_device *rbd_dev;
2718 rbd_assert(img_request_child_test(img_request));
2720 /* First get what we need from the image request and release it */
2722 obj_request = img_request->obj_request;
2723 img_xferred = img_request->xferred;
2724 img_result = img_request->result;
2725 rbd_img_request_put(img_request);
2728 * If the overlap has become 0 (most likely because the
2729 * image has been flattened) we need to re-submit the original request.
2732 rbd_assert(obj_request);
2733 rbd_assert(obj_request->img_request);
2734 rbd_dev = obj_request->img_request->rbd_dev;
2735 if (!rbd_dev->parent_overlap) {
2736 struct ceph_osd_client *osdc;
2738 osdc = &rbd_dev->rbd_client->client->osdc;
2739 img_result = rbd_obj_request_submit(osdc, obj_request);
2744 obj_request->result = img_result;
2745 if (obj_request->result)
2749 * We need to zero anything beyond the parent overlap
2750 * boundary. Since rbd_img_obj_request_read_callback()
2751 * will zero anything beyond the end of a short read, an
2752 * easy way to do this is to pretend the data from the
2753 * parent came up short--ending at the overlap boundary.
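*
* Worked example (hypothetical numbers): with parent_overlap at 5 MiB,
* a 2 MiB parent read for the object at image offset 4 MiB is capped
* below so that xferred is at most 1 MiB; everything past the boundary
* is then zero-filled by rbd_img_obj_request_read_callback().
*/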
2755 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2756 obj_end = obj_request->img_offset + obj_request->length;
2757 if (obj_end > rbd_dev->parent_overlap) {
2760 if (obj_request->img_offset < rbd_dev->parent_overlap)
2761 xferred = rbd_dev->parent_overlap -
2762 obj_request->img_offset;
2764 obj_request->xferred = min(img_xferred, xferred);
2766 obj_request->xferred = img_xferred;
2769 rbd_img_obj_request_read_callback(obj_request);
2770 rbd_obj_request_complete(obj_request);
2773 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2775 struct rbd_img_request *img_request;
2778 rbd_assert(obj_request_img_data_test(obj_request));
2779 rbd_assert(obj_request->img_request != NULL);
2780 rbd_assert(obj_request->result == (s32) -ENOENT);
2781 rbd_assert(obj_request_type_valid(obj_request->type));
2783 /* rbd_read_finish(obj_request, obj_request->length); */
2784 img_request = rbd_parent_request_create(obj_request,
2785 obj_request->img_offset,
2786 obj_request->length);
2791 if (obj_request->type == OBJ_REQUEST_BIO)
2792 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2793 obj_request->bio_list);
2795 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2796 obj_request->pages);
2800 img_request->callback = rbd_img_parent_read_callback;
2801 result = rbd_img_request_submit(img_request);
2808 rbd_img_request_put(img_request);
2809 obj_request->result = result;
2810 obj_request->xferred = 0;
2811 obj_request_done_set(obj_request);
2814 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2816 struct rbd_obj_request *obj_request;
2817 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2820 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2821 OBJ_REQUEST_NODATA);
2826 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2827 if (!obj_request->osd_req)
2829 obj_request->callback = rbd_obj_request_put;
2831 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2833 rbd_osd_req_format_read(obj_request);
2835 ret = rbd_obj_request_submit(osdc, obj_request);
2838 rbd_obj_request_put(obj_request);
2843 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2845 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2851 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2852 rbd_dev->header_name, (unsigned long long)notify_id,
2853 (unsigned int)opcode);
2854 ret = rbd_dev_refresh(rbd_dev);
2856 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2858 rbd_obj_notify_ack(rbd_dev, notify_id);
2862 * Request sync osd watch/unwatch. The value of "start" determines
2863 * whether a watch request is being initiated or torn down.
2865 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2867 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2868 struct rbd_obj_request *obj_request;
2871 rbd_assert(start ^ !!rbd_dev->watch_event);
2872 rbd_assert(start ^ !!rbd_dev->watch_request);
2875 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2876 &rbd_dev->watch_event);
2879 rbd_assert(rbd_dev->watch_event != NULL);
2883 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2884 OBJ_REQUEST_NODATA);
2888 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2889 if (!obj_request->osd_req)
2893 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2895 ceph_osdc_unregister_linger_request(osdc,
2896 rbd_dev->watch_request->osd_req);
2898 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2899 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2900 rbd_osd_req_format_write(obj_request);
2902 ret = rbd_obj_request_submit(osdc, obj_request);
2905 ret = rbd_obj_request_wait(obj_request);
2908 ret = obj_request->result;
2913 * A watch request is set to linger, so the underlying osd
2914 * request won't go away until we unregister it. We retain
2915 * a pointer to the object request during that time (in
2916 * rbd_dev->watch_request), so we'll keep a reference to
2917 * it. We'll drop that reference (below) after we've unregistered it.
2921 rbd_dev->watch_request = obj_request;
2926 /* We have successfully torn down the watch request */
2928 rbd_obj_request_put(rbd_dev->watch_request);
2929 rbd_dev->watch_request = NULL;
2931 /* Cancel the event if we're tearing down, or on error */
2932 ceph_osdc_cancel_event(rbd_dev->watch_event);
2933 rbd_dev->watch_event = NULL;
2935 rbd_obj_request_put(obj_request);
2941 * Synchronous osd object method call. Returns the number of bytes
2942 * returned in the inbound buffer, or a negative error code.
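*
* Example call (illustrative; it mirrors a use later in this file):
*
*	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
*				  "rbd", "get_object_prefix", NULL, 0,
*				  reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
*
* On success, ret is the number of bytes of reply data in reply_buf.
*/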
2944 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2945 const char *object_name,
2946 const char *class_name,
2947 const char *method_name,
2948 const void *outbound,
2949 size_t outbound_size,
2951 size_t inbound_size)
2953 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2954 struct rbd_obj_request *obj_request;
2955 struct page **pages;
2960 * Method calls are ultimately read operations. The result
2961 * should be placed into the inbound buffer provided. They
2962 * also supply outbound data--parameters for the object
2963 * method. Currently if this is present it will be a snapshot id.
2966 page_count = (u32)calc_pages_for(0, inbound_size);
2967 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2969 return PTR_ERR(pages);
2972 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2977 obj_request->pages = pages;
2978 obj_request->page_count = page_count;
2980 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2981 if (!obj_request->osd_req)
2984 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2985 class_name, method_name);
2986 if (outbound_size) {
2987 struct ceph_pagelist *pagelist;
2989 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2993 ceph_pagelist_init(pagelist);
2994 ceph_pagelist_append(pagelist, outbound, outbound_size);
2995 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2998 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2999 obj_request->pages, inbound_size,
3001 rbd_osd_req_format_read(obj_request);
3003 ret = rbd_obj_request_submit(osdc, obj_request);
3006 ret = rbd_obj_request_wait(obj_request);
3010 ret = obj_request->result;
3014 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3015 ret = (int)obj_request->xferred;
3016 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3019 rbd_obj_request_put(obj_request);
3021 ceph_release_page_vector(pages, page_count);
3026 static void rbd_request_fn(struct request_queue *q)
3027 __releases(q->queue_lock) __acquires(q->queue_lock)
3029 struct rbd_device *rbd_dev = q->queuedata;
3030 bool read_only = rbd_dev->mapping.read_only;
3034 while ((rq = blk_fetch_request(q))) {
3035 bool write_request = rq_data_dir(rq) == WRITE;
3036 struct rbd_img_request *img_request;
3040 /* Ignore any non-FS requests that filter through. */
3042 if (rq->cmd_type != REQ_TYPE_FS) {
3043 dout("%s: non-fs request type %d\n", __func__,
3044 (int) rq->cmd_type);
3045 __blk_end_request_all(rq, 0);
3049 /* Ignore/skip any zero-length requests */
3051 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3052 length = (u64) blk_rq_bytes(rq);
3055 dout("%s: zero-length request\n", __func__);
3056 __blk_end_request_all(rq, 0);
3060 spin_unlock_irq(q->queue_lock);
3062 /* Disallow writes to a read-only device */
3064 if (write_request) {
3068 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3072 * Quit early if the mapped snapshot no longer
3073 * exists. It's still possible the snapshot will
3074 * have disappeared by the time our request arrives
3075 * at the osd, but there's no sense in sending it if we already know it's gone.
3078 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3079 dout("request for non-existent snapshot");
3080 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3086 if (offset && length > U64_MAX - offset + 1) {
3087 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3089 goto end_request; /* Shouldn't happen */
3093 if (offset + length > rbd_dev->mapping.size) {
3094 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3095 offset, length, rbd_dev->mapping.size);
3100 img_request = rbd_img_request_create(rbd_dev, offset, length,
3105 img_request->rq = rq;
3107 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3110 result = rbd_img_request_submit(img_request);
3112 rbd_img_request_put(img_request);
3114 spin_lock_irq(q->queue_lock);
3116 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3117 write_request ? "write" : "read",
3118 length, offset, result);
3120 __blk_end_request_all(rq, result);
3126 * A bvec merge callback for the request queue. It makes sure we don't
3127 * create a bio that spans multiple osd objects. The one exception is a
3128 * single-page bio, which we handle later in bio_chain_clone_range().
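*
* Worked numbers (assuming the default object order of 22): an rbd
* object is 4 MiB, i.e. 8192 512-byte sectors, so sectors_per_obj
* below is 8192 and a bvec may only grow a bio up to the next 4 MiB
* object boundary.
*/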
3130 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3131 struct bio_vec *bvec)
3133 struct rbd_device *rbd_dev = q->queuedata;
3134 sector_t sector_offset;
3135 sector_t sectors_per_obj;
3136 sector_t obj_sector_offset;
3140 * Find how far into its rbd object the bio's start sector falls.
3141 * The sector is partition-relative, so first make it relative to the
3142 * enclosing device, then keep only the offset within the object.
3144 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3145 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3146 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3149 * Compute the number of bytes from that offset to the end
3150 * of the object. Account for what's already used by the bio.
3152 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3153 if (ret > bmd->bi_size)
3154 ret -= bmd->bi_size;
3159 * Don't send back more than was asked for. And if the bio
3160 * was empty, let the whole thing through because: "Note
3161 * that a block device *must* allow a single page to be
3162 * added to an empty bio."
3164 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3165 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3166 ret = (int) bvec->bv_len;
3171 static void rbd_free_disk(struct rbd_device *rbd_dev)
3173 struct gendisk *disk = rbd_dev->disk;
3178 rbd_dev->disk = NULL;
3179 if (disk->flags & GENHD_FL_UP) {
3182 blk_cleanup_queue(disk->queue);
3187 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3188 const char *object_name,
3189 u64 offset, u64 length, void *buf)
3192 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3193 struct rbd_obj_request *obj_request;
3194 struct page **pages = NULL;
3199 page_count = (u32) calc_pages_for(offset, length);
3200 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3202 ret = PTR_ERR(pages);
3205 obj_request = rbd_obj_request_create(object_name, offset, length,
3210 obj_request->pages = pages;
3211 obj_request->page_count = page_count;
3213 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3214 if (!obj_request->osd_req)
3217 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3218 offset, length, 0, 0);
3219 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3221 obj_request->length,
3222 obj_request->offset & ~PAGE_MASK,
3224 rbd_osd_req_format_read(obj_request);
3226 ret = rbd_obj_request_submit(osdc, obj_request);
3229 ret = rbd_obj_request_wait(obj_request);
3233 ret = obj_request->result;
3237 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3238 size = (size_t) obj_request->xferred;
3239 ceph_copy_from_page_vector(pages, buf, 0, size);
3240 rbd_assert(size <= (size_t)INT_MAX);
3244 rbd_obj_request_put(obj_request);
3246 ceph_release_page_vector(pages, page_count);
3252 * Read the complete header for the given rbd device. On successful
3253 * return, the rbd_dev->header field will contain up-to-date
3254 * information about the image.
3256 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3258 struct rbd_image_header_ondisk *ondisk = NULL;
3265 * The complete header will include an array of its 64-bit
3266 * snapshot ids, followed by the names of those snapshots as
3267 * a contiguous block of NUL-terminated strings. Note that
3268 * the number of snapshots could change by the time we read
3269 * it in, in which case we re-read it.
3276 size = sizeof (*ondisk);
3277 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3279 ondisk = kmalloc(size, GFP_KERNEL);
3283 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3287 if ((size_t)ret < size) {
3289 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3293 if (!rbd_dev_ondisk_valid(ondisk)) {
3295 rbd_warn(rbd_dev, "invalid header");
3299 names_size = le64_to_cpu(ondisk->snap_names_len);
3300 want_count = snap_count;
3301 snap_count = le32_to_cpu(ondisk->snap_count);
3302 } while (snap_count != want_count);
3304 ret = rbd_header_from_disk(rbd_dev, ondisk);
3312 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3313 * has disappeared from the (just updated) snapshot context.
3315 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3319 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3322 snap_id = rbd_dev->spec->snap_id;
3323 if (snap_id == CEPH_NOSNAP)
3326 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3327 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3330 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3335 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3336 mapping_size = rbd_dev->mapping.size;
3337 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3338 if (rbd_dev->image_format == 1)
3339 ret = rbd_dev_v1_header_info(rbd_dev);
3341 ret = rbd_dev_v2_header_info(rbd_dev);
3343 /* If it's a mapped snapshot, validate its EXISTS flag */
3345 rbd_exists_validate(rbd_dev);
3346 mutex_unlock(&ctl_mutex);
3347 if (mapping_size != rbd_dev->mapping.size) {
3350 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3351 dout("setting size to %llu sectors", (unsigned long long)size);
3352 set_capacity(rbd_dev->disk, size);
3353 revalidate_disk(rbd_dev->disk);
3359 static int rbd_init_disk(struct rbd_device *rbd_dev)
3361 struct gendisk *disk;
3362 struct request_queue *q;
3365 /* create gendisk info */
3366 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3370 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3372 disk->major = rbd_dev->major;
3373 disk->first_minor = 0;
3374 disk->fops = &rbd_bd_ops;
3375 disk->private_data = rbd_dev;
3377 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3381 /* We use the default size, but let's be explicit about it. */
3382 blk_queue_physical_block_size(q, SECTOR_SIZE);
3384 /* set io sizes to object size */
3385 segment_size = rbd_obj_bytes(&rbd_dev->header);
3386 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3387 blk_queue_max_segment_size(q, segment_size);
3388 blk_queue_io_min(q, segment_size);
3389 blk_queue_io_opt(q, segment_size);
3391 blk_queue_merge_bvec(q, rbd_merge_bvec);
3394 q->queuedata = rbd_dev;
3396 rbd_dev->disk = disk;
3409 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3411 return container_of(dev, struct rbd_device, dev);
3414 static ssize_t rbd_size_show(struct device *dev,
3415 struct device_attribute *attr, char *buf)
3417 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3419 return sprintf(buf, "%llu\n",
3420 (unsigned long long)rbd_dev->mapping.size);
3424 * Note this shows the features for whatever's mapped, which is not
3425 * necessarily the base image.
3427 static ssize_t rbd_features_show(struct device *dev,
3428 struct device_attribute *attr, char *buf)
3430 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3432 return sprintf(buf, "0x%016llx\n",
3433 (unsigned long long)rbd_dev->mapping.features);
3436 static ssize_t rbd_major_show(struct device *dev,
3437 struct device_attribute *attr, char *buf)
3439 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3442 return sprintf(buf, "%d\n", rbd_dev->major);
3444 return sprintf(buf, "(none)\n");
3448 static ssize_t rbd_client_id_show(struct device *dev,
3449 struct device_attribute *attr, char *buf)
3451 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3453 return sprintf(buf, "client%lld\n",
3454 ceph_client_id(rbd_dev->rbd_client->client));
3457 static ssize_t rbd_pool_show(struct device *dev,
3458 struct device_attribute *attr, char *buf)
3460 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3462 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3465 static ssize_t rbd_pool_id_show(struct device *dev,
3466 struct device_attribute *attr, char *buf)
3468 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3470 return sprintf(buf, "%llu\n",
3471 (unsigned long long) rbd_dev->spec->pool_id);
3474 static ssize_t rbd_name_show(struct device *dev,
3475 struct device_attribute *attr, char *buf)
3477 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3479 if (rbd_dev->spec->image_name)
3480 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3482 return sprintf(buf, "(unknown)\n");
3485 static ssize_t rbd_image_id_show(struct device *dev,
3486 struct device_attribute *attr, char *buf)
3488 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3490 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3494 * Shows the name of the currently-mapped snapshot (or
3495 * RBD_SNAP_HEAD_NAME for the base image).
3497 static ssize_t rbd_snap_show(struct device *dev,
3498 struct device_attribute *attr,
3501 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3503 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3507 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3508 * for the parent image. If there is no parent, simply shows
3509 * "(no parent image)".
3511 static ssize_t rbd_parent_show(struct device *dev,
3512 struct device_attribute *attr,
3515 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3516 struct rbd_spec *spec = rbd_dev->parent_spec;
3521 return sprintf(buf, "(no parent image)\n");
3523 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3524 (unsigned long long) spec->pool_id, spec->pool_name);
3529 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3530 spec->image_name ? spec->image_name : "(unknown)");
3535 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3536 (unsigned long long) spec->snap_id, spec->snap_name);
3541 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3546 return (ssize_t) (bufp - buf);
3549 static ssize_t rbd_image_refresh(struct device *dev,
3550 struct device_attribute *attr,
3554 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3557 ret = rbd_dev_refresh(rbd_dev);
3559 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3561 return ret < 0 ? ret : size;
3564 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3565 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3566 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3567 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3568 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3569 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3570 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3571 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3572 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3573 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3574 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3576 static struct attribute *rbd_attrs[] = {
3577 &dev_attr_size.attr,
3578 &dev_attr_features.attr,
3579 &dev_attr_major.attr,
3580 &dev_attr_client_id.attr,
3581 &dev_attr_pool.attr,
3582 &dev_attr_pool_id.attr,
3583 &dev_attr_name.attr,
3584 &dev_attr_image_id.attr,
3585 &dev_attr_current_snap.attr,
3586 &dev_attr_parent.attr,
3587 &dev_attr_refresh.attr,
3591 static struct attribute_group rbd_attr_group = {
3595 static const struct attribute_group *rbd_attr_groups[] = {
3600 static void rbd_sysfs_dev_release(struct device *dev)
3604 static struct device_type rbd_device_type = {
3606 .groups = rbd_attr_groups,
3607 .release = rbd_sysfs_dev_release,
3610 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3612 kref_get(&spec->kref);
3617 static void rbd_spec_free(struct kref *kref);
3618 static void rbd_spec_put(struct rbd_spec *spec)
3621 kref_put(&spec->kref, rbd_spec_free);
3624 static struct rbd_spec *rbd_spec_alloc(void)
3626 struct rbd_spec *spec;
3628 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3631 kref_init(&spec->kref);
3636 static void rbd_spec_free(struct kref *kref)
3638 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3640 kfree(spec->pool_name);
3641 kfree(spec->image_id);
3642 kfree(spec->image_name);
3643 kfree(spec->snap_name);
3647 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3648 struct rbd_spec *spec)
3650 struct rbd_device *rbd_dev;
3652 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3656 spin_lock_init(&rbd_dev->lock);
3658 atomic_set(&rbd_dev->parent_ref, 0);
3659 INIT_LIST_HEAD(&rbd_dev->node);
3660 init_rwsem(&rbd_dev->header_rwsem);
3662 rbd_dev->spec = spec;
3663 rbd_dev->rbd_client = rbdc;
3665 /* Initialize the layout used for all rbd requests */
3667 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3668 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3669 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3670 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3675 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3677 rbd_put_client(rbd_dev->rbd_client);
3678 rbd_spec_put(rbd_dev->spec);
3683 * Get the size and object order for an image snapshot, or if
3684 * snap_id is CEPH_NOSNAP, gets this information for the base
3687 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3688 u8 *order, u64 *snap_size)
3690 __le64 snapid = cpu_to_le64(snap_id);
3695 } __attribute__ ((packed)) size_buf = { 0 };
3697 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3699 &snapid, sizeof (snapid),
3700 &size_buf, sizeof (size_buf));
3701 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3704 if (ret < sizeof (size_buf))
3708 *order = size_buf.order;
3709 *snap_size = le64_to_cpu(size_buf.size);
3711 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
3712 (unsigned long long)snap_id, (unsigned int)*order,
3713 (unsigned long long)*snap_size);
3718 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3720 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3721 &rbd_dev->header.obj_order,
3722 &rbd_dev->header.image_size);
3725 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3731 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3735 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3736 "rbd", "get_object_prefix", NULL, 0,
3737 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3738 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3743 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3744 p + ret, NULL, GFP_NOIO);
3747 if (IS_ERR(rbd_dev->header.object_prefix)) {
3748 ret = PTR_ERR(rbd_dev->header.object_prefix);
3749 rbd_dev->header.object_prefix = NULL;
3751 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3759 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3762 __le64 snapid = cpu_to_le64(snap_id);
3766 } __attribute__ ((packed)) features_buf = { 0 };
3770 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3771 "rbd", "get_features",
3772 &snapid, sizeof (snapid),
3773 &features_buf, sizeof (features_buf));
3774 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3777 if (ret < sizeof (features_buf))
3780 incompat = le64_to_cpu(features_buf.incompat);
3781 if (incompat & ~RBD_FEATURES_SUPPORTED)
3784 *snap_features = le64_to_cpu(features_buf.features);
3786 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3787 (unsigned long long)snap_id,
3788 (unsigned long long)*snap_features,
3789 (unsigned long long)le64_to_cpu(features_buf.incompat));
3794 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3796 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3797 &rbd_dev->header.features);
3800 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3802 struct rbd_spec *parent_spec;
3804 void *reply_buf = NULL;
3813 parent_spec = rbd_spec_alloc();
3817 size = sizeof (__le64) + /* pool_id */
3818 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3819 sizeof (__le64) + /* snap_id */
3820 sizeof (__le64); /* overlap */
3821 reply_buf = kmalloc(size, GFP_KERNEL);
3827 snapid = cpu_to_le64(CEPH_NOSNAP);
3828 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3829 "rbd", "get_parent",
3830 &snapid, sizeof (snapid),
3832 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3837 end = reply_buf + ret;
3839 ceph_decode_64_safe(&p, end, pool_id, out_err);
3840 if (pool_id == CEPH_NOPOOL) {
3842 * Either the parent never existed, or we have
3843 * record of it but the image got flattened so it no
3844 * longer has a parent. When the parent of a
3845 * layered image disappears we immediately set the
3846 * overlap to 0. The effect of this is that all new
3847 * requests will be treated as if the image had no
3850 if (rbd_dev->parent_overlap) {
3851 rbd_dev->parent_overlap = 0;
3853 rbd_dev_parent_put(rbd_dev);
3854 pr_info("%s: clone image has been flattened\n",
3855 rbd_dev->disk->disk_name);
3858 goto out; /* No parent? No problem. */
3861 /* The ceph file layout needs to fit pool id in 32 bits */
3864 if (pool_id > (u64)U32_MAX) {
3865 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3866 (unsigned long long)pool_id, U32_MAX);
3869 parent_spec->pool_id = pool_id;
3871 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3872 if (IS_ERR(image_id)) {
3873 ret = PTR_ERR(image_id);
3876 parent_spec->image_id = image_id;
3877 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3878 ceph_decode_64_safe(&p, end, overlap, out_err);
3881 rbd_spec_put(rbd_dev->parent_spec);
3882 rbd_dev->parent_spec = parent_spec;
3883 parent_spec = NULL; /* rbd_dev now owns this */
3884 rbd_dev->parent_overlap = overlap;
3886 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3892 rbd_spec_put(parent_spec);
3897 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3901 __le64 stripe_count;
3902 } __attribute__ ((packed)) striping_info_buf = { 0 };
3903 size_t size = sizeof (striping_info_buf);
3910 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3911 "rbd", "get_stripe_unit_count", NULL, 0,
3912 (char *)&striping_info_buf, size);
3913 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3920 * We don't actually support the "fancy striping" feature
3921 * (STRIPINGV2) yet, but if the striping sizes are the
3922 * defaults the behavior is the same as before. So find
3923 * out, and only fail if the image has non-default values.
3926 obj_size = (u64)1 << rbd_dev->header.obj_order;
3927 p = &striping_info_buf;
3928 stripe_unit = ceph_decode_64(&p);
3929 if (stripe_unit != obj_size) {
3930 rbd_warn(rbd_dev, "unsupported stripe unit "
3931 "(got %llu want %llu)",
3932 stripe_unit, obj_size);
3935 stripe_count = ceph_decode_64(&p);
3936 if (stripe_count != 1) {
3937 rbd_warn(rbd_dev, "unsupported stripe count "
3938 "(got %llu want 1)", stripe_count);
3941 rbd_dev->header.stripe_unit = stripe_unit;
3942 rbd_dev->header.stripe_count = stripe_count;
3947 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3949 size_t image_id_size;
3954 void *reply_buf = NULL;
3956 char *image_name = NULL;
3959 rbd_assert(!rbd_dev->spec->image_name);
3961 len = strlen(rbd_dev->spec->image_id);
3962 image_id_size = sizeof (__le32) + len;
3963 image_id = kmalloc(image_id_size, GFP_KERNEL);
3968 end = image_id + image_id_size;
3969 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3971 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3972 reply_buf = kmalloc(size, GFP_KERNEL);
3976 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3977 "rbd", "dir_get_name",
3978 image_id, image_id_size,
3983 end = reply_buf + ret;
3985 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3986 if (IS_ERR(image_name))
3989 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3997 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3999 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4000 const char *snap_name;
4003 /* Skip over names until we find the one we are looking for */
4005 snap_name = rbd_dev->header.snap_names;
4006 while (which < snapc->num_snaps) {
4007 if (!strcmp(name, snap_name))
4008 return snapc->snaps[which];
4009 snap_name += strlen(snap_name) + 1;
4015 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4017 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4022 for (which = 0; !found && which < snapc->num_snaps; which++) {
4023 const char *snap_name;
4025 snap_id = snapc->snaps[which];
4026 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4027 if (IS_ERR(snap_name))
4029 found = !strcmp(name, snap_name);
4032 return found ? snap_id : CEPH_NOSNAP;
4036 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4037 * no snapshot by that name is found, or if an error occurs.
4039 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4041 if (rbd_dev->image_format == 1)
4042 return rbd_v1_snap_id_by_name(rbd_dev, name);
4044 return rbd_v2_snap_id_by_name(rbd_dev, name);
4048 * When an rbd image has a parent image, it is identified by the
4049 * pool, image, and snapshot ids (not names). This function fills
4050 * in the names for those ids. (It's OK if we can't figure out the
4051 * name for an image id, but the pool and snapshot ids should always
4052 * exist and have names.) All names in an rbd spec are dynamically
4055 * When an image being mapped (not a parent) is probed, we have the
4056 * pool name and pool id, image name and image id, and the snapshot
4057 * name. The only thing we're missing is the snapshot id.
4059 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4061 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4062 struct rbd_spec *spec = rbd_dev->spec;
4063 const char *pool_name;
4064 const char *image_name;
4065 const char *snap_name;
4069 * An image being mapped will have the pool name (etc.), but
4070 * we need to look up the snapshot id.
4072 if (spec->pool_name) {
4073 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4076 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4077 if (snap_id == CEPH_NOSNAP)
4079 spec->snap_id = snap_id;
4081 spec->snap_id = CEPH_NOSNAP;
4087 /* Get the pool name; we have to make our own copy of this */
4089 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4091 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4094 pool_name = kstrdup(pool_name, GFP_KERNEL);
4098 /* Fetch the image name; tolerate failure here */
4100 image_name = rbd_dev_image_name(rbd_dev);
4102 rbd_warn(rbd_dev, "unable to get image name");
4104 /* Look up the snapshot name, and make a copy */
4106 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4112 spec->pool_name = pool_name;
4113 spec->image_name = image_name;
4114 spec->snap_name = snap_name;
4124 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4133 struct ceph_snap_context *snapc;
4137 * We'll need room for the seq value (maximum snapshot id),
4138 * snapshot count, and array of that many snapshot ids.
4139 * For now we have a fixed upper limit on the number we're
4140 * prepared to receive.
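*
* Worked size (a sanity check using the constants above): 8 bytes of
* seq plus 4 bytes of count plus 510 * 8 bytes of snapshot ids is
* 4092 bytes, which is why RBD_MAX_SNAP_COUNT keeps the reply within
* a single 4 KiB page.
*/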
4142 size = sizeof (__le64) + sizeof (__le32) +
4143 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4144 reply_buf = kzalloc(size, GFP_KERNEL);
4148 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4149 "rbd", "get_snapcontext", NULL, 0,
4151 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4156 end = reply_buf + ret;
4158 ceph_decode_64_safe(&p, end, seq, out);
4159 ceph_decode_32_safe(&p, end, snap_count, out);
4162 * Make sure the reported number of snapshot ids wouldn't go
4163 * beyond the end of our buffer. But before checking that,
4164 * make sure the computed size of the snapshot context we
4165 * allocate is representable in a size_t.
4167 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4172 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4176 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4182 for (i = 0; i < snap_count; i++)
4183 snapc->snaps[i] = ceph_decode_64(&p);
4185 ceph_put_snap_context(rbd_dev->header.snapc);
4186 rbd_dev->header.snapc = snapc;
4188 dout(" snap context seq = %llu, snap_count = %u\n",
4189 (unsigned long long)seq, (unsigned int)snap_count);
4196 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4207 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4208 reply_buf = kmalloc(size, GFP_KERNEL);
4210 return ERR_PTR(-ENOMEM);
4212 snapid = cpu_to_le64(snap_id);
4213 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4214 "rbd", "get_snapshot_name",
4215 &snapid, sizeof (snapid),
4217 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4219 snap_name = ERR_PTR(ret);
4224 end = reply_buf + ret;
4225 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4226 if (IS_ERR(snap_name))
4229 dout(" snap_id 0x%016llx snap_name = %s\n",
4230 (unsigned long long)snap_id, snap_name);
4237 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4239 bool first_time = rbd_dev->header.object_prefix == NULL;
4242 down_write(&rbd_dev->header_rwsem);
4245 ret = rbd_dev_v2_header_onetime(rbd_dev);
4251 * If the image supports layering, get the parent info. We
4252 * need to probe the first time regardless. Thereafter we
4253 * only need to do so if there's a parent, to see if it has
4254 * disappeared due to the mapped image getting flattened.
4256 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4257 (first_time || rbd_dev->parent_spec)) {
4260 ret = rbd_dev_v2_parent_info(rbd_dev);
4265 * Print a warning if this is the initial probe and
4266 * the image has a parent. Don't print it if the
4267 * image now being probed is itself a parent. We
4268 * can tell at this point because we won't know its
4269 * pool name yet (just its pool id).
4271 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4272 if (first_time && warn)
4273 rbd_warn(rbd_dev, "WARNING: kernel layering "
4274 "is EXPERIMENTAL!");
4277 ret = rbd_dev_v2_image_size(rbd_dev);
4281 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4282 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4283 rbd_dev->mapping.size = rbd_dev->header.image_size;
4285 ret = rbd_dev_v2_snap_context(rbd_dev);
4286 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4288 up_write(&rbd_dev->header_rwsem);
4293 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4298 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4300 dev = &rbd_dev->dev;
4301 dev->bus = &rbd_bus_type;
4302 dev->type = &rbd_device_type;
4303 dev->parent = &rbd_root_dev;
4304 dev->release = rbd_dev_device_release;
4305 dev_set_name(dev, "%d", rbd_dev->dev_id);
4306 ret = device_register(dev);
4308 mutex_unlock(&ctl_mutex);
4313 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4315 device_unregister(&rbd_dev->dev);
4318 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4321 * Get a unique rbd identifier for the given new rbd_dev, and add
4322 * the rbd_dev to the global list. The minimum rbd id is 1.
4324 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4326 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4328 spin_lock(&rbd_dev_list_lock);
4329 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4330 spin_unlock(&rbd_dev_list_lock);
4331 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4332 (unsigned long long) rbd_dev->dev_id);
4336 * Remove an rbd_dev from the global list, and record that its
4337 * identifier is no longer in use.
4339 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4341 struct list_head *tmp;
4342 int rbd_id = rbd_dev->dev_id;
4345 rbd_assert(rbd_id > 0);
4347 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4348 (unsigned long long) rbd_dev->dev_id);
4349 spin_lock(&rbd_dev_list_lock);
4350 list_del_init(&rbd_dev->node);
4353 * If the id being "put" is not the current maximum, there
4354 * is nothing special we need to do.
4356 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4357 spin_unlock(&rbd_dev_list_lock);
4362 * We need to update the current maximum id. Search the
4363 * list to find out what it is. We're more likely to find
4364 * the maximum at the end, so search the list backward.
4367 list_for_each_prev(tmp, &rbd_dev_list) {
4368 struct rbd_device *rbd_dev;
4370 rbd_dev = list_entry(tmp, struct rbd_device, node);
4371 if (rbd_dev->dev_id > max_id)
4372 max_id = rbd_dev->dev_id;
4374 spin_unlock(&rbd_dev_list_lock);
4377 * The max id could have been updated by rbd_dev_id_get(), in
4378 * which case it now accurately reflects the new maximum.
4379 * Be careful not to overwrite the maximum value in that
4382 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4383 dout(" max dev id has been reset\n");
4387 * Skips over white space at *buf, and updates *buf to point to the
4388 * first found non-space character (if any). Returns the length of
4389 * the token (string of non-white space characters) found. Note
4390 * that *buf must be terminated with '\0'.
4392 static inline size_t next_token(const char **buf)
4395 * These are the characters that produce nonzero for
4396 * isspace() in the "C" and "POSIX" locales.
4398 const char *spaces = " \f\n\r\t\v";
4400 *buf += strspn(*buf, spaces); /* Find start of token */
4402 return strcspn(*buf, spaces); /* Return token length */
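/*
 * Example (hypothetical input): with *buf pointing at "  rbd foo",
 * next_token() advances *buf to "rbd foo" and returns 3, the length
 * of the token "rbd".
 */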
4406 * Finds the next token in *buf, and if the provided token buffer is
4407 * big enough, copies the found token into it. The result, if
4408 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4409 * must be terminated with '\0' on entry.
4411 * Returns the length of the token found (not including the '\0').
4412 * Return value will be 0 if no token is found, and it will be >=
4413 * token_size if the token would not fit.
4415 * The *buf pointer will be updated to point beyond the end of the
4416 * found token. Note that this occurs even if the token buffer is
4417 * too small to hold it.
4419 static inline size_t copy_token(const char **buf,
4425 len = next_token(buf);
4426 if (len < token_size) {
4427 memcpy(token, *buf, len);
4428 *(token + len) = '\0';
4436 * Finds the next token in *buf, dynamically allocates a buffer big
4437 * enough to hold a copy of it, and copies the token into the new
4438 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4439 * that a duplicate buffer is created even for a zero-length token.
4441 * Returns a pointer to the newly-allocated duplicate, or a null
4442 * pointer if memory for the duplicate was not available. If
4443 * the lenp argument is a non-null pointer, the length of the token
4444 * (not including the '\0') is returned in *lenp.
4446 * If successful, the *buf pointer will be updated to point beyond
4447 * the end of the found token.
4449 * Note: uses GFP_KERNEL for allocation.
4451 static inline char *dup_token(const char **buf, size_t *lenp)
4456 len = next_token(buf);
4457 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4460 *(dup + len) = '\0';
4470 * Parse the options provided for an "rbd add" (i.e., rbd image
4471 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4472 * and the data written is passed here via a NUL-terminated buffer.
4473 * Returns 0 if successful or an error code otherwise.
4475 * The information extracted from these options is recorded in
4476 * the other parameters which return dynamically-allocated
4479 * The address of a pointer that will refer to a ceph options
4480 * structure. Caller must release the returned pointer using
4481 * ceph_destroy_options() when it is no longer needed.
4483 * Address of an rbd options pointer. Fully initialized by
4484 * this function; caller must release with kfree().
4486 * Address of an rbd image specification pointer. Fully
4487 * initialized by this function based on parsed options.
4488 * Caller must release with rbd_spec_put().
4490 * The options passed take this form:
4491 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4494 * A comma-separated list of one or more monitor addresses.
4495 * A monitor address is an ip address, optionally followed
4496 * by a port number (separated by a colon).
4497 * I.e.: ip1[:port1][,ip2[:port2]...]
4499 * A comma-separated list of ceph and/or rbd options.
4501 * The name of the rados pool containing the rbd image.
4503 * The name of the image in that pool to map.
4505 * An optional snapshot name. If provided, the mapping will
4506 * present data from the image at the time that snapshot was
4507 * created. The image head is used if no snapshot name is
4508 * provided. Snapshot mappings are always read-only.
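*
* Example input (all values illustrative), as written to
* /sys/bus/rbd/add:
*
*	1.2.3.4:6789 name=admin rbd foo snap1
*
* which maps snapshot "snap1" of image "foo" in pool "rbd", using the
* monitor at 1.2.3.4:6789 and the "admin" entity name.
*/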
4510 static int rbd_add_parse_args(const char *buf,
4511 struct ceph_options **ceph_opts,
4512 struct rbd_options **opts,
4513 struct rbd_spec **rbd_spec)
4517 const char *mon_addrs;
4519 size_t mon_addrs_size;
4520 struct rbd_spec *spec = NULL;
4521 struct rbd_options *rbd_opts = NULL;
4522 struct ceph_options *copts;
4525 /* The first four tokens are required */
4527 len = next_token(&buf);
4529 rbd_warn(NULL, "no monitor address(es) provided");
4533 mon_addrs_size = len + 1;
4537 options = dup_token(&buf, NULL);
4541 rbd_warn(NULL, "no options provided");
4545 spec = rbd_spec_alloc();
4549 spec->pool_name = dup_token(&buf, NULL);
4550 if (!spec->pool_name)
4552 if (!*spec->pool_name) {
4553 rbd_warn(NULL, "no pool name provided");
4557 spec->image_name = dup_token(&buf, NULL);
4558 if (!spec->image_name)
4560 if (!*spec->image_name) {
4561 rbd_warn(NULL, "no image name provided");
4566 * Snapshot name is optional; default is to use "-"
4567 * (indicating the head/no snapshot).
4569 len = next_token(&buf);
4571 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4572 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4573 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4574 ret = -ENAMETOOLONG;
4577 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4580 *(snap_name + len) = '\0';
4581 spec->snap_name = snap_name;
4583 /* Initialize all rbd options to the defaults */
4585 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4589 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4591 copts = ceph_parse_options(options, mon_addrs,
4592 mon_addrs + mon_addrs_size - 1,
4593 parse_rbd_opts_token, rbd_opts);
4594 if (IS_ERR(copts)) {
4595 ret = PTR_ERR(copts);
4616 * An rbd format 2 image has a unique identifier, distinct from the
4617 * name given to it by the user. Internally, that identifier is
4618 * what's used to specify the names of objects related to the image.
4620 * A special "rbd id" object is used to map an rbd image name to its
4621 * id. If that object doesn't exist, then there is no v2 rbd image
4622 * with the supplied name.
4624 * This function will record the given rbd_dev's image_id field if
4625 * it can be determined, and in that case will return 0. If any
4626 * errors occur a negative errno will be returned and the rbd_dev's
4627 * image_id field will be unchanged (and should be NULL).
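*
* Example (assuming the RBD_ID_PREFIX of "rbd_id." defined in
* rbd_types.h): probing an image named "foo" invokes the "get_id"
* class method on the object "rbd_id.foo"; if that object does not
* exist, the image is treated as a format 1 image.
*/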
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
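/*
 * For instance (id value illustrative): a v2 image named "foo" has
 * an id object "rbd_id.foo" (RBD_ID_PREFIX comes from rbd_types.h);
 * its "get_id" class method returns the image id, say
 * "10074b0dc51d", which in turn names every other object belonging
 * to the image.
 */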
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
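/*
 * To illustrate (hypothetical ancestry): if the mapped image was
 * cloned from a snapshot of "base", probing it creates a second,
 * unmapped rbd_device for "base" that shares the child's rbd_client
 * and takes a reference on the parent spec; a longer chain simply
 * recurses, one rbd_device per ancestor.
 */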
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
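/*
 * For example (assuming this is the first device mapped): dev_id 0
 * produces the name "rbd0", register_blkdev() hands back a
 * dynamically allocated major number, and the disk appears as
 * /dev/rbd0, with up to RBD_MINORS_PER_MAJOR (256) minors available
 * for the device and its partitions.
 */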
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
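/*
 * Concretely (names illustrative, prefixes per rbd_types.h): a
 * format 1 image "foo" keeps its header in object "foo.rbd"
 * (RBD_SUFFIX), while a format 2 image with id "10074b0dc51d" keeps
 * it in "rbd_header.10074b0dc51d" (RBD_HEADER_PREFIX).
 */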
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev, true);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping) {
		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
		if (tmp)
			rbd_warn(rbd_dev, "unable to tear down "
					"watch request (%d)\n", tmp);
	}
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);

	return NULL;
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
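/*
 * For example (hypothetical chain): given rbd_dev -> p1 -> p2, the
 * inner loop walks out to p2, the parent with no grandparent, which
 * is released and detached from p1; the next iteration then
 * releases p1, leaving rbd_dev with no ancestry at all.
 */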
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;
	rbd_bus_del_dev(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, false);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
	ret = count;
done:
	mutex_unlock(&ctl_mutex);

	return ret;
}
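/*
 * Removal mirrors the add example earlier (device id illustrative):
 *
 *   # echo 2 > /sys/bus/rbd/remove
 *
 * unmaps /dev/rbd2, provided no one still holds it open; a nonzero
 * open_count fails the request with -EBUSY instead.
 */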
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");