/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
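/*
 * Illustrative note (not in the original source): with SECTOR_SHIFT 9,
 * SECTOR_SIZE is 512 bytes, so e.g. a 4096-byte request covers
 * 4096 >> SECTOR_SHIFT == 8 sectors, and a block-layer sector number is
 * converted to a byte offset by multiplying by SECTOR_SIZE.
 */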
/* It might be useful to have this defined elsewhere too */

#define	U64_MAX	((u64) (~0ULL))

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_MAX_SNAP_NAME_LEN	32
#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
#define RBD_MAX_OPT_LEN		1024

#define RBD_SNAP_HEAD_NAME	"-"

#define RBD_IMAGE_ID_LEN_MAX	64
#define RBD_OBJ_PREFIX_LEN_MAX	64

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
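/*
 * Illustrative derivation (not in the original source): each byte of an
 * int contributes at most log10(256) ~= 2.41 decimal digits, which the
 * formula over-approximates as 5/2 digits per byte, plus one character
 * for a leading '-'.  For a 4-byte int that gives (5 * 4) / 2 + 1 == 11,
 * exactly enough for "-2147483648".
 */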
#define RBD_READ_ONLY_DEFAULT		false

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These four fields never change for a given rbd image */

	/* The remaining fields need to be updated occasionally */
	struct ceph_snap_context *snapc;

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
	struct ceph_client	*client;

	struct list_head	node;

/*
 * a request completion status
 */
struct rbd_req_status {

/*
 * a collection of requests
 */
struct rbd_req_coll {
	struct rbd_req_status	status[0];

/*
 * a single io request
 */
	struct request		*rq;		/* blk layer request */
	struct bio		*bio;		/* cloned bio */
	struct page		**pages;	/* list of used pages */

	struct rbd_req_coll	*coll;

	struct list_head	node;

	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_options	rbd_opts;
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue lock */

	struct rbd_image_header	header;

	size_t			image_name_len;

	struct ceph_osd_event	*watch_event;
	struct ceph_osd_request	*watch_request;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* list of snapshots */
	struct list_head	snaps;

static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev);

static void rbd_dev_release(struct device *dev);
static void __rbd_remove_snap_dev(struct rbd_snap *snap);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),

static struct bus_type rbd_bus_type = {
	.bus_attrs	= rbd_bus_attrs,

static void rbd_root_dev_release(struct device *dev)

static struct device rbd_root_dev = {
	.release	= rbd_root_dev_release,

#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static struct device *rbd_get_dev(struct rbd_device *rbd_dev)
	return get_device(&rbd_dev->dev);

static void rbd_put_dev(struct rbd_device *rbd_dev)
	put_device(&rbd_dev->dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver);

static int rbd_open(struct block_device *bdev, fmode_t mode)
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)

	rbd_get_dev(rbd_dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);

static int rbd_release(struct gendisk *disk, fmode_t mode)
	struct rbd_device *rbd_dev = disk->private_data;

	rbd_put_dev(rbd_dev);

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.release		= rbd_release,

/*
 * Initialize an rbd client instance.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
	struct rbd_client *rbdc;

	dout("rbd_client_create\n");
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);

	dout("rbd_client_create created %p\n", rbdc);

	ceph_destroy_client(rbdc->client);
	mutex_unlock(&ctl_mutex);
	ceph_destroy_options(ceph_opts);

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
	struct rbd_client *client_node;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			kref_get(&client_node->kref);
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
	/* string args above */
	/* Boolean args above */

static match_table_t rbd_opts_tokens = {
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
static int parse_rbd_opts_token(char *c, void *private)
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
			pr_err("bad mount option arg (not int) "
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
		dout("got token %d\n", token);
		rbd_opts->read_only = true;
		rbd_opts->read_only = false;

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
			  size_t mon_addr_len, char *options)
	struct rbd_options *rbd_opts = &rbd_dev->rbd_opts;
	struct ceph_options *ceph_opts;
	struct rbd_client *rbdc;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	ceph_opts = ceph_parse_options(options, mon_addr,
					mon_addr + mon_addr_len,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(ceph_opts))
		return PTR_ERR(ceph_opts);

	rbdc = rbd_client_find(ceph_opts);
		/* using an existing client */
		ceph_destroy_options(ceph_opts);
		rbdc = rbd_client_create(ceph_opts);
			return PTR_ERR(rbdc);
	rbd_dev->rbd_client = rbdc;

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("rbd_release_client %p\n", rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_device *rbd_dev)
	kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
	rbd_dev->rbd_client = NULL;

/*
 * Destroy requests collection
 */
static void rbd_coll_release(struct kref *kref)
	struct rbd_req_coll *coll =
		container_of(kref, struct rbd_req_coll, kref);

	dout("rbd_coll_release %p\n", coll);

static bool rbd_image_format_valid(u32 image_format)
	return image_format == 1 || image_format == 2;

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
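/*
 * Illustrative check (not in the original source): with a snap_count of,
 * say, 4, the snapshot context needs sizeof(struct ceph_snap_context)
 * plus 4 * 8 bytes for the id array; whatever remains of SIZE_MAX after
 * subtracting those is the most the NUL-separated name block may occupy,
 * so a larger snap_names_len indicates a corrupt header.
 */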
/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				 struct rbd_image_header_ondisk *ondisk)
	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)
	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX)
		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)
		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)
		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);
		WARN_ON(ondisk->snap_names_len);
		header->snap_names = NULL;
		header->snap_sizes = NULL;

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);
	size = sizeof (struct ceph_snap_context);
	size += snap_count * sizeof (header->snapc->snaps[0]);
	header->snapc = kzalloc(size, GFP_KERNEL);

	atomic_set(&header->snapc->nref, 1);
	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	header->snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] =
			le64_to_cpu(ondisk->snaps[i].id);

	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;

static int snap_by_name(struct rbd_device *rbd_dev, const char *snap_name)
	struct rbd_snap *snap;

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!strcmp(snap_name, snap->name)) {
			rbd_dev->mapping.snap_id = snap->id;
			rbd_dev->mapping.size = snap->size;
			rbd_dev->mapping.features = snap->features;

static int rbd_dev_set_mapping(struct rbd_device *rbd_dev, char *snap_name)
	if (!memcmp(snap_name, RBD_SNAP_HEAD_NAME,
		    sizeof (RBD_SNAP_HEAD_NAME))) {
		rbd_dev->mapping.snap_id = CEPH_NOSNAP;
		rbd_dev->mapping.size = rbd_dev->header.image_size;
		rbd_dev->mapping.features = rbd_dev->header.features;
		rbd_dev->mapping.snap_exists = false;
		rbd_dev->mapping.read_only = rbd_dev->rbd_opts.read_only;
		ret = snap_by_name(rbd_dev, snap_name);
		rbd_dev->mapping.snap_exists = true;
		rbd_dev->mapping.read_only = true;
	rbd_dev->mapping.snap_name = snap_name;

static void rbd_header_free(struct rbd_image_header *header)
	kfree(header->object_prefix);
	header->object_prefix = NULL;
	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	ceph_put_snap_context(header->snapc);
	header->snapc = NULL;

static char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
	name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);

	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, RBD_MAX_SEG_NAME_LEN, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret >= RBD_MAX_SEG_NAME_LEN) {
		pr_err("error formatting segment name for #%llu (%d)\n",

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;
	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;
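/*
 * Illustrative example (not in the original source): with obj_order 22
 * (4 MiB objects) and a hypothetical object_prefix "rb.0.1", an image
 * offset of 0x00500000 falls in segment 0x00500000 >> 22 == 1, so the
 * object is named "rb.0.1.000000000001"; the offset within it is
 * 0x00500000 & (0x400000 - 1) == 0x100000, and a 4 MiB request starting
 * there is clamped to the 3 MiB remaining in that object.
 */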
static int rbd_get_num_segments(struct rbd_image_header *header,
	if (len - 1 > U64_MAX - ofs)

	start_seg = ofs >> header->obj_order;
	end_seg = (ofs + len - 1) >> header->obj_order;

	return end_seg - start_seg + 1;
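/*
 * Illustrative example (not in the original source): with obj_order 22,
 * ofs == 0x3fffff and len == 2 gives start_seg 0 and end_seg 1, i.e. a
 * two-byte request that straddles a 4 MiB boundary touches 2 segments.
 */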
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
	return 1 << header->obj_order;

static void bio_chain_put(struct bio *chain)
		chain = chain->bi_next;

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
		chain = chain->bi_next;

/*
 * bio_chain_clone - clone a chain of bios up to a certain length.
 * might return a bio_pair that will need to be released.
 */
static struct bio *bio_chain_clone(struct bio **old, struct bio **next,
				   struct bio_pair **bp,
				   int len, gfp_t gfpmask)
	struct bio *old_chain = *old;
	struct bio *new_chain = NULL;

		bio_pair_release(*bp);

	while (old_chain && (total < len)) {
		tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);

		gfpmask &= ~__GFP_WAIT;	/* can't wait after the first */

		if (total + old_chain->bi_size > len) {
			/*
			 * this split can only happen with a single-page bio,
			 * split_bio will BUG_ON if this is not the case
			 */
			dout("bio_chain_clone split! total=%d remaining=%d"
			     total, len - total, old_chain->bi_size);

			/* split the bio. We'll release it either in the next
			   call, or it will have to be released outside */
			bp = bio_split(old_chain, (len - total) / SECTOR_SIZE);

			__bio_clone(tmp, &bp->bio1);
			__bio_clone(tmp, old_chain);
			*next = old_chain->bi_next;

		old_chain = old_chain->bi_next;

		total += tmp->bi_size;

	rbd_assert(total == len);

	dout("bio_chain_clone with err\n");
	bio_chain_put(new_chain);

/*
 * helpers for osd request op vectors.
 */
static struct ceph_osd_req_op *rbd_create_rw_ops(int num_ops,
						 int opcode, u32 payload_len)
	struct ceph_osd_req_op *ops;

	ops = kzalloc(sizeof (*ops) * (num_ops + 1), GFP_NOIO);

	/*
	 * op extent offset and length will be set later on
	 * in calc_raw_layout()
	 */
	ops[0].payload_len = payload_len;

static void rbd_destroy_ops(struct ceph_osd_req_op *ops)

static void rbd_coll_end_req_index(struct request *rq,
				   struct rbd_req_coll *coll,
	struct request_queue *q;

	dout("rbd_coll_end_req_index %p index %d ret %d len %llu\n",
	     coll, index, ret, (unsigned long long) len);

		blk_end_request(rq, ret, len);

	spin_lock_irq(q->queue_lock);
	coll->status[index].done = 1;
	coll->status[index].rc = ret;
	coll->status[index].bytes = len;
	max = min = coll->num_done;
	while (max < coll->total && coll->status[max].done)

	for (i = min; i < max; i++) {
		__blk_end_request(rq, coll->status[i].rc,
				  coll->status[i].bytes);
		kref_put(&coll->kref, rbd_coll_release);
	spin_unlock_irq(q->queue_lock);

static void rbd_coll_end_req(struct rbd_request *req,
	rbd_coll_end_req_index(req->rq, req->coll, req->coll_index, ret, len);
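/*
 * Illustrative walk-through (not in the original source): if a request
 * was split into 3 segment ops that complete in the order 1, 2, 0, the
 * first two completions only mark their status[] entries done; when
 * index 0 completes, num_done is still 0 and the while loop advances
 * max past all three done entries, so the byte ranges are ended on the
 * block request strictly in order 0, 1, 2.
 */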
/*
 * Send ceph osd request
 */
static int rbd_do_request(struct request *rq,
			  struct rbd_device *rbd_dev,
			  struct ceph_snap_context *snapc,
			  const char *object_name, u64 ofs, u64 len,
			  struct ceph_osd_req_op *ops,
			  struct rbd_req_coll *coll,
			  void (*rbd_cb)(struct ceph_osd_request *req,
					 struct ceph_msg *msg),
			  struct ceph_osd_request **linger_req,
	struct ceph_osd_request *req;
	struct ceph_file_layout *layout;

	struct timespec mtime = CURRENT_TIME;
	struct rbd_request *req_data;
	struct ceph_osd_request_head *reqhead;
	struct ceph_osd_client *osdc;

	req_data = kzalloc(sizeof(*req_data), GFP_NOIO);
		rbd_coll_end_req_index(rq, coll, coll_index,

	req_data->coll = coll;
	req_data->coll_index = coll_index;

	dout("rbd_do_request object_name=%s ofs=%llu len=%llu\n", object_name,
		(unsigned long long) ofs, (unsigned long long) len);

	osdc = &rbd_dev->rbd_client->client->osdc;
	req = ceph_osdc_alloc_request(osdc, flags, snapc, ops,
					false, GFP_NOIO, pages, bio);

	req->r_callback = rbd_cb;

	req_data->bio = bio;
	req_data->pages = pages;
	req_data->len = len;

	req->r_priv = req_data;

	reqhead = req->r_request->front.iov_base;
	reqhead->snapid = cpu_to_le64(CEPH_NOSNAP);

	strncpy(req->r_oid, object_name, sizeof(req->r_oid));
	req->r_oid_len = strlen(req->r_oid);

	layout = &req->r_file_layout;
	memset(layout, 0, sizeof(*layout));
	layout->fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	layout->fl_stripe_count = cpu_to_le32(1);
	layout->fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	layout->fl_pg_pool = cpu_to_le32(rbd_dev->pool_id);
	ret = ceph_calc_raw_layout(osdc, layout, snapid, ofs, &len, &bno,
	rbd_assert(ret == 0);

	ceph_osdc_build_request(req, ofs, &len,
				req->r_oid, req->r_oid_len);

		ceph_osdc_set_request_linger(osdc, req);

	ret = ceph_osdc_start_request(osdc, req, false);

		ret = ceph_osdc_wait_request(osdc, req);
			*ver = le64_to_cpu(req->r_reassert_version.version);
		dout("reassert_ver=%llu\n",
			(unsigned long long)
				le64_to_cpu(req->r_reassert_version.version));
		ceph_osdc_put_request(req);

	bio_chain_put(req_data->bio);
	ceph_osdc_put_request(req);
	rbd_coll_end_req(req_data, ret, len);

/*
 * Ceph osd op callback
 */
static void rbd_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
	struct rbd_request *req_data = req->r_priv;
	struct ceph_osd_reply_head *replyhead;
	struct ceph_osd_op *op;

	replyhead = msg->front.iov_base;
	WARN_ON(le32_to_cpu(replyhead->num_ops) == 0);
	op = (void *)(replyhead + 1);
	rc = le32_to_cpu(replyhead->result);
	bytes = le64_to_cpu(op->extent.length);
	read_op = (le16_to_cpu(op->op) == CEPH_OSD_OP_READ);

	dout("rbd_req_cb bytes=%llu readop=%d rc=%d\n",
		(unsigned long long) bytes, read_op, (int) rc);

	if (rc == -ENOENT && read_op) {
		zero_bio_chain(req_data->bio, 0);
	} else if (rc == 0 && read_op && bytes < req_data->len) {
		zero_bio_chain(req_data->bio, bytes);
		bytes = req_data->len;

	rbd_coll_end_req(req_data, rc, bytes);

		bio_chain_put(req_data->bio);
	ceph_osdc_put_request(req);
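/*
 * Illustrative note (not in the original source): a read of an object
 * that was never written returns -ENOENT, which represents a hole in a
 * sparse image rather than an error, so the whole bio chain is
 * zero-filled and the read succeeds; a short read likewise has its tail
 * zeroed and is reported at full length.
 */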
static void rbd_simple_req_cb(struct ceph_osd_request *req, struct ceph_msg *msg)
	ceph_osdc_put_request(req);

/*
 * Do a synchronous ceph osd operation
 */
static int rbd_req_sync_op(struct rbd_device *rbd_dev,
			   struct ceph_snap_context *snapc,
			   struct ceph_osd_req_op *ops,
			   const char *object_name,
			   u64 ofs, u64 inbound_size,
			   struct ceph_osd_request **linger_req,
	struct page **pages;

	rbd_assert(ops != NULL);

	num_pages = calc_pages_for(ofs, inbound_size);
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		return PTR_ERR(pages);

	ret = rbd_do_request(NULL, rbd_dev, snapc, snapid,
			     object_name, ofs, inbound_size, NULL,

	if ((flags & CEPH_OSD_FLAG_READ) && inbound)
		ret = ceph_copy_from_page_vector(pages, inbound, ofs, ret);

	ceph_release_page_vector(pages, num_pages);

/*
 * Do an asynchronous ceph osd operation
 */
static int rbd_do_op(struct request *rq,
		     struct rbd_device *rbd_dev,
		     struct ceph_snap_context *snapc,
		     int opcode, int flags,
		     struct rbd_req_coll *coll,
	struct ceph_osd_req_op *ops;

	seg_name = rbd_segment_name(rbd_dev, ofs);
	seg_len = rbd_segment_length(rbd_dev, ofs, len);
	seg_ofs = rbd_segment_offset(rbd_dev, ofs);

	payload_len = (flags & CEPH_OSD_FLAG_WRITE ? seg_len : 0);

	ops = rbd_create_rw_ops(1, opcode, payload_len);

	/* we've taken care of segment sizes earlier when we
	   cloned the bios. We should never have a segment
	   truncated at this point */
	rbd_assert(seg_len == len);

	ret = rbd_do_request(rq, rbd_dev, snapc, snapid,
			     seg_name, seg_ofs, seg_len,
			     rbd_req_cb, 0, NULL);

	rbd_destroy_ops(ops);

/*
 * Request async osd write
 */
static int rbd_req_write(struct request *rq,
			 struct rbd_device *rbd_dev,
			 struct ceph_snap_context *snapc,
			 struct rbd_req_coll *coll,
	return rbd_do_op(rq, rbd_dev, snapc, CEPH_NOSNAP,
			 CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
			 ofs, len, bio, coll, coll_index);

/*
 * Request async osd read
 */
static int rbd_req_read(struct request *rq,
			struct rbd_device *rbd_dev,
			struct rbd_req_coll *coll,
	return rbd_do_op(rq, rbd_dev, NULL,
			 ofs, len, bio, coll, coll_index);

/*
 * Request sync osd read
 */
static int rbd_req_sync_read(struct rbd_device *rbd_dev,
			     const char *object_name,
	struct ceph_osd_req_op *ops;

	ops = rbd_create_rw_ops(1, CEPH_OSD_OP_READ, 0);

	ret = rbd_req_sync_op(rbd_dev, NULL,
			      ops, object_name, ofs, len, buf, NULL, ver);
	rbd_destroy_ops(ops);
/*
 * Acknowledge a notification received on a watched object
 */
static int rbd_req_sync_notify_ack(struct rbd_device *rbd_dev,
	struct ceph_osd_req_op *ops;

	ops = rbd_create_rw_ops(1, CEPH_OSD_OP_NOTIFY_ACK, 0);

	ops[0].watch.ver = cpu_to_le64(ver);
	ops[0].watch.cookie = notify_id;
	ops[0].watch.flag = 0;

	ret = rbd_do_request(NULL, rbd_dev, NULL, CEPH_NOSNAP,
			     rbd_dev->header_name, 0, 0, NULL,
			     rbd_simple_req_cb, 0, NULL);

	rbd_destroy_ops(ops);
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
	struct rbd_device *rbd_dev = (struct rbd_device *)data;

	dout("rbd_watch_cb %s notify_id=%llu opcode=%u\n",
		rbd_dev->header_name, (unsigned long long) notify_id,
		(unsigned int) opcode);
	rc = rbd_dev_refresh(rbd_dev, &hver);
		pr_warning(RBD_DRV_NAME "%d got notification but failed to "
			   "update snaps: %d\n", rbd_dev->major, rc);
	rbd_req_sync_notify_ack(rbd_dev, hver, notify_id);

/*
 * Request sync osd watch
 */
static int rbd_req_sync_watch(struct rbd_device *rbd_dev)
	struct ceph_osd_req_op *ops;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;

	ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);

	ret = ceph_osdc_create_event(osdc, rbd_watch_cb, 0,
				     (void *)rbd_dev, &rbd_dev->watch_event);

	ops[0].watch.ver = cpu_to_le64(rbd_dev->header.obj_version);
	ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
	ops[0].watch.flag = 1;

	ret = rbd_req_sync_op(rbd_dev, NULL,
			      CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
			      rbd_dev->header_name,
			      &rbd_dev->watch_request, NULL);

	rbd_destroy_ops(ops);

	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
	rbd_destroy_ops(ops);

/*
 * Request sync osd unwatch
 */
static int rbd_req_sync_unwatch(struct rbd_device *rbd_dev)
	struct ceph_osd_req_op *ops;

	ops = rbd_create_rw_ops(1, CEPH_OSD_OP_WATCH, 0);

	ops[0].watch.ver = 0;
	ops[0].watch.cookie = cpu_to_le64(rbd_dev->watch_event->cookie);
	ops[0].watch.flag = 0;

	ret = rbd_req_sync_op(rbd_dev, NULL,
			      CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
			      rbd_dev->header_name,
			      0, 0, NULL, NULL, NULL);

	rbd_destroy_ops(ops);
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;

/*
 * Synchronous osd object method call
 */
static int rbd_req_sync_exec(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const char *outbound,
			     size_t outbound_size,
			     size_t inbound_size,
	struct ceph_osd_req_op *ops;
	int class_name_len = strlen(class_name);
	int method_name_len = strlen(method_name);

	/*
	 * Any input parameters required by the method we're calling
	 * will be sent along with the class and method names as
	 * part of the message payload.  That data and its size are
	 * supplied via the indata and indata_len fields (named from
	 * the perspective of the server side) in the OSD request
	 * operation.
	 */
	payload_size = class_name_len + method_name_len + outbound_size;
	ops = rbd_create_rw_ops(1, CEPH_OSD_OP_CALL, payload_size);

	ops[0].cls.class_name = class_name;
	ops[0].cls.class_len = (__u8) class_name_len;
	ops[0].cls.method_name = method_name;
	ops[0].cls.method_len = (__u8) method_name_len;
	ops[0].cls.argc = 0;
	ops[0].cls.indata = outbound;
	ops[0].cls.indata_len = outbound_size;

	ret = rbd_req_sync_op(rbd_dev, NULL,
			      object_name, 0, inbound_size, inbound,

	rbd_destroy_ops(ops);

	dout("cls_exec returned %d\n", ret);
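/*
 * Illustrative call (not in the original source): the format 2 helpers
 * below use this to invoke methods of the "rbd" object class, e.g.
 *
 *	ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
 *				"rbd", "get_size",
 *				(char *) &snapid, sizeof (snapid),
 *				(char *) &size_buf, sizeof (size_buf),
 *				CEPH_OSD_FLAG_READ, NULL);
 *
 * where the snapshot id is the method's input and the reply buffer
 * receives its encoded output.
 */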
static struct rbd_req_coll *rbd_alloc_coll(int num_reqs)
	struct rbd_req_coll *coll =
			kzalloc(sizeof(struct rbd_req_coll) +
				sizeof(struct rbd_req_status) * num_reqs,

	coll->total = num_reqs;
	kref_init(&coll->kref);
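/*
 * Illustrative note (not in the original source): status[0] in
 * struct rbd_req_coll is a zero-length trailing array, so a collection
 * for 3 requests is a single allocation of
 * sizeof(struct rbd_req_coll) + 3 * sizeof(struct rbd_req_status)
 * bytes, with status[0..2] usable.
 */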
/*
 * block device queue callback
 */
static void rbd_rq_fn(struct request_queue *q)
	struct rbd_device *rbd_dev = q->queuedata;
	struct bio_pair *bp = NULL;

	while ((rq = blk_fetch_request(q))) {
		struct bio *rq_bio, *next_bio = NULL;
		int num_segs, cur_seg = 0;
		struct rbd_req_coll *coll;
		struct ceph_snap_context *snapc;

		dout("fetched request\n");

		/* filter out block requests we don't understand */
		if ((rq->cmd_type != REQ_TYPE_FS)) {
			__blk_end_request_all(rq, 0);

		/* deduce our operation (read, write) */
		do_write = (rq_data_dir(rq) == WRITE);

		size = blk_rq_bytes(rq);
		ofs = blk_rq_pos(rq) * SECTOR_SIZE;

		if (do_write && rbd_dev->mapping.read_only) {
			__blk_end_request_all(rq, -EROFS);

		spin_unlock_irq(q->queue_lock);

		down_read(&rbd_dev->header_rwsem);

		if (rbd_dev->mapping.snap_id != CEPH_NOSNAP &&
				!rbd_dev->mapping.snap_exists) {
			up_read(&rbd_dev->header_rwsem);
			dout("request for non-existent snapshot");
			spin_lock_irq(q->queue_lock);
			__blk_end_request_all(rq, -ENXIO);

		snapc = ceph_get_snap_context(rbd_dev->header.snapc);

		up_read(&rbd_dev->header_rwsem);

		dout("%s 0x%x bytes at 0x%llx\n",
		     do_write ? "write" : "read",
		     size, (unsigned long long) blk_rq_pos(rq) * SECTOR_SIZE);

		num_segs = rbd_get_num_segments(&rbd_dev->header, ofs, size);
		if (num_segs <= 0) {
			spin_lock_irq(q->queue_lock);
			__blk_end_request_all(rq, num_segs);
			ceph_put_snap_context(snapc);
		coll = rbd_alloc_coll(num_segs);
			spin_lock_irq(q->queue_lock);
			__blk_end_request_all(rq, -ENOMEM);
			ceph_put_snap_context(snapc);

			/* a bio clone to be passed down to OSD req */
			dout("rq->bio->bi_vcnt=%hu\n", rq->bio->bi_vcnt);
			op_size = rbd_segment_length(rbd_dev, ofs, size);
			kref_get(&coll->kref);
			bio = bio_chain_clone(&rq_bio, &next_bio, &bp,
					      op_size, GFP_ATOMIC);
				rbd_coll_end_req_index(rq, coll, cur_seg,

			/* init OSD command: write or read */
				rbd_req_write(rq, rbd_dev,
				rbd_req_read(rq, rbd_dev,
					     rbd_dev->mapping.snap_id,

		kref_put(&coll->kref, rbd_coll_release);

			bio_pair_release(bp);
		spin_lock_irq(q->queue_lock);

		ceph_put_snap_context(snapc);
/*
 * a queue callback. Makes sure that we don't create a bio that spans across
 * multiple osd objects. One exception would be single-page bios, which
 * we handle later at bio_chain_clone()
 */
static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
	struct rbd_device *rbd_dev = q->queuedata;
	unsigned int chunk_sectors;
	unsigned int bio_sectors;

	chunk_sectors = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
	sector = bmd->bi_sector + get_start_sect(bmd->bi_bdev);
	bio_sectors = bmd->bi_size >> SECTOR_SHIFT;

	max = (chunk_sectors - ((sector & (chunk_sectors - 1))
				+ bio_sectors)) << SECTOR_SHIFT;
		max = 0; /* bio_add cannot handle a negative return */
	if (max <= bvec->bv_len && bio_sectors == 0)
		return bvec->bv_len;
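/*
 * Illustrative example (not in the original source): with obj_order 22
 * an object spans 8192 sectors.  For a bio that starts 7680 sectors
 * into an object and already carries 256 sectors, max becomes
 * (8192 - (7680 + 256)) << SECTOR_SHIFT == 131072, i.e. at most another
 * 128 KiB may be merged before the object boundary is reached.
 */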
static void rbd_free_disk(struct rbd_device *rbd_dev)
	struct gendisk *disk = rbd_dev->disk;

	if (disk->flags & GENHD_FL_UP)
	blk_cleanup_queue(disk->queue);

/*
 * Read the complete header for the given rbd device.
 *
 * Returns a pointer to a dynamically-allocated buffer containing
 * the complete and validated header.  Caller can pass the address
 * of a variable that will be filled in with the version of the
 * header object at the time it was read.
 *
 * Returns a pointer-coded errno if a failure occurs.
 */
static struct rbd_image_header_ondisk *
rbd_dev_v1_header_read(struct rbd_device *rbd_dev, u64 *version)
	struct rbd_image_header_ondisk *ondisk = NULL;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		ondisk = kmalloc(size, GFP_KERNEL);
			return ERR_PTR(-ENOMEM);

		ret = rbd_req_sync_read(rbd_dev, CEPH_NOSNAP,
					rbd_dev->header_name,
					(char *) ondisk, version);

		if (WARN_ON((size_t) ret < size)) {
			pr_warning("short header read for image %s"
					" (want %zd got %d)\n",
				rbd_dev->image_name, size, ret);
		if (!rbd_dev_ondisk_valid(ondisk)) {
			pr_warning("invalid header for image %s\n",
				rbd_dev->image_name);

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	return ERR_PTR(ret);
/*
 * reload the on-disk header
 */
static int rbd_read_header(struct rbd_device *rbd_dev,
			   struct rbd_image_header *header)
	struct rbd_image_header_ondisk *ondisk;

	ondisk = rbd_dev_v1_header_read(rbd_dev, &ver);
		return PTR_ERR(ondisk);
	ret = rbd_header_from_disk(header, ondisk);
		header->obj_version = ver;

static void __rbd_remove_all_snaps(struct rbd_device *rbd_dev)
	struct rbd_snap *snap;
	struct rbd_snap *next;

	list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node)
		__rbd_remove_snap_dev(snap);

static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
	if (rbd_dev->mapping.snap_id != CEPH_NOSNAP)

	size = (sector_t) rbd_dev->header.image_size / SECTOR_SIZE;
	dout("setting size to %llu sectors", (unsigned long long) size);
	rbd_dev->mapping.size = (u64) size;
	set_capacity(rbd_dev->disk, size);

/*
 * only read the first part of the ondisk header, without the snaps info
 */
static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev, u64 *hver)
	struct rbd_image_header h;

	ret = rbd_read_header(rbd_dev, &h);

	down_write(&rbd_dev->header_rwsem);

	/* Update image size, and check for resize of mapped image */
	rbd_dev->header.image_size = h.image_size;
	rbd_update_mapping_size(rbd_dev);

	/* rbd_dev->header.object_prefix shouldn't change */
	kfree(rbd_dev->header.snap_sizes);
	kfree(rbd_dev->header.snap_names);
	/* osd requests may still refer to snapc */
	ceph_put_snap_context(rbd_dev->header.snapc);

		*hver = h.obj_version;
	rbd_dev->header.obj_version = h.obj_version;
	rbd_dev->header.image_size = h.image_size;
	rbd_dev->header.snapc = h.snapc;
	rbd_dev->header.snap_names = h.snap_names;
	rbd_dev->header.snap_sizes = h.snap_sizes;
	/* Free the extra copy of the object prefix */
	WARN_ON(strcmp(rbd_dev->header.object_prefix, h.object_prefix));
	kfree(h.object_prefix);

	ret = rbd_dev_snaps_update(rbd_dev);
		ret = rbd_dev_snaps_register(rbd_dev);

	up_write(&rbd_dev->header_rwsem);

static int rbd_dev_refresh(struct rbd_device *rbd_dev, u64 *hver)
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_refresh(rbd_dev, hver);
		ret = rbd_dev_v2_refresh(rbd_dev, hver);
	mutex_unlock(&ctl_mutex);

static int rbd_init_disk(struct rbd_device *rbd_dev)
	struct gendisk *disk;
	struct request_queue *q;

	/* create gendisk info */
	disk = alloc_disk(RBD_MINORS_PER_MAJOR);

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
	disk->major = rbd_dev->major;
	disk->first_minor = 0;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	q = blk_init_queue(rbd_rq_fn, &rbd_dev->lock);

	/* We use the default size, but let's be explicit about it. */
	blk_queue_physical_block_size(q, SECTOR_SIZE);

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	blk_queue_merge_bvec(q, rbd_merge_bvec);

	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
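/*
 * Illustrative note (not in the original source): for a typical
 * obj_order of 22, segment_size is 4 MiB, so the queue advertises a
 * 4 MiB minimum/optimal I/O size, a 4 MiB maximum segment, and
 * 4 MiB / 512 == 8192 max hardware sectors per request.
 */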
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
	return container_of(dev, struct rbd_device, dev);

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	down_read(&rbd_dev->header_rwsem);
	size = get_capacity(rbd_dev->disk);
	up_read(&rbd_dev->header_rwsem);

	return sprintf(buf, "%llu\n", (unsigned long long) size * SECTOR_SIZE);

/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long) rbd_dev->mapping.features);

static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->major);

static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
			ceph_client_id(rbd_dev->rbd_client->client));

static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->pool_name);

static ssize_t rbd_pool_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->pool_id);

static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->image_name);

static ssize_t rbd_image_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->image_id);

/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->mapping.snap_name);

static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	ret = rbd_dev_refresh(rbd_dev, NULL);

	return ret < 0 ? ret : size;

static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);

static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_refresh.attr,

static struct attribute_group rbd_attr_group = {

static const struct attribute_group *rbd_attr_groups[] = {

static void rbd_sysfs_dev_release(struct device *dev)

static struct device_type rbd_device_type = {
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,

static ssize_t rbd_snap_size_show(struct device *dev,
				  struct device_attribute *attr,
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

	return sprintf(buf, "%llu\n", (unsigned long long)snap->size);

static ssize_t rbd_snap_id_show(struct device *dev,
				struct device_attribute *attr,
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

	return sprintf(buf, "%llu\n", (unsigned long long)snap->id);

static ssize_t rbd_snap_features_show(struct device *dev,
				      struct device_attribute *attr,
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

	return sprintf(buf, "0x%016llx\n",
			(unsigned long long) snap->features);

static DEVICE_ATTR(snap_size, S_IRUGO, rbd_snap_size_show, NULL);
static DEVICE_ATTR(snap_id, S_IRUGO, rbd_snap_id_show, NULL);
static DEVICE_ATTR(snap_features, S_IRUGO, rbd_snap_features_show, NULL);

static struct attribute *rbd_snap_attrs[] = {
	&dev_attr_snap_size.attr,
	&dev_attr_snap_id.attr,
	&dev_attr_snap_features.attr,

static struct attribute_group rbd_snap_attr_group = {
	.attrs = rbd_snap_attrs,

static void rbd_snap_dev_release(struct device *dev)
	struct rbd_snap *snap = container_of(dev, struct rbd_snap, dev);

static const struct attribute_group *rbd_snap_attr_groups[] = {
	&rbd_snap_attr_group,

static struct device_type rbd_snap_device_type = {
	.groups		= rbd_snap_attr_groups,
	.release	= rbd_snap_dev_release,

static bool rbd_snap_registered(struct rbd_snap *snap)
	bool ret = snap->dev.type == &rbd_snap_device_type;
	bool reg = device_is_registered(&snap->dev);

	rbd_assert(!ret ^ reg);

static void __rbd_remove_snap_dev(struct rbd_snap *snap)
	list_del(&snap->node);
	if (device_is_registered(&snap->dev))
		device_unregister(&snap->dev);

static int rbd_register_snap_dev(struct rbd_snap *snap,
				 struct device *parent)
	struct device *dev = &snap->dev;

	dev->type = &rbd_snap_device_type;
	dev->parent = parent;
	dev->release = rbd_snap_dev_release;
	dev_set_name(dev, "snap_%s", snap->name);
	dout("%s: registering device for snapshot %s\n", __func__, snap->name);

	ret = device_register(dev);

static struct rbd_snap *__rbd_add_snap_dev(struct rbd_device *rbd_dev,
					   const char *snap_name,
					   u64 snap_id, u64 snap_size,
	struct rbd_snap *snap;

	snap = kzalloc(sizeof (*snap), GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	snap->name = kstrdup(snap_name, GFP_KERNEL);

	snap->size = snap_size;
	snap->features = snap_features;

	return ERR_PTR(ret);

static char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
				  u64 *snap_size, u64 *snap_features)
	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	*snap_size = rbd_dev->header.snap_sizes[which];
	*snap_features = 0;	/* No features for v1 */

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
		snap_name += strlen(snap_name) + 1;
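/*
 * Illustrative note (not in the original source): v1 snapshot names are
 * stored back to back as NUL-terminated strings, e.g.
 * "snap1\0snap2\0snap3\0", so the name for entry N is reached by
 * skipping strlen() + 1 bytes N times from the start of the block.
 */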
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size)
	__le64 snapid = cpu_to_le64(snap_id);
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
				(char *) &snapid, sizeof (snapid),
				(char *) &size_buf, sizeof (size_buf),
				CEPH_OSD_FLAG_READ, NULL);
	dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);

	*order = size_buf.order;
	*snap_size = le64_to_cpu(size_buf.size);

	dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
		(unsigned long long) snap_id, (unsigned int) *order,
		(unsigned long long) *snap_size);

static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);

static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);

	ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix",
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX,
				CEPH_OSD_FLAG_READ, NULL);
	dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);

	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + RBD_OBJ_PREFIX_LEN_MAX,
	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
		dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);

static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
	__le64 snapid = cpu_to_le64(snap_id);
	} features_buf = { 0 };

	ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				(char *) &snapid, sizeof (snapid),
				(char *) &features_buf, sizeof (features_buf),
				CEPH_OSD_FLAG_READ, NULL);
	dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long) snap_id,
		(unsigned long long) *snap_features,
		(unsigned long long) le64_to_cpu(features_buf.incompat));

static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
					 &rbd_dev->header.features);

static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev, u64 *ver)
	struct ceph_snap_context *snapc;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);

	ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext",
				CEPH_OSD_FLAG_READ, ver);
	dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);

	end = (char *) reply_buf + size;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))

	size = sizeof (struct ceph_snap_context) +
			snap_count * sizeof (snapc->snaps[0]);
	snapc = kmalloc(size, GFP_KERNEL);

	atomic_set(&snapc->nref, 1);
	snapc->num_snaps = snap_count;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	rbd_dev->header.snapc = snapc;

	dout(" snap context seq = %llu, snap_count = %u\n",
		(unsigned long long) seq, (unsigned int) snap_count);
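/*
 * Illustrative layout (not in the original source): the reply decoded
 * above is little-endian and packed as
 *
 *	__le64 seq;                  maximum snapshot id
 *	__le32 snap_count;
 *	__le64 snaps[snap_count];    ids, highest first
 *
 * so the buffer sized for RBD_MAX_SNAP_COUNT ids needs
 * 8 + 4 + 510 * 8 == 4092 bytes, just under the 4 KB mentioned at the
 * RBD_MAX_SNAP_COUNT definition.
 */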
static char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
	size_t snap_name_len;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
		return ERR_PTR(-ENOMEM);

	snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
	ret = rbd_req_sync_exec(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				(char *) &snap_id, sizeof (snap_id),
				CEPH_OSD_FLAG_READ, NULL);
	dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);

	end = (char *) reply_buf + size;
	snap_name = ceph_extract_encoded_string(&p, end, &snap_name_len,
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);

	dout(" snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long) le64_to_cpu(snap_id), snap_name);

	return ERR_PTR(ret);

static char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
				  u64 *snap_size, u64 *snap_features)
	snap_id = rbd_dev->header.snapc->snaps[which];
	ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, &order, snap_size);
		return ERR_PTR(ret);
	ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, snap_features);
		return ERR_PTR(ret);

	return rbd_dev_v2_snap_name(rbd_dev, which);

static char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
			       u64 *snap_size, u64 *snap_features)
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_info(rbd_dev, which,
					    snap_size, snap_features);
	if (rbd_dev->image_format == 2)
		return rbd_dev_v2_snap_info(rbd_dev, which,
					    snap_size, snap_features);
	return ERR_PTR(-EINVAL);

static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev, u64 *hver)
	down_write(&rbd_dev->header_rwsem);

	/* Grab old order first, to see if it changes */
	obj_order = rbd_dev->header.obj_order;
	ret = rbd_dev_v2_image_size(rbd_dev);
	if (rbd_dev->header.obj_order != obj_order) {
	rbd_update_mapping_size(rbd_dev);

	ret = rbd_dev_v2_snap_context(rbd_dev, hver);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);
	ret = rbd_dev_snaps_update(rbd_dev);
	dout("rbd_dev_snaps_update returned %d\n", ret);
	ret = rbd_dev_snaps_register(rbd_dev);
	dout("rbd_dev_snaps_register returned %d\n", ret);

	up_write(&rbd_dev->header_rwsem);

/*
 * Scan the rbd device's current snapshot list and compare it to the
 * newly-received snapshot context.  Remove any existing snapshots
 * not present in the new snapshot context.  Add a new snapshot for
 * any snapshots in the snapshot context not in the current list.
 * And verify there are no changes to snapshots we already know
 * about.
 *
 * Assumes the snapshots in the snapshot context are sorted by
 * snapshot id, highest id first.  (Snapshots in the rbd_dev's list
 * are also maintained in that order.)
 */
static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const u32 snap_count = snapc->num_snaps;
	struct list_head *head = &rbd_dev->snaps;
	struct list_head *links = head->next;

	dout("%s: snap count is %u\n", __func__, (unsigned int) snap_count);
	while (index < snap_count || links != head) {
		struct rbd_snap *snap;
		u64 snap_features = 0;

		snap_id = index < snap_count ? snapc->snaps[index]
		snap = links != head ? list_entry(links, struct rbd_snap, node)
		rbd_assert(!snap || snap->id != CEPH_NOSNAP);

		if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
			struct list_head *next = links->next;

			/* Existing snapshot not in the new snap context */

			if (rbd_dev->mapping.snap_id == snap->id)
				rbd_dev->mapping.snap_exists = false;
			__rbd_remove_snap_dev(snap);
			dout("%ssnap id %llu has been removed\n",
				rbd_dev->mapping.snap_id == snap->id ?
				(unsigned long long) snap->id);

			/* Done with this list entry; advance */

		snap_name = rbd_dev_snap_info(rbd_dev, index,
					      &snap_size, &snap_features);
		if (IS_ERR(snap_name))
			return PTR_ERR(snap_name);
		dout("entry %u: snap_id = %llu\n", (unsigned int) index,
			(unsigned long long) snap_id);
		if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
			struct rbd_snap *new_snap;

			/* We haven't seen this snapshot before */

			new_snap = __rbd_add_snap_dev(rbd_dev, snap_name,
					snap_id, snap_size, snap_features);
			if (IS_ERR(new_snap)) {
				int err = PTR_ERR(new_snap);

				dout(" failed to add dev, error %d\n", err);

			/* New goes before existing, or at end of list */
			dout(" added dev%s\n", snap ? "" : " at end");
				list_add_tail(&new_snap->node, &snap->node);
				list_add_tail(&new_snap->node, head);
			/* Already have this one */

			dout(" already present\n");

			rbd_assert(snap->size == snap_size);
			rbd_assert(!strcmp(snap->name, snap_name));
			rbd_assert(snap->features == snap_features);

			/* Done with this list entry; advance */
			links = links->next;

		/* Advance to the next entry in the snapshot context */

	dout("%s: done\n", __func__);

/*
 * Scan the list of snapshots and register the devices for any that
 * have not already been registered.
 */
static int rbd_dev_snaps_register(struct rbd_device *rbd_dev)
	struct rbd_snap *snap;

	dout("%s called\n", __func__);
	if (WARN_ON(!device_is_registered(&rbd_dev->dev)))

	list_for_each_entry(snap, &rbd_dev->snaps, node) {
		if (!rbd_snap_registered(snap)) {
			ret = rbd_register_snap_dev(snap, &rbd_dev->dev);

	dout("%s: returning %d\n", __func__, ret);

static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	mutex_unlock(&ctl_mutex);

static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
	device_unregister(&rbd_dev->dev);

static int rbd_init_watch_dev(struct rbd_device *rbd_dev)
		ret = rbd_req_sync_watch(rbd_dev);
		if (ret == -ERANGE) {
			rc = rbd_dev_refresh(rbd_dev, NULL);
	} while (ret == -ERANGE);

static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.  The minimum rbd id is 1.
 */
static void rbd_dev_id_get(struct rbd_device *rbd_dev)
	rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);
	dout("rbd_dev %p given dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
	struct list_head *tmp;
	int rbd_id = rbd_dev->dev_id;

	rbd_assert(rbd_id > 0);

	dout("rbd_dev %p released dev id %llu\n", rbd_dev,
		(unsigned long long) rbd_dev->dev_id);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);

	/*
	 * If the id being "put" is not the current maximum, there
	 * is nothing special we need to do.
	 */
	if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
		spin_unlock(&rbd_dev_list_lock);

	/*
	 * We need to update the current maximum id.  Search the
	 * list to find out what it is.  We're more likely to find
	 * the maximum at the end, so search the list backward.
	 */
	list_for_each_prev(tmp, &rbd_dev_list) {
		struct rbd_device *rbd_dev;

		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id > max_id)
	spin_unlock(&rbd_dev_list_lock);

	/*
	 * The max id could have been updated by rbd_dev_id_get(), in
	 * which case it now accurately reflects the new maximum.
	 * Be careful not to overwrite the maximum value in that
	 * case.
	 */
	atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
	dout(" max dev id has been reset\n");

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
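/*
 * Illustrative usage (not in the original source): given
 * buf == "  pool image", next_token(&buf) leaves buf pointing at
 * "pool image" and returns 4 (the length of "pool"); callers such as
 * dup_token() then advance *buf past those 4 bytes themselves.
 */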
/*
 * Finds the next token in *buf, and if the provided token buffer is
 * big enough, copies the found token into it.  The result, if
 * copied, is guaranteed to be terminated with '\0'.  Note that *buf
 * must be terminated with '\0' on entry.
 *
 * Returns the length of the token found (not including the '\0').
 * Return value will be 0 if no token is found, and it will be >=
 * token_size if the token would not fit.
 *
 * The *buf pointer will be updated to point beyond the end of the
 * found token.  Note that this occurs even if the token buffer is
 * too small to hold it.
 */
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}
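/*
 * Tokenizer sketch with hypothetical input (for illustration only):
 * with buf pointing at "  rbd foo", next_token(&buf) advances buf
 * past the leading spaces and returns 3, leaving buf at "rbd foo".
 * copy_token(&buf, token, token_size) with token_size >= 4 then
 * copies "rbd" into token, NUL-terminates it, and leaves buf at
 * " foo", ready for the next call.
 */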
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmalloc(len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;

	memcpy(dup, *buf, len);
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
/*
 * This fills in the pool_name, image_name, and image_name_len
 * fields of the given rbd_dev, based on the list of monitor
 * addresses and other options provided via /sys/bus/rbd/add.
 * Returns a pointer to a dynamically-allocated copy of the
 * snapshot name to map if successful, or a pointer-coded error
 * otherwise.
 *
 * Note: rbd_dev is assumed to have been initially zero-filled.
 */
static char *rbd_add_parse_args(struct rbd_device *rbd_dev,
				const char *buf,
				const char **mon_addrs,
				size_t *mon_addrs_size,
				char *options,
				size_t options_size)
{
	size_t len;
	char *err_ptr = ERR_PTR(-EINVAL);
	char *snap_name;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len)
		return err_ptr;
	*mon_addrs_size = len + 1;
	*mon_addrs = buf;

	buf += len;

	len = copy_token(&buf, options, options_size);
	if (!len || len >= options_size)
		return err_ptr;

	err_ptr = ERR_PTR(-ENOMEM);
	rbd_dev->pool_name = dup_token(&buf, NULL);
	if (!rbd_dev->pool_name)
		goto out_err;

	rbd_dev->image_name = dup_token(&buf, &rbd_dev->image_name_len);
	if (!rbd_dev->image_name)
		goto out_err;

	/* Snapshot name is optional */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	}
	snap_name = kmalloc(len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_err;
	memcpy(snap_name, buf, len);
	*(snap_name + len) = '\0';

	dout("    SNAP_NAME is <%s>, len is %zd\n", snap_name, len);

	return snap_name;

out_err:
	kfree(rbd_dev->image_name);
	rbd_dev->image_name = NULL;
	rbd_dev->image_name_len = 0;
	kfree(rbd_dev->pool_name);
	rbd_dev->pool_name = NULL;

	return err_ptr;
}
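/*
 * Example add string (hypothetical values, shown for illustration):
 * writing
 *
 *	1.2.3.4:6789 name=admin rbd myimage mysnap
 *
 * to /sys/bus/rbd/add parses out mon_addrs "1.2.3.4:6789", options
 * "name=admin", pool_name "rbd", image_name "myimage", and returns
 * a copy of "mysnap".  If the trailing snapshot token is omitted,
 * the returned snap_name defaults to RBD_SNAP_HEAD_NAME, meaning
 * the base (head) image is mapped.
 */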
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	void *p;

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + rbd_dev->image_name_len;
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */
	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	ret = rbd_req_sync_exec(rbd_dev, object_name,
				"rbd", "get_id",
				NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX,
				CEPH_OSD_FLAG_READ, NULL);
	dout("%s: rbd_req_sync_exec returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;
	ret = 0;	/* rbd_req_sync_exec() can return positive */

	p = response;
	rbd_dev->image_id = ceph_extract_encoded_string(&p,
						p + RBD_IMAGE_ID_LEN_MAX,
						&rbd_dev->image_id_len,
						GFP_NOIO);
	if (IS_ERR(rbd_dev->image_id)) {
		ret = PTR_ERR(rbd_dev->image_id);
		rbd_dev->image_id = NULL;
	} else {
		dout("image_id is %s\n", rbd_dev->image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
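/*
 * Naming sketch, assuming the usual value of RBD_ID_PREFIX from
 * rbd_types.h ("rbd_id."): a v2 image named "foo" is located via
 * the id object "rbd_id.foo", whose contents are a length-prefixed
 * string holding the image id (e.g. a hypothetical "1028ae8944a").
 * All other v2 object names are then derived from that id, never
 * from the user-visible name.
 */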
static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;

	/* Version 1 images have no id; empty string is used */
	rbd_dev->image_id = kstrdup("", GFP_KERNEL);
	if (!rbd_dev->image_id)
		return -ENOMEM;
	rbd_dev->image_id_len = 0;

	/* Record the header object name for this rbd image. */
	size = rbd_dev->image_name_len + sizeof (RBD_SUFFIX);
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name) {
		ret = -ENOMEM;
		goto out_err;
	}
	sprintf(rbd_dev->header_name, "%s%s", rbd_dev->image_name, RBD_SUFFIX);

	/* Populate rbd image metadata */
	ret = rbd_read_header(rbd_dev, &rbd_dev->header);
	if (ret < 0)
		goto out_err;
	rbd_dev->image_format = 1;

	dout("discovered version 1 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;

out_err:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->image_id);
	rbd_dev->image_id = NULL;

	return ret;
}
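/*
 * Example, assuming the usual value of RBD_SUFFIX from rbd_types.h
 * (".rbd"): a format 1 image named "foo" keeps its on-disk metadata
 * in the header object "foo.rbd".
 */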
static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	u64 ver = 0;

	/*
	 * Image id was filled in by the caller.  Record the header
	 * object name for this rbd image.
	 */
	size = sizeof (RBD_HEADER_PREFIX) + rbd_dev->image_id_len;
	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;
	sprintf(rbd_dev->header_name, "%s%s",
		RBD_HEADER_PREFIX, rbd_dev->image_id);

	/* Get the size and object order for the image */
	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* Get the object prefix (a.k.a. block_name) for the image */
	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* Get the features for the image */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret < 0)
		goto out_err;

	/* crypto and compression type aren't (yet) supported for v2 images */
	rbd_dev->header.crypt_type = 0;
	rbd_dev->header.comp_type = 0;

	/* Get the snapshot context, plus the header version */
	ret = rbd_dev_v2_snap_context(rbd_dev, &ver);
	if (ret)
		goto out_err;
	rbd_dev->header.obj_version = ver;

	rbd_dev->image_format = 2;

	dout("discovered version 2 image, header name is %s\n",
		rbd_dev->header_name);

	return 0;

out_err:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
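/*
 * Example, assuming the usual value of RBD_HEADER_PREFIX from
 * rbd_types.h ("rbd_header."): a format 2 image whose id is the
 * hypothetical "1028ae8944a" keeps its metadata in the header
 * object "rbd_header.1028ae8944a".  Compare rbd_dev_v1_probe()
 * above, where the image name (not the id) forms the object name.
 */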
/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_probe(struct rbd_device *rbd_dev)
{
	int ret;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret)
		dout("probe failed, returning %d\n", ret);

	return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	char *options;
	struct rbd_device *rbd_dev = NULL;
	const char *mon_addrs = NULL;
	size_t mon_addrs_size = 0;
	struct ceph_osd_client *osdc;
	char *snap_name;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	options = kmalloc(count, GFP_KERNEL);
	if (!options)
		goto err_out_mem;
	rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		goto err_out_mem;

	/* static rbd_device initialization */
	spin_lock_init(&rbd_dev->lock);
	INIT_LIST_HEAD(&rbd_dev->node);
	INIT_LIST_HEAD(&rbd_dev->snaps);
	init_rwsem(&rbd_dev->header_rwsem);

	/* parse add command */
	snap_name = rbd_add_parse_args(rbd_dev, buf,
				&mon_addrs, &mon_addrs_size, options, count);
	if (IS_ERR(snap_name)) {
		rc = PTR_ERR(snap_name);
		goto err_out_mem;
	}

	rc = rbd_get_client(rbd_dev, mon_addrs, mon_addrs_size - 1, options);
	if (rc < 0)
		goto err_out_args;

	/* pick the pool */
	osdc = &rbd_dev->rbd_client->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, rbd_dev->pool_name);
	if (rc < 0)
		goto err_out_client;
	rbd_dev->pool_id = rc;

	rc = rbd_dev_probe(rbd_dev);
	if (rc < 0)
		goto err_out_client;

	/* no need to lock here, as rbd_dev is not registered yet */
	rc = rbd_dev_snaps_update(rbd_dev);
	if (rc)
		goto err_out_header;

	rc = rbd_dev_set_mapping(rbd_dev, snap_name);
	if (rc)
		goto err_out_header;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */
	rc = register_blkdev(0, rbd_dev->name);
	if (rc < 0)
		goto err_out_id;
	rbd_dev->major = rc;

	/* Set up the blkdev mapping. */
	rc = rbd_init_disk(rbd_dev);
	if (rc)
		goto err_out_blkdev;

	rc = rbd_bus_add_dev(rbd_dev);
	if (rc)
		goto err_out_disk;

	/*
	 * At this point cleanup in the event of an error is the job
	 * of the sysfs code (initiated by rbd_bus_del_dev()).
	 */
	down_write(&rbd_dev->header_rwsem);
	rc = rbd_dev_snaps_register(rbd_dev);
	up_write(&rbd_dev->header_rwsem);
	if (rc)
		goto err_out_bus;

	rc = rbd_init_watch_dev(rbd_dev);
	if (rc)
		goto err_out_bus;

	/* Everything's ready.  Announce the disk to the world. */
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	/* done with the options buffer; ceph_parse_options() keeps copies */
	kfree(options);

	return count;

err_out_bus:
	/* this will also clean up rest of rbd_dev stuff */
	rbd_bus_del_dev(rbd_dev);
	kfree(options);
	return rc;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
err_out_header:
	rbd_header_free(&rbd_dev->header);
err_out_client:
	kfree(rbd_dev->header_name);
	rbd_put_client(rbd_dev);
	kfree(rbd_dev->image_id);
err_out_args:
	kfree(rbd_dev->mapping.snap_name);
	kfree(rbd_dev->image_name);
	kfree(rbd_dev->pool_name);
err_out_mem:
	kfree(rbd_dev);
	kfree(options);

	dout("Error adding device %s\n", buf);
	module_put(THIS_MODULE);

	return (ssize_t) rc;
}
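/*
 * Usage sketch for the add interface (hypothetical monitor address
 * and image name; see Documentation/ABI/testing/sysfs-bus-rbd for
 * the authoritative format):
 *
 *	# echo "1.2.3.4:6789 name=admin rbd myimage" > /sys/bus/rbd/add
 *
 * On success a block device such as /dev/rbd0 appears, the assigned
 * id shows up under /sys/bus/rbd/devices/, and the pr_info() above
 * reports the mapped size.
 */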
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);

	return NULL;
}
static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->watch_request) {
		struct ceph_client *client = rbd_dev->rbd_client->client;

		ceph_osdc_unregister_linger_request(&client->osdc,
						    rbd_dev->watch_request);
	}
	if (rbd_dev->watch_event)
		rbd_req_sync_unwatch(rbd_dev);

	rbd_put_client(rbd_dev);

	/* clean up and free blkdev */
	rbd_free_disk(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);

	/* release allocated disk header fields */
	rbd_header_free(&rbd_dev->header);

	/* done with the id, and with the rbd_dev */
	kfree(rbd_dev->mapping.snap_name);
	kfree(rbd_dev->image_id);
	kfree(rbd_dev->header_name);
	kfree(rbd_dev->pool_name);
	kfree(rbd_dev->image_name);
	rbd_dev_id_put(rbd_dev);
	kfree(rbd_dev);

	/* release module ref */
	module_put(THIS_MODULE);
}
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	unsigned long ul;
	int target_id;
	ssize_t ret = count;
	int rc;

	rc = strict_strtoul(buf, 10, &ul);
	if (rc)
		return rc;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	__rbd_remove_all_snaps(rbd_dev);
	rbd_bus_del_dev(rbd_dev);

done:
	mutex_unlock(&ctl_mutex);

	return ret;
}
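/*
 * Usage sketch for the remove interface: write the numeric device
 * id (the N in "rbdN" and in /sys/bus/rbd/devices/N), e.g.
 *
 *	# echo 2 > /sys/bus/rbd/remove
 *
 * Note that this version does not refuse to remove a device that
 * is still open; teardown proceeds as soon as the id is found.
 */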
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
int __init rbd_init(void)
{
	int rc;

	rc = rbd_sysfs_init();
	if (rc)
		return rc;

	pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return 0;
}

void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");