/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"

#define RBD_DEBUG       /* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT    9
#define SECTOR_SIZE     (1ULL << SECTOR_SHIFT)

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}
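
/*
 * Illustrative sketch (not part of the driver): together these two
 * helpers behave as a saturating reference count.  A count that has
 * dropped to zero stays at zero, and one pinned at INT_MAX refuses
 * to grow, so a stale get or an extra put cannot wrap the counter:
 *
 *	atomic_t ref = ATOMIC_INIT(1);
 *
 *	if (atomic_inc_return_safe(&ref) <= 0)
 *		return -EINVAL;		saturated at INT_MAX, or already zero
 *	...
 *	if (atomic_dec_return_safe(&ref) == 0)
 *		release_the_resource();	hypothetical teardown hook
 */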

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR    256             /* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX        "snap_"
#define RBD_MAX_SNAP_NAME_LEN   \
                        (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT      510     /* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME      "-"

#define BAD_SNAP_INDEX  U32_MAX         /* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX  (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX    64

#define RBD_OBJ_PREFIX_LEN_MAX  64

/* Feature bits */

#define RBD_FEATURE_LAYERING    (1<<0)
#define RBD_FEATURE_STRIPINGV2  (1<<1)
#define RBD_FEATURES_ALL \
            (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED  (RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN            32
#define MAX_INT_FORMAT_WIDTH    ((5 * sizeof (int)) / 2 + 1)
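
/*
 * A worked check of that width (my arithmetic, not from the original
 * source): each byte of an int contributes fewer than 2.5 decimal
 * digits, since 256 < 10^2.5.  For a 4-byte int the formula gives
 * (5 * 4) / 2 + 1 = 11 characters, exactly enough for the widest
 * value, "-2147483648".
 */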

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
        /* These six fields never change for a given rbd image */
        char *object_prefix;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
        u64 stripe_unit;
        u64 stripe_count;
        u64 features;           /* Might be changeable someday? */

        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
        char *snap_names;       /* format 1 only */
        u64 *snap_sizes;        /* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
        u64             pool_id;
        const char      *pool_name;

        const char      *image_id;
        const char      *image_name;

        u64             snap_id;
        const char      *snap_name;

        struct kref     kref;
};
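
/*
 * Illustrative example (assumed values, not from the original
 * source): a mapping of the base image "foo" in pool "rbd" might
 * carry the spec
 *
 *	pool_id  = 2,           pool_name  = "rbd"
 *	image_id = "10056b8b",  image_name = "foo"
 *	snap_id  = CEPH_NOSNAP, snap_name  = "-"  (RBD_SNAP_HEAD_NAME)
 *
 * The ids alone identify the image; the names are kept around for
 * sysfs and warning messages.
 */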

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
        struct ceph_client      *client;
        struct kref             kref;
        struct list_head        node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH       U32_MAX         /* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
        OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
        OBJ_REQ_DONE,           /* completion flag: not done = 0, done = 1 */
        OBJ_REQ_IMG_DATA,       /* object usage: standalone = 0, image = 1 */
        OBJ_REQ_KNOWN,          /* EXISTS flag valid: no = 0, yes = 1 */
        OBJ_REQ_EXISTS,         /* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
        const char              *object_name;
        u64                     offset;         /* object start byte */
        u64                     length;         /* bytes from offset */
        unsigned long           flags;

        /*
         * An object request associated with an image will have its
         * img_data flag set; a standalone object request will not.
         *
         * A standalone object request will have which == BAD_WHICH
         * and a null obj_request pointer.
         *
         * An object request initiated in support of a layered image
         * object (to check for its existence before a write) will
         * have which == BAD_WHICH and a non-null obj_request pointer.
         *
         * Finally, an object request for rbd image data will have
         * which != BAD_WHICH, and will have a non-null img_request
         * pointer.  The value of which will be in the range
         * 0..(img_request->obj_request_count-1).
         */
        union {
                struct rbd_obj_request  *obj_request;   /* STAT op */
                struct {
                        struct rbd_img_request  *img_request;
                        u64                     img_offset;
                        /* links for img_request->obj_requests list */
                        struct list_head        links;
                };
        };
        u32                     which;          /* position in image request list */

        enum obj_request_type   type;
        union {
                struct bio      *bio_list;
                struct {
                        struct page     **pages;
                        u32             page_count;
                };
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;

        struct ceph_osd_request *osd_req;

        u64                     xferred;        /* bytes transferred */
        int                     result;

        rbd_obj_callback_t      callback;
        struct completion       completion;

        struct kref             kref;
};

enum img_req_flags {
        IMG_REQ_WRITE,          /* I/O direction: read = 0, write = 1 */
        IMG_REQ_CHILD,          /* initiator: block = 0, child image = 1 */
        IMG_REQ_LAYERED,        /* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
        struct rbd_device       *rbd_dev;
        u64                     offset; /* starting image byte offset */
        u64                     length; /* byte count from offset */
        unsigned long           flags;
        union {
                u64                     snap_id;        /* for reads */
                struct ceph_snap_context *snapc;        /* for writes */
        };
        union {
                struct request          *rq;            /* block request */
                struct rbd_obj_request  *obj_request;   /* obj req initiator */
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;
        spinlock_t              completion_lock;/* protects next_completion */
        u32                     next_completion;
        rbd_img_callback_t      callback;
        u64                     xferred;/* aggregate bytes transferred */
        int                     result; /* first nonzero obj_request result */

        u32                     obj_request_count;
        struct list_head        obj_requests;   /* rbd_obj_request structs */

        struct kref             kref;
};

#define for_each_obj_request(ireq, oreq) \
        list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
        list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
        list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
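
/*
 * Illustrative use (this mirrors rbd_img_request_complete() below):
 *
 *	struct rbd_obj_request *obj_request;
 *	u64 xferred = 0;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		xferred += obj_request->xferred;
 *
 * The _safe variant walks the list in reverse and tolerates removal
 * of the current entry, which teardown paths need.
 */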

struct rbd_mapping {
        u64                     size;
        u64                     features;
        bool                    read_only;
};

/*
 * a single device
 */
struct rbd_device {
        int                     dev_id;         /* blkdev unique id */

        int                     major;          /* blkdev assigned major */
        struct gendisk          *disk;          /* blkdev's gendisk and rq */

        u32                     image_format;   /* Either 1 or 2 */
        struct rbd_client       *rbd_client;

        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

        spinlock_t              lock;           /* queue, flags, open_count */

        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
        struct rbd_spec         *spec;

        char                    *header_name;

        struct ceph_file_layout layout;

        struct ceph_osd_event   *watch_event;
        struct rbd_obj_request  *watch_request;

        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
        atomic_t                parent_ref;
        struct rbd_device       *parent;

        /* protects updating the header */
        struct rw_semaphore     header_rwsem;

        struct rbd_mapping      mapping;

        struct list_head        node;

        /* sysfs related */
        struct device           dev;
        unsigned long           open_count;     /* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);   /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);              /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache        *rbd_img_request_cache;
static struct kmem_cache        *rbd_obj_request_cache;
static struct kmem_cache        *rbd_segment_name_cache;

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
        __ATTR(remove, S_IWUSR, NULL, rbd_remove),
        __ATTR_NULL
};

static struct bus_type rbd_bus_type = {
        .name           = "rbd",
        .bus_attrs      = rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =    "rbd",
        .release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else    /* punt */
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}
#ifdef RBD_DEBUG
/*
 * Wrapped in do { } while (0) so the macro expands to a single
 * statement and is safe in unbraced if/else bodies.
 */
#define rbd_assert(expr)                                                \
        do {                                                            \
                if (unlikely(!(expr))) {                                \
                        printk(KERN_ERR "\nAssertion failure in %s() "  \
                                                "at line %d:\n\n"       \
                                        "\trbd_assert(%s);\n\n",        \
                                        __func__, __LINE__, #expr);     \
                        BUG();                                          \
                }                                                       \
        } while (0)
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)      ((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        (void) get_device(&rbd_dev->dev);
        set_device_ro(bdev, rbd_dev->mapping.read_only);
        mutex_unlock(&ctl_mutex);

        return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        put_device(&rbd_dev->dev);
        mutex_unlock(&ctl_mutex);

        return 0;
}

static const struct block_device_operations rbd_bd_ops = {
        .owner                  = THIS_MODULE,
        .open                   = rbd_open,
        .release                = rbd_release,
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client)) {
                ret = PTR_ERR(rbdc->client);    /* don't mask the real error */
                goto out_mutex;
        }
        ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_err;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        mutex_unlock(&ctl_mutex);
        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;

out_err:
        ceph_destroy_client(rbdc->client);
out_mutex:
        mutex_unlock(&ctl_mutex);
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        /* Boolean args above */
        Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},          /* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},         /* Alternate spelling */
        /* Boolean args above */
        {-1, NULL}
};

struct rbd_options {
        bool    read_only;
};

#define RBD_READ_ONLY_DEFAULT   false

static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else if (token > Opt_last_string && token < Opt_last_bool) {
                dout("got Boolean token %d\n", token);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        default:
                rbd_assert(false);
                break;
        }
        return 0;
}
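
/*
 * Illustrative note (not from the original source): each option word
 * is matched against rbd_opts_tokens above, so "read_only" or its
 * short form "ro" sets rbd_opts->read_only, while "read_write"/"rw"
 * clears it.  An unrecognized word makes this callback fail with
 * -EINVAL.
 */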

/*
 * Get a ceph client with specific addr and configuration; if one does
 * not exist, create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)       /* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);

        return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must not hold rbd_client_list_lock; it is taken here.
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}
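
/*
 * A worked check of the overflow guards above (my numbers, not from
 * the original source): on a 32-bit kernel, SIZE_MAX is 2^32 - 1, so
 * even after subtracting sizeof (struct ceph_snap_context) roughly
 * half a billion 8-byte snapshot ids would still fit in a size_t.
 * The guards only trip on a corrupt or hostile header, never on the
 * RBD_MAX_SNAP_COUNT (510) snapshots a valid one can hold.
 */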

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
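/*
 * Format 1 on-disk layout as consumed below (a sketch inferred from
 * this function, not an authoritative format description):
 *
 *	struct rbd_image_header_ondisk	fixed-size header
 *	struct rbd_image_snap_ondisk	snaps[snap_count]
 *					(id, image_size) per snapshot
 *	char				snap_names[snap_names_len]
 *					NUL-terminated names, packed
 *
 * The names start at &ondisk->snaps[snap_count], which is where the
 * memcpy() below copies them from.
 */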
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
{
        struct rbd_image_header *header = &rbd_dev->header;
        bool first_time = header->object_prefix == NULL;
        struct ceph_snap_context *snapc;
        char *object_prefix = NULL;
        char *snap_names = NULL;
        u64 *snap_sizes = NULL;
        u32 snap_count;
        size_t size;
        int ret = -ENOMEM;
        u32 i;

        /* Allocate this now to avoid having to handle failure below */

        if (first_time) {
                size_t len;

                len = strnlen(ondisk->object_prefix,
                                sizeof (ondisk->object_prefix));
                object_prefix = kmalloc(len + 1, GFP_KERNEL);
                if (!object_prefix)
                        return -ENOMEM;
                memcpy(object_prefix, ondisk->object_prefix, len);
                object_prefix[len] = '\0';
        }

        /* Allocate the snapshot context and fill it in */

        snap_count = le32_to_cpu(ondisk->snap_count);
        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc)
                goto out_err;
        snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
                struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* We'll keep a copy of the snapshot names... */

                if (snap_names_len > (u64)SIZE_MAX)
                        goto out_2big;
                snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!snap_names)
                        goto out_err;

                /* ...as well as the array of their sizes. */

                size = snap_count * sizeof (*header->snap_sizes);
                snap_sizes = kmalloc(size, GFP_KERNEL);
                if (!snap_sizes)
                        goto out_err;

                /*
                 * Copy the names, and fill in each snapshot's id
                 * and size.
                 *
                 * Note that rbd_dev_v1_header_info() guarantees the
                 * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
                snaps = ondisk->snaps;
                for (i = 0; i < snap_count; i++) {
                        snapc->snaps[i] = le64_to_cpu(snaps[i].id);
                        snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
                }
        }

        /* We won't fail any more, fill in the header */

        down_write(&rbd_dev->header_rwsem);
        if (first_time) {
                header->object_prefix = object_prefix;
                header->obj_order = ondisk->options.order;
                header->crypt_type = ondisk->options.crypt_type;
                header->comp_type = ondisk->options.comp_type;
                /* The rest aren't used for format 1 images */
                header->stripe_unit = 0;
                header->stripe_count = 0;
                header->features = 0;
        } else {
                ceph_put_snap_context(header->snapc);
                kfree(header->snap_names);
                kfree(header->snap_sizes);
        }

        /* The remaining fields always get updated (when we refresh) */

        header->image_size = le64_to_cpu(ondisk->image_size);
        header->snapc = snapc;
        header->snap_names = snap_names;
        header->snap_sizes = snap_sizes;

        /* Make sure mapping size is consistent with header info */

        if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
                if (rbd_dev->mapping.size != header->image_size)
                        rbd_dev->mapping.size = header->image_size;

        up_write(&rbd_dev->header_rwsem);

        return 0;
out_2big:
        ret = -EIO;
out_err:
        kfree(snap_sizes);
        kfree(snap_names);
        ceph_put_snap_context(snapc);
        kfree(object_prefix);

        return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
        const char *snap_name;

        rbd_assert(which < rbd_dev->header.snapc->num_snaps);

        /* Skip over names until we find the one we are looking for */

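        /*
         * The names are stored back to back as NUL-terminated
         * strings, e.g. (hypothetical) "one\0two\0three\0", so the
         * entry for `which' is reached by skipping that many
         * terminators.
         */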
        snap_name = rbd_dev->header.snap_names;
        while (which--)
                snap_name += strlen(snap_name) + 1;

        return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that the result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
        u64 snap_id1 = *(u64 *)s1;
        u64 snap_id2 = *(u64 *)s2;

        if (snap_id1 < snap_id2)
                return 1;
        return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 *found;

        found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
                                sizeof (snap_id), snapid_compare_reverse);

        return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        u32 which;

        which = rbd_dev_snap_index(rbd_dev, snap_id);
        if (which == BAD_SNAP_INDEX)
                return NULL;

        return _rbd_dev_v1_snap_name(rbd_dev, which);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
        if (snap_id == CEPH_NOSNAP)
                return RBD_SNAP_HEAD_NAME;

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_snap_name(rbd_dev, snap_id);

        return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u64 *snap_size)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_size = rbd_dev->header.image_size;
        } else if (rbd_dev->image_format == 1) {
                u32 which;

                which = rbd_dev_snap_index(rbd_dev, snap_id);
                if (which == BAD_SNAP_INDEX)
                        return -ENOENT;

                *snap_size = rbd_dev->header.snap_sizes[which];
        } else {
                u64 size = 0;
                int ret;

                ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
                if (ret)
                        return ret;

                *snap_size = size;
        }
        return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                        u64 *snap_features)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_features = rbd_dev->header.features;
        } else if (rbd_dev->image_format == 1) {
                *snap_features = 0;     /* No features for format 1 */
        } else {
                u64 features = 0;
                int ret;

                ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
                if (ret)
                        return ret;

                *snap_features = features;
        }
        return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
        u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;

        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
        ret = rbd_snap_features(rbd_dev, snap_id, &features);
        if (ret)
                return ret;

        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;

        return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
        char *name;
        u64 segment;
        int ret;
        char *name_format;

        name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
        name_format = "%s.%012llx";
        if (rbd_dev->image_format == 2)
                name_format = "%s.%016llx";
        ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
                pr_err("error formatting segment name for #%llu (%d)\n",
                        segment, ret);
                /* allocated from rbd_segment_name_cache, so free it there */
                kmem_cache_free(rbd_segment_name_cache, name);
                name = NULL;
        }

        return name;
}

static void rbd_segment_name_free(const char *name)
{
        /* The explicit cast here is needed to drop the const qualifier */

        kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
                                u64 offset, u64 length)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        offset &= segment_size - 1;

        rbd_assert(length <= U64_MAX - offset);
        if (offset + length > segment_size)
                length = segment_size - offset;

        return length;
}
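
/*
 * A worked example of the segment arithmetic (my numbers, not from
 * the original source): with obj_order = 22 (4 MiB objects), image
 * offset 0x01400123 lies in segment 0x01400123 >> 22 = 5, at byte
 * 0x123 within that object, and a request reaching past the object
 * boundary is clipped by rbd_segment_length() to end exactly at
 * segment_size so it never spills into segment 6.
 */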

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
        return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
        struct bio *tmp;

        while (chain) {
                tmp = chain;
                chain = chain->bi_next;
                bio_put(tmp);
        }
}

/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
        struct bio_vec *bv;
        unsigned long flags;
        void *buf;
        int i;
        int pos = 0;

        while (chain) {
                bio_for_each_segment(bv, chain, i) {
                        if (pos + bv->bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
                                buf = bvec_kmap_irq(bv, &flags);
                                memset(buf + remainder, 0,
                                       bv->bv_len - remainder);
                                bvec_kunmap_irq(buf, &flags);
                        }
                        pos += bv->bv_len;
                }

                chain = chain->bi_next;
        }
}

/*
 * Similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
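/*
 * Example (my numbers, not from the original source): zeroing bytes
 * [5000, 9000) with 4 KiB pages touches pages[1] from byte 904 for
 * 3192 bytes, then pages[2] from byte 0 for the remaining 808 bytes.
 */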
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
        struct page **page = &pages[offset >> PAGE_SHIFT];

        rbd_assert(end > offset);
        rbd_assert(end - offset <= (u64)SIZE_MAX);
        while (offset < end) {
                size_t page_offset;
                size_t length;
                unsigned long flags;
                void *kaddr;

                page_offset = (size_t)(offset & ~PAGE_MASK);
                length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
                local_irq_save(flags);
                kaddr = kmap_atomic(*page);
                memset(kaddr + page_offset, 0, length);
                kunmap_atomic(kaddr);
                local_irq_restore(flags);

                offset += length;
                page++;
        }
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio_vec *bv;
        unsigned int resid;
        unsigned short idx;
        unsigned int voff;
        unsigned short end_idx;
        unsigned short vcnt;
        struct bio *bio;

        /* Handle the easy case for the caller */

        if (!offset && len == bio_src->bi_size)
                return bio_clone(bio_src, gfpmask);

        if (WARN_ON_ONCE(!len))
                return NULL;
        if (WARN_ON_ONCE(len > bio_src->bi_size))
                return NULL;
        if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
                return NULL;

        /* Find first affected segment... */

        resid = offset;
        __bio_for_each_segment(bv, bio_src, idx, 0) {
                if (resid < bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        voff = resid;

        /* ...and the last affected segment */

        resid += len;
        __bio_for_each_segment(bv, bio_src, end_idx, idx) {
                if (resid <= bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        vcnt = end_idx - idx + 1;

        /* Build the clone */

        bio = bio_alloc(gfpmask, (unsigned int) vcnt);
        if (!bio)
                return NULL;    /* ENOMEM */

        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_flags |= 1 << BIO_CLONED;

        /*
         * Copy over our part of the bio_vec, then update the first
         * and last (or only) entries.
         */
        memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
                        vcnt * sizeof (struct bio_vec));
        bio->bi_io_vec[0].bv_offset += voff;
        if (vcnt > 1) {
                bio->bi_io_vec[0].bv_len -= voff;
                bio->bi_io_vec[vcnt - 1].bv_len = resid;
        } else {
                bio->bi_io_vec[0].bv_len = len;
        }

        bio->bi_vcnt = vcnt;
        bio->bi_size = len;
        bio->bi_idx = 0;

        return bio;
}
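
/*
 * A worked example (assumed bio layout, not from the original
 * source): cloning offset 6000, len 3000 from a bio with three
 * 4096-byte segments leaves idx = 1 (the 6000-byte skip ends in the
 * second segment) with voff = 1904, and end_idx = 2 (byte 9000 falls
 * in the third), so vcnt = 2: the clone's first vec is advanced and
 * shortened by 1904 bytes and its last is trimmed to resid = 808.
 */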

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
                                        unsigned int *offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bi = *bio_src;
        unsigned int off = *offset;
        struct bio *chain = NULL;
        struct bio **end;

        /* Build up a chain of clone bios up to the limit */

        if (!bi || off >= bi->bi_size || !len)
                return NULL;            /* Nothing to clone */

        end = &chain;
        while (len) {
                unsigned int bi_size;
                struct bio *bio;

                if (!bi) {
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bios */
                }
                bi_size = min_t(unsigned int, bi->bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;   /* ENOMEM */

                *end = bio;
                end = &bio->bi_next;

                off += bi_size;
                if (off == bi->bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
                len -= bi_size;
        }
        *bio_src = bi;
        *offset = off;

        return chain;
out_err:
        bio_chain_put(chain);

        return NULL;
}

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
                struct rbd_device *rbd_dev;

                rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked img_data",
                        obj_request);
        }
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
                struct rbd_device *rbd_dev = NULL;

                if (obj_request_img_data_test(obj_request))
                        rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked done",
                        obj_request);
        }
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, so
 * that the first ("doesn't exist") response arrives *after* the
 * second ("does exist") one.  In that case the late "doesn't exist"
 * response is ignored.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
                                bool exists)
{
        if (exists)
                set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
        set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
        smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request != NULL);
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        if (img_request_child_test(img_request))
                kref_put(&img_request->kref, rbd_parent_request_destroy);
        else
                kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->img_request == NULL);

        /* Image request now owns object's original reference */
        obj_request->img_request = img_request;
        obj_request->which = img_request->obj_request_count;
        rbd_assert(!obj_request_img_data_test(obj_request));
        obj_request_img_data_set(obj_request);
        rbd_assert(obj_request->which != BAD_WHICH);
        img_request->obj_request_count++;
        list_add_tail(&obj_request->links, &img_request->obj_requests);
        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->which != BAD_WHICH);

        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
        list_del(&obj_request->links);
        rbd_assert(img_request->obj_request_count > 0);
        img_request->obj_request_count--;
        rbd_assert(obj_request->which == img_request->obj_request_count);
        obj_request->which = BAD_WHICH;
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request == img_request);
        obj_request->img_request = NULL;
        obj_request->callback = NULL;
        rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
        switch (type) {
        case OBJ_REQUEST_NODATA:
        case OBJ_REQUEST_BIO:
        case OBJ_REQUEST_PAGES:
                return true;
        default:
                return false;
        }
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
                                struct rbd_obj_request *obj_request)
{
        dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

        return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
        dout("%s: img %p\n", __func__, img_request);

        /*
         * If no error occurred, compute the aggregate transfer
         * count for the image request.  We could instead use
         * atomic64_cmpxchg() to update it as each object request
         * completes; it's not clear offhand which way is better.
         */
        if (!img_request->result) {
                struct rbd_obj_request *obj_request;
                u64 xferred = 0;

                for_each_obj_request(img_request, obj_request)
                        xferred += obj_request->xferred;
                img_request->xferred = xferred;
        }

        if (img_request->callback)
                img_request->callback(img_request);
        else
                rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);

        return wait_for_completion_interruptible(&obj_request->completion);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_WRITE, &img_request->flags);
        smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
1559
1560 static void
1561 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1562 {
1563         u64 xferred = obj_request->xferred;
1564         u64 length = obj_request->length;
1565
1566         dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1567                 obj_request, obj_request->img_request, obj_request->result,
1568                 xferred, length);
1569         /*
1570          * ENOENT means a hole in the image.  We zero-fill the
1571          * entire length of the request.  A short read also implies
1572          * zero-fill to the end of the request.  Either way we
1573          * update the xferred count to indicate the whole request
1574          * was satisfied.
1575          */
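        /*
         * Example (hypothetical numbers): a 4096-byte read that
         * returns xferred == 1024 with result == 0 is zero-filled
         * from byte 1024 onward and reported as a full 4096-byte
         * transfer.
         */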
1576         rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1577         if (obj_request->result == -ENOENT) {
1578                 if (obj_request->type == OBJ_REQUEST_BIO)
1579                         zero_bio_chain(obj_request->bio_list, 0);
1580                 else
1581                         zero_pages(obj_request->pages, 0, length);
1582                 obj_request->result = 0;
1583                 obj_request->xferred = length;
1584         } else if (xferred < length && !obj_request->result) {
1585                 if (obj_request->type == OBJ_REQUEST_BIO)
1586                         zero_bio_chain(obj_request->bio_list, xferred);
1587                 else
1588                         zero_pages(obj_request->pages, xferred, length);
1589                 obj_request->xferred = length;
1590         }
1591         obj_request_done_set(obj_request);
1592 }
1593
1594 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1595 {
1596         dout("%s: obj %p cb %p\n", __func__, obj_request,
1597                 obj_request->callback);
1598         if (obj_request->callback)
1599                 obj_request->callback(obj_request);
1600         else
1601                 complete_all(&obj_request->completion);
1602 }
1603
1604 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1605 {
1606         dout("%s: obj %p\n", __func__, obj_request);
1607         obj_request_done_set(obj_request);
1608 }
1609
1610 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1611 {
1612         struct rbd_img_request *img_request = NULL;
1613         struct rbd_device *rbd_dev = NULL;
1614         bool layered = false;
1615
1616         if (obj_request_img_data_test(obj_request)) {
1617                 img_request = obj_request->img_request;
1618                 layered = img_request && img_request_layered_test(img_request);
1619                 rbd_dev = img_request->rbd_dev;
1620         }
1621
1622         dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1623                 obj_request, img_request, obj_request->result,
1624                 obj_request->xferred, obj_request->length);
1625         if (layered && obj_request->result == -ENOENT &&
1626                         obj_request->img_offset < rbd_dev->parent_overlap)
1627                 rbd_img_parent_read(obj_request);
1628         else if (img_request)
1629                 rbd_img_obj_request_read_callback(obj_request);
1630         else
1631                 obj_request_done_set(obj_request);
1632 }
1633
1634 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1635 {
1636         dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1637                 obj_request->result, obj_request->length);
1638         /*
1639          * There is no such thing as a successful short write.  Set
1640          * the transfer count to the originally-requested length.
1641          */
1642         obj_request->xferred = obj_request->length;
1643         obj_request_done_set(obj_request);
1644 }
1645
1646 /*
1647  * For a simple stat call there's nothing to do.  We'll do more if
1648  * this is part of a write sequence for a layered image.
1649  */
1650 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1651 {
1652         dout("%s: obj %p\n", __func__, obj_request);
1653         obj_request_done_set(obj_request);
1654 }
1655
1656 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1657                                 struct ceph_msg *msg)
1658 {
1659         struct rbd_obj_request *obj_request = osd_req->r_priv;
1660         u16 opcode;
1661
1662         dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1663         rbd_assert(osd_req == obj_request->osd_req);
1664         if (obj_request_img_data_test(obj_request)) {
1665                 rbd_assert(obj_request->img_request);
1666                 rbd_assert(obj_request->which != BAD_WHICH);
1667         } else {
1668                 rbd_assert(obj_request->which == BAD_WHICH);
1669         }
1670
1671         if (osd_req->r_result < 0)
1672                 obj_request->result = osd_req->r_result;
1673
1674         BUG_ON(osd_req->r_num_ops > 2);
1675
1676         /*
1677          * We support a 64-bit length, but ultimately it has to be
1678          * passed to blk_end_request(), which takes an unsigned int.
1679          */
1680         obj_request->xferred = osd_req->r_reply_op_len[0];
1681         rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1682         opcode = osd_req->r_ops[0].op;
1683         switch (opcode) {
1684         case CEPH_OSD_OP_READ:
1685                 rbd_osd_read_callback(obj_request);
1686                 break;
1687         case CEPH_OSD_OP_WRITE:
1688                 rbd_osd_write_callback(obj_request);
1689                 break;
1690         case CEPH_OSD_OP_STAT:
1691                 rbd_osd_stat_callback(obj_request);
1692                 break;
1693         case CEPH_OSD_OP_CALL:
1694         case CEPH_OSD_OP_NOTIFY_ACK:
1695         case CEPH_OSD_OP_WATCH:
1696                 rbd_osd_trivial_callback(obj_request);
1697                 break;
1698         default:
1699                 rbd_warn(NULL, "%s: unsupported op %hu\n",
1700                         obj_request->object_name, (unsigned short) opcode);
1701                 break;
1702         }
1703
1704         if (obj_request_done_test(obj_request))
1705                 rbd_obj_request_complete(obj_request);
1706 }
1707
1708 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1709 {
1710         struct rbd_img_request *img_request = obj_request->img_request;
1711         struct ceph_osd_request *osd_req = obj_request->osd_req;
1712         u64 snap_id;
1713
1714         rbd_assert(osd_req != NULL);
1715
1716         snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
1717         ceph_osdc_build_request(osd_req, obj_request->offset,
1718                         NULL, snap_id, NULL);
1719 }
1720
1721 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1722 {
1723         struct rbd_img_request *img_request = obj_request->img_request;
1724         struct ceph_osd_request *osd_req = obj_request->osd_req;
1725         struct ceph_snap_context *snapc;
1726         struct timespec mtime = CURRENT_TIME;
1727
1728         rbd_assert(osd_req != NULL);
1729
1730         snapc = img_request ? img_request->snapc : NULL;
1731         ceph_osdc_build_request(osd_req, obj_request->offset,
1732                         snapc, CEPH_NOSNAP, &mtime);
1733 }
1734
1735 static struct ceph_osd_request *rbd_osd_req_create(
1736                                         struct rbd_device *rbd_dev,
1737                                         bool write_request,
1738                                         struct rbd_obj_request *obj_request)
1739 {
1740         struct ceph_snap_context *snapc = NULL;
1741         struct ceph_osd_client *osdc;
1742         struct ceph_osd_request *osd_req;
1743
1744         if (obj_request_img_data_test(obj_request)) {
1745                 struct rbd_img_request *img_request = obj_request->img_request;
1746
1747                 rbd_assert(write_request ==
1748                                 img_request_write_test(img_request));
1749                 if (write_request)
1750                         snapc = img_request->snapc;
1751         }
1752
1753         /* Allocate and initialize the request, for the single op */
1754
1755         osdc = &rbd_dev->rbd_client->client->osdc;
1756         osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
1757         if (!osd_req)
1758                 return NULL;    /* ENOMEM */
1759
1760         if (write_request)
1761                 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1762         else
1763                 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1764
1765         osd_req->r_callback = rbd_osd_req_callback;
1766         osd_req->r_priv = obj_request;
1767
1768         osd_req->r_oid_len = strlen(obj_request->object_name);
1769         rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1770         memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1771
1772         osd_req->r_file_layout = rbd_dev->layout;       /* struct */
1773
1774         return osd_req;
1775 }
1776
1777 /*
1778  * Create a copyup osd request based on the information in the
1779  * object request supplied.  A copyup request has two osd ops:
1780  * a copyup method call followed by a "normal" write request.
1781  */
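/*
 * The resulting op layout, as filled in by the caller (see
 * rbd_img_obj_parent_read_full_callback() below):
 *     op 0: CEPH_OSD_OP_CALL, class "rbd", method "copyup",
 *           carrying the data read from the parent
 *     op 1: CEPH_OSD_OP_WRITE, the original write
 */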
1782 static struct ceph_osd_request *
1783 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1784 {
1785         struct rbd_img_request *img_request;
1786         struct ceph_snap_context *snapc;
1787         struct rbd_device *rbd_dev;
1788         struct ceph_osd_client *osdc;
1789         struct ceph_osd_request *osd_req;
1790
1791         rbd_assert(obj_request_img_data_test(obj_request));
1792         img_request = obj_request->img_request;
1793         rbd_assert(img_request);
1794         rbd_assert(img_request_write_test(img_request));
1795
1796         /* Allocate and initialize the request, for the two ops */
1797
1798         snapc = img_request->snapc;
1799         rbd_dev = img_request->rbd_dev;
1800         osdc = &rbd_dev->rbd_client->client->osdc;
1801         osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
1802         if (!osd_req)
1803                 return NULL;    /* ENOMEM */
1804
1805         osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1806         osd_req->r_callback = rbd_osd_req_callback;
1807         osd_req->r_priv = obj_request;
1808
1809         osd_req->r_oid_len = strlen(obj_request->object_name);
1810         rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1811         memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1812
1813         osd_req->r_file_layout = rbd_dev->layout;       /* struct */
1814
1815         return osd_req;
1816 }
1817
1818
1819 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1820 {
1821         ceph_osdc_put_request(osd_req);
1822 }
1823
1824 /* object_name is assumed to be a non-null pointer and NUL-terminated */
1825
1826 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1827                                                 u64 offset, u64 length,
1828                                                 enum obj_request_type type)
1829 {
1830         struct rbd_obj_request *obj_request;
1831         size_t size;
1832         char *name;
1833
1834         rbd_assert(obj_request_type_valid(type));
1835
1836         size = strlen(object_name) + 1;
1837         name = kmalloc(size, GFP_KERNEL);
1838         if (!name)
1839                 return NULL;
1840
1841         obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
1842         if (!obj_request) {
1843                 kfree(name);
1844                 return NULL;
1845         }
1846
1847         obj_request->object_name = memcpy(name, object_name, size);
1848         obj_request->offset = offset;
1849         obj_request->length = length;
1850         obj_request->flags = 0;
1851         obj_request->which = BAD_WHICH;
1852         obj_request->type = type;
1853         INIT_LIST_HEAD(&obj_request->links);
1854         init_completion(&obj_request->completion);
1855         kref_init(&obj_request->kref);
1856
1857         dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1858                 offset, length, (int)type, obj_request);
1859
1860         return obj_request;
1861 }
1862
1863 static void rbd_obj_request_destroy(struct kref *kref)
1864 {
1865         struct rbd_obj_request *obj_request;
1866
1867         obj_request = container_of(kref, struct rbd_obj_request, kref);
1868
1869         dout("%s: obj %p\n", __func__, obj_request);
1870
1871         rbd_assert(obj_request->img_request == NULL);
1872         rbd_assert(obj_request->which == BAD_WHICH);
1873
1874         if (obj_request->osd_req)
1875                 rbd_osd_req_destroy(obj_request->osd_req);
1876
1877         rbd_assert(obj_request_type_valid(obj_request->type));
1878         switch (obj_request->type) {
1879         case OBJ_REQUEST_NODATA:
1880                 break;          /* Nothing to do */
1881         case OBJ_REQUEST_BIO:
1882                 if (obj_request->bio_list)
1883                         bio_chain_put(obj_request->bio_list);
1884                 break;
1885         case OBJ_REQUEST_PAGES:
1886                 if (obj_request->pages)
1887                         ceph_release_page_vector(obj_request->pages,
1888                                                 obj_request->page_count);
1889                 break;
1890         }
1891
1892         kfree(obj_request->object_name);
1893         obj_request->object_name = NULL;
1894         kmem_cache_free(rbd_obj_request_cache, obj_request);
1895 }
1896
1897 /* It's OK to call this for a device with no parent */
1898
1899 static void rbd_spec_put(struct rbd_spec *spec);
1900 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1901 {
1902         rbd_dev_remove_parent(rbd_dev);
1903         rbd_spec_put(rbd_dev->parent_spec);
1904         rbd_dev->parent_spec = NULL;
1905         rbd_dev->parent_overlap = 0;
1906 }
1907
1908 /*
1909  * Parent image reference counting is used to determine when an
1910  * image's parent fields can be safely torn down--after there are no
1911  * more in-flight requests to the parent image.  When the last
1912  * reference is dropped, cleaning them up is safe.
1913  */
1914 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1915 {
1916         int counter;
1917
1918         if (!rbd_dev->parent_spec)
1919                 return;
1920
1921         counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1922         if (counter > 0)
1923                 return;
1924
1925         /* Last reference; clean up parent data structures */
1926
1927         if (!counter)
1928                 rbd_dev_unparent(rbd_dev);
1929         else
1930                 rbd_warn(rbd_dev, "parent reference underflow\n");
1931 }
1932
1933 /*
1934  * If an image has a non-zero parent overlap, get a reference to its
1935  * parent.
1936  *
1937  * We must get the reference before checking for the overlap to
1938  * coordinate properly with zeroing the parent overlap in
1939  * rbd_dev_v2_parent_info() when an image gets flattened.  We
1940  * drop it again if there is no overlap.
1941  *
1942  * Returns true if the rbd device has a parent with a non-zero
1943  * overlap and a reference for it was successfully taken, or
1944  * false otherwise.
1945  */
1946 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1947 {
1948         int counter;
1949
1950         if (!rbd_dev->parent_spec)
1951                 return false;
1952
1953         counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1954         if (counter > 0 && rbd_dev->parent_overlap)
1955                 return true;
1956
1957         /* Image was flattened, but parent is not yet torn down */
1958
1959         if (counter < 0)
1960                 rbd_warn(rbd_dev, "parent reference overflow\n");
1961
1962         return false;
1963 }
1964
1965 /*
1966  * Caller is responsible for filling in the list of object requests
1967  * that comprises the image request, and the Linux request pointer
1968  * (if there is one).
1969  */
1970 static struct rbd_img_request *rbd_img_request_create(
1971                                         struct rbd_device *rbd_dev,
1972                                         u64 offset, u64 length,
1973                                         bool write_request)
1974 {
1975         struct rbd_img_request *img_request;
1976         struct ceph_snap_context *snapc = NULL;
1977         img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
1978         if (!img_request)
1979                 return NULL;
1980
1981         if (write_request) {
1982                 down_read(&rbd_dev->header_rwsem);
1983                 snapc = ceph_get_snap_context(rbd_dev->header.snapc);
1984                 up_read(&rbd_dev->header_rwsem);
1985         }
1986
1987         img_request->rq = NULL;
1988         img_request->rbd_dev = rbd_dev;
1989         img_request->offset = offset;
1990         img_request->length = length;
1991         img_request->flags = 0;
1992         if (write_request) {
1993                 img_request_write_set(img_request);
1994                 img_request->snapc = snapc;
1995         } else {
1996                 img_request->snap_id = rbd_dev->spec->snap_id;
1997         }
1998         if (rbd_dev_parent_get(rbd_dev))
1999                 img_request_layered_set(img_request);
2000         spin_lock_init(&img_request->completion_lock);
2001         img_request->next_completion = 0;
2002         img_request->callback = NULL;
2003         img_request->result = 0;
2004         img_request->obj_request_count = 0;
2005         INIT_LIST_HEAD(&img_request->obj_requests);
2006         kref_init(&img_request->kref);
2007
2008         dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
2009                 write_request ? "write" : "read", offset, length,
2010                 img_request);
2011
2012         return img_request;
2013 }
2014
2015 static void rbd_img_request_destroy(struct kref *kref)
2016 {
2017         struct rbd_img_request *img_request;
2018         struct rbd_obj_request *obj_request;
2019         struct rbd_obj_request *next_obj_request;
2020
2021         img_request = container_of(kref, struct rbd_img_request, kref);
2022
2023         dout("%s: img %p\n", __func__, img_request);
2024
2025         for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2026                 rbd_img_obj_request_del(img_request, obj_request);
2027         rbd_assert(img_request->obj_request_count == 0);
2028
2029         if (img_request_layered_test(img_request)) {
2030                 img_request_layered_clear(img_request);
2031                 rbd_dev_parent_put(img_request->rbd_dev);
2032         }
2033
2034         if (img_request_write_test(img_request))
2035                 ceph_put_snap_context(img_request->snapc);
2036
2037         kmem_cache_free(rbd_img_request_cache, img_request);
2038 }
2039
2040 static struct rbd_img_request *rbd_parent_request_create(
2041                                         struct rbd_obj_request *obj_request,
2042                                         u64 img_offset, u64 length)
2043 {
2044         struct rbd_img_request *parent_request;
2045         struct rbd_device *rbd_dev;
2046
2047         rbd_assert(obj_request->img_request);
2048         rbd_dev = obj_request->img_request->rbd_dev;
2049
2050         parent_request = rbd_img_request_create(rbd_dev->parent,
2051                                                 img_offset, length, false);
2052         if (!parent_request)
2053                 return NULL;
2054
2055         img_request_child_set(parent_request);
2056         rbd_obj_request_get(obj_request);
2057         parent_request->obj_request = obj_request;
2058
2059         return parent_request;
2060 }
2061
2062 static void rbd_parent_request_destroy(struct kref *kref)
2063 {
2064         struct rbd_img_request *parent_request;
2065         struct rbd_obj_request *orig_request;
2066
2067         parent_request = container_of(kref, struct rbd_img_request, kref);
2068         orig_request = parent_request->obj_request;
2069
2070         parent_request->obj_request = NULL;
2071         rbd_obj_request_put(orig_request);
2072         img_request_child_clear(parent_request);
2073
2074         rbd_img_request_destroy(kref);
2075 }
2076
2077 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2078 {
2079         struct rbd_img_request *img_request;
2080         unsigned int xferred;
2081         int result;
2082         bool more;
2083
2084         rbd_assert(obj_request_img_data_test(obj_request));
2085         img_request = obj_request->img_request;
2086
2087         rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2088         xferred = (unsigned int)obj_request->xferred;
2089         result = obj_request->result;
2090         if (result) {
2091                 struct rbd_device *rbd_dev = img_request->rbd_dev;
2092
2093                 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
2094                         img_request_write_test(img_request) ? "write" : "read",
2095                         obj_request->length, obj_request->img_offset,
2096                         obj_request->offset);
2097                 rbd_warn(rbd_dev, "  result %d xferred %x\n",
2098                         result, xferred);
2099                 if (!img_request->result)
2100                         img_request->result = result;
2101         }
2102
2103         /* Image object requests don't own their page array */
2104
2105         if (obj_request->type == OBJ_REQUEST_PAGES) {
2106                 obj_request->pages = NULL;
2107                 obj_request->page_count = 0;
2108         }
2109
2110         if (img_request_child_test(img_request)) {
2111                 rbd_assert(img_request->obj_request != NULL);
2112                 more = obj_request->which < img_request->obj_request_count - 1;
2113         } else {
2114                 rbd_assert(img_request->rq != NULL);
2115                 more = blk_end_request(img_request->rq, result, xferred);
2116         }
2117
2118         return more;
2119 }
2120
2121 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2122 {
2123         struct rbd_img_request *img_request;
2124         u32 which = obj_request->which;
2125         bool more = true;
2126
2127         rbd_assert(obj_request_img_data_test(obj_request));
2128         img_request = obj_request->img_request;
2129
2130         dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2131         rbd_assert(img_request != NULL);
2132         rbd_assert(img_request->obj_request_count > 0);
2133         rbd_assert(which != BAD_WHICH);
2134         rbd_assert(which < img_request->obj_request_count);
2135         rbd_assert(which >= img_request->next_completion);
2136
2137         spin_lock_irq(&img_request->completion_lock);
2138         if (which != img_request->next_completion)
2139                 goto out;
2140
2141         for_each_obj_request_from(img_request, obj_request) {
2142                 rbd_assert(more);
2143                 rbd_assert(which < img_request->obj_request_count);
2144
2145                 if (!obj_request_done_test(obj_request))
2146                         break;
2147                 more = rbd_img_obj_end_request(obj_request);
2148                 which++;
2149         }
2150
2151         rbd_assert(more ^ (which == img_request->obj_request_count));
2152         img_request->next_completion = which;
2153 out:
2154         spin_unlock_irq(&img_request->completion_lock);
2155
2156         if (!more)
2157                 rbd_img_request_complete(img_request);
2158 }
2159
2160 /*
2161  * Split up an image request into one or more object requests, each
2162  * to a different object.  The "type" parameter indicates whether
2163  * "data_desc" is the pointer to the head of a list of bio
2164  * structures, or the base of a page array.  In either case this
2165  * function assumes data_desc describes memory sufficient to hold
2166  * all data described by the image request.
2167  */
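/*
 * For example (assuming the default 4 MB object size, i.e. an
 * object order of 22): a 10 MB image request starting at image
 * offset 3 MB is split into four object requests:
 *     object 0: object offset 3 MB, length 1 MB
 *     object 1: object offset 0,    length 4 MB
 *     object 2: object offset 0,    length 4 MB
 *     object 3: object offset 0,    length 1 MB
 */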
2168 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2169                                         enum obj_request_type type,
2170                                         void *data_desc)
2171 {
2172         struct rbd_device *rbd_dev = img_request->rbd_dev;
2173         struct rbd_obj_request *obj_request = NULL;
2174         struct rbd_obj_request *next_obj_request;
2175         bool write_request = img_request_write_test(img_request);
2176         struct bio *bio_list;
2177         unsigned int bio_offset = 0;
2178         struct page **pages;
2179         u64 img_offset;
2180         u64 resid;
2181         u16 opcode;
2182
2183         dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2184                 (int)type, data_desc);
2185
2186         opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2187         img_offset = img_request->offset;
2188         resid = img_request->length;
2189         rbd_assert(resid > 0);
2190
2191         if (type == OBJ_REQUEST_BIO) {
2192                 bio_list = data_desc;
2193                 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2194         } else {
2195                 rbd_assert(type == OBJ_REQUEST_PAGES);
2196                 pages = data_desc;
2197         }
2198
2199         while (resid) {
2200                 struct ceph_osd_request *osd_req;
2201                 const char *object_name;
2202                 u64 offset;
2203                 u64 length;
2204
2205                 object_name = rbd_segment_name(rbd_dev, img_offset);
2206                 if (!object_name)
2207                         goto out_unwind;
2208                 offset = rbd_segment_offset(rbd_dev, img_offset);
2209                 length = rbd_segment_length(rbd_dev, img_offset, resid);
2210                 obj_request = rbd_obj_request_create(object_name,
2211                                                 offset, length, type);
2212                 /* object request has its own copy of the object name */
2213                 rbd_segment_name_free(object_name);
2214                 if (!obj_request)
2215                         goto out_unwind;
2216
2217                 if (type == OBJ_REQUEST_BIO) {
2218                         unsigned int clone_size;
2219
2220                         rbd_assert(length <= (u64)UINT_MAX);
2221                         clone_size = (unsigned int)length;
2222                         obj_request->bio_list =
2223                                         bio_chain_clone_range(&bio_list,
2224                                                                 &bio_offset,
2225                                                                 clone_size,
2226                                                                 GFP_ATOMIC);
2227                         if (!obj_request->bio_list)
2228                                 goto out_partial;
2229                 } else {
2230                         unsigned int page_count;
2231
2232                         obj_request->pages = pages;
2233                         page_count = (u32)calc_pages_for(offset, length);
2234                         obj_request->page_count = page_count;
2235                         if ((offset + length) & ~PAGE_MASK)
2236                                 page_count--;   /* more on last page */
2237                         pages += page_count;
2238                 }
2239
2240                 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2241                                                 obj_request);
2242                 if (!osd_req)
2243                         goto out_partial;
2244                 obj_request->osd_req = osd_req;
2245                 obj_request->callback = rbd_img_obj_callback;
2246
2247                 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2248                                                 0, 0);
2249                 if (type == OBJ_REQUEST_BIO)
2250                         osd_req_op_extent_osd_data_bio(osd_req, 0,
2251                                         obj_request->bio_list, length);
2252                 else
2253                         osd_req_op_extent_osd_data_pages(osd_req, 0,
2254                                         obj_request->pages, length,
2255                                         offset & ~PAGE_MASK, false, false);
2256
2257                 /*
2258                  * set obj_request->img_request before formatting
2259                  * the osd_request so that it gets the right snapc
2260                  */
2261                 rbd_img_obj_request_add(img_request, obj_request);
2262                 if (write_request)
2263                         rbd_osd_req_format_write(obj_request);
2264                 else
2265                         rbd_osd_req_format_read(obj_request);
2266
2267                 obj_request->img_offset = img_offset;
2268
2269                 img_offset += length;
2270                 resid -= length;
2271         }
2272
2273         return 0;
2274
2275 out_partial:
2276         rbd_obj_request_put(obj_request);
2277 out_unwind:
2278         for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2279                 rbd_img_obj_request_del(img_request, obj_request);
2280
2281         return -ENOMEM;
2282 }
2283
2284 static void
2285 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2286 {
2287         struct rbd_img_request *img_request;
2288         struct rbd_device *rbd_dev;
2289         struct page **pages;
2290         u32 page_count;
2291
2292         rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2293         rbd_assert(obj_request_img_data_test(obj_request));
2294         img_request = obj_request->img_request;
2295         rbd_assert(img_request);
2296
2297         rbd_dev = img_request->rbd_dev;
2298         rbd_assert(rbd_dev);
2299
2300         pages = obj_request->copyup_pages;
2301         rbd_assert(pages != NULL);
2302         obj_request->copyup_pages = NULL;
2303         page_count = obj_request->copyup_page_count;
2304         rbd_assert(page_count);
2305         obj_request->copyup_page_count = 0;
2306         ceph_release_page_vector(pages, page_count);
2307
2308         /*
2309          * We want the transfer count to reflect the size of the
2310          * original write request.  There is no such thing as a
2311          * successful short write, so if the request was successful
2312          * we can just set it to the originally-requested length.
2313          */
2314         if (!obj_request->result)
2315                 obj_request->xferred = obj_request->length;
2316
2317         /* Finish up with the normal image object callback */
2318
2319         rbd_img_obj_callback(obj_request);
2320 }
2321
2322 static void
2323 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2324 {
2325         struct rbd_obj_request *orig_request;
2326         struct ceph_osd_request *osd_req;
2327         struct ceph_osd_client *osdc;
2328         struct rbd_device *rbd_dev;
2329         struct page **pages;
2330         u32 page_count;
2331         int img_result;
2332         u64 parent_length;
2333         u64 offset;
2334         u64 length;
2335
2336         rbd_assert(img_request_child_test(img_request));
2337
2338         /* First get what we need from the image request */
2339
2340         pages = img_request->copyup_pages;
2341         rbd_assert(pages != NULL);
2342         img_request->copyup_pages = NULL;
2343         page_count = img_request->copyup_page_count;
2344         rbd_assert(page_count);
2345         img_request->copyup_page_count = 0;
2346
2347         orig_request = img_request->obj_request;
2348         rbd_assert(orig_request != NULL);
2349         rbd_assert(obj_request_type_valid(orig_request->type));
2350         img_result = img_request->result;
2351         parent_length = img_request->length;
2352         rbd_assert(parent_length == img_request->xferred);
2353         rbd_img_request_put(img_request);
2354
2355         rbd_assert(orig_request->img_request);
2356         rbd_dev = orig_request->img_request->rbd_dev;
2357         rbd_assert(rbd_dev);
2358
2359         /*
2360          * If the overlap has become 0 (most likely because the
2361          * image has been flattened) we need to free the pages
2362          * and re-submit the original write request.
2363          */
2364         if (!rbd_dev->parent_overlap) {
2365                 struct ceph_osd_client *osdc;
2366
2367                 ceph_release_page_vector(pages, page_count);
2368                 osdc = &rbd_dev->rbd_client->client->osdc;
2369                 img_result = rbd_obj_request_submit(osdc, orig_request);
2370                 if (!img_result)
2371                         return;
2372         }
2373
2374         if (img_result)
2375                 goto out_err;
2376
2377         /*
2378          * The original osd request is of no use to us any more.
2379          * We need a new one that can hold the two ops in a copyup
2380          * request.  Allocate the new copyup osd request for the
2381          * original request, and release the old one.
2382          */
2383         img_result = -ENOMEM;
2384         osd_req = rbd_osd_req_create_copyup(orig_request);
2385         if (!osd_req)
2386                 goto out_err;
2387         rbd_osd_req_destroy(orig_request->osd_req);
2388         orig_request->osd_req = osd_req;
2389         orig_request->copyup_pages = pages;
2390         orig_request->copyup_page_count = page_count;
2391
2392         /* Initialize the copyup op */
2393
2394         osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2395         osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2396                                                 false, false);
2397
2398         /* Then the original write request op */
2399
2400         offset = orig_request->offset;
2401         length = orig_request->length;
2402         osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2403                                         offset, length, 0, 0);
2404         if (orig_request->type == OBJ_REQUEST_BIO)
2405                 osd_req_op_extent_osd_data_bio(osd_req, 1,
2406                                         orig_request->bio_list, length);
2407         else
2408                 osd_req_op_extent_osd_data_pages(osd_req, 1,
2409                                         orig_request->pages, length,
2410                                         offset & ~PAGE_MASK, false, false);
2411
2412         rbd_osd_req_format_write(orig_request);
2413
2414         /* All set, send it off. */
2415
2416         orig_request->callback = rbd_img_obj_copyup_callback;
2417         osdc = &rbd_dev->rbd_client->client->osdc;
2418         img_result = rbd_obj_request_submit(osdc, orig_request);
2419         if (!img_result)
2420                 return;
2421 out_err:
2422         /* Record the error code and complete the request */
2423
2424         orig_request->result = img_result;
2425         orig_request->xferred = 0;
2426         obj_request_done_set(orig_request);
2427         rbd_obj_request_complete(orig_request);
2428 }
2429
2430 /*
2431  * Read from the parent image the range of data that covers the
2432  * entire target of the given object request.  This is used for
2433  * satisfying a layered image write request when the target of an
2434  * object request from the image request does not exist.
2435  *
2436  * A page array big enough to hold the returned data is allocated
2437  * and supplied to rbd_img_request_fill() as the "data descriptor."
2438  * When the read completes, this page array will be transferred to
2439  * the original object request for the copyup operation.
2440  *
2441  * If an error occurs, record it as the result of the original
2442  * object request and mark it done so it gets completed.
2443  */
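/*
 * In outline (a summary of the code below and its callback):
 *   1. allocate a page array covering the object's full extent
 *      in the parent;
 *   2. issue a parent image request to read that extent;
 *   3. in the completion callback, rebuild the original request
 *      as a two-op copyup (method call + write) using those pages.
 */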
2444 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2445 {
2446         struct rbd_img_request *img_request = NULL;
2447         struct rbd_img_request *parent_request = NULL;
2448         struct rbd_device *rbd_dev;
2449         u64 img_offset;
2450         u64 length;
2451         struct page **pages = NULL;
2452         u32 page_count;
2453         int result;
2454
2455         rbd_assert(obj_request_img_data_test(obj_request));
2456         rbd_assert(obj_request_type_valid(obj_request->type));
2457
2458         img_request = obj_request->img_request;
2459         rbd_assert(img_request != NULL);
2460         rbd_dev = img_request->rbd_dev;
2461         rbd_assert(rbd_dev->parent != NULL);
2462
2463         /*
2464          * Determine the full byte range, within the child image, of
2465          * the object to which the original request was to be sent.
2466          */
2467         img_offset = obj_request->img_offset - obj_request->offset;
2468         length = (u64)1 << rbd_dev->header.obj_order;
2469
2470         /*
2471          * There is no defined parent data beyond the parent
2472          * overlap, so limit what we read at that boundary if
2473          * necessary.
2474          */
2475         if (img_offset + length > rbd_dev->parent_overlap) {
2476                 rbd_assert(img_offset < rbd_dev->parent_overlap);
2477                 length = rbd_dev->parent_overlap - img_offset;
2478         }
2479
2480         /*
2481          * Allocate a page array big enough to receive the data read
2482          * from the parent.
2483          */
2484         page_count = (u32)calc_pages_for(0, length);
2485         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2486         if (IS_ERR(pages)) {
2487                 result = PTR_ERR(pages);
2488                 pages = NULL;
2489                 goto out_err;
2490         }
2491
2492         result = -ENOMEM;
2493         parent_request = rbd_parent_request_create(obj_request,
2494                                                 img_offset, length);
2495         if (!parent_request)
2496                 goto out_err;
2497
2498         result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2499         if (result)
2500                 goto out_err;
2501         parent_request->copyup_pages = pages;
2502         parent_request->copyup_page_count = page_count;
2503
2504         parent_request->callback = rbd_img_obj_parent_read_full_callback;
2505         result = rbd_img_request_submit(parent_request);
2506         if (!result)
2507                 return 0;
2508
2509         parent_request->copyup_pages = NULL;
2510         parent_request->copyup_page_count = 0;
2511         parent_request->obj_request = NULL;
2512         rbd_obj_request_put(obj_request);
2513 out_err:
2514         if (pages)
2515                 ceph_release_page_vector(pages, page_count);
2516         if (parent_request)
2517                 rbd_img_request_put(parent_request);
2518         obj_request->result = result;
2519         obj_request->xferred = 0;
2520         obj_request_done_set(obj_request);
2521
2522         return result;
2523 }
2524
2525 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2526 {
2527         struct rbd_obj_request *orig_request;
2528         struct rbd_device *rbd_dev;
2529         int result;
2530
2531         rbd_assert(!obj_request_img_data_test(obj_request));
2532
2533         /*
2534          * All we need from the object request is the original
2535          * request and the result of the STAT op.  Grab those, then
2536          * we're done with the request.
2537          */
2538         orig_request = obj_request->obj_request;
2539         obj_request->obj_request = NULL;
2540         rbd_assert(orig_request);
2541         rbd_assert(orig_request->img_request);
2542
2543         result = obj_request->result;
2544         obj_request->result = 0;
2545
2546         dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2547                 obj_request, orig_request, result,
2548                 obj_request->xferred, obj_request->length);
2549         rbd_obj_request_put(obj_request);
2550
2551         /*
2552          * If the overlap has become 0 (most likely because the
2553          * image has been flattened) we need to re-submit the
2554          * original write request.
2555          */
2556         rbd_dev = orig_request->img_request->rbd_dev;
2557         if (!rbd_dev->parent_overlap) {
2558                 struct ceph_osd_client *osdc;
2559
2560                 osdc = &rbd_dev->rbd_client->client->osdc;
2561                 result = rbd_obj_request_submit(osdc, orig_request);
2562                 if (!result) {
2563                         rbd_obj_request_put(orig_request);
2564                         return;
2565                 }
2566         }
2566
2567         /*
2568          * Our only purpose here is to determine whether the object
2569          * exists, and we don't want to treat the non-existence as
2570          * an error.  If something else comes back, transfer the
2571          * error to the original request and complete it now.
2572          */
2573         if (!result) {
2574                 obj_request_existence_set(orig_request, true);
2575         } else if (result == -ENOENT) {
2576                 obj_request_existence_set(orig_request, false);
2577         } else if (result) {
2578                 orig_request->result = result;
2579                 goto out;
2580         }
2581
2582         /*
2583          * Resubmit the original request now that we have recorded
2584          * whether the target object exists.
2585          */
2586         orig_request->result = rbd_img_obj_request_submit(orig_request);
2587 out:
2588         if (orig_request->result)
2589                 rbd_obj_request_complete(orig_request);
2590         rbd_obj_request_put(orig_request);
2591 }
2592
2593 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2594 {
2595         struct rbd_obj_request *stat_request;
2596         struct rbd_device *rbd_dev;
2597         struct ceph_osd_client *osdc;
2598         struct page **pages = NULL;
2599         u32 page_count;
2600         size_t size;
2601         int ret;
2602
2603         /*
2604          * The response data for a STAT call consists of:
2605          *     le64 length;
2606          *     struct {
2607          *         le32 tv_sec;
2608          *         le32 tv_nsec;
2609          *     } mtime;
2610          */
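        /* 16 bytes in all, so a single page is always sufficient */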
2611         size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2612         page_count = (u32)calc_pages_for(0, size);
2613         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2614         if (IS_ERR(pages))
2615                 return PTR_ERR(pages);
2616
2617         ret = -ENOMEM;
2618         stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2619                                                         OBJ_REQUEST_PAGES);
2620         if (!stat_request)
2621                 goto out;
2622
2623         rbd_obj_request_get(obj_request);
2624         stat_request->obj_request = obj_request;
2625         stat_request->pages = pages;
2626         stat_request->page_count = page_count;
2627
2628         rbd_assert(obj_request->img_request);
2629         rbd_dev = obj_request->img_request->rbd_dev;
2630         stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2631                                                 stat_request);
2632         if (!stat_request->osd_req)
2633                 goto out;
2634         stat_request->callback = rbd_img_obj_exists_callback;
2635
2636         osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2637         osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2638                                         false, false);
2639         rbd_osd_req_format_read(stat_request);
2640
2641         osdc = &rbd_dev->rbd_client->client->osdc;
2642         ret = rbd_obj_request_submit(osdc, stat_request);
2643 out:
2644         if (ret)
2645                 rbd_obj_request_put(obj_request);
2646
2647         return ret;
2648 }
2649
2650 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2651 {
2652         struct rbd_img_request *img_request;
2653         struct rbd_device *rbd_dev;
2654         bool known;
2655
2656         rbd_assert(obj_request_img_data_test(obj_request));
2657
2658         img_request = obj_request->img_request;
2659         rbd_assert(img_request);
2660         rbd_dev = img_request->rbd_dev;
2661
2662         /*
2663          * Only writes to layered images need special handling.
2664          * Reads and non-layered writes are simple object requests.
2665          * Layered writes that start beyond the end of the overlap
2666          * with the parent have no parent data, so they too are
2667          * simple object requests.  Finally, if the target object is
2668          * known to already exist, its parent data has already been
2669          * copied, so a write to the object can also be handled as a
2670          * simple object request.
2671          */
2672         if (!img_request_write_test(img_request) ||
2673                 !img_request_layered_test(img_request) ||
2674                 rbd_dev->parent_overlap <= obj_request->img_offset ||
2675                 ((known = obj_request_known_test(obj_request)) &&
2676                         obj_request_exists_test(obj_request))) {
2677
2678                 struct rbd_device *rbd_dev;
2679                 struct ceph_osd_client *osdc;
2680
2681                 rbd_dev = obj_request->img_request->rbd_dev;
2682                 osdc = &rbd_dev->rbd_client->client->osdc;
2683
2684                 return rbd_obj_request_submit(osdc, obj_request);
2685         }
2686
2687         /*
2688          * It's a layered write.  The target object might exist but
2689          * we may not know that yet.  If we know it doesn't exist,
2690          * start by reading the data for the full target object from
2691          * the parent so we can use it for a copyup to the target.
2692          */
2693         if (known)
2694                 return rbd_img_obj_parent_read_full(obj_request);
2695
2696         /* We don't know whether the target exists.  Go find out. */
2697
2698         return rbd_img_obj_exists_submit(obj_request);
2699 }
2700
2701 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2702 {
2703         struct rbd_obj_request *obj_request;
2704         struct rbd_obj_request *next_obj_request;
2705
2706         dout("%s: img %p\n", __func__, img_request);
2707         for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2708                 int ret;
2709
2710                 ret = rbd_img_obj_request_submit(obj_request);
2711                 if (ret)
2712                         return ret;
2713         }
2714
2715         return 0;
2716 }
2717
2718 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2719 {
2720         struct rbd_obj_request *obj_request;
2721         struct rbd_device *rbd_dev;
2722         u64 obj_end;
2723         u64 img_xferred;
2724         int img_result;
2725
2726         rbd_assert(img_request_child_test(img_request));
2727
2728         /* First get what we need from the image request and release it */
2729
2730         obj_request = img_request->obj_request;
2731         img_xferred = img_request->xferred;
2732         img_result = img_request->result;
2733         rbd_img_request_put(img_request);
2734
2735         /*
2736          * If the overlap has become 0 (most likely because the
2737          * image has been flattened) we need to re-submit the
2738          * original request.
2739          */
2740         rbd_assert(obj_request);
2741         rbd_assert(obj_request->img_request);
2742         rbd_dev = obj_request->img_request->rbd_dev;
2743         if (!rbd_dev->parent_overlap) {
2744                 struct ceph_osd_client *osdc;
2745
2746                 osdc = &rbd_dev->rbd_client->client->osdc;
2747                 img_result = rbd_obj_request_submit(osdc, obj_request);
2748                 if (!img_result)
2749                         return;
2750         }
2751
2752         obj_request->result = img_result;
2753         if (obj_request->result)
2754                 goto out;
2755
2756         /*
2757          * We need to zero anything beyond the parent overlap
2758          * boundary.  Since rbd_img_obj_request_read_callback()
2759          * will zero anything beyond the end of a short read, an
2760          * easy way to do this is to pretend the data from the
2761          * parent came up short--ending at the overlap boundary.
2762          */
2763         rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2764         obj_end = obj_request->img_offset + obj_request->length;
2765         if (obj_end > rbd_dev->parent_overlap) {
2766                 u64 xferred = 0;
2767
2768                 if (obj_request->img_offset < rbd_dev->parent_overlap)
2769                         xferred = rbd_dev->parent_overlap -
2770                                         obj_request->img_offset;
2771
2772                 obj_request->xferred = min(img_xferred, xferred);
2773         } else {
2774                 obj_request->xferred = img_xferred;
2775         }
2776 out:
2777         rbd_img_obj_request_read_callback(obj_request);
2778         rbd_obj_request_complete(obj_request);
2779 }
2780
2781 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2782 {
2783         struct rbd_img_request *img_request;
2784         int result;
2785
2786         rbd_assert(obj_request_img_data_test(obj_request));
2787         rbd_assert(obj_request->img_request != NULL);
2788         rbd_assert(obj_request->result == (s32) -ENOENT);
2789         rbd_assert(obj_request_type_valid(obj_request->type));
2790
2791         /* rbd_read_finish(obj_request, obj_request->length); */
2792         img_request = rbd_parent_request_create(obj_request,
2793                                                 obj_request->img_offset,
2794                                                 obj_request->length);
2795         result = -ENOMEM;
2796         if (!img_request)
2797                 goto out_err;
2798
2799         if (obj_request->type == OBJ_REQUEST_BIO)
2800                 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2801                                                 obj_request->bio_list);
2802         else
2803                 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2804                                                 obj_request->pages);
2805         if (result)
2806                 goto out_err;
2807
2808         img_request->callback = rbd_img_parent_read_callback;
2809         result = rbd_img_request_submit(img_request);
2810         if (result)
2811                 goto out_err;
2812
2813         return;
2814 out_err:
2815         if (img_request)
2816                 rbd_img_request_put(img_request);
2817         obj_request->result = result;
2818         obj_request->xferred = 0;
2819         obj_request_done_set(obj_request);
2820 }
2821
2822 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2823 {
2824         struct rbd_obj_request *obj_request;
2825         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2826         int ret;
2827
2828         obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2829                                                         OBJ_REQUEST_NODATA);
2830         if (!obj_request)
2831                 return -ENOMEM;
2832
2833         ret = -ENOMEM;
2834         obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2835         if (!obj_request->osd_req)
2836                 goto out;
2837         obj_request->callback = rbd_obj_request_put;
2838
2839         osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2840                                         notify_id, 0, 0);
2841         rbd_osd_req_format_read(obj_request);
2842
2843         ret = rbd_obj_request_submit(osdc, obj_request);
2844 out:
2845         if (ret)
2846                 rbd_obj_request_put(obj_request);
2847
2848         return ret;
2849 }
2850
2851 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2852 {
2853         struct rbd_device *rbd_dev = (struct rbd_device *)data;
2854         int ret;
2855
2856         if (!rbd_dev)
2857                 return;
2858
2859         dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2860                 rbd_dev->header_name, (unsigned long long)notify_id,
2861                 (unsigned int)opcode);
2862         ret = rbd_dev_refresh(rbd_dev);
2863         if (ret)
2864                 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2865
2866         rbd_obj_notify_ack(rbd_dev, notify_id);
2867 }
2868
2869 /*
2870  * Request sync osd watch/unwatch.  The value of "start" determines
2871  * whether a watch request is being initiated or torn down.
2872  */
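/*
 * Typical usage elsewhere in this driver (an observation, not a
 * contract): the watch is registered when an image is mapped,
 *
 *     ret = rbd_dev_header_watch_sync(rbd_dev, true);
 *
 * and torn down with rbd_dev_header_watch_sync(rbd_dev, false)
 * when the device is unmapped.
 */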
2873 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2874 {
2875         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2876         struct rbd_obj_request *obj_request;
2877         int ret;
2878
2879         rbd_assert(start ^ !!rbd_dev->watch_event);
2880         rbd_assert(start ^ !!rbd_dev->watch_request);
2881
2882         if (start) {
2883                 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2884                                                 &rbd_dev->watch_event);
2885                 if (ret < 0)
2886                         return ret;
2887                 rbd_assert(rbd_dev->watch_event != NULL);
2888         }
2889
2890         ret = -ENOMEM;
2891         obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2892                                                         OBJ_REQUEST_NODATA);
2893         if (!obj_request)
2894                 goto out_cancel;
2895
2896         obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2897         if (!obj_request->osd_req)
2898                 goto out_cancel;
2899
2900         if (start)
2901                 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2902         else
2903                 ceph_osdc_unregister_linger_request(osdc,
2904                                         rbd_dev->watch_request->osd_req);
2905
2906         osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2907                                 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2908         rbd_osd_req_format_write(obj_request);
2909
2910         ret = rbd_obj_request_submit(osdc, obj_request);
2911         if (ret)
2912                 goto out_cancel;
2913         ret = rbd_obj_request_wait(obj_request);
2914         if (ret)
2915                 goto out_cancel;
2916         ret = obj_request->result;
2917         if (ret)
2918                 goto out_cancel;
2919
2920         /*
2921          * A watch request is set to linger, so the underlying osd
2922          * request won't go away until we unregister it.  We retain
2923          * a pointer to the object request during that time (in
2924          * rbd_dev->watch_request), so we'll keep a reference to
2925          * it.  We'll drop that reference (below) after we've
2926          * unregistered it.
2927          */
2928         if (start) {
2929                 rbd_dev->watch_request = obj_request;
2930
2931                 return 0;
2932         }
2933
2934         /* We have successfully torn down the watch request */
2935
2936         rbd_obj_request_put(rbd_dev->watch_request);
2937         rbd_dev->watch_request = NULL;
2938 out_cancel:
2939         /* Cancel the event if we're tearing down, or on error */
2940         ceph_osdc_cancel_event(rbd_dev->watch_event);
2941         rbd_dev->watch_event = NULL;
2942         if (obj_request)
2943                 rbd_obj_request_put(obj_request);
2944
2945         return ret;
2946 }
2947
2948 /*
2949  * Synchronous osd object method call.  Returns the number of bytes
2950  * returned in the inbound buffer, or a negative error code.
2951  */
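/*
 * Example call, as used elsewhere in this file to fetch an image's
 * size: the "get_size" method of class "rbd" takes a snapshot id as
 * outbound data and fills an order/size structure in response:
 *
 *     ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *                               "rbd", "get_size",
 *                               &snapid, sizeof (snapid),
 *                               &size_buf, sizeof (size_buf));
 */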
2952 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2953                              const char *object_name,
2954                              const char *class_name,
2955                              const char *method_name,
2956                              const void *outbound,
2957                              size_t outbound_size,
2958                              void *inbound,
2959                              size_t inbound_size)
2960 {
2961         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2962         struct rbd_obj_request *obj_request;
2963         struct page **pages;
2964         u32 page_count;
2965         int ret;
2966
2967         /*
2968          * Method calls are ultimately read operations.  The result
2969          * should be placed into the inbound buffer provided.  Callers
2970          * may also supply outbound data--parameters for the object
2971          * method.  Currently if such data is present it will be a
2972          * snapshot id.
2973          */
2974         page_count = (u32)calc_pages_for(0, inbound_size);
2975         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2976         if (IS_ERR(pages))
2977                 return PTR_ERR(pages);
2978
2979         ret = -ENOMEM;
2980         obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2981                                                         OBJ_REQUEST_PAGES);
2982         if (!obj_request)
2983                 goto out;
2984
2985         obj_request->pages = pages;
2986         obj_request->page_count = page_count;
2987
2988         obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2989         if (!obj_request->osd_req)
2990                 goto out;
2991
2992         osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2993                                         class_name, method_name);
2994         if (outbound_size) {
2995                 struct ceph_pagelist *pagelist;
2996
2997                 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2998                 if (!pagelist)
2999                         goto out;
3000
3001                 ceph_pagelist_init(pagelist);
3002                 ceph_pagelist_append(pagelist, outbound, outbound_size);
3003                 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3004                                                 pagelist);
3005         }
3006         osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3007                                         obj_request->pages, inbound_size,
3008                                         0, false, false);
3009         rbd_osd_req_format_read(obj_request);
3010
3011         ret = rbd_obj_request_submit(osdc, obj_request);
3012         if (ret)
3013                 goto out;
3014         ret = rbd_obj_request_wait(obj_request);
3015         if (ret)
3016                 goto out;
3017
3018         ret = obj_request->result;
3019         if (ret < 0)
3020                 goto out;
3021
3022         rbd_assert(obj_request->xferred < (u64)INT_MAX);
3023         ret = (int)obj_request->xferred;
3024         ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3025 out:
3026         if (obj_request)
3027                 rbd_obj_request_put(obj_request);
3028         else
3029                 ceph_release_page_vector(pages, page_count);
3030
3031         return ret;
3032 }
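
/*
 * Illustrative sketch, not driver code: a typical caller encodes its
 * parameters little-endian, passes them as the outbound buffer, and
 * decodes the class method's reply from the inbound buffer, as the
 * "get_size" call later in this file does:
 *
 *	__le64 snapid = cpu_to_le64(snap_id);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				"rbd", "get_size",
 *				&snapid, sizeof (snapid),
 *				&size_buf, sizeof (size_buf));
 *	if (ret < 0)
 *		return ret;
 *	if (ret < sizeof (size_buf))
 *		return -ERANGE;
 */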
3033
3034 static void rbd_request_fn(struct request_queue *q)
3035                 __releases(q->queue_lock) __acquires(q->queue_lock)
3036 {
3037         struct rbd_device *rbd_dev = q->queuedata;
3038         bool read_only = rbd_dev->mapping.read_only;
3039         struct request *rq;
3040         int result;
3041
3042         while ((rq = blk_fetch_request(q))) {
3043                 bool write_request = rq_data_dir(rq) == WRITE;
3044                 struct rbd_img_request *img_request;
3045                 u64 offset;
3046                 u64 length;
3047
3048                 /* Ignore any non-FS requests that filter through. */
3049
3050                 if (rq->cmd_type != REQ_TYPE_FS) {
3051                         dout("%s: non-fs request type %d\n", __func__,
3052                                 (int) rq->cmd_type);
3053                         __blk_end_request_all(rq, 0);
3054                         continue;
3055                 }
3056
3057                 /* Ignore/skip any zero-length requests */
3058
3059                 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3060                 length = (u64) blk_rq_bytes(rq);
3061
3062                 if (!length) {
3063                         dout("%s: zero-length request\n", __func__);
3064                         __blk_end_request_all(rq, 0);
3065                         continue;
3066                 }
3067
3068                 spin_unlock_irq(q->queue_lock);
3069
3070                 /* Disallow writes to a read-only device */
3071
3072                 if (write_request) {
3073                         result = -EROFS;
3074                         if (read_only)
3075                                 goto end_request;
3076                         rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3077                 }
3078
3079                 /*
3080                  * Quit early if the mapped snapshot no longer
3081                  * exists.  It's still possible the snapshot will
3082                  * have disappeared by the time our request arrives
3083                  * at the osd, but there's no sense in sending it if
3084                  * we already know.
3085                  */
3086                 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3087                         dout("request for non-existent snapshot\n");
3088                         rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3089                         result = -ENXIO;
3090                         goto end_request;
3091                 }
3092
3093                 result = -EINVAL;
3094                 if (offset && length > U64_MAX - offset + 1) {
3095                         rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3096                                 offset, length);
3097                         goto end_request;       /* Shouldn't happen */
3098                 }
3099
3100                 result = -EIO;
3101                 if (offset + length > rbd_dev->mapping.size) {
3102                         rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3103                                 offset, length, rbd_dev->mapping.size);
3104                         goto end_request;
3105                 }
3106
3107                 result = -ENOMEM;
3108                 img_request = rbd_img_request_create(rbd_dev, offset, length,
3109                                                         write_request);
3110                 if (!img_request)
3111                         goto end_request;
3112
3113                 img_request->rq = rq;
3114
3115                 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3116                                                 rq->bio);
3117                 if (!result)
3118                         result = rbd_img_request_submit(img_request);
3119                 if (result)
3120                         rbd_img_request_put(img_request);
3121 end_request:
3122                 spin_lock_irq(q->queue_lock);
3123                 if (result < 0) {
3124                         rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3125                                 write_request ? "write" : "read",
3126                                 length, offset, result);
3127
3128                         __blk_end_request_all(rq, result);
3129                 }
3130         }
3131 }
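
/*
 * Worked example, for illustration only: a filesystem read of 4 KiB
 * starting at sector 8 reaches the loop above with blk_rq_pos(rq) == 8
 * and blk_rq_bytes(rq) == 4096, so the image request it builds covers
 * byte offset 8 << SECTOR_SHIFT == 4096 and length 4096.
 */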
3132
3133 /*
3134  * A queue callback.  Ensures we don't create a bio that spans
3135  * multiple osd objects.  The one exception is single-page bios,
3136  * which we handle later, in bio_chain_clone_range().
3137  */
3138 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3139                           struct bio_vec *bvec)
3140 {
3141         struct rbd_device *rbd_dev = q->queuedata;
3142         sector_t sector_offset;
3143         sector_t sectors_per_obj;
3144         sector_t obj_sector_offset;
3145         int ret;
3146
3147         /*
3148          * Convert the partition-relative bio start sector to an
3149          * offset relative to the enclosing device, then find how
3150          * far into its rbd object that offset falls.
3151          */
3152         sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3153         sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3154         obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3155
3156         /*
3157          * Compute the number of bytes from that offset to the end
3158          * of the object.  Account for what's already used by the bio.
3159          */
3160         ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3161         if (ret > bmd->bi_size)
3162                 ret -= bmd->bi_size;
3163         else
3164                 ret = 0;
3165
3166         /*
3167          * Don't send back more than was asked for.  And if the bio
3168          * was empty, let the whole thing through because:  "Note
3169          * that a block device *must* allow a single page to be
3170          * added to an empty bio."
3171          */
3172         rbd_assert(bvec->bv_len <= PAGE_SIZE);
3173         if (ret > (int) bvec->bv_len || !bmd->bi_size)
3174                 ret = (int) bvec->bv_len;
3175
3176         return ret;
3177 }
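
/*
 * Worked example, for illustration only, assuming 4 MiB objects
 * (obj_order == 22): sectors_per_obj = 1 << (22 - 9) = 8192.  A bio
 * starting at device sector 8190 with 512 bytes already in it sits
 * two sectors short of an object boundary, so (2 << SECTOR_SHIFT) -
 * 512 = 512 further bytes are permitted, and a 4 KiB bvec would be
 * trimmed to 512 bytes.  Had the bio been empty, the full bvec would
 * have been allowed through.
 */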
3178
3179 static void rbd_free_disk(struct rbd_device *rbd_dev)
3180 {
3181         struct gendisk *disk = rbd_dev->disk;
3182
3183         if (!disk)
3184                 return;
3185
3186         rbd_dev->disk = NULL;
3187         if (disk->flags & GENHD_FL_UP) {
3188                 del_gendisk(disk);
3189                 if (disk->queue)
3190                         blk_cleanup_queue(disk->queue);
3191         }
3192         put_disk(disk);
3193 }
3194
3195 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3196                                 const char *object_name,
3197                                 u64 offset, u64 length, void *buf)
3198
3199 {
3200         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3201         struct rbd_obj_request *obj_request;
3202         struct page **pages = NULL;
3203         u32 page_count;
3204         size_t size;
3205         int ret;
3206
3207         page_count = (u32) calc_pages_for(offset, length);
3208         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3209         if (IS_ERR(pages))
3210                 return PTR_ERR(pages);
3211
3212         ret = -ENOMEM;
3213         obj_request = rbd_obj_request_create(object_name, offset, length,
3214                                                         OBJ_REQUEST_PAGES);
3215         if (!obj_request)
3216                 goto out;
3217
3218         obj_request->pages = pages;
3219         obj_request->page_count = page_count;
3220
3221         obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3222         if (!obj_request->osd_req)
3223                 goto out;
3224
3225         osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3226                                         offset, length, 0, 0);
3227         osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3228                                         obj_request->pages,
3229                                         obj_request->length,
3230                                         obj_request->offset & ~PAGE_MASK,
3231                                         false, false);
3232         rbd_osd_req_format_read(obj_request);
3233
3234         ret = rbd_obj_request_submit(osdc, obj_request);
3235         if (ret)
3236                 goto out;
3237         ret = rbd_obj_request_wait(obj_request);
3238         if (ret)
3239                 goto out;
3240
3241         ret = obj_request->result;
3242         if (ret < 0)
3243                 goto out;
3244
3245         rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3246         size = (size_t) obj_request->xferred;
3247         ceph_copy_from_page_vector(pages, buf, 0, size);
3248         rbd_assert(size <= (size_t)INT_MAX);
3249         ret = (int)size;
3250 out:
3251         if (obj_request)
3252                 rbd_obj_request_put(obj_request);
3253         else
3254                 ceph_release_page_vector(pages, page_count);
3255
3256         return ret;
3257 }
3258
3259 /*
3260  * Read the complete header for the given rbd device.  On successful
3261  * return, the rbd_dev->header field will contain up-to-date
3262  * information about the image.
3263  */
3264 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3265 {
3266         struct rbd_image_header_ondisk *ondisk = NULL;
3267         u32 snap_count = 0;
3268         u64 names_size = 0;
3269         u32 want_count;
3270         int ret;
3271
3272         /*
3273          * The complete header will include an array of its 64-bit
3274          * snapshot ids, followed by the names of those snapshots as
3275          * a contiguous block of NUL-terminated strings.  Note that
3276          * the number of snapshots could change by the time we read
3277          * it in, in which case we re-read it.
3278          */
3279         do {
3280                 size_t size;
3281
3282                 kfree(ondisk);
3283
3284                 size = sizeof (*ondisk);
3285                 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3286                 size += names_size;
3287                 ondisk = kmalloc(size, GFP_KERNEL);
3288                 if (!ondisk)
3289                         return -ENOMEM;
3290
3291                 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3292                                        0, size, ondisk);
3293                 if (ret < 0)
3294                         goto out;
3295                 if ((size_t)ret < size) {
3296                         ret = -ENXIO;
3297                         rbd_warn(rbd_dev, "short header read (want %zu got %d)",
3298                                 size, ret);
3299                         goto out;
3300                 }
3301                 if (!rbd_dev_ondisk_valid(ondisk)) {
3302                         ret = -ENXIO;
3303                         rbd_warn(rbd_dev, "invalid header");
3304                         goto out;
3305                 }
3306
3307                 names_size = le64_to_cpu(ondisk->snap_names_len);
3308                 want_count = snap_count;
3309                 snap_count = le32_to_cpu(ondisk->snap_count);
3310         } while (snap_count != want_count);
3311
3312         ret = rbd_header_from_disk(rbd_dev, ondisk);
3313 out:
3314         kfree(ondisk);
3315
3316         return ret;
3317 }
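
/*
 * For illustration, the complete v1 header that the loop above reads
 * is laid out on disk as (sizes inferred from the allocation above):
 *
 *	struct rbd_image_header_ondisk		fixed-size base header
 *	struct rbd_image_snap_ondisk[snap_count]	one entry per snapshot
 *	char names[names_size]			NUL-terminated snapshot names
 *
 * Both snap_count and names_size are only known once the base header
 * has been read, which is why a first, smaller read may be repeated.
 */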
3318
3319 /*
3320  * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3321  * has disappeared from the (just updated) snapshot context.
3322  */
3323 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3324 {
3325         u64 snap_id;
3326
3327         if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3328                 return;
3329
3330         snap_id = rbd_dev->spec->snap_id;
3331         if (snap_id == CEPH_NOSNAP)
3332                 return;
3333
3334         if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3335                 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3336 }
3337
3338 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3339 {
3340         u64 mapping_size;
3341         int ret;
3342
3343         rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3344         mapping_size = rbd_dev->mapping.size;
3345         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3346         if (rbd_dev->image_format == 1)
3347                 ret = rbd_dev_v1_header_info(rbd_dev);
3348         else
3349                 ret = rbd_dev_v2_header_info(rbd_dev);
3350
3351         /* If it's a mapped snapshot, validate its EXISTS flag */
3352
3353         rbd_exists_validate(rbd_dev);
3354         mutex_unlock(&ctl_mutex);
3355         if (mapping_size != rbd_dev->mapping.size) {
3356                 sector_t size;
3357
3358                 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3359                 dout("setting size to %llu sectors", (unsigned long long)size);
3360                 set_capacity(rbd_dev->disk, size);
3361                 revalidate_disk(rbd_dev->disk);
3362         }
3363
3364         return ret;
3365 }
3366
3367 static int rbd_init_disk(struct rbd_device *rbd_dev)
3368 {
3369         struct gendisk *disk;
3370         struct request_queue *q;
3371         u64 segment_size;
3372
3373         /* create gendisk info */
3374         disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3375         if (!disk)
3376                 return -ENOMEM;
3377
3378         snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3379                  rbd_dev->dev_id);
3380         disk->major = rbd_dev->major;
3381         disk->first_minor = 0;
3382         disk->fops = &rbd_bd_ops;
3383         disk->private_data = rbd_dev;
3384
3385         q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3386         if (!q)
3387                 goto out_disk;
3388
3389         /* We use the default size, but let's be explicit about it. */
3390         blk_queue_physical_block_size(q, SECTOR_SIZE);
3391
3392         /* set io sizes to object size */
3393         segment_size = rbd_obj_bytes(&rbd_dev->header);
3394         blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3395         blk_queue_max_segment_size(q, segment_size);
3396         blk_queue_io_min(q, segment_size);
3397         blk_queue_io_opt(q, segment_size);
3398
3399         blk_queue_merge_bvec(q, rbd_merge_bvec);
3400         disk->queue = q;
3401
3402         q->queuedata = rbd_dev;
3403
3404         rbd_dev->disk = disk;
3405
3406         return 0;
3407 out_disk:
3408         put_disk(disk);
3409
3410         return -ENOMEM;
3411 }
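
/*
 * Worked example, for illustration only: with the default 4 MiB
 * objects, segment_size == 4194304 above, so the queue advertises
 * max_hw_sectors = 4194304 / 512 = 8192 sectors and a 4 MiB maximum
 * segment size, making a single rbd object the natural I/O unit.
 */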
3412
3413 /*
3414   sysfs
3415 */
3416
3417 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3418 {
3419         return container_of(dev, struct rbd_device, dev);
3420 }
3421
3422 static ssize_t rbd_size_show(struct device *dev,
3423                              struct device_attribute *attr, char *buf)
3424 {
3425         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3426
3427         return sprintf(buf, "%llu\n",
3428                 (unsigned long long)rbd_dev->mapping.size);
3429 }
3430
3431 /*
3432  * Note this shows the features for whatever's mapped, which is not
3433  * necessarily the base image.
3434  */
3435 static ssize_t rbd_features_show(struct device *dev,
3436                              struct device_attribute *attr, char *buf)
3437 {
3438         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3439
3440         return sprintf(buf, "0x%016llx\n",
3441                         (unsigned long long)rbd_dev->mapping.features);
3442 }
3443
3444 static ssize_t rbd_major_show(struct device *dev,
3445                               struct device_attribute *attr, char *buf)
3446 {
3447         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3448
3449         if (rbd_dev->major)
3450                 return sprintf(buf, "%d\n", rbd_dev->major);
3451
3452         return sprintf(buf, "(none)\n");
3454 }
3455
3456 static ssize_t rbd_client_id_show(struct device *dev,
3457                                   struct device_attribute *attr, char *buf)
3458 {
3459         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3460
3461         return sprintf(buf, "client%lld\n",
3462                         ceph_client_id(rbd_dev->rbd_client->client));
3463 }
3464
3465 static ssize_t rbd_pool_show(struct device *dev,
3466                              struct device_attribute *attr, char *buf)
3467 {
3468         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3469
3470         return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3471 }
3472
3473 static ssize_t rbd_pool_id_show(struct device *dev,
3474                              struct device_attribute *attr, char *buf)
3475 {
3476         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3477
3478         return sprintf(buf, "%llu\n",
3479                         (unsigned long long) rbd_dev->spec->pool_id);
3480 }
3481
3482 static ssize_t rbd_name_show(struct device *dev,
3483                              struct device_attribute *attr, char *buf)
3484 {
3485         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3486
3487         if (rbd_dev->spec->image_name)
3488                 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3489
3490         return sprintf(buf, "(unknown)\n");
3491 }
3492
3493 static ssize_t rbd_image_id_show(struct device *dev,
3494                              struct device_attribute *attr, char *buf)
3495 {
3496         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3497
3498         return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3499 }
3500
3501 /*
3502  * Shows the name of the currently-mapped snapshot (or
3503  * RBD_SNAP_HEAD_NAME for the base image).
3504  */
3505 static ssize_t rbd_snap_show(struct device *dev,
3506                              struct device_attribute *attr,
3507                              char *buf)
3508 {
3509         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3510
3511         return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3512 }
3513
3514 /*
3515  * For an rbd v2 image, shows the pool id, image id, and snapshot id
3516  * for the parent image.  If there is no parent, simply shows
3517  * "(no parent image)".
3518  */
3519 static ssize_t rbd_parent_show(struct device *dev,
3520                              struct device_attribute *attr,
3521                              char *buf)
3522 {
3523         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3524         struct rbd_spec *spec = rbd_dev->parent_spec;
3525         int count;
3526         char *bufp = buf;
3527
3528         if (!spec)
3529                 return sprintf(buf, "(no parent image)\n");
3530
3531         count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3532                         (unsigned long long) spec->pool_id, spec->pool_name);
3533         if (count < 0)
3534                 return count;
3535         bufp += count;
3536
3537         count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3538                         spec->image_name ? spec->image_name : "(unknown)");
3539         if (count < 0)
3540                 return count;
3541         bufp += count;
3542
3543         count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3544                         (unsigned long long) spec->snap_id, spec->snap_name);
3545         if (count < 0)
3546                 return count;
3547         bufp += count;
3548
3549         count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3550         if (count < 0)
3551                 return count;
3552         bufp += count;
3553
3554         return (ssize_t) (bufp - buf);
3555 }
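
/*
 * Illustrative sample of the output assembled above (all values are
 * hypothetical):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 1f68552ae8944a
 *	image_name parent-image
 *	snap_id 4
 *	snap_name snap1
 *	overlap 10737418240
 */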
3556
3557 static ssize_t rbd_image_refresh(struct device *dev,
3558                                  struct device_attribute *attr,
3559                                  const char *buf,
3560                                  size_t size)
3561 {
3562         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3563         int ret;
3564
3565         ret = rbd_dev_refresh(rbd_dev);
3566         if (ret)
3567                 rbd_warn(rbd_dev, "manual header refresh error (%d)\n", ret);
3568
3569         return ret < 0 ? ret : size;
3570 }
3571
3572 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3573 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3574 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3575 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3576 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3577 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3578 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3579 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3580 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3581 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3582 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3583
3584 static struct attribute *rbd_attrs[] = {
3585         &dev_attr_size.attr,
3586         &dev_attr_features.attr,
3587         &dev_attr_major.attr,
3588         &dev_attr_client_id.attr,
3589         &dev_attr_pool.attr,
3590         &dev_attr_pool_id.attr,
3591         &dev_attr_name.attr,
3592         &dev_attr_image_id.attr,
3593         &dev_attr_current_snap.attr,
3594         &dev_attr_parent.attr,
3595         &dev_attr_refresh.attr,
3596         NULL
3597 };
3598
3599 static struct attribute_group rbd_attr_group = {
3600         .attrs = rbd_attrs,
3601 };
3602
3603 static const struct attribute_group *rbd_attr_groups[] = {
3604         &rbd_attr_group,
3605         NULL
3606 };
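
/*
 * Illustrative sketch, not driver code: these attributes appear under
 * /sys/bus/rbd/devices/<id>/, since each device is registered on the
 * rbd bus with its dev_id as its name (see rbd_bus_add_dev() below).
 * For a hypothetical device 0:
 *
 *	cat /sys/bus/rbd/devices/0/size
 *	echo 1 > /sys/bus/rbd/devices/0/refresh
 */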
3607
3608 static void rbd_sysfs_dev_release(struct device *dev)
3609 {
3610 }
3611
3612 static struct device_type rbd_device_type = {
3613         .name           = "rbd",
3614         .groups         = rbd_attr_groups,
3615         .release        = rbd_sysfs_dev_release,
3616 };
3617
3618 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3619 {
3620         kref_get(&spec->kref);
3621
3622         return spec;
3623 }
3624
3625 static void rbd_spec_free(struct kref *kref);
3626 static void rbd_spec_put(struct rbd_spec *spec)
3627 {
3628         if (spec)
3629                 kref_put(&spec->kref, rbd_spec_free);
3630 }
3631
3632 static struct rbd_spec *rbd_spec_alloc(void)
3633 {
3634         struct rbd_spec *spec;
3635
3636         spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3637         if (!spec)
3638                 return NULL;
3639         kref_init(&spec->kref);
3640
3641         return spec;
3642 }
3643
3644 static void rbd_spec_free(struct kref *kref)
3645 {
3646         struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3647
3648         kfree(spec->pool_name);
3649         kfree(spec->image_id);
3650         kfree(spec->image_name);
3651         kfree(spec->snap_name);
3652         kfree(spec);
3653 }
3654
3655 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3656                                 struct rbd_spec *spec)
3657 {
3658         struct rbd_device *rbd_dev;
3659
3660         rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3661         if (!rbd_dev)
3662                 return NULL;
3663
3664         spin_lock_init(&rbd_dev->lock);
3665         rbd_dev->flags = 0;
3666         atomic_set(&rbd_dev->parent_ref, 0);
3667         INIT_LIST_HEAD(&rbd_dev->node);
3668         init_rwsem(&rbd_dev->header_rwsem);
3669
3670         rbd_dev->spec = spec;
3671         rbd_dev->rbd_client = rbdc;
3672
3673         /* Initialize the layout used for all rbd requests */
3674
3675         rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3676         rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3677         rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3678         rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3679
3680         return rbd_dev;
3681 }
3682
3683 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3684 {
3685         rbd_put_client(rbd_dev->rbd_client);
3686         rbd_spec_put(rbd_dev->spec);
3687         kfree(rbd_dev);
3688 }
3689
3690 /*
3691  * Get the size and object order for an image snapshot, or, if
3692  * snap_id is CEPH_NOSNAP, get this information for the base
3693  * image.
3694  */
3695 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3696                                 u8 *order, u64 *snap_size)
3697 {
3698         __le64 snapid = cpu_to_le64(snap_id);
3699         int ret;
3700         struct {
3701                 u8 order;
3702                 __le64 size;
3703         } __attribute__ ((packed)) size_buf = { 0 };
3704
3705         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3706                                 "rbd", "get_size",
3707                                 &snapid, sizeof (snapid),
3708                                 &size_buf, sizeof (size_buf));
3709         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3710         if (ret < 0)
3711                 return ret;
3712         if (ret < sizeof (size_buf))
3713                 return -ERANGE;
3714
3715         if (order) {
3716                 *order = size_buf.order;
3717                 dout("  order %u\n", (unsigned int)*order);
3718         }
3719         *snap_size = le64_to_cpu(size_buf.size);
3720         dout("  snap_id 0x%016llx snap_size = %llu\n",
3721                 (unsigned long long)snap_id, (unsigned long long)*snap_size);
3722
3723         return 0;
3724 }
3725
3726 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3727 {
3728         return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3729                                         &rbd_dev->header.obj_order,
3730                                         &rbd_dev->header.image_size);
3731 }
3732
3733 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3734 {
3735         void *reply_buf;
3736         int ret;
3737         void *p;
3738
3739         reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3740         if (!reply_buf)
3741                 return -ENOMEM;
3742
3743         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3744                                 "rbd", "get_object_prefix", NULL, 0,
3745                                 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3746         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3747         if (ret < 0)
3748                 goto out;
3749
3750         p = reply_buf;
3751         rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3752                                                 p + ret, NULL, GFP_NOIO);
3753         ret = 0;
3754
3755         if (IS_ERR(rbd_dev->header.object_prefix)) {
3756                 ret = PTR_ERR(rbd_dev->header.object_prefix);
3757                 rbd_dev->header.object_prefix = NULL;
3758         } else {
3759                 dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
3760         }
3761 out:
3762         kfree(reply_buf);
3763
3764         return ret;
3765 }
3766
3767 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3768                 u64 *snap_features)
3769 {
3770         __le64 snapid = cpu_to_le64(snap_id);
3771         struct {
3772                 __le64 features;
3773                 __le64 incompat;
3774         } __attribute__ ((packed)) features_buf = { 0 };
3775         u64 incompat;
3776         int ret;
3777
3778         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3779                                 "rbd", "get_features",
3780                                 &snapid, sizeof (snapid),
3781                                 &features_buf, sizeof (features_buf));
3782         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3783         if (ret < 0)
3784                 return ret;
3785         if (ret < sizeof (features_buf))
3786                 return -ERANGE;
3787
3788         incompat = le64_to_cpu(features_buf.incompat);
3789         if (incompat & ~RBD_FEATURES_SUPPORTED)
3790                 return -ENXIO;
3791
3792         *snap_features = le64_to_cpu(features_buf.features);
3793
3794         dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3795                 (unsigned long long)snap_id,
3796                 (unsigned long long)*snap_features,
3797                 (unsigned long long)le64_to_cpu(features_buf.incompat));
3798
3799         return 0;
3800 }
3801
3802 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3803 {
3804         return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3805                                                 &rbd_dev->header.features);
3806 }
3807
3808 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3809 {
3810         struct rbd_spec *parent_spec;
3811         size_t size;
3812         void *reply_buf = NULL;
3813         __le64 snapid;
3814         void *p;
3815         void *end;
3816         u64 pool_id;
3817         char *image_id;
3818         u64 overlap;
3819         int ret;
3820
3821         parent_spec = rbd_spec_alloc();
3822         if (!parent_spec)
3823                 return -ENOMEM;
3824
3825         size = sizeof (__le64) +                                /* pool_id */
3826                 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +        /* image_id */
3827                 sizeof (__le64) +                               /* snap_id */
3828                 sizeof (__le64);                                /* overlap */
3829         reply_buf = kmalloc(size, GFP_KERNEL);
3830         if (!reply_buf) {
3831                 ret = -ENOMEM;
3832                 goto out_err;
3833         }
3834
3835         snapid = cpu_to_le64(CEPH_NOSNAP);
3836         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3837                                 "rbd", "get_parent",
3838                                 &snapid, sizeof (snapid),
3839                                 reply_buf, size);
3840         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3841         if (ret < 0)
3842                 goto out_err;
3843
3844         p = reply_buf;
3845         end = reply_buf + ret;
3846         ret = -ERANGE;
3847         ceph_decode_64_safe(&p, end, pool_id, out_err);
3848         if (pool_id == CEPH_NOPOOL) {
3849                 /*
3850                  * Either the parent never existed, or we have a
3851                  * record of it but the image got flattened so it no
3852                  * longer has a parent.  When the parent of a
3853                  * layered image disappears we immediately set the
3854                  * overlap to 0.  The effect of this is that all new
3855                  * requests will be treated as if the image had no
3856                  * parent.
3857                  */
3858                 if (rbd_dev->parent_overlap) {
3859                         rbd_dev->parent_overlap = 0;
3860                         smp_mb();
3861                         rbd_dev_parent_put(rbd_dev);
3862                         pr_info("%s: clone image has been flattened\n",
3863                                 rbd_dev->disk->disk_name);
3864                 }
3865
3866                 goto out;       /* No parent?  No problem. */
3867         }
3868
3869         /* The ceph file layout needs to fit pool id in 32 bits */
3870
3871         ret = -EIO;
3872         if (pool_id > (u64)U32_MAX) {
3873                 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3874                         (unsigned long long)pool_id, U32_MAX);
3875                 goto out_err;
3876         }
3877         parent_spec->pool_id = pool_id;
3878
3879         image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3880         if (IS_ERR(image_id)) {
3881                 ret = PTR_ERR(image_id);
3882                 goto out_err;
3883         }
3884         parent_spec->image_id = image_id;
3885         ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3886         ceph_decode_64_safe(&p, end, overlap, out_err);
3887
3888         if (overlap) {
3889                 rbd_spec_put(rbd_dev->parent_spec);
3890                 rbd_dev->parent_spec = parent_spec;
3891                 parent_spec = NULL;     /* rbd_dev now owns this */
3892                 rbd_dev->parent_overlap = overlap;
3893         } else {
3894                 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3895         }
3896 out:
3897         ret = 0;
3898 out_err:
3899         kfree(reply_buf);
3900         rbd_spec_put(parent_spec);
3901
3902         return ret;
3903 }
3904
3905 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3906 {
3907         struct {
3908                 __le64 stripe_unit;
3909                 __le64 stripe_count;
3910         } __attribute__ ((packed)) striping_info_buf = { 0 };
3911         size_t size = sizeof (striping_info_buf);
3912         void *p;
3913         u64 obj_size;
3914         u64 stripe_unit;
3915         u64 stripe_count;
3916         int ret;
3917
3918         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3919                                 "rbd", "get_stripe_unit_count", NULL, 0,
3920                                 (char *)&striping_info_buf, size);
3921         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3922         if (ret < 0)
3923                 return ret;
3924         if (ret < size)
3925                 return -ERANGE;
3926
3927         /*
3928          * We don't actually support the "fancy striping" feature
3929          * (STRIPINGV2) yet, but if the striping sizes are the
3930          * defaults the behavior is the same as before.  So find
3931          * out, and only fail if the image has non-default values.
3932          */
3933         ret = -EINVAL;
3934         obj_size = (u64)1 << rbd_dev->header.obj_order;
3935         p = &striping_info_buf;
3936         stripe_unit = ceph_decode_64(&p);
3937         if (stripe_unit != obj_size) {
3938                 rbd_warn(rbd_dev, "unsupported stripe unit "
3939                                 "(got %llu want %llu)",
3940                                 stripe_unit, obj_size);
3941                 return -EINVAL;
3942         }
3943         stripe_count = ceph_decode_64(&p);
3944         if (stripe_count != 1) {
3945                 rbd_warn(rbd_dev, "unsupported stripe count "
3946                                 "(got %llu want 1)", stripe_count);
3947                 return -EINVAL;
3948         }
3949         rbd_dev->header.stripe_unit = stripe_unit;
3950         rbd_dev->header.stripe_count = stripe_count;
3951
3952         return 0;
3953 }
3954
3955 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3956 {
3957         size_t image_id_size;
3958         char *image_id;
3959         void *p;
3960         void *end;
3961         size_t size;
3962         void *reply_buf = NULL;
3963         size_t len = 0;
3964         char *image_name = NULL;
3965         int ret;
3966
3967         rbd_assert(!rbd_dev->spec->image_name);
3968
3969         len = strlen(rbd_dev->spec->image_id);
3970         image_id_size = sizeof (__le32) + len;
3971         image_id = kmalloc(image_id_size, GFP_KERNEL);
3972         if (!image_id)
3973                 return NULL;
3974
3975         p = image_id;
3976         end = image_id + image_id_size;
3977         ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3978
3979         size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3980         reply_buf = kmalloc(size, GFP_KERNEL);
3981         if (!reply_buf)
3982                 goto out;
3983
3984         ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3985                                 "rbd", "dir_get_name",
3986                                 image_id, image_id_size,
3987                                 reply_buf, size);
3988         if (ret < 0)
3989                 goto out;
3990         p = reply_buf;
3991         end = reply_buf + ret;
3992
3993         image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3994         if (IS_ERR(image_name))
3995                 image_name = NULL;
3996         else
3997                 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3998 out:
3999         kfree(reply_buf);
4000         kfree(image_id);
4001
4002         return image_name;
4003 }
4004
4005 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4006 {
4007         struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4008         const char *snap_name;
4009         u32 which = 0;
4010
4011         /* Skip over names until we find the one we are looking for */
4012
4013         snap_name = rbd_dev->header.snap_names;
4014         while (which < snapc->num_snaps) {
4015                 if (!strcmp(name, snap_name))
4016                         return snapc->snaps[which];
4017                 snap_name += strlen(snap_name) + 1;
4018                 which++;
4019         }
4020         return CEPH_NOSNAP;
4021 }
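
/*
 * Worked example, for illustration only: with two snapshots named
 * "foo" and "bar", header.snap_names holds "foo\0bar\0" and
 * snapc->snaps[] holds their ids in the same order, so looking up
 * "bar" above skips strlen("foo") + 1 bytes and returns
 * snapc->snaps[1].
 */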
4022
4023 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4024 {
4025         struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4026         u32 which;
4027         bool found = false;
4028         u64 snap_id;
4029
4030         for (which = 0; !found && which < snapc->num_snaps; which++) {
4031                 const char *snap_name;
4032
4033                 snap_id = snapc->snaps[which];
4034                 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4035                 if (IS_ERR(snap_name))
4036                         break;
4037                 found = !strcmp(name, snap_name);
4038                 kfree(snap_name);
4039         }
4040         return found ? snap_id : CEPH_NOSNAP;
4041 }
4042
4043 /*
4044  * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4045  * no snapshot by that name is found, or if an error occurs.
4046  */
4047 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4048 {
4049         if (rbd_dev->image_format == 1)
4050                 return rbd_v1_snap_id_by_name(rbd_dev, name);
4051
4052         return rbd_v2_snap_id_by_name(rbd_dev, name);
4053 }
4054
4055 /*
4056  * When an rbd image has a parent image, it is identified by the
4057  * pool, image, and snapshot ids (not names).  This function fills
4058  * in the names for those ids.  (It's OK if we can't figure out the
4059  * name for an image id, but the pool and snapshot ids should always
4060  * exist and have names.)  All names in an rbd spec are dynamically
4061  * allocated.
4062  *
4063  * When an image being mapped (not a parent) is probed, we have the
4064  * pool name and pool id, image name and image id, and the snapshot
4065  * name.  The only thing we're missing is the snapshot id.
4066  */
4067 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4068 {
4069         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4070         struct rbd_spec *spec = rbd_dev->spec;
4071         const char *pool_name;
4072         const char *image_name;
4073         const char *snap_name;
4074         int ret;
4075
4076         /*
4077          * An image being mapped will have the pool name (etc.), but
4078          * we need to look up the snapshot id.
4079          */
4080         if (spec->pool_name) {
4081                 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4082                         u64 snap_id;
4083
4084                         snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4085                         if (snap_id == CEPH_NOSNAP)
4086                                 return -ENOENT;
4087                         spec->snap_id = snap_id;
4088                 } else {
4089                         spec->snap_id = CEPH_NOSNAP;
4090                 }
4091
4092                 return 0;
4093         }
4094
4095         /* Get the pool name; we have to make our own copy of this */
4096
4097         pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4098         if (!pool_name) {
4099                 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4100                 return -EIO;
4101         }
4102         pool_name = kstrdup(pool_name, GFP_KERNEL);
4103         if (!pool_name)
4104                 return -ENOMEM;
4105
4106         /* Fetch the image name; tolerate failure here */
4107
4108         image_name = rbd_dev_image_name(rbd_dev);
4109         if (!image_name)
4110                 rbd_warn(rbd_dev, "unable to get image name");
4111
4112         /* Look up the snapshot name, and make a copy */
4113
4114         snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4115         if (!snap_name) {
4116                 ret = -ENOMEM;
4117                 goto out_err;
4118         }
4119
4120         spec->pool_name = pool_name;
4121         spec->image_name = image_name;
4122         spec->snap_name = snap_name;
4123
4124         return 0;
4125 out_err:
4126         kfree(image_name);
4127         kfree(pool_name);
4128
4129         return ret;
4130 }
4131
4132 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4133 {
4134         size_t size;
4135         int ret;
4136         void *reply_buf;
4137         void *p;
4138         void *end;
4139         u64 seq;
4140         u32 snap_count;
4141         struct ceph_snap_context *snapc;
4142         u32 i;
4143
4144         /*
4145          * We'll need room for the seq value (maximum snapshot id),
4146          * snapshot count, and array of that many snapshot ids.
4147          * For now we have a fixed upper limit on the number we're
4148          * prepared to receive.
4149          */
4150         size = sizeof (__le64) + sizeof (__le32) +
4151                         RBD_MAX_SNAP_COUNT * sizeof (__le64);
4152         reply_buf = kzalloc(size, GFP_KERNEL);
4153         if (!reply_buf)
4154                 return -ENOMEM;
4155
4156         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4157                                 "rbd", "get_snapcontext", NULL, 0,
4158                                 reply_buf, size);
4159         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4160         if (ret < 0)
4161                 goto out;
4162
4163         p = reply_buf;
4164         end = reply_buf + ret;
4165         ret = -ERANGE;
4166         ceph_decode_64_safe(&p, end, seq, out);
4167         ceph_decode_32_safe(&p, end, snap_count, out);
4168
4169         /*
4170          * Make sure the reported number of snapshot ids wouldn't go
4171          * beyond the end of our buffer.  But before checking that,
4172          * make sure the computed size of the snapshot context we
4173          * allocate is representable in a size_t.
4174          */
4175         if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4176                                  / sizeof (u64)) {
4177                 ret = -EINVAL;
4178                 goto out;
4179         }
4180         if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4181                 goto out;
4182         ret = 0;
4183
4184         snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4185         if (!snapc) {
4186                 ret = -ENOMEM;
4187                 goto out;
4188         }
4189         snapc->seq = seq;
4190         for (i = 0; i < snap_count; i++)
4191                 snapc->snaps[i] = ceph_decode_64(&p);
4192
4193         ceph_put_snap_context(rbd_dev->header.snapc);
4194         rbd_dev->header.snapc = snapc;
4195
4196         dout("  snap context seq = %llu, snap_count = %u\n",
4197                 (unsigned long long)seq, (unsigned int)snap_count);
4198 out:
4199         kfree(reply_buf);
4200
4201         return ret;
4202 }
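
/*
 * Worked sizing example, for illustration only: the reply buffer
 * allocated at the top of the function above is sizeof (__le64) +
 * sizeof (__le32) + RBD_MAX_SNAP_COUNT * sizeof (__le64) = 8 + 4 +
 * 510 * 8 = 4092 bytes, so even a maximal snapshot context fits
 * within a single 4 KiB page.
 */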
4203
4204 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4205                                         u64 snap_id)
4206 {
4207         size_t size;
4208         void *reply_buf;
4209         __le64 snapid;
4210         int ret;
4211         void *p;
4212         void *end;
4213         char *snap_name;
4214
4215         size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4216         reply_buf = kmalloc(size, GFP_KERNEL);
4217         if (!reply_buf)
4218                 return ERR_PTR(-ENOMEM);
4219
4220         snapid = cpu_to_le64(snap_id);
4221         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4222                                 "rbd", "get_snapshot_name",
4223                                 &snapid, sizeof (snapid),
4224                                 reply_buf, size);
4225         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4226         if (ret < 0) {
4227                 snap_name = ERR_PTR(ret);
4228                 goto out;
4229         }
4230
4231         p = reply_buf;
4232         end = reply_buf + ret;
4233         snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4234         if (IS_ERR(snap_name))
4235                 goto out;
4236
4237         dout("  snap_id 0x%016llx snap_name = %s\n",
4238                 (unsigned long long)snap_id, snap_name);
4239 out:
4240         kfree(reply_buf);
4241
4242         return snap_name;
4243 }
4244
4245 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4246 {
4247         bool first_time = rbd_dev->header.object_prefix == NULL;
4248         int ret;
4249
4250         down_write(&rbd_dev->header_rwsem);
4251
4252         ret = rbd_dev_v2_image_size(rbd_dev);
4253         if (ret)
4254                 goto out;
4255
4256         if (first_time) {
4257                 ret = rbd_dev_v2_header_onetime(rbd_dev);
4258                 if (ret)
4259                         goto out;
4260         }
4261
4262         /*
4263          * If the image supports layering, get the parent info.  We
4264          * need to probe the first time regardless.  Thereafter we
4265          * only need to if there's a parent, to see if it has
4266          * disappeared due to the mapped image getting flattened.
4267          */
4268         if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4269                         (first_time || rbd_dev->parent_spec)) {
4270                 bool warn;
4271
4272                 ret = rbd_dev_v2_parent_info(rbd_dev);
4273                 if (ret)
4274                         goto out;
4275
4276                 /*
4277                  * Print a warning if this is the initial probe and
4278                  * the image has a parent.  Don't print it if the
4279                  * image now being probed is itself a parent.  We
4280                  * can tell at this point because we won't know its
4281                  * pool name yet (just its pool id).
4282                  */
4283                 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4284                 if (first_time && warn)
4285                         rbd_warn(rbd_dev, "WARNING: kernel layering "
4286                                         "is EXPERIMENTAL!");
4287         }
4288
4289         if (rbd_dev->spec->snap_id == CEPH_NOSNAP &&
4290             rbd_dev->mapping.size != rbd_dev->header.image_size)
4291                 rbd_dev->mapping.size = rbd_dev->header.image_size;
4292
4293         ret = rbd_dev_v2_snap_context(rbd_dev);
4294         dout("rbd_dev_v2_snap_context returned %d\n", ret);
4295 out:
4296         up_write(&rbd_dev->header_rwsem);
4297
4298         return ret;
4299 }
4300
4301 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4302 {
4303         struct device *dev;
4304         int ret;
4305
4306         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4307
4308         dev = &rbd_dev->dev;
4309         dev->bus = &rbd_bus_type;
4310         dev->type = &rbd_device_type;
4311         dev->parent = &rbd_root_dev;
4312         dev->release = rbd_dev_device_release;
4313         dev_set_name(dev, "%d", rbd_dev->dev_id);
4314         ret = device_register(dev);
4315
4316         mutex_unlock(&ctl_mutex);
4317
4318         return ret;
4319 }
4320
4321 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4322 {
4323         device_unregister(&rbd_dev->dev);
4324 }
4325
4326 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4327
4328 /*
4329  * Get a unique rbd identifier for the given new rbd_dev, and add
4330  * the rbd_dev to the global list.  The minimum rbd id is 1.
4331  */
4332 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4333 {
4334         rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4335
4336         spin_lock(&rbd_dev_list_lock);
4337         list_add_tail(&rbd_dev->node, &rbd_dev_list);
4338         spin_unlock(&rbd_dev_list_lock);
4339         dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4340                 (unsigned long long) rbd_dev->dev_id);
4341 }
4342
4343 /*
4344  * Remove an rbd_dev from the global list, and record that its
4345  * identifier is no longer in use.
4346  */
4347 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4348 {
4349         struct list_head *tmp;
4350         int rbd_id = rbd_dev->dev_id;
4351         int max_id;
4352
4353         rbd_assert(rbd_id > 0);
4354
4355         dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4356                 (unsigned long long) rbd_dev->dev_id);
4357         spin_lock(&rbd_dev_list_lock);
4358         list_del_init(&rbd_dev->node);
4359
4360         /*
4361          * If the id being "put" is not the current maximum, there
4362          * is nothing special we need to do.
4363          */
4364         if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4365                 spin_unlock(&rbd_dev_list_lock);
4366                 return;
4367         }
4368
4369         /*
4370          * We need to update the current maximum id.  Search the
4371          * list to find out what it is.  We're more likely to find
4372          * the maximum at the end, so search the list backward.
4373          */
4374         max_id = 0;
4375         list_for_each_prev(tmp, &rbd_dev_list) {
4376                 struct rbd_device *rbd_dev;
4377
4378                 rbd_dev = list_entry(tmp, struct rbd_device, node);
4379                 if (rbd_dev->dev_id > max_id)
4380                         max_id = rbd_dev->dev_id;
4381         }
4382         spin_unlock(&rbd_dev_list_lock);
4383
4384         /*
4385          * The max id could have been updated by rbd_dev_id_get(), in
4386          * which case it now accurately reflects the new maximum.
4387          * Be careful not to overwrite the maximum value in that
4388          * case.
4389          */
4390         atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4391         dout("  max dev id has been reset\n");
4392 }
4393
4394 /*
4395  * Skips over white space at *buf, and updates *buf to point to the
4396  * first found non-space character (if any). Returns the length of
4397  * the token (string of non-white space characters) found.  Note
4398  * that *buf must be terminated with '\0'.
4399  */
4400 static inline size_t next_token(const char **buf)
4401 {
4402         /*
4403          * These are the characters that produce nonzero for
4404          * isspace() in the "C" and "POSIX" locales.
4405          */
4406         const char *spaces = " \f\n\r\t\v";
4407
4408         *buf += strspn(*buf, spaces);   /* Find start of token */
4409
4410         return strcspn(*buf, spaces);   /* Return token length */
4411 }
4412
4413 /*
4414  * Finds the next token in *buf, and if the provided token buffer is
4415  * big enough, copies the found token into it.  The result, if
4416  * copied, is guaranteed to be terminated with '\0'.  Note that *buf
4417  * must be terminated with '\0' on entry.
4418  *
4419  * Returns the length of the token found (not including the '\0').
4420  * Return value will be 0 if no token is found, and it will be >=
4421  * token_size if the token would not fit.
4422  *
4423  * The *buf pointer will be updated to point beyond the end of the
4424  * found token.  Note that this occurs even if the token buffer is
4425  * too small to hold it.
4426  */
4427 static inline size_t copy_token(const char **buf,
4428                                 char *token,
4429                                 size_t token_size)
4430 {
4431         size_t len;
4432
4433         len = next_token(buf);
4434         if (len < token_size) {
4435                 memcpy(token, *buf, len);
4436                 *(token + len) = '\0';
4437         }
4438         *buf += len;
4439
4440         return len;
4441 }
4442
4443 /*
4444  * Finds the next token in *buf, dynamically allocates a buffer big
4445  * enough to hold a copy of it, and copies the token into the new
4446  * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
4447  * that a duplicate buffer is created even for a zero-length token.
4448  *
4449  * Returns a pointer to the newly-allocated duplicate, or a null
4450  * pointer if memory for the duplicate was not available.  If
4451  * the lenp argument is a non-null pointer, the length of the token
4452  * (not including the '\0') is returned in *lenp.
4453  *
4454  * If successful, the *buf pointer will be updated to point beyond
4455  * the end of the found token.
4456  *
4457  * Note: uses GFP_KERNEL for allocation.
4458  */
4459 static inline char *dup_token(const char **buf, size_t *lenp)
4460 {
4461         char *dup;
4462         size_t len;
4463
4464         len = next_token(buf);
4465         dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4466         if (!dup)
4467                 return NULL;
4468         *(dup + len) = '\0';
4469         *buf += len;
4470
4471         if (lenp)
4472                 *lenp = len;
4473
4474         return dup;
4475 }
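
/*
 * Illustrative sketch, not driver code: given buf pointing at the
 * string "  pool image", the helpers above behave as follows:
 *
 *	const char *p = buf;
 *	char *pool = dup_token(&p, NULL);	(pool == "pool")
 *	char *image = dup_token(&p, NULL);	(image == "image")
 *	size_t n = next_token(&p);		(n == 0: no more tokens)
 *
 * Each successful dup_token() result must eventually be released
 * with kfree().
 */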
4476
4477 /*
4478  * Parse the options provided for an "rbd add" (i.e., rbd image
4479  * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
4480  * and the data written is passed here via a NUL-terminated buffer.
4481  * Returns 0 if successful or an error code otherwise.
4482  *
4483  * The information extracted from these options is recorded in
4484  * the other parameters which return dynamically-allocated
4485  * structures:
4486  *  ceph_opts
4487  *      The address of a pointer that will refer to a ceph options
4488  *      structure.  Caller must release the returned pointer using
4489  *      ceph_destroy_options() when it is no longer needed.
4490  *  rbd_opts
4491  *      Address of an rbd options pointer.  Fully initialized by
4492  *      this function; caller must release with kfree().
4493  *  spec
4494  *      Address of an rbd image specification pointer.  Fully
4495  *      initialized by this function based on parsed options.
4496  *      Caller must release with rbd_spec_put().
4497  *
4498  * The options passed take this form:
4499  *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4500  * where:
4501  *  <mon_addrs>
4502  *      A comma-separated list of one or more monitor addresses.
4503  *      A monitor address is an ip address, optionally followed
4504  *      by a port number (separated by a colon).
4505  *        I.e.:  ip1[:port1][,ip2[:port2]...]
4506  *  <options>
4507  *      A comma-separated list of ceph and/or rbd options.
4508  *  <pool_name>
4509  *      The name of the rados pool containing the rbd image.
4510  *  <image_name>
4511  *      The name of the image in that pool to map.
4512  *  <snap_name>
4513  *      An optional snapshot name.  If provided, the mapping will
4514  *      present data from the image at the time that snapshot was
4515  *      created.  The image head is used if no snapshot name is
4516  *      provided.  Snapshot mappings are always read-only.
4517  */
4518 static int rbd_add_parse_args(const char *buf,
4519                                 struct ceph_options **ceph_opts,
4520                                 struct rbd_options **opts,
4521                                 struct rbd_spec **rbd_spec)
4522 {
4523         size_t len;
4524         char *options;
4525         const char *mon_addrs;
4526         char *snap_name;
4527         size_t mon_addrs_size;
4528         struct rbd_spec *spec = NULL;
4529         struct rbd_options *rbd_opts = NULL;
4530         struct ceph_options *copts;
4531         int ret;
4532
4533         /* The first four tokens are required */
4534
4535         len = next_token(&buf);
4536         if (!len) {
4537                 rbd_warn(NULL, "no monitor address(es) provided");
4538                 return -EINVAL;
4539         }
4540         mon_addrs = buf;
4541         mon_addrs_size = len + 1;
4542         buf += len;
4543
4544         ret = -EINVAL;
4545         options = dup_token(&buf, NULL);
4546         if (!options)
4547                 return -ENOMEM;
4548         if (!*options) {
4549                 rbd_warn(NULL, "no options provided");
4550                 goto out_err;
4551         }
4552
4553         spec = rbd_spec_alloc();
4554         if (!spec)
4555                 goto out_mem;
4556
4557         spec->pool_name = dup_token(&buf, NULL);
4558         if (!spec->pool_name)
4559                 goto out_mem;
4560         if (!*spec->pool_name) {
4561                 rbd_warn(NULL, "no pool name provided");
4562                 goto out_err;
4563         }
4564
4565         spec->image_name = dup_token(&buf, NULL);
4566         if (!spec->image_name)
4567                 goto out_mem;
4568         if (!*spec->image_name) {
4569                 rbd_warn(NULL, "no image name provided");
4570                 goto out_err;
4571         }
4572
4573         /*
4574          * Snapshot name is optional; default is to use "-"
4575          * (indicating the head/no snapshot).
4576          */
4577         len = next_token(&buf);
4578         if (!len) {
4579                 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4580                 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4581         } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4582                 ret = -ENAMETOOLONG;
4583                 goto out_err;
4584         }
4585         snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4586         if (!snap_name)
4587                 goto out_mem;
4588         *(snap_name + len) = '\0';
4589         spec->snap_name = snap_name;
4590
4591         /* Initialize all rbd options to the defaults */
4592
4593         rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4594         if (!rbd_opts)
4595                 goto out_mem;
4596
4597         rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4598
4599         copts = ceph_parse_options(options, mon_addrs,
4600                                         mon_addrs + mon_addrs_size - 1,
4601                                         parse_rbd_opts_token, rbd_opts);
4602         if (IS_ERR(copts)) {
4603                 ret = PTR_ERR(copts);
4604                 goto out_err;
4605         }
4606         kfree(options);
4607
4608         *ceph_opts = copts;
4609         *opts = rbd_opts;
4610         *rbd_spec = spec;
4611
4612         return 0;
4613 out_mem:
4614         ret = -ENOMEM;
4615 out_err:
4616         kfree(rbd_opts);
4617         rbd_spec_put(spec);
4618         kfree(options);
4619
4620         return ret;
4621 }
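
/*
 * Worked example (editor's sketch): given the buffer
 *
 *   "1.2.3.4:6789,1.2.3.5:6789 name=admin rbd foo snap1"
 *
 * the code above yields mon_addrs -> "1.2.3.4:6789,1.2.3.5:6789",
 * options -> "name=admin", spec->pool_name -> "rbd",
 * spec->image_name -> "foo" and spec->snap_name -> "snap1".  With
 * the trailing token omitted, snap_name becomes RBD_SNAP_HEAD_NAME
 * ("-") and the image head is mapped.
 */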
4622
4623 /*
4624  * An rbd format 2 image has a unique identifier, distinct from the
4625  * name given to it by the user.  Internally, that identifier is
4626  * what's used to specify the names of objects related to the image.
4627  *
4628  * A special "rbd id" object is used to map an rbd image name to its
4629  * id.  If that object doesn't exist, then there is no v2 rbd image
4630  * with the supplied name.
4631  *
4632  * This function will record the given rbd_dev's image_id field if
4633  * it can be determined, and in that case will return 0.  If any
4634  * errors occur a negative errno will be returned and the rbd_dev's
4635  * image_id field will be unchanged (and should be NULL).
4636  */
4637 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4638 {
4639         int ret;
4640         size_t size;
4641         char *object_name;
4642         void *response;
4643         char *image_id;
4644
4645         /*
4646          * When probing a parent image, the image id is already
4647          * known (and the image name likely is not).  There's no
4648          * need to fetch the image id again in this case.  We
4649          * do still need to set the image format though.
4650          */
4651         if (rbd_dev->spec->image_id) {
4652                 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4653
4654                 return 0;
4655         }
4656
4657         /*
4658          * First, see if the format 2 image id file exists, and if
4659          * so, get the image's persistent id from it.
4660          */
4661         size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4662         object_name = kmalloc(size, GFP_NOIO);
4663         if (!object_name)
4664                 return -ENOMEM;
4665         sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4666         dout("rbd id object name is %s\n", object_name);
4667
4668         /* Response will be an encoded string, which includes a length */
4669
4670         size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4671         response = kzalloc(size, GFP_NOIO);
4672         if (!response) {
4673                 ret = -ENOMEM;
4674                 goto out;
4675         }
4676
4677         /* If it doesn't exist we'll assume it's a format 1 image */
4678
4679         ret = rbd_obj_method_sync(rbd_dev, object_name,
4680                                 "rbd", "get_id", NULL, 0,
4681                                 response, RBD_IMAGE_ID_LEN_MAX);
4682         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4683         if (ret == -ENOENT) {
4684                 image_id = kstrdup("", GFP_KERNEL);
4685                 ret = image_id ? 0 : -ENOMEM;
4686                 if (!ret)
4687                         rbd_dev->image_format = 1;
4688         } else if (ret >= 0) {
4689                 void *p = response;
4690
4691                 image_id = ceph_extract_encoded_string(&p, p + ret,
4692                                                 NULL, GFP_NOIO);
4693                 ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
4694                 if (!ret)
4695                         rbd_dev->image_format = 2;
4696         }
4699
4700         if (!ret) {
4701                 rbd_dev->spec->image_id = image_id;
4702                 dout("image_id is %s\n", image_id);
4703         }
4704 out:
4705         kfree(response);
4706         kfree(object_name);
4707
4708         return ret;
4709 }
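
/*
 * Naming sketch (assuming the RBD_ID_PREFIX of "rbd_id." from
 * rbd_types.h): probing an image named "foo" invokes the "get_id"
 * class method on object "rbd_id.foo".  If that object is absent
 * (-ENOENT) the image is treated as format 1 and its image_id is
 * recorded as the empty string.
 */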
4710
4711 /*
4712  * Undo whatever state changes are made by v1 or v2 header info
4713  * call.
4714  */
4715 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4716 {
4717         struct rbd_image_header *header;
4718
4719         /* Drop the parent reference unless already dropped (or there is none) */
4720
4721         if (rbd_dev->parent_overlap)
4722                 rbd_dev_parent_put(rbd_dev);
4723
4724         /* Free dynamic fields from the header, then zero it out */
4725
4726         header = &rbd_dev->header;
4727         ceph_put_snap_context(header->snapc);
4728         kfree(header->snap_sizes);
4729         kfree(header->snap_names);
4730         kfree(header->object_prefix);
4731         memset(header, 0, sizeof (*header));
4732 }
4733
4734 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
4735 {
4736         int ret;
4737
4738         ret = rbd_dev_v2_object_prefix(rbd_dev);
4739         if (ret)
4740                 goto out_err;
4741
4742         /*
4743          * Get and check the features for the image.  Currently the
4744          * features are assumed to never change.
4745          */
4746         ret = rbd_dev_v2_features(rbd_dev);
4747         if (ret)
4748                 goto out_err;
4749
4750         /* If the image supports fancy striping, get its parameters */
4751
4752         if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4753                 ret = rbd_dev_v2_striping_info(rbd_dev);
4754                 if (ret < 0)
4755                         goto out_err;
4756         }
4757         /* No support for crypto and compression type format 2 images */
4758
4759         return 0;
4760 out_err:
4761         rbd_dev->header.features = 0;
4762         kfree(rbd_dev->header.object_prefix);
4763         rbd_dev->header.object_prefix = NULL;
4764
4765         return ret;
4766 }
4767
4768 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4769 {
4770         struct rbd_device *parent = NULL;
4771         struct rbd_spec *parent_spec;
4772         struct rbd_client *rbdc;
4773         int ret;
4774
4775         if (!rbd_dev->parent_spec)
4776                 return 0;
4777         /*
4778          * We need to pass a reference to the client and the parent
4779          * spec when creating the parent rbd_dev.  Images related by
4780          * parent/child relationships always share both.
4781          */
4782         parent_spec = rbd_spec_get(rbd_dev->parent_spec);
4783         rbdc = __rbd_get_client(rbd_dev->rbd_client);
4784
4785         ret = -ENOMEM;
4786         parent = rbd_dev_create(rbdc, parent_spec);
4787         if (!parent)
4788                 goto out_err;
4789
4790         ret = rbd_dev_image_probe(parent, false);
4791         if (ret < 0)
4792                 goto out_err;
4793         rbd_dev->parent = parent;
4794         atomic_set(&rbd_dev->parent_ref, 1);
4795
4796         return 0;
4797 out_err:
4798         if (parent) {
4799                 rbd_dev_unparent(rbd_dev);
4800                 kfree(rbd_dev->header_name);
4801                 rbd_dev_destroy(parent);
4802         } else {
4803                 rbd_put_client(rbdc);
4804                 rbd_spec_put(parent_spec);
4805         }
4806
4807         return ret;
4808 }
4809
4810 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4811 {
4812         int ret;
4813
4814         /* generate unique id: find highest unique id, add one */
4815         rbd_dev_id_get(rbd_dev);
4816
4817         /* Fill in the device name, now that we have its id. */
4818         BUILD_BUG_ON(DEV_NAME_LEN
4819                         < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
4820         sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
4821
4822         /* Get our block major device number. */
4823
4824         ret = register_blkdev(0, rbd_dev->name);
4825         if (ret < 0)
4826                 goto err_out_id;
4827         rbd_dev->major = ret;
4828
4829         /* Set up the blkdev mapping. */
4830
4831         ret = rbd_init_disk(rbd_dev);
4832         if (ret)
4833                 goto err_out_blkdev;
4834
4835         ret = rbd_dev_mapping_set(rbd_dev);
4836         if (ret)
4837                 goto err_out_disk;
4838         set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4839
4840         ret = rbd_bus_add_dev(rbd_dev);
4841         if (ret)
4842                 goto err_out_mapping;
4843
4844         /* Everything's ready.  Announce the disk to the world. */
4845
4846         set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4847         add_disk(rbd_dev->disk);
4848
4849         pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
4850                 (unsigned long long) rbd_dev->mapping.size);
4851
4852         return ret;
4853
4854 err_out_mapping:
4855         rbd_dev_mapping_clear(rbd_dev);
4856 err_out_disk:
4857         rbd_free_disk(rbd_dev);
4858 err_out_blkdev:
4859         unregister_blkdev(rbd_dev->major, rbd_dev->name);
4860 err_out_id:
4861         rbd_dev_id_put(rbd_dev);
4863
4864         return ret;
4865 }
4866
4867 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
4868 {
4869         struct rbd_spec *spec = rbd_dev->spec;
4870         size_t size;
4871
4872         /* Record the header object name for this rbd image. */
4873
4874         rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4875
4876         if (rbd_dev->image_format == 1)
4877                 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
4878         else
4879                 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
4880
4881         rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
4882         if (!rbd_dev->header_name)
4883                 return -ENOMEM;
4884
4885         if (rbd_dev->image_format == 1)
4886                 sprintf(rbd_dev->header_name, "%s%s",
4887                         spec->image_name, RBD_SUFFIX);
4888         else
4889                 sprintf(rbd_dev->header_name, "%s%s",
4890                         RBD_HEADER_PREFIX, spec->image_id);
4891         return 0;
4892 }
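
/*
 * Example header object names (assuming RBD_SUFFIX ".rbd" and
 * RBD_HEADER_PREFIX "rbd_header." from rbd_types.h):
 *
 *   format 1, image name "foo":        "foo.rbd"
 *   format 2, image id "0123abcd":     "rbd_header.0123abcd"
 */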
4893
4894 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4895 {
4896         rbd_dev_unprobe(rbd_dev);
4897         kfree(rbd_dev->header_name);
4898         rbd_dev->header_name = NULL;
4899         rbd_dev->image_format = 0;
4900         kfree(rbd_dev->spec->image_id);
4901         rbd_dev->spec->image_id = NULL;
4902
4903         rbd_dev_destroy(rbd_dev);
4904 }
4905
4906 /*
4907  * Probe for the existence of the header object for the given rbd
4908  * device.  If this image is the one being mapped (i.e., not a
4909  * parent), initiate a watch on its header object before using that
4910  * object to get detailed information about the rbd image.
4911  */
4912 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
4913 {
4914         int ret;
4915         int tmp;
4916
4917         /*
4918          * Get the id from the image id object.  Unless there's an
4919          * error, rbd_dev->spec->image_id will be filled in with
4920          * a dynamically-allocated string, and rbd_dev->image_format
4921          * will be set to either 1 or 2.
4922          */
4923         ret = rbd_dev_image_id(rbd_dev);
4924         if (ret)
4925                 return ret;
4926         rbd_assert(rbd_dev->spec->image_id);
4927         rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4928
4929         ret = rbd_dev_header_name(rbd_dev);
4930         if (ret)
4931                 goto err_out_format;
4932
4933         if (mapping) {
4934                 ret = rbd_dev_header_watch_sync(rbd_dev, true);
4935                 if (ret)
4936                         goto out_header_name;
4937         }
4938
4939         if (rbd_dev->image_format == 1)
4940                 ret = rbd_dev_v1_header_info(rbd_dev);
4941         else
4942                 ret = rbd_dev_v2_header_info(rbd_dev);
4943         if (ret)
4944                 goto err_out_watch;
4945
4946         ret = rbd_dev_spec_update(rbd_dev);
4947         if (ret)
4948                 goto err_out_probe;
4949
4950         ret = rbd_dev_probe_parent(rbd_dev);
4951         if (ret)
4952                 goto err_out_probe;
4953
4954         dout("discovered format %u image, header name is %s\n",
4955                 rbd_dev->image_format, rbd_dev->header_name);
4956
4957         return 0;
4958 err_out_probe:
4959         rbd_dev_unprobe(rbd_dev);
4960 err_out_watch:
4961         if (mapping) {
4962                 tmp = rbd_dev_header_watch_sync(rbd_dev, false);
4963                 if (tmp)
4964                         rbd_warn(rbd_dev, "unable to tear down "
4965                                         "watch request (%d)\n", tmp);
4966         }
4967 out_header_name:
4968         kfree(rbd_dev->header_name);
4969         rbd_dev->header_name = NULL;
4970 err_out_format:
4971         rbd_dev->image_format = 0;
4972         kfree(rbd_dev->spec->image_id);
4973         rbd_dev->spec->image_id = NULL;
4974
4975         dout("probe failed, returning %d\n", ret);
4976
4977         return ret;
4978 }
4979
4980 static ssize_t rbd_add(struct bus_type *bus,
4981                        const char *buf,
4982                        size_t count)
4983 {
4984         struct rbd_device *rbd_dev = NULL;
4985         struct ceph_options *ceph_opts = NULL;
4986         struct rbd_options *rbd_opts = NULL;
4987         struct rbd_spec *spec = NULL;
4988         struct rbd_client *rbdc;
4989         struct ceph_osd_client *osdc;
4990         bool read_only;
4991         int rc = -ENOMEM;
4992
4993         if (!try_module_get(THIS_MODULE))
4994                 return -ENODEV;
4995
4996         /* parse add command */
4997         rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
4998         if (rc < 0)
4999                 goto err_out_module;
5000         read_only = rbd_opts->read_only;
5001         kfree(rbd_opts);
5002         rbd_opts = NULL;        /* done with this */
5003
5004         rbdc = rbd_get_client(ceph_opts);
5005         if (IS_ERR(rbdc)) {
5006                 rc = PTR_ERR(rbdc);
5007                 goto err_out_args;
5008         }
5009
5010         /* pick the pool */
5011         osdc = &rbdc->client->osdc;
5012         rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
5013         if (rc < 0)
5014                 goto err_out_client;
5015         spec->pool_id = (u64)rc;
5016
5017         /* The ceph file layout requires the pool id to fit in 32 bits */
5018
5019         if (spec->pool_id > (u64)U32_MAX) {
5020                 rbd_warn(NULL, "pool id too large (%llu > %u)\n",
5021                                 (unsigned long long)spec->pool_id, U32_MAX);
5022                 rc = -EIO;
5023                 goto err_out_client;
5024         }
5025
5026         rbd_dev = rbd_dev_create(rbdc, spec);
5027         if (!rbd_dev)
5028                 goto err_out_client;
5029         rbdc = NULL;            /* rbd_dev now owns this */
5030         spec = NULL;            /* rbd_dev now owns this */
5031
5032         rc = rbd_dev_image_probe(rbd_dev, true);
5033         if (rc < 0)
5034                 goto err_out_rbd_dev;
5035
5036         /* If we are mapping a snapshot it must be marked read-only */
5037
5038         if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5039                 read_only = true;
5040         rbd_dev->mapping.read_only = read_only;
5041
5042         rc = rbd_dev_device_setup(rbd_dev);
5043         if (rc) {
5044                 rbd_dev_image_release(rbd_dev);
5045                 goto err_out_module;
5046         }
5047
5048         return count;
5049
5050 err_out_rbd_dev:
5051         rbd_dev_destroy(rbd_dev);
5052 err_out_client:
5053         rbd_put_client(rbdc);
5054 err_out_args:
5055         rbd_spec_put(spec);
5056 err_out_module:
5057         module_put(THIS_MODULE);
5058
5059         dout("Error adding device %s\n", buf);
5060
5061         return (ssize_t)rc;
5062 }
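
/*
 * End-to-end usage sketch (hedged; Documentation/ABI/testing/
 * sysfs-bus-rbd is authoritative).  Mapping pool "rbd", image "foo"
 * at its head, as client "admin":
 *
 *   # echo "1.2.3.4:6789 name=admin rbd foo" > /sys/bus/rbd/add
 *
 * On success the write returns the full count and a /dev/rbd<N>
 * device appears; on failure the partial setup is unwound and a
 * negative errno is returned, as above.
 */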
5063
5064 static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
5065 {
5066         struct list_head *tmp;
5067         struct rbd_device *rbd_dev;
5068
5069         spin_lock(&rbd_dev_list_lock);
5070         list_for_each(tmp, &rbd_dev_list) {
5071                 rbd_dev = list_entry(tmp, struct rbd_device, node);
5072                 if (rbd_dev->dev_id == dev_id) {
5073                         spin_unlock(&rbd_dev_list_lock);
5074                         return rbd_dev;
5075                 }
5076         }
5077         spin_unlock(&rbd_dev_list_lock);
5078         return NULL;
5079 }
5080
5081 static void rbd_dev_device_release(struct device *dev)
5082 {
5083         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5084
5085         rbd_free_disk(rbd_dev);
5086         clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5087         rbd_dev_mapping_clear(rbd_dev);
5088         unregister_blkdev(rbd_dev->major, rbd_dev->name);
5089         rbd_dev->major = 0;
5090         rbd_dev_id_put(rbd_dev);
5092 }
5093
5094 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5095 {
5096         while (rbd_dev->parent) {
5097                 struct rbd_device *first = rbd_dev;
5098                 struct rbd_device *second = first->parent;
5099                 struct rbd_device *third;
5100
5101                 /*
5102                  * Follow to the parent with no grandparent and
5103                  * remove it.
5104                  */
5105                 while (second && (third = second->parent)) {
5106                         first = second;
5107                         second = third;
5108                 }
5109                 rbd_assert(second);
5110                 rbd_dev_image_release(second);
5111                 first->parent = NULL;
5112                 first->parent_overlap = 0;
5113
5114                 rbd_assert(first->parent_spec);
5115                 rbd_spec_put(first->parent_spec);
5116                 first->parent_spec = NULL;
5117         }
5118 }
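
/*
 * Teardown order illustration (editor's note): for a clone chain
 *
 *   rbd_dev -> parent -> grandparent
 *
 * the inner loop walks down to the deepest ancestor first, so each
 * pass of the outer loop releases the grandparent, then the parent,
 * peeling the chain from the far end back toward rbd_dev.
 */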
5119
5120 static ssize_t rbd_remove(struct bus_type *bus,
5121                           const char *buf,
5122                           size_t count)
5123 {
5124         struct rbd_device *rbd_dev = NULL;
5125         int target_id;
5126         unsigned long ul;
5127         int ret;
5128
5129         ret = strict_strtoul(buf, 10, &ul);
5130         if (ret)
5131                 return ret;
5132
5133         /* convert to int; abort if we lost anything in the conversion */
5134         target_id = (int) ul;
5135         if (target_id != ul)
5136                 return -EINVAL;
5137
5138         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
5139
5140         rbd_dev = __rbd_get_dev(target_id);
5141         if (!rbd_dev) {
5142                 ret = -ENOENT;
5143                 goto done;
5144         }
5145
5146         spin_lock_irq(&rbd_dev->lock);
5147         if (rbd_dev->open_count)
5148                 ret = -EBUSY;
5149         else
5150                 set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
5151         spin_unlock_irq(&rbd_dev->lock);
5152         if (ret < 0)
5153                 goto done;
5154         rbd_bus_del_dev(rbd_dev);
5155         ret = rbd_dev_header_watch_sync(rbd_dev, false);
5156         if (ret)
5157                 rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
5158         rbd_dev_image_release(rbd_dev);
5159         module_put(THIS_MODULE);
5160         ret = count;
5161 done:
5162         mutex_unlock(&ctl_mutex);
5163
5164         return ret;
5165 }
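
/*
 * Usage sketch (hedged): unmapping the device with id 1, assuming
 * nothing holds it open:
 *
 *   # echo 1 > /sys/bus/rbd/remove
 *
 * A mapping with a nonzero open_count fails with -EBUSY rather than
 * being torn down underneath its users.
 */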
5166
5167 /*
5168  * create control files in sysfs
5169  * /sys/bus/rbd/...
5170  */
5171 static int rbd_sysfs_init(void)
5172 {
5173         int ret;
5174
5175         ret = device_register(&rbd_root_dev);
5176         if (ret < 0)
5177                 return ret;
5178
5179         ret = bus_register(&rbd_bus_type);
5180         if (ret < 0)
5181                 device_unregister(&rbd_root_dev);
5182
5183         return ret;
5184 }
5185
5186 static void rbd_sysfs_cleanup(void)
5187 {
5188         bus_unregister(&rbd_bus_type);
5189         device_unregister(&rbd_root_dev);
5190 }
5191
5192 static int rbd_slab_init(void)
5193 {
5194         rbd_assert(!rbd_img_request_cache);
5195         rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5196                                         sizeof (struct rbd_img_request),
5197                                         __alignof__(struct rbd_img_request),
5198                                         0, NULL);
5199         if (!rbd_img_request_cache)
5200                 return -ENOMEM;
5201
5202         rbd_assert(!rbd_obj_request_cache);
5203         rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5204                                         sizeof (struct rbd_obj_request),
5205                                         __alignof__(struct rbd_obj_request),
5206                                         0, NULL);
5207         if (!rbd_obj_request_cache)
5208                 goto out_err;
5209
5210         rbd_assert(!rbd_segment_name_cache);
5211         rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5212                                         MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
5213         if (rbd_segment_name_cache)
5214                 return 0;
5215 out_err:
5216         if (rbd_obj_request_cache) {
5217                 kmem_cache_destroy(rbd_obj_request_cache);
5218                 rbd_obj_request_cache = NULL;
5219         }
5220
5221         kmem_cache_destroy(rbd_img_request_cache);
5222         rbd_img_request_cache = NULL;
5223
5224         return -ENOMEM;
5225 }
5226
5227 static void rbd_slab_exit(void)
5228 {
5229         rbd_assert(rbd_segment_name_cache);
5230         kmem_cache_destroy(rbd_segment_name_cache);
5231         rbd_segment_name_cache = NULL;
5232
5233         rbd_assert(rbd_obj_request_cache);
5234         kmem_cache_destroy(rbd_obj_request_cache);
5235         rbd_obj_request_cache = NULL;
5236
5237         rbd_assert(rbd_img_request_cache);
5238         kmem_cache_destroy(rbd_img_request_cache);
5239         rbd_img_request_cache = NULL;
5240 }
5241
5242 static int __init rbd_init(void)
5243 {
5244         int rc;
5245
5246         if (!libceph_compatible(NULL)) {
5247                 rbd_warn(NULL, "libceph incompatibility (quitting)");
5248
5249                 return -EINVAL;
5250         }
5251         rc = rbd_slab_init();
5252         if (rc)
5253                 return rc;
5254         rc = rbd_sysfs_init();
5255         if (rc)
5256                 rbd_slab_exit();
5257         else
5258                 pr_info("loaded " RBD_DRV_NAME_LONG "\n");
5259
5260         return rc;
5261 }
5262
5263 static void __exit rbd_exit(void)
5264 {
5265         rbd_sysfs_cleanup();
5266         rbd_slab_exit();
5267 }
5268
5269 module_init(rbd_init);
5270 module_exit(rbd_exit);
5271
5272 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5273 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5274 MODULE_DESCRIPTION("rados block device");
5275
5276 /* following authorship retained from original osdblk.c */
5277 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5278
5279 MODULE_LICENSE("GPL");