xen-blkfront: Handle NULL gendisk
[platform/kernel/linux-rpi.git] / drivers / block / xen-blkfront.c
1 /*
2  * blkfront.c
3  *
4  * XenLinux virtual block device driver.
5  *
6  * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
7  * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
8  * Copyright (c) 2004, Christian Limpach
9  * Copyright (c) 2004, Andrew Warfield
10  * Copyright (c) 2005, Christopher Clark
11  * Copyright (c) 2005, XenSource Ltd
12  *
13  * This program is free software; you can redistribute it and/or
14  * modify it under the terms of the GNU General Public License version 2
15  * as published by the Free Software Foundation; or, when distributed
16  * separately from the Linux kernel or incorporated into other
17  * software packages, subject to the following license:
18  *
19  * Permission is hereby granted, free of charge, to any person obtaining a copy
20  * of this source file (the "Software"), to deal in the Software without
21  * restriction, including without limitation the rights to use, copy, modify,
22  * merge, publish, distribute, sublicense, and/or sell copies of the Software,
23  * and to permit persons to whom the Software is furnished to do so, subject to
24  * the following conditions:
25  *
26  * The above copyright notice and this permission notice shall be included in
27  * all copies or substantial portions of the Software.
28  *
29  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
30  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
31  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
32  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
33  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
34  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
35  * IN THE SOFTWARE.
36  */
37
38 #include <linux/interrupt.h>
39 #include <linux/blkdev.h>
40 #include <linux/blk-mq.h>
41 #include <linux/hdreg.h>
42 #include <linux/cdrom.h>
43 #include <linux/module.h>
44 #include <linux/slab.h>
45 #include <linux/major.h>
46 #include <linux/mutex.h>
47 #include <linux/scatterlist.h>
48 #include <linux/bitmap.h>
49 #include <linux/list.h>
50 #include <linux/workqueue.h>
51 #include <linux/sched/mm.h>
52
53 #include <xen/xen.h>
54 #include <xen/xenbus.h>
55 #include <xen/grant_table.h>
56 #include <xen/events.h>
57 #include <xen/page.h>
58 #include <xen/platform_pci.h>
59
60 #include <xen/interface/grant_table.h>
61 #include <xen/interface/io/blkif.h>
62 #include <xen/interface/io/protocols.h>
63
64 #include <asm/xen/hypervisor.h>
65
66 /*
67  * The minimal size of segment supported by the block framework is PAGE_SIZE.
68  * When Linux is using a different page size than Xen, it may not be possible
69  * to put all the data in a single segment.
70  * This can happen when the backend doesn't support indirect descriptor and
71  * therefore the maximum amount of data that a request can carry is
72  * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE = 44KB
73  *
74  * Note that we only support one extra request. So the Linux page size
75  * should be <= ( 2 * BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) =
76  * 88KB.
77  */
78 #define HAS_EXTRA_REQ (BLKIF_MAX_SEGMENTS_PER_REQUEST < XEN_PFN_PER_PAGE)
79
80 enum blkif_state {
81         BLKIF_STATE_DISCONNECTED,
82         BLKIF_STATE_CONNECTED,
83         BLKIF_STATE_SUSPENDED,
84         BLKIF_STATE_ERROR,
85 };
86
87 struct grant {
88         grant_ref_t gref;
89         struct page *page;
90         struct list_head node;
91 };
92
93 enum blk_req_status {
94         REQ_PROCESSING,
95         REQ_WAITING,
96         REQ_DONE,
97         REQ_ERROR,
98         REQ_EOPNOTSUPP,
99 };
100
101 struct blk_shadow {
102         struct blkif_request req;
103         struct request *request;
104         struct grant **grants_used;
105         struct grant **indirect_grants;
106         struct scatterlist *sg;
107         unsigned int num_sg;
108         enum blk_req_status status;
109
110         #define NO_ASSOCIATED_ID ~0UL
111         /*
112          * Id of the sibling if we ever need 2 requests when handling a
113          * block I/O request
114          */
115         unsigned long associated_id;
116 };
117
118 struct blkif_req {
119         blk_status_t    error;
120 };
121
122 static inline struct blkif_req *blkif_req(struct request *rq)
123 {
124         return blk_mq_rq_to_pdu(rq);
125 }
126
127 static DEFINE_MUTEX(blkfront_mutex);
128 static const struct block_device_operations xlvbd_block_fops;
129 static struct delayed_work blkfront_work;
130 static LIST_HEAD(info_list);
131
132 /*
133  * Maximum number of segments in indirect requests, the actual value used by
134  * the frontend driver is the minimum of this value and the value provided
135  * by the backend driver.
136  */
137
138 static unsigned int xen_blkif_max_segments = 32;
139 module_param_named(max_indirect_segments, xen_blkif_max_segments, uint, 0444);
140 MODULE_PARM_DESC(max_indirect_segments,
141                  "Maximum amount of segments in indirect requests (default is 32)");
142
143 static unsigned int xen_blkif_max_queues = 4;
144 module_param_named(max_queues, xen_blkif_max_queues, uint, 0444);
145 MODULE_PARM_DESC(max_queues, "Maximum number of hardware queues/rings used per virtual disk");
146
147 /*
148  * Maximum order of pages to be used for the shared ring between front and
149  * backend, 4KB page granularity is used.
150  */
151 static unsigned int xen_blkif_max_ring_order;
152 module_param_named(max_ring_page_order, xen_blkif_max_ring_order, int, 0444);
153 MODULE_PARM_DESC(max_ring_page_order, "Maximum order of pages to be used for the shared ring");
154
155 #define BLK_RING_SIZE(info)     \
156         __CONST_RING_SIZE(blkif, XEN_PAGE_SIZE * (info)->nr_ring_pages)
157
158 /*
159  * ring-ref%u i=(-1UL) would take 11 characters + 'ring-ref' is 8, so 19
160  * characters are enough. Define to 20 to keep consistent with backend.
161  */
162 #define RINGREF_NAME_LEN (20)
163 /*
164  * queue-%u would take 7 + 10(UINT_MAX) = 17 characters.
165  */
166 #define QUEUE_NAME_LEN (17)
167
168 /*
169  *  Per-ring info.
170  *  Every blkfront device can associate with one or more blkfront_ring_info,
171  *  depending on how many hardware queues/rings to be used.
172  */
173 struct blkfront_ring_info {
174         /* Lock to protect data in every ring buffer. */
175         spinlock_t ring_lock;
176         struct blkif_front_ring ring;
177         unsigned int ring_ref[XENBUS_MAX_RING_GRANTS];
178         unsigned int evtchn, irq;
179         struct work_struct work;
180         struct gnttab_free_callback callback;
181         struct list_head indirect_pages;
182         struct list_head grants;
183         unsigned int persistent_gnts_c;
184         unsigned long shadow_free;
185         struct blkfront_info *dev_info;
186         struct blk_shadow shadow[];
187 };
188
189 /*
190  * We have one of these per vbd, whether ide, scsi or 'other'.  They
191  * hang in private_data off the gendisk structure. We may end up
192  * putting all kinds of interesting stuff here :-)
193  */
194 struct blkfront_info
195 {
196         struct mutex mutex;
197         struct xenbus_device *xbdev;
198         struct gendisk *gd;
199         u16 sector_size;
200         unsigned int physical_sector_size;
201         int vdevice;
202         blkif_vdev_t handle;
203         enum blkif_state connected;
204         /* Number of pages per ring buffer. */
205         unsigned int nr_ring_pages;
206         struct request_queue *rq;
207         unsigned int feature_flush:1;
208         unsigned int feature_fua:1;
209         unsigned int feature_discard:1;
210         unsigned int feature_secdiscard:1;
211         unsigned int feature_persistent:1;
212         unsigned int discard_granularity;
213         unsigned int discard_alignment;
214         /* Number of 4KB segments handled */
215         unsigned int max_indirect_segments;
216         int is_ready;
217         struct blk_mq_tag_set tag_set;
218         struct blkfront_ring_info *rinfo;
219         unsigned int nr_rings;
220         unsigned int rinfo_size;
221         /* Save uncomplete reqs and bios for migration. */
222         struct list_head requests;
223         struct bio_list bio_list;
224         struct list_head info_list;
225 };
226
227 static unsigned int nr_minors;
228 static unsigned long *minors;
229 static DEFINE_SPINLOCK(minor_lock);
230
231 #define GRANT_INVALID_REF       0
232
233 #define PARTS_PER_DISK          16
234 #define PARTS_PER_EXT_DISK      256
235
236 #define BLKIF_MAJOR(dev) ((dev)>>8)
237 #define BLKIF_MINOR(dev) ((dev) & 0xff)
238
239 #define EXT_SHIFT 28
240 #define EXTENDED (1<<EXT_SHIFT)
241 #define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
242 #define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
243 #define EMULATED_HD_DISK_MINOR_OFFSET (0)
244 #define EMULATED_HD_DISK_NAME_OFFSET (EMULATED_HD_DISK_MINOR_OFFSET / 256)
245 #define EMULATED_SD_DISK_MINOR_OFFSET (0)
246 #define EMULATED_SD_DISK_NAME_OFFSET (EMULATED_SD_DISK_MINOR_OFFSET / 256)
247
248 #define DEV_NAME        "xvd"   /* name in /dev */
249
250 /*
251  * Grants are always the same size as a Xen page (i.e 4KB).
252  * A physical segment is always the same size as a Linux page.
253  * Number of grants per physical segment
254  */
255 #define GRANTS_PER_PSEG (PAGE_SIZE / XEN_PAGE_SIZE)
256
257 #define GRANTS_PER_INDIRECT_FRAME \
258         (XEN_PAGE_SIZE / sizeof(struct blkif_request_segment))
259
260 #define INDIRECT_GREFS(_grants)         \
261         DIV_ROUND_UP(_grants, GRANTS_PER_INDIRECT_FRAME)
262
263 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
264 static void blkfront_gather_backend_features(struct blkfront_info *info);
265 static int negotiate_mq(struct blkfront_info *info);
266
267 #define for_each_rinfo(info, ptr, idx)                          \
268         for ((ptr) = (info)->rinfo, (idx) = 0;                  \
269              (idx) < (info)->nr_rings;                          \
270              (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
271
272 static inline struct blkfront_ring_info *
273 get_rinfo(const struct blkfront_info *info, unsigned int i)
274 {
275         BUG_ON(i >= info->nr_rings);
276         return (void *)info->rinfo + i * info->rinfo_size;
277 }
278
279 static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
280 {
281         unsigned long free = rinfo->shadow_free;
282
283         BUG_ON(free >= BLK_RING_SIZE(rinfo->dev_info));
284         rinfo->shadow_free = rinfo->shadow[free].req.u.rw.id;
285         rinfo->shadow[free].req.u.rw.id = 0x0fffffee; /* debug */
286         return free;
287 }
288
289 static int add_id_to_freelist(struct blkfront_ring_info *rinfo,
290                               unsigned long id)
291 {
292         if (rinfo->shadow[id].req.u.rw.id != id)
293                 return -EINVAL;
294         if (rinfo->shadow[id].request == NULL)
295                 return -EINVAL;
296         rinfo->shadow[id].req.u.rw.id  = rinfo->shadow_free;
297         rinfo->shadow[id].request = NULL;
298         rinfo->shadow_free = id;
299         return 0;
300 }
301
302 static int fill_grant_buffer(struct blkfront_ring_info *rinfo, int num)
303 {
304         struct blkfront_info *info = rinfo->dev_info;
305         struct page *granted_page;
306         struct grant *gnt_list_entry, *n;
307         int i = 0;
308
309         while (i < num) {
310                 gnt_list_entry = kzalloc(sizeof(struct grant), GFP_NOIO);
311                 if (!gnt_list_entry)
312                         goto out_of_memory;
313
314                 if (info->feature_persistent) {
315                         granted_page = alloc_page(GFP_NOIO);
316                         if (!granted_page) {
317                                 kfree(gnt_list_entry);
318                                 goto out_of_memory;
319                         }
320                         gnt_list_entry->page = granted_page;
321                 }
322
323                 gnt_list_entry->gref = GRANT_INVALID_REF;
324                 list_add(&gnt_list_entry->node, &rinfo->grants);
325                 i++;
326         }
327
328         return 0;
329
330 out_of_memory:
331         list_for_each_entry_safe(gnt_list_entry, n,
332                                  &rinfo->grants, node) {
333                 list_del(&gnt_list_entry->node);
334                 if (info->feature_persistent)
335                         __free_page(gnt_list_entry->page);
336                 kfree(gnt_list_entry);
337                 i--;
338         }
339         BUG_ON(i != 0);
340         return -ENOMEM;
341 }
342
343 static struct grant *get_free_grant(struct blkfront_ring_info *rinfo)
344 {
345         struct grant *gnt_list_entry;
346
347         BUG_ON(list_empty(&rinfo->grants));
348         gnt_list_entry = list_first_entry(&rinfo->grants, struct grant,
349                                           node);
350         list_del(&gnt_list_entry->node);
351
352         if (gnt_list_entry->gref != GRANT_INVALID_REF)
353                 rinfo->persistent_gnts_c--;
354
355         return gnt_list_entry;
356 }
357
358 static inline void grant_foreign_access(const struct grant *gnt_list_entry,
359                                         const struct blkfront_info *info)
360 {
361         gnttab_page_grant_foreign_access_ref_one(gnt_list_entry->gref,
362                                                  info->xbdev->otherend_id,
363                                                  gnt_list_entry->page,
364                                                  0);
365 }
366
367 static struct grant *get_grant(grant_ref_t *gref_head,
368                                unsigned long gfn,
369                                struct blkfront_ring_info *rinfo)
370 {
371         struct grant *gnt_list_entry = get_free_grant(rinfo);
372         struct blkfront_info *info = rinfo->dev_info;
373
374         if (gnt_list_entry->gref != GRANT_INVALID_REF)
375                 return gnt_list_entry;
376
377         /* Assign a gref to this page */
378         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
379         BUG_ON(gnt_list_entry->gref == -ENOSPC);
380         if (info->feature_persistent)
381                 grant_foreign_access(gnt_list_entry, info);
382         else {
383                 /* Grant access to the GFN passed by the caller */
384                 gnttab_grant_foreign_access_ref(gnt_list_entry->gref,
385                                                 info->xbdev->otherend_id,
386                                                 gfn, 0);
387         }
388
389         return gnt_list_entry;
390 }
391
392 static struct grant *get_indirect_grant(grant_ref_t *gref_head,
393                                         struct blkfront_ring_info *rinfo)
394 {
395         struct grant *gnt_list_entry = get_free_grant(rinfo);
396         struct blkfront_info *info = rinfo->dev_info;
397
398         if (gnt_list_entry->gref != GRANT_INVALID_REF)
399                 return gnt_list_entry;
400
401         /* Assign a gref to this page */
402         gnt_list_entry->gref = gnttab_claim_grant_reference(gref_head);
403         BUG_ON(gnt_list_entry->gref == -ENOSPC);
404         if (!info->feature_persistent) {
405                 struct page *indirect_page;
406
407                 /* Fetch a pre-allocated page to use for indirect grefs */
408                 BUG_ON(list_empty(&rinfo->indirect_pages));
409                 indirect_page = list_first_entry(&rinfo->indirect_pages,
410                                                  struct page, lru);
411                 list_del(&indirect_page->lru);
412                 gnt_list_entry->page = indirect_page;
413         }
414         grant_foreign_access(gnt_list_entry, info);
415
416         return gnt_list_entry;
417 }
418
419 static const char *op_name(int op)
420 {
421         static const char *const names[] = {
422                 [BLKIF_OP_READ] = "read",
423                 [BLKIF_OP_WRITE] = "write",
424                 [BLKIF_OP_WRITE_BARRIER] = "barrier",
425                 [BLKIF_OP_FLUSH_DISKCACHE] = "flush",
426                 [BLKIF_OP_DISCARD] = "discard" };
427
428         if (op < 0 || op >= ARRAY_SIZE(names))
429                 return "unknown";
430
431         if (!names[op])
432                 return "reserved";
433
434         return names[op];
435 }
436 static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
437 {
438         unsigned int end = minor + nr;
439         int rc;
440
441         if (end > nr_minors) {
442                 unsigned long *bitmap, *old;
443
444                 bitmap = kcalloc(BITS_TO_LONGS(end), sizeof(*bitmap),
445                                  GFP_KERNEL);
446                 if (bitmap == NULL)
447                         return -ENOMEM;
448
449                 spin_lock(&minor_lock);
450                 if (end > nr_minors) {
451                         old = minors;
452                         memcpy(bitmap, minors,
453                                BITS_TO_LONGS(nr_minors) * sizeof(*bitmap));
454                         minors = bitmap;
455                         nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG;
456                 } else
457                         old = bitmap;
458                 spin_unlock(&minor_lock);
459                 kfree(old);
460         }
461
462         spin_lock(&minor_lock);
463         if (find_next_bit(minors, end, minor) >= end) {
464                 bitmap_set(minors, minor, nr);
465                 rc = 0;
466         } else
467                 rc = -EBUSY;
468         spin_unlock(&minor_lock);
469
470         return rc;
471 }
472
473 static void xlbd_release_minors(unsigned int minor, unsigned int nr)
474 {
475         unsigned int end = minor + nr;
476
477         BUG_ON(end > nr_minors);
478         spin_lock(&minor_lock);
479         bitmap_clear(minors,  minor, nr);
480         spin_unlock(&minor_lock);
481 }
482
483 static void blkif_restart_queue_callback(void *arg)
484 {
485         struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)arg;
486         schedule_work(&rinfo->work);
487 }
488
489 static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
490 {
491         /* We don't have real geometry info, but let's at least return
492            values consistent with the size of the device */
493         sector_t nsect = get_capacity(bd->bd_disk);
494         sector_t cylinders = nsect;
495
496         hg->heads = 0xff;
497         hg->sectors = 0x3f;
498         sector_div(cylinders, hg->heads * hg->sectors);
499         hg->cylinders = cylinders;
500         if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
501                 hg->cylinders = 0xffff;
502         return 0;
503 }
504
505 static int blkif_ioctl(struct block_device *bdev, fmode_t mode,
506                        unsigned command, unsigned long argument)
507 {
508         int i;
509
510         switch (command) {
511         case CDROMMULTISESSION:
512                 for (i = 0; i < sizeof(struct cdrom_multisession); i++)
513                         if (put_user(0, (char __user *)(argument + i)))
514                                 return -EFAULT;
515                 return 0;
516         case CDROM_GET_CAPABILITY:
517                 if (bdev->bd_disk->flags & GENHD_FL_CD)
518                         return 0;
519                 return -EINVAL;
520         default:
521                 return -EINVAL;
522         }
523 }
524
525 static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo,
526                                             struct request *req,
527                                             struct blkif_request **ring_req)
528 {
529         unsigned long id;
530
531         *ring_req = RING_GET_REQUEST(&rinfo->ring, rinfo->ring.req_prod_pvt);
532         rinfo->ring.req_prod_pvt++;
533
534         id = get_id_from_freelist(rinfo);
535         rinfo->shadow[id].request = req;
536         rinfo->shadow[id].status = REQ_PROCESSING;
537         rinfo->shadow[id].associated_id = NO_ASSOCIATED_ID;
538
539         rinfo->shadow[id].req.u.rw.id = id;
540
541         return id;
542 }
543
544 static int blkif_queue_discard_req(struct request *req, struct blkfront_ring_info *rinfo)
545 {
546         struct blkfront_info *info = rinfo->dev_info;
547         struct blkif_request *ring_req, *final_ring_req;
548         unsigned long id;
549
550         /* Fill out a communications ring structure. */
551         id = blkif_ring_get_request(rinfo, req, &final_ring_req);
552         ring_req = &rinfo->shadow[id].req;
553
554         ring_req->operation = BLKIF_OP_DISCARD;
555         ring_req->u.discard.nr_sectors = blk_rq_sectors(req);
556         ring_req->u.discard.id = id;
557         ring_req->u.discard.sector_number = (blkif_sector_t)blk_rq_pos(req);
558         if (req_op(req) == REQ_OP_SECURE_ERASE && info->feature_secdiscard)
559                 ring_req->u.discard.flag = BLKIF_DISCARD_SECURE;
560         else
561                 ring_req->u.discard.flag = 0;
562
563         /* Copy the request to the ring page. */
564         *final_ring_req = *ring_req;
565         rinfo->shadow[id].status = REQ_WAITING;
566
567         return 0;
568 }
569
570 struct setup_rw_req {
571         unsigned int grant_idx;
572         struct blkif_request_segment *segments;
573         struct blkfront_ring_info *rinfo;
574         struct blkif_request *ring_req;
575         grant_ref_t gref_head;
576         unsigned int id;
577         /* Only used when persistent grant is used and it's a read request */
578         bool need_copy;
579         unsigned int bvec_off;
580         char *bvec_data;
581
582         bool require_extra_req;
583         struct blkif_request *extra_ring_req;
584 };
585
586 static void blkif_setup_rw_req_grant(unsigned long gfn, unsigned int offset,
587                                      unsigned int len, void *data)
588 {
589         struct setup_rw_req *setup = data;
590         int n, ref;
591         struct grant *gnt_list_entry;
592         unsigned int fsect, lsect;
593         /* Convenient aliases */
594         unsigned int grant_idx = setup->grant_idx;
595         struct blkif_request *ring_req = setup->ring_req;
596         struct blkfront_ring_info *rinfo = setup->rinfo;
597         /*
598          * We always use the shadow of the first request to store the list
599          * of grant associated to the block I/O request. This made the
600          * completion more easy to handle even if the block I/O request is
601          * split.
602          */
603         struct blk_shadow *shadow = &rinfo->shadow[setup->id];
604
605         if (unlikely(setup->require_extra_req &&
606                      grant_idx >= BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
607                 /*
608                  * We are using the second request, setup grant_idx
609                  * to be the index of the segment array.
610                  */
611                 grant_idx -= BLKIF_MAX_SEGMENTS_PER_REQUEST;
612                 ring_req = setup->extra_ring_req;
613         }
614
615         if ((ring_req->operation == BLKIF_OP_INDIRECT) &&
616             (grant_idx % GRANTS_PER_INDIRECT_FRAME == 0)) {
617                 if (setup->segments)
618                         kunmap_atomic(setup->segments);
619
620                 n = grant_idx / GRANTS_PER_INDIRECT_FRAME;
621                 gnt_list_entry = get_indirect_grant(&setup->gref_head, rinfo);
622                 shadow->indirect_grants[n] = gnt_list_entry;
623                 setup->segments = kmap_atomic(gnt_list_entry->page);
624                 ring_req->u.indirect.indirect_grefs[n] = gnt_list_entry->gref;
625         }
626
627         gnt_list_entry = get_grant(&setup->gref_head, gfn, rinfo);
628         ref = gnt_list_entry->gref;
629         /*
630          * All the grants are stored in the shadow of the first
631          * request. Therefore we have to use the global index.
632          */
633         shadow->grants_used[setup->grant_idx] = gnt_list_entry;
634
635         if (setup->need_copy) {
636                 void *shared_data;
637
638                 shared_data = kmap_atomic(gnt_list_entry->page);
639                 /*
640                  * this does not wipe data stored outside the
641                  * range sg->offset..sg->offset+sg->length.
642                  * Therefore, blkback *could* see data from
643                  * previous requests. This is OK as long as
644                  * persistent grants are shared with just one
645                  * domain. It may need refactoring if this
646                  * changes
647                  */
648                 memcpy(shared_data + offset,
649                        setup->bvec_data + setup->bvec_off,
650                        len);
651
652                 kunmap_atomic(shared_data);
653                 setup->bvec_off += len;
654         }
655
656         fsect = offset >> 9;
657         lsect = fsect + (len >> 9) - 1;
658         if (ring_req->operation != BLKIF_OP_INDIRECT) {
659                 ring_req->u.rw.seg[grant_idx] =
660                         (struct blkif_request_segment) {
661                                 .gref       = ref,
662                                 .first_sect = fsect,
663                                 .last_sect  = lsect };
664         } else {
665                 setup->segments[grant_idx % GRANTS_PER_INDIRECT_FRAME] =
666                         (struct blkif_request_segment) {
667                                 .gref       = ref,
668                                 .first_sect = fsect,
669                                 .last_sect  = lsect };
670         }
671
672         (setup->grant_idx)++;
673 }
674
675 static void blkif_setup_extra_req(struct blkif_request *first,
676                                   struct blkif_request *second)
677 {
678         uint16_t nr_segments = first->u.rw.nr_segments;
679
680         /*
681          * The second request is only present when the first request uses
682          * all its segments. It's always the continuity of the first one.
683          */
684         first->u.rw.nr_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
685
686         second->u.rw.nr_segments = nr_segments - BLKIF_MAX_SEGMENTS_PER_REQUEST;
687         second->u.rw.sector_number = first->u.rw.sector_number +
688                 (BLKIF_MAX_SEGMENTS_PER_REQUEST * XEN_PAGE_SIZE) / 512;
689
690         second->u.rw.handle = first->u.rw.handle;
691         second->operation = first->operation;
692 }
693
694 static int blkif_queue_rw_req(struct request *req, struct blkfront_ring_info *rinfo)
695 {
696         struct blkfront_info *info = rinfo->dev_info;
697         struct blkif_request *ring_req, *extra_ring_req = NULL;
698         struct blkif_request *final_ring_req, *final_extra_ring_req = NULL;
699         unsigned long id, extra_id = NO_ASSOCIATED_ID;
700         bool require_extra_req = false;
701         int i;
702         struct setup_rw_req setup = {
703                 .grant_idx = 0,
704                 .segments = NULL,
705                 .rinfo = rinfo,
706                 .need_copy = rq_data_dir(req) && info->feature_persistent,
707         };
708
709         /*
710          * Used to store if we are able to queue the request by just using
711          * existing persistent grants, or if we have to get new grants,
712          * as there are not sufficiently many free.
713          */
714         bool new_persistent_gnts = false;
715         struct scatterlist *sg;
716         int num_sg, max_grefs, num_grant;
717
718         max_grefs = req->nr_phys_segments * GRANTS_PER_PSEG;
719         if (max_grefs > BLKIF_MAX_SEGMENTS_PER_REQUEST)
720                 /*
721                  * If we are using indirect segments we need to account
722                  * for the indirect grefs used in the request.
723                  */
724                 max_grefs += INDIRECT_GREFS(max_grefs);
725
726         /* Check if we have enough persistent grants to allocate a requests */
727         if (rinfo->persistent_gnts_c < max_grefs) {
728                 new_persistent_gnts = true;
729
730                 if (gnttab_alloc_grant_references(
731                     max_grefs - rinfo->persistent_gnts_c,
732                     &setup.gref_head) < 0) {
733                         gnttab_request_free_callback(
734                                 &rinfo->callback,
735                                 blkif_restart_queue_callback,
736                                 rinfo,
737                                 max_grefs - rinfo->persistent_gnts_c);
738                         return 1;
739                 }
740         }
741
742         /* Fill out a communications ring structure. */
743         id = blkif_ring_get_request(rinfo, req, &final_ring_req);
744         ring_req = &rinfo->shadow[id].req;
745
746         num_sg = blk_rq_map_sg(req->q, req, rinfo->shadow[id].sg);
747         num_grant = 0;
748         /* Calculate the number of grant used */
749         for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i)
750                num_grant += gnttab_count_grant(sg->offset, sg->length);
751
752         require_extra_req = info->max_indirect_segments == 0 &&
753                 num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST;
754         BUG_ON(!HAS_EXTRA_REQ && require_extra_req);
755
756         rinfo->shadow[id].num_sg = num_sg;
757         if (num_grant > BLKIF_MAX_SEGMENTS_PER_REQUEST &&
758             likely(!require_extra_req)) {
759                 /*
760                  * The indirect operation can only be a BLKIF_OP_READ or
761                  * BLKIF_OP_WRITE
762                  */
763                 BUG_ON(req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA);
764                 ring_req->operation = BLKIF_OP_INDIRECT;
765                 ring_req->u.indirect.indirect_op = rq_data_dir(req) ?
766                         BLKIF_OP_WRITE : BLKIF_OP_READ;
767                 ring_req->u.indirect.sector_number = (blkif_sector_t)blk_rq_pos(req);
768                 ring_req->u.indirect.handle = info->handle;
769                 ring_req->u.indirect.nr_segments = num_grant;
770         } else {
771                 ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req);
772                 ring_req->u.rw.handle = info->handle;
773                 ring_req->operation = rq_data_dir(req) ?
774                         BLKIF_OP_WRITE : BLKIF_OP_READ;
775                 if (req_op(req) == REQ_OP_FLUSH || req->cmd_flags & REQ_FUA) {
776                         /*
777                          * Ideally we can do an unordered flush-to-disk.
778                          * In case the backend onlysupports barriers, use that.
779                          * A barrier request a superset of FUA, so we can
780                          * implement it the same way.  (It's also a FLUSH+FUA,
781                          * since it is guaranteed ordered WRT previous writes.)
782                          */
783                         if (info->feature_flush && info->feature_fua)
784                                 ring_req->operation =
785                                         BLKIF_OP_WRITE_BARRIER;
786                         else if (info->feature_flush)
787                                 ring_req->operation =
788                                         BLKIF_OP_FLUSH_DISKCACHE;
789                         else
790                                 ring_req->operation = 0;
791                 }
792                 ring_req->u.rw.nr_segments = num_grant;
793                 if (unlikely(require_extra_req)) {
794                         extra_id = blkif_ring_get_request(rinfo, req,
795                                                           &final_extra_ring_req);
796                         extra_ring_req = &rinfo->shadow[extra_id].req;
797
798                         /*
799                          * Only the first request contains the scatter-gather
800                          * list.
801                          */
802                         rinfo->shadow[extra_id].num_sg = 0;
803
804                         blkif_setup_extra_req(ring_req, extra_ring_req);
805
806                         /* Link the 2 requests together */
807                         rinfo->shadow[extra_id].associated_id = id;
808                         rinfo->shadow[id].associated_id = extra_id;
809                 }
810         }
811
812         setup.ring_req = ring_req;
813         setup.id = id;
814
815         setup.require_extra_req = require_extra_req;
816         if (unlikely(require_extra_req))
817                 setup.extra_ring_req = extra_ring_req;
818
819         for_each_sg(rinfo->shadow[id].sg, sg, num_sg, i) {
820                 BUG_ON(sg->offset + sg->length > PAGE_SIZE);
821
822                 if (setup.need_copy) {
823                         setup.bvec_off = sg->offset;
824                         setup.bvec_data = kmap_atomic(sg_page(sg));
825                 }
826
827                 gnttab_foreach_grant_in_range(sg_page(sg),
828                                               sg->offset,
829                                               sg->length,
830                                               blkif_setup_rw_req_grant,
831                                               &setup);
832
833                 if (setup.need_copy)
834                         kunmap_atomic(setup.bvec_data);
835         }
836         if (setup.segments)
837                 kunmap_atomic(setup.segments);
838
839         /* Copy request(s) to the ring page. */
840         *final_ring_req = *ring_req;
841         rinfo->shadow[id].status = REQ_WAITING;
842         if (unlikely(require_extra_req)) {
843                 *final_extra_ring_req = *extra_ring_req;
844                 rinfo->shadow[extra_id].status = REQ_WAITING;
845         }
846
847         if (new_persistent_gnts)
848                 gnttab_free_grant_references(setup.gref_head);
849
850         return 0;
851 }
852
853 /*
854  * Generate a Xen blkfront IO request from a blk layer request.  Reads
855  * and writes are handled as expected.
856  *
857  * @req: a request struct
858  */
859 static int blkif_queue_request(struct request *req, struct blkfront_ring_info *rinfo)
860 {
861         if (unlikely(rinfo->dev_info->connected != BLKIF_STATE_CONNECTED))
862                 return 1;
863
864         if (unlikely(req_op(req) == REQ_OP_DISCARD ||
865                      req_op(req) == REQ_OP_SECURE_ERASE))
866                 return blkif_queue_discard_req(req, rinfo);
867         else
868                 return blkif_queue_rw_req(req, rinfo);
869 }
870
871 static inline void flush_requests(struct blkfront_ring_info *rinfo)
872 {
873         int notify;
874
875         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&rinfo->ring, notify);
876
877         if (notify)
878                 notify_remote_via_irq(rinfo->irq);
879 }
880
881 static inline bool blkif_request_flush_invalid(struct request *req,
882                                                struct blkfront_info *info)
883 {
884         return (blk_rq_is_passthrough(req) ||
885                 ((req_op(req) == REQ_OP_FLUSH) &&
886                  !info->feature_flush) ||
887                 ((req->cmd_flags & REQ_FUA) &&
888                  !info->feature_fua));
889 }
890
891 static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
892                           const struct blk_mq_queue_data *qd)
893 {
894         unsigned long flags;
895         int qid = hctx->queue_num;
896         struct blkfront_info *info = hctx->queue->queuedata;
897         struct blkfront_ring_info *rinfo = NULL;
898
899         rinfo = get_rinfo(info, qid);
900         blk_mq_start_request(qd->rq);
901         spin_lock_irqsave(&rinfo->ring_lock, flags);
902         if (RING_FULL(&rinfo->ring))
903                 goto out_busy;
904
905         if (blkif_request_flush_invalid(qd->rq, rinfo->dev_info))
906                 goto out_err;
907
908         if (blkif_queue_request(qd->rq, rinfo))
909                 goto out_busy;
910
911         flush_requests(rinfo);
912         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
913         return BLK_STS_OK;
914
915 out_err:
916         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
917         return BLK_STS_IOERR;
918
919 out_busy:
920         blk_mq_stop_hw_queue(hctx);
921         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
922         return BLK_STS_DEV_RESOURCE;
923 }
924
925 static void blkif_complete_rq(struct request *rq)
926 {
927         blk_mq_end_request(rq, blkif_req(rq)->error);
928 }
929
930 static const struct blk_mq_ops blkfront_mq_ops = {
931         .queue_rq = blkif_queue_rq,
932         .complete = blkif_complete_rq,
933 };
934
935 static void blkif_set_queue_limits(struct blkfront_info *info)
936 {
937         struct request_queue *rq = info->rq;
938         struct gendisk *gd = info->gd;
939         unsigned int segments = info->max_indirect_segments ? :
940                                 BLKIF_MAX_SEGMENTS_PER_REQUEST;
941
942         blk_queue_flag_set(QUEUE_FLAG_VIRT, rq);
943
944         if (info->feature_discard) {
945                 blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq);
946                 blk_queue_max_discard_sectors(rq, get_capacity(gd));
947                 rq->limits.discard_granularity = info->discard_granularity ?:
948                                                  info->physical_sector_size;
949                 rq->limits.discard_alignment = info->discard_alignment;
950                 if (info->feature_secdiscard)
951                         blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq);
952         }
953
954         /* Hard sector size and max sectors impersonate the equiv. hardware. */
955         blk_queue_logical_block_size(rq, info->sector_size);
956         blk_queue_physical_block_size(rq, info->physical_sector_size);
957         blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
958
959         /* Each segment in a request is up to an aligned page in size. */
960         blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
961         blk_queue_max_segment_size(rq, PAGE_SIZE);
962
963         /* Ensure a merged request will fit in a single I/O ring slot. */
964         blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
965
966         /* Make sure buffer addresses are sector-aligned. */
967         blk_queue_dma_alignment(rq, 511);
968 }
969
970 static const char *flush_info(struct blkfront_info *info)
971 {
972         if (info->feature_flush && info->feature_fua)
973                 return "barrier: enabled;";
974         else if (info->feature_flush)
975                 return "flush diskcache: enabled;";
976         else
977                 return "barrier or flush: disabled;";
978 }
979
980 static void xlvbd_flush(struct blkfront_info *info)
981 {
982         blk_queue_write_cache(info->rq, info->feature_flush ? true : false,
983                               info->feature_fua ? true : false);
984         pr_info("blkfront: %s: %s %s %s %s %s\n",
985                 info->gd->disk_name, flush_info(info),
986                 "persistent grants:", info->feature_persistent ?
987                 "enabled;" : "disabled;", "indirect descriptors:",
988                 info->max_indirect_segments ? "enabled;" : "disabled;");
989 }
990
991 static int xen_translate_vdev(int vdevice, int *minor, unsigned int *offset)
992 {
993         int major;
994         major = BLKIF_MAJOR(vdevice);
995         *minor = BLKIF_MINOR(vdevice);
996         switch (major) {
997                 case XEN_IDE0_MAJOR:
998                         *offset = (*minor / 64) + EMULATED_HD_DISK_NAME_OFFSET;
999                         *minor = ((*minor / 64) * PARTS_PER_DISK) +
1000                                 EMULATED_HD_DISK_MINOR_OFFSET;
1001                         break;
1002                 case XEN_IDE1_MAJOR:
1003                         *offset = (*minor / 64) + 2 + EMULATED_HD_DISK_NAME_OFFSET;
1004                         *minor = (((*minor / 64) + 2) * PARTS_PER_DISK) +
1005                                 EMULATED_HD_DISK_MINOR_OFFSET;
1006                         break;
1007                 case XEN_SCSI_DISK0_MAJOR:
1008                         *offset = (*minor / PARTS_PER_DISK) + EMULATED_SD_DISK_NAME_OFFSET;
1009                         *minor = *minor + EMULATED_SD_DISK_MINOR_OFFSET;
1010                         break;
1011                 case XEN_SCSI_DISK1_MAJOR:
1012                 case XEN_SCSI_DISK2_MAJOR:
1013                 case XEN_SCSI_DISK3_MAJOR:
1014                 case XEN_SCSI_DISK4_MAJOR:
1015                 case XEN_SCSI_DISK5_MAJOR:
1016                 case XEN_SCSI_DISK6_MAJOR:
1017                 case XEN_SCSI_DISK7_MAJOR:
1018                         *offset = (*minor / PARTS_PER_DISK) + 
1019                                 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16) +
1020                                 EMULATED_SD_DISK_NAME_OFFSET;
1021                         *minor = *minor +
1022                                 ((major - XEN_SCSI_DISK1_MAJOR + 1) * 16 * PARTS_PER_DISK) +
1023                                 EMULATED_SD_DISK_MINOR_OFFSET;
1024                         break;
1025                 case XEN_SCSI_DISK8_MAJOR:
1026                 case XEN_SCSI_DISK9_MAJOR:
1027                 case XEN_SCSI_DISK10_MAJOR:
1028                 case XEN_SCSI_DISK11_MAJOR:
1029                 case XEN_SCSI_DISK12_MAJOR:
1030                 case XEN_SCSI_DISK13_MAJOR:
1031                 case XEN_SCSI_DISK14_MAJOR:
1032                 case XEN_SCSI_DISK15_MAJOR:
1033                         *offset = (*minor / PARTS_PER_DISK) + 
1034                                 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16) +
1035                                 EMULATED_SD_DISK_NAME_OFFSET;
1036                         *minor = *minor +
1037                                 ((major - XEN_SCSI_DISK8_MAJOR + 8) * 16 * PARTS_PER_DISK) +
1038                                 EMULATED_SD_DISK_MINOR_OFFSET;
1039                         break;
1040                 case XENVBD_MAJOR:
1041                         *offset = *minor / PARTS_PER_DISK;
1042                         break;
1043                 default:
1044                         printk(KERN_WARNING "blkfront: your disk configuration is "
1045                                         "incorrect, please use an xvd device instead\n");
1046                         return -ENODEV;
1047         }
1048         return 0;
1049 }
1050
1051 static char *encode_disk_name(char *ptr, unsigned int n)
1052 {
1053         if (n >= 26)
1054                 ptr = encode_disk_name(ptr, n / 26 - 1);
1055         *ptr = 'a' + n % 26;
1056         return ptr + 1;
1057 }
1058
1059 static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
1060                                struct blkfront_info *info,
1061                                u16 vdisk_info, u16 sector_size,
1062                                unsigned int physical_sector_size)
1063 {
1064         struct gendisk *gd;
1065         int nr_minors = 1;
1066         int err;
1067         unsigned int offset;
1068         int minor;
1069         int nr_parts;
1070         char *ptr;
1071
1072         BUG_ON(info->gd != NULL);
1073         BUG_ON(info->rq != NULL);
1074
1075         if ((info->vdevice>>EXT_SHIFT) > 1) {
1076                 /* this is above the extended range; something is wrong */
1077                 printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", info->vdevice);
1078                 return -ENODEV;
1079         }
1080
1081         if (!VDEV_IS_EXTENDED(info->vdevice)) {
1082                 err = xen_translate_vdev(info->vdevice, &minor, &offset);
1083                 if (err)
1084                         return err;
1085                 nr_parts = PARTS_PER_DISK;
1086         } else {
1087                 minor = BLKIF_MINOR_EXT(info->vdevice);
1088                 nr_parts = PARTS_PER_EXT_DISK;
1089                 offset = minor / nr_parts;
1090                 if (xen_hvm_domain() && offset < EMULATED_HD_DISK_NAME_OFFSET + 4)
1091                         printk(KERN_WARNING "blkfront: vdevice 0x%x might conflict with "
1092                                         "emulated IDE disks,\n\t choose an xvd device name"
1093                                         "from xvde on\n", info->vdevice);
1094         }
1095         if (minor >> MINORBITS) {
1096                 pr_warn("blkfront: %#x's minor (%#x) out of range; ignoring\n",
1097                         info->vdevice, minor);
1098                 return -ENODEV;
1099         }
1100
1101         if ((minor % nr_parts) == 0)
1102                 nr_minors = nr_parts;
1103
1104         err = xlbd_reserve_minors(minor, nr_minors);
1105         if (err)
1106                 return err;
1107
1108         memset(&info->tag_set, 0, sizeof(info->tag_set));
1109         info->tag_set.ops = &blkfront_mq_ops;
1110         info->tag_set.nr_hw_queues = info->nr_rings;
1111         if (HAS_EXTRA_REQ && info->max_indirect_segments == 0) {
1112                 /*
1113                  * When indirect descriptior is not supported, the I/O request
1114                  * will be split between multiple request in the ring.
1115                  * To avoid problems when sending the request, divide by
1116                  * 2 the depth of the queue.
1117                  */
1118                 info->tag_set.queue_depth =  BLK_RING_SIZE(info) / 2;
1119         } else
1120                 info->tag_set.queue_depth = BLK_RING_SIZE(info);
1121         info->tag_set.numa_node = NUMA_NO_NODE;
1122         info->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
1123         info->tag_set.cmd_size = sizeof(struct blkif_req);
1124         info->tag_set.driver_data = info;
1125
1126         err = blk_mq_alloc_tag_set(&info->tag_set);
1127         if (err)
1128                 goto out_release_minors;
1129
1130         gd = blk_mq_alloc_disk(&info->tag_set, info);
1131         if (IS_ERR(gd)) {
1132                 err = PTR_ERR(gd);
1133                 goto out_free_tag_set;
1134         }
1135
1136         strcpy(gd->disk_name, DEV_NAME);
1137         ptr = encode_disk_name(gd->disk_name + sizeof(DEV_NAME) - 1, offset);
1138         BUG_ON(ptr >= gd->disk_name + DISK_NAME_LEN);
1139         if (nr_minors > 1)
1140                 *ptr = 0;
1141         else
1142                 snprintf(ptr, gd->disk_name + DISK_NAME_LEN - ptr,
1143                          "%d", minor & (nr_parts - 1));
1144
1145         gd->major = XENVBD_MAJOR;
1146         gd->first_minor = minor;
1147         gd->minors = nr_minors;
1148         gd->fops = &xlvbd_block_fops;
1149         gd->private_data = info;
1150         set_capacity(gd, capacity);
1151
1152         info->rq = gd->queue;
1153         info->gd = gd;
1154         info->sector_size = sector_size;
1155         info->physical_sector_size = physical_sector_size;
1156         blkif_set_queue_limits(info);
1157
1158         xlvbd_flush(info);
1159
1160         if (vdisk_info & VDISK_READONLY)
1161                 set_disk_ro(gd, 1);
1162
1163         if (vdisk_info & VDISK_REMOVABLE)
1164                 gd->flags |= GENHD_FL_REMOVABLE;
1165
1166         if (vdisk_info & VDISK_CDROM)
1167                 gd->flags |= GENHD_FL_CD;
1168
1169         return 0;
1170
1171 out_free_tag_set:
1172         blk_mq_free_tag_set(&info->tag_set);
1173 out_release_minors:
1174         xlbd_release_minors(minor, nr_minors);
1175         return err;
1176 }
1177
1178 /* Already hold rinfo->ring_lock. */
1179 static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo)
1180 {
1181         if (!RING_FULL(&rinfo->ring))
1182                 blk_mq_start_stopped_hw_queues(rinfo->dev_info->rq, true);
1183 }
1184
1185 static void kick_pending_request_queues(struct blkfront_ring_info *rinfo)
1186 {
1187         unsigned long flags;
1188
1189         spin_lock_irqsave(&rinfo->ring_lock, flags);
1190         kick_pending_request_queues_locked(rinfo);
1191         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1192 }
1193
1194 static void blkif_restart_queue(struct work_struct *work)
1195 {
1196         struct blkfront_ring_info *rinfo = container_of(work, struct blkfront_ring_info, work);
1197
1198         if (rinfo->dev_info->connected == BLKIF_STATE_CONNECTED)
1199                 kick_pending_request_queues(rinfo);
1200 }
1201
1202 static void blkif_free_ring(struct blkfront_ring_info *rinfo)
1203 {
1204         struct grant *persistent_gnt, *n;
1205         struct blkfront_info *info = rinfo->dev_info;
1206         int i, j, segs;
1207
1208         /*
1209          * Remove indirect pages, this only happens when using indirect
1210          * descriptors but not persistent grants
1211          */
1212         if (!list_empty(&rinfo->indirect_pages)) {
1213                 struct page *indirect_page, *n;
1214
1215                 BUG_ON(info->feature_persistent);
1216                 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
1217                         list_del(&indirect_page->lru);
1218                         __free_page(indirect_page);
1219                 }
1220         }
1221
1222         /* Remove all persistent grants. */
1223         if (!list_empty(&rinfo->grants)) {
1224                 list_for_each_entry_safe(persistent_gnt, n,
1225                                          &rinfo->grants, node) {
1226                         list_del(&persistent_gnt->node);
1227                         if (persistent_gnt->gref != GRANT_INVALID_REF) {
1228                                 gnttab_end_foreign_access(persistent_gnt->gref,
1229                                                           0, 0UL);
1230                                 rinfo->persistent_gnts_c--;
1231                         }
1232                         if (info->feature_persistent)
1233                                 __free_page(persistent_gnt->page);
1234                         kfree(persistent_gnt);
1235                 }
1236         }
1237         BUG_ON(rinfo->persistent_gnts_c != 0);
1238
1239         for (i = 0; i < BLK_RING_SIZE(info); i++) {
1240                 /*
1241                  * Clear persistent grants present in requests already
1242                  * on the shared ring
1243                  */
1244                 if (!rinfo->shadow[i].request)
1245                         goto free_shadow;
1246
1247                 segs = rinfo->shadow[i].req.operation == BLKIF_OP_INDIRECT ?
1248                        rinfo->shadow[i].req.u.indirect.nr_segments :
1249                        rinfo->shadow[i].req.u.rw.nr_segments;
1250                 for (j = 0; j < segs; j++) {
1251                         persistent_gnt = rinfo->shadow[i].grants_used[j];
1252                         gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1253                         if (info->feature_persistent)
1254                                 __free_page(persistent_gnt->page);
1255                         kfree(persistent_gnt);
1256                 }
1257
1258                 if (rinfo->shadow[i].req.operation != BLKIF_OP_INDIRECT)
1259                         /*
1260                          * If this is not an indirect operation don't try to
1261                          * free indirect segments
1262                          */
1263                         goto free_shadow;
1264
1265                 for (j = 0; j < INDIRECT_GREFS(segs); j++) {
1266                         persistent_gnt = rinfo->shadow[i].indirect_grants[j];
1267                         gnttab_end_foreign_access(persistent_gnt->gref, 0, 0UL);
1268                         __free_page(persistent_gnt->page);
1269                         kfree(persistent_gnt);
1270                 }
1271
1272 free_shadow:
1273                 kvfree(rinfo->shadow[i].grants_used);
1274                 rinfo->shadow[i].grants_used = NULL;
1275                 kvfree(rinfo->shadow[i].indirect_grants);
1276                 rinfo->shadow[i].indirect_grants = NULL;
1277                 kvfree(rinfo->shadow[i].sg);
1278                 rinfo->shadow[i].sg = NULL;
1279         }
1280
1281         /* No more gnttab callback work. */
1282         gnttab_cancel_free_callback(&rinfo->callback);
1283
1284         /* Flush gnttab callback work. Must be done with no locks held. */
1285         flush_work(&rinfo->work);
1286
1287         /* Free resources associated with old device channel. */
1288         for (i = 0; i < info->nr_ring_pages; i++) {
1289                 if (rinfo->ring_ref[i] != GRANT_INVALID_REF) {
1290                         gnttab_end_foreign_access(rinfo->ring_ref[i], 0, 0);
1291                         rinfo->ring_ref[i] = GRANT_INVALID_REF;
1292                 }
1293         }
1294         free_pages_exact(rinfo->ring.sring,
1295                          info->nr_ring_pages * XEN_PAGE_SIZE);
1296         rinfo->ring.sring = NULL;
1297
1298         if (rinfo->irq)
1299                 unbind_from_irqhandler(rinfo->irq, rinfo);
1300         rinfo->evtchn = rinfo->irq = 0;
1301 }
1302
1303 static void blkif_free(struct blkfront_info *info, int suspend)
1304 {
1305         unsigned int i;
1306         struct blkfront_ring_info *rinfo;
1307
1308         /* Prevent new requests being issued until we fix things up. */
1309         info->connected = suspend ?
1310                 BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
1311         /* No more blkif_request(). */
1312         if (info->rq)
1313                 blk_mq_stop_hw_queues(info->rq);
1314
1315         for_each_rinfo(info, rinfo, i)
1316                 blkif_free_ring(rinfo);
1317
1318         kvfree(info->rinfo);
1319         info->rinfo = NULL;
1320         info->nr_rings = 0;
1321 }
1322
1323 struct copy_from_grant {
1324         const struct blk_shadow *s;
1325         unsigned int grant_idx;
1326         unsigned int bvec_offset;
1327         char *bvec_data;
1328 };
1329
1330 static void blkif_copy_from_grant(unsigned long gfn, unsigned int offset,
1331                                   unsigned int len, void *data)
1332 {
1333         struct copy_from_grant *info = data;
1334         char *shared_data;
1335         /* Convenient aliases */
1336         const struct blk_shadow *s = info->s;
1337
1338         shared_data = kmap_atomic(s->grants_used[info->grant_idx]->page);
1339
1340         memcpy(info->bvec_data + info->bvec_offset,
1341                shared_data + offset, len);
1342
1343         info->bvec_offset += len;
1344         info->grant_idx++;
1345
1346         kunmap_atomic(shared_data);
1347 }
1348
1349 static enum blk_req_status blkif_rsp_to_req_status(int rsp)
1350 {
1351         switch (rsp)
1352         {
1353         case BLKIF_RSP_OKAY:
1354                 return REQ_DONE;
1355         case BLKIF_RSP_EOPNOTSUPP:
1356                 return REQ_EOPNOTSUPP;
1357         case BLKIF_RSP_ERROR:
1358         default:
1359                 return REQ_ERROR;
1360         }
1361 }
1362
1363 /*
1364  * Get the final status of the block request based on two ring response
1365  */
1366 static int blkif_get_final_status(enum blk_req_status s1,
1367                                   enum blk_req_status s2)
1368 {
1369         BUG_ON(s1 < REQ_DONE);
1370         BUG_ON(s2 < REQ_DONE);
1371
1372         if (s1 == REQ_ERROR || s2 == REQ_ERROR)
1373                 return BLKIF_RSP_ERROR;
1374         else if (s1 == REQ_EOPNOTSUPP || s2 == REQ_EOPNOTSUPP)
1375                 return BLKIF_RSP_EOPNOTSUPP;
1376         return BLKIF_RSP_OKAY;
1377 }
1378
1379 /*
1380  * Return values:
1381  *  1 response processed.
1382  *  0 missing further responses.
1383  * -1 error while processing.
1384  */
1385 static int blkif_completion(unsigned long *id,
1386                             struct blkfront_ring_info *rinfo,
1387                             struct blkif_response *bret)
1388 {
1389         int i = 0;
1390         struct scatterlist *sg;
1391         int num_sg, num_grant;
1392         struct blkfront_info *info = rinfo->dev_info;
1393         struct blk_shadow *s = &rinfo->shadow[*id];
1394         struct copy_from_grant data = {
1395                 .grant_idx = 0,
1396         };
1397
1398         num_grant = s->req.operation == BLKIF_OP_INDIRECT ?
1399                 s->req.u.indirect.nr_segments : s->req.u.rw.nr_segments;
1400
1401         /* The I/O request may be split in two. */
1402         if (unlikely(s->associated_id != NO_ASSOCIATED_ID)) {
1403                 struct blk_shadow *s2 = &rinfo->shadow[s->associated_id];
1404
1405                 /* Keep the status of the current response in shadow. */
1406                 s->status = blkif_rsp_to_req_status(bret->status);
1407
1408                 /* Wait the second response if not yet here. */
1409                 if (s2->status < REQ_DONE)
1410                         return 0;
1411
1412                 bret->status = blkif_get_final_status(s->status,
1413                                                       s2->status);
1414
1415                 /*
1416                  * All the grants is stored in the first shadow in order
1417                  * to make the completion code simpler.
1418                  */
1419                 num_grant += s2->req.u.rw.nr_segments;
1420
1421                 /*
1422                  * The two responses may not come in order. Only the
1423                  * first request will store the scatter-gather list.
1424                  */
1425                 if (s2->num_sg != 0) {
1426                         /* Update "id" with the ID of the first response. */
1427                         *id = s->associated_id;
1428                         s = s2;
1429                 }
1430
1431                 /*
1432                  * We don't need anymore the second request, so recycling
1433                  * it now.
1434                  */
1435                 if (add_id_to_freelist(rinfo, s->associated_id))
1436                         WARN(1, "%s: can't recycle the second part (id = %ld) of the request\n",
1437                              info->gd->disk_name, s->associated_id);
1438         }
1439
1440         data.s = s;
1441         num_sg = s->num_sg;
1442
1443         if (bret->operation == BLKIF_OP_READ && info->feature_persistent) {
1444                 for_each_sg(s->sg, sg, num_sg, i) {
1445                         BUG_ON(sg->offset + sg->length > PAGE_SIZE);
1446
1447                         data.bvec_offset = sg->offset;
1448                         data.bvec_data = kmap_atomic(sg_page(sg));
1449
1450                         gnttab_foreach_grant_in_range(sg_page(sg),
1451                                                       sg->offset,
1452                                                       sg->length,
1453                                                       blkif_copy_from_grant,
1454                                                       &data);
1455
1456                         kunmap_atomic(data.bvec_data);
1457                 }
1458         }
1459         /* Add the persistent grant into the list of free grants */
1460         for (i = 0; i < num_grant; i++) {
1461                 if (!gnttab_try_end_foreign_access(s->grants_used[i]->gref)) {
1462                         /*
1463                          * If the grant is still mapped by the backend (the
1464                          * backend has chosen to make this grant persistent)
1465                          * we add it at the head of the list, so it will be
1466                          * reused first.
1467                          */
1468                         if (!info->feature_persistent) {
1469                                 pr_alert("backed has not unmapped grant: %u\n",
1470                                          s->grants_used[i]->gref);
1471                                 return -1;
1472                         }
1473                         list_add(&s->grants_used[i]->node, &rinfo->grants);
1474                         rinfo->persistent_gnts_c++;
1475                 } else {
1476                         /*
1477                          * If the grant is not mapped by the backend we add it
1478                          * to the tail of the list, so it will not be picked
1479                          * again unless we run out of persistent grants.
1480                          */
1481                         s->grants_used[i]->gref = GRANT_INVALID_REF;
1482                         list_add_tail(&s->grants_used[i]->node, &rinfo->grants);
1483                 }
1484         }
1485         if (s->req.operation == BLKIF_OP_INDIRECT) {
1486                 for (i = 0; i < INDIRECT_GREFS(num_grant); i++) {
1487                         if (!gnttab_try_end_foreign_access(s->indirect_grants[i]->gref)) {
1488                                 if (!info->feature_persistent) {
1489                                         pr_alert("backed has not unmapped grant: %u\n",
1490                                                  s->indirect_grants[i]->gref);
1491                                         return -1;
1492                                 }
1493                                 list_add(&s->indirect_grants[i]->node, &rinfo->grants);
1494                                 rinfo->persistent_gnts_c++;
1495                         } else {
1496                                 struct page *indirect_page;
1497
1498                                 /*
1499                                  * Add the used indirect page back to the list of
1500                                  * available pages for indirect grefs.
1501                                  */
1502                                 if (!info->feature_persistent) {
1503                                         indirect_page = s->indirect_grants[i]->page;
1504                                         list_add(&indirect_page->lru, &rinfo->indirect_pages);
1505                                 }
1506                                 s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1507                                 list_add_tail(&s->indirect_grants[i]->node, &rinfo->grants);
1508                         }
1509                 }
1510         }
1511
1512         return 1;
1513 }
1514
1515 static irqreturn_t blkif_interrupt(int irq, void *dev_id)
1516 {
1517         struct request *req;
1518         struct blkif_response bret;
1519         RING_IDX i, rp;
1520         unsigned long flags;
1521         struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
1522         struct blkfront_info *info = rinfo->dev_info;
1523         unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1524
1525         if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
1526                 xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
1527                 return IRQ_HANDLED;
1528         }
1529
1530         spin_lock_irqsave(&rinfo->ring_lock, flags);
1531  again:
1532         rp = READ_ONCE(rinfo->ring.sring->rsp_prod);
1533         virt_rmb(); /* Ensure we see queued responses up to 'rp'. */
1534         if (RING_RESPONSE_PROD_OVERFLOW(&rinfo->ring, rp)) {
1535                 pr_alert("%s: illegal number of responses %u\n",
1536                          info->gd->disk_name, rp - rinfo->ring.rsp_cons);
1537                 goto err;
1538         }
1539
1540         for (i = rinfo->ring.rsp_cons; i != rp; i++) {
1541                 unsigned long id;
1542                 unsigned int op;
1543
1544                 eoiflag = 0;
1545
1546                 RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
1547                 id = bret.id;
1548
1549                 /*
1550                  * The backend has messed up and given us an id that we would
1551                  * never have given to it (we stamp it up to BLK_RING_SIZE -
1552                  * look in get_id_from_freelist.
1553                  */
1554                 if (id >= BLK_RING_SIZE(info)) {
1555                         pr_alert("%s: response has incorrect id (%ld)\n",
1556                                  info->gd->disk_name, id);
1557                         goto err;
1558                 }
1559                 if (rinfo->shadow[id].status != REQ_WAITING) {
1560                         pr_alert("%s: response references no pending request\n",
1561                                  info->gd->disk_name);
1562                         goto err;
1563                 }
1564
1565                 rinfo->shadow[id].status = REQ_PROCESSING;
1566                 req  = rinfo->shadow[id].request;
1567
1568                 op = rinfo->shadow[id].req.operation;
1569                 if (op == BLKIF_OP_INDIRECT)
1570                         op = rinfo->shadow[id].req.u.indirect.indirect_op;
1571                 if (bret.operation != op) {
1572                         pr_alert("%s: response has wrong operation (%u instead of %u)\n",
1573                                  info->gd->disk_name, bret.operation, op);
1574                         goto err;
1575                 }
1576
1577                 if (bret.operation != BLKIF_OP_DISCARD) {
1578                         int ret;
1579
1580                         /*
1581                          * We may need to wait for an extra response if the
1582                          * I/O request is split in 2
1583                          */
1584                         ret = blkif_completion(&id, rinfo, &bret);
1585                         if (!ret)
1586                                 continue;
1587                         if (unlikely(ret < 0))
1588                                 goto err;
1589                 }
1590
1591                 if (add_id_to_freelist(rinfo, id)) {
1592                         WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n",
1593                              info->gd->disk_name, op_name(bret.operation), id);
1594                         continue;
1595                 }
1596
1597                 if (bret.status == BLKIF_RSP_OKAY)
1598                         blkif_req(req)->error = BLK_STS_OK;
1599                 else
1600                         blkif_req(req)->error = BLK_STS_IOERR;
1601
1602                 switch (bret.operation) {
1603                 case BLKIF_OP_DISCARD:
1604                         if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
1605                                 struct request_queue *rq = info->rq;
1606
1607                                 pr_warn_ratelimited("blkfront: %s: %s op failed\n",
1608                                            info->gd->disk_name, op_name(bret.operation));
1609                                 blkif_req(req)->error = BLK_STS_NOTSUPP;
1610                                 info->feature_discard = 0;
1611                                 info->feature_secdiscard = 0;
1612                                 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, rq);
1613                                 blk_queue_flag_clear(QUEUE_FLAG_SECERASE, rq);
1614                         }
1615                         break;
1616                 case BLKIF_OP_FLUSH_DISKCACHE:
1617                 case BLKIF_OP_WRITE_BARRIER:
1618                         if (unlikely(bret.status == BLKIF_RSP_EOPNOTSUPP)) {
1619                                 pr_warn_ratelimited("blkfront: %s: %s op failed\n",
1620                                        info->gd->disk_name, op_name(bret.operation));
1621                                 blkif_req(req)->error = BLK_STS_NOTSUPP;
1622                         }
1623                         if (unlikely(bret.status == BLKIF_RSP_ERROR &&
1624                                      rinfo->shadow[id].req.u.rw.nr_segments == 0)) {
1625                                 pr_warn_ratelimited("blkfront: %s: empty %s op failed\n",
1626                                        info->gd->disk_name, op_name(bret.operation));
1627                                 blkif_req(req)->error = BLK_STS_NOTSUPP;
1628                         }
1629                         if (unlikely(blkif_req(req)->error)) {
1630                                 if (blkif_req(req)->error == BLK_STS_NOTSUPP)
1631                                         blkif_req(req)->error = BLK_STS_OK;
1632                                 info->feature_fua = 0;
1633                                 info->feature_flush = 0;
1634                                 xlvbd_flush(info);
1635                         }
1636                         fallthrough;
1637                 case BLKIF_OP_READ:
1638                 case BLKIF_OP_WRITE:
1639                         if (unlikely(bret.status != BLKIF_RSP_OKAY))
1640                                 dev_dbg_ratelimited(&info->xbdev->dev,
1641                                         "Bad return from blkdev data request: %#x\n",
1642                                         bret.status);
1643
1644                         break;
1645                 default:
1646                         BUG();
1647                 }
1648
1649                 if (likely(!blk_should_fake_timeout(req->q)))
1650                         blk_mq_complete_request(req);
1651         }
1652
1653         rinfo->ring.rsp_cons = i;
1654
1655         if (i != rinfo->ring.req_prod_pvt) {
1656                 int more_to_do;
1657                 RING_FINAL_CHECK_FOR_RESPONSES(&rinfo->ring, more_to_do);
1658                 if (more_to_do)
1659                         goto again;
1660         } else
1661                 rinfo->ring.sring->rsp_event = i + 1;
1662
1663         kick_pending_request_queues_locked(rinfo);
1664
1665         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1666
1667         xen_irq_lateeoi(irq, eoiflag);
1668
1669         return IRQ_HANDLED;
1670
1671  err:
1672         info->connected = BLKIF_STATE_ERROR;
1673
1674         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
1675
1676         /* No EOI in order to avoid further interrupts. */
1677
1678         pr_alert("%s disabled for further use\n", info->gd->disk_name);
1679         return IRQ_HANDLED;
1680 }
1681
1682
1683 static int setup_blkring(struct xenbus_device *dev,
1684                          struct blkfront_ring_info *rinfo)
1685 {
1686         struct blkif_sring *sring;
1687         int err, i;
1688         struct blkfront_info *info = rinfo->dev_info;
1689         unsigned long ring_size = info->nr_ring_pages * XEN_PAGE_SIZE;
1690         grant_ref_t gref[XENBUS_MAX_RING_GRANTS];
1691
1692         for (i = 0; i < info->nr_ring_pages; i++)
1693                 rinfo->ring_ref[i] = GRANT_INVALID_REF;
1694
1695         sring = alloc_pages_exact(ring_size, GFP_NOIO);
1696         if (!sring) {
1697                 xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
1698                 return -ENOMEM;
1699         }
1700         SHARED_RING_INIT(sring);
1701         FRONT_RING_INIT(&rinfo->ring, sring, ring_size);
1702
1703         err = xenbus_grant_ring(dev, rinfo->ring.sring, info->nr_ring_pages, gref);
1704         if (err < 0) {
1705                 free_pages_exact(sring, ring_size);
1706                 rinfo->ring.sring = NULL;
1707                 goto fail;
1708         }
1709         for (i = 0; i < info->nr_ring_pages; i++)
1710                 rinfo->ring_ref[i] = gref[i];
1711
1712         err = xenbus_alloc_evtchn(dev, &rinfo->evtchn);
1713         if (err)
1714                 goto fail;
1715
1716         err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
1717                                                 0, "blkif", rinfo);
1718         if (err <= 0) {
1719                 xenbus_dev_fatal(dev, err,
1720                                  "bind_evtchn_to_irqhandler failed");
1721                 goto fail;
1722         }
1723         rinfo->irq = err;
1724
1725         return 0;
1726 fail:
1727         blkif_free(info, 0);
1728         return err;
1729 }
1730
1731 /*
1732  * Write out per-ring/queue nodes including ring-ref and event-channel, and each
1733  * ring buffer may have multi pages depending on ->nr_ring_pages.
1734  */
1735 static int write_per_ring_nodes(struct xenbus_transaction xbt,
1736                                 struct blkfront_ring_info *rinfo, const char *dir)
1737 {
1738         int err;
1739         unsigned int i;
1740         const char *message = NULL;
1741         struct blkfront_info *info = rinfo->dev_info;
1742
1743         if (info->nr_ring_pages == 1) {
1744                 err = xenbus_printf(xbt, dir, "ring-ref", "%u", rinfo->ring_ref[0]);
1745                 if (err) {
1746                         message = "writing ring-ref";
1747                         goto abort_transaction;
1748                 }
1749         } else {
1750                 for (i = 0; i < info->nr_ring_pages; i++) {
1751                         char ring_ref_name[RINGREF_NAME_LEN];
1752
1753                         snprintf(ring_ref_name, RINGREF_NAME_LEN, "ring-ref%u", i);
1754                         err = xenbus_printf(xbt, dir, ring_ref_name,
1755                                             "%u", rinfo->ring_ref[i]);
1756                         if (err) {
1757                                 message = "writing ring-ref";
1758                                 goto abort_transaction;
1759                         }
1760                 }
1761         }
1762
1763         err = xenbus_printf(xbt, dir, "event-channel", "%u", rinfo->evtchn);
1764         if (err) {
1765                 message = "writing event-channel";
1766                 goto abort_transaction;
1767         }
1768
1769         return 0;
1770
1771 abort_transaction:
1772         xenbus_transaction_end(xbt, 1);
1773         if (message)
1774                 xenbus_dev_fatal(info->xbdev, err, "%s", message);
1775
1776         return err;
1777 }
1778
1779 /* Common code used when first setting up, and when resuming. */
1780 static int talk_to_blkback(struct xenbus_device *dev,
1781                            struct blkfront_info *info)
1782 {
1783         const char *message = NULL;
1784         struct xenbus_transaction xbt;
1785         int err;
1786         unsigned int i, max_page_order;
1787         unsigned int ring_page_order;
1788         struct blkfront_ring_info *rinfo;
1789
1790         if (!info)
1791                 return -ENODEV;
1792
1793         max_page_order = xenbus_read_unsigned(info->xbdev->otherend,
1794                                               "max-ring-page-order", 0);
1795         ring_page_order = min(xen_blkif_max_ring_order, max_page_order);
1796         info->nr_ring_pages = 1 << ring_page_order;
1797
1798         err = negotiate_mq(info);
1799         if (err)
1800                 goto destroy_blkring;
1801
1802         for_each_rinfo(info, rinfo, i) {
1803                 /* Create shared ring, alloc event channel. */
1804                 err = setup_blkring(dev, rinfo);
1805                 if (err)
1806                         goto destroy_blkring;
1807         }
1808
1809 again:
1810         err = xenbus_transaction_start(&xbt);
1811         if (err) {
1812                 xenbus_dev_fatal(dev, err, "starting transaction");
1813                 goto destroy_blkring;
1814         }
1815
1816         if (info->nr_ring_pages > 1) {
1817                 err = xenbus_printf(xbt, dev->nodename, "ring-page-order", "%u",
1818                                     ring_page_order);
1819                 if (err) {
1820                         message = "writing ring-page-order";
1821                         goto abort_transaction;
1822                 }
1823         }
1824
1825         /* We already got the number of queues/rings in _probe */
1826         if (info->nr_rings == 1) {
1827                 err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
1828                 if (err)
1829                         goto destroy_blkring;
1830         } else {
1831                 char *path;
1832                 size_t pathsize;
1833
1834                 err = xenbus_printf(xbt, dev->nodename, "multi-queue-num-queues", "%u",
1835                                     info->nr_rings);
1836                 if (err) {
1837                         message = "writing multi-queue-num-queues";
1838                         goto abort_transaction;
1839                 }
1840
1841                 pathsize = strlen(dev->nodename) + QUEUE_NAME_LEN;
1842                 path = kmalloc(pathsize, GFP_KERNEL);
1843                 if (!path) {
1844                         err = -ENOMEM;
1845                         message = "ENOMEM while writing ring references";
1846                         goto abort_transaction;
1847                 }
1848
1849                 for_each_rinfo(info, rinfo, i) {
1850                         memset(path, 0, pathsize);
1851                         snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
1852                         err = write_per_ring_nodes(xbt, rinfo, path);
1853                         if (err) {
1854                                 kfree(path);
1855                                 goto destroy_blkring;
1856                         }
1857                 }
1858                 kfree(path);
1859         }
1860         err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
1861                             XEN_IO_PROTO_ABI_NATIVE);
1862         if (err) {
1863                 message = "writing protocol";
1864                 goto abort_transaction;
1865         }
1866         err = xenbus_printf(xbt, dev->nodename, "feature-persistent", "%u",
1867                         info->feature_persistent);
1868         if (err)
1869                 dev_warn(&dev->dev,
1870                          "writing persistent grants feature to xenbus");
1871
1872         err = xenbus_transaction_end(xbt, 0);
1873         if (err) {
1874                 if (err == -EAGAIN)
1875                         goto again;
1876                 xenbus_dev_fatal(dev, err, "completing transaction");
1877                 goto destroy_blkring;
1878         }
1879
1880         for_each_rinfo(info, rinfo, i) {
1881                 unsigned int j;
1882
1883                 for (j = 0; j < BLK_RING_SIZE(info); j++)
1884                         rinfo->shadow[j].req.u.rw.id = j + 1;
1885                 rinfo->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1886         }
1887         xenbus_switch_state(dev, XenbusStateInitialised);
1888
1889         return 0;
1890
1891  abort_transaction:
1892         xenbus_transaction_end(xbt, 1);
1893         if (message)
1894                 xenbus_dev_fatal(dev, err, "%s", message);
1895  destroy_blkring:
1896         blkif_free(info, 0);
1897         return err;
1898 }
1899
1900 static int negotiate_mq(struct blkfront_info *info)
1901 {
1902         unsigned int backend_max_queues;
1903         unsigned int i;
1904         struct blkfront_ring_info *rinfo;
1905
1906         BUG_ON(info->nr_rings);
1907
1908         /* Check if backend supports multiple queues. */
1909         backend_max_queues = xenbus_read_unsigned(info->xbdev->otherend,
1910                                                   "multi-queue-max-queues", 1);
1911         info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
1912         /* We need at least one ring. */
1913         if (!info->nr_rings)
1914                 info->nr_rings = 1;
1915
1916         info->rinfo_size = struct_size(info->rinfo, shadow,
1917                                        BLK_RING_SIZE(info));
1918         info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
1919         if (!info->rinfo) {
1920                 xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
1921                 info->nr_rings = 0;
1922                 return -ENOMEM;
1923         }
1924
1925         for_each_rinfo(info, rinfo, i) {
1926                 INIT_LIST_HEAD(&rinfo->indirect_pages);
1927                 INIT_LIST_HEAD(&rinfo->grants);
1928                 rinfo->dev_info = info;
1929                 INIT_WORK(&rinfo->work, blkif_restart_queue);
1930                 spin_lock_init(&rinfo->ring_lock);
1931         }
1932         return 0;
1933 }
1934
1935 /* Enable the persistent grants feature. */
1936 static bool feature_persistent = true;
1937 module_param(feature_persistent, bool, 0644);
1938 MODULE_PARM_DESC(feature_persistent,
1939                 "Enables the persistent grants feature");
1940
1941 /*
1942  * Entry point to this code when a new device is created.  Allocate the basic
1943  * structures and the ring buffer for communication with the backend, and
1944  * inform the backend of the appropriate details for those.  Switch to
1945  * Initialised state.
1946  */
1947 static int blkfront_probe(struct xenbus_device *dev,
1948                           const struct xenbus_device_id *id)
1949 {
1950         int err, vdevice;
1951         struct blkfront_info *info;
1952
1953         /* FIXME: Use dynamic device id if this is not set. */
1954         err = xenbus_scanf(XBT_NIL, dev->nodename,
1955                            "virtual-device", "%i", &vdevice);
1956         if (err != 1) {
1957                 /* go looking in the extended area instead */
1958                 err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
1959                                    "%i", &vdevice);
1960                 if (err != 1) {
1961                         xenbus_dev_fatal(dev, err, "reading virtual-device");
1962                         return err;
1963                 }
1964         }
1965
1966         if (xen_hvm_domain()) {
1967                 char *type;
1968                 int len;
1969                 /* no unplug has been done: do not hook devices != xen vbds */
1970                 if (xen_has_pv_and_legacy_disk_devices()) {
1971                         int major;
1972
1973                         if (!VDEV_IS_EXTENDED(vdevice))
1974                                 major = BLKIF_MAJOR(vdevice);
1975                         else
1976                                 major = XENVBD_MAJOR;
1977
1978                         if (major != XENVBD_MAJOR) {
1979                                 printk(KERN_INFO
1980                                                 "%s: HVM does not support vbd %d as xen block device\n",
1981                                                 __func__, vdevice);
1982                                 return -ENODEV;
1983                         }
1984                 }
1985                 /* do not create a PV cdrom device if we are an HVM guest */
1986                 type = xenbus_read(XBT_NIL, dev->nodename, "device-type", &len);
1987                 if (IS_ERR(type))
1988                         return -ENODEV;
1989                 if (strncmp(type, "cdrom", 5) == 0) {
1990                         kfree(type);
1991                         return -ENODEV;
1992                 }
1993                 kfree(type);
1994         }
1995         info = kzalloc(sizeof(*info), GFP_KERNEL);
1996         if (!info) {
1997                 xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
1998                 return -ENOMEM;
1999         }
2000
2001         info->xbdev = dev;
2002
2003         mutex_init(&info->mutex);
2004         info->vdevice = vdevice;
2005         info->connected = BLKIF_STATE_DISCONNECTED;
2006
2007         info->feature_persistent = feature_persistent;
2008
2009         /* Front end dir is a number, which is used as the id. */
2010         info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0);
2011         dev_set_drvdata(&dev->dev, info);
2012
2013         mutex_lock(&blkfront_mutex);
2014         list_add(&info->info_list, &info_list);
2015         mutex_unlock(&blkfront_mutex);
2016
2017         return 0;
2018 }
2019
2020 static int blkif_recover(struct blkfront_info *info)
2021 {
2022         unsigned int r_index;
2023         struct request *req, *n;
2024         int rc;
2025         struct bio *bio;
2026         unsigned int segs;
2027         struct blkfront_ring_info *rinfo;
2028
2029         blkfront_gather_backend_features(info);
2030         /* Reset limits changed by blk_mq_update_nr_hw_queues(). */
2031         blkif_set_queue_limits(info);
2032         segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
2033         blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
2034
2035         for_each_rinfo(info, rinfo, r_index) {
2036                 rc = blkfront_setup_indirect(rinfo);
2037                 if (rc)
2038                         return rc;
2039         }
2040         xenbus_switch_state(info->xbdev, XenbusStateConnected);
2041
2042         /* Now safe for us to use the shared ring */
2043         info->connected = BLKIF_STATE_CONNECTED;
2044
2045         for_each_rinfo(info, rinfo, r_index) {
2046                 /* Kick any other new requests queued since we resumed */
2047                 kick_pending_request_queues(rinfo);
2048         }
2049
2050         list_for_each_entry_safe(req, n, &info->requests, queuelist) {
2051                 /* Requeue pending requests (flush or discard) */
2052                 list_del_init(&req->queuelist);
2053                 BUG_ON(req->nr_phys_segments > segs);
2054                 blk_mq_requeue_request(req, false);
2055         }
2056         blk_mq_start_stopped_hw_queues(info->rq, true);
2057         blk_mq_kick_requeue_list(info->rq);
2058
2059         while ((bio = bio_list_pop(&info->bio_list)) != NULL) {
2060                 /* Traverse the list of pending bios and re-queue them */
2061                 submit_bio(bio);
2062         }
2063
2064         return 0;
2065 }
2066
2067 /*
2068  * We are reconnecting to the backend, due to a suspend/resume, or a backend
2069  * driver restart.  We tear down our blkif structure and recreate it, but
2070  * leave the device-layer structures intact so that this is transparent to the
2071  * rest of the kernel.
2072  */
2073 static int blkfront_resume(struct xenbus_device *dev)
2074 {
2075         struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2076         int err = 0;
2077         unsigned int i, j;
2078         struct blkfront_ring_info *rinfo;
2079
2080         dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
2081
2082         bio_list_init(&info->bio_list);
2083         INIT_LIST_HEAD(&info->requests);
2084         for_each_rinfo(info, rinfo, i) {
2085                 struct bio_list merge_bio;
2086                 struct blk_shadow *shadow = rinfo->shadow;
2087
2088                 for (j = 0; j < BLK_RING_SIZE(info); j++) {
2089                         /* Not in use? */
2090                         if (!shadow[j].request)
2091                                 continue;
2092
2093                         /*
2094                          * Get the bios in the request so we can re-queue them.
2095                          */
2096                         if (req_op(shadow[j].request) == REQ_OP_FLUSH ||
2097                             req_op(shadow[j].request) == REQ_OP_DISCARD ||
2098                             req_op(shadow[j].request) == REQ_OP_SECURE_ERASE ||
2099                             shadow[j].request->cmd_flags & REQ_FUA) {
2100                                 /*
2101                                  * Flush operations don't contain bios, so
2102                                  * we need to requeue the whole request
2103                                  *
2104                                  * XXX: but this doesn't make any sense for a
2105                                  * write with the FUA flag set..
2106                                  */
2107                                 list_add(&shadow[j].request->queuelist, &info->requests);
2108                                 continue;
2109                         }
2110                         merge_bio.head = shadow[j].request->bio;
2111                         merge_bio.tail = shadow[j].request->biotail;
2112                         bio_list_merge(&info->bio_list, &merge_bio);
2113                         shadow[j].request->bio = NULL;
2114                         blk_mq_end_request(shadow[j].request, BLK_STS_OK);
2115                 }
2116         }
2117
2118         blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
2119
2120         err = talk_to_blkback(dev, info);
2121         if (!err)
2122                 blk_mq_update_nr_hw_queues(&info->tag_set, info->nr_rings);
2123
2124         /*
2125          * We have to wait for the backend to switch to
2126          * connected state, since we want to read which
2127          * features it supports.
2128          */
2129
2130         return err;
2131 }
2132
2133 static void blkfront_closing(struct blkfront_info *info)
2134 {
2135         struct xenbus_device *xbdev = info->xbdev;
2136         struct blkfront_ring_info *rinfo;
2137         unsigned int i;
2138
2139         if (xbdev->state == XenbusStateClosing)
2140                 return;
2141
2142         /* No more blkif_request(). */
2143         if (info->rq && info->gd) {
2144                 blk_mq_stop_hw_queues(info->rq);
2145                 blk_mark_disk_dead(info->gd);
2146                 set_capacity(info->gd, 0);
2147         }
2148
2149         for_each_rinfo(info, rinfo, i) {
2150                 /* No more gnttab callback work. */
2151                 gnttab_cancel_free_callback(&rinfo->callback);
2152
2153                 /* Flush gnttab callback work. Must be done with no locks held. */
2154                 flush_work(&rinfo->work);
2155         }
2156
2157         xenbus_frontend_closed(xbdev);
2158 }
2159
2160 static void blkfront_setup_discard(struct blkfront_info *info)
2161 {
2162         info->feature_discard = 1;
2163         info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend,
2164                                                          "discard-granularity",
2165                                                          0);
2166         info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend,
2167                                                        "discard-alignment", 0);
2168         info->feature_secdiscard =
2169                 !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure",
2170                                        0);
2171 }
2172
2173 static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
2174 {
2175         unsigned int psegs, grants, memflags;
2176         int err, i;
2177         struct blkfront_info *info = rinfo->dev_info;
2178
2179         memflags = memalloc_noio_save();
2180
2181         if (info->max_indirect_segments == 0) {
2182                 if (!HAS_EXTRA_REQ)
2183                         grants = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2184                 else {
2185                         /*
2186                          * When an extra req is required, the maximum
2187                          * grants supported is related to the size of the
2188                          * Linux block segment.
2189                          */
2190                         grants = GRANTS_PER_PSEG;
2191                 }
2192         }
2193         else
2194                 grants = info->max_indirect_segments;
2195         psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
2196
2197         err = fill_grant_buffer(rinfo,
2198                                 (grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
2199         if (err)
2200                 goto out_of_memory;
2201
2202         if (!info->feature_persistent && info->max_indirect_segments) {
2203                 /*
2204                  * We are using indirect descriptors but not persistent
2205                  * grants, we need to allocate a set of pages that can be
2206                  * used for mapping indirect grefs
2207                  */
2208                 int num = INDIRECT_GREFS(grants) * BLK_RING_SIZE(info);
2209
2210                 BUG_ON(!list_empty(&rinfo->indirect_pages));
2211                 for (i = 0; i < num; i++) {
2212                         struct page *indirect_page = alloc_page(GFP_KERNEL);
2213                         if (!indirect_page)
2214                                 goto out_of_memory;
2215                         list_add(&indirect_page->lru, &rinfo->indirect_pages);
2216                 }
2217         }
2218
2219         for (i = 0; i < BLK_RING_SIZE(info); i++) {
2220                 rinfo->shadow[i].grants_used =
2221                         kvcalloc(grants,
2222                                  sizeof(rinfo->shadow[i].grants_used[0]),
2223                                  GFP_KERNEL);
2224                 rinfo->shadow[i].sg = kvcalloc(psegs,
2225                                                sizeof(rinfo->shadow[i].sg[0]),
2226                                                GFP_KERNEL);
2227                 if (info->max_indirect_segments)
2228                         rinfo->shadow[i].indirect_grants =
2229                                 kvcalloc(INDIRECT_GREFS(grants),
2230                                          sizeof(rinfo->shadow[i].indirect_grants[0]),
2231                                          GFP_KERNEL);
2232                 if ((rinfo->shadow[i].grants_used == NULL) ||
2233                         (rinfo->shadow[i].sg == NULL) ||
2234                      (info->max_indirect_segments &&
2235                      (rinfo->shadow[i].indirect_grants == NULL)))
2236                         goto out_of_memory;
2237                 sg_init_table(rinfo->shadow[i].sg, psegs);
2238         }
2239
2240         memalloc_noio_restore(memflags);
2241
2242         return 0;
2243
2244 out_of_memory:
2245         for (i = 0; i < BLK_RING_SIZE(info); i++) {
2246                 kvfree(rinfo->shadow[i].grants_used);
2247                 rinfo->shadow[i].grants_used = NULL;
2248                 kvfree(rinfo->shadow[i].sg);
2249                 rinfo->shadow[i].sg = NULL;
2250                 kvfree(rinfo->shadow[i].indirect_grants);
2251                 rinfo->shadow[i].indirect_grants = NULL;
2252         }
2253         if (!list_empty(&rinfo->indirect_pages)) {
2254                 struct page *indirect_page, *n;
2255                 list_for_each_entry_safe(indirect_page, n, &rinfo->indirect_pages, lru) {
2256                         list_del(&indirect_page->lru);
2257                         __free_page(indirect_page);
2258                 }
2259         }
2260
2261         memalloc_noio_restore(memflags);
2262
2263         return -ENOMEM;
2264 }
2265
2266 /*
2267  * Gather all backend feature-*
2268  */
2269 static void blkfront_gather_backend_features(struct blkfront_info *info)
2270 {
2271         unsigned int indirect_segments;
2272
2273         info->feature_flush = 0;
2274         info->feature_fua = 0;
2275
2276         /*
2277          * If there's no "feature-barrier" defined, then it means
2278          * we're dealing with a very old backend which writes
2279          * synchronously; nothing to do.
2280          *
2281          * If there are barriers, then we use flush.
2282          */
2283         if (xenbus_read_unsigned(info->xbdev->otherend, "feature-barrier", 0)) {
2284                 info->feature_flush = 1;
2285                 info->feature_fua = 1;
2286         }
2287
2288         /*
2289          * And if there is "feature-flush-cache" use that above
2290          * barriers.
2291          */
2292         if (xenbus_read_unsigned(info->xbdev->otherend, "feature-flush-cache",
2293                                  0)) {
2294                 info->feature_flush = 1;
2295                 info->feature_fua = 0;
2296         }
2297
2298         if (xenbus_read_unsigned(info->xbdev->otherend, "feature-discard", 0))
2299                 blkfront_setup_discard(info);
2300
2301         if (info->feature_persistent)
2302                 info->feature_persistent =
2303                         !!xenbus_read_unsigned(info->xbdev->otherend,
2304                                                "feature-persistent", 0);
2305
2306         indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
2307                                         "feature-max-indirect-segments", 0);
2308         if (indirect_segments > xen_blkif_max_segments)
2309                 indirect_segments = xen_blkif_max_segments;
2310         if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
2311                 indirect_segments = 0;
2312         info->max_indirect_segments = indirect_segments;
2313
2314         if (info->feature_persistent) {
2315                 mutex_lock(&blkfront_mutex);
2316                 schedule_delayed_work(&blkfront_work, HZ * 10);
2317                 mutex_unlock(&blkfront_mutex);
2318         }
2319 }
2320
2321 /*
2322  * Invoked when the backend is finally 'ready' (and has told produced
2323  * the details about the physical device - #sectors, size, etc).
2324  */
2325 static void blkfront_connect(struct blkfront_info *info)
2326 {
2327         unsigned long long sectors;
2328         unsigned long sector_size;
2329         unsigned int physical_sector_size;
2330         unsigned int binfo;
2331         int err, i;
2332         struct blkfront_ring_info *rinfo;
2333
2334         switch (info->connected) {
2335         case BLKIF_STATE_CONNECTED:
2336                 /*
2337                  * Potentially, the back-end may be signalling
2338                  * a capacity change; update the capacity.
2339                  */
2340                 err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
2341                                    "sectors", "%Lu", &sectors);
2342                 if (XENBUS_EXIST_ERR(err))
2343                         return;
2344                 printk(KERN_INFO "Setting capacity to %Lu\n",
2345                        sectors);
2346                 set_capacity_and_notify(info->gd, sectors);
2347
2348                 return;
2349         case BLKIF_STATE_SUSPENDED:
2350                 /*
2351                  * If we are recovering from suspension, we need to wait
2352                  * for the backend to announce it's features before
2353                  * reconnecting, at least we need to know if the backend
2354                  * supports indirect descriptors, and how many.
2355                  */
2356                 blkif_recover(info);
2357                 return;
2358
2359         default:
2360                 break;
2361         }
2362
2363         dev_dbg(&info->xbdev->dev, "%s:%s.\n",
2364                 __func__, info->xbdev->otherend);
2365
2366         err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
2367                             "sectors", "%llu", &sectors,
2368                             "info", "%u", &binfo,
2369                             "sector-size", "%lu", &sector_size,
2370                             NULL);
2371         if (err) {
2372                 xenbus_dev_fatal(info->xbdev, err,
2373                                  "reading backend fields at %s",
2374                                  info->xbdev->otherend);
2375                 return;
2376         }
2377
2378         /*
2379          * physical-sector-size is a newer field, so old backends may not
2380          * provide this. Assume physical sector size to be the same as
2381          * sector_size in that case.
2382          */
2383         physical_sector_size = xenbus_read_unsigned(info->xbdev->otherend,
2384                                                     "physical-sector-size",
2385                                                     sector_size);
2386         blkfront_gather_backend_features(info);
2387         for_each_rinfo(info, rinfo, i) {
2388                 err = blkfront_setup_indirect(rinfo);
2389                 if (err) {
2390                         xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
2391                                          info->xbdev->otherend);
2392                         blkif_free(info, 0);
2393                         break;
2394                 }
2395         }
2396
2397         err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size,
2398                                   physical_sector_size);
2399         if (err) {
2400                 xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
2401                                  info->xbdev->otherend);
2402                 goto fail;
2403         }
2404
2405         xenbus_switch_state(info->xbdev, XenbusStateConnected);
2406
2407         /* Kick pending requests. */
2408         info->connected = BLKIF_STATE_CONNECTED;
2409         for_each_rinfo(info, rinfo, i)
2410                 kick_pending_request_queues(rinfo);
2411
2412         device_add_disk(&info->xbdev->dev, info->gd, NULL);
2413
2414         info->is_ready = 1;
2415         return;
2416
2417 fail:
2418         blkif_free(info, 0);
2419         return;
2420 }
2421
2422 /*
2423  * Callback received when the backend's state changes.
2424  */
2425 static void blkback_changed(struct xenbus_device *dev,
2426                             enum xenbus_state backend_state)
2427 {
2428         struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2429
2430         dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state);
2431
2432         switch (backend_state) {
2433         case XenbusStateInitWait:
2434                 if (dev->state != XenbusStateInitialising)
2435                         break;
2436                 if (talk_to_blkback(dev, info))
2437                         break;
2438                 break;
2439         case XenbusStateInitialising:
2440         case XenbusStateInitialised:
2441         case XenbusStateReconfiguring:
2442         case XenbusStateReconfigured:
2443         case XenbusStateUnknown:
2444                 break;
2445
2446         case XenbusStateConnected:
2447                 /*
2448                  * talk_to_blkback sets state to XenbusStateInitialised
2449                  * and blkfront_connect sets it to XenbusStateConnected
2450                  * (if connection went OK).
2451                  *
2452                  * If the backend (or toolstack) decides to poke at backend
2453                  * state (and re-trigger the watch by setting the state repeatedly
2454                  * to XenbusStateConnected (4)) we need to deal with this.
2455                  * This is allowed as this is used to communicate to the guest
2456                  * that the size of disk has changed!
2457                  */
2458                 if ((dev->state != XenbusStateInitialised) &&
2459                     (dev->state != XenbusStateConnected)) {
2460                         if (talk_to_blkback(dev, info))
2461                                 break;
2462                 }
2463
2464                 blkfront_connect(info);
2465                 break;
2466
2467         case XenbusStateClosed:
2468                 if (dev->state == XenbusStateClosed)
2469                         break;
2470                 fallthrough;
2471         case XenbusStateClosing:
2472                 blkfront_closing(info);
2473                 break;
2474         }
2475 }
2476
2477 static int blkfront_remove(struct xenbus_device *xbdev)
2478 {
2479         struct blkfront_info *info = dev_get_drvdata(&xbdev->dev);
2480
2481         dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename);
2482
2483         if (info->gd)
2484                 del_gendisk(info->gd);
2485
2486         mutex_lock(&blkfront_mutex);
2487         list_del(&info->info_list);
2488         mutex_unlock(&blkfront_mutex);
2489
2490         blkif_free(info, 0);
2491         if (info->gd) {
2492                 xlbd_release_minors(info->gd->first_minor, info->gd->minors);
2493                 blk_cleanup_disk(info->gd);
2494                 blk_mq_free_tag_set(&info->tag_set);
2495         }
2496
2497         kfree(info);
2498         return 0;
2499 }
2500
2501 static int blkfront_is_ready(struct xenbus_device *dev)
2502 {
2503         struct blkfront_info *info = dev_get_drvdata(&dev->dev);
2504
2505         return info->is_ready && info->xbdev;
2506 }
2507
2508 static const struct block_device_operations xlvbd_block_fops =
2509 {
2510         .owner = THIS_MODULE,
2511         .getgeo = blkif_getgeo,
2512         .ioctl = blkif_ioctl,
2513         .compat_ioctl = blkdev_compat_ptr_ioctl,
2514 };
2515
2516
2517 static const struct xenbus_device_id blkfront_ids[] = {
2518         { "vbd" },
2519         { "" }
2520 };
2521
2522 static struct xenbus_driver blkfront_driver = {
2523         .ids  = blkfront_ids,
2524         .probe = blkfront_probe,
2525         .remove = blkfront_remove,
2526         .resume = blkfront_resume,
2527         .otherend_changed = blkback_changed,
2528         .is_ready = blkfront_is_ready,
2529 };
2530
2531 static void purge_persistent_grants(struct blkfront_info *info)
2532 {
2533         unsigned int i;
2534         unsigned long flags;
2535         struct blkfront_ring_info *rinfo;
2536
2537         for_each_rinfo(info, rinfo, i) {
2538                 struct grant *gnt_list_entry, *tmp;
2539
2540                 spin_lock_irqsave(&rinfo->ring_lock, flags);
2541
2542                 if (rinfo->persistent_gnts_c == 0) {
2543                         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
2544                         continue;
2545                 }
2546
2547                 list_for_each_entry_safe(gnt_list_entry, tmp, &rinfo->grants,
2548                                          node) {
2549                         if (gnt_list_entry->gref == GRANT_INVALID_REF ||
2550                             !gnttab_try_end_foreign_access(gnt_list_entry->gref))
2551                                 continue;
2552
2553                         list_del(&gnt_list_entry->node);
2554                         rinfo->persistent_gnts_c--;
2555                         gnt_list_entry->gref = GRANT_INVALID_REF;
2556                         list_add_tail(&gnt_list_entry->node, &rinfo->grants);
2557                 }
2558
2559                 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
2560         }
2561 }
2562
2563 static void blkfront_delay_work(struct work_struct *work)
2564 {
2565         struct blkfront_info *info;
2566         bool need_schedule_work = false;
2567
2568         mutex_lock(&blkfront_mutex);
2569
2570         list_for_each_entry(info, &info_list, info_list) {
2571                 if (info->feature_persistent) {
2572                         need_schedule_work = true;
2573                         mutex_lock(&info->mutex);
2574                         purge_persistent_grants(info);
2575                         mutex_unlock(&info->mutex);
2576                 }
2577         }
2578
2579         if (need_schedule_work)
2580                 schedule_delayed_work(&blkfront_work, HZ * 10);
2581
2582         mutex_unlock(&blkfront_mutex);
2583 }
2584
2585 static int __init xlblk_init(void)
2586 {
2587         int ret;
2588         int nr_cpus = num_online_cpus();
2589
2590         if (!xen_domain())
2591                 return -ENODEV;
2592
2593         if (!xen_has_pv_disk_devices())
2594                 return -ENODEV;
2595
2596         if (register_blkdev(XENVBD_MAJOR, DEV_NAME)) {
2597                 pr_warn("xen_blk: can't get major %d with name %s\n",
2598                         XENVBD_MAJOR, DEV_NAME);
2599                 return -ENODEV;
2600         }
2601
2602         if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
2603                 xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
2604
2605         if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
2606                 pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
2607                         xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
2608                 xen_blkif_max_ring_order = XENBUS_MAX_RING_GRANT_ORDER;
2609         }
2610
2611         if (xen_blkif_max_queues > nr_cpus) {
2612                 pr_info("Invalid max_queues (%d), will use default max: %d.\n",
2613                         xen_blkif_max_queues, nr_cpus);
2614                 xen_blkif_max_queues = nr_cpus;
2615         }
2616
2617         INIT_DELAYED_WORK(&blkfront_work, blkfront_delay_work);
2618
2619         ret = xenbus_register_frontend(&blkfront_driver);
2620         if (ret) {
2621                 unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2622                 return ret;
2623         }
2624
2625         return 0;
2626 }
2627 module_init(xlblk_init);
2628
2629
2630 static void __exit xlblk_exit(void)
2631 {
2632         cancel_delayed_work_sync(&blkfront_work);
2633
2634         xenbus_unregister_driver(&blkfront_driver);
2635         unregister_blkdev(XENVBD_MAJOR, DEV_NAME);
2636         kfree(minors);
2637 }
2638 module_exit(xlblk_exit);
2639
2640 MODULE_DESCRIPTION("Xen virtual block device frontend");
2641 MODULE_LICENSE("GPL");
2642 MODULE_ALIAS_BLOCKDEV_MAJOR(XENVBD_MAJOR);
2643 MODULE_ALIAS("xen:vbd");
2644 MODULE_ALIAS("xenblk");