drivers/block/virtio_blk.c (platform/kernel/linux-rpi.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 //#define DEBUG
3 #include <linux/spinlock.h>
4 #include <linux/slab.h>
5 #include <linux/blkdev.h>
6 #include <linux/hdreg.h>
7 #include <linux/module.h>
8 #include <linux/mutex.h>
9 #include <linux/interrupt.h>
10 #include <linux/virtio.h>
11 #include <linux/virtio_blk.h>
12 #include <linux/scatterlist.h>
13 #include <linux/string_helpers.h>
14 #include <linux/idr.h>
15 #include <linux/blk-mq.h>
16 #include <linux/blk-mq-virtio.h>
17 #include <linux/numa.h>
18 #include <uapi/linux/virtio_ring.h>
19
20 #define PART_BITS 4
21 #define VQ_NAME_LEN 16
22 #define MAX_DISCARD_SEGMENTS 256u
23
24 /* The maximum number of sg elements that fit into a virtqueue */
25 #define VIRTIO_BLK_MAX_SG_ELEMS 32768
26
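/*
 * Number of scatterlist entries embedded directly in struct virtblk_req.
 * When the architecture cannot chain scatterlists (CONFIG_ARCH_NO_SG_CHAIN),
 * sg_alloc_table_chained() cannot extend an inline first chunk, so no
 * entries are embedded and the table is always allocated in full.
 */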
27 #ifdef CONFIG_ARCH_NO_SG_CHAIN
28 #define VIRTIO_BLK_INLINE_SG_CNT        0
29 #else
30 #define VIRTIO_BLK_INLINE_SG_CNT        2
31 #endif
32
33 static unsigned int num_request_queues;
34 module_param(num_request_queues, uint, 0644);
35 MODULE_PARM_DESC(num_request_queues,
36                  "Limit the number of request queues to use for blk device. "
37                  "0 for no limit. "
38                  "Values > nr_cpu_ids truncated to nr_cpu_ids.");
39
40 static unsigned int poll_queues;
41 module_param(poll_queues, uint, 0644);
42 MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");
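/*
 * Both parameters can be set at module load time, e.g. (values illustrative):
 *   modprobe virtio_blk num_request_queues=4 poll_queues=2
 */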
43
44 static int major;
45 static DEFINE_IDA(vd_index_ida);
46
47 static struct workqueue_struct *virtblk_wq;
48
49 struct virtio_blk_vq {
50         struct virtqueue *vq;
51         spinlock_t lock;
52         char name[VQ_NAME_LEN];
53 } ____cacheline_aligned_in_smp;
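/* The per-vq state above is cacheline-aligned so that the locks of
 * adjacent virtqueues do not false-share a cacheline under SMP. */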
54
55 struct virtio_blk {
56         /*
57          * This mutex must be held by anything that may run after
58          * virtblk_remove() sets vblk->vdev to NULL.
59          *
60          * blk-mq, virtqueue processing, and sysfs attribute code paths are
61          * shut down before vblk->vdev is set to NULL and therefore do not need
62          * to hold this mutex.
63          */
64         struct mutex vdev_mutex;
65         struct virtio_device *vdev;
66
67         /* The disk structure for the kernel. */
68         struct gendisk *disk;
69
70         /* Block layer tags. */
71         struct blk_mq_tag_set tag_set;
72
73         /* Process context for config space updates */
74         struct work_struct config_work;
75
76         /* Ida index - used to track minor number allocations. */
77         int index;
78
79         /* num of vqs */
80         int num_vqs;
81         int io_queues[HCTX_MAX_TYPES];
82         struct virtio_blk_vq *vqs;
83 };
84
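/*
 * Per-request driver state, allocated as the blk-mq request PDU (see
 * tag_set.cmd_size below): the device reads out_hdr, writes status back,
 * and sg[] holds VIRTIO_BLK_INLINE_SG_CNT inline entries backing the
 * chained sg table.
 */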
85 struct virtblk_req {
86         struct virtio_blk_outhdr out_hdr;
87         u8 status;
88         struct sg_table sg_table;
89         struct scatterlist sg[];
90 };
91
92 static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
93 {
94         switch (vbr->status) {
95         case VIRTIO_BLK_S_OK:
96                 return BLK_STS_OK;
97         case VIRTIO_BLK_S_UNSUPP:
98                 return BLK_STS_NOTSUPP;
99         default:
100                 return BLK_STS_IOERR;
101         }
102 }
103
104 static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
105 {
106         struct virtio_blk *vblk = hctx->queue->queuedata;
107         struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
108
109         return vq;
110 }
111
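/*
 * Queue a request on the virtqueue as (up to) three scatterlists: the
 * header (driver->device), the optional data (driver->device for writes,
 * device->driver for reads), and the one-byte status the device fills in.
 */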
112 static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
113 {
114         struct scatterlist hdr, status, *sgs[3];
115         unsigned int num_out = 0, num_in = 0;
116
117         sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
118         sgs[num_out++] = &hdr;
119
120         if (vbr->sg_table.nents) {
121                 if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
122                         sgs[num_out++] = vbr->sg_table.sgl;
123                 else
124                         sgs[num_out + num_in++] = vbr->sg_table.sgl;
125         }
126
127         sg_init_one(&status, &vbr->status, sizeof(vbr->status));
128         sgs[num_out + num_in++] = &status;
129
130         return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
131 }
132
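/*
 * Build the payload for DISCARD, WRITE ZEROES and SECURE ERASE requests:
 * an array of struct virtio_blk_discard_write_zeroes ranges (sector,
 * num_sectors, flags) attached to the request as a special payload so it
 * is mapped like ordinary data.
 */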
133 static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
134 {
135         unsigned short segments = blk_rq_nr_discard_segments(req);
136         unsigned short n = 0;
137         struct virtio_blk_discard_write_zeroes *range;
138         struct bio *bio;
139         u32 flags = 0;
140
141         if (unmap)
142                 flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;
143
144         range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
145         if (!range)
146                 return -ENOMEM;
147
148         /*
149          * A single max discard segment means multi-range discard isn't
150          * supported, and the block layer only runs contiguity merging as
151          * for a normal RW request. So we can't rely on the bios to
152          * retrieve per-range info.
153          */
154         if (queue_max_discard_segments(req->q) == 1) {
155                 range[0].flags = cpu_to_le32(flags);
156                 range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
157                 range[0].sector = cpu_to_le64(blk_rq_pos(req));
158                 n = 1;
159         } else {
160                 __rq_for_each_bio(bio, req) {
161                         u64 sector = bio->bi_iter.bi_sector;
162                         u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
163
164                         range[n].flags = cpu_to_le32(flags);
165                         range[n].num_sectors = cpu_to_le32(num_sectors);
166                         range[n].sector = cpu_to_le64(sector);
167                         n++;
168                 }
169         }
170
171         WARN_ON_ONCE(n != segments);
172
173         req->special_vec.bv_page = virt_to_page(range);
174         req->special_vec.bv_offset = offset_in_page(range);
175         req->special_vec.bv_len = sizeof(*range) * segments;
176         req->rq_flags |= RQF_SPECIAL_PAYLOAD;
177
178         return 0;
179 }
180
181 static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
182 {
183         if (blk_rq_nr_phys_segments(req))
184                 sg_free_table_chained(&vbr->sg_table,
185                                       VIRTIO_BLK_INLINE_SG_CNT);
186 }
187
188 static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
189                 struct virtblk_req *vbr)
190 {
191         int err;
192
193         if (!blk_rq_nr_phys_segments(req))
194                 return 0;
195
196         vbr->sg_table.sgl = vbr->sg;
197         err = sg_alloc_table_chained(&vbr->sg_table,
198                                      blk_rq_nr_phys_segments(req),
199                                      vbr->sg_table.sgl,
200                                      VIRTIO_BLK_INLINE_SG_CNT);
201         if (unlikely(err))
202                 return -ENOMEM;
203
204         return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
205 }
206
207 static void virtblk_cleanup_cmd(struct request *req)
208 {
209         if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
210                 kfree(bvec_virt(&req->special_vec));
211 }
212
213 static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
214                                       struct request *req,
215                                       struct virtblk_req *vbr)
216 {
217         bool unmap = false;
218         u32 type;
219
220         vbr->out_hdr.sector = 0;
221
222         switch (req_op(req)) {
223         case REQ_OP_READ:
224                 type = VIRTIO_BLK_T_IN;
225                 vbr->out_hdr.sector = cpu_to_virtio64(vdev,
226                                                       blk_rq_pos(req));
227                 break;
228         case REQ_OP_WRITE:
229                 type = VIRTIO_BLK_T_OUT;
230                 vbr->out_hdr.sector = cpu_to_virtio64(vdev,
231                                                       blk_rq_pos(req));
232                 break;
233         case REQ_OP_FLUSH:
234                 type = VIRTIO_BLK_T_FLUSH;
235                 break;
236         case REQ_OP_DISCARD:
237                 type = VIRTIO_BLK_T_DISCARD;
238                 break;
239         case REQ_OP_WRITE_ZEROES:
240                 type = VIRTIO_BLK_T_WRITE_ZEROES;
241                 unmap = !(req->cmd_flags & REQ_NOUNMAP);
242                 break;
243         case REQ_OP_SECURE_ERASE:
244                 type = VIRTIO_BLK_T_SECURE_ERASE;
245                 break;
246         case REQ_OP_DRV_IN:
247                 type = VIRTIO_BLK_T_GET_ID;
248                 break;
249         default:
250                 WARN_ON_ONCE(1);
251                 return BLK_STS_IOERR;
252         }
253
254         vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
255         vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));
256
257         if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
258             type == VIRTIO_BLK_T_SECURE_ERASE) {
259                 if (virtblk_setup_discard_write_zeroes_erase(req, unmap))
260                         return BLK_STS_RESOURCE;
261         }
262
263         return 0;
264 }
265
266 static inline void virtblk_request_done(struct request *req)
267 {
268         struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
269
270         virtblk_unmap_data(req, vbr);
271         virtblk_cleanup_cmd(req);
272         blk_mq_end_request(req, virtblk_result(vbr));
273 }
274
275 static void virtblk_done(struct virtqueue *vq)
276 {
277         struct virtio_blk *vblk = vq->vdev->priv;
278         bool req_done = false;
279         int qid = vq->index;
280         struct virtblk_req *vbr;
281         unsigned long flags;
282         unsigned int len;
283
284         spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
285         do {
286                 virtqueue_disable_cb(vq);
287                 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
288                         struct request *req = blk_mq_rq_from_pdu(vbr);
289
290                         if (likely(!blk_should_fake_timeout(req->q)))
291                                 blk_mq_complete_request(req);
292                         req_done = true;
293                 }
294                 if (unlikely(virtqueue_is_broken(vq)))
295                         break;
296         } while (!virtqueue_enable_cb(vq));
297
298         /* In case queue is stopped waiting for more buffers. */
299         if (req_done)
300                 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
301         spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
302 }
303
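/*
 * ->commit_rqs() runs when blk-mq queued requests without marking the
 * last one (bd->last unset, e.g. the dispatch loop ended early), so a
 * kick that virtio_queue_rq() deferred still gets delivered.
 */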
304 static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
305 {
306         struct virtio_blk *vblk = hctx->queue->queuedata;
307         struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
308         bool kick;
309
310         spin_lock_irq(&vq->lock);
311         kick = virtqueue_kick_prepare(vq->vq);
312         spin_unlock_irq(&vq->lock);
313
314         if (kick)
315                 virtqueue_notify(vq->vq);
316 }
317
318 static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
319 {
320         virtblk_cleanup_cmd(req);
321         switch (rc) {
322         case -ENOSPC:
323                 return BLK_STS_DEV_RESOURCE;
324         case -ENOMEM:
325                 return BLK_STS_RESOURCE;
326         default:
327                 return BLK_STS_IOERR;
328         }
329 }
330
331 static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
332                                         struct virtio_blk *vblk,
333                                         struct request *req,
334                                         struct virtblk_req *vbr)
335 {
336         blk_status_t status;
337         int num;
338
339         status = virtblk_setup_cmd(vblk->vdev, req, vbr);
340         if (unlikely(status))
341                 return status;
342
343         num = virtblk_map_data(hctx, req, vbr);
344         if (unlikely(num < 0))
345                 return virtblk_fail_to_queue(req, -ENOMEM);
346         vbr->sg_table.nents = num;
347
348         blk_mq_start_request(req);
349
350         return BLK_STS_OK;
351 }
352
353 static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
354                            const struct blk_mq_queue_data *bd)
355 {
356         struct virtio_blk *vblk = hctx->queue->queuedata;
357         struct request *req = bd->rq;
358         struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
359         unsigned long flags;
360         int qid = hctx->queue_num;
361         bool notify = false;
362         blk_status_t status;
363         int err;
364
365         status = virtblk_prep_rq(hctx, vblk, req, vbr);
366         if (unlikely(status))
367                 return status;
368
369         spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
370         err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
371         if (err) {
372                 virtqueue_kick(vblk->vqs[qid].vq);
373                 /* Don't stop the queue if -ENOMEM: we may have failed to
374                  * bounce the buffer due to global resource outage.
375                  */
376                 if (err == -ENOSPC)
377                         blk_mq_stop_hw_queue(hctx);
378                 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
379                 virtblk_unmap_data(req, vbr);
380                 return virtblk_fail_to_queue(req, err);
381         }
382
383         if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
384                 notify = true;
385         spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
386
387         if (notify)
388                 virtqueue_notify(vblk->vqs[qid].vq);
389         return BLK_STS_OK;
390 }
391
392 static bool virtblk_prep_rq_batch(struct request *req)
393 {
394         struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
395         struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
396
397         req->mq_hctx->tags->rqs[req->tag] = req;
398
399         return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
400 }
401
402 static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
403                                         struct request **rqlist)
404 {
405         unsigned long flags;
406         int err;
407         bool kick;
408
409         spin_lock_irqsave(&vq->lock, flags);
410
411         while (!rq_list_empty(*rqlist)) {
412                 struct request *req = rq_list_pop(rqlist);
413                 struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
414
415                 err = virtblk_add_req(vq->vq, vbr);
416                 if (err) {
417                         virtblk_unmap_data(req, vbr);
418                         virtblk_cleanup_cmd(req);
419                         blk_mq_requeue_request(req, true);
420                 }
421         }
422
423         kick = virtqueue_kick_prepare(vq->vq);
424         spin_unlock_irqrestore(&vq->lock, flags);
425
426         return kick;
427 }
428
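/*
 * Batched submission of a plugged request list: prepare each request,
 * divert failures onto a local requeue list, and submit every run of
 * requests sharing an hctx (hence a virtqueue) with a single kick.
 */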
429 static void virtio_queue_rqs(struct request **rqlist)
430 {
431         struct request *req, *next, *prev = NULL;
432         struct request *requeue_list = NULL;
433
434         rq_list_for_each_safe(rqlist, req, next) {
435                 struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
436                 bool kick;
437
438                 if (!virtblk_prep_rq_batch(req)) {
439                         rq_list_move(rqlist, &requeue_list, req, prev);
440                         req = prev;
441                         if (!req)
442                                 continue;
443                 }
444
445                 if (!next || req->mq_hctx != next->mq_hctx) {
446                         req->rq_next = NULL;
447                         kick = virtblk_add_req_batch(vq, rqlist);
448                         if (kick)
449                                 virtqueue_notify(vq->vq);
450
451                         *rqlist = next;
452                         prev = NULL;
453                 } else
454                         prev = req;
455         }
456
457         *rqlist = requeue_list;
458 }
459
460 /* Return the ID (serial number) string for *disk in *id_str.
461  */
462 static int virtblk_get_id(struct gendisk *disk, char *id_str)
463 {
464         struct virtio_blk *vblk = disk->private_data;
465         struct request_queue *q = vblk->disk->queue;
466         struct request *req;
467         int err;
468
469         req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
470         if (IS_ERR(req))
471                 return PTR_ERR(req);
472
473         err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
474         if (err)
475                 goto out;
476
477         blk_execute_rq(req, false);
478         err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
479 out:
480         blk_mq_free_request(req);
481         return err;
482 }
483
484 /* We provide getgeo only to please some old bootloader/partitioning tools */
485 static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
486 {
487         struct virtio_blk *vblk = bd->bd_disk->private_data;
488         int ret = 0;
489
490         mutex_lock(&vblk->vdev_mutex);
491
492         if (!vblk->vdev) {
493                 ret = -ENXIO;
494                 goto out;
495         }
496
497         /* see if the host passed in geometry config */
498         if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
499                 virtio_cread(vblk->vdev, struct virtio_blk_config,
500                              geometry.cylinders, &geo->cylinders);
501                 virtio_cread(vblk->vdev, struct virtio_blk_config,
502                              geometry.heads, &geo->heads);
503                 virtio_cread(vblk->vdev, struct virtio_blk_config,
504                              geometry.sectors, &geo->sectors);
505         } else {
506                 /* some standard values, similar to sd */
507                 geo->heads = 1 << 6;
508                 geo->sectors = 1 << 5;
509                 geo->cylinders = get_capacity(bd->bd_disk) >> 11;
510         }
511 out:
512         mutex_unlock(&vblk->vdev_mutex);
513         return ret;
514 }
515
516 static void virtblk_free_disk(struct gendisk *disk)
517 {
518         struct virtio_blk *vblk = disk->private_data;
519
520         ida_free(&vd_index_ida, vblk->index);
521         mutex_destroy(&vblk->vdev_mutex);
522         kfree(vblk);
523 }
524
525 static const struct block_device_operations virtblk_fops = {
526         .owner          = THIS_MODULE,
527         .getgeo         = virtblk_getgeo,
528         .free_disk      = virtblk_free_disk,
529 };
530
531 static int index_to_minor(int index)
532 {
533         return index << PART_BITS;
534 }
535
536 static int minor_to_index(int minor)
537 {
538         return minor >> PART_BITS;
539 }
540
541 static ssize_t serial_show(struct device *dev,
542                            struct device_attribute *attr, char *buf)
543 {
544         struct gendisk *disk = dev_to_disk(dev);
545         int err;
546
547         /* sysfs gives us a PAGE_SIZE buffer */
548         BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);
549
550         buf[VIRTIO_BLK_ID_BYTES] = '\0';
551         err = virtblk_get_id(disk, buf);
552         if (!err)
553                 return strlen(buf);
554
555         if (err == -EIO) /* Unsupported? Make it empty. */
556                 return 0;
557
558         return err;
559 }
560
561 static DEVICE_ATTR_RO(serial);
562
563 /* The queue's logical block size must be set before calling this */
564 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
565 {
566         struct virtio_device *vdev = vblk->vdev;
567         struct request_queue *q = vblk->disk->queue;
568         char cap_str_2[10], cap_str_10[10];
569         unsigned long long nblocks;
570         u64 capacity;
571
572         /* Host must always specify the capacity. */
573         virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);
574
575         nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);
576
577         string_get_size(nblocks, queue_logical_block_size(q),
578                         STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
579         string_get_size(nblocks, queue_logical_block_size(q),
580                         STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));
581
582         dev_notice(&vdev->dev,
583                    "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
584                    vblk->disk->disk_name,
585                    resize ? "new size: " : "",
586                    nblocks,
587                    queue_logical_block_size(q),
588                    cap_str_10,
589                    cap_str_2);
590
591         set_capacity_and_notify(vblk->disk, capacity);
592 }
593
594 static void virtblk_config_changed_work(struct work_struct *work)
595 {
596         struct virtio_blk *vblk =
597                 container_of(work, struct virtio_blk, config_work);
598
599         virtblk_update_capacity(vblk, true);
600 }
601
602 static void virtblk_config_changed(struct virtio_device *vdev)
603 {
604         struct virtio_blk *vblk = vdev->priv;
605
606         queue_work(virtblk_wq, &vblk->config_work);
607 }
608
609 static int init_vq(struct virtio_blk *vblk)
610 {
611         int err;
612         int i;
613         vq_callback_t **callbacks;
614         const char **names;
615         struct virtqueue **vqs;
616         unsigned short num_vqs;
617         unsigned int num_poll_vqs;
618         struct virtio_device *vdev = vblk->vdev;
619         struct irq_affinity desc = { 0, };
620
621         err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
622                                    struct virtio_blk_config, num_queues,
623                                    &num_vqs);
624         if (err)
625                 num_vqs = 1;
626
627         if (!err && !num_vqs) {
628                 dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
629                 return -EINVAL;
630         }
631
632         num_vqs = min_t(unsigned int,
633                         min_not_zero(num_request_queues, nr_cpu_ids),
634                         num_vqs);
635
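        /* Keep at least one virtqueue interrupt-driven for regular I/O. */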
636         num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);
637
638         vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
639         vblk->io_queues[HCTX_TYPE_READ] = 0;
640         vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;
641
642         dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
643                                 vblk->io_queues[HCTX_TYPE_DEFAULT],
644                                 vblk->io_queues[HCTX_TYPE_READ],
645                                 vblk->io_queues[HCTX_TYPE_POLL]);
646
647         vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
648         if (!vblk->vqs)
649                 return -ENOMEM;
650
651         names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
652         callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
653         vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
654         if (!names || !callbacks || !vqs) {
655                 err = -ENOMEM;
656                 goto out;
657         }
658
659         for (i = 0; i < num_vqs - num_poll_vqs; i++) {
660                 callbacks[i] = virtblk_done;
661                 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
662                 names[i] = vblk->vqs[i].name;
663         }
664
665         for (; i < num_vqs; i++) {
666                 callbacks[i] = NULL;
667                 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
668                 names[i] = vblk->vqs[i].name;
669         }
670
671         /* Discover the virtqueues and write their information to the device configuration. */
672         err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
673         if (err)
674                 goto out;
675
676         for (i = 0; i < num_vqs; i++) {
677                 spin_lock_init(&vblk->vqs[i].lock);
678                 vblk->vqs[i].vq = vqs[i];
679         }
680         vblk->num_vqs = num_vqs;
681
682 out:
683         kfree(vqs);
684         kfree(callbacks);
685         kfree(names);
686         if (err)
687                 kfree(vblk->vqs);
688         return err;
689 }
690
691 /*
692  * Legacy naming scheme used for virtio devices.  We are stuck with it for
693  * virtio blk but don't ever use it for any new driver.
694  */
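/*
 * The scheme is bijective base-26, e.g. with prefix "vd": index 0 ->
 * "vda", 25 -> "vdz", 26 -> "vdaa", 27 -> "vdab", 701 -> "vdzz",
 * 702 -> "vdaaa".
 */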
695 static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
696 {
697         const int base = 'z' - 'a' + 1;
698         char *begin = buf + strlen(prefix);
699         char *end = buf + buflen;
700         char *p;
701         int unit;
702
703         p = end - 1;
704         *p = '\0';
705         unit = base;
706         do {
707                 if (p == begin)
708                         return -EINVAL;
709                 *--p = 'a' + (index % unit);
710                 index = (index / unit) - 1;
711         } while (index >= 0);
712
713         memmove(begin, p, end - p);
714         memcpy(buf, prefix, strlen(prefix));
715
716         return 0;
717 }
718
719 static int virtblk_get_cache_mode(struct virtio_device *vdev)
720 {
721         u8 writeback;
722         int err;
723
724         err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
725                                    struct virtio_blk_config, wce,
726                                    &writeback);
727
728         /*
729          * If WCE is not configurable and flush is not available,
730          * assume no writeback cache is in use.
731          */
732         if (err)
733                 writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);
734
735         return writeback;
736 }
737
738 static void virtblk_update_cache_mode(struct virtio_device *vdev)
739 {
740         u8 writeback = virtblk_get_cache_mode(vdev);
741         struct virtio_blk *vblk = vdev->priv;
742
743         blk_queue_write_cache(vblk->disk->queue, writeback, false);
744 }
745
746 static const char *const virtblk_cache_types[] = {
747         "write through", "write back"
748 };
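
/*
 * cache_type is exposed through sysfs; for a disk named vda (path
 * illustrative):
 *   cat /sys/block/vda/cache_type
 *   echo "write back" > /sys/block/vda/cache_type
 */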
749
750 static ssize_t
751 cache_type_store(struct device *dev, struct device_attribute *attr,
752                  const char *buf, size_t count)
753 {
754         struct gendisk *disk = dev_to_disk(dev);
755         struct virtio_blk *vblk = disk->private_data;
756         struct virtio_device *vdev = vblk->vdev;
757         int i;
758
759         BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
760         i = sysfs_match_string(virtblk_cache_types, buf);
761         if (i < 0)
762                 return i;
763
764         virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
765         virtblk_update_cache_mode(vdev);
766         return count;
767 }
768
769 static ssize_t
770 cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
771 {
772         struct gendisk *disk = dev_to_disk(dev);
773         struct virtio_blk *vblk = disk->private_data;
774         u8 writeback = virtblk_get_cache_mode(vblk->vdev);
775
776         BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
777         return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
778 }
779
780 static DEVICE_ATTR_RW(cache_type);
781
782 static struct attribute *virtblk_attrs[] = {
783         &dev_attr_serial.attr,
784         &dev_attr_cache_type.attr,
785         NULL,
786 };
787
788 static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
789                 struct attribute *a, int n)
790 {
791         struct device *dev = kobj_to_dev(kobj);
792         struct gendisk *disk = dev_to_disk(dev);
793         struct virtio_blk *vblk = disk->private_data;
794         struct virtio_device *vdev = vblk->vdev;
795
796         if (a == &dev_attr_cache_type.attr &&
797             !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
798                 return S_IRUGO;
799
800         return a->mode;
801 }
802
803 static const struct attribute_group virtblk_attr_group = {
804         .attrs = virtblk_attrs,
805         .is_visible = virtblk_attrs_are_visible,
806 };
807
808 static const struct attribute_group *virtblk_attr_groups[] = {
809         &virtblk_attr_group,
810         NULL,
811 };
812
813 static void virtblk_map_queues(struct blk_mq_tag_set *set)
814 {
815         struct virtio_blk *vblk = set->driver_data;
816         int i, qoff;
817
818         for (i = 0, qoff = 0; i < set->nr_maps; i++) {
819                 struct blk_mq_queue_map *map = &set->map[i];
820
821                 map->nr_queues = vblk->io_queues[i];
822                 map->queue_offset = qoff;
823                 qoff += map->nr_queues;
824
825                 if (map->nr_queues == 0)
826                         continue;
827
828                 /*
829                  * Regular queues have interrupts and hence CPU affinity is
830                  * defined by the core virtio code, but polling queues have
831                  * no interrupts so we let the block layer assign CPU affinity.
832                  */
833                 if (i == HCTX_TYPE_POLL)
834                         blk_mq_map_queues(&set->map[i]);
835                 else
836                         blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
837         }
838 }
839
840 static void virtblk_complete_batch(struct io_comp_batch *iob)
841 {
842         struct request *req;
843
844         rq_list_for_each(&iob->req_list, req) {
845                 virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
846                 virtblk_cleanup_cmd(req);
847         }
848         blk_mq_end_request_batch(iob);
849 }
850
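/*
 * Completion path for poll virtqueues: they are created without a
 * callback (see init_vq()), so the block layer polls here to reap
 * completions, batching them via virtblk_complete_batch() when possible.
 */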
851 static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
852 {
853         struct virtio_blk *vblk = hctx->queue->queuedata;
854         struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
855         struct virtblk_req *vbr;
856         unsigned long flags;
857         unsigned int len;
858         int found = 0;
859
860         spin_lock_irqsave(&vq->lock, flags);
861
862         while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
863                 struct request *req = blk_mq_rq_from_pdu(vbr);
864
865                 found++;
866                 if (!blk_mq_add_to_batch(req, iob, vbr->status,
867                                                 virtblk_complete_batch))
868                         blk_mq_complete_request(req);
869         }
870
871         if (found)
872                 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
873
874         spin_unlock_irqrestore(&vq->lock, flags);
875
876         return found;
877 }
878
879 static const struct blk_mq_ops virtio_mq_ops = {
880         .queue_rq       = virtio_queue_rq,
881         .queue_rqs      = virtio_queue_rqs,
882         .commit_rqs     = virtio_commit_rqs,
883         .complete       = virtblk_request_done,
884         .map_queues     = virtblk_map_queues,
885         .poll           = virtblk_poll,
886 };
887
888 static unsigned int virtblk_queue_depth;
889 module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
890
891 static int virtblk_probe(struct virtio_device *vdev)
892 {
893         struct virtio_blk *vblk;
894         struct request_queue *q;
895         int err, index;
896
897         u32 v, blk_size, max_size, sg_elems, opt_io_size;
898         u32 max_discard_segs = 0;
899         u32 discard_granularity = 0;
900         u16 min_io_size;
901         u8 physical_block_exp, alignment_offset;
902         unsigned int queue_depth;
903
904         if (!vdev->config->get) {
905                 dev_err(&vdev->dev, "%s failure: config access disabled\n",
906                         __func__);
907                 return -EINVAL;
908         }
909
910         err = ida_alloc_range(&vd_index_ida, 0,
911                               minor_to_index(1 << MINORBITS) - 1, GFP_KERNEL);
912         if (err < 0)
913                 goto out;
914         index = err;
915
916         /* We need to know how many segments before we allocate. */
917         err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
918                                    struct virtio_blk_config, seg_max,
919                                    &sg_elems);
920
921         /* We need at least one SG element, whatever they say. */
922         if (err || !sg_elems)
923                 sg_elems = 1;
924
925         /* Prevent integer overflows and honor max vq size */
926         sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);
927
928         vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
929         if (!vblk) {
930                 err = -ENOMEM;
931                 goto out_free_index;
932         }
933
934         mutex_init(&vblk->vdev_mutex);
935
936         vblk->vdev = vdev;
937
938         INIT_WORK(&vblk->config_work, virtblk_config_changed_work);
939
940         err = init_vq(vblk);
941         if (err)
942                 goto out_free_vblk;
943
944         /* Default queue sizing is to fill the ring. */
945         if (!virtblk_queue_depth) {
946                 queue_depth = vblk->vqs[0].vq->num_free;
947                 /* ... but without indirect descs, we use 2 descs per req */
948                 if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
949                         queue_depth /= 2;
950         } else {
951                 queue_depth = virtblk_queue_depth;
952         }
953
954         memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
955         vblk->tag_set.ops = &virtio_mq_ops;
956         vblk->tag_set.queue_depth = queue_depth;
957         vblk->tag_set.numa_node = NUMA_NO_NODE;
958         vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
959         vblk->tag_set.cmd_size =
960                 sizeof(struct virtblk_req) +
961                 sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
962         vblk->tag_set.driver_data = vblk;
963         vblk->tag_set.nr_hw_queues = vblk->num_vqs;
964         vblk->tag_set.nr_maps = 1;
965         if (vblk->io_queues[HCTX_TYPE_POLL])
966                 vblk->tag_set.nr_maps = 3;
967
968         err = blk_mq_alloc_tag_set(&vblk->tag_set);
969         if (err)
970                 goto out_free_vq;
971
972         vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
973         if (IS_ERR(vblk->disk)) {
974                 err = PTR_ERR(vblk->disk);
975                 goto out_free_tags;
976         }
977         q = vblk->disk->queue;
978
979         virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
980
981         vblk->disk->major = major;
982         vblk->disk->first_minor = index_to_minor(index);
983         vblk->disk->minors = 1 << PART_BITS;
984         vblk->disk->private_data = vblk;
985         vblk->disk->fops = &virtblk_fops;
986         vblk->index = index;
987
988         /* configure queue flush support */
989         virtblk_update_cache_mode(vdev);
990
991         /* If disk is read-only in the host, the guest should obey */
992         if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
993                 set_disk_ro(vblk->disk, 1);
994
995         /* We can handle whatever the host told us to handle. */
996         blk_queue_max_segments(q, sg_elems);
997
998         /* No real sector limit. */
999         blk_queue_max_hw_sectors(q, UINT_MAX);
1000
1001         max_size = virtio_max_dma_size(vdev);
1002
1003         /* Host can optionally specify a maximum segment size; the maximum
1004          * segment count was already handled above. */
1005         err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
1006                                    struct virtio_blk_config, size_max, &v);
1007         if (!err)
1008                 max_size = min(max_size, v);
1009
1010         blk_queue_max_segment_size(q, max_size);
1011
1012         /* Host can optionally specify the block size of the device */
1013         err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
1014                                    struct virtio_blk_config, blk_size,
1015                                    &blk_size);
1016         if (!err) {
1017                 err = blk_validate_block_size(blk_size);
1018                 if (err) {
1019                         dev_err(&vdev->dev,
1020                                 "virtio_blk: invalid block size: 0x%x\n",
1021                                 blk_size);
1022                         goto out_cleanup_disk;
1023                 }
1024
1025                 blk_queue_logical_block_size(q, blk_size);
1026         } else
1027                 blk_size = queue_logical_block_size(q);
1028
1029         /* Use topology information if available */
1030         err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
1031                                    struct virtio_blk_config, physical_block_exp,
1032                                    &physical_block_exp);
1033         if (!err && physical_block_exp)
1034                 blk_queue_physical_block_size(q,
1035                                 blk_size * (1 << physical_block_exp));
1036
1037         err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
1038                                    struct virtio_blk_config, alignment_offset,
1039                                    &alignment_offset);
1040         if (!err && alignment_offset)
1041                 blk_queue_alignment_offset(q, blk_size * alignment_offset);
1042
1043         err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
1044                                    struct virtio_blk_config, min_io_size,
1045                                    &min_io_size);
1046         if (!err && min_io_size)
1047                 blk_queue_io_min(q, blk_size * min_io_size);
1048
1049         err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
1050                                    struct virtio_blk_config, opt_io_size,
1051                                    &opt_io_size);
1052         if (!err && opt_io_size)
1053                 blk_queue_io_opt(q, blk_size * opt_io_size);
1054
1055         if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
1056                 virtio_cread(vdev, struct virtio_blk_config,
1057                              discard_sector_alignment, &discard_granularity);
1058
1059                 virtio_cread(vdev, struct virtio_blk_config,
1060                              max_discard_sectors, &v);
1061                 blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);
1062
1063                 virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
1064                              &max_discard_segs);
1065         }
1066
1067         if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
1068                 virtio_cread(vdev, struct virtio_blk_config,
1069                              max_write_zeroes_sectors, &v);
1070                 blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
1071         }
1072
1073         /* The discard and secure erase limits are combined since the Linux
1074          * block layer uses the same limit for both commands.
1075          *
1076          * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features
1077          * are negotiated, we will use the minimum between the limits.
1078          *
1079          * The discard sector alignment is set to the minimum of
1080          * discard_sector_alignment and secure_erase_sector_alignment.
1081          *
1082          * The maximum number of discard segments is set to the minimum of
1083          * max_discard_seg and max_secure_erase_seg.
1084          */
1085         if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
1086
1087                 virtio_cread(vdev, struct virtio_blk_config,
1088                              secure_erase_sector_alignment, &v);
1089
1090                 /* secure_erase_sector_alignment must not be zero; the device
1091                  * must report a valid number of sectors.
1092                  */
1093                 if (!v) {
1094                         dev_err(&vdev->dev,
1095                                 "virtio_blk: secure_erase_sector_alignment can't be 0\n");
1096                         err = -EINVAL;
1097                         goto out_cleanup_disk;
1098                 }
1099
1100                 discard_granularity = min_not_zero(discard_granularity, v);
1101
1102                 virtio_cread(vdev, struct virtio_blk_config,
1103                              max_secure_erase_sectors, &v);
1104
1105                 /* max_secure_erase_sectors must not be zero; the device must
1106                  * report a valid number of sectors.
1107                  */
1108                 if (!v) {
1109                         dev_err(&vdev->dev,
1110                                 "virtio_blk: max_secure_erase_sectors can't be 0\n");
1111                         err = -EINVAL;
1112                         goto out_cleanup_disk;
1113                 }
1114
1115                 blk_queue_max_secure_erase_sectors(q, v);
1116
1117                 virtio_cread(vdev, struct virtio_blk_config,
1118                              max_secure_erase_seg, &v);
1119
1120                 /* max_secure_erase_seg must not be zero; the device must
1121                  * report a valid number of segments.
1122                  */
1123                 if (!v) {
1124                         dev_err(&vdev->dev,
1125                                 "virtio_blk: max_secure_erase_seg can't be 0\n");
1126                         err = -EINVAL;
1127                         goto out_cleanup_disk;
1128                 }
1129
1130                 max_discard_segs = min_not_zero(max_discard_segs, v);
1131         }
1132
1133         if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) ||
1134             virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
1135                 /* max_discard_segs and discard_granularity are 0 only if the
1136                  * max_discard_seg and discard_sector_alignment fields in the virtio
1137                  * config are 0 and the VIRTIO_BLK_F_SECURE_ERASE feature is not
1138                  * negotiated. In that case, use the default values below.
1139                  */
1140                 if (!max_discard_segs)
1141                         max_discard_segs = sg_elems;
1142
1143                 blk_queue_max_discard_segments(q,
1144                                                min(max_discard_segs, MAX_DISCARD_SEGMENTS));
1145
1146                 if (discard_granularity)
1147                         q->limits.discard_granularity = discard_granularity << SECTOR_SHIFT;
1148                 else
1149                         q->limits.discard_granularity = blk_size;
1150         }
1151
1152         virtblk_update_capacity(vblk, false);
1153         virtio_device_ready(vdev);
1154
1155         err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
1156         if (err)
1157                 goto out_cleanup_disk;
1158
1159         return 0;
1160
1161 out_cleanup_disk:
1162         put_disk(vblk->disk);
1163 out_free_tags:
1164         blk_mq_free_tag_set(&vblk->tag_set);
1165 out_free_vq:
1166         vdev->config->del_vqs(vdev);
1167         kfree(vblk->vqs);
1168 out_free_vblk:
1169         kfree(vblk);
1170 out_free_index:
1171         ida_free(&vd_index_ida, index);
1172 out:
1173         return err;
1174 }
1175
1176 static void virtblk_remove(struct virtio_device *vdev)
1177 {
1178         struct virtio_blk *vblk = vdev->priv;
1179
1180         /* Make sure no work handler is accessing the device. */
1181         flush_work(&vblk->config_work);
1182
1183         del_gendisk(vblk->disk);
1184         blk_mq_free_tag_set(&vblk->tag_set);
1185
1186         mutex_lock(&vblk->vdev_mutex);
1187
1188         /* Stop all the virtqueues. */
1189         virtio_reset_device(vdev);
1190
1191         /* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
1192         vblk->vdev = NULL;
1193
1194         vdev->config->del_vqs(vdev);
1195         kfree(vblk->vqs);
1196
1197         mutex_unlock(&vblk->vdev_mutex);
1198
1199         put_disk(vblk->disk);
1200 }
1201
1202 #ifdef CONFIG_PM_SLEEP
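/*
 * Suspend: reset the device so no vq processing or interrupts remain,
 * quiesce blk-mq, and free the virtqueues; resume rebuilds them via
 * init_vq() and unquiesces the queue.
 */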
1203 static int virtblk_freeze(struct virtio_device *vdev)
1204 {
1205         struct virtio_blk *vblk = vdev->priv;
1206
1207         /* Ensure we don't receive any more interrupts */
1208         virtio_reset_device(vdev);
1209
1210         /* Make sure no work handler is accessing the device. */
1211         flush_work(&vblk->config_work);
1212
1213         blk_mq_quiesce_queue(vblk->disk->queue);
1214
1215         vdev->config->del_vqs(vdev);
1216         kfree(vblk->vqs);
1217
1218         return 0;
1219 }
1220
1221 static int virtblk_restore(struct virtio_device *vdev)
1222 {
1223         struct virtio_blk *vblk = vdev->priv;
1224         int ret;
1225
1226         ret = init_vq(vblk);
1227         if (ret)
1228                 return ret;
1229
1230         virtio_device_ready(vdev);
1231
1232         blk_mq_unquiesce_queue(vblk->disk->queue);
1233         return 0;
1234 }
1235 #endif
1236
1237 static const struct virtio_device_id id_table[] = {
1238         { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
1239         { 0 },
1240 };
1241
1242 static unsigned int features_legacy[] = {
1243         VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
1244         VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
1245         VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
1246         VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
1247         VIRTIO_BLK_F_SECURE_ERASE,
1248 };
1249 
1250 static unsigned int features[] = {
1251         VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
1252         VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
1253         VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
1254         VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
1255         VIRTIO_BLK_F_SECURE_ERASE,
1256 };
1257
1258 static struct virtio_driver virtio_blk = {
1259         .feature_table                  = features,
1260         .feature_table_size             = ARRAY_SIZE(features),
1261         .feature_table_legacy           = features_legacy,
1262         .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
1263         .driver.name                    = KBUILD_MODNAME,
1264         .driver.owner                   = THIS_MODULE,
1265         .id_table                       = id_table,
1266         .probe                          = virtblk_probe,
1267         .remove                         = virtblk_remove,
1268         .config_changed                 = virtblk_config_changed,
1269 #ifdef CONFIG_PM_SLEEP
1270         .freeze                         = virtblk_freeze,
1271         .restore                        = virtblk_restore,
1272 #endif
1273 };
1274
1275 static int __init virtio_blk_init(void)
1276 {
1277         int error;
1278
1279         virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
1280         if (!virtblk_wq)
1281                 return -ENOMEM;
1282
1283         major = register_blkdev(0, "virtblk");
1284         if (major < 0) {
1285                 error = major;
1286                 goto out_destroy_workqueue;
1287         }
1288
1289         error = register_virtio_driver(&virtio_blk);
1290         if (error)
1291                 goto out_unregister_blkdev;
1292         return 0;
1293
1294 out_unregister_blkdev:
1295         unregister_blkdev(major, "virtblk");
1296 out_destroy_workqueue:
1297         destroy_workqueue(virtblk_wq);
1298         return error;
1299 }
1300
1301 static void __exit virtio_blk_fini(void)
1302 {
1303         unregister_virtio_driver(&virtio_blk);
1304         unregister_blkdev(major, "virtblk");
1305         destroy_workqueue(virtblk_wq);
1306 }
1307 module_init(virtio_blk_init);
1308 module_exit(virtio_blk_fini);
1309
1310 MODULE_DEVICE_TABLE(virtio, id_table);
1311 MODULE_DESCRIPTION("Virtio block driver");
1312 MODULE_LICENSE("GPL");