// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;
        /* What the host tells us, plus 2 for header & trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct scatterlist sg[];
};

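/*
 * Translate the one-byte status the device wrote back into a block
 * layer status code.
 */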
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
                return BLK_STS_NOTSUPP;
        default:
                return BLK_STS_IOERR;
        }
}

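/*
 * Queue a request on the virtqueue as up to three descriptor groups:
 * the out_hdr (driver->device), the optional data buffers (direction
 * chosen by the VIRTIO_BLK_T_OUT bit in the header type), and the
 * one-byte status (device->driver).
 */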
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

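/*
 * Build the payload for a discard/write-zeroes command: one
 * (sector, num_sectors, flags) range per bio in the merged request,
 * attached as the request's special payload so it is mapped like
 * ordinary data.  The buffer is freed in virtblk_request_done().
 */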
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);
        unsigned short n = 0;
        struct virtio_blk_discard_write_zeroes *range;
        struct bio *bio;
        u32 flags = 0;

        if (unmap)
                flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        __rq_for_each_bio(bio, req) {
                u64 sector = bio->bi_iter.bi_sector;
                u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

                range[n].flags = cpu_to_le32(flags);
                range[n].num_sectors = cpu_to_le32(num_sectors);
                range[n].sector = cpu_to_le64(sector);
                n++;
        }

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return 0;
}

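/*
 * Completion handler, run via blk_mq_complete_request(): free any
 * discard/write-zeroes payload and end the request with the status
 * reported by the device.
 */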
static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                kfree(page_address(req->special_vec.bv_page) +
                      req->special_vec.bv_offset);
        }

        blk_mq_end_request(req, virtblk_result(vbr));
}

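/*
 * Virtqueue callback: drain completed buffers from the used ring.
 * The disable_cb/enable_cb loop closes the race where the device adds
 * a buffer between the final virtqueue_get_buf() and callbacks being
 * re-enabled.
 */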
static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);
}

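/*
 * Translate a block layer request into a virtio-blk command, map its
 * data into the per-request scatterlist and queue it on this hctx's
 * virtqueue.  On -ENOSPC the hardware queue is stopped until a
 * completion frees ring space; the device is normally notified
 * ("kicked") only for the last request of a batch.
 */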
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        int qid = hctx->queue_num;
        int err;
        bool notify = false;
        bool unmap = false;
        u32 type;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        switch (req_op(req)) {
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                type = 0;
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_DISCARD:
                type = VIRTIO_BLK_T_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                type = VIRTIO_BLK_T_WRITE_ZEROES;
                unmap = !(req->cmd_flags & REQ_NOUNMAP);
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
        vbr->out_hdr.sector = type ?
                0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

        blk_mq_start_request(req);

        if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
                err = virtblk_setup_discard_write_zeroes(req, unmap);
                if (err)
                        return BLK_STS_RESOURCE;
        }

        num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
        if (num) {
                if (rq_data_dir(req) == WRITE)
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
                else
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                /* Don't stop the queue if -ENOMEM: we may have failed to
                 * bounce the buffer due to global resource outage.
                 */
                if (err == -ENOSPC)
                        blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                switch (err) {
                case -ENOSPC:
                        return BLK_STS_DEV_RESOURCE;
                case -ENOMEM:
                        return BLK_STS_RESOURCE;
                default:
                        return BLK_STS_IOERR;
                }
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;
}

/* Return the id (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
        blk_put_request(req);
        return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}

static const struct block_device_operations virtblk_fops = {
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        unsigned long long nblocks;
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
                   vblk->disk->disk_name,
                   resize ? "new size: " : "",
                   nblocks,
                   queue_logical_block_size(q),
                   cap_str_10,
                   cap_str_2);

        set_capacity_revalidate_and_notify(vblk->disk, capacity, true);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);

        virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

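/*
 * Set up the virtqueues: read the queue count from the config space if
 * VIRTIO_BLK_F_MQ was negotiated (default 1), cap it at nr_cpu_ids, and
 * find the queues with an interrupt affinity hint so vectors are spread
 * across the CPUs.
 */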
static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration.  */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
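/*
 * The name is effectively base-26 with no zero digit, built backwards
 * from the end of the buffer: index 0 maps to "vda", 25 to "vdz",
 * 26 to "vdaa", 27 to "vdab", and so on.
 */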
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
        revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        i = sysfs_match_string(virtblk_cache_types, buf);
        if (i < 0)
                return i;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
        &dev_attr_serial.attr,
        &dev_attr_cache_type.attr,
        NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;

        if (a == &dev_attr_cache_type.attr &&
            !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                return S_IRUGO;

        return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
        .attrs = virtblk_attrs,
        .is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
        &virtblk_attr_group,
        NULL,
};

static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
        struct virtio_blk *vblk = set->driver_data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}

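/* Map hardware contexts to CPUs following the vq interrupt affinity. */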
static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
                                        vblk->vdev, 0);
}

static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .commit_rqs     = virtio_commit_rqs,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
        .map_queues     = virtblk_map_queues,
};

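/*
 * 0 (the default) sizes the queue from the virtqueue in probe: all of
 * its free entries, halved when indirect descriptors are unavailable
 * since each request then needs at least two descriptors.
 */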
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u32 v, blk_size, max_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;
        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;

        q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
        vblk->disk->queue = q;

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems-2);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        max_size = virtio_max_dma_size(vdev);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                max_size = min(max_size, v);

        blk_queue_max_segment_size(q, max_size);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                q->limits.discard_granularity = blk_size;

                virtio_cread(vdev, struct virtio_blk_config,
                             discard_sector_alignment, &v);
                q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

                virtio_cread(vdev, struct virtio_blk_config,
                             max_discard_sectors, &v);
                blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
                blk_queue_max_discard_segments(q,
                                               min_not_zero(v,
                                                            MAX_DISCARD_SEGMENTS));

                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             max_write_zeroes_sectors, &v);
                blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        return 0;

out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

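/*
 * Tear down in roughly the reverse order of probe.  The disk refcount
 * is sampled before put_disk() so the minor index is returned to the
 * IDA only when no opener still holds a reference.
 */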
static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}

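/*
 * Suspend/resume: freeze resets the device (no further interrupts),
 * flushes the config work, quiesces the queue and deletes the vqs;
 * restore re-creates the vqs and unquiesces the queue.
 */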
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");