// SPDX-License-Identifier: GPL-2.0-only

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768

#ifdef CONFIG_ARCH_NO_SG_CHAIN
#define VIRTIO_BLK_INLINE_SG_CNT	0
#else
#define VIRTIO_BLK_INLINE_SG_CNT	2
#endif

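/*
 * Number of scatterlist entries embedded in each request's PDU (the sg[]
 * flex array in struct virtblk_req); virtblk_map_data() falls back to a
 * chained sg allocation when a request needs more.
 */
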
static unsigned int num_request_queues;
module_param(num_request_queues, uint, 0644);
MODULE_PARM_DESC(num_request_queues,
		 "Limit the number of request queues to use for blk device. "
		 "0 for no limit. "
		 "Values > nr_cpu_ids truncated to nr_cpu_ids.");

static unsigned int poll_queues;
module_param(poll_queues, uint, 0644);
MODULE_PARM_DESC(poll_queues, "The number of dedicated virtqueues for polling I/O");

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold this mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* num of vqs */
	int num_vqs;
	int io_queues[HCTX_MAX_TYPES];
	struct virtio_blk_vq *vqs;
};

struct virtblk_req {
	/* Out header: the command descriptor sent to the device */
	struct virtio_blk_outhdr out_hdr;

	/* In header: the status byte written back by the device */
	u8 status;

	struct sg_table sg_table;
	struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}

static inline struct virtio_blk_vq *get_virtio_blk_vq(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];

	return vq;
}

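/*
 * Build the descriptor groups for a request: the driver-to-device header,
 * the optional data buffer (driver-to-device for writes, device-to-driver
 * for reads, keyed off VIRTIO_BLK_T_OUT), and the single device-to-driver
 * status byte.
 */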
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (vbr->sg_table.nents) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = vbr->sg_table.sgl;
		else
			sgs[num_out + num_in++] = vbr->sg_table.sgl;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static int virtblk_setup_discard_write_zeroes_erase(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	u32 flags = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * Single max discard segment means multi-range discard isn't
	 * supported, and the block layer only runs contiguity merges like
	 * it does for normal RW requests. So we can't rely on the bios for
	 * retrieving each range's info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}

static void virtblk_unmap_data(struct request *req, struct virtblk_req *vbr)
{
	if (blk_rq_nr_phys_segments(req))
		sg_free_table_chained(&vbr->sg_table,
				      VIRTIO_BLK_INLINE_SG_CNT);
}

static int virtblk_map_data(struct blk_mq_hw_ctx *hctx, struct request *req,
		struct virtblk_req *vbr)
{
	int err;

	if (!blk_rq_nr_phys_segments(req))
		return 0;

	vbr->sg_table.sgl = vbr->sg;
	err = sg_alloc_table_chained(&vbr->sg_table,
				     blk_rq_nr_phys_segments(req),
				     vbr->sg_table.sgl,
				     VIRTIO_BLK_INLINE_SG_CNT);
	if (unlikely(err))
		return -ENOMEM;

	return blk_rq_map_sg(hctx->queue, req, vbr->sg_table.sgl);
}

static void virtblk_cleanup_cmd(struct request *req)
{
	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
		kfree(bvec_virt(&req->special_vec));
}

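/*
 * Translate a block layer request into a virtio-blk command header:
 * map the request op to a VIRTIO_BLK_T_* type and fill in the start
 * sector for reads and writes.
 */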
static blk_status_t virtblk_setup_cmd(struct virtio_device *vdev,
				      struct request *req,
				      struct virtblk_req *vbr)
{
	bool unmap = false;
	u32 type;

	vbr->out_hdr.sector = 0;

	switch (req_op(req)) {
	case REQ_OP_READ:
		type = VIRTIO_BLK_T_IN;
		vbr->out_hdr.sector = cpu_to_virtio64(vdev,
						      blk_rq_pos(req));
		break;
	case REQ_OP_WRITE:
		type = VIRTIO_BLK_T_OUT;
		vbr->out_hdr.sector = cpu_to_virtio64(vdev,
						      blk_rq_pos(req));
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_SECURE_ERASE:
		type = VIRTIO_BLK_T_SECURE_ERASE;
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vdev, type);
	vbr->out_hdr.ioprio = cpu_to_virtio32(vdev, req_get_ioprio(req));

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES ||
	    type == VIRTIO_BLK_T_SECURE_ERASE) {
		if (virtblk_setup_discard_write_zeroes_erase(req, unmap))
			return BLK_STS_RESOURCE;
	}

	return 0;
}

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	virtblk_unmap_data(req, vbr);
	virtblk_cleanup_cmd(req);
	blk_mq_end_request(req, virtblk_result(vbr));
}

static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}

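/*
 * Map a virtqueue insertion error to a block layer status: -ENOSPC means
 * the ring is full, so return BLK_STS_DEV_RESOURCE and leave it to the
 * driver to restart the queue once completions free descriptors; -ENOMEM
 * is retried by the block layer (BLK_STS_RESOURCE); anything else fails
 * the request.
 */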
static blk_status_t virtblk_fail_to_queue(struct request *req, int rc)
{
	virtblk_cleanup_cmd(req);
	switch (rc) {
	case -ENOSPC:
		return BLK_STS_DEV_RESOURCE;
	case -ENOMEM:
		return BLK_STS_RESOURCE;
	default:
		return BLK_STS_IOERR;
	}
}

static blk_status_t virtblk_prep_rq(struct blk_mq_hw_ctx *hctx,
				    struct virtio_blk *vblk,
				    struct request *req,
				    struct virtblk_req *vbr)
{
	blk_status_t status;
	int num;

	status = virtblk_setup_cmd(vblk->vdev, req, vbr);
	if (unlikely(status))
		return status;

	/* Map into a signed local first: sg_table.nents is unsigned, so a
	 * mapping failure (negative return) must be checked before the
	 * result is stored.
	 */
	num = virtblk_map_data(hctx, req, vbr);
	if (unlikely(num < 0))
		return virtblk_fail_to_queue(req, -ENOMEM);
	vbr->sg_table.nents = num;

	blk_mq_start_request(req);

	return BLK_STS_OK;
}

static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	int qid = hctx->queue_num;
	bool notify = false;
	blk_status_t status;
	int err;

	status = virtblk_prep_rq(hctx, vblk, req, vbr);
	if (unlikely(status))
		return status;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		virtblk_unmap_data(req, vbr);
		return virtblk_fail_to_queue(req, err);
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}

static bool virtblk_prep_rq_batch(struct request *req)
{
	struct virtio_blk *vblk = req->mq_hctx->queue->queuedata;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	req->mq_hctx->tags->rqs[req->tag] = req;

	return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK;
}

static bool virtblk_add_req_batch(struct virtio_blk_vq *vq,
				  struct request **rqlist)
{
	unsigned long flags;
	bool kick;

	spin_lock_irqsave(&vq->lock, flags);

	while (!rq_list_empty(*rqlist)) {
		struct request *req = rq_list_pop(rqlist);
		struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
		int err;

		err = virtblk_add_req(vq->vq, vbr);
		if (err) {
			virtblk_unmap_data(req, vbr);
			virtblk_cleanup_cmd(req);
			blk_mq_requeue_request(req, true);
		}
	}

	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irqrestore(&vq->lock, flags);
	return kick;
}

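/*
 * Submit a whole list of plugged requests at once: prepare each request,
 * moving failures to a requeue list, and hand each per-hctx run of the
 * list to virtblk_add_req_batch() so that every virtqueue is kicked at
 * most once.
 */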
static void virtio_queue_rqs(struct request **rqlist)
{
	struct request *req, *next, *prev = NULL;
	struct request *requeue_list = NULL;

	rq_list_for_each_safe(rqlist, req, next) {
		struct virtio_blk_vq *vq = get_virtio_blk_vq(req->mq_hctx);
		bool kick;

		if (!virtblk_prep_rq_batch(req)) {
			rq_list_move(rqlist, &requeue_list, req, prev);
			req = prev;
			if (!req)
				continue;
		}

		if (!next || req->mq_hctx != next->mq_hctx) {
			req->rq_next = NULL;
			kick = virtblk_add_req_batch(vq, rqlist);
			if (kick)
				virtqueue_notify(vq->vq);

			*rqlist = next;
			prev = NULL;
		} else
			prev = req;
	}

	*rqlist = requeue_list;
}

/* Return the device's serial number (ID) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_mq_free_request(req);
	return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static void virtblk_free_disk(struct gendisk *disk)
{
	struct virtio_blk *vblk = disk->private_data;

	ida_simple_remove(&vd_index_ida, vblk->index);
	mutex_destroy(&vblk->vdev_mutex);
	kfree(vblk);
}

static const struct block_device_operations virtblk_fops = {
	.owner		= THIS_MODULE,
	.getgeo		= virtblk_getgeo,
	.free_disk	= virtblk_free_disk,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

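/*
 * e.g. with PART_BITS == 4 each disk reserves 16 minors, so index 0 maps
 * to minor 0 (vda), index 1 to minor 16 (vdb), and so on.
 */
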
static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity_and_notify(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	unsigned int num_poll_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	if (!err && !num_vqs) {
		dev_err(&vdev->dev, "MQ advertised but zero queues reported\n");
		return -EINVAL;
	}

	num_vqs = min_t(unsigned int,
			min_not_zero(num_request_queues, nr_cpu_ids),
			num_vqs);

	num_poll_vqs = min_t(unsigned int, poll_queues, num_vqs - 1);

	vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
	vblk->io_queues[HCTX_TYPE_READ] = 0;
	vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

	dev_info(&vdev->dev, "%d/%d/%d default/read/poll queues\n",
		 vblk->io_queues[HCTX_TYPE_DEFAULT],
		 vblk->io_queues[HCTX_TYPE_READ],
		 vblk->io_queues[HCTX_TYPE_POLL]);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs - num_poll_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	for (; i < num_vqs; i++) {
		callbacks[i] = NULL;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
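 *
 * e.g. index 0..25 map to vda..vdz, 26 to vdaa, 701 to vdzz and 702 to
 * vdaaa.
 */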
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
}

static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return sysfs_emit(buf, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};

static void virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;
	int i, qoff;

	for (i = 0, qoff = 0; i < set->nr_maps; i++) {
		struct blk_mq_queue_map *map = &set->map[i];

		map->nr_queues = vblk->io_queues[i];
		map->queue_offset = qoff;
		qoff += map->nr_queues;

		if (map->nr_queues == 0)
			continue;

		/*
		 * Regular queues have interrupts and hence CPU affinity is
		 * defined by the core virtio code, but polling queues have
		 * no interrupts so we let the block layer assign CPU affinity.
		 */
		if (i == HCTX_TYPE_POLL)
			blk_mq_map_queues(&set->map[i]);
		else
			blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
	}
}

static void virtblk_complete_batch(struct io_comp_batch *iob)
{
	struct request *req;

	rq_list_for_each(&iob->req_list, req) {
		virtblk_unmap_data(req, blk_mq_rq_to_pdu(req));
		virtblk_cleanup_cmd(req);
	}
	blk_mq_end_request_batch(iob);
}

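/*
 * Poll virtqueues are registered without an interrupt callback (their
 * callbacks are NULL in init_vq()), so completions are reaped here and
 * batched via blk_mq_add_to_batch() where possible.
 */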
static int virtblk_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = get_virtio_blk_vq(hctx);
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;
	int found = 0;

	spin_lock_irqsave(&vq->lock, flags);

	while ((vbr = virtqueue_get_buf(vq->vq, &len)) != NULL) {
		struct request *req = blk_mq_rq_from_pdu(vbr);

		found++;
		if (!blk_mq_add_to_batch(req, iob, vbr->status,
					 virtblk_complete_batch))
			blk_mq_complete_request(req);
	}

	if (found)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);

	spin_unlock_irqrestore(&vq->lock, flags);

	return found;
}

static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq	= virtio_queue_rq,
	.queue_rqs	= virtio_queue_rqs,
	.commit_rqs	= virtio_commit_rqs,
	.complete	= virtblk_request_done,
	.map_queues	= virtblk_map_queues,
	.poll		= virtblk_poll,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);
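/* 0 (the default) sizes the queue to fill the virtqueue ring; see virtblk_probe() */
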
static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;
	u32 v, blk_size, max_size, sg_elems, opt_io_size;
	u32 max_discard_segs = 0;
	u32 discard_granularity = 0;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
	unsigned int queue_depth;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* Prevent integer overflows and honor max vq size */
	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	mutex_init(&vblk->vdev_mutex);
	vblk->vdev = vdev;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			queue_depth /= 2;
	} else {
		queue_depth = virtblk_queue_depth;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * VIRTIO_BLK_INLINE_SG_CNT;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;
	vblk->tag_set.nr_maps = 1;
	if (vblk->io_queues[HCTX_TYPE_POLL])
		vblk->tag_set.nr_maps = 3;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_free_vq;

	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
	if (IS_ERR(vblk->disk)) {
		err = PTR_ERR(vblk->disk);
		goto out_free_tags;
	}
	q = vblk->disk->queue;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->minors = 1 << PART_BITS;
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, sg_elems);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	max_size = virtio_max_dma_size(vdev);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err) {
		err = blk_validate_block_size(blk_size);
		if (err) {
			dev_err(&vdev->dev,
				"virtio_blk: invalid block size: 0x%x\n",
				blk_size);
			goto out_cleanup_disk;
		}

		blk_queue_logical_block_size(q, blk_size);
	} else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &discard_granularity);

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &max_discard_segs);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

	/* The discard and secure erase limits are combined since the Linux
	 * block layer uses the same limit for both commands.
	 *
	 * If both VIRTIO_BLK_F_SECURE_ERASE and VIRTIO_BLK_F_DISCARD features
	 * are negotiated, we will use the minimum between the limits.
	 *
	 * discard sector alignment is set to the minimum between
	 * discard_sector_alignment and secure_erase_sector_alignment.
	 *
	 * max discard segments is set to the minimum between max_discard_seg
	 * and max_secure_erase_seg.
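	 *
	 * e.g. a device reporting discard_sector_alignment == 8 and
	 * secure_erase_sector_alignment == 4 ends up with a discard
	 * granularity of min_not_zero(8, 4) == 4 sectors (2048 bytes).
	 */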
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     secure_erase_sector_alignment, &v);

		/* secure_erase_sector_alignment should not be zero, the device
		 * should set a valid number of sectors.
		 */
		if (!v) {
			dev_err(&vdev->dev,
				"virtio_blk: secure_erase_sector_alignment can't be 0\n");
			err = -EINVAL;
			goto out_cleanup_disk;
		}

		discard_granularity = min_not_zero(discard_granularity, v);

		virtio_cread(vdev, struct virtio_blk_config,
			     max_secure_erase_sectors, &v);

		/* max_secure_erase_sectors should not be zero, the device
		 * should set a valid number of sectors.
		 */
		if (!v) {
			dev_err(&vdev->dev,
				"virtio_blk: max_secure_erase_sectors can't be 0\n");
			err = -EINVAL;
			goto out_cleanup_disk;
		}

		blk_queue_max_secure_erase_sectors(q, v);

		virtio_cread(vdev, struct virtio_blk_config,
			     max_secure_erase_seg, &v);

		/* max_secure_erase_seg should not be zero, the device
		 * should set a valid number of segments.
		 */
		if (!v) {
			dev_err(&vdev->dev,
				"virtio_blk: max_secure_erase_seg can't be 0\n");
			err = -EINVAL;
			goto out_cleanup_disk;
		}

		max_discard_segs = min_not_zero(max_discard_segs, v);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD) ||
	    virtio_has_feature(vdev, VIRTIO_BLK_F_SECURE_ERASE)) {
		/* max_discard_segs and discard_granularity will be 0 only
		 * if the max_discard_seg and discard_sector_alignment fields
		 * in the virtio config are 0 and the VIRTIO_BLK_F_SECURE_ERASE
		 * feature is not negotiated. In this case, fall back to
		 * default values.
		 */
		if (!max_discard_segs)
			max_discard_segs = sg_elems;

		blk_queue_max_discard_segments(q,
					       min(max_discard_segs, MAX_DISCARD_SEGMENTS));

		if (discard_granularity)
			q->limits.discard_granularity =
				discard_granularity << SECTOR_SHIFT;
		else
			q->limits.discard_granularity = blk_size;
	}

	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	if (err)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	put_disk(vblk->disk);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	virtio_reset_device(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	put_disk(vblk->disk);
}

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	virtio_reset_device(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
	VIRTIO_BLK_F_SECURE_ERASE,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
	VIRTIO_BLK_F_SECURE_ERASE,
};

static struct virtio_driver virtio_blk = {
	.feature_table			= features,
	.feature_table_size		= ARRAY_SIZE(features),
	.feature_table_legacy		= features_legacy,
	.feature_table_size_legacy	= ARRAY_SIZE(features_legacy),
	.driver.name			= KBUILD_MODNAME,
	.driver.owner			= THIS_MODULE,
	.id_table			= id_table,
	.probe				= virtblk_probe,
	.remove				= virtblk_remove,
	.config_changed			= virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze				= virtblk_freeze,
	.restore			= virtblk_restore,
#endif
};

static int __init virtio_blk_init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit virtio_blk_fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}

module_init(virtio_blk_init);
module_exit(virtio_blk_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");