// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>
#include <uapi/linux/virtio_ring.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

/* The maximum number of sg elements that fit into a virtqueue */
#define VIRTIO_BLK_MAX_SG_ELEMS 32768

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
	struct virtqueue *vq;
	spinlock_t lock;
	char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
	/*
	 * This mutex must be held by anything that may run after
	 * virtblk_remove() sets vblk->vdev to NULL.
	 *
	 * blk-mq, virtqueue processing, and sysfs attribute code paths are
	 * shut down before vblk->vdev is set to NULL and therefore do not need
	 * to hold the mutex.
	 */
	struct mutex vdev_mutex;
	struct virtio_device *vdev;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Block layer tags. */
	struct blk_mq_tag_set tag_set;

	/* Process context for config space updates */
	struct work_struct config_work;

	/*
	 * Tracks references from block_device_operations open/release and
	 * virtio_driver probe/remove so this object can be freed once no
	 * longer in use.
	 */
	refcount_t refs;

	/* What host tells us, plus 2 for header & tailer. */
	unsigned int sg_elems;

	/* Ida index - used to track minor number allocations. */
	int index;

	/* Number of request virtqueues. */
	int num_vqs;
	struct virtio_blk_vq *vqs;
};

struct virtblk_req {
	struct virtio_blk_outhdr out_hdr;
	u8 status;
	struct scatterlist sg[];
};

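/*
 * Map the status byte returned by the device onto the block layer's
 * blk_status_t error space.
 */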
static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
	switch (vbr->status) {
	case VIRTIO_BLK_S_OK:
		return BLK_STS_OK;
	case VIRTIO_BLK_S_UNSUPP:
		return BLK_STS_NOTSUPP;
	default:
		return BLK_STS_IOERR;
	}
}

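/*
 * Build the descriptor chain for one request: the out_hdr is always
 * device-readable, the data buffers follow (device-writable for reads,
 * device-readable for writes), and the one-byte status always comes last
 * as a device-writable descriptor.
 */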
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

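/*
 * Pack the discard/write-zeroes ranges into a driver-allocated buffer and
 * attach it to the request as a special payload; it is freed in
 * virtblk_request_done() once the device has completed the request.
 */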
static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
	unsigned short segments = blk_rq_nr_discard_segments(req);
	unsigned short n = 0;
	struct virtio_blk_discard_write_zeroes *range;
	struct bio *bio;
	u32 flags = 0;

	if (unmap)
		flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
	if (!range)
		return -ENOMEM;

	/*
	 * Single max discard segment means multi-range discard isn't
	 * supported, and block layer only runs contiguity merge like
	 * normal RW request. So we can't rely on bio for retrieving
	 * each range info.
	 */
	if (queue_max_discard_segments(req->q) == 1) {
		range[0].flags = cpu_to_le32(flags);
		range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
		range[0].sector = cpu_to_le64(blk_rq_pos(req));
		n = 1;
	} else {
		__rq_for_each_bio(bio, req) {
			u64 sector = bio->bi_iter.bi_sector;
			u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

			range[n].flags = cpu_to_le32(flags);
			range[n].num_sectors = cpu_to_le32(num_sectors);
			range[n].sector = cpu_to_le64(sector);
			n++;
		}
	}

	WARN_ON_ONCE(n != segments);

	req->special_vec.bv_page = virt_to_page(range);
	req->special_vec.bv_offset = offset_in_page(range);
	req->special_vec.bv_len = sizeof(*range) * segments;
	req->rq_flags |= RQF_SPECIAL_PAYLOAD;

	return 0;
}

static inline void virtblk_request_done(struct request *req)
{
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

	if (req->rq_flags & RQF_SPECIAL_PAYLOAD)
		kfree(bvec_virt(&req->special_vec));
	blk_mq_end_request(req, virtblk_result(vbr));
}

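/*
 * Virtqueue interrupt handler. The disable_cb/enable_cb dance re-runs the
 * poll loop until no completion arrives while callbacks are re-enabled,
 * closing the race with the device adding a buffer just as we finish.
 */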
static void virtblk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	bool req_done = false;
	int qid = vq->index;
	struct virtblk_req *vbr;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
			struct request *req = blk_mq_rq_from_pdu(vbr);

			if (likely(!blk_should_fake_timeout(req->q)))
				blk_mq_complete_request(req);
			req_done = true;
		}
		if (unlikely(virtqueue_is_broken(vq)))
			break;
	} while (!virtqueue_enable_cb(vq));

	/* In case queue is stopped waiting for more buffers. */
	if (req_done)
		blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}

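/*
 * Called by blk-mq after a batch of requests was queued without bd->last
 * set; issue a single notification for the whole batch.
 */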
static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
	bool kick;

	spin_lock_irq(&vq->lock);
	kick = virtqueue_kick_prepare(vq->vq);
	spin_unlock_irq(&vq->lock);

	if (kick)
		virtqueue_notify(vq->vq);
}

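/*
 * Translate one blk-mq request into a virtio-blk command, add it to the
 * virtqueue, and notify the device when blk-mq signals the end of a batch.
 */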
static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *bd)
{
	struct virtio_blk *vblk = hctx->queue->queuedata;
	struct request *req = bd->rq;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	unsigned int num;
	int qid = hctx->queue_num;
	int err;
	bool notify = false;
	bool unmap = false;
	u32 type;

	BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

	switch (req_op(req)) {
	case REQ_OP_READ:
	case REQ_OP_WRITE:
		type = 0;
		break;
	case REQ_OP_FLUSH:
		type = VIRTIO_BLK_T_FLUSH;
		break;
	case REQ_OP_DISCARD:
		type = VIRTIO_BLK_T_DISCARD;
		break;
	case REQ_OP_WRITE_ZEROES:
		type = VIRTIO_BLK_T_WRITE_ZEROES;
		unmap = !(req->cmd_flags & REQ_NOUNMAP);
		break;
	case REQ_OP_DRV_IN:
		type = VIRTIO_BLK_T_GET_ID;
		break;
	default:
		WARN_ON_ONCE(1);
		return BLK_STS_IOERR;
	}

	vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
	vbr->out_hdr.sector = type ?
		0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
	vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

	blk_mq_start_request(req);

	if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
		err = virtblk_setup_discard_write_zeroes(req, unmap);
		if (err)
			return BLK_STS_RESOURCE;
	}

	num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
	if (num) {
		if (rq_data_dir(req) == WRITE)
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
		else
			vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
	}

	spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
	err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
	if (err) {
		virtqueue_kick(vblk->vqs[qid].vq);
		/* Don't stop the queue if -ENOMEM: we may have failed to
		 * bounce the buffer due to global resource outage.
		 */
		if (err == -ENOSPC)
			blk_mq_stop_hw_queue(hctx);
		spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
		switch (err) {
		case -ENOSPC:
			return BLK_STS_DEV_RESOURCE;
		case -ENOMEM:
			return BLK_STS_RESOURCE;
		default:
			return BLK_STS_IOERR;
		}
	}

	if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_STS_OK;
}

/* return id (s/n) string for *disk to *id_str
 */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request_queue *q = vblk->disk->queue;
	struct request *req;
	int err;

	req = blk_get_request(q, REQ_OP_DRV_IN, 0);
	if (IS_ERR(req))
		return PTR_ERR(req);

	err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
	if (err)
		goto out;

	blk_execute_rq(vblk->disk, req, false);
	err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
	blk_put_request(req);
	return err;
}

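/*
 * The probe path holds one reference to the virtio_blk object; each open()
 * takes another. The backing memory is freed only when the last reference
 * is dropped.
 */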
static void virtblk_get(struct virtio_blk *vblk)
{
	refcount_inc(&vblk->refs);
}

static void virtblk_put(struct virtio_blk *vblk)
{
	if (refcount_dec_and_test(&vblk->refs)) {
		ida_simple_remove(&vd_index_ida, vblk->index);
		mutex_destroy(&vblk->vdev_mutex);
		kfree(vblk);
	}
}

static int virtblk_open(struct block_device *bd, fmode_t mode)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (vblk->vdev)
		virtblk_get(vblk);
	else
		ret = -ENXIO;

	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static void virtblk_release(struct gendisk *disk, fmode_t mode)
{
	struct virtio_blk *vblk = disk->private_data;

	virtblk_put(vblk);
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	int ret = 0;

	mutex_lock(&vblk->vdev_mutex);

	if (!vblk->vdev) {
		ret = -ENXIO;
		goto out;
	}

	/* see if the host passed in geometry config */
	if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.cylinders, &geo->cylinders);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.heads, &geo->heads);
		virtio_cread(vblk->vdev, struct virtio_blk_config,
			     geometry.sectors, &geo->sectors);
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
out:
	mutex_unlock(&vblk->vdev_mutex);
	return ret;
}

static const struct block_device_operations virtblk_fops = {
	.owner = THIS_MODULE,
	.open = virtblk_open,
	.release = virtblk_release,
	.getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

static int minor_to_index(int minor)
{
	return minor >> PART_BITS;
}

static ssize_t serial_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
	struct virtio_device *vdev = vblk->vdev;
	struct request_queue *q = vblk->disk->queue;
	char cap_str_2[10], cap_str_10[10];
	unsigned long long nblocks;
	u64 capacity;

	/* Host must always specify the capacity. */
	virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

	nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
	string_get_size(nblocks, queue_logical_block_size(q),
			STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

	dev_notice(&vdev->dev,
		   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
		   vblk->disk->disk_name,
		   resize ? "new size: " : "",
		   nblocks,
		   queue_logical_block_size(q),
		   cap_str_10,
		   cap_str_2);

	set_capacity_and_notify(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
	struct virtio_blk *vblk =
		container_of(work, struct virtio_blk, config_work);

	virtblk_update_capacity(vblk, true);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	queue_work(virtblk_wq, &vblk->config_work);
}

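/*
 * Negotiate the number of request virtqueues with the device (one per hw
 * queue, capped at nr_cpu_ids); fall back to a single virtqueue when the
 * device does not offer VIRTIO_BLK_F_MQ.
 */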
static int init_vq(struct virtio_blk *vblk)
{
	int err;
	int i;
	vq_callback_t **callbacks;
	const char **names;
	struct virtqueue **vqs;
	unsigned short num_vqs;
	struct virtio_device *vdev = vblk->vdev;
	struct irq_affinity desc = { 0, };

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
				   struct virtio_blk_config, num_queues,
				   &num_vqs);
	if (err)
		num_vqs = 1;

	num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

	vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
	if (!vblk->vqs)
		return -ENOMEM;

	names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
	callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
	vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!names || !callbacks || !vqs) {
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < num_vqs; i++) {
		callbacks[i] = virtblk_done;
		snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
		names[i] = vblk->vqs[i].name;
	}

	/* Discover virtqueues and write information to configuration. */
	err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
	if (err)
		goto out;

	for (i = 0; i < num_vqs; i++) {
		spin_lock_init(&vblk->vqs[i].lock);
		vblk->vqs[i].vq = vqs[i];
	}
	vblk->num_vqs = num_vqs;

out:
	kfree(vqs);
	kfree(callbacks);
	kfree(names);
	if (err)
		kfree(vblk->vqs);
	return err;
}

/*
 * Legacy naming scheme used for virtio devices. We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
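/* e.g. index 0 -> "vda", 25 -> "vdz", 26 -> "vdaa", 702 -> "vdaaa" */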
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
	const int base = 'z' - 'a' + 1;
	char *begin = buf + strlen(prefix);
	char *end = buf + buflen;
	char *p;
	int unit;

	p = end - 1;
	*p = '\0';
	unit = base;
	do {
		if (p == begin)
			return -EINVAL;
		*--p = 'a' + (index % unit);
		index = (index / unit) - 1;
	} while (index >= 0);

	memmove(begin, p, end - p);
	memcpy(buf, prefix, strlen(prefix));

	return 0;
}

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
	u8 writeback;
	int err;

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
				   struct virtio_blk_config, wce,
				   &writeback);

	/*
	 * If WCE is not configurable and flush is not available,
	 * assume no writeback cache is in use.
	 */
	if (err)
		writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

	return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
	u8 writeback = virtblk_get_cache_mode(vdev);
	struct virtio_blk *vblk = vdev->priv;

	blk_queue_write_cache(vblk->disk->queue, writeback, false);
}

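/*
 * The cache mode is exposed through the sysfs "cache_type" attribute using
 * the strings below; the index into this array is the wce config value.
 */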
static const char *const virtblk_cache_types[] = {
	"write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
		 const char *buf, size_t count)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;
	int i;

	BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
	i = sysfs_match_string(virtblk_cache_types, buf);
	if (i < 0)
		return i;

	virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
	virtblk_update_cache_mode(vdev);
	return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	u8 writeback = virtblk_get_cache_mode(vblk->vdev);

	BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
	return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
	&dev_attr_serial.attr,
	&dev_attr_cache_type.attr,
	NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
					 struct attribute *a, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct gendisk *disk = dev_to_disk(dev);
	struct virtio_blk *vblk = disk->private_data;
	struct virtio_device *vdev = vblk->vdev;

	if (a == &dev_attr_cache_type.attr &&
	    !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
		return S_IRUGO;

	return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
	.attrs = virtblk_attrs,
	.is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
	&virtblk_attr_group,
	NULL,
};

static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct virtio_blk *vblk = set->driver_data;
	struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

	sg_init_table(vbr->sg, vblk->sg_elems);
	return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
	struct virtio_blk *vblk = set->driver_data;

	return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
					vblk->vdev, 0);
}

static const struct blk_mq_ops virtio_mq_ops = {
	.queue_rq = virtio_queue_rq,
	.commit_rqs = virtio_commit_rqs,
	.complete = virtblk_request_done,
	.init_request = virtblk_init_request,
	.map_queues = virtblk_map_queues,
};

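/* queue_depth == 0 selects the default: fill the whole virtqueue ring. */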
static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

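/*
 * ->validate() runs before feature negotiation completes, so the driver can
 * still clear feature bits it cannot support, as done here for an
 * out-of-range blk_size.
 */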
static int virtblk_validate(struct virtio_device *vdev)
{
	u32 blk_size;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (!virtio_has_feature(vdev, VIRTIO_BLK_F_BLK_SIZE))
		return 0;

	blk_size = virtio_cread32(vdev,
			offsetof(struct virtio_blk_config, blk_size));

	if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE)
		__virtio_clear_bit(vdev, VIRTIO_BLK_F_BLK_SIZE);

	return 0;
}

static int virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err, index;

	u32 v, blk_size, max_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;
	unsigned int queue_depth;

	err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
			     GFP_KERNEL);
	if (err < 0)
		goto out;
	index = err;

	/* We need to know how many segments before we allocate. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
				   struct virtio_blk_config, seg_max,
				   &sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* Prevent integer overflows and honor max vq size */
	sg_elems = min_t(u32, sg_elems, VIRTIO_BLK_MAX_SG_ELEMS - 2);

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out_free_index;
	}

	/* This reference is dropped in virtblk_remove(). */
	refcount_set(&vblk->refs, 1);
	mutex_init(&vblk->vdev_mutex);

	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;

	INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

	err = init_vq(vblk);
	if (err)
		goto out_free_vblk;

	/* Default queue sizing is to fill the ring. */
	if (!virtblk_queue_depth) {
		queue_depth = vblk->vqs[0].vq->num_free;
		/* ... but without indirect descs, we use 2 descs per req */
		if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
			queue_depth /= 2;
	} else {
		queue_depth = virtblk_queue_depth;
	}

	memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
	vblk->tag_set.ops = &virtio_mq_ops;
	vblk->tag_set.queue_depth = queue_depth;
	vblk->tag_set.numa_node = NUMA_NO_NODE;
	vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	vblk->tag_set.cmd_size =
		sizeof(struct virtblk_req) +
		sizeof(struct scatterlist) * sg_elems;
	vblk->tag_set.driver_data = vblk;
	vblk->tag_set.nr_hw_queues = vblk->num_vqs;

	err = blk_mq_alloc_tag_set(&vblk->tag_set);
	if (err)
		goto out_free_vq;

	vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
	if (IS_ERR(vblk->disk)) {
		err = PTR_ERR(vblk->disk);
		goto out_free_tags;
	}
	q = vblk->disk->queue;

	virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->minors = 1 << PART_BITS;
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->flags |= GENHD_FL_EXT_DEVT;
	vblk->index = index;

	/* configure queue flush support */
	virtblk_update_cache_mode(vdev);

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems - 2);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	max_size = virtio_max_dma_size(vdev);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
				   struct virtio_blk_config, size_max, &v);
	if (!err)
		max_size = min(max_size, v);

	blk_queue_max_segment_size(q, max_size);

	/* Host can optionally specify the block size of the device */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
				   struct virtio_blk_config, blk_size,
				   &blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	if (blk_size < SECTOR_SIZE || blk_size > PAGE_SIZE) {
		dev_err(&vdev->dev,
			"block size is changed unexpectedly, now is %u\n",
			blk_size);
		err = -EINVAL;
		goto out_cleanup_disk;
	}

	/* Use topology information if available */
	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, physical_block_exp,
				   &physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, alignment_offset,
				   &alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, min_io_size,
				   &min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
				   struct virtio_blk_config, opt_io_size,
				   &opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

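	/*
	 * Discard limits in the config space are expressed in 512-byte
	 * sectors, hence the SECTOR_SHIFT conversion for the alignment
	 * below; 0 from the device means "no limit" and maps to UINT_MAX.
	 */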
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
		q->limits.discard_granularity = blk_size;

		virtio_cread(vdev, struct virtio_blk_config,
			     discard_sector_alignment, &v);
		q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

		virtio_cread(vdev, struct virtio_blk_config,
			     max_discard_sectors, &v);
		blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

		virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
			     &v);
		blk_queue_max_discard_segments(q,
					       min_not_zero(v,
							    MAX_DISCARD_SEGMENTS));

		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	}

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
		virtio_cread(vdev, struct virtio_blk_config,
			     max_write_zeroes_sectors, &v);
		blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
	}

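	/*
	 * The device must be marked ready before device_add_disk(): adding
	 * the disk triggers a partition scan that issues real I/O.
	 */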
	virtblk_update_capacity(vblk, false);
	virtio_device_ready(vdev);

	err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
	if (err)
		goto out_cleanup_disk;

	return 0;

out_cleanup_disk:
	blk_cleanup_disk(vblk->disk);
out_free_tags:
	blk_mq_free_tag_set(&vblk->tag_set);
out_free_vq:
	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);
out_free_vblk:
	kfree(vblk);
out_free_index:
	ida_simple_remove(&vd_index_ida, index);
out:
	return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	del_gendisk(vblk->disk);
	blk_cleanup_disk(vblk->disk);
	blk_mq_free_tag_set(&vblk->tag_set);

	mutex_lock(&vblk->vdev_mutex);

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	/* Virtqueues are stopped, nothing can use vblk->vdev anymore. */
	vblk->vdev = NULL;

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	mutex_unlock(&vblk->vdev_mutex);

	virtblk_put(vblk);
}

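/*
 * PM sleep support: freeze resets the device and deletes its virtqueues,
 * so restore must rebuild them with init_vq() before unquiescing the queue.
 */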
#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Ensure we don't receive any more interrupts */
	vdev->config->reset(vdev);

	/* Make sure no work handler is accessing the device. */
	flush_work(&vblk->config_work);

	blk_mq_quiesce_queue(vblk->disk->queue);

	vdev->config->del_vqs(vdev);
	kfree(vblk->vqs);

	return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;
	int ret;

	ret = init_vq(vdev->priv);
	if (ret)
		return ret;

	virtio_device_ready(vdev);

	blk_mq_unquiesce_queue(vblk->disk->queue);
	return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features_legacy[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
	VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
	VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
	VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static struct virtio_driver virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.feature_table_legacy = features_legacy,
	.feature_table_size_legacy = ARRAY_SIZE(features_legacy),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.validate = virtblk_validate,
	.probe = virtblk_probe,
	.remove = virtblk_remove,
	.config_changed = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtblk_freeze,
	.restore = virtblk_restore,
#endif
};

static int __init init(void)
{
	int error;

	virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
	if (!virtblk_wq)
		return -ENOMEM;

	major = register_blkdev(0, "virtblk");
	if (major < 0) {
		error = major;
		goto out_destroy_workqueue;
	}

	error = register_virtio_driver(&virtio_blk);
	if (error)
		goto out_unregister_blkdev;
	return 0;

out_unregister_blkdev:
	unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
	destroy_workqueue(virtblk_wq);
	return error;
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_blk);
	unregister_blkdev(major, "virtblk");
	destroy_workqueue(virtblk_wq);
}

module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");