/*
 *  linux/drivers/block/loop.c
 *
 *  Written by Theodore Ts'o, 3/29/93
 *
 * Copyright 1993 by Theodore Ts'o.  Redistribution of this file is
 * permitted under the GNU General Public License.
 *
 * DES encryption plus some minor changes by Werner Almesberger, 30-MAY-1993
 * more DES encryption plus IDEA encryption by Nicholas J. Leon, June 20, 1996
 *
 * Modularized and updated for 1.1.16 kernel - Mitch Dsouza 28th May 1994
 * Adapted for 1.3.59 kernel - Andries Brouwer, 1 Feb 1996
 *
 * Fixed do_loop_request() re-entrancy - Vincent.Renardias@waw.com Mar 20, 1997
 *
 * Added devfs support - Richard Gooch <rgooch@atnf.csiro.au> 16-Jan-1998
 *
 * Handle sparse backing files correctly - Kenn Humborg, Jun 28, 1998
 *
 * Loadable modules and other fixes by AK, 1998
 *
 * Make real block number available to downstream transfer functions, enables
 * CBC (and relatives) mode encryption requiring unique IVs per data block.
 * Reed H. Petty, rhp@draper.net
 *
 * Maximum number of loop devices now dynamic via max_loop module parameter.
 * Russell Kroll <rkroll@exploits.org> 19990701
 *
 * Maximum number of loop devices when compiled-in now selectable by passing
 * max_loop=<1-255> to the kernel on boot.
 * Erik I. Bolsø, <eriki@himolde.no>, Oct 31, 1999
 *
 * Completely rewrite request handling to be make_request_fn style and
 * non-blocking, pushing work to a helper thread. Lots of fixes from
 * Al Viro too.
 * Jens Axboe <axboe@suse.de>, Nov 2000
 *
 * Support up to 256 loop devices
 * Heinz Mauelshagen <mge@sistina.com>, Feb 2002
 *
 * Support for falling back on the write file operation when the address space
 * operations write_begin is not available on the backing filesystem.
 * Anton Altaparmakov, 16 Feb 2005
 *
 * Still To Fix:
 * - Advisory locking is ignored here.
 * - Should use an own CAP_* category instead of CAP_SYS_ADMIN
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/stat.h>
#include <linux/errno.h>
#include <linux/major.h>
#include <linux/wait.h>
#include <linux/blkdev.h>
#include <linux/blkpg.h>
#include <linux/init.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/compat.h>
#include <linux/suspend.h>
#include <linux/freezer.h>
#include <linux/mutex.h>
#include <linux/writeback.h>
#include <linux/completion.h>
#include <linux/highmem.h>
#include <linux/kthread.h>
#include <linux/splice.h>
#include <linux/sysfs.h>
#include <linux/miscdevice.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <linux/ioprio.h>
#include <linux/blk-cgroup.h>
#include "loop.h"

#include <linux/uaccess.h>

static DEFINE_IDR(loop_index_idr);
static DEFINE_MUTEX(loop_ctl_mutex);

static int max_part;
static int part_shift;
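
/*
 * Legacy cryptoloop-style transfer machinery. transfer_xor() below is the
 * built-in LO_CRYPT_XOR transform: a simple byte-wise XOR of the I/O buffer
 * with the user-supplied key, cycling through the key bytes - obfuscation
 * rather than real encryption. Additional transforms can be registered at
 * runtime via loop_register_transfer() further down.
 */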
static int transfer_xor(struct loop_device *lo, int cmd,
			struct page *raw_page, unsigned raw_off,
			struct page *loop_page, unsigned loop_off,
			int size, sector_t real_block)
{
	char *raw_buf = kmap_atomic(raw_page) + raw_off;
	char *loop_buf = kmap_atomic(loop_page) + loop_off;
	char *in, *out, *key;
	int i, keysize;

	if (cmd == READ) {
		in = raw_buf;
		out = loop_buf;
	} else {
		in = loop_buf;
		out = raw_buf;
	}

	key = lo->lo_encrypt_key;
	keysize = lo->lo_encrypt_key_size;
	for (i = 0; i < size; i++)
		*out++ = *in++ ^ key[(i & 511) % keysize];

	kunmap_atomic(loop_buf);
	kunmap_atomic(raw_buf);
	cond_resched();
	return 0;
}
static int
xor_init(struct loop_device *lo, const struct loop_info64 *info)
{
	if (unlikely(info->lo_encrypt_key_size <= 0))
		return -EINVAL;
	return 0;
}

static struct loop_func_table none_funcs = {
	.number = LO_CRYPT_NONE,
};

static struct loop_func_table xor_funcs = {
	.number = LO_CRYPT_XOR,
	.transfer = transfer_xor,
	.init = xor_init
};

/* xfer_funcs[0] is special - its release function is never called */
static struct loop_func_table *xfer_funcs[MAX_LO_CRYPT] = {
	&none_funcs,
	&xor_funcs
};
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
	loff_t loopsize;

	/* Compute loopsize in bytes */
	loopsize = i_size_read(file->f_mapping->host);
	if (offset > 0)
		loopsize -= offset;
	/* offset is beyond i_size, weird but possible */
	if (loopsize < 0)
		return 0;

	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	/*
	 * Unfortunately, if we want to do I/O on the device,
	 * the number of 512-byte sectors has to fit into a sector_t.
	 */
	return loopsize >> 9;
}

static loff_t get_loop_size(struct loop_device *lo, struct file *file)
{
	return get_size(lo->lo_offset, lo->lo_sizelimit, file);
}
static void __loop_update_dio(struct loop_device *lo, bool dio)
{
	struct file *file = lo->lo_backing_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned short sb_bsize = 0;
	unsigned dio_align = 0;
	bool use_dio;

	if (inode->i_sb->s_bdev) {
		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
		dio_align = sb_bsize - 1;
	}

	/*
	 * We support direct I/O only if lo_offset is aligned with the
	 * logical I/O size of the backing device, the logical block
	 * size of the loop device is no smaller than the backing
	 * device's, and the loop device doesn't transform the data.
	 *
	 * TODO: the above condition may be loosened in the future, and
	 * direct I/O may be switched at runtime at that point because most
	 * requests in sane applications should be PAGE_SIZE aligned.
	 */
	if (dio) {
		if (queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
				!(lo->lo_offset & dio_align) &&
				mapping->a_ops->direct_IO &&
				!lo->transfer)
			use_dio = true;
		else
			use_dio = false;
	} else {
		use_dio = false;
	}

	if (lo->use_dio == use_dio)
		return;

	/* flush dirty pages before changing direct IO */
	vfs_fsync(file, 0);

	/*
	 * The flag of LO_FLAGS_DIRECT_IO is handled similarly with
	 * LO_FLAGS_READ_ONLY, both are set from kernel, and losetup
	 * will get updated by ioctl(LOOP_GET_STATUS)
	 */
	if (lo->lo_state == Lo_bound)
		blk_mq_freeze_queue(lo->lo_queue);
	lo->use_dio = use_dio;
	if (use_dio) {
		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	} else {
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	}
	if (lo->lo_state == Lo_bound)
		blk_mq_unfreeze_queue(lo->lo_queue);
}
/**
 * loop_validate_block_size() - validates the passed in block size
 * @bsize: size to validate
 */
static int
loop_validate_block_size(unsigned short bsize)
{
	if (bsize < 512 || bsize > PAGE_SIZE || !is_power_of_2(bsize))
		return -EINVAL;

	return 0;
}

/**
 * loop_set_size() - sets device size and notifies userspace
 * @lo: struct loop_device to set the size for
 * @size: new size of the loop device
 *
 * Callers must validate that the size passed into this function fits into
 * a sector_t.
 */
static void loop_set_size(struct loop_device *lo, loff_t size)
{
	struct block_device *bdev = lo->lo_device;

	bd_set_size(bdev, size << SECTOR_SHIFT);

	set_capacity_revalidate_and_notify(lo->lo_disk, size, false);
}
static inline int
lo_do_transfer(struct loop_device *lo, int cmd,
	       struct page *rpage, unsigned roffs,
	       struct page *lpage, unsigned loffs,
	       int size, sector_t rblock)
{
	int ret;

	ret = lo->transfer(lo, cmd, rpage, roffs, lpage, loffs, size, rblock);
	if (likely(!ret))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Transfer error at byte offset %llu, length %i.\n",
		(unsigned long long)rblock << 9, size);
	return ret;
}

static int lo_write_bvec(struct file *file, struct bio_vec *bvec, loff_t *ppos)
{
	struct iov_iter i;
	ssize_t bw;

	iov_iter_bvec(&i, WRITE, bvec, 1, bvec->bv_len);

	file_start_write(file);
	bw = vfs_iter_write(file, &i, ppos, 0);
	file_end_write(file);

	if (likely(bw == bvec->bv_len))
		return 0;

	printk_ratelimited(KERN_ERR
		"loop: Write error at byte offset %llu, length %i.\n",
		(unsigned long long)*ppos, bvec->bv_len);
	if (bw >= 0)
		bw = -EIO;
	return bw;
}
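
/*
 * Backing-file I/O comes in three flavours: the "simple" paths below issue
 * buffered reads/writes through vfs_iter_read()/vfs_iter_write(), the
 * "transfer" paths additionally run the legacy transformation through a
 * bounce page, and lo_rw_aio() further down submits direct, asynchronous
 * I/O when LO_FLAGS_DIRECT_IO is enabled.
 */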
static int lo_write_simple(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	int ret = 0;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos);
		if (ret < 0)
			break;
		cond_resched();
	}

	return ret;
}

/*
 * This is the slow, transforming version that needs to double buffer the
 * data as it cannot do the transformations in place without having direct
 * access to the destination pages of the backing file.
 */
static int lo_write_transfer(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct page *page;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		ret = lo_do_transfer(lo, WRITE, page, 0, bvec.bv_page,
			bvec.bv_offset, bvec.bv_len, pos >> 9);
		if (unlikely(ret))
			break;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;
		ret = lo_write_bvec(lo->lo_backing_file, &b, &pos);
		if (ret < 0)
			break;
	}

	__free_page(page);
	return ret;
}
static int lo_read_simple(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	struct iov_iter i;
	ssize_t len;

	rq_for_each_segment(bvec, rq, iter) {
		iov_iter_bvec(&i, READ, &bvec, 1, bvec.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0)
			return len;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
		cond_resched();
	}

	return 0;
}

static int lo_read_transfer(struct loop_device *lo, struct request *rq,
		loff_t pos)
{
	struct bio_vec bvec, b;
	struct req_iterator iter;
	struct iov_iter i;
	struct page *page;
	ssize_t len;
	int ret = 0;

	page = alloc_page(GFP_NOIO);
	if (unlikely(!page))
		return -ENOMEM;

	rq_for_each_segment(bvec, rq, iter) {
		loff_t offset = pos;

		b.bv_page = page;
		b.bv_offset = 0;
		b.bv_len = bvec.bv_len;

		iov_iter_bvec(&i, READ, &b, 1, b.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0) {
			ret = len;
			goto out_free_page;
		}

		ret = lo_do_transfer(lo, READ, page, 0, bvec.bv_page,
			bvec.bv_offset, len, offset >> 9);
		if (ret)
			goto out_free_page;

		flush_dcache_page(bvec.bv_page);

		if (len != bvec.bv_len) {
			struct bio *bio;

			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
	}

	ret = 0;
out_free_page:
	__free_page(page);
	return ret;
}
static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos,
			int mode)
{
	/*
	 * We use fallocate to manipulate the space mappings used by the image
	 * a.k.a. discard/zerorange. However we do not support this if
	 * encryption is enabled, because it may give an attacker useful
	 * information.
	 */
	struct file *file = lo->lo_backing_file;
	struct request_queue *q = lo->lo_queue;
	int ret;

	mode |= FALLOC_FL_KEEP_SIZE;

	if (!blk_queue_discard(q)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = file->f_op->fallocate(file, mode, pos, blk_rq_bytes(rq));
	if (unlikely(ret && ret != -EINVAL && ret != -EOPNOTSUPP))
		ret = -EIO;
 out:
	return ret;
}

static int lo_req_flush(struct loop_device *lo, struct request *rq)
{
	struct file *file = lo->lo_backing_file;
	int ret = vfs_fsync(file, 0);
	if (unlikely(ret && ret != -EINVAL))
		ret = -EIO;

	return ret;
}
static void lo_complete_rq(struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	blk_status_t ret = BLK_STS_OK;

	if (!cmd->use_aio || cmd->ret < 0 || cmd->ret == blk_rq_bytes(rq) ||
	    req_op(rq) != REQ_OP_READ) {
		if (cmd->ret < 0)
			ret = errno_to_blk_status(cmd->ret);
		goto end_io;
	}

	/*
	 * Short READ - if we got some data, advance our request and
	 * retry it. If we got no data, end the rest with EIO.
	 */
	if (cmd->ret) {
		blk_update_request(rq, BLK_STS_OK, cmd->ret);
		cmd->ret = 0;
		blk_mq_requeue_request(rq, true);
	} else {
		if (cmd->use_aio) {
			struct bio *bio = rq->bio;

			while (bio) {
				zero_fill_bio(bio);
				bio = bio->bi_next;
			}
		}
		ret = BLK_STS_IOERR;
end_io:
		blk_mq_end_request(rq, ret);
	}
}
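
/*
 * For AIO the command carries two references: one for the submission path
 * and one for the completion callback. blk_mq_complete_request() may only
 * run once both have been dropped via lo_rw_aio_do_completion().
 */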
static void lo_rw_aio_do_completion(struct loop_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);

	if (!atomic_dec_and_test(&cmd->ref))
		return;
	kfree(cmd->bvec);
	cmd->bvec = NULL;
	blk_mq_complete_request(rq);
}

static void lo_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
	struct loop_cmd *cmd = container_of(iocb, struct loop_cmd, iocb);

	if (cmd->css)
		css_put(cmd->css);
	cmd->ret = ret;
	lo_rw_aio_do_completion(cmd);
}
static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd,
		     loff_t pos, bool rw)
{
	struct iov_iter iter;
	struct req_iterator rq_iter;
	struct bio_vec *bvec;
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	struct bio *bio = rq->bio;
	struct file *file = lo->lo_backing_file;
	struct bio_vec tmp;
	unsigned int offset;
	int nr_bvec = 0;
	int ret;

	rq_for_each_bvec(tmp, rq, rq_iter)
		nr_bvec++;

	if (rq->bio != rq->biotail) {

		bvec = kmalloc_array(nr_bvec, sizeof(struct bio_vec),
				     GFP_NOIO);
		if (!bvec)
			return -EIO;
		cmd->bvec = bvec;

		/*
		 * The bios of the request may be started from the middle of
		 * the 'bvec' because of bio splitting, so we can't directly
		 * copy bio->bi_iov_vec to new bvec. The rq_for_each_bvec
		 * API will take care of all details for us.
		 */
		rq_for_each_bvec(tmp, rq, rq_iter) {
			*bvec = tmp;
			bvec++;
		}
		bvec = cmd->bvec;
		offset = 0;
	} else {
		/*
		 * Same here, this bio may be started from the middle of the
		 * 'bvec' because of bio splitting, so the offset from the bvec
		 * must be passed to the iov iterator.
		 */
		offset = bio->bi_iter.bi_bvec_done;
		bvec = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	}
	atomic_set(&cmd->ref, 2);

	iov_iter_bvec(&iter, rw, bvec, nr_bvec, blk_rq_bytes(rq));
	iter.iov_offset = offset;

	cmd->iocb.ki_pos = pos;
	cmd->iocb.ki_filp = file;
	cmd->iocb.ki_complete = lo_rw_aio_complete;
	cmd->iocb.ki_flags = IOCB_DIRECT;
	cmd->iocb.ki_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
	if (cmd->css)
		kthread_associate_blkcg(cmd->css);

	if (rw == WRITE)
		ret = call_write_iter(file, &cmd->iocb, &iter);
	else
		ret = call_read_iter(file, &cmd->iocb, &iter);

	lo_rw_aio_do_completion(cmd);
	kthread_associate_blkcg(NULL);

	if (ret != -EIOCBQUEUED)
		cmd->iocb.ki_complete(&cmd->iocb, ret, 0);
	return 0;
}
static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

	/*
	 * lo_write_simple and lo_read_simple should have been covered
	 * by an io submit style function like lo_rw_aio(); one blocker
	 * is that lo_read_simple() needs to call flush_dcache_page after
	 * the page is written from the kernel, and it isn't easy to handle
	 * this in an io submit style function which submits all segments
	 * of the req at one time. And direct read IO doesn't need to
	 * run flush_dcache_page().
	 */
	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
		return lo_req_flush(lo, rq);
	case REQ_OP_WRITE_ZEROES:
		/*
		 * If the caller doesn't want deallocation, call zeroout to
		 * write zeroes to the range. Otherwise, punch it out.
		 */
		return lo_fallocate(lo, rq, pos,
			(rq->cmd_flags & REQ_NOUNMAP) ?
				FALLOC_FL_ZERO_RANGE :
				FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_DISCARD:
		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_WRITE:
		if (lo->transfer)
			return lo_write_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, WRITE);
		else
			return lo_write_simple(lo, rq, pos);
	case REQ_OP_READ:
		if (lo->transfer)
			return lo_read_transfer(lo, rq, pos);
		else if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, READ);
		else
			return lo_read_simple(lo, rq, pos);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}
static inline void loop_update_dio(struct loop_device *lo)
{
	__loop_update_dio(lo, io_is_direct(lo->lo_backing_file) |
			lo->use_dio);
}

static void loop_reread_partitions(struct loop_device *lo,
				   struct block_device *bdev)
{
	int rc;

	mutex_lock(&bdev->bd_mutex);
	rc = bdev_disk_changed(bdev, false);
	mutex_unlock(&bdev->bd_mutex);
	if (rc)
		pr_warn("%s: partition scan of loop%d (%s) failed (rc=%d)\n",
			__func__, lo->lo_number, lo->lo_file_name, rc);
}

static inline int is_loop_device(struct file *file)
{
	struct inode *i = file->f_mapping->host;

	return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR;
}
static int loop_validate_file(struct file *file, struct block_device *bdev)
{
	struct inode	*inode = file->f_mapping->host;
	struct file	*f = file;

	/* Avoid recursion */
	while (is_loop_device(f)) {
		struct loop_device *l;

		if (f->f_mapping->host->i_bdev == bdev)
			return -EBADF;

		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
		if (l->lo_state != Lo_bound)
			return -EINVAL;
		f = l->lo_backing_file;
	}
	if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))
		return -EINVAL;
	return 0;
}
/*
 * loop_change_fd switches the backing store of a loopback device to
 * a new file. This is useful for operating system installers to free up
 * the original file and in High Availability environments to switch to
 * an alternative location for the content in case of server meltdown.
 * This can only work if the loop device is used read-only, and if the
 * new backing store is the same size and type as the old backing store.
 */
static int loop_change_fd(struct loop_device *lo, struct block_device *bdev,
			  unsigned int arg)
{
	struct file	*file = NULL, *old_file;
	int		error;
	bool		partscan;

	error = mutex_lock_killable(&loop_ctl_mutex);
	if (error)
		return error;
	error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out_err;

	/* the loop device has to be read-only */
	error = -EINVAL;
	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY))
		goto out_err;

	error = -EBADF;
	file = fget(arg);
	if (!file)
		goto out_err;

	error = loop_validate_file(file, bdev);
	if (error)
		goto out_err;

	old_file = lo->lo_backing_file;

	error = -EINVAL;

	/* size of the new backing store needs to be the same */
	if (get_loop_size(lo, file) != get_loop_size(lo, old_file))
		goto out_err;

	/* and ... switch */
	blk_mq_freeze_queue(lo->lo_queue);
	mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
	lo->lo_backing_file = file;
	lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping);
	mapping_set_gfp_mask(file->f_mapping,
			     lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
	loop_update_dio(lo);
	blk_mq_unfreeze_queue(lo->lo_queue);
	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;
	mutex_unlock(&loop_ctl_mutex);
	/*
	 * We must drop file reference outside of loop_ctl_mutex as dropping
	 * the file ref can take bd_mutex which creates circular locking
	 * dependency.
	 */
	fput(old_file);
	if (partscan)
		loop_reread_partitions(lo, bdev);
	return 0;

out_err:
	mutex_unlock(&loop_ctl_mutex);
	if (file)
		fput(file);
	return error;
}
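
/*
 * Example (userspace, illustrative only): replace the backing file of a
 * read-only loop device with an identically sized file:
 *
 *	int new_fd = open("other.img", O_RDONLY);
 *	ioctl(loop_fd, LOOP_CHANGE_FD, new_fd);
 */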
/* loop sysfs attributes */

static ssize_t loop_attr_show(struct device *dev, char *page,
			      ssize_t (*callback)(struct loop_device *, char *))
{
	struct gendisk *disk = dev_to_disk(dev);
	struct loop_device *lo = disk->private_data;

	return callback(lo, page);
}

#define LOOP_ATTR_RO(_name)						\
static ssize_t loop_attr_##_name##_show(struct loop_device *, char *);	\
static ssize_t loop_attr_do_show_##_name(struct device *d,		\
				struct device_attribute *attr, char *b)	\
{									\
	return loop_attr_show(d, b, loop_attr_##_name##_show);		\
}									\
static struct device_attribute loop_attr_##_name =			\
	__ATTR(_name, 0444, loop_attr_do_show_##_name, NULL);

static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf)
{
	ssize_t ret;
	char *p = NULL;

	spin_lock_irq(&lo->lo_lock);
	if (lo->lo_backing_file)
		p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1);
	spin_unlock_irq(&lo->lo_lock);

	if (IS_ERR_OR_NULL(p))
		ret = PTR_ERR(p);
	else {
		ret = strlen(p);
		memmove(buf, p, ret);
		buf[ret++] = '\n';
		buf[ret] = 0;
	}

	return ret;
}
static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_offset);
}

static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit);
}

static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf)
{
	int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR);

	return sprintf(buf, "%s\n", autoclear ? "1" : "0");
}

static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf)
{
	int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN);

	return sprintf(buf, "%s\n", partscan ? "1" : "0");
}

static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf)
{
	int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO);

	return sprintf(buf, "%s\n", dio ? "1" : "0");
}

LOOP_ATTR_RO(backing_file);
LOOP_ATTR_RO(offset);
LOOP_ATTR_RO(sizelimit);
LOOP_ATTR_RO(autoclear);
LOOP_ATTR_RO(partscan);
LOOP_ATTR_RO(dio);

static struct attribute *loop_attrs[] = {
	&loop_attr_backing_file.attr,
	&loop_attr_offset.attr,
	&loop_attr_sizelimit.attr,
	&loop_attr_autoclear.attr,
	&loop_attr_partscan.attr,
	&loop_attr_dio.attr,
	NULL,
};

static struct attribute_group loop_attribute_group = {
	.name = "loop",
	.attrs = loop_attrs,
};

static void loop_sysfs_init(struct loop_device *lo)
{
	lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj,
						&loop_attribute_group);
}

static void loop_sysfs_exit(struct loop_device *lo)
{
	if (lo->sysfs_inited)
		sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj,
				   &loop_attribute_group);
}
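
/*
 * The attribute group above is named "loop", so the files land under
 * /sys/block/loopN/loop/, e.g. /sys/block/loop0/loop/backing_file.
 */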
static void loop_config_discard(struct loop_device *lo)
{
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	struct request_queue *q = lo->lo_queue;

	/*
	 * If the backing device is a block device, mirror its zeroing
	 * capability. Set the discard sectors to the block device's zeroing
	 * capabilities because loop discards result in blkdev_issue_zeroout(),
	 * not blkdev_issue_discard(). This maintains consistent behavior with
	 * file-backed loop devices: discarded regions read back as zero.
	 */
	if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
		struct request_queue *backingq;

		backingq = bdev_get_queue(inode->i_bdev);
		blk_queue_max_discard_sectors(q,
			backingq->limits.max_write_zeroes_sectors);

		blk_queue_max_write_zeroes_sectors(q,
			backingq->limits.max_write_zeroes_sectors);

	/*
	 * We use punch hole to reclaim the free space used by the
	 * image a.k.a. discard. However we do not support discard if
	 * encryption is enabled, because it may give an attacker
	 * useful information.
	 */
	} else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
		q->limits.discard_granularity = 0;
		q->limits.discard_alignment = 0;
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_max_write_zeroes_sectors(q, 0);

	} else {
		q->limits.discard_granularity = inode->i_sb->s_blocksize;
		q->limits.discard_alignment = 0;

		blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
		blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
	}

	if (q->limits.max_write_zeroes_sectors)
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
}
static void loop_unprepare_queue(struct loop_device *lo)
{
	kthread_flush_worker(&lo->worker);
	kthread_stop(lo->worker_task);
}
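
/*
 * The per-device worker runs with PF_MEMALLOC_NOIO so its own allocations
 * cannot recurse into filesystem/block I/O, and with PF_LOCAL_THROTTLE so
 * that writeback throttling of the loop device cannot stall the thread
 * that performs the backing-file writeback itself.
 */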
static int loop_kthread_worker_fn(void *worker_ptr)
{
	current->flags |= PF_LOCAL_THROTTLE | PF_MEMALLOC_NOIO;
	return kthread_worker_fn(worker_ptr);
}
static int loop_prepare_queue(struct loop_device *lo)
{
	kthread_init_worker(&lo->worker);
	lo->worker_task = kthread_run(loop_kthread_worker_fn,
			&lo->worker, "loop%d", lo->lo_number);
	if (IS_ERR(lo->worker_task))
		return -ENOMEM;
	set_user_nice(lo->worker_task, MIN_NICE);
	return 0;
}
static void loop_update_rotational(struct loop_device *lo)
{
	struct file *file = lo->lo_backing_file;
	struct inode *file_inode = file->f_mapping->host;
	struct block_device *file_bdev = file_inode->i_sb->s_bdev;
	struct request_queue *q = lo->lo_queue;
	bool nonrot = true;

	/* not all filesystems (e.g. tmpfs) have a sb->s_bdev */
	if (file_bdev)
		nonrot = blk_queue_nonrot(bdev_get_queue(file_bdev));

	if (nonrot)
		blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
	else
		blk_queue_flag_clear(QUEUE_FLAG_NONROT, q);
}
static int
loop_release_xfer(struct loop_device *lo)
{
	int err = 0;
	struct loop_func_table *xfer = lo->lo_encryption;

	if (xfer) {
		if (xfer->release)
			err = xfer->release(lo);
		lo->transfer = NULL;
		lo->lo_encryption = NULL;
		module_put(xfer->owner);
	}
	return err;
}

static int
loop_init_xfer(struct loop_device *lo, struct loop_func_table *xfer,
	       const struct loop_info64 *i)
{
	int err = 0;

	if (xfer) {
		struct module *owner = xfer->owner;

		if (!try_module_get(owner))
			return -EINVAL;
		if (xfer->init)
			err = xfer->init(lo, i);
		if (err)
			module_put(owner);
		else
			lo->lo_encryption = xfer;
	}
	return err;
}
/**
 * loop_set_status_from_info - configure device from loop_info
 * @lo: struct loop_device to configure
 * @info: struct loop_info64 to configure the device with
 *
 * Configures the loop device parameters according to the passed
 * in loop_info64 configuration.
 */
static int
loop_set_status_from_info(struct loop_device *lo,
			  const struct loop_info64 *info)
{
	int err;
	struct loop_func_table *xfer;
	kuid_t uid = current_uid();

	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
		return -EINVAL;

	err = loop_release_xfer(lo);
	if (err)
		return err;

	if (info->lo_encrypt_type) {
		unsigned int type = info->lo_encrypt_type;

		if (type >= MAX_LO_CRYPT)
			return -EINVAL;
		xfer = xfer_funcs[type];
		if (xfer == NULL)
			return -EINVAL;
	} else
		xfer = NULL;

	err = loop_init_xfer(lo, xfer, info);
	if (err)
		return err;

	lo->lo_offset = info->lo_offset;
	lo->lo_sizelimit = info->lo_sizelimit;
	memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE);
	memcpy(lo->lo_crypt_name, info->lo_crypt_name, LO_NAME_SIZE);
	lo->lo_file_name[LO_NAME_SIZE-1] = 0;
	lo->lo_crypt_name[LO_NAME_SIZE-1] = 0;

	if (!xfer)
		xfer = &none_funcs;
	lo->transfer = xfer->transfer;
	lo->ioctl = xfer->ioctl;

	lo->lo_flags = info->lo_flags;

	lo->lo_encrypt_key_size = info->lo_encrypt_key_size;
	lo->lo_init[0] = info->lo_init[0];
	lo->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_key_size) {
		memcpy(lo->lo_encrypt_key, info->lo_encrypt_key,
		       info->lo_encrypt_key_size);
		lo->lo_key_owner = uid;
	}

	return 0;
}
static int loop_configure(struct loop_device *lo, fmode_t mode,
			  struct block_device *bdev,
			  const struct loop_config *config)
{
	struct file	*file;
	struct inode	*inode;
	struct address_space *mapping;
	struct block_device *claimed_bdev = NULL;
	int		error;
	loff_t		size;
	bool		partscan;
	unsigned short  bsize;

	/* This is safe, since we have a reference from open(). */
	__module_get(THIS_MODULE);

	error = -EBADF;
	file = fget(config->fd);
	if (!file)
		goto out;

	/*
	 * If we don't hold exclusive handle for the device, upgrade to it
	 * here to avoid changing device under exclusive owner.
	 */
	if (!(mode & FMODE_EXCL)) {
		claimed_bdev = bd_start_claiming(bdev, loop_configure);
		if (IS_ERR(claimed_bdev)) {
			error = PTR_ERR(claimed_bdev);
			goto out_putf;
		}
	}

	error = mutex_lock_killable(&loop_ctl_mutex);
	if (error)
		goto out_bdev;

	error = -EBUSY;
	if (lo->lo_state != Lo_unbound)
		goto out_unlock;

	error = loop_validate_file(file, bdev);
	if (error)
		goto out_unlock;

	mapping = file->f_mapping;
	inode = mapping->host;

	size = get_loop_size(lo, file);

	if ((config->info.lo_flags & ~LOOP_CONFIGURE_SETTABLE_FLAGS) != 0) {
		error = -EINVAL;
		goto out_unlock;
	}

	if (config->block_size) {
		error = loop_validate_block_size(config->block_size);
		if (error)
			goto out_unlock;
	}

	error = loop_set_status_from_info(lo, &config->info);
	if (error)
		goto out_unlock;

	if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) ||
	    !file->f_op->write_iter)
		lo->lo_flags |= LO_FLAGS_READ_ONLY;

	error = loop_prepare_queue(lo);
	if (error)
		goto out_unlock;

	set_device_ro(bdev, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
	lo->lo_device = bdev;
	lo->lo_backing_file = file;
	lo->old_gfp_mask = mapping_gfp_mask(mapping);
	mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));

	if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync)
		blk_queue_write_cache(lo->lo_queue, true, false);

	if (config->block_size)
		bsize = config->block_size;
	else if (io_is_direct(lo->lo_backing_file) && inode->i_sb->s_bdev)
		/* In case of direct I/O, match underlying block size */
		bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
	else
		bsize = 512;

	blk_queue_logical_block_size(lo->lo_queue, bsize);
	blk_queue_physical_block_size(lo->lo_queue, bsize);
	blk_queue_io_min(lo->lo_queue, bsize);

	loop_update_rotational(lo);
	loop_update_dio(lo);
	loop_sysfs_init(lo);
	loop_set_size(lo, size);

	set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
		      block_size(inode->i_bdev) : PAGE_SIZE);

	lo->lo_state = Lo_bound;
	if (part_shift)
		lo->lo_flags |= LO_FLAGS_PARTSCAN;
	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN;

	/* Grab the block_device to prevent its destruction after we
	 * put /dev/loopXX inode. Later in __loop_clr_fd() we bdput(bdev).
	 */
	bdgrab(bdev);
	mutex_unlock(&loop_ctl_mutex);
	if (partscan)
		loop_reread_partitions(lo, bdev);
	if (claimed_bdev)
		bd_abort_claiming(bdev, claimed_bdev, loop_configure);
	return 0;

out_unlock:
	mutex_unlock(&loop_ctl_mutex);
out_bdev:
	if (claimed_bdev)
		bd_abort_claiming(bdev, claimed_bdev, loop_configure);
out_putf:
	fput(file);
out:
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	return error;
}
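
/*
 * Example (userspace, illustrative only): bind a backing file and set an
 * offset in a single LOOP_CONFIGURE call instead of LOOP_SET_FD followed
 * by LOOP_SET_STATUS64:
 *
 *	struct loop_config cfg;
 *
 *	memset(&cfg, 0, sizeof(cfg));
 *	cfg.fd = open("disk.img", O_RDWR);
 *	cfg.info.lo_offset = 1024 * 1024;
 *	ioctl(open("/dev/loop0", O_RDWR), LOOP_CONFIGURE, &cfg);
 */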
static int __loop_clr_fd(struct loop_device *lo, bool release)
{
	struct file *filp = NULL;
	gfp_t gfp = lo->old_gfp_mask;
	struct block_device *bdev = lo->lo_device;
	int err = 0;
	bool partscan = false;
	int lo_number;

	mutex_lock(&loop_ctl_mutex);
	if (WARN_ON_ONCE(lo->lo_state != Lo_rundown)) {
		err = -ENXIO;
		goto out_unlock;
	}

	filp = lo->lo_backing_file;
	if (filp == NULL) {
		err = -EINVAL;
		goto out_unlock;
	}

	/* freeze request queue during the transition */
	blk_mq_freeze_queue(lo->lo_queue);

	spin_lock_irq(&lo->lo_lock);
	lo->lo_backing_file = NULL;
	spin_unlock_irq(&lo->lo_lock);

	loop_release_xfer(lo);
	lo->transfer = NULL;
	lo->ioctl = NULL;
	lo->lo_device = NULL;
	lo->lo_encryption = NULL;
	lo->lo_offset = 0;
	lo->lo_sizelimit = 0;
	lo->lo_encrypt_key_size = 0;
	memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
	memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
	memset(lo->lo_file_name, 0, LO_NAME_SIZE);
	blk_queue_logical_block_size(lo->lo_queue, 512);
	blk_queue_physical_block_size(lo->lo_queue, 512);
	blk_queue_io_min(lo->lo_queue, 512);
	if (bdev) {
		bdput(bdev);
		invalidate_bdev(bdev);
		bdev->bd_inode->i_mapping->wb_err = 0;
	}
	set_capacity(lo->lo_disk, 0);
	loop_sysfs_exit(lo);
	if (bdev) {
		bd_set_size(bdev, 0);
		/* let user-space know about this change */
		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	}
	mapping_set_gfp_mask(filp->f_mapping, gfp);
	/* This is safe: open() is still holding a reference. */
	module_put(THIS_MODULE);
	blk_mq_unfreeze_queue(lo->lo_queue);

	partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
	lo_number = lo->lo_number;
	loop_unprepare_queue(lo);
out_unlock:
	mutex_unlock(&loop_ctl_mutex);
	if (partscan) {
		/*
		 * bd_mutex has been held already in release path, so don't
		 * acquire it if this function is called in such case.
		 *
		 * If the reread partition isn't from release path, lo_refcnt
		 * must be at least one and it can only become zero when the
		 * current holder is released.
		 */
		if (!release)
			mutex_lock(&bdev->bd_mutex);
		err = bdev_disk_changed(bdev, false);
		if (!release)
			mutex_unlock(&bdev->bd_mutex);
		if (err)
			pr_warn("%s: partition scan of loop%d failed (rc=%d)\n",
				__func__, lo_number, err);
		/* Device is gone, no point in returning error */
		err = 0;
	}

	/*
	 * lo->lo_state is set to Lo_unbound here after above partscan has
	 * finished.
	 *
	 * There cannot be anybody else entering __loop_clr_fd() as
	 * lo->lo_backing_file is already cleared and Lo_rundown state
	 * protects us from all the other places trying to change the 'lo'
	 * device.
	 */
	mutex_lock(&loop_ctl_mutex);
	lo->lo_flags = 0;
	if (!part_shift)
		lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
	lo->lo_state = Lo_unbound;
	mutex_unlock(&loop_ctl_mutex);

	/*
	 * Need not hold loop_ctl_mutex to fput backing file.
	 * Calling fput holding loop_ctl_mutex triggers a circular
	 * lock dependency possibility warning as fput can take
	 * bd_mutex which is usually taken before loop_ctl_mutex.
	 */
	if (filp)
		fput(filp);
	return err;
}
static int loop_clr_fd(struct loop_device *lo)
{
	int err;

	err = mutex_lock_killable(&loop_ctl_mutex);
	if (err)
		return err;
	if (lo->lo_state != Lo_bound) {
		mutex_unlock(&loop_ctl_mutex);
		return -ENXIO;
	}
	/*
	 * If we've explicitly asked to tear down the loop device,
	 * and it has an elevated reference count, set it for auto-teardown when
	 * the last reference goes away. This stops $!~#$@ udev from
	 * preventing teardown because it decided that it needs to run blkid on
	 * the loopback device whenever it appears. xfstests is notorious for
	 * failing tests because blkid via udev races with a sequence like
	 * "losetup <dev>; mkfs; losetup -d <dev>", causing the losetup -d
	 * command to fail with EBUSY.
	 */
	if (atomic_read(&lo->lo_refcnt) > 1) {
		lo->lo_flags |= LO_FLAGS_AUTOCLEAR;
		mutex_unlock(&loop_ctl_mutex);
		return 0;
	}
	lo->lo_state = Lo_rundown;
	mutex_unlock(&loop_ctl_mutex);

	return __loop_clr_fd(lo, false);
}
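
/* Userspace reaches this through LOOP_CLR_FD, e.g. "losetup -d /dev/loop0". */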
static int
loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
{
	int err;
	struct block_device *bdev;
	kuid_t uid = current_uid();
	int prev_lo_flags;
	bool partscan = false;
	bool size_changed = false;

	err = mutex_lock_killable(&loop_ctl_mutex);
	if (err)
		return err;
	if (lo->lo_encrypt_key_size &&
	    !uid_eq(lo->lo_key_owner, uid) &&
	    !capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto out_unlock;
	}
	if (lo->lo_state != Lo_bound) {
		err = -ENXIO;
		goto out_unlock;
	}

	if (lo->lo_offset != info->lo_offset ||
	    lo->lo_sizelimit != info->lo_sizelimit) {
		size_changed = true;
		sync_blockdev(lo->lo_device);
		kill_bdev(lo->lo_device);
	}

	/* I/O needs to be drained during transfer transition */
	blk_mq_freeze_queue(lo->lo_queue);

	if (size_changed && lo->lo_device->bd_inode->i_mapping->nrpages) {
		/* If any pages were dirtied after kill_bdev(), try again */
		err = -EAGAIN;
		pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
			__func__, lo->lo_number, lo->lo_file_name,
			lo->lo_device->bd_inode->i_mapping->nrpages);
		goto out_unfreeze;
	}

	prev_lo_flags = lo->lo_flags;

	err = loop_set_status_from_info(lo, info);
	if (err)
		goto out_unfreeze;

	/* Mask out flags that can't be set using LOOP_SET_STATUS. */
	lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS;
	/* For those flags, use the previous values instead */
	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS;
	/* For flags that can't be cleared, use previous values too */
	lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS;

	if (size_changed) {
		loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit,
					   lo->lo_backing_file);
		loop_set_size(lo, new_size);
	}

	loop_config_discard(lo);

	/* update dio if lo_offset or transfer is changed */
	__loop_update_dio(lo, lo->use_dio);

out_unfreeze:
	blk_mq_unfreeze_queue(lo->lo_queue);

	if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) &&
	     !(prev_lo_flags & LO_FLAGS_PARTSCAN)) {
		lo->lo_disk->flags &= ~GENHD_FL_NO_PART_SCAN;
		bdev = lo->lo_device;
		partscan = true;
	}
out_unlock:
	mutex_unlock(&loop_ctl_mutex);
	if (partscan)
		loop_reread_partitions(lo, bdev);

	return err;
}
static int
loop_get_status(struct loop_device *lo, struct loop_info64 *info)
{
	struct path path;
	struct kstat stat;
	int ret;

	ret = mutex_lock_killable(&loop_ctl_mutex);
	if (ret)
		return ret;
	if (lo->lo_state != Lo_bound) {
		mutex_unlock(&loop_ctl_mutex);
		return -ENXIO;
	}

	memset(info, 0, sizeof(*info));
	info->lo_number = lo->lo_number;
	info->lo_offset = lo->lo_offset;
	info->lo_sizelimit = lo->lo_sizelimit;
	info->lo_flags = lo->lo_flags;
	memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
	info->lo_encrypt_type =
		lo->lo_encryption ? lo->lo_encryption->number : 0;
	if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
		info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
		memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
		       lo->lo_encrypt_key_size);
	}

	/* Drop loop_ctl_mutex while we call into the filesystem. */
	path = lo->lo_backing_file->f_path;
	path_get(&path);
	mutex_unlock(&loop_ctl_mutex);
	ret = vfs_getattr(&path, &stat, STATX_INO, AT_STATX_SYNC_AS_STAT);
	if (!ret) {
		info->lo_device = huge_encode_dev(stat.dev);
		info->lo_inode = stat.ino;
		info->lo_rdevice = huge_encode_dev(stat.rdev);
	}
	path_put(&path);
	return ret;
}
static void
loop_info64_from_old(const struct loop_info *info, struct loop_info64 *info64)
{
	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info->lo_number;
	info64->lo_device = info->lo_device;
	info64->lo_inode = info->lo_inode;
	info64->lo_rdevice = info->lo_rdevice;
	info64->lo_offset = info->lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info->lo_encrypt_type;
	info64->lo_encrypt_key_size = info->lo_encrypt_key_size;
	info64->lo_flags = info->lo_flags;
	info64->lo_init[0] = info->lo_init[0];
	info64->lo_init[1] = info->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info->lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info->lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info->lo_encrypt_key, LO_KEY_SIZE);
}

static int
loop_info64_to_old(const struct loop_info64 *info64, struct loop_info *info)
{
	memset(info, 0, sizeof(*info));
	info->lo_number = info64->lo_number;
	info->lo_device = info64->lo_device;
	info->lo_inode = info64->lo_inode;
	info->lo_rdevice = info64->lo_rdevice;
	info->lo_offset = info64->lo_offset;
	info->lo_encrypt_type = info64->lo_encrypt_type;
	info->lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info->lo_flags = info64->lo_flags;
	info->lo_init[0] = info64->lo_init[0];
	info->lo_init[1] = info64->lo_init[1];
	if (info->lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info->lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info->lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info->lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info->lo_device != info64->lo_device ||
	    info->lo_rdevice != info64->lo_rdevice ||
	    info->lo_inode != info64->lo_inode ||
	    info->lo_offset != info64->lo_offset)
		return -EOVERFLOW;

	return 0;
}
static int
loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg)
{
	struct loop_info info;
	struct loop_info64 info64;

	if (copy_from_user(&info, arg, sizeof (struct loop_info)))
		return -EFAULT;
	loop_info64_from_old(&info, &info64);
	return loop_set_status(lo, &info64);
}

static int
loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg)
{
	struct loop_info64 info64;

	if (copy_from_user(&info64, arg, sizeof (struct loop_info64)))
		return -EFAULT;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) {
	struct loop_info info;
	struct loop_info64 info64;
	int err;

	if (!arg)
		return -EINVAL;
	err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_old(&info64, &info);
	if (!err && copy_to_user(arg, &info, sizeof(info)))
		err = -EFAULT;

	return err;
}

static int
loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) {
	struct loop_info64 info64;
	int err;

	if (!arg)
		return -EINVAL;
	err = loop_get_status(lo, &info64);
	if (!err && copy_to_user(arg, &info64, sizeof(info64)))
		err = -EFAULT;

	return err;
}
static int loop_set_capacity(struct loop_device *lo)
{
	loff_t size;

	if (unlikely(lo->lo_state != Lo_bound))
		return -ENXIO;

	size = get_loop_size(lo, lo->lo_backing_file);
	loop_set_size(lo, size);

	return 0;
}

static int loop_set_dio(struct loop_device *lo, unsigned long arg)
{
	int error = -ENXIO;
	if (lo->lo_state != Lo_bound)
		goto out;

	__loop_update_dio(lo, !!arg);
	if (lo->use_dio == !!arg)
		return 0;

	error = -EINVAL;
 out:
	return error;
}

static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
{
	int err = 0;

	if (lo->lo_state != Lo_bound)
		return -ENXIO;

	err = loop_validate_block_size(arg);
	if (err)
		return err;

	if (lo->lo_queue->limits.logical_block_size == arg)
		return 0;

	sync_blockdev(lo->lo_device);
	kill_bdev(lo->lo_device);

	blk_mq_freeze_queue(lo->lo_queue);

	/* kill_bdev should have truncated all the pages */
	if (lo->lo_device->bd_inode->i_mapping->nrpages) {
		err = -EAGAIN;
		pr_warn("%s: loop%d (%s) has still dirty pages (nrpages=%lu)\n",
			__func__, lo->lo_number, lo->lo_file_name,
			lo->lo_device->bd_inode->i_mapping->nrpages);
		goto out_unfreeze;
	}

	blk_queue_logical_block_size(lo->lo_queue, arg);
	blk_queue_physical_block_size(lo->lo_queue, arg);
	blk_queue_io_min(lo->lo_queue, arg);
	loop_update_dio(lo);
out_unfreeze:
	blk_mq_unfreeze_queue(lo->lo_queue);

	return err;
}
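
/* Note: util-linux exposes LOOP_SET_BLOCK_SIZE as "losetup --sector-size". */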
static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd,
			   unsigned long arg)
{
	int err;

	err = mutex_lock_killable(&loop_ctl_mutex);
	if (err)
		return err;
	switch (cmd) {
	case LOOP_SET_CAPACITY:
		err = loop_set_capacity(lo);
		break;
	case LOOP_SET_DIRECT_IO:
		err = loop_set_dio(lo, arg);
		break;
	case LOOP_SET_BLOCK_SIZE:
		err = loop_set_block_size(lo, arg);
		break;
	default:
		err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
	}
	mutex_unlock(&loop_ctl_mutex);
	return err;
}
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	void __user *argp = (void __user *) arg;
	int err;

	switch (cmd) {
	case LOOP_SET_FD: {
		/*
		 * Legacy case - pass in a zeroed out struct loop_config with
		 * only the file descriptor set, which corresponds with the
		 * default parameters we'd have used otherwise.
		 */
		struct loop_config config;

		memset(&config, 0, sizeof(config));
		config.fd = arg;

		return loop_configure(lo, mode, bdev, &config);
	}
	case LOOP_CONFIGURE: {
		struct loop_config config;

		if (copy_from_user(&config, argp, sizeof(config)))
			return -EFAULT;

		return loop_configure(lo, mode, bdev, &config);
	}
	case LOOP_CHANGE_FD:
		return loop_change_fd(lo, bdev, arg);
	case LOOP_CLR_FD:
		return loop_clr_fd(lo);
	case LOOP_SET_STATUS:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
			err = loop_set_status_old(lo, argp);
		}
		break;
	case LOOP_GET_STATUS:
		return loop_get_status_old(lo, argp);
	case LOOP_SET_STATUS64:
		err = -EPERM;
		if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN)) {
			err = loop_set_status64(lo, argp);
		}
		break;
	case LOOP_GET_STATUS64:
		return loop_get_status64(lo, argp);
	case LOOP_SET_CAPACITY:
	case LOOP_SET_DIRECT_IO:
	case LOOP_SET_BLOCK_SIZE:
		if (!(mode & FMODE_WRITE) && !capable(CAP_SYS_ADMIN))
			return -EPERM;
		/* Fall through */
	default:
		err = lo_simple_ioctl(lo, cmd, arg);
		break;
	}

	return err;
}
#ifdef CONFIG_COMPAT
struct compat_loop_info {
	compat_int_t	lo_number;      /* ioctl r/o */
	compat_dev_t	lo_device;      /* ioctl r/o */
	compat_ulong_t	lo_inode;       /* ioctl r/o */
	compat_dev_t	lo_rdevice;     /* ioctl r/o */
	compat_int_t	lo_offset;
	compat_int_t	lo_encrypt_type;
	compat_int_t	lo_encrypt_key_size;    /* ioctl w/o */
	compat_int_t	lo_flags;       /* ioctl r/o */
	char		lo_name[LO_NAME_SIZE];
	unsigned char	lo_encrypt_key[LO_KEY_SIZE]; /* ioctl w/o */
	compat_ulong_t	lo_init[2];
	char		reserved[4];
};
/*
 * Transfer 32-bit compatibility structure in userspace to 64-bit loop info
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_from_compat(const struct compat_loop_info __user *arg,
			struct loop_info64 *info64)
{
	struct compat_loop_info info;

	if (copy_from_user(&info, arg, sizeof(info)))
		return -EFAULT;

	memset(info64, 0, sizeof(*info64));
	info64->lo_number = info.lo_number;
	info64->lo_device = info.lo_device;
	info64->lo_inode = info.lo_inode;
	info64->lo_rdevice = info.lo_rdevice;
	info64->lo_offset = info.lo_offset;
	info64->lo_sizelimit = 0;
	info64->lo_encrypt_type = info.lo_encrypt_type;
	info64->lo_encrypt_key_size = info.lo_encrypt_key_size;
	info64->lo_flags = info.lo_flags;
	info64->lo_init[0] = info.lo_init[0];
	info64->lo_init[1] = info.lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info64->lo_crypt_name, info.lo_name, LO_NAME_SIZE);
	else
		memcpy(info64->lo_file_name, info.lo_name, LO_NAME_SIZE);
	memcpy(info64->lo_encrypt_key, info.lo_encrypt_key, LO_KEY_SIZE);
	return 0;
}
/*
 * Transfer 64-bit loop info to 32-bit compatibility structure in userspace
 * - noinlined to reduce stack space usage in main part of driver
 */
static noinline int
loop_info64_to_compat(const struct loop_info64 *info64,
		      struct compat_loop_info __user *arg)
{
	struct compat_loop_info info;

	memset(&info, 0, sizeof(info));
	info.lo_number = info64->lo_number;
	info.lo_device = info64->lo_device;
	info.lo_inode = info64->lo_inode;
	info.lo_rdevice = info64->lo_rdevice;
	info.lo_offset = info64->lo_offset;
	info.lo_encrypt_type = info64->lo_encrypt_type;
	info.lo_encrypt_key_size = info64->lo_encrypt_key_size;
	info.lo_flags = info64->lo_flags;
	info.lo_init[0] = info64->lo_init[0];
	info.lo_init[1] = info64->lo_init[1];
	if (info.lo_encrypt_type == LO_CRYPT_CRYPTOAPI)
		memcpy(info.lo_name, info64->lo_crypt_name, LO_NAME_SIZE);
	else
		memcpy(info.lo_name, info64->lo_file_name, LO_NAME_SIZE);
	memcpy(info.lo_encrypt_key, info64->lo_encrypt_key, LO_KEY_SIZE);

	/* error in case values were truncated */
	if (info.lo_device != info64->lo_device ||
	    info.lo_rdevice != info64->lo_rdevice ||
	    info.lo_inode != info64->lo_inode ||
	    info.lo_offset != info64->lo_offset ||
	    info.lo_init[0] != info64->lo_init[0] ||
	    info.lo_init[1] != info64->lo_init[1])
		return -EOVERFLOW;

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}
static int
loop_set_status_compat(struct loop_device *lo,
		       const struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int ret;

	ret = loop_info64_from_compat(arg, &info64);
	if (ret < 0)
		return ret;
	return loop_set_status(lo, &info64);
}

static int
loop_get_status_compat(struct loop_device *lo,
		       struct compat_loop_info __user *arg)
{
	struct loop_info64 info64;
	int err;

	if (!arg)
		return -EINVAL;
	err = loop_get_status(lo, &info64);
	if (!err)
		err = loop_info64_to_compat(&info64, arg);
	return err;
}
static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
			   unsigned int cmd, unsigned long arg)
{
	struct loop_device *lo = bdev->bd_disk->private_data;
	int err;

	switch(cmd) {
	case LOOP_SET_STATUS:
		err = loop_set_status_compat(lo,
			     (const struct compat_loop_info __user *)arg);
		break;
	case LOOP_GET_STATUS:
		err = loop_get_status_compat(lo,
			     (struct compat_loop_info __user *)arg);
		break;
	case LOOP_SET_CAPACITY:
	case LOOP_CLR_FD:
	case LOOP_GET_STATUS64:
	case LOOP_SET_STATUS64:
	case LOOP_CONFIGURE:
		arg = (unsigned long) compat_ptr(arg);
		/* fall through */
	case LOOP_SET_FD:
	case LOOP_CHANGE_FD:
	case LOOP_SET_BLOCK_SIZE:
	case LOOP_SET_DIRECT_IO:
		err = lo_ioctl(bdev, mode, cmd, arg);
		break;
	default:
		err = -ENOIOCTLCMD;
		break;
	}
	return err;
}
#endif
static int lo_open(struct block_device *bdev, fmode_t mode)
{
	struct loop_device *lo;
	int err;

	err = mutex_lock_killable(&loop_ctl_mutex);
	if (err)
		return err;
	lo = bdev->bd_disk->private_data;
	if (!lo) {
		err = -ENXIO;
		goto out;
	}

	atomic_inc(&lo->lo_refcnt);
out:
	mutex_unlock(&loop_ctl_mutex);
	return err;
}

static void lo_release(struct gendisk *disk, fmode_t mode)
{
	struct loop_device *lo;

	mutex_lock(&loop_ctl_mutex);
	lo = disk->private_data;
	if (atomic_dec_return(&lo->lo_refcnt))
		goto out_unlock;

	if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
		if (lo->lo_state != Lo_bound)
			goto out_unlock;
		lo->lo_state = Lo_rundown;
		mutex_unlock(&loop_ctl_mutex);
		/*
		 * In autoclear mode, stop the loop thread
		 * and remove configuration after last close.
		 */
		__loop_clr_fd(lo, true);
		return;
	} else if (lo->lo_state == Lo_bound) {
		/*
		 * Otherwise keep thread (if running) and config,
		 * but flush possible ongoing bios in thread.
		 */
		blk_mq_freeze_queue(lo->lo_queue);
		blk_mq_unfreeze_queue(lo->lo_queue);
	}

out_unlock:
	mutex_unlock(&loop_ctl_mutex);
}
static const struct block_device_operations lo_fops = {
	.owner =	THIS_MODULE,
	.open =		lo_open,
	.release =	lo_release,
	.ioctl =	lo_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl =	lo_compat_ioctl,
#endif
};
/*
 * And now the modules code and kernel interface.
 */
static int max_loop;
module_param(max_loop, int, 0444);
MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
MODULE_LICENSE("GPL");
MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR);
int loop_register_transfer(struct loop_func_table *funcs)
{
	unsigned int n = funcs->number;

	if (n >= MAX_LO_CRYPT || xfer_funcs[n])
		return -EINVAL;
	xfer_funcs[n] = funcs;
	return 0;
}

static int unregister_transfer_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_func_table *xfer = data;

	mutex_lock(&loop_ctl_mutex);
	if (lo->lo_encryption == xfer)
		loop_release_xfer(lo);
	mutex_unlock(&loop_ctl_mutex);
	return 0;
}

int loop_unregister_transfer(int number)
{
	unsigned int n = number;
	struct loop_func_table *xfer;

	if (n == 0 || n >= MAX_LO_CRYPT || (xfer = xfer_funcs[n]) == NULL)
		return -EINVAL;

	xfer_funcs[n] = NULL;
	idr_for_each(&loop_index_idr, &unregister_transfer_cb, xfer);
	return 0;
}

EXPORT_SYMBOL(loop_register_transfer);
EXPORT_SYMBOL(loop_unregister_transfer);
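
/*
 * These exports are the hook points for legacy transfer modules such as
 * cryptoloop, which registers its transforms via loop_register_transfer().
 */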
static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	struct loop_device *lo = rq->q->queuedata;

	blk_mq_start_request(rq);

	if (lo->lo_state != Lo_bound)
		return BLK_STS_IOERR;

	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		cmd->use_aio = false;
		break;
	default:
		cmd->use_aio = lo->use_dio;
		break;
	}

	/* always use the first bio's css */
#ifdef CONFIG_BLK_CGROUP
	if (cmd->use_aio && rq->bio && rq->bio->bi_blkg) {
		cmd->css = &bio_blkcg(rq->bio)->css;
		css_get(cmd->css);
	} else
#endif
		cmd->css = NULL;
	kthread_queue_work(&lo->worker, &cmd->work);

	return BLK_STS_OK;
}
static void loop_handle_cmd(struct loop_cmd *cmd)
{
	struct request *rq = blk_mq_rq_from_pdu(cmd);
	const bool write = op_is_write(req_op(rq));
	struct loop_device *lo = rq->q->queuedata;
	int ret = 0;

	if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) {
		ret = -EIO;
		goto failed;
	}

	ret = do_req_filebacked(lo, rq);
 failed:
	/* complete non-aio request */
	if (!cmd->use_aio || ret) {
		if (ret == -EOPNOTSUPP)
			cmd->ret = ret;
		else
			cmd->ret = ret ? -EIO : 0;
		blk_mq_complete_request(rq);
	}
}

static void loop_queue_work(struct kthread_work *work)
{
	struct loop_cmd *cmd =
		container_of(work, struct loop_cmd, work);

	loop_handle_cmd(cmd);
}
static int loop_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);

	kthread_init_work(&cmd->work, loop_queue_work);
	return 0;
}

static const struct blk_mq_ops loop_mq_ops = {
	.queue_rq       = loop_queue_rq,
	.init_request	= loop_init_request,
	.complete	= lo_complete_rq,
};
static int loop_add(struct loop_device **l, int i)
{
	struct loop_device *lo;
	struct gendisk *disk;
	int err;

	err = -ENOMEM;
	lo = kzalloc(sizeof(*lo), GFP_KERNEL);
	if (!lo)
		goto out;

	lo->lo_state = Lo_unbound;

	/* allocate id, if @id >= 0, we're requesting that specific id */
	if (i >= 0) {
		err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL);
		if (err == -ENOSPC)
			err = -EEXIST;
	} else {
		err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL);
	}
	if (err < 0)
		goto out_free_dev;
	i = err;

	err = -ENOMEM;
	lo->tag_set.ops = &loop_mq_ops;
	lo->tag_set.nr_hw_queues = 1;
	lo->tag_set.queue_depth = 128;
	lo->tag_set.numa_node = NUMA_NO_NODE;
	lo->tag_set.cmd_size = sizeof(struct loop_cmd);
	lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING;
	lo->tag_set.driver_data = lo;

	err = blk_mq_alloc_tag_set(&lo->tag_set);
	if (err)
		goto out_free_idr;

	lo->lo_queue = blk_mq_init_queue(&lo->tag_set);
	if (IS_ERR(lo->lo_queue)) {
		err = PTR_ERR(lo->lo_queue);
		goto out_cleanup_tags;
	}
	lo->lo_queue->queuedata = lo;

	blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);

	/*
	 * By default, we do buffer IO, so it doesn't make sense to enable
	 * merge because the I/O submitted to backing file is handled page by
	 * page. For directio mode, merge does help to dispatch bigger request
	 * to underlayer disk. We will enable merge once directio is enabled.
	 */
	blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);

	err = -ENOMEM;
	disk = lo->lo_disk = alloc_disk(1 << part_shift);
	if (!disk)
		goto out_free_queue;

	/*
	 * Disable partition scanning by default. The in-kernel partition
	 * scanning can be requested individually per-device during its
	 * setup. Userspace can always add and remove partitions from all
	 * devices. The needed partition minors are allocated from the
	 * extended minor space, the main loop device numbers will continue
	 * to match the loop minors, regardless of the number of partitions
	 * used.
	 *
	 * If max_part is given, partition scanning is globally enabled for
	 * all loop devices. The minors for the main loop devices will be
	 * multiples of max_part.
	 *
	 * Note: Global-for-all-devices, set-only-at-init, read-only module
	 * parameters like 'max_loop' and 'max_part' make things needlessly
	 * complicated, are too static, inflexible and may surprise
	 * userspace tools. Parameters like this in general should be avoided.
	 */
	if (!part_shift)
		disk->flags |= GENHD_FL_NO_PART_SCAN;
	disk->flags |= GENHD_FL_EXT_DEVT;
	atomic_set(&lo->lo_refcnt, 0);
	lo->lo_number		= i;
	spin_lock_init(&lo->lo_lock);
	disk->major		= LOOP_MAJOR;
	disk->first_minor	= i << part_shift;
	disk->fops		= &lo_fops;
	disk->private_data	= lo;
	disk->queue		= lo->lo_queue;
	sprintf(disk->disk_name, "loop%d", i);
	add_disk(disk);
	*l = lo;
	return lo->lo_number;

out_free_queue:
	blk_cleanup_queue(lo->lo_queue);
out_cleanup_tags:
	blk_mq_free_tag_set(&lo->tag_set);
out_free_idr:
	idr_remove(&loop_index_idr, i);
out_free_dev:
	kfree(lo);
out:
	return err;
}
static void loop_remove(struct loop_device *lo)
{
	del_gendisk(lo->lo_disk);
	blk_cleanup_queue(lo->lo_queue);
	blk_mq_free_tag_set(&lo->tag_set);
	put_disk(lo->lo_disk);
	kfree(lo);
}
static int find_free_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;
	struct loop_device **l = data;

	if (lo->lo_state == Lo_unbound) {
		*l = lo;
		return 1;
	}
	return 0;
}

static int loop_lookup(struct loop_device **l, int i)
{
	struct loop_device *lo;
	int ret = -ENODEV;

	if (i < 0) {
		int err;

		err = idr_for_each(&loop_index_idr, &find_free_cb, &lo);
		if (err == 1) {
			*l = lo;
			ret = lo->lo_number;
		}
		goto out;
	}

	/* lookup and return a specific i */
	lo = idr_find(&loop_index_idr, i);
	if (lo) {
		*l = lo;
		ret = lo->lo_number;
	}
out:
	return ret;
}
static struct kobject *loop_probe(dev_t dev, int *part, void *data)
{
	struct loop_device *lo;
	struct kobject *kobj;
	int err;

	mutex_lock(&loop_ctl_mutex);
	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		err = loop_add(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		kobj = NULL;
	else
		kobj = get_disk_and_module(lo->lo_disk);
	mutex_unlock(&loop_ctl_mutex);

	*part = 0;
	return kobj;
}
static long loop_control_ioctl(struct file *file, unsigned int cmd,
			       unsigned long parm)
{
	struct loop_device *lo;
	int ret;

	ret = mutex_lock_killable(&loop_ctl_mutex);
	if (ret)
		return ret;

	ret = -ENOSYS;
	switch (cmd) {
	case LOOP_CTL_ADD:
		ret = loop_lookup(&lo, parm);
		if (ret >= 0) {
			ret = -EEXIST;
			break;
		}
		ret = loop_add(&lo, parm);
		break;
	case LOOP_CTL_REMOVE:
		ret = loop_lookup(&lo, parm);
		if (ret < 0)
			break;
		if (lo->lo_state != Lo_unbound) {
			ret = -EBUSY;
			break;
		}
		if (atomic_read(&lo->lo_refcnt) > 0) {
			ret = -EBUSY;
			break;
		}
		lo->lo_disk->private_data = NULL;
		idr_remove(&loop_index_idr, lo->lo_number);
		loop_remove(lo);
		break;
	case LOOP_CTL_GET_FREE:
		ret = loop_lookup(&lo, -1);
		if (ret >= 0)
			break;
		ret = loop_add(&lo, -1);
	}
	mutex_unlock(&loop_ctl_mutex);

	return ret;
}
static const struct file_operations loop_ctl_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= loop_control_ioctl,
	.compat_ioctl	= loop_control_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice loop_misc = {
	.minor		= LOOP_CTRL_MINOR,
	.name		= "loop-control",
	.fops		= &loop_ctl_fops,
};

MODULE_ALIAS_MISCDEV(LOOP_CTRL_MINOR);
MODULE_ALIAS("devname:loop-control");
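
/*
 * Example (userspace, illustrative only): ask for the first free device
 * instead of probing the /dev/loopN nodes one by one:
 *
 *	int ctl = open("/dev/loop-control", O_RDWR);
 *	int nr = ioctl(ctl, LOOP_CTL_GET_FREE);   (e.g. 7 => /dev/loop7)
 */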
static int __init loop_init(void)
{
	int i, nr;
	unsigned long range;
	struct loop_device *lo;
	int err;

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that the user can decide the correct minor
		 * number when creating more devices.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS) {
		err = -EINVAL;
		goto err_out;
	}

	if (max_loop > 1UL << (MINORBITS - part_shift)) {
		err = -EINVAL;
		goto err_out;
	}

	/*
	 * If max_loop is specified, create that many devices upfront.
	 * This also becomes a hard limit. If max_loop is not specified,
	 * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
	 * init time. Loop devices can be requested on-demand with the
	 * /dev/loop-control interface, or be instantiated by accessing
	 * a 'dead' device node.
	 */
	if (max_loop) {
		nr = max_loop;
		range = max_loop << part_shift;
	} else {
		nr = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
		range = 1UL << MINORBITS;
	}

	err = misc_register(&loop_misc);
	if (err < 0)
		goto err_out;


	if (register_blkdev(LOOP_MAJOR, "loop")) {
		err = -EIO;
		goto misc_out;
	}

	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
				  THIS_MODULE, loop_probe, NULL, NULL);

	/* pre-create number of devices given by config or max_loop */
	mutex_lock(&loop_ctl_mutex);
	for (i = 0; i < nr; i++)
		loop_add(&lo, i);
	mutex_unlock(&loop_ctl_mutex);

	printk(KERN_INFO "loop: module loaded\n");
	return 0;

misc_out:
	misc_deregister(&loop_misc);
err_out:
	return err;
}
static int loop_exit_cb(int id, void *ptr, void *data)
{
	struct loop_device *lo = ptr;

	loop_remove(lo);
	return 0;
}

static void __exit loop_exit(void)
{
	unsigned long range;

	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;

	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
	idr_destroy(&loop_index_idr);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");

	misc_deregister(&loop_misc);
}

module_init(loop_init);
module_exit(loop_exit);
#ifndef MODULE
static int __init max_loop_setup(char *str)
{
	max_loop = simple_strtol(str, NULL, 0);
	return 1;
}

__setup("max_loop=", max_loop_setup);
#endif
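
/*
 * Example: booting with "max_loop=8" pre-creates loop0..loop7 and makes
 * eight devices the hard limit (see the comment in loop_init() above).
 */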