Merge tag 'for-6.5/dm-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
[platform/kernel/linux-rpi.git] / drivers / md / dm.c
index ea1671c..f0f118a 100644 (file)
@@ -207,7 +207,7 @@ static int __init local_init(void)
        if (r)
                return r;
 
-       deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
+       deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0);
        if (!deferred_remove_workqueue) {
                r = -ENOMEM;
                goto out_uevent_exit;
@@ -1177,7 +1177,8 @@ static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
 }
 
 static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
-                            unsigned int max_granularity)
+                            unsigned int max_granularity,
+                            unsigned int max_sectors)
 {
        sector_t target_offset = dm_target_offset(ti, sector);
        sector_t len = max_io_len_target_boundary(ti, target_offset);
@@ -1191,13 +1192,13 @@ static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
        if (!max_granularity)
                return len;
        return min_t(sector_t, len,
-               min(queue_max_sectors(ti->table->md->queue),
+               min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
                    blk_chunk_sectors_left(target_offset, max_granularity)));
 }
 
 static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
 {
-       return __max_io_len(ti, sector, ti->max_io_len);
+       return __max_io_len(ti, sector, ti->max_io_len, 0);
 }
 
 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
@@ -1586,12 +1587,13 @@ static void __send_empty_flush(struct clone_info *ci)
 
 static void __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
                                        unsigned int num_bios,
-                                       unsigned int max_granularity)
+                                       unsigned int max_granularity,
+                                       unsigned int max_sectors)
 {
        unsigned int len, bios;
 
        len = min_t(sector_t, ci->sector_count,
-                   __max_io_len(ti, ci->sector, max_granularity));
+                   __max_io_len(ti, ci->sector, max_granularity, max_sectors));
 
        atomic_add(num_bios, &ci->io->io_count);
        bios = __send_duplicate_bios(ci, ti, num_bios, &len);
@@ -1628,23 +1630,27 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
 {
        unsigned int num_bios = 0;
        unsigned int max_granularity = 0;
+       unsigned int max_sectors = 0;
        struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
 
        switch (bio_op(ci->bio)) {
        case REQ_OP_DISCARD:
                num_bios = ti->num_discard_bios;
+               max_sectors = limits->max_discard_sectors;
                if (ti->max_discard_granularity)
-                       max_granularity = limits->max_discard_sectors;
+                       max_granularity = max_sectors;
                break;
        case REQ_OP_SECURE_ERASE:
                num_bios = ti->num_secure_erase_bios;
+               max_sectors = limits->max_secure_erase_sectors;
                if (ti->max_secure_erase_granularity)
-                       max_granularity = limits->max_secure_erase_sectors;
+                       max_granularity = max_sectors;
                break;
        case REQ_OP_WRITE_ZEROES:
                num_bios = ti->num_write_zeroes_bios;
+               max_sectors = limits->max_write_zeroes_sectors;
                if (ti->max_write_zeroes_granularity)
-                       max_granularity = limits->max_write_zeroes_sectors;
+                       max_granularity = max_sectors;
                break;
        default:
                break;
@@ -1659,7 +1665,8 @@ static blk_status_t __process_abnormal_io(struct clone_info *ci,
        if (unlikely(!num_bios))
                return BLK_STS_NOTSUPP;
 
-       __send_changing_extent_only(ci, ti, num_bios, max_granularity);
+       __send_changing_extent_only(ci, ti, num_bios,
+                                   max_granularity, max_sectors);
        return BLK_STS_OK;
 }
 
@@ -2814,6 +2821,10 @@ retry:
        }
 
        map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
+       if (!map) {
+               /* avoid deadlock with fs/namespace.c:do_mount() */
+               suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
+       }
 
        r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
        if (r)
@@ -3138,6 +3149,8 @@ struct dm_pr {
        bool    fail_early;
        int     ret;
        enum pr_type type;
+       struct pr_keys *read_keys;
+       struct pr_held_reservation *rsv;
 };
 
 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
@@ -3370,12 +3383,79 @@ out:
        return r;
 }
 
+static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
+                            sector_t start, sector_t len, void *data)
+{
+       struct dm_pr *pr = data;
+       const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+       if (!ops || !ops->pr_read_keys) {
+               pr->ret = -EOPNOTSUPP;
+               return -1;
+       }
+
+       pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys);
+       if (!pr->ret)
+               return -1;
+
+       return 0;
+}
+
+static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
+{
+       struct dm_pr pr = {
+               .read_keys = keys,
+       };
+       int ret;
+
+       ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
+       if (ret)
+               return ret;
+
+       return pr.ret;
+}
+
+static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
+                                   sector_t start, sector_t len, void *data)
+{
+       struct dm_pr *pr = data;
+       const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
+
+       if (!ops || !ops->pr_read_reservation) {
+               pr->ret = -EOPNOTSUPP;
+               return -1;
+       }
+
+       pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv);
+       if (!pr->ret)
+               return -1;
+
+       return 0;
+}
+
+static int dm_pr_read_reservation(struct block_device *bdev,
+                                 struct pr_held_reservation *rsv)
+{
+       struct dm_pr pr = {
+               .rsv = rsv,
+       };
+       int ret;
+
+       ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
+       if (ret)
+               return ret;
+
+       return pr.ret;
+}
+
 static const struct pr_ops dm_pr_ops = {
        .pr_register    = dm_pr_register,
        .pr_reserve     = dm_pr_reserve,
        .pr_release     = dm_pr_release,
        .pr_preempt     = dm_pr_preempt,
        .pr_clear       = dm_pr_clear,
+       .pr_read_keys   = dm_pr_read_keys,
+       .pr_read_reservation = dm_pr_read_reservation,
 };
 
 static const struct block_device_operations dm_blk_dops = {