Merge tag 'for-6.5/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Jul 2023 17:08:49 +0000 (10:08 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 28 Jul 2023 17:08:49 +0000 (10:08 -0700)
Pull device mapper fixes from Mike Snitzer:

 - Fix double free on memory allocation failure in DM integrity target's
   integrity_recalc()

 - Fix locking in DM raid target's raid_ctr() and around calls to
   md_stop()

 - Fix DM cache target's cleaner policy to always allow work to be
   queued for writeback, even if the cache isn't idle

* tag 'for-6.5/dm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/device-mapper/linux-dm:
  dm cache policy smq: ensure IO doesn't prevent cleaner policy progress
  dm raid: protect md_stop() with 'reconfig_mutex'
  dm raid: clean up four equivalent goto tags in raid_ctr()
  dm raid: fix missing reconfig_mutex unlock in raid_ctr() error paths
  dm integrity: fix double free on memory allocation failure

drivers/md/dm-cache-policy-smq.c
drivers/md/dm-integrity.c
drivers/md/dm-raid.c
drivers/md/md.c

diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 493a871..8bd2ad7 100644
@@ -857,7 +857,13 @@ struct smq_policy {
 
        struct background_tracker *bg_work;
 
-       bool migrations_allowed;
+       bool migrations_allowed:1;
+
+       /*
+        * If this is set the policy will try and clean the whole cache
+        * even if the device is not idle.
+        */
+       bool cleaner:1;
 };
 
 /*----------------------------------------------------------------*/
@@ -1138,7 +1144,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
         * Cache entries may not be populated.  So we cannot rely on the
         * size of the clean queue.
         */
-       if (idle) {
+       if (idle || mq->cleaner) {
                /*
                 * We'd like to clean everything.
                 */
@@ -1722,11 +1728,9 @@ static void calc_hotspot_params(sector_t origin_size,
                *hotspot_block_size /= 2u;
 }
 
-static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
-                                           sector_t origin_size,
-                                           sector_t cache_block_size,
-                                           bool mimic_mq,
-                                           bool migrations_allowed)
+static struct dm_cache_policy *
+__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size,
+            bool mimic_mq, bool migrations_allowed, bool cleaner)
 {
        unsigned int i;
        unsigned int nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
@@ -1813,6 +1817,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
                goto bad_btracker;
 
        mq->migrations_allowed = migrations_allowed;
+       mq->cleaner = cleaner;
 
        return &mq->policy;
 
@@ -1836,21 +1841,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
                                          sector_t origin_size,
                                          sector_t cache_block_size)
 {
-       return __smq_create(cache_size, origin_size, cache_block_size, false, true);
+       return __smq_create(cache_size, origin_size, cache_block_size,
+                           false, true, false);
 }
 
 static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
                                         sector_t origin_size,
                                         sector_t cache_block_size)
 {
-       return __smq_create(cache_size, origin_size, cache_block_size, true, true);
+       return __smq_create(cache_size, origin_size, cache_block_size,
+                           true, true, false);
 }
 
 static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
                                              sector_t origin_size,
                                              sector_t cache_block_size)
 {
-       return __smq_create(cache_size, origin_size, cache_block_size, false, false);
+       return __smq_create(cache_size, origin_size, cache_block_size,
+                           false, false, true);
 }
 
 /*----------------------------------------------------------------*/
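
For context, a minimal userspace sketch of the decision the clean_target_met()
hunk above changes. The struct and threshold below are invented stand-ins, not
the dm-cache code; the point is only that the new cleaner bit makes the "clean
everything" target apply whether or not the device is idle, so the cleaner
policy can keep queueing writeback under sustained IO.

#include <stdbool.h>
#include <stdio.h>

struct smq_model {
	unsigned int cache_size;	/* total cache blocks */
	unsigned int nr_dirty;		/* blocks still awaiting writeback */
	bool cleaner;			/* set when created as the "cleaner" policy */
};

static bool clean_target_met(const struct smq_model *mq, bool idle)
{
	/*
	 * With the cleaner bit set, the target is "everything clean" even
	 * while IO keeps arriving; previously that target applied only when
	 * the device was idle, so a busy cleaner could stall with a residue
	 * of dirty blocks.
	 */
	if (idle || mq->cleaner)
		return mq->nr_dirty == 0;

	/* busy, non-cleaner policies tolerate some dirty blocks
	 * (the threshold here is purely illustrative) */
	return mq->nr_dirty * 4u <= mq->cache_size;
}

int main(void)
{
	struct smq_model mq = { .cache_size = 1024, .nr_dirty = 16, .cleaner = true };

	/* busy device, cleaner policy: keep queueing writeback until dirty == 0 */
	printf("clean target met: %s\n", clean_target_met(&mq, false) ? "yes" : "no");
	return 0;
}
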
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index 3d5c56e..97a8d5f 100644
@@ -2676,6 +2676,7 @@ oom:
        recalc_tags = kvmalloc(recalc_tags_size, GFP_NOIO);
        if (!recalc_tags) {
                vfree(recalc_buffer);
+               recalc_buffer = NULL;
                goto oom;
        }
 
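The dm-integrity hunk is a one-line fix; the hedged reduction below (plain
malloc/free and invented names, not the kernel code) shows the shape of the
bug: the retry loop frees the buffer and jumps back without clearing the
pointer, and if the retry then bails out before reallocating, the shared
cleanup label frees the stale pointer a second time. Clearing the pointer
makes the final free a harmless no-op.

#include <stdlib.h>

#define MIN_BYTES 64u

static void recalc_demo(size_t bytes)
{
	char *buf = NULL, *tags = NULL;

oom:
	bytes >>= 1;			/* retry with a smaller buffer */
	if (bytes < MIN_BYTES)
		goto free_ret;		/* bail out without reallocating */

	buf = malloc(bytes);
	if (!buf)
		goto free_ret;		/* buf is NULL here, so cleanup is safe */

	tags = malloc(bytes / 8 + 1);
	if (!tags) {
		free(buf);
		buf = NULL;		/* the fix: otherwise free_ret sees a stale pointer */
		goto oom;
	}

	/* ... recalculation work would happen here ... */

free_ret:
	free(buf);			/* free(NULL) is a no-op */
	free(tags);
}

int main(void)
{
	recalc_demo(4096);
	return 0;
}
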
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 8846bf5..becdb68 100644
@@ -3251,8 +3251,7 @@ size_check:
        r = md_start(&rs->md);
        if (r) {
                ti->error = "Failed to start raid array";
-               mddev_unlock(&rs->md);
-               goto bad_md_start;
+               goto bad_unlock;
        }
 
        /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
@@ -3260,8 +3259,7 @@ size_check:
                r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
                if (r) {
                        ti->error = "Failed to set raid4/5/6 journal mode";
-                       mddev_unlock(&rs->md);
-                       goto bad_journal_mode_set;
+                       goto bad_unlock;
                }
        }
 
@@ -3272,14 +3270,14 @@ size_check:
        if (rs_is_raid456(rs)) {
                r = rs_set_raid456_stripe_cache(rs);
                if (r)
-                       goto bad_stripe_cache;
+                       goto bad_unlock;
        }
 
        /* Now do an early reshape check */
        if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) {
                r = rs_check_reshape(rs);
                if (r)
-                       goto bad_check_reshape;
+                       goto bad_unlock;
 
                /* Restore new, ctr requested layout to perform check */
                rs_config_restore(rs, &rs_layout);
@@ -3288,7 +3286,7 @@ size_check:
                        r = rs->md.pers->check_reshape(&rs->md);
                        if (r) {
                                ti->error = "Reshape check failed";
-                               goto bad_check_reshape;
+                               goto bad_unlock;
                        }
                }
        }
@@ -3299,11 +3297,9 @@ size_check:
        mddev_unlock(&rs->md);
        return 0;
 
-bad_md_start:
-bad_journal_mode_set:
-bad_stripe_cache:
-bad_check_reshape:
+bad_unlock:
        md_stop(&rs->md);
+       mddev_unlock(&rs->md);
 bad:
        raid_set_free(rs);
 
@@ -3314,7 +3310,9 @@ static void raid_dtr(struct dm_target *ti)
 {
        struct raid_set *rs = ti->private;
 
+       mddev_lock_nointr(&rs->md);
        md_stop(&rs->md);
+       mddev_unlock(&rs->md);
        raid_set_free(rs);
 }
 
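A hedged userspace sketch of the locking pattern the dm-raid hunks converge
on, with a pthread mutex standing in for mddev->reconfig_mutex and stub
fake_md_start()/fake_md_stop() functions; none of these names exist in the
kernel. Every constructor failure after the lock is taken funnels into one
label that stops the array while the lock is still held, then unlocks and
frees; the destructor takes the same lock around its stop call, matching the
lockdep assertion added to md_stop() in the md.c hunk below.

#include <pthread.h>
#include <stdlib.h>

struct fake_mddev { pthread_mutex_t reconfig_mutex; };
struct fake_raid_set { struct fake_mddev md; };

/* stand-ins for md_start()/md_stop(); the real md_stop() now asserts
 * that reconfig_mutex is held by the caller */
static int fake_md_start(struct fake_mddev *m) { (void)m; return 0; }
static void fake_md_stop(struct fake_mddev *m) { (void)m; }

static struct fake_raid_set *fake_raid_ctr(void)
{
	struct fake_raid_set *rs = calloc(1, sizeof(*rs));

	if (!rs)
		return NULL;
	pthread_mutex_init(&rs->md.reconfig_mutex, NULL);

	pthread_mutex_lock(&rs->md.reconfig_mutex);

	if (fake_md_start(&rs->md))
		goto bad_unlock;

	/* ... journal mode, stripe cache and reshape checks would go here,
	 * each jumping to bad_unlock on failure ... */

	pthread_mutex_unlock(&rs->md.reconfig_mutex);
	return rs;

bad_unlock:
	fake_md_stop(&rs->md);		/* stop while the lock is still held */
	pthread_mutex_unlock(&rs->md.reconfig_mutex);
	free(rs);
	return NULL;
}

static void fake_raid_dtr(struct fake_raid_set *rs)
{
	/* the destructor takes the lock around the stop call as well */
	pthread_mutex_lock(&rs->md.reconfig_mutex);
	fake_md_stop(&rs->md);
	pthread_mutex_unlock(&rs->md.reconfig_mutex);
	free(rs);
}

int main(void)
{
	struct fake_raid_set *rs = fake_raid_ctr();

	if (rs)
		fake_raid_dtr(rs);
	return 0;
}
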
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 2e38ef4..78be781 100644
@@ -6247,6 +6247,8 @@ static void __md_stop(struct mddev *mddev)
 
 void md_stop(struct mddev *mddev)
 {
+       lockdep_assert_held(&mddev->reconfig_mutex);
+
        /* stop the array and free an attached data structures.
         * This is called from dm-raid
         */
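
lockdep_assert_held() turns the "caller must hold reconfig_mutex" rule into
something the kernel can check at runtime when lockdep is enabled. The sketch
below is a crude userspace analogue with invented names (checked_mutex,
cm_assert_held), only meant to show the idea of asserting lock ownership at
the top of a function rather than documenting it in a comment.

#include <assert.h>
#include <pthread.h>
#include <stdbool.h>

struct checked_mutex {
	pthread_mutex_t lock;
	pthread_t owner;
	bool held;
};

static void cm_lock(struct checked_mutex *m)
{
	pthread_mutex_lock(&m->lock);
	m->owner = pthread_self();
	m->held = true;
}

static void cm_unlock(struct checked_mutex *m)
{
	m->held = false;
	pthread_mutex_unlock(&m->lock);
}

/* crude analogue of lockdep_assert_held(): blow up if the calling
 * thread does not currently hold the mutex */
static void cm_assert_held(struct checked_mutex *m)
{
	assert(m->held && pthread_equal(m->owner, pthread_self()));
}

static void stop_array(struct checked_mutex *reconfig_mutex)
{
	cm_assert_held(reconfig_mutex);	/* catches callers that forgot to lock */
	/* ... teardown work that relies on the lock ... */
}

int main(void)
{
	struct checked_mutex m = { .lock = PTHREAD_MUTEX_INITIALIZER };

	cm_lock(&m);
	stop_array(&m);
	cm_unlock(&m);
	return 0;
}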