dm thin metadata: check fail_io before using data_sm
authorLi Lingfeng <lilingfeng3@huawei.com>
Tue, 6 Jun 2023 12:20:24 +0000 (20:20 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 21 Jun 2023 14:00:55 +0000 (16:00 +0200)
commit cb65b282c9640c27d3129e2e04b711ce1b352838 upstream.

We must check pmd->fail_io before using pmd->data_sm, since
pmd->data_sm may have been destroyed by another process, as the
following race shows:

       P1(kworker)                             P2(message)
do_worker
 process_prepared
  process_prepared_discard_passdown_pt2
   dm_pool_dec_data_range
                                    pool_message
                                     commit
                                      dm_pool_commit_metadata
                                        ↓
                                       // commit failed
                                      metadata_operation_failed
                                       abort_transaction
                                        dm_pool_abort_metadata
                                         __open_or_format_metadata
                                           ↓
                                          dm_sm_disk_open
                                            ↓
                                           // open failed
                                           // pmd->data_sm is NULL
    dm_sm_dec_blocks
      ↓
     // try to access pmd->data_sm --> UAF

As shown above, if dm_pool_commit_metadata() fails and the subsequent
dm_pool_abort_metadata() also fails while processing a pool message,
the kworker may trigger a use-after-free (UAF).
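
For illustration, a condensed sketch of the failing abort path (the
call chain is taken from the diagram above; the format_device=false
argument and the error handling are simplified, and helper details
vary by kernel version):

  pmd_write_lock(pmd);
  /* old persistent data objects, including pmd->data_sm, are freed */
  r = __open_or_format_metadata(pmd, false); /* dm_sm_disk_open() fails */
  if (r)
          pmd->fail_io = true; /* pmd->data_sm stays NULL */
  pmd_write_unlock(pmd);

Once fail_io is set, any later space-map call that blindly dereferences
pmd->data_sm, such as dm_sm_dec_blocks() from the kworker, hits freed
or NULL memory.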

Fixes: be500ed721a6 ("dm space maps: improve performance with inc/dec on ranges of blocks")
Cc: stable@vger.kernel.org
Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/md/dm-thin-metadata.c

index 80545ec..59eb1cb 100644
@@ -1750,13 +1750,15 @@ int dm_thin_remove_range(struct dm_thin_device *td,
 
 int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *result)
 {
-       int r;
+       int r = -EINVAL;
        uint32_t ref_count;
 
        down_read(&pmd->root_lock);
-       r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
-       if (!r)
-               *result = (ref_count > 1);
+       if (!pmd->fail_io) {
+               r = dm_sm_get_count(pmd->data_sm, b, &ref_count);
+               if (!r)
+                       *result = (ref_count > 1);
+       }
        up_read(&pmd->root_lock);
 
        return r;
@@ -1764,10 +1766,11 @@ int dm_pool_block_is_shared(struct dm_pool_metadata *pmd, dm_block_t b, bool *re
 
 int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
 {
-       int r = 0;
+       int r = -EINVAL;
 
        pmd_write_lock(pmd);
-       r = dm_sm_inc_blocks(pmd->data_sm, b, e);
+       if (!pmd->fail_io)
+               r = dm_sm_inc_blocks(pmd->data_sm, b, e);
        pmd_write_unlock(pmd);
 
        return r;
@@ -1775,10 +1778,11 @@ int dm_pool_inc_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_
 
 int dm_pool_dec_data_range(struct dm_pool_metadata *pmd, dm_block_t b, dm_block_t e)
 {
-       int r = 0;
+       int r = -EINVAL;
 
        pmd_write_lock(pmd);
-       r = dm_sm_dec_blocks(pmd->data_sm, b, e);
+       if (!pmd->fail_io)
+               r = dm_sm_dec_blocks(pmd->data_sm, b, e);
        pmd_write_unlock(pmd);
 
        return r;
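
After the patch, all three accessors share the same shape; for example,
dm_pool_dec_data_range() now reads as below (reconstructed directly
from the hunk above):

  int dm_pool_dec_data_range(struct dm_pool_metadata *pmd,
                             dm_block_t b, dm_block_t e)
  {
          int r = -EINVAL;

          pmd_write_lock(pmd);
          /* only touch the space map while the metadata is healthy */
          if (!pmd->fail_io)
                  r = dm_sm_dec_blocks(pmd->data_sm, b, e);
          pmd_write_unlock(pmd);

          return r;
  }

Returning -EINVAL when fail_io is set matches the convention already
used by the other fail_io-guarded accessors in dm-thin-metadata.c.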