dm: address indent/space issues
author    Heinz Mauelshagen <heinzm@redhat.com>
Wed, 25 Jan 2023 22:31:55 +0000 (23:31 +0100)
committer Mike Snitzer <snitzer@kernel.org>
Tue, 14 Feb 2023 19:23:06 +0000 (14:23 -0500)
Signed-off-by: Heinz Mauelshagen <heinzm@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@kernel.org>
12 files changed:
drivers/md/dm-cache-policy.h
drivers/md/dm-crypt.c
drivers/md/dm-integrity.c
drivers/md/dm-log.c
drivers/md/dm-raid.c
drivers/md/dm-raid1.c
drivers/md/dm-table.c
drivers/md/dm-thin.c
drivers/md/dm-writecache.c
drivers/md/persistent-data/dm-btree.c
drivers/md/persistent-data/dm-space-map-common.c
drivers/md/persistent-data/dm-space-map-common.h

diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 148dc45248dd2220f2a00a45e0f1469d2b9562b6..a1eedcc426771e8897ee961636628808fd306e87 100644
@@ -76,7 +76,7 @@ struct dm_cache_policy {
         * background work.
         */
        int (*get_background_work)(struct dm_cache_policy *p, bool idle,
-                                  struct policy_work **result);
+                                  struct policy_work **result);
 
        /*
         * You must pass in the same work pointer that you were given, not
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index feb641a29679ae239d448974dc185be903bf9f07..47456fb71003c409246023f9aae8e5b88f88f2b5 100644
@@ -2503,7 +2503,7 @@ static int crypt_set_keyring_key(struct crypt_config *cc, const char *key_string
                type = &key_type_encrypted;
                set_key = set_key_encrypted;
        } else if (IS_ENABLED(CONFIG_TRUSTED_KEYS) &&
-                  !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
+                  !strncmp(key_string, "trusted:", key_desc - key_string + 1)) {
                type = &key_type_trusted;
                set_key = set_key_trusted;
        } else {
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index efae9d7e3a565dcc02f5cde427723182b68b4655..46167422164eb9e3e132e16cfc8ca175c8bf15d0 100644
@@ -2301,7 +2301,6 @@ offload_to_thread:
                else
 skip_check:
                        dec_in_flight(dio);
-
        } else {
                INIT_WORK(&dio->work, integrity_metadata);
                queue_work(ic->metadata_wq, &dio->work);
@@ -4085,7 +4084,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned int argc, char **argv
                } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
                        if (val < 1 << SECTOR_SHIFT ||
                            val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
-                           (val & (val -1))) {
+                           (val & (val - 1))) {
                                r = -EINVAL;
                                ti->error = "Invalid block_size argument";
                                goto bad;
@@ -4405,7 +4404,7 @@ try_smaller_buffer:
        if (ic->internal_hash) {
                size_t recalc_tags_size;
                ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
-               if (!ic->recalc_wq ) {
+               if (!ic->recalc_wq) {
                        ti->error = "Cannot allocate workqueue";
                        r = -ENOMEM;
                        goto bad;
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index c87d8758718d53dbcc3d8a96dc0cdf63e509a68e..fe7b24a45812321711651bc30e5816dee5dc7abb 100644
@@ -758,8 +758,8 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
        log_clear_bit(lc, lc->recovering_bits, region);
        if (in_sync) {
                log_set_bit(lc, lc->sync_bits, region);
-                lc->sync_count++;
-        } else if (log_test_bit(lc->sync_bits, region)) {
+               lc->sync_count++;
+       } else if (log_test_bit(lc->sync_bits, region)) {
                lc->sync_count--;
                log_clear_bit(lc, lc->sync_bits, region);
        }
@@ -767,9 +767,9 @@ static void core_set_region_sync(struct dm_dirty_log *log, region_t region,
 
 static region_t core_get_sync_count(struct dm_dirty_log *log)
 {
-        struct log_c *lc = (struct log_c *) log->context;
+       struct log_c *lc = (struct log_c *) log->context;
 
-        return lc->sync_count;
+       return lc->sync_count;
 }
 
 #define        DMEMIT_SYNC \
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 2b2bf20e3ab3d0a7841d5bddd81046f7e8b41a03..6a2a985106d43120db8eeb75bcec809e41f1075e 100644
@@ -363,8 +363,8 @@ static struct {
        const int mode;
        const char *param;
 } _raid456_journal_mode[] = {
-       { R5C_JOURNAL_MODE_WRITE_THROUGH , "writethrough" },
-       { R5C_JOURNAL_MODE_WRITE_BACK    , "writeback" }
+       { R5C_JOURNAL_MODE_WRITE_THROUGH, "writethrough" },
+       { R5C_JOURNAL_MODE_WRITE_BACK,    "writeback" }
 };
 
 /* Return MD raid4/5/6 journal mode for dm @journal_mode one */
@@ -1115,7 +1115,7 @@ too_many:
  *    [stripe_cache <sectors>]         Stripe cache size for higher RAIDs
  *    [region_size <sectors>]          Defines granularity of bitmap
  *    [journal_dev <dev>]              raid4/5/6 journaling deviice
- *                                     (i.e. write hole closing log)
+ *                                     (i.e. write hole closing log)
  *
  * RAID10-only options:
  *    [raid10_copies <# copies>]       Number of copies.  (Default: 2)
@@ -4002,7 +4002,7 @@ static int raid_preresume(struct dm_target *ti)
        }
 
        /* Resize bitmap to adjust to changed region size (aka MD bitmap chunksize) or grown device size */
-        if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
+       if (test_bit(RT_FLAG_RS_BITMAP_LOADED, &rs->runtime_flags) && mddev->bitmap &&
            (test_bit(RT_FLAG_RS_GROW, &rs->runtime_flags) ||
             (rs->requested_bitmap_chunk_sectors &&
               mddev->bitmap_info.chunksize != to_bytes(rs->requested_bitmap_chunk_sectors)))) {
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 30d5aa5dec8f54df5ec99def4724317221a8e9e3..86a0ff713b5bcb79723c94123956b3c85f1f11ba 100644
@@ -904,7 +904,7 @@ static struct mirror_set *alloc_context(unsigned int nr_mirrors,
        if (IS_ERR(ms->io_client)) {
                ti->error = "Error creating dm_io client";
                kfree(ms);
-               return NULL;
+               return NULL;
        }
 
        ms->rh = dm_region_hash_create(ms, dispatch_bios, wakeup_mirrord,
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index cf1b43724fc5149174acbe3bb4e90e71c3e035c4..5914b55564d4cd701ce24c235a0989163c29cc58 100644
@@ -73,7 +73,7 @@ static sector_t high(struct dm_table *t, unsigned int l, unsigned int n)
                n = get_child(n, CHILDREN_PER_NODE - 1);
 
        if (n >= t->counts[l])
-               return (sector_t) - 1;
+               return (sector_t) -1;
 
        return get_node(t, l, n)[KEYS_PER_NODE - 1];
 }
@@ -1530,7 +1530,7 @@ static bool dm_table_any_dev_attr(struct dm_table *t,
                if (ti->type->iterate_devices &&
                    ti->type->iterate_devices(ti, func, data))
                        return true;
-        }
+       }
 
        return false;
 }
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c015b943f2e32d7d65a7cc926d629e6e3e194620..c5a15e8d8cc3e4157d7907b3b9f7671d390c3d4a 100644
@@ -1181,9 +1181,9 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
        discard_parent = bio_alloc(NULL, 1, 0, GFP_NOIO);
        discard_parent->bi_end_io = passdown_endio;
        discard_parent->bi_private = m;
-       if (m->maybe_shared)
-               passdown_double_checking_shared_status(m, discard_parent);
-       else {
+       if (m->maybe_shared)
+               passdown_double_checking_shared_status(m, discard_parent);
+       else {
                struct discard_op op;
 
                begin_discard(&op, tc, discard_parent);
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 69aa708d6dae77f7d660dba3e4e658394abbd1a8..4f9cbc70686c03f8f26a64d8d8437f5e06a5a0bc 100644
@@ -531,7 +531,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
                req.notify.context = &endio;
 
                /* writing via async dm-io (implied by notify.fn above) won't return an error */
-               (void) dm_io(&req, 1, &region, NULL);
+               (void) dm_io(&req, 1, &region, NULL);
                i = j;
        }
 
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index f7482266102284ab0d50364aa188e792cb1ef916..67cf4c2ca2582e0d6a37e3cbdc9d60df82e0a4aa 100644
@@ -727,7 +727,7 @@ static int shadow_child(struct dm_btree_info *info, struct dm_btree_value_type *
  * nodes, so saves metadata space.
  */
 static int split_two_into_three(struct shadow_spine *s, unsigned int parent_index,
-                                struct dm_btree_value_type *vt, uint64_t key)
+                               struct dm_btree_value_type *vt, uint64_t key)
 {
        int r;
        unsigned int middle_index;
@@ -782,7 +782,7 @@ static int split_two_into_three(struct shadow_spine *s, unsigned int parent_inde
                if (shadow_current(s) != right)
                        unlock_block(s->info, right);
 
-               return r;
+               return r;
        }
 
 
@@ -1217,7 +1217,7 @@ int btree_get_overwrite_leaf(struct dm_btree_info *info, dm_block_t root,
 static bool need_insert(struct btree_node *node, uint64_t *keys,
                        unsigned int level, unsigned int index)
 {
-        return ((index >= le32_to_cpu(node->header.nr_entries)) ||
+       return ((index >= le32_to_cpu(node->header.nr_entries)) ||
                (le64_to_cpu(node->keys[index]) != keys[level]));
 }
 
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index b18b6dc1b634e0f95098780987ef70facdfc1902..4ce082b0d5cbf62165c763fe7e0a441633fafeda 100644
@@ -391,7 +391,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
 }
 
 int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
-                                dm_block_t begin, dm_block_t end, dm_block_t *b)
+                                dm_block_t begin, dm_block_t end, dm_block_t *b)
 {
        int r;
        uint32_t count;
diff --git a/drivers/md/persistent-data/dm-space-map-common.h b/drivers/md/persistent-data/dm-space-map-common.h
index a6ae642dbee560bfbf46ba65d89659419868a50e..75b95fe346840d87de4fb71e01c4b5406aab5a18 100644
@@ -121,7 +121,7 @@ int sm_ll_lookup(struct ll_disk *ll, dm_block_t b, uint32_t *result);
 int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
                          dm_block_t end, dm_block_t *result);
 int sm_ll_find_common_free_block(struct ll_disk *old_ll, struct ll_disk *new_ll,
-                                dm_block_t begin, dm_block_t end, dm_block_t *result);
+                                dm_block_t begin, dm_block_t end, dm_block_t *result);
 
 /*
  * The next three functions return (via nr_allocations) the net number of