dm: conditionally enable branching for less used features
author    Mike Snitzer <snitzer@kernel.org>    Sat, 26 Mar 2022 18:14:00 +0000 (14:14 -0400)
committer Mike Snitzer <snitzer@kernel.org>    Thu, 5 May 2022 21:31:34 +0000 (17:31 -0400)
Use jump_labels (static keys) to further reduce the cost of the unlikely
branches that guard DM's less used features: zoned block device support,
dm-stats, and swap_bios throttling. Each key starts false and is enabled
only once the corresponding feature is actually configured, so the common
fast path pays for a patched-in no-op instead of a load and a conditional
branch.

Signed-off-by: Mike Snitzer <snitzer@kernel.org>
drivers/md/dm-core.h
drivers/md/dm-stats.c
drivers/md/dm-table.c
drivers/md/dm.c
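
For readers unfamiliar with jump labels: a static key compiles its fast-path
test down to a single patchable instruction (on jump-label-capable
architectures), and flipping the key rewrites every call site in place.
A minimal sketch of the <linux/jump_label.h> API this patch builds on; the
functions below are illustrative, not from the patch:

#include <linux/jump_label.h>

/* Defined false: every reader compiles to a straight-line nop. */
DEFINE_STATIC_KEY_FALSE(example_enabled);

static void do_rare_feature_work(void)
{
        /* hypothetical slow-path work */
}

static void hot_path(void)
{
        /*
         * While the key is false, this test is a patched nop: no flag
         * is loaded and no branch needs predicting.
         */
        if (static_branch_unlikely(&example_enabled))
                do_rare_feature_work();
}

static void feature_setup(void)
{
        /* Flipping the key live-patches every call site into the block. */
        static_branch_enable(&example_enabled);
}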

diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index 41d6511..8ba99ea 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -13,6 +13,7 @@
 #include <linux/ktime.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-crypto-profile.h>
+#include <linux/jump_label.h>
 
 #include <trace/events/block.h>
 
@@ -154,6 +155,10 @@ static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
        return &md->stats;
 }
 
+DECLARE_STATIC_KEY_FALSE(stats_enabled);
+DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
+DECLARE_STATIC_KEY_FALSE(zoned_enabled);
+
 static inline bool dm_emulate_zone_append(struct mapped_device *md)
 {
        if (blk_queue_is_zoned(md->queue))
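
The DECLARE_STATIC_KEY_FALSE() lines above only emit extern declarations so
that dm-stats.c and dm-table.c can test and flip the keys; the storage is
defined exactly once, in dm.c further down. The split is the usual one for
any global:

/* shared header, here dm-core.h: visible to every DM core file */
DECLARE_STATIC_KEY_FALSE(stats_enabled);

/* exactly one translation unit, here dm.c, owns the definition */
DEFINE_STATIC_KEY_FALSE(stats_enabled);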
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 0e039a8..86e0697 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -396,6 +396,9 @@ static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
 
        dm_stats_recalc_precise_timestamps(stats);
 
+       if (!static_key_enabled(&stats_enabled.key))
+               static_branch_enable(&stats_enabled);
+
        mutex_unlock(&stats->mutex);
 
        resume_callback(md);
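
static_branch_enable() serializes on the jump-label mutex and rewrites
kernel text, so it is too heavy to run unconditionally on every
dm_stats_create() call; the static_key_enabled() pre-check is a plain read
that makes every call after the first effectively free. The idiom, pulled
out into a helper for illustration (enable_key_once() is not in the patch):

/* Enable a static key at most once: cheap read before the heavy patch. */
static void enable_key_once(struct static_key_false *key)
{
        if (!static_key_enabled(&key->key))
                static_branch_enable(key);
}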
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 73ed15d..a37c7b7 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -719,6 +719,9 @@ int dm_table_add_target(struct dm_table *t, const char *type,
                DMWARN("%s: %s: ignoring discards_supported because num_discard_bios is zero.",
                       dm_device_name(t->md), type);
 
+       if (tgt->limit_swap_bios && !static_key_enabled(&swap_bios_enabled.key))
+               static_branch_enable(&swap_bios_enabled);
+
        return 0;
 
  bad:
@@ -2040,6 +2043,8 @@ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
                r = dm_set_zones_restrictions(t, q);
                if (r)
                        return r;
+               if (!static_key_enabled(&zoned_enabled.key))
+                       static_branch_enable(&zoned_enabled);
        }
 
        dm_update_crypto_profile(q, t);
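
Both dm-table.c hooks flip their key at table load and nothing ever clears
it: once any zoned device or swap_bios-limited target has been seen, the
branch stays patched in for the life of the running kernel, trading a
one-time text patch for never having to synchronize a disable. The
swap_bios side is driven by a per-target flag; a sketch of how a target
constructor would opt in (the ctr is illustrative, but dm-crypt sets the
same flag):

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
        /* Ask DM core to throttle swap bios for this target ... */
        ti->limit_swap_bios = true;
        /* ... which makes dm_table_add_target() enable swap_bios_enabled. */
        return 0;
}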
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index d7b7154..6304322 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -71,6 +71,10 @@ void dm_issue_global_event(void)
        wake_up(&dm_global_eventq);
 }
 
+DEFINE_STATIC_KEY_FALSE(stats_enabled);
+DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
+DEFINE_STATIC_KEY_FALSE(zoned_enabled);
+
 /*
  * One of these is allocated (on-stack) per original bio.
  */
@@ -516,7 +520,8 @@ static void dm_io_acct(bool end, struct mapped_device *md, struct bio *bio,
        else
                bio_end_io_acct(bio, start_time);
 
-       if (unlikely(dm_stats_used(&md->stats)))
+       if (static_branch_unlikely(&stats_enabled) &&
+           unlikely(dm_stats_used(&md->stats)))
                dm_stats_account_io(&md->stats, bio_data_dir(bio),
                                    bio->bi_iter.bi_sector, bio_sectors(bio),
                                    end, start_time, stats_aux);
@@ -586,7 +591,8 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
        io->start_time = jiffies;
        io->flags = 0;
 
-       dm_stats_record_start(&md->stats, &io->stats_aux);
+       if (static_branch_unlikely(&stats_enabled))
+               dm_stats_record_start(&md->stats, &io->stats_aux);
 
        return io;
 }
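
The hot-path rewrites in dm.c all take the same shape: static branch first,
real per-device test second. Because && short-circuits, the right-hand side
is never evaluated while the key is off, so even the dm_stats_used() load
of md->stats vanishes from the common path. The pattern in isolation, with
a hypothetical device and flag:

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(feature_enabled);

struct my_device {
        int feature_count;              /* hypothetical per-device state */
};

static void account_feature_io(struct my_device *dev)
{
        /* hypothetical accounting work */
}

static void io_fast_path(struct my_device *dev)
{
        /*
         * Left side: a patched nop until some device enables the
         * feature.  Right side: only then is the per-device state
         * loaded and tested at all.
         */
        if (static_branch_unlikely(&feature_enabled) &&
            unlikely(dev->feature_count > 0))
                account_feature_io(dev);
}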
@@ -1012,21 +1018,25 @@ static void clone_endio(struct bio *bio)
                        disable_write_zeroes(md);
        }
 
-       if (unlikely(blk_queue_is_zoned(q)))
+       if (static_branch_unlikely(&zoned_enabled) &&
+           unlikely(blk_queue_is_zoned(q)))
                dm_zone_endio(io, bio);
 
        if (endio) {
                int r = endio(ti, bio, &error);
                switch (r) {
                case DM_ENDIO_REQUEUE:
-                       /*
-                        * Requeuing writes to a sequential zone of a zoned
-                        * target will break the sequential write pattern:
-                        * fail such IO.
-                        */
-                       if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
-                               error = BLK_STS_IOERR;
-                       else
+                       if (static_branch_unlikely(&zoned_enabled)) {
+                               /*
+                                * Requeuing writes to a sequential zone of a zoned
+                                * target will break the sequential write pattern:
+                                * fail such IO.
+                                */
+                               if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
+                                       error = BLK_STS_IOERR;
+                               else
+                                       error = BLK_STS_DM_REQUEUE;
+                       } else
                                error = BLK_STS_DM_REQUEUE;
                        fallthrough;
                case DM_ENDIO_DONE:
@@ -1040,7 +1050,8 @@ static void clone_endio(struct bio *bio)
                }
        }
 
-       if (unlikely(swap_bios_limit(ti, bio)))
+       if (static_branch_unlikely(&swap_bios_enabled) &&
+           unlikely(swap_bios_limit(ti, bio)))
                up(&md->swap_bios_semaphore);
 
        free_tio(bio);
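
Note the apparent redundancy in the DM_ENDIO_REQUEUE rewrite:
BLK_STS_DM_REQUEUE is assigned in both the if and the else arm. That
duplication is the point: with the assignment repeated outside the
static-branch block, a kernel whose zoned_enabled key is off never
evaluates dm_is_zone_write() at all, while zoned setups keep the
sequential-write sanity check intact.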
@@ -1295,21 +1306,25 @@ static void __map_bio(struct bio *clone)
        dm_io_inc_pending(io);
        tio->old_sector = clone->bi_iter.bi_sector;
 
-       if (unlikely(swap_bios_limit(ti, clone))) {
+       if (static_branch_unlikely(&swap_bios_enabled) &&
+           unlikely(swap_bios_limit(ti, clone))) {
                int latch = get_swap_bios();
                if (unlikely(latch != md->swap_bios))
                        __set_swap_bios_limit(md, latch);
                down(&md->swap_bios_semaphore);
        }
 
-       /*
-        * Check if the IO needs a special mapping due to zone append emulation
-        * on zoned target. In this case, dm_zone_map_bio() calls the target
-        * map operation.
-        */
-       if (unlikely(dm_emulate_zone_append(md)))
-               r = dm_zone_map_bio(tio);
-       else
+       if (static_branch_unlikely(&zoned_enabled)) {
+               /*
+                * Check if the IO needs a special mapping due to zone append
+                * emulation on zoned target. In this case, dm_zone_map_bio()
+                * calls the target map operation.
+                */
+               if (unlikely(dm_emulate_zone_append(md)))
+                       r = dm_zone_map_bio(tio);
+               else
+                       r = ti->type->map(ti, clone);
+       } else
                r = ti->type->map(ti, clone);
 
        switch (r) {
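
The same duplication strategy appears here: r = ti->type->map(ti, clone)
sits in both arms so the non-zoned path goes straight to the target's map
method without testing dm_emulate_zone_append(). The swap_bios block above
it pairs with the up() in clone_endio(): targets that set limit_swap_bios
bound their in-flight swap bios with md->swap_bios_semaphore, and the
static key keeps every other target from ever touching the semaphore.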
@@ -1329,7 +1344,8 @@ static void __map_bio(struct bio *clone)
                break;
        case DM_MAPIO_KILL:
        case DM_MAPIO_REQUEUE:
-               if (unlikely(swap_bios_limit(ti, clone)))
+               if (static_branch_unlikely(&swap_bios_enabled) &&
+                   unlikely(swap_bios_limit(ti, clone)))
                        up(&md->swap_bios_semaphore);
                free_tio(clone);
                if (r == DM_MAPIO_KILL)
@@ -1565,7 +1581,8 @@ static void init_clone_info(struct clone_info *ci, struct mapped_device *md,
        ci->sector_count = bio_sectors(bio);
 
        /* Shouldn't happen but sector_count was being set to 0 so... */
-       if (WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
+       if (static_branch_unlikely(&zoned_enabled) &&
+           WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
                ci->sector_count = 0;
 }
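
Taken together, the pattern is: define a key false, enable it at most once
from the slow configuration path, and guard every hot-path check with
static_branch_unlikely(). A self-contained, module-style sketch of that
whole shape (all names are hypothetical, error handling elided):

// SPDX-License-Identifier: GPL-2.0
#include <linux/jump_label.h>
#include <linux/module.h>
#include <linux/printk.h>

/* One key per rarely used feature, off by default. */
DEFINE_STATIC_KEY_FALSE(demo_feature_enabled);

static void demo_feature_work(void)
{
        pr_info("rare feature path taken\n");
}

/* Hot path: costs a patched nop until the feature is configured. */
static void demo_hot_path(void)
{
        if (static_branch_unlikely(&demo_feature_enabled))
                demo_feature_work();
}

/* Configuration path: flip the key at most once, as the patch does. */
static void demo_configure_feature(void)
{
        if (!static_key_enabled(&demo_feature_enabled.key))
                static_branch_enable(&demo_feature_enabled);
}

static int __init demo_init(void)
{
        demo_hot_path();                /* key off: skipped via nop */
        demo_configure_feature();       /* patches the call sites */
        demo_hot_path();                /* key on: enters demo_feature_work() */
        return 0;
}
module_init(demo_init);

MODULE_LICENSE("GPL");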