lib/stackdepot: lower the indentation in stack_depot_init
author: Andrey Konovalov <andreyknvl@google.com>
Fri, 10 Feb 2023 21:15:54 +0000 (22:15 +0100)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 17 Feb 2023 04:43:49 +0000 (20:43 -0800)
stack_depot_init does most things inside an if check. Move them out and
use a goto statement instead.

No functional changes.

Link: https://lkml.kernel.org/r/8e382f1f0c352e4b2ad47326fec7782af961fe8e.1676063693.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
lib/stackdepot.c

index 9fab711..3c713f7 100644 (file)
@@ -165,46 +165,50 @@ int __init stack_depot_early_init(void)
 int stack_depot_init(void)
 {
        static DEFINE_MUTEX(stack_depot_init_mutex);
+       unsigned long entries;
        int ret = 0;
 
        mutex_lock(&stack_depot_init_mutex);
-       if (!stack_depot_disabled && !stack_table) {
-               unsigned long entries;
 
-               /*
-                * Similarly to stack_depot_early_init, use stack_hash_order
-                * if assigned, and rely on automatic scaling otherwise.
-                */
-               if (stack_hash_order) {
-                       entries = 1UL << stack_hash_order;
-               } else {
-                       int scale = STACK_HASH_SCALE;
-
-                       entries = nr_free_buffer_pages();
-                       entries = roundup_pow_of_two(entries);
-
-                       if (scale > PAGE_SHIFT)
-                               entries >>= (scale - PAGE_SHIFT);
-                       else
-                               entries <<= (PAGE_SHIFT - scale);
-               }
+       if (stack_depot_disabled || stack_table)
+               goto out_unlock;
 
-               if (entries < 1UL << STACK_HASH_ORDER_MIN)
-                       entries = 1UL << STACK_HASH_ORDER_MIN;
-               if (entries > 1UL << STACK_HASH_ORDER_MAX)
-                       entries = 1UL << STACK_HASH_ORDER_MAX;
-
-               pr_info("allocating hash table of %lu entries via kvcalloc\n",
-                               entries);
-               stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
-               if (!stack_table) {
-                       pr_err("hash table allocation failed, disabling\n");
-                       stack_depot_disabled = true;
-                       ret = -ENOMEM;
-               }
-               stack_hash_mask = entries - 1;
+       /*
+        * Similarly to stack_depot_early_init, use stack_hash_order
+        * if assigned, and rely on automatic scaling otherwise.
+        */
+       if (stack_hash_order) {
+               entries = 1UL << stack_hash_order;
+       } else {
+               int scale = STACK_HASH_SCALE;
+
+               entries = nr_free_buffer_pages();
+               entries = roundup_pow_of_two(entries);
+
+               if (scale > PAGE_SHIFT)
+                       entries >>= (scale - PAGE_SHIFT);
+               else
+                       entries <<= (PAGE_SHIFT - scale);
        }
+
+       if (entries < 1UL << STACK_HASH_ORDER_MIN)
+               entries = 1UL << STACK_HASH_ORDER_MIN;
+       if (entries > 1UL << STACK_HASH_ORDER_MAX)
+               entries = 1UL << STACK_HASH_ORDER_MAX;
+
+       pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
+       stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
+       if (!stack_table) {
+               pr_err("hash table allocation failed, disabling\n");
+               stack_depot_disabled = true;
+               ret = -ENOMEM;
+               goto out_unlock;
+       }
+       stack_hash_mask = entries - 1;
+
+out_unlock:
        mutex_unlock(&stack_depot_init_mutex);
+
        return ret;
 }
 EXPORT_SYMBOL_GPL(stack_depot_init);