lib/stackdepot: rename hash table constants and variables
author Andrey Konovalov <andreyknvl@google.com>
Fri, 10 Feb 2023 21:15:56 +0000 (22:15 +0100)
committer Andrew Morton <akpm@linux-foundation.org>
Fri, 17 Feb 2023 04:43:50 +0000 (20:43 -0800)
Give more meaningful names to hash table-related constants and variables:

1. Rename STACK_HASH_SCALE to STACK_HASH_TABLE_SCALE to point out that it
   is related to scaling the hash table.

2. Rename STACK_HASH_ORDER_MIN/MAX to STACK_BUCKET_NUMBER_ORDER_MIN/MAX
   to point out that they are related to the number of hash table buckets.

3. Rename stack_hash_order to stack_bucket_number_order for the same
   reason as #2.

No functional changes.

Link: https://lkml.kernel.org/r/f166dd6f3cb2378aea78600714393dd568c33ee9.1676063693.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
lib/stackdepot.c

index de1afe3..d1ab531 100644
@@ -76,17 +76,17 @@ static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_ST
 static bool __stack_depot_early_init_passed __initdata;
 
 /* Use one hash table bucket per 16 KB of memory. */
-#define STACK_HASH_SCALE       14
+#define STACK_HASH_TABLE_SCALE 14
 /* Limit the number of buckets between 4K and 1M. */
-#define STACK_HASH_ORDER_MIN   12
-#define STACK_HASH_ORDER_MAX   20
+#define STACK_BUCKET_NUMBER_ORDER_MIN 12
+#define STACK_BUCKET_NUMBER_ORDER_MAX 20
 /* Initial seed for jhash2. */
 #define STACK_HASH_SEED 0x9747b28c
 
 /* Hash table of pointers to stored stack traces. */
 static struct stack_record **stack_table;
 /* Fixed order of the number of table buckets. Used when KASAN is enabled. */
-static unsigned int stack_hash_order;
+static unsigned int stack_bucket_number_order;
 /* Hash mask for indexing the table. */
 static unsigned int stack_hash_mask;
 
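For a concrete feel for the renamed constants, here is a minimal
standalone C sketch that only prints the quantities they encode. The
constant values are copied from the hunk above; everything else is
illustration, not kernel code:

#include <stdio.h>

#define STACK_HASH_TABLE_SCALE 14
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20

int main(void)
{
	/* Scale 14: one bucket per 2^14 bytes, i.e. 16 KB of memory. */
	printf("bytes per bucket: %lu\n", 1UL << STACK_HASH_TABLE_SCALE);
	/* Orders 12 and 20: between 4K and 1M buckets. */
	printf("min buckets: %lu\n", 1UL << STACK_BUCKET_NUMBER_ORDER_MIN);
	printf("max buckets: %lu\n", 1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
	return 0;
}
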
@@ -137,28 +137,28 @@ int __init stack_depot_early_init(void)
         * in fuzzing scenarios, which leads to a large number of different
         * stack traces being stored in stack depot.
         */
-       if (kasan_enabled() && !stack_hash_order)
-               stack_hash_order = STACK_HASH_ORDER_MAX;
+       if (kasan_enabled() && !stack_bucket_number_order)
+               stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
 
        if (!__stack_depot_early_init_requested || stack_depot_disabled)
                return 0;
 
        /*
-        * If stack_hash_order is not set, leave entries as 0 to rely on the
-        * automatic calculations performed by alloc_large_system_hash.
+        * If stack_bucket_number_order is not set, leave entries as 0 to rely
+        * on the automatic calculations performed by alloc_large_system_hash.
         */
-       if (stack_hash_order)
-               entries = 1UL << stack_hash_order;
+       if (stack_bucket_number_order)
+               entries = 1UL << stack_bucket_number_order;
        pr_info("allocating hash table via alloc_large_system_hash\n");
        stack_table = alloc_large_system_hash("stackdepot",
                                                sizeof(struct stack_record *),
                                                entries,
-                                               STACK_HASH_SCALE,
+                                               STACK_HASH_TABLE_SCALE,
                                                HASH_EARLY | HASH_ZERO,
                                                NULL,
                                                &stack_hash_mask,
-                                               1UL << STACK_HASH_ORDER_MIN,
-                                               1UL << STACK_HASH_ORDER_MAX);
+                                               1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
+                                               1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
        if (!stack_table) {
                pr_err("hash table allocation failed, disabling\n");
                stack_depot_disabled = true;
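In the hunk above, alloc_large_system_hash() hands back the indexing
mask through &stack_hash_mask. As a hedged sketch of how such a mask is
typically used, assuming a power-of-two bucket count where the mask is
entries - 1 (the hash value below is a placeholder; the kernel derives
the real one from the stack trace with jhash2(), seeded with
STACK_HASH_SEED):

#include <stdio.h>

int main(void)
{
	unsigned long entries = 1UL << 12;	/* minimum bucket count */
	unsigned int stack_hash_mask = entries - 1;
	unsigned int hash = 0xdeadbeef;		/* placeholder hash value */

	/* With a power-of-two table, masking the hash picks the bucket. */
	printf("bucket index: %u\n", hash & stack_hash_mask);
	return 0;
}
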
@@ -181,13 +181,13 @@ int stack_depot_init(void)
                goto out_unlock;
 
        /*
-        * Similarly to stack_depot_early_init, use stack_hash_order
+        * Similarly to stack_depot_early_init, use stack_bucket_number_order
         * if assigned, and rely on automatic scaling otherwise.
         */
-       if (stack_hash_order) {
-               entries = 1UL << stack_hash_order;
+       if (stack_bucket_number_order) {
+               entries = 1UL << stack_bucket_number_order;
        } else {
-               int scale = STACK_HASH_SCALE;
+               int scale = STACK_HASH_TABLE_SCALE;
 
                entries = nr_free_buffer_pages();
                entries = roundup_pow_of_two(entries);
@@ -198,10 +198,10 @@ int stack_depot_init(void)
                        entries <<= (PAGE_SHIFT - scale);
        }
 
-       if (entries < 1UL << STACK_HASH_ORDER_MIN)
-               entries = 1UL << STACK_HASH_ORDER_MIN;
-       if (entries > 1UL << STACK_HASH_ORDER_MAX)
-               entries = 1UL << STACK_HASH_ORDER_MAX;
+       if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
+               entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
+       if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
+               entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
 
        pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
        stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
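
The non-early path above sizes the table from free memory before
clamping. Below is a minimal userspace sketch of that calculation; the
PAGE_SHIFT value and the free-page figure are assumptions for
illustration (the kernel queries them at runtime), and
roundup_pow_of_two() here is a simplified stand-in for the kernel
helper:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KB pages */
#define STACK_HASH_TABLE_SCALE 14	/* one bucket per 16 KB */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12
#define STACK_BUCKET_NUMBER_ORDER_MAX 20

/* Simplified stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two(unsigned long x)
{
	unsigned long p = 1;

	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	/* Pretend nr_free_buffer_pages() returned ~3.5M pages (~14 GB). */
	unsigned long entries = 3500000;
	int scale = STACK_HASH_TABLE_SCALE;

	entries = roundup_pow_of_two(entries);
	if (scale > PAGE_SHIFT)
		entries >>= (scale - PAGE_SHIFT);	/* fewer buckets than pages */
	else
		entries <<= (PAGE_SHIFT - scale);

	/* Clamp between 4K and 1M buckets, as in stack_depot_init(). */
	if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
	if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
		entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;

	printf("hash table entries: %lu\n", entries);
	return 0;
}

With these example inputs, 3.5M pages rounds up to 2^22, the right
shift by (14 - 12) gives 2^20 buckets, which is exactly the upper
clamp, so the sketch prints 1048576.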