lib: cpu_rmap: Use allocator for rmap entries
author: Eli Cohen <elic@nvidia.com>
Tue, 14 Feb 2023 07:29:46 +0000 (09:29 +0200)
committer: Saeed Mahameed <saeedm@nvidia.com>
Fri, 24 Mar 2023 23:04:10 +0000 (16:04 -0700)
Use a proper allocator for rmap entries using a naive for loop. The
allocator relies on whether an entry is NULL to be considered free.
Remove the used field of rmap which is not needed.

Also, avoid crashing the kernel if an entry is not available.

Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Eli Cohen <elic@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
include/linux/cpu_rmap.h
lib/cpu_rmap.c

index be8aea0..0ec745e 100644 (file)
  * struct cpu_rmap - CPU affinity reverse-map
  * @refcount: kref for object
  * @size: Number of objects to be reverse-mapped
- * @used: Number of objects added
  * @obj: Pointer to array of object pointers
  * @near: For each CPU, the index and distance to the nearest object,
  *      based on affinity masks
  */
 struct cpu_rmap {
        struct kref     refcount;
-       u16             size, used;
+       u16             size;
        void            **obj;
        struct {
                u16     index;
index e77f12b..5d4bf7a 100644 (file)
@@ -128,19 +128,31 @@ debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix)
 }
 #endif
 
+static int get_free_index(struct cpu_rmap *rmap)
+{
+       int i;
+
+       for (i = 0; i < rmap->size; i++)
+               if (!rmap->obj[i])
+                       return i;
+
+       return -ENOSPC;
+}
+
 /**
  * cpu_rmap_add - add object to a rmap
  * @rmap: CPU rmap allocated with alloc_cpu_rmap()
  * @obj: Object to add to rmap
  *
- * Return index of object.
+ * Return index of object or -ENOSPC if no free entry was found
  */
 int cpu_rmap_add(struct cpu_rmap *rmap, void *obj)
 {
-       u16 index;
+       int index = get_free_index(rmap);
+
+       if (index < 0)
+               return index;
 
-       BUG_ON(rmap->used >= rmap->size);
-       index = rmap->used++;
        rmap->obj[index] = obj;
        return index;
 }
@@ -230,7 +242,7 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap)
        if (!rmap)
                return;
 
-       for (index = 0; index < rmap->used; index++) {
+       for (index = 0; index < rmap->size; index++) {
                glue = rmap->obj[index];
                if (glue)
                        irq_set_affinity_notifier(glue->notify.irq, NULL);
@@ -295,13 +307,22 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq)
        glue->notify.release = irq_cpu_rmap_release;
        glue->rmap = rmap;
        cpu_rmap_get(rmap);
-       glue->index = cpu_rmap_add(rmap, glue);
+       rc = cpu_rmap_add(rmap, glue);
+       if (rc < 0)
+               goto err_add;
+
+       glue->index = rc;
        rc = irq_set_affinity_notifier(irq, &glue->notify);
-       if (rc) {
-               cpu_rmap_put(glue->rmap);
-               rmap->obj[glue->index] = NULL;
-               kfree(glue);
-       }
+       if (rc)
+               goto err_set;
+
+       return rc;
+
+err_set:
+       rmap->obj[glue->index] = NULL;
+err_add:
+       cpu_rmap_put(glue->rmap);
+       kfree(glue);
        return rc;
 }
 EXPORT_SYMBOL(irq_cpu_rmap_add);