irq/matrix: Split out the CPU selection code into a helper
author	Dou Liyang <douly.fnst@cn.fujitsu.com>
	Sat, 8 Sep 2018 17:58:37 +0000 (01:58 +0800)
committer	Thomas Gleixner <tglx@linutronix.de>
	Tue, 18 Sep 2018 16:27:24 +0000 (18:27 +0200)
Linux finds the CPU which has the lowest vector allocation count to spread
out the non-managed interrupts across the possible target CPUs, but does
not do so for managed interrupts.

Split out the CPU selection code into a helper function for reuse. No
functional change.

Signed-off-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180908175838.14450-1-dou_liyang@163.com
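
For illustration only, a minimal user-space sketch of the selection logic that
the patch factors into matrix_find_best_cpu(): prefer the online CPU with the
most free vectors, i.e. the lowest allocation count, and report failure when no
CPU qualifies. struct fake_cpumap, NR_FAKE_CPUS and find_best_cpu() are
simplified stand-ins, not the kernel's struct cpumap or the code in the diff
below.

	/*
	 * Stand-alone sketch of the "find the best CPU" loop.  Builds and
	 * runs in user space; the data layout is deliberately simplified.
	 */
	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>

	#define NR_FAKE_CPUS	4

	struct fake_cpumap {
		bool		online;
		unsigned int	available;	/* free vectors on this CPU */
	};

	static unsigned int find_best_cpu(const struct fake_cpumap *maps,
					  unsigned int nr_cpus)
	{
		unsigned int cpu, best_cpu = UINT_MAX, maxavl = 0;

		for (cpu = 0; cpu < nr_cpus; cpu++) {
			/* Skip offline CPUs and CPUs that are not strictly better */
			if (!maps[cpu].online || maps[cpu].available <= maxavl)
				continue;

			best_cpu = cpu;
			maxavl = maps[cpu].available;
		}
		return best_cpu;		/* UINT_MAX: no suitable CPU found */
	}

	int main(void)
	{
		struct fake_cpumap maps[NR_FAKE_CPUS] = {
			{ .online = true,  .available = 10 },
			{ .online = false, .available = 50 },	/* offline, ignored */
			{ .online = true,  .available = 42 },
			{ .online = true,  .available = 42 },	/* tie, first one wins */
		};
		unsigned int best = find_best_cpu(maps, NR_FAKE_CPUS);

		if (best == UINT_MAX)
			printf("no CPU available (-ENOSPC in the kernel)\n");
		else
			printf("best CPU: %u\n", best);	/* prints "best CPU: 2" */
		return 0;
	}
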
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5092494..67768bb 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
        return area;
 }
 
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+                                       const struct cpumask *msk)
+{
+       unsigned int cpu, best_cpu, maxavl = 0;
+       struct cpumap *cm;
+
+       best_cpu = UINT_MAX;
+
+       for_each_cpu(cpu, msk) {
+               cm = per_cpu_ptr(m->maps, cpu);
+
+               if (!cm->online || cm->available <= maxavl)
+                       continue;
+
+               best_cpu = cpu;
+               maxavl = cm->available;
+       }
+       return best_cpu;
+}
+
 /**
  * irq_matrix_assign_system - Assign system wide entry in the matrix
  * @m:         Matrix pointer
@@ -322,37 +343,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
                     bool reserved, unsigned int *mapped_cpu)
 {
-       unsigned int cpu, best_cpu, maxavl = 0;
+       unsigned int cpu, bit;
        struct cpumap *cm;
-       unsigned int bit;
 
-       best_cpu = UINT_MAX;
-       for_each_cpu(cpu, msk) {
-               cm = per_cpu_ptr(m->maps, cpu);
-
-               if (!cm->online || cm->available <= maxavl)
-                       continue;
+       cpu = matrix_find_best_cpu(m, msk);
+       if (cpu == UINT_MAX)
+               return -ENOSPC;
 
-               best_cpu = cpu;
-               maxavl = cm->available;
-       }
+       cm = per_cpu_ptr(m->maps, cpu);
+       bit = matrix_alloc_area(m, cm, 1, false);
+       if (bit >= m->alloc_end)
+               return -ENOSPC;
+       cm->allocated++;
+       cm->available--;
+       m->total_allocated++;
+       m->global_available--;
+       if (reserved)
+               m->global_reserved--;
+       *mapped_cpu = cpu;
+       trace_irq_matrix_alloc(bit, cpu, m, cm);
+       return bit;
 
-       if (maxavl) {
-               cm = per_cpu_ptr(m->maps, best_cpu);
-               bit = matrix_alloc_area(m, cm, 1, false);
-               if (bit < m->alloc_end) {
-                       cm->allocated++;
-                       cm->available--;
-                       m->total_allocated++;
-                       m->global_available--;
-                       if (reserved)
-                               m->global_reserved--;
-                       *mapped_cpu = best_cpu;
-                       trace_irq_matrix_alloc(bit, best_cpu, m, cm);
-                       return bit;
-               }
-       }
-       return -ENOSPC;
 }
 
 /**
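
For context, a hypothetical caller sketch (not part of this patch; everything
except irq_matrix_alloc() itself is illustrative): the contract visible in the
hunk above is that irq_matrix_alloc() returns either the allocated bit or
-ENOSPC, and stores the selected CPU in *mapped_cpu.

	/* Hypothetical caller, kernel context assumed; m and msk as above */
	unsigned int cpu;
	int bit;

	bit = irq_matrix_alloc(m, msk, false, &cpu);
	if (bit < 0)
		return bit;	/* -ENOSPC: no online CPU had a free entry */
	/* bit is the allocated entry, cpu is the CPU it was taken from */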