irq/cpumask: make memoryless node zero happy
author	Yinghai Lu <yinghai@kernel.org>
Fri, 29 May 2009 01:14:40 +0000 (18:14 -0700)
committer	Pekka Enberg <penberg@cs.helsinki.fi>
Thu, 11 Jun 2009 16:27:08 +0000 (19:27 +0300)
Don't hardcode node zero for early boot IRQ setup memory allocations: use the boot CPU's node for the x86 irq_cfg cpumasks and first_online_node for the legacy irq_desc allocations, so machines whose node 0 is memoryless get these allocations from a node that is actually online.
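
For illustration only (not part of the patch): the node-aware cpumask allocator takes an explicit NUMA node, so the storage is requested near the boot CPU instead of being implicitly tied to node 0.  In the sketch below, boot_cpu_id, GFP_NOWAIT and the struct irq_cfg fields come from the diff, while the helper name early_alloc_cfg_masks() is hypothetical:

	/* Sketch: allocate the irq_cfg cpumasks on the boot CPU's node. */
	static void __init early_alloc_cfg_masks(struct irq_cfg *cfg)
	{
		int node = cpu_to_node(boot_cpu_id);	/* may differ from 0 */

		/* node-aware; the allocator may still fall back to other nodes */
		alloc_cpumask_var_node(&cfg->domain, GFP_NOWAIT, node);
		alloc_cpumask_var_node(&cfg->old_domain, GFP_NOWAIT, node);
	}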

[ penberg@cs.helsinki.fi: minor cleanups ]
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
arch/x86/kernel/apic/io_apic.c
kernel/irq/handle.c

diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 139201a..94605e7 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -177,16 +177,18 @@ int __init arch_early_irq_init(void)
        struct irq_cfg *cfg;
        struct irq_desc *desc;
        int count;
+       int node;
        int i;
 
        cfg = irq_cfgx;
        count = ARRAY_SIZE(irq_cfgx);
+       node = cpu_to_node(boot_cpu_id);
 
        for (i = 0; i < count; i++) {
                desc = irq_to_desc(i);
                desc->chip_data = &cfg[i];
-               alloc_cpumask_var(&cfg[i].domain, GFP_NOWAIT);
-               alloc_cpumask_var(&cfg[i].old_domain, GFP_NOWAIT);
+               alloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
+               alloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
                if (i < NR_IRQS_LEGACY)
                        cpumask_setall(cfg[i].domain);
        }
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index a600184..e161999 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -150,6 +150,7 @@ int __init early_irq_init(void)
 {
        struct irq_desc *desc;
        int legacy_count;
+       int node;
        int i;
 
        init_irq_default_affinity();
@@ -160,20 +161,20 @@ int __init early_irq_init(void)
 
        desc = irq_desc_legacy;
        legacy_count = ARRAY_SIZE(irq_desc_legacy);
+       node = first_online_node;
 
        /* allocate irq_desc_ptrs array based on nr_irqs */
        irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
 
        /* allocate based on nr_cpu_ids */
-       /* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
-       kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
-                                         sizeof(int));
+       kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
+                                         sizeof(int), GFP_NOWAIT, node);
 
        for (i = 0; i < legacy_count; i++) {
                desc[i].irq = i;
                desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
                lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-               alloc_desc_masks(&desc[i], 0, true);
+               alloc_desc_masks(&desc[i], node, true);
                init_desc_masks(&desc[i]);
                irq_desc_ptrs[i] = desc + i;
        }
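
Note (not part of the patch): the kernel/irq/handle.c hunk also switches kstat_irqs_legacy from alloc_bootmem() to kzalloc_node() with GFP_NOWAIT, i.e. a zeroed slab allocation targeted at first_online_node rather than an implicit node 0.  A minimal sketch of the same idiom, assuming NR_IRQS_LEGACY and nr_cpu_ids are in scope and using the hypothetical helper name alloc_legacy_kstat_irqs():

	/* Sketch: node-targeted, zeroed early-boot allocation of the legacy IRQ counters. */
	static unsigned int *alloc_legacy_kstat_irqs(void)
	{
		int node = first_online_node;	/* first node marked online */

		return kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * sizeof(unsigned int),
				    GFP_NOWAIT, node);
	}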