#include <linux/atomic.h>
-/* Define a way to iterate across irqs. */
-#define for_each_irq(i) \
- for ((i) = 0; (i) < NR_IRQS; ++(i))
-
extern atomic_t ppc_n_lost_interrupts;
/* This number is used when no interrupt has been assigned */
alloc_cpumask_var(&mask, GFP_KERNEL);
- for_each_irq(irq) {
+ for_each_irq_desc(irq, desc) {
struct irq_data *data;
struct irq_chip *chip;
- desc = irq_to_desc(irq);
- if (!desc)
- continue;
-
data = irq_desc_get_irq_data(desc);
if (irqd_is_per_cpu(data))
continue;
void machine_kexec_mask_interrupts(void) {
unsigned int i;
+ struct irq_desc *desc;
- for_each_irq(i) {
- struct irq_desc *desc = irq_to_desc(i);
+ for_each_irq_desc(i, desc) {
struct irq_chip *chip;
- if (!desc)
- continue;
-
chip = irq_desc_get_chip(desc);
if (!chip)
continue;
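
For reference, a minimal sketch (kernel C, assuming CONFIG_SPARSE_IRQ and the generic irq headers) of the iterator these two hunks switch to; for_each_irq_desc() only visits allocated descriptors, so the old irq_to_desc()/NULL-check boilerplate goes away. The helper name below is illustrative, not part of the patch:

#include <linux/irq.h>
#include <linux/irqdesc.h>

static void example_walk_irq_descs(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		/* desc is never NULL here; skip per-CPU interrupts */
		struct irq_data *data = irq_desc_get_irq_data(desc);

		if (irqd_is_per_cpu(data))
			continue;

		/* ... operate on the remaining interrupts ... */
	}
}
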
pr_devel("axon_msi: woff %x roff %x msi %x\n",
write_offset, msic->read_offset, msi);
- if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) {
+ if (msi < nr_irqs && irq_get_chip_data(msi) == msic) {
generic_handle_irq(msi);
msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
} else {
if (rc)
return rc;
- /* We rely on being able to stash a virq in a u16 */
- BUILD_BUG_ON(NR_IRQS > 65536);
-
list_for_each_entry(entry, &dev->msi_list, list) {
virq = irq_create_direct_mapping(msic->irq_domain);
if (virq == NO_IRQ) {
}
memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
- msic->irq_domain = irq_domain_add_nomap(dn, 0, &msic_host_ops, msic);
+ /* We rely on being able to stash a virq in a u16, so limit irqs to < 65536 */
+ msic->irq_domain = irq_domain_add_nomap(dn, 65536, &msic_host_ops, msic);
if (!msic->irq_domain) {
printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
dn->full_name);
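
As a hedged sketch of the call above: irq_domain_add_nomap() takes the maximum number of interrupts the domain may hand out as its second argument, so passing 65536 enforces the "virq fits in a u16" assumption at the domain level instead of via BUILD_BUG_ON(NR_IRQS). The ops structure and function name below are placeholders:

#include <linux/irqdomain.h>
#include <linux/of.h>

static const struct irq_domain_ops example_host_ops;	/* placeholder ops */

static struct irq_domain *example_add_domain(struct device_node *dn, void *priv)
{
	/* Cap the domain at 65536 entries so every virq can be stashed in a u16. */
	return irq_domain_add_nomap(dn, 65536, &example_host_ops, priv);
}
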
{
int i;
- for (i = 1; i < NR_IRQS; i++)
+ for (i = 1; i < nr_irqs; i++)
beat_destruct_irq_plug(i);
}
static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+/* The max irq number this driver deals with is 128; see max_irqs */
+static DECLARE_BITMAP(ppc_lost_interrupts, 128);
+static DECLARE_BITMAP(ppc_cached_irq_mask, 128);
static int pmac_irq_cascade = -1;
static struct irq_domain *pmac_pic_host;
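
A short sketch of the DECLARE_BITMAP() pattern adopted here: the bitmaps are sized by the driver's own limit (128 sources, matching max_irqs) rather than NR_IRQS, and are updated with the standard bitops. Names are illustrative:

#include <linux/bitops.h>
#include <linux/types.h>

#define EXAMPLE_MAX_IRQS 128	/* the driver's own limit, not NR_IRQS */

static DECLARE_BITMAP(example_lost_interrupts, EXAMPLE_MAX_IRQS);

static void example_mark_lost(unsigned int src)
{
	if (src < EXAMPLE_MAX_IRQS)
		__set_bit(src, example_lost_interrupts);
}

static bool example_was_lost(unsigned int src)
{
	return src < EXAMPLE_MAX_IRQS && test_bit(src, example_lost_interrupts);
}
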
static intctl_cpm2_t __iomem *cpm2_intctl;
static struct irq_domain *cpm2_pic_host;
-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+static unsigned long ppc_cached_irq_mask[2]; /* 2 32-bit registers */
static const u_char irq_to_siureg[] = {
1, 1, 1, 1, 1, 1, 1, 1,
{
int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id();
unsigned int irq, virq;
+ struct irq_desc *desc;
/* If we used to be the default server, move to the new "boot_cpuid" */
if (hw_cpu == xics_default_server)
/* Allow IPIs again... */
icp_ops->set_priority(DEFAULT_PRIORITY);
- for_each_irq(virq) {
- struct irq_desc *desc;
+ for_each_irq_desc(virq, desc) {
struct irq_chip *chip;
long server;
unsigned long flags;
/* We can't set affinity on ISA interrupts */
if (virq < NUM_ISA_INTERRUPTS)
continue;
- desc = irq_to_desc(virq);
/* We only need to migrate enabled IRQS */
- if (!desc || !desc->action)
+ if (!desc->action)
continue;
if (desc->irq_data.domain != xics_host)
continue;
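
Finally, a speculative sketch of how a loop like this typically inspects each descriptor under its lock before deciding to retarget it; the rest of the xics loop is not shown above, so this illustrates the general pattern only:

#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/spinlock.h>

static void example_inspect_desc(struct irq_desc *desc)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->action) {
		/* only enabled IRQs are of interest; read desc->irq_data here */
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}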