dummy_read++;
}
-static inline void unmask_cic_irq(unsigned int irq)
+static void unmask_cic_irq(struct irq_data *d)
{
volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
int vpe;
* Make sure we have IRQ affinity. It may have changed while
* we were processing the IRQ.
*/
- if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity))
+ if (!cpumask_test_cpu(smp_processor_id(), d->affinity))
return;
#endif
vpe = get_current_vpe();
LOCK_VPE(flags, mtflags);
- cic_msk_reg[vpe] |= (1 << (irq - MSP_CIC_INTBASE));
+ cic_msk_reg[vpe] |= (1 << (d->irq - MSP_CIC_INTBASE));
UNLOCK_VPE(flags, mtflags);
cic_wmb();
}
-static inline void mask_cic_irq(unsigned int irq)
+static void mask_cic_irq(struct irq_data *d)
{
volatile u32 *cic_msk_reg = CIC_VPE0_MSK_REG;
int vpe = get_current_vpe();
unsigned long flags, mtflags;
#endif
LOCK_VPE(flags, mtflags);
- cic_msk_reg[vpe] &= ~(1 << (irq - MSP_CIC_INTBASE));
+ cic_msk_reg[vpe] &= ~(1 << (d->irq - MSP_CIC_INTBASE));
UNLOCK_VPE(flags, mtflags);
cic_wmb();
}
-static inline void msp_cic_irq_ack(unsigned int irq)
+static void msp_cic_irq_ack(struct irq_data *d)
{
- mask_cic_irq(irq);
+ mask_cic_irq(d);
/*
* Only really necessary for 18, 16-14 and sometimes 3:0
* (since these can be edge sensitive) but it doesn't
* hurt for the others
*/
- *CIC_STS_REG = (1 << (irq - MSP_CIC_INTBASE));
- smtc_im_ack_irq(irq);
-}
-
-static void msp_cic_irq_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- unmask_cic_irq(irq);
+ *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
+ smtc_im_ack_irq(d->irq);
}
/*Note: Limiting to VSMP . Not tested in SMTC */
#ifdef CONFIG_MIPS_MT_SMP
-static inline int msp_cic_irq_set_affinity(unsigned int irq,
- const struct cpumask *cpumask)
+static int msp_cic_irq_set_affinity(struct irq_data *d,
+ const struct cpumask *cpumask, bool force)
{
int cpu;
unsigned long flags;
static struct irq_chip msp_cic_irq_controller = {
.name = "MSP_CIC",
- .mask = mask_cic_irq,
- .mask_ack = msp_cic_irq_ack,
- .unmask = unmask_cic_irq,
- .ack = msp_cic_irq_ack,
- .end = msp_cic_irq_end,
+ .irq_mask = mask_cic_irq,
+ .irq_mask_ack = msp_cic_irq_ack,
+ .irq_unmask = unmask_cic_irq,
+ .irq_ack = msp_cic_irq_ack,
#ifdef CONFIG_MIPS_MT_SMP
- .set_affinity = msp_cic_irq_set_affinity,
+ .irq_set_affinity = msp_cic_irq_set_affinity,
#endif
};
do_IRQ(ffs(pending) + MSP_CIC_INTBASE - 1);
} else{
spurious_interrupt();
- /* Re-enable the CIC cascaded interrupt. */
- irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC);
}
}
dummy_read++;
}
-static inline void unmask_per_irq(unsigned int irq)
+static inline void unmask_per_irq(struct irq_data *d)
{
#ifdef CONFIG_SMP
unsigned long flags;
spin_lock_irqsave(&per_lock, flags);
- *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
+ *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
spin_unlock_irqrestore(&per_lock, flags);
#else
- *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
+ *PER_INT_MSK_REG |= (1 << (d->irq - MSP_PER_INTBASE));
#endif
per_wmb();
}
-static inline void mask_per_irq(unsigned int irq)
+static inline void mask_per_irq(struct irq_data *d)
{
#ifdef CONFIG_SMP
unsigned long flags;
spin_lock_irqsave(&per_lock, flags);
- *PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE));
+ *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
spin_unlock_irqrestore(&per_lock, flags);
#else
- *PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE));
+ *PER_INT_MSK_REG &= ~(1 << (d->irq - MSP_PER_INTBASE));
#endif
per_wmb();
}
-static inline void msp_per_irq_enable(unsigned int irq)
+static inline void msp_per_irq_ack(struct irq_data *d)
{
- unmask_per_irq(irq);
-}
-
-static inline void msp_per_irq_disable(unsigned int irq)
-{
- mask_per_irq(irq);
-}
-
-static unsigned int msp_per_irq_startup(unsigned int irq)
-{
- msp_per_irq_enable(irq);
- return 0;
-}
-
-#define msp_per_irq_shutdown msp_per_irq_disable
-
-static inline void msp_per_irq_ack(unsigned int irq)
-{
- mask_per_irq(irq);
+ mask_per_irq(d);
/*
* In the PER interrupt controller, only bits 11 and 10
* are write-to-clear, (SPI TX complete, SPI RX complete).
* It does nothing for any others.
*/
-
- *PER_INT_STS_REG = (1 << (irq - MSP_PER_INTBASE));
-
- /* Re-enable the CIC cascaded interrupt and return */
- irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC);
-}
-
-static void msp_per_irq_end(unsigned int irq)
-{
- if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
- unmask_per_irq(irq);
+ *PER_INT_STS_REG = (1 << (d->irq - MSP_PER_INTBASE));
}
#ifdef CONFIG_SMP
-static inline int msp_per_irq_set_affinity(unsigned int irq,
-	const struct cpumask *affinity)
+static int msp_per_irq_set_affinity(struct irq_data *d,
+	const struct cpumask *affinity, bool force)
{
-	unsigned long flags;
-	/*
-	 * Calls to ack, end, startup, enable are spinlocked in setup_irq and
-	 * __do_IRQ.Callers of this function do not spinlock,so we need to
-	 * do so ourselves.
-	 */
-	raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
-	msp_per_irq_enable(irq);
-	raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
+	/*
+	 * NOTE(review): the requested affinity mask ('affinity'/'force') is
+	 * ignored; this merely re-enables the interrupt on the current CPU
+	 * (unmask_per_irq takes per_lock itself, so no extra locking is
+	 * needed here). Presumably PER interrupts cannot actually be
+	 * steered per-CPU on this controller -- confirm that is intended.
+	 */
+	unmask_per_irq(d);
	return 0;
-
}
#endif
static struct irq_chip msp_per_irq_controller = {
	.name = "MSP_PER",
-	.startup = msp_per_irq_startup,
-	.shutdown = msp_per_irq_shutdown,
-	.enable = msp_per_irq_enable,
-	.disable = msp_per_irq_disable,
+	/* Was a '.' after unmask_per_irq -- must be ',' or the file
+	 * fails to compile once this patch is applied. */
+	.irq_enable = unmask_per_irq,
+	.irq_disable = mask_per_irq,
+	.irq_ack = msp_per_irq_ack,
#ifdef CONFIG_SMP
-	.set_affinity = msp_per_irq_set_affinity,
+	.irq_set_affinity = msp_per_irq_set_affinity,
#endif
-	.ack = msp_per_irq_ack,
-	.end = msp_per_irq_end,
};
void __init msp_per_irq_init(void)
*PER_INT_STS_REG = 0xFFFFFFFF;
/* initialize all the IRQ descriptors */
for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
- irq_desc[i].status = IRQ_DISABLED;
- irq_desc[i].action = NULL;
- irq_desc[i].depth = 1;
- irq_desc[i].chip = &msp_per_irq_controller;
+ irq_set_chip(i, &msp_per_irq_controller);
#ifdef CONFIG_MIPS_MT_SMTC
irq_hwmask[i] = C_IRQ4;
#endif
do_IRQ(ffs(pending) + MSP_PER_INTBASE - 1);
} else {
spurious_interrupt();
- /* Re-enable the CIC cascaded interrupt and return */
- irq_desc[MSP_INT_CIC].chip->end(MSP_INT_CIC);
}
}
#include <msp_slp_int.h>
#include <msp_regs.h>
-static inline void unmask_msp_slp_irq(unsigned int irq)
+static inline void unmask_msp_slp_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
/* check for PER interrupt range */
if (irq < MSP_PER_INTBASE)
*SLP_INT_MSK_REG |= (1 << (irq - MSP_SLP_INTBASE));
*PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE));
}
-static inline void mask_msp_slp_irq(unsigned int irq)
+static inline void mask_msp_slp_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
/* check for PER interrupt range */
if (irq < MSP_PER_INTBASE)
*SLP_INT_MSK_REG &= ~(1 << (irq - MSP_SLP_INTBASE));
* While we ack the interrupt interrupts are disabled and thus we don't need
* to deal with concurrency issues. Same for msp_slp_irq_end.
*/
-static inline void ack_msp_slp_irq(unsigned int irq)
+static inline void ack_msp_slp_irq(struct irq_data *d)
{
+ unsigned int irq = d->irq;
+
/* check for PER interrupt range */
if (irq < MSP_PER_INTBASE)
*SLP_INT_STS_REG = (1 << (irq - MSP_SLP_INTBASE));
static struct irq_chip msp_slp_irq_controller = {
.name = "MSP_SLP",
- .ack = ack_msp_slp_irq,
- .mask = mask_msp_slp_irq,
- .unmask = unmask_msp_slp_irq,
+ .irq_ack = ack_msp_slp_irq,
+ .irq_mask = mask_msp_slp_irq,
+ .irq_unmask = unmask_msp_slp_irq,
};
void __init msp_slp_irq_init(void)