void (*irq_bus_lock)(struct irq_data *data);
void (*irq_bus_sync_unlock)(struct irq_data *data);
+#ifdef CONFIG_IPIPE
+ void (*irq_move)(struct irq_data *data);
+ void (*irq_hold)(struct irq_data *data);
+ void (*irq_release)(struct irq_data *data);
+#endif /* CONFIG_IPIPE */
void (*irq_cpu_online)(struct irq_data *data);
void (*irq_cpu_offline)(struct irq_data *data);
* IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
* IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
* IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_PIPELINE_SAFE: Chip can work in pipelined mode
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
IRQCHIP_SKIP_SET_WAKE = (1 << 4),
IRQCHIP_ONESHOT_SAFE = (1 << 5),
IRQCHIP_EOI_THREADED = (1 << 6),
+ IRQCHIP_PIPELINE_SAFE = (1 << 7),
};
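
With the callbacks and the flag in place, a driver opts into pipelining by implementing irq_hold()/irq_release() and advertising IRQCHIP_PIPELINE_SAFE. Below is a minimal sketch for a hypothetical fasteoi-style chip; the foo_* names stand in for a driver's existing callbacks and are invented here. Holding quiesces the line until the root domain has run the handler and releasing re-enables it, which matches how __ipipe_ack_fasteoi_irq() and __ipipe_end_fasteoi_irq() drive these hooks further down in the patch:

#ifdef CONFIG_IPIPE
static void foo_irq_hold(struct irq_data *d)
{
	/* Quiesce the line until the root domain handles the IRQ. */
	foo_irq_mask(d);
	foo_irq_eoi(d);
}

static void foo_irq_release(struct irq_data *d)
{
	/* The root domain is done: let the line fire again. */
	foo_irq_unmask(d);
}
#endif

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	.irq_eoi	= foo_irq_eoi,
#ifdef CONFIG_IPIPE
	.irq_hold	= foo_irq_hold,
	.irq_release	= foo_irq_release,
#endif
	.flags		= IRQCHIP_PIPELINE_SAFE,
};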
#include <linux/irqdesc.h>
extern void irq_chip_mask_parent(struct irq_data *data);
extern void irq_chip_unmask_parent(struct irq_data *data);
extern void irq_chip_eoi_parent(struct irq_data *data);
+#ifdef CONFIG_IPIPE
+extern void irq_chip_hold_parent(struct irq_data *data);
+extern void irq_chip_release_parent(struct irq_data *data);
+#endif
+
extern int irq_chip_set_affinity_parent(struct irq_data *data,
const struct cpumask *dest,
bool force);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
struct msi_desc *entry);
-extern struct irq_data *irq_get_irq_data(unsigned int irq);
+
+static inline struct irq_data *irq_get_irq_data(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ return desc ? &desc->irq_data : NULL;
+}
static inline struct irq_chip *irq_get_chip(unsigned int irq)
{
* different flow mechanisms (level/edge) for it.
*/
struct irq_chip_generic {
+#ifdef CONFIG_IPIPE
+ ipipe_spinlock_t lock;
+#else
raw_spinlock_t lock;
+#endif
void __iomem *reg_base;
u32 (*reg_readl)(void __iomem *addr);
void (*reg_writel)(u32 val, void __iomem *addr);
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
#ifdef CONFIG_SMP
-static inline void irq_gc_lock(struct irq_chip_generic *gc)
+static inline unsigned long irq_gc_lock(struct irq_chip_generic *gc)
{
- raw_spin_lock(&gc->lock);
+ unsigned long flags = 0;
+ raw_spin_lock_irqsave_cond(&gc->lock, flags);
+ return flags;
}
-static inline void irq_gc_unlock(struct irq_chip_generic *gc)
+static inline void
+irq_gc_unlock(struct irq_chip_generic *gc, unsigned long flags)
{
- raw_spin_unlock(&gc->lock);
+ raw_spin_unlock_irqrestore_cond(&gc->lock, flags);
}
#else
-static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
-static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
+static inline unsigned long irq_gc_lock(struct irq_chip_generic *gc)
+{
+ return hard_cond_local_irq_save();
+}
+static inline void
+irq_gc_unlock(struct irq_chip_generic *gc, unsigned long flags)
+{
+ hard_cond_local_irq_restore(flags);
+}
#endif
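
Note the signature change: irq_gc_lock() now returns the saved interrupt state and irq_gc_unlock() takes it back, so every caller has to be converted in lockstep. The generic-chip.c hunks below all follow the same pattern:

	unsigned long flags;

	flags = irq_gc_lock(gc);
	irq_reg_writel(gc, mask, ct->regs.mask);
	irq_gc_unlock(gc, flags);

On !CONFIG_IPIPE builds the _cond variants and hard_cond_local_irq_save()/restore() are expected to keep the original cost (plain spinlock on SMP, no-op on UP), so non-pipelined kernels should be unaffected.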
/*
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
+#include <linux/ipipe.h>
#include <trace/events/irq.h>
if (!chip)
chip = &no_irq_chip;
+ else
+ WARN_ONCE(IS_ENABLED(CONFIG_IPIPE) &&
+ (chip->flags & IRQCHIP_PIPELINE_SAFE) == 0,
+ "irqchip %s is not pipeline-safe!", chip->name);
desc->irq_data.chip = chip;
irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL(irq_set_chip_data);
-struct irq_data *irq_get_irq_data(unsigned int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- return desc ? &desc->irq_data : NULL;
-}
-EXPORT_SYMBOL_GPL(irq_get_irq_data);
-
static void irq_state_clr_disabled(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
irq_domain_activate_irq(d);
if (d->chip->irq_startup) {
+ unsigned long flags = hard_cond_local_irq_save();
ret = d->chip->irq_startup(d);
irq_state_clr_disabled(desc);
irq_state_clr_masked(desc);
+ hard_cond_local_irq_restore(flags);
+#ifdef CONFIG_IPIPE
+ desc->istate &= ~IPIPE_IRQS_NEEDS_STARTUP;
+#endif
} else {
irq_enable(desc);
}
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
irq_state_set_disabled(desc);
irq_state_set_masked(desc);
+#ifdef CONFIG_IPIPE
+ desc->istate |= IPIPE_IRQS_NEEDS_STARTUP;
+#endif
} else {
__irq_disable(desc, true);
}
void irq_enable(struct irq_desc *desc)
{
+ unsigned long flags = hard_cond_local_irq_save();
+
if (!irqd_irq_disabled(&desc->irq_data)) {
unmask_irq(desc);
} else {
unmask_irq(desc);
}
}
+
+ hard_cond_local_irq_restore(flags);
}
static void __irq_disable(struct irq_desc *desc, bool mask)
{
+ unsigned long flags = hard_cond_local_irq_save();
+
if (irqd_irq_disabled(&desc->irq_data)) {
if (mask)
mask_irq(desc);
mask_irq(desc);
}
}
+
+ hard_cond_local_irq_restore(flags);
}
/**
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
+ unsigned long flags = hard_cond_local_irq_save();
if (desc->irq_data.chip->irq_enable)
desc->irq_data.chip->irq_enable(&desc->irq_data);
else
desc->irq_data.chip->irq_unmask(&desc->irq_data);
cpumask_set_cpu(cpu, desc->percpu_enabled);
+ hard_cond_local_irq_restore(flags);
}
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
void unmask_irq(struct irq_desc *desc)
{
+ unsigned long flags;
+
if (!irqd_irq_masked(&desc->irq_data))
return;
if (desc->irq_data.chip->irq_unmask) {
+ flags = hard_cond_local_irq_save();
desc->irq_data.chip->irq_unmask(&desc->irq_data);
irq_state_clr_masked(desc);
+ hard_cond_local_irq_restore(flags);
}
}
void handle_level_irq(struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
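+	/*
+	 * When pipelining, mask_ack_irq() already ran from
+	 * __ipipe_ack_level_irq() as the head stage accepted the
+	 * interrupt, so it must not run again here.
+	 */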
+#ifndef CONFIG_IPIPE
mask_ack_irq(desc);
+#endif
if (!irq_may_run(desc))
goto out_unlock;
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
-static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
+#ifdef CONFIG_IPIPE
+static void cond_release_fasteoi_irq(struct irq_desc *desc,
+ struct irq_chip *chip)
+{
+ if (chip->irq_release &&
+ !irqd_irq_disabled(&desc->irq_data) && !desc->threads_oneshot)
+ chip->irq_release(&desc->irq_data);
+}
+#else
+static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
if (!(desc->istate & IRQS_ONESHOT)) {
chip->irq_eoi(&desc->irq_data);
chip->irq_eoi(&desc->irq_data);
}
}
+#endif /* !CONFIG_IPIPE */
/**
* handle_fasteoi_irq - irq handler for transparent controllers
}
kstat_incr_irqs_this_cpu(desc);
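+	/*
+	 * When pipelining, the head stage already quiesced the line
+	 * via chip->irq_hold() in __ipipe_ack_fasteoi_irq(), which
+	 * supersedes the one-shot masking below.
+	 */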
+#ifndef CONFIG_IPIPE
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
+#endif
preflow_handler(desc);
handle_irq_event(desc);
+#ifdef CONFIG_IPIPE
+ /*
+ * IRQCHIP_EOI_IF_HANDLED is ignored as the I-pipe always
+ * sends EOI.
+ */
+ cond_release_fasteoi_irq(desc, chip);
+#else /* !CONFIG_IPIPE */
cond_unmask_eoi_irq(desc, chip);
+#endif /* !CONFIG_IPIPE */
raw_spin_unlock(&desc->lock);
return;
kstat_incr_irqs_this_cpu(desc);
/* Start handling the irq */
+#ifndef CONFIG_IPIPE
desc->irq_data.chip->irq_ack(&desc->irq_data);
+#endif
do {
if (unlikely(!desc->action)) {
kstat_incr_irqs_this_cpu(desc);
+#ifdef CONFIG_IPIPE
+ (void)chip;
+ handle_irq_event_percpu(desc);
+ desc->ipipe_end(desc);
+#else
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
+#endif
}
/**
kstat_incr_irqs_this_cpu(desc);
+#ifndef CONFIG_IPIPE
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
+#endif
if (likely(action)) {
trace_irq_handler_entry(irq, action);
res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
trace_irq_handler_exit(irq, action, res);
+#ifdef CONFIG_IPIPE
+ (void)chip;
+ desc->ipipe_end(desc);
+ return;
+#endif
} else {
unsigned int cpu = smp_processor_id();
bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
chip->irq_eoi(&desc->irq_data);
}
+#ifdef CONFIG_IPIPE
+
+void __ipipe_ack_level_irq(struct irq_desc *desc)
+{
+ mask_ack_irq(desc);
+}
+
+void __ipipe_end_level_irq(struct irq_desc *desc)
+{
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
+}
+
+void __ipipe_ack_fasteoi_irq(struct irq_desc *desc)
+{
+ desc->irq_data.chip->irq_hold(&desc->irq_data);
+}
+
+void __ipipe_end_fasteoi_irq(struct irq_desc *desc)
+{
+ if (desc->irq_data.chip->irq_release)
+ desc->irq_data.chip->irq_release(&desc->irq_data);
+}
+
+void __ipipe_ack_edge_irq(struct irq_desc *desc)
+{
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+}
+
+void __ipipe_ack_percpu_irq(struct irq_desc *desc)
+{
+ if (desc->irq_data.chip->irq_ack)
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+
+ if (desc->irq_data.chip->irq_eoi)
+ desc->irq_data.chip->irq_eoi(&desc->irq_data);
+}
+
+void __ipipe_nop_irq(struct irq_desc *desc)
+{
+}
+
+void __ipipe_chained_irq(struct irq_desc *desc)
+{
+ /*
+ * XXX: Do NOT fold this into __ipipe_nop_irq(), see
+ * ipipe_chained_irq_p().
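+	 * That predicate presumably tells chained interrupts apart by
+	 * comparing the installed flow handler against
+	 * __ipipe_chained_irq, so the two nop handlers must keep
+	 * distinct addresses.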
+ */
+}
+
+static void __ipipe_ack_bad_irq(struct irq_desc *desc)
+{
+ handle_bad_irq(desc);
+ WARN_ON_ONCE(1);
+}
+
+irq_flow_handler_t
+__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained)
+{
+ if (unlikely(handle == NULL)) {
+ desc->ipipe_ack = __ipipe_ack_bad_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ } else {
+ if (is_chained) {
+ desc->ipipe_ack = handle;
+ desc->ipipe_end = __ipipe_nop_irq;
+ handle = __ipipe_chained_irq;
+ } else if (handle == handle_simple_irq) {
+ desc->ipipe_ack = __ipipe_nop_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ } else if (handle == handle_level_irq) {
+ desc->ipipe_ack = __ipipe_ack_level_irq;
+ desc->ipipe_end = __ipipe_end_level_irq;
+ } else if (handle == handle_edge_irq) {
+ desc->ipipe_ack = __ipipe_ack_edge_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ } else if (handle == handle_fasteoi_irq) {
+ desc->ipipe_ack = __ipipe_ack_fasteoi_irq;
+ desc->ipipe_end = __ipipe_end_fasteoi_irq;
+ } else if (handle == handle_percpu_irq ||
+ handle == handle_percpu_devid_irq) {
+ if (irq_desc_get_chip(desc) &&
+ irq_desc_get_chip(desc)->irq_hold) {
+ desc->ipipe_ack = __ipipe_ack_fasteoi_irq;
+ desc->ipipe_end = __ipipe_end_fasteoi_irq;
+ } else {
+ desc->ipipe_ack = __ipipe_ack_percpu_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ }
+ } else if (irq_desc_get_chip(desc) == &no_irq_chip) {
+ desc->ipipe_ack = __ipipe_nop_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ } else {
+ desc->ipipe_ack = __ipipe_ack_bad_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ }
+ }
+
+ /* Suppress intermediate trampoline routine. */
+ ipipe_root_domain->irqs[desc->irq_data.irq].ackfn = desc->ipipe_ack;
+
+ return handle;
+}
+
+void ipipe_enable_irq(unsigned int irq)
+{
+ struct irq_desc *desc;
+ struct irq_chip *chip;
+ unsigned long flags;
+
+ desc = irq_to_desc(irq);
+ if (desc == NULL)
+ return;
+
+ chip = irq_desc_get_chip(desc);
+
+ if (chip->irq_startup && (desc->istate & IPIPE_IRQS_NEEDS_STARTUP)) {
+
+ ipipe_root_only();
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (desc->istate & IPIPE_IRQS_NEEDS_STARTUP) {
+ desc->istate &= ~IPIPE_IRQS_NEEDS_STARTUP;
+ chip->irq_startup(&desc->irq_data);
+ }
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ return;
+ }
+
+ if (WARN_ON_ONCE(chip->irq_enable == NULL && chip->irq_unmask == NULL))
+ return;
+
+ if (chip->irq_enable)
+ chip->irq_enable(&desc->irq_data);
+ else
+ chip->irq_unmask(&desc->irq_data);
+}
+EXPORT_SYMBOL_GPL(ipipe_enable_irq);
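+
+/*
+ * Sketch of typical client usage from the root domain (assumes the
+ * usual I-pipe request API, which is not part of this patch):
+ *
+ *	ret = ipipe_request_irq(ipipe_head_domain, irq,
+ *				rt_handler, cookie, NULL);
+ *	if (!ret)
+ *		ipipe_enable_irq(irq);
+ */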
+
+#else /* !CONFIG_IPIPE */
+
+irq_flow_handler_t
+__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained)
+{
+ return handle;
+}
+
+#endif /* !CONFIG_IPIPE */
+EXPORT_SYMBOL_GPL(__fixup_irq_handler);
+
static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
int is_chained, const char *name)
return;
}
+ handle = __fixup_irq_handler(desc, handle, is_chained);
+
/* Uninstall? */
if (handle == handle_bad_irq) {
if (desc->irq_data.chip != &no_irq_chip)
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
+#ifdef CONFIG_IPIPE
+void irq_chip_hold_parent(struct irq_data *data)
+{
+ data = data->parent_data;
+ data->chip->irq_hold(data);
+}
+
+void irq_chip_release_parent(struct irq_data *data)
+{
+ data = data->parent_data;
+ data->chip->irq_release(data);
+}
+#endif
+
/**
* irq_chip_unmask_parent - Unmask the parent interrupt
* @data: Pointer to interrupt specific data
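
Hierarchical irqchip drivers forward the new callbacks to their parent domain with the helpers above, in the same style as the existing *_parent plumbing; the parent chip must itself implement irq_hold()/irq_release(). A hypothetical stacked chip (bar_* is invented for illustration):

static struct irq_chip bar_msi_chip = {
	.name		= "BAR-MSI",
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
	.irq_eoi	= irq_chip_eoi_parent,
#ifdef CONFIG_IPIPE
	.irq_hold	= irq_chip_hold_parent,
	.irq_release	= irq_chip_release_parent,
#endif
	.flags		= IRQCHIP_PIPELINE_SAFE,
};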
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.disable);
*ct->mask_cache &= ~mask;
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
/**
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
*ct->mask_cache |= mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.enable);
*ct->mask_cache |= mask;
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
/**
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.ack);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = ~d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.ack);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
/**
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.disable);
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, mask, ct->regs.ack);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
/**
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.eoi);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
/**
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
u32 mask = d->mask;
if (!(mask & gc->wake_enabled))
return -EINVAL;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
if (on)
gc->wake_active |= mask;
else
gc->wake_active &= ~mask;
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
return 0;
}