CONFIG_IRQ_PORTH_INTA=11
CONFIG_IRQ_MAC_TX=11
CONFIG_IRQ_PORTH_INTB=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
CONFIG_IRQ_PORTG_INTA=12
CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_PORTH_INTA=11
CONFIG_IRQ_MAC_TX=11
CONFIG_IRQ_PORTH_INTB=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
CONFIG_IRQ_PORTG_INTA=12
CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_PORTH_INTA=11
CONFIG_IRQ_MAC_TX=11
CONFIG_IRQ_PORTH_INTB=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=8
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
CONFIG_IRQ_PORTG_INTA=12
CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_UART1_TX=10
CONFIG_IRQ_MAC_RX=11
CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=8
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_MEM_DMA1=13
CONFIG_IRQ_UART0_TX=10
CONFIG_IRQ_UART1_RX=10
CONFIG_IRQ_UART1_TX=10
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
CONFIG_IRQ_WATCH=13
CONFIG_IRQ_PORTF_INTA=12
CONFIG_IRQ_PORTF_INTB=12
CONFIG_IRQ_PORTH_INTA=11
CONFIG_IRQ_MAC_TX=11
CONFIG_IRQ_PORTH_INTB=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
CONFIG_IRQ_PORTG_INTA=12
CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_UART1_TX=10
CONFIG_IRQ_MAC_RX=11
CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_MEM_DMA1=13
CONFIG_IRQ_UART1_TX=10
CONFIG_IRQ_MAC_RX=11
CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_MEM_DMA1=13
CONFIG_IRQ_UART1_TX=10
CONFIG_IRQ_MAC_RX=11
CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_MEM_DMA1=13
CONFIG_IRQ_UART1_TX=10
CONFIG_IRQ_MAC_RX=11
CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_MEM_DMA1=13
CONFIG_IRQ_UART1_TX=10
CONFIG_IRQ_MAC_RX=11
CONFIG_IRQ_MAC_TX=11
-CONFIG_IRQ_TMR0=12
-CONFIG_IRQ_TMR1=12
-CONFIG_IRQ_TMR2=12
-CONFIG_IRQ_TMR3=12
-CONFIG_IRQ_TMR4=12
-CONFIG_IRQ_TMR5=12
-CONFIG_IRQ_TMR6=12
-CONFIG_IRQ_TMR7=12
+CONFIG_IRQ_TIMER0=12
+CONFIG_IRQ_TIMER1=12
+CONFIG_IRQ_TIMER2=12
+CONFIG_IRQ_TIMER3=12
+CONFIG_IRQ_TIMER4=12
+CONFIG_IRQ_TIMER5=12
+CONFIG_IRQ_TIMER6=12
+CONFIG_IRQ_TIMER7=12
CONFIG_IRQ_PORTG_INTB=12
CONFIG_IRQ_MEM_DMA0=13
CONFIG_IRQ_MEM_DMA1=13
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter += i;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
static inline void atomic_sub(int i, atomic_t *v)
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter -= i;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
int __temp = 0;
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter += i;
__temp = v->counter;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return __temp;
int __temp = 0;
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter -= i;
__temp = v->counter;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return __temp;
}
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter++;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
static inline void atomic_dec(volatile atomic_t *v)
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter--;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter &= ~mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
v->counter |= mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
/* Atomic operations are already serializing */
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
*a |= mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
static inline void clear_bit(int nr, volatile unsigned long *addr)
unsigned long flags;
a += nr >> 5;
mask = 1 << (nr & 0x1f);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
*a &= ~mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
static inline void change_bit(int nr, volatile unsigned long *addr)
ADDR += nr >> 5;
mask = 1 << (nr & 31);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
*ADDR ^= mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
a += nr >> 5;
mask = 1 << (nr & 0x1f);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
retval = (mask & *a) != 0;
*a |= mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return retval;
}
a += nr >> 5;
mask = 1 << (nr & 0x1f);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
retval = (mask & *a) != 0;
*a &= ~mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return retval;
}
a += nr >> 5;
mask = 1 << (nr & 0x1f);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
retval = (mask & *a) != 0;
*a ^= mask;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return retval;
}
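
Under CONFIG_IPIPE the plain local_irq_save()/local_irq_restore() pair only toggles the root domain's virtual stall bit (see the irqflags hunk further down), so primitives that must be atomic against real hardware interrupts are switched to the _hw variants, as in the atomic and bitop conversions above. A minimal sketch of the same pattern in driver-style code, assuming the patched Blackfin irq/system headers are in scope (my_dev and my_dev_mark_pending are made-up names for illustration):

	struct my_dev {				/* hypothetical device state */
		unsigned long pending;
	};

	static void my_dev_mark_pending(struct my_dev *dev, unsigned long bit)
	{
		unsigned long flags;

		local_irq_save_hw(flags);	/* really masks hw interrupts */
		dev->pending |= bit;		/* plain read-modify-write, now safe */
		local_irq_restore_hw(flags);
	}
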
#define SAVE_ALL_SYS save_context_no_interrupts
/* This is used for all normal interrupts. It saves a minimum of registers
to the stack, loads the IRQ number, and jumps to common code. */
+#ifdef CONFIG_IPIPE
+# define LOAD_IPIPE_IPEND \
+ P0.l = lo(IPEND); \
+ P0.h = hi(IPEND); \
+ R1 = [P0];
+#else
+# define LOAD_IPIPE_IPEND
+#endif
#define INTERRUPT_ENTRY(N) \
[--sp] = SYSCFG; \
\
[--sp] = R0; /*orig_r0*/ \
[--sp] = (R7:0,P5:0); \
R0 = (N); \
+ LOAD_IPIPE_IPEND \
jump __common_int_entry;
/* For timer interrupts, we need to save IPEND, since the user_mode
--- /dev/null
+/* -*- linux-c -*-
+ * include/asm-blackfin/ipipe.h
+ *
+ * Copyright (C) 2002-2007 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ASM_BLACKFIN_IPIPE_H
+#define __ASM_BLACKFIN_IPIPE_H
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+#include <linux/ipipe_percpu.h>
+#include <asm/ptrace.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/atomic.h>
+#include <asm/traps.h>
+
+#define IPIPE_ARCH_STRING "1.8-00"
+#define IPIPE_MAJOR_NUMBER 1
+#define IPIPE_MINOR_NUMBER 8
+#define IPIPE_PATCH_NUMBER 0
+
+#ifdef CONFIG_SMP
+#error "I-pipe/blackfin: SMP not implemented"
+#else /* !CONFIG_SMP */
+#define ipipe_processor_id() 0
+#endif /* CONFIG_SMP */
+
+#define prepare_arch_switch(next) \
+do { \
+ ipipe_schedule_notify(current, next); \
+ local_irq_disable_hw(); \
+} while (0)
+
+#define task_hijacked(p) \
+ ({ \
+ int __x__ = ipipe_current_domain != ipipe_root_domain; \
+ /* We would need to clear the SYNC flag for the root domain */ \
+ /* over the current processor in SMP mode. */ \
+ local_irq_enable_hw(); __x__; \
+ })
+
+struct ipipe_domain;
+
+struct ipipe_sysinfo {
+
+ int ncpus; /* Number of CPUs on board */
+ u64 cpufreq; /* CPU frequency (in Hz) */
+
+ /* Arch-dependent block */
+
+ struct {
+ unsigned tmirq; /* Timer tick IRQ */
+ u64 tmfreq; /* Timer frequency */
+ } archdep;
+};
+
+#define ipipe_read_tsc(t) \
+ ({ \
+ unsigned long __cy2; \
+ __asm__ __volatile__ ("1: %0 = CYCLES2\n" \
+ "%1 = CYCLES\n" \
+ "%2 = CYCLES2\n" \
+ "CC = %2 == %0\n" \
+ "if ! CC jump 1b\n" \
+ : "=r" (((unsigned long *)&t)[1]), \
+ "=r" (((unsigned long *)&t)[0]), \
+ "=r" (__cy2) \
+ : /*no input*/ : "CC"); \
+ t; \
+ })
+
+#define ipipe_cpu_freq() __ipipe_core_clock
+#define ipipe_tsc2ns(_t) (((unsigned long)(_t)) * __ipipe_freq_scale)
+#define ipipe_tsc2us(_t) (ipipe_tsc2ns(_t) / 1000 + 1)
+
+/* Private interface -- Internal use only */
+
+#define __ipipe_check_platform() do { } while (0)
+
+#define __ipipe_init_platform() do { } while (0)
+
+extern atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
+
+extern unsigned long __ipipe_irq_lvmask;
+
+extern struct ipipe_domain ipipe_root;
+
+/* enable/disable_irqdesc _must_ be used in pairs. */
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd,
+ unsigned irq);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
+ unsigned irq);
+
+#define __ipipe_enable_irq(irq) (irq_desc[irq].chip->unmask(irq))
+
+#define __ipipe_disable_irq(irq) (irq_desc[irq].chip->mask(irq))
+
+#define __ipipe_lock_root() \
+ set_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+
+#define __ipipe_unlock_root() \
+ clear_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags)
+
+void __ipipe_enable_pipeline(void);
+
+#define __ipipe_hook_critical_ipi(ipd) do { } while (0)
+
+#define __ipipe_sync_pipeline(syncmask) \
+ do { \
+ struct ipipe_domain *ipd = ipipe_current_domain; \
+ if (likely(ipd != ipipe_root_domain || !test_bit(IPIPE_ROOTLOCK_FLAG, &ipd->flags))) \
+ __ipipe_sync_stage(syncmask); \
+ } while (0)
+
+void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
+
+int __ipipe_get_irq_priority(unsigned irq);
+
+int __ipipe_get_irqthread_priority(unsigned irq);
+
+void __ipipe_stall_root_raw(void);
+
+void __ipipe_unstall_root_raw(void);
+
+void __ipipe_serial_debug(const char *fmt, ...);
+
+DECLARE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
+
+extern unsigned long __ipipe_core_clock;
+
+extern unsigned long __ipipe_freq_scale;
+
+extern unsigned long __ipipe_irq_tail_hook;
+
+static inline unsigned long __ipipe_ffnz(unsigned long ul)
+{
+ return ffs(ul) - 1;
+}
+
+#define __ipipe_run_irqtail() /* Must be a macro */ \
+ do { \
+ asmlinkage void __ipipe_call_irqtail(void); \
+ unsigned long __pending; \
+ CSYNC(); \
+ __pending = bfin_read_IPEND(); \
+ if (__pending & 0x8000) { \
+ __pending &= ~0x8010; \
+ if (__pending && (__pending & (__pending - 1)) == 0) \
+ __ipipe_call_irqtail(); \
+ } \
+ } while (0)
+
+#define __ipipe_run_isr(ipd, irq) \
+ do { \
+ if (ipd == ipipe_root_domain) { \
+ /* \
+ * Note: the I-pipe implements a threaded interrupt model on \
+ * this arch for Linux external IRQs. The interrupt handler we \
+ * call here only wakes up the associated IRQ thread. \
+ */ \
+ if (ipipe_virtual_irq_p(irq)) { \
+ /* No irqtail here; virtual interrupts have no effect \
+ on IPEND so there is no need for processing \
+ deferral. */ \
+ local_irq_enable_nohead(ipd); \
+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
+ local_irq_disable_nohead(ipd); \
+ } else \
+ /* \
+ * No need to run the irqtail here either; \
+ * we can't be preempted by hw IRQs, so \
+ * non-Linux IRQs cannot stack over the short \
+ * thread wakeup code. Which in turn means \
+ * that no irqtail condition could be pending \
+ * for domains above Linux in the pipeline. \
+ */ \
+ ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
+ } else { \
+ __clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
+ local_irq_enable_nohead(ipd); \
+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
+ /* Attempt to exit the outer interrupt level before \
+ * starting the deferred IRQ processing. */ \
+ local_irq_disable_nohead(ipd); \
+ __ipipe_run_irqtail(); \
+ __set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
+ } \
+ } while (0)
+
+#define __ipipe_syscall_watched_p(p, sc) \
+ (((p)->flags & PF_EVNOTIFY) || (unsigned long)sc >= NR_syscalls)
+
+void ipipe_init_irq_threads(void);
+
+int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
+
+#define IS_SYSIRQ(irq) ((irq) > IRQ_CORETMR && (irq) <= SYS_IRQS)
+#define IS_GPIOIRQ(irq) ((irq) >= GPIO_IRQ_BASE && (irq) < NR_IRQS)
+
+#define IRQ_SYSTMR IRQ_TIMER0
+#define IRQ_PRIOTMR CONFIG_IRQ_TIMER0
+
+#if defined(CONFIG_BF531) || defined(CONFIG_BF532) || defined(CONFIG_BF533)
+#define PRIO_GPIODEMUX(irq) CONFIG_PFA
+#elif defined(CONFIG_BF534) || defined(CONFIG_BF536) || defined(CONFIG_BF537)
+#define PRIO_GPIODEMUX(irq) CONFIG_IRQ_PROG_INTA
+#elif defined(CONFIG_BF52x)
+#define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PORTF_INTA ? CONFIG_IRQ_PORTF_INTA : \
+ (irq) == IRQ_PORTG_INTA ? CONFIG_IRQ_PORTG_INTA : \
+ (irq) == IRQ_PORTH_INTA ? CONFIG_IRQ_PORTH_INTA : \
+ -1)
+#elif defined(CONFIG_BF561)
+#define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PROG0_INTA ? CONFIG_IRQ_PROG0_INTA : \
+ (irq) == IRQ_PROG1_INTA ? CONFIG_IRQ_PROG1_INTA : \
+ (irq) == IRQ_PROG2_INTA ? CONFIG_IRQ_PROG2_INTA : \
+ -1)
+#define bfin_write_TIMER_DISABLE(val) bfin_write_TMRS8_DISABLE(val)
+#define bfin_write_TIMER_ENABLE(val) bfin_write_TMRS8_ENABLE(val)
+#define bfin_write_TIMER_STATUS(val) bfin_write_TMRS8_STATUS(val)
+#define bfin_read_TIMER_STATUS() bfin_read_TMRS8_STATUS()
+#elif defined(CONFIG_BF54x)
+#define PRIO_GPIODEMUX(irq) ((irq) == IRQ_PINT0 ? CONFIG_IRQ_PINT0 : \
+ (irq) == IRQ_PINT1 ? CONFIG_IRQ_PINT1 : \
+ (irq) == IRQ_PINT2 ? CONFIG_IRQ_PINT2 : \
+ (irq) == IRQ_PINT3 ? CONFIG_IRQ_PINT3 : \
+ -1)
+#define bfin_write_TIMER_DISABLE(val) bfin_write_TIMER_DISABLE0(val)
+#define bfin_write_TIMER_ENABLE(val) bfin_write_TIMER_ENABLE0(val)
+#define bfin_write_TIMER_STATUS(val) bfin_write_TIMER_STATUS0(val)
+#define bfin_read_TIMER_STATUS()	bfin_read_TIMER_STATUS0()
+#else
+# error "no PRIO_GPIODEMUX() for this part"
+#endif
+
+#define __ipipe_root_tick_p(regs) ((regs->ipend & 0x10) != 0)
+
+#else /* !CONFIG_IPIPE */
+
+#define task_hijacked(p) 0
+#define ipipe_trap_notify(t, r) 0
+
+#define __ipipe_stall_root_raw() do { } while (0)
+#define __ipipe_unstall_root_raw() do { } while (0)
+
+#define ipipe_init_irq_threads() do { } while (0)
+#define ipipe_start_irq_thread(irq, desc) 0
+
+#define IRQ_SYSTMR IRQ_CORETMR
+#define IRQ_PRIOTMR IRQ_CORETMR
+
+#define __ipipe_root_tick_p(regs) 1
+
+#endif /* !CONFIG_IPIPE */
+
+#endif /* !__ASM_BLACKFIN_IPIPE_H */
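
The ipipe_read_tsc() macro defined in this header builds a 64-bit timestamp from the CYCLES/CYCLES2 pair, re-reading until CYCLES2 is stable, and ipipe_tsc2ns() scales a cycle count by the precomputed __ipipe_freq_scale. A hedged usage sketch for timing a short code section (the measured delta must fit in the 32-bit cast performed by ipipe_tsc2ns()):

	unsigned long long t0, t1;
	unsigned long ns;

	ipipe_read_tsc(t0);
	/* ... section being measured ... */
	ipipe_read_tsc(t1);
	ns = ipipe_tsc2ns(t1 - t0);
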
--- /dev/null
+/* -*- linux-c -*-
+ * include/asm-blackfin/_baseipipe.h
+ *
+ * Copyright (C) 2007 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ASM_BLACKFIN_IPIPE_BASE_H
+#define __ASM_BLACKFIN_IPIPE_BASE_H
+
+#ifdef CONFIG_IPIPE
+
+#define IPIPE_NR_XIRQS NR_IRQS
+#define IPIPE_IRQ_ISHIFT 5 /* 2^5 for 32bits arch. */
+
+/* Blackfin-specific, global domain flags */
+#define IPIPE_ROOTLOCK_FLAG 1 /* Lock pipeline for root */
+
+ /* Blackfin traps -- i.e. exception vector numbers */
+#define IPIPE_NR_FAULTS 52 /* We leave a gap after VEC_ILL_RES. */
+/* Pseudo-vectors used for kernel events */
+#define IPIPE_FIRST_EVENT IPIPE_NR_FAULTS
+#define IPIPE_EVENT_SYSCALL (IPIPE_FIRST_EVENT)
+#define IPIPE_EVENT_SCHEDULE (IPIPE_FIRST_EVENT + 1)
+#define IPIPE_EVENT_SIGWAKE (IPIPE_FIRST_EVENT + 2)
+#define IPIPE_EVENT_SETSCHED (IPIPE_FIRST_EVENT + 3)
+#define IPIPE_EVENT_INIT (IPIPE_FIRST_EVENT + 4)
+#define IPIPE_EVENT_EXIT (IPIPE_FIRST_EVENT + 5)
+#define IPIPE_EVENT_CLEANUP (IPIPE_FIRST_EVENT + 6)
+#define IPIPE_LAST_EVENT IPIPE_EVENT_CLEANUP
+#define IPIPE_NR_EVENTS (IPIPE_LAST_EVENT + 1)
+
+#define IPIPE_TIMER_IRQ IRQ_CORETMR
+
+#ifndef __ASSEMBLY__
+
+#include <linux/bitops.h>
+
+extern int test_bit(int nr, const void *addr);
+
+
+extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
+
+static inline void __ipipe_stall_root(void)
+{
+ volatile unsigned long *p = &__ipipe_root_status;
+ set_bit(0, p);
+}
+
+static inline unsigned long __ipipe_test_and_stall_root(void)
+{
+ volatile unsigned long *p = &__ipipe_root_status;
+ return test_and_set_bit(0, p);
+}
+
+static inline unsigned long __ipipe_test_root(void)
+{
+ const unsigned long *p = &__ipipe_root_status;
+ return test_bit(0, p);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* !__ASM_BLACKFIN_IPIPE_BASE_H */
#include <asm/pda.h>
#include <asm/processor.h>
-static __inline__ int irq_canonicalize(int irq)
+#ifdef CONFIG_SMP
+/* Forward decl needed due to cdef inter dependencies */
+static inline uint32_t __pure bfin_dspid(void);
+# define blackfin_core_id() (bfin_dspid() & 0xff)
+# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
+#else
+extern unsigned long bfin_irq_flags;
+#endif
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/ipipe_trace.h>
+
+void __ipipe_unstall_root(void);
+
+void __ipipe_restore_root(unsigned long flags);
+
+#ifdef CONFIG_DEBUG_HWERR
+# define __all_masked_irq_flags 0x3f
+# define __save_and_cli_hw(x) \
+ __asm__ __volatile__( \
+ "cli %0;" \
+ "sti %1;" \
+ : "=&d"(x) \
+ : "d" (0x3F) \
+ )
+#else
+# define __all_masked_irq_flags 0x1f
+# define __save_and_cli_hw(x) \
+ __asm__ __volatile__( \
+ "cli %0;" \
+ : "=&d"(x) \
+ )
+#endif
+
+#define irqs_enabled_from_flags_hw(x) ((x) != __all_masked_irq_flags)
+#define raw_irqs_disabled_flags(flags) (!irqs_enabled_from_flags_hw(flags))
+#define local_test_iflag_hw(x) irqs_enabled_from_flags_hw(x)
+
+#define local_save_flags(x) \
+ do { \
+ (x) = __ipipe_test_root() ? \
+ __all_masked_irq_flags : bfin_irq_flags; \
+ } while (0)
+
+#define local_irq_save(x) \
+ do { \
+ (x) = __ipipe_test_and_stall_root(); \
+ } while (0)
+
+#define local_irq_restore(x) __ipipe_restore_root(x)
+#define local_irq_disable() __ipipe_stall_root()
+#define local_irq_enable() __ipipe_unstall_root()
+#define irqs_disabled() __ipipe_test_root()
+
+#define local_save_flags_hw(x) \
+ __asm__ __volatile__( \
+ "cli %0;" \
+ "sti %0;" \
+ : "=d"(x) \
+ )
+
+#define irqs_disabled_hw() \
+ ({ \
+ unsigned long flags; \
+ local_save_flags_hw(flags); \
+ !irqs_enabled_from_flags_hw(flags); \
+ })
+
+static inline unsigned long raw_mangle_irq_bits(int virt, unsigned long real)
{
- return irq;
+ /* Merge virtual and real interrupt mask bits into a single
+ 32bit word. */
+ return (real & ~(1 << 31)) | ((virt != 0) << 31);
}
+static inline int raw_demangle_irq_bits(unsigned long *x)
+{
+ int virt = (*x & (1 << 31)) != 0;
+ *x &= ~(1L << 31);
+ return virt;
+}
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+
+#define local_irq_disable_hw() \
+ do { \
+ int _tmp_dummy; \
+ if (!irqs_disabled_hw()) \
+ ipipe_trace_begin(0x80000000); \
+ __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \
+ } while (0)
+
+#define local_irq_enable_hw() \
+ do { \
+ if (irqs_disabled_hw()) \
+ ipipe_trace_end(0x80000000); \
+ __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags)); \
+ } while (0)
+
+#define local_irq_save_hw(x) \
+ do { \
+ __save_and_cli_hw(x); \
+ if (local_test_iflag_hw(x)) \
+ ipipe_trace_begin(0x80000001); \
+ } while (0)
+
+#define local_irq_restore_hw(x) \
+ do { \
+ if (local_test_iflag_hw(x)) { \
+ ipipe_trace_end(0x80000001); \
+ local_irq_enable_hw_notrace(); \
+ } \
+ } while (0)
+
+#define local_irq_disable_hw_notrace() \
+ do { \
+ int _tmp_dummy; \
+ __asm__ __volatile__ ("cli %0;" : "=d" (_tmp_dummy) : ); \
+ } while (0)
+
+#define local_irq_enable_hw_notrace() \
+ __asm__ __volatile__( \
+ "sti %0;" \
+ : \
+ : "d"(bfin_irq_flags) \
+ )
+
+#define local_irq_save_hw_notrace(x) __save_and_cli_hw(x)
+
+#define local_irq_restore_hw_notrace(x) \
+ do { \
+ if (local_test_iflag_hw(x)) \
+ local_irq_enable_hw_notrace(); \
+ } while (0)
+
+#else /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#define local_irq_enable_hw() \
+ __asm__ __volatile__( \
+ "sti %0;" \
+ : \
+ : "d"(bfin_irq_flags) \
+ )
+
+#define local_irq_disable_hw() \
+ do { \
+ int _tmp_dummy; \
+ __asm__ __volatile__ ( \
+ "cli %0;" \
+ : "=d" (_tmp_dummy)); \
+ } while (0)
+
+#define local_irq_restore_hw(x) \
+ do { \
+ if (irqs_enabled_from_flags_hw(x)) \
+ local_irq_enable_hw(); \
+ } while (0)
+
+#define local_irq_save_hw(x) __save_and_cli_hw(x)
+
+#define local_irq_disable_hw_notrace() local_irq_disable_hw()
+#define local_irq_enable_hw_notrace() local_irq_enable_hw()
+#define local_irq_save_hw_notrace(x) local_irq_save_hw(x)
+#define local_irq_restore_hw_notrace(x) local_irq_restore_hw(x)
+
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#else /* !CONFIG_IPIPE */
+
/*
* Interrupt configuring macros.
*/
); \
} while (0)
-#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
-# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
-#else
-# define NOP_PAD_ANOMALY_05000244
-#endif
-
-#ifdef CONFIG_SMP
-/* Forward decl needed due to cdef inter dependencies */
-static inline uint32_t __pure bfin_dspid(void);
-# define blackfin_core_id() (bfin_dspid() & 0xff)
-# define bfin_irq_flags cpu_pda[blackfin_core_id()].imask
-#else
-extern unsigned long bfin_irq_flags;
-#endif
-
#define local_irq_enable() \
__asm__ __volatile__( \
"sti %0;" \
: "d" (bfin_irq_flags) \
)
-#define idle_with_irq_disabled() \
- __asm__ __volatile__( \
- NOP_PAD_ANOMALY_05000244 \
- ".align 8;" \
- "sti %0;" \
- "idle;" \
- : \
- : "d" (bfin_irq_flags) \
- )
-
#ifdef CONFIG_DEBUG_HWERR
# define __save_and_cli(x) \
__asm__ __volatile__( \
!irqs_enabled_from_flags(flags); \
})
+#define local_irq_save_hw(x) local_irq_save(x)
+#define local_irq_restore_hw(x) local_irq_restore(x)
+#define local_irq_enable_hw() local_irq_enable()
+#define local_irq_disable_hw() local_irq_disable()
+#define irqs_disabled_hw() irqs_disabled()
+
+#endif /* !CONFIG_IPIPE */
+
+#if ANOMALY_05000244 && defined(CONFIG_BFIN_ICACHE)
+# define NOP_PAD_ANOMALY_05000244 "nop; nop;"
+#else
+# define NOP_PAD_ANOMALY_05000244
+#endif
+
+#define idle_with_irq_disabled() \
+ __asm__ __volatile__( \
+ NOP_PAD_ANOMALY_05000244 \
+ ".align 8;" \
+ "sti %0;" \
+ "idle;" \
+ : \
+ : "d" (bfin_irq_flags) \
+ )
+
+static inline int irq_canonicalize(int irq)
+{
+ return irq;
+}
+
#endif /* _BFIN_IRQ_H_ */
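
raw_mangle_irq_bits() and raw_demangle_irq_bits() in the hunk above fold the root domain's virtual stall state into bit 31 of the saved hardware flags, so a single word can carry both pieces of state through a save/restore path. A small round-trip sketch (the variable names are local to the example; bfin_irq_flags never has bit 31 set, being a small IMASK value):

	unsigned long flags = raw_mangle_irq_bits(1, bfin_irq_flags);
	int virt;

	/* ... flags travels through the save/restore path ... */
	virt = raw_demangle_irq_bits(&flags);
	/* virt == 1 again, and flags is back to the plain hardware mask */
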
unsigned long tmp = 0;
unsigned long flags = 0;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
switch (size) {
case 1:
: "=&d" (tmp) : "d" (x), "m" (*__xg(ptr)) : "memory");
break;
}
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return tmp;
}
obj-y += time.o
endif
+obj-$(CONFIG_IPIPE) += ipipe.o
+obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
obj-$(CONFIG_MODULES) += module.o
void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
{ \
unsigned long flags; \
- local_irq_save(flags); \
+ local_irq_save_hw(flags); \
if (arg) \
gpio_bankb[gpio_bank(gpio)]->name |= gpio_bit(gpio); \
else \
gpio_bankb[gpio_bank(gpio)]->name &= ~gpio_bit(gpio); \
AWA_DUMMY_READ(name); \
- local_irq_restore(flags); \
+ local_irq_restore_hw(flags); \
} \
EXPORT_SYMBOL(set_gpio_ ## name);
void set_gpio_ ## name(unsigned gpio, unsigned short arg) \
{ \
unsigned long flags; \
- local_irq_save(flags); \
+ local_irq_save_hw(flags); \
if (arg) \
gpio_bankb[gpio_bank(gpio)]->name ## _set = gpio_bit(gpio); \
else \
gpio_bankb[gpio_bank(gpio)]->name ## _clear = gpio_bit(gpio); \
AWA_DUMMY_READ(name); \
- local_irq_restore(flags); \
+ local_irq_restore_hw(flags); \
} \
EXPORT_SYMBOL(set_gpio_ ## name);
#else
void set_gpio_toggle(unsigned gpio)
{
unsigned long flags;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
gpio_bankb[gpio_bank(gpio)]->toggle = gpio_bit(gpio);
AWA_DUMMY_READ(toggle);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#else
void set_gpio_toggle(unsigned gpio)
void set_gpiop_ ## name(unsigned gpio, unsigned short arg) \
{ \
unsigned long flags; \
- local_irq_save(flags); \
+ local_irq_save_hw(flags); \
gpio_bankb[gpio_bank(gpio)]->name = arg; \
AWA_DUMMY_READ(name); \
- local_irq_restore(flags); \
+ local_irq_restore_hw(flags); \
} \
EXPORT_SYMBOL(set_gpiop_ ## name);
#else
{ \
unsigned long flags; \
unsigned short ret; \
- local_irq_save(flags); \
+ local_irq_save_hw(flags); \
ret = 0x01 & (gpio_bankb[gpio_bank(gpio)]->name >> gpio_sub_n(gpio)); \
AWA_DUMMY_READ(name); \
- local_irq_restore(flags); \
+ local_irq_restore_hw(flags); \
return ret; \
} \
EXPORT_SYMBOL(get_gpio_ ## name);
{ \
unsigned long flags; \
unsigned short ret; \
- local_irq_save(flags); \
+ local_irq_save_hw(flags); \
ret = (gpio_bankb[gpio_bank(gpio)]->name); \
AWA_DUMMY_READ(name); \
- local_irq_restore(flags); \
+ local_irq_restore_hw(flags); \
return ret; \
} \
EXPORT_SYMBOL(get_gpiop_ ## name);
if ((check_gpio(gpio) < 0) || !type)
return -EINVAL;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio);
wakeup_flags_map[gpio] = type;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return 0;
}
if (check_gpio(gpio) < 0)
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
EXPORT_SYMBOL(gpio_pm_wakeup_free);
if (!(per & P_DEFINED))
return -ENODEV;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* If a pin can be muxed as either GPIO or peripheral, make
* sure it is not already a GPIO pin when we request it.
printk(KERN_ERR
"%s: Peripheral %d is already reserved as GPIO by %s !\n",
__func__, ident, get_label(ident));
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return -EBUSY;
}
printk(KERN_ERR
"%s: Peripheral %d function %d is already reserved by %s !\n",
__func__, ident, P_FUNCT2MUX(per), get_label(ident));
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return -EBUSY;
}
}
#endif
port_setup(ident, PERIPHERAL_USAGE);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
set_label(ident, label);
return 0;
if (check_gpio(ident) < 0)
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
if (unlikely(!(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident)))) {
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return;
}
set_label(ident, "free");
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
EXPORT_SYMBOL(peripheral_free);
if (check_gpio(gpio) < 0)
return -EINVAL;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/*
* Allow that the identical GPIO can
*/
if (cmp_label(gpio, label) == 0) {
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return 0;
}
dump_stack();
printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
gpio, get_label(gpio));
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return -EBUSY;
}
if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
printk(KERN_ERR
"bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
gpio, get_label(gpio));
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return -EBUSY;
}
if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio)))
reserved_gpio_map[gpio_bank(gpio)] |= gpio_bit(gpio);
set_label(gpio, label);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
port_setup(gpio, GPIO_USAGE);
if (check_gpio(gpio) < 0)
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
if (unlikely(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
dump_stack();
gpio_error(gpio);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return;
}
set_label(gpio, "free");
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
EXPORT_SYMBOL(bfin_gpio_free);
if (check_gpio(gpio) < 0)
return -EINVAL;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
dump_stack();
printk(KERN_ERR
"bfin-gpio: GPIO %d is already reserved as gpio-irq !\n",
gpio);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return -EBUSY;
}
if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
printk(KERN_ERR
"bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
gpio, get_label(gpio));
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return -EBUSY;
}
if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))
reserved_gpio_irq_map[gpio_bank(gpio)] |= gpio_bit(gpio);
set_label(gpio, label);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
port_setup(gpio, GPIO_USAGE);
if (check_gpio(gpio) < 0)
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
if (unlikely(!(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
dump_stack();
gpio_error(gpio);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return;
}
set_label(gpio, "free");
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
return -EINVAL;
}
- local_irq_save(flags);
+ local_irq_save_hw(flags);
gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio);
gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return 0;
}
return -EINVAL;
}
- local_irq_save(flags);
+ local_irq_save_hw(flags);
gpio_array[gpio_bank(gpio)]->port_inen &= ~gpio_bit(gpio);
gpio_set_value(gpio, value);
gpio_array[gpio_bank(gpio)]->port_dir_set = gpio_bit(gpio);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return 0;
}
port_setup(gpio, GPIO_USAGE);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio);
gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#else
int ret;
if (unlikely(get_gpio_edge(gpio))) {
- local_irq_save(flags);
+ local_irq_save_hw(flags);
set_gpio_edge(gpio, 0);
ret = get_gpio_data(gpio);
set_gpio_edge(gpio, 1);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return ret;
} else
return -EINVAL;
}
- local_irq_save(flags);
+ local_irq_save_hw(flags);
gpio_bankb[gpio_bank(gpio)]->dir &= ~gpio_bit(gpio);
gpio_bankb[gpio_bank(gpio)]->inen |= gpio_bit(gpio);
AWA_DUMMY_READ(inen);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return 0;
}
return -EINVAL;
}
- local_irq_save(flags);
+ local_irq_save_hw(flags);
gpio_bankb[gpio_bank(gpio)]->inen &= ~gpio_bit(gpio);
if (value)
gpio_bankb[gpio_bank(gpio)]->dir |= gpio_bit(gpio);
AWA_DUMMY_READ(dir);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return 0;
}
nr_cplb_flush[cpu]++;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
disable_icplb();
for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
icplb_tbl[cpu][i].data = 0;
bfin_write32(DCPLB_DATA0 + i * 4, 0);
}
enable_dcplb();
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
return;
}
- local_irq_save(flags);
+ local_irq_save_hw(flags);
current_rwx_mask[cpu] = masks;
d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
addr += PAGE_SIZE;
}
enable_dcplb();
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#endif
ENTRY(_ret_from_fork)
+#ifdef CONFIG_IPIPE
+ [--sp] = reti; /* IRQs on. */
+ SP += 4;
+#endif /* CONFIG_IPIPE */
SP += -12;
call _schedule_tail;
SP += 12;
--- /dev/null
+/* -*- linux-c -*-
+ * linux/arch/blackfin/kernel/ipipe.c
+ *
+ * Copyright (C) 2005-2007 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-pipe support for the Blackfin.
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/kthread.h>
+#include <asm/unistd.h>
+#include <asm/system.h>
+#include <asm/atomic.h>
+#include <asm/io.h>
+
+static int create_irq_threads;
+
+DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
+
+static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);
+
+static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);
+
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
+
+static void __ipipe_no_irqtail(void);
+
+unsigned long __ipipe_irq_tail_hook = (unsigned long)&__ipipe_no_irqtail;
+EXPORT_SYMBOL(__ipipe_irq_tail_hook);
+
+unsigned long __ipipe_core_clock;
+EXPORT_SYMBOL(__ipipe_core_clock);
+
+unsigned long __ipipe_freq_scale;
+EXPORT_SYMBOL(__ipipe_freq_scale);
+
+atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
+
+unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+EXPORT_SYMBOL(__ipipe_irq_lvmask);
+
+static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
+{
+ desc->ipipe_ack(irq, desc);
+}
+
+/*
+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+ * interrupts are off, and secondary CPUs are still lost in space.
+ */
+void __ipipe_enable_pipeline(void)
+{
+ unsigned irq;
+
+ __ipipe_core_clock = get_cclk(); /* Fetch this once. */
+ __ipipe_freq_scale = 1000000000UL / __ipipe_core_clock;
+
+ for (irq = 0; irq < NR_IRQS; ++irq)
+ ipipe_virtualize_irq(ipipe_root_domain,
+ irq,
+ (ipipe_irq_handler_t)&asm_do_IRQ,
+ NULL,
+ &__ipipe_ack_irq,
+ IPIPE_HANDLE_MASK | IPIPE_PASS_MASK);
+}
+
+/*
+ * __ipipe_handle_irq() -- IPIPE's generic IRQ handler. An optimistic
+ * interrupt protection log is maintained here for each domain. Hw
+ * interrupts are masked on entry.
+ */
+void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
+{
+ struct ipipe_domain *this_domain, *next_domain;
+ struct list_head *head, *pos;
+ int m_ack, s = -1;
+
+ /*
+ * Software-triggered IRQs do not need any ack. The contents
+ * of the register frame should only be used when processing
+ * the timer interrupt, but not for handling any other
+ * interrupt.
+ */
+ m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
+
+ this_domain = ipipe_current_domain;
+
+ if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
+ head = &this_domain->p_link;
+ else {
+ head = __ipipe_pipeline.next;
+ next_domain = list_entry(head, struct ipipe_domain, p_link);
+ if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
+ if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
+ next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+ if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
+ s = __test_and_set_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_cpudom_var(status));
+ __ipipe_dispatch_wired(next_domain, irq);
+ goto finalize;
+ }
+ }
+
+ /* Ack the interrupt. */
+
+ pos = head;
+
+ while (pos != &__ipipe_pipeline) {
+ next_domain = list_entry(pos, struct ipipe_domain, p_link);
+ /*
+ * For each domain handling the incoming IRQ, mark it
+ * as pending in its log.
+ */
+ if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
+ /*
+ * Domains that handle this IRQ are polled for
+ * acknowledging it by decreasing priority
+ * order. The interrupt must be made pending
+ * _first_ in the domain's status flags before
+ * the PIC is unlocked.
+ */
+ __ipipe_set_irq_pending(next_domain, irq);
+
+ if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
+ next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+ m_ack = 1;
+ }
+ }
+
+ /*
+ * If the domain does not want the IRQ to be passed
+ * down the interrupt pipe, exit the loop now.
+ */
+ if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
+ break;
+
+ pos = next_domain->p_link.next;
+ }
+
+ /*
+ * Now walk the pipeline, yielding control to the highest
+ * priority domain that has pending interrupt(s) or
+ * immediately to the current domain if the interrupt has been
+ * marked as 'sticky'. This search does not go beyond the
+ * current domain in the pipeline. We also enforce the
+ * additional root stage lock (blackfin-specific). */
+
+ if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
+ s = __test_and_set_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_cpudom_var(status));
+finalize:
+
+ __ipipe_walk_pipeline(head);
+
+ if (!s)
+ __clear_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_cpudom_var(status));
+}
+
+int __ipipe_check_root(void)
+{
+ return ipipe_root_domain_p;
+}
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+ struct irq_desc *desc = irq_desc + irq;
+ int prio = desc->ic_prio;
+
+ desc->depth = 0;
+ if (ipd != &ipipe_root &&
+ atomic_inc_return(&__ipipe_irq_lvdepth[prio]) == 1)
+ __set_bit(prio, &__ipipe_irq_lvmask);
+}
+EXPORT_SYMBOL(__ipipe_enable_irqdesc);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+ struct irq_desc *desc = irq_desc + irq;
+ int prio = desc->ic_prio;
+
+ if (ipd != &ipipe_root &&
+ atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
+ __clear_bit(prio, &__ipipe_irq_lvmask);
+}
+EXPORT_SYMBOL(__ipipe_disable_irqdesc);
+
+void __ipipe_stall_root_raw(void)
+{
+ /*
+ * This code is called by the ins{bwl} routines (see
+ * arch/blackfin/lib/ins.S), which are heavily used by the
+ * network stack. It masks all interrupts but those handled by
+ * non-root domains, so that we keep decent network transfer
+ * rates for Linux without inducing pathological jitter for
+ * the real-time domain.
+ */
+ __asm__ __volatile__ ("sti %0;" : : "d"(__ipipe_irq_lvmask));
+
+ __set_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_cpudom_var(status));
+}
+
+void __ipipe_unstall_root_raw(void)
+{
+ __clear_bit(IPIPE_STALL_FLAG,
+ &ipipe_root_cpudom_var(status));
+
+ __asm__ __volatile__ ("sti %0;" : : "d"(bfin_irq_flags));
+}
+
+int __ipipe_syscall_root(struct pt_regs *regs)
+{
+ unsigned long flags;
+
+ /* We need to run the IRQ tail hook whenever we don't
+ * propagate a syscall to higher domains, because we know that
+ * important operations might be pending there (e.g. Xenomai
+ * deferred rescheduling). */
+
+ if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
+ void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+ hook();
+ return 0;
+ }
+
+ /*
+ * This routine either returns:
+ * 0 -- if the syscall is to be passed to Linux;
+ * 1 -- if the syscall should not be passed to Linux, and no
+ * tail work should be performed;
+ * -1 -- if the syscall should not be passed to Linux but the
+ * tail work has to be performed (for handling signals etc).
+ */
+
+ if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
+ __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
+ if (ipipe_root_domain_p && !in_atomic()) {
+ /*
+ * Sync pending VIRQs before _TIF_NEED_RESCHED
+ * is tested.
+ */
+ local_irq_save_hw(flags);
+ if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
+ __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+ local_irq_restore_hw(flags);
+ return -1;
+ }
+ return 1;
+ }
+
+ return 0;
+}
+
+unsigned long ipipe_critical_enter(void (*syncfn) (void))
+{
+ unsigned long flags;
+
+ local_irq_save_hw(flags);
+
+ return flags;
+}
+
+void ipipe_critical_exit(unsigned long flags)
+{
+ local_irq_restore_hw(flags);
+}
+
+static void __ipipe_no_irqtail(void)
+{
+}
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+ info->ncpus = num_online_cpus();
+ info->cpufreq = ipipe_cpu_freq();
+ info->archdep.tmirq = IPIPE_TIMER_IRQ;
+ info->archdep.tmfreq = info->cpufreq;
+
+ return 0;
+}
+
+/*
+ * ipipe_trigger_irq() -- Push the interrupt at front of the pipeline
+ * just like if it has been actually received from a hw source. Also
+ * works for virtual interrupts.
+ */
+int ipipe_trigger_irq(unsigned irq)
+{
+ unsigned long flags;
+
+ if (irq >= IPIPE_NR_IRQS ||
+ (ipipe_virtual_irq_p(irq)
+ && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
+ return -EINVAL;
+
+ local_irq_save_hw(flags);
+
+ __ipipe_handle_irq(irq, NULL);
+
+ local_irq_restore_hw(flags);
+
+ return 1;
+}
+
+/* Move Linux IRQ to threads. */
+
+static int do_irqd(void *__desc)
+{
+ struct irq_desc *desc = __desc;
+ unsigned irq = desc - irq_desc;
+ int thrprio = desc->thr_prio;
+ int thrmask = 1 << thrprio;
+ int cpu = smp_processor_id();
+ cpumask_t cpumask;
+
+ sigfillset(¤t->blocked);
+ current->flags |= PF_NOFREEZE;
+ cpumask = cpumask_of_cpu(cpu);
+ set_cpus_allowed(current, cpumask);
+ ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);
+
+ while (!kthread_should_stop()) {
+ local_irq_disable();
+ if (!(desc->status & IRQ_SCHEDULED)) {
+ set_current_state(TASK_INTERRUPTIBLE);
+resched:
+ local_irq_enable();
+ schedule();
+ local_irq_disable();
+ }
+ __set_current_state(TASK_RUNNING);
+ /*
+ * If higher priority interrupt servers are ready to
+ * run, reschedule immediately. We need this for the
+ * GPIO demux IRQ handler to unmask the interrupt line
+ * _last_, after all GPIO IRQs have run.
+ */
+ if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
+ goto resched;
+ if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
+ per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
+ desc->status &= ~IRQ_SCHEDULED;
+ desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
+ local_irq_enable();
+ }
+ __set_current_state(TASK_RUNNING);
+ return 0;
+}
+
+static void kick_irqd(unsigned irq, void *cookie)
+{
+ struct irq_desc *desc = irq_desc + irq;
+ int thrprio = desc->thr_prio;
+ int thrmask = 1 << thrprio;
+ int cpu = smp_processor_id();
+
+ if (!(desc->status & IRQ_SCHEDULED)) {
+ desc->status |= IRQ_SCHEDULED;
+ per_cpu(pending_irqthread_mask, cpu) |= thrmask;
+ ++per_cpu(pending_irq_count[thrprio], cpu);
+ wake_up_process(desc->thread);
+ }
+}
+
+int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
+{
+ if (desc->thread || !create_irq_threads)
+ return 0;
+
+ desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
+ if (desc->thread == NULL) {
+ printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
+ return -ENOMEM;
+ }
+
+ wake_up_process(desc->thread);
+
+ desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
+ ipipe_root_domain->irqs[irq].handler = &kick_irqd;
+
+ return 0;
+}
+
+void __init ipipe_init_irq_threads(void)
+{
+ unsigned irq;
+ struct irq_desc *desc;
+
+ create_irq_threads = 1;
+
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ desc = irq_desc + irq;
+ if (desc->action != NULL ||
+ (desc->status & IRQ_NOREQUEST) != 0)
+ ipipe_start_irq_thread(irq, desc);
+ }
+}
+
+EXPORT_SYMBOL(show_stack);
+
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT
+void notrace _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
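
ipipe_trigger_irq() above feeds an IRQ into __ipipe_handle_irq() with a NULL register frame, and explicitly accepts virtual IRQs. A hedged sketch of pairing it with the generic I-pipe virtual IRQ API (ipipe_alloc_virq() belongs to the core I-pipe layer, not to this Blackfin patch, and my_virq_handler/my_virq_setup are made-up names):

	static void my_virq_handler(unsigned irq, void *cookie)
	{
		/* runs when the virtual IRQ is synchronized for its domain */
	}

	static int __init my_virq_setup(void)
	{
		unsigned virq = ipipe_alloc_virq();

		ipipe_virtualize_irq(ipipe_root_domain, virq, &my_virq_handler,
				     NULL, NULL, IPIPE_HANDLE_MASK);
		ipipe_trigger_irq(virq);	/* post it once as a smoke test */
		return 0;
	}
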
{
struct pt_regs *old_regs;
struct irq_desc *desc = irq_desc + irq;
+#ifndef CONFIG_IPIPE
unsigned short pending, other_ints;
-
+#endif
old_regs = set_irq_regs(regs);
/*
#endif
generic_handle_irq(irq);
+#ifndef CONFIG_IPIPE /* Useless and bogus over the I-pipe: IRQs are threaded. */
/* If we're the only interrupt running (ignoring IRQ15 which is for
syscalls), lower our priority to IRQ14 so that softirqs run at
that level. If there's another, lower-level interrupt, irq_exit
other_ints = pending & (pending - 1);
if (other_ints == 0)
lower_to_irq14();
+#endif /* !CONFIG_IPIPE */
irq_exit();
set_irq_regs(old_regs);
--- /dev/null
+/*
+ * linux/arch/blackfin/mcount.S
+ *
+ * Copyright (C) 2006 Analog Devices Inc.
+ *
+ * 2007/04/12 Save index, length, modify and base registers. --rpm
+ */
+
+#include <linux/linkage.h>
+#include <asm/blackfin.h>
+
+.text
+
+.align 4 /* just in case */
+
+ENTRY(__mcount)
+ [--sp] = i0;
+ [--sp] = i1;
+ [--sp] = i2;
+ [--sp] = i3;
+ [--sp] = l0;
+ [--sp] = l1;
+ [--sp] = l2;
+ [--sp] = l3;
+ [--sp] = m0;
+ [--sp] = m1;
+ [--sp] = m2;
+ [--sp] = m3;
+ [--sp] = b0;
+ [--sp] = b1;
+ [--sp] = b2;
+ [--sp] = b3;
+ [--sp] = ( r7:0, p5:0 );
+ [--sp] = ASTAT;
+
+ p1.L = _ipipe_trace_enable;
+ p1.H = _ipipe_trace_enable;
+ r7 = [p1];
+ CC = r7 == 0;
+ if CC jump out;
+ link 0x10;
+ r0 = 0x0;
+ [sp + 0xc] = r0; /* v */
+ r0 = 0x0; /* type: IPIPE_TRACE_FN */
+ r1 = rets;
+ p0 = [fp]; /* p0: Prior FP */
+ r2 = [p0 + 4]; /* r2: Prior RETS */
+ call ___ipipe_trace;
+ unlink;
+out:
+ ASTAT = [sp++];
+ ( r7:0, p5:0 ) = [sp++];
+ b3 = [sp++];
+ b2 = [sp++];
+ b1 = [sp++];
+ b0 = [sp++];
+ m3 = [sp++];
+ m2 = [sp++];
+ m1 = [sp++];
+ m0 = [sp++];
+ l3 = [sp++];
+ l2 = [sp++];
+ l1 = [sp++];
+ l0 = [sp++];
+ i3 = [sp++];
+ i2 = [sp++];
+ i1 = [sp++];
+ i0 = [sp++];
+ rts;
+ENDPROC(__mcount)
*/
static void default_idle(void)
{
- local_irq_disable();
+#ifdef CONFIG_IPIPE
+ ipipe_suspend_domain();
+#endif
+ local_irq_disable_hw();
if (!need_resched())
idle_with_irq_disabled();
- local_irq_enable();
+ local_irq_enable_hw();
}
/*
#endif
};
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
void __init setup_system_timer0(void)
{
/* Power down the core timer, just to play safe. */
static void __init
time_sched_init(irqreturn_t(*timer_routine) (int, void *))
{
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
setup_system_timer0();
bfin_timer_irq.handler = timer_routine;
setup_irq(IRQ_TIMER0, &bfin_timer_irq);
unsigned long offset;
unsigned long clocks_per_jiffy;
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
clocks_per_jiffy = bfin_read_TIMER0_PERIOD();
offset = bfin_read_TIMER0_COUNTER() / \
(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
static long last_rtc_update;
write_seqlock(&xtime_lock);
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
+/* FIXME: TIMIL0 is not set here when IPIPE is enabled, why? */
if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
#endif
do_timer(1);
/* Do it again in 60s. */
last_rtc_update = xtime.tv_sec - 600;
}
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
set_gptimer_status(0, TIMER_STATUS_TIMIL0);
}
#endif
write_sequnlock(&xtime_lock);
+#ifdef CONFIG_IPIPE
+ update_root_process_times(get_irq_regs());
+#else
update_process_times(user_mode(get_irq_regs()));
+#endif
profile_tick(CPU_PROFILING);
return IRQ_HANDLED;
}
}
- info.si_signo = sig;
- info.si_errno = 0;
- info.si_addr = (void __user *)fp->pc;
- force_sig_info(sig, &info, current);
+#ifdef CONFIG_IPIPE
+ if (!ipipe_trap_notify(fp->seqstat & 0x3f, fp))
+#endif
+ {
+ info.si_signo = sig;
+ info.si_errno = 0;
+ info.si_addr = (void __user *)fp->pc;
+ force_sig_info(sig, &info, current);
+ }
trace_buffer_restore(j);
return;
ENTRY(_insl)
#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
P0 = R0; /* P0 = port */
+#ifdef CONFIG_IPIPE
+ [--sp] = rets
+ [--sp] = (P5:0);
+ sp += -12
+ call ___ipipe_stall_root_raw
+ sp += 12
+ (P5:0) = [sp++];
+#else
cli R3;
+#endif
P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */
SSYNC;
[P1++] = R0;
NOP;
.Llong_loop_e: NOP;
+#ifdef CONFIG_IPIPE
+ sp += -12
+ call ___ipipe_unstall_root_raw
+ sp += 12
+ rets = [sp++]
+#else
sti R3;
+#endif
RTS;
#else
P0 = R0; /* P0 = port */
SSYNC;
LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2;
.Llong_loop_s:
+#ifdef CONFIG_IPIPE
+ [--sp] = rets
+ [--sp] = (P5:0);
+ sp += -12
+ call ___ipipe_stall_root_raw
+ sp += 12
+ (P5:0) = [sp++];
+#else
CLI R3;
+#endif
NOP; NOP; NOP;
R0 = [P0];
[P1++] = R0;
.Llong_loop_e:
+#ifdef CONFIG_IPIPE
+ sp += -12
+ call ___ipipe_unstall_root_raw
+ sp += 12
+ rets = [sp++]
+#else
STI R3;
-
+#endif
RTS;
#endif
ENDPROC(_insl)
ENTRY(_insw)
#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
P0 = R0; /* P0 = port */
+#ifdef CONFIG_IPIPE
+ [--sp] = rets
+ [--sp] = (P5:0);
+ sp += -12
+ call ___ipipe_stall_root_raw
+ sp += 12
+ (P5:0) = [sp++];
+#else
cli R3;
+#endif
P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */
SSYNC;
W[P1++] = R0;
NOP;
.Lword_loop_e: NOP;
+#ifdef CONFIG_IPIPE
+ sp += -12
+ call ___ipipe_unstall_root_raw
+ sp += 12
+ rets = [sp++]
+#else
sti R3;
+#endif
RTS;
#else
P0 = R0; /* P0 = port */
SSYNC;
LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2;
.Lword_loop_s:
+#ifdef CONFIG_IPIPE
+ [--sp] = rets
+ [--sp] = (P5:0);
+ sp += -12
+ call ___ipipe_stall_root_raw
+ sp += 12
+ (P5:0) = [sp++];
+#else
CLI R3;
+#endif
NOP; NOP; NOP;
R0 = W[P0];
W[P1++] = R0;
.Lword_loop_e:
+#ifdef CONFIG_IPIPE
+ sp += -12
+ call ___ipipe_unstall_root_raw
+ sp += 12
+ rets = [sp++]
+#else
STI R3;
+#endif
RTS;
#endif
ENTRY(_insw_8)
#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
P0 = R0; /* P0 = port */
+#ifdef CONFIG_IPIPE
+ [--sp] = rets
+ [--sp] = (P5:0);
+ sp += -12
+ call ___ipipe_stall_root_raw
+ sp += 12
+ (P5:0) = [sp++];
+#else
cli R3;
+#endif
P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */
SSYNC;
B[P1++] = R0;
NOP;
.Lword8_loop_e: NOP;
+#ifdef CONFIG_IPIPE
+ sp += -12
+ call ___ipipe_unstall_root_raw
+ sp += 12
+ rets = [sp++]
+#else
sti R3;
+#endif
RTS;
#else
P0 = R0; /* P0 = port */
SSYNC;
LSETUP( .Lword8_loop_s, .Lword8_loop_e) LC0 = P2;
.Lword8_loop_s:
+#ifdef CONFIG_IPIPE
+ [--sp] = rets
+ [--sp] = (P5:0);
+ sp += -12
+ call ___ipipe_stall_root_raw
+ sp += 12
+ (P5:0) = [sp++];
+#else
CLI R3;
+#endif
NOP; NOP; NOP;
R0 = W[P0];
B[P1++] = R0;
B[P1++] = R0;
NOP;
.Lword8_loop_e:
+#ifdef CONFIG_IPIPE
+ sp += -12
+ call ___ipipe_unstall_root_raw
+ sp += 12
+ rets = [sp++]
+#else
STI R3;
-
+#endif
RTS;
#endif
ENDPROC(_insw_8)
ENTRY(_insb)
#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
P0 = R0; /* P0 = port */
+#ifdef CONFIG_IPIPE
+ [--sp] = rets
+ [--sp] = (P5:0);
+ sp += -12
+ call ___ipipe_stall_root_raw
+ sp += 12
+ (P5:0) = [sp++];
+#else
cli R3;
+#endif
P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */
SSYNC;
B[P1++] = R0;
NOP;
.Lbyte_loop_e: NOP;
+#ifdef CONFIG_IPIPE
+ sp += -12
+ call ___ipipe_unstall_root_raw
+ sp += 12
+ rets = [sp++]
+#else
sti R3;
+#endif
RTS;
#else
P0 = R0; /* P0 = port */
SSYNC;
LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2;
.Lbyte_loop_s:
+#ifdef CONFIG_IPIPE
+ [--sp] = rets
+ [--sp] = (P5:0);
+ sp += -12
+ call ___ipipe_stall_root_raw
+ sp += 12
+ (P5:0) = [sp++];
+#else
CLI R3;
+#endif
NOP; NOP; NOP;
R0 = B[P0];
B[P1++] = R0;
.Lbyte_loop_e:
+#ifdef CONFIG_IPIPE
+ sp += -12
+ call ___ipipe_unstall_root_raw
+ sp += 12
+ rets = [sp++]
+#else
STI R3;
-
+#endif
RTS;
#endif
ENDPROC(_insb)
ENTRY(_insl_16)
#ifdef CONFIG_BFIN_INS_LOWOVERHEAD
P0 = R0; /* P0 = port */
+#ifdef CONFIG_IPIPE
+ [--sp] = rets
+ [--sp] = (P5:0);
+ sp += -12
+ call ___ipipe_stall_root_raw
+ sp += 12
+ (P5:0) = [sp++];
+#else
cli R3;
+#endif
P1 = R1; /* P1 = address */
P2 = R2; /* P2 = count */
SSYNC;
W[P1++] = R0;
NOP;
.Llong16_loop_e: NOP;
+#ifdef CONFIG_IPIPE
+ sp += -12
+ call ___ipipe_unstall_root_raw
+ sp += 12
+ rets = [sp++]
+#else
sti R3;
+#endif
RTS;
#else
P0 = R0; /* P0 = port */
SSYNC;
LSETUP( .Llong16_loop_s, .Llong16_loop_e) LC0 = P2;
.Llong16_loop_s:
+#ifdef CONFIG_IPIPE
+ [--sp] = rets
+ [--sp] = (P5:0);
+ sp += -12
+ call ___ipipe_stall_root_raw
+ sp += 12
+ (P5:0) = [sp++];
+#else
CLI R3;
+#endif
NOP; NOP; NOP;
R0 = [P0];
W[P1++] = R0;
R0 = R0 >> 16;
W[P1++] = R0;
.Llong16_loop_e:
+#ifdef CONFIG_IPIPE
+ sp += -12
+ call ___ipipe_unstall_root_raw
+ sp += 12
+ rets = [sp++]
+#else
STI R3;
+#endif
RTS;
#endif
ENDPROC(_insl_16)
config IRQ_PORTH_INTB
int "IRQ_PORTH_INTB"
default 11
-config IRQ_TMR0
- int "IRQ_TMR0"
- default 12
-config IRQ_TMR1
- int "IRQ_TMR1"
+config IRQ_TIMER0
+ int "IRQ_TIMER0"
+ default 8
+config IRQ_TIMER1
+ int "IRQ_TIMER1"
default 12
-config IRQ_TMR2
- int "IRQ_TMR2"
+config IRQ_TIMER2
+ int "IRQ_TIMER2"
default 12
-config IRQ_TMR3
- int "IRQ_TMR3"
+config IRQ_TIMER3
+ int "IRQ_TIMER3"
default 12
-config IRQ_TMR4
- int "IRQ_TMR4"
+config IRQ_TIMER4
+ int "IRQ_TIMER4"
default 12
-config IRQ_TMR5
- int "IRQ_TMR5"
+config IRQ_TIMER5
+ int "IRQ_TIMER5"
default 12
-config IRQ_TMR6
- int "IRQ_TMR6"
+config IRQ_TIMER6
+ int "IRQ_TIMER6"
default 12
-config IRQ_TMR7
- int "IRQ_TMR7"
+config IRQ_TIMER7
+ int "IRQ_TIMER7"
default 12
config IRQ_PORTG_INTA
int "IRQ_PORTG_INTA"
if (val == bfin_read_PLL_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr0 = bfin_read32(SIC_IWR0);
iwr1 = bfin_read32(SIC_IWR1);
bfin_write32(SIC_IWR0, iwr0);
bfin_write32(SIC_IWR1, iwr1);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
/* Writing to VR_CTL initiates a PLL relock sequence. */
if (val == bfin_read_VR_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr0 = bfin_read32(SIC_IWR0);
iwr1 = bfin_read32(SIC_IWR1);
bfin_write32(SIC_IWR0, iwr0);
bfin_write32(SIC_IWR1, iwr1);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#endif /* _CDEF_BF52X_H */
#define IRQ_PORTH_INTA BFIN_IRQ(29) /* Port H Interrupt A */
#define IRQ_MAC_TX BFIN_IRQ(30) /* DMA2 Channel (MAC TX) */
#define IRQ_PORTH_INTB BFIN_IRQ(31) /* Port H Interrupt B */
-#define IRQ_TMR0 BFIN_IRQ(32) /* Timer 0 */
-#define IRQ_TMR1 BFIN_IRQ(33) /* Timer 1 */
-#define IRQ_TMR2 BFIN_IRQ(34) /* Timer 2 */
-#define IRQ_TMR3 BFIN_IRQ(35) /* Timer 3 */
-#define IRQ_TMR4 BFIN_IRQ(36) /* Timer 4 */
-#define IRQ_TMR5 BFIN_IRQ(37) /* Timer 5 */
-#define IRQ_TMR6 BFIN_IRQ(38) /* Timer 6 */
-#define IRQ_TMR7 BFIN_IRQ(39) /* Timer 7 */
+#define IRQ_TIMER0 BFIN_IRQ(32) /* Timer 0 */
+#define IRQ_TIMER1 BFIN_IRQ(33) /* Timer 1 */
+#define IRQ_TIMER2 BFIN_IRQ(34) /* Timer 2 */
+#define IRQ_TIMER3 BFIN_IRQ(35) /* Timer 3 */
+#define IRQ_TIMER4 BFIN_IRQ(36) /* Timer 4 */
+#define IRQ_TIMER5 BFIN_IRQ(37) /* Timer 5 */
+#define IRQ_TIMER6 BFIN_IRQ(38) /* Timer 6 */
+#define IRQ_TIMER7 BFIN_IRQ(39) /* Timer 7 */
#define IRQ_PORTG_INTA BFIN_IRQ(40) /* Port G Interrupt A */
#define IRQ_PORTG_INTB BFIN_IRQ(41) /* Port G Interrupt B */
#define IRQ_MEM_DMA0 BFIN_IRQ(42) /* MDMA Stream 0 */
#define IRQ_PORTH_INTB_POS 28
/* IAR4 BIT FIELDS */
-#define IRQ_TMR0_POS 0
-#define IRQ_TMR1_POS 4
-#define IRQ_TMR2_POS 8
-#define IRQ_TMR3_POS 12
-#define IRQ_TMR4_POS 16
-#define IRQ_TMR5_POS 20
-#define IRQ_TMR6_POS 24
-#define IRQ_TMR7_POS 28
+#define IRQ_TIMER0_POS 0
+#define IRQ_TIMER1_POS 4
+#define IRQ_TIMER2_POS 8
+#define IRQ_TIMER3_POS 12
+#define IRQ_TIMER4_POS 16
+#define IRQ_TIMER5_POS 20
+#define IRQ_TIMER6_POS 24
+#define IRQ_TIMER7_POS 28
/* IAR5 BIT FIELDS */
#define IRQ_PORTG_INTA_POS 0
((CONFIG_IRQ_MAC_TX - 7) << IRQ_MAC_TX_POS) |
((CONFIG_IRQ_PORTH_INTB - 7) << IRQ_PORTH_INTB_POS));
- bfin_write_SIC_IAR4(((CONFIG_IRQ_TMR0 - 7) << IRQ_TMR0_POS) |
- ((CONFIG_IRQ_TMR1 - 7) << IRQ_TMR1_POS) |
- ((CONFIG_IRQ_TMR2 - 7) << IRQ_TMR2_POS) |
- ((CONFIG_IRQ_TMR3 - 7) << IRQ_TMR3_POS) |
- ((CONFIG_IRQ_TMR4 - 7) << IRQ_TMR4_POS) |
- ((CONFIG_IRQ_TMR5 - 7) << IRQ_TMR5_POS) |
- ((CONFIG_IRQ_TMR6 - 7) << IRQ_TMR6_POS) |
- ((CONFIG_IRQ_TMR7 - 7) << IRQ_TMR7_POS));
+ bfin_write_SIC_IAR4(((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) |
+ ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) |
+ ((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) |
+ ((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) |
+ ((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS) |
+ ((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) |
+ ((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) |
+ ((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS));
bfin_write_SIC_IAR5(((CONFIG_IRQ_PORTG_INTA - 7) << IRQ_PORTG_INTA_POS) |
((CONFIG_IRQ_PORTG_INTB - 7) << IRQ_PORTG_INTB_POS) |
config IRQ_PORTH_INTB
int "IRQ_PORTH_INTB"
default 11
-config IRQ_TMR0
- int "IRQ_TMR0"
- default 12
-config IRQ_TMR1
- int "IRQ_TMR1"
+config IRQ_TIMER0
+ int "IRQ_TIMER0"
+ default 8
+config IRQ_TIMER1
+ int "IRQ_TIMER1"
default 12
-config IRQ_TMR2
- int "IRQ_TMR2"
+config IRQ_TIMER2
+ int "IRQ_TIMER2"
default 12
-config IRQ_TMR3
- int "IRQ_TMR3"
+config IRQ_TIMER3
+ int "IRQ_TIMER3"
default 12
-config IRQ_TMR4
- int "IRQ_TMR4"
+config IRQ_TIMER4
+ int "IRQ_TIMER4"
default 12
-config IRQ_TMR5
- int "IRQ_TMR5"
+config IRQ_TIMER5
+ int "IRQ_TIMER5"
default 12
-config IRQ_TMR6
- int "IRQ_TMR6"
+config IRQ_TIMER6
+ int "IRQ_TIMER6"
default 12
-config IRQ_TMR7
- int "IRQ_TMR7"
+config IRQ_TIMER7
+ int "IRQ_TIMER7"
default 12
config IRQ_PORTG_INTA
int "IRQ_PORTG_INTA"
if (val == bfin_read_PLL_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr0 = bfin_read32(SIC_IWR0);
iwr1 = bfin_read32(SIC_IWR1);
bfin_write32(SIC_IWR0, iwr0);
bfin_write32(SIC_IWR1, iwr1);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
/* Writing to VR_CTL initiates a PLL relock sequence. */
if (val == bfin_read_VR_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr0 = bfin_read32(SIC_IWR0);
iwr1 = bfin_read32(SIC_IWR1);
bfin_write32(SIC_IWR0, iwr0);
bfin_write32(SIC_IWR1, iwr1);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#endif /* _CDEF_BF52X_H */
#define IRQ_MAC_TX BFIN_IRQ(30) /* DMA2 Channel (MAC TX/NAND) */
#define IRQ_NFC BFIN_IRQ(30) /* DMA2 Channel (MAC TX/NAND) */
#define IRQ_PORTH_INTB BFIN_IRQ(31) /* Port H Interrupt B */
-#define IRQ_TMR0 BFIN_IRQ(32) /* Timer 0 */
-#define IRQ_TMR1 BFIN_IRQ(33) /* Timer 1 */
-#define IRQ_TMR2 BFIN_IRQ(34) /* Timer 2 */
-#define IRQ_TMR3 BFIN_IRQ(35) /* Timer 3 */
-#define IRQ_TMR4 BFIN_IRQ(36) /* Timer 4 */
-#define IRQ_TMR5 BFIN_IRQ(37) /* Timer 5 */
-#define IRQ_TMR6 BFIN_IRQ(38) /* Timer 6 */
-#define IRQ_TMR7 BFIN_IRQ(39) /* Timer 7 */
+#define IRQ_TIMER0 BFIN_IRQ(32) /* Timer 0 */
+#define IRQ_TIMER1 BFIN_IRQ(33) /* Timer 1 */
+#define IRQ_TIMER2 BFIN_IRQ(34) /* Timer 2 */
+#define IRQ_TIMER3 BFIN_IRQ(35) /* Timer 3 */
+#define IRQ_TIMER4 BFIN_IRQ(36) /* Timer 4 */
+#define IRQ_TIMER5 BFIN_IRQ(37) /* Timer 5 */
+#define IRQ_TIMER6 BFIN_IRQ(38) /* Timer 6 */
+#define IRQ_TIMER7 BFIN_IRQ(39) /* Timer 7 */
#define IRQ_PORTG_INTA BFIN_IRQ(40) /* Port G Interrupt A */
#define IRQ_PORTG_INTB BFIN_IRQ(41) /* Port G Interrupt B */
#define IRQ_MEM_DMA0 BFIN_IRQ(42) /* MDMA Stream 0 */
#define IRQ_PORTH_INTB_POS 28
/* IAR4 BIT FIELDS */
-#define IRQ_TMR0_POS 0
-#define IRQ_TMR1_POS 4
-#define IRQ_TMR2_POS 8
-#define IRQ_TMR3_POS 12
-#define IRQ_TMR4_POS 16
-#define IRQ_TMR5_POS 20
-#define IRQ_TMR6_POS 24
-#define IRQ_TMR7_POS 28
+#define IRQ_TIMER0_POS 0
+#define IRQ_TIMER1_POS 4
+#define IRQ_TIMER2_POS 8
+#define IRQ_TIMER3_POS 12
+#define IRQ_TIMER4_POS 16
+#define IRQ_TIMER5_POS 20
+#define IRQ_TIMER6_POS 24
+#define IRQ_TIMER7_POS 28
/* IAR5 BIT FIELDS */
#define IRQ_PORTG_INTA_POS 0
((CONFIG_IRQ_MAC_TX - 7) << IRQ_MAC_TX_POS) |
((CONFIG_IRQ_PORTH_INTB - 7) << IRQ_PORTH_INTB_POS));
- bfin_write_SIC_IAR4(((CONFIG_IRQ_TMR0 - 7) << IRQ_TMR0_POS) |
- ((CONFIG_IRQ_TMR1 - 7) << IRQ_TMR1_POS) |
- ((CONFIG_IRQ_TMR2 - 7) << IRQ_TMR2_POS) |
- ((CONFIG_IRQ_TMR3 - 7) << IRQ_TMR3_POS) |
- ((CONFIG_IRQ_TMR4 - 7) << IRQ_TMR4_POS) |
- ((CONFIG_IRQ_TMR5 - 7) << IRQ_TMR5_POS) |
- ((CONFIG_IRQ_TMR6 - 7) << IRQ_TMR6_POS) |
- ((CONFIG_IRQ_TMR7 - 7) << IRQ_TMR7_POS));
+ bfin_write_SIC_IAR4(((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) |
+ ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) |
+ ((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) |
+ ((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) |
+ ((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS) |
+ ((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) |
+ ((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) |
+ ((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS));
bfin_write_SIC_IAR5(((CONFIG_IRQ_PORTG_INTA - 7) << IRQ_PORTG_INTA_POS) |
((CONFIG_IRQ_PORTG_INTB - 7) << IRQ_PORTG_INTB_POS) |
default 10
config TIMER0
int "TIMER0"
- default 11
+ default 8
config TIMER1
int "TIMER1"
default 11
static inline void bfin_write_FIO_FLAG_##name(unsigned short val) \
{ \
unsigned long flags; \
- local_irq_save(flags); \
+ local_irq_save_hw(flags); \
bfin_write16(FIO_FLAG_##name, val); \
bfin_read_CHIPID(); \
- local_irq_restore(flags); \
+ local_irq_restore_hw(flags); \
}
BFIN_WRITE_FIO_FLAG(D)
BFIN_WRITE_FIO_FLAG(C)
{ \
unsigned long flags; \
u16 ret; \
- local_irq_save(flags); \
+ local_irq_save_hw(flags); \
ret = bfin_read16(FIO_FLAG_##name); \
bfin_read_CHIPID(); \
- local_irq_restore(flags); \
+ local_irq_restore_hw(flags); \
return ret; \
}
BFIN_READ_FIO_FLAG(D)
if (val == bfin_read_PLL_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr = bfin_read32(SIC_IWR);
/* Only allow PLL Wakeup */
asm("IDLE;");
bfin_write32(SIC_IWR, iwr);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
/* Writing to VR_CTL initiates a PLL relock sequence. */
if (val == bfin_read_VR_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr = bfin_read32(SIC_IWR);
/* Only allow PLL Wakeup */
asm("IDLE;");
bfin_write32(SIC_IWR, iwr);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#endif /* _CDEF_BF532_H */
#define IRQ_SPI 20 /*DMA5 Interrupt (SPI) */
#define IRQ_UART0_RX 21 /*DMA6 Interrupt (UART RX) */
#define IRQ_UART0_TX 22 /*DMA7 Interrupt (UART TX) */
-#define IRQ_TMR0 23 /*Timer 0 */
-#define IRQ_TMR1 24 /*Timer 1 */
-#define IRQ_TMR2 25 /*Timer 2 */
+#define IRQ_TIMER0 23 /*Timer 0 */
+#define IRQ_TIMER1 24 /*Timer 1 */
+#define IRQ_TIMER2 25 /*Timer 2 */
#define IRQ_PROG_INTA 26 /*Programmable Flags A (8) */
#define IRQ_PROG_INTB 27 /*Programmable Flags B (8) */
#define IRQ_MEM_DMA0 28 /*DMA8/9 Interrupt (Memory DMA Stream 0) */
config IRQ_MAC_TX
int "IRQ_MAC_TX"
default 11
-config IRQ_TMR0
- int "IRQ_TMR0"
- default 12
-config IRQ_TMR1
- int "IRQ_TMR1"
+config IRQ_TIMER0
+ int "IRQ_TIMER0"
+ default 8
+config IRQ_TIMER1
+ int "IRQ_TIMER1"
default 12
-config IRQ_TMR2
- int "IRQ_TMR2"
+config IRQ_TIMER2
+ int "IRQ_TIMER2"
default 12
-config IRQ_TMR3
- int "IRQ_TMR3"
+config IRQ_TIMER3
+ int "IRQ_TIMER3"
default 12
-config IRQ_TMR4
- int "IRQ_TMR4"
+config IRQ_TIMER4
+ int "IRQ_TIMER4"
default 12
-config IRQ_TMR5
- int "IRQ_TMR5"
+config IRQ_TIMER5
+ int "IRQ_TIMER5"
default 12
-config IRQ_TMR6
- int "IRQ_TMR6"
+config IRQ_TIMER6
+ int "IRQ_TIMER6"
default 12
-config IRQ_TMR7
- int "IRQ_TMR7"
+config IRQ_TIMER7
+ int "IRQ_TIMER7"
default 12
config IRQ_PROG_INTA
int "IRQ_PROG_INTA"
if (val == bfin_read_PLL_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr = bfin_read32(SIC_IWR);
/* Only allow PLL Wakeup */
asm("IDLE;");
bfin_write32(SIC_IWR, iwr);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
/* Writing to VR_CTL initiates a PLL relock sequence. */
if (val == bfin_read_VR_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr = bfin_read32(SIC_IWR);
/* Only allow PLL Wakeup */
asm("IDLE;");
bfin_write32(SIC_IWR, iwr);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#endif /* _CDEF_BF534_H */
#define IRQ_CAN_TX 23 /*CAN Transmit Interrupt */
#define IRQ_MAC_RX 24 /*DMA1 (Ethernet RX) Interrupt */
#define IRQ_MAC_TX 25 /*DMA2 (Ethernet TX) Interrupt */
-#define IRQ_TMR0 26 /*Timer 0 */
-#define IRQ_TMR1 27 /*Timer 1 */
-#define IRQ_TMR2 28 /*Timer 2 */
-#define IRQ_TMR3 29 /*Timer 3 */
-#define IRQ_TMR4 30 /*Timer 4 */
-#define IRQ_TMR5 31 /*Timer 5 */
-#define IRQ_TMR6 32 /*Timer 6 */
-#define IRQ_TMR7 33 /*Timer 7 */
+#define IRQ_TIMER0 26 /*Timer 0 */
+#define IRQ_TIMER1 27 /*Timer 1 */
+#define IRQ_TIMER2 28 /*Timer 2 */
+#define IRQ_TIMER3 29 /*Timer 3 */
+#define IRQ_TIMER4 30 /*Timer 4 */
+#define IRQ_TIMER5 31 /*Timer 5 */
+#define IRQ_TIMER6 32 /*Timer 6 */
+#define IRQ_TIMER7 33 /*Timer 7 */
#define IRQ_PROG_INTA 34 /* PF Ports F&G (PF15:0) Interrupt A */
#define IRQ_PORTG_INTB 35 /* PF Port G (PF15:0) Interrupt B */
#define IRQ_MEM_DMA0 36 /*(Memory DMA Stream 0) */
#define IRQ_CAN_TX_POS 0
#define IRQ_MAC_RX_POS 4
#define IRQ_MAC_TX_POS 8
-#define IRQ_TMR0_POS 12
-#define IRQ_TMR1_POS 16
-#define IRQ_TMR2_POS 20
-#define IRQ_TMR3_POS 24
-#define IRQ_TMR4_POS 28
+#define IRQ_TIMER0_POS 12
+#define IRQ_TIMER1_POS 16
+#define IRQ_TIMER2_POS 20
+#define IRQ_TIMER3_POS 24
+#define IRQ_TIMER4_POS 28
/* IAR3 BIT FIELDS*/
-#define IRQ_TMR5_POS 0
-#define IRQ_TMR6_POS 4
-#define IRQ_TMR7_POS 8
+#define IRQ_TIMER5_POS 0
+#define IRQ_TIMER6_POS 4
+#define IRQ_TIMER7_POS 8
#define IRQ_PROG_INTA_POS 12
#define IRQ_PORTG_INTB_POS 16
#define IRQ_MEM_DMA0_POS 20
bfin_write_SIC_IAR2(((CONFIG_IRQ_CAN_TX - 7) << IRQ_CAN_TX_POS) |
((CONFIG_IRQ_MAC_RX - 7) << IRQ_MAC_RX_POS) |
((CONFIG_IRQ_MAC_TX - 7) << IRQ_MAC_TX_POS) |
- ((CONFIG_IRQ_TMR0 - 7) << IRQ_TMR0_POS) |
- ((CONFIG_IRQ_TMR1 - 7) << IRQ_TMR1_POS) |
- ((CONFIG_IRQ_TMR2 - 7) << IRQ_TMR2_POS) |
- ((CONFIG_IRQ_TMR3 - 7) << IRQ_TMR3_POS) |
- ((CONFIG_IRQ_TMR4 - 7) << IRQ_TMR4_POS));
+ ((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) |
+ ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) |
+ ((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) |
+ ((CONFIG_IRQ_TIMER3 - 7) << IRQ_TIMER3_POS) |
+ ((CONFIG_IRQ_TIMER4 - 7) << IRQ_TIMER4_POS));
- bfin_write_SIC_IAR3(((CONFIG_IRQ_TMR5 - 7) << IRQ_TMR5_POS) |
- ((CONFIG_IRQ_TMR6 - 7) << IRQ_TMR6_POS) |
- ((CONFIG_IRQ_TMR7 - 7) << IRQ_TMR7_POS) |
+ bfin_write_SIC_IAR3(((CONFIG_IRQ_TIMER5 - 7) << IRQ_TIMER5_POS) |
+ ((CONFIG_IRQ_TIMER6 - 7) << IRQ_TIMER6_POS) |
+ ((CONFIG_IRQ_TIMER7 - 7) << IRQ_TIMER7_POS) |
((CONFIG_IRQ_PROG_INTA - 7) << IRQ_PROG_INTA_POS) |
((CONFIG_IRQ_PORTG_INTB - 7) << IRQ_PORTG_INTB_POS) |
((CONFIG_IRQ_MEM_DMA0 - 7) << IRQ_MEM_DMA0_POS) |
config IRQ_UART0_TX
int "IRQ_UART0_TX"
default 10
-config IRQ_TMR0
- int "IRQ_TMR0"
- default 11
-config IRQ_TMR1
- int "IRQ_TMR1"
+config IRQ_TIMER0
+ int "IRQ_TIMER0"
+ default 8
+config IRQ_TIMER1
+ int "IRQ_TIMER1"
default 11
-config IRQ_TMR2
- int "IRQ_TMR2"
+config IRQ_TIMER2
+ int "IRQ_TIMER2"
default 11
config IRQ_PORTF_INTA
int "IRQ_PORTF_INTA"
if (val == bfin_read_PLL_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr0 = bfin_read32(SIC_IWR0);
iwr1 = bfin_read32(SIC_IWR1);
bfin_write32(SIC_IWR0, iwr0);
bfin_write32(SIC_IWR1, iwr1);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
/* Writing to VR_CTL initiates a PLL relock sequence. */
if (val == bfin_read_VR_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr0 = bfin_read32(SIC_IWR0);
iwr1 = bfin_read32(SIC_IWR1);
bfin_write32(SIC_IWR0, iwr0);
bfin_write32(SIC_IWR1, iwr1);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#endif
#define IRQ_SPI0 BFIN_IRQ(13) /* DMA 5 Channel (SPI0) */
#define IRQ_UART0_RX BFIN_IRQ(14) /* DMA 6 Channel (UART0 RX) */
#define IRQ_UART0_TX BFIN_IRQ(15) /* DMA 7 Channel (UART0 TX) */
-#define IRQ_TMR0 BFIN_IRQ(16) /* Timer 0 */
-#define IRQ_TMR1 BFIN_IRQ(17) /* Timer 1 */
-#define IRQ_TMR2 BFIN_IRQ(18) /* Timer 2 */
+#define IRQ_TIMER0 BFIN_IRQ(16) /* Timer 0 */
+#define IRQ_TIMER1 BFIN_IRQ(17) /* Timer 1 */
+#define IRQ_TIMER2 BFIN_IRQ(18) /* Timer 2 */
#define IRQ_PORTF_INTA BFIN_IRQ(19) /* Port F Interrupt A */
#define IRQ_PORTF_INTB BFIN_IRQ(20) /* Port F Interrupt B */
#define IRQ_MEM0_DMA0 BFIN_IRQ(21) /* MDMA0 Stream 0 */
#define IRQ_UART0_TX_POS 28
/* IAR2 BIT FIELDS */
-#define IRQ_TMR0_POS 0
-#define IRQ_TMR1_POS 4
-#define IRQ_TMR2_POS 8
+#define IRQ_TIMER0_POS 0
+#define IRQ_TIMER1_POS 4
+#define IRQ_TIMER2_POS 8
#define IRQ_PORTF_INTA_POS 12
#define IRQ_PORTF_INTB_POS 16
#define IRQ_MEM0_DMA0_POS 20
((CONFIG_IRQ_UART0_RX - 7) << IRQ_UART0_RX_POS) |
((CONFIG_IRQ_UART0_TX - 7) << IRQ_UART0_TX_POS));
- bfin_write_SIC_IAR2(((CONFIG_IRQ_TMR0 - 7) << IRQ_TMR0_POS) |
- ((CONFIG_IRQ_TMR1 - 7) << IRQ_TMR1_POS) |
- ((CONFIG_IRQ_TMR2 - 7) << IRQ_TMR2_POS) |
+ bfin_write_SIC_IAR2(((CONFIG_IRQ_TIMER0 - 7) << IRQ_TIMER0_POS) |
+ ((CONFIG_IRQ_TIMER1 - 7) << IRQ_TIMER1_POS) |
+ ((CONFIG_IRQ_TIMER2 - 7) << IRQ_TIMER2_POS) |
((CONFIG_IRQ_PORTF_INTA - 7) << IRQ_PORTF_INTA_POS) |
((CONFIG_IRQ_PORTF_INTB - 7) << IRQ_PORTF_INTB_POS) |
((CONFIG_IRQ_MEM0_DMA0 - 7) << IRQ_MEM0_DMA0_POS) |
default 11
config IRQ_TIMER0
int "IRQ_TIMER0"
- default 11
+ default 8
config IRQ_TIMER1
int "IRQ_TIMER1"
default 11
if (val == bfin_read_PLL_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr0 = bfin_read32(SIC_IWR0);
iwr1 = bfin_read32(SIC_IWR1);
bfin_write32(SIC_IWR0, iwr0);
bfin_write32(SIC_IWR1, iwr1);
bfin_write32(SIC_IWR2, iwr2);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
/* Writing to VR_CTL initiates a PLL relock sequence. */
if (val == bfin_read_VR_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr0 = bfin_read32(SIC_IWR0);
iwr1 = bfin_read32(SIC_IWR1);
bfin_write32(SIC_IWR0, iwr0);
bfin_write32(SIC_IWR1, iwr1);
bfin_write32(SIC_IWR2, iwr2);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#endif /* _CDEF_BF54X_H */
#define IRQ_PINT2 BFIN_IRQ(94) /* PINT2 Interrupt */
#define IRQ_PINT3 BFIN_IRQ(95) /* PINT3 Interrupt */
-#define SYS_IRQS IRQ_PINT3
+#define SYS_IRQS IRQ_PINT3
#define BFIN_PA_IRQ(x) ((x) + SYS_IRQS + 1)
#define IRQ_PA0 BFIN_PA_IRQ(0)
default 9
config IRQ_TIMER0
int "TIMER 0 Interrupt"
- default 10
+ default 8
config IRQ_TIMER1
int "TIMER 1 Interrupt"
default 10
if (val == bfin_read_PLL_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr0 = bfin_read32(SICA_IWR0);
iwr1 = bfin_read32(SICA_IWR1);
bfin_write32(SICA_IWR0, iwr0);
bfin_write32(SICA_IWR1, iwr1);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
/* Writing to VR_CTL initiates a PLL relock sequence. */
if (val == bfin_read_VR_CTL())
return;
- local_irq_save(flags);
+ local_irq_save_hw(flags);
/* Enable the PLL Wakeup bit in SIC IWR */
iwr0 = bfin_read32(SICA_IWR0);
iwr1 = bfin_read32(SICA_IWR1);
bfin_write32(SICA_IWR0, iwr0);
bfin_write32(SICA_IWR1, iwr1);
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
#endif /* _CDEF_BF561_H */
cclk_hz, target_freq, freqs.old);
cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
- local_irq_save(flags);
+ local_irq_save_hw(flags);
plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
tscale = dpm_state_table[index].tscale;
bfin_write_PLL_DIV(plldiv);
cycles += 10; /* ~10 cycles we lose after get_cycles() */
__bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
__bfin_cycles_mod = index;
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
/* TODO: just a test case for the cycles clock source, remove later */
pr_debug("cpufreq: done\n");
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
#endif
r1 = sp;
SP += -12;
+#ifdef CONFIG_IPIPE
+ call ___ipipe_grab_irq
+ SP += 12;
+ cc = r0 == 0;
+ if cc jump .Lcommon_restore_context;
+#else /* CONFIG_IPIPE */
call _do_irq;
SP += 12;
+#endif /* CONFIG_IPIPE */
call _return_from_int;
.Lcommon_restore_context:
RESTORE_CONTEXT
call _system_call;
jump .Lcommon_restore_context;
ENDPROC(_evt_system_call)
+
+#ifdef CONFIG_IPIPE
+ENTRY(___ipipe_call_irqtail)
+ r0.l = 1f;
+ r0.h = 1f;
+ reti = r0;
+ rti;
+1:
+ [--sp] = rets;
+ [--sp] = ( r7:4, p5:3 );
+ p0.l = ___ipipe_irq_tail_hook;
+ p0.h = ___ipipe_irq_tail_hook;
+ p0 = [p0];
+ sp += -12;
+ call (p0);
+ sp += 12;
+ ( r7:4, p5:3 ) = [sp++];
+ rets = [sp++];
+
+ [--sp] = reti;
+ reti = [sp++]; /* IRQs are off. */
+ r0.h = 3f;
+ r0.l = 3f;
+ p0.l = lo(EVT14);
+ p0.h = hi(EVT14);
+ [p0] = r0;
+ csync;
+ r0 = 0x401f;
+ sti r0;
+ raise 14;
+ [--sp] = reti; /* IRQs on. */
+2:
+ jump 2b; /* Likely paranoid. */
+3:
+ sp += 4; /* Discard saved RETI */
+ r0.h = _evt14_softirq;
+ r0.l = _evt14_softirq;
+ p0.l = lo(EVT14);
+ p0.h = hi(EVT14);
+ [p0] = r0;
+ csync;
+ p0.l = _bfin_irq_flags;
+ p0.h = _bfin_irq_flags;
+ r0 = [p0];
+ sti r0;
+#if 0 /* FIXME: this actually raises scheduling latencies */
+ /* Reenable interrupts */
+ [--sp] = reti;
+ r0 = [sp++];
+#endif
+ rts;
+ENDPROC(___ipipe_call_irqtail)
+#endif /* CONFIG_IPIPE */
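
___ipipe_call_irqtail drops to EVT14 and calls whatever address is stored in ___ipipe_irq_tail_hook, which is where a co-kernel hangs its deferred rescheduling work once the outermost interrupt unwinds. Installing the hook from C is just a pointer assignment; a minimal sketch, assuming the usual I-pipe convention of an unsigned long hook variable (the handler name is made up):

extern unsigned long __ipipe_irq_tail_hook;

static void my_irq_tail(void)
{
	/* deferred work, runs from EVT14 after the IRQ stack unwinds */
}

static void install_irq_tail_hook(void)
{
	__ipipe_irq_tail_hook = (unsigned long)&my_irq_tail;
}
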
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
+#ifdef CONFIG_IPIPE
+#include <linux/ipipe.h>
+#endif
#ifdef CONFIG_KGDB
#include <linux/kgdb.h>
#endif
static void bfin_core_mask_irq(unsigned int irq)
{
bfin_irq_flags &= ~(1 << irq);
- if (!irqs_disabled())
- local_irq_enable();
+ if (!irqs_disabled_hw())
+ local_irq_enable_hw();
}
static void bfin_core_unmask_irq(unsigned int irq)
* local_irq_enable just does "STI bfin_irq_flags", so it's exactly
* what we need.
*/
- if (!irqs_disabled())
- local_irq_enable();
+ if (!irqs_disabled_hw())
+ local_irq_enable_hw();
return;
}
break;
}
- local_irq_save(flags);
+ local_irq_save_hw(flags);
if (state) {
bfin_sic_iwr[bank] |= (1 << bit);
vr_wakeup &= ~wakeup;
}
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
return 0;
}
#endif
};
+static void bfin_handle_irq(unsigned irq)
+{
+#ifdef CONFIG_IPIPE
+ struct pt_regs regs; /* Contents not used. */
+ ipipe_trace_irq_entry(irq);
+ __ipipe_handle_irq(irq, &regs);
+ ipipe_trace_irq_exit(irq);
+#else /* !CONFIG_IPIPE */
+ struct irq_desc *desc = irq_desc + irq;
+ desc->handle_irq(irq, desc);
+#endif /* !CONFIG_IPIPE */
+}
+
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
static int error_int_mask;
irq = IRQ_UART1_ERROR;
if (irq) {
- if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR))) {
- struct irq_desc *desc = irq_desc + irq;
- desc->handle_irq(irq, desc);
- } else {
+ if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
+ bfin_handle_irq(irq);
+ else {
switch (irq) {
case IRQ_PPI_ERROR:
static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
+#ifdef CONFIG_IPIPE
+ _set_irq_handler(irq, handle_edge_irq);
+#else
struct irq_desc *desc = irq_desc + irq;
/* May not call generic set_irq_handler() due to spinlock
recursion. */
desc->handle_irq = handle;
+#endif
}
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
mask = get_gpiop_data(i) & get_gpiop_maska(i);
while (mask) {
- if (mask & 1) {
- desc = irq_desc + irq;
- desc->handle_irq(irq, desc);
- }
+ if (mask & 1)
+ bfin_handle_irq(irq);
irq++;
mask >>= 1;
}
mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
do {
- if (mask & 1) {
- desc = irq_desc + irq;
- desc->handle_irq(irq, desc);
- }
+ if (mask & 1)
+ bfin_handle_irq(irq);
irq++;
mask >>= 1;
} while (mask);
while (request) {
if (request & 1) {
irq = pint2irq_lut[pint_val] + SYS_IRQS;
- desc = irq_desc + irq;
- desc->handle_irq(irq, desc);
+ bfin_handle_irq(irq);
}
pint_val++;
request >>= 1;
break;
#ifdef BF537_GENERIC_ERROR_INT_DEMUX
case IRQ_GENERIC_ERROR:
- set_irq_handler(irq, bfin_demux_error_irq);
-
+ set_irq_chained_handler(irq, bfin_demux_error_irq);
break;
#endif
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
case IRQ_TIMER0:
set_irq_handler(irq, handle_percpu_irq);
break;
break;
#endif
default:
+#ifdef CONFIG_IPIPE
+ /*
+ * We want internal interrupt sources to be masked, because
+ * ISRs may trigger interrupts recursively (e.g. DMA), but
+ * interrupts are _not_ masked at CPU level. So let's handle
+ * them as level interrupts.
+ */
+ set_irq_handler(irq, handle_level_irq);
+#else /* !CONFIG_IPIPE */
set_irq_handler(irq, handle_simple_irq);
+#endif /* !CONFIG_IPIPE */
break;
}
}
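
The default case above trades handle_simple_irq for handle_level_irq when CONFIG_IPIPE is enabled, because interrupts are no longer masked at the CPU level and a self-retriggering source (DMA, for instance) could otherwise re-enter its own handler. The difference, roughly, is whether the flow handler keeps the line masked for the duration of the action; a sketch of the level-style flow written against the genirq API of this kernel generation (not the actual genirq code):

/* Illustrative only: the shape of a level-type flow handler. */
static void level_flow_sketch(unsigned int irq, struct irq_desc *desc)
{
	desc->chip->mask_ack(irq);		/* hold the source off ...   */
	handle_IRQ_event(irq, desc->action);	/* ... while its action runs */
	if (!(desc->status & IRQ_DISABLED))
		desc->chip->unmask(irq);	/* then let it through again */
}
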
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
+#ifdef CONFIG_IPIPE
+ for (irq = 0; irq < NR_IRQS; irq++) {
+ struct irq_desc *desc = irq_desc + irq;
+ desc->ic_prio = __ipipe_get_irq_priority(irq);
+ desc->thr_prio = __ipipe_get_irqthread_priority(irq);
+ }
+#endif /* CONFIG_IPIPE */
+
return 0;
}
}
asm_do_IRQ(vec, fp);
}
+
+#ifdef CONFIG_IPIPE
+
+int __ipipe_get_irq_priority(unsigned irq)
+{
+ int ient, prio;
+
+ if (irq <= IRQ_CORETMR)
+ return irq;
+
+ for (ient = 0; ient < NR_PERI_INTS; ient++) {
+ struct ivgx *ivg = ivg_table + ient;
+ if (ivg->irqno == irq) {
+ for (prio = 0; prio <= IVG13-IVG7; prio++) {
+ if (ivg7_13[prio].ifirst <= ivg &&
+ ivg7_13[prio].istop > ivg)
+ return IVG7 + prio;
+ }
+ }
+ }
+
+ return IVG15;
+}
+
+int __ipipe_get_irqthread_priority(unsigned irq)
+{
+ int ient, prio;
+ int demux_irq;
+
+ /* The returned priority value is rescaled to [0..IVG13+1]
+ * with 0 being the lowest effective priority level. */
+
+ if (irq <= IRQ_CORETMR)
+ return IVG13 - irq + 1;
+
+ /* GPIO IRQs are given the priority of the demux
+ * interrupt. */
+ if (IS_GPIOIRQ(irq)) {
+#if defined(CONFIG_BF54x)
+ u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
+ demux_irq = (bank == 0 ? IRQ_PINT0 :
+ bank == 1 ? IRQ_PINT1 :
+ bank == 2 ? IRQ_PINT2 :
+ IRQ_PINT3);
+#elif defined(CONFIG_BF561)
+ demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
+ irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
+ IRQ_PROG0_INTA);
+#elif defined(CONFIG_BF52x)
+ demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
+ irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
+ IRQ_PORTF_INTA);
+#else
+ demux_irq = irq;
+#endif
+ return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
+ }
+
+ /* The GPIO demux interrupt is given a lower priority
+ * than the GPIO IRQs, so that its threaded handler
+ * unmasks the interrupt line after the decoded IRQs
+ * have been processed. */
+ prio = PRIO_GPIODEMUX(irq);
+ /* demux irq? */
+ if (prio != -1)
+ return IVG13 - prio;
+
+ for (ient = 0; ient < NR_PERI_INTS; ient++) {
+ struct ivgx *ivg = ivg_table + ient;
+ if (ivg->irqno == irq) {
+ for (prio = 0; prio <= IVG13-IVG7; prio++) {
+ if (ivg7_13[prio].ifirst <= ivg &&
+ ivg7_13[prio].istop > ivg)
+ return IVG7 - prio;
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
+{
+ struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
+ struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
+ int irq;
+
+ if (likely(vec == EVT_IVTMR_P)) {
+ irq = IRQ_CORETMR;
+ goto handle_irq;
+ }
+
+ SSYNC();
+
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
+ {
+ unsigned long sic_status[3];
+
+ sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
+ sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
+#ifdef CONFIG_BF54x
+ sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
+#endif
+ for (;; ivg++) {
+ if (ivg >= ivg_stop) {
+ atomic_inc(&num_spurious);
+ return 0;
+ }
+ if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
+ break;
+ }
+ }
+#else
+ {
+ unsigned long sic_status;
+
+ sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
+
+ for (;; ivg++) {
+ if (ivg >= ivg_stop) {
+ atomic_inc(&num_spurious);
+ return 0;
+ } else if (sic_status & ivg->isrflag)
+ break;
+ }
+ }
+#endif
+
+ irq = ivg->irqno;
+
+ if (irq == IRQ_SYSTMR) {
+ bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
+ /* This is basically what we need from the register frame. */
+ __raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
+ __raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
+ if (!ipipe_root_domain_p)
+ __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
+ else
+ __raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
+ }
+
+handle_irq:
+
+ ipipe_trace_irq_entry(irq);
+ __ipipe_handle_irq(irq, regs);
+ ipipe_trace_irq_exit(irq);
+
+ if (ipipe_root_domain_p)
+ return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+
+ return 0;
+}
+
+#endif /* CONFIG_IPIPE */
gpio_pm_wakeup_request(CONFIG_PM_WAKEUP_GPIO_NUMBER, WAKEUP_TYPE);
#endif
- local_irq_save(flags);
+ local_irq_save_hw(flags);
bfin_pm_standby_setup();
#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
}
int bf53x_suspend_l1_mem(unsigned char *memptr)
wakeup |= GPWE;
#endif
- local_irq_save(flags);
+ local_irq_save_hw(flags);
ret = blackfin_dma_suspend();
if (ret) {
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
kfree(memptr);
return ret;
}
bfin_gpio_pm_hibernate_restore();
blackfin_dma_resume();
- local_irq_restore(flags);
+ local_irq_restore_hw(flags);
kfree(memptr);
return 0;
static void __cpuinit setup_secondary(unsigned int cpu)
{
-#ifndef CONFIG_TICK_SOURCE_SYSTMR0
+#if !(defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE))
struct irq_desc *timer_desc;
#endif
unsigned long ilat;
IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
/* Power down the core timer, just to play safe. */
bfin_write_TCNTL(0);