select HAVE_CC_STACKPROTECTOR
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
- select HAVE_CONTEXT_TRACKING
+ select HAVE_CONTEXT_TRACKING if !IPIPE
select HAVE_DEBUG_BUGVERBOSE
select HAVE_DEBUG_KMEMLEAK
select HAVE_DMA_API_DEBUG
def_bool y
depends on NUMA
+source kernel/ipipe/Kconfig
source kernel/Kconfig.preempt
source kernel/Kconfig.hz
msr daif, \flags
.endm
+ .macro disable_irq_cond
+#ifdef CONFIG_IPIPE
+ msr daifset, #2
+#endif
+ .endm
+
+ .macro enable_irq_cond
+#ifdef CONFIG_IPIPE
+ msr daifclr, #2
+#endif
+ .endm
+
/*
* Enable and disable debug exceptions.
*/
--- /dev/null
+/* -*- linux-c -*-
+ * arch/arm64/include/asm/ipipe.h
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ * Copyright (C) 2005 Stelian Pop.
+ * Copyright (C) 2006-2008 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ARM_IPIPE_H
+#define __ARM_IPIPE_H
+
+#include <linux/irqdomain.h>
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/jump_label.h>
+#include <linux/ipipe_trace.h>
+#include <linux/ipipe_debug.h>
+
+#define IPIPE_CORE_RELEASE 2
+
+struct ipipe_domain;
+
+#define IPIPE_TSC_TYPE_NONE 0
+#define IPIPE_TSC_TYPE_FREERUNNING 1
+#define IPIPE_TSC_TYPE_DECREMENTER 2
+#define IPIPE_TSC_TYPE_FREERUNNING_COUNTDOWN 3
+#define IPIPE_TSC_TYPE_FREERUNNING_TWICE 4
+#define IPIPE_TSC_TYPE_FREERUNNING_ARCH 5
+
+/* tscinfo, exported to user-space */
+struct __ipipe_tscinfo {
+ unsigned type;
+ unsigned freq;
+ unsigned long counter_vaddr;
+ union {
+ struct {
+ unsigned long counter_paddr;
+ unsigned long long mask;
+ };
+ struct {
+ unsigned *counter; /* Hw counter physical address */
+ unsigned long long mask; /* Significant bits in the hw counter. */
+ unsigned long long *tsc; /* 64 bits tsc value. */
+ } fr;
+ struct {
+ unsigned *counter; /* Hw counter physical address */
+ unsigned long long mask; /* Significant bits in the hw counter. */
+ unsigned *last_cnt; /* Counter value when updating
+ tsc value. */
+ unsigned long long *tsc; /* 64 bits tsc value. */
+ } dec;
+ } u;
+};
+
+struct ipipe_arch_sysinfo {
+ struct __ipipe_tscinfo tsc;
+};
+
+
+/* arch specific stuff */
+
+void __ipipe_mach_get_tscinfo(struct __ipipe_tscinfo *info);
+
+static inline void __ipipe_mach_update_tsc(void) {}
+
+static inline notrace unsigned long long __ipipe_mach_get_tsc(void)
+{
+ return arch_counter_get_cntvct();
+}
+
+#define __ipipe_tsc_get() __ipipe_mach_get_tsc()
+void __ipipe_tsc_register(struct __ipipe_tscinfo *info);
+static inline void __ipipe_tsc_update(void) {}
+#ifndef __ipipe_hrclock_freq
+extern unsigned long __ipipe_hrtimer_freq;
+#define __ipipe_hrclock_freq __ipipe_hrtimer_freq
+#endif /* !__ipipe_hrclock_freq */
+
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+extern void (*__ipipe_mach_hrtimer_debug)(unsigned irq);
+#endif /* CONFIG_IPIPE_DEBUG_INTERNAL */
+
+#define ipipe_mm_switch_protect(__flags) \
+ do { \
+ (__flags) = hard_cond_local_irq_save(); \
+ } while (0)
+
+#define ipipe_mm_switch_unprotect(__flags) \
+ hard_cond_local_irq_restore(__flags)
+
+#define ipipe_read_tsc(t) do { t = __ipipe_tsc_get(); } while(0)
+#define __ipipe_read_timebase() __ipipe_tsc_get()
+
+#define ipipe_tsc2ns(t) \
+({ \
+ unsigned long long delta = (t)*1000; \
+ do_div(delta, __ipipe_hrclock_freq / 1000000 + 1); \
+ (unsigned long)delta; \
+})
+#define ipipe_tsc2us(t) \
+({ \
+ unsigned long long delta = (t); \
+ do_div(delta, __ipipe_hrclock_freq / 1000000 + 1); \
+ (unsigned long)delta; \
+})
+
+static inline const char *ipipe_clock_name(void)
+{
+ return "ipipe_tsc";
+}
+
+/* Private interface -- Internal use only */
+
+#define __ipipe_enable_irq(irq) enable_irq(irq)
+#define __ipipe_disable_irq(irq) disable_irq(irq)
+
+/* PIC muting */
+struct ipipe_mach_pic_muter {
+ void (*enable_irqdesc)(struct ipipe_domain *ipd, unsigned irq);
+ void (*disable_irqdesc)(struct ipipe_domain *ipd, unsigned irq);
+ void (*mute)(void);
+ void (*unmute)(void);
+};
+
+extern struct ipipe_mach_pic_muter ipipe_pic_muter;
+
+void ipipe_pic_muter_register(struct ipipe_mach_pic_muter *muter);
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq);
+
+static inline void ipipe_mute_pic(void)
+{
+ if (ipipe_pic_muter.mute)
+ ipipe_pic_muter.mute();
+}
+
+static inline void ipipe_unmute_pic(void)
+{
+ if (ipipe_pic_muter.unmute)
+ ipipe_pic_muter.unmute();
+}
+
+#define ipipe_notify_root_preemption() do { } while(0)
+
+#ifdef CONFIG_SMP
+void __ipipe_early_core_setup(void);
+void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
+void __ipipe_root_localtimer(unsigned int irq, void *cookie);
+void __ipipe_grab_ipi(unsigned svc, struct pt_regs *regs);
+void __ipipe_ipis_alloc(void);
+void __ipipe_ipis_request(void);
+
+static inline void ipipe_handle_multi_ipi(int irq, struct pt_regs *regs)
+{
+ __ipipe_grab_ipi(irq, regs);
+}
+
+#ifdef CONFIG_SMP_ON_UP
+extern struct static_key __ipipe_smp_key;
+#define ipipe_smp_p (static_key_true(&__ipipe_smp_key))
+#endif /* SMP_ON_UP */
+#else /* !CONFIG_SMP */
+#define __ipipe_early_core_setup() do { } while(0)
+#define __ipipe_hook_critical_ipi(ipd) do { } while(0)
+#endif /* !CONFIG_SMP */
+#ifndef __ipipe_mach_init_platform
+#define __ipipe_mach_init_platform() do { } while(0)
+#endif
+
+void __ipipe_enable_pipeline(void);
+
+void __ipipe_do_critical_sync(unsigned irq, void *cookie);
+
+void __ipipe_grab_irq(int irq, struct pt_regs *regs);
+
+void __ipipe_exit_irq(struct pt_regs *regs);
+
+static inline
+int ipipe_handle_domain_irq(struct irq_domain *domain,
+ unsigned int hwirq, struct pt_regs *regs)
+{
+ unsigned int irq;
+
+ irq = irq_find_mapping(domain, hwirq);
+ __ipipe_grab_irq(irq, regs);
+
+ return 0;
+}
+
+/*
+ * Return the index of the least significant set bit of a non-zero
+ * word: rbit reverses the bit order, so the following clz counts the
+ * trailing zeroes of the original value.
+ */
+static inline unsigned long __ipipe_ffnz(unsigned long ul)
+{
+ int __r;
+
+ /* zero input is not valid */
+ IPIPE_WARN(ul == 0);
+
+ __asm__ ("rbit\t%0, %1\n"
+ "clz\t%0, %0\n"
+ : "=r" (__r) : "r"(ul) : "cc");
+
+ return __r;
+}
+
+#define __ipipe_syscall_watched_p(p, sc) \
+ (ipipe_notifier_enabled_p(p) || (unsigned long)sc >= __NR_syscalls)
+
+#define __ipipe_root_tick_p(regs) (!arch_irqs_disabled_flags(regs->pstate))
+
+#else /* !CONFIG_IPIPE */
+
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+
+#define __ipipe_tsc_update() do { } while(0)
+
+#define hard_smp_processor_id() smp_processor_id()
+
+#define ipipe_mm_switch_protect(flags) \
+ do { \
+ (void) (flags); \
+ } while(0)
+
+#define ipipe_mm_switch_unprotect(flags) \
+ do { \
+ (void) (flags); \
+ } while(0)
+
+#ifdef CONFIG_SMP
+static inline void ipipe_handle_multi_ipi(int irq, struct pt_regs *regs)
+{
+ handle_IPI(irq, regs);
+}
+#endif /* CONFIG_SMP */
+
+static inline
+int ipipe_handle_domain_irq(struct irq_domain *domain,
+ unsigned int hwirq, struct pt_regs *regs)
+{
+ return handle_domain_irq(domain, hwirq, regs);
+}
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* !__ARM_IPIPE_H */
--- /dev/null
+/* -*- linux-c -*-
+ * arch/arm64/include/asm/ipipe_base.h
+ *
+ * Copyright (C) 2007 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ASM_ARM_IPIPE_BASE_H
+#define __ASM_ARM_IPIPE_BASE_H
+
+#include <asm-generic/ipipe.h>
+
+#ifdef CONFIG_IPIPE
+
+#define IPIPE_NR_ROOT_IRQS 1024
+
+#define IPIPE_NR_XIRQS IPIPE_NR_ROOT_IRQS
+
+#ifdef CONFIG_SMP
+
+extern unsigned __ipipe_first_ipi;
+
+#define IPIPE_CRITICAL_IPI __ipipe_first_ipi
+#define IPIPE_HRTIMER_IPI (IPIPE_CRITICAL_IPI + 1)
+#define IPIPE_RESCHEDULE_IPI (IPIPE_CRITICAL_IPI + 2)
+
+#define IPIPE_LAST_IPI IPIPE_RESCHEDULE_IPI
+
+#ifdef CONFIG_IPIPE_LEGACY
+/*
+ * NOTE(review): "mrc p15, 0, %0, c0, c0, 5" is the ARM32 coprocessor
+ * read of MPIDR and does not assemble in AArch64 state (which would
+ * use "mrs %0, mpidr_el1"); confirm CONFIG_IPIPE_LEGACY is never
+ * enabled on this architecture, or fix the asm.
+ * NOTE(review): __cpu_logical_map is declared/defined as u64[]
+ * elsewhere in this patch; the u32 extern below looks inherited from
+ * the arm32 port and would mis-index the map -- verify.
+ */
+#define hard_smp_processor_id() \
+ ({ \
+ unsigned int cpunum; \
+ __asm__ __volatile__ ("\n" \
+ "1: mrc p15, 0, %0, c0, c0, 5\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .long 1b\n" \
+ " mov %0, #0\n" \
+ " .popsection" \
+ : "=r" (cpunum)); \
+ cpunum &= 0xFF; \
+ })
+extern u32 __cpu_logical_map[];
+#define ipipe_processor_id() (__cpu_logical_map[hard_smp_processor_id()])
+
+#else /* !legacy */
+#define hard_smp_processor_id() raw_smp_processor_id()
+
+#ifdef CONFIG_SMP_ON_UP
+unsigned __ipipe_processor_id(void);
+
+#define ipipe_processor_id() \
+ ({ \
+ register unsigned int cpunum __asm__ ("r0"); \
+ register unsigned int r1 __asm__ ("r1"); \
+ register unsigned int r2 __asm__ ("r2"); \
+ register unsigned int r3 __asm__ ("r3"); \
+ register unsigned int ip __asm__ ("ip"); \
+ register unsigned int lr __asm__ ("lr"); \
+ __asm__ __volatile__ ("\n" \
+ "1: bl __ipipe_processor_id\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .long 1b\n" \
+ " mov %0, #0\n" \
+ " .popsection" \
+ : "=r"(cpunum), "=r"(r1), "=r"(r2), "=r"(r3), \
+ "=r"(ip), "=r"(lr) \
+ : /* */ : "cc"); \
+ cpunum; \
+ })
+#else /* !SMP_ON_UP */
+#define ipipe_processor_id() raw_smp_processor_id()
+#endif /* !SMP_ON_UP */
+#endif /* !legacy */
+
+#define IPIPE_ARCH_HAVE_VIRQ_IPI
+
+#else /* !CONFIG_SMP */
+#define ipipe_processor_id() (0)
+#endif /* !CONFIG_SMP */
+
+/* ARM traps */
+#define IPIPE_TRAP_ACCESS 0 /* Data or instruction access exception */
+#define IPIPE_TRAP_SECTION 1 /* Section fault */
+#define IPIPE_TRAP_DABT 2 /* Generic data abort */
+#define IPIPE_TRAP_UNKNOWN 3 /* Unknown exception */
+#define IPIPE_TRAP_BREAK 4 /* Instruction breakpoint */
+#define IPIPE_TRAP_FPU_ACC 5 /* Floating point access */
+#define IPIPE_TRAP_FPU_EXC 6 /* Floating point exception */
+#define IPIPE_TRAP_UNDEFINSTR 7 /* Undefined instruction */
+#define IPIPE_TRAP_ALIGNMENT 8 /* Unaligned access exception */
+#define IPIPE_TRAP_MAYDAY 9 /* Internal recovery trap */
+#define IPIPE_NR_FAULTS 10
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+
+void ipipe_stall_root(void);
+
+unsigned long ipipe_test_and_stall_root(void);
+
+unsigned long ipipe_test_root(void);
+
+#else /* !CONFIG_SMP */
+
+#include <asm/irqflags.h>
+
+#if __GNUC__ >= 4
+/* Alias to ipipe_root_cpudom_var(status) */
+extern unsigned long __ipipe_root_status;
+#else
+extern unsigned long *const __ipipe_root_status_addr;
+#define __ipipe_root_status (*__ipipe_root_status_addr)
+#endif
+
+/* Stall the root domain: set bit 0 of the root status word, with hard
+ * IRQs masked so the read-modify-write cannot race an interrupt. */
+static inline void ipipe_stall_root(void)
+{
+ unsigned long flags;
+
+ flags = hard_local_irq_save();
+ __ipipe_root_status |= 1;
+ hard_local_irq_restore(flags);
+}
+
+/* Return the current state of the root stall bit (bit 0). */
+static inline unsigned ipipe_test_root(void)
+{
+ return __ipipe_root_status & 1;
+}
+
+/* Set the root stall bit and return its previous value; hard IRQs are
+ * masked across the read-modify-write to keep it atomic on UP. */
+static inline unsigned ipipe_test_and_stall_root(void)
+{
+ unsigned long flags, res;
+
+ flags = hard_local_irq_save();
+ res = __ipipe_root_status;
+ __ipipe_root_status = res | 1;
+ hard_local_irq_restore(flags);
+
+ return res & 1;
+}
+
+#endif /* !CONFIG_SMP */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* __ASM_ARM_IPIPE_BASE_H */
--- /dev/null
+/* -*- linux-c -*-
+ * arch/arm64/include/asm/ipipe_hwirq.h
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ * Copyright (C) 2005 Stelian Pop.
+ * Copyright (C) 2006-2008 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ASM_ARM_IPIPE_HWIRQ_H
+#define _ASM_ARM_IPIPE_HWIRQ_H
+
+#define hard_local_irq_restore_notrace(x) \
+ __asm__ __volatile__( \
+ "msr daif, %0" \
+ : \
+ : "r" (x) \
+ : "memory", "cc")
+
+/* Hard-mask IRQs at the CPU (sets DAIF.I via daifset #2), bypassing
+ * both the interrupt pipeline and the I-pipe tracer. */
+static inline void hard_local_irq_disable_notrace(void)
+{
+ __asm__ __volatile__("msr daifset, #2" : : : "memory", "cc");
+}
+
+/* Hard-unmask IRQs at the CPU (clears DAIF.I). */
+static inline void hard_local_irq_enable_notrace(void)
+{
+ __asm__ __volatile__("msr daifclr, #2" : : : "memory", "cc");
+}
+
+/* Hard-mask FIQs (sets DAIF.F via daifset #1). */
+static inline void hard_local_fiq_disable_notrace(void)
+{
+ __asm__ __volatile__("msr daifset, #1" : : : "memory", "cc");
+}
+
+/* Hard-unmask FIQs (clears DAIF.F). */
+static inline void hard_local_fiq_enable_notrace(void)
+{
+ __asm__ __volatile__("msr daifclr, #1" : : : "memory", "cc");
+}
+
+/* Save the whole DAIF word, then hard-mask IRQs. The returned value
+ * is intended for hard_local_irq_restore_notrace(). */
+static inline unsigned long hard_local_irq_save_notrace(void)
+{
+ unsigned long res;
+ __asm__ __volatile__(
+ "mrs %0, daif\n"
+ "msr daifset, #2"
+ : "=r" (res) : : "memory", "cc");
+ return res;
+}
+
+#include <asm-generic/ipipe.h>
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/ipipe_trace.h>
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (int)((flags) & PSR_I_BIT);
+}
+
+static inline unsigned long hard_local_save_flags(void)
+{
+ unsigned long flags;
+ __asm__ __volatile__(
+ "mrs %0, daif"
+ : "=r" (flags) : : "memory", "cc");
+ return flags;
+}
+
+#define hard_irqs_disabled_flags(flags) arch_irqs_disabled_flags(flags)
+
+static inline int hard_irqs_disabled(void)
+{
+ return hard_irqs_disabled_flags(hard_local_save_flags());
+}
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+
+static inline void hard_local_irq_disable(void)
+{
+ if (!hard_irqs_disabled()) {
+ hard_local_irq_disable_notrace();
+ ipipe_trace_begin(0x80000000);
+ }
+}
+
+static inline void hard_local_irq_enable(void)
+{
+ if (hard_irqs_disabled()) {
+ ipipe_trace_end(0x80000000);
+ hard_local_irq_enable_notrace();
+ }
+}
+
+static inline unsigned long hard_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = hard_local_irq_save_notrace();
+ if (!arch_irqs_disabled_flags(flags))
+ ipipe_trace_begin(0x80000001);
+
+ return flags;
+}
+
+static inline void hard_local_irq_restore(unsigned long x)
+{
+ if (!arch_irqs_disabled_flags(x))
+ ipipe_trace_end(0x80000001);
+
+ hard_local_irq_restore_notrace(x);
+}
+
+#else /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#define hard_local_irq_disable hard_local_irq_disable_notrace
+#define hard_local_irq_enable hard_local_irq_enable_notrace
+#define hard_local_irq_save hard_local_irq_save_notrace
+#define hard_local_irq_restore hard_local_irq_restore_notrace
+
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#define arch_local_irq_disable() \
+ ({ \
+ ipipe_stall_root(); \
+ barrier(); \
+ })
+
+#define arch_local_irq_enable() \
+ do { \
+ barrier(); \
+ ipipe_unstall_root(); \
+ } while (0)
+
+#define local_fiq_enable() hard_local_fiq_enable_notrace()
+
+#define local_fiq_disable() hard_local_fiq_disable_notrace()
+
+#define arch_local_irq_restore(flags) \
+ do { \
+ if (!arch_irqs_disabled_flags(flags)) \
+ arch_local_irq_enable(); \
+ } while (0)
+
+#define arch_local_irq_save() \
+ ({ \
+ unsigned long _flags; \
+ _flags = ipipe_test_and_stall_root() << 7; \
+ barrier(); \
+ _flags; \
+ })
+
+#define arch_local_save_flags() \
+ ({ \
+ unsigned long _flags; \
+ _flags = ipipe_test_root() << 7; \
+ barrier(); \
+ _flags; \
+ })
+
+#define arch_irqs_disabled() ipipe_test_root()
+#define hard_irq_disable() hard_local_irq_disable()
+
+static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
+{
+ /* Merge virtual and real interrupt mask bits into a single
+ 32bit word. */
+ return (real & ~(1L << 8)) | ((virt != 0) << 8);
+}
+
+static inline int arch_demangle_irq_bits(unsigned long *x)
+{
+ int virt = (*x & (1 << 8)) != 0;
+ *x &= ~(1L << 8);
+ return virt;
+}
+
+#else /* !CONFIG_IPIPE */
+
+#define hard_local_irq_save() arch_local_irq_save()
+#define hard_local_irq_restore(x) arch_local_irq_restore(x)
+#define hard_local_irq_enable() arch_local_irq_enable()
+#define hard_local_irq_disable() arch_local_irq_disable()
+#define hard_irqs_disabled() irqs_disabled()
+
+#define hard_cond_local_irq_enable() do { } while(0)
+#define hard_cond_local_irq_disable() do { } while(0)
+#define hard_cond_local_irq_save() 0
+#define hard_cond_local_irq_restore(flags) do { (void)(flags); } while(0)
+
+#endif /* !CONFIG_IPIPE */
+
+#if defined(CONFIG_SMP) && defined(CONFIG_IPIPE)
+#define hard_smp_local_irq_save() hard_local_irq_save()
+#define hard_smp_local_irq_restore(flags) hard_local_irq_restore(flags)
+#else /* !CONFIG_SMP */
+#define hard_smp_local_irq_save() 0
+#define hard_smp_local_irq_restore(flags) do { (void)(flags); } while(0)
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_ARM_IPIPE_HWIRQ_H */
#include <asm/ptrace.h>
+#include <asm/ipipe_hwirq.h>
+
+#ifndef CONFIG_IPIPE
+
/*
* CPU interrupt mask handling.
*/
#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
-#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
-#define local_async_disable() asm("msr daifset, #4" : : : "memory")
/*
* Save the current interrupt enable state.
return flags & PSR_I_BIT;
}
+#endif /* CONFIG_IPIPE */
/*
* save and restore debug state
*/
: : "r" (flags) : "memory"); \
} while (0)
+#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
+#define local_async_disable() asm("msr daifset, #4" : : : "memory")
+
#endif
#endif
#define __ASM_PERCPU_H
#include <asm/stack_pointer.h>
+#include <asm/ipipe_base.h>
static inline void set_my_cpu_offset(unsigned long off)
{
/*
* Logical CPU mapping.
*/
-extern u64 __cpu_logical_map[NR_CPUS];
+extern u64 __cpu_logical_map[];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
/*
* Retrieve logical cpu index corresponding to a given MPIDR.Aff*
#include <asm/memory.h>
#include <asm/stack_pointer.h>
#include <asm/types.h>
+#include <ipipe/thread_info.h>
typedef unsigned long mm_segment_t;
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
int preempt_count; /* 0 => preemptable, <0 => bug */
+#ifdef CONFIG_IPIPE
+ unsigned long ipipe_flags;
+#endif
+ struct ipipe_threadinfo ipipe_data;
};
#define INIT_THREAD_INFO(tsk) \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
_TIF_NOHZ)
+/* ti->ipipe_flags */
+#define TIP_MAYDAY 0 /* MAYDAY call is pending */
+#define TIP_NOTIFY 1 /* Notify head domain about kernel events */
+#define TIP_HEAD 2 /* Runs in head domain */
+
+#define _TIP_MAYDAY (1 << TIP_MAYDAY)
+#define _TIP_NOTIFY (1 << TIP_NOTIFY)
+#define _TIP_HEAD (1 << TIP_HEAD)
+
#endif /* __KERNEL__ */
#endif /* __ASM_THREAD_INFO_H */
#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>
+#include <asm/ipipe.h>
/*
* User space memory access functions
#include <asm/compiler.h>
#include <asm/extable.h>
+#include <asm-generic/ipipe.h>
+
#define KERNEL_DS (-1UL)
#define get_ds() (KERNEL_DS)
#define get_user(x, ptr) \
({ \
__typeof__(*(ptr)) __user *__p = (ptr); \
- might_fault(); \
+ __ipipe_uaccess_might_fault(); \
access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
__get_user((x), __p) : \
((x) = 0, -EFAULT); \
#define put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __user *__p = (ptr); \
- might_fault(); \
+ __ipipe_uaccess_might_fault(); \
access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
__put_user((x), __p) : \
-EFAULT; \
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
+obj-$(CONFIG_IPIPE) += ipipe.o
head-y := head.o
extra-y += $(head-y) vmlinux.lds
int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TSK_STACK, offsetof(struct task_struct, stack));
BLANK();
DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+#ifdef CONFIG_IPIPE
+ DEFINE(TSK_TI_IPIPE, offsetof(struct task_struct, thread_info.ipipe_flags));
+#endif
DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
#endif
.endm
+#ifdef CONFIG_IPIPE
+#define PREEMPT_SCHEDULE_IRQ __ipipe_preempt_schedule_irq
+#else /* !CONFIG_IPIPE */
+#define ret_from_exception ret_to_user
+#define PREEMPT_SCHEDULE_IRQ preempt_schedule_irq
+#endif /* CONFIG_IPIPE */
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_IPIPE)
+#define TRACE_IRQSON bl trace_hardirqs_on
+#define TRACE_IRQSOFF bl trace_hardirqs_off
+#else
+#define TRACE_IRQSON
+#define TRACE_IRQSOFF
+#endif
+
/*
* Bad Abort numbers
*-----------------
irq_stack_entry
blr x1
irq_stack_exit
+#ifdef CONFIG_IPIPE
+ bl __ipipe_check_root_interruptible
+ cmp w0, #1
+#endif /* CONFIG_IPIPE */
.endm
.text
el1_irq:
kernel_entry 1
enable_dbg
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
+ TRACE_IRQSOFF
irq_handler
+#ifdef CONFIG_IPIPE
+ bne ipipe_fast_svc_irq_exit
+#endif
#ifdef CONFIG_PREEMPT
ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
cbnz w24, 1f // preempt count != 0
bl el1_preempt
1:
#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
+#ifdef CONFIG_IPIPE
+ipipe_fast_svc_irq_exit:
#endif
+ TRACE_IRQSON
kernel_exit 1
ENDPROC(el1_irq)
#ifdef CONFIG_PREEMPT
el1_preempt:
mov x24, lr
-1: bl preempt_schedule_irq // irq en/disable is done inside
+1: bl PREEMPT_SCHEDULE_IRQ
ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
ret x24
kernel_entry 0
el0_irq_naked:
enable_dbg
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
+ TRACE_IRQSOFF
ct_user_exit
irq_handler
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
-#endif
+#ifdef CONFIG_IPIPE
+ b.eq normal_irq_ret
+ /* Fast IRQ exit, root domain stalled or not current. */
+ kernel_exit 0
+normal_irq_ret:
+#endif /* CONFIG_IPIPE */
b ret_to_user
ENDPROC(el0_irq)
+#ifdef CONFIG_IPIPE
+ret_from_exception:
+ disable_irq
+ ldr x0, [tsk, #TSK_TI_IPIPE]
+ tst x0, #_TIP_HEAD
+ b.eq ret_to_user_noirq
+ kernel_exit 0
+#endif /* CONFIG_IPIPE */
+
/*
* This is the fast syscall return path. We do as little as possible here,
* and this includes saving x0 back into the kernel stack.
*/
ret_to_user:
disable_irq // disable interrupts
+ret_to_user_noirq:
ldr x1, [tsk, #TSK_TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
* This is how we return from a fork.
*/
ENTRY(ret_from_fork)
+ enable_irq_cond
bl schedule_tail
cbz x19, 1f // not a kernel thread
mov x0, x20
--- /dev/null
+/* -*- linux-c -*-
+ * linux/arch/arm64/kernel/ipipe.c
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ * Copyright (C) 2004 Wolfgang Grandegger (Adeos/arm port over 2.4).
+ * Copyright (C) 2005 Heikki Lindholm (PowerPC 970 fixes).
+ * Copyright (C) 2005 Stelian Pop.
+ * Copyright (C) 2006-2008 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-PIPE support for ARM64.
+ */
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/ipipe_trace.h>
+#include <linux/irq.h>
+#include <linux/irqnr.h>
+#include <linux/prefetch.h>
+#include <linux/cpu.h>
+#include <linux/sched/debug.h>
+#include <linux/ipipe_domain.h>
+#include <linux/ipipe_tickdev.h>
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+#include <asm/io.h>
+#include <asm/unistd.h>
+#include <asm/mmu_context.h>
+#include <asm/exception.h>
+#include <asm/arch_timer.h>
+
+static void __ipipe_do_IRQ(unsigned irq, void *cookie);
+
+/* irq_nesting tracks the interrupt nesting level for a CPU. */
+DEFINE_PER_CPU(int, irq_nesting);
+
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+void (*__ipipe_mach_hrtimer_debug)(unsigned irq);
+#endif
+
+#ifdef CONFIG_SMP
+
+void __ipipe_early_core_setup(void)
+{
+ __ipipe_mach_init_platform();
+}
+
+void ipipe_stall_root(void)
+{
+ unsigned long flags;
+
+ ipipe_root_only();
+ flags = hard_smp_local_irq_save();
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
+ hard_smp_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(ipipe_stall_root);
+
+unsigned long ipipe_test_and_stall_root(void)
+{
+ unsigned long flags;
+ int x;
+
+ ipipe_root_only();
+ flags = hard_smp_local_irq_save();
+ x = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
+ hard_smp_local_irq_restore(flags);
+
+ return x;
+}
+EXPORT_SYMBOL_GPL(ipipe_test_and_stall_root);
+
+unsigned long ipipe_test_root(void)
+{
+ unsigned long flags;
+ int x;
+
+ flags = hard_smp_local_irq_save();
+ x = test_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
+ hard_smp_local_irq_restore(flags);
+
+ return x;
+}
+EXPORT_SYMBOL_GPL(ipipe_test_root);
+
+static inline void
+hook_internal_ipi(struct ipipe_domain *ipd, int virq,
+ void (*handler)(unsigned int irq, void *cookie))
+{
+ ipd->irqs[virq].ackfn = NULL;
+ ipd->irqs[virq].handler = handler;
+ ipd->irqs[virq].cookie = NULL;
+ /* Immediately handle in the current domain but *never* pass */
+ ipd->irqs[virq].control = IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK;
+}
+
+void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd)
+{
+ __ipipe_ipis_alloc();
+ hook_internal_ipi(ipd, IPIPE_CRITICAL_IPI, __ipipe_do_critical_sync);
+}
+
+void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask)
+{
+ if (ipipe_virtual_irq_p(irq) ||
+ irq_get_chip(irq)->irq_set_affinity == NULL)
+ return;
+
+ cpumask_and(&cpumask, &cpumask, cpu_online_mask);
+ if (WARN_ON_ONCE(cpumask_empty(&cpumask)))
+ return;
+
+ irq_get_chip(irq)->irq_set_affinity(irq_get_irq_data(irq), &cpumask, true);
+}
+EXPORT_SYMBOL_GPL(ipipe_set_irq_affinity);
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_SMP_ON_UP
+struct static_key __ipipe_smp_key = STATIC_KEY_INIT_TRUE;
+
+unsigned __ipipe_processor_id(void)
+{
+ return raw_smp_processor_id();
+}
+EXPORT_SYMBOL_GPL(__ipipe_processor_id);
+
+static int ipipe_disable_smp(void)
+{
+ if (num_online_cpus() == 1) {
+ unsigned long flags;
+
+ printk("I-pipe: disabling SMP code\n");
+
+ flags = hard_local_irq_save();
+ static_key_slow_dec(&__ipipe_smp_key);
+ hard_local_irq_restore(flags);
+ }
+ return 0;
+}
+arch_initcall(ipipe_disable_smp);
+#endif /* SMP_ON_UP */
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+ info->sys_nr_cpus = num_online_cpus();
+ info->sys_cpu_freq = __ipipe_hrclock_freq;
+ info->sys_hrtimer_irq = per_cpu(ipipe_percpu.hrtimer_irq, 0);
+ info->sys_hrtimer_freq = __ipipe_hrtimer_freq;
+ info->sys_hrclock_freq = __ipipe_hrclock_freq;
+ __ipipe_mach_get_tscinfo(&info->arch.tsc);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipipe_get_sysinfo);
+
+struct ipipe_mach_pic_muter ipipe_pic_muter;
+EXPORT_SYMBOL_GPL(ipipe_pic_muter);
+
+void ipipe_pic_muter_register(struct ipipe_mach_pic_muter *muter)
+{
+ ipipe_pic_muter = *muter;
+}
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+ /* With sparse IRQs, some irqs may not have a descriptor */
+ if (irq_to_desc(irq) == NULL)
+ return;
+
+ if (ipipe_pic_muter.enable_irqdesc)
+ ipipe_pic_muter.enable_irqdesc(ipd, irq);
+}
+EXPORT_SYMBOL_GPL(__ipipe_enable_irqdesc);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+ if (ipipe_pic_muter.disable_irqdesc)
+ ipipe_pic_muter.disable_irqdesc(ipd, irq);
+}
+EXPORT_SYMBOL_GPL(__ipipe_disable_irqdesc);
+
+/*
+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+ * interrupts are off, and secondary CPUs are still lost in space.
+ */
+void __ipipe_enable_pipeline(void)
+{
+ unsigned long flags;
+ unsigned int irq;
+
+ flags = ipipe_critical_enter(NULL);
+
+ /* virtualize all interrupts from the root domain. */
+ for (irq = 0; irq < IPIPE_NR_ROOT_IRQS; irq++)
+ ipipe_request_irq(ipipe_root_domain,
+ irq,
+ (ipipe_irq_handler_t)__ipipe_do_IRQ,
+ NULL, NULL);
+
+#ifdef CONFIG_SMP
+ __ipipe_ipis_request();
+#endif /* CONFIG_SMP */
+
+ ipipe_critical_exit(flags);
+}
+
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+unsigned asmlinkage __ipipe_bugon_irqs_enabled(unsigned x)
+{
+ BUG_ON(!hard_irqs_disabled());
+ return x; /* Preserve r0 */
+}
+#endif
+
+asmlinkage int __ipipe_check_root_interruptible(void)
+{
+ return __ipipe_root_p && !irqs_disabled();
+}
+
+void __ipipe_exit_irq(struct pt_regs *regs)
+{
+ /*
+ * Testing for user_regs() eliminates foreign stack contexts,
+ * including from legacy domains which did not set the foreign
+ * stack bit (foreign stacks are always kernel-based).
+ */
+ if (user_mode(regs) &&
+ ipipe_test_thread_flag(TIP_MAYDAY)) {
+ /*
+ * MAYDAY is never raised under normal circumstances,
+ * so prefer test then maybe clear over
+ * test_and_clear.
+ */
+ ipipe_clear_thread_flag(TIP_MAYDAY);
+ __ipipe_notify_trap(IPIPE_TRAP_MAYDAY, regs);
+ }
+}
+
+/* hw irqs off */
+asmlinkage void __exception __ipipe_grab_irq(int irq, struct pt_regs *regs)
+{
+ struct ipipe_percpu_data *p = __ipipe_raw_cpu_ptr(&ipipe_percpu);
+
+ ipipe_trace_irq_entry(irq);
+
+ if (p->hrtimer_irq == -1)
+ goto copy_regs;
+
+ if (irq == p->hrtimer_irq) {
+ /*
+ * Given our deferred dispatching model for regular IRQs, we
+ * only record CPU regs for the last timer interrupt, so that
+ * the timer handler charges CPU times properly. It is assumed
+ * that other interrupt handlers don't actually care for such
+ * information.
+ */
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+ if (__ipipe_mach_hrtimer_debug)
+ __ipipe_mach_hrtimer_debug(irq);
+#endif /* CONFIG_IPIPE_DEBUG_INTERNAL */
+ copy_regs:
+ p->tick_regs.pstate =
+ (p->curr == &p->root
+ ? regs->pstate
+ : regs->pstate | PSR_I_BIT);
+ p->tick_regs.pc = regs->pc;
+ }
+
+ __ipipe_dispatch_irq(irq, 0);
+
+ ipipe_trace_irq_exit(irq);
+
+ __ipipe_exit_irq(regs);
+}
+
+static void __ipipe_do_IRQ(unsigned irq, void *cookie)
+{
+ struct pt_regs *regs = raw_cpu_ptr(&ipipe_percpu.tick_regs);
+ __handle_domain_irq(NULL, irq, false, regs);
+}
+
+static struct __ipipe_tscinfo tsc_info;
+
+void __init __ipipe_tsc_register(struct __ipipe_tscinfo *info)
+{
+ tsc_info = *info;
+ __ipipe_hrclock_freq = info->freq;
+}
+
+void __ipipe_mach_get_tscinfo(struct __ipipe_tscinfo *info)
+{
+ *info = tsc_info;
+}
+
+EXPORT_SYMBOL_GPL(do_munmap);
+EXPORT_SYMBOL_GPL(show_stack);
+EXPORT_SYMBOL_GPL(init_mm);
+#ifndef MULTI_CPU
+EXPORT_SYMBOL_GPL(cpu_do_switch_mm);
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+EXPORT_SYMBOL_GPL(tasklist_lock);
+#endif /* CONFIG_SMP || CONFIG_DEBUG_SPINLOCK */
+
+#ifndef CONFIG_SPARSE_IRQ
+EXPORT_SYMBOL_GPL(irq_desc);
+#endif
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+#ifdef CONFIG_IPIPE
+/*
+ * Pipeline-aware idle entry: hard-disable IRQs, unstall the root domain,
+ * then either flush interrupts already logged against root (instead of
+ * sleeping with work pending) or go to sleep via cpu_do_idle().
+ */
+static void __ipipe_halt_root(void)
+{
+ struct ipipe_percpu_domain_data *p;
+
+ /*
+ * Emulate idle entry sequence over the root domain, which is
+ * stalled on entry.
+ */
+ hard_local_irq_disable();
+
+ p = ipipe_this_cpu_root_context();
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+
+ /* Interrupts pending in root's log: play them rather than idling. */
+ if (unlikely(__ipipe_ipending_p(p)))
+ __ipipe_sync_stage();
+ else {
+ cpu_do_idle();
+ }
+}
+
+#else /* !CONFIG_IPIPE */
+
+/* No pipeline: idle entry degenerates to the plain architecture idle. */
+static void __ipipe_halt_root(void)
+{
+ cpu_do_idle();
+}
+
+#endif /* !CONFIG_IPIPE */
+
/*
* This is our default idle handler.
*/
* tricks
*/
trace_cpu_idle_rcuidle(1, smp_processor_id());
- cpu_do_idle();
+ /*
+ * Idle entry must go through __ipipe_halt_root(), which emulates
+ * the sequence over the (stalled) root domain and falls back to a
+ * plain cpu_do_idle() when CONFIG_IPIPE is off.  The upstream
+ * cpu_do_idle() call is removed: keeping it would put the CPU to
+ * sleep a second time, after __ipipe_halt_root() has already
+ * hard-disabled IRQs.
+ */
+ if (!need_resched())
+ __ipipe_halt_root();
local_irq_enable();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}
}
+/*
+ * NOTE(review): the logical map is padded to at least 16 entries when
+ * NR_CPUS is small -- presumably some pipeline client indexes it beyond
+ * NR_CPUS; confirm the actual consumer before relying on this bound.
+ */
+#if NR_CPUS > 16
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+#else
+u64 __cpu_logical_map[16] = { [0 ... 15] = INVALID_HWID };
+#endif
void __init setup_arch(char **cmdline_p)
{
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
#include <asm/virt.h>
+#include <asm/exception.h>
+#include <asm/ipipe.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
IPI_CPU_CRASH_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
- IPI_WAKEUP
+ IPI_WAKEUP,
+#ifdef CONFIG_IPIPE
+ IPI_IPIPE_FIRST,
+#endif /* CONFIG_IPIPE */
};
+/*
+ * irq_enter()/irq_exit() wrappers for the IPI handlers below.  Under
+ * CONFIG_IPIPE they compile to nothing -- presumably the pipeline entry
+ * code already performs the equivalent accounting before handle_IPI()
+ * runs; confirm against the dispatch path.  Without the pipeline they
+ * are the regular calls.
+ */
+#ifdef CONFIG_IPIPE
+#define noipipe_irq_enter() \
+ do { \
+ } while(0)
+#define noipipe_irq_exit() \
+ do { \
+ } while(0)
+#else /* !CONFIG_IPIPE */
+#define noipipe_irq_enter() irq_enter()
+#define noipipe_irq_exit() irq_exit()
+#endif /* !CONFIG_IPIPE */
+
#ifdef CONFIG_ARM64_VHE
/* Whether the boot CPU is running in HYP mode or not*/
return sum;
}
+#ifdef CONFIG_IPIPE
+/* IPIs occupy a contiguous virq range starting at the bottom of the virq space. */
+#define IPIPE_IPI_BASE IPIPE_VIRQ_BASE
+
+/*
+ * Virq of the first pipeline-private IPI; zero until __ipipe_ipis_alloc()
+ * has reserved the range (also used as its "already done" flag).
+ */
+unsigned __ipipe_first_ipi;
+EXPORT_SYMBOL_GPL(__ipipe_first_ipi);
+
+/*
+ * Root-domain handler for kernel IPIs: translate the virq back into its
+ * IPI message number and run the regular handle_IPI() path with the
+ * per-CPU register snapshot.  @cookie is unused.
+ */
+static void __ipipe_do_IPI(unsigned virq, void *cookie)
+{
+ enum ipi_msg_type msg = virq - IPIPE_IPI_BASE;
+ handle_IPI(msg, raw_cpu_ptr(&ipipe_percpu.tick_regs));
+}
+
+/*
+ * Reserve one virq per IPI (kernel IPIs followed by the pipeline's own
+ * ones) as a contiguous block starting at IPIPE_IPI_BASE.  Idempotent:
+ * a non-zero __ipipe_first_ipi means the range was already reserved.
+ * Panics if the allocator hands back a non-contiguous virq, since the
+ * virq<->IPI mapping is pure arithmetic on IPIPE_IPI_BASE.
+ */
+void __ipipe_ipis_alloc(void)
+{
+ unsigned virq, _virq;
+ unsigned ipi_nr;
+
+ if (__ipipe_first_ipi)
+ return;
+
+ /* __ipipe_first_ipi is 0 here */
+ ipi_nr = IPI_IPIPE_FIRST + IPIPE_LAST_IPI + 1;
+
+ for (virq = IPIPE_IPI_BASE; virq < IPIPE_IPI_BASE + ipi_nr; virq++) {
+ _virq = ipipe_alloc_virq();
+ if (virq != _virq)
+ panic("I-pipe: cannot reserve virq #%d (got #%d)\n",
+ virq, _virq);
+
+ /* Remember where the pipeline-private IPIs start. */
+ if (virq - IPIPE_IPI_BASE == IPI_IPIPE_FIRST)
+ __ipipe_first_ipi = virq;
+ }
+}
+
+/*
+ * Attach __ipipe_do_IPI to the root domain for the kernel IPIs only
+ * (virqs below __ipipe_first_ipi); the pipeline-private IPIs from
+ * __ipipe_first_ipi onward are left for other domains to request.
+ * Requires __ipipe_ipis_alloc() to have run first, otherwise
+ * __ipipe_first_ipi is 0 and the loop does nothing.
+ */
+void __ipipe_ipis_request(void)
+{
+ unsigned virq;
+
+ for (virq = IPIPE_IPI_BASE; virq < __ipipe_first_ipi; virq++)
+ ipipe_request_irq(ipipe_root_domain,
+ virq,
+ (ipipe_irq_handler_t)__ipipe_do_IPI,
+ NULL, NULL);
+}
+/*
+ * Send the IPI mapped to virq @ipi to the CPUs in @cpumask.  The mask
+ * is taken by value so smp_cross_call() can be handed a stable local
+ * copy regardless of the caller's storage.
+ */
+void ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
+{
+ enum ipi_msg_type msg = ipi - IPIPE_IPI_BASE;
+ smp_cross_call(&cpumask, msg);
+}
+EXPORT_SYMBOL_GPL(ipipe_send_ipi);
+
+/*
+ * Low-level IPI entry: map the hardware IPI number @svc onto its virq
+ * and push it down the pipeline.  Called with hardware IRQs off.
+ * IPIPE_IRQF_NOACK: virqs have no controller to acknowledge --
+ * presumably the GIC ack already happened in the low-level entry code;
+ * confirm against the vector stubs.
+ */
+asmlinkage void __exception __ipipe_grab_ipi(unsigned svc, struct pt_regs *regs)
+{
+ int virq = IPIPE_IPI_BASE + svc;
+
+ __ipipe_dispatch_irq(virq, IPIPE_IRQF_NOACK);
+
+ __ipipe_exit_irq(regs);
+}
+
+#endif /* CONFIG_IPIPE */
+
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_CALL_FUNC);
break;
case IPI_CALL_FUNC:
- irq_enter();
+ noipipe_irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
+ noipipe_irq_exit();
break;
case IPI_CPU_STOP:
- irq_enter();
+ noipipe_irq_enter();
ipi_cpu_stop(cpu);
- irq_exit();
+ noipipe_irq_exit();
break;
case IPI_CPU_CRASH_STOP:
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
- irq_enter();
+ noipipe_irq_enter();
+#ifdef CONFIG_IPIPE
+ __ipipe_mach_update_tsc();
+#endif
tick_receive_broadcast();
- irq_exit();
+ noipipe_irq_exit();
break;
#endif
#ifdef CONFIG_IRQ_WORK
case IPI_IRQ_WORK:
- irq_enter();
+ noipipe_irq_enter();
irq_work_run();
- irq_exit();
+ noipipe_irq_exit();
break;
#endif