select RTC_LIB
select SPARSE_IRQ
select SYSCTL_EXCEPTION_TRACE
- select HAVE_CONTEXT_TRACKING
+ select HAVE_CONTEXT_TRACKING if !IPIPE
help
ARM 64-bit (AArch64) Linux support.
+config IPIPE_WANT_PREEMPTIBLE_SWITCH
+ depends on IPIPE
+ def_bool SMP
+
config 64BIT
def_bool y
This option enables support selections for the big.LITTLE
system architecture.
+source kernel/ipipe/Kconfig
source kernel/Kconfig.preempt
config UP_LATE_INIT
msr daifclr, #2
.endm
+ .macro disable_irq_cond
+#ifdef CONFIG_IPIPE
+ msr daifset, #2
+#endif
+ .endm
+
+ .macro enable_irq_cond
+#ifdef CONFIG_IPIPE
+ msr daifclr, #2
+#endif
+ .endm
+
/*
* Save/disable and restore interrupts.
*/
--- /dev/null
+/* -*- linux-c -*-
+ * arch/arm64/include/asm/ipipe.h
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ * Copyright (C) 2005 Stelian Pop.
+ * Copyright (C) 2006-2008 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ARM_IPIPE_H
+#define __ARM_IPIPE_H
+
+#include <linux/irqdomain.h>
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/jump_label.h>
+#include <linux/ipipe_trace.h>
+#include <linux/ipipe_debug.h>
+
+#define IPIPE_CORE_RELEASE 8
+
+struct ipipe_domain;
+
+#define IPIPE_TSC_TYPE_NONE 0
+#define IPIPE_TSC_TYPE_FREERUNNING 1
+#define IPIPE_TSC_TYPE_DECREMENTER 2
+#define IPIPE_TSC_TYPE_FREERUNNING_COUNTDOWN 3
+#define IPIPE_TSC_TYPE_FREERUNNING_TWICE 4
+#define IPIPE_TSC_TYPE_FREERUNNING_ARCH 5
+
+/* tscinfo, exported to user-space */
+struct __ipipe_tscinfo {
+ unsigned type;
+ unsigned freq;
+ unsigned long counter_vaddr;
+ union {
+ struct {
+ unsigned long counter_paddr;
+ unsigned long long mask;
+ };
+ struct {
+ unsigned *counter; /* Hw counter physical address */
+ unsigned long long mask; /* Significant bits in the hw counter. */
+ unsigned long long *tsc; /* 64 bits tsc value. */
+ } fr;
+ struct {
+ unsigned *counter; /* Hw counter physical address */
+ unsigned long long mask; /* Significant bits in the hw counter. */
+ unsigned *last_cnt; /* Counter value when updating
+ tsc value. */
+ unsigned long long *tsc; /* 64 bits tsc value. */
+ } dec;
+ } u;
+};
+
+struct ipipe_arch_sysinfo {
+ struct __ipipe_tscinfo tsc;
+};
+
+
+/* arch specific stuff */
+
+void __ipipe_mach_get_tscinfo(struct __ipipe_tscinfo *info);
+
+static inline void __ipipe_mach_update_tsc(void) {}
+
+static inline notrace unsigned long long __ipipe_mach_get_tsc(void)
+{
+ return arch_counter_get_cntvct();
+}
+
+#define __ipipe_tsc_get() __ipipe_mach_get_tsc()
+void __ipipe_tsc_register(struct __ipipe_tscinfo *info);
+static inline void __ipipe_tsc_update(void) {}
+#ifndef __ipipe_hrclock_freq
+extern unsigned long __ipipe_hrtimer_freq;
+#define __ipipe_hrclock_freq __ipipe_hrtimer_freq
+#endif /* !__ipipe_hrclock_freq */
+
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+extern void (*__ipipe_mach_hrtimer_debug)(unsigned irq);
+#endif /* CONFIG_IPIPE_DEBUG_INTERNAL */
+
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+
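+/*
+ * When preemptible MM switches are enabled, the context switch code
+ * copes with head domain preemption by itself (see __switch_mm_inner()),
+ * so no hard IRQ masking is needed here; otherwise hard IRQs are
+ * masked across the switch.
+ */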
+#define ipipe_mm_switch_protect(__flags) \
+ do { \
+ (void)(__flags); \
+ } while(0)
+
+#define ipipe_mm_switch_unprotect(__flags) \
+ do { \
+ (void)(__flags); \
+ } while(0)
+
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+
+#define ipipe_mm_switch_protect(__flags) \
+ do { \
+ (__flags) = hard_cond_local_irq_save(); \
+ } while (0)
+
+#define ipipe_mm_switch_unprotect(__flags) \
+ hard_cond_local_irq_restore(__flags)
+
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+
+#define ipipe_get_active_mm() (__this_cpu_read(ipipe_percpu.active_mm))
+
+#define ipipe_read_tsc(t) do { t = __ipipe_tsc_get(); } while(0)
+#define __ipipe_read_timebase() __ipipe_tsc_get()
+
+#define ipipe_tsc2ns(t) \
+({ \
+ unsigned long long delta = (t)*1000; \
+ do_div(delta, __ipipe_hrclock_freq / 1000000 + 1); \
+ (unsigned long)delta; \
+})
+#define ipipe_tsc2us(t) \
+({ \
+ unsigned long long delta = (t); \
+ do_div(delta, __ipipe_hrclock_freq / 1000000 + 1); \
+ (unsigned long)delta; \
+})
+
+static inline const char *ipipe_clock_name(void)
+{
+ return "ipipe_tsc";
+}
+
+/* Private interface -- Internal use only */
+
+#define __ipipe_enable_irq(irq) enable_irq(irq)
+#define __ipipe_disable_irq(irq) disable_irq(irq)
+
+/* PIC muting */
+struct ipipe_mach_pic_muter {
+ void (*enable_irqdesc)(struct ipipe_domain *ipd, unsigned irq);
+ void (*disable_irqdesc)(struct ipipe_domain *ipd, unsigned irq);
+ void (*mute)(void);
+ void (*unmute)(void);
+};
+
+extern struct ipipe_mach_pic_muter ipipe_pic_muter;
+
+void ipipe_pic_muter_register(struct ipipe_mach_pic_muter *muter);
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq);
+
+static inline void ipipe_mute_pic(void)
+{
+ if (ipipe_pic_muter.mute)
+ ipipe_pic_muter.mute();
+}
+
+static inline void ipipe_unmute_pic(void)
+{
+ if (ipipe_pic_muter.unmute)
+ ipipe_pic_muter.unmute();
+}
+
+#define ipipe_notify_root_preemption() do { } while(0)
+
+#ifdef CONFIG_SMP
+void __ipipe_early_core_setup(void);
+void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd);
+void __ipipe_root_localtimer(unsigned int irq, void *cookie);
+void __ipipe_send_vnmi(void (*fn)(void *), cpumask_t cpumask, void *arg);
+void __ipipe_do_vnmi(unsigned int irq, void *cookie);
+void __ipipe_grab_ipi(unsigned svc, struct pt_regs *regs);
+void __ipipe_ipis_alloc(void);
+void __ipipe_ipis_request(void);
+
+static inline void ipipe_handle_multi_ipi(int irq, struct pt_regs *regs)
+{
+ __ipipe_grab_ipi(irq, regs);
+}
+
+#ifdef CONFIG_SMP_ON_UP
+extern struct static_key __ipipe_smp_key;
+#define ipipe_smp_p (static_key_true(&__ipipe_smp_key))
+#endif /* SMP_ON_UP */
+#else /* !CONFIG_SMP */
+#define __ipipe_early_core_setup() do { } while(0)
+#define __ipipe_hook_critical_ipi(ipd) do { } while(0)
+#endif /* !CONFIG_SMP */
+#ifndef __ipipe_mach_init_platform
+#define __ipipe_mach_init_platform() do { } while(0)
+#endif
+
+void __ipipe_enable_pipeline(void);
+
+void __ipipe_do_critical_sync(unsigned irq, void *cookie);
+
+void __ipipe_grab_irq(int irq, struct pt_regs *regs);
+
+void __ipipe_exit_irq(struct pt_regs *regs);
+
+static inline
+int ipipe_handle_domain_irq(struct irq_domain *domain,
+ unsigned int hwirq, struct pt_regs *regs)
+{
+ unsigned int irq;
+
+ irq = irq_find_mapping(domain, hwirq);
+ __ipipe_grab_irq(irq, regs);
+
+ return 0;
+}
+
+static inline unsigned long __ipipe_ffnz(unsigned long ul)
+{
+ int __r;
+
+ /* zero input is not valid */
+ IPIPE_WARN(ul == 0);
+
+ __asm__ ("rbit\t%0, %1\n"
+ "clz\t%0, %0\n"
+ : "=r" (__r) : "r"(ul) : "cc");
+
+ return __r;
+}
+
+#define __ipipe_syscall_watched_p(p, sc) \
+ (ipipe_notifier_enabled_p(p) || (unsigned long)sc >= __NR_syscalls)
+
+#define __ipipe_root_tick_p(regs) (!arch_irqs_disabled_flags(regs->pstate))
+
+struct task_struct *ipipe_switch_to(struct task_struct *prev,
+ struct task_struct *next);
+
+#else /* !CONFIG_IPIPE */
+
+#define __ipipe_tsc_update() do { } while(0)
+
+#define hard_smp_processor_id() smp_processor_id()
+
+#define ipipe_mm_switch_protect(flags) \
+ do { \
+ (void) (flags); \
+ } while(0)
+
+#define ipipe_mm_switch_unprotect(flags) \
+ do { \
+ (void) (flags); \
+ } while(0)
+
+#ifdef CONFIG_SMP
+static inline void ipipe_handle_multi_ipi(int irq, struct pt_regs *regs)
+{
+ handle_IPI(irq, regs);
+}
+#endif /* CONFIG_SMP */
+
+static inline
+int ipipe_handle_domain_irq(struct irq_domain *domain,
+ unsigned int hwirq, struct pt_regs *regs)
+{
+ return handle_domain_irq(domain, hwirq, regs);
+}
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* !__ARM_IPIPE_H */
--- /dev/null
+/* -*- linux-c -*-
+ * arch/arm64/include/asm/ipipe_base.h
+ *
+ * Copyright (C) 2007 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ASM_ARM_IPIPE_BASE_H
+#define __ASM_ARM_IPIPE_BASE_H
+
+#include <asm/irq.h> /* For NR_IRQS */
+
+#ifdef CONFIG_IPIPE
+
+#define IPIPE_NR_ROOT_IRQS 1024
+
+#define IPIPE_NR_XIRQS IPIPE_NR_ROOT_IRQS
+
+#ifdef CONFIG_SMP
+
+extern unsigned __ipipe_first_ipi;
+
+#define IPIPE_CRITICAL_IPI __ipipe_first_ipi
+#define IPIPE_HRTIMER_IPI (IPIPE_CRITICAL_IPI + 1)
+#define IPIPE_RESCHEDULE_IPI (IPIPE_CRITICAL_IPI + 2)
+#define IPIPE_SERVICE_VNMI (IPIPE_CRITICAL_IPI + 3)
+
+#define IPIPE_LAST_IPI IPIPE_SERVICE_VNMI
+
+#ifdef CONFIG_IPIPE_LEGACY
+#define hard_smp_processor_id() \
+ ({ \
+ unsigned int cpunum; \
+ __asm__ __volatile__ ("\n" \
+ "1: mrc p15, 0, %0, c0, c0, 5\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .long 1b\n" \
+ " mov %0, #0\n" \
+ " .popsection" \
+ : "=r" (cpunum)); \
+ cpunum &= 0xFF; \
+ })
+extern u32 __cpu_logical_map[];
+#define ipipe_processor_id() (__cpu_logical_map[hard_smp_processor_id()])
+
+#else /* !legacy */
+#define hard_smp_processor_id() raw_smp_processor_id()
+
+#ifdef CONFIG_SMP_ON_UP
+unsigned __ipipe_processor_id(void);
+
+#define ipipe_processor_id() \
+ ({ \
+ register unsigned int cpunum __asm__ ("r0"); \
+ register unsigned int r1 __asm__ ("r1"); \
+ register unsigned int r2 __asm__ ("r2"); \
+ register unsigned int r3 __asm__ ("r3"); \
+ register unsigned int ip __asm__ ("ip"); \
+ register unsigned int lr __asm__ ("lr"); \
+ __asm__ __volatile__ ("\n" \
+ "1: bl __ipipe_processor_id\n" \
+ " .pushsection \".alt.smp.init\", \"a\"\n" \
+ " .long 1b\n" \
+ " mov %0, #0\n" \
+ " .popsection" \
+ : "=r"(cpunum), "=r"(r1), "=r"(r2), "=r"(r3), \
+ "=r"(ip), "=r"(lr) \
+ : /* */ : "cc"); \
+ cpunum; \
+ })
+#else /* !SMP_ON_UP */
+#define ipipe_processor_id() raw_smp_processor_id()
+#endif /* !SMP_ON_UP */
+#endif /* !legacy */
+
+#define IPIPE_ARCH_HAVE_VIRQ_IPI
+
+#else /* !CONFIG_SMP */
+#define ipipe_processor_id() (0)
+#endif /* !CONFIG_SMP */
+
+/* ARM traps */
+#define IPIPE_TRAP_ACCESS 0 /* Data or instruction access exception */
+#define IPIPE_TRAP_SECTION 1 /* Section fault */
+#define IPIPE_TRAP_DABT 2 /* Generic data abort */
+#define IPIPE_TRAP_UNKNOWN 3 /* Unknown exception */
+#define IPIPE_TRAP_BREAK 4 /* Instruction breakpoint */
+#define IPIPE_TRAP_FPU_ACC 5 /* Floating point access */
+#define IPIPE_TRAP_FPU_EXC 6 /* Floating point exception */
+#define IPIPE_TRAP_UNDEFINSTR 7 /* Undefined instruction */
+#define IPIPE_TRAP_ALIGNMENT 8 /* Unaligned access exception */
+#define IPIPE_TRAP_MAYDAY 9 /* Internal recovery trap */
+#define IPIPE_NR_FAULTS 10
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+
+void ipipe_stall_root(void);
+
+unsigned long ipipe_test_and_stall_root(void);
+
+unsigned long ipipe_test_root(void);
+
+#else /* !CONFIG_SMP */
+
+#include <asm/irqflags.h>
+
+#if __GNUC__ >= 4
+/* Alias to ipipe_root_cpudom_var(status) */
+extern unsigned long __ipipe_root_status;
+#else
+extern unsigned long *const __ipipe_root_status_addr;
+#define __ipipe_root_status (*__ipipe_root_status_addr)
+#endif
+
+static inline void ipipe_stall_root(void)
+{
+ unsigned long flags;
+
+ flags = hard_local_irq_save();
+ __ipipe_root_status |= 1;
+ hard_local_irq_restore(flags);
+}
+
+static inline unsigned ipipe_test_root(void)
+{
+ return __ipipe_root_status & 1;
+}
+
+static inline unsigned ipipe_test_and_stall_root(void)
+{
+ unsigned long flags, res;
+
+ flags = hard_local_irq_save();
+ res = __ipipe_root_status;
+ __ipipe_root_status = res | 1;
+ hard_local_irq_restore(flags);
+
+ return res & 1;
+}
+
+#endif /* !CONFIG_SMP */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* __ASM_ARM_IPIPE_BASE_H */
--- /dev/null
+/* -*- linux-c -*-
+ * arch/arm64/include/asm/ipipe_hwirq.h
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ * Copyright (C) 2005 Stelian Pop.
+ * Copyright (C) 2006-2008 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _ASM_ARM_IPIPE_HWIRQ_H
+#define _ASM_ARM_IPIPE_HWIRQ_H
+
+#define hard_local_irq_restore_notrace(x) \
+ __asm__ __volatile__( \
+ "msr daif, %0" \
+ : \
+ : "r" (x) \
+ : "memory", "cc")
+
+static inline void hard_local_irq_disable_notrace(void)
+{
+ __asm__ __volatile__("msr daifset, #2" : : : "memory", "cc");
+}
+
+static inline void hard_local_irq_enable_notrace(void)
+{
+ __asm__ __volatile__("msr daifclr, #2" : : : "memory", "cc");
+}
+
+static inline void hard_local_fiq_disable_notrace(void)
+{
+ __asm__ __volatile__("msr daifset, #1" : : : "memory", "cc");
+}
+
+static inline void hard_local_fiq_enable_notrace(void)
+{
+ __asm__ __volatile__("msr daifclr, #1" : : : "memory", "cc");
+}
+
+static inline unsigned long hard_local_irq_save_notrace(void)
+{
+ unsigned long res;
+ __asm__ __volatile__(
+ "mrs %0, daif\n"
+ "msr daifset, #2"
+ : "=r" (res) : : "memory", "cc");
+ return res;
+}
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/ipipe_trace.h>
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return (int)((flags) & PSR_I_BIT);
+}
+
+static inline unsigned long hard_local_save_flags(void)
+{
+ unsigned long flags;
+ __asm__ __volatile__(
+ "mrs %0, daif"
+ : "=r" (flags) : : "memory", "cc");
+ return flags;
+}
+
+#define hard_irqs_disabled_flags(flags) arch_irqs_disabled_flags(flags)
+
+static inline int hard_irqs_disabled(void)
+{
+ return hard_irqs_disabled_flags(hard_local_save_flags());
+}
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+
+static inline void hard_local_irq_disable(void)
+{
+ if (!hard_irqs_disabled()) {
+ hard_local_irq_disable_notrace();
+ ipipe_trace_begin(0x80000000);
+ }
+}
+
+static inline void hard_local_irq_enable(void)
+{
+ if (hard_irqs_disabled()) {
+ ipipe_trace_end(0x80000000);
+ hard_local_irq_enable_notrace();
+ }
+}
+
+static inline unsigned long hard_local_irq_save(void)
+{
+ unsigned long flags;
+
+ flags = hard_local_irq_save_notrace();
+ if (!arch_irqs_disabled_flags(flags))
+ ipipe_trace_begin(0x80000001);
+
+ return flags;
+}
+
+static inline void hard_local_irq_restore(unsigned long x)
+{
+ if (!arch_irqs_disabled_flags(x))
+ ipipe_trace_end(0x80000001);
+
+ hard_local_irq_restore_notrace(x);
+}
+
+#else /* !CONFIG_IPIPE_TRACE_IRQSOFF */
+
+#define hard_local_irq_disable hard_local_irq_disable_notrace
+#define hard_local_irq_enable hard_local_irq_enable_notrace
+#define hard_local_irq_save hard_local_irq_save_notrace
+#define hard_local_irq_restore hard_local_irq_restore_notrace
+
+#endif /* CONFIG_IPIPE_TRACE_IRQSOFF */
+
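+/*
+ * The arch_local_* helpers below only manipulate the root domain's
+ * virtual interrupt state (stall bit); the hard_* variants above act
+ * on the real DAIF.I flag.
+ */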
+#define arch_local_irq_disable() \
+ ({ \
+ ipipe_stall_root(); \
+ barrier(); \
+ })
+
+#define arch_local_irq_enable() \
+ do { \
+ barrier(); \
+ ipipe_unstall_root(); \
+ } while (0)
+
+#define local_fiq_enable() hard_local_fiq_enable_notrace()
+
+#define local_fiq_disable() hard_local_fiq_disable_notrace()
+
+#define arch_local_irq_restore(flags) \
+ do { \
+ if (!arch_irqs_disabled_flags(flags)) \
+ arch_local_irq_enable(); \
+ } while (0)
+
+#define arch_local_irq_save() \
+ ({ \
+ unsigned long _flags; \
+ _flags = ipipe_test_and_stall_root() << 7; \
+ barrier(); \
+ _flags; \
+ })
+
+#define arch_local_save_flags() \
+ ({ \
+ unsigned long _flags; \
+ _flags = ipipe_test_root() << 7; \
+ barrier(); \
+ _flags; \
+ })
+
+#define arch_irqs_disabled() ipipe_test_root()
+#define hard_irq_disable() hard_local_irq_disable()
+
+static inline unsigned long arch_mangle_irq_bits(int virt, unsigned long real)
+{
+ /* Merge virtual and real interrupt mask bits into a single
+ 32bit word. */
+ return (real & ~(1L << 8)) | ((virt != 0) << 8);
+}
+
+static inline int arch_demangle_irq_bits(unsigned long *x)
+{
+ int virt = (*x & (1 << 8)) != 0;
+ *x &= ~(1L << 8);
+ return virt;
+}
+
+#else /* !CONFIG_IPIPE */
+
+#define hard_local_irq_save() arch_local_irq_save()
+#define hard_local_irq_restore(x) arch_local_irq_restore(x)
+#define hard_local_irq_enable() arch_local_irq_enable()
+#define hard_local_irq_disable() arch_local_irq_disable()
+#define hard_irqs_disabled() irqs_disabled()
+
+#define hard_cond_local_irq_enable() do { } while(0)
+#define hard_cond_local_irq_disable() do { } while(0)
+#define hard_cond_local_irq_save() 0
+#define hard_cond_local_irq_restore(flags) do { (void)(flags); } while(0)
+
+#endif /* !CONFIG_IPIPE */
+
+#if defined(CONFIG_SMP) && defined(CONFIG_IPIPE)
+#define hard_smp_local_irq_save() hard_local_irq_save()
+#define hard_smp_local_irq_restore(flags) hard_local_irq_restore(flags)
+#else /* !CONFIG_SMP */
+#define hard_smp_local_irq_save() 0
+#define hard_smp_local_irq_restore(flags) do { (void)(flags); } while(0)
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_ARM_IPIPE_HWIRQ_H */
#include <asm/ptrace.h>
+#include <asm/ipipe_hwirq.h>
+
+#ifndef CONFIG_IPIPE
+
/*
* CPU interrupt mask handling.
*/
#define local_fiq_enable() asm("msr daifclr, #1" : : : "memory")
#define local_fiq_disable() asm("msr daifset, #1" : : : "memory")
-#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
-#define local_async_disable() asm("msr daifset, #4" : : : "memory")
/*
* Save the current interrupt enable state.
return flags & PSR_I_BIT;
}
+#endif /* CONFIG_IPIPE */
/*
* save and restore debug state
*/
#define local_dbg_enable() asm("msr daifclr, #8" : : : "memory")
#define local_dbg_disable() asm("msr daifset, #8" : : : "memory")
+#define local_async_enable() asm("msr daifclr, #4" : : : "memory")
+#define local_async_disable() asm("msr daifset, #4" : : : "memory")
+
#endif
#endif
typedef struct {
unsigned int id;
- raw_spinlock_t id_lock;
+ ipipe_spinlock_t id_lock;
void *vdso;
} mm_context_t;
#define INIT_MM_CONTEXT(name) \
- .context.id_lock = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
+ .context.id_lock = IPIPE_SPIN_LOCK_UNLOCKED,
#define ASID(mm) ((mm)->context.id & 0xffff)
static inline void switch_new_context(struct mm_struct *mm)
{
- unsigned long flags;
-
__new_context(mm);
- local_irq_save(flags);
cpu_switch_mm(mm->pgd, mm);
- local_irq_restore(flags);
}
-static inline void check_and_switch_context(struct mm_struct *mm,
- struct task_struct *tsk)
+static inline int
+check_and_switch_context(struct mm_struct *mm,
+ struct task_struct *tsk, bool may_defer)
{
/*
* Required during context switch to avoid speculative page table
*/
cpu_set_reserved_ttbr0();
- if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS))
+ if (!((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
/*
* The ASID is from the current generation, just switch to the
* new pgd. This condition is only true for calls from
* context_switch() and interrupts are already disabled.
*/
cpu_switch_mm(mm->pgd, mm);
- else if (irqs_disabled())
+ } else if (may_defer && irqs_disabled()) {
/*
* Defer the new ASID allocation until after the context
* switch critical region since __new_context() cannot be
* called with interrupts disabled.
*/
set_ti_thread_flag(task_thread_info(tsk), TIF_SWITCH_MM);
- else
+ return -EAGAIN;
+ } else {
/*
* That is a direct call to switch_mm() or activate_mm() with
* interrupts enabled and a new context.
*/
switch_new_context(mm);
+ }
+
+ return 0;
}
+#ifdef CONFIG_IPIPE
+extern void deferred_switch_mm(struct mm_struct *mm);
+#else /* !I-pipe */
+static inline void deferred_switch_mm(struct mm_struct *next)
+{
+ cpu_switch_mm(next->pgd, next);
+}
+#endif /* !I-pipe */
+
#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0)
#define destroy_context(mm) do { } while(0)
finish_arch_post_lock_switch
static inline void finish_arch_post_lock_switch(void)
{
+ preempt_disable();
if (test_and_clear_thread_flag(TIF_SWITCH_MM)) {
struct mm_struct *mm = current->mm;
unsigned long flags;
__new_context(mm);
- local_irq_save(flags);
- cpu_switch_mm(mm->pgd, mm);
- local_irq_restore(flags);
+ ipipe_mm_switch_protect(flags);
+ deferred_switch_mm(mm);
+ ipipe_mm_switch_unprotect(flags);
}
+ preempt_enable();
}
/*
* calling the CPU specific function when the mm hasn't
* actually changed.
*/
-static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
- struct task_struct *tsk)
+static inline int
+__do_switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk, bool may_defer)
{
- unsigned int cpu = smp_processor_id();
+ const unsigned int cpu = ipipe_processor_id();
+ int ret = 0;
/*
* init_mm.pgd does not contain any user mappings and it is always
*/
if (next == &init_mm) {
cpu_set_reserved_ttbr0();
- return;
+ return 0;
}
- if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next)
- check_and_switch_context(next, tsk);
+ if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
+ ret = check_and_switch_context(next, tsk, may_defer);
+#ifdef CONFIG_IPIPE
+ if (ret < 0)
+ cpumask_clear_cpu(cpu, mm_cpumask(next));
+#endif /* CONFIG_IPIPE */
+ }
+ return ret;
+}
+
+#if defined(CONFIG_IPIPE) && defined(CONFIG_MMU)
+extern void __switch_mm_inner(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk);
+#else /* !I-pipe || !MMU */
+#define __switch_mm_inner(prev, next, tsk) __do_switch_mm(prev, next, tsk, true)
+#endif /* !I-pipe || !MMU */
+
+static inline void
+ipipe_switch_mm_head(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ __do_switch_mm(prev, next, tsk, false);
+}
+
+static inline void
+__switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ __switch_mm_inner(prev, next, tsk);
+}
+
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ unsigned long flags;
+ ipipe_mm_switch_protect(flags);
+ __switch_mm(prev, next, tsk);
+ ipipe_mm_switch_unprotect(flags);
}
#define deactivate_mm(tsk,mm) do { } while (0)
-#define activate_mm(prev,next) switch_mm(prev, next, NULL)
+
+#define activate_mm(prev,next) __switch_mm(prev, next, NULL)
#endif
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_IPIPE)
static inline void set_my_cpu_offset(unsigned long off)
{
#else /* !CONFIG_SMP */
+#if defined(CONFIG_SMP) && defined(CONFIG_IPIPE)
+#define __my_cpu_offset (per_cpu_offset(ipipe_processor_id()))
+#endif /* SMP && IPIPE */
+
#define set_my_cpu_offset(x) do { } while (0)
#endif /* CONFIG_SMP */
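+/*
+ * Hard-disable IRQs across the low-level TTBR switch, so that a head
+ * domain interrupt cannot run over a half-updated MMU context.
+ */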
#define cpu_switch_mm(pgd,mm) \
do { \
+ unsigned long __flags; \
BUG_ON(pgd == swapper_pg_dir); \
+ __flags = hard_local_irq_save(); \
cpu_do_switch_mm(virt_to_phys(pgd),mm); \
+ hard_local_irq_restore(__flags); \
} while (0)
#endif /* __ASSEMBLY__ */
/*
* Logical CPU mapping.
*/
-extern u64 __cpu_logical_map[NR_CPUS];
+extern u64 __cpu_logical_map[];
#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
/*
* Retrieve logical cpu index corresponding to a given MPIDR[23:0]
struct task_struct;
#include <asm/types.h>
+#include <ipipe/thread_info.h>
typedef unsigned long mm_segment_t;
struct task_struct *task; /* main task structure */
int preempt_count; /* 0 => preemptable, <0 => bug */
int cpu; /* cpu */
+#ifdef CONFIG_IPIPE
+ unsigned long ipipe_flags;
+#endif
+ struct ipipe_threadinfo ipipe_data;
};
#define INIT_THREAD_INFO(tsk) \
#define TIF_SINGLESTEP 21
#define TIF_32BIT 22 /* 32bit process */
#define TIF_SWITCH_MM 23 /* deferred switch_mm */
+#ifdef CONFIG_IPIPE
+#define TIF_MMSWITCH_INT 25
+#endif /* CONFIG_IPIPE */
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
#define _TIF_32BIT (1 << TIF_32BIT)
+#ifdef CONFIG_IPIPE
+#define _TIF_MMSWITCH_INT (1 << TIF_MMSWITCH_INT)
+#endif /* CONFIG_IPIPE */
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
_TIF_NOHZ)
+/* ti->ipipe_flags */
+#define TIP_MAYDAY 0 /* MAYDAY call is pending */
+#define TIP_NOTIFY 1 /* Notify head domain about kernel events */
+#define TIP_HEAD 2 /* Runs in head domain */
+
+#define _TIP_MAYDAY (1 << TIP_MAYDAY)
+#define _TIP_NOTIFY (1 << TIP_NOTIFY)
+#define _TIP_HEAD (1 << TIP_HEAD)
+
#endif /* __KERNEL__ */
#endif /* __ASM_THREAD_INFO_H */
#include <linux/thread_info.h>
#include <asm/ptrace.h>
+#include <linux/ipipe.h>
#include <asm/errno.h>
#include <asm/memory.h>
#include <asm/compiler.h>
#define get_user(x, ptr) \
({ \
__typeof__(*(ptr)) __user *__p = (ptr); \
- might_fault(); \
+ __ipipe_uaccess_might_fault(); \
access_ok(VERIFY_READ, __p, sizeof(*__p)) ? \
__get_user((x), __p) : \
((x) = 0, -EFAULT); \
#define put_user(x, ptr) \
({ \
__typeof__(*(ptr)) __user *__p = (ptr); \
- might_fault(); \
+ __ipipe_uaccess_might_fault(); \
access_ok(VERIFY_WRITE, __p, sizeof(*__p)) ? \
__put_user((x), __p) : \
-EFAULT; \
#endif
#define NR_syscalls (__NR_syscalls)
+
+#define __ARM_ipipe_syscall 0x10000000
obj-y += $(arm64-obj-y) vdso/
obj-m += $(arm64-obj-m)
+obj-$(CONFIG_IPIPE) += ipipe.o
head-y := head.o
extra-y := $(head-y) vmlinux.lds
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+#ifdef CONFIG_IPIPE
+ DEFINE(TI_IPIPE, offsetof(struct thread_info, ipipe_flags));
+#endif
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
#endif
.endm
+#ifdef CONFIG_IPIPE
+#define PREEMPT_SCHEDULE_IRQ __ipipe_preempt_schedule_irq
+#else /* !CONFIG_IPIPE */
+#define ret_from_exception ret_to_user
+#define PREEMPT_SCHEDULE_IRQ preempt_schedule_irq
+#endif /* CONFIG_IPIPE */
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_IPIPE)
+#define TRACE_IRQSON bl trace_hardirqs_on
+#define TRACE_IRQSOFF bl trace_hardirqs_off
+#else
+#define TRACE_IRQSON
+#define TRACE_IRQSOFF
+#endif
+
/*
* Bad Abort numbers
*-----------------
ldr x1, [x1, #:lo12:handle_arch_irq]
mov x0, sp
blr x1
+#ifdef CONFIG_IPIPE
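+ // Tell the caller whether the root domain may handle the IRQ
+ // epilogue: Z is set iff the root stage is current and unstalled
+ // (el1_irq/el0_irq use this to pick the fast exit path).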
+ bl __ipipe_check_root_interruptible
+ cmp w0, #1
+#endif /* CONFIG_IPIPE */
.endm
.text
/*
* Undefined instruction
*/
+#ifdef CONFIG_IPIPE
+ mov x0, #7 // x0 = IPIPE_TRAP_UNDEFINSTR
+ mov x1, sp // x1 = &regs
+ bl __ipipe_notify_trap // branch to trap handler
+ cmp w0, #0
+ bne ipipe_fast_svc_irq_exit
+#endif /* CONFIG_IPIPE */
enable_dbg
mov x0, sp
b do_undefinstr
el1_irq:
kernel_entry 1
enable_dbg
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
+ TRACE_IRQSOFF
irq_handler
+#ifdef CONFIG_IPIPE
+ bne ipipe_fast_svc_irq_exit
+#endif
#ifdef CONFIG_PREEMPT
get_thread_info tsk
ldr w24, [tsk, #TI_PREEMPT] // get preempt count
bl el1_preempt
1:
#endif
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
+#ifdef CONFIG_IPIPE
+ipipe_fast_svc_irq_exit:
#endif
+ TRACE_IRQSON
kernel_exit 1
ENDPROC(el1_irq)
#ifdef CONFIG_PREEMPT
el1_preempt:
mov x24, lr
-1: bl preempt_schedule_irq // irq en/disable is done inside
+1: bl PREEMPT_SCHEDULE_IRQ
ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
ret x24
mov x1, x25
mov x2, sp
bl do_mem_abort
- b ret_to_user
+ b ret_from_exception
el0_ia:
/*
* Instruction abort handling
orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts
mov x2, sp
bl do_mem_abort
- b ret_to_user
+ b ret_from_exception
el0_fpsimd_acc:
/*
* Floating Point or Advanced SIMD access
mov x0, x25
mov x1, sp
bl do_fpsimd_acc
- b ret_to_user
+ b ret_from_exception
el0_fpsimd_exc:
/*
* Floating Point or Advanced SIMD exception
mov x0, x25
mov x1, sp
bl do_fpsimd_exc
- b ret_to_user
+ b ret_from_exception
el0_sp_pc:
/*
* Stack or PC alignment exception handling
mov x1, x25
mov x2, sp
bl do_sp_pc_abort
- b ret_to_user
+ b ret_from_exception
el0_undef:
/*
* Undefined instruction
*/
+#ifdef CONFIG_IPIPE
+ mov x0, #7 // x0 = IPIPE_TRAP_UNDEFINSTR
+ mov x1, sp // x1 = &regs
+ bl __ipipe_notify_trap // branch to trap handler
+ cmp w0, #0
+ bne ret_from_exception
+#endif /* CONFIG_IPIPE */
// enable interrupts before calling the main handler
enable_dbg_and_irq
ct_user_exit
mov x0, sp
bl do_undefinstr
- b ret_to_user
+ b ret_from_exception
el0_dbg:
/*
* Debug exception handling
mov x1, #BAD_SYNC
mrs x2, esr_el1
bl bad_mode
- b ret_to_user
+ b ret_from_exception
ENDPROC(el0_sync)
.align 6
kernel_entry 0
el0_irq_naked:
enable_dbg
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_off
-#endif
+ TRACE_IRQSOFF
ct_user_exit
irq_handler
-
-#ifdef CONFIG_TRACE_IRQFLAGS
- bl trace_hardirqs_on
-#endif
+#ifdef CONFIG_IPIPE
+ b.eq normal_irq_ret
+ /* Fast IRQ exit, root domain stalled or not current. */
+ kernel_exit 0, ret = 0
+normal_irq_ret:
+#endif /* CONFIG_IPIPE */
+ TRACE_IRQSON
b ret_to_user
ENDPROC(el0_irq)
ret
ENDPROC(cpu_switch_to)
+#ifdef CONFIG_IPIPE
+ret_from_exception:
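+ // Traps notified to the head domain return here: skip the root
+ // domain's work handling when the task runs over the head stage.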
+ disable_irq
+ ldr x0, [tsk, #TI_IPIPE]
+ tst x0, #_TIP_HEAD
+ b.eq ret_to_user_noirq
+ kernel_exit 0, ret = 0
+#endif /* CONFIG_IPIPE */
/*
* This is the fast syscall return path. We do as little as possible here,
* and this includes saving x0 back into the kernel stack.
bl do_notify_resume
b ret_to_user
work_resched:
+ enable_irq_cond
bl schedule
/*
*/
ret_to_user:
disable_irq // disable interrupts
+ret_to_user_noirq:
ldr x1, [tsk, #TI_FLAGS]
and x2, x1, #_TIF_WORK_MASK
cbnz x2, work_pending
* This is how we return from a fork.
*/
ENTRY(ret_from_fork)
+ enable_irq_cond
bl schedule_tail
cbz x19, 1f // not a kernel thread
mov x0, x20
el0_svc_naked: // compat entry point
stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number
enable_dbg_and_irq
+
+#ifdef CONFIG_IPIPE
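+ // Syscall routing over the pipeline: syscalls bearing the
+ // __ARM_ipipe_syscall marker issued from the head stage go through
+ // ipipe_fastcall_hook first; otherwise the pipeline may still be
+ // notified (__ipipe_notify_syscall) before falling back to the
+ // regular syscall table.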
+ ldr x16, [tsk, #TI_IPIPE]
+ tst scno, __ARM_ipipe_syscall
+ b.eq fastcall_bypass
+ tst x16, #_TIP_HEAD
+ b.eq fastcall_bypass
+ mov x0, sp
+ bl ipipe_fastcall_hook
+ cmp w0, #0
+ b.lt no_fastcall
+ ldr x16, [tsk, #TI_IPIPE]
+ tst x16, #_TIP_HEAD
+ b.ne fastcall_exit
+ bl __ipipe_root_sync
+fastcall_tail:
+ ldr x0, [sp, #S_X0]
+ b ret_fast_syscall
+fastcall_exit:
+ tst x16, #_TIP_MAYDAY
+ b.eq fastcall_notail
+ mov x0, sp
+ bl __ipipe_call_mayday
+fastcall_notail:
+ ldr x0, [sp, #S_X0]
+ disable_irq
+ ldr x1, [tsk, #TI_FLAGS]
+ enable_step_tsk x1, x2
+ kernel_exit 0, ret = 1
+no_fastcall:
+ ldr x16, [tsk, #TI_IPIPE]
+fastcall_bypass:
+ tst x16, #_TIP_NOTIFY
+ b.ne syscall_pipeline
+ tst scno, __ARM_ipipe_syscall
+ b.eq regular_syscall
+syscall_pipeline:
+ mov x0, sp
+ bl __ipipe_notify_syscall
+ ldr x16, [tsk, #TI_IPIPE]
+ tst x16, #_TIP_HEAD
+ b.ne fastcall_notail
+ cmp w0, #0
+ b.ne fastcall_tail
+regular_syscall:
+ ldp x0, x1, [sp, #S_X0]
+ ldp x2, x3, [sp, #S_X2]
+ ldp x4, x5, [sp, #S_X4]
+ ldp x6, x7, [sp, #S_X6]
+#endif /* CONFIG_IPIPE */
+
ct_user_exit 1
ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks
mov x0, sp
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
+
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT
+ .text
+ .align 3
+ .type mcount %function
+ .global mcount
+mcount:
+ ret // just return
+ENDPROC(mcount)
+#endif
*/
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
+ if (__ipipe_report_trap(IPIPE_TRAP_FPU_ACC, regs))
+ return;
+
/* TODO: implement lazy context saving/restoring */
WARN_ON(1);
}
siginfo_t info;
unsigned int si_code = 0;
+ if (__ipipe_report_trap(IPIPE_TRAP_FPU_EXC, regs))
+ return;
+
if (esr & FPEXC_IOF)
si_code = FPE_FLTINV;
else if (esr & FPEXC_DZF)
void fpsimd_thread_switch(struct task_struct *next)
{
+ unsigned long flags;
+
+ flags = hard_local_irq_save();
/*
* Save the current FPSIMD state to memory, but only if whatever is in
* the registers is in fact the most recent userland FPSIMD state of
set_ti_thread_flag(task_thread_info(next),
TIF_FOREIGN_FPSTATE);
}
+ hard_local_irq_restore(flags);
}
void fpsimd_flush_thread(void)
*/
void fpsimd_preserve_current_state(void)
{
- preempt_disable();
+ unsigned long flags;
+
+ flags = hard_preempt_disable();
if (!test_thread_flag(TIF_FOREIGN_FPSTATE))
 fpsimd_save_state(&current->thread.fpsimd_state);
- preempt_enable();
+ hard_preempt_enable(flags);
}
/*
*/
void fpsimd_restore_current_state(void)
{
- preempt_disable();
+ unsigned long flags;
+
+ flags = hard_preempt_disable();
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 struct fpsimd_state *st = &current->thread.fpsimd_state;
this_cpu_write(fpsimd_last_state, st);
st->cpu = smp_processor_id();
}
- preempt_enable();
+ hard_preempt_enable(flags);
}
/*
*/
void fpsimd_update_current_state(struct fpsimd_state *state)
{
- preempt_disable();
+ unsigned long flags;
+
+ flags = hard_preempt_disable();
fpsimd_load_state(state);
if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 struct fpsimd_state *st = &current->thread.fpsimd_state;
this_cpu_write(fpsimd_last_state, st);
st->cpu = smp_processor_id();
}
- preempt_enable();
+ hard_preempt_enable(flags);
}
/*
--- /dev/null
+/* -*- linux-c -*-
+ * linux/arch/arm64/kernel/ipipe.c
+ *
+ * Copyright (C) 2002-2005 Philippe Gerum.
+ * Copyright (C) 2004 Wolfgang Grandegger (Adeos/arm port over 2.4).
+ * Copyright (C) 2005 Heikki Lindholm (PowerPC 970 fixes).
+ * Copyright (C) 2005 Stelian Pop.
+ * Copyright (C) 2006-2008 Gilles Chanteperdrix.
+ * Copyright (C) 2010 Philippe Gerum (SMP port).
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-dependent I-PIPE support for ARM64.
+ */
+
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kallsyms.h>
+#include <linux/kprobes.h>
+#include <linux/ipipe_trace.h>
+#include <linux/irq.h>
+#include <linux/irqnr.h>
+#include <linux/prefetch.h>
+#include <linux/cpu.h>
+#include <linux/ipipe_domain.h>
+#include <linux/ipipe_tickdev.h>
+#include <asm/atomic.h>
+#include <asm/hardirq.h>
+#include <asm/io.h>
+#include <asm/unistd.h>
+#include <asm/mmu_context.h>
+#include <asm/exception.h>
+#include <asm/arch_timer.h>
+
+static void __ipipe_do_IRQ(unsigned irq, void *cookie);
+
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+void (*__ipipe_mach_hrtimer_debug)(unsigned irq);
+#endif
+
+#ifdef CONFIG_SMP
+
+struct __ipipe_vnmidata {
+ void (*fn)(void *);
+ void *arg;
+ cpumask_t cpumask;
+};
+
+static struct __ipipe_vnmislot {
+ ipipe_spinlock_t lock;
+ struct __ipipe_vnmidata *data;
+ ipipe_rwlock_t data_lock;
+} __ipipe_vnmi __cacheline_aligned_in_smp = {
+ .lock = IPIPE_SPIN_LOCK_UNLOCKED,
+ .data = NULL,
+ .data_lock = IPIPE_RW_LOCK_UNLOCKED,
+};
+
+void __ipipe_early_core_setup(void)
+{
+ __ipipe_mach_init_platform();
+}
+
+void ipipe_stall_root(void)
+{
+ unsigned long flags;
+
+ ipipe_root_only();
+ flags = hard_smp_local_irq_save();
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
+ hard_smp_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(ipipe_stall_root);
+
+unsigned long ipipe_test_and_stall_root(void)
+{
+ unsigned long flags;
+ int x;
+
+ ipipe_root_only();
+ flags = hard_smp_local_irq_save();
+ x = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
+ hard_smp_local_irq_restore(flags);
+
+ return x;
+}
+EXPORT_SYMBOL_GPL(ipipe_test_and_stall_root);
+
+unsigned long ipipe_test_root(void)
+{
+ unsigned long flags;
+ int x;
+
+ flags = hard_smp_local_irq_save();
+ x = test_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
+ hard_smp_local_irq_restore(flags);
+
+ return x;
+}
+EXPORT_SYMBOL_GPL(ipipe_test_root);
+
+void __ipipe_do_vnmi(unsigned int irq, void *cookie)
+{
+ int cpu = ipipe_processor_id();
+ struct __ipipe_vnmidata *data;
+
+ read_lock(&__ipipe_vnmi.data_lock);
+
+ data = __ipipe_vnmi.data;
+ if (likely(data && cpumask_test_cpu(cpu, &data->cpumask))) {
+ data->fn(data->arg);
+ cpumask_clear_cpu(cpu, &data->cpumask);
+ }
+
+ read_unlock(&__ipipe_vnmi.data_lock);
+}
+
+static inline void
+hook_internal_ipi(struct ipipe_domain *ipd, int virq,
+ void (*handler)(unsigned int irq, void *cookie))
+{
+ ipd->irqs[virq].ackfn = NULL;
+ ipd->irqs[virq].handler = handler;
+ ipd->irqs[virq].cookie = NULL;
+ /* Immediately handle in the current domain but *never* pass */
+ ipd->irqs[virq].control = IPIPE_HANDLE_MASK|IPIPE_STICKY_MASK;
+}
+
+void __ipipe_hook_critical_ipi(struct ipipe_domain *ipd)
+{
+ __ipipe_ipis_alloc();
+ hook_internal_ipi(ipd, IPIPE_CRITICAL_IPI, __ipipe_do_critical_sync);
+ hook_internal_ipi(ipd, IPIPE_SERVICE_VNMI, __ipipe_do_vnmi);
+}
+
+void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask)
+{
+ if (ipipe_virtual_irq_p(irq) ||
+ irq_get_chip(irq)->irq_set_affinity == NULL)
+ return;
+
+ cpumask_and(&cpumask, &cpumask, cpu_online_mask);
+ if (WARN_ON_ONCE(cpumask_empty(&cpumask)))
+ return;
+
+ irq_get_chip(irq)->irq_set_affinity(irq_get_irq_data(irq), &cpumask, true);
+}
+EXPORT_SYMBOL_GPL(ipipe_set_irq_affinity);
+
+void __ipipe_send_vnmi(void (*fn)(void *), cpumask_t cpumask, void *arg)
+{
+ struct __ipipe_vnmidata data;
+ unsigned long flags;
+ int cpu;
+
+ data.fn = fn;
+ data.arg = arg;
+ data.cpumask = cpumask;
+
+ while (!spin_trylock_irqsave(&__ipipe_vnmi.lock, flags)) {
+ if (hard_irqs_disabled())
+ __ipipe_do_vnmi(IPIPE_SERVICE_VNMI, NULL);
+ cpu_relax();
+ }
+
+ cpu = ipipe_processor_id();
+ cpumask_clear_cpu(cpu, &data.cpumask);
+ if (cpumask_empty(&data.cpumask)) {
+ spin_unlock_irqrestore(&__ipipe_vnmi.lock, flags);
+ return;
+ }
+
+ write_lock(&__ipipe_vnmi.data_lock);
+ __ipipe_vnmi.data = &data;
+ write_unlock(&__ipipe_vnmi.data_lock);
+
+ ipipe_send_ipi(IPIPE_SERVICE_VNMI, data.cpumask);
+ while (!cpumask_empty(&data.cpumask))
+ cpu_relax();
+
+ write_lock(&__ipipe_vnmi.data_lock);
+ __ipipe_vnmi.data = NULL;
+ write_unlock(&__ipipe_vnmi.data_lock);
+
+ spin_unlock_irqrestore(&__ipipe_vnmi.lock, flags);
+}
+EXPORT_SYMBOL_GPL(__ipipe_send_vnmi);
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_SMP_ON_UP
+struct static_key __ipipe_smp_key = STATIC_KEY_INIT_TRUE;
+
+unsigned __ipipe_processor_id(void)
+{
+ return raw_smp_processor_id();
+}
+EXPORT_SYMBOL_GPL(__ipipe_processor_id);
+
+static int ipipe_disable_smp(void)
+{
+ if (num_online_cpus() == 1) {
+ unsigned long flags;
+
+ printk("I-pipe: disabling SMP code\n");
+
+ flags = hard_local_irq_save();
+ static_key_slow_dec(&__ipipe_smp_key);
+ hard_local_irq_restore(flags);
+ }
+ return 0;
+}
+arch_initcall(ipipe_disable_smp);
+#endif /* SMP_ON_UP */
+
+int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
+{
+ info->sys_nr_cpus = num_online_cpus();
+ info->sys_cpu_freq = __ipipe_hrclock_freq;
+ info->sys_hrtimer_irq = per_cpu(ipipe_percpu.hrtimer_irq, 0);
+ info->sys_hrtimer_freq = __ipipe_hrtimer_freq;
+ info->sys_hrclock_freq = __ipipe_hrclock_freq;
+ __ipipe_mach_get_tscinfo(&info->arch.tsc);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipipe_get_sysinfo);
+
+struct ipipe_mach_pic_muter ipipe_pic_muter;
+EXPORT_SYMBOL_GPL(ipipe_pic_muter);
+
+void ipipe_pic_muter_register(struct ipipe_mach_pic_muter *muter)
+{
+ ipipe_pic_muter = *muter;
+}
+
+void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+ /* With sparse IRQs, some irqs may not have a descriptor */
+ if (irq_to_desc(irq) == NULL)
+ return;
+
+ if (ipipe_pic_muter.enable_irqdesc)
+ ipipe_pic_muter.enable_irqdesc(ipd, irq);
+}
+EXPORT_SYMBOL_GPL(__ipipe_enable_irqdesc);
+
+void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
+{
+ if (ipipe_pic_muter.disable_irqdesc)
+ ipipe_pic_muter.disable_irqdesc(ipd, irq);
+}
+EXPORT_SYMBOL_GPL(__ipipe_disable_irqdesc);
+
+/*
+ * __ipipe_enable_pipeline() -- We are running on the boot CPU, hw
+ * interrupts are off, and secondary CPUs are still lost in space.
+ */
+void __ipipe_enable_pipeline(void)
+{
+ unsigned long flags;
+ unsigned int irq;
+
+ flags = ipipe_critical_enter(NULL);
+
+ /* virtualize all interrupts from the root domain. */
+ for (irq = 0; irq < IPIPE_NR_ROOT_IRQS; irq++)
+ ipipe_request_irq(ipipe_root_domain,
+ irq,
+ (ipipe_irq_handler_t)__ipipe_do_IRQ,
+ NULL, NULL);
+
+#ifdef CONFIG_SMP
+ __ipipe_ipis_request();
+#endif /* CONFIG_SMP */
+
+ ipipe_critical_exit(flags);
+}
+
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+asmlinkage unsigned __ipipe_bugon_irqs_enabled(unsigned x)
+{
+ BUG_ON(!hard_irqs_disabled());
+ return x; /* Preserve x0 */
+}
+#endif
+
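+/*
+ * Called from the irq_handler assembly macro: non-zero means the root
+ * domain is current and virtually unstalled, so the regular IRQ exit
+ * path (preemption, pending work) may run.
+ */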
+asmlinkage int __ipipe_check_root_interruptible(void)
+{
+ return __ipipe_root_p && !irqs_disabled();
+}
+
+void __ipipe_exit_irq(struct pt_regs *regs)
+{
+ /*
+ * Testing for user_mode() eliminates foreign stack contexts,
+ * including from legacy domains which did not set the foreign
+ * stack bit (foreign stacks are always kernel-based).
+ */
+ if (user_mode(regs) &&
+ ipipe_test_thread_flag(TIP_MAYDAY)) {
+ /*
+ * MAYDAY is never raised under normal circumstances,
+ * so prefer test then maybe clear over
+ * test_and_clear.
+ */
+ ipipe_clear_thread_flag(TIP_MAYDAY);
+ __ipipe_notify_trap(IPIPE_TRAP_MAYDAY, regs);
+ }
+}
+
+/* hw irqs off */
+asmlinkage void __exception __ipipe_grab_irq(int irq, struct pt_regs *regs)
+{
+ struct ipipe_percpu_data *p = __ipipe_raw_cpu_ptr(&ipipe_percpu);
+
+ ipipe_trace_irq_entry(irq);
+
+ if (p->hrtimer_irq == -1)
+ goto copy_regs;
+
+ if (irq == p->hrtimer_irq) {
+ /*
+ * Given our deferred dispatching model for regular IRQs, we
+ * only record CPU regs for the last timer interrupt, so that
+ * the timer handler charges CPU times properly. It is assumed
+ * that other interrupt handlers don't actually care for such
+ * information.
+ */
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+ if (__ipipe_mach_hrtimer_debug)
+ __ipipe_mach_hrtimer_debug(irq);
+#endif /* CONFIG_IPIPE_DEBUG_INTERNAL */
+ copy_regs:
+ p->tick_regs.pstate =
+ (p->curr == &p->root
+ ? regs->pstate
+ : regs->pstate | PSR_I_BIT);
+ p->tick_regs.pc = regs->pc;
+ }
+
+ __ipipe_dispatch_irq(irq, 0);
+
+ ipipe_trace_irq_exit(irq);
+
+ __ipipe_exit_irq(regs);
+}
+
+static void __ipipe_do_IRQ(unsigned irq, void *cookie)
+{
+ struct pt_regs *regs = raw_cpu_ptr(&ipipe_percpu.tick_regs);
+ __handle_domain_irq(NULL, irq, false, regs);
+}
+
+#ifdef CONFIG_MMU
+void __switch_mm_inner(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ struct mm_struct ** const active_mm =
+ raw_cpu_ptr(&ipipe_percpu.active_mm);
+ int ret;
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+ struct thread_info *const tip = current_thread_info();
+ unsigned long flags;
+
+ prev = *active_mm;
+ clear_bit(TIF_MMSWITCH_INT, &tip->flags);
+ barrier();
+ *active_mm = NULL;
+ barrier();
+
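+ /*
+ * Redo the switch if we were preempted by the head domain, which
+ * may have changed the MMU context in the meantime
+ * (TIF_MMSWITCH_INT set): the previous mm recorded above is then
+ * stale.
+ */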
+ for (;;) {
+ ret = __do_switch_mm(prev, next, tsk, true);
+ /*
+ * Reading thread_info flags and setting active_mm
+ * must be done atomically.
+ */
+ flags = hard_local_irq_save();
+ if (__test_and_clear_bit(TIF_MMSWITCH_INT, &tip->flags) == 0) {
+ *active_mm = ret < 0 ? prev : next;
+ hard_local_irq_restore(flags);
+ return;
+ }
+ hard_local_irq_restore(flags);
+
+ if (ret < 0)
+ /*
+ * We were interrupted by the head domain, which
+ * may have changed the mm context; the context
+ * is now unknown, but will be switched in
+ * deferred_switch_mm().
+ */
+ return;
+
+ prev = NULL;
+ }
+#else
+ ret = __do_switch_mm(prev, next, tsk, true);
+ *active_mm = ret < 0 ? prev : next;
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+}
+
+#ifdef finish_arch_post_lock_switch
+void deferred_switch_mm(struct mm_struct *next)
+{
+ struct mm_struct ** const active_mm =
+ raw_cpu_ptr(&ipipe_percpu.active_mm);
+ struct mm_struct *prev = *active_mm;
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+ struct thread_info *const tip = current_thread_info();
+ unsigned long flags;
+
+ clear_bit(TIF_MMSWITCH_INT, &tip->flags);
+ barrier();
+ *active_mm = NULL;
+ barrier();
+
+ for (;;) {
+ __do_switch_mm(prev, next, NULL, false);
+ /*
+ * Reading thread_info flags and setting active_mm
+ * must be done atomically.
+ */
+ flags = hard_local_irq_save();
+ if (__test_and_clear_bit(TIF_MMSWITCH_INT, &tip->flags) == 0) {
+ *active_mm = next;
+ hard_local_irq_restore(flags);
+ return;
+ }
+ hard_local_irq_restore(flags);
+ prev = NULL;
+ }
+#else
+ __do_switch_mm(prev, next, NULL, false);
+ *active_mm = next;
+#endif /* CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+}
+#endif /* finish_arch_post_lock_switch */
+#endif /* CONFIG_MMU */
+
+static struct __ipipe_tscinfo tsc_info;
+
+void __init __ipipe_tsc_register(struct __ipipe_tscinfo *info)
+{
+ tsc_info = *info;
+ __ipipe_hrclock_freq = info->freq;
+}
+
+void __ipipe_mach_get_tscinfo(struct __ipipe_tscinfo *info)
+{
+ *info = tsc_info;
+}
+
+EXPORT_SYMBOL_GPL(do_munmap);
+EXPORT_SYMBOL_GPL(show_stack);
+EXPORT_SYMBOL_GPL(init_mm);
+#ifndef MULTI_CPU
+EXPORT_SYMBOL_GPL(cpu_do_switch_mm);
+#endif
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+EXPORT_SYMBOL_GPL(tasklist_lock);
+#endif /* CONFIG_SMP || CONFIG_DEBUG_SPINLOCK */
+
+#ifndef CONFIG_SPARSE_IRQ
+EXPORT_SYMBOL_GPL(irq_desc);
+#endif
void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);
+#ifdef CONFIG_IPIPE
+static void __ipipe_halt_root(void)
+{
+ struct ipipe_percpu_domain_data *p;
+
+ /*
+ * Emulate idle entry sequence over the root domain, which is
+ * stalled on entry.
+ */
+ hard_local_irq_disable();
+
+ p = ipipe_this_cpu_root_context();
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+
+ if (unlikely(__ipipe_ipending_p(p)))
+ __ipipe_sync_stage();
+ else
+ cpu_do_idle();
+}
+
+#define FPSIMD_EN (0x3 << 20)
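+/*
+ * Used by ipipe_switch_to(): instead of eagerly saving/restoring the
+ * FPSIMD context, clear CPACR_EL1.FPEN so that the next FPSIMD access
+ * traps (do_fpsimd_acc) and the state can be switched lazily.
+ */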
+static inline void disable_fpsimd(void)
+{
+ unsigned long flags, cpacr;
+
+ flags = hard_local_irq_save();
+ __asm__ __volatile__("mrs %0, cpacr_el1": "=r"(cpacr));
+ cpacr &= ~FPSIMD_EN;
+ __asm__ __volatile__ (
+ "msr cpacr_el1, %0\n\t"
+ "isb"
+ : /* */ : "r"(cpacr));
+ hard_local_irq_restore(flags);
+}
+
+#else /* !CONFIG_IPIPE */
+static void __ipipe_halt_root(void)
+{
+ cpu_do_idle();
+}
+
+static inline void disable_fpsimd(void)
+{ }
+
+#endif /* !CONFIG_IPIPE */
+
/*
* This is our default idle handler.
*/
* This should do all the clock switching and wait for interrupt
* tricks
*/
- cpu_do_idle();
+ if (!need_resched())
+ __ipipe_halt_root();
local_irq_enable();
}
/*
* Thread switching.
*/
-struct task_struct *__switch_to(struct task_struct *prev,
- struct task_struct *next)
+static struct task_struct *__do_switch_to(struct task_struct *prev,
+ struct task_struct *next,
+ bool lazy_fpu)
{
struct task_struct *last;
- fpsimd_thread_switch(next);
- tls_thread_switch(next);
+ if (lazy_fpu)
+ disable_fpsimd();
+ else
+ fpsimd_thread_switch(next);
+ tls_thread_switch(next);
hw_breakpoint_thread_switch(next);
contextidr_thread_switch(next);
return last;
}
+struct task_struct *__switch_to(struct task_struct *prev,
+ struct task_struct *next)
+{
+ return __do_switch_to(prev, next, false);
+}
+
+#ifdef CONFIG_IPIPE
+struct task_struct *ipipe_switch_to(struct task_struct *prev,
+ struct task_struct *next)
+{
+ return __do_switch_to(prev, next, true);
+}
+#endif
+
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
.si_code = TRAP_HWBKPT,
.si_addr = (void __user *)(bkpt->trigger),
};
+ int i __maybe_unused;
-#ifdef CONFIG_COMPAT
- int i;
+ if (__ipipe_report_trap(IPIPE_TRAP_BREAK, regs))
+ return;
+#ifdef CONFIG_COMPAT
if (!is_compat_task())
goto send_sig;
}
}
+#if NR_CPUS > 16
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+#else
+u64 __cpu_logical_map[16] = { [0 ... 15] = INVALID_HWID };
+#endif
void __init setup_arch(char **cmdline_p)
{
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>
+#include <asm/exception.h>
#define CREATE_TRACE_POINTS
#include <trace/events/ipi.h>
IPI_CPU_STOP,
IPI_TIMER,
IPI_IRQ_WORK,
+ IPI_CPU_DUMP,
+#ifdef CONFIG_IPIPE
+ IPI_IPIPE_FIRST,
+#endif /* CONFIG_IPIPE */
};
+#ifdef CONFIG_IPIPE
+#define noipipe_irq_enter() \
+ do { \
+ } while(0)
+#define noipipe_irq_exit() \
+ do { \
+ } while(0)
+#else /* !CONFIG_IPIPE */
+#define noipipe_irq_enter() irq_enter()
+#define noipipe_irq_exit() irq_exit()
+#endif /* !CONFIG_IPIPE */
+
/*
* Boot a secondary CPU, and assign it the specified idle task.
* This also gives us the initial stack to use for this CPU.
return sum;
}
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+
+static inline void ipi_timer(void)
+{
+#ifdef CONFIG_IPIPE
+ __ipipe_mach_update_tsc();
+#endif /* CONFIG_IPIPE */
+
+ tick_receive_broadcast();
+}
+
+#endif
+
+#ifdef CONFIG_IPIPE
+#define IPIPE_IPI_BASE IPIPE_VIRQ_BASE
+
+unsigned __ipipe_first_ipi;
+EXPORT_SYMBOL_GPL(__ipipe_first_ipi);
+
+static void __ipipe_do_IPI(unsigned virq, void *cookie)
+{
+ enum ipi_msg_type msg = virq - IPIPE_IPI_BASE;
+ handle_IPI(msg, raw_cpu_ptr(&ipipe_percpu.tick_regs));
+}
+
+void __ipipe_ipis_alloc(void)
+{
+ unsigned virq, _virq;
+ unsigned ipi_nr;
+
+ if (__ipipe_first_ipi)
+ return;
+
+ /* __ipipe_first_ipi is 0 here */
+ ipi_nr = IPI_IPIPE_FIRST + IPIPE_LAST_IPI + 1;
+
+ for (virq = IPIPE_IPI_BASE; virq < IPIPE_IPI_BASE + ipi_nr; virq++) {
+ _virq = ipipe_alloc_virq();
+ if (virq != _virq)
+ panic("I-pipe: cannot reserve virq #%d (got #%d)\n",
+ virq, _virq);
+
+ if (virq - IPIPE_IPI_BASE == IPI_IPIPE_FIRST)
+ __ipipe_first_ipi = virq;
+ }
+}
+
+void __ipipe_ipis_request(void)
+{
+ unsigned virq;
+
+ for (virq = IPIPE_IPI_BASE; virq < __ipipe_first_ipi; virq++)
+ ipipe_request_irq(ipipe_root_domain,
+ virq,
+ (ipipe_irq_handler_t)__ipipe_do_IPI,
+ NULL, NULL);
+}
+
+void ipipe_send_ipi(unsigned ipi, cpumask_t cpumask)
+{
+ enum ipi_msg_type msg = ipi - IPIPE_IPI_BASE;
+ smp_cross_call(&cpumask, msg);
+}
+EXPORT_SYMBOL_GPL(ipipe_send_ipi);
+
+/* hw IRQs off */
+asmlinkage void __exception __ipipe_grab_ipi(unsigned svc, struct pt_regs *regs)
+{
+ int virq = IPIPE_IPI_BASE + svc;
+
+ /*
+ * Virtual NMIs ignore the root domain's stall
+ * bit. When caught over high priority
+ * domains, virtual NMIs are pipelined the
+ * usual way as normal interrupts.
+ */
+ if (virq == IPIPE_SERVICE_VNMI && __ipipe_root_p)
+ __ipipe_do_vnmi(IPIPE_SERVICE_VNMI, NULL);
+ else
+ __ipipe_dispatch_irq(virq, IPIPE_IRQF_NOACK);
+
+ __ipipe_exit_irq(regs);
+}
+
+#endif /* CONFIG_IPIPE */
+
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
smp_cross_call(mask, IPI_CALL_FUNC);
break;
case IPI_CALL_FUNC:
- irq_enter();
+ noipipe_irq_enter();
generic_smp_call_function_interrupt();
- irq_exit();
+ noipipe_irq_exit();
break;
case IPI_CPU_STOP:
- irq_enter();
+ noipipe_irq_enter();
ipi_cpu_stop(cpu);
- irq_exit();
+ noipipe_irq_exit();
break;
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
case IPI_TIMER:
- irq_enter();
+ noipipe_irq_enter();
tick_receive_broadcast();
- irq_exit();
+ noipipe_irq_exit();
break;
#endif
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
+#include <linux/ipipe.h>
#include <asm/atomic.h>
#include <asm/debug-monitors.h>
{
siginfo_t info;
void __user *pc = (void __user *)instruction_pointer(regs);
+
+ if (__ipipe_report_trap(IPIPE_TRAP_UNKNOWN, regs))
+ return;
+
+#ifdef CONFIG_IPIPE
+ ipipe_stall_root();
+ hard_local_irq_enable();
+#endif
+
console_verbose();
pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
info.si_addr = pc;
arm64_notify_die("Oops - bad mode", regs, &info, 0);
+
+#ifdef CONFIG_IPIPE
+ hard_local_irq_disable();
+ __clear_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
+#endif
}
void __pte_error(const char *file, int line, unsigned long val)
#define ASID_FIRST_VERSION (1 << MAX_ASID_BITS)
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static IPIPE_DEFINE_RAW_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
+#if defined(CONFIG_SMP) && defined(CONFIG_IPIPE)
+/*
+ * We may create a new context over the head domain, which means that
+ * we can't send IPIs using the regular smp_call* mechanism. Use the
+ * pipelined VNMIs instead.
+ *
+ * However, we must be able to serve interrupts while attempting to
+ * grab the ASID lock on entry to __new_context(). This is a
+ * prerequisite for broadcasting VNMIs to other CPUs later on, to have
+ * them reset their current ASID, without risking deadlocks. I.e. each
+ * CPU must be able to reset the current ASID upon a remote request,
+ * while trying to get a new ASID.
+ *
+ * So CONFIG_SMP+IPIPE requires CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH.
+ */
+#define asid_lock(__flags) \
+ do { \
+ IPIPE_WARN_ONCE(hard_irqs_disabled()); \
+ while (!raw_spin_trylock_irqsave(&cpu_asid_lock, (__flags))) \
+ cpu_relax(); \
+ } while (0)
+
+#define asid_unlock(__flags) \
+ raw_spin_unlock_irqrestore(&cpu_asid_lock, __flags)
+
+#define asid_broadcast_reset() \
+ __ipipe_send_vnmi(reset_context, *cpu_online_mask, NULL);
+
+#else /* !(CONFIG_SMP && CONFIG_IPIPE) */
+
+#define asid_lock(__flags) \
+ raw_spin_lock_irqsave_cond(&cpu_asid_lock, __flags)
+
+#define asid_unlock(__flags) \
+ raw_spin_unlock_irqrestore_cond(&cpu_asid_lock, __flags)
+
+#define asid_broadcast_reset() \
+ smp_call_function(reset_context, NULL, 1);
+
+#endif /* !(CONFIG_SMP && CONFIG_IPIPE) */
+
/*
* We fork()ed a process, and we need a new context for the child to run in.
*/
/*
* Set the mm_cpumask(mm) bit for the current CPU.
*/
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+ cpumask_set_cpu(ipipe_processor_id(), mm_cpumask(mm));
}
/*
static void reset_context(void *info)
{
unsigned int asid;
- unsigned int cpu = smp_processor_id();
+ unsigned int cpu = ipipe_processor_id();
struct mm_struct *mm = current->active_mm;
/*
static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
mm->context.id = asid;
- cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
+ cpumask_copy(mm_cpumask(mm), cpumask_of(ipipe_processor_id()));
}
#endif
{
unsigned int asid;
unsigned int bits = asid_bits();
+ unsigned long flags;
+ int cpu;
- raw_spin_lock(&cpu_asid_lock);
+ asid_lock(flags);
+ cpu = ipipe_processor_id();
#ifdef CONFIG_SMP
/*
* Check the ASID again, in case the change was broadcast from another
* CPU before we acquired the lock.
*/
if (!unlikely((mm->context.id ^ cpu_last_asid) >> MAX_ASID_BITS)) {
- cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
- raw_spin_unlock(&cpu_asid_lock);
+ cpumask_set_cpu(cpu, mm_cpumask(mm));
+ asid_unlock(flags);
return;
}
#endif
cpu_last_asid += (1 << MAX_ASID_BITS) - (1 << bits);
if (cpu_last_asid == 0)
cpu_last_asid = ASID_FIRST_VERSION;
- asid = cpu_last_asid + smp_processor_id();
+ asid = cpu_last_asid + cpu;
flush_context();
#ifdef CONFIG_SMP
smp_wmb();
- smp_call_function(reset_context, NULL, 1);
+ asid_broadcast_reset();
#endif
cpu_last_asid += NR_CPUS - 1;
}
set_mm_context(mm, asid);
- raw_spin_unlock(&cpu_asid_lock);
+ asid_unlock(flags);
}
static const char *fault_name(unsigned int esr);
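+/*
+ * Fetch the pgd the MMU is currently using from TTBR0_EL1 (ASID and
+ * low bits masked out), so show_pte() can report both the mm's pgd
+ * and the hardware one.
+ */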
+#define cpu_get_pgd() \
+({ \
+ unsigned long pg; \
+ asm("mrs %0, ttbr0_el1\n" \
+ : "=r" (pg)); \
+ pg &= ~0xffff000000003ffful; \
+ (pgd_t *)phys_to_virt(pg); \
+})
+
/*
* Dump out the page tables associated with 'addr' in mm 'mm'.
*/
if (!mm)
mm = &init_mm;
- pr_alert("pgd = %p\n", mm->pgd);
+ pr_alert("mm_pgd = %p, hw_pgd = %p\n", mm->pgd, cpu_get_pgd());
pgd = pgd_offset(mm, addr);
pr_alert("[%08lx] *pgd=%016llx", addr, pgd_val(*pgd));
static void __do_kernel_fault(struct mm_struct *mm, unsigned long addr,
unsigned int esr, struct pt_regs *regs)
{
+ unsigned long flags;
+ int ret;
+
/*
* Are we prepared to handle this kernel fault?
*/
- if (fixup_exception(regs))
+ flags = hard_cond_local_irq_save();
+ ret = fixup_exception(regs);
+ hard_cond_local_irq_restore(flags);
+ if (ret)
+ return;
+
+ if (__ipipe_report_trap(IPIPE_TRAP_ACCESS, regs))
return;
/*
{
struct siginfo si;
+ if (__ipipe_report_trap(IPIPE_TRAP_ACCESS, regs))
+ return;
+
if (show_unhandled_signals && unhandled_signal(tsk, sig) &&
printk_ratelimit()) {
pr_info("%s[%d]: unhandled %s (%d) at 0x%08lx, esr 0x%03x\n",
unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+ if (__ipipe_report_trap(IPIPE_TRAP_ACCESS, regs))
+ return 0;
+
tsk = current;
mm = tsk->mm;
* would already be released in __lock_page_or_retry in mm/filemap.c.
*/
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
- return 0;
+ goto out;
/*
* Major/minor page fault accounting is only done on the initial
*/
if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
VM_FAULT_BADACCESS))))
- return 0;
+ goto out;
/*
* If we are in kernel mode at this point, we have no context to
* oom-killed).
*/
pagefault_out_of_memory();
- return 0;
+ goto out;
}
if (fault & VM_FAULT_SIGBUS) {
}
__do_user_fault(tsk, addr, esr, sig, code, regs);
- return 0;
+ goto out;
no_context:
__do_kernel_fault(mm, addr, esr, regs);
+out:
+
return 0;
}
return do_page_fault(addr, esr, regs);
do_bad_area(addr, esr, regs);
+
return 0;
}
*/
static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
{
+ if (__ipipe_report_trap(IPIPE_TRAP_DABT, regs))
+ return 0;
+
return 1;
}
const struct fault_info *inf = fault_info + (esr & 63);
struct siginfo info;
+ IPIPE_WARN_ONCE(hard_irqs_disabled());
+
if (!inf->fn(addr, esr, regs))
return;
+ if (__ipipe_report_trap(IPIPE_TRAP_UNKNOWN, regs))
+ return;
+
pr_alert("Unhandled fault: %s (0x%08x) at 0x%016lx\n",
inf->name, esr, addr);
{
struct siginfo info;
+ if (__ipipe_report_trap(IPIPE_TRAP_ALIGNMENT, regs))
+ return;
+
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
if (!inf->fn(addr, esr, regs))
return 1;
+ if (__ipipe_report_trap(IPIPE_TRAP_UNKNOWN, regs))
+ return 0;
+
pr_alert("Unhandled debug exception: %s (0x%08x) at 0x%016lx\n",
inf->name, esr, addr);
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
+#include <linux/ipipe.h>
+#include <linux/ipipe_tickdev.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/io.h>
return val;
}
-static __always_inline irqreturn_t timer_handler(const int access,
- struct clock_event_device *evt)
+static int arch_timer_ack(const int access, struct clock_event_device *evt)
{
unsigned long ctrl;
if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
ctrl |= ARCH_TIMER_CTRL_IT_MASK;
arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
+ return 1;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_IPIPE
+static DEFINE_PER_CPU(struct ipipe_timer, arch_itimer);
+static struct __ipipe_tscinfo tsc_info = {
+ .type = IPIPE_TSC_TYPE_FREERUNNING_ARCH,
+ .u = {
+ {
+ .mask = 0xffffffffffffffff,
+ },
+ },
+};
+
+static void arch_itimer_ack_phys(void)
+{
+ struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);
+ arch_timer_ack(ARCH_TIMER_PHYS_ACCESS, evt);
+}
+
+static void arch_itimer_ack_virt(void)
+{
+ struct clock_event_device *evt = this_cpu_ptr(arch_timer_evt);
+ arch_timer_ack(ARCH_TIMER_VIRT_ACCESS, evt);
+}
+#endif /* CONFIG_IPIPE */
+
+static inline irqreturn_t timer_handler(int irq, const int access,
+ struct clock_event_device *evt)
+{
+ if (clockevent_ipipe_stolen(evt))
+ goto stolen;
+
+ if (arch_timer_ack(access, evt)) {
+#ifdef CONFIG_IPIPE
+ struct ipipe_timer *itimer = raw_cpu_ptr(&arch_itimer);
+ if (itimer->irq != irq)
+ itimer->irq = irq;
+#endif /* CONFIG_IPIPE */
+ stolen:
+ /*
+ * This is a 64-bit clock source; no TSC update is
+ * needed.
+ */
evt->event_handler(evt);
return IRQ_HANDLED;
}
{
struct clock_event_device *evt = dev_id;
- return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
+ return timer_handler(irq, ARCH_TIMER_VIRT_ACCESS, evt);
}
static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
- return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
+ return timer_handler(irq, ARCH_TIMER_PHYS_ACCESS, evt);
}
static irqreturn_t arch_timer_handler_phys_mem(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
- return timer_handler(ARCH_TIMER_MEM_PHYS_ACCESS, evt);
+ return timer_handler(irq, ARCH_TIMER_MEM_PHYS_ACCESS, evt);
}
static irqreturn_t arch_timer_handler_virt_mem(int irq, void *dev_id)
{
struct clock_event_device *evt = dev_id;
- return timer_handler(ARCH_TIMER_MEM_VIRT_ACCESS, evt);
+ return timer_handler(irq, ARCH_TIMER_MEM_VIRT_ACCESS, evt);
}
static __always_inline void timer_set_mode(const int access, int mode,
clk->set_mode = arch_timer_set_mode_phys;
clk->set_next_event = arch_timer_set_next_event_phys;
}
+
+#ifdef CONFIG_IPIPE
+ clk->ipipe_timer = raw_cpu_ptr(&arch_itimer);
+ if (arch_timer_use_virtual) {
+ clk->ipipe_timer->irq = arch_timer_ppi[VIRT_PPI];
+ clk->ipipe_timer->ack = arch_itimer_ack_virt;
+ } else {
+ clk->ipipe_timer->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+ clk->ipipe_timer->ack = arch_itimer_ack_phys;
+ }
+ clk->ipipe_timer->freq = arch_timer_rate;
+#endif
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
/* Enable user access to the virtual counter */
cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
+#ifdef CONFIG_IPIPE
+ cntkctl |= ARCH_TIMER_USR_PCT_ACCESS_EN;
+#endif
arch_timer_set_cntkctl(cntkctl);
}
clocksource_counter.name = "arch_mem_counter";
}
+#ifdef CONFIG_IPIPE
+ tsc_info.freq = arch_timer_rate;
+ __ipipe_tsc_register(&tsc_info);
+#endif /* CONFIG_IPIPE */
+
start_count = arch_timer_read_counter();
clocksource_register_hz(&clocksource_counter, arch_timer_rate);
cyclecounter.mult = clocksource_counter.mult;
config CPU_IDLE
bool "CPU idle PM support"
default y if ACPI || PPC_PSERIES
+ depends on !(ARCH_OMAP4 && IPIPE)
select CPU_IDLE_GOV_LADDER if (!NO_HZ && !NO_HZ_IDLE)
select CPU_IDLE_GOV_MENU if (NO_HZ || NO_HZ_IDLE)
help
#include <linux/slab.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm.h>
+#include <linux/ipipe.h>
#define GPIODIR 0x400
#define GPIOIS 0x404
#endif
struct pl061_gpio {
- spinlock_t lock;
+ ipipe_spinlock_t lock;
void __iomem *base;
struct gpio_chip gc;
writeb(pending, chip->base + GPIOIC);
if (pending) {
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
- generic_handle_irq(irq_find_mapping(gc->irqdomain,
- offset));
+ ipipe_handle_demuxed_irq(irq_find_mapping(gc->irqdomain,
+ offset));
}
chained_irq_exit(irqchip, desc);
}
static void pl061_irq_mask(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+ u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
+ unsigned long flags;
+ u8 gpioie;
+
+ spin_lock_irqsave_cond(&chip->lock, flags);
+ gpioie = readb(chip->base + GPIOIE) & ~mask;
+ writeb(gpioie, chip->base + GPIOIE);
+ ipipe_lock_irq(d->irq);
+ spin_unlock_irqrestore_cond(&chip->lock, flags);
+}
+
+#ifdef CONFIG_IPIPE
+static void pl061_irq_mask_ack(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
writeb(gpioie, chip->base + GPIOIE);
spin_unlock(&chip->lock);
}
+#endif
static void pl061_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
+ unsigned long flags;
u8 mask = BIT(irqd_to_hwirq(d) % PL061_GPIO_NR);
u8 gpioie;
- spin_lock(&chip->lock);
+ spin_lock_irqsave_cond(&chip->lock, flags);
gpioie = readb(chip->base + GPIOIE) | mask;
writeb(gpioie, chip->base + GPIOIE);
- spin_unlock(&chip->lock);
+ ipipe_unlock_irq(d->irq);
+ spin_unlock_irqrestore_cond(&chip->lock, flags);
}
static struct irq_chip pl061_irqchip = {
.irq_mask = pl061_irq_mask,
.irq_unmask = pl061_irq_unmask,
.irq_set_type = pl061_irq_type,
+#ifdef CONFIG_IPIPE
+ .irq_mask_ack = pl061_irq_mask_ack,
+#endif
};
static int pl061_probe(struct amba_device *adev, const struct amba_id *id)
static void ir_ack_apic_edge(struct irq_data *data)
{
- ack_APIC_irq();
+ __ack_APIC_irq();
}
static void ir_ack_apic_level(struct irq_data *data)
{
- ack_APIC_irq();
+ __ack_APIC_irq();
eoi_ioapic_irq(data->irq, irqd_cfg(data));
}
if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
int err;
- err = handle_domain_irq(gic_data.domain, irqnr, regs);
+ err = ipipe_handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
WARN_ONCE(true, "Unexpected interrupt received!\n");
gic_write_eoir(irqnr);
* that any shared data read by handle_IPI will
* be read after the ACK.
*/
- handle_IPI(irqnr, regs);
+ ipipe_handle_multi_ipi(irqnr, regs);
#else
WARN_ONCE(true, "Unexpected SGI received!\n");
#endif
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
+#include <linux/ipipe.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-acpi.h>
#endif
};
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+static IPIPE_DEFINE_RAW_SPINLOCK(irq_controller_lock);
/*
* The GIC mapping of CPU interfaces does not necessarily match
static void gic_mask_irq(struct irq_data *d)
{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
+ ipipe_lock_irq(d->irq);
gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_unmask_irq(struct irq_data *d)
{
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&irq_controller_lock, flags);
gic_poke_irq(d, GIC_DIST_ENABLE_SET);
+ ipipe_unlock_irq(d->irq);
+ raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_eoi_irq(struct irq_data *d)
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
+#ifdef CONFIG_IPIPE
+static void gic_hold_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
+ gic_eoi_irq(d);
+}
+
+static void gic_release_irq(struct irq_data *d)
+{
+ gic_poke_irq(d, GIC_DIST_ENABLE_SET);
+}
+
+#endif /* CONFIG_IPIPE */
+
static int gic_irq_set_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which, bool val)
{
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
if (likely(irqnr > 15 && irqnr < 1021)) {
- handle_domain_irq(gic->domain, irqnr, regs);
+ ipipe_handle_domain_irq(gic->domain, irqnr, regs);
continue;
}
if (irqnr < 16) {
* Pairs with the write barrier in gic_raise_softirq
*/
smp_rmb();
- handle_IPI(irqnr, regs);
+ ipipe_handle_multi_ipi(irqnr, regs);
#endif
continue;
}
struct gic_chip_data *chip_data = irq_get_handler_data(irq);
struct irq_chip *chip = irq_get_chip(irq);
unsigned int cascade_irq, gic_irq;
- unsigned long status;
+ unsigned long status, flags;
chained_irq_enter(chip, desc);
- raw_spin_lock(&irq_controller_lock);
+ raw_spin_lock_irqsave_cond(&irq_controller_lock, flags);
status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
- raw_spin_unlock(&irq_controller_lock);
+ raw_spin_unlock_irqrestore_cond(&irq_controller_lock, flags);
gic_irq = (status & GICC_IAR_INT_ID_MASK);
if (gic_irq == GICC_INT_SPURIOUS)
if (unlikely(gic_irq < 32 || gic_irq > 1020))
handle_bad_irq(cascade_irq, desc);
else
- generic_handle_irq(cascade_irq);
+ ipipe_handle_demuxed_irq(cascade_irq);
out:
chained_irq_exit(chip, desc);
.irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = gic_set_affinity,
+#endif
+#ifdef CONFIG_IPIPE
+ .irq_hold = gic_hold_irq,
+ .irq_release = gic_release_irq,
#endif
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
}
+#ifdef CONFIG_IPIPE
+
+void gic_mute(void)
+{
+ writel_relaxed(0x90, gic_data_cpu_base(&gic_data[0]) + GIC_CPU_PRIMASK);
+}
+
+void gic_unmute(void)
+{
+ writel_relaxed(0xf0, gic_data_cpu_base(&gic_data[0]) + GIC_CPU_PRIMASK);
+}
+
+void gic_set_irq_prio(int irq, int hi)
+{
+ void __iomem *dist_base;
+ unsigned gic_irqs;
+
+ if (irq < 32) /* IPIs are always high priority */
+ return;
+
+ dist_base = gic_data_dist_base(&gic_data[0]);
+ gic_irqs = readl_relaxed(dist_base + GIC_DIST_CTR) & 0x1f;
+ gic_irqs = (gic_irqs + 1) * 32;
+ if (gic_irqs > 1020)
+ gic_irqs = 1020;
+ if (irq >= gic_irqs)
+ return;
+
+ writeb_relaxed(hi ? 0x10 : 0xa0, dist_base + GIC_DIST_PRI + irq);
+}
+
+#endif /* CONFIG_IPIPE */
+
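Assuming the usual GICv2 delivery rule (the CPU interface only signals an interrupt whose priority value is numerically lower than the current priority mask, PMR), the helpers above implement a simple two-level scheme. The snippet below is a rough illustration of that rule, not part of the driver:

    #include <linux/types.h>

    /* Model of the delivery rule gic_mute()/gic_unmute() rely on. */
    static bool gic_prio_passes_pmr(unsigned int prio, unsigned int pmr)
    {
            return prio < pmr;
    }

    /*
     * gic_set_irq_prio(irq, 1): priority 0x10 ("high", like the SGIs/IPIs)
     * gic_set_irq_prio(irq, 0): priority 0xa0 ("normal", root-domain IRQs)
     *
     * gic_unmute(): PMR = 0xf0 -> both 0x10 and 0xa0 are delivered.
     * gic_mute():   PMR = 0x90 -> only priorities below 0x90 pass, so
     *               root-domain interrupts are held off at the CPU
     *               interface while head-domain ones still fire.
     */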
static void __init gic_dist_init(struct gic_chip_data *gic)
{
unsigned int i;
serial8250_console_write(up, s, count);
}
+#ifdef CONFIG_RAW_PRINTK
+
+static void raw_write_char(struct uart_8250_port *up, int c)
+{
+ unsigned int status, tmout = 10000;
+
+ for (;;) {
+ status = serial_in(up, UART_LSR);
+ up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+ if ((status & UART_LSR_THRE) == UART_LSR_THRE)
+ break;
+ if (--tmout == 0)
+ break;
+ cpu_relax();
+ }
+ serial_port_out(&up->port, UART_TX, c);
+}
+
+static void univ8250_console_write_raw(struct console *co, const char *s,
+ unsigned int count)
+{
+ struct uart_8250_port *up = &serial8250_ports[co->index];
+ unsigned int ier;
+
+ ier = serial_in(up, UART_IER);
+
+ if (up->capabilities & UART_CAP_UUE)
+ serial_out(up, UART_IER, UART_IER_UUE);
+ else
+ serial_out(up, UART_IER, 0);
+
+ while (count-- > 0) {
+ if (*s == '\n')
+ raw_write_char(up, '\r');
+ raw_write_char(up, *s++);
+ }
+
+ serial_out(up, UART_IER, ier);
+}
+
+#endif
+
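The write_raw path added above exists so that raw_printk() (see the CON_RAW console flag and the __ipipe_serial_debug() wrapper further down) can push characters through the polled routine even from contexts where the regular printk machinery must not run, e.g. with hard IRQs off or over the head domain. A hedged usage sketch, with a made-up helper name:

    #include <linux/printk.h>

    /* Hypothetical low-level debug helper; raw_printk() ends up in
     * univ8250_console_write_raw() once an 8250 console flagged CON_RAW
     * has been registered. */
    static void debug_trace_point(const char *what, unsigned long val)
    {
            raw_printk("ipipe dbg: %s=%#lx\n", what, val);
    }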
static unsigned int probe_baud(struct uart_port *port)
{
unsigned char lcr, dll, dlm;
.device = uart_console_device,
.setup = univ8250_console_setup,
.match = univ8250_console_match,
+#ifdef CONFIG_RAW_PRINTK
+ .write_raw = univ8250_console_write_raw,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_RAW,
+#else
.flags = CON_PRINTBUFFER | CON_ANYTIME,
+#endif
.index = -1,
.data = &serial8250_reg,
};
writew(ch, uap->port.membase + UART01x_DR);
}
+#ifdef CONFIG_RAW_PRINTK
+
+#define pl011_clk_setup(clk) clk_prepare_enable(clk)
+#define pl011_clk_enable(clk) do { } while (0)
+#define pl011_clk_disable(clk) do { } while (0)
+
+static void
+pl011_console_write_raw(struct console *co, const char *s, unsigned int count)
+{
+ struct uart_amba_port *uap = amba_ports[co->index];
+ unsigned int old_cr, new_cr, status;
+
+ old_cr = readw(uap->port.membase + UART011_CR);
+ new_cr = old_cr & ~UART011_CR_CTSEN;
+ new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
+ writew(new_cr, uap->port.membase + UART011_CR);
+
+ while (count-- > 0) {
+ if (*s == '\n')
+ pl011_console_putchar(&uap->port, '\r');
+ pl011_console_putchar(&uap->port, *s++);
+ }
+ do
+ status = readw(uap->port.membase + UART01x_FR);
+ while (status & UART01x_FR_BUSY);
+ writew(old_cr, uap->port.membase + UART011_CR);
+}
+
+#else /* !CONFIG_RAW_PRINTK */
+
+#define pl011_clk_setup(clk) clk_prepare(clk)
+#define pl011_clk_enable(clk) clk_enable(clk)
+#define pl011_clk_disable(clk) clk_disable(clk)
+
+#endif /* !CONFIG_RAW_PRINTK */
+
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
unsigned long flags;
int locked = 1;
- clk_enable(uap->clk);
+ pl011_clk_enable(uap->clk);
local_irq_save(flags);
if (uap->port.sysrq)
spin_unlock(&uap->port.lock);
local_irq_restore(flags);
- clk_disable(uap->clk);
+ pl011_clk_disable(uap->clk);
}
static void __init
/* Allow pins to be muxed in and configured */
pinctrl_pm_select_default_state(uap->port.dev);
- ret = clk_prepare(uap->clk);
+ ret = pl011_clk_setup(uap->clk);
if (ret)
return ret;
.write = pl011_console_write,
.device = uart_console_device,
.setup = pl011_console_setup,
+#ifdef CONFIG_RAW_PRINTK
+ .write_raw = pl011_console_write_raw,
+ .flags = CON_PRINTBUFFER | CON_RAW,
+#else
.flags = CON_PRINTBUFFER,
+#endif
.index = -1,
.data = &amba_reg,
};
{
struct task_struct *tsk;
struct mm_struct *old_mm, *active_mm;
+ unsigned long flags;
/* Notify parent that we're no longer interested in the old VM */
tsk = current;
task_lock(tsk);
active_mm = tsk->active_mm;
tsk->mm = mm;
+ ipipe_mm_switch_protect(flags);
tsk->active_mm = mm;
activate_mm(active_mm, mm);
+ ipipe_mm_switch_unprotect(flags);
tsk->mm->vmacache_seqnum = 0;
vmacache_flush(tsk);
task_unlock(tsk);
{ \
unsigned long flags; \
\
- raw_local_irq_save(flags); \
+ flags = hard_local_irq_save(); \
v->counter = v->counter c_op i; \
- raw_local_irq_restore(flags); \
+ hard_local_irq_restore(flags); \
}
#define ATOMIC_OP_RETURN(op, c_op) \
unsigned long flags; \
int ret; \
\
- raw_local_irq_save(flags); \
+ flags = hard_local_irq_save(); \
ret = (v->counter = v->counter c_op i); \
- raw_local_irq_restore(flags); \
+ hard_local_irq_restore(flags); \
\
return ret; \
}
* this is the substitute */
#define _atomic_spin_lock_irqsave(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
- local_irq_save(f); \
+ (f) = hard_local_irq_save(); \
arch_spin_lock(s); \
} while(0)
#define _atomic_spin_unlock_irqrestore(l,f) do { \
arch_spinlock_t *s = ATOMIC_HASH(l); \
arch_spin_unlock(s); \
- local_irq_restore(f); \
+ hard_local_irq_restore(f); \
} while(0)
#else
-# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+# define _atomic_spin_lock_irqsave(l,f) do { (f) = hard_local_irq_save(); } while (0)
+# define _atomic_spin_unlock_irqrestore(l,f) do { hard_local_irq_restore(f); } while (0)
#endif
/*
if (size == 8 && sizeof(unsigned long) != 8)
wrong_size_cmpxchg(ptr);
- raw_local_irq_save(flags);
+ flags = hard_local_irq_save();
switch (size) {
case 1: prev = *(u8 *)ptr;
if (prev == old)
default:
wrong_size_cmpxchg(ptr);
}
- raw_local_irq_restore(flags);
+ hard_local_irq_restore(flags);
return prev;
}
u64 prev;
unsigned long flags;
- raw_local_irq_save(flags);
+ flags = hard_local_irq_save();
prev = *(u64 *)ptr;
if (prev == old)
*(u64 *)ptr = new;
- raw_local_irq_restore(flags);
+ hard_local_irq_restore(flags);
return prev;
}
#define arch_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
#endif
+#ifdef CONFIG_IPIPE
+#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP)
+extern int __ipipe_check_percpu_access(void);
+#define __ipipe_cpu_offset \
+ ({ \
+ WARN_ON_ONCE(__ipipe_check_percpu_access()); \
+ __my_cpu_offset; \
+ })
+#else
+#define __ipipe_cpu_offset __my_cpu_offset
+#endif
+#ifndef __ipipe_raw_cpu_ptr
+#define __ipipe_raw_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __ipipe_cpu_offset)
+#endif
+#define __ipipe_raw_cpu_read(var) (*__ipipe_raw_cpu_ptr(&(var)))
+#endif /* CONFIG_IPIPE */
+
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void setup_per_cpu_areas(void);
#endif
-#endif /* SMP */
+#else /* !SMP */
+
+#define __ipipe_raw_cpu_ptr(ptr) VERIFY_PERCPU_PTR(ptr)
+#define __ipipe_raw_cpu_read(var) (*__ipipe_raw_cpu_ptr(&(var)))
+
+#endif /* !SMP */
#ifndef PER_CPU_BASE_SECTION
#ifdef CONFIG_SMP
#define this_cpu_generic_to_op(pcp, val, op) \
do { \
unsigned long __flags; \
- raw_local_irq_save(__flags); \
+ __flags = hard_local_irq_save(); \
*raw_cpu_ptr(&(pcp)) op val; \
- raw_local_irq_restore(__flags); \
+ hard_local_irq_restore(__flags); \
} while (0)
#define this_cpu_generic_add_return(pcp, val) \
({ \
typeof(pcp) __ret; \
unsigned long __flags; \
- raw_local_irq_save(__flags); \
+ __flags = hard_local_irq_save(); \
raw_cpu_add(pcp, val); \
__ret = raw_cpu_read(pcp); \
- raw_local_irq_restore(__flags); \
+ hard_local_irq_restore(__flags); \
__ret; \
})
({ \
typeof(pcp) __ret; \
unsigned long __flags; \
- raw_local_irq_save(__flags); \
+ __flags = hard_local_irq_save(); \
__ret = raw_cpu_read(pcp); \
raw_cpu_write(pcp, nval); \
- raw_local_irq_restore(__flags); \
+ hard_local_irq_restore(__flags); \
__ret; \
})
({ \
typeof(pcp) __ret; \
unsigned long __flags; \
- raw_local_irq_save(__flags); \
+ __flags = hard_local_irq_save(); \
__ret = raw_cpu_read(pcp); \
if (__ret == (oval)) \
raw_cpu_write(pcp, nval); \
- raw_local_irq_restore(__flags); \
+ hard_local_irq_restore(__flags); \
__ret; \
})
({ \
int __ret; \
unsigned long __flags; \
- raw_local_irq_save(__flags); \
+ __flags = hard_local_irq_save(); \
__ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \
oval1, oval2, nval1, nval2); \
- raw_local_irq_restore(__flags); \
+ hard_local_irq_restore(__flags); \
__ret; \
})
static __always_inline void __preempt_count_add(int val)
{
+ ipipe_preempt_root_only();
*preempt_count_ptr() += val;
}
static __always_inline void __preempt_count_sub(int val)
{
+ ipipe_preempt_root_only();
*preempt_count_ptr() -= val;
}
static __always_inline bool __preempt_count_dec_and_test(void)
{
+ ipipe_preempt_root_only();
/*
* Because of load-store architectures cannot do per-cpu atomic
* operations; we cannot use PREEMPT_NEED_RESCHED because it might get
*/
extern struct task_struct *__switch_to(struct task_struct *,
struct task_struct *);
-
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
#define switch_to(prev, next, last) \
do { \
+ hard_cond_local_irq_disable(); \
((last) = __switch_to((prev), (next))); \
+ hard_cond_local_irq_enable(); \
} while (0)
-
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+#define switch_to(prev, next, last) \
+ do { \
+ ((last) = __switch_to((prev), (next))); \
+ } while (0)
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
#endif /* __ASM_GENERIC_SWITCH_TO_H */
--- /dev/null
+#ifndef _IPIPE_SETUP_H
+#define _IPIPE_SETUP_H
+
+/*
+ * Placeholders for setup hooks defined by client domains.
+ */
+
+static inline void __ipipe_early_client_setup(void) { }
+
+#endif /* !_IPIPE_SETUP_H */
--- /dev/null
+#ifndef _IPIPE_THREAD_INFO_H
+#define _IPIPE_THREAD_INFO_H
+
+/*
+ * Placeholder for private thread information defined by client
+ * domains.
+ */
+
+struct ipipe_threadinfo {
+};
+
+static inline void __ipipe_init_threadinfo(struct ipipe_threadinfo *p) { }
+
+#endif /* !_IPIPE_THREAD_INFO_H */
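The structure above is deliberately empty; a client domain (typically a co-kernel) is expected to ship its own version of this header carrying whatever per-task data it needs. A purely hypothetical example of such an override (type and field names are made up):

    /* Hypothetical client-domain <ipipe/thread_info.h>. */
    struct rt_task;                         /* co-kernel task descriptor */

    struct ipipe_threadinfo {
            struct rt_task *task;           /* NULL for plain Linux tasks */
    };

    static inline void __ipipe_init_threadinfo(struct ipipe_threadinfo *p)
    {
            p->task = NULL;
    }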
* Used to lock bgpio_chip->data. Also, this is needed to keep
* shadowed and real data registers writes together.
*/
- spinlock_t lock;
+ ipipe_spinlock_t lock;
/* Shadowed data register to clear/set bits safely. */
unsigned long data;
const struct cpumask *cpumask;
struct list_head list;
struct module *owner;
+
+#ifdef CONFIG_IPIPE
+ struct ipipe_timer *ipipe_timer;
+ unsigned ipipe_stolen;
+
+#define clockevent_ipipe_stolen(evt) ((evt)->ipipe_stolen)
+#else
+#define clockevent_ipipe_stolen(evt) (0)
+#endif /* !CONFIG_IPIPE */
} ____cacheline_aligned;
/*
cycle_t wd_last;
#endif
struct module *owner;
+#ifdef CONFIG_IPIPE_WANT_CLOCKSOURCE
+ cycle_t (*ipipe_read)(struct clocksource *cs);
+#endif /* CONFIG_IPIPE_WANT_CLOCKSOURCE */
+
} ____cacheline_aligned;
/*
#define unreachable() __builtin_unreachable()
/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__, __optimize__("no-tracer")))
+#define __noclone __attribute__((__noclone__))
#endif /* GCC_VERSION >= 40500 */
#define CON_BOOT (8)
#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
#define CON_BRL (32) /* Used for a braille device */
+#define CON_RAW (64) /* Supports raw write mode */
struct console {
char name[16];
void (*write)(struct console *, const char *, unsigned);
+ void (*write_raw)(struct console *, const char *, unsigned);
int (*read)(struct console *, char *, unsigned);
struct tty_driver *(*device)(struct console *, int *);
void (*unblank)(void);
FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
FTRACE_OPS_FL_IPMODIFY = 1 << 13,
FTRACE_OPS_FL_PID = 1 << 14,
+ FTRACE_OPS_FL_IPIPE_EXCLUSIVE = 1 << 15,
};
#ifdef CONFIG_DYNAMIC_FTRACE
#define nmi_enter() \
do { \
+ __ipipe_nmi_enter(); \
lockdep_off(); \
ftrace_nmi_enter(); \
BUG_ON(in_nmi()); \
preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
ftrace_nmi_exit(); \
lockdep_on(); \
+ __ipipe_nmi_exit(); \
} while (0)
#endif /* LINUX_HARDIRQ_H */
#include <linux/param.h>
#include <linux/spinlock.h>
#include <linux/timex.h>
+#include <linux/ipipe_lock.h>
/* i8253A PIT registers */
#define PIT_MODE 0x43
#define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ)
-extern raw_spinlock_t i8253_lock;
+IPIPE_DECLARE_RAW_SPINLOCK(i8253_lock);
extern struct clock_event_device i8253_clockevent;
extern void clockevent_i8253_init(bool oneshot);
--- /dev/null
+/* -*- linux-c -*-
+ * include/linux/ipipe.h
+ *
+ * Copyright (C) 2002-2014 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_H
+#define __LINUX_IPIPE_H
+
+#include <linux/spinlock.h>
+#include <linux/cache.h>
+#include <linux/percpu.h>
+#include <linux/irq.h>
+#include <linux/thread_info.h>
+#include <linux/ipipe_base.h>
+#include <linux/ipipe_debug.h>
+#include <asm/ptrace.h>
+#include <asm/ipipe.h>
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/ipipe_domain.h>
+
+/* ipipe_set_hooks(..., enables) */
+#define IPIPE_SYSCALL __IPIPE_SYSCALL_E
+#define IPIPE_TRAP __IPIPE_TRAP_E
+#define IPIPE_KEVENT __IPIPE_KEVENT_E
+
+struct ipipe_sysinfo {
+ int sys_nr_cpus; /* Number of CPUs on board */
+ int sys_hrtimer_irq; /* hrtimer device IRQ */
+ u64 sys_hrtimer_freq; /* hrtimer device frequency */
+ u64 sys_hrclock_freq; /* hrclock device frequency */
+ u64 sys_cpu_freq; /* CPU frequency (Hz) */
+ struct ipipe_arch_sysinfo arch;
+};
+
+struct ipipe_work_header {
+ size_t size;
+ void (*handler)(struct ipipe_work_header *work);
+};
+
+extern unsigned int __ipipe_printk_virq;
+
+void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned int irq);
+
+void __ipipe_complete_domain_migration(void);
+
+int __ipipe_switch_tail(void);
+
+void __ipipe_share_current(int flags);
+
+void __ipipe_arch_share_current(int flags);
+
+int __ipipe_migrate_head(void);
+
+void __ipipe_reenter_root(void);
+
+int __ipipe_disable_ondemand_mappings(struct task_struct *p);
+
+int __ipipe_pin_vma(struct mm_struct *mm, struct vm_area_struct *vma);
+
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+
+#define prepare_arch_switch(next) \
+ do { \
+ hard_local_irq_enable(); \
+ __ipipe_report_schedule(current, next); \
+ } while(0)
+
+#ifndef ipipe_get_active_mm
+static inline struct mm_struct *ipipe_get_active_mm(void)
+{
+ return __this_cpu_read(ipipe_percpu.active_mm);
+}
+#define ipipe_get_active_mm ipipe_get_active_mm
+#endif
+
+#else /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+
+#define prepare_arch_switch(next) \
+ do { \
+ __ipipe_report_schedule(current, next); \
+ hard_local_irq_disable(); \
+ } while(0)
+
+#ifndef ipipe_get_active_mm
+#define ipipe_get_active_mm() (current->active_mm)
+#endif
+
+#endif /* !CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH */
+
+#ifdef CONFIG_IPIPE_WANT_CLOCKSOURCE
+
+extern unsigned long long __ipipe_cs_freq;
+
+extern struct clocksource *__ipipe_cs;
+
+#endif /* CONFIG_IPIPE_WANT_CLOCKSOURCE */
+
+static inline bool __ipipe_hrclock_ok(void)
+{
+ return __ipipe_hrclock_freq != 0;
+}
+
+static inline void __ipipe_nmi_enter(void)
+{
+ __this_cpu_write(ipipe_percpu.nmi_state, __ipipe_root_status);
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
+ ipipe_save_context_nmi();
+}
+
+static inline void __ipipe_nmi_exit(void)
+{
+ ipipe_restore_context_nmi();
+ if (!test_bit(IPIPE_STALL_FLAG, raw_cpu_ptr(&ipipe_percpu.nmi_state)))
+ __clear_bit(IPIPE_STALL_FLAG, &__ipipe_root_status);
+}
+
+/* KVM-side calls, hw IRQs off. */
+static inline void __ipipe_enter_vm(struct ipipe_vm_notifier *vmf)
+{
+ struct ipipe_percpu_data *p;
+
+ p = raw_cpu_ptr(&ipipe_percpu);
+ p->vm_notifier = vmf;
+ barrier();
+}
+
+static inline void __ipipe_exit_vm(void)
+{
+ struct ipipe_percpu_data *p;
+
+ p = raw_cpu_ptr(&ipipe_percpu);
+ p->vm_notifier = NULL;
+ barrier();
+}
+
+/* Client-side call, hw IRQs off. */
+void __ipipe_notify_vm_preemption(void);
+
+static inline void __ipipe_sync_pipeline(struct ipipe_domain *top)
+{
+ if (__ipipe_current_domain != top) {
+ __ipipe_do_sync_pipeline(top);
+ return;
+ }
+ if (!test_bit(IPIPE_STALL_FLAG, &ipipe_this_cpu_context(top)->status))
+ __ipipe_sync_stage();
+}
+
+void ipipe_register_head(struct ipipe_domain *ipd,
+ const char *name);
+
+void ipipe_unregister_head(struct ipipe_domain *ipd);
+
+int ipipe_request_irq(struct ipipe_domain *ipd,
+ unsigned int irq,
+ ipipe_irq_handler_t handler,
+ void *cookie,
+ ipipe_irq_ackfn_t ackfn);
+
+void ipipe_free_irq(struct ipipe_domain *ipd,
+ unsigned int irq);
+
+void ipipe_raise_irq(unsigned int irq);
+
+void ipipe_set_hooks(struct ipipe_domain *ipd,
+ int enables);
+
+unsigned int ipipe_alloc_virq(void);
+
+void ipipe_free_virq(unsigned int virq);
+
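Taken together, the declarations above form the registration API a head domain would use. A hypothetical initialization sketch (domain name, IRQ number and handler are made up; only calls declared in this header are used):

    #include <linux/init.h>
    #include <linux/ipipe.h>

    static struct ipipe_domain my_head;

    static void my_irq_handler(unsigned int irq, void *cookie)
    {
            /* Runs over the head domain, ahead of the root kernel. */
    }

    static int __init my_core_init(void)
    {
            ipipe_register_head(&my_head, "my-rt-core");
            ipipe_set_hooks(&my_head, IPIPE_TRAP | IPIPE_KEVENT);

            /* 27 stands for whatever per-cpu timer IRQ the core grabs. */
            return ipipe_request_irq(&my_head, 27, my_irq_handler,
                                     NULL, NULL);
    }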
+static inline void ipipe_post_irq_head(unsigned int irq)
+{
+ __ipipe_set_irq_pending(ipipe_head_domain, irq);
+}
+
+static inline void ipipe_post_irq_root(unsigned int irq)
+{
+ __ipipe_set_irq_pending(&ipipe_root, irq);
+}
+
+static inline void ipipe_stall_head(void)
+{
+ hard_local_irq_disable();
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_head_status);
+}
+
+static inline unsigned long ipipe_test_and_stall_head(void)
+{
+ hard_local_irq_disable();
+ return __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_head_status);
+}
+
+static inline unsigned long ipipe_test_head(void)
+{
+ unsigned long flags, ret;
+
+ flags = hard_smp_local_irq_save();
+ ret = test_bit(IPIPE_STALL_FLAG, &__ipipe_head_status);
+ hard_smp_local_irq_restore(flags);
+
+ return ret;
+}
+
+void ipipe_unstall_head(void);
+
+void __ipipe_restore_head(unsigned long x);
+
+static inline void ipipe_restore_head(unsigned long x)
+{
+ ipipe_check_irqoff();
+ if ((x ^ test_bit(IPIPE_STALL_FLAG, &__ipipe_head_status)) & 1)
+ __ipipe_restore_head(x);
+}
+
+void __ipipe_post_work_root(struct ipipe_work_header *work);
+
+#define ipipe_post_work_root(p, header) \
+ do { \
+ void header_not_at_start(void); \
+ if (offsetof(typeof(*(p)), header)) { \
+ header_not_at_start(); \
+ } \
+ __ipipe_post_work_root(&(p)->header); \
+ } while (0)
+
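The offsetof() test above resolves to a call to the undeclared header_not_at_start() whenever the ipipe_work_header is not the first member of the posting structure, so the layout constraint is enforced at link time. A minimal usage sketch (structure and handler names are illustrative):

    #include <linux/kernel.h>
    #include <linux/ipipe.h>

    struct my_cleanup_work {
            struct ipipe_work_header work;  /* must come first */
            int cookie;
    };

    static void my_cleanup_handler(struct ipipe_work_header *work)
    {
            struct my_cleanup_work *rq =
                    container_of(work, struct my_cleanup_work, work);
            /* Runs later, over the root domain. */
            (void)rq->cookie;
    }

    static void post_cleanup(int cookie)
    {
            struct my_cleanup_work wq = {
                    .work = {
                            .size = sizeof(wq),
                            .handler = my_cleanup_handler,
                    },
                    .cookie = cookie,
            };

            /* The descriptor is copied by the pipeline core, so posting
             * a stack object is fine. */
            ipipe_post_work_root(&wq, work);
    }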
+int ipipe_get_sysinfo(struct ipipe_sysinfo *sysinfo);
+
+unsigned long ipipe_critical_enter(void (*syncfn)(void));
+
+void ipipe_critical_exit(unsigned long flags);
+
+void ipipe_prepare_panic(void);
+
+#ifdef CONFIG_SMP
+#ifndef ipipe_smp_p
+#define ipipe_smp_p (1)
+#endif
+void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask);
+void ipipe_send_ipi(unsigned int ipi, cpumask_t cpumask);
+#else /* !CONFIG_SMP */
+#define ipipe_smp_p (0)
+static inline
+void ipipe_set_irq_affinity(unsigned int irq, cpumask_t cpumask) { }
+static inline void ipipe_send_ipi(unsigned int ipi, cpumask_t cpumask) { }
+static inline void ipipe_disable_smp(void) { }
+#endif /* CONFIG_SMP */
+
+static inline void ipipe_restore_root_nosync(unsigned long x)
+{
+ unsigned long flags;
+
+ flags = hard_smp_local_irq_save();
+ __ipipe_restore_root_nosync(x);
+ hard_smp_local_irq_restore(flags);
+}
+
+/* Must be called hw IRQs off. */
+static inline void ipipe_lock_irq(unsigned int irq)
+{
+ struct ipipe_domain *ipd = __ipipe_current_domain;
+ if (ipd == ipipe_root_domain)
+ __ipipe_lock_irq(irq);
+}
+
+/* Must be called hw IRQs off. */
+static inline void ipipe_unlock_irq(unsigned int irq)
+{
+ struct ipipe_domain *ipd = __ipipe_current_domain;
+ if (ipd == ipipe_root_domain)
+ __ipipe_unlock_irq(irq);
+}
+
+static inline struct ipipe_threadinfo *ipipe_current_threadinfo(void)
+{
+ return &current_thread_info()->ipipe_data;
+}
+
+#define ipipe_task_threadinfo(p) (&task_thread_info(p)->ipipe_data)
+
+void ipipe_enable_irq(unsigned int irq);
+
+static inline void ipipe_disable_irq(unsigned int irq)
+{
+ struct irq_desc *desc;
+ struct irq_chip *chip;
+
+ desc = irq_to_desc(irq);
+ if (desc == NULL)
+ return;
+
+ chip = irq_desc_get_chip(desc);
+
+ if (WARN_ON_ONCE(chip->irq_disable == NULL && chip->irq_mask == NULL))
+ return;
+
+ if (chip->irq_disable)
+ chip->irq_disable(&desc->irq_data);
+ else
+ chip->irq_mask(&desc->irq_data);
+}
+
+static inline void ipipe_end_irq(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ if (desc)
+ desc->ipipe_end(irq, desc);
+}
+
+static inline int ipipe_chained_irq_p(struct irq_desc *desc)
+{
+ void __ipipe_chained_irq(unsigned irq, struct irq_desc *desc);
+
+ return desc->handle_irq == __ipipe_chained_irq;
+}
+
+static inline void ipipe_handle_demuxed_irq(unsigned int cascade_irq)
+{
+ ipipe_trace_irq_entry(cascade_irq);
+ __ipipe_dispatch_irq(cascade_irq, IPIPE_IRQF_NOSYNC);
+ ipipe_trace_irq_exit(cascade_irq);
+}
+
+static inline void __ipipe_init_threadflags(struct thread_info *ti)
+{
+ ti->ipipe_flags = 0;
+}
+
+static inline
+void ipipe_set_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ set_bit(flag, &ti->ipipe_flags);
+}
+
+static inline
+void ipipe_clear_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ clear_bit(flag, &ti->ipipe_flags);
+}
+
+static inline
+int ipipe_test_and_clear_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ return test_and_clear_bit(flag, &ti->ipipe_flags);
+}
+
+static inline
+int ipipe_test_ti_thread_flag(struct thread_info *ti, int flag)
+{
+ return test_bit(flag, &ti->ipipe_flags);
+}
+
+#define ipipe_set_thread_flag(flag) \
+ ipipe_set_ti_thread_flag(current_thread_info(), flag)
+
+#define ipipe_clear_thread_flag(flag) \
+ ipipe_clear_ti_thread_flag(current_thread_info(), flag)
+
+#define ipipe_test_and_clear_thread_flag(flag) \
+ ipipe_test_and_clear_ti_thread_flag(current_thread_info(), flag)
+
+#define ipipe_test_thread_flag(flag) \
+ ipipe_test_ti_thread_flag(current_thread_info(), flag)
+
+#define ipipe_enable_notifier(p) \
+ ipipe_set_ti_thread_flag(task_thread_info(p), TIP_NOTIFY)
+
+#define ipipe_disable_notifier(p) \
+ do { \
+ struct thread_info *ti = task_thread_info(p); \
+ ipipe_clear_ti_thread_flag(ti, TIP_NOTIFY); \
+ ipipe_clear_ti_thread_flag(ti, TIP_MAYDAY); \
+ } while (0)
+
+#define ipipe_notifier_enabled_p(p) \
+ ipipe_test_ti_thread_flag(task_thread_info(p), TIP_NOTIFY)
+
+#define ipipe_raise_mayday(p) \
+ do { \
+ struct thread_info *ti = task_thread_info(p); \
+ ipipe_check_irqoff(); \
+ if (ipipe_test_ti_thread_flag(ti, TIP_NOTIFY)) \
+ ipipe_set_ti_thread_flag(ti, TIP_MAYDAY); \
+ } while (0)
+
+extern bool __ipipe_probe_access;
+
+long ipipe_probe_kernel_read(void *dst, void *src, size_t size);
+long ipipe_probe_kernel_write(void *dst, void *src, size_t size);
+
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || defined(CONFIG_PROVE_LOCKING) || \
+ defined(CONFIG_PREEMPT_VOLUNTARY) || defined(CONFIG_IPIPE_DEBUG_CONTEXT)
+extern void __ipipe_uaccess_might_fault(void);
+#else
+#define __ipipe_uaccess_might_fault() might_fault()
+#endif
+
+#ifdef CONFIG_IPIPE_TRACE
+void __ipipe_tracer_hrclock_initialized(void);
+#else /* !CONFIG_IPIPE_TRACE */
+#define __ipipe_tracer_hrclock_initialized() do { } while(0)
+#endif /* !CONFIG_IPIPE_TRACE */
+
+#include <linux/ipipe_compat.h>
+
+#else /* !CONFIG_IPIPE */
+
+#define __ipipe_root_p 1
+#define ipipe_root_p 1
+
+static inline void __ipipe_init_threadflags(struct thread_info *ti) { }
+
+static inline void __ipipe_complete_domain_migration(void) { }
+
+static inline int __ipipe_switch_tail(void)
+{
+ return 0;
+}
+
+static inline void __ipipe_nmi_enter(void) { }
+
+static inline void __ipipe_nmi_exit(void) { }
+
+#define ipipe_safe_current() current
+#define ipipe_processor_id() smp_processor_id()
+
+static inline int ipipe_test_foreign_stack(void)
+{
+ return 0;
+}
+
+static inline void ipipe_lock_irq(unsigned int irq) { }
+
+static inline void ipipe_unlock_irq(unsigned int irq) { }
+
+#define ipipe_probe_kernel_read(d, s, sz) probe_kernel_read(d, s, sz)
+#define ipipe_probe_kernel_write(d, s, sz) probe_kernel_write(d, s, sz)
+#define __ipipe_uaccess_might_fault() might_fault()
+
+#endif /* !CONFIG_IPIPE */
+
+#endif /* !__LINUX_IPIPE_H */
--- /dev/null
+/* -*- linux-c -*-
+ * include/linux/ipipe_base.h
+ *
+ * Copyright (C) 2002-2014 Philippe Gerum.
+ * 2007 Jan Kiszka.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_BASE_H
+#define __LINUX_IPIPE_BASE_H
+
+struct kvm_vcpu;
+struct ipipe_vm_notifier;
+struct irq_desc;
+
+#ifdef CONFIG_IPIPE
+
+#define IPIPE_CORE_APIREV CONFIG_IPIPE_CORE_APIREV
+
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+void ipipe_root_only(void);
+#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */
+static inline void ipipe_root_only(void) { }
+#endif /* !CONFIG_IPIPE_DEBUG_CONTEXT */
+
+typedef void (*ipipe_irq_handler_t)(unsigned int irq,
+ void *cookie);
+
+void ipipe_unstall_root(void);
+
+void ipipe_restore_root(unsigned long x);
+
+#include <asm/ipipe_base.h>
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+
+#ifndef IPIPE_NR_ROOT_IRQS
+#define IPIPE_NR_ROOT_IRQS NR_IRQS
+#endif /* !IPIPE_NR_ROOT_IRQS */
+
+#define __bpl_up(x) (((x)+(BITS_PER_LONG-1)) & ~(BITS_PER_LONG-1))
+/* Number of virtual IRQs (must be a multiple of BITS_PER_LONG) */
+#define IPIPE_NR_VIRQS BITS_PER_LONG
+/* First virtual IRQ # (must be aligned on BITS_PER_LONG) */
+#define IPIPE_VIRQ_BASE __bpl_up(IPIPE_NR_XIRQS)
+/* Total number of IRQ slots */
+#define IPIPE_NR_IRQS (IPIPE_VIRQ_BASE+IPIPE_NR_VIRQS)
+
+static inline int ipipe_virtual_irq_p(unsigned int irq)
+{
+ return irq >= IPIPE_VIRQ_BASE && irq < IPIPE_NR_IRQS;
+}
+
+#define IPIPE_IRQ_LOMAPSZ (IPIPE_NR_IRQS / BITS_PER_LONG)
+#if IPIPE_IRQ_LOMAPSZ > BITS_PER_LONG
+/*
+ * We need a 3-level mapping. This allows us to handle up to 32k IRQ
+ * vectors on 32bit machines, 256k on 64bit ones.
+ */
+#define __IPIPE_3LEVEL_IRQMAP 1
+#define IPIPE_IRQ_MDMAPSZ (__bpl_up(IPIPE_IRQ_LOMAPSZ) / BITS_PER_LONG)
+#else
+/*
+ * 2-level mapping is enough. This allows us to handle up to 1024 IRQ
+ * vectors on 32bit machines, 4096 on 64bit ones.
+ */
+#define __IPIPE_2LEVEL_IRQMAP 1
+#endif
+
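As a worked example of the sizing logic above, assume BITS_PER_LONG = 64 and, hypothetically, IPIPE_NR_XIRQS = 1024:

    IPIPE_VIRQ_BASE   = __bpl_up(1024)  = 1024
    IPIPE_NR_IRQS     = 1024 + 64       = 1088
    IPIPE_IRQ_LOMAPSZ = 1088 / 64       = 17

    Since 17 <= BITS_PER_LONG, the 2-level map applies: one top-level word
    selects among up to 64 low-level words, i.e. 64 * 64 = 4096 IRQ slots,
    matching the "4096 on 64bit" figure quoted in the comment above.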
+/* Per-cpu pipeline status */
+#define IPIPE_STALL_FLAG 0 /* interrupts (virtually) disabled. */
+#define IPIPE_STALL_MASK (1L << IPIPE_STALL_FLAG)
+
+/* Interrupt control bits */
+#define IPIPE_HANDLE_FLAG 0
+#define IPIPE_STICKY_FLAG 1
+#define IPIPE_LOCK_FLAG 2
+#define IPIPE_HANDLE_MASK (1 << IPIPE_HANDLE_FLAG)
+#define IPIPE_STICKY_MASK (1 << IPIPE_STICKY_FLAG)
+#define IPIPE_LOCK_MASK (1 << IPIPE_LOCK_FLAG)
+
+struct pt_regs;
+struct ipipe_domain;
+
+struct ipipe_trap_data {
+ int exception;
+ struct pt_regs *regs;
+};
+
+#define IPIPE_KEVT_SCHEDULE 0
+#define IPIPE_KEVT_SIGWAKE 1
+#define IPIPE_KEVT_SETSCHED 2
+#define IPIPE_KEVT_SETAFFINITY 3
+#define IPIPE_KEVT_EXIT 4
+#define IPIPE_KEVT_CLEANUP 5
+#define IPIPE_KEVT_HOSTRT 6
+#define IPIPE_KEVT_CLOCKFREQ 7
+
+struct ipipe_vm_notifier {
+ void (*handler)(struct ipipe_vm_notifier *nfy);
+};
+
+void __ipipe_init_early(void);
+
+void __ipipe_init(void);
+
+#ifdef CONFIG_PROC_FS
+void __ipipe_init_proc(void);
+#ifdef CONFIG_IPIPE_TRACE
+void __ipipe_init_tracer(void);
+#else /* !CONFIG_IPIPE_TRACE */
+static inline void __ipipe_init_tracer(void) { }
+#endif /* CONFIG_IPIPE_TRACE */
+#else /* !CONFIG_PROC_FS */
+static inline void __ipipe_init_proc(void) { }
+#endif /* CONFIG_PROC_FS */
+
+void __ipipe_restore_root_nosync(unsigned long x);
+
+#define IPIPE_IRQF_NOACK 0x1
+#define IPIPE_IRQF_NOSYNC 0x2
+
+void __ipipe_dispatch_irq(unsigned int irq, int flags);
+
+void __ipipe_do_sync_stage(void);
+
+void __ipipe_do_sync_pipeline(struct ipipe_domain *top);
+
+void __ipipe_lock_irq(unsigned int irq);
+
+void __ipipe_unlock_irq(unsigned int irq);
+
+void __ipipe_do_critical_sync(unsigned int irq, void *cookie);
+
+void __ipipe_ack_edge_irq(unsigned int irq, struct irq_desc *desc);
+
+void __ipipe_nop_irq(unsigned int irq, struct irq_desc *desc);
+
+static inline void __ipipe_idle(void)
+{
+ ipipe_unstall_root();
+}
+
+#ifndef __ipipe_sync_check
+#define __ipipe_sync_check 1
+#endif
+
+static inline void __ipipe_sync_stage(void)
+{
+ if (likely(__ipipe_sync_check))
+ __ipipe_do_sync_stage();
+}
+
+#ifndef __ipipe_run_irqtail
+#define __ipipe_run_irqtail(irq) do { } while(0)
+#endif
+
+void __ipipe_flush_printk(unsigned int irq, void *cookie);
+
+#define __ipipe_get_cpu(flags) ({ (flags) = hard_preempt_disable(); ipipe_processor_id(); })
+#define __ipipe_put_cpu(flags) hard_preempt_enable(flags)
+
+int __ipipe_notify_syscall(struct pt_regs *regs);
+
+int __ipipe_notify_trap(int exception, struct pt_regs *regs);
+
+int __ipipe_notify_kevent(int event, void *data);
+
+#define __ipipe_report_trap(exception, regs) \
+ __ipipe_notify_trap(exception, regs)
+
+#define __ipipe_report_sigwake(p) \
+ do { \
+ if (ipipe_notifier_enabled_p(p)) \
+ __ipipe_notify_kevent(IPIPE_KEVT_SIGWAKE, p); \
+ } while (0)
+
+struct ipipe_cpu_migration_data {
+ struct task_struct *task;
+ int dest_cpu;
+};
+
+#define __ipipe_report_setaffinity(__p, __dest_cpu) \
+ do { \
+ struct ipipe_cpu_migration_data d = { \
+ .task = (__p), \
+ .dest_cpu = (__dest_cpu), \
+ }; \
+ if (ipipe_notifier_enabled_p(__p)) \
+ __ipipe_notify_kevent(IPIPE_KEVT_SETAFFINITY, &d); \
+ } while (0)
+
+#define __ipipe_report_exit(p) \
+ do { \
+ if (ipipe_notifier_enabled_p(p)) \
+ __ipipe_notify_kevent(IPIPE_KEVT_EXIT, p); \
+ } while (0)
+
+#define __ipipe_report_setsched(p) \
+ do { \
+ if (ipipe_notifier_enabled_p(p)) \
+ __ipipe_notify_kevent(IPIPE_KEVT_SETSCHED, p); \
+ } while (0)
+
+#define __ipipe_report_schedule(prev, next) \
+do { \
+ if (ipipe_notifier_enabled_p(next) || \
+ ipipe_notifier_enabled_p(prev)) { \
+ __this_cpu_write(ipipe_percpu.rqlock_owner, prev); \
+ __ipipe_notify_kevent(IPIPE_KEVT_SCHEDULE, next); \
+ } \
+} while (0)
+
+#define __ipipe_report_cleanup(mm) \
+ __ipipe_notify_kevent(IPIPE_KEVT_CLEANUP, mm)
+
+#define __ipipe_report_clockfreq_update(freq) \
+ __ipipe_notify_kevent(IPIPE_KEVT_CLOCKFREQ, &(freq))
+
+void __ipipe_notify_vm_preemption(void);
+
+void __ipipe_call_mayday(struct pt_regs *regs);
+
+#define hard_cond_local_irq_enable() hard_local_irq_enable()
+#define hard_cond_local_irq_disable() hard_local_irq_disable()
+#define hard_cond_local_irq_save() hard_local_irq_save()
+#define hard_cond_local_irq_restore(flags) hard_local_irq_restore(flags)
+
+#ifdef CONFIG_IPIPE_LEGACY
+
+#define IPIPE_FIRST_EVENT IPIPE_NR_FAULTS
+#define IPIPE_EVENT_SCHEDULE IPIPE_FIRST_EVENT
+#define IPIPE_EVENT_SIGWAKE (IPIPE_FIRST_EVENT + 1)
+#define IPIPE_EVENT_SETSCHED (IPIPE_FIRST_EVENT + 2)
+#define IPIPE_EVENT_SETAFFINITY (IPIPE_FIRST_EVENT + 3)
+#define IPIPE_EVENT_EXIT (IPIPE_FIRST_EVENT + 4)
+#define IPIPE_EVENT_CLEANUP (IPIPE_FIRST_EVENT + 5)
+#define IPIPE_EVENT_HOSTRT (IPIPE_FIRST_EVENT + 6)
+#define IPIPE_EVENT_CLOCKFREQ (IPIPE_FIRST_EVENT + 7)
+#define IPIPE_EVENT_SYSCALL (IPIPE_FIRST_EVENT + 8)
+#define IPIPE_LAST_EVENT IPIPE_EVENT_SYSCALL
+#define IPIPE_NR_EVENTS (IPIPE_LAST_EVENT + 1)
+
+typedef int (*ipipe_event_handler_t)(unsigned int event,
+ struct ipipe_domain *from,
+ void *data);
+struct ipipe_legacy_context {
+ unsigned int domid;
+ int priority;
+ void *pdd;
+ ipipe_event_handler_t handlers[IPIPE_NR_EVENTS];
+};
+
+#define __ipipe_init_taskinfo(p) \
+ do { \
+ memset(p->ptd, 0, sizeof(p->ptd)); \
+ } while (0)
+
+#else /* !CONFIG_IPIPE_LEGACY */
+
+struct ipipe_legacy_context {
+};
+
+static inline void __ipipe_init_taskinfo(struct task_struct *p) { }
+
+#endif /* !CONFIG_IPIPE_LEGACY */
+
+#define __ipipe_serial_debug(__fmt, __args...) raw_printk(__fmt, ##__args)
+
+#else /* !CONFIG_IPIPE */
+
+struct task_struct;
+struct mm_struct;
+
+static inline void __ipipe_init_early(void) { }
+
+static inline void __ipipe_init(void) { }
+
+static inline void __ipipe_init_proc(void) { }
+
+static inline void __ipipe_idle(void) { }
+
+static inline void __ipipe_report_sigwake(struct task_struct *p) { }
+
+static inline void __ipipe_report_setaffinity(struct task_struct *p,
+ int dest_cpu) { }
+
+static inline void __ipipe_report_setsched(struct task_struct *p) { }
+
+static inline void __ipipe_report_exit(struct task_struct *p) { }
+
+static inline void __ipipe_report_cleanup(struct mm_struct *mm) { }
+
+#define __ipipe_report_trap(exception, regs) 0
+
+static inline void __ipipe_init_taskinfo(struct task_struct *p) { }
+
+#define hard_preempt_disable() ({ preempt_disable(); 0; })
+#define hard_preempt_enable(flags) ({ preempt_enable(); (void)(flags); })
+
+#define __ipipe_get_cpu(flags) ({ (void)(flags); get_cpu(); })
+#define __ipipe_put_cpu(flags) \
+ do { \
+ (void)(flags); \
+ put_cpu(); \
+ } while (0)
+
+#define __ipipe_root_tick_p(regs) 1
+
+#define ipipe_handle_demuxed_irq(irq) generic_handle_irq(irq)
+
+#define __ipipe_enter_vm(vmf) do { } while (0)
+
+static inline void __ipipe_exit_vm(void) { }
+
+static inline void __ipipe_notify_vm_preemption(void) { }
+
+static inline void ipipe_root_only(void) { }
+
+#define __ipipe_serial_debug(__fmt, __args...) do { } while (0)
+
+#endif /* !CONFIG_IPIPE */
+
+#ifdef CONFIG_IPIPE_WANT_PTE_PINNING
+void __ipipe_pin_mapping_globally(unsigned long start,
+ unsigned long end);
+#else
+static inline void __ipipe_pin_mapping_globally(unsigned long start,
+ unsigned long end)
+{ }
+#endif
+
+static inline void ipipe_preempt_root_only(void)
+{
+#if defined(CONFIG_IPIPE_DEBUG_CONTEXT) && \
+ defined(CONFIG_IPIPE_LEGACY) && \
+ !defined(CONFIG_IPIPE_HAVE_SAFE_THREAD_INFO)
+ ipipe_root_only();
+#endif
+}
+
+#endif /* !__LINUX_IPIPE_BASE_H */
--- /dev/null
+/* -*- linux-c -*-
+ * include/linux/ipipe_compat.h
+ *
+ * Copyright (C) 2012 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_COMPAT_H
+#define __LINUX_IPIPE_COMPAT_H
+
+#ifndef __LINUX_IPIPE_H
+#error "Do not include this file directly, use linux/ipipe.h instead"
+#endif
+
+#ifdef CONFIG_IPIPE_LEGACY
+
+#define IPIPE_HEAD_PRIORITY (-1)
+#define IPIPE_ROOT_PRIO 100
+#define IPIPE_ROOT_ID 0
+#define IPIPE_ROOT_NPTDKEYS 4
+
+/* Legacy pipeline status bit */
+#define IPIPE_NOSTACK_FLAG 1 /* running on foreign stack. */
+#define IPIPE_NOSTACK_MASK (1L << IPIPE_NOSTACK_FLAG)
+
+/* Legacy interrupt control bits */
+#define IPIPE_DUMMY_FLAG 31
+#define IPIPE_WIRED_FLAG IPIPE_HANDLE_FLAG
+#define IPIPE_WIRED_MASK (1 << IPIPE_WIRED_FLAG)
+#define IPIPE_PASS_FLAG IPIPE_DUMMY_FLAG
+#define IPIPE_PASS_MASK (1 << IPIPE_PASS_FLAG)
+#define IPIPE_DYNAMIC_FLAG IPIPE_HANDLE_FLAG
+#define IPIPE_DYNAMIC_MASK (1 << IPIPE_DYNAMIC_FLAG)
+#define IPIPE_SYSTEM_FLAG IPIPE_DUMMY_FLAG
+#define IPIPE_SYSTEM_MASK (1 << IPIPE_SYSTEM_FLAG)
+#define IPIPE_EXCLUSIVE_FLAG IPIPE_DUMMY_FLAG
+#define IPIPE_EXCLUSIVE_MASK (1 << IPIPE_EXCLUSIVE_FLAG)
+
+#define IPIPE_NR_CPUS NR_CPUS
+
+#define IPIPE_EVENT_SELF 0x80000000
+#define IPIPE_EVENT_RETURN IPIPE_TRAP_MAYDAY
+
+#define TASK_ATOMICSWITCH TASK_HARDENING
+
+struct ipipe_domain_attr {
+ unsigned int domid;
+ const char *name;
+ int priority;
+ void (*entry) (void);
+ void *pdd;
+};
+
+void ipipe_init_attr(struct ipipe_domain_attr *attr);
+
+int ipipe_register_domain(struct ipipe_domain *ipd,
+ struct ipipe_domain_attr *attr);
+
+int ipipe_unregister_domain(struct ipipe_domain *ipd);
+
+int ipipe_alloc_ptdkey(void);
+
+int ipipe_free_ptdkey(int key);
+
+int ipipe_set_ptd(int key, void *value);
+
+void *ipipe_get_ptd(int key);
+
+int ipipe_virtualize_irq(struct ipipe_domain *ipd,
+ unsigned int irq,
+ ipipe_irq_handler_t handler,
+ void *cookie,
+ ipipe_irq_ackfn_t ackfn,
+ unsigned int modemask);
+
+ipipe_event_handler_t ipipe_catch_event(struct ipipe_domain *ipd,
+ unsigned int event,
+ ipipe_event_handler_t handler);
+
+int ipipe_setscheduler_root(struct task_struct *p,
+ int policy,
+ int prio);
+
+static inline void ipipe_check_context(struct ipipe_domain *border_ipd)
+{
+ ipipe_root_only();
+}
+
+static inline void ipipe_set_printk_sync(struct ipipe_domain *ipd)
+{
+ ipipe_prepare_panic();
+}
+
+static inline void __ipipe_propagate_irq(unsigned int irq)
+{
+ ipipe_post_irq_root(irq);
+}
+
+static inline void __ipipe_schedule_irq_head(unsigned int irq)
+{
+ ipipe_post_irq_head(irq);
+}
+
+static inline void __ipipe_schedule_irq_root(unsigned int irq)
+{
+ ipipe_post_irq_root(irq);
+}
+
+static inline int ipipe_trigger_irq(unsigned int irq)
+{
+ ipipe_raise_irq(irq);
+ return 1;
+}
+
+static inline void ipipe_stall_pipeline_from(struct ipipe_domain *ipd)
+{
+ if (ipd != ipipe_root_domain)
+ ipipe_stall_head();
+ else
+ ipipe_stall_root();
+}
+
+static inline
+unsigned long ipipe_test_and_stall_pipeline_from(struct ipipe_domain *ipd)
+{
+ if (ipd != ipipe_root_domain)
+ return ipipe_test_and_stall_head();
+
+ return ipipe_test_and_stall_root();
+}
+
+static inline
+void ipipe_unstall_pipeline_from(struct ipipe_domain *ipd)
+{
+ if (ipd != ipipe_root_domain)
+ ipipe_unstall_head();
+ else
+ ipipe_unstall_root();
+}
+
+static inline
+void ipipe_restore_pipeline_from(struct ipipe_domain *ipd,
+ unsigned long x)
+{
+ if (ipd != ipipe_root_domain)
+ ipipe_restore_head(x);
+ else
+ ipipe_restore_root(x);
+}
+
+static inline
+unsigned long ipipe_test_pipeline_from(struct ipipe_domain *ipd)
+{
+ return test_bit(IPIPE_STALL_FLAG, &ipipe_this_cpu_context(ipd)->status);
+}
+
+static inline void ipipe_stall_pipeline_head(void)
+{
+ ipipe_stall_head();
+}
+
+static inline unsigned long ipipe_test_and_stall_pipeline_head(void)
+{
+ return ipipe_test_and_stall_head();
+}
+
+static inline void ipipe_unstall_pipeline_head(void)
+{
+ ipipe_unstall_head();
+}
+
+static inline void ipipe_restore_pipeline_head(unsigned long x)
+{
+ ipipe_restore_head(x);
+}
+
+static inline int ipipe_disable_ondemand_mappings(struct task_struct *p)
+{
+ return __ipipe_disable_ondemand_mappings(p);
+}
+
+static inline int ipipe_reenter_root(struct task_struct *prev,
+ int policy,
+ int prio)
+{
+ __ipipe_reenter_root();
+ return 0;
+}
+
+static inline void ipipe_root_preempt_notify(void)
+{
+ ipipe_notify_root_preemption();
+}
+
+#define ipipe_return_notify(p) ipipe_raise_mayday(p)
+
+/*
+ * Keep the following as a macro, so that client code can check at
+ * build time for support of the invariant pipeline head optimization.
+ */
+#define __ipipe_pipeline_head() ipipe_head_domain
+
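Because the definition above stays a macro, legacy client code can test for the invariant-head optimization with the preprocessor; for instance (purely illustrative):

    #ifndef __ipipe_pipeline_head
    #error "this co-kernel release requires the invariant pipeline head"
    #endif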
+static inline int irqs_disabled_hw(void)
+{
+ return hard_irqs_disabled();
+}
+
+static inline void local_irq_disable_hw(void)
+{
+ hard_local_irq_disable();
+}
+
+static inline void local_irq_enable_hw(void)
+{
+ hard_local_irq_enable();
+}
+
+#define local_irq_save_hw(flags) \
+ do { \
+ (flags) = hard_local_irq_save(); \
+ } while (0)
+
+static inline void local_irq_restore_hw(unsigned long flags)
+{
+ hard_local_irq_restore(flags);
+}
+
+#define local_save_flags_hw(flags) \
+ do { \
+ (flags) = hard_local_save_flags(); \
+ } while (0)
+
+#define local_irq_save_hw_smp(flags) \
+ do { \
+ (flags) = hard_smp_local_irq_save(); \
+ } while (0)
+#define local_irq_restore_hw_smp(flags) hard_smp_local_irq_restore(flags)
+
+#define local_irq_save_hw_cond(flags) \
+ do { \
+ (flags) = hard_cond_local_irq_save(); \
+ } while (0)
+#define local_irq_restore_hw_cond(flags) hard_cond_local_irq_restore(flags)
+
+static inline void ipipe_set_foreign_stack(struct ipipe_domain *ipd)
+{
+ /* Must be called hw interrupts off. */
+ __set_bit(IPIPE_NOSTACK_FLAG, &ipipe_this_cpu_context(ipd)->status);
+}
+
+static inline void ipipe_clear_foreign_stack(struct ipipe_domain *ipd)
+{
+ /* Must be called hw interrupts off. */
+ __clear_bit(IPIPE_NOSTACK_FLAG, &ipipe_this_cpu_context(ipd)->status);
+}
+
+static inline int ipipe_test_foreign_stack(void)
+{
+ /* Must be called hw interrupts off. */
+ return test_bit(IPIPE_NOSTACK_FLAG, &__ipipe_current_context->status);
+}
+
+#ifndef ipipe_safe_current
+#define ipipe_safe_current() \
+ ({ \
+ struct task_struct *__p__; \
+ unsigned long __flags__; \
+ __flags__ = hard_smp_local_irq_save(); \
+ __p__ = ipipe_test_foreign_stack() ? &init_task : current; \
+ hard_smp_local_irq_restore(__flags__); \
+ __p__; \
+ })
+#endif
+
+void __ipipe_legacy_init_stage(struct ipipe_domain *ipd);
+
+/*
+ * These values have no real meaning from a versioning standpoint;
+ * however, they are guaranteed to look more recent than any legacy
+ * patch release published in the past.
+ */
+#define IPIPE_MAJOR_NUMBER 3
+#define IPIPE_MINOR_NUMBER 0
+#define IPIPE_PATCH_NUMBER 0
+
+#define __IPIPE_FEATURE_REQUEST_TICKDEV 1
+#define __IPIPE_FEATURE_FASTPEND_IRQ 1
+#define __IPIPE_FEATURE_TRACE_EVENT 1
+#define __IPIPE_FEATURE_ENABLE_NOTIFIER 1
+#define __IPIPE_FEATURE_PREPARE_PANIC 1
+#define __IPIPE_FEATURE_SYSINFO_V2 1
+#define __IPIPE_FEATURE_PIC_MUTE 1
+#ifdef CONFIG_IPIPE_HAVE_VM_NOTIFIER
+#define __IPIPE_FEATURE_ROOTPREEMPT_NOTIFIER 1
+#endif
+
+#else /* !CONFIG_IPIPE_LEGACY */
+
+static inline void __ipipe_legacy_init_stage(struct ipipe_domain *ipd)
+{
+}
+
+#endif /* !CONFIG_IPIPE_LEGACY */
+
+#endif /* !__LINUX_IPIPE_COMPAT_H */
--- /dev/null
+/* -*- linux-c -*-
+ * include/linux/ipipe_debug.h
+ *
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_DEBUG_H
+#define __LINUX_IPIPE_DEBUG_H
+
+#include <linux/ipipe_domain.h>
+
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+
+#include <asm/bug.h>
+
+static inline int ipipe_disable_context_check(void)
+{
+ return xchg(raw_cpu_ptr(&ipipe_percpu.context_check), 0);
+}
+
+static inline void ipipe_restore_context_check(int old_state)
+{
+ __this_cpu_write(ipipe_percpu.context_check, old_state);
+}
+
+static inline void ipipe_context_check_off(void)
+{
+ int cpu;
+ for_each_online_cpu(cpu)
+ per_cpu(ipipe_percpu, cpu).context_check = 0;
+}
+
+static inline void ipipe_save_context_nmi(void)
+{
+ int state = ipipe_disable_context_check();
+ __this_cpu_write(ipipe_percpu.context_check_saved, state);
+}
+
+static inline void ipipe_restore_context_nmi(void)
+{
+ ipipe_restore_context_check(__this_cpu_read(ipipe_percpu.context_check_saved));
+}
+
+#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */
+
+static inline int ipipe_disable_context_check(void)
+{
+ return 0;
+}
+
+static inline void ipipe_restore_context_check(int old_state) { }
+
+static inline void ipipe_context_check_off(void) { }
+
+static inline void ipipe_save_context_nmi(void) { }
+
+static inline void ipipe_restore_context_nmi(void) { }
+
+#endif /* !CONFIG_IPIPE_DEBUG_CONTEXT */
+
+#ifdef CONFIG_IPIPE_DEBUG
+
+#define ipipe_check_irqoff() \
+ do { \
+ if (WARN_ON_ONCE(!hard_irqs_disabled())) \
+ hard_local_irq_disable(); \
+ } while (0)
+
+#else /* !CONFIG_IPIPE_DEBUG */
+
+static inline void ipipe_check_irqoff(void) { }
+
+#endif /* !CONFIG_IPIPE_DEBUG */
+
+#ifdef CONFIG_IPIPE_DEBUG_INTERNAL
+#define IPIPE_WARN(c) WARN_ON(c)
+#define IPIPE_WARN_ONCE(c) WARN_ON_ONCE(c)
+#define IPIPE_BUG_ON(c) BUG_ON(c)
+#else
+#define IPIPE_WARN(c) do { (void)(c); } while (0)
+#define IPIPE_WARN_ONCE(c) do { (void)(c); } while (0)
+#define IPIPE_BUG_ON(c) do { (void)(c); } while (0)
+#endif
+
+#endif /* !__LINUX_IPIPE_DEBUG_H */
--- /dev/null
+/* -*- linux-c -*-
+ * include/linux/ipipe_domain.h
+ *
+ * Copyright (C) 2007-2012 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_DOMAIN_H
+#define __LINUX_IPIPE_DOMAIN_H
+
+#ifdef CONFIG_IPIPE
+
+#include <linux/mutex.h>
+#include <linux/percpu.h>
+#include <asm/ptrace.h>
+
+struct task_struct;
+struct mm_struct;
+struct irq_desc;
+struct ipipe_vm_notifier;
+
+#define __IPIPE_SYSCALL_P 0
+#define __IPIPE_TRAP_P 1
+#define __IPIPE_KEVENT_P 2
+#define __IPIPE_SYSCALL_E (1 << __IPIPE_SYSCALL_P)
+#define __IPIPE_TRAP_E (1 << __IPIPE_TRAP_P)
+#define __IPIPE_KEVENT_E (1 << __IPIPE_KEVENT_P)
+#define __IPIPE_ALL_E 0x7
+#define __IPIPE_SYSCALL_R (8 << __IPIPE_SYSCALL_P)
+#define __IPIPE_TRAP_R (8 << __IPIPE_TRAP_P)
+#define __IPIPE_KEVENT_R (8 << __IPIPE_KEVENT_P)
+#define __IPIPE_SHIFT_R 3
+#define __IPIPE_ALL_R (__IPIPE_ALL_E << __IPIPE_SHIFT_R)
+
+typedef void (*ipipe_irq_ackfn_t)(unsigned int irq, struct irq_desc *desc);
+
+struct ipipe_domain {
+ int context_offset;
+ struct ipipe_irqdesc {
+ unsigned long control;
+ ipipe_irq_ackfn_t ackfn;
+ ipipe_irq_handler_t handler;
+ void *cookie;
+ } ____cacheline_aligned irqs[IPIPE_NR_IRQS];
+ const char *name;
+ struct mutex mutex;
+ struct ipipe_legacy_context legacy;
+};
+
+static inline void *
+__ipipe_irq_cookie(struct ipipe_domain *ipd, unsigned int irq)
+{
+ return ipd->irqs[irq].cookie;
+}
+
+static inline ipipe_irq_handler_t
+__ipipe_irq_handler(struct ipipe_domain *ipd, unsigned int irq)
+{
+ return ipd->irqs[irq].handler;
+}
+
+extern struct ipipe_domain ipipe_root;
+
+#define ipipe_root_domain (&ipipe_root)
+
+extern struct ipipe_domain *ipipe_head_domain;
+
+struct ipipe_percpu_domain_data {
+ unsigned long status; /* <= Must be first in struct. */
+ unsigned long irqpend_himap;
+#ifdef __IPIPE_3LEVEL_IRQMAP
+ unsigned long irqpend_mdmap[IPIPE_IRQ_MDMAPSZ];
+#endif
+ unsigned long irqpend_lomap[IPIPE_IRQ_LOMAPSZ];
+ unsigned long irqheld_map[IPIPE_IRQ_LOMAPSZ];
+ unsigned long irqall[IPIPE_NR_IRQS];
+ struct ipipe_domain *domain;
+ int coflags;
+};
+
+struct ipipe_percpu_data {
+ struct ipipe_percpu_domain_data root;
+ struct ipipe_percpu_domain_data head;
+ struct ipipe_percpu_domain_data *curr;
+ struct pt_regs tick_regs;
+ int hrtimer_irq;
+ struct task_struct *task_hijacked;
+ struct task_struct *rqlock_owner;
+ struct ipipe_vm_notifier *vm_notifier;
+ unsigned long nmi_state;
+ struct mm_struct *active_mm;
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+ int context_check;
+ int context_check_saved;
+#endif
+};
+
+/*
+ * CAREFUL: all accessors based on __ipipe_raw_cpu_ptr() you may find
+ * in this file should be used only while hw interrupts are off, to
+ * prevent CPU migration regardless of the running domain.
+ */
+DECLARE_PER_CPU(struct ipipe_percpu_data, ipipe_percpu);
+
+static inline struct ipipe_percpu_domain_data *
+__context_of(struct ipipe_percpu_data *p, struct ipipe_domain *ipd)
+{
+ return (void *)p + ipd->context_offset;
+}
+
+/**
+ * ipipe_percpu_context - return the address of the pipeline context
+ * data for a domain on a given CPU.
+ *
+ * NOTE: this is the slowest accessor; use it carefully. Prefer
+ * ipipe_this_cpu_context() for requests targeted at the current
+ * CPU. Additionally, if the target domain is known at build time,
+ * consider ipipe_this_cpu_{root, head}_context().
+ */
+static inline struct ipipe_percpu_domain_data *
+ipipe_percpu_context(struct ipipe_domain *ipd, int cpu)
+{
+ return __context_of(&per_cpu(ipipe_percpu, cpu), ipd);
+}
+
+/**
+ * ipipe_this_cpu_context - return the address of the pipeline context
+ * data for a domain on the current CPU. hw IRQs must be off.
+ *
+ * NOTE: this accessor is a bit faster, but since we don't know which
+ * one of "root" or "head" ipd refers to, we still need to compute the
+ * context address from its offset.
+ */
+static inline struct ipipe_percpu_domain_data *
+ipipe_this_cpu_context(struct ipipe_domain *ipd)
+{
+ return __context_of(__ipipe_raw_cpu_ptr(&ipipe_percpu), ipd);
+}
+
+/**
+ * ipipe_this_cpu_root_context - return the address of the pipeline
+ * context data for the root domain on the current CPU. hw IRQs must
+ * be off.
+ *
+ * NOTE: this accessor is recommended when the domain we refer to is
+ * known at build time to be the root one.
+ */
+static inline struct ipipe_percpu_domain_data *
+ipipe_this_cpu_root_context(void)
+{
+ return __ipipe_raw_cpu_ptr(&ipipe_percpu.root);
+}
+
+/**
+ * ipipe_this_cpu_head_context - return the address of the pipeline
+ * context data for the registered head domain on the current CPU. hw
+ * IRQs must be off.
+ *
+ * NOTE: this accessor is recommended when the domain we refer to is
+ * known at build time to be the registered head domain. This address
+ * is always different from the context data of the root domain in
+ * the absence of a registered head domain. To get the address of the
+ * context data for the domain leading the pipeline at the time of the
+ * call (which may be root if no head domain is registered), use
+ * ipipe_this_cpu_leading_context() instead.
+ */
+static inline struct ipipe_percpu_domain_data *
+ipipe_this_cpu_head_context(void)
+{
+ return __ipipe_raw_cpu_ptr(&ipipe_percpu.head);
+}
+
+/**
+ * ipipe_this_cpu_leading_context - return the address of the pipeline
+ * context data for the domain leading the pipeline on the current
+ * CPU. hw IRQs must be off.
+ *
+ * NOTE: this accessor is required when either root or a registered
+ * head domain may be the final target of this call, depending on
+ * whether the high priority domain was installed via
+ * ipipe_register_head().
+ */
+static inline struct ipipe_percpu_domain_data *
+ipipe_this_cpu_leading_context(void)
+{
+ return ipipe_this_cpu_context(ipipe_head_domain);
+}
+
+/**
+ * __ipipe_get_current_context() - return the address of the pipeline
+ * context data of the domain running on the current CPU. hw IRQs must
+ * be off.
+ */
+static inline struct ipipe_percpu_domain_data *__ipipe_get_current_context(void)
+{
+ return __ipipe_raw_cpu_read(ipipe_percpu.curr);
+}
+
+#define __ipipe_current_context __ipipe_get_current_context()
+
+/**
+ * __ipipe_set_current_context() - switch the current CPU to the
+ * specified domain context. hw IRQs must be off.
+ *
+ * NOTE: this is the only way to change the current domain for the
+ * current CPU. Don't bypass.
+ */
+static inline
+void __ipipe_set_current_context(struct ipipe_percpu_domain_data *pd)
+{
+ struct ipipe_percpu_data *p;
+ p = __ipipe_raw_cpu_ptr(&ipipe_percpu);
+ p->curr = pd;
+}
+
+/**
+ * __ipipe_set_current_domain() - switch the current CPU to the
+ * specified domain. This is equivalent to calling
+ * __ipipe_set_current_context() with the context data of that
+ * domain. hw IRQs must be off.
+ */
+static inline void __ipipe_set_current_domain(struct ipipe_domain *ipd)
+{
+ struct ipipe_percpu_data *p;
+ p = __ipipe_raw_cpu_ptr(&ipipe_percpu);
+ p->curr = __context_of(p, ipd);
+}
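+
+/*
+ * Switching sketch, as done by the legacy domain registration code in
+ * kernel/ipipe/compat.c: hw IRQs are hard-disabled around the domain
+ * change.
+ *
+ *	flags = hard_smp_local_irq_save();
+ *	__ipipe_set_current_domain(ipd);
+ *	hard_smp_local_irq_restore(flags);
+ */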
+
+static inline struct ipipe_percpu_domain_data *ipipe_current_context(void)
+{
+ struct ipipe_percpu_domain_data *pd;
+ unsigned long flags;
+
+ flags = hard_smp_local_irq_save();
+ pd = __ipipe_get_current_context();
+ hard_smp_local_irq_restore(flags);
+
+ return pd;
+}
+
+/**
+ * __ipipe_get_current_domain() - return the address of the pipeline
+ * domain running on the current CPU. hw IRQs must be off.
+ */
+static inline struct ipipe_domain *__ipipe_get_current_domain(void)
+{
+ return __ipipe_get_current_context()->domain;
+}
+
+#define __ipipe_current_domain __ipipe_get_current_domain()
+
+static inline struct ipipe_domain *ipipe_get_current_domain(void)
+{
+ struct ipipe_domain *ipd;
+ unsigned long flags;
+
+ flags = hard_smp_local_irq_save();
+ ipd = __ipipe_get_current_domain();
+ hard_smp_local_irq_restore(flags);
+
+ return ipd;
+}
+
+#define ipipe_current_domain ipipe_get_current_domain()
+
+#define __ipipe_root_p (__ipipe_current_domain == ipipe_root_domain)
+#define ipipe_root_p (ipipe_current_domain == ipipe_root_domain)
+
+#ifdef CONFIG_SMP
+#define __ipipe_root_status (ipipe_this_cpu_root_context()->status)
+#else
+extern unsigned long __ipipe_root_status;
+#endif
+
+#define __ipipe_head_status (ipipe_this_cpu_head_context()->status)
+
+/**
+ * __ipipe_ipending_p() - Whether we have interrupts pending
+ * (i.e. logged) for the given domain context on the current CPU. hw
+ * IRQs must be off.
+ */
+static inline int __ipipe_ipending_p(struct ipipe_percpu_domain_data *pd)
+{
+ return pd->irqpend_himap != 0;
+}
+
+static inline unsigned long
+__ipipe_cpudata_irq_hits(struct ipipe_domain *ipd, int cpu, unsigned int irq)
+{
+ return ipipe_percpu_context(ipd, cpu)->irqall[irq];
+}
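+
+/*
+ * Checking sketch (hw IRQs off), mirroring the legacy path in
+ * kernel/ipipe/compat.c: play back interrupts logged for the root
+ * context unless that context is stalled.
+ *
+ *	struct ipipe_percpu_domain_data *p = ipipe_this_cpu_root_context();
+ *
+ *	if (__ipipe_ipending_p(p) && !test_bit(IPIPE_STALL_FLAG, &p->status))
+ *		__ipipe_sync_stage();
+ */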
+
+#endif /* CONFIG_IPIPE */
+
+#endif /* !__LINUX_IPIPE_DOMAIN_H */
--- /dev/null
+/* -*- linux-c -*-
+ * include/linux/ipipe_lock.h
+ *
+ * Copyright (C) 2009 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_LOCK_H
+#define __LINUX_IPIPE_LOCK_H
+
+typedef struct {
+ arch_spinlock_t arch_lock;
+} __ipipe_spinlock_t;
+
+#define ipipe_spinlock(lock) ((__ipipe_spinlock_t *)(lock))
+#define ipipe_spinlock_p(lock) \
+ __builtin_types_compatible_p(typeof(lock), __ipipe_spinlock_t *) || \
+ __builtin_types_compatible_p(typeof(lock), __ipipe_spinlock_t [])
+
+#define std_spinlock_raw(lock) ((raw_spinlock_t *)(lock))
+#define std_spinlock_raw_p(lock) \
+ __builtin_types_compatible_p(typeof(lock), raw_spinlock_t *) || \
+ __builtin_types_compatible_p(typeof(lock), raw_spinlock_t [])
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+#define PICK_SPINLOCK_IRQSAVE(lock, flags) \
+ do { \
+ if (ipipe_spinlock_p(lock)) \
+ (flags) = __ipipe_spin_lock_irqsave(ipipe_spinlock(lock)); \
+ else if (std_spinlock_raw_p(lock)) \
+ __real_raw_spin_lock_irqsave(std_spinlock_raw(lock), flags); \
+ else __bad_lock_type(); \
+ } while (0)
+
+#define PICK_SPINTRYLOCK_IRQSAVE(lock, flags) \
+ ({ \
+ int __ret__; \
+ if (ipipe_spinlock_p(lock)) \
+ __ret__ = __ipipe_spin_trylock_irqsave(ipipe_spinlock(lock), &(flags)); \
+ else if (std_spinlock_raw_p(lock)) \
+ __ret__ = __real_raw_spin_trylock_irqsave(std_spinlock_raw(lock), flags); \
+ else __bad_lock_type(); \
+ __ret__; \
+ })
+
+#define PICK_SPINTRYLOCK_IRQ(lock) \
+ ({ \
+ int __ret__; \
+ if (ipipe_spinlock_p(lock)) \
+ __ret__ = __ipipe_spin_trylock_irq(ipipe_spinlock(lock)); \
+ else if (std_spinlock_raw_p(lock)) \
+ __ret__ = __real_raw_spin_trylock_irq(std_spinlock_raw(lock)); \
+ else __bad_lock_type(); \
+ __ret__; \
+ })
+
+#define PICK_SPINUNLOCK_IRQRESTORE(lock, flags) \
+ do { \
+ if (ipipe_spinlock_p(lock)) \
+ __ipipe_spin_unlock_irqrestore(ipipe_spinlock(lock), flags); \
+ else if (std_spinlock_raw_p(lock)) { \
+ __ipipe_spin_unlock_debug(flags); \
+ __real_raw_spin_unlock_irqrestore(std_spinlock_raw(lock), flags); \
+ } else __bad_lock_type(); \
+ } while (0)
+
+#define PICK_SPINOP(op, lock) \
+ ({ \
+ if (ipipe_spinlock_p(lock)) \
+ arch_spin##op(&ipipe_spinlock(lock)->arch_lock); \
+ else if (std_spinlock_raw_p(lock)) \
+ __real_raw_spin##op(std_spinlock_raw(lock)); \
+ else __bad_lock_type(); \
+ (void)0; \
+ })
+
+#define PICK_SPINOP_RET(op, lock, type) \
+ ({ \
+ type __ret__; \
+ if (ipipe_spinlock_p(lock)) \
+ __ret__ = arch_spin##op(&ipipe_spinlock(lock)->arch_lock); \
+ else if (std_spinlock_raw_p(lock)) \
+ __ret__ = __real_raw_spin##op(std_spinlock_raw(lock)); \
+ else { __ret__ = -1; __bad_lock_type(); } \
+ __ret__; \
+ })
+
+#else /* !CONFIG_PREEMPT_RT_FULL */
+
+#define std_spinlock(lock) ((spinlock_t *)(lock))
+#define std_spinlock_p(lock) \
+ __builtin_types_compatible_p(typeof(lock), spinlock_t *) || \
+ __builtin_types_compatible_p(typeof(lock), spinlock_t [])
+
+#define PICK_SPINLOCK_IRQSAVE(lock, flags) \
+ do { \
+ if (ipipe_spinlock_p(lock)) \
+ (flags) = __ipipe_spin_lock_irqsave(ipipe_spinlock(lock)); \
+ else if (std_spinlock_raw_p(lock)) \
+ __real_raw_spin_lock_irqsave(std_spinlock_raw(lock), flags); \
+ else if (std_spinlock_p(lock)) \
+ __real_raw_spin_lock_irqsave(&std_spinlock(lock)->rlock, flags); \
+ else __bad_lock_type(); \
+ } while (0)
+
+#define PICK_SPINTRYLOCK_IRQSAVE(lock, flags) \
+ ({ \
+ int __ret__; \
+ if (ipipe_spinlock_p(lock)) \
+ __ret__ = __ipipe_spin_trylock_irqsave(ipipe_spinlock(lock), &(flags)); \
+ else if (std_spinlock_raw_p(lock)) \
+ __ret__ = __real_raw_spin_trylock_irqsave(std_spinlock_raw(lock), flags); \
+ else if (std_spinlock_p(lock)) \
+ __ret__ = __real_raw_spin_trylock_irqsave(&std_spinlock(lock)->rlock, flags); \
+ else __bad_lock_type(); \
+ __ret__; \
+ })
+
+#define PICK_SPINTRYLOCK_IRQ(lock) \
+ ({ \
+ int __ret__; \
+ if (ipipe_spinlock_p(lock)) \
+ __ret__ = __ipipe_spin_trylock_irq(ipipe_spinlock(lock)); \
+ else if (std_spinlock_raw_p(lock)) \
+ __ret__ = __real_raw_spin_trylock_irq(std_spinlock_raw(lock)); \
+ else if (std_spinlock_p(lock)) \
+ __ret__ = __real_raw_spin_trylock_irq(&std_spinlock(lock)->rlock); \
+ else __bad_lock_type(); \
+ __ret__; \
+ })
+
+#define PICK_SPINUNLOCK_IRQRESTORE(lock, flags) \
+ do { \
+ if (ipipe_spinlock_p(lock)) \
+ __ipipe_spin_unlock_irqrestore(ipipe_spinlock(lock), flags); \
+ else { \
+ __ipipe_spin_unlock_debug(flags); \
+ if (std_spinlock_raw_p(lock)) \
+ __real_raw_spin_unlock_irqrestore(std_spinlock_raw(lock), flags); \
+ else if (std_spinlock_p(lock)) \
+ __real_raw_spin_unlock_irqrestore(&std_spinlock(lock)->rlock, flags); \
+ } \
+ } while (0)
+
+#define PICK_SPINOP(op, lock) \
+ ({ \
+ if (ipipe_spinlock_p(lock)) \
+ arch_spin##op(&ipipe_spinlock(lock)->arch_lock); \
+ else if (std_spinlock_raw_p(lock)) \
+ __real_raw_spin##op(std_spinlock_raw(lock)); \
+ else if (std_spinlock_p(lock)) \
+ __real_raw_spin##op(&std_spinlock(lock)->rlock); \
+ else __bad_lock_type(); \
+ (void)0; \
+ })
+
+#define PICK_SPINOP_RET(op, lock, type) \
+ ({ \
+ type __ret__; \
+ if (ipipe_spinlock_p(lock)) \
+ __ret__ = arch_spin##op(&ipipe_spinlock(lock)->arch_lock); \
+ else if (std_spinlock_raw_p(lock)) \
+ __ret__ = __real_raw_spin##op(std_spinlock_raw(lock)); \
+ else if (std_spinlock_p(lock)) \
+ __ret__ = __real_raw_spin##op(&std_spinlock(lock)->rlock); \
+ else { __ret__ = -1; __bad_lock_type(); } \
+ __ret__; \
+ })
+
+#endif /* !CONFIG_PREEMPT_RT_FULL */
+
+#define arch_spin_lock_init(lock) \
+ do { \
+ IPIPE_DEFINE_SPINLOCK(__lock__); \
+ *((ipipe_spinlock_t *)lock) = __lock__; \
+ } while (0)
+
+#define arch_spin_lock_irq(lock) \
+ do { \
+ hard_local_irq_disable(); \
+ arch_spin_lock(lock); \
+ } while (0)
+
+#define arch_spin_unlock_irq(lock) \
+ do { \
+ arch_spin_unlock(lock); \
+ hard_local_irq_enable(); \
+ } while (0)
+
+typedef struct {
+ arch_rwlock_t arch_lock;
+} __ipipe_rwlock_t;
+
+#define ipipe_rwlock_p(lock) \
+ __builtin_types_compatible_p(typeof(lock), __ipipe_rwlock_t *)
+
+#define std_rwlock_p(lock) \
+ __builtin_types_compatible_p(typeof(lock), rwlock_t *)
+
+#define ipipe_rwlock(lock) ((__ipipe_rwlock_t *)(lock))
+#define std_rwlock(lock) ((rwlock_t *)(lock))
+
+#define PICK_RWOP(op, lock) \
+ do { \
+ if (ipipe_rwlock_p(lock)) \
+ arch##op(&ipipe_rwlock(lock)->arch_lock); \
+ else if (std_rwlock_p(lock)) \
+ _raw##op(std_rwlock(lock)); \
+ else __bad_lock_type(); \
+ } while (0)
+
+extern int __bad_lock_type(void);
+
+#ifdef CONFIG_IPIPE
+
+#define ipipe_spinlock_t __ipipe_spinlock_t
+#define IPIPE_DEFINE_RAW_SPINLOCK(x) ipipe_spinlock_t x = IPIPE_SPIN_LOCK_UNLOCKED
+#define IPIPE_DECLARE_RAW_SPINLOCK(x) extern ipipe_spinlock_t x
+#define IPIPE_DEFINE_SPINLOCK(x) IPIPE_DEFINE_RAW_SPINLOCK(x)
+#define IPIPE_DECLARE_SPINLOCK(x) IPIPE_DECLARE_RAW_SPINLOCK(x)
+
+#define IPIPE_SPIN_LOCK_UNLOCKED \
+ (__ipipe_spinlock_t) { .arch_lock = __ARCH_SPIN_LOCK_UNLOCKED }
+
+#define spin_lock_irqsave_cond(lock, flags) \
+ spin_lock_irqsave(lock, flags)
+
+#define spin_unlock_irqrestore_cond(lock, flags) \
+ spin_unlock_irqrestore(lock, flags)
+
+#define raw_spin_lock_irqsave_cond(lock, flags) \
+ raw_spin_lock_irqsave(lock, flags)
+
+#define raw_spin_unlock_irqrestore_cond(lock, flags) \
+ raw_spin_unlock_irqrestore(lock, flags)
+
+void __ipipe_spin_lock_irq(ipipe_spinlock_t *lock);
+
+int __ipipe_spin_trylock_irq(ipipe_spinlock_t *lock);
+
+void __ipipe_spin_unlock_irq(ipipe_spinlock_t *lock);
+
+unsigned long __ipipe_spin_lock_irqsave(ipipe_spinlock_t *lock);
+
+int __ipipe_spin_trylock_irqsave(ipipe_spinlock_t *lock,
+ unsigned long *x);
+
+void __ipipe_spin_unlock_irqrestore(ipipe_spinlock_t *lock,
+ unsigned long x);
+
+void __ipipe_spin_unlock_irqbegin(ipipe_spinlock_t *lock);
+
+void __ipipe_spin_unlock_irqcomplete(unsigned long x);
+
+#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP)
+void __ipipe_spin_unlock_debug(unsigned long flags);
+#else
+#define __ipipe_spin_unlock_debug(flags) do { } while (0)
+#endif
+
+#define ipipe_rwlock_t __ipipe_rwlock_t
+#define IPIPE_DEFINE_RWLOCK(x) ipipe_rwlock_t x = IPIPE_RW_LOCK_UNLOCKED
+#define IPIPE_DECLARE_RWLOCK(x) extern ipipe_rwlock_t x
+
+#define IPIPE_RW_LOCK_UNLOCKED \
+ (__ipipe_rwlock_t) { .arch_lock = __ARCH_RW_LOCK_UNLOCKED }
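+
+/*
+ * Declaration sketch: locks defined this way are dispatched to the
+ * __ipipe_spin_* helpers by the PICK_* macros above, while regular
+ * spinlocks keep their original implementation, e.g.
+ *
+ *	static IPIPE_DEFINE_SPINLOCK(some_lock);
+ *	unsigned long flags;
+ *
+ *	spin_lock_irqsave(&some_lock, flags);
+ *	...
+ *	spin_unlock_irqrestore(&some_lock, flags);
+ */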
+
+#else /* !CONFIG_IPIPE */
+
+#define ipipe_spinlock_t spinlock_t
+#define IPIPE_DEFINE_SPINLOCK(x) DEFINE_SPINLOCK(x)
+#define IPIPE_DECLARE_SPINLOCK(x) extern spinlock_t x
+#define IPIPE_SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(unknown)
+#define IPIPE_DEFINE_RAW_SPINLOCK(x) DEFINE_RAW_SPINLOCK(x)
+#define IPIPE_DECLARE_RAW_SPINLOCK(x) extern raw_spinlock_t x
+
+#define spin_lock_irqsave_cond(lock, flags) \
+ do { \
+ (void)(flags); \
+ spin_lock(lock); \
+ } while(0)
+
+#define spin_unlock_irqrestore_cond(lock, flags) \
+ spin_unlock(lock)
+
+#define raw_spin_lock_irqsave_cond(lock, flags) \
+ do { \
+ (void)(flags); \
+ raw_spin_lock(lock); \
+ } while(0)
+
+#define raw_spin_unlock_irqrestore_cond(lock, flags) \
+ raw_spin_unlock(lock)
+
+#define __ipipe_spin_lock_irq(lock) do { } while (0)
+#define __ipipe_spin_unlock_irq(lock) do { } while (0)
+#define __ipipe_spin_lock_irqsave(lock) 0
+#define __ipipe_spin_trylock_irq(lock) 1
+#define __ipipe_spin_trylock_irqsave(lock, x) ({ (void)(x); 1; })
+#define __ipipe_spin_unlock_irqrestore(lock, x) do { (void)(x); } while (0)
+#define __ipipe_spin_unlock_irqbegin(lock) spin_unlock(lock)
+#define __ipipe_spin_unlock_irqcomplete(x) do { (void)(x); } while (0)
+#define __ipipe_spin_unlock_debug(flags) do { } while (0)
+
+#define ipipe_rwlock_t rwlock_t
+#define IPIPE_DEFINE_RWLOCK(x) DEFINE_RWLOCK(x)
+#define IPIPE_DECLARE_RWLOCK(x) extern rwlock_t x
+#define IPIPE_RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(unknown)
+
+#endif /* !CONFIG_IPIPE */
+
+#endif /* !__LINUX_IPIPE_LOCK_H */
--- /dev/null
+/* -*- linux-c -*-
+ * include/linux/ipipe_tickdev.h
+ *
+ * Copyright (C) 2007 Philippe Gerum.
+ * Copyright (C) 2012 Gilles Chanteperdrix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __LINUX_IPIPE_TICKDEV_H
+#define __LINUX_IPIPE_TICKDEV_H
+
+#include <linux/list.h>
+#include <linux/cpumask.h>
+#include <linux/clockchips.h>
+#include <linux/ipipe_domain.h>
+#include <linux/clocksource.h>
+#include <linux/timekeeper_internal.h>
+
+#ifdef CONFIG_IPIPE
+
+enum clock_event_mode;
+struct clock_event_device;
+
+struct ipipe_hostrt_data {
+ short live;
+ seqcount_t seqcount;
+ time_t wall_time_sec;
+ u32 wall_time_nsec;
+ struct timespec wall_to_monotonic;
+ cycle_t cycle_last;
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+};
+
+struct ipipe_timer {
+ int irq;
+ void (*request)(struct ipipe_timer *timer, int steal);
+ int (*set)(unsigned long ticks, void *timer);
+ void (*ack)(void);
+ void (*release)(struct ipipe_timer *timer);
+
+ /* Only if registering a timer directly */
+ const char *name;
+ unsigned rating;
+ unsigned long freq;
+ unsigned min_delay_ticks;
+ const struct cpumask *cpumask;
+
+ /* For internal use */
+ void *timer_set; /* pointer passed to ->set() callback */
+ struct clock_event_device *host_timer;
+ struct list_head link;
+
+ /* Conversions between clock frequency and timer frequency */
+ unsigned c2t_integ;
+ unsigned c2t_frac;
+
+ /* For clockevent interception */
+ u32 real_mult;
+ u32 real_shift;
+ void (*real_set_mode)(enum clock_event_mode mode,
+ struct clock_event_device *cdev);
+ int (*real_set_next_event)(unsigned long evt,
+ struct clock_event_device *cdev);
+ unsigned int (*refresh_freq)(void);
+};
+
+#define __ipipe_hrtimer_irq __ipipe_raw_cpu_read(ipipe_percpu.hrtimer_irq)
+
+extern unsigned long __ipipe_hrtimer_freq;
+
+/*
+ * Called by clockevents_register_device() to register a piggybacked
+ * ipipe timer, if there is one.
+ */
+void ipipe_host_timer_register(struct clock_event_device *clkevt);
+
+/*
+ * Register a standalone ipipe timer
+ */
+void ipipe_timer_register(struct ipipe_timer *timer);
+
+/*
+ * Choose the best timer for each cpu and take over its handling.
+ */
+int ipipe_select_timers(const struct cpumask *mask);
+
+/*
+ * Release the per-cpu timers
+ */
+void ipipe_timers_release(void);
+
+/*
+ * Start handling the per-cpu timer irq, and intercepting the linux clockevent
+ * device callbacks.
+ */
+int ipipe_timer_start(void (*tick_handler)(void),
+ void (*emumode)(enum clock_event_mode mode,
+ struct clock_event_device *cdev),
+ int (*emutick)(unsigned long evt,
+ struct clock_event_device *cdev),
+ unsigned cpu);
+
+/*
+ * Stop handling a per-cpu timer
+ */
+void ipipe_timer_stop(unsigned cpu);
+
+/*
+ * Program the timer
+ */
+void ipipe_timer_set(unsigned long delay);
+
+const char *ipipe_timer_name(void);
+
+unsigned ipipe_timer_ns2ticks(struct ipipe_timer *timer, unsigned ns);
+
+void __ipipe_timer_refresh_freq(unsigned int hrclock_freq);
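+
+/*
+ * Registration sketch for a standalone timer (illustrative values;
+ * my_timer_set()/my_timer_ack() stand for driver-provided helpers
+ * matching the ->set() and ->ack() prototypes above):
+ *
+ *	static struct ipipe_timer my_itimer = {
+ *		.irq	= 30,
+ *		.name	= "my-timer",
+ *		.rating	= 300,
+ *		.freq	= 1000000,
+ *		.set	= my_timer_set,
+ *		.ack	= my_timer_ack,
+ *	};
+ *
+ *	ipipe_timer_register(&my_itimer);
+ */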
+
+#else /* !CONFIG_IPIPE */
+
+#define ipipe_host_timer_register(clkevt) do { } while (0)
+
+#endif /* !CONFIG_IPIPE */
+
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT
+void ipipe_update_hostrt(struct timekeeper *tk);
+#else
+static inline void
+ipipe_update_hostrt(struct timekeeper *tk) {}
+#endif
+
+#endif /* __LINUX_IPIPE_TICKDEV_H */
--- /dev/null
+/* -*- linux-c -*-
+ * include/linux/ipipe_trace.h
+ *
+ * Copyright (C) 2005 Luotao Fu.
+ * 2005-2007 Jan Kiszka.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _LINUX_IPIPE_TRACE_H
+#define _LINUX_IPIPE_TRACE_H
+
+#ifdef CONFIG_IPIPE_TRACE
+
+#include <linux/types.h>
+
+#ifndef BROKEN_BUILTIN_RETURN_ADDRESS
+#define __BUILTIN_RETURN_ADDRESS0 ((unsigned long)__builtin_return_address(0))
+#define __BUILTIN_RETURN_ADDRESS1 ((unsigned long)__builtin_return_address(1))
+#endif /* !BROKEN_BUILTIN_RETURN_ADDRESS */
+
+void ipipe_trace_begin(unsigned long v);
+void ipipe_trace_end(unsigned long v);
+void ipipe_trace_freeze(unsigned long v);
+void ipipe_trace_special(unsigned char special_id, unsigned long v);
+void ipipe_trace_pid(pid_t pid, short prio);
+void ipipe_trace_event(unsigned char id, unsigned long delay_tsc);
+int ipipe_trace_max_reset(void);
+int ipipe_trace_frozen_reset(void);
+
+#else /* !CONFIG_IPIPE_TRACE */
+
+#define ipipe_trace_begin(v) do { (void)(v); } while(0)
+#define ipipe_trace_end(v) do { (void)(v); } while(0)
+#define ipipe_trace_freeze(v) do { (void)(v); } while(0)
+#define ipipe_trace_special(id, v) do { (void)(id); (void)(v); } while(0)
+#define ipipe_trace_pid(pid, prio) do { (void)(pid); (void)(prio); } while(0)
+#define ipipe_trace_event(id, delay_tsc) do { (void)(id); (void)(delay_tsc); } while(0)
+#define ipipe_trace_max_reset() ({ 0; })
+#define ipipe_trace_frozen_reset() ({ 0; })
+
+#endif /* !CONFIG_IPIPE_TRACE */
+
+#ifdef CONFIG_IPIPE_TRACE_PANIC
+void ipipe_trace_panic_freeze(void);
+void ipipe_trace_panic_dump(void);
+#else
+static inline void ipipe_trace_panic_freeze(void) { }
+static inline void ipipe_trace_panic_dump(void) { }
+#endif
+
+#ifdef CONFIG_IPIPE_TRACE_IRQSOFF
+#define ipipe_trace_irq_entry(irq) ipipe_trace_begin(irq)
+#define ipipe_trace_irq_exit(irq) ipipe_trace_end(irq)
+#define ipipe_trace_irqsoff() ipipe_trace_begin(0x80000000UL)
+#define ipipe_trace_irqson() ipipe_trace_end(0x80000000UL)
+#else
+#define ipipe_trace_irq_entry(irq) do { (void)(irq);} while(0)
+#define ipipe_trace_irq_exit(irq) do { (void)(irq);} while(0)
+#define ipipe_trace_irqsoff() do { } while(0)
+#define ipipe_trace_irqson() do { } while(0)
+#endif
+
+#endif /* !__LINUX_IPIPE_TRACE_H */
void (*irq_bus_lock)(struct irq_data *data);
void (*irq_bus_sync_unlock)(struct irq_data *data);
+#ifdef CONFIG_IPIPE
+ void (*irq_move)(struct irq_data *data);
+ void (*irq_hold)(struct irq_data *data);
+ void (*irq_release)(struct irq_data *data);
+#endif /* CONFIG_IPIPE */
void (*irq_cpu_online)(struct irq_data *data);
void (*irq_cpu_offline)(struct irq_data *data);
extern void irq_chip_mask_parent(struct irq_data *data);
extern void irq_chip_unmask_parent(struct irq_data *data);
extern void irq_chip_eoi_parent(struct irq_data *data);
+#ifdef CONFIG_IPIPE
+extern void irq_chip_hold_parent(struct irq_data *data);
+extern void irq_chip_release_parent(struct irq_data *data);
+#endif
+
extern int irq_chip_set_affinity_parent(struct irq_data *data,
const struct cpumask *dest,
bool force);
extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
struct msi_desc *entry);
-extern struct irq_data *irq_get_irq_data(unsigned int irq);
+
+static inline __attribute__((const)) struct irq_data *
+irq_get_irq_data(unsigned int irq)
+{
+ struct irq_desc *desc = irq_to_desc(irq);
+
+ return desc ? &desc->irq_data : NULL;
+}
static inline struct irq_chip *irq_get_chip(unsigned int irq)
{
* different flow mechanisms (level/edge) for it.
*/
struct irq_chip_generic {
+#ifdef CONFIG_IPIPE
+ ipipe_spinlock_t lock;
+#else
raw_spinlock_t lock;
+#endif
void __iomem *reg_base;
u32 (*reg_readl)(void __iomem *addr);
void (*reg_writel)(u32 val, void __iomem *addr);
#define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
#ifdef CONFIG_SMP
-static inline void irq_gc_lock(struct irq_chip_generic *gc)
+static inline unsigned long irq_gc_lock(struct irq_chip_generic *gc)
{
- raw_spin_lock(&gc->lock);
+ unsigned long flags = 0;
+ raw_spin_lock_irqsave_cond(&gc->lock, flags);
+ return flags;
}
-static inline void irq_gc_unlock(struct irq_chip_generic *gc)
+static inline void
+irq_gc_unlock(struct irq_chip_generic *gc, unsigned long flags)
{
- raw_spin_unlock(&gc->lock);
+ raw_spin_unlock_irqrestore_cond(&gc->lock, flags);
}
#else
-static inline void irq_gc_lock(struct irq_chip_generic *gc) { }
-static inline void irq_gc_unlock(struct irq_chip_generic *gc) { }
+static inline unsigned long irq_gc_lock(struct irq_chip_generic *gc)
+{
+ return hard_cond_local_irq_save();
+}
+static inline void
+irq_gc_unlock(struct irq_chip_generic *gc, unsigned long flags)
+{
+ hard_cond_local_irq_restore(flags);
+}
#endif
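+
+/*
+ * Calling sketch for the pipelined variants above: the flags returned
+ * by irq_gc_lock() must now be handed back on unlock, e.g.
+ *
+ *	unsigned long flags = irq_gc_lock(gc);
+ *	...
+ *	irq_gc_unlock(gc, flags);
+ */
+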
/*
#define GICD_INT_EN_CLR_X32 0xffffffff
#define GICD_INT_EN_SET_SGI 0x0000ffff
#define GICD_INT_EN_CLR_PPI 0xffff0000
+#ifndef CONFIG_IPIPE
#define GICD_INT_DEF_PRI 0xa0
+#else
+#define GICD_INT_DEF_PRI 0x10
+#endif
#define GICD_INT_DEF_PRI_X4 ((GICD_INT_DEF_PRI << 24) |\
(GICD_INT_DEF_PRI << 16) |\
(GICD_INT_DEF_PRI << 8) |\
struct irq_desc {
struct irq_data irq_data;
unsigned int __percpu *kstat_irqs;
+#ifdef CONFIG_IPIPE
+ void (*ipipe_ack)(unsigned int irq,
+ struct irq_desc *desc);
+ void (*ipipe_end)(unsigned int irq,
+ struct irq_desc *desc);
+#endif /* CONFIG_IPIPE */
irq_flow_handler_t handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
irq_preflow_handler_t preflow_handler;
return desc->action != NULL;
}
+irq_flow_handler_t
+__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle,
+ int is_chained);
+
/* caller has locked the irq_desc and both params are valid */
static inline void __irq_set_handler_locked(unsigned int irq,
irq_flow_handler_t handler)
struct irq_desc *desc;
desc = irq_to_desc(irq);
+ handler = __fixup_irq_handler(desc, handler, 0);
desc->handle_irq = handler;
}
extern int nr_irqs;
+#if !defined(CONFIG_IPIPE) || defined(CONFIG_SPARSE_IRQ)
extern struct irq_desc *irq_to_desc(unsigned int irq);
+#else
+#define irq_to_desc(irq) ({ ipipe_virtual_irq_p(irq) ? NULL : &irq_desc[irq]; })
+#endif
unsigned int irq_get_next_irq(unsigned int offset);
# define for_each_irq_desc(irq, desc) \
#include <linux/compiler.h>
#include <linux/bitops.h>
#include <linux/log2.h>
+#include <linux/ipipe_base.h>
#include <linux/typecheck.h>
#include <linux/printk.h>
#include <linux/dynamic_debug.h>
#ifdef CONFIG_PREEMPT_VOLUNTARY
extern int _cond_resched(void);
-# define might_resched() _cond_resched()
+# define might_resched() do { \
+ ipipe_root_only(); \
+ _cond_resched(); \
+ } while (0)
#else
-# define might_resched() do { } while (0)
+# define might_resched() ipipe_root_only()
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier preempt_notifier;
+#endif
+#ifdef CONFIG_IPIPE
+ struct ipipe_vm_notifier ipipe_notifier;
#endif
int cpu;
int vcpu_id;
#include <linux/linkage.h>
#include <linux/list.h>
+#include <linux/ipipe_base.h>
/*
* We use the MSB mostly because its available; see <linux/preempt_mask.h> for
#endif /* CONFIG_PREEMPT_COUNT */
-#ifdef MODULE
+#ifdef CONFIG_IPIPE
+#define hard_preempt_disable() \
+ ({ \
+ unsigned long __flags__; \
+ __flags__ = hard_local_irq_save(); \
+ if (__ipipe_root_p) \
+ preempt_disable(); \
+ __flags__; \
+ })
+
+#define hard_preempt_enable(__flags__) \
+ do { \
+ if (__ipipe_root_p) { \
+ preempt_enable_no_resched(); \
+ hard_local_irq_restore(__flags__); \
+ preempt_check_resched(); \
+ } else \
+ hard_local_irq_restore(__flags__); \
+ } while (0)
+
+#elif defined(MODULE)
/*
* Modules have no business playing preemption tricks.
*/
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
-#endif
+#endif /* !IPIPE && MODULE */
#define preempt_set_need_resched() \
do { \
void early_printk(const char *s, ...) { }
#endif
+#ifdef CONFIG_RAW_PRINTK
+void raw_vprintk(const char *fmt, va_list ap);
+asmlinkage __printf(1, 2)
+void raw_printk(const char *fmt, ...);
+#else
+static inline __cold
+void raw_vprintk(const char *s, va_list ap) { }
+static inline __printf(1, 2) __cold
+void raw_printk(const char *s, ...) { }
+#endif
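+
+/*
+ * Usage sketch: same formatting as printk(), but the output is pushed
+ * to the raw console channel immediately, e.g.
+ *
+ *	raw_printk("ipipe: unexpected state %lx\n", flags);
+ */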
+
typedef int(*printk_func_t)(const char *fmt, va_list args);
#ifdef CONFIG_PRINTK
#define read_trylock(lock) __cond_lock(lock, _raw_read_trylock(lock))
#define write_trylock(lock) __cond_lock(lock, _raw_write_trylock(lock))
-#define write_lock(lock) _raw_write_lock(lock)
-#define read_lock(lock) _raw_read_lock(lock)
+#define write_lock(lock) PICK_RWOP(_write_lock, lock)
+#define read_lock(lock) PICK_RWOP(_read_lock, lock)
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
#define read_lock_bh(lock) _raw_read_lock_bh(lock)
#define write_lock_irq(lock) _raw_write_lock_irq(lock)
#define write_lock_bh(lock) _raw_write_lock_bh(lock)
-#define read_unlock(lock) _raw_read_unlock(lock)
-#define write_unlock(lock) _raw_write_unlock(lock)
+#define read_unlock(lock) PICK_RWOP(_read_unlock, lock)
+#define write_unlock(lock) PICK_RWOP(_write_unlock, lock)
#define read_unlock_irq(lock) _raw_read_unlock_irq(lock)
#define write_unlock_irq(lock) _raw_write_unlock_irq(lock)
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
* not re-enabled during lock-acquire (which the preempt-spin-ops do):
*/
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || \
+ defined(CONFIG_DEBUG_LOCK_ALLOC) || \
+ defined(CONFIG_IPIPE)
static inline void __raw_read_lock(rwlock_t *lock)
{
#include <linux/nodemask.h>
#include <linux/mm_types.h>
#include <linux/preempt_mask.h>
+#include <linux/ipipe.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#define TASK_WAKEKILL 128
#define TASK_WAKING 256
#define TASK_PARKED 512
+#ifdef CONFIG_IPIPE
+#define TASK_HARDENING 1024
+#define TASK_NOWAKEUP 2048
+#define TASK_STATE_MAX 4096
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWPHN"
+#else /* !CONFIG_IPIPE */
+#define TASK_HARDENING 0
+#define TASK_NOWAKEUP 0
#define TASK_STATE_MAX 1024
-
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtZXxKWP"
+#endif /* CONFIG_IPIPE */
extern char ___assert_task_state[1 - 2*!!(
sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
extern void update_process_times(int user);
extern void scheduler_tick(void);
+#ifdef CONFIG_IPIPE
+void update_root_process_times(struct pt_regs *regs);
+#else /* !CONFIG_IPIPE */
+static inline void update_root_process_times(struct pt_regs *regs)
+{
+ update_process_times(user_mode(regs));
+}
+#endif /* CONFIG_IPIPE */
+
extern void sched_show_task(struct task_struct *p);
#ifdef CONFIG_LOCKUP_DETECTOR
#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
+#ifdef CONFIG_IPIPE
+#define MMF_VM_PINNED 31 /* ondemand load up and COW disabled */
+#endif
#define MMF_HAS_UPROBES 19 /* has uprobes */
#define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */
#endif /* CONFIG_NUMA_BALANCING */
struct rcu_head rcu;
+#ifdef CONFIG_IPIPE_LEGACY
+ void *ptd[IPIPE_ROOT_NPTDKEYS];
+#endif
/*
* cache last used pipe for splice
# include <linux/spinlock_up.h>
#endif
+#include <linux/ipipe_lock.h>
+
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key);
-# define raw_spin_lock_init(lock) \
+# define __real_raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
} while (0)
#else
-# define raw_spin_lock_init(lock) \
+# define __real_raw_spin_lock_init(lock) \
do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
#endif
+#define raw_spin_lock_init(lock) PICK_SPINOP(_lock_init, lock)
-#define raw_spin_is_locked(lock) arch_spin_is_locked(&(lock)->raw_lock)
+#define __real_raw_spin_is_locked(lock) \
+ arch_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock) PICK_SPINOP_RET(_is_locked, lock, int)
#ifdef CONFIG_GENERIC_LOCKBREAK
#define raw_spin_is_contended(lock) ((lock)->break_lock)
* various methods are defined as nops in the case they are not
* required.
*/
-#define raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+#define __real_raw_spin_trylock(lock) __cond_lock(lock, _raw_spin_trylock(lock))
+#define raw_spin_trylock(lock) PICK_SPINOP_RET(_trylock, lock, int)
-#define raw_spin_lock(lock) _raw_spin_lock(lock)
+#define __real_raw_spin_lock(lock) _raw_spin_lock(lock)
+#define raw_spin_lock(lock) PICK_SPINOP(_lock, lock)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define raw_spin_lock_nested(lock, subclass) \
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
-#define raw_spin_lock_irqsave(lock, flags) \
+#define __real_raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
flags = _raw_spin_lock_irqsave(lock); \
#else
-#define raw_spin_lock_irqsave(lock, flags) \
+#define __real_raw_spin_lock_irqsave(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_spin_lock_irqsave(lock, flags); \
#endif
-#define raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_irqsave(lock, flags) \
+ PICK_SPINLOCK_IRQSAVE(lock, flags)
+
+#define __real_raw_spin_lock_irq(lock) _raw_spin_lock_irq(lock)
+#define raw_spin_lock_irq(lock) PICK_SPINOP(_lock_irq, lock)
#define raw_spin_lock_bh(lock) _raw_spin_lock_bh(lock)
-#define raw_spin_unlock(lock) _raw_spin_unlock(lock)
-#define raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
+#define __real_raw_spin_unlock(lock) _raw_spin_unlock(lock)
+#define raw_spin_unlock(lock) PICK_SPINOP(_unlock, lock)
+#define __real_raw_spin_unlock_irq(lock) _raw_spin_unlock_irq(lock)
+#define raw_spin_unlock_irq(lock) PICK_SPINOP(_unlock_irq, lock)
-#define raw_spin_unlock_irqrestore(lock, flags) \
+#define __real_raw_spin_unlock_irqrestore(lock, flags) \
do { \
typecheck(unsigned long, flags); \
_raw_spin_unlock_irqrestore(lock, flags); \
} while (0)
+#define raw_spin_unlock_irqrestore(lock, flags) \
+ PICK_SPINUNLOCK_IRQRESTORE(lock, flags)
+
#define raw_spin_unlock_bh(lock) _raw_spin_unlock_bh(lock)
#define raw_spin_trylock_bh(lock) \
__cond_lock(lock, _raw_spin_trylock_bh(lock))
-#define raw_spin_trylock_irq(lock) \
+#define __real_raw_spin_trylock_irq(lock) \
({ \
local_irq_disable(); \
- raw_spin_trylock(lock) ? \
+ __real_raw_spin_trylock(lock) ? \
1 : ({ local_irq_enable(); 0; }); \
})
+#define raw_spin_trylock_irq(lock) PICK_SPINTRYLOCK_IRQ(lock)
-#define raw_spin_trylock_irqsave(lock, flags) \
+#define __real_raw_spin_trylock_irqsave(lock, flags) \
({ \
local_irq_save(flags); \
raw_spin_trylock(lock) ? \
1 : ({ local_irq_restore(flags); 0; }); \
})
+#define raw_spin_trylock_irqsave(lock, flags) \
+ PICK_SPINTRYLOCK_IRQSAVE(lock, flags)
/**
* raw_spin_can_lock - would raw_spin_trylock() succeed?
#define spin_lock_init(_lock) \
do { \
- spinlock_check(_lock); \
- raw_spin_lock_init(&(_lock)->rlock); \
+ raw_spin_lock_init(_lock); \
} while (0)
-static inline void spin_lock(spinlock_t *lock)
-{
- raw_spin_lock(&lock->rlock);
-}
+#define spin_lock(lock) raw_spin_lock(lock)
static inline void spin_lock_bh(spinlock_t *lock)
{
raw_spin_lock_bh(&lock->rlock);
}
-static inline int spin_trylock(spinlock_t *lock)
-{
- return raw_spin_trylock(&lock->rlock);
-}
+#define spin_trylock(lock) raw_spin_trylock(lock)
#define spin_lock_nested(lock, subclass) \
do { \
raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock); \
} while (0)
-static inline void spin_lock_irq(spinlock_t *lock)
-{
- raw_spin_lock_irq(&lock->rlock);
-}
+#define spin_lock_irq(lock) raw_spin_lock_irq(lock)
#define spin_lock_irqsave(lock, flags) \
do { \
- raw_spin_lock_irqsave(spinlock_check(lock), flags); \
+ raw_spin_lock_irqsave(lock, flags); \
} while (0)
#define spin_lock_irqsave_nested(lock, flags, subclass) \
raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
} while (0)
-static inline void spin_unlock(spinlock_t *lock)
-{
- raw_spin_unlock(&lock->rlock);
-}
+#define spin_unlock(lock) raw_spin_unlock(lock)
static inline void spin_unlock_bh(spinlock_t *lock)
{
raw_spin_unlock_bh(&lock->rlock);
}
-static inline void spin_unlock_irq(spinlock_t *lock)
-{
- raw_spin_unlock_irq(&lock->rlock);
-}
+#define spin_unlock_irq(lock) raw_spin_unlock_irq(lock)
-static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
-{
- raw_spin_unlock_irqrestore(&lock->rlock, flags);
-}
+#define spin_unlock_irqrestore(lock, flags) \
+ raw_spin_unlock_irqrestore(lock, flags)
static inline int spin_trylock_bh(spinlock_t *lock)
{
return raw_spin_trylock_bh(&lock->rlock);
}
-static inline int spin_trylock_irq(spinlock_t *lock)
-{
- return raw_spin_trylock_irq(&lock->rlock);
-}
+#define spin_trylock_irq(lock) raw_spin_trylock_irq(lock)
#define spin_trylock_irqsave(lock, flags) \
({ \
- raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
+ raw_spin_trylock_irqsave(lock, flags); \
})
static inline void spin_unlock_wait(spinlock_t *lock)
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
* not re-enabled during lock-acquire (which the preempt-spin-ops do):
*/
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || \
+ defined(CONFIG_DEBUG_LOCK_ALLOC) || \
+ defined(CONFIG_IPIPE)
static inline unsigned long __raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
* do_raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire:
*/
-#ifdef CONFIG_LOCKDEP
+#if defined(CONFIG_LOCKDEP) || defined(CONFIG_IPIPE)
LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
#else
do_raw_spin_lock_flags(lock, &flags);
lock->slock = 1;
}
-/*
- * Read-write spinlocks. No debug version.
- */
-#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
-#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
-#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
-#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
-
#else /* DEBUG_SPINLOCK */
#define arch_spin_is_locked(lock) ((void)(lock), 0)
/* for sched/core.c and kernel_lock.c: */
# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
+#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0)
+
#define arch_spin_is_contended(lock) (((void)(lock), 0))
#define arch_read_can_lock(lock) (((void)(lock), 1))
#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
- struct clocksource *c, u32 mult,
+ struct clocksource *c, u32 mult, u32 shift,
cycle_t cycle_last);
extern void update_vsyscall_tz(void);
#define MAP_TYPE 0x0f /* Mask for type of mapping */
#define MAP_FIXED 0x10 /* Interpret addr exactly */
#define MAP_ANONYMOUS 0x20 /* don't use a file */
+#ifndef MAP_BRK
+# define MAP_BRK 0
+#endif
#ifdef CONFIG_MMAP_ALLOW_UNINITIALIZED
# define MAP_UNINITIALIZED 0x4000000 /* For anonymous mmap, memory could be uninitialized */
#else
# define RLIM_INFINITY (~0UL)
#endif
+/*
+ * Limit the stack to some sane default: root can always
+ * increase this limit if needed. 8MB seems reasonable.
+ */
+#ifndef _STK_LIM
+# define _STK_LIM (8*1024*1024)
+#endif
+
#endif /* _UAPI_ASM_GENERIC_RESOURCE_H */
#define PRIO_PGRP 1
#define PRIO_USER 2
-/*
- * Limit the stack by to some sane default: root can always
- * increase this limit if needed.. 8MB seems reasonable.
- */
-#define _STK_LIM (8*1024*1024)
-
/*
* GPG2 wants 64kB of mlocked memory, to make sure pass phrases
* and other sensitive information are never written to disk.
config LOCALVERSION
string "Local version - append to kernel release"
+ default "-ipipe"
help
Append an extra string to the end of your kernel version.
This will show up when you type uname, for example.
very difficult to diagnose system problems, saying N here is
strongly discouraged.
+config RAW_PRINTK
+ bool "Enable support for raw printk"
+ default n
+ select DEBUG_LL if ARM
+ help
+ This option enables a printk variant called raw_printk() for
+ writing all output unmodified to a raw console channel
+ immediately, without any header or preparation whatsoever,
+ usable from any context.
+
+ Unlike early_printk() console devices, raw_printk() devices
+ can live past the boot sequence.
+
config BUG
bool "BUG() support" if EXPERT
default y
cgroup_init_early();
- local_irq_disable();
+ hard_local_irq_disable();
early_boot_irqs_disabled = true;
/*
pidhash_init();
vfs_caches_init_early();
sort_main_extable();
+ __ipipe_init_early();
trap_init();
mm_init();
softirq_init();
timekeeping_init();
time_init();
+ /*
+ * We need to wait for the interrupt and time subsystems to be
+ * initialized before enabling the pipeline.
+ */
+ __ipipe_init();
sched_clock_postinit();
perf_event_init();
profile_init();
shmem_init();
driver_init();
init_irq_proc();
+ __ipipe_init_proc();
do_ctors();
usermodehelper_enable();
do_initcalls();
obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
obj-$(CONFIG_SECCOMP) += seccomp.o
obj-$(CONFIG_RELAY) += relay.o
+obj-$(CONFIG_IPIPE) += ipipe/
obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
* helpers are enough to protect RCU uses inside the exception. So
* just return immediately if we detect we are in an IRQ.
*/
- if (in_interrupt())
+ if (ipipe_root_p == 0 || in_interrupt())
return;
/* Kernel threads aren't supposed to go to userspace */
{
unsigned long flags;
- if (!context_tracking_is_enabled())
+ if (!ipipe_root_p || !context_tracking_is_enabled())
return;
if (in_interrupt())
*/
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
-static DEFINE_RAW_SPINLOCK(dbg_master_lock);
-static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
+static IPIPE_DEFINE_RAW_SPINLOCK(dbg_master_lock);
+static IPIPE_DEFINE_RAW_SPINLOCK(dbg_slave_lock);
/*
* We use NR_CPUs not PERCPU, in case kgdb is used to debug early
{
int err;
- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
- BREAK_INSTR_SIZE);
+ err = ipipe_probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
+ BREAK_INSTR_SIZE);
if (err)
return err;
- err = probe_kernel_write((char *)bpt->bpt_addr,
- arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+ err = ipipe_probe_kernel_write((char *)bpt->bpt_addr,
+ arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
return err;
}
int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
- return probe_kernel_write((char *)bpt->bpt_addr,
- (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
+ return ipipe_probe_kernel_write((char *)bpt->bpt_addr,
+ (char *)bpt->saved_instr,
+ BREAK_INSTR_SIZE);
}
int __weak kgdb_validate_break_address(unsigned long addr)
static void dbg_touch_watchdogs(void)
{
touch_softlockup_watchdog_sync();
+#ifndef CONFIG_IPIPE
clocksource_touch_watchdog();
+#endif
rcu_cpu_stall_reset();
}
* Interrupts will be restored by the 'trap return' code, except when
* single stepping.
*/
- local_irq_save(flags);
+ flags = hard_local_irq_save();
cpu = ks->cpu;
kgdb_info[cpu].debuggerinfo = regs;
smp_mb__before_atomic();
atomic_dec(&slaves_in_kgdb);
dbg_touch_watchdogs();
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
return 0;
}
cpu_relax();
atomic_set(&kgdb_active, -1);
raw_spin_unlock(&dbg_master_lock);
dbg_touch_watchdogs();
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
goto acquirelock;
}
atomic_set(&kgdb_active, -1);
raw_spin_unlock(&dbg_master_lock);
dbg_touch_watchdogs();
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
return kgdb_info[cpu].ret_state;
}
if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
return;
- local_irq_save(flags);
+ flags = hard_local_irq_save();
gdbstub_msg_write(s, count);
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
}
static struct console kgdbcons = {
*/
tmp = buf + count;
- err = probe_kernel_read(tmp, mem, count);
+ err = ipipe_probe_kernel_read(tmp, mem, count);
if (err)
return NULL;
while (count > 0) {
*tmp_raw |= hex_to_bin(*tmp_hex--) << 4;
}
- return probe_kernel_write(mem, tmp_raw, count);
+ return ipipe_probe_kernel_write(mem, tmp_raw, count);
}
/*
size++;
}
- return probe_kernel_write(mem, c, size);
+ return ipipe_probe_kernel_write(mem, c, size);
}
#if DBG_MAX_REG_NUM > 0
*/
smp_mb();
raw_spin_unlock_wait(&tsk->pi_lock);
+ __ipipe_report_exit(tsk);
if (unlikely(in_atomic()))
pr_info("note: %s[%d] exited with preempt_count %d\n",
#endif
setup_thread_stack(tsk, orig);
+ __ipipe_init_threadflags(ti);
+ __ipipe_init_threadinfo(&ti->ipipe_data);
clear_user_return_notifier(tsk);
clear_tsk_need_resched(tsk);
set_task_stack_end_magic(tsk);
exit_aio(mm);
ksm_exit(mm);
khugepaged_exit(mm); /* must run before exit_mmap */
+ __ipipe_report_cleanup(mm);
exit_mmap(mm);
set_mm_exe_file(mm, NULL);
if (!list_empty(&mm->mmlist)) {
cgroup_post_fork(p);
if (clone_flags & CLONE_THREAD)
threadgroup_change_end(current);
+ __ipipe_init_taskinfo(p);
perf_event_fork(p);
trace_task_newtask(p, clone_flags);
--- /dev/null
+config IPIPE
+ bool "Interrupt pipeline"
+ default y
+ ---help---
+ Activate this option if you want the interrupt pipeline to be
+ compiled in.
+
+config IPIPE_LEGACY
+ bool "I-pipe legacy interface"
+ depends on IPIPE
+ ---help---
+ Activate this option if you want to control the interrupt
+ pipeline via the legacy interface.
+
+config IPIPE_CORE
+ def_bool y if IPIPE
+
+config IPIPE_WANT_CLOCKSOURCE
+ bool
+
+config IPIPE_WANT_PTE_PINNING
+ bool
+
+config IPIPE_CORE_APIREV
+ int
+ depends on IPIPE
+ default 2
+ ---help---
+ The API revision level we implement.
+
+config IPIPE_WANT_APIREV_1
+ bool
+
+config IPIPE_WANT_APIREV_2
+ bool
+
+config IPIPE_TARGET_APIREV
+ int
+ depends on IPIPE
+ default 1 if IPIPE_WANT_APIREV_1
+ default 2 if IPIPE_WANT_APIREV_2
+ default 1 if IPIPE_LEGACY
+ default IPIPE_CORE_APIREV
+ ---help---
+ The API revision level we want (must be <=
+ IPIPE_CORE_APIREV).
+
+config IPIPE_HAVE_HOSTRT
+ bool
+
+config IPIPE_HAVE_PIC_MUTE
+ bool
+
+config HAVE_IPIPE_HOSTRT
+ depends on IPIPE_LEGACY
+ bool
+
+config IPIPE_DELAYED_ATOMICSW
+ def_bool y if IPIPE_LEGACY
+
+config IPIPE_HAVE_SAFE_THREAD_INFO
+ bool
+
+config IPIPE_HAVE_VM_NOTIFIER
+ bool
--- /dev/null
+config IPIPE_DEBUG
+ bool "I-pipe debugging"
+ depends on IPIPE
+ select RAW_PRINTK
+
+config IPIPE_DEBUG_CONTEXT
+ bool "Check for illicit cross-domain calls"
+ depends on IPIPE_DEBUG
+ default y
+ ---help---
+ Enable this feature to arm checkpoints in the kernel that
+ verify the correct invocation context. On entry to critical
+ Linux services, a warning is issued if the caller is not
+ running over the root domain.
+
+config IPIPE_DEBUG_INTERNAL
+ bool "Enable internal debug checks"
+ depends on IPIPE_DEBUG
+ default y
+ ---help---
+ When this feature is enabled, I-pipe will perform internal
+ consistency checks of its subsystems, e.g. on per-cpu variable
+ access.
+
+config IPIPE_TRACE
+ bool "Latency tracing"
+ depends on IPIPE_DEBUG
+ select FTRACE
+ select FUNCTION_TRACER
+ select KALLSYMS
+ select PROC_FS
+ ---help---
+ Activate this option if you want to use per-function tracing of
+ the kernel. The tracer will collect data via instrumentation
+ features like the one below or with the help of explicit calls
+ to ipipe_trace_xxx(). See include/linux/ipipe_trace.h for the
+ in-kernel tracing API. The collected data and the runtime controls
+ are available via /proc/ipipe/trace/*.
+
+if IPIPE_TRACE
+
+config IPIPE_TRACE_ENABLE
+ bool "Enable tracing on boot"
+ default y
+ ---help---
+ Disable this option if you want to arm the tracer manually after
+ booting ("echo 1 > /proc/ipipe/tracer/enable"). This can reduce
+ boot time on slow embedded devices due to the tracer overhead.
+
+config IPIPE_TRACE_MCOUNT
+ bool "Instrument function entries"
+ default y
+ select FTRACE
+ select FUNCTION_TRACER
+ ---help---
+ When enabled, records every kernel function entry in the tracer
+ log. While this slows down the system noticeably, it provides
+ the highest level of information about the flow of events.
+ However, it can be switched off in order to record only explicit
+ I-pipe trace points.
+
+config IPIPE_TRACE_IRQSOFF
+ bool "Trace IRQs-off times"
+ default y
+ ---help---
+ Activate this option if the I-pipe should trace the longest code
+ path executed with hard IRQs switched off.
+
+config IPIPE_TRACE_SHIFT
+ int "Depth of trace log (14 => 16Kpoints, 15 => 32Kpoints)"
+ range 10 18
+ default 14
+ ---help---
+ The number of trace points to hold tracing data for each
+ trace path, as a power of 2.
+
+config IPIPE_TRACE_VMALLOC
+ bool "Use vmalloc'ed trace buffer"
+ default y if EMBEDDED
+ ---help---
+ Instead of reserving static kernel data, the required buffer
+ is allocated via vmalloc during boot-up when this option is
+ enabled. This can help to start systems that are low on memory,
+ but it slightly degrades overall performance. Try this option
+ when a traced kernel hangs unexpectedly at boot time.
+
+config IPIPE_TRACE_PANIC
+ bool "Enable panic back traces"
+ default y
+ ---help---
+ Provides services to freeze and dump a back trace in panic
+ situations. This is used for IPIPE_DEBUG_CONTEXT exceptions
+ as well as ordinary kernel oopses. You can control the number
+ of printed back trace points via /proc/ipipe/trace.
+
+endif
--- /dev/null
+obj-$(CONFIG_IPIPE) += core.o timer.o
+obj-$(CONFIG_IPIPE_TRACE) += tracer.o
+obj-$(CONFIG_IPIPE_LEGACY) += compat.o
--- /dev/null
+/* -*- linux-c -*-
+ * linux/kernel/ipipe/compat.c
+ *
+ * Copyright (C) 2012 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * I-pipe legacy interface.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/ipipe.h>
+
+static int ptd_key_count;
+
+static unsigned long ptd_key_map;
+
+IPIPE_DECLARE_SPINLOCK(__ipipe_lock);
+
+void ipipe_init_attr(struct ipipe_domain_attr *attr)
+{
+ attr->name = "anon";
+ attr->domid = 1;
+ attr->entry = NULL;
+ attr->priority = IPIPE_ROOT_PRIO;
+ attr->pdd = NULL;
+}
+EXPORT_SYMBOL_GPL(ipipe_init_attr);
+
+int ipipe_register_domain(struct ipipe_domain *ipd,
+ struct ipipe_domain_attr *attr)
+{
+ struct ipipe_percpu_domain_data *p;
+ unsigned long flags;
+
+ BUG_ON(attr->priority != IPIPE_HEAD_PRIORITY);
+
+ ipipe_register_head(ipd, attr->name);
+ ipd->legacy.domid = attr->domid;
+ ipd->legacy.pdd = attr->pdd;
+ ipd->legacy.priority = INT_MAX;
+
+ if (attr->entry == NULL)
+ return 0;
+
+ flags = hard_smp_local_irq_save();
+ __ipipe_set_current_domain(ipd);
+ hard_smp_local_irq_restore(flags);
+
+ attr->entry();
+
+ flags = hard_local_irq_save();
+ __ipipe_set_current_domain(ipipe_root_domain);
+ p = ipipe_this_cpu_root_context();
+ if (__ipipe_ipending_p(p) &&
+ !test_bit(IPIPE_STALL_FLAG, &p->status))
+ __ipipe_sync_stage();
+ hard_local_irq_restore(flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipipe_register_domain);
+
+int ipipe_unregister_domain(struct ipipe_domain *ipd)
+{
+ ipipe_unregister_head(ipd);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipipe_unregister_domain);
+
+int ipipe_alloc_ptdkey(void)
+{
+ unsigned long flags;
+ int key = -1;
+
+ spin_lock_irqsave(&__ipipe_lock, flags);
+
+ if (ptd_key_count < IPIPE_ROOT_NPTDKEYS) {
+ key = ffz(ptd_key_map);
+ set_bit(key, &ptd_key_map);
+ ptd_key_count++;
+ }
+
+ spin_unlock_irqrestore(&__ipipe_lock, flags);
+
+ return key;
+}
+EXPORT_SYMBOL_GPL(ipipe_alloc_ptdkey);
+
+int ipipe_free_ptdkey(int key)
+{
+ unsigned long flags;
+
+ if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
+ return -EINVAL;
+
+ spin_lock_irqsave(&__ipipe_lock, flags);
+
+ if (test_and_clear_bit(key, &ptd_key_map))
+ ptd_key_count--;
+
+ spin_unlock_irqrestore(&__ipipe_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipipe_free_ptdkey);
+
+int ipipe_set_ptd(int key, void *value)
+{
+ if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
+ return -EINVAL;
+
+ current->ptd[key] = value;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ipipe_set_ptd);
+
+void *ipipe_get_ptd(int key)
+{
+ if (key < 0 || key >= IPIPE_ROOT_NPTDKEYS)
+ return NULL;
+
+ return current->ptd[key];
+}
+EXPORT_SYMBOL_GPL(ipipe_get_ptd);
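The four ptd helpers above form a small per-task key/value API. A hedged usage sketch; the demo function and my_cookie are placeholders and error handling is kept minimal:

/* Hedged sketch: attach a private pointer to 'current' behind a ptd key. */
static void ptd_demo(void *my_cookie)
{
	int key = ipipe_alloc_ptdkey();

	if (key < 0)
		return;			/* no key left */

	ipipe_set_ptd(key, my_cookie);	/* stored in current->ptd[key] */
	/* ... later, from the same task ... */
	my_cookie = ipipe_get_ptd(key);

	ipipe_free_ptdkey(key);
}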
+
+int ipipe_virtualize_irq(struct ipipe_domain *ipd,
+ unsigned int irq,
+ ipipe_irq_handler_t handler,
+ void *cookie,
+ ipipe_irq_ackfn_t ackfn,
+ unsigned int modemask)
+{
+ if (handler == NULL) {
+ ipipe_free_irq(ipd, irq);
+ return 0;
+ }
+
+ return ipipe_request_irq(ipd, irq, handler, cookie, ackfn);
+}
+EXPORT_SYMBOL_GPL(ipipe_virtualize_irq);
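As the wrapper above shows, a legacy ipipe_virtualize_irq() call with a non-NULL handler boils down to ipipe_request_irq(), and a NULL handler to ipipe_free_irq(); the mode mask is ignored. A hedged sketch with placeholder names, the mask shown being illustrative only:

/* Hedged sketch: same registration, legacy and current style. */
static int register_my_irq(struct ipipe_domain *ipd, unsigned int irq,
			   ipipe_irq_handler_t handler, void *cookie)
{
	/* Legacy interface, routed through the wrapper above: */
	return ipipe_virtualize_irq(ipd, irq, handler, cookie, NULL,
				    IPIPE_HANDLE_MASK);
	/* Equivalent current interface:
	 * return ipipe_request_irq(ipd, irq, handler, cookie, NULL);
	 */
}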
+
+static int null_handler(unsigned int event,
+ struct ipipe_domain *from, void *data)
+{
+ /*
+ * Legacy mode users will trap all events, or at least the most
+ * frequent ones. Therefore it is actually faster to run a
+ * dummy handler once in a while rather than testing for a
+ * null handler pointer each time an event is fired.
+ */
+ return 0;
+}
+
+ipipe_event_handler_t ipipe_catch_event(struct ipipe_domain *ipd,
+ unsigned int event,
+ ipipe_event_handler_t handler)
+{
+ ipipe_event_handler_t oldhandler;
+ int n, enables = 0;
+
+ if (event & IPIPE_EVENT_SELF) {
+ event &= ~IPIPE_EVENT_SELF;
+ IPIPE_WARN(event >= IPIPE_NR_FAULTS);
+ }
+
+ if (event >= IPIPE_NR_EVENTS)
+ return NULL;
+
+ /*
+ * It makes no sense to run a SETSCHED notification handler
+ * over the head domain, this introduces a useless domain
+ * switch for doing work which ought to be root specific.
+ * Unfortunately, some client domains using the legacy
+ * interface still ask for this, so we silently fix their
+ * request. This prevents ipipe_set_hooks() from yelling at us
+ * because of an attempt to enable kernel event notifications
+ * for the head domain.
+ */
+ if (event == IPIPE_EVENT_SETSCHED)
+ ipd = ipipe_root_domain;
+
+ oldhandler = ipd->legacy.handlers[event];
+ ipd->legacy.handlers[event] = handler ?: null_handler;
+
+ for (n = 0; n < IPIPE_NR_FAULTS; n++) {
+ if (ipd->legacy.handlers[n] != null_handler) {
+ enables |= __IPIPE_TRAP_E;
+ break;
+ }
+ }
+
+ for (n = IPIPE_FIRST_EVENT; n < IPIPE_LAST_EVENT; n++) {
+ if (ipd->legacy.handlers[n] != null_handler) {
+ enables |= __IPIPE_KEVENT_E;
+ break;
+ }
+ }
+
+ if (ipd->legacy.handlers[IPIPE_EVENT_SYSCALL] != null_handler)
+ enables |= __IPIPE_SYSCALL_E;
+
+ ipipe_set_hooks(ipd, enables);
+
+ return oldhandler == null_handler ? NULL : oldhandler;
+}
+EXPORT_SYMBOL_GPL(ipipe_catch_event);
+
+int ipipe_setscheduler_root(struct task_struct *p, int policy, int prio)
+{
+ struct sched_param param = { .sched_priority = prio };
+ return sched_setscheduler_nocheck(p, policy, &param);
+}
+EXPORT_SYMBOL_GPL(ipipe_setscheduler_root);
+
+int ipipe_syscall_hook(struct ipipe_domain *ipd, struct pt_regs *regs)
+{
+ const int event = IPIPE_EVENT_SYSCALL;
+ return ipipe_current_domain->legacy.handlers[event](event, ipd, regs);
+}
+
+int ipipe_trap_hook(struct ipipe_trap_data *data)
+{
+ struct ipipe_domain *ipd = ipipe_head_domain;
+ struct pt_regs *regs = data->regs;
+ int ex = data->exception;
+
+ return ipd->legacy.handlers[ex](ex, ipd, regs);
+}
+
+int ipipe_kevent_hook(int kevent, void *data)
+{
+ unsigned int event = IPIPE_FIRST_EVENT + kevent;
+ struct ipipe_domain *ipd = ipipe_root_domain;
+
+ return ipd->legacy.handlers[event](event, ipd, data);
+}
+
+void __ipipe_legacy_init_stage(struct ipipe_domain *ipd)
+{
+ int n;
+
+ for (n = 0; n < IPIPE_NR_EVENTS; n++)
+ ipd->legacy.handlers[n] = null_handler;
+
+ if (ipd == &ipipe_root) {
+ ipd->legacy.domid = IPIPE_ROOT_ID;
+ ipd->legacy.priority = IPIPE_ROOT_PRIO;
+ }
+}
+
+notrace asmlinkage int __ipipe_check_root(void) /* hw IRQs off */
+{
+ return __ipipe_root_p;
+}
--- /dev/null
+/* -*- linux-c -*-
+ * linux/kernel/ipipe/core.c
+ *
+ * Copyright (C) 2002-2012 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Architecture-independent I-PIPE core support.
+ */
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kallsyms.h>
+#include <linux/bitops.h>
+#include <linux/tick.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#endif /* CONFIG_PROC_FS */
+#include <linux/ipipe_trace.h>
+#include <linux/ipipe.h>
+#include <ipipe/setup.h>
+
+struct ipipe_domain ipipe_root;
+EXPORT_SYMBOL_GPL(ipipe_root);
+
+struct ipipe_domain *ipipe_head_domain = &ipipe_root;
+EXPORT_SYMBOL_GPL(ipipe_head_domain);
+
+#ifdef CONFIG_SMP
+static __initdata struct ipipe_percpu_domain_data bootup_context = {
+ .status = IPIPE_STALL_MASK,
+ .domain = &ipipe_root,
+};
+#else
+#define bootup_context ipipe_percpu.root
+#endif /* !CONFIG_SMP */
+
+DEFINE_PER_CPU(struct ipipe_percpu_data, ipipe_percpu) = {
+ .root = {
+ .status = IPIPE_STALL_MASK,
+ .domain = &ipipe_root,
+ },
+ .curr = &bootup_context,
+ .hrtimer_irq = -1,
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+ .context_check = 1,
+#endif
+};
+EXPORT_PER_CPU_SYMBOL(ipipe_percpu);
+
+/* Up to 2k of pending work data per CPU. */
+#define WORKBUF_SIZE 2048
+static DEFINE_PER_CPU_ALIGNED(unsigned char[WORKBUF_SIZE], work_buf);
+static DEFINE_PER_CPU(void *, work_tail);
+static unsigned int __ipipe_work_virq;
+
+static void __ipipe_do_work(unsigned int virq, void *cookie);
+
+#ifdef CONFIG_SMP
+
+#define IPIPE_CRITICAL_TIMEOUT 1000000
+static cpumask_t __ipipe_cpu_sync_map;
+static cpumask_t __ipipe_cpu_lock_map;
+static cpumask_t __ipipe_cpu_pass_map;
+static unsigned long __ipipe_critical_lock;
+static IPIPE_DEFINE_SPINLOCK(__ipipe_cpu_barrier);
+static atomic_t __ipipe_critical_count = ATOMIC_INIT(0);
+static void (*__ipipe_cpu_sync) (void);
+
+#else /* !CONFIG_SMP */
+/*
+ * Create an alias to the unique root status, so that arch-dep code
+ * may get fast access to this percpu variable including from
+ * assembly. A hard-coded assumption is that root.status appears at
+ * offset #0 of the ipipe_percpu struct.
+ */
+extern unsigned long __ipipe_root_status
+__attribute__((alias(__stringify(ipipe_percpu))));
+EXPORT_SYMBOL(__ipipe_root_status);
+
+#endif /* !CONFIG_SMP */
+
+IPIPE_DEFINE_SPINLOCK(__ipipe_lock);
+
+static unsigned long __ipipe_virtual_irq_map;
+
+#ifdef CONFIG_PRINTK
+unsigned int __ipipe_printk_virq;
+int __ipipe_printk_bypass;
+#endif /* CONFIG_PRINTK */
+
+#ifdef CONFIG_PROC_FS
+
+struct proc_dir_entry *ipipe_proc_root;
+
+static int __ipipe_version_info_show(struct seq_file *p, void *data)
+{
+ seq_printf(p, "%d\n", IPIPE_CORE_RELEASE);
+ return 0;
+}
+
+static int __ipipe_version_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, __ipipe_version_info_show, NULL);
+}
+
+static const struct file_operations __ipipe_version_proc_ops = {
+ .open = __ipipe_version_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __ipipe_common_info_show(struct seq_file *p, void *data)
+{
+ struct ipipe_domain *ipd = (struct ipipe_domain *)p->private;
+ char handling, lockbit, virtuality;
+ unsigned long ctlbits;
+ unsigned int irq;
+
+ seq_printf(p, " +--- Handled\n");
+ seq_printf(p, " |+-- Locked\n");
+ seq_printf(p, " ||+- Virtual\n");
+ seq_printf(p, " [IRQ] ||| Handler\n");
+
+ mutex_lock(&ipd->mutex);
+
+ for (irq = 0; irq < IPIPE_NR_IRQS; irq++) {
+ ctlbits = ipd->irqs[irq].control;
+ /*
+ * There might be a hole between the last external IRQ
+ * and the first virtual one; skip it.
+ */
+ if (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq))
+ continue;
+
+ if (ipipe_virtual_irq_p(irq)
+ && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map))
+ /* Non-allocated virtual IRQ; skip it. */
+ continue;
+
+ if (ctlbits & IPIPE_HANDLE_MASK)
+ handling = 'H';
+ else
+ handling = '.';
+
+ if (ctlbits & IPIPE_LOCK_MASK)
+ lockbit = 'L';
+ else
+ lockbit = '.';
+
+ if (ipipe_virtual_irq_p(irq))
+ virtuality = 'V';
+ else
+ virtuality = '.';
+
+ if (ctlbits & IPIPE_HANDLE_MASK)
+ seq_printf(p, " %4u: %c%c%c %pf\n",
+ irq, handling, lockbit, virtuality,
+ ipd->irqs[irq].handler);
+ else
+ seq_printf(p, " %4u: %c%c%c\n",
+ irq, handling, lockbit, virtuality);
+ }
+
+ mutex_unlock(&ipd->mutex);
+
+ return 0;
+}
+
+static int __ipipe_common_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, __ipipe_common_info_show, PDE_DATA(inode));
+}
+
+static const struct file_operations __ipipe_info_proc_ops = {
+ .owner = THIS_MODULE,
+ .open = __ipipe_common_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void add_domain_proc(struct ipipe_domain *ipd)
+{
+ proc_create_data(ipd->name, 0444, ipipe_proc_root,
+ &__ipipe_info_proc_ops, ipd);
+}
+
+void remove_domain_proc(struct ipipe_domain *ipd)
+{
+ remove_proc_entry(ipd->name, ipipe_proc_root);
+}
+
+void __init __ipipe_init_proc(void)
+{
+ ipipe_proc_root = proc_mkdir("ipipe", NULL);
+ proc_create("version", 0444, ipipe_proc_root,
+ &__ipipe_version_proc_ops);
+ add_domain_proc(ipipe_root_domain);
+
+ __ipipe_init_tracer();
+}
+
+#else
+
+static inline void add_domain_proc(struct ipipe_domain *ipd)
+{
+}
+
+static inline void remove_domain_proc(struct ipipe_domain *ipd)
+{
+}
+
+#endif /* CONFIG_PROC_FS */
+
+static void init_stage(struct ipipe_domain *ipd)
+{
+ memset(&ipd->irqs, 0, sizeof(ipd->irqs));
+ mutex_init(&ipd->mutex);
+ __ipipe_legacy_init_stage(ipd);
+ __ipipe_hook_critical_ipi(ipd);
+}
+
+static inline int root_context_offset(void)
+{
+ void root_context_not_at_start_of_ipipe_percpu(void);
+
+ /* ipipe_percpu.root must be found at offset #0. */
+
+ if (offsetof(struct ipipe_percpu_data, root))
+ root_context_not_at_start_of_ipipe_percpu();
+
+ return 0;
+}
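The dummy extern declaration above turns a non-zero offset into a link-time error. For comparison, the same invariant could be enforced with the kernel's usual compile-time assertion; a one-line sketch, not part of the patch:

	BUILD_BUG_ON(offsetof(struct ipipe_percpu_data, root) != 0);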
+
+#ifdef CONFIG_SMP
+
+static inline void fixup_percpu_data(void)
+{
+ struct ipipe_percpu_data *p;
+ int cpu;
+
+ /*
+ * ipipe_percpu.curr cannot be assigned statically to
+ * &ipipe_percpu.root, due to the dynamic nature of percpu
+ * data. So we make ipipe_percpu.curr refer to a temporary
+ * boot-up context in static memory, until we can fix up all
+ * context pointers in this routine, once the per-cpu areas
+ * have been set up. The temporary context data is
+ * copied to per_cpu(ipipe_percpu, 0).root in the same move.
+ *
+ * Obviously, this code must run over the boot CPU, before SMP
+ * operations start.
+ */
+ BUG_ON(smp_processor_id() || !irqs_disabled());
+
+ per_cpu(ipipe_percpu, 0).root = bootup_context;
+
+ for_each_possible_cpu(cpu) {
+ p = &per_cpu(ipipe_percpu, cpu);
+ p->curr = &p->root;
+ }
+}
+
+#else /* !CONFIG_SMP */
+
+static inline void fixup_percpu_data(void) { }
+
+#endif /* CONFIG_SMP */
+
+void __init __ipipe_init_early(void)
+{
+ struct ipipe_domain *ipd = &ipipe_root;
+ int cpu;
+
+ fixup_percpu_data();
+
+ /*
+ * A lightweight registration code for the root domain. We are
+ * running on the boot CPU, hw interrupts are off, and
+ * secondary CPUs are still lost in space.
+ */
+ ipd->name = "Linux";
+ ipd->context_offset = root_context_offset();
+ init_stage(ipd);
+
+ /*
+ * Do the early init stuff. First we do the per-arch pipeline
+ * core setup, then we run the per-client setup code. At this
+ * point, the kernel does not provide many services yet: be
+ * careful.
+ */
+ __ipipe_early_core_setup();
+ __ipipe_early_client_setup();
+
+#ifdef CONFIG_PRINTK
+ __ipipe_printk_virq = ipipe_alloc_virq();
+ ipd->irqs[__ipipe_printk_virq].handler = __ipipe_flush_printk;
+ ipd->irqs[__ipipe_printk_virq].cookie = NULL;
+ ipd->irqs[__ipipe_printk_virq].ackfn = NULL;
+ ipd->irqs[__ipipe_printk_virq].control = IPIPE_HANDLE_MASK;
+#endif /* CONFIG_PRINTK */
+
+ __ipipe_work_virq = ipipe_alloc_virq();
+ ipd->irqs[__ipipe_work_virq].handler = __ipipe_do_work;
+ ipd->irqs[__ipipe_work_virq].cookie = NULL;
+ ipd->irqs[__ipipe_work_virq].ackfn = NULL;
+ ipd->irqs[__ipipe_work_virq].control = IPIPE_HANDLE_MASK;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(work_tail, cpu) = per_cpu(work_buf, cpu);
+}
+
+void __init __ipipe_init(void)
+{
+ /* Now we may engage the pipeline. */
+ __ipipe_enable_pipeline();
+
+ pr_info("Interrupt pipeline (release #%d)\n", IPIPE_CORE_RELEASE);
+}
+
+static inline void init_head_stage(struct ipipe_domain *ipd)
+{
+ struct ipipe_percpu_domain_data *p;
+ int cpu;
+
+ /* Must be set first, used in ipipe_percpu_context(). */
+ ipd->context_offset = offsetof(struct ipipe_percpu_data, head);
+
+ for_each_online_cpu(cpu) {
+ p = ipipe_percpu_context(ipd, cpu);
+ memset(p, 0, sizeof(*p));
+ p->domain = ipd;
+ }
+
+ init_stage(ipd);
+}
+
+void ipipe_register_head(struct ipipe_domain *ipd, const char *name)
+{
+ BUG_ON(!ipipe_root_p || ipd == &ipipe_root);
+
+ ipd->name = name;
+ init_head_stage(ipd);
+ barrier();
+ ipipe_head_domain = ipd;
+ add_domain_proc(ipd);
+
+ pr_info("I-pipe: head domain %s registered.\n", name);
+}
+EXPORT_SYMBOL_GPL(ipipe_register_head);
+
+void ipipe_unregister_head(struct ipipe_domain *ipd)
+{
+ BUG_ON(!ipipe_root_p || ipd != ipipe_head_domain);
+
+ ipipe_head_domain = &ipipe_root;
+ smp_mb();
+ mutex_lock(&ipd->mutex);
+ remove_domain_proc(ipd);
+ mutex_unlock(&ipd->mutex);
+
+ pr_info("I-pipe: head domain %s unregistered.\n", ipd->name);
+}
+EXPORT_SYMBOL_GPL(ipipe_unregister_head);
+
+void ipipe_unstall_root(void)
+{
+ struct ipipe_percpu_domain_data *p;
+
+ hard_local_irq_disable();
+
+ /* This helps catching bad usage from assembly call sites. */
+ ipipe_root_only();
+
+ p = ipipe_this_cpu_root_context();
+
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+
+ if (unlikely(__ipipe_ipending_p(p)))
+ __ipipe_sync_stage();
+
+ hard_local_irq_enable();
+}
+EXPORT_SYMBOL(ipipe_unstall_root);
+
+void ipipe_restore_root(unsigned long x)
+{
+ ipipe_root_only();
+
+ if (x)
+ ipipe_stall_root();
+ else
+ ipipe_unstall_root();
+}
+EXPORT_SYMBOL(ipipe_restore_root);
+
+void __ipipe_restore_root_nosync(unsigned long x)
+{
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_root_context();
+
+ if (raw_irqs_disabled_flags(x)) {
+ __set_bit(IPIPE_STALL_FLAG, &p->status);
+ trace_hardirqs_off();
+ } else {
+ trace_hardirqs_on();
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+ }
+}
+EXPORT_SYMBOL_GPL(__ipipe_restore_root_nosync);
+
+void ipipe_unstall_head(void)
+{
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context();
+
+ hard_local_irq_disable();
+
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+
+ if (unlikely(__ipipe_ipending_p(p)))
+ __ipipe_sync_pipeline(ipipe_head_domain);
+
+ hard_local_irq_enable();
+}
+EXPORT_SYMBOL_GPL(ipipe_unstall_head);
+
+void __ipipe_restore_head(unsigned long x) /* hw interrupt off */
+{
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context();
+
+ if (x) {
+#ifdef CONFIG_DEBUG_KERNEL
+ static int warned;
+ if (!warned &&
+ __test_and_set_bit(IPIPE_STALL_FLAG, &p->status)) {
+ /*
+ * Already stalled, although ipipe_restore_head()
+ * should have detected it? Send a warning once.
+ */
+ hard_local_irq_enable();
+ warned = 1;
+ pr_warning("I-pipe: ipipe_restore_head() "
+ "optimization failed.\n");
+ dump_stack();
+ hard_local_irq_disable();
+ }
+#else /* !CONFIG_DEBUG_KERNEL */
+ __set_bit(IPIPE_STALL_FLAG, &p->status);
+#endif /* CONFIG_DEBUG_KERNEL */
+ } else {
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+ if (unlikely(__ipipe_ipending_p(p)))
+ __ipipe_sync_pipeline(ipipe_head_domain);
+ hard_local_irq_enable();
+ }
+}
+EXPORT_SYMBOL_GPL(__ipipe_restore_head);
+
+void __ipipe_spin_lock_irq(ipipe_spinlock_t *lock)
+{
+ hard_local_irq_disable();
+ if (ipipe_smp_p)
+ arch_spin_lock(&lock->arch_lock);
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
+}
+EXPORT_SYMBOL_GPL(__ipipe_spin_lock_irq);
+
+void __ipipe_spin_unlock_irq(ipipe_spinlock_t *lock)
+{
+ if (ipipe_smp_p)
+ arch_spin_unlock(&lock->arch_lock);
+ __clear_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
+ hard_local_irq_enable();
+}
+EXPORT_SYMBOL_GPL(__ipipe_spin_unlock_irq);
+
+unsigned long __ipipe_spin_lock_irqsave(ipipe_spinlock_t *lock)
+{
+ unsigned long flags;
+ int s;
+
+ flags = hard_local_irq_save();
+ if (ipipe_smp_p)
+ arch_spin_lock(&lock->arch_lock);
+ s = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
+
+ return arch_mangle_irq_bits(s, flags);
+}
+EXPORT_SYMBOL_GPL(__ipipe_spin_lock_irqsave);
+
+int __ipipe_spin_trylock_irqsave(ipipe_spinlock_t *lock,
+ unsigned long *x)
+{
+ unsigned long flags;
+ int s;
+
+ flags = hard_local_irq_save();
+ if (ipipe_smp_p && !arch_spin_trylock(&lock->arch_lock)) {
+ hard_local_irq_restore(flags);
+ return 0;
+ }
+ s = __test_and_set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
+ *x = arch_mangle_irq_bits(s, flags);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(__ipipe_spin_trylock_irqsave);
+
+void __ipipe_spin_unlock_irqrestore(ipipe_spinlock_t *lock,
+ unsigned long x)
+{
+ if (ipipe_smp_p)
+ arch_spin_unlock(&lock->arch_lock);
+ if (!arch_demangle_irq_bits(&x))
+ __clear_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
+ hard_local_irq_restore(x);
+}
+EXPORT_SYMBOL_GPL(__ipipe_spin_unlock_irqrestore);
+
+int __ipipe_spin_trylock_irq(ipipe_spinlock_t *lock)
+{
+ unsigned long flags;
+
+ flags = hard_local_irq_save();
+ if (ipipe_smp_p && !arch_spin_trylock(&lock->arch_lock)) {
+ hard_local_irq_restore(flags);
+ return 0;
+ }
+ __set_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
+
+ return 1;
+}
+EXPORT_SYMBOL_GPL(__ipipe_spin_trylock_irq);
+
+void __ipipe_spin_unlock_irqbegin(ipipe_spinlock_t *lock)
+{
+ if (ipipe_smp_p)
+ arch_spin_unlock(&lock->arch_lock);
+}
+
+void __ipipe_spin_unlock_irqcomplete(unsigned long x)
+{
+ if (!arch_demangle_irq_bits(&x))
+ __clear_bit(IPIPE_STALL_FLAG, &__ipipe_current_context->status);
+ hard_local_irq_restore(x);
+}
+
+#ifdef __IPIPE_3LEVEL_IRQMAP
+
+/* Must be called hw IRQs off. */
+static inline void __ipipe_set_irq_held(struct ipipe_percpu_domain_data *p,
+ unsigned int irq)
+{
+ __set_bit(irq, p->irqheld_map);
+ p->irqall[irq]++;
+}
+
+/* Must be called hw IRQs off. */
+void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned int irq)
+{
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_context(ipd);
+ int l0b, l1b;
+
+ IPIPE_WARN_ONCE(!hard_irqs_disabled());
+
+ l0b = irq / (BITS_PER_LONG * BITS_PER_LONG);
+ l1b = irq / BITS_PER_LONG;
+
+ if (likely(!test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))) {
+ __set_bit(irq, p->irqpend_lomap);
+ __set_bit(l1b, p->irqpend_mdmap);
+ __set_bit(l0b, &p->irqpend_himap);
+ } else
+ __set_bit(irq, p->irqheld_map);
+
+ p->irqall[irq]++;
+}
+EXPORT_SYMBOL_GPL(__ipipe_set_irq_pending);
+
+/* Must be called hw IRQs off. */
+void __ipipe_lock_irq(unsigned int irq)
+{
+ struct ipipe_domain *ipd = ipipe_root_domain;
+ struct ipipe_percpu_domain_data *p;
+ int l0b, l1b;
+
+ IPIPE_WARN_ONCE(!hard_irqs_disabled());
+
+ /*
+ * Interrupts requested by a registered head domain cannot be
+ * locked, since this would make no sense: interrupts are
+ * globally masked at CPU level when the head domain is
+ * stalled, so there is no way we could encounter the
+ * situation IRQ locks are handling.
+ */
+ if (test_and_set_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))
+ return;
+
+ l0b = irq / (BITS_PER_LONG * BITS_PER_LONG);
+ l1b = irq / BITS_PER_LONG;
+
+ p = ipipe_this_cpu_context(ipd);
+ if (__test_and_clear_bit(irq, p->irqpend_lomap)) {
+ __set_bit(irq, p->irqheld_map);
+ if (p->irqpend_lomap[l1b] == 0) {
+ __clear_bit(l1b, p->irqpend_mdmap);
+ if (p->irqpend_mdmap[l0b] == 0)
+ __clear_bit(l0b, &p->irqpend_himap);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(__ipipe_lock_irq);
+
+/* Must be called hw IRQs off. */
+void __ipipe_unlock_irq(unsigned int irq)
+{
+ struct ipipe_domain *ipd = ipipe_root_domain;
+ struct ipipe_percpu_domain_data *p;
+ int l0b, l1b, cpu;
+
+ IPIPE_WARN_ONCE(!hard_irqs_disabled());
+
+ if (!test_and_clear_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))
+ return;
+
+ l0b = irq / (BITS_PER_LONG * BITS_PER_LONG);
+ l1b = irq / BITS_PER_LONG;
+
+ for_each_online_cpu(cpu) {
+ p = ipipe_percpu_context(ipd, cpu);
+ if (test_and_clear_bit(irq, p->irqheld_map)) {
+ /* We need atomic ops here: */
+ set_bit(irq, p->irqpend_lomap);
+ set_bit(l1b, p->irqpend_mdmap);
+ set_bit(l0b, &p->irqpend_himap);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(__ipipe_unlock_irq);
+
+static inline int __ipipe_next_irq(struct ipipe_percpu_domain_data *p)
+{
+ int l0b, l1b, l2b;
+ unsigned long l0m, l1m, l2m;
+ unsigned int irq;
+
+ l0m = p->irqpend_himap;
+ if (unlikely(l0m == 0))
+ return -1;
+
+ l0b = __ipipe_ffnz(l0m);
+ l1m = p->irqpend_mdmap[l0b];
+ if (unlikely(l1m == 0))
+ return -1;
+
+ l1b = __ipipe_ffnz(l1m) + l0b * BITS_PER_LONG;
+ l2m = p->irqpend_lomap[l1b];
+ if (unlikely(l2m == 0))
+ return -1;
+
+ l2b = __ipipe_ffnz(l2m);
+ irq = l1b * BITS_PER_LONG + l2b;
+
+ __clear_bit(irq, p->irqpend_lomap);
+ if (p->irqpend_lomap[l1b] == 0) {
+ __clear_bit(l1b, p->irqpend_mdmap);
+ if (p->irqpend_mdmap[l0b] == 0)
+ __clear_bit(l0b, &p->irqpend_himap);
+ }
+
+ return irq;
+}
+
+#else /* __IPIPE_2LEVEL_IRQMAP */
+
+/* Must be called hw IRQs off. */
+static inline void __ipipe_set_irq_held(struct ipipe_percpu_domain_data *p,
+ unsigned int irq)
+{
+ __set_bit(irq, p->irqheld_map);
+ p->irqall[irq]++;
+}
+
+/* Must be called hw IRQs off. */
+void __ipipe_set_irq_pending(struct ipipe_domain *ipd, unsigned int irq)
+{
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_context(ipd);
+ int l0b = irq / BITS_PER_LONG;
+
+ IPIPE_WARN_ONCE(!hard_irqs_disabled());
+
+ if (likely(!test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))) {
+ __set_bit(irq, p->irqpend_lomap);
+ __set_bit(l0b, &p->irqpend_himap);
+ } else
+ __set_bit(irq, p->irqheld_map);
+
+ p->irqall[irq]++;
+}
+EXPORT_SYMBOL_GPL(__ipipe_set_irq_pending);
+
+/* Must be called hw IRQs off. */
+void __ipipe_lock_irq(unsigned int irq)
+{
+ struct ipipe_percpu_domain_data *p;
+ int l0b = irq / BITS_PER_LONG;
+
+ IPIPE_WARN_ONCE(!hard_irqs_disabled());
+
+ if (test_and_set_bit(IPIPE_LOCK_FLAG,
+ &ipipe_root_domain->irqs[irq].control))
+ return;
+
+ p = ipipe_this_cpu_root_context();
+ if (__test_and_clear_bit(irq, p->irqpend_lomap)) {
+ __set_bit(irq, p->irqheld_map);
+ if (p->irqpend_lomap[l0b] == 0)
+ __clear_bit(l0b, &p->irqpend_himap);
+ }
+}
+EXPORT_SYMBOL_GPL(__ipipe_lock_irq);
+
+/* Must be called hw IRQs off. */
+void __ipipe_unlock_irq(unsigned int irq)
+{
+ struct ipipe_domain *ipd = ipipe_root_domain;
+ struct ipipe_percpu_domain_data *p;
+ int l0b = irq / BITS_PER_LONG, cpu;
+
+ IPIPE_WARN_ONCE(!hard_irqs_disabled());
+
+ if (!test_and_clear_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))
+ return;
+
+ for_each_online_cpu(cpu) {
+ p = ipipe_percpu_context(ipd, cpu);
+ if (test_and_clear_bit(irq, p->irqheld_map)) {
+ /* We need atomic ops here: */
+ set_bit(irq, p->irqpend_lomap);
+ set_bit(l0b, &p->irqpend_himap);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(__ipipe_unlock_irq);
+
+static inline int __ipipe_next_irq(struct ipipe_percpu_domain_data *p)
+{
+ unsigned long l0m, l1m;
+ int l0b, l1b;
+
+ l0m = p->irqpend_himap;
+ if (unlikely(l0m == 0))
+ return -1;
+
+ l0b = __ipipe_ffnz(l0m);
+ l1m = p->irqpend_lomap[l0b];
+ if (unlikely(l1m == 0))
+ return -1;
+
+ l1b = __ipipe_ffnz(l1m);
+ __clear_bit(l1b, &p->irqpend_lomap[l0b]);
+ if (p->irqpend_lomap[l0b] == 0)
+ __clear_bit(l0b, &p->irqpend_himap);
+
+ return l0b * BITS_PER_LONG + l1b;
+}
+
+#endif /* __IPIPE_2LEVEL_IRQMAP */
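To make the map indexing above concrete, a short worked example for one IRQ number on a 64-bit build (numbers purely illustrative):

/* Example: irq = 200 with BITS_PER_LONG == 64.
 *
 *   3-level map:  l0b = 200 / (64 * 64) = 0  -> bit 0 of irqpend_himap
 *                 l1b = 200 / 64        = 3  -> bit 3 of irqpend_mdmap
 *                 bit 200 of irqpend_lomap   -> word 3, bit 8
 *
 *   2-level map:  l0b = 200 / 64 = 3         -> bit 3 of irqpend_himap
 *                 bit 200 of irqpend_lomap   -> word 3, bit 8
 *
 * __ipipe_next_irq() then walks top-down: the lowest set bit in the
 * upper map(s) selects the lomap word, and the IRQ number is rebuilt
 * as word_index * BITS_PER_LONG + bit_index.
 */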
+
+void __ipipe_do_sync_pipeline(struct ipipe_domain *top)
+{
+ struct ipipe_percpu_domain_data *p;
+ struct ipipe_domain *ipd;
+
+ /* We must enter over the root domain. */
+ IPIPE_WARN_ONCE(__ipipe_current_domain != ipipe_root_domain);
+ ipd = top;
+next:
+ p = ipipe_this_cpu_context(ipd);
+ if (test_bit(IPIPE_STALL_FLAG, &p->status))
+ return;
+
+ if (__ipipe_ipending_p(p)) {
+ if (ipd == ipipe_root_domain)
+ __ipipe_sync_stage();
+ else {
+ /* Switching to head. */
+ p->coflags &= ~__IPIPE_ALL_R;
+ __ipipe_set_current_context(p);
+ __ipipe_sync_stage();
+ __ipipe_set_current_domain(ipipe_root_domain);
+ }
+ }
+
+ if (ipd != ipipe_root_domain) {
+ ipd = ipipe_root_domain;
+ goto next;
+ }
+}
+EXPORT_SYMBOL_GPL(__ipipe_do_sync_pipeline);
+
+unsigned int ipipe_alloc_virq(void)
+{
+ unsigned long flags, irq = 0;
+ int ipos;
+
+ raw_spin_lock_irqsave(&__ipipe_lock, flags);
+
+ if (__ipipe_virtual_irq_map != ~0) {
+ ipos = ffz(__ipipe_virtual_irq_map);
+ set_bit(ipos, &__ipipe_virtual_irq_map);
+ irq = ipos + IPIPE_VIRQ_BASE;
+ }
+
+ raw_spin_unlock_irqrestore(&__ipipe_lock, flags);
+
+ return irq;
+}
+EXPORT_SYMBOL_GPL(ipipe_alloc_virq);
+
+void ipipe_free_virq(unsigned int virq)
+{
+ clear_bit(virq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map);
+ smp_mb__after_atomic();
+}
+EXPORT_SYMBOL_GPL(ipipe_free_virq);
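Combined with ipipe_request_irq() below and ipipe_raise_irq() further down, a typical virtual IRQ round trip looks as follows; the handler and demo function are placeholders, sketched under the assumption that the caller runs over the root domain:

/* Hedged sketch: allocate a virq, hook it on the root domain, fire it. */
static void my_virq_handler(unsigned int irq, void *cookie)
{
	/* Played from the root domain's interrupt log. */
}

static int my_virq_demo(void)
{
	unsigned int virq = ipipe_alloc_virq();

	if (virq == 0)
		return -EBUSY;		/* no virtual IRQ left */

	ipipe_request_irq(ipipe_root_domain, virq, my_virq_handler,
			  NULL, NULL);
	ipipe_raise_irq(virq);		/* emulate a device IRQ receipt */

	ipipe_free_irq(ipipe_root_domain, virq);
	ipipe_free_virq(virq);

	return 0;
}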
+
+int ipipe_request_irq(struct ipipe_domain *ipd,
+ unsigned int irq,
+ ipipe_irq_handler_t handler,
+ void *cookie,
+ ipipe_irq_ackfn_t ackfn)
+{
+ unsigned long flags;
+ int ret = 0;
+
+#ifndef CONFIG_IPIPE_LEGACY
+ ipipe_root_only();
+#endif /* CONFIG_IPIPE_LEGACY */
+
+ if (handler == NULL ||
+ (irq >= IPIPE_NR_XIRQS && !ipipe_virtual_irq_p(irq)))
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&__ipipe_lock, flags);
+
+ if (ipd->irqs[irq].handler) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (ackfn == NULL)
+ ackfn = ipipe_root_domain->irqs[irq].ackfn;
+
+ ipd->irqs[irq].handler = handler;
+ ipd->irqs[irq].cookie = cookie;
+ ipd->irqs[irq].ackfn = ackfn;
+ ipd->irqs[irq].control = IPIPE_HANDLE_MASK;
+
+ if (irq < IPIPE_NR_ROOT_IRQS)
+ __ipipe_enable_irqdesc(ipd, irq);
+out:
+ raw_spin_unlock_irqrestore(&__ipipe_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipipe_request_irq);
+
+void ipipe_free_irq(struct ipipe_domain *ipd,
+ unsigned int irq)
+{
+ unsigned long flags;
+
+#ifndef CONFIG_IPIPE_LEGACY
+ ipipe_root_only();
+#endif /* CONFIG_IPIPE_LEGACY */
+
+ raw_spin_lock_irqsave(&__ipipe_lock, flags);
+
+ if (ipd->irqs[irq].handler == NULL)
+ goto out;
+
+ ipd->irqs[irq].handler = NULL;
+ ipd->irqs[irq].cookie = NULL;
+ ipd->irqs[irq].ackfn = NULL;
+ ipd->irqs[irq].control = 0;
+
+ if (irq < IPIPE_NR_ROOT_IRQS)
+ __ipipe_disable_irqdesc(ipd, irq);
+out:
+ raw_spin_unlock_irqrestore(&__ipipe_lock, flags);
+}
+EXPORT_SYMBOL_GPL(ipipe_free_irq);
+
+void ipipe_set_hooks(struct ipipe_domain *ipd, int enables)
+{
+ struct ipipe_percpu_domain_data *p;
+ unsigned long flags;
+ int cpu, wait;
+
+ if (ipd == ipipe_root_domain) {
+ IPIPE_WARN(enables & __IPIPE_TRAP_E);
+ enables &= ~__IPIPE_TRAP_E;
+ } else {
+ IPIPE_WARN(enables & __IPIPE_KEVENT_E);
+ enables &= ~__IPIPE_KEVENT_E;
+ }
+
+ flags = ipipe_critical_enter(NULL);
+
+ for_each_online_cpu(cpu) {
+ p = ipipe_percpu_context(ipd, cpu);
+ p->coflags &= ~__IPIPE_ALL_E;
+ p->coflags |= enables;
+ }
+
+ wait = (enables ^ __IPIPE_ALL_E) << __IPIPE_SHIFT_R;
+ if (wait == 0 || !__ipipe_root_p) {
+ ipipe_critical_exit(flags);
+ return;
+ }
+
+ ipipe_this_cpu_context(ipd)->coflags &= ~wait;
+
+ ipipe_critical_exit(flags);
+
+ /*
+ * In case we cleared some hooks over the root domain, we have
+ * to wait for any ongoing execution to finish, since our
+ * caller might subsequently unmap the target domain code.
+ *
+ * We synchronize with the relevant __ipipe_notify_*()
+ * helpers, disabling all hooks before we start waiting for
+ * completion on all CPUs.
+ */
+ for_each_online_cpu(cpu) {
+ while (ipipe_percpu_context(ipd, cpu)->coflags & wait)
+ schedule_timeout_interruptible(HZ / 50);
+ }
+}
+EXPORT_SYMBOL_GPL(ipipe_set_hooks);
+
+int __weak ipipe_fastcall_hook(struct pt_regs *regs)
+{
+ return -1; /* i.e. fall back to slow path. */
+}
+
+int __weak ipipe_syscall_hook(struct ipipe_domain *ipd, struct pt_regs *regs)
+{
+ return 0;
+}
+
+void __ipipe_root_sync(void)
+{
+ struct ipipe_percpu_domain_data *p;
+ unsigned long flags;
+
+ flags = hard_local_irq_save();
+
+ p = ipipe_this_cpu_root_context();
+ if (__ipipe_ipending_p(p))
+ __ipipe_sync_stage();
+
+ hard_local_irq_restore(flags);
+}
+
+int __ipipe_notify_syscall(struct pt_regs *regs)
+{
+ struct ipipe_domain *caller_domain, *this_domain, *ipd;
+ struct ipipe_percpu_domain_data *p;
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * We should definitely not pipeline a syscall with IRQs off.
+ */
+ IPIPE_WARN_ONCE(hard_irqs_disabled());
+
+ flags = hard_local_irq_save();
+ caller_domain = this_domain = __ipipe_current_domain;
+ ipd = ipipe_head_domain;
+next:
+ p = ipipe_this_cpu_context(ipd);
+ if (likely(p->coflags & __IPIPE_SYSCALL_E)) {
+ __ipipe_set_current_context(p);
+ p->coflags |= __IPIPE_SYSCALL_R;
+ hard_local_irq_restore(flags);
+ ret = ipipe_syscall_hook(caller_domain, regs);
+ flags = hard_local_irq_save();
+ p->coflags &= ~__IPIPE_SYSCALL_R;
+ if (__ipipe_current_domain != ipd)
+ /* Account for domain migration. */
+ this_domain = __ipipe_current_domain;
+ else
+ __ipipe_set_current_domain(this_domain);
+ }
+
+ if (this_domain == ipipe_root_domain) {
+ if (ipd != ipipe_root_domain && ret == 0) {
+ ipd = ipipe_root_domain;
+ goto next;
+ }
+ /*
+ * Careful: we may have migrated from head->root, so p
+ * would be ipipe_this_cpu_context(head).
+ */
+ p = ipipe_this_cpu_root_context();
+ if (__ipipe_ipending_p(p))
+ __ipipe_sync_stage();
+ } else if (ipipe_test_thread_flag(TIP_MAYDAY))
+ __ipipe_call_mayday(regs);
+
+ hard_local_irq_restore(flags);
+
+ return ret;
+}
+
+int __weak ipipe_trap_hook(struct ipipe_trap_data *data)
+{
+ return 0;
+}
+
+int __ipipe_notify_trap(int exception, struct pt_regs *regs)
+{
+ struct ipipe_percpu_domain_data *p;
+ struct ipipe_trap_data data;
+ unsigned long flags;
+ int ret = 0;
+
+ flags = hard_local_irq_save();
+
+ /*
+ * We send a notification about all traps raised over a
+ * registered head domain only.
+ */
+ if (__ipipe_root_p)
+ goto out;
+
+ p = ipipe_this_cpu_head_context();
+ if (likely(p->coflags & __IPIPE_TRAP_E)) {
+ p->coflags |= __IPIPE_TRAP_R;
+ hard_local_irq_restore(flags);
+ data.exception = exception;
+ data.regs = regs;
+ ret = ipipe_trap_hook(&data);
+ flags = hard_local_irq_save();
+ p->coflags &= ~__IPIPE_TRAP_R;
+ }
+out:
+ hard_local_irq_restore(flags);
+
+ return ret;
+}
+
+int __weak ipipe_kevent_hook(int kevent, void *data)
+{
+ return 0;
+}
+
+int __ipipe_notify_kevent(int kevent, void *data)
+{
+ struct ipipe_percpu_domain_data *p;
+ unsigned long flags;
+ int ret = 0;
+
+ ipipe_root_only();
+
+ flags = hard_local_irq_save();
+
+ p = ipipe_this_cpu_root_context();
+ if (likely(p->coflags & __IPIPE_KEVENT_E)) {
+ p->coflags |= __IPIPE_KEVENT_R;
+ hard_local_irq_restore(flags);
+ ret = ipipe_kevent_hook(kevent, data);
+ flags = hard_local_irq_save();
+ p->coflags &= ~__IPIPE_KEVENT_R;
+ }
+
+ hard_local_irq_restore(flags);
+
+ return ret;
+}
+
+void __weak ipipe_migration_hook(struct task_struct *p)
+{
+}
+
+#ifdef CONFIG_IPIPE_LEGACY
+
+static inline void complete_domain_migration(void) /* hw IRQs off */
+{
+ if (current->state & TASK_HARDENING) {
+ current->state &= ~TASK_HARDENING;
+ ipipe_set_thread_flag(TIP_HEAD);
+ }
+}
+
+#else /* !CONFIG_IPIPE_LEGACY */
+
+static void complete_domain_migration(void) /* hw IRQs off */
+{
+ struct ipipe_percpu_domain_data *p;
+ struct ipipe_percpu_data *pd;
+ struct task_struct *t;
+
+ ipipe_root_only();
+ pd = raw_cpu_ptr(&ipipe_percpu);
+ t = pd->task_hijacked;
+ if (t == NULL)
+ return;
+
+ pd->task_hijacked = NULL;
+ t->state &= ~TASK_HARDENING;
+ if (t->state != TASK_INTERRUPTIBLE)
+ /* Migration aborted (by signal). */
+ return;
+
+ ipipe_set_ti_thread_flag(task_thread_info(t), TIP_HEAD);
+ p = ipipe_this_cpu_head_context();
+ IPIPE_WARN_ONCE(test_bit(IPIPE_STALL_FLAG, &p->status));
+ /*
+ * hw IRQs are disabled, but the completion hook assumes the
+ * head domain is logically stalled: fix it up.
+ */
+ __set_bit(IPIPE_STALL_FLAG, &p->status);
+ ipipe_migration_hook(t);
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+ if (__ipipe_ipending_p(p))
+ __ipipe_sync_pipeline(p->domain);
+}
+
+#endif /* !CONFIG_IPIPE_LEGACY */
+
+void __ipipe_complete_domain_migration(void)
+{
+ unsigned long flags;
+
+ flags = hard_local_irq_save();
+ complete_domain_migration();
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(__ipipe_complete_domain_migration);
+
+int __ipipe_switch_tail(void)
+{
+ int x;
+
+#ifdef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+ hard_local_irq_disable();
+#endif
+ x = __ipipe_root_p;
+#ifndef CONFIG_IPIPE_LEGACY
+ if (x)
+#endif
+ complete_domain_migration();
+
+#ifndef CONFIG_IPIPE_WANT_PREEMPTIBLE_SWITCH
+ if (x)
+#endif
+ hard_local_irq_enable();
+
+ return !x;
+}
+
+#ifdef CONFIG_IPIPE_HAVE_VM_NOTIFIER
+void __ipipe_notify_vm_preemption(void)
+{
+ struct ipipe_vm_notifier *vmf;
+ struct ipipe_percpu_data *p;
+
+ ipipe_check_irqoff();
+ p = __ipipe_raw_cpu_ptr(&ipipe_percpu);
+ vmf = p->vm_notifier;
+ if (unlikely(vmf))
+ vmf->handler(vmf);
+}
+EXPORT_SYMBOL_GPL(__ipipe_notify_vm_preemption);
+#endif /* CONFIG_IPIPE_HAVE_VM_NOTIFIER */
+
+static void dispatch_irq_head(unsigned int irq) /* hw interrupts off */
+{
+ struct ipipe_percpu_domain_data *p = ipipe_this_cpu_head_context(), *old;
+ struct ipipe_domain *head = p->domain;
+
+ if (unlikely(test_bit(IPIPE_STALL_FLAG, &p->status))) {
+ __ipipe_set_irq_pending(head, irq);
+ return;
+ }
+
+ /* Switch to the head domain if not current. */
+ old = __ipipe_current_context;
+ if (old != p)
+ __ipipe_set_current_context(p);
+
+ p->irqall[irq]++;
+ __set_bit(IPIPE_STALL_FLAG, &p->status);
+ barrier();
+ head->irqs[irq].handler(irq, head->irqs[irq].cookie);
+ __ipipe_run_irqtail(irq);
+ hard_local_irq_disable();
+ p = ipipe_this_cpu_head_context();
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+
+ /* Are we still running in the head domain? */
+ if (likely(__ipipe_current_context == p)) {
+ /* Did we enter this code over the head domain? */
+ if (old->domain == head) {
+ /* Yes, do immediate synchronization. */
+ if (__ipipe_ipending_p(p))
+ __ipipe_sync_stage();
+ return;
+ }
+ __ipipe_set_current_context(ipipe_this_cpu_root_context());
+ }
+
+ /*
+ * We must be running over the root domain, synchronize
+ * the pipeline for high priority IRQs (slow path).
+ */
+ __ipipe_do_sync_pipeline(head);
+}
+
+void __ipipe_dispatch_irq(unsigned int irq, int flags) /* hw interrupts off */
+{
+ struct ipipe_domain *ipd;
+ struct irq_desc *desc;
+ unsigned long control;
+ int chained_irq;
+
+ /*
+ * Survival kit when reading this code:
+ *
+ * - we have two main situations, leading to three cases for
+ * handling interrupts:
+ *
+ * a) the root domain is alone, no registered head domain
+ * => all interrupts go through the interrupt log
+ * b) a head domain is registered
+ * => head domain IRQs go through the fast dispatcher
+ * => root domain IRQs go through the interrupt log
+ *
+ * - when no head domain is registered, ipipe_head_domain ==
+ * ipipe_root_domain == &ipipe_root.
+ *
+ * - the caller tells us whether we should acknowledge this
+ * IRQ. Even virtual IRQs may require acknowledge on some
+ * platforms (e.g. arm/SMP).
+ *
+ * - the caller tells us whether we may try to run the IRQ log
+ * syncer. Typically, demuxed IRQs won't be synced
+ * immediately.
+ *
+ * - multiplex IRQs most likely have a valid acknowledge
+ * handler and we may not be called with IPIPE_IRQF_NOACK
+ * for them. The ack handler for the multiplex IRQ actually
+ * decodes the demuxed interrupts.
+ */
+
+#ifdef CONFIG_IPIPE_DEBUG
+ if (unlikely(irq >= IPIPE_NR_IRQS) ||
+ (irq < IPIPE_NR_ROOT_IRQS && irq_to_desc(irq) == NULL)) {
+ pr_err("I-pipe: spurious interrupt %u\n", irq);
+ return;
+ }
+#endif
+ /*
+ * CAUTION: on some archs, virtual IRQs may have acknowledge
+ * handlers. Multiplex IRQs should have one too.
+ */
+ if (unlikely(irq >= IPIPE_NR_ROOT_IRQS)) {
+ desc = NULL;
+ chained_irq = 0;
+ } else {
+ desc = irq_to_desc(irq);
+ chained_irq = desc ? ipipe_chained_irq_p(desc) : 0;
+ }
+ if (flags & IPIPE_IRQF_NOACK)
+ IPIPE_WARN_ONCE(chained_irq);
+ else {
+ ipd = ipipe_head_domain;
+ control = ipd->irqs[irq].control;
+ if ((control & IPIPE_HANDLE_MASK) == 0)
+ ipd = ipipe_root_domain;
+ if (ipd->irqs[irq].ackfn)
+ ipd->irqs[irq].ackfn(irq, desc);
+ if (chained_irq) {
+ if ((flags & IPIPE_IRQF_NOSYNC) == 0)
+ /* Run demuxed IRQ handlers. */
+ goto sync;
+ return;
+ }
+ }
+
+ /*
+ * Sticky interrupts must be handled early and separately, so
+ * that we always process them on the current domain.
+ */
+ ipd = __ipipe_current_domain;
+ control = ipd->irqs[irq].control;
+ if (control & IPIPE_STICKY_MASK)
+ goto log;
+
+ /*
+ * In case we have no registered head domain
+ * (i.e. ipipe_head_domain == &ipipe_root), we always go
+ * through the interrupt log, and leave the dispatching work
+ * ultimately to __ipipe_sync_pipeline().
+ */
+ ipd = ipipe_head_domain;
+ control = ipd->irqs[irq].control;
+ if (ipd == ipipe_root_domain)
+ /*
+ * The root domain must handle all interrupts, so
+ * testing the HANDLE bit would be pointless.
+ */
+ goto log;
+
+ if (control & IPIPE_HANDLE_MASK) {
+ if (unlikely(flags & IPIPE_IRQF_NOSYNC))
+ __ipipe_set_irq_pending(ipd, irq);
+ else
+ dispatch_irq_head(irq);
+ return;
+ }
+
+ ipd = ipipe_root_domain;
+log:
+ __ipipe_set_irq_pending(ipd, irq);
+
+ if (flags & IPIPE_IRQF_NOSYNC)
+ return;
+
+ /*
+ * Optimize if we preempted a registered high priority head
+ * domain: we don't need to synchronize the pipeline unless
+ * there is a pending interrupt for it.
+ */
+ if (!__ipipe_root_p &&
+ !__ipipe_ipending_p(ipipe_this_cpu_head_context()))
+ return;
+sync:
+ __ipipe_sync_pipeline(ipipe_head_domain);
+}
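To illustrate the flags discussed in the "survival kit" comment above, the two usual call patterns might look like this (a sketch only; hwirq and cascaded_irq stand for whatever the arch entry or demux code hands over):

/* Hedged sketch of the two common call patterns. */
static void dispatch_examples(unsigned int hwirq, unsigned int cascaded_irq)
{
	/* Regular device IRQ taken from the low-level entry code:
	 * acknowledge it and synchronize the log immediately. */
	__ipipe_dispatch_irq(hwirq, 0);

	/* Demuxed IRQ decoded by the parent's ack handler: log it,
	 * but defer syncing until the multiplex IRQ unwinds. */
	__ipipe_dispatch_irq(cascaded_irq, IPIPE_IRQF_NOSYNC);
}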
+
+void ipipe_raise_irq(unsigned int irq)
+{
+ struct ipipe_domain *ipd = ipipe_head_domain;
+ unsigned long flags, control;
+
+ flags = hard_local_irq_save();
+
+ /*
+ * Fast path: raising a virtual IRQ handled by the head
+ * domain.
+ */
+ if (likely(ipipe_virtual_irq_p(irq) && ipd != ipipe_root_domain)) {
+ control = ipd->irqs[irq].control;
+ if (likely(control & IPIPE_HANDLE_MASK)) {
+ dispatch_irq_head(irq);
+ goto out;
+ }
+ }
+
+ /* Emulate regular device IRQ receipt. */
+ __ipipe_dispatch_irq(irq, IPIPE_IRQF_NOACK);
+out:
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(ipipe_raise_irq);
+
+#ifdef CONFIG_PREEMPT
+
+void preempt_schedule_irq(void);
+
+void __sched __ipipe_preempt_schedule_irq(void)
+{
+ struct ipipe_percpu_domain_data *p;
+ unsigned long flags;
+
+ BUG_ON(!hard_irqs_disabled());
+ local_irq_save(flags);
+ hard_local_irq_enable();
+ preempt_schedule_irq(); /* Ok, may reschedule now. */
+ hard_local_irq_disable();
+
+ /*
+ * Flush any pending interrupt that may have been logged after
+ * preempt_schedule_irq() stalled the root stage before
+ * returning to us, and now.
+ */
+ p = ipipe_this_cpu_root_context();
+ if (unlikely(__ipipe_ipending_p(p))) {
+ __preempt_count_add(PREEMPT_ACTIVE);
+ trace_hardirqs_on();
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+ __ipipe_sync_stage();
+ __preempt_count_sub(PREEMPT_ACTIVE);
+ }
+
+ __ipipe_restore_root_nosync(flags);
+}
+
+#else /* !CONFIG_PREEMPT */
+
+#define __ipipe_preempt_schedule_irq() do { } while (0)
+
+#endif /* !CONFIG_PREEMPT */
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+#define root_stall_after_handler() local_irq_disable()
+#else
+#define root_stall_after_handler() do { } while (0)
+#endif
+
+/*
+ * __ipipe_do_sync_stage() -- Flush the pending IRQs for the current
+ * domain (and processor). This routine flushes the interrupt log (see
+ * "Optimistic interrupt protection" from D. Stodolsky et al. for more
+ * on the deferred interrupt scheme). Every interrupt that occurred
+ * while the pipeline was stalled gets played.
+ *
+ * WARNING: CPU migration may occur over this routine.
+ */
+void __ipipe_do_sync_stage(void)
+{
+ struct ipipe_percpu_domain_data *p;
+ struct ipipe_domain *ipd;
+ int irq;
+
+ p = __ipipe_current_context;
+respin:
+ ipd = p->domain;
+
+ __set_bit(IPIPE_STALL_FLAG, &p->status);
+ smp_wmb();
+
+ if (ipd == ipipe_root_domain)
+ trace_hardirqs_off();
+
+ for (;;) {
+ irq = __ipipe_next_irq(p);
+ if (irq < 0)
+ break;
+ /*
+ * Make sure the compiler does not reorder wrongly, so
+ * that all updates to maps are done before the
+ * handler gets called.
+ */
+ barrier();
+
+ if (test_bit(IPIPE_LOCK_FLAG, &ipd->irqs[irq].control))
+ continue;
+
+ if (ipd != ipipe_head_domain)
+ hard_local_irq_enable();
+
+ if (likely(ipd != ipipe_root_domain)) {
+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
+ __ipipe_run_irqtail(irq);
+ hard_local_irq_disable();
+ } else if (ipipe_virtual_irq_p(irq)) {
+ irq_enter();
+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
+ irq_exit();
+ root_stall_after_handler();
+ hard_local_irq_disable();
+ } else {
+ ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie);
+ root_stall_after_handler();
+ hard_local_irq_disable();
+ }
+
+ /*
+ * We may have migrated to a different CPU (1) upon
+ * return from the handler, or downgraded from the
+ * head domain to the root one (2), the opposite way
+ * is NOT allowed though.
+ *
+ * (1) reload the current per-cpu context pointer, so
+ * that we further pull pending interrupts from the
+ * proper per-cpu log.
+ *
+ * (2) check the stall bit to know whether we may
+ * dispatch any interrupt pending for the root domain,
+ * and respin the entire dispatch loop if
+ * so. Otherwise, immediately return to the caller,
+ * _without_ affecting the stall state for the root
+ * domain, since we do not own it at this stage. This
+ * case is basically reflecting what may happen in
+ * dispatch_irq_head() for the fast path.
+ */
+ p = __ipipe_current_context;
+ if (p->domain != ipd) {
+ IPIPE_BUG_ON(ipd == ipipe_root_domain);
+ if (test_bit(IPIPE_STALL_FLAG, &p->status))
+ return;
+ goto respin;
+ }
+ }
+
+ if (ipd == ipipe_root_domain)
+ trace_hardirqs_on();
+
+ __clear_bit(IPIPE_STALL_FLAG, &p->status);
+}
+
+void __ipipe_call_mayday(struct pt_regs *regs)
+{
+ unsigned long flags;
+
+ ipipe_clear_thread_flag(TIP_MAYDAY);
+ flags = hard_local_irq_save();
+ __ipipe_notify_trap(IPIPE_TRAP_MAYDAY, regs);
+ hard_local_irq_restore(flags);
+}
+
+#ifdef CONFIG_SMP
+
+/* Always called with hw interrupts off. */
+void __ipipe_do_critical_sync(unsigned int irq, void *cookie)
+{
+ int cpu = ipipe_processor_id();
+
+ cpumask_set_cpu(cpu, &__ipipe_cpu_sync_map);
+
+ /*
+ * Now we are in sync with the lock requestor running on
+ * another CPU. Enter a spinning wait until he releases the
+ * global lock.
+ */
+ raw_spin_lock(&__ipipe_cpu_barrier);
+
+ /* Got it. Now get out. */
+
+ /* Call the sync routine if any. */
+ if (__ipipe_cpu_sync)
+ __ipipe_cpu_sync();
+
+ cpumask_set_cpu(cpu, &__ipipe_cpu_pass_map);
+
+ raw_spin_unlock(&__ipipe_cpu_barrier);
+
+ cpumask_clear_cpu(cpu, &__ipipe_cpu_sync_map);
+}
+#endif /* CONFIG_SMP */
+
+unsigned long ipipe_critical_enter(void (*syncfn)(void))
+{
+ cpumask_t allbutself __maybe_unused, online __maybe_unused;
+ int cpu __maybe_unused, n __maybe_unused;
+ unsigned long flags, loops __maybe_unused;
+
+ flags = hard_local_irq_save();
+
+ if (num_online_cpus() == 1)
+ return flags;
+
+#ifdef CONFIG_SMP
+
+ cpu = ipipe_processor_id();
+ if (!cpumask_test_and_set_cpu(cpu, &__ipipe_cpu_lock_map)) {
+ while (test_and_set_bit(0, &__ipipe_critical_lock)) {
+ n = 0;
+ hard_local_irq_enable();
+
+ do
+ cpu_relax();
+ while (++n < cpu);
+
+ hard_local_irq_disable();
+ }
+restart:
+ online = *cpu_online_mask;
+ raw_spin_lock(&__ipipe_cpu_barrier);
+
+ __ipipe_cpu_sync = syncfn;
+
+ cpumask_clear(&__ipipe_cpu_pass_map);
+ cpumask_set_cpu(cpu, &__ipipe_cpu_pass_map);
+
+ /*
+ * Send the sync IPI to all processors but the current
+ * one.
+ */
+ cpumask_andnot(&allbutself, &online, &__ipipe_cpu_pass_map);
+ ipipe_send_ipi(IPIPE_CRITICAL_IPI, allbutself);
+ loops = IPIPE_CRITICAL_TIMEOUT;
+
+ while (!cpumask_equal(&__ipipe_cpu_sync_map, &allbutself)) {
+ if (--loops > 0) {
+ cpu_relax();
+ continue;
+ }
+ /*
+ * We ran into a deadlock due to a contended
+ * rwlock. Cancel this round and retry.
+ */
+ __ipipe_cpu_sync = NULL;
+
+ raw_spin_unlock(&__ipipe_cpu_barrier);
+ /*
+ * Ensure all CPUs consumed the IPI to avoid
+ * running __ipipe_cpu_sync prematurely. This
+ * usually resolves the deadlock reason too.
+ */
+ while (!cpumask_equal(&online, &__ipipe_cpu_pass_map))
+ cpu_relax();
+
+ goto restart;
+ }
+ }
+
+ atomic_inc(&__ipipe_critical_count);
+
+#endif /* CONFIG_SMP */
+
+ return flags;
+}
+EXPORT_SYMBOL_GPL(ipipe_critical_enter);
+
+void ipipe_critical_exit(unsigned long flags)
+{
+ if (num_online_cpus() == 1) {
+ hard_local_irq_restore(flags);
+ return;
+ }
+
+#ifdef CONFIG_SMP
+ if (atomic_dec_and_test(&__ipipe_critical_count)) {
+ raw_spin_unlock(&__ipipe_cpu_barrier);
+ while (!cpumask_empty(&__ipipe_cpu_sync_map))
+ cpu_relax();
+ cpumask_clear_cpu(ipipe_processor_id(), &__ipipe_cpu_lock_map);
+ clear_bit(0, &__ipipe_critical_lock);
+ smp_mb__after_atomic();
+ }
+#endif /* CONFIG_SMP */
+
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(ipipe_critical_exit);
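A hedged usage sketch of the global synchronization API above; the callback and the update are placeholders. While the section is held, every other online CPU spins with hard IRQs off in __ipipe_do_critical_sync(), and the optional callback runs on each of them as they leave the barrier:

static void my_sync_handler(void)
{
	/* Runs on each remote CPU when it is released from the barrier. */
}

static void my_global_update(void)
{
	unsigned long flags;

	flags = ipipe_critical_enter(my_sync_handler);
	/* All other online CPUs are held in the barrier, hard IRQs off. */
	/* ... perform the system-wide update ... */
	ipipe_critical_exit(flags);
}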
+
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+
+void ipipe_root_only(void)
+{
+ struct ipipe_domain *this_domain;
+ unsigned long flags;
+
+ flags = hard_smp_local_irq_save();
+
+ this_domain = __ipipe_current_domain;
+ if (likely(this_domain == ipipe_root_domain &&
+ !test_bit(IPIPE_STALL_FLAG, &__ipipe_head_status))) {
+ hard_smp_local_irq_restore(flags);
+ return;
+ }
+
+ if (!__this_cpu_read(ipipe_percpu.context_check)) {
+ hard_smp_local_irq_restore(flags);
+ return;
+ }
+
+ hard_smp_local_irq_restore(flags);
+
+ ipipe_prepare_panic();
+ ipipe_trace_panic_freeze();
+
+ if (this_domain != ipipe_root_domain)
+ pr_err("I-pipe: Detected illicit call from head domain '%s'\n"
+ " into a regular Linux service\n",
+ this_domain->name);
+ else
+ pr_err("I-pipe: Detected stalled head domain, "
+ "probably caused by a bug.\n"
+ " A critical section may have been "
+ "left unterminated.\n");
+ dump_stack();
+ ipipe_trace_panic_dump();
+}
+EXPORT_SYMBOL(ipipe_root_only);
+
+#endif /* CONFIG_IPIPE_DEBUG_CONTEXT */
+
+#if defined(CONFIG_IPIPE_DEBUG_INTERNAL) && defined(CONFIG_SMP)
+
+int notrace __ipipe_check_percpu_access(void)
+{
+ struct ipipe_percpu_domain_data *p;
+ struct ipipe_domain *this_domain;
+ unsigned long flags;
+ int ret = 0;
+
+ flags = hard_local_irq_save_notrace();
+
+ /*
+ * Don't use __ipipe_current_domain here, this would recurse
+ * indefinitely.
+ */
+ this_domain = raw_cpu_read(ipipe_percpu.curr)->domain;
+
+ /*
+ * Only the root domain may implement preemptive CPU migration
+ * of tasks, so anything above in the pipeline should be fine.
+ */
+ if (this_domain != ipipe_root_domain)
+ goto out;
+
+ if (raw_irqs_disabled_flags(flags))
+ goto out;
+
+ /*
+ * Last chance: hw interrupts were enabled on entry while
+ * running over the root domain, but the root stage might be
+ * currently stalled, in which case preemption would be
+ * disabled, and no migration could occur.
+ */
+
+ p = raw_cpu_ptr(&ipipe_percpu.root);
+ if (!preemptible())
+ goto out;
+ /*
+ * Our caller may end up accessing the wrong per-cpu variable
+ * instance due to CPU migration; tell it to complain about
+ * this.
+ */
+ ret = 1;
+out:
+ hard_local_irq_restore_notrace(flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__ipipe_check_percpu_access);
+
+void __ipipe_spin_unlock_debug(unsigned long flags)
+{
+ /*
+ * We catch a nasty issue where spin_unlock_irqrestore() on a
+ * regular kernel spinlock is about to re-enable hw interrupts
+ * in a section entered with hw irqs off. This is clearly the
+ * sign of a massive breakage coming. The usual suspect is a
+ * regular spinlock which was overlooked and used within a
+ * section which must run with hw IRQs disabled.
+ */
+ IPIPE_WARN_ONCE(!raw_irqs_disabled_flags(flags) && hard_irqs_disabled());
+}
+EXPORT_SYMBOL(__ipipe_spin_unlock_debug);
+
+#endif /* CONFIG_IPIPE_DEBUG_INTERNAL && CONFIG_SMP */
+
+void ipipe_prepare_panic(void)
+{
+#ifdef CONFIG_PRINTK
+ __ipipe_printk_bypass = 1;
+#endif
+ ipipe_context_check_off();
+}
+EXPORT_SYMBOL_GPL(ipipe_prepare_panic);
+
+static void __ipipe_do_work(unsigned int virq, void *cookie)
+{
+ struct ipipe_work_header *work;
+ unsigned long flags;
+ void *curr, *tail;
+ int cpu;
+
+ /*
+ * Work is dispatched in enqueuing order. This interrupt
+ * context can't migrate to another CPU.
+ */
+ cpu = smp_processor_id();
+ curr = per_cpu(work_buf, cpu);
+
+ for (;;) {
+ flags = hard_local_irq_save();
+ tail = per_cpu(work_tail, cpu);
+ if (curr == tail) {
+ per_cpu(work_tail, cpu) = per_cpu(work_buf, cpu);
+ hard_local_irq_restore(flags);
+ return;
+ }
+ work = curr;
+ curr += work->size;
+ hard_local_irq_restore(flags);
+ work->handler(work);
+ }
+}
+
+void __ipipe_post_work_root(struct ipipe_work_header *work)
+{
+ unsigned long flags;
+ void *tail;
+ int cpu;
+
+ /*
+ * Subtle: we want to use the head stall/unstall operators,
+ * not the hard_* routines to protect against races. This way,
+ * we ensure that a root-based caller will trigger the virq
+ * handling immediately when unstalling the head stage, as a
+ * result of calling __ipipe_sync_pipeline() under the hood.
+ */
+ flags = ipipe_test_and_stall_head();
+ cpu = ipipe_processor_id();
+ tail = per_cpu(work_tail, cpu);
+
+ if (WARN_ON_ONCE((unsigned char *)tail + work->size >=
+ per_cpu(work_buf, cpu) + WORKBUF_SIZE))
+ goto out;
+
+ /* Work handling is deferred, so data has to be copied. */
+ memcpy(tail, work, work->size);
+ per_cpu(work_tail, cpu) = tail + work->size;
+ ipipe_post_irq_root(__ipipe_work_virq);
+out:
+ ipipe_restore_head(flags);
+}
+EXPORT_SYMBOL_GPL(__ipipe_post_work_root);
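A hedged sketch of queuing deferred root work from the head domain with this mechanism; struct my_work, its handler and the demo function are illustrative, and the handler signature is assumed from the way __ipipe_do_work() invokes it. The header must sit at the start of the enclosing structure, since the whole record is copied from the header pointer for work->size bytes:

struct my_work {
	struct ipipe_work_header work;	/* must come first */
	int value;
};

static void my_work_handler(struct ipipe_work_header *work)
{
	struct my_work *w = container_of(work, struct my_work, work);

	/* Runs later, from the root stage. */
	pr_info("deferred value: %d\n", w->value);
}

static void post_from_head(int value)
{
	struct my_work wd = {
		.work = {
			.size = sizeof(wd),
			.handler = my_work_handler,
		},
		.value = value,
	};

	__ipipe_post_work_root(&wd.work);	/* contents are copied */
}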
+
+void __weak __ipipe_arch_share_current(int flags)
+{
+}
+
+void __ipipe_share_current(int flags)
+{
+ ipipe_root_only();
+
+ __ipipe_arch_share_current(flags);
+}
+EXPORT_SYMBOL_GPL(__ipipe_share_current);
+
+#ifdef CONFIG_KGDB
+bool __ipipe_probe_access;
+
+long ipipe_probe_kernel_read(void *dst, void *src, size_t size)
+{
+ long ret;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ __ipipe_probe_access = true;
+ barrier();
+ ret = __copy_from_user_inatomic(dst,
+ (__force const void __user *)src, size);
+ barrier();
+ __ipipe_probe_access = false;
+ set_fs(old_fs);
+
+ return ret ? -EFAULT : 0;
+}
+
+long ipipe_probe_kernel_write(void *dst, void *src, size_t size)
+{
+ long ret;
+ mm_segment_t old_fs = get_fs();
+
+ set_fs(KERNEL_DS);
+ __ipipe_probe_access = true;
+ barrier();
+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
+ barrier();
+ __ipipe_probe_access = false;
+ set_fs(old_fs);
+
+ return ret ? -EFAULT : 0;
+}
+#endif /* CONFIG_KGDB */
+
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || defined(CONFIG_PROVE_LOCKING) || \
+ defined(CONFIG_PREEMPT_VOLUNTARY) || defined(CONFIG_IPIPE_DEBUG_CONTEXT)
+void __ipipe_uaccess_might_fault(void)
+{
+ struct ipipe_percpu_domain_data *pdd;
+ struct ipipe_domain *ipd;
+ unsigned long flags;
+
+ flags = hard_local_irq_save();
+ ipd = __ipipe_current_domain;
+ if (ipd == ipipe_root_domain) {
+ hard_local_irq_restore(flags);
+ might_fault();
+ return;
+ }
+
+#ifdef CONFIG_IPIPE_DEBUG_CONTEXT
+ pdd = ipipe_this_cpu_context(ipd);
+ WARN_ON_ONCE(hard_irqs_disabled_flags(flags)
+ || test_bit(IPIPE_STALL_FLAG, &pdd->status));
+#else /* !CONFIG_IPIPE_DEBUG_CONTEXT */
+ (void)pdd;
+#endif /* !CONFIG_IPIPE_DEBUG_CONTEXT */
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(__ipipe_uaccess_might_fault);
+#endif
--- /dev/null
+/* -*- linux-c -*-
+ * linux/kernel/ipipe/timer.c
+ *
+ * Copyright (C) 2012 Gilles Chanteperdrix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * I-pipe timer request interface.
+ */
+#include <linux/ipipe.h>
+#include <linux/percpu.h>
+#include <linux/irqdesc.h>
+#include <linux/cpumask.h>
+#include <linux/spinlock.h>
+#include <linux/ipipe_tickdev.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+
+unsigned long __ipipe_hrtimer_freq;
+
+static LIST_HEAD(timers);
+static IPIPE_DEFINE_SPINLOCK(lock);
+
+static DEFINE_PER_CPU(struct ipipe_timer *, percpu_timer);
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+/*
+ * Default request method: switch to oneshot mode if supported.
+ */
+static void ipipe_timer_default_request(struct ipipe_timer *timer, int steal)
+{
+ struct clock_event_device *evtdev = timer->host_timer;
+
+ if (!(evtdev->features & CLOCK_EVT_FEAT_ONESHOT))
+ return;
+
+ if (evtdev->mode != CLOCK_EVT_MODE_ONESHOT) {
+ evtdev->set_mode(CLOCK_EVT_MODE_ONESHOT, evtdev);
+ evtdev->set_next_event(timer->freq / HZ, evtdev);
+ }
+}
+
+/*
+ * Default release method: return the timer to the mode it had when
+ * starting.
+ */
+static void ipipe_timer_default_release(struct ipipe_timer *timer)
+{
+ struct clock_event_device *evtdev = timer->host_timer;
+
+ evtdev->set_mode(evtdev->mode, evtdev);
+ if (evtdev->mode == CLOCK_EVT_MODE_ONESHOT)
+ evtdev->set_next_event(timer->freq / HZ, evtdev);
+}
+
+void ipipe_host_timer_register(struct clock_event_device *evtdev)
+{
+ struct ipipe_timer *timer = evtdev->ipipe_timer;
+
+ if (timer == NULL)
+ return;
+
+ if (timer->request == NULL)
+ timer->request = ipipe_timer_default_request;
+
+ /*
+ * By default, use the same method as the Linux timer; on ARM at
+ * least, most set_next_event handlers are safe to be called
+ * from the Xenomai (head) domain anyway.
+ */
+ if (timer->set == NULL) {
+ timer->timer_set = evtdev;
+ timer->set = (typeof(timer->set))evtdev->set_next_event;
+ }
+
+ if (timer->release == NULL)
+ timer->release = ipipe_timer_default_release;
+
+ if (timer->name == NULL)
+ timer->name = evtdev->name;
+
+ if (timer->rating == 0)
+ timer->rating = evtdev->rating;
+
+ timer->freq = (1000000000ULL * evtdev->mult) >> evtdev->shift;
+
+ if (timer->min_delay_ticks == 0)
+ timer->min_delay_ticks =
+ (evtdev->min_delta_ns * evtdev->mult) >> evtdev->shift;
+
+ if (timer->cpumask == NULL)
+ timer->cpumask = evtdev->cpumask;
+
+ timer->host_timer = evtdev;
+
+ ipipe_timer_register(timer);
+}
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */
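
For illustration, a clockevent driver would presumably hook into the path above as in the minimal sketch below: it only has to attach a struct ipipe_timer to its clock_event_device before registering it, and everything left unset is defaulted from the host device by ipipe_host_timer_register(). All foo_* identifiers and the numeric values are made up, not taken from the patch.

static void foo_timer_ack(void);		/* hypothetical: clears the hw interrupt status */
static struct clock_event_device foo_clkevt;	/* hypothetical host clockevent */

static struct ipipe_timer foo_itimer = {
	.irq = 42,			/* hypothetical hardware IRQ */
	.ack = foo_timer_ack,		/* optional */
	/* .set/.name/.rating/.freq/.cpumask are defaulted from the clockevent */
};

static void __init foo_clkevt_init(unsigned long rate)
{
	/* ->ipipe_timer is the clock_event_device field used at the top of this file */
	foo_clkevt.ipipe_timer = &foo_itimer;
	clockevents_config_and_register(&foo_clkevt, rate, 1, 0x7fffffff);
}
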
+
+/*
+ * Register a timer: timers are kept in a list sorted by decreasing
+ * rating.
+ */
+void ipipe_timer_register(struct ipipe_timer *timer)
+{
+ struct ipipe_timer *t;
+ unsigned long flags;
+
+ if (timer->timer_set == NULL)
+ timer->timer_set = timer;
+
+ if (timer->cpumask == NULL)
+ timer->cpumask = cpumask_of(smp_processor_id());
+
+ raw_spin_lock_irqsave(&lock, flags);
+
+ list_for_each_entry(t, &timers, link) {
+ if (t->rating <= timer->rating) {
+ __list_add(&timer->link, t->link.prev, &t->link);
+ goto done;
+ }
+ }
+ list_add_tail(&timer->link, &timers);
+ done:
+ raw_spin_unlock_irqrestore(&lock, flags);
+}
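
On a setup without a usable clockevent device (or with CONFIG_GENERIC_CLOCKEVENTS disabled), a timer can presumably be registered directly, as in the minimal sketch below. It only fills fields the code in this file touches; the foo_* callbacks are hypothetical, and the .set prototype is inferred from the call site in ipipe_timer_set() further down.

static void foo_request(struct ipipe_timer *timer, int steal);		/* hypothetical */
static void foo_release(struct ipipe_timer *timer);			/* hypothetical */
static int foo_set_next_shot(unsigned long ticks, void *timer);		/* hypothetical */
static void foo_timer_ack(void);					/* hypothetical */

static struct ipipe_timer foo_timer = {
	.irq		 = 42,		/* hypothetical */
	.name		 = "foo-timer",
	.rating		 = 350,
	.freq		 = 24000000,	/* timer ticks per second */
	.min_delay_ticks = 10,
	.request	 = foo_request,	/* called from ipipe_timer_request_sync() */
	.release	 = foo_release,
	.set		 = foo_set_next_shot,
	.ack		 = foo_timer_ack,
	/* .cpumask and .timer_set are defaulted by ipipe_timer_register() */
};

static int __init foo_timer_probe(void)
{
	ipipe_timer_register(&foo_timer);
	return 0;
}
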
+
+static void ipipe_timer_request_sync(void)
+{
+ struct ipipe_timer *timer = __ipipe_raw_cpu_read(percpu_timer);
+ struct clock_event_device *evtdev;
+ int steal;
+
+ if (!timer)
+ return;
+
+ evtdev = timer->host_timer;
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ steal = evtdev != NULL && evtdev->mode != CLOCK_EVT_MODE_UNUSED;
+#else /* !CONFIG_GENERIC_CLOCKEVENTS */
+ steal = 1;
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
+
+ timer->request(timer, steal);
+}
+
+static void config_pcpu_timer(struct ipipe_timer *t, unsigned hrclock_freq)
+{
+ unsigned long long tmp;
+ unsigned hrtimer_freq;
+
+ if (__ipipe_hrtimer_freq != t->freq)
+ __ipipe_hrtimer_freq = t->freq;
+
+ hrtimer_freq = t->freq;
+ if (__ipipe_hrclock_freq > UINT_MAX)
+ hrtimer_freq /= 1000;
+
+ t->c2t_integ = hrtimer_freq / hrclock_freq;
+ tmp = (((unsigned long long)
+ (hrtimer_freq % hrclock_freq)) << 32)
+ + hrclock_freq - 1;
+ do_div(tmp, hrclock_freq);
+ t->c2t_frac = tmp;
+}
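
To make the fixed-point conversion concrete, here is a worked example in plain user-space C under assumed frequencies (a 24 MHz hrclock and a 19.2 MHz timer, neither taken from the patch). It mirrors the computation above and the way ipipe_timer_set() applies the factors further down.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hrclock_freq = 24000000;	/* assumed clock frequency */
	uint32_t hrtimer_freq = 19200000;	/* assumed timer frequency */

	/* Same math as config_pcpu_timer() */
	uint32_t c2t_integ = hrtimer_freq / hrclock_freq;		/* 0 */
	uint32_t c2t_frac = (((uint64_t)(hrtimer_freq % hrclock_freq) << 32)
			     + hrclock_freq - 1) / hrclock_freq;	/* ~0.8 * 2^32 */

	/* Same math as ipipe_timer_set(): 1200 hrclock ticks == 50 us */
	uint32_t cdelay = 1200;
	uint64_t tdelay = (uint64_t)cdelay * c2t_integ
		+ (((uint64_t)cdelay * c2t_frac) >> 32);

	printf("%llu\n", (unsigned long long)tdelay);	/* prints 960, i.e. 50 us at 19.2 MHz */
	return 0;
}
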
+
+/* Set up a timer as the per-cpu I-pipe timer for the given CPU */
+static void install_pcpu_timer(unsigned cpu, unsigned hrclock_freq,
+ struct ipipe_timer *t)
+{
+ per_cpu(ipipe_percpu.hrtimer_irq, cpu) = t->irq;
+ per_cpu(percpu_timer, cpu) = t;
+ config_pcpu_timer(t, hrclock_freq);
+}
+
+static void select_root_only_timer(unsigned cpu, unsigned hrclock_khz,
+ const struct cpumask *mask,
+				   struct ipipe_timer *t)
+{
+ unsigned icpu;
+ struct clock_event_device *evtdev;
+
+ /*
+ * If no ipipe-supported CPU shares an interrupt with the
+ * timer, we do not need to care about it.
+ */
+ for_each_cpu(icpu, mask) {
+ if (t->irq == per_cpu(ipipe_percpu.hrtimer_irq, icpu)) {
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ evtdev = t->host_timer;
+ if (evtdev && evtdev->mode == CLOCK_EVT_MODE_SHUTDOWN)
+ continue;
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */
+ goto found;
+ }
+ }
+
+ return;
+
+found:
+ install_pcpu_timer(cpu, hrclock_khz, t);
+}
+
+/*
+ * Choose per-cpu timers with the highest rating by traversing the
+ * rating-sorted list for each CPU.
+ */
+int ipipe_select_timers(const struct cpumask *mask)
+{
+ unsigned hrclock_freq;
+ unsigned long long tmp;
+ struct ipipe_timer *t;
+ struct clock_event_device *evtdev;
+ unsigned long flags;
+ unsigned cpu;
+ cpumask_t fixup;
+
+ if (!__ipipe_hrclock_ok()) {
+ printk("I-pipe: high-resolution clock not working\n");
+ return -ENODEV;
+ }
+
+ if (__ipipe_hrclock_freq > UINT_MAX) {
+ tmp = __ipipe_hrclock_freq;
+ do_div(tmp, 1000);
+ hrclock_freq = tmp;
+ } else
+ hrclock_freq = __ipipe_hrclock_freq;
+
+ raw_spin_lock_irqsave(&lock, flags);
+
+ /* First, choose timers for the CPUs handled by ipipe */
+ for_each_cpu(cpu, mask) {
+ list_for_each_entry(t, &timers, link) {
+ if (!cpumask_test_cpu(cpu, t->cpumask))
+ continue;
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ evtdev = t->host_timer;
+ if (evtdev && evtdev->mode == CLOCK_EVT_MODE_SHUTDOWN)
+ continue;
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */
+ goto found;
+ }
+
+ printk("I-pipe: could not find timer for cpu #%d\n",
+ cpu);
+ goto err_remove_all;
+found:
+ install_pcpu_timer(cpu, hrclock_freq, t);
+ }
+
+	/*
+	 * Second, check whether we need to fix up any CPUs that are not
+	 * handled by ipipe (but are by Linux) and whose timer interrupt
+	 * may need to be forwarded because it shares its IRQ with an
+	 * ipipe-enabled timer.
+	 */
+ cpumask_andnot(&fixup, cpu_online_mask, mask);
+
+ for_each_cpu(cpu, &fixup) {
+ list_for_each_entry(t, &timers, link) {
+ if (!cpumask_test_cpu(cpu, t->cpumask))
+ continue;
+
+ select_root_only_timer(cpu, hrclock_freq, mask, t);
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&lock, flags);
+
+ flags = ipipe_critical_enter(ipipe_timer_request_sync);
+ ipipe_timer_request_sync();
+ ipipe_critical_exit(flags);
+
+ return 0;
+
+err_remove_all:
+ raw_spin_unlock_irqrestore(&lock, flags);
+
+ for_each_cpu(cpu, mask) {
+ per_cpu(ipipe_percpu.hrtimer_irq, cpu) = -1;
+ per_cpu(percpu_timer, cpu) = NULL;
+ }
+ __ipipe_hrtimer_freq = 0;
+
+ return -ENODEV;
+}
+
+static void ipipe_timer_release_sync(void)
+{
+ struct ipipe_timer *timer = __ipipe_raw_cpu_read(percpu_timer);
+
+ if (timer)
+ timer->release(timer);
+}
+
+void ipipe_timers_release(void)
+{
+ unsigned long flags;
+ unsigned cpu;
+
+ flags = ipipe_critical_enter(ipipe_timer_release_sync);
+ ipipe_timer_release_sync();
+ ipipe_critical_exit(flags);
+
+ for_each_online_cpu(cpu) {
+ per_cpu(ipipe_percpu.hrtimer_irq, cpu) = -1;
+ per_cpu(percpu_timer, cpu) = NULL;
+ __ipipe_hrtimer_freq = 0;
+ }
+}
+
+static void __ipipe_ack_hrtimer_irq(unsigned int irq, struct irq_desc *desc)
+{
+ struct ipipe_timer *timer = __ipipe_raw_cpu_read(percpu_timer);
+
+ if (desc)
+ desc->ipipe_ack(irq, desc);
+ if (timer->ack)
+ timer->ack();
+ if (desc)
+ desc->ipipe_end(irq, desc);
+}
+
+int ipipe_timer_start(void (*tick_handler)(void),
+ void (*emumode)(enum clock_event_mode mode,
+ struct clock_event_device *cdev),
+ int (*emutick)(unsigned long evt,
+ struct clock_event_device *cdev),
+ unsigned cpu)
+{
+ struct clock_event_device *evtdev;
+ struct ipipe_timer *timer;
+ struct irq_desc *desc;
+ unsigned long flags;
+ int steal, ret;
+
+ timer = per_cpu(percpu_timer, cpu);
+ evtdev = timer->host_timer;
+
+ flags = ipipe_critical_enter(NULL);
+
+ ret = ipipe_request_irq(ipipe_head_domain, timer->irq,
+ (ipipe_irq_handler_t)tick_handler,
+ NULL, __ipipe_ack_hrtimer_irq);
+ if (ret < 0 && ret != -EBUSY) {
+ ipipe_critical_exit(flags);
+ return ret;
+ }
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ steal = evtdev != NULL && evtdev->mode != CLOCK_EVT_MODE_UNUSED;
+ if (steal && evtdev->ipipe_stolen == 0) {
+ timer->real_mult = evtdev->mult;
+ timer->real_shift = evtdev->shift;
+ timer->real_set_mode = evtdev->set_mode;
+ timer->real_set_next_event = evtdev->set_next_event;
+ evtdev->mult = 1;
+ evtdev->shift = 0;
+ evtdev->max_delta_ns = UINT_MAX;
+ evtdev->set_mode = emumode;
+ evtdev->set_next_event = emutick;
+ evtdev->ipipe_stolen = 1;
+ }
+
+ ret = evtdev ? evtdev->mode : CLOCK_EVT_MODE_UNUSED;
+#else /* CONFIG_GENERIC_CLOCKEVENTS */
+ steal = 1;
+ ret = 0;
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */
+
+ ipipe_critical_exit(flags);
+
+ desc = irq_to_desc(timer->irq);
+ if (desc && irqd_irq_disabled(&desc->irq_data))
+ ipipe_enable_irq(timer->irq);
+
+ return ret;
+}
+
+void ipipe_timer_stop(unsigned cpu)
+{
+ unsigned long __maybe_unused flags;
+ struct clock_event_device *evtdev;
+ struct ipipe_timer *timer;
+ struct irq_desc *desc;
+
+ timer = per_cpu(percpu_timer, cpu);
+ evtdev = timer->host_timer;
+
+ desc = irq_to_desc(timer->irq);
+ if (desc && irqd_irq_disabled(&desc->irq_data))
+ ipipe_disable_irq(timer->irq);
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+ if (evtdev) {
+ flags = ipipe_critical_enter(NULL);
+
+ if (evtdev->ipipe_stolen) {
+ evtdev->mult = timer->real_mult;
+ evtdev->shift = timer->real_shift;
+ evtdev->set_mode = timer->real_set_mode;
+ evtdev->set_next_event = timer->real_set_next_event;
+ timer->real_mult = timer->real_shift = 0;
+ timer->real_set_mode = NULL;
+ timer->real_set_next_event = NULL;
+ evtdev->ipipe_stolen = 0;
+ }
+
+ ipipe_critical_exit(flags);
+ }
+#endif /* CONFIG_GENERIC_CLOCKEVENTS */
+
+ ipipe_free_irq(ipipe_head_domain, timer->irq);
+}
+
+void ipipe_timer_set(unsigned long cdelay)
+{
+ unsigned long tdelay;
+ struct ipipe_timer *t;
+
+ t = __ipipe_raw_cpu_read(percpu_timer);
+
+	/*
+	 * Even though some architectures may use a 64 bit delay here,
+	 * we voluntarily limit it to 32 bits: 4 billion ticks should be
+	 * enough for now. Should a timer need more, an extra call to
+	 * the tick handler will simply occur after 4 billion ticks.
+	 */
+ if (cdelay > UINT_MAX)
+ cdelay = UINT_MAX;
+
+ tdelay = cdelay;
+ if (t->c2t_integ != 1)
+ tdelay *= t->c2t_integ;
+ if (t->c2t_frac)
+ tdelay += ((unsigned long long)cdelay * t->c2t_frac) >> 32;
+ if (tdelay < t->min_delay_ticks)
+ tdelay = t->min_delay_ticks;
+
+ if (t->set(tdelay, t->timer_set) < 0)
+ ipipe_raise_irq(t->irq);
+}
+EXPORT_SYMBOL_GPL(ipipe_timer_set);
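
Putting the pieces together, a co-kernel would presumably drive this interface roughly as sketched below: select timers once, start one per CPU with its tick handler and emulation callbacks, reprogram from the head-domain handler with ipipe_timer_set(), and tear everything down in reverse order. All foo_* names are hypothetical, and the per-CPU start/stop calls would normally be issued on each CPU (e.g. via an IPI), which the sketch glosses over.

static void foo_tick_handler(void);	/* hypothetical: runs over the head domain and ends
					   with ipipe_timer_set(<delay in hrclock ticks>) */
static void foo_emul_set_mode(enum clock_event_mode mode,
			      struct clock_event_device *cdev);	/* hypothetical emulation */
static int foo_emul_set_tick(unsigned long delay,
			     struct clock_event_device *cdev);		/* hypothetical emulation */

static int foo_enable_timers(void)
{
	int cpu, ret;

	ret = ipipe_select_timers(cpu_online_mask);
	if (ret)
		return ret;

	for_each_online_cpu(cpu)
		ipipe_timer_start(foo_tick_handler, foo_emul_set_mode,
				  foo_emul_set_tick, cpu);
	return 0;
}

static void foo_disable_timers(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		ipipe_timer_stop(cpu);

	ipipe_timers_release();
}
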
+
+const char *ipipe_timer_name(void)
+{
+ return per_cpu(percpu_timer, 0)->name;
+}
+EXPORT_SYMBOL_GPL(ipipe_timer_name);
+
+unsigned ipipe_timer_ns2ticks(struct ipipe_timer *timer, unsigned ns)
+{
+ unsigned long long tmp;
+ BUG_ON(!timer->freq);
+ tmp = (unsigned long long)ns * timer->freq;
+ do_div(tmp, 1000000000);
+ return tmp;
+}
+
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT
+/*
+ * NOTE: The architecture specific code must only call this function
+ * when a clocksource suitable for CLOCK_HOST_REALTIME is enabled.
+ * The event receiver is responsible for providing proper locking.
+ */
+void ipipe_update_hostrt(struct timekeeper *tk)
+{
+ struct tk_read_base *tkr = &tk->tkr_mono;
+ struct clocksource *clock = tkr->clock;
+ struct ipipe_hostrt_data data;
+ struct timespec xt;
+
+ xt.tv_sec = tk->xtime_sec;
+ xt.tv_nsec = (long)(tkr->xtime_nsec >> tkr->shift);
+ ipipe_root_only();
+ data.live = 1;
+ data.cycle_last = tkr->cycle_last;
+ data.mask = clock->mask;
+ data.mult = tkr->mult;
+ data.shift = tkr->shift;
+ data.wall_time_sec = xt.tv_sec;
+ data.wall_time_nsec = xt.tv_nsec;
+ data.wall_to_monotonic.tv_sec = tk->wall_to_monotonic.tv_sec;
+ data.wall_to_monotonic.tv_nsec = tk->wall_to_monotonic.tv_nsec;
+ __ipipe_notify_kevent(IPIPE_KEVT_HOSTRT, &data);
+}
+
+#endif /* CONFIG_IPIPE_HAVE_HOSTRT */
+
+int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
+ bool force);
+
+void __ipipe_timer_refresh_freq(unsigned int hrclock_freq)
+{
+ struct ipipe_timer *t = __ipipe_raw_cpu_read(percpu_timer);
+ unsigned long flags;
+
+ if (t && t->refresh_freq) {
+ t->freq = t->refresh_freq();
+ flags = hard_local_irq_save();
+ config_pcpu_timer(t, hrclock_freq);
+ hard_local_irq_restore(flags);
+ clockevents_program_event(t->host_timer,
+ t->host_timer->next_event, false);
+ }
+}
--- /dev/null
+/* -*- linux-c -*-
+ * kernel/ipipe/tracer.c
+ *
+ * Copyright (C) 2005 Luotao Fu.
+ * 2005-2008 Jan Kiszka.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/vmalloc.h>
+#include <linux/pid.h>
+#include <linux/vermagic.h>
+#include <linux/sched.h>
+#include <linux/ipipe.h>
+#include <linux/ftrace.h>
+#include <asm/uaccess.h>
+
+#define IPIPE_TRACE_PATHS 4 /* <!> Do not lower below 3 */
+#define IPIPE_DEFAULT_ACTIVE 0
+#define IPIPE_DEFAULT_MAX 1
+#define IPIPE_DEFAULT_FROZEN 2
+
+#define IPIPE_TRACE_POINTS (1 << CONFIG_IPIPE_TRACE_SHIFT)
+#define WRAP_POINT_NO(point) ((point) & (IPIPE_TRACE_POINTS-1))
+
+#define IPIPE_DEFAULT_PRE_TRACE 10
+#define IPIPE_DEFAULT_POST_TRACE 10
+#define IPIPE_DEFAULT_BACK_TRACE 100
+
+#define IPIPE_DELAY_NOTE 1000 /* in nanoseconds */
+#define IPIPE_DELAY_WARN 10000 /* in nanoseconds */
+
+#define IPIPE_TFLG_NMI_LOCK 0x0001
+#define IPIPE_TFLG_NMI_HIT 0x0002
+#define IPIPE_TFLG_NMI_FREEZE_REQ 0x0004
+
+#define IPIPE_TFLG_HWIRQ_OFF 0x0100
+#define IPIPE_TFLG_FREEZING 0x0200
+#define IPIPE_TFLG_CURRDOM_SHIFT 10 /* bits 10..11: current domain */
+#define IPIPE_TFLG_CURRDOM_MASK 0x0C00
+#define IPIPE_TFLG_DOMSTATE_SHIFT 12 /* bits 12..15: domain stalled? */
+#define IPIPE_TFLG_DOMSTATE_BITS 1
+
+#define IPIPE_TFLG_DOMAIN_STALLED(point, n) \
+ (point->flags & (1 << (n + IPIPE_TFLG_DOMSTATE_SHIFT)))
+#define IPIPE_TFLG_CURRENT_DOMAIN(point) \
+ ((point->flags & IPIPE_TFLG_CURRDOM_MASK) >> IPIPE_TFLG_CURRDOM_SHIFT)
+
+struct ipipe_trace_point {
+ short type;
+ short flags;
+ unsigned long eip;
+ unsigned long parent_eip;
+ unsigned long v;
+ unsigned long long timestamp;
+};
+
+struct ipipe_trace_path {
+ volatile int flags;
+ int dump_lock; /* separated from flags due to cross-cpu access */
+ int trace_pos; /* next point to fill */
+ int begin, end; /* finalised path begin and end */
+ int post_trace; /* non-zero when in post-trace phase */
+ unsigned long long length; /* max path length in cycles */
+ unsigned long nmi_saved_eip; /* for deferred requests from NMIs */
+ unsigned long nmi_saved_parent_eip;
+ unsigned long nmi_saved_v;
+ struct ipipe_trace_point point[IPIPE_TRACE_POINTS];
+} ____cacheline_aligned_in_smp;
+
+enum ipipe_trace_type
+{
+ IPIPE_TRACE_FUNC = 0,
+ IPIPE_TRACE_BEGIN,
+ IPIPE_TRACE_END,
+ IPIPE_TRACE_FREEZE,
+ IPIPE_TRACE_SPECIAL,
+ IPIPE_TRACE_PID,
+ IPIPE_TRACE_EVENT,
+};
+
+#define IPIPE_TYPE_MASK 0x0007
+#define IPIPE_TYPE_BITS 3
+
+#ifdef CONFIG_IPIPE_TRACE_VMALLOC
+static DEFINE_PER_CPU(struct ipipe_trace_path *, trace_path);
+#else /* !CONFIG_IPIPE_TRACE_VMALLOC */
+static DEFINE_PER_CPU(struct ipipe_trace_path, trace_path[IPIPE_TRACE_PATHS]) =
+ { [0 ... IPIPE_TRACE_PATHS-1] = { .begin = -1, .end = -1 } };
+#endif /* CONFIG_IPIPE_TRACE_VMALLOC */
+
+int ipipe_trace_enable = 0;
+
+static DEFINE_PER_CPU(int, active_path) = { IPIPE_DEFAULT_ACTIVE };
+static DEFINE_PER_CPU(int, max_path) = { IPIPE_DEFAULT_MAX };
+static DEFINE_PER_CPU(int, frozen_path) = { IPIPE_DEFAULT_FROZEN };
+static IPIPE_DEFINE_SPINLOCK(global_path_lock);
+static int pre_trace = IPIPE_DEFAULT_PRE_TRACE;
+static int post_trace = IPIPE_DEFAULT_POST_TRACE;
+static int back_trace = IPIPE_DEFAULT_BACK_TRACE;
+static int verbose_trace = 1;
+static unsigned long trace_overhead;
+
+static unsigned long trigger_begin;
+static unsigned long trigger_end;
+
+static DEFINE_MUTEX(out_mutex);
+static struct ipipe_trace_path *print_path;
+#ifdef CONFIG_IPIPE_TRACE_PANIC
+static struct ipipe_trace_path *panic_path;
+#endif /* CONFIG_IPIPE_TRACE_PANIC */
+static int print_pre_trace;
+static int print_post_trace;
+
+
+static long __ipipe_signed_tsc2us(long long tsc);
+static void
+__ipipe_trace_point_type(char *buf, struct ipipe_trace_point *point);
+static void __ipipe_print_symname(struct seq_file *m, unsigned long eip);
+
+static inline void store_states(struct ipipe_domain *ipd,
+ struct ipipe_trace_point *point, int pos)
+{
+ if (test_bit(IPIPE_STALL_FLAG, &ipipe_this_cpu_context(ipd)->status))
+ point->flags |= 1 << (pos + IPIPE_TFLG_DOMSTATE_SHIFT);
+
+ if (ipd == __ipipe_current_domain)
+ point->flags |= pos << IPIPE_TFLG_CURRDOM_SHIFT;
+}
+
+static notrace void
+__ipipe_store_domain_states(struct ipipe_trace_point *point)
+{
+ store_states(ipipe_root_domain, point, 0);
+ if (ipipe_head_domain != ipipe_root_domain)
+ store_states(ipipe_head_domain, point, 1);
+}
+
+static notrace int __ipipe_get_free_trace_path(int old, int cpu)
+{
+ int new_active = old;
+ struct ipipe_trace_path *tp;
+
+ do {
+ if (++new_active == IPIPE_TRACE_PATHS)
+ new_active = 0;
+ tp = &per_cpu(trace_path, cpu)[new_active];
+ } while (new_active == per_cpu(max_path, cpu) ||
+ new_active == per_cpu(frozen_path, cpu) ||
+ tp->dump_lock);
+
+ return new_active;
+}
+
+static notrace void
+__ipipe_migrate_pre_trace(struct ipipe_trace_path *new_tp,
+ struct ipipe_trace_path *old_tp, int old_pos)
+{
+ int i;
+
+ new_tp->trace_pos = pre_trace+1;
+
+ for (i = new_tp->trace_pos; i > 0; i--)
+ memcpy(&new_tp->point[WRAP_POINT_NO(new_tp->trace_pos-i)],
+ &old_tp->point[WRAP_POINT_NO(old_pos-i)],
+ sizeof(struct ipipe_trace_point));
+
+ /* mark the end (i.e. the point before point[0]) invalid */
+ new_tp->point[IPIPE_TRACE_POINTS-1].eip = 0;
+}
+
+static notrace struct ipipe_trace_path *
+__ipipe_trace_end(int cpu, struct ipipe_trace_path *tp, int pos)
+{
+ struct ipipe_trace_path *old_tp = tp;
+ long active = per_cpu(active_path, cpu);
+ unsigned long long length;
+
+ /* do we have a new worst case? */
+ length = tp->point[tp->end].timestamp -
+ tp->point[tp->begin].timestamp;
+ if (length > per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)].length) {
+ /* we need protection here against other cpus trying
+ to start a proc dump */
+ raw_spin_lock(&global_path_lock);
+
+ /* active path holds new worst case */
+ tp->length = length;
+ per_cpu(max_path, cpu) = active;
+
+ /* find next unused trace path */
+ active = __ipipe_get_free_trace_path(active, cpu);
+
+ raw_spin_unlock(&global_path_lock);
+
+ tp = &per_cpu(trace_path, cpu)[active];
+
+ /* migrate last entries for pre-tracing */
+ __ipipe_migrate_pre_trace(tp, old_tp, pos);
+ }
+
+ return tp;
+}
+
+static notrace struct ipipe_trace_path *
+__ipipe_trace_freeze(int cpu, struct ipipe_trace_path *tp, int pos)
+{
+ struct ipipe_trace_path *old_tp = tp;
+ long active = per_cpu(active_path, cpu);
+ int n;
+
+ /* frozen paths have no core (begin=end) */
+ tp->begin = tp->end;
+
+ /* we need protection here against other cpus trying
+ * to set their frozen path or to start a proc dump */
+ raw_spin_lock(&global_path_lock);
+
+ per_cpu(frozen_path, cpu) = active;
+
+ /* find next unused trace path */
+ active = __ipipe_get_free_trace_path(active, cpu);
+
+ /* check if this is the first frozen path */
+ for_each_possible_cpu(n) {
+ if (n != cpu &&
+ per_cpu(trace_path, n)[per_cpu(frozen_path, n)].end >= 0)
+ tp->end = -1;
+ }
+
+ raw_spin_unlock(&global_path_lock);
+
+ tp = &per_cpu(trace_path, cpu)[active];
+
+ /* migrate last entries for pre-tracing */
+ __ipipe_migrate_pre_trace(tp, old_tp, pos);
+
+ return tp;
+}
+
+void notrace
+__ipipe_trace(enum ipipe_trace_type type, unsigned long eip,
+ unsigned long parent_eip, unsigned long v)
+{
+ struct ipipe_trace_path *tp, *old_tp;
+ int pos, next_pos, begin;
+ struct ipipe_trace_point *point;
+ unsigned long flags;
+ int cpu;
+
+ flags = hard_local_irq_save_notrace();
+
+ cpu = ipipe_processor_id();
+ restart:
+ tp = old_tp = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)];
+
+	/* here starts a race window with NMIs - caught below */
+
+ /* check for NMI recursion */
+ if (unlikely(tp->flags & IPIPE_TFLG_NMI_LOCK)) {
+ tp->flags |= IPIPE_TFLG_NMI_HIT;
+
+ /* first freeze request from NMI context? */
+ if ((type == IPIPE_TRACE_FREEZE) &&
+ !(tp->flags & IPIPE_TFLG_NMI_FREEZE_REQ)) {
+ /* save arguments and mark deferred freezing */
+ tp->flags |= IPIPE_TFLG_NMI_FREEZE_REQ;
+ tp->nmi_saved_eip = eip;
+ tp->nmi_saved_parent_eip = parent_eip;
+ tp->nmi_saved_v = v;
+ }
+ return; /* no need for restoring flags inside IRQ */
+ }
+
+ /* clear NMI events and set lock (atomically per cpu) */
+ tp->flags = (tp->flags & ~(IPIPE_TFLG_NMI_HIT |
+ IPIPE_TFLG_NMI_FREEZE_REQ))
+ | IPIPE_TFLG_NMI_LOCK;
+
+ /* check active_path again - some nasty NMI may have switched
+ * it meanwhile */
+ if (unlikely(tp !=
+ &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)])) {
+ /* release lock on wrong path and restart */
+ tp->flags &= ~IPIPE_TFLG_NMI_LOCK;
+
+ /* there is no chance that the NMI got deferred
+ * => no need to check for pending freeze requests */
+ goto restart;
+ }
+
+ /* get the point buffer */
+ pos = tp->trace_pos;
+ point = &tp->point[pos];
+
+ /* store all trace point data */
+ point->type = type;
+ point->flags = hard_irqs_disabled_flags(flags) ? IPIPE_TFLG_HWIRQ_OFF : 0;
+ point->eip = eip;
+ point->parent_eip = parent_eip;
+ point->v = v;
+ ipipe_read_tsc(point->timestamp);
+
+ __ipipe_store_domain_states(point);
+
+ /* forward to next point buffer */
+ next_pos = WRAP_POINT_NO(pos+1);
+ tp->trace_pos = next_pos;
+
+ /* only mark beginning if we haven't started yet */
+ begin = tp->begin;
+ if (unlikely(type == IPIPE_TRACE_BEGIN) && (begin < 0))
+ tp->begin = pos;
+
+ /* end of critical path, start post-trace if not already started */
+ if (unlikely(type == IPIPE_TRACE_END) &&
+ (begin >= 0) && !tp->post_trace)
+ tp->post_trace = post_trace + 1;
+
+ /* freeze only if the slot is free and we are not already freezing */
+ if ((unlikely(type == IPIPE_TRACE_FREEZE) ||
+ (unlikely(eip >= trigger_begin && eip <= trigger_end) &&
+ type == IPIPE_TRACE_FUNC)) &&
+ per_cpu(trace_path, cpu)[per_cpu(frozen_path, cpu)].begin < 0 &&
+ !(tp->flags & IPIPE_TFLG_FREEZING)) {
+ tp->post_trace = post_trace + 1;
+ tp->flags |= IPIPE_TFLG_FREEZING;
+ }
+
+ /* enforce end of trace in case of overflow */
+ if (unlikely(WRAP_POINT_NO(next_pos + 1) == begin)) {
+ tp->end = pos;
+ goto enforce_end;
+ }
+
+ /* stop tracing this path if we are in post-trace and
+ * a) that phase is over now or
+ * b) a new TRACE_BEGIN came in but we are not freezing this path */
+ if (unlikely((tp->post_trace > 0) && ((--tp->post_trace == 0) ||
+ ((type == IPIPE_TRACE_BEGIN) &&
+ !(tp->flags & IPIPE_TFLG_FREEZING))))) {
+ /* store the path's end (i.e. excluding post-trace) */
+ tp->end = WRAP_POINT_NO(pos - post_trace + tp->post_trace);
+
+ enforce_end:
+ if (tp->flags & IPIPE_TFLG_FREEZING)
+ tp = __ipipe_trace_freeze(cpu, tp, pos);
+ else
+ tp = __ipipe_trace_end(cpu, tp, pos);
+
+ /* reset the active path, maybe already start a new one */
+ tp->begin = (type == IPIPE_TRACE_BEGIN) ?
+ WRAP_POINT_NO(tp->trace_pos - 1) : -1;
+ tp->end = -1;
+ tp->post_trace = 0;
+ tp->flags = 0;
+
+ /* update active_path not earlier to avoid races with NMIs */
+ per_cpu(active_path, cpu) = tp - per_cpu(trace_path, cpu);
+ }
+
+ /* we still have old_tp and point,
+ * let's reset NMI lock and check for catches */
+ old_tp->flags &= ~IPIPE_TFLG_NMI_LOCK;
+ if (unlikely(old_tp->flags & IPIPE_TFLG_NMI_HIT)) {
+		/* well, this late tagging may not be immediately visible to
+		 * other cpus already dumping this path - a minor issue */
+ point->flags |= IPIPE_TFLG_NMI_HIT;
+
+ /* handle deferred freezing from NMI context */
+ if (old_tp->flags & IPIPE_TFLG_NMI_FREEZE_REQ)
+ __ipipe_trace(IPIPE_TRACE_FREEZE, old_tp->nmi_saved_eip,
+ old_tp->nmi_saved_parent_eip,
+ old_tp->nmi_saved_v);
+ }
+
+ hard_local_irq_restore_notrace(flags);
+}
+
+static unsigned long __ipipe_global_path_lock(void)
+{
+ unsigned long flags;
+ int cpu;
+ struct ipipe_trace_path *tp;
+
+ raw_spin_lock_irqsave(&global_path_lock, flags);
+
+ cpu = ipipe_processor_id();
+ restart:
+ tp = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)];
+
+	/* here is a small race window with NMIs - caught below */
+
+ /* clear NMI events and set lock (atomically per cpu) */
+ tp->flags = (tp->flags & ~(IPIPE_TFLG_NMI_HIT |
+ IPIPE_TFLG_NMI_FREEZE_REQ))
+ | IPIPE_TFLG_NMI_LOCK;
+
+ /* check active_path again - some nasty NMI may have switched
+ * it meanwhile */
+ if (tp != &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)]) {
+ /* release lock on wrong path and restart */
+ tp->flags &= ~IPIPE_TFLG_NMI_LOCK;
+
+ /* there is no chance that the NMI got deferred
+ * => no need to check for pending freeze requests */
+ goto restart;
+ }
+
+ return flags;
+}
+
+static void __ipipe_global_path_unlock(unsigned long flags)
+{
+ int cpu;
+ struct ipipe_trace_path *tp;
+
+ /* release spinlock first - it's not involved in the NMI issue */
+ __ipipe_spin_unlock_irqbegin(&global_path_lock);
+
+ cpu = ipipe_processor_id();
+ tp = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)];
+
+ tp->flags &= ~IPIPE_TFLG_NMI_LOCK;
+
+ /* handle deferred freezing from NMI context */
+ if (tp->flags & IPIPE_TFLG_NMI_FREEZE_REQ)
+ __ipipe_trace(IPIPE_TRACE_FREEZE, tp->nmi_saved_eip,
+ tp->nmi_saved_parent_eip, tp->nmi_saved_v);
+
+ /* See __ipipe_spin_lock_irqsave() and friends. */
+ __ipipe_spin_unlock_irqcomplete(flags);
+}
+
+void notrace asmlinkage
+ipipe_trace_asm(enum ipipe_trace_type type, unsigned long eip,
+ unsigned long parent_eip, unsigned long v)
+{
+ if (!ipipe_trace_enable)
+ return;
+ __ipipe_trace(type, eip, parent_eip, v);
+}
+
+void notrace ipipe_trace_begin(unsigned long v)
+{
+ if (!ipipe_trace_enable)
+ return;
+ __ipipe_trace(IPIPE_TRACE_BEGIN, __BUILTIN_RETURN_ADDRESS0,
+ __BUILTIN_RETURN_ADDRESS1, v);
+}
+EXPORT_SYMBOL_GPL(ipipe_trace_begin);
+
+void notrace ipipe_trace_end(unsigned long v)
+{
+ if (!ipipe_trace_enable)
+ return;
+ __ipipe_trace(IPIPE_TRACE_END, __BUILTIN_RETURN_ADDRESS0,
+ __BUILTIN_RETURN_ADDRESS1, v);
+}
+EXPORT_SYMBOL_GPL(ipipe_trace_end);
+
+void notrace ipipe_trace_freeze(unsigned long v)
+{
+ if (!ipipe_trace_enable)
+ return;
+ __ipipe_trace(IPIPE_TRACE_FREEZE, __BUILTIN_RETURN_ADDRESS0,
+ __BUILTIN_RETURN_ADDRESS1, v);
+}
+EXPORT_SYMBOL_GPL(ipipe_trace_freeze);
+
+void notrace ipipe_trace_special(unsigned char id, unsigned long v)
+{
+ if (!ipipe_trace_enable)
+ return;
+ __ipipe_trace(IPIPE_TRACE_SPECIAL | (id << IPIPE_TYPE_BITS),
+ __BUILTIN_RETURN_ADDRESS0,
+ __BUILTIN_RETURN_ADDRESS1, v);
+}
+EXPORT_SYMBOL_GPL(ipipe_trace_special);
+
+void notrace ipipe_trace_pid(pid_t pid, short prio)
+{
+ if (!ipipe_trace_enable)
+ return;
+ __ipipe_trace(IPIPE_TRACE_PID | (prio << IPIPE_TYPE_BITS),
+ __BUILTIN_RETURN_ADDRESS0,
+ __BUILTIN_RETURN_ADDRESS1, pid);
+}
+EXPORT_SYMBOL_GPL(ipipe_trace_pid);
+
+void notrace ipipe_trace_event(unsigned char id, unsigned long delay_tsc)
+{
+ if (!ipipe_trace_enable)
+ return;
+ __ipipe_trace(IPIPE_TRACE_EVENT | (id << IPIPE_TYPE_BITS),
+ __BUILTIN_RETURN_ADDRESS0,
+ __BUILTIN_RETURN_ADDRESS1, delay_tsc);
+}
+EXPORT_SYMBOL_GPL(ipipe_trace_event);
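
The exported helpers above are meant to be dropped into latency-sensitive code paths. Below is a minimal sketch of bracketing a critical section and freezing the back-trace when it overruns a budget; the foo_do_work() helper, the 0xdead cookie and the 100 us budget are made up for illustration.

static void foo_do_work(void);	/* hypothetical */

static void foo_critical_section(void)
{
	unsigned long long t0, t1;

	ipipe_trace_begin(0xdead);	/* arbitrary user value, shows up in the dump */

	ipipe_read_tsc(t0);
	foo_do_work();
	ipipe_read_tsc(t1);

	ipipe_trace_end(0xdead);

	/* Keep the trace around for /proc inspection if we blew the budget. */
	if (ipipe_tsc2us(t1 - t0) > 100)
		ipipe_trace_freeze(t1 - t0);
}
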
+
+int ipipe_trace_max_reset(void)
+{
+ int cpu;
+ unsigned long flags;
+ struct ipipe_trace_path *path;
+ int ret = 0;
+
+ flags = __ipipe_global_path_lock();
+
+ for_each_possible_cpu(cpu) {
+ path = &per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)];
+
+ if (path->dump_lock) {
+ ret = -EBUSY;
+ break;
+ }
+
+ path->begin = -1;
+ path->end = -1;
+ path->trace_pos = 0;
+ path->length = 0;
+ }
+
+ __ipipe_global_path_unlock(flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipipe_trace_max_reset);
+
+int ipipe_trace_frozen_reset(void)
+{
+ int cpu;
+ unsigned long flags;
+ struct ipipe_trace_path *path;
+ int ret = 0;
+
+ flags = __ipipe_global_path_lock();
+
+ for_each_online_cpu(cpu) {
+ path = &per_cpu(trace_path, cpu)[per_cpu(frozen_path, cpu)];
+
+ if (path->dump_lock) {
+ ret = -EBUSY;
+ break;
+ }
+
+ path->begin = -1;
+ path->end = -1;
+ path->trace_pos = 0;
+ path->length = 0;
+ }
+
+ __ipipe_global_path_unlock(flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ipipe_trace_frozen_reset);
+
+static void
+__ipipe_get_task_info(char *task_info, struct ipipe_trace_point *point,
+ int trylock)
+{
+ struct task_struct *task = NULL;
+ char buf[8];
+ int i;
+ int locked = 1;
+
+ if (trylock) {
+ if (!read_trylock(&tasklist_lock))
+ locked = 0;
+ } else
+ read_lock(&tasklist_lock);
+
+ if (locked)
+ task = find_task_by_pid_ns((pid_t)point->v, &init_pid_ns);
+
+ if (task)
+ strncpy(task_info, task->comm, 11);
+ else
+ strcpy(task_info, "-<?>-");
+
+ if (locked)
+ read_unlock(&tasklist_lock);
+
+ for (i = strlen(task_info); i < 11; i++)
+ task_info[i] = ' ';
+
+ sprintf(buf, " %d ", point->type >> IPIPE_TYPE_BITS);
+ strcpy(task_info + (11 - strlen(buf)), buf);
+}
+
+static void
+__ipipe_get_event_date(char *buf,struct ipipe_trace_path *path,
+ struct ipipe_trace_point *point)
+{
+ long time;
+ int type;
+
+ time = __ipipe_signed_tsc2us(point->timestamp -
+ path->point[path->begin].timestamp + point->v);
+ type = point->type >> IPIPE_TYPE_BITS;
+
+ if (type == 0)
+		/*
+		 * Event type #0 is predefined; it stands for the next
+		 * timer tick.
+		 */
+ sprintf(buf, "tick@%-6ld", time);
+ else
+ sprintf(buf, "%3d@%-7ld", type, time);
+}
+
+#ifdef CONFIG_IPIPE_TRACE_PANIC
+
+void ipipe_trace_panic_freeze(void)
+{
+ unsigned long flags;
+ int cpu;
+
+ if (!ipipe_trace_enable)
+ return;
+
+ ipipe_trace_enable = 0;
+ flags = hard_local_irq_save_notrace();
+
+ cpu = ipipe_processor_id();
+
+ panic_path = &per_cpu(trace_path, cpu)[per_cpu(active_path, cpu)];
+
+ hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(ipipe_trace_panic_freeze);
+
+void ipipe_trace_panic_dump(void)
+{
+ int cnt = back_trace;
+ int start, pos;
+ char buf[16];
+
+ if (!panic_path)
+ return;
+
+ ipipe_context_check_off();
+
+ printk("I-pipe tracer log (%d points):\n", cnt);
+
+ start = pos = WRAP_POINT_NO(panic_path->trace_pos-1);
+
+ while (cnt-- > 0) {
+ struct ipipe_trace_point *point = &panic_path->point[pos];
+ long time;
+ char info[16];
+ int i;
+
+ printk(" %c",
+ (point->flags & IPIPE_TFLG_HWIRQ_OFF) ? '|' : ' ');
+
+ for (i = IPIPE_TFLG_DOMSTATE_BITS; i >= 0; i--)
+ printk("%c",
+ (IPIPE_TFLG_CURRENT_DOMAIN(point) == i) ?
+ (IPIPE_TFLG_DOMAIN_STALLED(point, i) ?
+ '#' : '+') :
+ (IPIPE_TFLG_DOMAIN_STALLED(point, i) ?
+ '*' : ' '));
+
+ if (!point->eip)
+ printk("-<invalid>-\n");
+ else {
+ __ipipe_trace_point_type(buf, point);
+ printk("%s", buf);
+
+ switch (point->type & IPIPE_TYPE_MASK) {
+ case IPIPE_TRACE_FUNC:
+ printk(" ");
+ break;
+
+ case IPIPE_TRACE_PID:
+ __ipipe_get_task_info(info,
+ point, 1);
+ printk("%s", info);
+ break;
+
+ case IPIPE_TRACE_EVENT:
+ __ipipe_get_event_date(info,
+ panic_path, point);
+ printk("%s", info);
+ break;
+
+ default:
+ printk("0x%08lx ", point->v);
+ }
+
+ time = __ipipe_signed_tsc2us(point->timestamp -
+ panic_path->point[start].timestamp);
+ printk(" %5ld ", time);
+
+ __ipipe_print_symname(NULL, point->eip);
+ printk(" (");
+ __ipipe_print_symname(NULL, point->parent_eip);
+ printk(")\n");
+ }
+ pos = WRAP_POINT_NO(pos - 1);
+ }
+
+ panic_path = NULL;
+}
+EXPORT_SYMBOL_GPL(ipipe_trace_panic_dump);
+
+#endif /* CONFIG_IPIPE_TRACE_PANIC */
+
+
+/* --- /proc output --- */
+
+static notrace int __ipipe_in_critical_trpath(long point_no)
+{
+ return ((WRAP_POINT_NO(point_no-print_path->begin) <
+ WRAP_POINT_NO(print_path->end-print_path->begin)) ||
+ ((print_path->end == print_path->begin) &&
+ (WRAP_POINT_NO(point_no-print_path->end) >
+ print_post_trace)));
+}
+
+static long __ipipe_signed_tsc2us(long long tsc)
+{
+ unsigned long long abs_tsc;
+ long us;
+
+ if (!__ipipe_hrclock_ok())
+ return 0;
+
+ /* ipipe_tsc2us works on unsigned => handle sign separately */
+ abs_tsc = (tsc >= 0) ? tsc : -tsc;
+ us = ipipe_tsc2us(abs_tsc);
+ if (tsc < 0)
+ return -us;
+ else
+ return us;
+}
+
+static void
+__ipipe_trace_point_type(char *buf, struct ipipe_trace_point *point)
+{
+ switch (point->type & IPIPE_TYPE_MASK) {
+ case IPIPE_TRACE_FUNC:
+ strcpy(buf, "func ");
+ break;
+
+ case IPIPE_TRACE_BEGIN:
+ strcpy(buf, "begin ");
+ break;
+
+ case IPIPE_TRACE_END:
+ strcpy(buf, "end ");
+ break;
+
+ case IPIPE_TRACE_FREEZE:
+ strcpy(buf, "freeze ");
+ break;
+
+ case IPIPE_TRACE_SPECIAL:
+ sprintf(buf, "(0x%02x) ",
+ point->type >> IPIPE_TYPE_BITS);
+ break;
+
+ case IPIPE_TRACE_PID:
+ sprintf(buf, "[%5d] ", (pid_t)point->v);
+ break;
+
+ case IPIPE_TRACE_EVENT:
+ sprintf(buf, "event ");
+ break;
+ }
+}
+
+static void
+__ipipe_print_pathmark(struct seq_file *m, struct ipipe_trace_point *point)
+{
+ char mark = ' ';
+ int point_no = point - print_path->point;
+ int i;
+
+ if (print_path->end == point_no)
+ mark = '<';
+ else if (print_path->begin == point_no)
+ mark = '>';
+ else if (__ipipe_in_critical_trpath(point_no))
+ mark = ':';
+ seq_printf(m, "%c%c", mark,
+ (point->flags & IPIPE_TFLG_HWIRQ_OFF) ? '|' : ' ');
+
+ if (!verbose_trace)
+ return;
+
+ for (i = IPIPE_TFLG_DOMSTATE_BITS; i >= 0; i--)
+ seq_printf(m, "%c",
+ (IPIPE_TFLG_CURRENT_DOMAIN(point) == i) ?
+ (IPIPE_TFLG_DOMAIN_STALLED(point, i) ?
+ '#' : '+') :
+ (IPIPE_TFLG_DOMAIN_STALLED(point, i) ? '*' : ' '));
+}
+
+static void
+__ipipe_print_delay(struct seq_file *m, struct ipipe_trace_point *point)
+{
+ unsigned long delay = 0;
+ int next;
+ char *mark = " ";
+
+ next = WRAP_POINT_NO(point+1 - print_path->point);
+
+ if (next != print_path->trace_pos)
+ delay = ipipe_tsc2ns(print_path->point[next].timestamp -
+ point->timestamp);
+
+ if (__ipipe_in_critical_trpath(point - print_path->point)) {
+ if (delay > IPIPE_DELAY_WARN)
+ mark = "! ";
+ else if (delay > IPIPE_DELAY_NOTE)
+ mark = "+ ";
+ }
+ seq_puts(m, mark);
+
+ if (verbose_trace)
+ seq_printf(m, "%3lu.%03lu%c ", delay/1000, delay%1000,
+ (point->flags & IPIPE_TFLG_NMI_HIT) ? 'N' : ' ');
+ else
+ seq_puts(m, " ");
+}
+
+static void __ipipe_print_symname(struct seq_file *m, unsigned long eip)
+{
+ char namebuf[KSYM_NAME_LEN+1];
+ unsigned long size, offset;
+ const char *sym_name;
+ char *modname;
+
+ sym_name = kallsyms_lookup(eip, &size, &offset, &modname, namebuf);
+
+#ifdef CONFIG_IPIPE_TRACE_PANIC
+ if (!m) {
+ /* panic dump */
+ if (sym_name) {
+ printk("%s+0x%lx", sym_name, offset);
+ if (modname)
+ printk(" [%s]", modname);
+ } else
+ printk("<%08lx>", eip);
+ } else
+#endif /* CONFIG_IPIPE_TRACE_PANIC */
+ {
+ if (sym_name) {
+ if (verbose_trace) {
+ seq_printf(m, "%s+0x%lx", sym_name, offset);
+ if (modname)
+ seq_printf(m, " [%s]", modname);
+ } else
+ seq_puts(m, sym_name);
+ } else
+ seq_printf(m, "<%08lx>", eip);
+ }
+}
+
+static void __ipipe_print_headline(struct seq_file *m)
+{
+ const char *name[2];
+
+ seq_printf(m, "Calibrated minimum trace-point overhead: %lu.%03lu "
+ "us\n\n", trace_overhead/1000, trace_overhead%1000);
+
+ if (verbose_trace) {
+ name[0] = ipipe_root_domain->name;
+ if (ipipe_head_domain != ipipe_root_domain)
+ name[1] = ipipe_head_domain->name;
+ else
+ name[1] = "<unused>";
+
+ seq_printf(m,
+ " +----- Hard IRQs ('|': locked)\n"
+ " |+-- %s\n"
+ " ||+- %s%s\n"
+ " ||| +---------- "
+ "Delay flag ('+': > %d us, '!': > %d us)\n"
+ " ||| | +- "
+ "NMI noise ('N')\n"
+ " ||| | |\n"
+ " Type User Val. Time Delay Function "
+ "(Parent)\n",
+ name[1], name[0],
+ " ('*': domain stalled, '+': current, "
+ "'#': current+stalled)",
+ IPIPE_DELAY_NOTE/1000, IPIPE_DELAY_WARN/1000);
+ } else
+ seq_printf(m,
+ " +--------------- Hard IRQs ('|': locked)\n"
+ " | +- Delay flag "
+ "('+': > %d us, '!': > %d us)\n"
+ " | |\n"
+ " Type Time Function (Parent)\n",
+ IPIPE_DELAY_NOTE/1000, IPIPE_DELAY_WARN/1000);
+}
+
+static void *__ipipe_max_prtrace_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+
+ mutex_lock(&out_mutex);
+
+ if (!n) {
+ struct ipipe_trace_path *tp;
+ unsigned long length_usecs;
+ int points, cpu;
+ unsigned long flags;
+
+ /* protect against max_path/frozen_path updates while we
+ * haven't locked our target path, also avoid recursively
+ * taking global_path_lock from NMI context */
+ flags = __ipipe_global_path_lock();
+
+ /* find the longest of all per-cpu paths */
+ print_path = NULL;
+ for_each_online_cpu(cpu) {
+ tp = &per_cpu(trace_path, cpu)[per_cpu(max_path, cpu)];
+ if ((print_path == NULL) ||
+ (tp->length > print_path->length)) {
+ print_path = tp;
+ break;
+ }
+ }
+ print_path->dump_lock = 1;
+
+ __ipipe_global_path_unlock(flags);
+
+ if (!__ipipe_hrclock_ok()) {
+ seq_printf(m, "No hrclock available, dumping traces disabled\n");
+ return NULL;
+ }
+
+ /* does this path actually contain data? */
+ if (print_path->end == print_path->begin)
+ return NULL;
+
+ /* number of points inside the critical path */
+ points = WRAP_POINT_NO(print_path->end-print_path->begin+1);
+
+ /* pre- and post-tracing length, post-trace length was frozen
+ in __ipipe_trace, pre-trace may have to be reduced due to
+ buffer overrun */
+ print_pre_trace = pre_trace;
+ print_post_trace = WRAP_POINT_NO(print_path->trace_pos -
+ print_path->end - 1);
+ if (points+pre_trace+print_post_trace > IPIPE_TRACE_POINTS - 1)
+ print_pre_trace = IPIPE_TRACE_POINTS - 1 - points -
+ print_post_trace;
+
+ length_usecs = ipipe_tsc2us(print_path->length);
+ seq_printf(m, "I-pipe worst-case tracing service on %s/ipipe release #%d\n"
+ "-------------------------------------------------------------\n",
+ UTS_RELEASE, IPIPE_CORE_RELEASE);
+ seq_printf(m, "CPU: %d, Begin: %lld cycles, Trace Points: "
+ "%d (-%d/+%d), Length: %lu us\n",
+ cpu, print_path->point[print_path->begin].timestamp,
+ points, print_pre_trace, print_post_trace, length_usecs);
+ __ipipe_print_headline(m);
+ }
+
+ /* check if we are inside the trace range */
+ if (n >= WRAP_POINT_NO(print_path->end - print_path->begin + 1 +
+ print_pre_trace + print_post_trace))
+ return NULL;
+
+ /* return the next point to be shown */
+ return &print_path->point[WRAP_POINT_NO(print_path->begin -
+ print_pre_trace + n)];
+}
+
+static void *__ipipe_prtrace_next(struct seq_file *m, void *p, loff_t *pos)
+{
+ loff_t n = ++*pos;
+
+ /* check if we are inside the trace range with the next entry */
+ if (n >= WRAP_POINT_NO(print_path->end - print_path->begin + 1 +
+ print_pre_trace + print_post_trace))
+ return NULL;
+
+ /* return the next point to be shown */
+ return &print_path->point[WRAP_POINT_NO(print_path->begin -
+ print_pre_trace + *pos)];
+}
+
+static void __ipipe_prtrace_stop(struct seq_file *m, void *p)
+{
+ if (print_path)
+ print_path->dump_lock = 0;
+ mutex_unlock(&out_mutex);
+}
+
+static int __ipipe_prtrace_show(struct seq_file *m, void *p)
+{
+ long time;
+ struct ipipe_trace_point *point = p;
+ char buf[16];
+
+ if (!point->eip) {
+ seq_puts(m, "-<invalid>-\n");
+ return 0;
+ }
+
+ __ipipe_print_pathmark(m, point);
+ __ipipe_trace_point_type(buf, point);
+ seq_puts(m, buf);
+ if (verbose_trace)
+ switch (point->type & IPIPE_TYPE_MASK) {
+ case IPIPE_TRACE_FUNC:
+ seq_puts(m, " ");
+ break;
+
+ case IPIPE_TRACE_PID:
+ __ipipe_get_task_info(buf, point, 0);
+ seq_puts(m, buf);
+ break;
+
+ case IPIPE_TRACE_EVENT:
+ __ipipe_get_event_date(buf, print_path, point);
+ seq_puts(m, buf);
+ break;
+
+ default:
+ seq_printf(m, "0x%08lx ", point->v);
+ }
+
+ time = __ipipe_signed_tsc2us(point->timestamp -
+ print_path->point[print_path->begin].timestamp);
+ seq_printf(m, "%5ld", time);
+
+ __ipipe_print_delay(m, point);
+ __ipipe_print_symname(m, point->eip);
+ seq_puts(m, " (");
+ __ipipe_print_symname(m, point->parent_eip);
+ seq_puts(m, ")\n");
+
+ return 0;
+}
+
+static struct seq_operations __ipipe_max_ptrace_ops = {
+ .start = __ipipe_max_prtrace_start,
+ .next = __ipipe_prtrace_next,
+ .stop = __ipipe_prtrace_stop,
+ .show = __ipipe_prtrace_show
+};
+
+static int __ipipe_max_prtrace_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &__ipipe_max_ptrace_ops);
+}
+
+static ssize_t
+__ipipe_max_reset(struct file *file, const char __user *pbuffer,
+ size_t count, loff_t *data)
+{
+ mutex_lock(&out_mutex);
+ ipipe_trace_max_reset();
+ mutex_unlock(&out_mutex);
+
+ return count;
+}
+
+static const struct file_operations __ipipe_max_prtrace_fops = {
+ .open = __ipipe_max_prtrace_open,
+ .read = seq_read,
+ .write = __ipipe_max_reset,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static void *__ipipe_frozen_prtrace_start(struct seq_file *m, loff_t *pos)
+{
+ loff_t n = *pos;
+
+ mutex_lock(&out_mutex);
+
+ if (!n) {
+ struct ipipe_trace_path *tp;
+ int cpu;
+ unsigned long flags;
+
+ /* protect against max_path/frozen_path updates while we
+ * haven't locked our target path, also avoid recursively
+ * taking global_path_lock from NMI context */
+ flags = __ipipe_global_path_lock();
+
+ /* find the first of all per-cpu frozen paths */
+ print_path = NULL;
+ for_each_online_cpu(cpu) {
+ tp = &per_cpu(trace_path, cpu)[per_cpu(frozen_path, cpu)];
+ if (tp->end >= 0) {
+ print_path = tp;
+ break;
+ }
+ }
+ if (print_path)
+ print_path->dump_lock = 1;
+
+ __ipipe_global_path_unlock(flags);
+
+ if (!print_path)
+ return NULL;
+
+ if (!__ipipe_hrclock_ok()) {
+ seq_printf(m, "No hrclock available, dumping traces disabled\n");
+ return NULL;
+ }
+
+ /* back- and post-tracing length, post-trace length was frozen
+ in __ipipe_trace, back-trace may have to be reduced due to
+ buffer overrun */
+		print_pre_trace = back_trace-1; /* subtract the freeze point */
+ print_post_trace = WRAP_POINT_NO(print_path->trace_pos -
+ print_path->end - 1);
+ if (1+pre_trace+print_post_trace > IPIPE_TRACE_POINTS - 1)
+ print_pre_trace = IPIPE_TRACE_POINTS - 2 -
+ print_post_trace;
+
+ seq_printf(m, "I-pipe frozen back-tracing service on %s/ipipe release #%d\n"
+ "------------------------------------------------------------\n",
+ UTS_RELEASE, IPIPE_CORE_RELEASE);
+ seq_printf(m, "CPU: %d, Freeze: %lld cycles, Trace Points: %d (+%d)\n",
+ cpu, print_path->point[print_path->begin].timestamp,
+ print_pre_trace+1, print_post_trace);
+ __ipipe_print_headline(m);
+ }
+
+ /* check if we are inside the trace range */
+ if (n >= print_pre_trace + 1 + print_post_trace)
+ return NULL;
+
+ /* return the next point to be shown */
+ return &print_path->point[WRAP_POINT_NO(print_path->begin-
+ print_pre_trace+n)];
+}
+
+static struct seq_operations __ipipe_frozen_ptrace_ops = {
+ .start = __ipipe_frozen_prtrace_start,
+ .next = __ipipe_prtrace_next,
+ .stop = __ipipe_prtrace_stop,
+ .show = __ipipe_prtrace_show
+};
+
+static int __ipipe_frozen_prtrace_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &__ipipe_frozen_ptrace_ops);
+}
+
+static ssize_t
+__ipipe_frozen_ctrl(struct file *file, const char __user *pbuffer,
+ size_t count, loff_t *data)
+{
+ char *end, buf[16];
+ int val;
+ int n;
+
+ n = (count > sizeof(buf) - 1) ? sizeof(buf) - 1 : count;
+
+ if (copy_from_user(buf, pbuffer, n))
+ return -EFAULT;
+
+ buf[n] = '\0';
+ val = simple_strtol(buf, &end, 0);
+
+ if (((*end != '\0') && !isspace(*end)) || (val < 0))
+ return -EINVAL;
+
+ mutex_lock(&out_mutex);
+ ipipe_trace_frozen_reset();
+ if (val > 0)
+ ipipe_trace_freeze(-1);
+ mutex_unlock(&out_mutex);
+
+ return count;
+}
+
+static const struct file_operations __ipipe_frozen_prtrace_fops = {
+ .open = __ipipe_frozen_prtrace_open,
+ .read = seq_read,
+ .write = __ipipe_frozen_ctrl,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static int __ipipe_rd_proc_val(struct seq_file *p, void *data)
+{
+ seq_printf(p, "%u\n", *(int *)p->private);
+ return 0;
+}
+
+static ssize_t
+__ipipe_wr_proc_val(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ struct seq_file *p = file->private_data;
+ char *end, buf[16];
+ int val;
+ int n;
+
+ n = (count > sizeof(buf) - 1) ? sizeof(buf) - 1 : count;
+
+ if (copy_from_user(buf, buffer, n))
+ return -EFAULT;
+
+ buf[n] = '\0';
+ val = simple_strtol(buf, &end, 0);
+
+ if (((*end != '\0') && !isspace(*end)) || (val < 0))
+ return -EINVAL;
+
+ mutex_lock(&out_mutex);
+ *(int *)p->private = val;
+ mutex_unlock(&out_mutex);
+
+ return count;
+}
+
+static int __ipipe_rw_proc_val_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, __ipipe_rd_proc_val, PDE_DATA(inode));
+}
+
+static const struct file_operations __ipipe_rw_proc_val_ops = {
+ .open = __ipipe_rw_proc_val_open,
+ .read = seq_read,
+ .write = __ipipe_wr_proc_val,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void __init
+__ipipe_create_trace_proc_val(struct proc_dir_entry *trace_dir,
+ const char *name, int *value_ptr)
+{
+ proc_create_data(name, 0644, trace_dir, &__ipipe_rw_proc_val_ops,
+ value_ptr);
+}
+
+static int __ipipe_rd_trigger(struct seq_file *p, void *data)
+{
+ char str[KSYM_SYMBOL_LEN];
+
+ if (trigger_begin) {
+ sprint_symbol(str, trigger_begin);
+ seq_printf(p, "%s\n", str);
+ }
+ return 0;
+}
+
+static ssize_t
+__ipipe_wr_trigger(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ char buf[KSYM_SYMBOL_LEN];
+ unsigned long begin, end;
+
+ if (count > sizeof(buf) - 1)
+ count = sizeof(buf) - 1;
+ if (copy_from_user(buf, buffer, count))
+ return -EFAULT;
+ buf[count] = 0;
+ if (buf[count-1] == '\n')
+ buf[count-1] = 0;
+
+ begin = kallsyms_lookup_name(buf);
+ if (!begin || !kallsyms_lookup_size_offset(begin, &end, NULL))
+ return -ENOENT;
+ end += begin - 1;
+
+ mutex_lock(&out_mutex);
+ /* invalidate the current range before setting a new one */
+ trigger_end = 0;
+ wmb();
+ ipipe_trace_frozen_reset();
+
+ /* set new range */
+ trigger_begin = begin;
+ wmb();
+ trigger_end = end;
+ mutex_unlock(&out_mutex);
+
+ return count;
+}
+
+static int __ipipe_rw_trigger_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, __ipipe_rd_trigger, NULL);
+}
+
+static const struct file_operations __ipipe_rw_trigger_ops = {
+ .open = __ipipe_rw_trigger_open,
+ .read = seq_read,
+ .write = __ipipe_wr_trigger,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
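
The trigger file arms a freeze on a given kernel function: the symbol written to it is resolved through kallsyms, and any traced address inside [trigger_begin, trigger_end] freezes the current path in __ipipe_trace(). From user space this is presumably driven as sketched below; the /proc/ipipe prefix assumes ipipe_proc_root maps to /proc/ipipe, which is not shown in this hunk, and do_nanosleep is just an example symbol.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/ipipe/trace/trigger", "w");

	if (!f)
		return 1;
	fprintf(f, "do_nanosleep\n");	/* freeze whenever this function is traced */
	fclose(f);
	return 0;
}

Note that the trigger only fires on IPIPE_TRACE_FUNC points, so it takes effect when function tracing is enabled (the mcount-based path further down).
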
+
+
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT
+static void notrace
+ipipe_trace_function(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct pt_regs *regs)
+{
+ if (!ipipe_trace_enable)
+ return;
+ __ipipe_trace(IPIPE_TRACE_FUNC, ip, parent_ip, 0);
+}
+
+static struct ftrace_ops ipipe_trace_ops = {
+ .func = ipipe_trace_function,
+ .flags = FTRACE_OPS_FL_IPIPE_EXCLUSIVE,
+};
+
+static ssize_t __ipipe_wr_enable(struct file *file, const char __user *buffer,
+ size_t count, loff_t *data)
+{
+ char *end, buf[16];
+ int val;
+ int n;
+
+ n = (count > sizeof(buf) - 1) ? sizeof(buf) - 1 : count;
+
+ if (copy_from_user(buf, buffer, n))
+ return -EFAULT;
+
+ buf[n] = '\0';
+ val = simple_strtol(buf, &end, 0);
+
+ if (((*end != '\0') && !isspace(*end)) || (val < 0))
+ return -EINVAL;
+
+ mutex_lock(&out_mutex);
+
+ if (ipipe_trace_enable) {
+ if (!val)
+ unregister_ftrace_function(&ipipe_trace_ops);
+ } else if (val)
+ register_ftrace_function(&ipipe_trace_ops);
+
+ ipipe_trace_enable = val;
+
+ mutex_unlock(&out_mutex);
+
+ return count;
+}
+
+static const struct file_operations __ipipe_rw_enable_ops = {
+ .open = __ipipe_rw_proc_val_open,
+ .read = seq_read,
+ .write = __ipipe_wr_enable,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
+
+extern struct proc_dir_entry *ipipe_proc_root;
+
+void __init __ipipe_tracer_hrclock_initialized(void)
+{
+ unsigned long long start, end, min = ULLONG_MAX;
+ int i;
+
+#ifdef CONFIG_IPIPE_TRACE_VMALLOC
+ if (!per_cpu(trace_path, 0))
+ return;
+#endif
+ /* Calculate minimum overhead of __ipipe_trace() */
+ hard_local_irq_disable();
+ for (i = 0; i < 100; i++) {
+ ipipe_read_tsc(start);
+ __ipipe_trace(IPIPE_TRACE_FUNC, __BUILTIN_RETURN_ADDRESS0,
+ __BUILTIN_RETURN_ADDRESS1, 0);
+ ipipe_read_tsc(end);
+
+ end -= start;
+ if (end < min)
+ min = end;
+ }
+ hard_local_irq_enable();
+ trace_overhead = ipipe_tsc2ns(min);
+}
+
+void __init __ipipe_init_tracer(void)
+{
+ struct proc_dir_entry *trace_dir;
+#ifdef CONFIG_IPIPE_TRACE_VMALLOC
+ int cpu, path;
+#endif /* CONFIG_IPIPE_TRACE_VMALLOC */
+
+#ifdef CONFIG_IPIPE_TRACE_VMALLOC
+ for_each_possible_cpu(cpu) {
+ struct ipipe_trace_path *tp_buf;
+
+ tp_buf = vmalloc_node(sizeof(struct ipipe_trace_path) *
+ IPIPE_TRACE_PATHS, cpu_to_node(cpu));
+ if (!tp_buf) {
+ pr_err("I-pipe: "
+ "insufficient memory for trace buffer.\n");
+ return;
+ }
+ memset(tp_buf, 0,
+ sizeof(struct ipipe_trace_path) * IPIPE_TRACE_PATHS);
+ for (path = 0; path < IPIPE_TRACE_PATHS; path++) {
+ tp_buf[path].begin = -1;
+ tp_buf[path].end = -1;
+ }
+ per_cpu(trace_path, cpu) = tp_buf;
+ }
+#endif /* CONFIG_IPIPE_TRACE_VMALLOC */
+
+ if (__ipipe_hrclock_ok() && !trace_overhead)
+ __ipipe_tracer_hrclock_initialized();
+
+#ifdef CONFIG_IPIPE_TRACE_ENABLE
+ ipipe_trace_enable = 1;
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT
+ ftrace_enabled = 1;
+ register_ftrace_function(&ipipe_trace_ops);
+#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
+#endif /* CONFIG_IPIPE_TRACE_ENABLE */
+
+ trace_dir = proc_mkdir("trace", ipipe_proc_root);
+
+ proc_create("max", 0644, trace_dir, &__ipipe_max_prtrace_fops);
+ proc_create("frozen", 0644, trace_dir, &__ipipe_frozen_prtrace_fops);
+
+ proc_create("trigger", 0644, trace_dir, &__ipipe_rw_trigger_ops);
+
+ __ipipe_create_trace_proc_val(trace_dir, "pre_trace_points",
+ &pre_trace);
+ __ipipe_create_trace_proc_val(trace_dir, "post_trace_points",
+ &post_trace);
+ __ipipe_create_trace_proc_val(trace_dir, "back_trace_points",
+ &back_trace);
+ __ipipe_create_trace_proc_val(trace_dir, "verbose",
+ &verbose_trace);
+#ifdef CONFIG_IPIPE_TRACE_MCOUNT
+ proc_create_data("enable", 0644, trace_dir, &__ipipe_rw_enable_ops,
+ &ipipe_trace_enable);
+#else /* !CONFIG_IPIPE_TRACE_MCOUNT */
+ __ipipe_create_trace_proc_val(trace_dir, "enable",
+ &ipipe_trace_enable);
+#endif /* !CONFIG_IPIPE_TRACE_MCOUNT */
+}
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
+#include <linux/ipipe.h>
#include <trace/events/irq.h>
}
EXPORT_SYMBOL(irq_set_chip_data);
-struct irq_data *irq_get_irq_data(unsigned int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
-
- return desc ? &desc->irq_data : NULL;
-}
-EXPORT_SYMBOL_GPL(irq_get_irq_data);
-
static void irq_state_clr_disabled(struct irq_desc *desc)
{
irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
irq_domain_activate_irq(&desc->irq_data);
if (desc->irq_data.chip->irq_startup) {
+ unsigned long flags = hard_cond_local_irq_save();
ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
irq_state_clr_masked(desc);
+ hard_cond_local_irq_restore(flags);
+#ifdef CONFIG_IPIPE
+ desc->istate &= ~IPIPE_IRQS_NEEDS_STARTUP;
+#endif
} else {
irq_enable(desc);
}
{
irq_state_set_disabled(desc);
desc->depth = 1;
- if (desc->irq_data.chip->irq_shutdown)
+ if (desc->irq_data.chip->irq_shutdown) {
desc->irq_data.chip->irq_shutdown(&desc->irq_data);
- else if (desc->irq_data.chip->irq_disable)
+#ifdef CONFIG_IPIPE
+ desc->istate |= IPIPE_IRQS_NEEDS_STARTUP;
+#endif
+ } else if (desc->irq_data.chip->irq_disable)
desc->irq_data.chip->irq_disable(&desc->irq_data);
else
desc->irq_data.chip->irq_mask(&desc->irq_data);
void irq_enable(struct irq_desc *desc)
{
+ unsigned long flags = hard_cond_local_irq_save();
irq_state_clr_disabled(desc);
if (desc->irq_data.chip->irq_enable)
desc->irq_data.chip->irq_enable(&desc->irq_data);
else
desc->irq_data.chip->irq_unmask(&desc->irq_data);
irq_state_clr_masked(desc);
+ hard_cond_local_irq_restore(flags);
}
/**
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
+ unsigned long flags = hard_cond_local_irq_save();
if (desc->irq_data.chip->irq_enable)
desc->irq_data.chip->irq_enable(&desc->irq_data);
else
desc->irq_data.chip->irq_unmask(&desc->irq_data);
cpumask_set_cpu(cpu, desc->percpu_enabled);
+ hard_cond_local_irq_restore(flags);
}
void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
void unmask_irq(struct irq_desc *desc)
{
+ unsigned long flags;
+
if (desc->irq_data.chip->irq_unmask) {
+ flags = hard_cond_local_irq_save();
desc->irq_data.chip->irq_unmask(&desc->irq_data);
irq_state_clr_masked(desc);
+ hard_cond_local_irq_restore(flags);
}
}
handle_level_irq(unsigned int irq, struct irq_desc *desc)
{
raw_spin_lock(&desc->lock);
+#ifndef CONFIG_IPIPE
mask_ack_irq(desc);
+#endif
if (!irq_may_run(desc))
goto out_unlock;
static inline void preflow_handler(struct irq_desc *desc) { }
#endif
-static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
+#ifdef CONFIG_IPIPE
+static void cond_release_fasteoi_irq(struct irq_desc *desc,
+ struct irq_chip *chip)
+{
+ if (chip->irq_release &&
+ !irqd_irq_disabled(&desc->irq_data) && !desc->threads_oneshot)
+ chip->irq_release(&desc->irq_data);
+}
+#endif /* CONFIG_IPIPE */
+
+static inline void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
if (!(desc->istate & IRQS_ONESHOT)) {
chip->irq_eoi(&desc->irq_data);
goto out;
}
+#ifndef CONFIG_IPIPE
if (desc->istate & IRQS_ONESHOT)
mask_irq(desc);
+#endif
preflow_handler(desc);
handle_irq_event(desc);
+#ifdef CONFIG_IPIPE
+ /*
+ * IRQCHIP_EOI_IF_HANDLED is ignored as the I-pipe always
+ * sends EOI.
+ */
+ cond_release_fasteoi_irq(desc, chip);
+#else /* !CONFIG_IPIPE */
cond_unmask_eoi_irq(desc, chip);
-
+#endif /* !CONFIG_IPIPE */
raw_spin_unlock(&desc->lock);
return;
out:
kstat_incr_irqs_this_cpu(irq, desc);
/* Start handling the irq */
+#ifndef CONFIG_IPIPE
desc->irq_data.chip->irq_ack(&desc->irq_data);
+#endif
do {
if (unlikely(!desc->action)) {
kstat_incr_irqs_this_cpu(irq, desc);
+#ifdef CONFIG_IPIPE
+ (void)chip;
+
+ handle_irq_event_percpu(desc, desc->action);
+
+ if ((desc->percpu_enabled == NULL ||
+ cpumask_test_cpu(smp_processor_id(), desc->percpu_enabled)) &&
+ !irqd_irq_masked(&desc->irq_data) &&
+ !desc->threads_oneshot &&
+ desc->ipipe_end)
+ desc->ipipe_end(desc->irq_data.irq, desc);
+#else
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
+#endif
}
/**
kstat_incr_irqs_this_cpu(irq, desc);
+#ifndef CONFIG_IPIPE
if (chip->irq_ack)
chip->irq_ack(&desc->irq_data);
+#else
+ (void)chip;
+#endif
trace_irq_handler_entry(irq, action);
res = action->handler(irq, dev_id);
trace_irq_handler_exit(irq, action, res);
+#ifndef CONFIG_IPIPE
if (chip->irq_eoi)
chip->irq_eoi(&desc->irq_data);
+#else
+ if ((desc->percpu_enabled == NULL ||
+ cpumask_test_cpu(smp_processor_id(), desc->percpu_enabled)) &&
+ !irqd_irq_masked(&desc->irq_data) &&
+ !desc->threads_oneshot &&
+ desc->ipipe_end)
+ desc->ipipe_end(desc->irq_data.irq, desc);
+#endif
+}
+
+#ifdef CONFIG_IPIPE
+
+void __ipipe_ack_level_irq(unsigned irq, struct irq_desc *desc)
+{
+ mask_ack_irq(desc);
+}
+
+void __ipipe_end_level_irq(unsigned irq, struct irq_desc *desc)
+{
+ desc->irq_data.chip->irq_unmask(&desc->irq_data);
+}
+
+void __ipipe_ack_fasteoi_irq(unsigned irq, struct irq_desc *desc)
+{
+ desc->irq_data.chip->irq_hold(&desc->irq_data);
+}
+
+void __ipipe_end_fasteoi_irq(unsigned irq, struct irq_desc *desc)
+{
+ if (desc->irq_data.chip->irq_release)
+ desc->irq_data.chip->irq_release(&desc->irq_data);
+}
+
+void __ipipe_ack_edge_irq(unsigned irq, struct irq_desc *desc)
+{
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+}
+
+void __ipipe_ack_percpu_irq(unsigned irq, struct irq_desc *desc)
+{
+ if (desc->irq_data.chip->irq_ack)
+ desc->irq_data.chip->irq_ack(&desc->irq_data);
+
+ if (desc->irq_data.chip->irq_eoi)
+ desc->irq_data.chip->irq_eoi(&desc->irq_data);
+}
+
+void __ipipe_nop_irq(unsigned irq, struct irq_desc *desc)
+{
+}
+
+void __ipipe_chained_irq(unsigned irq, struct irq_desc *desc)
+{
+ /*
+ * XXX: Do NOT fold this into __ipipe_nop_irq(), see
+ * ipipe_chained_irq_p().
+ */
+}
+
+static void __ipipe_ack_bad_irq(unsigned irq, struct irq_desc *desc)
+{
+ handle_bad_irq(irq, desc);
+ WARN_ON_ONCE(1);
}
+irq_flow_handler_t
+__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained)
+{
+ if (unlikely(handle == NULL)) {
+ desc->ipipe_ack = __ipipe_ack_bad_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ } else {
+ if (is_chained) {
+ desc->ipipe_ack = handle;
+ desc->ipipe_end = __ipipe_nop_irq;
+ handle = __ipipe_chained_irq;
+ } else if (handle == handle_simple_irq) {
+ desc->ipipe_ack = __ipipe_nop_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ } else if (handle == handle_level_irq) {
+ desc->ipipe_ack = __ipipe_ack_level_irq;
+ desc->ipipe_end = __ipipe_end_level_irq;
+ } else if (handle == handle_edge_irq) {
+ desc->ipipe_ack = __ipipe_ack_edge_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ } else if (handle == handle_fasteoi_irq) {
+ desc->ipipe_ack = __ipipe_ack_fasteoi_irq;
+ desc->ipipe_end = __ipipe_end_fasteoi_irq;
+ } else if (handle == handle_percpu_irq ||
+ handle == handle_percpu_devid_irq) {
+ if (irq_desc_get_chip(desc) &&
+ irq_desc_get_chip(desc)->irq_hold) {
+ desc->ipipe_ack = __ipipe_ack_fasteoi_irq;
+ desc->ipipe_end = __ipipe_end_fasteoi_irq;
+ } else {
+ desc->ipipe_ack = __ipipe_ack_percpu_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ }
+ } else if (irq_desc_get_chip(desc) == &no_irq_chip) {
+ desc->ipipe_ack = __ipipe_nop_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ } else {
+ desc->ipipe_ack = __ipipe_ack_bad_irq;
+ desc->ipipe_end = __ipipe_nop_irq;
+ }
+ }
+
+ /* Suppress intermediate trampoline routine. */
+ ipipe_root_domain->irqs[desc->irq_data.irq].ackfn = desc->ipipe_ack;
+
+ return handle;
+}
+
+void ipipe_enable_irq(unsigned int irq)
+{
+ struct irq_desc *desc;
+ struct irq_chip *chip;
+ unsigned long flags;
+
+ desc = irq_to_desc(irq);
+ if (desc == NULL)
+ return;
+
+ chip = irq_desc_get_chip(desc);
+
+ if (chip->irq_startup && (desc->istate & IPIPE_IRQS_NEEDS_STARTUP)) {
+
+ ipipe_root_only();
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+ if (desc->istate & IPIPE_IRQS_NEEDS_STARTUP) {
+ desc->istate &= ~IPIPE_IRQS_NEEDS_STARTUP;
+ chip->irq_startup(&desc->irq_data);
+ }
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+ return;
+ }
+
+ if (WARN_ON_ONCE(chip->irq_enable == NULL && chip->irq_unmask == NULL))
+ return;
+
+ if (chip->irq_enable)
+ chip->irq_enable(&desc->irq_data);
+ else
+ chip->irq_unmask(&desc->irq_data);
+}
+EXPORT_SYMBOL_GPL(ipipe_enable_irq);
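
A head-domain client would typically call ipipe_enable_irq() right after installing its handler. The sketch below relies on the ipipe_request_irq() interface of the I-pipe core; my_head_handler and my_cookie are placeholders, not names from this patch.

/* Hedged sketch: the first ipipe_enable_irq() call on a line also performs
 * the deferred chip->irq_startup(), courtesy of IPIPE_IRQS_NEEDS_STARTUP. */
ret = ipipe_request_irq(ipipe_head_domain, irq,
			my_head_handler, my_cookie, NULL);
if (ret == 0)
	ipipe_enable_irq(irq);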
+
+#else /* !CONFIG_IPIPE */
+
+irq_flow_handler_t
+__fixup_irq_handler(struct irq_desc *desc, irq_flow_handler_t handle, int is_chained)
+{
+ return handle;
+}
+
+#endif /* !CONFIG_IPIPE */
+EXPORT_SYMBOL_GPL(__fixup_irq_handler);
+
void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
const char *name)
goto out;
}
+ handle = __fixup_irq_handler(desc, handle, is_chained);
+
/* Uninstall? */
if (handle == handle_bad_irq) {
if (desc->irq_data.chip != &no_irq_chip)
data->chip->irq_eoi(data);
}
+#ifdef CONFIG_IPIPE
+void irq_chip_hold_parent(struct irq_data *data)
+{
+ data = data->parent_data;
+ data->chip->irq_hold(data);
+}
+
+void irq_chip_release_parent(struct irq_data *data)
+{
+ data = data->parent_data;
+ data->chip->irq_release(data);
+}
+#endif
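
These two helpers mirror the existing irq_chip_*_parent() forwarders for hierarchical irqdomains. A hypothetical leaf chip stacking on such a domain would simply route the new hold/release hooks to its parent, e.g.:

/* Hypothetical chip; every field follows the usual mainline pattern, only
 * the irq_hold/irq_release hooks are specific to this patch. */
static struct irq_chip example_msi_chip = {
	.name		= "example-msi",
	.irq_mask	= irq_chip_mask_parent,
	.irq_unmask	= irq_chip_unmask_parent,
	.irq_eoi	= irq_chip_eoi_parent,
#ifdef CONFIG_IPIPE
	.irq_hold	= irq_chip_hold_parent,
	.irq_release	= irq_chip_release_parent,
#endif
};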
+
/**
* irq_chip_set_affinity_parent - Set affinity on the parent interrupt
* @data: Pointer to interrupt specific data
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.disable);
*ct->mask_cache &= ~mask;
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
/**
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
*ct->mask_cache |= mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit);
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
*ct->mask_cache &= ~mask;
irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit);
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.enable);
*ct->mask_cache |= mask;
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
/**
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.ack);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit);
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = ~d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.ack);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
/**
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.mask);
irq_reg_writel(gc, mask, ct->regs.ack);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
/**
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ unsigned long flags;
u32 mask = d->mask;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
irq_reg_writel(gc, mask, ct->regs.eoi);
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
}
/**
int irq_gc_set_wake(struct irq_data *d, unsigned int on)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
u32 mask = d->mask;
if (!(mask & gc->wake_enabled))
return -EINVAL;
- irq_gc_lock(gc);
+ flags = irq_gc_lock(gc);
if (on)
gc->wake_active |= mask;
else
gc->wake_active &= ~mask;
- irq_gc_unlock(gc);
+ irq_gc_unlock(gc, flags);
return 0;
}
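
All of these hunks assume irq_gc_lock()/irq_gc_unlock() were changed to hand the IRQ flags back to the caller. A minimal sketch of that assumed shape follows; the authoritative definitions live in include/linux/irq.h of the patch, where the lock is presumably made pipeline-aware so the generic-chip register updates stay atomic against head-domain interrupts.

/* Assumed shape only, not the patch's actual definitions. */
static inline unsigned long irq_gc_lock(struct irq_chip_generic *gc)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&gc->lock, flags);
	return flags;
}

static inline void irq_gc_unlock(struct irq_chip_generic *gc, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&gc->lock, flags);
}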
IRQS_WAITING = 0x00000080,
IRQS_PENDING = 0x00000200,
IRQS_SUSPENDED = 0x00000800,
+#ifdef CONFIG_IPIPE
+ IPIPE_IRQS_NEEDS_STARTUP= 0x80000000,
+#endif
};
#include "debug.h"
for_each_possible_cpu(cpu)
*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
desc_smp_init(desc, node);
+#ifdef CONFIG_IPIPE
+ desc->istate |= IPIPE_IRQS_NEEDS_STARTUP;
+#endif
}
int nr_irqs = NR_IRQS;
return arch_early_irq_init();
}
+#ifndef CONFIG_IPIPE
struct irq_desc *irq_to_desc(unsigned int irq)
{
return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);
+#endif /* !CONFIG_IPIPE */
static void free_desc(unsigned int irq)
{
desc->threads_oneshot &= ~action->thread_mask;
+#ifndef CONFIG_IPIPE
if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
irqd_irq_masked(&desc->irq_data))
unmask_threaded_irq(desc);
+#else /* CONFIG_IPIPE */
+ if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data))
+ desc->ipipe_end(desc->irq_data.irq, desc);
+#endif /* CONFIG_IPIPE */
out_unlock:
raw_spin_unlock_irq(&desc->lock);
* already enabled, yet we find the hardware thinks they are in fact
* enabled.. someone messed up their IRQ state tracing.
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
return;
/*
* So we're supposed to get called after you mask local IRQs, but for
* some reason the hardware doesn't quite think you did a proper job.
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
return;
if (curr->hardirqs_enabled) {
* We fancy IRQs being disabled here, see softirq.c, avoids
* funny state and nesting things.
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
return;
if (curr->softirqs_enabled) {
/*
* We fancy IRQs being disabled here, see softirq.c
*/
- if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+ if (DEBUG_LOCKS_WARN_ON(!irqs_disabled() && !hard_irqs_disabled()))
return;
if (curr->softirqs_enabled) {
* even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
* not re-enabled during lock-acquire (which the preempt-spin-ops do):
*/
-#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || \
+ defined(CONFIG_DEBUG_LOCK_ALLOC) || \
+ defined(CONFIG_IPIPE)
/*
* The __lock_function inlines are taken from
* include/linux/spinlock_api_smp.h
bool ret = true;
if (module) {
- preempt_disable();
+ unsigned long flags = hard_preempt_disable();
/* Note: here, we can fail to get a reference */
if (likely(module_is_live(module) &&
atomic_inc_not_zero(&module->refcnt) != 0))
else
ret = false;
- preempt_enable();
+ hard_preempt_enable(flags);
}
return ret;
}
int ret;
if (module) {
- preempt_disable();
+ unsigned long flags = hard_preempt_disable();
ret = atomic_dec_if_positive(&module->refcnt);
WARN_ON(ret < 0); /* Failed to put refcount */
trace_module_put(module, _RET_IP_);
- preempt_enable();
+ hard_preempt_enable(flags);
}
}
EXPORT_SYMBOL(module_put);
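
The hard_preempt_disable()/hard_preempt_enable() pair used above comes from the I-pipe core; the sketch below only captures the assumed semantics (interrupts disabled in the CPU for every pipeline stage, the preemption count touched only when running over the root domain) and is not the patch's literal definition.

/* Assumed semantics, sketch only. */
#define hard_preempt_disable()				\
	({						\
		unsigned long __flags__;		\
		__flags__ = hard_local_irq_save();	\
		if (__ipipe_root_p)			\
			preempt_disable();		\
		__flags__;				\
	})

#define hard_preempt_enable(__flags__)			\
	do {						\
		if (__ipipe_root_p)			\
			preempt_enable_no_resched();	\
		hard_local_irq_restore(__flags__);	\
	} while (0)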
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/console.h>
+#include <linux/ipipe_trace.h>
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18
{
tracing_off();
/* can't trust the integrity of the kernel anymore: */
+ ipipe_trace_panic_freeze();
+ ipipe_disable_context_check();
debug_locks_off();
do_oops_enter_exit();
}
goto Enable_cpus;
local_irq_disable();
+ hard_cond_local_irq_disable();
error = syscore_suspend();
if (error) {
goto Enable_cpus;
local_irq_disable();
+ hard_cond_local_irq_disable();
error = syscore_suspend();
if (error)
goto Platform_finish;
local_irq_disable();
+ hard_cond_local_irq_disable();
syscore_suspend();
if (pm_wakeup_pending()) {
error = -EAGAIN;
CONSOLE_LOGLEVEL_DEFAULT, /* default_console_loglevel */
};
+/* Deferred messages from sched code are marked by this special level */
+#define SCHED_MESSAGE_LOGLEVEL -2
+
/*
* Low level drivers may need that to know if they can schedule in
* their unblank() callback or not. So let's export it.
int do_syslog(int type, char __user *buf, int len, bool from_file)
{
bool clear = false;
- static int saved_console_loglevel = LOGLEVEL_DEFAULT;
+ static int saved_console_loglevel = -1;
int error;
error = check_syslog_permissions(type, from_file);
break;
/* Disable logging to console */
case SYSLOG_ACTION_CONSOLE_OFF:
- if (saved_console_loglevel == LOGLEVEL_DEFAULT)
+ if (saved_console_loglevel == -1)
saved_console_loglevel = console_loglevel;
console_loglevel = minimum_console_loglevel;
break;
/* Enable logging to console */
case SYSLOG_ACTION_CONSOLE_ON:
- if (saved_console_loglevel != LOGLEVEL_DEFAULT) {
+ if (saved_console_loglevel != -1) {
console_loglevel = saved_console_loglevel;
- saved_console_loglevel = LOGLEVEL_DEFAULT;
+ saved_console_loglevel = -1;
}
break;
/* Set level of messages printed to console */
len = minimum_console_loglevel;
console_loglevel = len;
/* Implicitly re-enable logging to console */
- saved_console_loglevel = LOGLEVEL_DEFAULT;
+ saved_console_loglevel = -1;
error = 0;
break;
/* Number of chars in the log buffer */
int printed_len = 0;
bool in_sched = false;
/* cpu currently holding logbuf_lock in this function */
- static unsigned int logbuf_cpu = UINT_MAX;
+ static volatile unsigned int logbuf_cpu = UINT_MAX;
- if (level == LOGLEVEL_SCHED) {
- level = LOGLEVEL_DEFAULT;
+ if (level == SCHED_MESSAGE_LOGLEVEL) {
+ level = -1;
in_sched = true;
}
const char *end_of_header = printk_skip_level(text);
switch (kern_level) {
case '0' ... '7':
- if (level == LOGLEVEL_DEFAULT)
+ if (level == -1)
level = kern_level - '0';
- /* fallthrough */
case 'd': /* KERN_DEFAULT */
lflags |= LOG_PREFIX;
}
}
}
- if (level == LOGLEVEL_DEFAULT)
+ if (level == -1)
level = default_message_loglevel;
if (dict)
asmlinkage int vprintk(const char *fmt, va_list args)
{
- return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
+ return vprintk_emit(0, -1, NULL, 0, fmt, args);
}
EXPORT_SYMBOL(vprintk);
*/
DEFINE_PER_CPU(printk_func_t, printk_func) = vprintk_default;
+#ifdef CONFIG_IPIPE
+
+extern int __ipipe_printk_bypass;
+
+static IPIPE_DEFINE_SPINLOCK(__ipipe_printk_lock);
+
+static int __ipipe_printk_fill;
+
+static char __ipipe_printk_buf[__LOG_BUF_LEN];
+
+void __ipipe_flush_printk (unsigned virq, void *cookie)
+{
+ char *p = __ipipe_printk_buf;
+ int len, lmax, out = 0;
+ unsigned long flags;
+
+ goto start;
+
+ do {
+ raw_spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
+ start:
+ lmax = __ipipe_printk_fill;
+ while (out < lmax) {
+ len = strlen(p) + 1;
+ printk("%s",p);
+ p += len;
+ out += len;
+ }
+ raw_spin_lock_irqsave(&__ipipe_printk_lock, flags);
+ }
+ while (__ipipe_printk_fill != lmax);
+
+ __ipipe_printk_fill = 0;
+
+ raw_spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
+}
+
/**
* printk - print a kernel message
* @fmt: format string
*
* See the vsnprintf() documentation for format string extensions over C99.
*/
+asmlinkage __visible int printk(const char *fmt, ...)
+{
+ int sprintk = 1, cs = -1;
+ int r, fbytes, oldcount;
+ unsigned long flags;
+ va_list args;
+
+ va_start(args, fmt);
+
+ flags = hard_local_irq_save();
+
+ if (__ipipe_printk_bypass || oops_in_progress)
+ cs = ipipe_disable_context_check();
+ else if (__ipipe_current_domain == ipipe_root_domain) {
+ if (ipipe_head_domain != ipipe_root_domain &&
+ (raw_irqs_disabled_flags(flags) ||
+ test_bit(IPIPE_STALL_FLAG, &__ipipe_head_status)))
+ sprintk = 0;
+ } else
+ sprintk = 0;
+
+ hard_local_irq_restore(flags);
+
+ if (sprintk) {
+ r = vprintk(fmt, args);
+ if (cs != -1)
+ ipipe_restore_context_check(cs);
+ goto out;
+ }
+
+ raw_spin_lock_irqsave(&__ipipe_printk_lock, flags);
+
+ oldcount = __ipipe_printk_fill;
+ fbytes = __LOG_BUF_LEN - oldcount;
+ if (fbytes > 1) {
+ r = vscnprintf(__ipipe_printk_buf + __ipipe_printk_fill,
+ fbytes, fmt, args) + 1;
+ __ipipe_printk_fill += r;
+ } else
+ r = 0;
+
+ raw_spin_unlock_irqrestore(&__ipipe_printk_lock, flags);
+
+ if (oldcount == 0)
+ ipipe_raise_irq(__ipipe_printk_virq);
+out:
+ va_end(args);
+
+ return r;
+}
+
+#else /* !CONFIG_IPIPE */
+
asmlinkage __visible int printk(const char *fmt, ...)
{
printk_func_t vprintk_func;
return r;
}
+#endif /* CONFIG_IPIPE */
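
Net effect of the split above: a printk() issued from the root stage with the head stage quiet still takes the regular path, while calls made from the head domain (or with the head stage stalled) are staged into __ipipe_printk_buf and replayed later by __ipipe_flush_printk() over the root domain. The virq hookup is assumed to happen at pipeline init, roughly as sketched below; the exact code lives elsewhere in the patch.

/* Assumed boot-time wiring (sketch): allocate a virtual IRQ and attach the
 * flush handler to the root domain, so raising __ipipe_printk_virq from the
 * deferred path drains the staging buffer from a safe root context. */
__ipipe_printk_virq = ipipe_alloc_virq();
ipipe_request_irq(ipipe_root_domain, __ipipe_printk_virq,
		  __ipipe_flush_printk, NULL, NULL);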
+
EXPORT_SYMBOL(printk);
#else /* CONFIG_PRINTK */
#ifdef CONFIG_EARLY_PRINTK
struct console *early_console;
+void early_vprintk(const char *fmt, va_list ap)
+{
+ if (early_console) {
+ char buf[512];
+ int n = vscnprintf(buf, sizeof(buf), fmt, ap);
+
+ early_console->write(early_console, buf, n);
+ }
+}
+
asmlinkage __visible void early_printk(const char *fmt, ...)
{
va_list ap;
- char buf[512];
- int n;
- if (!early_console)
+ va_start(ap, fmt);
+ early_vprintk(fmt, ap);
+ va_end(ap);
+}
+#endif
+
+#ifdef CONFIG_RAW_PRINTK
+static struct console *raw_console;
+static IPIPE_DEFINE_RAW_SPINLOCK(raw_console_lock);
+
+void raw_vprintk(const char *fmt, va_list ap)
+{
+ unsigned long flags;
+ char buf[256];
+ int n;
+
+ if (raw_console == NULL || console_suspended)
return;
- va_start(ap, fmt);
n = vscnprintf(buf, sizeof(buf), fmt, ap);
+ touch_nmi_watchdog();
+ raw_spin_lock_irqsave(&raw_console_lock, flags);
+ if (raw_console)
+ raw_console->write_raw(raw_console, buf, n);
+ raw_spin_unlock_irqrestore(&raw_console_lock, flags);
+}
+
+asmlinkage __visible void raw_printk(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ raw_vprintk(fmt, ap);
va_end(ap);
+}
+
+static inline void register_raw_console(struct console *newcon)
+{
+ if ((newcon->flags & CON_RAW) != 0 && newcon->write_raw)
+ raw_console = newcon;
+}
+
+static inline void unregister_raw_console(struct console *oldcon)
+{
+ unsigned long flags;
- early_console->write(early_console, buf, n);
+ raw_spin_lock_irqsave(&raw_console_lock, flags);
+ if (oldcon == raw_console)
+ raw_console = NULL;
+ raw_spin_unlock_irqrestore(&raw_console_lock, flags);
}
+
+#else
+
+static inline void register_raw_console(struct console *newcon)
+{ }
+
+static inline void unregister_raw_console(struct console *oldcon)
+{ }
+
#endif
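
A console driver opts into the raw channel by setting CON_RAW and providing a ->write_raw() method doing polled, lock-free output. The sketch below is hypothetical; the myuart_* names are made up, only CON_RAW and write_raw come from this patch.

/* Hypothetical UART console exposing a raw (atomic, polled) write path
 * usable from raw_printk(); myuart_write() and myuart_poll_putc() stand in
 * for the driver's regular and polled TX routines. */
static void myuart_write_raw(struct console *con, const char *s, unsigned count)
{
	while (count-- > 0)
		myuart_poll_putc(*s++);
}

static struct console myuart_console = {
	.name		= "ttyMY",
	.write		= myuart_write,
	.write_raw	= myuart_write_raw,
	.flags		= CON_PRINTBUFFER | CON_RAW,
	.index		= -1,
};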
static int __add_preferred_console(char *name, int idx, char *options,
newcon->next = console_drivers->next;
console_drivers->next = newcon;
}
+
+ /* The latest raw console to register is current. */
+ register_raw_console(newcon);
+
if (newcon->flags & CON_PRINTBUFFER) {
/*
* console_unlock(); will print out the buffered messages
(console->flags & CON_BOOT) ? "boot" : "" ,
console->name, console->index);
+ unregister_raw_console(console);
+
res = _braille_unregister_console(console);
if (res)
return res;
preempt_disable();
va_start(args, fmt);
- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, 0, fmt, args);
+ r = vprintk_emit(0, SCHED_MESSAGE_LOGLEVEL, NULL, 0, fmt, args);
va_end(args);
__this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
* however a fair share of IPIs are still resched only so this would
* somewhat pessimize the simple resched case.
*/
+#ifndef IPIPE_ARCH_HAVE_VIRQ_IPI
irq_enter();
+#endif
sched_ttwu_pending();
/*
raise_softirq_irqoff(SCHED_SOFTIRQ);
#endif
+#ifndef IPIPE_ARCH_HAVE_VIRQ_IPI
irq_exit();
+#endif
}
static void ttwu_queue_remote(struct task_struct *p, int cpu)
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
- if (!(p->state & state))
+ if (!(p->state & state) ||
+ (p->state & (TASK_NOWAKEUP|TASK_HARDENING)))
goto out;
success = 1; /* we're going to change ->state */
{
struct rq *rq;
+ __ipipe_complete_domain_migration();
+
/* finish_task_switch() drops rq->lock and enables preemtion */
preempt_disable();
rq = finish_task_switch(prev);
switch_to(prev, next, prev);
barrier();
+ if (unlikely(__ipipe_switch_tail()))
+ return NULL;
+
return finish_task_switch(prev);
}
void preempt_count_add(int val)
{
+ ipipe_preempt_root_only();
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
void preempt_count_sub(int val)
{
+ ipipe_preempt_root_only();
#ifdef CONFIG_DEBUG_PREEMPT
/*
* Underflow?
*/
static inline void schedule_debug(struct task_struct *prev)
{
+ ipipe_root_only();
#ifdef CONFIG_SCHED_STACK_END_CHECK
BUG_ON(unlikely(task_stack_end_corrupted(prev)));
#endif
* accordingly in case an event triggered the need for rescheduling (such as
* an interrupt waking up a task) while preemption was disabled in __schedule().
*/
-static void __sched __schedule(void)
+static int __sched __schedule(void)
{
struct task_struct *prev, *next;
unsigned long *switch_count;
rcu_note_context_switch();
prev = rq->curr;
+ if (unlikely(prev->state & TASK_HARDENING))
+ /* Pop one disable level -- one still remains. */
+ preempt_enable();
+
schedule_debug(prev);
if (sched_feat(HRTICK))
++*switch_count;
rq = context_switch(rq, prev, next); /* unlocks the rq */
+ if (rq == NULL)
+ return 1; /* task hijacked by head domain */
cpu = cpu_of(rq);
- } else
+ } else {
+ prev->state &= ~TASK_HARDENING;
raw_spin_unlock_irq(&rq->lock);
+ }
post_schedule(rq);
sched_preempt_enable_no_resched();
+
+ return 0;
}
static inline void sched_submit_work(struct task_struct *tsk)
sched_submit_work(tsk);
do {
- __schedule();
+ if (__schedule())
+ return;
} while (need_resched());
}
EXPORT_SYMBOL(schedule);
{
do {
__preempt_count_add(PREEMPT_ACTIVE);
- __schedule();
+ if (__schedule())
+ return;
__preempt_count_sub(PREEMPT_ACTIVE);
/*
* If there is a non-zero preempt_count or interrupts are disabled,
* we do not want to preempt the current task. Just return..
*/
- if (likely(!preemptible()))
+ if (likely(!preemptible() || !ipipe_root_p))
return;
preempt_schedule_common();
prev_class = p->sched_class;
__setscheduler(rq, p, attr, true);
+ __ipipe_report_setsched(p);
if (running)
p->sched_class->set_curr_task(rq);
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
- if (cpumask_test_cpu(task_cpu(p), new_mask))
+ if (cpumask_test_cpu(task_cpu(p), new_mask)) {
+ __ipipe_report_setaffinity(p, task_cpu(p));
goto out;
+ }
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+ __ipipe_report_setaffinity(p, dest_cpu);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
pr_info("Task dump for CPU %d:\n", cpu);
sched_show_task(cpu_curr(cpu));
}
+
+#ifdef CONFIG_IPIPE
+
+int __ipipe_migrate_head(void)
+{
+ struct task_struct *p = current;
+
+ preempt_disable();
+
+ IPIPE_WARN_ONCE(__this_cpu_read(ipipe_percpu.task_hijacked) != NULL);
+
+ __this_cpu_write(ipipe_percpu.task_hijacked, p);
+ set_current_state(TASK_INTERRUPTIBLE | TASK_HARDENING);
+ sched_submit_work(p);
+ if (likely(__schedule()))
+ return 0;
+
+ if (signal_pending(p))
+ return -ERESTARTSYS;
+
+ BUG();
+}
+EXPORT_SYMBOL_GPL(__ipipe_migrate_head);
+
+void __ipipe_reenter_root(void)
+{
+ struct rq *rq;
+ struct task_struct *p;
+
+ p = __this_cpu_read(ipipe_percpu.rqlock_owner);
+ BUG_ON(p == NULL);
+ ipipe_clear_thread_flag(TIP_HEAD);
+ rq = finish_task_switch(p);
+ post_schedule(rq);
+ sched_preempt_enable_no_resched();
+}
+EXPORT_SYMBOL_GPL(__ipipe_reenter_root);
+
+#endif /* CONFIG_IPIPE */
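
__ipipe_migrate_head() is the hand-over point a co-kernel calls to move the current task under head-domain scheduling: TASK_HARDENING keeps regular wakeups away while __schedule() lets the head side hijack the context (the NULL return from context_switch() above). The converse path, relaxing back to Linux, resumes the hijacked task and completes the switch through __ipipe_reenter_root(). A hypothetical caller might look like this:

/* Hedged sketch of a co-kernel "harden" step; everything but
 * __ipipe_migrate_head() itself is hypothetical. */
static int example_harden_current(void)
{
	int ret = __ipipe_migrate_head();

	if (ret == -ERESTARTSYS)
		return ret;	/* signal pending, we stayed on the root stage */

	/* 0: execution resumes here under the head domain's scheduler. */
	return 0;
}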
{
wait_queue_t *curr, *next;
+ ipipe_root_only();
+
list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
unsigned flags = curr->flags;
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
set_tsk_thread_flag(t, TIF_SIGPENDING);
+
+ /* TIF_SIGPENDING must be set prior to reporting. */
+ __ipipe_report_sigwake(t);
+
/*
* TASK_WAKEKILL also means wake it up in the stopped/traced/killable
* case. We don't check t->state here because there is a race with it
return 0;
if (sig == SIGKILL)
return 1;
- if (task_is_stopped_or_traced(p))
+ if (task_is_stopped_or_traced(p)) {
+ if (!signal_pending(p))
+ __ipipe_report_sigwake(p);
return 0;
+ }
return task_curr(p) || !signal_pending(p);
}
#include <linux/module.h>
#include <linux/smp.h>
#include <linux/device.h>
+#include <linux/ipipe_tickdev.h>
#include "tick-internal.h"
/* Initialize state to DETACHED */
dev->state = CLOCK_EVT_STATE_DETACHED;
+ ipipe_host_timer_register(dev);
+
if (!dev->cpumask) {
WARN_ON(num_possible_cpus() > 1);
dev->cpumask = cpumask_of(smp_processor_id());
#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
#include <linux/tick.h>
#include <linux/kthread.h>
+#include <linux/kallsyms.h>
#include "tick-internal.h"
#include "timekeeping_internal.h"
cycle_t csnow, wdnow, cslast, wdlast, delta;
int64_t wd_nsec, cs_nsec;
int next_cpu, reset_pending;
+#ifdef CONFIG_IPIPE
+ cycle_t wdref;
+#endif
spin_lock(&watchdog_lock);
if (!watchdog_running)
continue;
}
+#ifdef CONFIG_IPIPE
+retry:
+#endif
local_irq_disable();
+#ifdef CONFIG_IPIPE
+ wdref = watchdog->read(watchdog);
+#endif
csnow = cs->read(cs);
wdnow = watchdog->read(watchdog);
local_irq_enable();
+#ifdef CONFIG_IPIPE
+ wd_nsec = clocksource_cyc2ns((wdnow - wdref) & watchdog->mask,
+ watchdog->mult, watchdog->shift);
+ if (wd_nsec > WATCHDOG_THRESHOLD)
+ goto retry;
+#endif
+
/* Clocksource initialized ? */
if (!(cs->flags & CLOCK_SOURCE_WATCHDOG) ||
atomic_read(&watchdog_reset_pending)) {
}
fs_initcall(clocksource_done_booting);
+#ifdef CONFIG_IPIPE_WANT_CLOCKSOURCE
+unsigned long long __ipipe_cs_freq;
+EXPORT_SYMBOL_GPL(__ipipe_cs_freq);
+
+struct clocksource *__ipipe_cs;
+EXPORT_SYMBOL_GPL(__ipipe_cs);
+
+cycle_t (*__ipipe_cs_read)(struct clocksource *cs);
+cycle_t __ipipe_cs_last_tsc;
+cycle_t __ipipe_cs_mask;
+unsigned __ipipe_cs_lat = 0xffffffff;
+
+static void ipipe_check_clocksource(struct clocksource *cs)
+{
+ cycle_t (*cread)(struct clocksource *cs);
+ cycle_t lat, mask, saved;
+ unsigned long long freq;
+ unsigned long flags;
+ unsigned i;
+
+ if (cs->ipipe_read) {
+ mask = CLOCKSOURCE_MASK(64);
+ cread = cs->ipipe_read;
+ } else {
+ mask = cs->mask;
+ cread = cs->read;
+
+ if ((cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) == 0)
+ return;
+
+ /*
+ * We only support the full 64-bit mask, or masks no wider than
+ * 32 bits whose value plus one is a power of 2.
+ */
+ if (mask != CLOCKSOURCE_MASK(64)
+ && ((mask & (mask + 1)) != 0 || mask > 0xffffffff))
+ return;
+ }
+
+ /*
+ * We prefer a clocksource with a resolution better than 1 us
+ */
+ if (cs->shift <= 34) {
+ freq = 1000000000ULL << cs->shift;
+ do_div(freq, cs->mult);
+ } else {
+ freq = 1000000ULL << cs->shift;
+ do_div(freq, cs->mult);
+ freq *= 1000;
+ }
+ if (freq < 1000000)
+ return;
+
+ /* Measure the clocksource latency */
+ flags = hard_local_irq_save();
+ saved = __ipipe_cs_last_tsc;
+ lat = cread(cs);
+ for (i = 0; i < 10; i++)
+ cread(cs);
+ lat = cread(cs) - lat;
+ __ipipe_cs_last_tsc = saved;
+ hard_local_irq_restore(flags);
+ lat = (lat * cs->mult) >> cs->shift;
+ do_div(lat, i + 1);
+
+ if (!strcmp(cs->name, override_name))
+ goto skip_tests;
+
+ if (lat > __ipipe_cs_lat)
+ return;
+
+ if (__ipipe_cs && !strcmp(__ipipe_cs->name, override_name))
+ return;
+
+ skip_tests:
+ flags = hard_local_irq_save();
+ if (__ipipe_cs_last_tsc == 0) {
+ __ipipe_cs_lat = lat;
+ __ipipe_cs_freq = freq;
+ __ipipe_cs = cs;
+ __ipipe_cs_read = cread;
+ __ipipe_cs_mask = mask;
+ }
+ hard_local_irq_restore(flags);
+}
+#else /* !CONFIG_IPIPE_WANT_CLOCKSOURCE */
+#define ipipe_check_clocksource(cs) do { } while (0)
+#endif /* !CONFIG_IPIPE_WANT_CLOCKSOURCE */
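
The frequency recovery above just inverts the cyc2ns relation ns = (cycles * mult) >> shift over one second, with a scaled variant for large shift values to avoid overflowing 64 bits. An equivalent standalone helper, for illustration only, would read:

/* Illustrative helper, not in the patch: recover the counter frequency in Hz
 * from a clocksource's (mult, shift) pair. */
static u64 example_cs_hz(u32 mult, u32 shift)
{
	u64 hz;

	if (shift <= 34) {
		hz = 1000000000ULL << shift;	/* fits in 64 bits for shift <= 34 */
		do_div(hz, mult);
	} else {
		hz = 1000000ULL << shift;	/* scale down, multiply back */
		do_div(hz, mult);
		hz *= 1000;
	}
	return hz;
}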
+
/*
* Enqueue the clocksource sorted by rating
*/
if (tmp->rating >= cs->rating)
entry = &tmp->list;
list_add(&cs->list, entry);
+
+ ipipe_check_clocksource(cs);
}
/**
update_wall_time();
}
- update_process_times(user_mode(get_irq_regs()));
+ update_root_process_times(get_irq_regs());
profile_tick(CPU_PROFILING);
}
ts->idle_jiffies++;
}
#endif
- update_process_times(user_mode(regs));
+ update_root_process_times(regs);
profile_tick(CPU_PROFILING);
}
xt = timespec64_to_timespec(tk_xtime(tk));
wm = timespec64_to_timespec(tk->wall_to_monotonic);
update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
- tk->tkr_mono.cycle_last);
+ tk->tkr_mono.shift, tk->tkr_mono.cycle_last);
}
static inline void old_vsyscall_fixup(struct timekeeper *tk)
run_posix_cpu_timers(p);
}
+#ifdef CONFIG_IPIPE
+
+void update_root_process_times(struct pt_regs *regs)
+{
+ int user_tick = user_mode(regs);
+
+ if (__ipipe_root_tick_p(regs)) {
+ update_process_times(user_tick);
+ return;
+ }
+
+ run_local_timers();
+ rcu_check_callbacks(user_tick);
+ run_posix_cpu_timers(current);
+}
+
+#endif
+
/*
* This function runs timers and the timer-tq in bottom half context.
*/
bool "enable/disable function tracing dynamically"
depends on FUNCTION_TRACER
depends on HAVE_DYNAMIC_FTRACE
+ depends on !IPIPE
default y
help
This option will modify all the calls to function tracing
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>
+#include <linux/ipipe.h>
#include <trace/events/sched.h>
static void update_ftrace_function(void)
{
+ struct ftrace_ops *ops;
ftrace_func_t func;
+ for (ops = ftrace_ops_list;
+ ops != &ftrace_list_end; ops = ops->next)
+ if (ops->flags & FTRACE_OPS_FL_IPIPE_EXCLUSIVE) {
+ set_function_trace_op = ops;
+ func = ops->func;
+ goto set_pointers;
+ }
+
/*
* Prepare the ftrace_ops that the arch callback will use.
* If there's only one ftrace_ops registered, the ftrace_ops_list
update_function_graph_func();
+ set_pointers:
/* If there's no change, then do nothing more here */
if (ftrace_trace_function == func)
return;
static void ftrace_run_update_code(int command)
{
+#ifdef CONFIG_IPIPE
+ unsigned long flags;
+#endif /* CONFIG_IPIPE */
int ret;
ret = ftrace_arch_code_modify_prepare();
* is safe. The stop_machine() is the safest, but also
* produces the most overhead.
*/
+#ifdef CONFIG_IPIPE
+ flags = ipipe_critical_enter(NULL);
+ __ftrace_modify_code(&command);
+ ipipe_critical_exit(flags);
+#else /* !CONFIG_IPIPE */
arch_ftrace_update_code(command);
+#endif /* !CONFIG_IPIPE */
ret = ftrace_arch_code_modify_post_process();
FTRACE_WARN_ON(ret);
* reason to cause large interrupt latencies while we do it.
*/
if (!mod)
- local_irq_save(flags);
+ flags = hard_local_irq_save();
ftrace_update_code(mod, start_pg);
if (!mod)
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
ret = 0;
out:
mutex_unlock(&ftrace_lock);
unsigned long count, flags;
int ret;
- local_irq_save(flags);
+ flags = hard_local_irq_save_notrace();
ret = ftrace_dyn_arch_init();
- local_irq_restore(flags);
+ hard_local_irq_restore_notrace(flags);
+
+ /* A non-zero return from ftrace_dyn_arch_init() signals failure. */
if (ret)
goto failed;
}
} while_for_each_ftrace_op(op);
out:
- preempt_enable_notrace();
+#ifdef CONFIG_IPIPE
+ if (hard_irqs_disabled() || !__ipipe_root_p)
+ /*
+ * Nothing urgent to schedule here. At the latest, the timer tick
+ * will pick up whatever the tracing functions kicked off.
+ */
+ preempt_enable_no_resched_notrace();
+ else
+#endif
+ preempt_enable_notrace();
trace_clear_recursion(bit);
}
static __always_inline int trace_recursive_lock(void)
{
- unsigned int val = __this_cpu_read(current_context);
+ unsigned long flags;
+ unsigned int val;
int bit;
if (in_interrupt()) {
} else
bit = 3;
- if (unlikely(val & (1 << bit)))
+ flags = hard_local_irq_save();
+
+ val = __this_cpu_read(current_context);
+ if (unlikely(val & (1 << bit))) {
+ hard_local_irq_restore(flags);
return 1;
+ }
val |= (1 << bit);
__this_cpu_write(current_context, val);
+ hard_local_irq_restore(flags);
+
return 0;
}
static __always_inline void trace_recursive_unlock(void)
{
+ unsigned long flags;
+
+ flags = hard_local_irq_save();
__this_cpu_and(current_context, __this_cpu_read(current_context) - 1);
+ hard_local_irq_restore(flags);
}
#else
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
+ flags = hard_local_irq_save();
+
pc = preempt_count();
- preempt_disable_notrace();
tbuffer = get_trace_buf();
if (!tbuffer) {
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
goto out;
- local_save_flags(flags);
size = sizeof(*entry) + sizeof(u32) * len;
buffer = tr->trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
}
out:
- preempt_enable_notrace();
+ hard_local_irq_restore(flags);
unpause_graph_tracing();
return len;
int this_cpu;
u64 now;
- local_irq_save(flags);
+ flags = hard_local_irq_save_notrace();
this_cpu = raw_smp_processor_id();
now = sched_clock_cpu(this_cpu);
arch_spin_unlock(&trace_clock_struct.lock);
out:
- local_irq_restore(flags);
+ hard_local_irq_restore_notrace(flags);
return now;
}
* Need to use raw, since this must be called before the
* recursive protection is performed.
*/
- local_irq_save(flags);
+ flags = hard_local_irq_save();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
}
atomic_dec(&data->disabled);
- local_irq_restore(flags);
+ hard_local_irq_restore(flags);
}
static struct tracer_opt func_opts[] = {
if (ftrace_graph_notrace_addr(trace->func))
return 1;
- local_irq_save(flags);
+ flags = hard_local_irq_save_notrace();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
}
atomic_dec(&data->disabled);
- local_irq_restore(flags);
+ hard_local_irq_restore_notrace(flags);
return ret;
}
int cpu;
int pc;
- local_irq_save(flags);
+ flags = hard_local_irq_save_notrace();
cpu = raw_smp_processor_id();
data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled);
__trace_graph_return(tr, trace, flags, pc);
}
atomic_dec(&data->disabled);
- local_irq_restore(flags);
+ hard_local_irq_restore_notrace(flags);
}
void set_graph_array(struct trace_array *tr)
keys are documented in <file:Documentation/sysrq.txt>. Don't say Y
unless you really know what this hack does.
+
config MAGIC_SYSRQ_DEFAULT_ENABLE
hex "Enable magic SysRq key functions by default"
depends on MAGIC_SYSRQ
This may be set to 1 or 0 to enable or disable them all, or
to a bitmask as described in Documentation/sysrq.txt.
+source "kernel/ipipe/Kconfig.debug"
+
config DEBUG_KERNEL
bool "Kernel debugging"
help
config DEBUG_STACKOVERFLOW
bool "Check for stack overflows"
- depends on DEBUG_KERNEL && HAVE_DEBUG_STACKOVERFLOW
+ depends on DEBUG_KERNEL && HAVE_DEBUG_STACKOVERFLOW && !IPIPE_LEGACY
---help---
Say Y here if you want to check for overflows of kernel, IRQ
and exception stacks (if your architecture uses them). This
menu "RCU Debugging"
config PROVE_RCU
- def_bool PROVE_LOCKING
+ def_bool PROVE_LOCKING && !IPIPE
config PROVE_RCU_REPEATEDLY
bool "RCU debugging: don't disable PROVE_RCU on first splat"
* Ensure each lock is in a separate cacheline.
*/
static union {
- raw_spinlock_t lock;
+ ipipe_spinlock_t lock;
char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
[0 ... (NR_LOCKS - 1)] = {
- .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+ .lock = IPIPE_SPIN_LOCK_UNLOCKED,
},
};
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline ipipe_spinlock_t *lock_addr(const atomic64_t *v)
{
unsigned long addr = (unsigned long) v;
long long atomic64_read(const atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
void atomic64_set(atomic64_t *v, long long i)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
raw_spin_lock_irqsave(lock, flags);
v->counter = i;
void atomic64_##op(long long a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ ipipe_spinlock_t *lock = lock_addr(v); \
\
raw_spin_lock_irqsave(lock, flags); \
v->counter c_op a; \
long long atomic64_##op##_return(long long a, atomic64_t *v) \
{ \
unsigned long flags; \
- raw_spinlock_t *lock = lock_addr(v); \
+ ipipe_spinlock_t *lock = lock_addr(v); \
long long val; \
\
raw_spin_lock_irqsave(lock, flags); \
long long atomic64_dec_if_positive(atomic64_t *v)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
long long atomic64_xchg(atomic64_t *v, long long new)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
long long val;
raw_spin_lock_irqsave(lock, flags);
int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
unsigned long flags;
- raw_spinlock_t *lock = lock_addr(v);
+ ipipe_spinlock_t *lock = lock_addr(v);
int ret = 0;
raw_spin_lock_irqsave(lock, flags);
#include <linux/wait.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
+#include <linux/ipipe_trace.h>
void __attribute__((weak)) bust_spinlocks(int yes)
unblank_screen();
#endif
console_unblank();
+ ipipe_trace_panic_dump();
if (--oops_in_progress == 0)
wake_up_klogd();
}
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/export.h>
+#include <linux/hardirq.h>
#include <asm/cacheflush.h>
#include <asm/pgtable.h>
break;
} while (pgd++, addr = next, addr != end);
- flush_cache_vmap(start, end);
+ /* APEI may invoke this for temporarily remapping pages in interrupt
+ * context - nothing we can or need to propagate globally. */
+ if (!in_interrupt()) {
+ __ipipe_pin_mapping_globally(start, end);
+ flush_cache_vmap(start, end);
+ }
return err;
}
{
int this_cpu = raw_smp_processor_id();
+ if (hard_irqs_disabled())
+ goto out;
+
+ if (!ipipe_root_p)
+ goto out;
+
if (likely(preempt_count()))
goto out;
return pfn_to_page(pfn);
}
+static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
+{
+ debug_dma_assert_idle(src);
+
+ /*
+ * If the source page was a PFN mapping, we don't have
+ * a "struct page" for it. We do a best-effort copy by
+ * just copying from the original user address. If that
+ * fails, we just zero-fill it. Live with it.
+ */
+ if (unlikely(!src)) {
+ void *kaddr = kmap_atomic(dst);
+ void __user *uaddr = (void __user *)(va & PAGE_MASK);
+
+ /*
+ * This really shouldn't fail, because the page is there
+ * in the page tables. But it might just be unreadable,
+ * in which case we just give up and fill the result with
+ * zeroes.
+ */
+ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
+ clear_page(kaddr);
+ kunmap_atomic(kaddr);
+ flush_dcache_page(dst);
+ } else
+ copy_user_highpage(dst, src, va, vma);
+}
+
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
static inline unsigned long
copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
- pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
- unsigned long addr, int *rss)
+ pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
+ unsigned long addr, int *rss, struct page *uncow_page)
{
unsigned long vm_flags = vma->vm_flags;
pte_t pte = *src_pte;
* in the parent and the child
*/
if (is_cow_mapping(vm_flags)) {
+#ifdef CONFIG_IPIPE
+ if (uncow_page) {
+ struct page *old_page = vm_normal_page(vma, addr, pte);
+ cow_user_page(uncow_page, old_page, addr, vma);
+ pte = mk_pte(uncow_page, vma->vm_page_prot);
+
+ if (vm_flags & VM_SHARED)
+ pte = pte_mkclean(pte);
+ pte = pte_mkold(pte);
+
+ page_add_new_anon_rmap(uncow_page, vma, addr);
+ rss[!!PageAnon(uncow_page)]++;
+ goto out_set_pte;
+ }
+#endif /* CONFIG_IPIPE */
ptep_set_wrprotect(src_mm, addr, src_pte);
pte = pte_wrprotect(pte);
}
int progress = 0;
int rss[NR_MM_COUNTERS];
swp_entry_t entry = (swp_entry_t){0};
-
+ struct page *uncow_page = NULL;
+#ifdef CONFIG_IPIPE
+ int do_cow_break = 0;
again:
+ if (do_cow_break) {
+ uncow_page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
+ if (uncow_page == NULL)
+ return -ENOMEM;
+ do_cow_break = 0;
+ }
+#else
+again:
+#endif
init_rss_vec(rss);
dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
- if (!dst_pte)
+ if (!dst_pte) {
+ if (uncow_page)
+ page_cache_release(uncow_page);
return -ENOMEM;
+ }
src_pte = pte_offset_map(src_pmd, addr);
src_ptl = pte_lockptr(src_mm, src_pmd);
spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
progress++;
continue;
}
+#ifdef CONFIG_IPIPE
+ if (likely(uncow_page == NULL) && likely(pte_present(*src_pte))) {
+ if (is_cow_mapping(vma->vm_flags) &&
+ test_bit(MMF_VM_PINNED, &src_mm->flags) &&
+ ((vma->vm_flags|src_mm->def_flags) & VM_LOCKED)) {
+ arch_leave_lazy_mmu_mode();
+ spin_unlock(src_ptl);
+ pte_unmap(src_pte);
+ add_mm_rss_vec(dst_mm, rss);
+ pte_unmap_unlock(dst_pte, dst_ptl);
+ cond_resched();
+ do_cow_break = 1;
+ goto again;
+ }
+ }
+#endif
entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte,
- vma, addr, rss);
+ vma, addr, rss, uncow_page);
+ uncow_page = NULL;
if (entry.val)
break;
progress += 8;
return same;
}
-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
-{
- debug_dma_assert_idle(src);
-
- /*
- * If the source page was a PFN mapping, we don't have
- * a "struct page" for it. We do a best-effort copy by
- * just copying from the original user address. If that
- * fails, we just zero-fill it. Live with it.
- */
- if (unlikely(!src)) {
- void *kaddr = kmap_atomic(dst);
- void __user *uaddr = (void __user *)(va & PAGE_MASK);
-
- /*
- * This really shouldn't fail, because the page is there
- * in the page tables. But it might just be unreadable,
- * in which case we just give up and fill the result with
- * zeroes.
- */
- if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
- clear_page(kaddr);
- kunmap_atomic(kaddr);
- flush_dcache_page(dst);
- } else
- copy_user_highpage(dst, src, va, vma);
-}
-
/*
* Notify the address space that the page is about to become writable so that
* it can prohibit this or wait for the page to get into an appropriate state.
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
+#ifdef CONFIG_IPIPE
+
+int __ipipe_disable_ondemand_mappings(struct task_struct *tsk)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ int result = 0;
+
+ mm = get_task_mm(tsk);
+ if (!mm)
+ return -EPERM;
+
+ down_write(&mm->mmap_sem);
+ if (test_bit(MMF_VM_PINNED, &mm->flags))
+ goto done_mm;
+
+ for (vma = mm->mmap; vma; vma = vma->vm_next) {
+ if (is_cow_mapping(vma->vm_flags) &&
+ (vma->vm_flags & VM_WRITE)) {
+ result = __ipipe_pin_vma(mm, vma);
+ if (result < 0)
+ goto done_mm;
+ }
+ }
+ set_bit(MMF_VM_PINNED, &mm->flags);
+
+ done_mm:
+ up_write(&mm->mmap_sem);
+ mmput(mm);
+ return result;
+}
+EXPORT_SYMBOL_GPL(__ipipe_disable_ondemand_mappings);
+
+#endif /* CONFIG_IPIPE */
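
__ipipe_disable_ondemand_mappings() walks the writable COW mappings of a task, pre-faults them through __ipipe_pin_vma(), and flags the mm with MMF_VM_PINNED so later forks break COW eagerly (the uncow_page path in copy_one_pte() above). A co-kernel would typically call it once a real-time process has locked its memory; the caller below is hypothetical.

/* Hypothetical call site: pin current's address space so no minor fault can
 * hit one of its threads while running over the head domain. Assumes the
 * process already issued mlockall(MCL_CURRENT | MCL_FUTURE), so the
 * VM_LOCKED checks in the COW-break path apply. */
static int example_pin_current_mm(void)
{
	return __ipipe_disable_ondemand_mappings(current);
}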
+
#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
static struct kmem_cache *page_ptl_cachep;
spin_unlock(&shmlock_user_lock);
free_uid(user);
}
+
+#ifdef CONFIG_IPIPE
+int __ipipe_pin_vma(struct mm_struct *mm, struct vm_area_struct *vma)
+{
+ int ret, write, len;
+
+ if (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ return 0;
+
+ if (!((vma->vm_flags & VM_DONTEXPAND) ||
+ is_vm_hugetlb_page(vma) || vma == get_gate_vma(mm))) {
+ ret = populate_vma_page_range(vma, vma->vm_start, vma->vm_end,
+ NULL);
+ return ret < 0 ? ret : 0;
+ }
+
+ write = (vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE;
+ len = DIV_ROUND_UP(vma->vm_end, PAGE_SIZE) - vma->vm_start/PAGE_SIZE;
+ ret = get_user_pages(current, mm, vma->vm_start,
+ len, write, 0, NULL, NULL);
+ if (ret < 0)
+ return ret;
+ return ret == len ? 0 : -EFAULT;
+}
+#endif
#include "internal.h"
+#ifndef MAP_BRK
+#define MAP_BRK 0
+#endif
+
#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags) (0)
#endif
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
- error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
+ error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED | MAP_BRK);
if (error & ~PAGE_MASK)
return error;
{
struct mm_struct *active_mm;
struct task_struct *tsk = current;
+ unsigned long flags;
task_lock(tsk);
active_mm = tsk->active_mm;
+ ipipe_mm_switch_protect(flags);
if (active_mm != mm) {
atomic_inc(&mm->mm_count);
tsk->active_mm = mm;
}
tsk->mm = mm;
- switch_mm(active_mm, mm, tsk);
+ __switch_mm(active_mm, mm, tsk);
+ ipipe_mm_switch_unprotect(flags);
task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
finish_arch_post_lock_switch();
struct mm_struct *mm = vma->vm_mm;
pte_t *pte, oldpte;
spinlock_t *ptl;
- unsigned long pages = 0;
+ unsigned long pages = 0, flags;
pte = lock_pte_protection(vma, pmd, addr, prot_numa, &ptl);
if (!pte)
continue;
}
+ flags = hard_local_irq_save();
ptent = ptep_modify_prot_start(mm, addr, pte);
ptent = pte_modify(ptent, newprot);
if (preserve_write)
ptent = pte_mkwrite(ptent);
}
ptep_modify_prot_commit(mm, addr, pte, ptent);
+ hard_local_irq_restore(flags);
pages++;
} else if (IS_ENABLED(CONFIG_MIGRATION)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
pages = hugetlb_change_protection(vma, start, end, newprot);
else
pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa);
+#ifdef CONFIG_IPIPE
+ if (test_bit(MMF_VM_PINNED, &vma->vm_mm->flags) &&
+ ((vma->vm_flags | vma->vm_mm->def_flags) & VM_LOCKED) &&
+ (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
+ __ipipe_pin_vma(vma->vm_mm, vma);
+#endif
return pages;
}
return err;
} while (pgd++, addr = next, addr != end);
+ __ipipe_pin_mapping_globally(start, end);
+
return nr;
}