#include "entry.h"
#include "cpumap.h"
+#include "kstack.h"
#define NUM_IVECS (IMAP_INR + 1)
@@ ... @@
void *hardirq_stack[NR_CPUS];
void *softirq_stack[NR_CPUS];
-static __attribute__((always_inline)) void *set_hardirq_stack(void)
-{
- void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
-
- __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
- if (orig_sp < sp ||
- orig_sp > (sp + THREAD_SIZE)) {
- sp += THREAD_SIZE - 192 - STACK_BIAS;
- __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
- }
-
- return orig_sp;
-}
-static __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
-{
- __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
-}
-
void __irq_entry handler_irq(int irq, struct pt_regs *regs)
{
unsigned long pstate, bucket_pa;
--- a/arch/sparc/kernel/kstack.h
+++ b/arch/sparc/kernel/kstack.h
@@ ... @@
}
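+/* Switch %sp to this cpu's hardirq stack unless it already points
+ * into it.  The new %sp sits THREAD_SIZE - 192 bytes into the block
+ * (192 bytes of room for an initial stack frame), adjusted by the
+ * usual sparc64 STACK_BIAS.  The original %sp is returned so the
+ * caller can hand it to restore_hardirq_stack() afterwards.
+ */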
+static inline __attribute__((always_inline)) void *set_hardirq_stack(void)
+{
+ void *orig_sp, *sp = hardirq_stack[smp_processor_id()];
+
+ __asm__ __volatile__("mov %%sp, %0" : "=r" (orig_sp));
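+ /* If %sp lies outside [sp, sp + THREAD_SIZE] we are not on the
+ * hardirq stack yet, so move %sp to its top. */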
+ if (orig_sp < sp ||
+ orig_sp > (sp + THREAD_SIZE)) {
+ sp += THREAD_SIZE - 192 - STACK_BIAS;
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (sp));
+ }
+
+ return orig_sp;
+}
+
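+/* Unconditionally put %sp back to the value that set_hardirq_stack()
+ * returned.
+ */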
+static inline __attribute__((always_inline)) void restore_hardirq_stack(void *orig_sp)
+{
+ __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp));
+}
+
#endif /* _KSTACK_H */
--- a/arch/sparc/kernel/nmi.c
+++ b/arch/sparc/kernel/nmi.c
@@ ... @@
#include <asm/ptrace.h>
#include <asm/pcr.h>
+#include "kstack.h"
+
/* We don't have a real NMI on sparc64, but we can fake one
* up using profiling counter overflow interrupts and interrupt
* levels.
@@ ... @@
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
unsigned int sum, touched = 0;
+ void *orig_sp;
clear_softint(1 << irq);
nmi_enter();
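+ /* Run the NMI body on the per-cpu hardirq stack rather than on
+ * the stack of whatever context was interrupted. */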
+ orig_sp = set_hardirq_stack();
+
if (notify_die(DIE_NMI, "nmi", regs, 0,
pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
touched = 1;
@@ ... @@
pcr_ops->write(pcr_enable);
}
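+ /* Switch back to the interrupted context's stack before nmi_exit(). */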
+ restore_hardirq_stack(orig_sp);
+
nmi_exit();
}