select HAVE_OPROFILE
select HAVE_SYSCALL_WRAPPERS if PPC64
select GENERIC_ATOMIC64 if PPC32
+ select HAVE_PERF_COUNTERS
config EARLY_PRINTK
bool
struct irq_chip;
#ifdef CONFIG_PERF_COUNTERS
+
+#ifdef CONFIG_PPC64
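+/* On 64-bit the pending flag lives in the paca, addressed via r13. */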
static inline unsigned long test_perf_counter_pending(void)
{
	unsigned long x;

	asm volatile("lbz %0,%1(13)"
		: "=r" (x)
		: "i" (offsetof(struct paca_struct, perf_counter_pending)));
	return x;
}
+#endif /* CONFIG_PPC64 */
-#else
+#else /* CONFIG_PERF_COUNTERS */
static inline unsigned long test_perf_counter_pending(void)
{
	return 0;
}
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
-#define perf_misc_flags(regs) perf_misc_flags(regs)
-
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
/*
+ * Only override the default definitions in include/linux/perf_counter.h
+ * if we have hardware PMU support.
+ */
+#ifdef CONFIG_PPC_PERF_CTRS
+#define perf_misc_flags(regs) perf_misc_flags(regs)
+#endif
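For context, the generic header only provides its default versions of these hooks when the architecture has not defined them. A rough sketch of that fallback pattern, assuming include/linux/perf_counter.h looks roughly like this (exact names may differ):

	#ifndef perf_misc_flags
	/* default: classify samples by the mode the registers were captured in */
	#define perf_misc_flags(regs) \
		(user_mode(regs) ? PERF_EVENT_MISC_USER : PERF_EVENT_MISC_KERNEL)
	#define perf_instruction_pointer(regs)	instruction_pointer(regs)
	#endif

With the #ifdef above, configurations that have only software counters (e.g. 32-bit without a supported PMU) simply fall back to these generic definitions.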
+
+/*
* The power_pmu.get_constraint function returns a 64-bit value and
* a 64-bit mask that express the constraints between this event and
* other events.
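As a hedged illustration of the simplest case of this interface: when two events' masks overlap, their values must agree on the overlapping bits for the events to be scheduled together. This is only a sketch, not the checker in perf_counter.c, which also handles constraints on how many events of a given class can be counted at once.

	/*
	 * Sketch only: two (value, mask) pairs are compatible in the
	 * simplest sense iff their values agree on every bit set in
	 * both masks.
	 */
	static int constraints_compatible(u64 val_a, u64 mask_a,
					  u64 val_b, u64 mask_b)
	{
		return ((val_a ^ val_b) & (mask_a & mask_b)) == 0;
	}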
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
-obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o power4-pmu.o ppc970-pmu.o \
- power5-pmu.o power5+-pmu.o power6-pmu.o \
- power7-pmu.o
+obj-$(CONFIG_PPC_PERF_CTRS) += perf_counter.o
+obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
+ power5+-pmu.o power6-pmu.o power7-pmu.o
obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
#include <linux/posix-timers.h>
#include <linux/irq.h>
#include <linux/delay.h>
+#include <linux/perf_counter.h>
#include <asm/io.h>
#include <asm/processor.h>
}
#endif /* CONFIG_PPC_ISERIES */
+#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
+DEFINE_PER_CPU(u8, perf_counter_pending);
+
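+/*
+ * 32-bit has no paca field to flag pending perf counter work in, so
+ * use a per-cpu byte instead.  Setting the decrementer to 1 makes it
+ * expire almost immediately, so timer_interrupt() will process the
+ * pending work as soon as interrupts are enabled.
+ */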
+void set_perf_counter_pending(void)
+{
+ get_cpu_var(perf_counter_pending) = 1;
+ set_dec(1);
+ put_cpu_var(perf_counter_pending);
+}
+
+#define test_perf_counter_pending() __get_cpu_var(perf_counter_pending)
+#define clear_perf_counter_pending() __get_cpu_var(perf_counter_pending) = 0
+
+#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+
+#define test_perf_counter_pending() 0
+#define clear_perf_counter_pending()
+
+#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+
/*
* For iSeries shared processors, we have to let the hypervisor
* set the hardware decrementer. We set a virtual decrementer
set_dec(DECREMENTER_MAX);
#ifdef CONFIG_PPC32
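+ /* run any perf counter work deferred via set_perf_counter_pending() */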
+ if (test_perf_counter_pending()) {
+ clear_perf_counter_pending();
+ perf_counter_do_pending();
+ }
if (atomic_read(&ppc_n_lost_interrupts) != 0)
do_IRQ(regs);
#endif
config PPC64
bool "64-bit kernel"
default n
- select HAVE_PERF_COUNTERS
+ select PPC_HAVE_PMU_SUPPORT
help
This option selects whether a 32-bit or a 64-bit kernel
will be built.
If in doubt, say Y here.
+config PPC_HAVE_PMU_SUPPORT
+ bool
+
+config PPC_PERF_CTRS
+ def_bool y
+ depends on PERF_COUNTERS && PPC_HAVE_PMU_SUPPORT
+ help
+ This enables the powerpc-specific perf_counter back-end.
+
config SMP
depends on PPC_STD_MMU || FSL_BOOKE
bool "Symmetric multi-processing support"