#include <linux/mc146818rtc.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
+#include <linux/module.h>
#include <asm/atomic.h>
#include <asm/smp.h>
int disable_apic_timer __initdata;
+/*
+ * cpumask of the CPUs that need the timer interrupt delivered as IPIs
+ * in place of their local APIC timers
+ */
+static cpumask_t timer_interrupt_broadcast_ipi_mask;
+
/* Using APIC to generate smp_local_timer_interrupt? */
int using_apic_timer = 0;
-static DEFINE_PER_CPU(int, prof_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_counter) = 1;
-
static void apic_pm_activate(void);
void enable_NMI_through_LVT0 (void * dummy)
static void __setup_APIC_LVTT(unsigned int clocks)
{
unsigned int lvtt_value, tmp_value, ver;
+ int cpu = smp_processor_id();
ver = GET_APIC_VERSION(apic_read(APIC_LVR));
lvtt_value = APIC_LVT_TIMER_PERIODIC | LOCAL_TIMER_VECTOR;
+
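+ /* keep the timer LVT masked on CPUs that get their tick as a broadcast IPI */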
+ if (cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask))
+ lvtt_value |= APIC_LVT_MASKED;
+
apic_write_around(APIC_LVTT, lvtt_value);
/*
local_irq_enable();
}
-void __cpuinit disable_APIC_timer(void)
+void disable_APIC_timer(void)
{
if (using_apic_timer) {
unsigned long v;
void enable_APIC_timer(void)
{
- if (using_apic_timer) {
+ int cpu = smp_processor_id();
+
+ if (using_apic_timer &&
+ !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
unsigned long v;
v = apic_read(APIC_LVTT);
}
}
-/*
- * the frequency of the profiling timer can be changed
- * by writing a multiplier value into /proc/profile.
- */
-int setup_profiling_timer(unsigned int multiplier)
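+/*
+ * Stop using the local APIC timer on the calling CPU and mark it for
+ * timer broadcast IPIs instead.  Runs as a per-CPU callback with the
+ * affected CPUs passed in *cpumask; only CPUs in that mask switch over.
+ * (The intended trigger, e.g. a deep C-state that stops the local APIC
+ * timer, lives outside this file.)
+ */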
+void switch_APIC_timer_to_ipi(void *cpumask)
{
- int i;
+ cpumask_t mask = *(cpumask_t *)cpumask;
+ int cpu = smp_processor_id();
- /*
- * Sanity check. [at least 500 APIC cycles should be
- * between APIC interrupts as a rule of thumb, to avoid
- * irqs flooding us]
- */
- if ( (!multiplier) || (calibration_result/multiplier < 500))
- return -EINVAL;
-
- /*
- * Set the new multiplier for each CPU. CPUs don't start using the
- * new values until the next timer interrupt in which they do process
- * accounting. At that time they also adjust their APIC timers
- * accordingly.
- */
- for (i = 0; i < NR_CPUS; ++i)
- per_cpu(prof_multiplier, i) = multiplier;
+ if (cpu_isset(cpu, mask) &&
+ !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
+ disable_APIC_timer();
+ cpu_set(cpu, timer_interrupt_broadcast_ipi_mask);
+ }
+}
+EXPORT_SYMBOL(switch_APIC_timer_to_ipi);
- return 0;
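+/*
+ * Relay the timer tick as LOCAL_TIMER_VECTOR IPIs to every online CPU
+ * that has been switched away from its local APIC timer.  Intended to
+ * be called from the global timer interrupt path.
+ */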
+void smp_send_timer_broadcast_ipi(void)
+{
+ cpumask_t mask;
+
+ cpus_and(mask, cpu_online_map, timer_interrupt_broadcast_ipi_mask);
+ if (!cpus_empty(mask)) {
+ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
+ }
+}
+
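+/*
+ * Undo switch_APIC_timer_to_ipi(): drop the calling CPU from the
+ * broadcast mask and re-enable its local APIC timer, provided the CPU
+ * is named in *cpumask.
+ */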
+void switch_ipi_to_APIC_timer(void *cpumask)
+{
+ cpumask_t mask = *(cpumask_t *)cpumask;
+ int cpu = smp_processor_id();
+
+ if (cpu_isset(cpu, mask) &&
+ cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
+ cpu_clear(cpu, timer_interrupt_broadcast_ipi_mask);
+ enable_APIC_timer();
+ }
+}
+EXPORT_SYMBOL(switch_ipi_to_APIC_timer);
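+/*
+ * Rough usage sketch for the two exported helpers above (assumption:
+ * the real call sites are outside this patch, and on_each_cpu() takes
+ * the four-argument form of this kernel era):
+ *
+ *	cpumask_t mask = cpumask_of_cpu(smp_processor_id());
+ *
+ *	on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
+ *	...		(local APIC timer may now stop safely)
+ *	on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
+ */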
+
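+/*
+ * The per-CPU profiling multipliers are gone, so the profiling timer
+ * frequency can no longer be changed via /proc/profile; reject any
+ * requested multiplier.
+ */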
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
}
+#ifdef CONFIG_X86_MCE_AMD
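+/*
+ * Program the AMD MCE threshold LVT entry: the register sits at APIC
+ * offset 0x500 + (lvt_off << 4), and writing only the vector leaves it
+ * unmasked with fixed delivery mode.
+ */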
+void setup_threshold_lvt(unsigned long lvt_off)
+{
+ unsigned int v = 0;
+ unsigned long reg = (lvt_off << 4) + 0x500;
+ v |= THRESHOLD_APIC_VECTOR;
+ apic_write(reg, v);
+}
+#endif /* CONFIG_X86_MCE_AMD */
+
#undef APIC_DIVISOR
/*
void smp_local_timer_interrupt(struct pt_regs *regs)
{
- int cpu = smp_processor_id();
-
profile_tick(CPU_PROFILING, regs);
- if (--per_cpu(prof_counter, cpu) <= 0) {
- /*
- * The multiplier may have changed since the last time we got
- * to this point as a result of the user writing to
- * /proc/profile. In this case we need to adjust the APIC
- * timer accordingly.
- *
- * Interrupts are already masked off at this point.
- */
- per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
- if (per_cpu(prof_counter, cpu) !=
- per_cpu(prof_old_multiplier, cpu)) {
- __setup_APIC_LVTT(calibration_result/
- per_cpu(prof_counter, cpu));
- per_cpu(prof_old_multiplier, cpu) =
- per_cpu(prof_counter, cpu);
- }
-
#ifdef CONFIG_SMP
- update_process_times(user_mode(regs));
+ update_process_times(user_mode(regs));
#endif
- }
-
/*
* We take the 'long' return path, and there every subsystem
* grabs the appropriate locks (kernel lock/ irq lock).