--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ ... @@
extern void __dec_zone_state(struct zone *, enum zone_stat_item);
void refresh_cpu_vm_stats(int);
-void refresh_vm_stats(void);
-
#else /* CONFIG_SMP */
/*
@@ ... @@
#define mod_zone_page_state __mod_zone_page_state
static inline void refresh_cpu_vm_stats(int cpu) { }
-static inline void refresh_vm_stats(void) { }
#endif
#endif /* _LINUX_VMSTAT_H */
--- a/mm/slab.c
+++ b/mm/slab.c
@@ ... @@ static void cache_reap(struct work_struct *w)
check_irq_on();
mutex_unlock(&cache_chain_mutex);
next_reap_node();
- refresh_cpu_vm_stats(smp_processor_id());
out:
/* Set up the next iteration */
schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ ... @@
#endif /* CONFIG_PROC_FS */
#ifdef CONFIG_SMP
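+/*
+ * Fold each cpu's vm counter differentials back into the global
+ * counters from a per cpu delayed work item, instead of piggybacking
+ * on the slab reaper (the mm/slab.c hunk above drops that call).
+ */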
+static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
+
+static void vmstat_update(struct work_struct *w)
+{
+ refresh_cpu_vm_stats(smp_processor_id());
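+ /* Requeue on this cpu; the work item rearms itself every HZ jiffies (~1s) */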
+ schedule_delayed_work(&__get_cpu_var(vmstat_work), HZ);
+}
+
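+/* Arm cpu's vmstat_work and schedule its first expiry on that cpu */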
+static void __devinit start_cpu_timer(int cpu)
+{
+ struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+
+ INIT_DELAYED_WORK(vmstat_work, vmstat_update);
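+ /* Stagger the first expiry by the cpu number so the timers do not all fire together */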
+ schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
+}
+
/*
* Use the cpu notifier to ensure that the thresholds are recalculated
* when necessary.
@@ ... @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
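+ /* The notifier passes the cpu number encoded in the hcpu pointer */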
+ long cpu = (long)hcpu;
+
switch (action) {
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
- case CPU_UP_CANCELED:
- case CPU_UP_CANCELED_FROZEN:
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ start_cpu_timer(cpu);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
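+ /* The update work rearms itself, so cancel it before the cpu goes away */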
+ cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
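+ /* A NULL ->work.func marks this cpu's work item as no longer armed */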
+ per_cpu(vmstat_work, cpu).work.func = NULL;
+ break;
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ start_cpu_timer(cpu);
+ break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
refresh_zone_stat_thresholds();
@@ ... @@
int __init setup_vmstat(void)
{
+ int cpu;
+
refresh_zone_stat_thresholds();
register_cpu_notifier(&vmstat_notifier);
+
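+ /* CPUs already online never trigger CPU_ONLINE, so start their timers here */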
+ for_each_online_cpu(cpu)
+ start_cpu_timer(cpu);
return 0;
}
module_init(setup_vmstat)