-rw-r--r--   include/linux/vmstat.h |  3
-rw-r--r--   mm/slab.c              |  1
-rw-r--r--   mm/slub.c              |  1
-rw-r--r--   mm/vmstat.c            | 40
4 files changed, 36 insertions, 9 deletions
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index acb1f105870..d9325cf8a13 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -212,8 +212,6 @@ extern void dec_zone_state(struct zone *, enum zone_stat_item);
 extern void __dec_zone_state(struct zone *, enum zone_stat_item);
 
 void refresh_cpu_vm_stats(int);
-void refresh_vm_stats(void);
-
 #else /* CONFIG_SMP */
 
 /*
@@ -260,7 +258,6 @@ static inline void __dec_zone_page_state(struct page *page,
 #define mod_zone_page_state __mod_zone_page_state
 
 static inline void refresh_cpu_vm_stats(int cpu) { }
-static inline void refresh_vm_stats(void) { }
 #endif
 
 #endif /* _LINUX_VMSTAT_H */
diff --git a/mm/slab.c b/mm/slab.c
index 6f3d6e240c6..e50908b2bfa 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4156,7 +4156,6 @@ next:
 	check_irq_on();
 	mutex_unlock(&cache_chain_mutex);
 	next_reap_node();
-	refresh_cpu_vm_stats(smp_processor_id());
 out:
 	/* Set up the next iteration */
 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
diff --git a/mm/slub.c b/mm/slub.c
index a581fa8ae11..dbb206503a8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2580,7 +2580,6 @@ static DEFINE_PER_CPU(struct delayed_work, reap_work);
 static void cache_reap(struct work_struct *unused)
 {
 	next_reap_node();
-	refresh_cpu_vm_stats(smp_processor_id());
 	schedule_delayed_work(&__get_cpu_var(reap_work),
 			      REAPTIMEOUT_CPUC);
 }
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 9a66dc4aed4..9d824643a22 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -640,6 +640,22 @@ const struct seq_operations vmstat_op = {
 #endif /* CONFIG_PROC_FS */
 
 #ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct delayed_work, vmstat_work);
+
+static void vmstat_update(struct work_struct *w)
+{
+	refresh_cpu_vm_stats(smp_processor_id());
+	schedule_delayed_work(&__get_cpu_var(vmstat_work), HZ);
+}
+
+static void __devinit start_cpu_timer(int cpu)
+{
+	struct delayed_work *vmstat_work = &per_cpu(vmstat_work, cpu);
+
+	INIT_DELAYED_WORK(vmstat_work, vmstat_update);
+	schedule_delayed_work_on(cpu, vmstat_work, HZ + cpu);
+}
+
 /*
  * Use the cpu notifier to insure that the thresholds are recalculated
  * when necessary.
@@ -648,11 +664,22 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 						unsigned long action,
 						void *hcpu)
 {
+	long cpu = (long)hcpu;
+
 	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		start_cpu_timer(cpu);
+		break;
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		cancel_rearming_delayed_work(&per_cpu(vmstat_work, cpu));
+		per_cpu(vmstat_work, cpu).work.func = NULL;
+		break;
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
+		start_cpu_timer(cpu);
+		break;
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		refresh_zone_stat_thresholds();
@@ -668,8 +695,13 @@ static struct notifier_block __cpuinitdata vmstat_notifier =
 
 int __init setup_vmstat(void)
 {
+	int cpu;
+
 	refresh_zone_stat_thresholds();
 	register_cpu_notifier(&vmstat_notifier);
+
+	for_each_online_cpu(cpu)
+		start_cpu_timer(cpu);
 	return 0;
 }
 module_init(setup_vmstat)
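For context: the patch stops piggy-backing refresh_cpu_vm_stats() on the slab/slub cache reapers and instead gives vmstat its own self-rearming per-CPU delayed work, started by the CPU hotplug notifier and at boot via setup_vmstat(). Below is a minimal sketch of that pattern, assuming the same 2.6.22-era workqueue API the patch itself uses (DEFINE_PER_CPU, __get_cpu_var, schedule_delayed_work_on); the example_* identifiers are hypothetical, the real code uses vmstat_work, vmstat_update and start_cpu_timer as shown above.

#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>

/* One delayed_work per CPU; each instance only ever runs on its own CPU. */
static DEFINE_PER_CPU(struct delayed_work, example_work);

/* Handler: do the per-CPU work, then re-arm itself one interval (HZ) later. */
static void example_update(struct work_struct *w)
{
	/* ... per-CPU work goes here, e.g. folding per-CPU counters ... */
	schedule_delayed_work(&__get_cpu_var(example_work), HZ);
}

/* Arm the work on a given CPU, staggered by 'cpu' ticks so that the
 * per-CPU timers do not all expire in the same jiffy. */
static void example_start(int cpu)
{
	struct delayed_work *dw = &per_cpu(example_work, cpu);

	INIT_DELAYED_WORK(dw, example_update);
	schedule_delayed_work_on(cpu, dw, HZ + cpu);
}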