sagit-ice-cold/kernel_xiaomi_msm8998.git: mm/vmstat.c

diff --git a/mm/vmstat.c b/mm/vmstat.c
index 0d5712b..9d8936c 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -219,7 +219,7 @@ void set_pgdat_percpu_threshold(pg_data_t *pgdat,
  * particular counter cannot be updated from interrupt context.
  */
 void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-                               int delta)
+                          long delta)
 {
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
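
The hunk above, like the matching signature changes in the hunks below, widens
the "delta" parameter from int to long. Page counts on 64-bit systems can
exceed INT_MAX (a zone larger than 8TB holds more than 2^31 4KB pages), and
passing such a value through an int parameter truncates it silently. A minimal
userspace sketch of the hazard, assuming a 64-bit long; apply_int() and
apply_long() are hypothetical stand-ins, not the kernel functions:

    #include <stdio.h>

    /* Hypothetical stand-ins for the old and new prototypes. */
    static long apply_int(int delta)   { return delta; }
    static long apply_long(long delta) { return delta; }

    int main(void)
    {
            long delta = 5L * 1024 * 1024 * 1024;   /* > INT_MAX */

            /* Implicit long->int conversion truncates (typically wraps). */
            printf("int  parameter sees: %ld\n", apply_int(delta));
            printf("long parameter sees: %ld\n", apply_long(delta));
            return 0;
    }
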
@@ -318,8 +318,8 @@ EXPORT_SYMBOL(__dec_zone_page_state);
  *     1       Overstepping half of threshold
  *     -1      Overstepping minus half of threshold
 */
-static inline void mod_state(struct zone *zone,
-       enum zone_stat_item item, int delta, int overstep_mode)
+static inline void mod_state(struct zone *zone, enum zone_stat_item item,
+                            long delta, int overstep_mode)
 {
        struct per_cpu_pageset __percpu *pcp = zone->pageset;
        s8 __percpu *p = pcp->vm_stat_diff + item;
@@ -357,7 +357,7 @@ static inline void mod_state(struct zone *zone,
 }
 
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-                                       int delta)
+                        long delta)
 {
        mod_state(zone, item, delta, 0);
 }
@@ -384,7 +384,7 @@ EXPORT_SYMBOL(dec_zone_page_state);
  * Use interrupt disable to serialize counter updates
  */
 void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
-                                       int delta)
+                        long delta)
 {
        unsigned long flags;
 
@@ -460,7 +460,7 @@ static int fold_diff(int *diff)
  *
  * The function returns the number of global counters updated.
  */
-static int refresh_cpu_vm_stats(void)
+static int refresh_cpu_vm_stats(bool do_pagesets)
 {
        struct zone *zone;
        int i;
@@ -484,33 +484,35 @@ static int refresh_cpu_vm_stats(void)
 #endif
                        }
                }
-               cond_resched();
 #ifdef CONFIG_NUMA
-               /*
-                * Deal with draining the remote pageset of this
-                * processor
-                *
-                * Check if there are pages remaining in this pageset
-                * if not then there is nothing to expire.
-                */
-               if (!__this_cpu_read(p->expire) ||
+               if (do_pagesets) {
+                       cond_resched();
+                       /*
+                        * Deal with draining the remote pageset of this
+                        * processor
+                        *
+                        * Check if there are pages remaining in this pageset
+                        * if not then there is nothing to expire.
+                        */
+                       if (!__this_cpu_read(p->expire) ||
                               !__this_cpu_read(p->pcp.count))
-                       continue;
+                               continue;
 
-               /*
-                * We never drain zones local to this processor.
-                */
-               if (zone_to_nid(zone) == numa_node_id()) {
-                       __this_cpu_write(p->expire, 0);
-                       continue;
-               }
+                       /*
+                        * We never drain zones local to this processor.
+                        */
+                       if (zone_to_nid(zone) == numa_node_id()) {
+                               __this_cpu_write(p->expire, 0);
+                               continue;
+                       }
 
-               if (__this_cpu_dec_return(p->expire))
-                       continue;
+                       if (__this_cpu_dec_return(p->expire))
+                               continue;
 
-               if (__this_cpu_read(p->pcp.count)) {
-                       drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
-                       changes++;
+                       if (__this_cpu_read(p->pcp.count)) {
+                               drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
+                               changes++;
+                       }
                }
 #endif
        }
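
refresh_cpu_vm_stats() gains a do_pagesets flag so a caller can fold the
per-cpu counter differentials into the global counters without also draining
remote pagesets, the expensive part that needs the cond_resched(). Both
callers appear later in this diff; condensed here as a sketch:

    /* Sketch of the two call sites introduced further down. */
    static void vmstat_update(struct work_struct *w)
    {
            if (refresh_cpu_vm_stats(true))   /* periodic: drain pagesets too */
                    /* ... keep the worker alive ... */;
    }

    void quiet_vmstat(void)
    {
            /* ... */
            refresh_cpu_vm_stats(false);      /* idle entry: fold diffs only */
    }
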
@@ -736,6 +738,7 @@ const char * const vmstat_text[] = {
        "nr_slab_unreclaimable",
        "nr_page_table_pages",
        "nr_kernel_stack",
+       "nr_overhead",
        "nr_unstable",
        "nr_bounce",
        "nr_vmscan_write",
@@ -854,10 +857,8 @@ const char * const vmstat_text[] = {
 #endif
 #endif /* CONFIG_MEMORY_BALLOON */
 #ifdef CONFIG_DEBUG_TLBFLUSH
-#ifdef CONFIG_SMP
        "nr_tlb_remote_flush",
        "nr_tlb_remote_flush_received",
-#endif /* CONFIG_SMP */
        "nr_tlb_local_flush_all",
        "nr_tlb_local_flush_one",
 #endif /* CONFIG_DEBUG_TLBFLUSH */
@@ -865,7 +866,6 @@ const char * const vmstat_text[] = {
 #ifdef CONFIG_DEBUG_VM_VMACACHE
        "vmacache_find_calls",
        "vmacache_find_hits",
-       "vmacache_full_flushes",
 #endif
 #endif /* CONFIG_VM_EVENTS_COUNTERS */
 };
@@ -1091,6 +1091,8 @@ static void pagetypeinfo_showmixedcount_print(struct seq_file *m,
                                continue;
 
                        page_ext = lookup_page_ext(page);
+                       if (unlikely(!page_ext))
+                               continue;
 
                        if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
                                continue;
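
lookup_page_ext() can return NULL when the page_ext storage for a section has
not been allocated (allocation failure, or a race during early boot or memory
hotplug), so callers must check before dereferencing. The guard pattern added
above, in isolation:

    struct page_ext *page_ext = lookup_page_ext(page);

    if (unlikely(!page_ext))        /* ext storage may be absent */
            continue;               /* skip the page instead of oopsing */

    if (!test_bit(PAGE_EXT_OWNER, &page_ext->flags))
            continue;
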
@@ -1348,7 +1350,9 @@ static int vmstat_show(struct seq_file *m, void *arg)
        unsigned long *l = arg;
        unsigned long off = l - (unsigned long *)m->private;
 
-       seq_printf(m, "%s %lu\n", vmstat_text[off], *l);
+       seq_puts(m, vmstat_text[off]);
+       seq_put_decimal_ull(m, ' ', *l);
+       seq_putc(m, '\n');
        return 0;
 }
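
Replacing seq_printf() with seq_puts() plus seq_put_decimal_ull() removes a
format-string parse per counter on every /proc/vmstat read;
seq_put_decimal_ull() emits its separator character followed by the number
directly. The two forms produce identical bytes:

    /* Before: one format-string parse per line. */
    seq_printf(m, "%s %lu\n", vmstat_text[off], *l);

    /* After: three direct emits, same output. */
    seq_puts(m, vmstat_text[off]);
    seq_put_decimal_ull(m, ' ', *l);        /* writes ' ' then the value */
    seq_putc(m, '\n');
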
 
@@ -1386,15 +1390,20 @@ static cpumask_var_t cpu_stat_off;
 
 static void vmstat_update(struct work_struct *w)
 {
-       if (refresh_cpu_vm_stats()) {
+       if (refresh_cpu_vm_stats(true)) {
                /*
                 * Counters were updated so we expect more updates
                 * to occur in the future. Keep on running the
                 * update worker thread.
+                * If we were marked on cpu_stat_off clear the flag
+                * so that vmstat_shepherd doesn't schedule us again.
                 */
-               queue_delayed_work_on(smp_processor_id(), vmstat_wq,
-                       this_cpu_ptr(&vmstat_work),
-                       round_jiffies_relative(sysctl_stat_interval));
+               if (!cpumask_test_and_clear_cpu(smp_processor_id(),
+                                               cpu_stat_off)) {
+                       queue_delayed_work_on(smp_processor_id(), vmstat_wq,
+                               this_cpu_ptr(&vmstat_work),
+                               round_jiffies_relative(sysctl_stat_interval));
+               }
        } else {
                /*
                 * We did not update any counters so the app may be in
@@ -1403,21 +1412,16 @@ static void vmstat_update(struct work_struct *w)
                 * Defer the checking for differentials to the
                 * shepherd thread on a different processor.
                 */
-               int r;
-               /*
-                * Shepherd work thread does not race since it never
-                * changes the bit if its zero but the cpu
-                * online / off line code may race if
-                * worker threads are still allowed during
-                * shutdown / startup.
-                */
-               r = cpumask_test_and_set_cpu(smp_processor_id(),
-                       cpu_stat_off);
-               VM_BUG_ON(r);
+               cpumask_set_cpu(smp_processor_id(), cpu_stat_off);
        }
 }
 
 /*
+ * Switch off vmstat processing and then fold all the remaining differentials
+ * until the diffs stay at zero. The function is used by NOHZ and can only be
+ * invoked when tick processing is not active.
+ */
+/*
  * Check if the diffs for a certain cpu indicate that
  * an update is needed.
  */
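
The rewritten vmstat_update() uses cpu_stat_off as a handshake with the
shepherd and with quiet_vmstat(): a set bit means this cpu's worker is parked.
The VM_BUG_ON is dropped because quiet_vmstat(), added below, may now
legitimately set the bit while the work item is still pending. The decision
logic, condensed as a sketch (cpu, work and interval are placeholders):

    if (refresh_cpu_vm_stats(true)) {
            /* Counters moved: requeue, unless quiet_vmstat() parked us,
             * in which case clear the bit and let the shepherd decide. */
            if (!cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
                    queue_delayed_work_on(cpu, vmstat_wq, work, interval);
    } else {
            /* Nothing changed: park ourselves; the shepherd watches us. */
            cpumask_set_cpu(cpu, cpu_stat_off);
    }
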
@@ -1440,6 +1444,30 @@ static bool need_update(int cpu)
        return false;
 }
 
+void quiet_vmstat(void)
+{
+       if (system_state != SYSTEM_RUNNING)
+               return;
+
+       /*
+        * If we are already in hands of the shepherd then there
+        * is nothing for us to do here.
+        */
+       if (cpumask_test_and_set_cpu(smp_processor_id(), cpu_stat_off))
+               return;
+
+       if (!need_update(smp_processor_id()))
+               return;
+
+       /*
+        * Just refresh counters and do not care about the pending delayed
+        * vmstat_update. It doesn't fire that often to matter and canceling
+        * it would be too expensive from this path.
+        * vmstat_shepherd will take care about that for us.
+        */
+       refresh_cpu_vm_stats(false);
+}
+
 
 /*
  * Shepherd worker thread that checks the
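
quiet_vmstat() is the NOHZ hook added by this change: before a cpu stops its
tick it folds outstanding differentials cheaply (do_pagesets == false) and
sets its bit in cpu_stat_off so the shepherd takes over monitoring. A hedged
sketch of the expected caller; in this patch series the call sits in
kernel/time/tick-sched.c, but treat the exact location as an assumption:

    /* Sketch: NOHZ idle entry folding vmstat before the tick stops. */
    static void tick_nohz_stop_sched_tick(/* ... */)
    {
            /* ... the tick has been deemed stoppable ... */
            quiet_vmstat();         /* fold diffs while still ticking */
            /* ... program the next timer event, stop the tick ... */
    }
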
@@ -1449,7 +1477,7 @@ static bool need_update(int cpu)
  */
 static void vmstat_shepherd(struct work_struct *w);
 
-static DECLARE_DELAYED_WORK(shepherd, vmstat_shepherd);
+static DECLARE_DEFERRABLE_WORK(shepherd, vmstat_shepherd);
 
 static void vmstat_shepherd(struct work_struct *w)
 {
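
Making the shepherd DEFERRABLE means its timer no longer wakes an idle cpu
just to look for counter updates; it runs at the cpu's next natural wakeup
instead. The distinction, simplified from <linux/workqueue.h> (sketch; check
the exact definitions in this tree):

    #define DECLARE_DELAYED_WORK(n, f)                                      \
            struct delayed_work n = __DELAYED_WORK_INITIALIZER(n, f, 0)

    /* TIMER_DEFERRABLE: if the cpu is idle at expiry, wait for the next
     * non-deferrable wakeup rather than waking the cpu. */
    #define DECLARE_DEFERRABLE_WORK(n, f)                                   \
            struct delayed_work n =                                         \
                    __DELAYED_WORK_INITIALIZER(n, f, TIMER_DEFERRABLE)
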
@@ -1457,18 +1485,25 @@ static void vmstat_shepherd(struct work_struct *w)
 
        get_online_cpus();
        /* Check processors whose vmstat worker threads have been disabled */
-       for_each_cpu(cpu, cpu_stat_off)
-               if (need_update(cpu) &&
-                       cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
-
-                       queue_delayed_work_on(cpu, vmstat_wq,
-                               &per_cpu(vmstat_work, cpu), 0);
-
+       for_each_cpu(cpu, cpu_stat_off) {
+               struct delayed_work *dw = &per_cpu(vmstat_work, cpu);
+
+               if (need_update(cpu)) {
+                       if (cpumask_test_and_clear_cpu(cpu, cpu_stat_off))
+                               queue_delayed_work_on(cpu, vmstat_wq, dw, 0);
+               } else {
+                       /*
+                        * Cancel the work if quiet_vmstat has put this
+                        * cpu on cpu_stat_off because the work item might
+                        * be still scheduled
+                        */
+                       cancel_delayed_work(dw);
+               }
+       }
        put_online_cpus();
 
        schedule_delayed_work(&shepherd,
                round_jiffies_relative(sysctl_stat_interval));
-
 }
 
 static void __init start_shepherd_timer(void)
@@ -1483,6 +1518,7 @@ static void __init start_shepherd_timer(void)
                BUG();
        cpumask_copy(cpu_stat_off, cpu_online_mask);
 
+       vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
        schedule_delayed_work(&shepherd,
                round_jiffies_relative(sysctl_stat_interval));
 }
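
Allocating vmstat_wq here, rather than in setup_vmstat() (the old site is
removed in the final hunk), guarantees the workqueue exists before the
shepherd is scheduled and can queue the first per-cpu work item on it.
WQ_FREEZABLE keeps vmstat work off during suspend; WQ_MEM_RECLAIM guarantees
forward progress under memory pressure. The ordering being enforced, as a
sketch:

    static void __init start_shepherd_timer(void)
    {
            /* ... initialize vmstat_work and cpu_stat_off ... */
            vmstat_wq = alloc_workqueue("vmstat",
                                        WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
            /* Only now is it safe to let the shepherd run: it may call
             * queue_delayed_work_on(..., vmstat_wq, ...) immediately. */
            schedule_delayed_work(&shepherd,
                    round_jiffies_relative(sysctl_stat_interval));
    }
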
@@ -1550,11 +1586,10 @@ static int __init setup_vmstat(void)
 
        start_shepherd_timer();
        cpu_notifier_register_done();
-       vmstat_wq = alloc_workqueue("vmstat", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
 #endif
 #ifdef CONFIG_PROC_FS
        proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
-       proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
+       proc_create("pagetypeinfo", 0400, NULL, &pagetypeinfo_file_ops);
        proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
        proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
 #endif
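
The mode change for "pagetypeinfo" from S_IRUGO (0444, world-readable) to
0400 restricts the file to root: generating it walks the free lists under the
zone lock, which is expensive enough that unprivileged readers could use it
to degrade the system. The other proc files stay world-readable. The modes
spelled out:

    /* S_IRUGO is a kernel shorthand from <linux/stat.h>. */
    #define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)  /* 0444: anyone may read */
    /* 0400 == S_IRUSR: only the owner may read (root, for procfs). */
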