
vmscan: tracing: add trace events for kswapd wakeup, sleeping and direct reclaim
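
The trace_mm_vmscan_* calls added below depend on TRACE_EVENT definitions in include/trace/events/vmscan.h, which this per-file diff does not show. As a minimal sketch of what the kswapd sleep event could look like on the header side (the event layout is an assumption reconstructed from the call sites, not quoted from this page):

#undef TRACE_SYSTEM
#define TRACE_SYSTEM vmscan

#if !defined(_TRACE_VMSCAN_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_VMSCAN_H

#include <linux/tracepoint.h>

TRACE_EVENT(mm_vmscan_kswapd_sleep,

	TP_PROTO(int nid),

	TP_ARGS(nid),

	TP_STRUCT__entry(
		__field(int, nid)
	),

	TP_fast_assign(
		/* record which NUMA node's kswapd is going back to sleep */
		__entry->nid = nid;
	),

	TP_printk("nid=%d", __entry->nid)
);

#endif /* _TRACE_VMSCAN_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

The #define CREATE_TRACE_POINTS ahead of the #include in the first hunk is what emits the actual tracepoint instances into mm/vmscan.c; every other user includes the same header without that define.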
[android-x86/kernel.git] mm/vmscan.c
index b94fe1b..c99bc41 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -48,6 +48,9 @@
 
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/vmscan.h>
+
 struct scan_control {
        /* Incremented by the number of inactive pages that were scanned */
        unsigned long nr_scanned;
@@ -1787,7 +1790,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
        bool all_unreclaimable;
        unsigned long total_scanned = 0;
        struct reclaim_state *reclaim_state = current->reclaim_state;
-       unsigned long lru_pages = 0;
        struct zoneref *z;
        struct zone *zone;
        enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
@@ -1798,18 +1800,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
        if (scanning_global_lru(sc))
                count_vm_event(ALLOCSTALL);
-       /*
-        * mem_cgroup will not do shrink_slab.
-        */
-       if (scanning_global_lru(sc)) {
-               for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
-
-                       if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-                               continue;
-
-                       lru_pages += zone_reclaimable_pages(zone);
-               }
-       }
 
        for (priority = DEF_PRIORITY; priority >= 0; priority--) {
                sc->nr_scanned = 0;
@@ -1821,6 +1811,14 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                 * over limit cgroups
                 */
                if (scanning_global_lru(sc)) {
+                       unsigned long lru_pages = 0;
+                       for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+                               if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+                                       continue;
+
+                               lru_pages += zone_reclaimable_pages(zone);
+                       }
+
                        shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
                        if (reclaim_state) {
                                sc->nr_reclaimed += reclaim_state->reclaimed_slab;
@@ -1888,6 +1886,7 @@ out:
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                                gfp_t gfp_mask, nodemask_t *nodemask)
 {
+       unsigned long nr_reclaimed;
        struct scan_control sc = {
                .gfp_mask = gfp_mask,
                .may_writepage = !laptop_mode,
@@ -1900,7 +1899,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                .nodemask = nodemask,
        };
 
-       return do_try_to_free_pages(zonelist, &sc);
+       trace_mm_vmscan_direct_reclaim_begin(order,
+                               sc.may_writepage,
+                               gfp_mask);
+
+       nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+
+       trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
+
+       return nr_reclaimed;
 }
 
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
@@ -2299,9 +2306,10 @@ static int kswapd(void *p)
                                 * premature sleep. If not, then go fully
                                 * to sleep until explicitly woken up
                                 */
-                               if (!sleeping_prematurely(pgdat, order, remaining))
+                               if (!sleeping_prematurely(pgdat, order, remaining)) {
+                                       trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
                                        schedule();
-                               else {
+                               } else {
                                        if (remaining)
                                                count_vm_event(KSWAPD_LOW_WMARK_HIT_QUICKLY);
                                        else
@@ -2321,8 +2329,10 @@ static int kswapd(void *p)
                 * We can speed up thawing tasks if we don't call balance_pgdat
                 * after returning from the refrigerator
                 */
-               if (!ret)
+               if (!ret) {
+                       trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
                        balance_pgdat(pgdat, order);
+               }
        }
        return 0;
 }
@@ -2342,6 +2352,7 @@ void wakeup_kswapd(struct zone *zone, int order)
                return;
        if (pgdat->kswapd_max_order < order)
                pgdat->kswapd_max_order = order;
+       trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
        if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
                return;
        if (!waitqueue_active(&pgdat->kswapd_wait))
@@ -2592,7 +2603,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
        };
        unsigned long slab_reclaimable;
 
-       disable_swap_token();
        cond_resched();
        /*
         * We need to be able to allocate from the reserves for RECLAIM_SWAP
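
In try_to_free_pages() above, the begin/end pair brackets do_try_to_free_pages(), so the order, may_writepage setting and gfp_mask of an allocation that stalled can be matched against how many pages its direct reclaim finally freed. A sketch of the matching begin event, again an assumption about the header side (show_gfp_flags() is assumed to be available for decoding gfp_t, as in the other mm trace headers):

TRACE_EVENT(mm_vmscan_direct_reclaim_begin,

	TP_PROTO(int order, int may_writepage, gfp_t gfp_flags),

	TP_ARGS(order, may_writepage, gfp_flags),

	TP_STRUCT__entry(
		__field(int, order)
		__field(int, may_writepage)
		__field(gfp_t, gfp_flags)
	),

	TP_fast_assign(
		/* snapshot the allocation context that triggered direct reclaim */
		__entry->order = order;
		__entry->may_writepage = may_writepage;
		__entry->gfp_flags = gfp_flags;
	),

	TP_printk("order=%d may_writepage=%d gfp_flags=%s",
		__entry->order, __entry->may_writepage,
		show_gfp_flags(__entry->gfp_flags))
);

Once the patched kernel is running, the events can be switched on through the debugfs tracing interface (echo 1 > /sys/kernel/debug/tracing/events/vmscan/enable) and observed via /sys/kernel/debug/tracing/trace_pipe; no further rebuild is needed.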