diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5fec1ba..2836b53 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -174,7 +174,8 @@ static unsigned long zone_reclaimable_pages(struct zone *zone)
 
 bool zone_reclaimable(struct zone *zone)
 {
-       return zone->pages_scanned < zone_reclaimable_pages(zone) * 6;
+       return zone_page_state(zone, NR_PAGES_SCANNED) <
+               zone_reclaimable_pages(zone) * 6;
 }
 
 static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
@@ -576,9 +577,10 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 
        if (PageSwapCache(page)) {
                swp_entry_t swap = { .val = page_private(page) };
+               mem_cgroup_swapout(page, swap);
                __delete_from_swap_cache(page);
                spin_unlock_irq(&mapping->tree_lock);
-               swapcache_free(swap, page);
+               swapcache_free(swap);
        } else {
                void (*freepage)(struct page *);
                void *shadow = NULL;
@@ -599,7 +601,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
                        shadow = workingset_eviction(mapping, page);
                __delete_from_page_cache(page, shadow);
                spin_unlock_irq(&mapping->tree_lock);
-               mem_cgroup_uncharge_cache_page(page);
 
                if (freepage != NULL)
                        freepage(page);
@@ -821,7 +822,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 
        cond_resched();
 
-       mem_cgroup_uncharge_start();
        while (!list_empty(page_list)) {
                struct address_space *mapping;
                struct page *page;
@@ -1132,11 +1132,12 @@ keep:
                VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
        }
 
+       mem_cgroup_uncharge_list(&free_pages);
        free_hot_cold_page_list(&free_pages, true);
 
        list_splice(&ret_pages, page_list);
        count_vm_events(PGACTIVATE, pgactivate);
-       mem_cgroup_uncharge_end();
+
        *ret_nr_dirty += nr_dirty;
        *ret_nr_congested += nr_congested;
        *ret_nr_unqueued_dirty += nr_unqueued_dirty;
@@ -1436,6 +1437,7 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&zone->lru_lock);
+                               mem_cgroup_uncharge(page);
                                (*get_compound_page_dtor(page))(page);
                                spin_lock_irq(&zone->lru_lock);
                        } else
@@ -1508,7 +1510,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
 
        if (global_reclaim(sc)) {
-               zone->pages_scanned += nr_scanned;
+               __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
                if (current_is_kswapd())
                        __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
                else
@@ -1543,6 +1545,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
 
        spin_unlock_irq(&zone->lru_lock);
 
+       mem_cgroup_uncharge_list(&page_list);
        free_hot_cold_page_list(&page_list, true);
 
        /*
@@ -1657,6 +1660,7 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&zone->lru_lock);
+                               mem_cgroup_uncharge(page);
                                (*get_compound_page_dtor(page))(page);
                                spin_lock_irq(&zone->lru_lock);
                        } else
@@ -1698,7 +1702,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
        nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
                                     &nr_scanned, sc, isolate_mode, lru);
        if (global_reclaim(sc))
-               zone->pages_scanned += nr_scanned;
+               __mod_zone_page_state(zone, NR_PAGES_SCANNED, nr_scanned);
 
        reclaim_stat->recent_scanned[file] += nr_taken;
 
@@ -1755,7 +1759,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
         * Count referenced pages from currently used mappings as rotated,
         * even though only some of them are actually re-activated.  This
         * helps balance scan pressure between file and anonymous pages in
-        * get_scan_ratio.
+        * get_scan_count.
         */
        reclaim_stat->recent_rotated[file] += nr_rotated;
 
@@ -1764,6 +1768,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
        __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
        spin_unlock_irq(&zone->lru_lock);
 
+       mem_cgroup_uncharge_list(&l_hold);
        free_hot_cold_page_list(&l_hold, true);
 }
 
@@ -1929,11 +1934,6 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
                goto out;
        }
 
-       anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
-               get_lru_size(lruvec, LRU_INACTIVE_ANON);
-       file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
-               get_lru_size(lruvec, LRU_INACTIVE_FILE);
-
        /*
         * Prevent the reclaimer from falling into the cache trap: as
         * cache pages start out inactive, every cache fault will tip
@@ -1944,9 +1944,14 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
         * anon pages.  Try to detect this based on file LRU size.
         */
        if (global_reclaim(sc)) {
-               unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
+               unsigned long zonefile;
+               unsigned long zonefree;
+
+               zonefree = zone_page_state(zone, NR_FREE_PAGES);
+               zonefile = zone_page_state(zone, NR_ACTIVE_FILE) +
+                          zone_page_state(zone, NR_INACTIVE_FILE);
 
-               if (unlikely(file + free <= high_wmark_pages(zone))) {
+               if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) {
                        scan_balance = SCAN_ANON;
                        goto out;
                }
@@ -1981,6 +1986,12 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
         *
         * anon in [0], file in [1]
         */
+
+       anon  = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
+               get_lru_size(lruvec, LRU_INACTIVE_ANON);
+       file  = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
+               get_lru_size(lruvec, LRU_INACTIVE_FILE);
+
        spin_lock_irq(&zone->lru_lock);
        if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
                reclaim_stat->recent_scanned[0] /= 2;
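
Illustrative note, not part of the diff: the first hunk only changes where the scan count is read from (the NR_PAGES_SCANNED vmstat counter instead of zone->pages_scanned); the heuristic itself is unchanged, a zone still counts as reclaimable while the pages scanned stay under six times its reclaimable LRU pages. Below is a minimal user-space C sketch of that threshold check; struct zone_sample, its fields, and the numbers are made up for illustration and are not kernel API.

#include <stdbool.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the per-zone state involved here: after this
 * patch the scan count comes from zone_page_state(zone, NR_PAGES_SCANNED)
 * rather than zone->pages_scanned, but the comparison is the same.
 */
struct zone_sample {
	unsigned long pages_scanned;     /* pages scanned since last reclaim progress */
	unsigned long reclaimable_pages; /* what zone_reclaimable_pages() would return */
};

/*
 * Mirrors the zone_reclaimable() check in the first hunk: give up on a
 * zone once it has been scanned six times over without progress.
 */
static bool zone_reclaimable_sample(const struct zone_sample *z)
{
	return z->pages_scanned < z->reclaimable_pages * 6;
}

int main(void)
{
	struct zone_sample z = { .pages_scanned = 5000, .reclaimable_pages = 1000 };

	printf("scanned=%lu reclaimable=%lu -> %s\n",
	       z.pages_scanned, z.reclaimable_pages,
	       zone_reclaimable_sample(&z) ? "still reclaimable" : "give up");

	z.pages_scanned = 6000;	/* at the 6x threshold, no longer reclaimable */
	printf("scanned=%lu reclaimable=%lu -> %s\n",
	       z.pages_scanned, z.reclaimable_pages,
	       zone_reclaimable_sample(&z) ? "still reclaimable" : "give up");
	return 0;
}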