OSDN Git Service

mm/compaction.c: pass pageblock-aligned ranges to pageblock_pfn_to_page(); fix invalid cached free_pfn/migrate_pfn; abort free scanner when split_free_page() fails
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / mm / compaction.c
index de3e1e7..b6f145e 100644 (file)
@@ -200,7 +200,8 @@ static void reset_cached_positions(struct zone *zone)
 {
        zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
        zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
-       zone->compact_cached_free_pfn = zone_end_pfn(zone);
+       zone->compact_cached_free_pfn =
+                       round_down(zone_end_pfn(zone) - 1, pageblock_nr_pages);
 }
 
 /*
@@ -475,25 +476,23 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                /* Found a free page, break it into order-0 pages */
                isolated = split_free_page(page);
+               if (!isolated)
+                       break;
+
                total_isolated += isolated;
+               cc->nr_freepages += isolated;
                for (i = 0; i < isolated; i++) {
                        list_add(&page->lru, freelist);
                        page++;
                }
-
-               /* If a page was split, advance to the end of it */
-               if (isolated) {
-                       cc->nr_freepages += isolated;
-                       if (!strict &&
-                               cc->nr_migratepages <= cc->nr_freepages) {
-                               blockpfn += isolated;
-                               break;
-                       }
-
-                       blockpfn += isolated - 1;
-                       cursor += isolated - 1;
-                       continue;
+               if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
+                       blockpfn += isolated;
+                       break;
                }
+               /* Advance to the end of split page */
+               blockpfn += isolated - 1;
+               cursor += isolated - 1;
+               continue;
 
 isolate_fail:
                if (strict)
@@ -503,6 +502,9 @@ isolate_fail:
 
        }
 
+       if (locked)
+               spin_unlock_irqrestore(&cc->zone->lock, flags);
+
        /*
         * There is a tiny chance that we have read bogus compound_order(),
         * so be careful to not go outside of the pageblock.
@@ -524,9 +526,6 @@ isolate_fail:
        if (strict && blockpfn < end_pfn)
                total_isolated = 0;
 
-       if (locked)
-               spin_unlock_irqrestore(&cc->zone->lock, flags);
-
        /* Update the pageblock-skip if the whole pageblock was scanned */
        if (blockpfn == end_pfn)
                update_pageblock_skip(cc, valid_page, total_isolated, false);
@@ -554,13 +553,17 @@ unsigned long
 isolate_freepages_range(struct compact_control *cc,
                        unsigned long start_pfn, unsigned long end_pfn)
 {
-       unsigned long isolated, pfn, block_end_pfn;
+       unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
        LIST_HEAD(freelist);
 
        pfn = start_pfn;
+       block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
+       if (block_start_pfn < cc->zone->zone_start_pfn)
+               block_start_pfn = cc->zone->zone_start_pfn;
        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 
        for (; pfn < end_pfn; pfn += isolated,
+                               block_start_pfn = block_end_pfn,
                                block_end_pfn += pageblock_nr_pages) {
                /* Protect pfn from changing by isolate_freepages_block */
                unsigned long isolate_start_pfn = pfn;
@@ -573,11 +576,13 @@ isolate_freepages_range(struct compact_control *cc,
                 * scanning range to right one.
                 */
                if (pfn >= block_end_pfn) {
+                       block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
                        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                        block_end_pfn = min(block_end_pfn, end_pfn);
                }
 
-               if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
+               if (!pageblock_pfn_to_page(block_start_pfn,
+                                       block_end_pfn, cc->zone))
                        break;
 
                isolated = isolate_freepages_block(cc, &isolate_start_pfn,
@@ -863,33 +868,30 @@ unsigned long
 isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
                                                        unsigned long end_pfn)
 {
-       unsigned long pfn, block_end_pfn;
+       unsigned long pfn, block_start_pfn, block_end_pfn;
 
        /* Scan block by block. First and last block may be incomplete */
        pfn = start_pfn;
+       block_start_pfn = pfn & ~(pageblock_nr_pages - 1);
+       if (block_start_pfn < cc->zone->zone_start_pfn)
+               block_start_pfn = cc->zone->zone_start_pfn;
        block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 
        for (; pfn < end_pfn; pfn = block_end_pfn,
+                               block_start_pfn = block_end_pfn,
                                block_end_pfn += pageblock_nr_pages) {
 
                block_end_pfn = min(block_end_pfn, end_pfn);
 
-               if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
+               if (!pageblock_pfn_to_page(block_start_pfn,
+                                       block_end_pfn, cc->zone))
                        continue;
 
                pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
                                                        ISOLATE_UNEVICTABLE);
 
-               /*
-                * In case of fatal failure, release everything that might
-                * have been isolated in the previous iteration, and signal
-                * the failure back to caller.
-                */
-               if (!pfn) {
-                       putback_movable_pages(&cc->migratepages);
-                       cc->nr_migratepages = 0;
+               if (!pfn)
                        break;
-               }
 
                if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
                        break;
@@ -974,7 +976,6 @@ static void isolate_freepages(struct compact_control *cc)
                                block_end_pfn = block_start_pfn,
                                block_start_pfn -= pageblock_nr_pages,
                                isolate_start_pfn = block_start_pfn) {
-
                /*
                 * This can iterate a massively long zone without finding any
                 * suitable migration targets, so periodically check if we need
@@ -998,32 +999,30 @@ static void isolate_freepages(struct compact_control *cc)
                        continue;
 
                /* Found a block suitable for isolating free pages from. */
-               isolate_freepages_block(cc, &isolate_start_pfn,
-                                       block_end_pfn, freelist, false);
+               isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
+                                       freelist, false);
 
                /*
-                * If we isolated enough freepages, or aborted due to async
-                * compaction being contended, terminate the loop.
-                * Remember where the free scanner should restart next time,
-                * which is where isolate_freepages_block() left off.
-                * But if it scanned the whole pageblock, isolate_start_pfn
-                * now points at block_end_pfn, which is the start of the next
-                * pageblock.
-                * In that case we will however want to restart at the start
-                * of the previous pageblock.
+                * If we isolated enough freepages, or aborted due to lock
+                * contention, terminate.
                 */
                if ((cc->nr_freepages >= cc->nr_migratepages)
                                                        || cc->contended) {
-                       if (isolate_start_pfn >= block_end_pfn)
+                       if (isolate_start_pfn >= block_end_pfn) {
+                               /*
+                                * Restart at previous pageblock if more
+                                * freepages can be isolated next time.
+                                */
                                isolate_start_pfn =
                                        block_start_pfn - pageblock_nr_pages;
+                       }
                        break;
-               } else {
+               } else if (isolate_start_pfn < block_end_pfn) {
                        /*
-                        * isolate_freepages_block() should not terminate
-                        * prematurely unless contended, or isolated enough
+                        * If isolation failed early, do not continue
+                        * needlessly.
                         */
-                       VM_BUG_ON(isolate_start_pfn < block_end_pfn);
+                       break;
                }
        }
 
@@ -1103,7 +1102,9 @@ int sysctl_compact_unevictable_allowed __read_mostly = 1;
 static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                        struct compact_control *cc)
 {
-       unsigned long low_pfn, end_pfn;
+       unsigned long block_start_pfn;
+       unsigned long block_end_pfn;
+       unsigned long low_pfn;
        unsigned long isolate_start_pfn;
        struct page *page;
        const isolate_mode_t isolate_mode =
@@ -1115,16 +1116,21 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
         * initialized by compact_zone()
         */
        low_pfn = cc->migrate_pfn;
+       block_start_pfn = cc->migrate_pfn & ~(pageblock_nr_pages - 1);
+       if (block_start_pfn < zone->zone_start_pfn)
+               block_start_pfn = zone->zone_start_pfn;
 
        /* Only scan within a pageblock boundary */
-       end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
+       block_end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);
 
        /*
         * Iterate over whole pageblocks until we find the first suitable.
         * Do not cross the free scanner.
         */
-       for (; end_pfn <= cc->free_pfn;
-                       low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {
+       for (; block_end_pfn <= cc->free_pfn;
+                       low_pfn = block_end_pfn,
+                       block_start_pfn = block_end_pfn,
+                       block_end_pfn += pageblock_nr_pages) {
 
                /*
                 * This can potentially iterate a massively long zone with
@@ -1135,7 +1141,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                                                && compact_should_abort(cc))
                        break;
 
-               page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
+               page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
+                                                                       zone);
                if (!page)
                        continue;
 
@@ -1154,8 +1161,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 
                /* Perform the isolation */
                isolate_start_pfn = low_pfn;
-               low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
-                                                               isolate_mode);
+               low_pfn = isolate_migratepages_block(cc, low_pfn,
+                                               block_end_pfn, isolate_mode);
 
                if (!low_pfn || cc->contended) {
                        acct_isolated(zone, cc);
@@ -1371,11 +1378,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
         */
        cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
        cc->free_pfn = zone->compact_cached_free_pfn;
-       if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
-               cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
+       if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
+               cc->free_pfn = round_down(end_pfn - 1, pageblock_nr_pages);
                zone->compact_cached_free_pfn = cc->free_pfn;
        }
-       if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
+       if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
                cc->migrate_pfn = start_pfn;
                zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
                zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;