diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8b80115..df58941 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -267,28 +267,37 @@ EXPORT_SYMBOL(nr_online_nodes);
 int page_group_by_mobility_disabled __read_mostly;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+
+/*
+ * Determine how many pages need to be initialized during early boot
+ * (non-deferred initialization).
+ * The value of first_deferred_pfn will be set later, once non-deferred pages
+ * are initialized, but for now set it to ULONG_MAX.
+ */
 static inline void reset_deferred_meminit(pg_data_t *pgdat)
 {
-       unsigned long max_initialise;
-       unsigned long reserved_lowmem;
+       phys_addr_t start_addr, end_addr;
+       unsigned long max_pgcnt;
+       unsigned long reserved;
 
        /*
         * Initialise at least 2G of a node but also take into account
         * two large system hashes that can take up 1GB for 0.25TB/node.
         */
-       max_initialise = max(2UL << (30 - PAGE_SHIFT),
-               (pgdat->node_spanned_pages >> 8));
+       max_pgcnt = max(2UL << (30 - PAGE_SHIFT),
+                       (pgdat->node_spanned_pages >> 8));
 
        /*
         * Compensate for all the memblock reservations (e.g. crash kernel)
         * from the initial estimation to make sure we will initialize enough
         * memory to boot.
         */
-       reserved_lowmem = memblock_reserved_memory_within(pgdat->node_start_pfn,
-                       pgdat->node_start_pfn + max_initialise);
-       max_initialise += reserved_lowmem;
+       start_addr = PFN_PHYS(pgdat->node_start_pfn);
+       end_addr = PFN_PHYS(pgdat->node_start_pfn + max_pgcnt);
+       reserved = memblock_reserved_memory_within(start_addr, end_addr);
+       max_pgcnt += PHYS_PFN(reserved);
 
-       pgdat->static_init_size = min(max_initialise, pgdat->node_spanned_pages);
+       pgdat->static_init_pgcnt = min(max_pgcnt, pgdat->node_spanned_pages);
        pgdat->first_deferred_pfn = ULONG_MAX;
 }
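
The sizing above is plain arithmetic: take the larger of 2G worth of pages or 1/256th of the node's spanned pages, then add back the pages memblock has already reserved in that range. Below is a minimal userspace sketch of that calculation, assuming 4K pages and an invented 1TB node with 256MB reserved (none of these numbers come from the patch; it calls no kernel API):

#include <stdio.h>

#define PAGE_SHIFT 12UL                         /* assume 4K pages */
#define MAX(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
        /* hypothetical node: 1TB spanned, 256MB reserved by memblock */
        unsigned long spanned_pages  = 1024UL << (30 - PAGE_SHIFT);
        unsigned long reserved_pages = 256UL << (20 - PAGE_SHIFT);

        /* at least 2G of pages, or 1/256th of the node, whichever is larger */
        unsigned long max_pgcnt = MAX(2UL << (30 - PAGE_SHIFT),
                                      spanned_pages >> 8);

        /* compensate for memblock reservations (e.g. crash kernel) */
        max_pgcnt += reserved_pages;

        printf("static_init_pgcnt = %lu pages (~%lu MB)\n",
               max_pgcnt, max_pgcnt >> (20 - PAGE_SHIFT));
        return 0;
}

In the kernel the result is additionally clamped to node_spanned_pages before being stored in pgdat->static_init_pgcnt.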
 
@@ -324,7 +333,7 @@ static inline bool update_defer_init(pg_data_t *pgdat,
                return true;
        /* Initialise at least 2G of the highest zone */
        (*nr_initialised)++;
-       if ((*nr_initialised > pgdat->static_init_size) &&
+       if ((*nr_initialised > pgdat->static_init_pgcnt) &&
            (pfn & (PAGES_PER_SECTION - 1)) == 0) {
                pgdat->first_deferred_pfn = pfn;
                return false;
@@ -560,6 +569,9 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
                return;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
        INIT_LIST_HEAD(&page->lru);
@@ -577,6 +589,9 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
                return;
 
        page_ext = lookup_page_ext(page);
+       if (unlikely(!page_ext))
+               return;
+
        __clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
 
        set_page_private(page, 0);
@@ -1527,14 +1542,14 @@ int move_freepages(struct zone *zone,
 #endif
 
        for (page = start_page; page <= end_page;) {
-               /* Make sure we are not inadvertently changing nodes */
-               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
                if (!pfn_valid_within(page_to_pfn(page))) {
                        page++;
                        continue;
                }
 
+               /* Make sure we are not inadvertently changing nodes */
+               VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+
                if (!PageBuddy(page)) {
                        page++;
                        continue;
@@ -1748,13 +1763,25 @@ static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
                                                struct page, lru);
 
                        /*
-                        * It should never happen but changes to locking could
-                        * inadvertently allow a per-cpu drain to add pages
-                        * to MIGRATE_HIGHATOMIC while unreserving so be safe
-                        * and watch for underflows.
+                        * In the page freeing path, migratetype changes are
+                        * racy, so we can encounter several free pages in a
+                        * pageblock in this loop although we changed the
+                        * pageblock type from highatomic to ac->migratetype.
+                        * So we should adjust the count only once.
                         */
-                       zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
-                               zone->nr_reserved_highatomic);
+                       if (get_pageblock_migratetype(page) ==
+                                                       MIGRATE_HIGHATOMIC) {
+                               /*
+                                * It should never happen but changes to
+                                * locking could inadvertently allow a per-cpu
+                                * drain to add pages to MIGRATE_HIGHATOMIC
+                                * while unreserving so be safe and watch for
+                                * underflows.
+                                */
+                               zone->nr_reserved_highatomic -= min(
+                                               pageblock_nr_pages,
+                                               zone->nr_reserved_highatomic);
+                       }
 
                        /*
                         * Convert to ac->migratetype and avoid the normal
@@ -2441,9 +2468,6 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
                if (!area->nr_free)
                        continue;
 
-               if (alloc_harder)
-                       return true;
-
                for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
                        if (!list_empty(&area->free_list[mt]))
                                return true;
@@ -2455,6 +2479,9 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order,
                        return true;
                }
 #endif
+               if (alloc_harder &&
+                       !list_empty(&area->free_list[MIGRATE_HIGHATOMIC]))
+                       return true;
        }
        return false;
 }
@@ -3082,8 +3109,6 @@ retry:
                 * the allocation is high priority and these type of
                 * allocations are system rather than user orientated
                 */
-               ac->zonelist = node_zonelist(numa_node_id(), gfp_mask);
-
                page = __alloc_pages_high_priority(gfp_mask, order, ac);
 
                if (page) {
@@ -3625,6 +3650,49 @@ static inline void show_node(struct zone *zone)
                printk("Node %d ", zone_to_nid(zone));
 }
 
+long si_mem_available(void)
+{
+       long available;
+       unsigned long pagecache;
+       unsigned long wmark_low = 0;
+       unsigned long pages[NR_LRU_LISTS];
+       struct zone *zone;
+       int lru;
+
+       for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
+               pages[lru] = global_page_state(NR_LRU_BASE + lru);
+
+       for_each_zone(zone)
+               wmark_low += zone->watermark[WMARK_LOW];
+
+       /*
+        * Estimate the amount of memory available for userspace allocations,
+        * without causing swapping.
+        */
+       available = global_page_state(NR_FREE_PAGES) - totalreserve_pages;
+
+       /*
+        * Not all the page cache can be freed, otherwise the system will
+        * start swapping. Assume at least half of the page cache, or the
+        * low watermark worth of cache, needs to stay.
+        */
+       pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
+       pagecache -= min(pagecache / 2, wmark_low);
+       available += pagecache;
+
+       /*
+        * Part of the reclaimable slab consists of items that are in use,
+        * and cannot be freed. Cap this estimate at the low watermark.
+        */
+       available += global_page_state(NR_SLAB_RECLAIMABLE) -
+                    min(global_page_state(NR_SLAB_RECLAIMABLE) / 2, wmark_low);
+
+       if (available < 0)
+               available = 0;
+       return available;
+}
+EXPORT_SYMBOL_GPL(si_mem_available);
+
 void si_meminfo(struct sysinfo *val)
 {
        val->totalram = totalram_pages;
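
The estimate si_mem_available() computes above reduces to: free pages minus totalreserve_pages, plus whatever part of the file page cache and reclaimable slab could be released without eating into the low watermark (at most half of each, the deduction capped at wmark_low). Below is a minimal userspace sketch of that formula, with a hypothetical helper name and invented page counts; nothing here touches the kernel's counters:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* replay of the si_mem_available() estimate; all arguments are page counts */
static long mem_available_estimate(long free, long totalreserve,
                                   long active_file, long inactive_file,
                                   long slab_reclaimable, long wmark_low)
{
        long available = free - totalreserve;

        /* at least half the page cache, or wmark_low of it, has to stay */
        long pagecache = active_file + inactive_file;
        available += pagecache - MIN(pagecache / 2, wmark_low);

        /* same haircut for reclaimable slab */
        available += slab_reclaimable - MIN(slab_reclaimable / 2, wmark_low);

        return available < 0 ? 0 : available;
}

int main(void)
{
        /* made-up system, chosen only to show the shape of the estimate */
        long est = mem_available_estimate(100000, 20000,   /* free, reserve  */
                                          300000, 200000,  /* file LRU pages */
                                          50000, 10000);   /* slab, wmark    */
        printf("estimated available: %ld pages\n", est);
        return 0;
}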
@@ -6804,7 +6872,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
        /* Make sure the range is really isolated. */
        if (test_pages_isolated(outer_start, end, false)) {
-               pr_info("%s: [%lx, %lx) PFNs busy\n",
+               pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
                        __func__, outer_start, end);
                ret = -EBUSY;
                goto done;