of: Add OF_DMA_DEFAULT_COHERENT & select it on powerpc
[sagit-ice-cold/kernel_xiaomi_msm8998.git] / mm / memory_hotplug.c
index 67d488a..804cbfe 100644
@@ -32,6 +32,7 @@
 #include <linux/hugetlb.h>
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
+#include <linux/rmap.h>
 
 #include <asm/tlbflush.h>
 
@@ -1357,7 +1358,8 @@ static struct page *next_active_pageblock(struct page *page)
 int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
        struct page *page = pfn_to_page(start_pfn);
-       struct page *end_page = page + nr_pages;
+       unsigned long end_pfn = min(start_pfn + nr_pages, zone_end_pfn(page_zone(page)));
+       struct page *end_page = pfn_to_page(end_pfn);
 
        /* Check the starting page of each pageblock within the range */
        for (; page < end_page; page = next_active_pageblock(page)) {
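
For readers outside the diff context, the new bound computation can be read in isolation as the sketch below (illustrative only; the helper name is made up, while page_zone(), pfn_to_page(), zone_end_pfn() and min() are the regular kernel facilities). The point of the change is that the requested range may extend past the end of the zone containing start_pfn, and pageblocks beyond the zone end must not be examined:

/*
 * Illustrative sketch (hypothetical helper): bound the removability
 * walk to the zone that contains start_pfn, so the pageblock scan
 * never steps past the zone end.
 */
static unsigned long removable_end_pfn(unsigned long start_pfn,
				       unsigned long nr_pages)
{
	struct zone *zone = page_zone(pfn_to_page(start_pfn));

	/* Stop at whichever comes first: the requested end or the zone end. */
	return min(start_pfn + nr_pages, zone_end_pfn(zone));
}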
@@ -1371,29 +1373,52 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 /*
- * Confirm all pages in a range [start, end) is belongs to the same zone.
+ * Confirm all pages in a range [start, end) belong to the same zone.
+ * When true, return its valid [start, end).
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
+int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
+                        unsigned long *valid_start, unsigned long *valid_end)
 {
-       unsigned long pfn;
+       unsigned long pfn, sec_end_pfn;
+       unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
-       for (pfn = start_pfn;
+       for (pfn = start_pfn, sec_end_pfn = SECTION_ALIGN_UP(start_pfn + 1);
             pfn < end_pfn;
-            pfn += MAX_ORDER_NR_PAGES) {
-               i = 0;
-               /* This is just a CONFIG_HOLES_IN_ZONE check.*/
-               while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
-                       i++;
-               if (i == MAX_ORDER_NR_PAGES)
+            pfn = sec_end_pfn, sec_end_pfn += PAGES_PER_SECTION) {
+               /* Make sure the memory section is present first */
+               if (!present_section_nr(pfn_to_section_nr(pfn)))
                        continue;
-               page = pfn_to_page(pfn + i);
-               if (zone && page_zone(page) != zone)
-                       return 0;
-               zone = page_zone(page);
+               for (; pfn < sec_end_pfn && pfn < end_pfn;
+                    pfn += MAX_ORDER_NR_PAGES) {
+                       i = 0;
+                       /* This is just a CONFIG_HOLES_IN_ZONE check.*/
+                       while ((i < MAX_ORDER_NR_PAGES) &&
+                               !pfn_valid_within(pfn + i))
+                               i++;
+                       if (i == MAX_ORDER_NR_PAGES)
+                               continue;
+                       /* Check if we got outside of the zone */
+                       if (zone && !zone_spans_pfn(zone, pfn + i))
+                               return 0;
+                       page = pfn_to_page(pfn + i);
+                       if (zone && page_zone(page) != zone)
+                               return 0;
+                       if (!zone)
+                               start = pfn + i;
+                       zone = page_zone(page);
+                       end = pfn + MAX_ORDER_NR_PAGES;
+               }
+       }
+
+       if (zone) {
+               *valid_start = start;
+               *valid_end = end;
+               return 1;
+       } else {
+               return 0;
        }
-       return 1;
 }
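
A minimal caller sketch for the new signature (the wrapper name zone_of_range() is made up; error handling is elided) illustrates why the out-parameters exist: start_pfn itself need not be backed by a present section, so the zone is looked up through the first pfn the walk actually validated, mirroring the __offline_pages() hunk further down:

/*
 * Illustrative sketch (hypothetical helper): resolve the zone backing
 * [start_pfn, end_pfn) via the out-parameters of the new
 * test_pages_in_a_zone().
 */
static struct zone *zone_of_range(unsigned long start_pfn,
				  unsigned long end_pfn)
{
	unsigned long valid_start, valid_end;

	if (!test_pages_in_a_zone(start_pfn, end_pfn,
				  &valid_start, &valid_end))
		return NULL;	/* range is empty or spans more than one zone */

	/* start_pfn may sit in a hole; use the first validated pfn instead. */
	return page_zone(pfn_to_page(valid_start));
}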
 
 /*
@@ -1451,6 +1476,21 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
                        continue;
                }
 
+               /*
+                * HWPoison pages have elevated reference counts, so migration would
+                * fail on them. It also doesn't make any sense to migrate them in the
+                * first place. Still try to unmap such a page in case it is still
+                * mapped (e.g. the current hwpoison implementation doesn't unmap KSM
+                * pages, so keep the unmap as the catch-all safety net).
+                */
+               if (PageHWPoison(page)) {
+                       if (WARN_ON(PageLRU(page)))
+                               isolate_lru_page(page);
+                       if (page_mapped(page))
+                               try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
+                       continue;
+               }
+
                if (!get_page_unless_zero(page))
                        continue;
                /*
@@ -1711,6 +1751,7 @@ static int __ref __offline_pages(unsigned long start_pfn,
        long offlined_pages;
        int ret, drain, retry_max, node;
        unsigned long flags;
+       unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;
 
@@ -1721,10 +1762,10 @@ static int __ref __offline_pages(unsigned long start_pfn,
                return -EINVAL;
        /* This makes hotplug much easier...and readable.
           we assume this for now. .*/
-       if (!test_pages_in_a_zone(start_pfn, end_pfn))
+       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start, &valid_end))
                return -EINVAL;
 
-       zone = page_zone(pfn_to_page(start_pfn));
+       zone = page_zone(pfn_to_page(valid_start));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;