[android-x86/kernel.git] mm/mlock.c
index 145a425..3e7fe40 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -190,10 +190,13 @@ unsigned int munlock_vma_page(struct page *page)
         */
        spin_lock_irq(zone_lru_lock(zone));
 
-       nr_pages = hpage_nr_pages(page);
-       if (!TestClearPageMlocked(page))
+       if (!TestClearPageMlocked(page)) {
+               /* Potentially, PTE-mapped THP: do not skip the rest of the PTEs */
+               nr_pages = 1;
                goto unlock_out;
+       }
 
+       nr_pages = hpage_nr_pages(page);
        __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
 
        if (__munlock_isolate_lru_page(page, true)) {
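
The first hunk reorders the Mlocked test against the page count: nr_pages
is read only after TestClearPageMlocked() succeeds. If the flag is already
clear (for instance because a racing THP split turned the compound page
into PTE-mapped base pages), munlock_vma_page() now reports a single page,
so the caller keeps scanning the remaining PTEs instead of skipping a whole
huge-page stride. Below is a minimal userspace sketch of that contract; the
real function returns a page mask that the caller turns into an increment,
which is simplified here to the stride itself, and every name is
illustrative rather than the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define THP_NR_PAGES 512    /* base pages in a 2 MiB THP on x86-64 */

    /* Stride the caller may advance by after munlocking one page. */
    static unsigned int munlock_one(bool was_mlocked, bool is_thp_head)
    {
        if (!was_mlocked)
            return 1;    /* potentially PTE-mapped THP: scan the rest */
        return is_thp_head ? THP_NR_PAGES : 1;
    }

    int main(void)
    {
        unsigned long pte = 0, end = THP_NR_PAGES, visited = 0;

        while (pte < end) {
            /* Pretend a racing split already cleared Mlocked here. */
            pte += munlock_one(false, true);
            visited++;
        }
        /* The pre-fix behaviour would have reported a 512-page stride
         * and left 511 PTE-mapped subpages unvisited. */
        printf("visited %lu of %lu ptes\n", visited, end);
        return 0;
    }
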
@@ -282,7 +285,7 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
 {
        int i;
        int nr = pagevec_count(pvec);
-       int delta_munlocked;
+       int delta_munlocked = -nr;
        struct pagevec pvec_putback;
        int pgrescued = 0;
 
@@ -302,6 +305,8 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                                continue;
                        else
                                __munlock_isolation_failed(page);
+               } else {
+                       delta_munlocked++;
                }
 
                /*
@@ -313,7 +318,6 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
                pagevec_add(&pvec_putback, pvec->pages[i]);
                pvec->pages[i] = NULL;
        }
-       delta_munlocked = -nr + pagevec_count(&pvec_putback);
        __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
        spin_unlock_irq(zone_lru_lock(zone));
 
@@ -438,7 +442,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 
        while (start < end) {
                struct page *page;
-               unsigned int page_mask;
+               unsigned int page_mask = 0;
                unsigned long page_increm;
                struct pagevec pvec;
                struct zone *zone;
@@ -452,8 +456,7 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                 * suits munlock very well (and if somehow an abnormal page
                 * has sneaked into the range, we won't oops here: great).
                 */
-               page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
-                               &page_mask);
+               page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);
 
                if (page && !IS_ERR(page)) {
                        if (PageTransTail(page)) {
@@ -464,8 +467,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
                                /*
                                 * Any THP page found by follow_page_mask() may
                                 * have gotten split before reaching
-                                * munlock_vma_page(), so we need to recompute
-                                * the page_mask here.
+                                * munlock_vma_page(), so we need to compute
+                                * the page_mask here instead.
                                 */
                                page_mask = munlock_vma_page(page);
                                unlock_page(page);
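
The munlock_vma_pages_range() hunks swap follow_page_mask() for plain
follow_page(), so the lookup no longer reports a mapping size. page_mask
therefore starts at 0 (a one-page stride) and is raised only when
munlock_vma_page() actually finds a THP head, which is also the honest
place to measure it, since the page can be split between lookup and
munlock. (The unchanged context comment above still names
follow_page_mask(); only the call itself changed.) A simplified,
compilable skeleton of the scan loop under these assumptions, with stub
lookups standing in for the kernel calls:

    #include <stdio.h>

    #define THP_MASK 511    /* 2 MiB THP on x86-64: 512 base pages - 1 */

    /* Stubs: pfn 0 is a THP head, pfns 512..1023 are an unmapped hole. */
    static int stub_follow_page(unsigned long pfn) { return pfn < 512; }
    static unsigned int stub_munlock(unsigned long pfn)
    {
        return pfn == 0 ? THP_MASK : 0;
    }

    int main(void)
    {
        unsigned long pfn = 0, end = 1024, iterations = 0;

        while (pfn < end) {
            /* Must default to one page: follow_page() carries no size
             * information, and a failed lookup writes nothing at all. */
            unsigned int page_mask = 0;

            if (stub_follow_page(pfn))
                page_mask = stub_munlock(pfn);  /* THP head: big stride */

            pfn += 1 + page_mask;               /* page_increm */
            iterations++;
        }
        printf("covered %lu pfns in %lu iterations\n", end, iterations);
        return 0;
    }
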
@@ -627,11 +630,11 @@ static int apply_vma_lock_flags(unsigned long start, size_t len,
  * is also counted.
  * Return value: previously mlocked page counts
  */
-static int count_mm_mlocked_page_nr(struct mm_struct *mm,
+static unsigned long count_mm_mlocked_page_nr(struct mm_struct *mm,
                unsigned long start, size_t len)
 {
        struct vm_area_struct *vma;
-       int count = 0;
+       unsigned long count = 0;
 
        if (mm == NULL)
                mm = current->mm;
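
The final hunk widens count_mm_mlocked_page_nr()'s counter from int to
unsigned long. The function counts base pages, and on a 64-bit kernel an
mlocked range beyond 2^31 pages (8 TiB at 4 KiB per page) wraps a 32-bit
int negative. A small userspace demonstration follows; the narrowing
conversion is implementation-defined in C, but wraps on the usual
two's-complement targets:

    #include <stdio.h>

    int main(void)
    {
        /* ~12 TiB worth of 4 KiB pages: more than INT_MAX of them. */
        unsigned long vma_pages = 3UL << 30;

        int narrow = (int)vma_pages;      /* wraps negative on x86-64 */
        unsigned long wide = vma_pages;   /* room to spare on LP64 */

        printf("int count:           %d\n", narrow);
        printf("unsigned long count: %lu\n", wide);
        return 0;
    }
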