[android-x86/kernel.git] mm/mremap.c
index 30d7d24..9e60359 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,7 +104,7 @@ static pte_t move_soft_dirty_pte(pte_t pte)
 static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
-               unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
+               unsigned long new_addr, bool need_rmap_locks)
 {
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
@@ -142,6 +142,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+       flush_tlb_batched_pending(vma->vm_mm);
        arch_enter_lazy_mmu_mode();
 
        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
@@ -151,15 +152,17 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 
                pte = ptep_get_and_clear(mm, old_addr, old_pte);
                /*
-                * If we are remapping a dirty PTE, make sure
+                * If we are remapping a valid PTE, make sure
                 * to flush TLB before we drop the PTL for the
-                * old PTE or we may race with page_mkclean().
+                * PTE.
                 *
-                * This check has to be done after we removed the
-                * old PTE from page tables or another thread may
-                * dirty it after the check and before the removal.
+                * NOTE! Both old and new PTL matter: the old one
+                * for racing with page_mkclean(), the new one to
+                * make sure the physical page stays valid until
+                * the TLB entry for the old mapping has been
+                * flushed.
                 */
-               if (pte_present(pte) && pte_dirty(pte))
+               if (pte_present(pte))
                        force_flush = true;
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                pte = move_soft_dirty_pte(pte);
@@ -167,13 +170,11 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        }
 
        arch_leave_lazy_mmu_mode();
+       if (force_flush)
+               flush_tlb_range(vma, old_end - len, old_end);
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap(new_pte - 1);
-       if (force_flush)
-               flush_tlb_range(vma, old_end - len, old_end);
-       else
-               *need_flush = true;
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (need_rmap_locks)
                drop_rmap_locks(vma);
@@ -188,7 +189,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;
-       bool need_flush = false;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
 
@@ -219,8 +219,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                                if (need_rmap_locks)
                                        take_rmap_locks(vma);
                                moved = move_huge_pmd(vma, old_addr, new_addr,
-                                                   old_end, old_pmd, new_pmd,
-                                                   &need_flush);
+                                                   old_end, old_pmd, new_pmd);
                                if (need_rmap_locks)
                                        drop_rmap_locks(vma);
                                if (moved)
@@ -238,10 +237,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                if (extent > LATENCY_LIMIT)
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
-                         new_pmd, new_addr, need_rmap_locks, &need_flush);
+                         new_pmd, new_addr, need_rmap_locks);
        }
-       if (need_flush)
-               flush_tlb_range(vma, old_end-len, old_addr);
 
        mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
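
For context, below is a condensed sketch of the move_ptes() flow this diff establishes (the name move_ptes_sketch and the trimming are mine; the rmap lock take/drop and the arch_enter/leave_lazy_mmu_mode() calls are omitted). The point it illustrates: any present old PTE forces a TLB flush, and that flush is issued while both page-table locks are still held, so a racing page_mkclean() cannot miss the old mapping and the old physical page cannot be reused while a stale TLB entry for it still exists.

static void move_ptes_sketch(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
                unsigned long new_addr)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long len = old_end - old_addr;
        bool force_flush = false;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;

        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
        new_pte = pte_offset_map(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
        /* pick up any TLB flushes deferred by batched reclaim */
        flush_tlb_batched_pending(mm);

        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;
                pte = ptep_get_and_clear(mm, old_addr, old_pte);
                if (pte_present(pte))   /* any valid PTE, not just dirty */
                        force_flush = true;
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                pte = move_soft_dirty_pte(pte);
                set_pte_at(mm, new_addr, new_pte, pte);
        }

        /* flush the old range while both PTLs are still held */
        if (force_flush)
                flush_tlb_range(vma, old_end - len, old_end);
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap(new_pte - 1);
        pte_unmap_unlock(old_pte - 1, old_ptl);
}

With the flush performed under the locks, the deferred-flush plumbing becomes unnecessary, which is why move_page_tables() drops its local need_flush variable and the extra argument passed to move_huge_pmd() and move_ptes().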