cifs: fail i/o on soft mounts if sessionsetup errors out
sagit-ice-cold/kernel_xiaomi_msm8998.git / mm/mremap.c
index c25bc62..450b306 100644
@@ -96,6 +96,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;
+       bool force_flush = false;
+       unsigned long len = old_end - old_addr;
 
        /*
         * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
@@ -135,6 +137,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
+       flush_tlb_batched_pending(vma->vm_mm);
        arch_enter_lazy_mmu_mode();
 
        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
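
The flush_tlb_batched_pending() call added above closes a related hole: reclaim can batch TLB flushes for this mm and leave them pending, so without it move_ptes() could start moving PTEs while stale entries are still live on other CPUs. For reference, a sketch of the helper as introduced upstream in mm/rmap.c by "mm, mprotect: flush TLB if potentially racing with a parallel reclaim leaving stale TLB entries"; the body in this tree may differ slightly:

void flush_tlb_batched_pending(struct mm_struct *mm)
{
	if (mm->tlb_flush_batched) {
		flush_tlb_mm(mm);

		/*
		 * Do not allow the compiler to re-order the clearing of
		 * tlb_flush_batched before the TLB is flushed.
		 */
		barrier();
		mm->tlb_flush_batched = false;
	}
}
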
@@ -142,12 +145,26 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                if (pte_none(*old_pte))
                        continue;
                pte = ptep_get_and_clear(mm, old_addr, old_pte);
+               /*
+                * If we are remapping a valid PTE, make sure
+                * to flush TLB before we drop the PTL for the PTE.
+                *
+                * NOTE! Both old and new PTL matter: the old one
+                * for racing with page_mkclean(), the new one to
+                * make sure the physical page stays valid until
+                * the TLB entry for the old mapping has been
+                * flushed.
+                */
+               if (pte_present(pte))
+                       force_flush = true;
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                pte = move_soft_dirty_pte(pte);
                set_pte_at(mm, new_addr, new_pte, pte);
        }
 
        arch_leave_lazy_mmu_mode();
+       if (force_flush)
+               flush_tlb_range(vma, old_end - len, old_end);
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap(new_pte - 1);
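
This hunk is the core of the fix. Note the flush range: len was saved as old_end - old_addr before the loop advanced old_addr, so flush_tlb_range(vma, old_end - len, old_end) covers exactly the PTEs this call moved, and it runs while both PTLs are still held. An illustrative interleaving (not code from this tree) of the writeback race that this ordering prevents:

/*
 * CPU 0 (mremap, old deferred flush)     CPU 1
 * ----------------------------------     ------------------------------
 * pte = ptep_get_and_clear(old_addr)
 * set_pte_at(new_addr, ...)
 * spin_unlock(new_ptl, old_ptl)
 *                                        write through stale TLB entry
 *                                        for old_addr (dirty bit set
 *                                        only in the TLB)
 *                                        page_mkclean() finds no PTE at
 *                                        old_addr, treats page as clean
 * flush_tlb_range()  <- too late: the
 * write can be lost on writeback
 *
 * With force_flush, flush_tlb_range() runs before either PTL is
 * dropped, so the stale entry is gone before page_mkclean() can
 * observe the cleared PTE.
 */
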
@@ -167,7 +184,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 {
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;
-       bool need_flush = false;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
 
@@ -206,7 +222,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                                        anon_vma_unlock_write(vma->anon_vma);
                        }
                        if (err > 0) {
-                               need_flush = true;
                                continue;
                        } else if (!err) {
                                split_huge_page_pmd(vma, old_addr, old_pmd);
@@ -223,10 +238,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
                if (extent > LATENCY_LIMIT)
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent,
                          new_vma, new_pmd, new_addr, need_rmap_locks);
-               need_flush = true;
        }
-       if (likely(need_flush))
-               flush_tlb_range(vma, old_end-len, old_addr);
 
        mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
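
Net effect of the remaining hunks: the single deferred flush in move_page_tables(), and the need_flush bookkeeping that drove it, are gone, because move_ptes() now flushes under the page-table locks. A condensed before/after sketch using the names from this diff, with the rest of the loop body elided:

	/* Before: flush once, after all PMD ranges were moved. */
	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		/* ... */
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
		need_flush = true;	/* stale TLB entries stay live... */
	}
	if (likely(need_flush))
		flush_tlb_range(vma, old_end - len, old_addr);	/* ...until here */

	/* After: each move_ptes() call flushes any present PTEs it
	 * cleared before dropping its locks, so no caller-side flush
	 * remains. */
	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		/* ... */
		move_ptes(vma, old_pmd, old_addr, old_addr + extent,
			  new_vma, new_pmd, new_addr, need_rmap_locks);
	}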