powerpc/mm/hugetlb: Update huge_ptep_set_access_flags to call __ptep_set_access_flags...
author		Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
		Tue, 29 May 2018 14:28:38 +0000 (19:58 +0530)
committer	Michael Ellerman <mpe@ellerman.id.au>
		Sun, 3 Jun 2018 10:40:33 +0000 (20:40 +1000)
In a later patch, we want to update __ptep_set_access_flags to take a
page size argument. That change will make ptep_set_access_flags work
only with mmu_virtual_psize. To simplify the code, make
huge_ptep_set_access_flags call __ptep_set_access_flags directly, so
that the hugetlb page size can be computed in the hugetlb function.
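
For context, a minimal sketch of where this is heading, assuming the
hugetlb page size is derived from the VMA's hstate and that
__ptep_set_access_flags later grows a page-size parameter (both are
assumptions about the follow-up, not what this commit implements):

	/*
	 * Hypothetical sketch only: once __ptep_set_access_flags()
	 * takes a page-size argument, the hugetlb helper can compute
	 * the size from the VMA and pass it down.
	 */
	int huge_ptep_set_access_flags(struct vm_area_struct *vma,
				       unsigned long addr, pte_t *ptep,
				       pte_t pte, int dirty)
	{
		unsigned long psize = huge_page_size(hstate_vma(vma));
		int changed;

		pte = set_access_flags_filter(pte, vma, dirty);
		changed = !pte_same(*ptep, pte);
		if (changed) {
			/* hypothetical extra psize argument */
			__ptep_set_access_flags(vma->vm_mm, ptep, pte,
						addr, psize);
			flush_hugetlb_page(vma, addr);
		}
		return changed;
	}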

Now that ptep_set_access_flags won't be called for hugetlb, remove the
is_vm_hugetlb_page() check and assert the pte lock unconditionally.
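
The hugetlb path stays covered because generic code reaches the hugetlb
helper directly; condensed from the hugetlb fault path in mm/hugetlb.c
(simplified, not verbatim upstream code):

	/* hugetlb faults call huge_ptep_set_access_flags(), never
	 * ptep_set_access_flags(), so the latter only ever sees
	 * normal pages and can assert the pte lock unconditionally. */
	if (huge_ptep_set_access_flags(vma, address, ptep, entry,
				       flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, address, ptep);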

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
arch/powerpc/include/asm/hugetlb.h
arch/powerpc/mm/pgtable.c

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 96444bc..3225eb6 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -166,22 +166,9 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
        return pte_wrprotect(pte);
 }
 
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
-                                            unsigned long addr, pte_t *ptep,
-                                            pte_t pte, int dirty)
-{
-#ifdef HUGETLB_NEED_PRELOAD
-       /*
-        * The "return 1" forces a call of update_mmu_cache, which will write a
-        * TLB entry.  Without this, platforms that don't do a write of the TLB
-        * entry in the TLB miss handler asm will fault ad infinitum.
-        */
-       ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-       return 1;
-#else
-       return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-#endif
-}
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                                     unsigned long addr, pte_t *ptep,
+                                     pte_t pte, int dirty);
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
 {
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 9f361ae..e70af99 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -221,14 +221,43 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
        entry = set_access_flags_filter(entry, vma, dirty);
        changed = !pte_same(*(ptep), entry);
        if (changed) {
-               if (!is_vm_hugetlb_page(vma))
-                       assert_pte_locked(vma->vm_mm, address);
+               assert_pte_locked(vma->vm_mm, address);
                __ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
                flush_tlb_page(vma, address);
        }
        return changed;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+                              unsigned long addr, pte_t *ptep,
+                              pte_t pte, int dirty)
+{
+#ifdef HUGETLB_NEED_PRELOAD
+       /*
+        * The "return 1" forces a call of update_mmu_cache, which will write a
+        * TLB entry.  Without this, platforms that don't do a write of the TLB
+        * entry in the TLB miss handler asm will fault ad infinitum.
+        */
+       ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+       return 1;
+#else
+       int changed;
+
+       pte = set_access_flags_filter(pte, vma, dirty);
+       changed = !pte_same(*(ptep), pte);
+       if (changed) {
+#ifdef CONFIG_DEBUG_VM
+               assert_spin_locked(&vma->vm_mm->page_table_lock);
+#endif
+               __ptep_set_access_flags(vma->vm_mm, ptep, pte, addr);
+               flush_hugetlb_page(vma, addr);
+       }
+       return changed;
+#endif
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 #ifdef CONFIG_DEBUG_VM
 void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 {