From f069ff396d657ac7bdb5de866c3ec28b8d08d953 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 29 May 2018 19:58:38 +0530
Subject: [PATCH] powerpc/mm/hugetlb: Update huge_ptep_set_access_flags to call __ptep_set_access_flags directly

In a later patch, we want to update __ptep_set_access_flags to take a
page size argument. This makes ptep_set_access_flags only work with
mmu_virtual_psize. To simplify the code, make huge_ptep_set_access_flags
call __ptep_set_access_flags directly so that we can compute the hugetlb
page size in the hugetlb function.

Now that ptep_set_access_flags won't be called for hugetlb, remove the
is_vm_hugetlb_page() check and add the assert of pte lock
unconditionally.

Signed-off-by: Aneesh Kumar K.V
Signed-off-by: Michael Ellerman
---
 arch/powerpc/include/asm/hugetlb.h | 19 +++----------------
 arch/powerpc/mm/pgtable.c          | 33 +++++++++++++++++++++++++++++++--
 2 files changed, 34 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 96444bc08034..3225eb6402cc 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -166,22 +166,9 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 	return pte_wrprotect(pte);
 }
 
-static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
-					     unsigned long addr, pte_t *ptep,
-					     pte_t pte, int dirty)
-{
-#ifdef HUGETLB_NEED_PRELOAD
-	/*
-	 * The "return 1" forces a call of update_mmu_cache, which will write a
-	 * TLB entry. Without this, platforms that don't do a write of the TLB
-	 * entry in the TLB miss handler asm will fault ad infinitum.
-	 */
-	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-	return 1;
-#else
-	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-#endif
-}
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+				      unsigned long addr, pte_t *ptep,
+				      pte_t pte, int dirty);
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
 {
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 9f361ae571e9..e70af9939379 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -221,14 +221,43 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
 	entry = set_access_flags_filter(entry, vma, dirty);
 	changed = !pte_same(*(ptep), entry);
 	if (changed) {
-		if (!is_vm_hugetlb_page(vma))
-			assert_pte_locked(vma->vm_mm, address);
+		assert_pte_locked(vma->vm_mm, address);
 		__ptep_set_access_flags(vma->vm_mm, ptep, entry, address);
 		flush_tlb_page(vma, address);
 	}
 	return changed;
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+extern int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+				      unsigned long addr, pte_t *ptep,
+				      pte_t pte, int dirty)
+{
+#ifdef HUGETLB_NEED_PRELOAD
+	/*
+	 * The "return 1" forces a call of update_mmu_cache, which will write a
+	 * TLB entry. Without this, platforms that don't do a write of the TLB
+	 * entry in the TLB miss handler asm will fault ad infinitum.
+	 */
+	ptep_set_access_flags(vma, addr, ptep, pte, dirty);
+	return 1;
+#else
+	int changed;
+
+	pte = set_access_flags_filter(pte, vma, dirty);
+	changed = !pte_same(*(ptep), pte);
+	if (changed) {
+#ifdef CONFIG_DEBUG_VM
+		assert_spin_locked(&vma->vm_mm->page_table_lock);
+#endif
+		__ptep_set_access_flags(vma->vm_mm, ptep, pte, addr);
+		flush_hugetlb_page(vma, addr);
+	}
+	return changed;
+#endif
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 #ifdef CONFIG_DEBUG_VM
 void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
 {
--
2.11.0
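
Reader's note, not part of the patch above: the commit message says the point
of moving huge_ptep_set_access_flags() out of line is that a later patch will
give __ptep_set_access_flags() a page size argument, which only the hugetlb
caller can compute. The sketch below illustrates that idea. The extra psize
parameter is an assumption about the follow-up and not a real signature at
this point in the series; huge_page_size() and hstate_vma() are existing
helpers from <linux/hugetlb.h>.

#include <linux/mm.h>
#include <linux/hugetlb.h>

/*
 * Sketch only: assumes a follow-up patch adds a page-size argument
 * to __ptep_set_access_flags(). The hugetlb path can derive that
 * size from the VMA's hstate, something the generic
 * ptep_set_access_flags() path (mmu_virtual_psize) cannot do.
 */
static inline void hugetlb_update_access_flags(struct vm_area_struct *vma,
					       unsigned long addr,
					       pte_t *ptep, pte_t pte)
{
	/* Huge page size (e.g. 2M, 16M, 1G) from the VMA's hstate. */
	unsigned long psize = huge_page_size(hstate_vma(vma));

	/* Hypothetical extra 'psize' argument, per the commit message. */
	__ptep_set_access_flags(vma->vm_mm, ptep, pte, addr, psize);
}

Keeping the page-size computation in the hugetlb helper is what lets the
generic ptep_set_access_flags() stay specific to mmu_virtual_psize, as the
commit message states.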