
arm64/mm: enable ARCH_HAS_VM_GET_PAGE_PROT
author		Anshuman Khandual <anshuman.khandual@arm.com>
		Fri, 29 Apr 2022 06:16:13 +0000 (23:16 -0700)
committer	akpm <akpm@linux-foundation.org>
		Fri, 29 Apr 2022 06:16:13 +0000 (23:16 -0700)
This defines and exports a platform-specific custom vm_get_page_prot() for
arm64 by subscribing to ARCH_HAS_VM_GET_PAGE_PROT. The arch_vm_get_page_prot()
hook is removed from <asm/mman.h>, with its BTI and MTE handling folded
directly into the new vm_get_page_prot() in arch/arm64/mm/mmap.c.
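
For context, a rough sketch of the generic side of this mechanism (not part
of this patch, simplified): once an architecture selects
ARCH_HAS_VM_GET_PAGE_PROT, the common definition in mm/mmap.c is compiled out
and the arch-provided one takes over, roughly along these lines:

	/* mm/mmap.c (sketch, simplified): the generic fallback is built only
	 * when the architecture does not provide its own vm_get_page_prot(). */
	#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		/* Map the R/W/X/SHARED bits of vm_flags to a baseline protection. */
		return __pgprot(pgprot_val(protection_map[vm_flags &
					   (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]));
	}
	EXPORT_SYMBOL(vm_get_page_prot);
	#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */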

Link: https://lkml.kernel.org/r/20220414062125.609297-4-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
arch/arm64/Kconfig
arch/arm64/include/asm/mman.h
arch/arm64/mm/mmap.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 2d7399a..8da1f65 100644
@@ -45,6 +45,7 @@ config ARM64
        select ARCH_HAS_SYSCALL_WRAPPER
        select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+       select ARCH_HAS_VM_GET_PAGE_PROT
        select ARCH_HAS_ZONE_DMA_SET if EXPERT
        select ARCH_HAVE_ELF_PROT
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
diff --git a/arch/arm64/include/asm/mman.h b/arch/arm64/include/asm/mman.h
index e3e28f7..5966ee4 100644
@@ -35,30 +35,6 @@ static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
 }
 #define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
 
-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
-{
-       pteval_t prot = 0;
-
-       if (vm_flags & VM_ARM64_BTI)
-               prot |= PTE_GP;
-
-       /*
-        * There are two conditions required for returning a Normal Tagged
-        * memory type: (1) the user requested it via PROT_MTE passed to
-        * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
-        * register (1) as VM_MTE in the vma->vm_flags and (2) as
-        * VM_MTE_ALLOWED. Note that the latter can only be set during the
-        * mmap() call since mprotect() does not accept MAP_* flags.
-        * Checking for VM_MTE only is sufficient since arch_validate_flags()
-        * does not permit (VM_MTE & !VM_MTE_ALLOWED).
-        */
-       if (vm_flags & VM_MTE)
-               prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
-
-       return __pgprot(prot);
-}
-#define arch_vm_get_page_prot(vm_flags) arch_vm_get_page_prot(vm_flags)
-
 static inline bool arch_validate_prot(unsigned long prot,
        unsigned long addr __always_unused)
 {
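
For reference, the arch_vm_get_page_prot() hook removed above used to be
consumed by the generic helper in mm/mmap.c, roughly as in the simplified
sketch below (pre-series code, not part of this diff). The arm64
vm_get_page_prot() added in the next hunk open-codes the same OR-composition,
which is why the hook can go away:

	/* Simplified sketch of the old generic helper that consumed the
	 * arch_vm_get_page_prot() hook being removed here. */
	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		return __pgprot(pgprot_val(protection_map[vm_flags &
					   (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
				pgprot_val(arch_vm_get_page_prot(vm_flags)));
	}
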
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 77ada00..78e9490 100644
@@ -55,3 +55,28 @@ static int __init adjust_protection_map(void)
        return 0;
 }
 arch_initcall(adjust_protection_map);
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags)
+{
+       pteval_t prot = pgprot_val(protection_map[vm_flags &
+                                  (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]);
+
+       if (vm_flags & VM_ARM64_BTI)
+               prot |= PTE_GP;
+
+       /*
+        * There are two conditions required for returning a Normal Tagged
+        * memory type: (1) the user requested it via PROT_MTE passed to
+        * mmap() or mprotect() and (2) the corresponding vma supports MTE. We
+        * register (1) as VM_MTE in the vma->vm_flags and (2) as
+        * VM_MTE_ALLOWED. Note that the latter can only be set during the
+        * mmap() call since mprotect() does not accept MAP_* flags.
+        * Checking for VM_MTE only is sufficient since arch_validate_flags()
+        * does not permit (VM_MTE & !VM_MTE_ALLOWED).
+        */
+       if (vm_flags & VM_MTE)
+               prot |= PTE_ATTRINDX(MT_NORMAL_TAGGED);
+
+       return __pgprot(prot);
+}
+EXPORT_SYMBOL(vm_get_page_prot);
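
As a usage note, the exported helper is what the core MM uses to derive a
VMA's page protection from its flags; a minimal illustrative caller
(hypothetical example, not part of this patch) would look like:

	/* Illustrative only: derive the VMA's page protection from its vm_flags,
	 * picking up PTE_GP for BTI and the tagged memory attribute for MTE on arm64. */
	static void example_update_vma_prot(struct vm_area_struct *vma)
	{
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	}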