parisc: Purge TLB entries after updating page table entry and set page accessed flag...
author    John David Anglin <dave.anglin@bell.net>
          Fri, 21 Sep 2018 02:31:48 +0000 (22:31 -0400)
committer Helge Deller <deller@gmx.de>
          Wed, 17 Oct 2018 06:18:01 +0000 (08:18 +0200)
This patch may resolve some races in TLB handling.  Hopefully, TLB
inserts happen only on accesses and are protected by the spin lock.

If not, we may need to use IPI calls and do local purges on PA 2.0.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
arch/parisc/include/asm/pgtable.h
arch/parisc/kernel/entry.S

diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index fa6b7c7..b86c312 100644
@@ -66,9 +66,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
                unsigned long flags;                            \
                spin_lock_irqsave(&pa_tlb_lock, flags);         \
                old_pte = *ptep;                                \
+               set_pte(ptep, pteval);                          \
                if (pte_inserted(old_pte))                      \
                        purge_tlb_entries(mm, addr);            \
-               set_pte(ptep, pteval);                          \
                spin_unlock_irqrestore(&pa_tlb_lock, flags);    \
        } while (0)
 
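The reordering above is the core of the fix. As a minimal sketch (a
hypothetical helper that merely restates the set_pte_at() body above,
not code from the patch), the update path now publishes the new PTE
before purging the old translation, all under pa_tlb_lock:

	/* Hypothetical restatement of the set_pte_at() sequence. */
	static inline void set_pte_and_purge(struct mm_struct *mm,
					     unsigned long addr,
					     pte_t *ptep, pte_t pteval)
	{
		unsigned long flags;
		pte_t old_pte;

		spin_lock_irqsave(&pa_tlb_lock, flags);
		old_pte = *ptep;
		set_pte(ptep, pteval);			/* 1. publish the new PTE */
		if (pte_inserted(old_pte))
			purge_tlb_entries(mm, addr);	/* 2. flush the stale entry */
		spin_unlock_irqrestore(&pa_tlb_lock, flags);
	}

With this order, any translation derived from the old PTE is purged
only after the new PTE is visible in memory, so a TLB handler running
after the purge can only pick up the new entry. The same reordering is
applied to ptep_test_and_clear_young(), ptep_get_and_clear() and
ptep_set_wrprotect() below.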
@@ -202,7 +202,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define _PAGE_HUGE     (1 << xlate_pabit(_PAGE_HPAGE_BIT))
 #define _PAGE_USER     (1 << xlate_pabit(_PAGE_USER_BIT))
 
-#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |  _PAGE_DIRTY | _PAGE_ACCESSED)
+#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
 #define _PAGE_KERNEL_RO        (_PAGE_PRESENT | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define _PAGE_KERNEL_EXEC      (_PAGE_KERNEL_RO | _PAGE_EXEC)
@@ -227,22 +227,22 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 
 #ifndef __ASSEMBLY__
 
-#define PAGE_NONE      __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
-#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
+#define PAGE_NONE      __pgprot(_PAGE_PRESENT | _PAGE_USER)
+#define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
 /* Others seem to make this executable, I don't know if that's correct
    or not.  The stack is mapped this way though so this is necessary
    in the short term - dhd@linuxcare.com, 2000-08-08 */
-#define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
-#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
-#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_READONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
+#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
+#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
 #define PAGE_COPY       PAGE_EXECREAD
-#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
+#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
 #define PAGE_KERNEL    __pgprot(_PAGE_KERNEL)
 #define PAGE_KERNEL_EXEC       __pgprot(_PAGE_KERNEL_EXEC)
 #define PAGE_KERNEL_RWX        __pgprot(_PAGE_KERNEL_RWX)
 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL_RO)
 #define PAGE_KERNEL_UNC        __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
-#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
+#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY| _PAGE_READ)
 
 
 /*
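Dropping _PAGE_ACCESSED from the default page protections means a
freshly mapped page now starts out "old"; per the commit subject, the
accessed flag is instead set by the TLB fault path when the page is
first referenced, which makes ptep_test_and_clear_young() (below)
useful for reference tracking. Conceptually (a hypothetical C sketch;
the real work is done in assembly in entry.S):

	/* Hypothetical C view of what the TLB-miss path now does. */
	static pte_t tlb_miss_mark_accessed(pte_t *ptep)
	{
		pte_t pte = *ptep;

		pte = pte_mkyoung(pte);	/* set _PAGE_ACCESSED on first use */
		set_pte(ptep, pte);	/* write back before the TLB insert */
		return pte;
	}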
@@ -479,8 +479,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
                spin_unlock_irqrestore(&pa_tlb_lock, flags);
                return 0;
        }
-       purge_tlb_entries(vma->vm_mm, addr);
        set_pte(ptep, pte_mkold(pte));
+       purge_tlb_entries(vma->vm_mm, addr);
        spin_unlock_irqrestore(&pa_tlb_lock, flags);
        return 1;
 }
@@ -493,9 +493,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
 
        spin_lock_irqsave(&pa_tlb_lock, flags);
        old_pte = *ptep;
+       set_pte(ptep, __pte(0));
        if (pte_inserted(old_pte))
                purge_tlb_entries(mm, addr);
-       set_pte(ptep, __pte(0));
        spin_unlock_irqrestore(&pa_tlb_lock, flags);
 
        return old_pte;
@@ -505,8 +505,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 {
        unsigned long flags;
        spin_lock_irqsave(&pa_tlb_lock, flags);
-       purge_tlb_entries(mm, addr);
        set_pte(ptep, pte_wrprotect(*ptep));
+       purge_tlb_entries(mm, addr);
        spin_unlock_irqrestore(&pa_tlb_lock, flags);
 }
 
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 00ac988..0d662f0 100644
        .macro          tlb_unlock0     spc,tmp
 #ifdef CONFIG_SMP
        or,COND(=)      %r0,\spc,%r0
-       sync
-       or,COND(=)      %r0,\spc,%r0
-       stw             \spc,0(\tmp)
+       stw,ma          \spc,0(\tmp)
 #endif
        .endm
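
The unlock path now uses a single stw,ma in place of the sync barrier,
second nullifying or, and plain stw. On PA 2.0, a word store with the
,ma completer and a zero displacement is encoded as an ordered store,
so the one instruction both releases the lock and orders the preceding
TLB insert; with a zero displacement it otherwise behaves like a plain
stw. A similar release idiom is used for parisc spinlocks; as a
hypothetical C wrapper (illustration only, not code from the patch):

	/* Hypothetical wrapper around the ordered-store release idiom. */
	static inline void tlb_lock_release(volatile unsigned int *lock,
					    unsigned int token)
	{
		__asm__ __volatile__("stw,ma %0,0(%1)"
				     : : "r" (token), "r" (lock)
				     : "memory");
	}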