// SPDX-License-Identifier: GPL-2.0
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>
#include <linux/pagemap.h>

#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

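/* Flush every virtual address queued in this CPU's TLB batch and reset
 * the batch.  The TSB entries for the batched addresses are scrubbed
 * first, then the TLB itself, via the single-page or pending-list path.
 */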
void flush_tlb_pending(void)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        struct mm_struct *mm = tb->mm;

        if (!tb->tlb_nr)
                goto out;

        flush_tsb_user(tb);

        if (CTX_VALID(mm->context)) {
                if (tb->tlb_nr == 1) {
                        global_flush_tlb_page(mm, tb->vaddrs[0]);
                } else {
#ifdef CONFIG_SMP
                        smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
                                              &tb->vaddrs[0]);
#else
                        __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
                                            tb->tlb_nr, &tb->vaddrs[0]);
#endif
                }
        }

        tb->tlb_nr = 0;

out:
        put_cpu_var(tlb_batch);
}

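/* While lazy MMU mode is active, tlb_batch_add_one() queues addresses
 * in the per-CPU batch instead of flushing them immediately.
 */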
void arch_enter_lazy_mmu_mode(void)
{
        struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

        tb->active = 1;
}

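/* Leaving lazy MMU mode flushes whatever is still queued and turns
 * batching back off.
 */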
void arch_leave_lazy_mmu_mode(void)
{
        struct tlb_batch *tb = this_cpu_ptr(&tlb_batch);

        if (tb->tlb_nr)
                flush_tlb_pending();
        tb->active = 0;
}

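/* Queue one virtual address for a deferred TLB flush.  The low bit of
 * the stored address records whether the mapping was executable.  If
 * batching is inactive the page is flushed right away; otherwise the
 * batch is flushed early when the mm or hugepage shift changes, or once
 * TLB_BATCH_NR entries have accumulated.
 */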
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                              bool exec, unsigned int hugepage_shift)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        unsigned long nr;

        vaddr &= PAGE_MASK;
        if (exec)
                vaddr |= 0x1UL;

        nr = tb->tlb_nr;

        if (unlikely(nr != 0 && mm != tb->mm)) {
                flush_tlb_pending();
                nr = 0;
        }

        if (!tb->active) {
                flush_tsb_user_page(mm, vaddr, hugepage_shift);
                global_flush_tlb_page(mm, vaddr);
                goto out;
        }

        if (nr == 0) {
                tb->mm = mm;
                tb->hugepage_shift = hugepage_shift;
        }

        if (tb->hugepage_shift != hugepage_shift) {
                flush_tlb_pending();
                tb->hugepage_shift = hugepage_shift;
                nr = 0;
        }

        tb->vaddrs[nr] = vaddr;
        tb->tlb_nr = ++nr;
        if (nr >= TLB_BATCH_NR)
                flush_tlb_pending();

out:
        put_cpu_var(tlb_batch);
}

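/* Cache and TLB maintenance for a pte that is being replaced.  On
 * non-hypervisor chips a dirty pte whose kernel and user addresses
 * differ in cache colour (bit 13) may leave aliased D-cache lines, so
 * the backing folio is flushed first.  The TLB flush itself is batched
 * unless the whole mm is going away (fullmm).
 */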
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig, int fullmm,
                   unsigned int hugepage_shift)
{
        if (tlb_type != hypervisor &&
            pte_dirty(orig)) {
                unsigned long paddr, pfn = pte_pfn(orig);
                struct address_space *mapping;
                struct page *page;
                struct folio *folio;

                if (!pfn_valid(pfn))
                        goto no_cache_flush;

                page = pfn_to_page(pfn);
                if (PageReserved(page))
                        goto no_cache_flush;

                /* A real file page? */
                folio = page_folio(page);
                mapping = folio_flush_mapping(folio);
                if (!mapping)
                        goto no_cache_flush;

                paddr = (unsigned long) page_address(page);
                if ((paddr ^ vaddr) & (1 << 13))
                        flush_dcache_folio_all(mm, folio);
        }

no_cache_flush:
        if (!fullmm)
                tlb_batch_add_one(mm, vaddr, pte_exec(orig), hugepage_shift);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
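/* Walk the pte page under a non-huge pmd covering HPAGE_SIZE of address
 * space and queue a TLB flush for every valid pte found.
 */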
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                               pmd_t pmd)
{
        unsigned long end;
        pte_t *pte;

        pte = pte_offset_map(&pmd, vaddr);
        if (!pte)
                return;
        end = vaddr + HPAGE_SIZE;
        while (vaddr < end) {
                if (pte_val(*pte) & _PAGE_VALID) {
                        bool exec = pte_exec(*pte);

                        tlb_batch_add_one(mm, vaddr, exec, PAGE_SHIFT);
                }
                pte++;
                vaddr += PAGE_SIZE;
        }
        pte_unmap(pte);
}

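/* Accounting and flushing for a pmd update: keep the THP and huge zero
 * page counters in sync when the huge bit changes, and flush the old
 * translations (both halves of a huge mapping, or each base page) when
 * a previous mapping existed.
 */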
static void __set_pmd_acct(struct mm_struct *mm, unsigned long addr,
                           pmd_t orig, pmd_t pmd)
{
        if (mm == &init_mm)
                return;

        if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
                /*
                 * Note that this routine only sets pmds for THP pages.
                 * Hugetlb pages are handled elsewhere.  We need to check
                 * for huge zero page.  Huge zero pages are like hugetlb
                 * pages in that there is no RSS, but there is the need
                 * for TSB entries.  So, huge zero page counts go into
                 * hugetlb_pte_count.
                 */
                if (pmd_val(pmd) & _PAGE_PMD_HUGE) {
                        if (is_huge_zero_page(pmd_page(pmd)))
                                mm->context.hugetlb_pte_count++;
                        else
                                mm->context.thp_pte_count++;
                } else {
                        if (is_huge_zero_page(pmd_page(orig)))
                                mm->context.hugetlb_pte_count--;
                        else
                                mm->context.thp_pte_count--;
                }

                /* Do not try to allocate the TSB hash table if we
                 * don't have one already.  We have various locks held
                 * and thus we'll end up doing a GFP_KERNEL allocation
                 * in an atomic context.
                 *
                 * Instead, we let the first TLB miss on a hugepage
                 * take care of this.
                 */
        }

        if (!pmd_none(orig)) {
                addr &= HPAGE_MASK;
                if (pmd_trans_huge(orig)) {
                        pte_t orig_pte = __pte(pmd_val(orig));
                        bool exec = pte_exec(orig_pte);

                        tlb_batch_add_one(mm, addr, exec, REAL_HPAGE_SHIFT);
                        tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec,
                                          REAL_HPAGE_SHIFT);
                } else {
                        tlb_batch_pmd_scan(mm, addr, orig);
                }
        }
}

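/* Install the new pmd value, then account for and flush the old one. */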
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
        pmd_t orig = *pmdp;

        *pmdp = pmd;
        __set_pmd_acct(mm, addr, orig, pmd);
}

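/* Atomically replace *pmdp via cmpxchg64(), returning the old value and
 * doing the same accounting as set_pmd_at().
 */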
static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmdp, pmd_t pmd)
{
        pmd_t old;

        do {
                old = *pmdp;
        } while (cmpxchg64(&pmdp->pmd, old.pmd, pmd.pmd) != old.pmd);
        __set_pmd_acct(vma->vm_mm, address, old, pmd);

        return old;
}

/*
 * This routine is only called when splitting a THP
 */
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                     pmd_t *pmdp)
{
        pmd_t old, entry;

        entry = __pmd(pmd_val(*pmdp) & ~_PAGE_VALID);
        old = pmdp_establish(vma, address, pmdp, entry);
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

        /*
         * set_pmd_at() will not be called in a way to decrement
         * thp_pte_count when splitting a THP, so do it now.
         * Sanity check pmd before doing the actual decrement.
         */
        if ((pmd_val(entry) & _PAGE_PMD_HUGE) &&
            !is_huge_zero_page(pmd_page(entry)))
                (vma->vm_mm)->context.thp_pte_count--;

        return old;
}

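/* Stash a preallocated pte page behind the pmd, keeping deposited pages
 * on a list threaded through the pages themselves with
 * pmd_huge_pte(mm, pmdp) as the head.
 */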
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

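/* Take back the oldest deposited pte page (FIFO order); the two pte
 * slots that held the list linkage are zeroed before it is returned.
 */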
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        struct list_head *lh;
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        pte_val(pgtable[0]) = 0;
        pte_val(pgtable[1]) = 0;

        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */