sparc64: Fix race in TLB batch processing.
[uclinux-h8/linux.git] arch/sparc/mm/tlb.c
/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

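/* One flush batch lives on each CPU.  As a rough sketch only (the real
 * definition lives in the sparc64 tlbflush header and may differ in exact
 * types), struct tlb_batch carries the fields this file relies on:
 *
 *      struct tlb_batch {
 *              struct mm_struct *mm;                   the mm being batched
 *              unsigned long tlb_nr;                   number of queued vaddrs
 *              unsigned long active;                   inside lazy MMU mode?
 *              unsigned long vaddrs[TLB_BATCH_NR];     the queued addresses
 *      };
 */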
static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

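/* Drain this CPU's batch.  The TSB entries are torn down first so a TLB
 * miss cannot repopulate the TLB from a stale TSB entry; then a single
 * pending address is flushed via global_flush_tlb_page(), while larger
 * batches go through the SMP cross-call (or __flush_tlb_pending() on UP).
 * If the mm no longer holds a valid hardware context there is nothing to
 * flush, but the batch is emptied either way.
 */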
void flush_tlb_pending(void)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        struct mm_struct *mm = tb->mm;

        if (!tb->tlb_nr)
                goto out;

        flush_tsb_user(tb);

        if (CTX_VALID(mm->context)) {
                if (tb->tlb_nr == 1) {
                        global_flush_tlb_page(mm, tb->vaddrs[0]);
                } else {
#ifdef CONFIG_SMP
                        smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
                                              &tb->vaddrs[0]);
#else
                        __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
                                            tb->tlb_nr, &tb->vaddrs[0]);
#endif
                }
        }

        tb->tlb_nr = 0;

out:
        put_cpu_var(tlb_batch);
}

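/* The generic mm code brackets batched PTE updates with these two hooks.
 * While ->active is set, tlb_batch_add_one() queues addresses instead of
 * flushing them one at a time; leaving lazy MMU mode drains whatever is
 * still pending.
 */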
void arch_enter_lazy_mmu_mode(void)
{
        struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

        tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
        struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

        if (tb->tlb_nr)
                flush_tlb_pending();
        tb->active = 0;
}

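/* Queue one (mm, vaddr) pair for a deferred flush.  Since vaddr is page
 * aligned, the exec flag is encoded in bit 0.  A batch only ever holds
 * addresses for a single mm, so a pending batch for a different mm is
 * drained first.  Outside lazy MMU mode (!tb->active) the address is
 * flushed from the TLB and TSB immediately rather than queued, and a
 * batch that reaches TLB_BATCH_NR entries is drained on the spot.
 */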
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
                              bool exec)
{
        struct tlb_batch *tb = &get_cpu_var(tlb_batch);
        unsigned long nr;

        vaddr &= PAGE_MASK;
        if (exec)
                vaddr |= 0x1UL;

        nr = tb->tlb_nr;

        if (unlikely(nr != 0 && mm != tb->mm)) {
                flush_tlb_pending();
                nr = 0;
        }

        if (!tb->active) {
                global_flush_tlb_page(mm, vaddr);
                flush_tsb_user_page(mm, vaddr);
                /* Balance the preempt_disable() done by get_cpu_var()
                 * before bailing out of the batching path.
                 */
                put_cpu_var(tlb_batch);
                return;
        }

        if (nr == 0)
                tb->mm = mm;

        tb->vaddrs[nr] = vaddr;
        tb->tlb_nr = ++nr;
        if (nr >= TLB_BATCH_NR)
                flush_tlb_pending();

        put_cpu_var(tlb_batch);
}

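/* Tear down one user PTE mapping (presumably reached from the sparc64
 * set_pte_at() path; the caller passes the old PTE value in orig).  If the
 * old PTE was dirty and maps a real file page whose kernel alias sits at a
 * different D-cache colour (bit 13, i.e. an 8K page offset), the page is
 * flushed from all D-caches to kill the illegal alias.  The TLB flush
 * itself is batched via tlb_batch_add_one() unless the entire address
 * space is being torn down anyway.
 */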
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
                   pte_t *ptep, pte_t orig, int fullmm)
{
        if (tlb_type != hypervisor &&
            pte_dirty(orig)) {
                unsigned long paddr, pfn = pte_pfn(orig);
                struct address_space *mapping;
                struct page *page;

                if (!pfn_valid(pfn))
                        goto no_cache_flush;

                page = pfn_to_page(pfn);
                if (PageReserved(page))
                        goto no_cache_flush;

                /* A real file page? */
                mapping = page_mapping(page);
                if (!mapping)
                        goto no_cache_flush;

                paddr = (unsigned long) page_address(page);
                if ((paddr ^ vaddr) & (1 << 13))
                        flush_dcache_page_all(mm, page);
        }

no_cache_flush:
        if (!fullmm)
                tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
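/* The old PMD may have pointed at a regular page table rather than a huge
 * page.  In that case every valid PTE underneath it still maps part of the
 * HPAGE_SIZE region and has to be queued for flushing individually.
 */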
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
                               pmd_t pmd, bool exec)
{
        unsigned long end;
        pte_t *pte;

        pte = pte_offset_map(&pmd, vaddr);
        end = vaddr + HPAGE_SIZE;
        while (vaddr < end) {
                if (pte_val(*pte) & _PAGE_VALID)
                        tlb_batch_add_one(mm, vaddr, exec);
                pte++;
                vaddr += PAGE_SIZE;
        }
        pte_unmap(pte);
}

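/* Install a new PMD value for a user address space.  A transition between
 * a huge and a non-huge mapping adjusts mm->context.huge_pte_count (which
 * the TSB code later consults when deciding whether a huge-page TSB is
 * needed), and any mapping the old PMD provided is queued for flushing:
 * one HPAGE_MASK-aligned address if it was huge, or a PTE-by-PTE scan if
 * it pointed at a regular page table.
 */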
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                pmd_t *pmdp, pmd_t pmd)
{
        pmd_t orig = *pmdp;

        *pmdp = pmd;

        if (mm == &init_mm)
                return;

        if ((pmd_val(pmd) ^ pmd_val(orig)) & PMD_ISHUGE) {
                if (pmd_val(pmd) & PMD_ISHUGE)
                        mm->context.huge_pte_count++;
                else
                        mm->context.huge_pte_count--;

                /* Do not try to allocate the TSB hash table if we
                 * don't have one already.  We have various locks held
                 * and thus we'll end up doing a GFP_KERNEL allocation
                 * in an atomic context.
                 *
                 * Instead, we let the first TLB miss on a hugepage
                 * take care of this.
                 */
        }

        if (!pmd_none(orig)) {
                bool exec = ((pmd_val(orig) & PMD_HUGE_EXEC) != 0);

                addr &= HPAGE_MASK;
                if (pmd_val(orig) & PMD_ISHUGE)
                        tlb_batch_add_one(mm, addr, exec);
                else
                        tlb_batch_pmd_scan(mm, addr, orig, exec);
        }
}

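/* The deposit/withdraw pair parks pre-allocated page tables for huge PMDs
 * on a per-mm list under mm->page_table_lock, so that a later huge-page
 * split cannot fail for lack of memory.  The page table itself doubles as
 * the list node.
 */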
void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        if (!mm->pmd_huge_pte)
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) mm->pmd_huge_pte);
        mm->pmd_huge_pte = pgtable;
}

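/* Take one previously deposited page table back off the list.  The first
 * two PTE slots held the struct list_head while the table was parked, so
 * they are cleared before the table is handed back for reuse.
 */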
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm)
{
        struct list_head *lh;
        pgtable_t pgtable;

        assert_spin_locked(&mm->page_table_lock);

        /* FIFO */
        pgtable = mm->pmd_huge_pte;
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                mm->pmd_huge_pte = NULL;
        else {
                mm->pmd_huge_pte = (pgtable_t) lh->next;
                list_del(lh);
        }
        pte_val(pgtable[0]) = 0;
        pte_val(pgtable[1]) = 0;

        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */