arch/mips/mm/tlb-r4k.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * LOONGSON2/3 has a 4 entry itlb which is a subset of the dtlb;
 * unfortunately, the itlb is not totally transparent to software.
 */
static inline void flush_itlb(void)
{
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
        case CPU_LOONGSON3:
                write_c0_diag(4);
                break;
        default:
                break;
        }
}

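/* Only executable mappings can leave stale translations in the ITLB. */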
static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_EXEC)
                flush_itlb();
}

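/*
 * Flush every non-wired entry on the local CPU.  On cores with the
 * tlbinv facility the VTLB is cleared with a single tlbinvf and each
 * FTLB set with one more; otherwise every entry above c0_wired is
 * overwritten with a unique EntryHi in unmapped space so it can never
 * match again.  (The SMP wrappers are expected to forward these
 * local_* flushes to the other CPUs.)
 */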
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry, ftlbhighset;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();

        /* Blast 'em all away. */
        if (cpu_has_tlbinv) {
                if (current_cpu_data.tlbsizevtlb) {
                        write_c0_index(0);
                        mtc0_tlbw_hazard();
                        tlbinvf();  /* invalidate VTLB */
                }
                ftlbhighset = current_cpu_data.tlbsizevtlb +
                        current_cpu_data.tlbsizeftlbsets;
                for (entry = current_cpu_data.tlbsizevtlb;
                     entry < ftlbhighset;
                     entry++) {
                        write_c0_index(entry);
                        mtc0_tlbw_hazard();
                        tlbinvf();  /* invalidate one FTLB set */
                }
        } else {
                while (entry < current_cpu_data.tlbsize) {
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                        write_c0_index(entry);
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                        entry++;
                }
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        flush_itlb();
        local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                drop_mmu_context(mm, cpu);
        }

        preempt_enable();
}

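/*
 * Flush a range of user addresses.  Pages are handled as even/odd
 * pairs (PAGE_SIZE << 1) because a single TLB entry maps two
 * consecutive pages through EntryLo0/EntryLo1.  Small ranges are
 * probed and invalidated pair by pair; for large ranges it is cheaper
 * to hand the mm a fresh ASID via drop_mmu_context().
 */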
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;

                local_irq_save(flags);
                start = round_down(start, PAGE_SIZE << 1);
                end = round_up(end, PAGE_SIZE << 1);
                size = (end - start) >> (PAGE_SHIFT + 1);
                if (size <= (current_cpu_data.tlbsizeftlbsets ?
                             current_cpu_data.tlbsize / 8 :
                             current_cpu_data.tlbsize / 2)) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                flush_itlb();
                local_irq_restore(flags);
        }
}

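/*
 * Same probe-and-invalidate walk for kernel addresses.  Kernel
 * mappings carry the global bit, so the ASID in EntryHi does not
 * matter for the probe; oversized ranges simply fall back to
 * local_flush_tlb_all().
 */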
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        local_irq_save(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;
        if (size <= (current_cpu_data.tlbsizeftlbsets ?
                     current_cpu_data.tlbsize / 8 :
                     current_cpu_data.tlbsize / 2)) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        flush_itlb();
        local_irq_restore(flags);
}

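/*
 * Evict the single even/odd entry pair that maps @page under the
 * current ASID of the vma's mm, if it is present in the TLB.
 */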
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                local_irq_save(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                flush_itlb_vm(vma);
                local_irq_restore(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        local_irq_save(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        flush_itlb();
        local_irq_restore(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
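/*
 * Preload the TLB after a fault: probe for the pair covering @address
 * under the current ASID, load EntryLo0/EntryLo1 from the two adjacent
 * PTEs (or from the PMD with PM_HUGE_MASK for a huge page), then do an
 * indexed write if the pair was already present or a random write if
 * it was not.
 */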
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle debugger faulting in for debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        local_irq_save(flags);

        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* this could be a huge page  */
        if (pmd_huge(*pmdp)) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
                ptep = (pte_t *)pmdp;
                lo = pte_to_entrylo(pte_val(*ptep));
                write_c0_entrylo0(lo);
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
                tlbw_use_hazard();
                write_c0_pagemask(PM_DEFAULT_MASK);
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
#else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
        flush_itlb_vm(vma);
        local_irq_restore(flags);
}

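/*
 * Pin a translation: claim the next wired slot, bump c0_wired so the
 * random replacement never selects it, and write the caller-supplied
 * EntryHi/EntryLo pair with its own page mask.
 */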
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
                     unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        local_irq_save(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        tlbw_use_hazard();      /* What is the hazard here? */
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        local_irq_restore(flags);
}

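/*
 * Probe whether this core accepts the huge-page PageMask: write
 * PM_HUGE_MASK and check that it reads back unchanged.
 */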
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

int __init has_transparent_hugepage(void)
{
        unsigned int mask;
        unsigned long flags;

        local_irq_save(flags);
        write_c0_pagemask(PM_HUGE_MASK);
        back_to_back_c0_hazard();
        mask = read_c0_pagemask();
        write_c0_pagemask(PM_DEFAULT_MASK);

        local_irq_restore(flags);

        return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */

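/*
 * "ntlb=N" on the kernel command line restricts the TLB to N entries;
 * tlb_init() wires the remaining low entries so that only N take part
 * in random replacement.
 */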
static int ntlb;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);

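/*
 * Per-CPU TLB setup at boot: restore the default page mask, clear the
 * wired count, enable the RI/XI protection bits where the core
 * supports them, flush whatever translations the firmware left behind
 * and install the TLB refill handler.
 */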
void tlb_init(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set to fixed-size pages.
         */
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
            current_cpu_type() == CPU_R14000)
                write_c0_framemask(0);

        if (cpu_has_rixi) {
                /*
                 * Enable the no read, no exec bits, and enable large physical
                 * addressing.
                 */
                u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
                pg |= PG_ELPA;
#endif
                write_c0_pagegrain(pg);
        }

        /* From this point on the ARC firmware is dead.  */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS?  */

        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired-1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}