arch/mips/include/asm/r4kcache.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Inline assembly cache operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997 - 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 2004 Ralf Baechle (ralf@linux-mips.org)
 */
#ifndef _ASM_R4KCACHE_H
#define _ASM_R4KCACHE_H

#include <asm/asm.h>
#include <asm/cacheops.h>
#include <asm/compiler.h>
#include <asm/cpu-features.h>
#include <asm/cpu-type.h>
#include <asm/mipsmtregs.h>
#include <asm/uaccess.h> /* for segment_eq() */

extern void (*r4k_blast_dcache)(void);
extern void (*r4k_blast_icache)(void);

/*
 * This macro returns a properly sign-extended address suitable as a base
 * address for indexed cache operations.  Two issues here:
 *
 *  - The MIPS32 and MIPS64 specs permit an implementation to directly derive
 *    the index bits from the virtual address.  This breaks with the tradition
 *    set by the R4000.  To keep unpleasant surprises from happening we pick
 *    an address in KSEG0 / CKSEG0.
 *  - We need a properly sign extended address for 64-bit code.  To get away
 *    without ifdefs we let the compiler do it by a type cast.
 */
#define INDEX_BASE	CKSEG0

#define cache_op(op,addr)						\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	noreorder				\n"	\
	"	.set "MIPS_ISA_ARCH_LEVEL"			\n"	\
	"	cache	%0, %1					\n"	\
	"	.set	pop					\n"	\
	:								\
	: "i" (op), "R" (*(unsigned char *)(addr)))

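/*
 * Usage note: the "R" constraint turns the byte at @addr into a memory
 * operand, so the compiler emits a valid base+offset addressing mode for
 * the cache instruction and keeps the asm ordered against accesses to
 * that byte.  E.g. cache_op(Hit_Writeback_Inv_D, addr) writes back and
 * invalidates the D-cache line containing addr (see flush_dcache_line()
 * below).
 */
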
#ifdef CONFIG_MIPS_MT

#define __iflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_iflushes;					\
	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

#define __iflush_epilogue						\
	}

#define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

#define __dflush_epilogue \
	}

#define __inv_dflush_prologue __dflush_prologue
#define __inv_dflush_epilogue __dflush_epilogue
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue __sflush_prologue
#define __inv_sflush_epilogue __sflush_epilogue

#else /* CONFIG_MIPS_MT */

#define __iflush_prologue {
#define __iflush_epilogue }
#define __dflush_prologue {
#define __dflush_epilogue }
#define __inv_dflush_prologue {
#define __inv_dflush_epilogue }
#define __sflush_prologue {
#define __sflush_epilogue }
#define __inv_sflush_prologue {
#define __inv_sflush_epilogue }

#endif /* CONFIG_MIPS_MT */

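/*
 * Note: with CONFIG_MIPS_MT the prologue/epilogue pairs above wrap each
 * flush in a loop that repeats it mt_n_iflushes/mt_n_dflushes times; in
 * the non-MT case they reduce to a plain block.
 */
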
static inline void flush_icache_line_indexed(unsigned long addr)
{
	__iflush_prologue
	cache_op(Index_Invalidate_I, addr);
	__iflush_epilogue
}

static inline void flush_dcache_line_indexed(unsigned long addr)
{
	__dflush_prologue
	cache_op(Index_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void flush_scache_line_indexed(unsigned long addr)
{
	cache_op(Index_Writeback_Inv_SD, addr);
}

static inline void flush_icache_line(unsigned long addr)
{
	__iflush_prologue
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
		cache_op(Hit_Invalidate_I, addr);
		break;
	}
	__iflush_epilogue
}

static inline void flush_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Writeback_Inv_D, addr);
	__dflush_epilogue
}

static inline void invalidate_dcache_line(unsigned long addr)
{
	__dflush_prologue
	cache_op(Hit_Invalidate_D, addr);
	__dflush_epilogue
}

static inline void invalidate_scache_line(unsigned long addr)
{
	cache_op(Hit_Invalidate_SD, addr);
}

static inline void flush_scache_line(unsigned long addr)
{
	cache_op(Hit_Writeback_Inv_SD, addr);
}

#define protected_cache_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set "MIPS_ISA_ARCH_LEVEL"	\n"		\
	"1:	cache	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

#define protected_cachee_op(op,addr)				\
	__asm__ __volatile__(					\
	"	.set	push			\n"		\
	"	.set	noreorder		\n"		\
	"	.set	mips0			\n"		\
	"	.set	eva			\n"		\
	"1:	cachee	%0, (%1)		\n"		\
	"2:	.set	pop			\n"		\
	"	.section __ex_table,\"a\"	\n"		\
	"	"STR(PTR)" 1b, 2b		\n"		\
	"	.previous"					\
	:							\
	: "i" (op), "r" (addr))

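/*
 * Note: the __ex_table entry pairs the cache op at label 1: with the
 * fixup at label 2:, so a fault on a bad address is silently skipped
 * rather than taking the kernel down.
 */
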
/*
 * The next two are for badland addresses like signal trampolines.
 */
static inline void protected_flush_icache_line(unsigned long addr)
{
	switch (boot_cpu_type()) {
	case CPU_LOONGSON2:
		protected_cache_op(Hit_Invalidate_I_Loongson2, addr);
		break;

	default:
#ifdef CONFIG_EVA
		protected_cachee_op(Hit_Invalidate_I, addr);
#else
		protected_cache_op(Hit_Invalidate_I, addr);
#endif
		break;
	}
}

/*
 * R10000 / R12000 hazard - these processors don't support the Hit_Writeback_D
 * cacheop so we use Hit_Writeback_Inv_D which is supported by all R4000-style
 * caches.  Only one cacheline gets invalidated unnecessarily, so the
 * penalty isn't too severe.
 */
static inline void protected_writeback_dcache_line(unsigned long addr)
{
#ifdef CONFIG_EVA
	protected_cachee_op(Hit_Writeback_Inv_D, addr);
#else
	protected_cache_op(Hit_Writeback_Inv_D, addr);
#endif
}

static inline void protected_writeback_scache_line(unsigned long addr)
{
	protected_cache_op(Hit_Writeback_Inv_SD, addr);
}

/*
 * This one is RM7000-specific
 */
static inline void invalidate_tcache_page(unsigned long addr)
{
	cache_op(Page_Invalidate_T, addr);
}

#ifndef CONFIG_CPU_MIPSR6
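/*
 * Each cacheN_unroll32 macro below issues 32 cache ops at N-byte line
 * strides, covering 32 * N bytes per invocation; callers advance the
 * base address by lsize * 32 between invocations (see
 * __BUILD_BLAST_CACHE below).
 */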
#define cache16_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)	\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)	\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)	\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)	\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x110(%0)	\n"	\
	"	cache %1, 0x120(%0); cache %1, 0x130(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x150(%0)	\n"	\
	"	cache %1, 0x160(%0); cache %1, 0x170(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x190(%0)	\n"	\
	"	cache %1, 0x1a0(%0); cache %1, 0x1b0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1d0(%0)	\n"	\
	"	cache %1, 0x1e0(%0); cache %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)	\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)	\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x120(%0)	\n"	\
	"	cache %1, 0x140(%0); cache %1, 0x160(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1a0(%0)	\n"	\
	"	cache %1, 0x1c0(%0); cache %1, 0x1e0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x220(%0)	\n"	\
	"	cache %1, 0x240(%0); cache %1, 0x260(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2a0(%0)	\n"	\
	"	cache %1, 0x2c0(%0); cache %1, 0x2e0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x320(%0)	\n"	\
	"	cache %1, 0x340(%0); cache %1, 0x360(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3a0(%0)	\n"	\
	"	cache %1, 0x3c0(%0); cache %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)	\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x140(%0)	\n"	\
	"	cache %1, 0x180(%0); cache %1, 0x1c0(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x240(%0)	\n"	\
	"	cache %1, 0x280(%0); cache %1, 0x2c0(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x340(%0)	\n"	\
	"	cache %1, 0x380(%0); cache %1, 0x3c0(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x440(%0)	\n"	\
	"	cache %1, 0x480(%0); cache %1, 0x4c0(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x540(%0)	\n"	\
	"	cache %1, 0x580(%0); cache %1, 0x5c0(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x640(%0)	\n"	\
	"	cache %1, 0x680(%0); cache %1, 0x6c0(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x740(%0)	\n"	\
	"	cache %1, 0x780(%0); cache %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache128_unroll32(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips3					\n"	\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)	\n"	\
	"	cache %1, 0x100(%0); cache %1, 0x180(%0)	\n"	\
	"	cache %1, 0x200(%0); cache %1, 0x280(%0)	\n"	\
	"	cache %1, 0x300(%0); cache %1, 0x380(%0)	\n"	\
	"	cache %1, 0x400(%0); cache %1, 0x480(%0)	\n"	\
	"	cache %1, 0x500(%0); cache %1, 0x580(%0)	\n"	\
	"	cache %1, 0x600(%0); cache %1, 0x680(%0)	\n"	\
	"	cache %1, 0x700(%0); cache %1, 0x780(%0)	\n"	\
	"	cache %1, 0x800(%0); cache %1, 0x880(%0)	\n"	\
	"	cache %1, 0x900(%0); cache %1, 0x980(%0)	\n"	\
	"	cache %1, 0xa00(%0); cache %1, 0xa80(%0)	\n"	\
	"	cache %1, 0xb00(%0); cache %1, 0xb80(%0)	\n"	\
	"	cache %1, 0xc00(%0); cache %1, 0xc80(%0)	\n"	\
	"	cache %1, 0xd00(%0); cache %1, 0xd80(%0)	\n"	\
	"	cache %1, 0xe00(%0); cache %1, 0xe80(%0)	\n"	\
	"	cache %1, 0xf00(%0); cache %1, 0xf80(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#else
/*
 * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
 * This means we now need to increment the base register before we flush
 * more cache lines.
 */
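/*
 * Note: the macros below use $1 (the assembler temporary) as the
 * incremented base register, which is why they are assembled with
 * ".set noat".
 */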
#define cache16_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x010(%0)\n"	\
	"	cache %1, 0x020(%0); cache %1, 0x030(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x050(%0)\n"	\
	"	cache %1, 0x060(%0); cache %1, 0x070(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x090(%0)\n"	\
	"	cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"	\
	"	cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"	\
	"	addiu $1, %0, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x010($1)\n"	\
	"	cache %1, 0x020($1); cache %1, 0x030($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x050($1)\n"	\
	"	cache %1, 0x060($1); cache %1, 0x070($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x090($1)\n"	\
	"	cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"	\
	"	cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache32_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x020(%0)\n"	\
	"	cache %1, 0x040(%0); cache %1, 0x060(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"	\
	"	cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"	\
	"	addiu $1, %0, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x020($1)\n"	\
	"	cache %1, 0x040($1); cache %1, 0x060($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0a0($1)\n"	\
	"	cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache64_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x040(%0)\n"	\
	"	cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"	\
	"	addiu $1, %0, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x040($1)\n"	\
	"	cache %1, 0x080($1); cache %1, 0x0c0($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));

#define cache128_unroll32(base,op)				\
	__asm__ __volatile__(					\
	"	.set push\n"					\
	"	.set noreorder\n"				\
	"	.set mips64r6\n"				\
	"	.set noat\n"					\
	"	cache %1, 0x000(%0); cache %1, 0x080(%0)\n"	\
	"	addiu $1, %0, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	addiu $1, $1, 0x100\n"				\
	"	cache %1, 0x000($1); cache %1, 0x080($1)\n"	\
	"	.set pop\n"					\
		:						\
		: "r" (base),					\
		  "i" (op));
#endif /* CONFIG_CPU_MIPSR6 */

/*
 * Perform the cache operation specified by op using a user mode virtual
 * address while in kernel mode.
 */
#define cache16_unroll32_user(base,op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x010(%0)	\n"	\
	"	cachee %1, 0x020(%0); cachee %1, 0x030(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x050(%0)	\n"	\
	"	cachee %1, 0x060(%0); cachee %1, 0x070(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x090(%0)	\n"	\
	"	cachee %1, 0x0a0(%0); cachee %1, 0x0b0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0d0(%0)	\n"	\
	"	cachee %1, 0x0e0(%0); cachee %1, 0x0f0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x110(%0)	\n"	\
	"	cachee %1, 0x120(%0); cachee %1, 0x130(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x150(%0)	\n"	\
	"	cachee %1, 0x160(%0); cachee %1, 0x170(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x190(%0)	\n"	\
	"	cachee %1, 0x1a0(%0); cachee %1, 0x1b0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1d0(%0)	\n"	\
	"	cachee %1, 0x1e0(%0); cachee %1, 0x1f0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache32_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x020(%0)	\n"	\
	"	cachee %1, 0x040(%0); cachee %1, 0x060(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0a0(%0)	\n"	\
	"	cachee %1, 0x0c0(%0); cachee %1, 0x0e0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x120(%0)	\n"	\
	"	cachee %1, 0x140(%0); cachee %1, 0x160(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1a0(%0)	\n"	\
	"	cachee %1, 0x1c0(%0); cachee %1, 0x1e0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x220(%0)	\n"	\
	"	cachee %1, 0x240(%0); cachee %1, 0x260(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2a0(%0)	\n"	\
	"	cachee %1, 0x2c0(%0); cachee %1, 0x2e0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x320(%0)	\n"	\
	"	cachee %1, 0x340(%0); cachee %1, 0x360(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3a0(%0)	\n"	\
	"	cachee %1, 0x3c0(%0); cachee %1, 0x3e0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

#define cache64_unroll32_user(base, op)					\
	__asm__ __volatile__(						\
	"	.set push					\n"	\
	"	.set noreorder					\n"	\
	"	.set mips0					\n"	\
	"	.set eva					\n"	\
	"	cachee %1, 0x000(%0); cachee %1, 0x040(%0)	\n"	\
	"	cachee %1, 0x080(%0); cachee %1, 0x0c0(%0)	\n"	\
	"	cachee %1, 0x100(%0); cachee %1, 0x140(%0)	\n"	\
	"	cachee %1, 0x180(%0); cachee %1, 0x1c0(%0)	\n"	\
	"	cachee %1, 0x200(%0); cachee %1, 0x240(%0)	\n"	\
	"	cachee %1, 0x280(%0); cachee %1, 0x2c0(%0)	\n"	\
	"	cachee %1, 0x300(%0); cachee %1, 0x340(%0)	\n"	\
	"	cachee %1, 0x380(%0); cachee %1, 0x3c0(%0)	\n"	\
	"	cachee %1, 0x400(%0); cachee %1, 0x440(%0)	\n"	\
	"	cachee %1, 0x480(%0); cachee %1, 0x4c0(%0)	\n"	\
	"	cachee %1, 0x500(%0); cachee %1, 0x540(%0)	\n"	\
	"	cachee %1, 0x580(%0); cachee %1, 0x5c0(%0)	\n"	\
	"	cachee %1, 0x600(%0); cachee %1, 0x640(%0)	\n"	\
	"	cachee %1, 0x680(%0); cachee %1, 0x6c0(%0)	\n"	\
	"	cachee %1, 0x700(%0); cachee %1, 0x740(%0)	\n"	\
	"	cachee %1, 0x780(%0); cachee %1, 0x7c0(%0)	\n"	\
	"	.set pop					\n"	\
		:							\
		: "r" (base),						\
		  "i" (op));

/* build blast_xxx, blast_xxx_page, blast_xxx_page_indexed */
#define __BUILD_BLAST_CACHE(pfx, desc, indexop, hitop, lsize, extra)	\
static inline void extra##blast_##pfx##cache##lsize(void)		\
{									\
	unsigned long start = INDEX_BASE;				\
	unsigned long end = start + current_cpu_data.desc.waysize;	\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32(start, hitop);			\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}									\
									\
static inline void extra##blast_##pfx##cache##lsize##_page_indexed(unsigned long page) \
{									\
	unsigned long indexmask = current_cpu_data.desc.waysize - 1;	\
	unsigned long start = INDEX_BASE + (page & indexmask);		\
	unsigned long end = start + PAGE_SIZE;				\
	unsigned long ws_inc = 1UL << current_cpu_data.desc.waybit;	\
	unsigned long ws_end = current_cpu_data.desc.ways <<		\
			       current_cpu_data.desc.waybit;		\
	unsigned long ws, addr;						\
									\
	__##pfx##flush_prologue						\
									\
	for (ws = 0; ws < ws_end; ws += ws_inc)				\
		for (addr = start; addr < end; addr += lsize * 32)	\
			cache##lsize##_unroll32(addr|ws, indexop);	\
									\
	__##pfx##flush_epilogue						\
}

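/*
 * For example, __BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D,
 * Hit_Writeback_Inv_D, 32, ) below generates blast_dcache32(),
 * blast_dcache32_page() and blast_dcache32_page_indexed().
 */
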
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 16, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 16, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I_Loongson2, 32, loongson2_)
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 32, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 64, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 64, )
__BUILD_BLAST_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D, 128, )
__BUILD_BLAST_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 128, )
__BUILD_BLAST_CACHE(s, scache, Index_Writeback_Inv_SD, Hit_Writeback_Inv_SD, 128, )

__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 16, )
__BUILD_BLAST_CACHE(inv_d, dcache, Index_Writeback_Inv_D, Hit_Invalidate_D, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 16, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 32, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 64, )
__BUILD_BLAST_CACHE(inv_s, scache, Index_Writeback_Inv_SD, Hit_Invalidate_SD, 128, )

#define __BUILD_BLAST_USER_CACHE(pfx, desc, indexop, hitop, lsize) \
static inline void blast_##pfx##cache##lsize##_user_page(unsigned long page) \
{									\
	unsigned long start = page;					\
	unsigned long end = page + PAGE_SIZE;				\
									\
	__##pfx##flush_prologue						\
									\
	do {								\
		cache##lsize##_unroll32_user(start, hitop);		\
		start += lsize * 32;					\
	} while (start < end);						\
									\
	__##pfx##flush_epilogue						\
}

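/*
 * E.g. the first invocation below generates blast_dcache16_user_page(),
 * which flushes a user page via the cachee-based unroll macros above.
 */
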
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 16)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 16)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 32)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 32)
__BUILD_BLAST_USER_CACHE(d, dcache, Index_Writeback_Inv_D, Hit_Writeback_Inv_D,
			 64)
__BUILD_BLAST_USER_CACHE(i, icache, Index_Invalidate_I, Hit_Invalidate_I, 64)

/* build blast_xxx_range, protected_blast_xxx_range */
#define __BUILD_BLAST_CACHE_RANGE(pfx, desc, hitop, prot, extra)	\
static inline void prot##extra##blast_##pfx##cache##_range(unsigned long start, \
						    unsigned long end)	\
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	while (1) {							\
		prot##cache_op(hitop, addr);				\
		if (addr == aend)					\
			break;						\
		addr += lsize;						\
	}								\
									\
	__##pfx##flush_epilogue						\
}

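/*
 * E.g. __BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D,
 * protected_, ) below generates protected_blast_dcache_range(start, end),
 * which walks [start, end) one cache line at a time.
 */
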
#ifndef CONFIG_EVA

__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, protected_, )

#else

#define __BUILD_PROT_BLAST_CACHE_RANGE(pfx, desc, hitop)		\
static inline void protected_blast_##pfx##cache##_range(unsigned long start,\
							unsigned long end) \
{									\
	unsigned long lsize = cpu_##desc##_line_size();			\
	unsigned long addr = start & ~(lsize - 1);			\
	unsigned long aend = (end - 1) & ~(lsize - 1);			\
									\
	__##pfx##flush_prologue						\
									\
	if (segment_eq(get_fs(), USER_DS)) {				\
		while (1) {						\
			protected_cachee_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	} else {							\
		while (1) {						\
			protected_cache_op(hitop, addr);		\
			if (addr == aend)				\
				break;					\
			addr += lsize;					\
		}							\
	}								\
	__##pfx##flush_epilogue						\
}

__BUILD_PROT_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D)
__BUILD_PROT_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I)

#endif
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, protected_, )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I_Loongson2, \
	protected_, loongson2_)
__BUILD_BLAST_CACHE_RANGE(d, dcache, Hit_Writeback_Inv_D, , )
__BUILD_BLAST_CACHE_RANGE(i, icache, Hit_Invalidate_I, , )
__BUILD_BLAST_CACHE_RANGE(s, scache, Hit_Writeback_Inv_SD, , )
/* blast_inv_dcache_range */
__BUILD_BLAST_CACHE_RANGE(inv_d, dcache, Hit_Invalidate_D, , )
__BUILD_BLAST_CACHE_RANGE(inv_s, scache, Hit_Invalidate_SD, , )

#endif /* _ASM_R4KCACHE_H */