metag/usercopy: Drop unused macros
arch/metag/lib/usercopy.c (android-x86/kernel.git)
/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */

#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>                  /* def of L1_CACHE_BYTES */

#define USE_RAPF
#define RAPF_MIN_BUF_SIZE       (3*L1_CACHE_BYTES)


/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to by e.g. a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner, so it should have virtually no cost.
 */

#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        asm volatile (                                           \
                COPY                                             \
                "1:\n"                                           \
                "       .section .fixup,\"ax\"\n"                \
                "       MOV D1Ar1,#0\n"                          \
                FIXUP                                            \
                "       MOVT    D1Ar1,#HI(1b)\n"                 \
                "       JUMP    D1Ar1,#LO(1b)\n"                 \
                "       .previous\n"                             \
                "       .section __ex_table,\"a\"\n"             \
                TENTRY                                           \
                "       .previous\n"                             \
                : "=r" (to), "=r" (from), "=r" (ret)             \
                : "0" (to), "1" (from), "2" (ret)                \
                : "D1Ar1", "memory")


#define __asm_copy_to_user_1(to, from, ret)     \
        __asm_copy_user_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"    \
                "       SETB [%0],D1Ar1\n"      \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %2,%2,#1\n",       \
                "       .long 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
        __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
        __asm_copy_to_user_2x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
        __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "4:     SETW [%0++],D1Ar1\n" COPY,      \
                "5:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
        __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
        __asm_copy_to_user_6x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "6:     SETB [%0++],D1Ar1\n",           \
                "7:     ADD  %2,%2,#1\n",               \
                "       .long 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "4:     SETD [%0++],D1Ar1\n" COPY,      \
                "5:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
        __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "6:     SETB [%0++],D1Ar1\n",           \
                "7:     ADD  %2,%2,#1\n",               \
                "       .long 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "6:     SETW [%0++],D1Ar1\n" COPY,      \
                "7:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
        __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
        __asm_copy_to_user_10x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "8:     SETB [%0++],D1Ar1\n",           \
                "9:     ADD  %2,%2,#1\n",               \
                "       .long 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "6:     SETD [%0++],D1Ar1\n" COPY,      \
                "7:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
        __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "8:     SETB [%0++],D1Ar1\n",           \
                "9:     ADD  %2,%2,#1\n",               \
                "       .long 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "8:     SETW [%0++],D1Ar1\n" COPY,      \
                "9:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
        __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
        __asm_copy_to_user_14x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "10:    SETB [%0++],D1Ar1\n",           \
                "11:    ADD  %2,%2,#1\n",               \
                "       .long 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "8:     SETD [%0++],D1Ar1\n" COPY,      \
                "9:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
        __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_8x64(to, from, ret) \
        asm volatile (                                  \
                "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                "       SETL [%0],D0Ar2,D1Ar1\n"        \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "3:     ADD  %2,%2,#8\n"                \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=r" (to), "=r" (from), "=r" (ret)    \
                : "0" (to), "1" (from), "2" (ret)       \
                : "D1Ar1", "D0Ar2", "memory")

/*
 *      optimized copying loop using RAPF when 64 bit aligned
 *
 *      n               will be automatically decremented inside the loop
 *      ret             will be left intact. If an error occurs we rewind
 *                      so that the original non-optimized code fills in
 *                      this value correctly.
 *
 *      on fault:
 *              >       n will hold the total number of uncopied bytes
 *
 *              >       {'to','from'} will be rewound so that
 *                      the non-optimized code will do the proper fix up
 *
 *      DCACHE drops the cacheline, which helps in reducing cache
 *      pollution.
 *
 *      We introduce an extra SETL at the end of the loop to
 *      ensure we don't fall off the loop before we catch all
 *      errors.
 *
 *      NOTICE:
 *              LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *              Since we're using M{S,G}ETL, a fault might happen at
 *              any address in the middle of M{S,G}ETL, causing
 *              the value of LSM_STEP to be incorrect, which can
 *              cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *              ie: if LSM_STEP was 1 when a fault occurred, the
 *              next call to M{S,G}ET{L,D} will skip the first
 *              copy/get, as it thinks that the first one has already
 *              been done.
 *
 */
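/*
 *      Worked example of the NOTICE above: suppose an MSETL of four
 *      registers faults on its second transfer, so LSM_STEP is left
 *      at 2 in TXSTATUS. If it were not cleared, the next
 *      M{S,G}ET{L,D} issued by the fallback code would resume at
 *      step 2 and silently skip its first transfer. The fixup code
 *      below therefore clears bits 10:8 of TXSTATUS (the AND with
 *      #0xFFFFF8FF) before rewinding.
 */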
#define __asm_copy_user_64bit_rapf_loop(                                \
                to, from, ret, n, id, FIXUP)                            \
        asm volatile (                                                  \
                ".balign 8\n"                                           \
                "MOV    RAPF, %1\n"                                     \
                "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
                "MOV    D0Ar6, #0\n"                                    \
                "LSR    D1Ar5, %3, #6\n"                                \
                "SUB    TXRPT, D1Ar5, #2\n"                             \
                "MOV    RAPF, %1\n"                                     \
                "$Lloop"id":\n"                                         \
                "ADD    RAPF, %1, #64\n"                                \
                "21:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "22:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #32\n"                                  \
                "23:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "24:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #32\n"                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "BR     $Lloop"id"\n"                                   \
                                                                        \
                "MOV    RAPF, %1\n"                                     \
                "25:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "26:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #32\n"                                  \
                "27:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "28:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %0, %0, #8\n"                                   \
                "29:\n"                                                 \
                "SETL   [%0++], D0.7, D1.7\n"                           \
                "SUB    %3, %3, #32\n"                                  \
                "1:\n"                                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
                "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
                "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
                "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
                "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
                "SUB    A0StP, A0StP, #40\n"                            \
                "       .section .fixup,\"ax\"\n"                       \
                "4:\n"                                                  \
                "       ADD     %0, %0, #8\n"                           \
                "3:\n"                                                  \
                "       MOV     D0Ar2, TXSTATUS\n"                      \
                "       MOV     D1Ar1, TXSTATUS\n"                      \
                "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
                "       MOV     TXSTATUS, D1Ar1\n"                      \
                        FIXUP                                           \
                "       MOVT    D0Ar2,#HI(1b)\n"                        \
                "       JUMP    D0Ar2,#LO(1b)\n"                        \
                "       .previous\n"                                    \
                "       .section __ex_table,\"a\"\n"                    \
                "       .long 21b,3b\n"                                 \
                "       .long 22b,3b\n"                                 \
                "       .long 23b,3b\n"                                 \
                "       .long 24b,3b\n"                                 \
                "       .long 25b,3b\n"                                 \
                "       .long 26b,3b\n"                                 \
                "       .long 27b,3b\n"                                 \
                "       .long 28b,3b\n"                                 \
                "       .long 29b,4b\n"                                 \
                "       .previous\n"                                    \
                : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
                : "0" (to), "1" (from), "2" (ret), "3" (n)              \
                : "D1Ar1", "D0Ar2", "memory")

/*      rewind 'to' and 'from' pointers when a fault occurs
 *
 *      Rationale:
 *              A fault always occurs on writing to the user buffer. A fault
 *              is at a single address, so we need to rewind by only 4
 *              bytes.
 *              Since we do a complete read from the kernel buffer before
 *              writing, we need to rewind it also. The amount to rewind
 *              equals the number of faulty writes in MSETL,
 *              which is: [4 - (LSM_STEP-1)]*8
 *              LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *              read and stored in D0Ar2.
 *
 *              NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *                      LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *                      a fault happens at the 4th write, LSM_STEP will be 0
 *                      instead of 4. The code copes with that.
 *
 *              n is updated by the number of successful writes, which is:
 *              n = n - (LSM_STEP-1)*8
 */
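/*
 *      Worked example of the formulas above: suppose the MSETL faults
 *      with LSM_STEP == 2, i.e. one 8-byte write landed before the
 *      fault. 'from' had already advanced 32 bytes at the MGETL, so
 *      it is rewound by [4 - (2-1)]*8 = 24 bytes, leaving it just
 *      past the 8 bytes actually copied, and n is reduced by
 *      (2-1)*8 = 8 bytes.
 */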
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)        \
        __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #8\n"                             \
                "AND    D0Ar2, D0Ar2, #0x7\n"                           \
                "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
                "SUB    D0Ar2, D0Ar2, #1\n"                             \
                "MOV    D1Ar1, #4\n"                                    \
                "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
                "LSL    D0Ar2, D0Ar2, #3\n"                             \
                "LSL    D1Ar1, D1Ar1, #3\n"                             \
                "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
                "SUB    %0, %0, #8\n"                                   \
                "SUB    %1, %1, D0Ar2\n"                                \
                "SUB    %3, %3, D1Ar1\n")

/*
 *      optimized copying loop using RAPF when 32 bit aligned
 *
 *      n               will be automatically decremented inside the loop
 *      ret             will be left intact. If an error occurs we rewind
 *                      so that the original non-optimized code fills in
 *                      this value correctly.
 *
 *      on fault:
 *              >       n will hold the total number of uncopied bytes
 *
 *              >       {'to','from'} will be rewound so that
 *                      the non-optimized code will do the proper fix up
 *
 *      DCACHE drops the cacheline, which helps in reducing cache
 *      pollution.
 *
 *      We introduce an extra SETD at the end of the loop to
 *      ensure we don't fall off the loop before we catch all
 *      errors.
 *
 *      NOTICE:
 *              LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *              Since we're using M{S,G}ETL, a fault might happen at
 *              any address in the middle of M{S,G}ETL, causing
 *              the value of LSM_STEP to be incorrect, which can
 *              cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *              ie: if LSM_STEP was 1 when a fault occurred, the
 *              next call to M{S,G}ET{L,D} will skip the first
 *              copy/get, as it thinks that the first one has already
 *              been done.
 *
 */
#define __asm_copy_user_32bit_rapf_loop(                                \
                to, from, ret, n, id, FIXUP)                            \
        asm volatile (                                                  \
                ".balign 8\n"                                           \
                "MOV    RAPF, %1\n"                                     \
                "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
                "MOV    D0Ar6, #0\n"                                    \
                "LSR    D1Ar5, %3, #6\n"                                \
                "SUB    TXRPT, D1Ar5, #2\n"                             \
                "MOV    RAPF, %1\n"                                     \
                "$Lloop"id":\n"                                         \
                "ADD    RAPF, %1, #64\n"                                \
                "21:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "22:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "23:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "24:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "25:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "26:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "27:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "28:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "BR     $Lloop"id"\n"                                   \
                                                                        \
                "MOV    RAPF, %1\n"                                     \
                "29:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "30:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "31:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "32:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "33:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "34:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "35:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "36:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %0, %0, #4\n"                                   \
                "37:\n"                                                 \
                "SETD   [%0++], D0.7\n"                                 \
                "SUB    %3, %3, #16\n"                                  \
                "1:\n"                                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
                "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
                "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
                "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
                "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
                "SUB    A0StP, A0StP, #40\n"                            \
                "       .section .fixup,\"ax\"\n"                       \
                "4:\n"                                                  \
                "       ADD     %0, %0, #4\n"                           \
                "3:\n"                                                  \
                "       MOV     D0Ar2, TXSTATUS\n"                      \
                "       MOV     D1Ar1, TXSTATUS\n"                      \
                "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
                "       MOV     TXSTATUS, D1Ar1\n"                      \
                        FIXUP                                           \
                "       MOVT    D0Ar2,#HI(1b)\n"                        \
                "       JUMP    D0Ar2,#LO(1b)\n"                        \
                "       .previous\n"                                    \
                "       .section __ex_table,\"a\"\n"                    \
                "       .long 21b,3b\n"                                 \
                "       .long 22b,3b\n"                                 \
                "       .long 23b,3b\n"                                 \
                "       .long 24b,3b\n"                                 \
                "       .long 25b,3b\n"                                 \
                "       .long 26b,3b\n"                                 \
                "       .long 27b,3b\n"                                 \
                "       .long 28b,3b\n"                                 \
                "       .long 29b,3b\n"                                 \
                "       .long 30b,3b\n"                                 \
                "       .long 31b,3b\n"                                 \
                "       .long 32b,3b\n"                                 \
                "       .long 33b,3b\n"                                 \
                "       .long 34b,3b\n"                                 \
                "       .long 35b,3b\n"                                 \
                "       .long 36b,3b\n"                                 \
                "       .long 37b,4b\n"                                 \
                "       .previous\n"                                    \
                : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
                : "0" (to), "1" (from), "2" (ret), "3" (n)              \
                : "D1Ar1", "D0Ar2", "memory")

/*      rewind 'to' and 'from' pointers when a fault occurs
 *
 *      Rationale:
 *              A fault always occurs on writing to the user buffer. A fault
 *              is at a single address, so we need to rewind by only 4
 *              bytes.
 *              Since we do a complete read from the kernel buffer before
 *              writing, we need to rewind it also. The amount to rewind
 *              equals the number of faulty writes in MSETD,
 *              which is: [4 - (LSM_STEP-1)]*4
 *              LSM_STEP is bits 10:8 in TXSTATUS, which has already been
 *              read and stored in D0Ar2.
 *
 *              NOTE: If a fault occurs at the last operation in M{G,S}ETL,
 *                      LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *                      a fault happens at the 4th write, LSM_STEP will be 0
 *                      instead of 4. The code copes with that.
 *
 *              n is updated by the number of successful writes, which is:
 *              n = n - (LSM_STEP-1)*4
 */
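/*
 *      Worked example of the formulas above: suppose the MSETD faults
 *      with LSM_STEP == 3, i.e. two 4-byte writes landed before the
 *      fault. 'from' had already advanced 16 bytes at the MGETD, so
 *      it is rewound by [4 - (3-1)]*4 = 8 bytes, leaving it just past
 *      the 8 bytes actually copied, and n is reduced by
 *      (3-1)*4 = 8 bytes.
 */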
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)        \
        __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #8\n"                             \
                "AND    D0Ar2, D0Ar2, #0x7\n"                           \
                "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
                "SUB    D0Ar2, D0Ar2, #1\n"                             \
                "MOV    D1Ar1, #4\n"                                    \
                "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
                "LSL    D0Ar2, D0Ar2, #2\n"                             \
                "LSL    D1Ar1, D1Ar1, #2\n"                             \
                "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
                "SUB    %0, %0, #4\n"                                   \
                "SUB    %1, %1, D0Ar2\n"                                \
                "SUB    %3, %3, D1Ar1\n")

unsigned long __copy_user(void __user *pdst, const void *psrc,
                          unsigned long n)
{
        register char __user *dst asm ("A0.2") = pdst;
        register const char *src asm ("A1.2") = psrc;
        unsigned long retn = 0;

        if (n == 0)
                return 0;

        if ((unsigned long) src & 1) {
                __asm_copy_to_user_1(dst, src, retn);
                n--;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
                while (n > 0) {
                        __asm_copy_to_user_1(dst, src, retn);
                        n--;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_to_user_2(dst, src, retn);
                n -= 2;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
                while (n >= 2) {
                        __asm_copy_to_user_2(dst, src, retn);
                        n -= 2;
                }
        }

#ifdef USE_RAPF
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
                if (n >= RAPF_MIN_BUF_SIZE) {
                        /* copy user using 64 bit rapf copy */
                        __asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
                                                        n, "64cu");
                }
                while (n >= 8) {
                        __asm_copy_to_user_8x64(dst, src, retn);
                        n -= 8;
                }
        }
        if (n >= RAPF_MIN_BUF_SIZE) {
                /* copy user using 32 bit rapf copy */
                __asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
        }
#else
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
                while (n >= 8) {
                        __asm_copy_to_user_8x64(dst, src, retn);
                        n -= 8;
                }
        }
#endif

        while (n >= 16) {
                __asm_copy_to_user_16(dst, src, retn);
                n -= 16;
        }

        while (n >= 4) {
                __asm_copy_to_user_4(dst, src, retn);
                n -= 4;
        }

        switch (n) {
        case 0:
                break;
        case 1:
                __asm_copy_to_user_1(dst, src, retn);
                break;
        case 2:
                __asm_copy_to_user_2(dst, src, retn);
                break;
        case 3:
                __asm_copy_to_user_3(dst, src, retn);
                break;
        }

        return retn;
}
EXPORT_SYMBOL(__copy_user);
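
/*
 * Usage sketch (illustrative only; the real wrapper lives in the
 * uaccess headers): callers are expected to validate the destination
 * first and treat the return value as the number of bytes left
 * uncopied, roughly:
 *
 *      if (access_ok(VERIFY_WRITE, to, n))
 *              n = __copy_user(to, from, n);
 *      return n;
 *
 * so a return of 0 means everything was copied.
 */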

#define __asm_copy_from_user_1(to, from, ret) \
        __asm_copy_user_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"    \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %2,%2,#1\n"        \
                "       SETB [%0++],D1Ar1\n",   \
                "       .long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#2\n"                \
                "       SETW [%0++],D1Ar1\n" FIXUP,     \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
        __asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret)           \
        __asm_copy_from_user_2x_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"            \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n"                \
                "       SETB [%0++],D1Ar1\n",           \
                "       .long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#4\n"                \
                "       SETD [%0++],D1Ar1\n" FIXUP,     \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
        __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")


#define __asm_copy_from_user_8x64(to, from, ret) \
        asm volatile (                          \
                "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "       MOV D1Ar1,#0\n"                 \
                "       MOV D0Ar2,#0\n"                 \
                "3:     ADD  %2,%2,#8\n"                \
                "       SETL [%0++],D0Ar2,D1Ar1\n"      \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=a" (to), "=r" (from), "=r" (ret)    \
                : "0" (to), "1" (from), "2" (ret)       \
                : "D1Ar1", "D0Ar2", "memory")

/*      rewind 'from' pointer when a fault occurs
 *
 *      Rationale:
 *              A fault occurs while reading from the user buffer, which is
 *              the source. Since the fault is at a single address, we only
 *              need to rewind by 8 bytes.
 *              Since we don't write to the kernel buffer until the read has
 *              completed, the kernel buffer is already in the right state
 *              and needn't be corrected.
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)      \
        __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
                "SUB    %1, %1, #8\n")

/*      rewind 'from' pointer when a fault occurs
 *
 *      Rationale:
 *              A fault occurs while reading from the user buffer, which is
 *              the source. Since the fault is at a single address, we only
 *              need to rewind by 4 bytes.
 *              Since we don't write to the kernel buffer until the read has
 *              completed, the kernel buffer is already in the right state
 *              and needn't be corrected.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)      \
        __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
                "SUB    %1, %1, #4\n")


/* Copy from user to kernel, zeroing the bytes that were inaccessible in
   userland.  The return value is the number of bytes that were
   inaccessible.  */
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                                  unsigned long n)
{
        register char *dst asm ("A0.2") = pdst;
        register const char __user *src asm ("A1.2") = psrc;
        unsigned long retn = 0;

        if (n == 0)
                return 0;

        if ((unsigned long) src & 1) {
                __asm_copy_from_user_1(dst, src, retn);
                n--;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
                while (n > 0) {
                        __asm_copy_from_user_1(dst, src, retn);
                        n--;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_from_user_2(dst, src, retn);
                n -= 2;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
                while (n >= 2) {
                        __asm_copy_from_user_2(dst, src, retn);
                        n -= 2;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }

        /* We only need one check after the unalignment-adjustments,
           because if both adjustments were done, either both or
           neither reference had an exception.  */
        if (retn != 0)
                goto copy_exception_bytes;

#ifdef USE_RAPF
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
                if (n >= RAPF_MIN_BUF_SIZE) {
                        /* Copy using fast 64bit rapf */
                        __asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
                                                        n, "64cuz");
                }
                while (n >= 8) {
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }

        if (n >= RAPF_MIN_BUF_SIZE) {
                /* Copy using fast 32bit rapf */
                __asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
                                                n, "32cuz");
        }
#else
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
                while (n >= 8) {
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }
#endif

        while (n >= 4) {
                __asm_copy_from_user_4(dst, src, retn);
                n -= 4;

                if (retn)
                        goto copy_exception_bytes;
        }

        /* If we get here, there were no memory read faults.  */
        switch (n) {
                /* These copies are at least "naturally aligned" (so we don't
                   have to check each byte), due to the src alignment code.
                   The *_3 case *will* get the correct count for retn.  */
        case 0:
                /* This case deliberately left in (if you have doubts check the
                   generated assembly code).  */
                break;
        case 1:
                __asm_copy_from_user_1(dst, src, retn);
                break;
        case 2:
                __asm_copy_from_user_2(dst, src, retn);
                break;
        case 3:
                __asm_copy_from_user_3(dst, src, retn);
                break;
        }

        /* If we get here, retn correctly reflects the number of failing
           bytes.  */
        return retn;

 copy_exception_bytes:
        /* We already have "retn" bytes cleared, and need to clear the
           remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
           memset is preferred here, since this isn't speed-critical code and
           we'd rather have this a leaf-function than calling memset.  */
        {
                char *endp;
                for (endp = dst + n; dst < endp; dst++)
                        *dst = 0;
        }

        return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
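
/*
 * Worked example of the contract above (illustrative): for a 100-byte
 * __copy_user_zeroing() where the source becomes inaccessible after
 * 60 bytes, the first 60 bytes are copied, the remaining 40 bytes of
 * the kernel buffer are cleared to zero, and 40 is returned.
 */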

#define __asm_clear_8x64(to, ret) \
        asm volatile (                                  \
                "       MOV  D0Ar2,#0\n"                \
                "       MOV  D1Ar1,#0\n"                \
                "       SETL [%0],D0Ar2,D1Ar1\n"        \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "3:     ADD  %1,%1,#8\n"                \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=r" (to), "=r" (ret) \
                : "0" (to), "1" (ret)   \
                : "D1Ar1", "D0Ar2", "memory")

/* Zero userspace.  */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
        asm volatile (                                  \
                "       MOV D1Ar1,#0\n"                 \
                        CLEAR                           \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                        FIXUP                           \
                "       MOVT    D1Ar1,#HI(1b)\n"        \
                "       JUMP    D1Ar1,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                        TENTRY                          \
                "       .previous"                      \
                : "=r" (to), "=r" (ret)                 \
                : "0" (to), "1" (ret)                   \
                : "D1Ar1", "memory")

#define __asm_clear_1(to, ret) \
        __asm_clear(to, ret,                    \
                "       SETB [%0],D1Ar1\n"      \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %1,%1,#1\n",       \
                "       .long 2b,3b\n")

#define __asm_clear_2(to, ret) \
        __asm_clear(to, ret,                    \
                "       SETW [%0],D1Ar1\n"      \
                "2:     SETW [%0++],D1Ar1\n",   \
                "3:     ADD  %1,%1,#2\n",       \
                "       .long 2b,3b\n")

#define __asm_clear_3(to, ret) \
        __asm_clear(to, ret,                    \
                 "2:    SETW [%0++],D1Ar1\n"    \
                 "      SETB [%0],D1Ar1\n"      \
                 "3:    SETB [%0++],D1Ar1\n",   \
                 "4:    ADD  %1,%1,#2\n"        \
                 "5:    ADD  %1,%1,#1\n",       \
                 "      .long 2b,4b\n"          \
                 "      .long 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear(to, ret,                            \
                "       SETD [%0],D1Ar1\n"              \
                "2:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "3:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
        __asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_4x_cont(to, ret,                    \
                "       SETD [%0],D1Ar1\n"              \
                "4:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "5:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
        __asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_8x_cont(to, ret,                    \
                "       SETD [%0],D1Ar1\n"              \
                "6:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "7:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
        __asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_12x_cont(to, ret,                   \
                "       SETD [%0],D1Ar1\n"              \
                "8:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "9:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
        __asm_clear_16x_cont(to, ret, "", "", "")

unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
        register char __user *dst asm ("D0Re0") = pto;
        register unsigned long n asm ("D1Re0") = pn;
        register unsigned long retn asm ("D0Ar6") = 0;

        if ((unsigned long) dst & 1) {
                __asm_clear_1(dst, retn);
                n--;
        }

        if ((unsigned long) dst & 2) {
                __asm_clear_2(dst, retn);
                n -= 2;
        }

        /* 64 bit copy loop */
        if (!((__force unsigned long) dst & 7)) {
                while (n >= 8) {
                        __asm_clear_8x64(dst, retn);
                        n -= 8;
                }
        }

        while (n >= 16) {
                __asm_clear_16(dst, retn);
                n -= 16;
        }

        while (n >= 4) {
                __asm_clear_4(dst, retn);
                n -= 4;
        }

        switch (n) {
        case 0:
                break;
        case 1:
                __asm_clear_1(dst, retn);
                break;
        case 2:
                __asm_clear_2(dst, retn);
                break;
        case 3:
                __asm_clear_3(dst, retn);
                break;
        }

        return retn;
}
EXPORT_SYMBOL(__do_clear_user);

unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
        register unsigned char x asm ("D0Re0") = 0;
        asm volatile (
                "       GETB %0,[%2]\n"
                "1:\n"
                "       GETB %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_b);

unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
        register unsigned short x asm ("D0Re0") = 0;
        asm volatile (
                "       GETW %0,[%2]\n"
                "1:\n"
                "       GETW %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_w);

unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
        register unsigned int x asm ("D0Re0") = 0;
        asm volatile (
                "       GETD %0,[%2]\n"
                "1:\n"
                "       GETD %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_d);

long __put_user_asm_b(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETB [%2],%1\n"
                "1:\n"
                "       SETB [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_b);

long __put_user_asm_w(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETW [%2],%1\n"
                "1:\n"
                "       SETW [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_w);

long __put_user_asm_d(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETD [%2],%1\n"
                "1:\n"
                "       SETD [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_d);

long __put_user_asm_l(unsigned long long x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETL [%2],%1,%t1\n"
                "1:\n"
                "       SETL [%2],%1,%t1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_l);

long strnlen_user(const char __user *src, long count)
{
        long res;

        if (!access_ok(VERIFY_READ, src, 0))
                return 0;

        asm volatile (" MOV     D0Ar4, %1\n"
                      " MOV     D0Ar6, %2\n"
                      "0:\n"
                      " SUBS    D0FrT, D0Ar6, #0\n"
                      " SUB     D0Ar6, D0Ar6, #1\n"
                      " BLE     2f\n"
                      " GETB    D0FrT, [D0Ar4+#1++]\n"
                      "1:\n"
                      " TST     D0FrT, #255\n"
                      " BNE     0b\n"
                      "2:\n"
                      " SUB     %0, %2, D0Ar6\n"
                      "3:\n"
                      " .section .fixup,\"ax\"\n"
                      "4:\n"
                      " MOV     %0, #0\n"
                      " MOVT    D0FrT,#HI(3b)\n"
                      " JUMP    D0FrT,#LO(3b)\n"
                      " .previous\n"
                      " .section __ex_table,\"a\"\n"
                      " .long 1b,4b\n"
                      " .previous\n"
                      : "=r" (res)
                      : "r" (src), "r" (count)
                      : "D0FrT", "D0Ar4", "D0Ar6", "cc");

        return res;
}
EXPORT_SYMBOL(strnlen_user);

long __strncpy_from_user(char *dst, const char __user *src, long count)
{
        long res;

        if (count == 0)
                return 0;

        /*
         * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
         *  So do we.
         *
         *  This code is deduced from:
         *
         *      char tmp2;
         *      long tmp1, tmp3;
         *      tmp1 = count;
         *      while ((*dst++ = (tmp2 = *src++)) != 0
         *             && --tmp1)
         *        ;
         *
         *      res = count - tmp1;
         *
         *  with tweaks.
         */

        asm volatile (" MOV  %0,%3\n"
                      "1:\n"
                      " GETB D0FrT,[%2++]\n"
                      "2:\n"
                      " CMP  D0FrT,#0\n"
                      " SETB [%1++],D0FrT\n"
                      " BEQ  3f\n"
                      " SUBS %0,%0,#1\n"
                      " BNZ  1b\n"
                      "3:\n"
                      " SUB  %0,%3,%0\n"
                      "4:\n"
                      " .section .fixup,\"ax\"\n"
                      "5:\n"
                      " MOV  %0,%7\n"
                      " MOVT    D0FrT,#HI(4b)\n"
                      " JUMP    D0FrT,#LO(4b)\n"
                      " .previous\n"
                      " .section __ex_table,\"a\"\n"
                      " .long 2b,5b\n"
                      " .previous"
                      : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
                      : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
                      : "D0FrT", "memory", "cc");

        return res;
}
EXPORT_SYMBOL(__strncpy_from_user);