arch/metag/lib/usercopy.c
/*
 * User address space access functions.
 * The non-inlined parts of asm-metag/uaccess.h are here.
 *
 * Copyright (C) 2006, Imagination Technologies.
 * Copyright (C) 2000, Axis Communications AB.
 *
 * Written by Hans-Peter Nilsson.
 * Pieces used from memcpy, originally by Kenny Ranerup a long time ago.
 * Modified for Meta by Will Newton.
 */

#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/cache.h>                  /* def of L1_CACHE_BYTES */

#define USE_RAPF
#define RAPF_MIN_BUF_SIZE       (3*L1_CACHE_BYTES)


/* The "double write" in this code is because the Meta will not fault
 * immediately unless the memory pipe is forced to, e.g. by a data stall or
 * another memory op. The second write should be discarded by the write
 * combiner, so it should have virtually no cost.
 */
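
/* For example, in __asm_copy_to_user_1 below the unlabelled SETB issues
 * the store and the labelled "2:" SETB repeats it; only the labelled
 * store has an __ex_table entry, so a fault is taken and fixed up at a
 * known address while the duplicate store is absorbed by the write
 * combiner.
 */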

#define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        asm volatile (                                           \
                COPY                                             \
                "1:\n"                                           \
                "       .section .fixup,\"ax\"\n"                \
                "       MOV D1Ar1,#0\n"                          \
                FIXUP                                            \
                "       MOVT    D1Ar1,#HI(1b)\n"                 \
                "       JUMP    D1Ar1,#LO(1b)\n"                 \
                "       .previous\n"                             \
                "       .section __ex_table,\"a\"\n"             \
                TENTRY                                           \
                "       .previous\n"                             \
                : "=r" (to), "=r" (from), "=r" (ret)             \
                : "0" (to), "1" (from), "2" (ret)                \
                : "D1Ar1", "memory")


#define __asm_copy_to_user_1(to, from, ret)     \
        __asm_copy_user_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"    \
                "       SETB [%0],D1Ar1\n"      \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %2,%2,#1\n",       \
                "       .long 2b,3b\n")

#define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_2(to, from, ret) \
        __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_3(to, from, ret) \
        __asm_copy_to_user_2x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")

#define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_to_user_4(to, from, ret) \
        __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_5(to, from, ret) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n",               \
                "       .long 4b,5b\n")

#define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "4:     SETW [%0++],D1Ar1\n" COPY,      \
                "5:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_6(to, from, ret) \
        __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_7(to, from, ret) \
        __asm_copy_to_user_6x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "6:     SETB [%0++],D1Ar1\n",           \
                "7:     ADD  %2,%2,#1\n",               \
                "       .long 6b,7b\n")

#define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_4x_cont(to, from, ret,       \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "4:     SETD [%0++],D1Ar1\n" COPY,      \
                "5:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_copy_to_user_8(to, from, ret) \
        __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_9(to, from, ret) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "6:     SETB [%0++],D1Ar1\n",           \
                "7:     ADD  %2,%2,#1\n",               \
                "       .long 6b,7b\n")

#define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "6:     SETW [%0++],D1Ar1\n" COPY,      \
                "7:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_10(to, from, ret) \
        __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_11(to, from, ret) \
        __asm_copy_to_user_10x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "8:     SETB [%0++],D1Ar1\n",           \
                "9:     ADD  %2,%2,#1\n",               \
                "       .long 8b,9b\n")

#define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_8x_cont(to, from, ret,       \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "6:     SETD [%0++],D1Ar1\n" COPY,      \
                "7:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_copy_to_user_12(to, from, ret) \
        __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_13(to, from, ret) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "8:     SETB [%0++],D1Ar1\n",           \
                "9:     ADD  %2,%2,#1\n",               \
                "       .long 8b,9b\n")

#define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETW D1Ar1,[%1++]\n"            \
                "       SETW [%0],D1Ar1\n"              \
                "8:     SETW [%0++],D1Ar1\n" COPY,      \
                "9:     ADD  %2,%2,#2\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_14(to, from, ret) \
        __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_15(to, from, ret) \
        __asm_copy_to_user_14x_cont(to, from, ret,      \
                "       GETB D1Ar1,[%1++]\n"            \
                "       SETB [%0],D1Ar1\n"              \
                "10:    SETB [%0++],D1Ar1\n",           \
                "11:    ADD  %2,%2,#1\n",               \
                "       .long 10b,11b\n")

#define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_to_user_12x_cont(to, from, ret,      \
                "       GETD D1Ar1,[%1++]\n"            \
                "       SETD [%0],D1Ar1\n"              \
                "8:     SETD [%0++],D1Ar1\n" COPY,      \
                "9:     ADD  %2,%2,#4\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_copy_to_user_16(to, from, ret) \
        __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")

#define __asm_copy_to_user_8x64(to, from, ret) \
        asm volatile (                                  \
                "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                "       SETL [%0],D0Ar2,D1Ar1\n"        \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "3:     ADD  %2,%2,#8\n"                \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=r" (to), "=r" (from), "=r" (ret)    \
                : "0" (to), "1" (from), "2" (ret)       \
                : "D1Ar1", "D0Ar2", "memory")

/*
 *      optimized copying loop using RAPF when 64 bit aligned
 *
 *      n               will be automatically decremented inside the loop
 *      ret             will be left intact. if an error occurs we will rewind
 *                      so that the original non-optimized code will fill up
 *                      this value correctly.
 *
 *      on fault:
 *              >       n will hold total number of uncopied bytes
 *
 *              >       {'to','from'} will be rewound so that
 *                      the non-optimized code will do the proper fix up
 *
 *      DCACHE drops the cacheline which helps in reducing cache
 *      pollution.
 *
 *      We introduce an extra SETL at the end of the loop to
 *      ensure we don't fall off the loop before we catch all
 *      errors.
 *
 *      NOTICE:
 *              LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *              Since we're using M{S,G}ETL, a fault might happen at
 *              any address in the middle of M{S,G}ETL, causing
 *              the value of LSM_STEP to be incorrect, which can
 *              cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *              ie: if LSM_STEP was 1 when a fault occurred, the
 *              next call to M{S,G}ET{L,D} will skip the first
 *              copy/get as it thinks that the first one has already
 *              been done.
 *
 */
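/* For reference: each pass of the hardware loop below moves 64 bytes
 * (two MGETL/MSETL pairs of 32 bytes each), TXRPT is primed with
 * n/64 - 2, and the final 64-byte block is peeled out after the BR so
 * that it can end in the extra SETL described above.
 */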
#define __asm_copy_user_64bit_rapf_loop(                                \
                to, from, ret, n, id, FIXUP)                            \
        asm volatile (                                                  \
                ".balign 8\n"                                           \
                "MOV    RAPF, %1\n"                                     \
                "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
                "MOV    D0Ar6, #0\n"                                    \
                "LSR    D1Ar5, %3, #6\n"                                \
                "SUB    TXRPT, D1Ar5, #2\n"                             \
                "MOV    RAPF, %1\n"                                     \
                "$Lloop"id":\n"                                         \
                "ADD    RAPF, %1, #64\n"                                \
                "21:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "22:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #32\n"                                  \
                "23:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "24:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #32\n"                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "BR     $Lloop"id"\n"                                   \
                                                                        \
                "MOV    RAPF, %1\n"                                     \
                "25:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "26:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #32\n"                                  \
                "27:\n"                                                 \
                "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "28:\n"                                                 \
                "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %0, %0, #8\n"                                   \
                "29:\n"                                                 \
                "SETL   [%0++], D0.7, D1.7\n"                           \
                "SUB    %3, %3, #32\n"                                  \
                "1:"                                                    \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
                "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
                "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
                "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
                "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
                "SUB A0StP, A0StP, #40\n"                               \
                "       .section .fixup,\"ax\"\n"                       \
                "4:\n"                                                  \
                "       ADD     %0, %0, #8\n"                           \
                "3:\n"                                                  \
                "       MOV     D0Ar2, TXSTATUS\n"                      \
                "       MOV     D1Ar1, TXSTATUS\n"                      \
                "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
                "       MOV     TXSTATUS, D1Ar1\n"                      \
                        FIXUP                                           \
                "       MOVT    D0Ar2,#HI(1b)\n"                        \
                "       JUMP    D0Ar2,#LO(1b)\n"                        \
                "       .previous\n"                                    \
                "       .section __ex_table,\"a\"\n"                    \
                "       .long 21b,3b\n"                                 \
                "       .long 22b,3b\n"                                 \
                "       .long 23b,3b\n"                                 \
                "       .long 24b,3b\n"                                 \
                "       .long 25b,3b\n"                                 \
                "       .long 26b,3b\n"                                 \
                "       .long 27b,3b\n"                                 \
                "       .long 28b,3b\n"                                 \
                "       .long 29b,4b\n"                                 \
                "       .previous\n"                                    \
                : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
                : "0" (to), "1" (from), "2" (ret), "3" (n)              \
                : "D1Ar1", "D0Ar2", "memory")

/*      rewind 'to' and 'from' pointers when a fault occurs
 *
 *      Rationale:
 *              A fault always occurs on writing to the user buffer. A fault
 *              is at a single address, so we need to rewind by only 8
 *              bytes.
 *              Since we do a complete read from the kernel buffer before
 *              writing, we need to rewind it also. The amount to rewind
 *              equals the number of faulty writes in MSETL,
 *              which is: [4 - (LSM_STEP-1)]*8
 *              LSM_STEP is bits 10:8 in TXSTATUS, which is already read
 *              and stored in D0Ar2.
 *
 *              NOTE: If a fault occurs at the last operation in M{G,S}ETL
 *                      LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *                      a fault happens at the 4th write, LSM_STEP will be 0
 *                      instead of 4. The code copes with that.
 *
 *              n is updated by the number of successful writes, which is:
 *              n = n - (LSM_STEP-1)*8
 */
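/* Worked example: if an MSETL faults with LSM_STEP == 2 (one of its
 * four 8-byte writes completed), the faulty writes number
 * 4 - (LSM_STEP-1) = 3, so 'from' is rewound by 3*8 = 24 bytes and n
 * is reduced by the (LSM_STEP-1)*8 = 8 bytes that did succeed.
 */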
#define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\
        __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #8\n"                             \
                "AND    D0Ar2, D0Ar2, #0x7\n"                           \
                "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
                "SUB    D0Ar2, D0Ar2, #1\n"                             \
                "MOV    D1Ar1, #4\n"                                    \
                "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
                "LSL    D0Ar2, D0Ar2, #3\n"                             \
                "LSL    D1Ar1, D1Ar1, #3\n"                             \
                "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
                "SUB    %0, %0, #8\n"                                   \
                "SUB    %1, %1, D0Ar2\n"                                \
                "SUB    %3, %3, D1Ar1\n")

/*
 *      optimized copying loop using RAPF when 32 bit aligned
 *
 *      n               will be automatically decremented inside the loop
 *      ret             will be left intact. if an error occurs we will rewind
 *                      so that the original non-optimized code will fill up
 *                      this value correctly.
 *
 *      on fault:
 *              >       n will hold total number of uncopied bytes
 *
 *              >       {'to','from'} will be rewound so that
 *                      the non-optimized code will do the proper fix up
 *
 *      DCACHE drops the cacheline which helps in reducing cache
 *      pollution.
 *
 *      We introduce an extra SETD at the end of the loop to
 *      ensure we don't fall off the loop before we catch all
 *      errors.
 *
 *      NOTICE:
 *              LSM_STEP in TXSTATUS must be cleared in the fix up code.
 *              Since we're using M{S,G}ETL, a fault might happen at
 *              any address in the middle of M{S,G}ETL, causing
 *              the value of LSM_STEP to be incorrect, which can
 *              cause subsequent use of M{S,G}ET{L,D} to go wrong.
 *              ie: if LSM_STEP was 1 when a fault occurred, the
 *              next call to M{S,G}ET{L,D} will skip the first
 *              copy/get as it thinks that the first one has already
 *              been done.
 *
 */
#define __asm_copy_user_32bit_rapf_loop(                                \
                to, from, ret, n, id, FIXUP)                            \
        asm volatile (                                                  \
                ".balign 8\n"                                           \
                "MOV    RAPF, %1\n"                                     \
                "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
                "MOV    D0Ar6, #0\n"                                    \
                "LSR    D1Ar5, %3, #6\n"                                \
                "SUB    TXRPT, D1Ar5, #2\n"                             \
                "MOV    RAPF, %1\n"                                     \
                "$Lloop"id":\n"                                         \
                "ADD    RAPF, %1, #64\n"                                \
                "21:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "22:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "23:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "24:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "25:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "26:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "27:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "28:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "BR     $Lloop"id"\n"                                   \
                                                                        \
                "MOV    RAPF, %1\n"                                     \
                "29:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "30:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "31:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "32:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "33:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "34:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %3, %3, #16\n"                                  \
                "35:\n"                                                 \
                "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
                "36:\n"                                                 \
                "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
                "SUB    %0, %0, #4\n"                                   \
                "37:\n"                                                 \
                "SETD   [%0++], D0.7\n"                                 \
                "SUB    %3, %3, #16\n"                                  \
                "1:"                                                    \
                "DCACHE [%1+#-64], D0Ar6\n"                             \
                "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
                "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
                "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
                "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
                "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
                "SUB A0StP, A0StP, #40\n"                               \
                "       .section .fixup,\"ax\"\n"                       \
                "4:\n"                                                  \
                "       ADD     %0, %0, #4\n"                           \
                "3:\n"                                                  \
                "       MOV     D0Ar2, TXSTATUS\n"                      \
                "       MOV     D1Ar1, TXSTATUS\n"                      \
                "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
                "       MOV     TXSTATUS, D1Ar1\n"                      \
                        FIXUP                                           \
                "       MOVT    D0Ar2,#HI(1b)\n"                        \
                "       JUMP    D0Ar2,#LO(1b)\n"                        \
                "       .previous\n"                                    \
                "       .section __ex_table,\"a\"\n"                    \
                "       .long 21b,3b\n"                                 \
                "       .long 22b,3b\n"                                 \
                "       .long 23b,3b\n"                                 \
                "       .long 24b,3b\n"                                 \
                "       .long 25b,3b\n"                                 \
                "       .long 26b,3b\n"                                 \
                "       .long 27b,3b\n"                                 \
                "       .long 28b,3b\n"                                 \
                "       .long 29b,3b\n"                                 \
                "       .long 30b,3b\n"                                 \
                "       .long 31b,3b\n"                                 \
                "       .long 32b,3b\n"                                 \
                "       .long 33b,3b\n"                                 \
                "       .long 34b,3b\n"                                 \
                "       .long 35b,3b\n"                                 \
                "       .long 36b,3b\n"                                 \
                "       .long 37b,4b\n"                                 \
                "       .previous\n"                                    \
                : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
                : "0" (to), "1" (from), "2" (ret), "3" (n)              \
                : "D1Ar1", "D0Ar2", "memory")

/*      rewind 'to' and 'from' pointers when a fault occurs
 *
 *      Rationale:
 *              A fault always occurs on writing to the user buffer. A fault
 *              is at a single address, so we need to rewind by only 4
 *              bytes.
 *              Since we do a complete read from the kernel buffer before
 *              writing, we need to rewind it also. The amount to rewind
 *              equals the number of faulty writes in MSETD,
 *              which is: [4 - (LSM_STEP-1)]*4
 *              LSM_STEP is bits 10:8 in TXSTATUS, which is already read
 *              and stored in D0Ar2.
 *
 *              NOTE: If a fault occurs at the last operation in M{G,S}ETL
 *                      LSM_STEP will be 0. ie: we do 4 writes in our case; if
 *                      a fault happens at the 4th write, LSM_STEP will be 0
 *                      instead of 4. The code copes with that.
 *
 *              n is updated by the number of successful writes, which is:
 *              n = n - (LSM_STEP-1)*4
 */
#define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
        __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
                "LSR    D0Ar2, D0Ar2, #8\n"                             \
                "AND    D0Ar2, D0Ar2, #0x7\n"                           \
                "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
                "SUB    D0Ar2, D0Ar2, #1\n"                             \
                "MOV    D1Ar1, #4\n"                                    \
                "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
                "LSL    D0Ar2, D0Ar2, #2\n"                             \
                "LSL    D1Ar1, D1Ar1, #2\n"                             \
                "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
                "SUB    %0, %0, #4\n"                                   \
                "SUB    %1, %1, D0Ar2\n"                                \
                "SUB    %3, %3, D1Ar1\n")

unsigned long __copy_user(void __user *pdst, const void *psrc,
                          unsigned long n)
{
        register char __user *dst asm ("A0.2") = pdst;
        register const char *src asm ("A1.2") = psrc;
        unsigned long retn = 0;

        if (n == 0)
                return 0;

        if ((unsigned long) src & 1) {
                __asm_copy_to_user_1(dst, src, retn);
                n--;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
                while (n > 0) {
                        __asm_copy_to_user_1(dst, src, retn);
                        n--;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_to_user_2(dst, src, retn);
                n -= 2;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
                while (n >= 2) {
                        __asm_copy_to_user_2(dst, src, retn);
                        n -= 2;
                }
        }

#ifdef USE_RAPF
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
                if (n >= RAPF_MIN_BUF_SIZE) {
                        /* copy user using 64 bit rapf copy */
                        __asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
                                                        n, "64cu");
                }
                while (n >= 8) {
                        __asm_copy_to_user_8x64(dst, src, retn);
                        n -= 8;
                }
        }
        if (n >= RAPF_MIN_BUF_SIZE) {
                /* copy user using 32 bit rapf copy */
                __asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
        }
#else
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
                while (n >= 8) {
                        __asm_copy_to_user_8x64(dst, src, retn);
                        n -= 8;
                }
        }
#endif

        while (n >= 16) {
                __asm_copy_to_user_16(dst, src, retn);
                n -= 16;
        }

        while (n >= 4) {
                __asm_copy_to_user_4(dst, src, retn);
                n -= 4;
        }

        switch (n) {
        case 0:
                break;
        case 1:
                __asm_copy_to_user_1(dst, src, retn);
                break;
        case 2:
                __asm_copy_to_user_2(dst, src, retn);
                break;
        case 3:
                __asm_copy_to_user_3(dst, src, retn);
                break;
        }

        return retn;
}
EXPORT_SYMBOL(__copy_user);

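/* Illustrative only: callers normally reach __copy_user() via the
 * copy_to_user() wrapper, treating a non-zero return as the number of
 * trailing bytes that could not be copied:
 *
 *      if (copy_to_user(ubuf, kbuf, len))
 *              return -EFAULT;
 */
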
#define __asm_copy_from_user_1(to, from, ret) \
        __asm_copy_user_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"    \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %2,%2,#1\n"        \
                "       SETB [%0++],D1Ar1\n",   \
                "       .long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETW D1Ar1,[%1++]\n"            \
                "2:     SETW [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#2\n"                \
                "       SETW [%0++],D1Ar1\n" FIXUP,     \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
        __asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

#define __asm_copy_from_user_3(to, from, ret)           \
        __asm_copy_from_user_2x_cont(to, from, ret,     \
                "       GETB D1Ar1,[%1++]\n"            \
                "4:     SETB [%0++],D1Ar1\n",           \
                "5:     ADD  %2,%2,#1\n"                \
                "       SETB [%0++],D1Ar1\n",           \
                "       .long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
        __asm_copy_user_cont(to, from, ret,             \
                "       GETD D1Ar1,[%1++]\n"            \
                "2:     SETD [%0++],D1Ar1\n" COPY,      \
                "3:     ADD  %2,%2,#4\n"                \
                "       SETD [%0++],D1Ar1\n" FIXUP,     \
                "       .long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
        __asm_copy_from_user_4x_cont(to, from, ret, "", "", "")


#define __asm_copy_from_user_8x64(to, from, ret) \
        asm volatile (                          \
                "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "       MOV D1Ar1,#0\n"                 \
                "       MOV D0Ar2,#0\n"                 \
                "3:     ADD  %2,%2,#8\n"                \
                "       SETL [%0++],D0Ar2,D1Ar1\n"      \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=a" (to), "=r" (from), "=r" (ret)    \
                : "0" (to), "1" (from), "2" (ret)       \
                : "D1Ar1", "D0Ar2", "memory")

/*      rewind 'from' pointer when a fault occurs
 *
 *      Rationale:
 *              A fault occurs while reading from the user buffer, which is
 *              the source. Since the fault is at a single address, we only
 *              need to rewind by 8 bytes.
 *              Since we don't write to the kernel buffer until the read has
 *              completed, the kernel buffer is in the right state and needn't
 *              be corrected.
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)      \
        __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
                "SUB    %1, %1, #8\n")

/*      rewind 'from' pointer when a fault occurs
 *
 *      Rationale:
 *              A fault occurs while reading from the user buffer, which is
 *              the source. Since the fault is at a single address, we only
 *              need to rewind by 4 bytes.
 *              Since we don't write to the kernel buffer until the read has
 *              completed, the kernel buffer is in the right state and needn't
 *              be corrected.
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)      \
        __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
                "SUB    %1, %1, #4\n")


/* Copy from user to kernel, zeroing the bytes that were inaccessible in
   userland.  The return-value is the number of bytes that were
   inaccessible.  */
unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
                                  unsigned long n)
{
        register char *dst asm ("A0.2") = pdst;
        register const char __user *src asm ("A1.2") = psrc;
        unsigned long retn = 0;

        if (n == 0)
                return 0;

        if ((unsigned long) src & 1) {
                __asm_copy_from_user_1(dst, src, retn);
                n--;
                if (retn)
                        goto copy_exception_bytes;
        }
        if ((unsigned long) dst & 1) {
                /* Worst case - byte copy */
                while (n > 0) {
                        __asm_copy_from_user_1(dst, src, retn);
                        n--;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }
        if (((unsigned long) src & 2) && n >= 2) {
                __asm_copy_from_user_2(dst, src, retn);
                n -= 2;
                if (retn)
                        goto copy_exception_bytes;
        }
        if ((unsigned long) dst & 2) {
                /* Second worst case - word copy */
                while (n >= 2) {
                        __asm_copy_from_user_2(dst, src, retn);
                        n -= 2;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }

#ifdef USE_RAPF
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
                if (n >= RAPF_MIN_BUF_SIZE) {
                        /* Copy using fast 64bit rapf */
                        __asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
                                                        n, "64cuz");
                }
                while (n >= 8) {
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }

        if (n >= RAPF_MIN_BUF_SIZE) {
                /* Copy using fast 32bit rapf */
                __asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
                                                n, "32cuz");
        }
#else
        /* 64 bit copy loop */
        if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
                while (n >= 8) {
                        __asm_copy_from_user_8x64(dst, src, retn);
                        n -= 8;
                        if (retn)
                                goto copy_exception_bytes;
                }
        }
#endif

        while (n >= 4) {
                __asm_copy_from_user_4(dst, src, retn);
                n -= 4;

                if (retn)
                        goto copy_exception_bytes;
        }

        /* If we get here, there were no memory read faults.  */
        switch (n) {
                /* These copies are at least "naturally aligned" (so we don't
                   have to check each byte), due to the src alignment code.
                   The *_3 case *will* get the correct count for retn.  */
        case 0:
                /* This case deliberately left in (if you have doubts check the
                   generated assembly code).  */
                break;
        case 1:
                __asm_copy_from_user_1(dst, src, retn);
                break;
        case 2:
                __asm_copy_from_user_2(dst, src, retn);
                break;
        case 3:
                __asm_copy_from_user_3(dst, src, retn);
                break;
        }

        /* If we get here, retn correctly reflects the number of failing
           bytes.  */
        return retn;

 copy_exception_bytes:
        /* We already have "retn" bytes cleared, and need to clear the
           remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
           memset is preferred here, since this isn't speed-critical code and
           we'd rather have this a leaf-function than calling memset.  */
        {
                char *endp;
                for (endp = dst + n; dst < endp; dst++)
                        *dst = 0;
        }

        return retn + n;
}
EXPORT_SYMBOL(__copy_user_zeroing);
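
/* Illustrative only: __copy_user_zeroing() typically sits behind the
 * copy_from_user() wrapper; on a fault the unread tail of the kernel
 * buffer is zeroed and the number of inaccessible bytes is returned:
 *
 *      if (copy_from_user(kbuf, ubuf, len))
 *              return -EFAULT;
 */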

#define __asm_clear_8x64(to, ret) \
        asm volatile (                                  \
                "       MOV  D0Ar2,#0\n"                \
                "       MOV  D1Ar1,#0\n"                \
                "       SETL [%0],D0Ar2,D1Ar1\n"        \
                "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                "3:     ADD  %1,%1,#8\n"                \
                "       MOVT    D0Ar2,#HI(1b)\n"        \
                "       JUMP    D0Ar2,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                "       .long 2b,3b\n"                  \
                "       .previous\n"                    \
                : "=r" (to), "=r" (ret) \
                : "0" (to), "1" (ret)   \
                : "D1Ar1", "D0Ar2", "memory")

/* Zero userspace.  */

#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
        asm volatile (                                  \
                "       MOV D1Ar1,#0\n"                 \
                        CLEAR                           \
                "1:\n"                                  \
                "       .section .fixup,\"ax\"\n"       \
                        FIXUP                           \
                "       MOVT    D1Ar1,#HI(1b)\n"        \
                "       JUMP    D1Ar1,#LO(1b)\n"        \
                "       .previous\n"                    \
                "       .section __ex_table,\"a\"\n"    \
                        TENTRY                          \
                "       .previous"                      \
                : "=r" (to), "=r" (ret)                 \
                : "0" (to), "1" (ret)                   \
                : "D1Ar1", "memory")

#define __asm_clear_1(to, ret) \
        __asm_clear(to, ret,                    \
                "       SETB [%0],D1Ar1\n"      \
                "2:     SETB [%0++],D1Ar1\n",   \
                "3:     ADD  %1,%1,#1\n",       \
                "       .long 2b,3b\n")

#define __asm_clear_2(to, ret) \
        __asm_clear(to, ret,                    \
                "       SETW [%0],D1Ar1\n"      \
                "2:     SETW [%0++],D1Ar1\n",   \
                "3:     ADD  %1,%1,#2\n",       \
                "       .long 2b,3b\n")

#define __asm_clear_3(to, ret) \
        __asm_clear(to, ret,                    \
                 "2:    SETW [%0++],D1Ar1\n"    \
                 "      SETB [%0],D1Ar1\n"      \
                 "3:    SETB [%0++],D1Ar1\n",   \
                 "4:    ADD  %1,%1,#2\n"        \
                 "5:    ADD  %1,%1,#1\n",       \
                 "      .long 2b,4b\n"          \
                 "      .long 3b,5b\n")

#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear(to, ret,                            \
                "       SETD [%0],D1Ar1\n"              \
                "2:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "3:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
        __asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_4x_cont(to, ret,                    \
                "       SETD [%0],D1Ar1\n"              \
                "4:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "5:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
        __asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_8x_cont(to, ret,                    \
                "       SETD [%0],D1Ar1\n"              \
                "6:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "7:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
        __asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
        __asm_clear_12x_cont(to, ret,                   \
                "       SETD [%0],D1Ar1\n"              \
                "8:     SETD [%0++],D1Ar1\n" CLEAR,     \
                "9:     ADD  %1,%1,#4\n" FIXUP,         \
                "       .long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
        __asm_clear_16x_cont(to, ret, "", "", "")

unsigned long __do_clear_user(void __user *pto, unsigned long pn)
{
        register char __user *dst asm ("D0Re0") = pto;
        register unsigned long n asm ("D1Re0") = pn;
        register unsigned long retn asm ("D0Ar6") = 0;

        if ((unsigned long) dst & 1) {
                __asm_clear_1(dst, retn);
                n--;
        }

        if ((unsigned long) dst & 2) {
                __asm_clear_2(dst, retn);
                n -= 2;
        }

        /* 64 bit copy loop */
        if (!((__force unsigned long) dst & 7)) {
                while (n >= 8) {
                        __asm_clear_8x64(dst, retn);
                        n -= 8;
                }
        }

        while (n >= 16) {
                __asm_clear_16(dst, retn);
                n -= 16;
        }

        while (n >= 4) {
                __asm_clear_4(dst, retn);
                n -= 4;
        }

        switch (n) {
        case 0:
                break;
        case 1:
                __asm_clear_1(dst, retn);
                break;
        case 2:
                __asm_clear_2(dst, retn);
                break;
        case 3:
                __asm_clear_3(dst, retn);
                break;
        }

        return retn;
}
EXPORT_SYMBOL(__do_clear_user);
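
/* Illustrative only: __do_clear_user() typically backs the clear_user()
 * wrapper, which returns the number of bytes that could not be zeroed:
 *
 *      if (clear_user(ubuf, len))
 *              return -EFAULT;
 */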

unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
        register unsigned char x asm ("D0Re0") = 0;
        asm volatile (
                "       GETB %0,[%2]\n"
                "1:\n"
                "       GETB %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_b);

unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
        register unsigned short x asm ("D0Re0") = 0;
        asm volatile (
                "       GETW %0,[%2]\n"
                "1:\n"
                "       GETW %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_w);

unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
        register unsigned int x asm ("D0Re0") = 0;
        asm volatile (
                "       GETD %0,[%2]\n"
                "1:\n"
                "       GETD %0,[%2]\n"
                "2:\n"
                "       .section .fixup,\"ax\"\n"
                "3:     MOV     D0FrT,%3\n"
                "       SETD    [%1],D0FrT\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                "       .previous\n"
                "       .section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                "       .previous\n"
                : "=r" (x)
                : "r" (err), "r" (addr), "P" (-EFAULT)
                : "D0FrT");
        return x;
}
EXPORT_SYMBOL(__get_user_asm_d);

long __put_user_asm_b(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETB [%2],%1\n"
                "1:\n"
                "       SETB [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_b);

long __put_user_asm_w(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETW [%2],%1\n"
                "1:\n"
                "       SETW [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_w);

long __put_user_asm_d(unsigned int x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETD [%2],%1\n"
                "1:\n"
                "       SETD [%2],%1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_d);

long __put_user_asm_l(unsigned long long x, void __user *addr)
{
        register unsigned int err asm ("D0Re0") = 0;
        asm volatile (
                "       MOV  %0,#0\n"
                "       SETL [%2],%1,%t1\n"
                "1:\n"
                "       SETL [%2],%1,%t1\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3:     MOV     %0,%3\n"
                "       MOVT    D0FrT,#HI(2b)\n"
                "       JUMP    D0FrT,#LO(2b)\n"
                ".previous\n"
                ".section __ex_table,\"a\"\n"
                "       .long 1b,3b\n"
                ".previous"
                : "=r"(err)
                : "d" (x), "a" (addr), "P"(-EFAULT)
                : "D0FrT");
        return err;
}
EXPORT_SYMBOL(__put_user_asm_l);
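
/* Illustrative only: the width-specific __get_user_asm_*() and
 * __put_user_asm_*() helpers above back the get_user()/put_user()
 * macros, which evaluate to 0 on success or -EFAULT on a fault:
 *
 *      u32 val;
 *      if (get_user(val, uptr) || put_user(val + 1, uptr))
 *              return -EFAULT;
 */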

long strnlen_user(const char __user *src, long count)
{
        long res;

        if (!access_ok(VERIFY_READ, src, 0))
                return 0;

        asm volatile (" MOV     D0Ar4, %1\n"
                      " MOV     D0Ar6, %2\n"
                      "0:\n"
                      " SUBS    D0FrT, D0Ar6, #0\n"
                      " SUB     D0Ar6, D0Ar6, #1\n"
                      " BLE     2f\n"
                      " GETB    D0FrT, [D0Ar4+#1++]\n"
                      "1:\n"
                      " TST     D0FrT, #255\n"
                      " BNE     0b\n"
                      "2:\n"
                      " SUB     %0, %2, D0Ar6\n"
                      "3:\n"
                      " .section .fixup,\"ax\"\n"
                      "4:\n"
                      " MOV     %0, #0\n"
                      " MOVT    D0FrT,#HI(3b)\n"
                      " JUMP    D0FrT,#LO(3b)\n"
                      " .previous\n"
                      " .section __ex_table,\"a\"\n"
                      " .long 1b,4b\n"
                      " .previous\n"
                      : "=r" (res)
                      : "r" (src), "r" (count)
                      : "D0FrT", "D0Ar4", "D0Ar6", "cc");

        return res;
}
EXPORT_SYMBOL(strnlen_user);
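
/* Note that, as with other strnlen_user() implementations, the returned
 * length counts the terminating NUL (so "abc" yields 4), and 0 is
 * returned if the buffer cannot be read at all.
 */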

long __strncpy_from_user(char *dst, const char __user *src, long count)
{
        long res;

        if (count == 0)
                return 0;

        /*
         * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
         *  So do we.
         *
         *  This code is deduced from:
         *
         *      char tmp2;
         *      long tmp1, tmp3;
         *      tmp1 = count;
         *      while ((*dst++ = (tmp2 = *src++)) != 0
         *             && --tmp1)
         *        ;
         *
         *      res = count - tmp1;
         *
         *  with tweaks.
         */

        asm volatile (" MOV  %0,%3\n"
                      "1:\n"
                      " GETB D0FrT,[%2++]\n"
                      "2:\n"
                      " CMP  D0FrT,#0\n"
                      " SETB [%1++],D0FrT\n"
                      " BEQ  3f\n"
                      " SUBS %0,%0,#1\n"
                      " BNZ  1b\n"
                      "3:\n"
                      " SUB  %0,%3,%0\n"
                      "4:\n"
                      " .section .fixup,\"ax\"\n"
                      "5:\n"
                      " MOV  %0,%7\n"
                      " MOVT    D0FrT,#HI(4b)\n"
                      " JUMP    D0FrT,#LO(4b)\n"
                      " .previous\n"
                      " .section __ex_table,\"a\"\n"
                      " .long 2b,5b\n"
                      " .previous"
                      : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
                      : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
                      : "D0FrT", "memory", "cc");

        return res;
}
EXPORT_SYMBOL(__strncpy_from_user);
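
/* Note that __strncpy_from_user() returns the number of bytes copied
 * excluding the trailing NUL, or -EFAULT if the source faults; like
 * strncpy(), it does not NUL-terminate the destination if the source
 * string is count bytes or longer.
 */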