Source: arch/metag/lib/usercopy.c from the android-x86 kernel tree
(OSDN Git Service mirror), at commit
"metag/usercopy: Zero rest of buffer from copy_from_user".
1 /*
2  * User address space access functions.
3  * The non-inlined parts of asm-metag/uaccess.h are here.
4  *
5  * Copyright (C) 2006, Imagination Technologies.
6  * Copyright (C) 2000, Axis Communications AB.
7  *
8  * Written by Hans-Peter Nilsson.
9  * Pieces used from memcpy, originally by Kenny Ranerup long time ago.
10  * Modified for Meta by Will Newton.
11  */
12
13 #include <linux/export.h>
14 #include <linux/uaccess.h>
15 #include <asm/cache.h>                  /* def of L1_CACHE_BYTES */
16
17 #define USE_RAPF
18 #define RAPF_MIN_BUF_SIZE       (3*L1_CACHE_BYTES)
19
20
21 /* The "double write" in this code is because the Meta will not fault
22  * immediately unless the memory pipe is forced to by e.g. a data stall or
23  * another memory op. The second write should be discarded by the write
24  * combiner so should have virtually no cost.
25  */
26
/*
 * Base building block for the fixed-size copy_to_user macros below.
 *
 * COPY   - asm fragment performing the copy; faulting instructions carry
 *          numeric labels (2:, 4:, ...) referenced by TENTRY.
 * FIXUP  - asm fragment placed in .fixup; typically adds the number of
 *          uncopied bytes to ret, then jumps back to label 1 (the
 *          instruction after COPY) via the MOVT/JUMP pair below.
 * TENTRY - __ex_table entries pairing each faulting label with its
 *          fixup label (".long 2b,3b\n" style).
 *
 * to/from/ret are passed in and out through matched "=r"/"0"-style
 * operands so the macros can be chained, each one advancing the
 * pointers and accumulating the fault count.
 */
27 #define __asm_copy_user_cont(to, from, ret, COPY, FIXUP, TENTRY) \
28         asm volatile (                                           \
29                 COPY                                             \
30                 "1:\n"                                           \
31                 "       .section .fixup,\"ax\"\n"                \
32                 FIXUP                                            \
33                 "       MOVT    D1Ar1,#HI(1b)\n"                 \
34                 "       JUMP    D1Ar1,#LO(1b)\n"                 \
35                 "       .previous\n"                             \
36                 "       .section __ex_table,\"a\"\n"             \
37                 TENTRY                                           \
38                 "       .previous\n"                             \
39                 : "=r" (to), "=r" (from), "=r" (ret)             \
40                 : "0" (to), "1" (from), "2" (ret)                \
41                 : "D1Ar1", "memory")
42
43
/*
 * Copy a single byte to user space.  The SETB is issued twice ("double
 * write", see comment above __asm_copy_user_cont's definition): the
 * first forces any fault to surface, the second (label 2:) is the one
 * covered by the exception table; its fixup (3:) counts 1 uncopied byte.
 */
44 #define __asm_copy_to_user_1(to, from, ret)     \
45         __asm_copy_user_cont(to, from, ret,     \
46                 "       GETB D1Ar1,[%1++]\n"    \
47                 "       SETB [%0],D1Ar1\n"      \
48                 "2:     SETB [%0++],D1Ar1\n",   \
49                 "3:     ADD  %2,%2,#1\n",       \
50                 "       .long 2b,3b\n")
51
/*
 * 2-byte step of the copy_to_user ladder.  The *_Nx_cont variants take
 * extra COPY/FIXUP/TENTRY fragments so larger sizes can be composed by
 * appending further accesses; __asm_copy_to_user_2 passes empty
 * fragments, __asm_copy_to_user_3 appends a trailing byte (labels 4/5).
 * Each fixup adds the bytes its own access failed to copy to ret.
 */
52 #define __asm_copy_to_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
53         __asm_copy_user_cont(to, from, ret,             \
54                 "       GETW D1Ar1,[%1++]\n"            \
55                 "       SETW [%0],D1Ar1\n"              \
56                 "2:     SETW [%0++],D1Ar1\n" COPY,      \
57                 "3:     ADD  %2,%2,#2\n" FIXUP,         \
58                 "       .long 2b,3b\n" TENTRY)
59
60 #define __asm_copy_to_user_2(to, from, ret) \
61         __asm_copy_to_user_2x_cont(to, from, ret, "", "", "")
62
63 #define __asm_copy_to_user_3(to, from, ret) \
64         __asm_copy_to_user_2x_cont(to, from, ret,       \
65                 "       GETB D1Ar1,[%1++]\n"            \
66                 "       SETB [%0],D1Ar1\n"              \
67                 "4:     SETB [%0++],D1Ar1\n",           \
68                 "5:     ADD  %2,%2,#1\n",               \
69                 "       .long 4b,5b\n")
70
/*
 * 4-byte (SETD) step of the copy_to_user ladder plus its 5/6/7-byte
 * compositions.  Sizes 5-7 append a word and/or byte access after the
 * dword, each with its own numeric label and fixup that adds the
 * remaining byte count for that access to ret.
 */
71 #define __asm_copy_to_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
72         __asm_copy_user_cont(to, from, ret,             \
73                 "       GETD D1Ar1,[%1++]\n"            \
74                 "       SETD [%0],D1Ar1\n"              \
75                 "2:     SETD [%0++],D1Ar1\n" COPY,      \
76                 "3:     ADD  %2,%2,#4\n" FIXUP,         \
77                 "       .long 2b,3b\n" TENTRY)
78
79 #define __asm_copy_to_user_4(to, from, ret) \
80         __asm_copy_to_user_4x_cont(to, from, ret, "", "", "")
81
82 #define __asm_copy_to_user_5(to, from, ret) \
83         __asm_copy_to_user_4x_cont(to, from, ret,       \
84                 "       GETB D1Ar1,[%1++]\n"            \
85                 "       SETB [%0],D1Ar1\n"              \
86                 "4:     SETB [%0++],D1Ar1\n",           \
87                 "5:     ADD  %2,%2,#1\n",               \
88                 "       .long 4b,5b\n")
89
90 #define __asm_copy_to_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
91         __asm_copy_to_user_4x_cont(to, from, ret,       \
92                 "       GETW D1Ar1,[%1++]\n"            \
93                 "       SETW [%0],D1Ar1\n"              \
94                 "4:     SETW [%0++],D1Ar1\n" COPY,      \
95                 "5:     ADD  %2,%2,#2\n" FIXUP,         \
96                 "       .long 4b,5b\n" TENTRY)
97
98 #define __asm_copy_to_user_6(to, from, ret) \
99         __asm_copy_to_user_6x_cont(to, from, ret, "", "", "")
100
101 #define __asm_copy_to_user_7(to, from, ret) \
102         __asm_copy_to_user_6x_cont(to, from, ret,       \
103                 "       GETB D1Ar1,[%1++]\n"            \
104                 "       SETB [%0],D1Ar1\n"              \
105                 "6:     SETB [%0++],D1Ar1\n",           \
106                 "7:     ADD  %2,%2,#1\n",               \
107                 "       .long 6b,7b\n")
108
/*
 * 8-byte step (two chained SETDs) of the copy_to_user ladder plus the
 * 9/10/11-byte compositions.  Note these chain via _4x_cont / _8x_cont,
 * so labels continue from the ones already used by the inner macro
 * (hence 4/5, 6/7, 8/9 here).
 */
109 #define __asm_copy_to_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
110         __asm_copy_to_user_4x_cont(to, from, ret,       \
111                 "       GETD D1Ar1,[%1++]\n"            \
112                 "       SETD [%0],D1Ar1\n"              \
113                 "4:     SETD [%0++],D1Ar1\n" COPY,      \
114                 "5:     ADD  %2,%2,#4\n"  FIXUP,        \
115                 "       .long 4b,5b\n" TENTRY)
116
117 #define __asm_copy_to_user_8(to, from, ret) \
118         __asm_copy_to_user_8x_cont(to, from, ret, "", "", "")
119
120 #define __asm_copy_to_user_9(to, from, ret) \
121         __asm_copy_to_user_8x_cont(to, from, ret,       \
122                 "       GETB D1Ar1,[%1++]\n"            \
123                 "       SETB [%0],D1Ar1\n"              \
124                 "6:     SETB [%0++],D1Ar1\n",           \
125                 "7:     ADD  %2,%2,#1\n",               \
126                 "       .long 6b,7b\n")
127
128 #define __asm_copy_to_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
129         __asm_copy_to_user_8x_cont(to, from, ret,       \
130                 "       GETW D1Ar1,[%1++]\n"            \
131                 "       SETW [%0],D1Ar1\n"              \
132                 "6:     SETW [%0++],D1Ar1\n" COPY,      \
133                 "7:     ADD  %2,%2,#2\n" FIXUP,         \
134                 "       .long 6b,7b\n" TENTRY)
135
136 #define __asm_copy_to_user_10(to, from, ret) \
137         __asm_copy_to_user_10x_cont(to, from, ret, "", "", "")
138
139 #define __asm_copy_to_user_11(to, from, ret) \
140         __asm_copy_to_user_10x_cont(to, from, ret,      \
141                 "       GETB D1Ar1,[%1++]\n"            \
142                 "       SETB [%0],D1Ar1\n"              \
143                 "8:     SETB [%0++],D1Ar1\n",           \
144                 "9:     ADD  %2,%2,#1\n",               \
145                 "       .long 8b,9b\n")
146
/*
 * 12- and 16-byte steps (three/four chained SETDs) of the copy_to_user
 * ladder plus the 13/14/15-byte compositions.  Same composition scheme
 * as the smaller sizes: each added access gets its own fault label and
 * a fixup that credits the bytes that access failed to copy to ret.
 */
147 #define __asm_copy_to_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
148         __asm_copy_to_user_8x_cont(to, from, ret,       \
149                 "       GETD D1Ar1,[%1++]\n"            \
150                 "       SETD [%0],D1Ar1\n"              \
151                 "6:     SETD [%0++],D1Ar1\n" COPY,      \
152                 "7:     ADD  %2,%2,#4\n" FIXUP,         \
153                 "       .long 6b,7b\n" TENTRY)
154 #define __asm_copy_to_user_12(to, from, ret) \
155         __asm_copy_to_user_12x_cont(to, from, ret, "", "", "")
156
157 #define __asm_copy_to_user_13(to, from, ret) \
158         __asm_copy_to_user_12x_cont(to, from, ret,      \
159                 "       GETB D1Ar1,[%1++]\n"            \
160                 "       SETB [%0],D1Ar1\n"              \
161                 "8:     SETB [%0++],D1Ar1\n",           \
162                 "9:     ADD  %2,%2,#1\n",               \
163                 "       .long 8b,9b\n")
164
165 #define __asm_copy_to_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
166         __asm_copy_to_user_12x_cont(to, from, ret,      \
167                 "       GETW D1Ar1,[%1++]\n"            \
168                 "       SETW [%0],D1Ar1\n"              \
169                 "8:     SETW [%0++],D1Ar1\n" COPY,      \
170                 "9:     ADD  %2,%2,#2\n" FIXUP,         \
171                 "       .long 8b,9b\n" TENTRY)
172
173 #define __asm_copy_to_user_14(to, from, ret) \
174         __asm_copy_to_user_14x_cont(to, from, ret, "", "", "")
175
176 #define __asm_copy_to_user_15(to, from, ret) \
177         __asm_copy_to_user_14x_cont(to, from, ret,      \
178                 "       GETB D1Ar1,[%1++]\n"            \
179                 "       SETB [%0],D1Ar1\n"              \
180                 "10:    SETB [%0++],D1Ar1\n",           \
181                 "11:    ADD  %2,%2,#1\n",               \
182                 "       .long 10b,11b\n")
183
184 #define __asm_copy_to_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
185         __asm_copy_to_user_12x_cont(to, from, ret,      \
186                 "       GETD D1Ar1,[%1++]\n"            \
187                 "       SETD [%0],D1Ar1\n"              \
188                 "8:     SETD [%0++],D1Ar1\n" COPY,      \
189                 "9:     ADD  %2,%2,#4\n" FIXUP,         \
190                 "       .long 8b,9b\n" TENTRY)
191
192 #define __asm_copy_to_user_16(to, from, ret) \
193                 __asm_copy_to_user_16x_cont(to, from, ret, "", "", "")
194
/*
 * Copy 8 bytes in a single 64-bit GETL/SETL pair (used when both
 * pointers are 8-byte aligned).  Same double-write pattern as the
 * smaller macros; on fault the fixup (3:) counts all 8 bytes as
 * uncopied and resumes at label 1.  Self-contained (does not go
 * through __asm_copy_user_cont) because it needs the extra D0Ar2
 * scratch register for the 64-bit transfer.
 */
195 #define __asm_copy_to_user_8x64(to, from, ret) \
196         asm volatile (                                  \
197                 "       GETL D0Ar2,D1Ar1,[%1++]\n"      \
198                 "       SETL [%0],D0Ar2,D1Ar1\n"        \
199                 "2:     SETL [%0++],D0Ar2,D1Ar1\n"      \
200                 "1:\n"                                  \
201                 "       .section .fixup,\"ax\"\n"       \
202                 "3:     ADD  %2,%2,#8\n"                \
203                 "       MOVT    D0Ar2,#HI(1b)\n"        \
204                 "       JUMP    D0Ar2,#LO(1b)\n"        \
205                 "       .previous\n"                    \
206                 "       .section __ex_table,\"a\"\n"    \
207                 "       .long 2b,3b\n"                  \
208                 "       .previous\n"                    \
209                 : "=r" (to), "=r" (from), "=r" (ret)    \
210                 : "0" (to), "1" (from), "2" (ret)       \
211                 : "D1Ar1", "D0Ar2", "memory")
212
213 /*
214  *      optimized copying loop using RAPF when 64 bit aligned
215  *
216  *      n               will be automatically decremented inside the loop
217  *      ret             will be left intact. if error occurs we will rewind
218  *                      so that the original non optimized code will fill up
219  *                      this value correctly.
220  *
221  *      on fault:
222  *              >       n will hold total number of uncopied bytes
223  *
224  *              >       {'to','from'} will be rewound so that
225  *                      the non-optimized code will do the proper fix up
226  *
227  *      DCACHE drops the cacheline which helps in reducing cache
228  *      pollution.
229  *
230  *      We introduce an extra SETL at the end of the loop to
231  *      ensure we don't fall off the loop before we catch all
232  *      errors.
233  *
234  *      NOTICE:
235  *              LSM_STEP in TXSTATUS must be cleared in fix up code.
236  *              since we're using M{S,G}ETL, a fault might happen at
237  *              any address in the middle of M{S,G}ETL causing
238  *              the value of LSM_STEP to be incorrect which can
239  *              cause subsequent use of M{S,G}ET{L,D} to go wrong.
240  *              ie: if LSM_STEP was 1 when a fault occurs, the
241  *              next call to M{S,G}ET{L,D} will skip the first
242  *              copy/getting as it thinks that the first 1 has already
243  *              been done.
244  *
245  */
/*
 * Core of the 64-bit-aligned RAPF copy loop (see the large comment
 * above for the overall rationale).
 *
 * id    - string used to make the loop label ($Lloop<id>) unique per
 *         expansion.
 * FIXUP - extra asm run in the fault path, after LSM_STEP has been
 *         cleared in TXSTATUS but while the raw TXSTATUS value is still
 *         in D0Ar2; used by the wrapper below to rewind to/from/n.
 *
 * Structure: the MSETL at the top spills five register pairs (40 bytes)
 * to A0StP; they are restored by the GETLs at label 1 before A0StP is
 * dropped by 40.  The hardware loop (TXRPT) runs n/64 - 2 times, each
 * iteration moving 64 bytes as two MGETL/MSETL pairs; the tail after
 * the loop moves the final 64 bytes, re-issuing the last 8 bytes with
 * an extra SETL (label 29) so a pending fault is caught before we leave
 * (see "extra SETL" note above).  Labels 21-28 fault to fixup 3, which
 * clears LSM_STEP (bits 10:8 of TXSTATUS); label 29 faults to fixup 4,
 * which first undoes the SUB #8 before falling into 3.
 */
246 #define __asm_copy_user_64bit_rapf_loop(                                \
247                 to, from, ret, n, id, FIXUP)                            \
248         asm volatile (                                                  \
249                 ".balign 8\n"                                           \
250                 "MOV    RAPF, %1\n"                                     \
251                 "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
252                 "MOV    D0Ar6, #0\n"                                    \
253                 "LSR    D1Ar5, %3, #6\n"                                \
254                 "SUB    TXRPT, D1Ar5, #2\n"                             \
255                 "MOV    RAPF, %1\n"                                     \
256                 "$Lloop"id":\n"                                         \
257                 "ADD    RAPF, %1, #64\n"                                \
258                 "21:\n"                                                 \
259                 "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
260                 "22:\n"                                                 \
261                 "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
262                 "SUB    %3, %3, #32\n"                                  \
263                 "23:\n"                                                 \
264                 "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
265                 "24:\n"                                                 \
266                 "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
267                 "SUB    %3, %3, #32\n"                                  \
268                 "DCACHE [%1+#-64], D0Ar6\n"                             \
269                 "BR     $Lloop"id"\n"                                   \
270                                                                         \
271                 "MOV    RAPF, %1\n"                                     \
272                 "25:\n"                                                 \
273                 "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
274                 "26:\n"                                                 \
275                 "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
276                 "SUB    %3, %3, #32\n"                                  \
277                 "27:\n"                                                 \
278                 "MGETL  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
279                 "28:\n"                                                 \
280                 "MSETL  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
281                 "SUB    %0, %0, #8\n"                                   \
282                 "29:\n"                                                 \
283                 "SETL   [%0++], D0.7, D1.7\n"                           \
284                 "SUB    %3, %3, #32\n"                                  \
285                 "1:"                                                    \
286                 "DCACHE [%1+#-64], D0Ar6\n"                             \
287                 "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
288                 "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
289                 "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
290                 "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
291                 "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
292                 "SUB A0StP, A0StP, #40\n"                               \
293                 "       .section .fixup,\"ax\"\n"                       \
294                 "4:\n"                                                  \
295                 "       ADD     %0, %0, #8\n"                           \
296                 "3:\n"                                                  \
297                 "       MOV     D0Ar2, TXSTATUS\n"                      \
298                 "       MOV     D1Ar1, TXSTATUS\n"                      \
299                 "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
300                 "       MOV     TXSTATUS, D1Ar1\n"                      \
301                         FIXUP                                           \
302                 "       MOVT    D0Ar2,#HI(1b)\n"                        \
303                 "       JUMP    D0Ar2,#LO(1b)\n"                        \
304                 "       .previous\n"                                    \
305                 "       .section __ex_table,\"a\"\n"                    \
306                 "       .long 21b,3b\n"                                 \
307                 "       .long 22b,3b\n"                                 \
308                 "       .long 23b,3b\n"                                 \
309                 "       .long 24b,3b\n"                                 \
310                 "       .long 25b,3b\n"                                 \
311                 "       .long 26b,3b\n"                                 \
312                 "       .long 27b,3b\n"                                 \
313                 "       .long 28b,3b\n"                                 \
314                 "       .long 29b,4b\n"                                 \
315                 "       .previous\n"                                    \
316                 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
317                 : "0" (to), "1" (from), "2" (ret), "3" (n)              \
318                 : "D1Ar1", "D0Ar2", "memory")
319
320 /*      rewind 'to' and 'from'  pointers when a fault occurs
321  *
322  *      Rationale:
323  *              A fault always occurs on writing to user buffer. A fault
324  *              is at a single address, so we need to rewind by only 4
325  *              bytes.
326  *              Since we do a complete read from kernel buffer before
327  *              writing, we need to rewind it also. The amount to be
328  *              rewound equals the number of faulty writes in MSETD
329  *              which is: [4 - (LSM_STEP-1)]*8
330  *              LSM_STEP is bits 10:8 in TXSTATUS which is already read
331  *              and stored in D0Ar2
332  *
333  *              NOTE: If a fault occurs at the last operation in M{G,S}ETL
334  *                      LSM_STEP will be 0. ie: we do 4 writes in our case, if
335  *                      a fault happens at the 4th write, LSM_STEP will be 0
336  *                      instead of 4. The code copes with that.
337  *
338  *              n is updated by the number of successful writes, which is:
339  *              n = n - (LSM_STEP-1)*8
340  */
/*
 * copy_to_user wrapper around the 64-bit RAPF loop.  The FIXUP fragment
 * rewinds 'from' by the number of faulty writes ([4 - (LSM_STEP-1)]*8),
 * 'to' by 8, and credits n with the successful writes, per the comment
 * above.  LSM_STEP was read into D0Ar2 (bits 10:8) by the core fixup.
 *
 * Fix: ADDZ is conditional on the zero flag, but plain AND does not
 * update the condition flags on Meta, so ADDZ tested stale flags and
 * the LSM_STEP == 0 case (fault at the last of the four SETLs) could
 * rewind by the wrong amount.  Use ANDS so the flags reflect the
 * masked LSM_STEP value (upstream: "metag/usercopy: Set flags before
 * ADDZ").
 */
341 #define __asm_copy_to_user_64bit_rapf_loop(to,  from, ret, n, id)\
342         __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,           \
343                 "LSR    D0Ar2, D0Ar2, #8\n"                             \
344                 "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
345                 "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
346                 "SUB    D0Ar2, D0Ar2, #1\n"                             \
347                 "MOV    D1Ar1, #4\n"                                    \
348                 "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
349                 "LSL    D0Ar2, D0Ar2, #3\n"                             \
350                 "LSL    D1Ar1, D1Ar1, #3\n"                             \
351                 "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
352                 "SUB    %0, %0, #8\n"                                   \
353                 "SUB    %1,     %1,D0Ar2\n"                             \
354                 "SUB    %3, %3, D1Ar1\n")
355
356 /*
357  *      optimized copying loop using RAPF when 32 bit aligned
358  *
359  *      n               will be automatically decremented inside the loop
360  *      ret             will be left intact. if error occurs we will rewind
361  *                      so that the original non optimized code will fill up
362  *                      this value correctly.
363  *
364  *      on fault:
365  *              >       n will hold total number of uncopied bytes
366  *
367  *              >       {'to','from'} will be rewound so that
368  *                      the non-optimized code will do the proper fix up
369  *
370  *      DCACHE drops the cacheline which helps in reducing cache
371  *      pollution.
372  *
373  *      We introduce an extra SETD at the end of the loop to
374  *      ensure we don't fall off the loop before we catch all
375  *      errors.
376  *
377  *      NOTICE:
378  *              LSM_STEP in TXSTATUS must be cleared in fix up code.
379  *              since we're using M{S,G}ETL, a fault might happen at
380  *              any address in the middle of M{S,G}ETL causing
381  *              the value of LSM_STEP to be incorrect which can
382  *              cause subsequent use of M{S,G}ET{L,D} to go wrong.
383  *              ie: if LSM_STEP was 1 when a fault occurs, the
384  *              next call to M{S,G}ET{L,D} will skip the first
385  *              copy/getting as it thinks that the first 1 has already
386  *              been done.
387  *
388  */
/*
 * Core of the 32-bit-aligned RAPF copy loop; same scheme as the 64-bit
 * variant but built from MGETD/MSETD moving 16 bytes at a time (four
 * per 64-byte block).
 *
 * id    - string making the loop label ($Lloop<id>) unique per
 *         expansion.
 * FIXUP - extra asm run in the fault path after LSM_STEP is cleared,
 *         with raw TXSTATUS still in D0Ar2; the wrapper below uses it
 *         to rewind to/from/n.
 *
 * The hardware loop (TXRPT) runs n/64 - 2 times; the tail copies the
 * final 64 bytes and re-issues the last 4 bytes via an extra SETD
 * (label 37) to flush out any pending fault.  Labels 21-36 fault to
 * fixup 3 (clears LSM_STEP, bits 10:8 of TXSTATUS); label 37 faults to
 * fixup 4, which undoes the SUB #4 first.  The MSETL at the top spills
 * five register pairs (40 bytes) to A0StP, restored at label 1.
 */
389 #define __asm_copy_user_32bit_rapf_loop(                                \
390                         to,     from, ret, n, id, FIXUP)                \
391         asm volatile (                                                  \
392                 ".balign 8\n"                                           \
393                 "MOV    RAPF, %1\n"                                     \
394                 "MSETL  [A0StP++], D0Ar6, D0FrT, D0.5, D0.6, D0.7\n"    \
395                 "MOV    D0Ar6, #0\n"                                    \
396                 "LSR    D1Ar5, %3, #6\n"                                \
397                 "SUB    TXRPT, D1Ar5, #2\n"                             \
398                 "MOV    RAPF, %1\n"                                     \
399         "$Lloop"id":\n"                                                 \
400                 "ADD    RAPF, %1, #64\n"                                \
401                 "21:\n"                                                 \
402                 "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
403                 "22:\n"                                                 \
404                 "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
405                 "SUB    %3, %3, #16\n"                                  \
406                 "23:\n"                                                 \
407                 "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
408                 "24:\n"                                                 \
409                 "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
410                 "SUB    %3, %3, #16\n"                                  \
411                 "25:\n"                                                 \
412                 "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
413                 "26:\n"                                                 \
414                 "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
415                 "SUB    %3, %3, #16\n"                                  \
416                 "27:\n"                                                 \
417                 "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
418                 "28:\n"                                                 \
419                 "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
420                 "SUB    %3, %3, #16\n"                                  \
421                 "DCACHE [%1+#-64], D0Ar6\n"                             \
422                 "BR     $Lloop"id"\n"                                   \
423                                                                         \
424                 "MOV    RAPF, %1\n"                                     \
425                 "29:\n"                                                 \
426                 "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
427                 "30:\n"                                                 \
428                 "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
429                 "SUB    %3, %3, #16\n"                                  \
430                 "31:\n"                                                 \
431                 "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
432                 "32:\n"                                                 \
433                 "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
434                 "SUB    %3, %3, #16\n"                                  \
435                 "33:\n"                                                 \
436                 "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
437                 "34:\n"                                                 \
438                 "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
439                 "SUB    %3, %3, #16\n"                                  \
440                 "35:\n"                                                 \
441                 "MGETD  D0FrT, D0.5, D0.6, D0.7, [%1++]\n"              \
442                 "36:\n"                                                 \
443                 "MSETD  [%0++], D0FrT, D0.5, D0.6, D0.7\n"              \
444                 "SUB    %0, %0, #4\n"                                   \
445                 "37:\n"                                                 \
446                 "SETD   [%0++], D0.7\n"                                 \
447                 "SUB    %3, %3, #16\n"                                  \
448                 "1:"                                                    \
449                 "DCACHE [%1+#-64], D0Ar6\n"                             \
450                 "GETL    D0Ar6, D1Ar5, [A0StP+#-40]\n"                  \
451                 "GETL    D0FrT, D1RtP, [A0StP+#-32]\n"                  \
452                 "GETL    D0.5, D1.5, [A0StP+#-24]\n"                    \
453                 "GETL    D0.6, D1.6, [A0StP+#-16]\n"                    \
454                 "GETL    D0.7, D1.7, [A0StP+#-8]\n"                     \
455                 "SUB A0StP, A0StP, #40\n"                               \
456                 "       .section .fixup,\"ax\"\n"                       \
457                 "4:\n"                                                  \
458                 "       ADD             %0, %0, #4\n"                   \
459                 "3:\n"                                                  \
460                 "       MOV     D0Ar2, TXSTATUS\n"                      \
461                 "       MOV     D1Ar1, TXSTATUS\n"                      \
462                 "       AND     D1Ar1, D1Ar1, #0xFFFFF8FF\n"            \
463                 "       MOV     TXSTATUS, D1Ar1\n"                      \
464                         FIXUP                                           \
465                 "       MOVT    D0Ar2,#HI(1b)\n"                        \
466                 "       JUMP    D0Ar2,#LO(1b)\n"                        \
467                 "       .previous\n"                                    \
468                 "       .section __ex_table,\"a\"\n"                    \
469                 "       .long 21b,3b\n"                                 \
470                 "       .long 22b,3b\n"                                 \
471                 "       .long 23b,3b\n"                                 \
472                 "       .long 24b,3b\n"                                 \
473                 "       .long 25b,3b\n"                                 \
474                 "       .long 26b,3b\n"                                 \
475                 "       .long 27b,3b\n"                                 \
476                 "       .long 28b,3b\n"                                 \
477                 "       .long 29b,3b\n"                                 \
478                 "       .long 30b,3b\n"                                 \
479                 "       .long 31b,3b\n"                                 \
480                 "       .long 32b,3b\n"                                 \
481                 "       .long 33b,3b\n"                                 \
482                 "       .long 34b,3b\n"                                 \
483                 "       .long 35b,3b\n"                                 \
484                 "       .long 36b,3b\n"                                 \
485                 "       .long 37b,4b\n"                                 \
486                 "       .previous\n"                                    \
487                 : "=r" (to), "=r" (from), "=r" (ret), "=d" (n)          \
488                 : "0" (to), "1" (from), "2" (ret), "3" (n)              \
489                 : "D1Ar1", "D0Ar2", "memory")
490
491 /*      rewind 'to' and 'from'  pointers when a fault occurs
492  *
493  *      Rationale:
494  *              A fault always occurs on writing to user buffer. A fault
495  *              is at a single address, so we need to rewind by only 4
496  *              bytes.
497  *              Since we do a complete read from kernel buffer before
498  *              writing, we need to rewind it also. The amount to be
499  *              rewound equals the number of faulty writes in MSETD
500  *              which is: [4 - (LSM_STEP-1)]*4
501  *              LSM_STEP is bits 10:8 in TXSTATUS which is already read
502  *              and stored in D0Ar2
503  *
504  *              NOTE: If a fault occurs at the last operation in M{G,S}ETL
505  *                      LSM_STEP will be 0. ie: we do 4 writes in our case, if
506  *                      a fault happens at the 4th write, LSM_STEP will be 0
507  *                      instead of 4. The code copes with that.
508  *
509  *              n is updated by the number of successful writes, which is:
510  *              n = n - (LSM_STEP-1)*4
511  */
/*
 * copy_to_user wrapper around the 32-bit RAPF loop.  The FIXUP fragment
 * rewinds 'from' by the number of faulty writes ([4 - (LSM_STEP-1)]*4),
 * 'to' by 4, and credits n with the successful writes, per the comment
 * above.  LSM_STEP was read into D0Ar2 (bits 10:8) by the core fixup.
 *
 * Fix: ADDZ is conditional on the zero flag, but plain AND does not
 * update the condition flags on Meta, so ADDZ tested stale flags and
 * the LSM_STEP == 0 case (fault at the last of the four SETDs) could
 * rewind by the wrong amount.  Use ANDS so the flags reflect the
 * masked LSM_STEP value (upstream: "metag/usercopy: Set flags before
 * ADDZ").
 */
512 #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\
513         __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,           \
514                 "LSR    D0Ar2, D0Ar2, #8\n"                             \
515                 "ANDS   D0Ar2, D0Ar2, #0x7\n"                           \
516                 "ADDZ   D0Ar2, D0Ar2, #4\n"                             \
517                 "SUB    D0Ar2, D0Ar2, #1\n"                             \
518                 "MOV    D1Ar1, #4\n"                                    \
519                 "SUB    D0Ar2, D1Ar1, D0Ar2\n"                          \
520                 "LSL    D0Ar2, D0Ar2, #2\n"                             \
521                 "LSL    D1Ar1, D1Ar1, #2\n"                             \
522                 "SUB    D1Ar1, D1Ar1, D0Ar2\n"                          \
523                 "SUB    %0, %0, #4\n"                                   \
524                 "SUB    %1,     %1,     D0Ar2\n"                        \
525                 "SUB    %3, %3, D1Ar1\n")
526
/*
 * Copy a block of kernel memory to user space.
 *
 * pdst: destination address in user space
 * psrc: source address in kernel memory
 * n:    number of bytes to copy
 *
 * Returns the number of bytes that could NOT be copied; 0 means the
 * whole block was copied.  On a fault the copy aborts early and the
 * bytes not yet attempted (n) are added to the count the fixup code
 * already recorded as failed (retn).
 */
unsigned long __copy_user(void __user *pdst, const void *psrc,
			  unsigned long n)
{
	/*
	 * dst/src are pinned to address units A0.2/A1.2 — presumably
	 * required by the rapf loop asm; confirm against
	 * __asm_copy_user_*_rapf_loop before changing.
	 */
	register char __user *dst asm ("A0.2") = pdst;
	register const char *src asm ("A1.2") = psrc;
	unsigned long retn = 0;

	if (n == 0)
		return 0;

	/* Align src to an even address (n >= 1 is guaranteed here). */
	if ((unsigned long) src & 1) {
		__asm_copy_to_user_1(dst, src, retn);
		n--;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 1) {
		/* Worst case - byte copy */
		while (n > 0) {
			__asm_copy_to_user_1(dst, src, retn);
			n--;
			if (retn)
				return retn + n;
		}
	}
	/* Align src to a 4-byte boundary, if enough bytes remain. */
	if (((unsigned long) src & 2) && n >= 2) {
		__asm_copy_to_user_2(dst, src, retn);
		n -= 2;
		if (retn)
			return retn + n;
	}
	if ((unsigned long) dst & 2) {
		/* Second worst case - word copy */
		while (n >= 2) {
			__asm_copy_to_user_2(dst, src, retn);
			n -= 2;
			if (retn)
				return retn + n;
		}
	}

#ifdef USE_RAPF
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		if (n >= RAPF_MIN_BUF_SIZE) {
			/* copy user using 64 bit rapf copy */
			__asm_copy_to_user_64bit_rapf_loop(dst, src, retn,
							n, "64cu");
		}
		/* Mop up any 8-byte chunks the rapf loop left behind. */
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
	if (n >= RAPF_MIN_BUF_SIZE) {
		/* copy user using 32 bit rapf copy */
		__asm_copy_to_user_32bit_rapf_loop(dst, src, retn, n, "32cu");
	}
#else
	/* 64 bit copy loop */
	if (!(((unsigned long) src | (__force unsigned long) dst) & 7)) {
		while (n >= 8) {
			__asm_copy_to_user_8x64(dst, src, retn);
			n -= 8;
			if (retn)
				return retn + n;
		}
	}
#endif

	while (n >= 16) {
		__asm_copy_to_user_16(dst, src, retn);
		n -= 16;
		if (retn)
			return retn + n;
	}

	while (n >= 4) {
		__asm_copy_to_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;
	}

	/* Tail of 0-3 bytes. */
	switch (n) {
	case 0:
		break;
	case 1:
		__asm_copy_to_user_1(dst, src, retn);
		break;
	case 2:
		__asm_copy_to_user_2(dst, src, retn);
		break;
	case 3:
		__asm_copy_to_user_3(dst, src, retn);
		break;
	}

	/*
	 * If we get here, retn correctly reflects the number of failing
	 * bytes.
	 */
	return retn;
}
EXPORT_SYMBOL(__copy_user);
634
/*
 * Small copy-from-user helpers.  Each user-space GET is paired with a
 * SET into the kernel buffer; a faulting access jumps to the FIXUP
 * fragment, which adds the size of the failed access to ret.  The
 * *x_cont variants let larger copies be composed by chaining extra
 * COPY/FIXUP/TENTRY fragments onto the base sequence.
 */
#define __asm_copy_from_user_1(to, from, ret) \
	__asm_copy_user_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"	\
		"2:	SETB [%0++],D1Ar1\n",	\
		"3:	ADD  %2,%2,#1\n",	\
		"	.long 2b,3b\n")

#define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	GETW D1Ar1,[%1++]\n"		\
		"2:	SETW [%0++],D1Ar1\n" COPY,	\
		"3:	ADD  %2,%2,#2\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_2(to, from, ret) \
	__asm_copy_from_user_2x_cont(to, from, ret, "", "", "")

/* 3-byte copy: word then byte, each with its own fixup (2+1 bytes). */
#define __asm_copy_from_user_3(to, from, ret)		\
	__asm_copy_from_user_2x_cont(to, from, ret,	\
		"	GETB D1Ar1,[%1++]\n"		\
		"4:	SETB [%0++],D1Ar1\n",		\
		"5:	ADD  %2,%2,#1\n",		\
		"	.long 4b,5b\n")

#define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
	__asm_copy_user_cont(to, from, ret,		\
		"	GETD D1Ar1,[%1++]\n"		\
		"2:	SETD [%0++],D1Ar1\n" COPY,	\
		"3:	ADD  %2,%2,#4\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_copy_from_user_4(to, from, ret) \
	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
668
/*
 * Copy 8 bytes from user space with a 64-bit GETL/SETL pair.  The
 * exception-table entry sits on the SETL (label 2) — per the note at
 * the top of the file a delayed fault from the GETL is presumably
 * forced out by the following memory op.  The fixup adds 8 to ret.
 */
#define __asm_copy_from_user_8x64(to, from, ret) \
	asm volatile (				\
		"	GETL D0Ar2,D1Ar1,[%1++]\n"	\
		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	ADD  %2,%2,#8\n"		\
		"	MOVT	D0Ar2,#HI(1b)\n"	\
		"	JUMP	D0Ar2,#LO(1b)\n"	\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.long 2b,3b\n"			\
		"	.previous\n"			\
		: "=a" (to), "=r" (from), "=r" (ret)	\
		: "0" (to), "1" (from), "2" (ret)	\
		: "D1Ar1", "D0Ar2", "memory")
685
/*	rewind 'from' pointer when a fault occurs
 *
 *	Rationale:
 *		A fault occurs while reading from user buffer, which is the
 *		source. Since the fault is at a single address, we only
 *		need to rewind by 8 bytes.
 *		Since we don't write to kernel buffer until we read first,
 *		the kernel buffer is at the right state and needn't be
 *		corrected.
 *
 *		(%1 below is the 'from' operand of the underlying rapf
 *		loop asm.)
 */
#define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id)	\
	__asm_copy_user_64bit_rapf_loop(to, from, ret, n, id,		\
		"SUB	%1, %1, #8\n")

/*	rewind 'from' pointer when a fault occurs
 *
 *	Rationale:
 *		A fault occurs while reading from user buffer, which is the
 *		source. Since the fault is at a single address, we only
 *		need to rewind by 4 bytes.
 *		Since we don't write to kernel buffer until we read first,
 *		the kernel buffer is at the right state and needn't be
 *		corrected.
 *
 *		(%1 below is the 'from' operand of the underlying rapf
 *		loop asm.)
 */
#define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id)	\
	__asm_copy_user_32bit_rapf_loop(to, from, ret, n, id,		\
		"SUB	%1, %1, #4\n")
713
714
715 /*
716  * Copy from user to kernel. The return-value is the number of bytes that were
717  * inaccessible.
718  */
719 unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
720                                  unsigned long n)
721 {
722         register char *dst asm ("A0.2") = pdst;
723         register const char __user *src asm ("A1.2") = psrc;
724         unsigned long retn = 0;
725
726         if (n == 0)
727                 return 0;
728
729         if ((unsigned long) src & 1) {
730                 __asm_copy_from_user_1(dst, src, retn);
731                 n--;
732                 if (retn)
733                         return retn + n;
734         }
735         if ((unsigned long) dst & 1) {
736                 /* Worst case - byte copy */
737                 while (n > 0) {
738                         __asm_copy_from_user_1(dst, src, retn);
739                         n--;
740                         if (retn)
741                                 return retn + n;
742                 }
743         }
744         if (((unsigned long) src & 2) && n >= 2) {
745                 __asm_copy_from_user_2(dst, src, retn);
746                 n -= 2;
747                 if (retn)
748                         return retn + n;
749         }
750         if ((unsigned long) dst & 2) {
751                 /* Second worst case - word copy */
752                 while (n >= 2) {
753                         __asm_copy_from_user_2(dst, src, retn);
754                         n -= 2;
755                         if (retn)
756                                 return retn + n;
757                 }
758         }
759
760 #ifdef USE_RAPF
761         /* 64 bit copy loop */
762         if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
763                 if (n >= RAPF_MIN_BUF_SIZE) {
764                         /* Copy using fast 64bit rapf */
765                         __asm_copy_from_user_64bit_rapf_loop(dst, src, retn,
766                                                         n, "64cuz");
767                 }
768                 while (n >= 8) {
769                         __asm_copy_from_user_8x64(dst, src, retn);
770                         n -= 8;
771                         if (retn)
772                                 return retn + n;
773                 }
774         }
775
776         if (n >= RAPF_MIN_BUF_SIZE) {
777                 /* Copy using fast 32bit rapf */
778                 __asm_copy_from_user_32bit_rapf_loop(dst, src, retn,
779                                                 n, "32cuz");
780         }
781 #else
782         /* 64 bit copy loop */
783         if (!(((unsigned long) src | (unsigned long) dst) & 7)) {
784                 while (n >= 8) {
785                         __asm_copy_from_user_8x64(dst, src, retn);
786                         n -= 8;
787                         if (retn)
788                                 return retn + n;
789                 }
790         }
791 #endif
792
793         while (n >= 4) {
794                 __asm_copy_from_user_4(dst, src, retn);
795                 n -= 4;
796
797                 if (retn)
798                         return retn + n;
799         }
800
801         /* If we get here, there were no memory read faults.  */
802         switch (n) {
803                 /* These copies are at least "naturally aligned" (so we don't
804                    have to check each byte), due to the src alignment code.
805                    The *_3 case *will* get the correct count for retn.  */
806         case 0:
807                 /* This case deliberately left in (if you have doubts check the
808                    generated assembly code).  */
809                 break;
810         case 1:
811                 __asm_copy_from_user_1(dst, src, retn);
812                 break;
813         case 2:
814                 __asm_copy_from_user_2(dst, src, retn);
815                 break;
816         case 3:
817                 __asm_copy_from_user_3(dst, src, retn);
818                 break;
819         }
820
821         /* If we get here, retn correctly reflects the number of failing
822            bytes.  */
823         return retn;
824 }
825 EXPORT_SYMBOL(raw_copy_from_user);
826
/*
 * Zero 8 bytes of user space with a 64-bit SETL.  The first SETL is
 * the "double write" described at the top of the file: it pushes a
 * pending fault through the memory pipe so the exception-table entry
 * on the second SETL (label 2) catches it; the fixup adds 8 to ret.
 */
#define __asm_clear_8x64(to, ret) \
	asm volatile (					\
		"	MOV  D0Ar2,#0\n"		\
		"	MOV  D1Ar1,#0\n"		\
		"	SETL [%0],D0Ar2,D1Ar1\n"	\
		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
		"3:	ADD  %1,%1,#8\n"		\
		"	MOVT	D0Ar2,#HI(1b)\n"	\
		"	JUMP	D0Ar2,#LO(1b)\n"	\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
		"	.long 2b,3b\n"			\
		"	.previous\n"			\
		: "=r" (to), "=r" (ret) \
		: "0" (to), "1" (ret)	\
		: "D1Ar1", "D0Ar2", "memory")
845
/* Zero userspace.  */

/*
 * Base sequence for the __asm_clear_* helpers: zeroes D1Ar1 and runs
 * the CLEAR fragment; a faulting store jumps to the matching FIXUP
 * fragment, which adds the number of uncleared bytes to ret before
 * resuming after the sequence.
 */
#define __asm_clear(to, ret, CLEAR, FIXUP, TENTRY) \
	asm volatile (					\
		"	MOV D1Ar1,#0\n"			\
			CLEAR				\
		"1:\n"					\
		"	.section .fixup,\"ax\"\n"	\
			FIXUP				\
		"	MOVT	D1Ar1,#HI(1b)\n"	\
		"	JUMP	D1Ar1,#LO(1b)\n"	\
		"	.previous\n"			\
		"	.section __ex_table,\"a\"\n"	\
			TENTRY				\
		"	.previous"			\
		: "=r" (to), "=r" (ret)			\
		: "0" (to), "1" (ret)			\
		: "D1Ar1", "memory")

/*
 * The first store of each pair below is the "double write" described
 * at the top of the file; the exception-table entry on the second
 * store catches the forced-out fault.
 */
#define __asm_clear_1(to, ret) \
	__asm_clear(to, ret,			\
		"	SETB [%0],D1Ar1\n"	\
		"2:	SETB [%0++],D1Ar1\n",	\
		"3:	ADD  %1,%1,#1\n",	\
		"	.long 2b,3b\n")

#define __asm_clear_2(to, ret) \
	__asm_clear(to, ret,			\
		"	SETW [%0],D1Ar1\n"	\
		"2:	SETW [%0++],D1Ar1\n",	\
		"3:	ADD  %1,%1,#2\n",	\
		"	.long 2b,3b\n")

/* 3-byte clear: word then byte, with separate fixups (2+1 bytes). */
#define __asm_clear_3(to, ret) \
	__asm_clear(to, ret,			\
		 "2:	SETW [%0++],D1Ar1\n"	\
		 "	SETB [%0],D1Ar1\n"	\
		 "3:	SETB [%0++],D1Ar1\n",	\
		 "4:	ADD  %1,%1,#2\n"	\
		 "5:	ADD  %1,%1,#1\n",	\
		 "	.long 2b,4b\n"		\
		 "	.long 3b,5b\n")
888
/*
 * 4/8/12/16-byte clears built by chaining SETD pairs onto the base
 * __asm_clear sequence; each extra SETD has its own fixup adding 4 to
 * ret if it faults.
 */
#define __asm_clear_4x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear(to, ret,				\
		"	SETD [%0],D1Ar1\n"		\
		"2:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"3:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 2b,3b\n" TENTRY)

#define __asm_clear_4(to, ret) \
	__asm_clear_4x_cont(to, ret, "", "", "")

#define __asm_clear_8x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_4x_cont(to, ret,			\
		"	SETD [%0],D1Ar1\n"		\
		"4:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"5:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 4b,5b\n" TENTRY)

#define __asm_clear_8(to, ret) \
	__asm_clear_8x_cont(to, ret, "", "", "")

#define __asm_clear_12x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_8x_cont(to, ret,			\
		"	SETD [%0],D1Ar1\n"		\
		"6:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"7:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 6b,7b\n" TENTRY)

#define __asm_clear_12(to, ret) \
	__asm_clear_12x_cont(to, ret, "", "", "")

#define __asm_clear_16x_cont(to, ret, CLEAR, FIXUP, TENTRY) \
	__asm_clear_12x_cont(to, ret,			\
		"	SETD [%0],D1Ar1\n"		\
		"8:	SETD [%0++],D1Ar1\n" CLEAR,	\
		"9:	ADD  %1,%1,#4\n" FIXUP,		\
		"	.long 8b,9b\n" TENTRY)

#define __asm_clear_16(to, ret) \
	__asm_clear_16x_cont(to, ret, "", "", "")
928
929 unsigned long __do_clear_user(void __user *pto, unsigned long pn)
930 {
931         register char __user *dst asm ("D0Re0") = pto;
932         register unsigned long n asm ("D1Re0") = pn;
933         register unsigned long retn asm ("D0Ar6") = 0;
934
935         if ((unsigned long) dst & 1) {
936                 __asm_clear_1(dst, retn);
937                 n--;
938         }
939
940         if ((unsigned long) dst & 2) {
941                 __asm_clear_2(dst, retn);
942                 n -= 2;
943         }
944
945         /* 64 bit copy loop */
946         if (!((__force unsigned long) dst & 7)) {
947                 while (n >= 8) {
948                         __asm_clear_8x64(dst, retn);
949                         n -= 8;
950                 }
951         }
952
953         while (n >= 16) {
954                 __asm_clear_16(dst, retn);
955                 n -= 16;
956         }
957
958         while (n >= 4) {
959                 __asm_clear_4(dst, retn);
960                 n -= 4;
961         }
962
963         switch (n) {
964         case 0:
965                 break;
966         case 1:
967                 __asm_clear_1(dst, retn);
968                 break;
969         case 2:
970                 __asm_clear_2(dst, retn);
971                 break;
972         case 3:
973                 __asm_clear_3(dst, retn);
974                 break;
975         }
976
977         return retn;
978 }
979 EXPORT_SYMBOL(__do_clear_user);
980
/*
 * Read one byte from user space.
 *
 * addr: user address to read
 * err:  written with -EFAULT if the access faults; left untouched on
 *       success
 *
 * Returns the byte read, or 0 (the preset value) if the read faulted.
 *
 * The GETB is issued twice: per the note at the top of the file the
 * core only raises a delayed fault once another memory op pushes it
 * through the pipe, so the exception-table entry covers the second
 * GETB at label 1.
 */
unsigned char __get_user_asm_b(const void __user *addr, long *err)
{
	register unsigned char x asm ("D0Re0") = 0;
	asm volatile (
		"	GETB %0,[%2]\n"
		"1:\n"
		"	GETB %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV	D0FrT,%3\n"
		"	SETD	[%1],D0FrT\n"
		"	MOVT	D0FrT,#HI(2b)\n"
		"	JUMP	D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_b);
1004
/*
 * Read a 16-bit word from user space.
 *
 * addr: user address to read (GETW alignment rules apply)
 * err:  written with -EFAULT if the access faults; left untouched on
 *       success
 *
 * Returns the word read, or 0 (the preset value) if the read faulted.
 * Uses the same double-GET delayed-fault trick as __get_user_asm_b.
 */
unsigned short __get_user_asm_w(const void __user *addr, long *err)
{
	register unsigned short x asm ("D0Re0") = 0;
	asm volatile (
		"	GETW %0,[%2]\n"
		"1:\n"
		"	GETW %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV	D0FrT,%3\n"
		"	SETD	[%1],D0FrT\n"
		"	MOVT	D0FrT,#HI(2b)\n"
		"	JUMP	D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_w);
1028
/*
 * Read a 32-bit word from user space.
 *
 * addr: user address to read (GETD alignment rules apply)
 * err:  written with -EFAULT if the access faults; left untouched on
 *       success
 *
 * Returns the value read, or 0 (the preset value) if the read
 * faulted.  Uses the same double-GET delayed-fault trick as
 * __get_user_asm_b.
 */
unsigned int __get_user_asm_d(const void __user *addr, long *err)
{
	register unsigned int x asm ("D0Re0") = 0;
	asm volatile (
		"	GETD %0,[%2]\n"
		"1:\n"
		"	GETD %0,[%2]\n"
		"2:\n"
		"	.section .fixup,\"ax\"\n"
		"3:	MOV	D0FrT,%3\n"
		"	SETD	[%1],D0FrT\n"
		"	MOVT	D0FrT,#HI(2b)\n"
		"	JUMP	D0FrT,#LO(2b)\n"
		"	.previous\n"
		"	.section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		"	.previous\n"
		: "=r" (x)
		: "r" (err), "r" (addr), "P" (-EFAULT)
		: "D0FrT");
	return x;
}
EXPORT_SYMBOL(__get_user_asm_d);
1052
/*
 * Store one byte to user space.
 *
 * x:    value to store (SETB writes the low byte)
 * addr: user destination address
 *
 * Returns 0 on success or -EFAULT if the store faulted.
 *
 * The SETB is issued twice ("double write", see the note at the top
 * of the file): the first store forces a pending fault out of the
 * memory pipe so the exception-table entry on the second (label 1)
 * catches it; on success the duplicate store writes the same value.
 */
long __put_user_asm_b(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	MOV  %0,#0\n"
		"	SETB [%2],%1\n"
		"1:\n"
		"	SETB [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV	%0,%3\n"
		"	MOVT	D0FrT,#HI(2b)\n"
		"	JUMP	D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_b);
1076
/*
 * Store a 16-bit word to user space.
 *
 * x:    value to store (SETW writes the low 16 bits)
 * addr: user destination address (SETW alignment rules apply)
 *
 * Returns 0 on success or -EFAULT if the store faulted.  Uses the
 * same double-write trick as __put_user_asm_b.
 */
long __put_user_asm_w(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	MOV  %0,#0\n"
		"	SETW [%2],%1\n"
		"1:\n"
		"	SETW [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV	%0,%3\n"
		"	MOVT	D0FrT,#HI(2b)\n"
		"	JUMP	D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_w);
1100
/*
 * Store a 32-bit word to user space.
 *
 * x:    value to store
 * addr: user destination address (SETD alignment rules apply)
 *
 * Returns 0 on success or -EFAULT if the store faulted.  Uses the
 * same double-write trick as __put_user_asm_b.
 */
long __put_user_asm_d(unsigned int x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	MOV  %0,#0\n"
		"	SETD [%2],%1\n"
		"1:\n"
		"	SETD [%2],%1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV	%0,%3\n"
		"	MOVT	D0FrT,#HI(2b)\n"
		"	JUMP	D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_d);
1124
/*
 * Store a 64-bit value to user space.
 *
 * x:    value to store; %1/%t1 presumably name the low/high halves of
 *       the register pair holding x (metag 't' operand modifier) —
 *       confirm against the metag asm operand documentation
 * addr: user destination address (SETL alignment rules apply)
 *
 * Returns 0 on success or -EFAULT if the store faulted.  Uses the
 * same double-write trick as __put_user_asm_b.
 */
long __put_user_asm_l(unsigned long long x, void __user *addr)
{
	register unsigned int err asm ("D0Re0") = 0;
	asm volatile (
		"	MOV  %0,#0\n"
		"	SETL [%2],%1,%t1\n"
		"1:\n"
		"	SETL [%2],%1,%t1\n"
		"2:\n"
		".section .fixup,\"ax\"\n"
		"3:	MOV	%0,%3\n"
		"	MOVT	D0FrT,#HI(2b)\n"
		"	JUMP	D0FrT,#LO(2b)\n"
		".previous\n"
		".section __ex_table,\"a\"\n"
		"	.long 1b,3b\n"
		".previous"
		: "=r"(err)
		: "d" (x), "a" (addr), "P"(-EFAULT)
		: "D0FrT");
	return err;
}
EXPORT_SYMBOL(__put_user_asm_l);
1148
/*
 * Return the length of a user-space string, bounded by count.
 *
 * The result includes the terminating NUL (strlen + 1).  If no NUL is
 * found within count bytes the result is larger than count.  Returns
 * 0 if the initial access_ok() check fails or if reading the string
 * faults.
 */
long strnlen_user(const char __user *src, long count)
{
	long res;

	/* Size 0: only the start address itself is range-checked. */
	if (!access_ok(VERIFY_READ, src, 0))
		return 0;

	asm volatile ("	MOV	D0Ar4, %1\n"
		      "	MOV	D0Ar6, %2\n"
		      "0:\n"
		      "	SUBS	D0FrT, D0Ar6, #0\n"	/* remaining <= 0? */
		      "	SUB	D0Ar6, D0Ar6, #1\n"
		      "	BLE	2f\n"
		      "	GETB	D0FrT, [D0Ar4+#1++]\n"
		      "1:\n"
		      "	TST	D0FrT, #255\n"		/* NUL byte found? */
		      "	BNE	0b\n"
		      "2:\n"
		      "	SUB	%0, %2, D0Ar6\n"	/* res = count - remaining */
		      "3:\n"
		      "	.section .fixup,\"ax\"\n"
		      "4:\n"
		      "	MOV	%0, #0\n"		/* fault -> report 0 */
		      "	MOVT	D0FrT,#HI(3b)\n"
		      "	JUMP	D0FrT,#LO(3b)\n"
		      "	.previous\n"
		      "	.section __ex_table,\"a\"\n"
		      "	.long 1b,4b\n"
		      "	.previous\n"
		      : "=r" (res)
		      : "r" (src), "r" (count)
		      : "D0FrT", "D0Ar4", "D0Ar6", "cc");

	return res;
}
EXPORT_SYMBOL(strnlen_user);
1185
/*
 * Copy a NUL-terminated string from user space into dst, writing at
 * most count bytes.
 *
 * Returns the length of the copied string excluding the NUL, count if
 * the source was not terminated within count bytes, or -EFAULT if
 * reading the source faulted.  dst is NUL-terminated only when the
 * terminator was found within count bytes.
 */
long __strncpy_from_user(char *dst, const char __user *src, long count)
{
	long res;

	if (count == 0)
		return 0;

	/*
	 * Currently, in 2.4.0-test9, most ports use a simple byte-copy loop.
	 *  So do we.
	 *
	 *  This code is deduced from:
	 *
	 *	char tmp2;
	 *	long tmp1, tmp3;
	 *	tmp1 = count;
	 *	while ((*dst++ = (tmp2 = *src++)) != 0
	 *	       && --tmp1)
	 *	  ;
	 *
	 *	res = count - tmp1;
	 *
	 *  with tweaks.
	 */

	asm volatile ("	MOV  %0,%3\n"
		      "1:\n"
		      "	GETB D0FrT,[%2++]\n"
		      "2:\n"
		      "	CMP  D0FrT,#0\n"		/* hit the NUL? */
		      "	SETB [%1++],D0FrT\n"		/* copy byte (incl. NUL) */
		      "	BEQ  3f\n"
		      "	SUBS %0,%0,#1\n"
		      "	BNZ  1b\n"
		      "3:\n"
		      "	SUB  %0,%3,%0\n"		/* res = count - remaining */
		      "4:\n"
		      "	.section .fixup,\"ax\"\n"
		      "5:\n"
		      "	MOV  %0,%7\n"			/* fault -> -EFAULT */
		      "	MOVT    D0FrT,#HI(4b)\n"
		      "	JUMP    D0FrT,#LO(4b)\n"
		      "	.previous\n"
		      "	.section __ex_table,\"a\"\n"
		      "	.long 2b,5b\n"
		      "	.previous"
		      : "=r" (res), "=r" (dst), "=r" (src), "=r" (count)
		      : "3" (count), "1" (dst), "2" (src), "P" (-EFAULT)
		      : "D0FrT", "memory", "cc");

	return res;
}
EXPORT_SYMBOL(__strncpy_from_user);