#include <linux/export.h>
#include <linux/uio.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/splice.h>
#include <net/checksum.h>

#define PIPE_PARANOIA /* for now */

#define iterate_iovec(i, n, __v, __p, skip, STEP) {     \
        size_t left;                                    \
        size_t wanted = n;                              \
        __p = i->iov;                                   \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        } else {                                        \
                left = 0;                               \
        }                                               \
        while (unlikely(!left && n)) {                  \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                left = (STEP);                          \
                __v.iov_len -= left;                    \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted - n;                                 \
}
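
/*
 * Note on the STEP contract for iterate_iovec(): STEP is evaluated once per
 * segment with __v describing the current chunk, and must yield the number
 * of bytes it failed to process (0 on full success); a nonzero value stops
 * the walk early, which is how short user copies terminate it.  A sketch of
 * such a STEP, copying out of a kernel buffer (illustrative names only,
 * mirroring copy_to_iter() below):
 *
 *	__copy_to_user(v.iov_base, (src += v.iov_len) - v.iov_len, v.iov_len)
 */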

#define iterate_kvec(i, n, __v, __p, skip, STEP) {      \
        size_t wanted = n;                              \
        __p = i->kvec;                                  \
        __v.iov_len = min(n, __p->iov_len - skip);      \
        if (likely(__v.iov_len)) {                      \
                __v.iov_base = __p->iov_base + skip;    \
                (void)(STEP);                           \
                skip += __v.iov_len;                    \
                n -= __v.iov_len;                       \
        }                                               \
        while (unlikely(n)) {                           \
                __p++;                                  \
                __v.iov_len = min(n, __p->iov_len);     \
                if (unlikely(!__v.iov_len))             \
                        continue;                       \
                __v.iov_base = __p->iov_base;           \
                (void)(STEP);                           \
                skip = __v.iov_len;                     \
                n -= __v.iov_len;                       \
        }                                               \
        n = wanted;                                     \
}

#define iterate_bvec(i, n, __v, __bi, skip, STEP) {     \
        struct bvec_iter __start;                       \
        __start.bi_size = n;                            \
        __start.bi_bvec_done = skip;                    \
        __start.bi_idx = 0;                             \
        for_each_bvec(__v, i->bvec, __bi, __start) {    \
                if (!__v.bv_len)                        \
                        continue;                       \
                (void)(STEP);                           \
        }                                               \
}

#define iterate_all_kinds(i, n, v, I, B, K) {                   \
        size_t skip = i->iov_offset;                            \
        if (unlikely(i->type & ITER_BVEC)) {                    \
                struct bio_vec v;                               \
                struct bvec_iter __bi;                          \
                iterate_bvec(i, n, v, __bi, skip, (B))          \
        } else if (unlikely(i->type & ITER_KVEC)) {             \
                const struct kvec *kvec;                        \
                struct kvec v;                                  \
                iterate_kvec(i, n, v, kvec, skip, (K))          \
        } else {                                                \
                const struct iovec *iov;                        \
                struct iovec v;                                 \
                iterate_iovec(i, n, v, iov, skip, (I))          \
        }                                                       \
}
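
/*
 * iterate_all_kinds() dispatches on the iterator flavour: the I expression
 * runs for user-space iovecs, B for bio_vec pages and K for kernel kvecs,
 * each with "v" rebound to the matching segment type.  Kernel-memory steps
 * (B and K) cannot fault, which is why iterate_kvec()/iterate_bvec()
 * discard the STEP value instead of checking for a short copy.
 */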

#define iterate_and_advance(i, n, v, I, B, K) {                 \
        if (unlikely(i->count < n))                             \
                n = i->count;                                   \
        if (i->count) {                                         \
                size_t skip = i->iov_offset;                    \
                if (unlikely(i->type & ITER_BVEC)) {            \
                        const struct bio_vec *bvec = i->bvec;   \
                        struct bio_vec v;                       \
                        struct bvec_iter __bi;                  \
                        iterate_bvec(i, n, v, __bi, skip, (B))  \
                        i->bvec = __bvec_iter_bvec(i->bvec, __bi);      \
                        i->nr_segs -= i->bvec - bvec;           \
                        skip = __bi.bi_bvec_done;               \
                } else if (unlikely(i->type & ITER_KVEC)) {     \
                        const struct kvec *kvec;                \
                        struct kvec v;                          \
                        iterate_kvec(i, n, v, kvec, skip, (K))  \
                        if (skip == kvec->iov_len) {            \
                                kvec++;                         \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= kvec - i->kvec;           \
                        i->kvec = kvec;                         \
                } else {                                        \
                        const struct iovec *iov;                \
                        struct iovec v;                         \
                        iterate_iovec(i, n, v, iov, skip, (I))  \
                        if (skip == iov->iov_len) {             \
                                iov++;                          \
                                skip = 0;                       \
                        }                                       \
                        i->nr_segs -= iov - i->iov;             \
                        i->iov = iov;                           \
                }                                               \
                i->count -= n;                                  \
                i->iov_offset = skip;                           \
        }                                                       \
}
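
/*
 * Unlike iterate_all_kinds(), iterate_and_advance() also consumes what it
 * walked: on exit, i->count, i->nr_segs, the segment pointer and
 * i->iov_offset all reflect the bytes processed, and "n" has been clamped
 * to the bytes actually available.  Callers can therefore just return
 * "bytes" after using it (see copy_to_iter() below).
 */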

static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *from;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_writeable(buf, copy)) {
                kaddr = kmap_atomic(page);
                from = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_to_user_inatomic(buf, from, copy);
                copy -= left;
                skip += copy;
                from += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_to_user_inatomic(buf, from, copy);
                        copy -= left;
                        skip = copy;
                        from += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = from - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        from = kaddr + offset;
        left = __copy_to_user(buf, from, copy);
        copy -= left;
        skip += copy;
        from += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_to_user(buf, from, copy);
                copy -= left;
                skip = copy;
                from += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}
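
/*
 * The CONFIG_HIGHMEM branch above is a fast path: if the user pages can be
 * touched up front (fault_in_pages_writeable() succeeds), the page is
 * copied under kmap_atomic() without sleeping.  Only when an atomic copy
 * comes up short (the user page went away again) does it fall through to
 * the sleepable kmap() variant, where the fault can be handled.  On
 * !CONFIG_HIGHMEM kernels the atomic branch is compiled out entirely,
 * since kmap() is then just page_address().
 */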

static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        size_t skip, copy, left, wanted;
        const struct iovec *iov;
        char __user *buf;
        void *kaddr, *to;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        wanted = bytes;
        iov = i->iov;
        skip = i->iov_offset;
        buf = iov->iov_base + skip;
        copy = min(bytes, iov->iov_len - skip);

        if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_pages_readable(buf, copy)) {
                kaddr = kmap_atomic(page);
                to = kaddr + offset;

                /* first chunk, usually the only one */
                left = __copy_from_user_inatomic(to, buf, copy);
                copy -= left;
                skip += copy;
                to += copy;
                bytes -= copy;

                while (unlikely(!left && bytes)) {
                        iov++;
                        buf = iov->iov_base;
                        copy = min(bytes, iov->iov_len);
                        left = __copy_from_user_inatomic(to, buf, copy);
                        copy -= left;
                        skip = copy;
                        to += copy;
                        bytes -= copy;
                }
                if (likely(!bytes)) {
                        kunmap_atomic(kaddr);
                        goto done;
                }
                offset = to - kaddr;
                buf += copy;
                kunmap_atomic(kaddr);
                copy = min(bytes, iov->iov_len - skip);
        }
        /* Too bad - revert to non-atomic kmap */

        kaddr = kmap(page);
        to = kaddr + offset;
        left = __copy_from_user(to, buf, copy);
        copy -= left;
        skip += copy;
        to += copy;
        bytes -= copy;
        while (unlikely(!left && bytes)) {
                iov++;
                buf = iov->iov_base;
                copy = min(bytes, iov->iov_len);
                left = __copy_from_user(to, buf, copy);
                copy -= left;
                skip = copy;
                to += copy;
                bytes -= copy;
        }
        kunmap(page);

done:
        if (skip == iov->iov_len) {
                iov++;
                skip = 0;
        }
        i->count -= wanted - bytes;
        i->nr_segs -= iov - i->iov;
        i->iov = iov;
        i->iov_offset = skip;
        return wanted - bytes;
}

#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        int idx = i->idx;
        int next = pipe->curbuf + pipe->nrbufs;
        if (i->iov_offset) {
                struct pipe_buffer *p;
                if (unlikely(!pipe->nrbufs))
                        goto Bad;       // pipe must be non-empty
                if (unlikely(idx != ((next - 1) & (pipe->buffers - 1))))
                        goto Bad;       // must be at the last buffer...

                p = &pipe->bufs[idx];
                if (unlikely(p->offset + p->len != i->iov_offset))
                        goto Bad;       // ... at the end of segment
        } else {
                if (idx != (next & (pipe->buffers - 1)))
                        goto Bad;       // must be right after the last buffer
        }
        return true;
Bad:
        printk(KERN_ERR "idx = %d, offset = %zd\n", i->idx, i->iov_offset);
        printk(KERN_ERR "curbuf = %d, nrbufs = %d, buffers = %d\n",
                        pipe->curbuf, pipe->nrbufs, pipe->buffers);
        for (idx = 0; idx < pipe->buffers; idx++)
                printk(KERN_ERR "[%p %p %d %d]\n",
                        pipe->bufs[idx].ops,
                        pipe->bufs[idx].page,
                        pipe->bufs[idx].offset,
                        pipe->bufs[idx].len);
        WARN_ON(1);
        return false;
}
#else
#define sanity(i) true
#endif

static inline int next_idx(int idx, struct pipe_inode_info *pipe)
{
        return (idx + 1) & (pipe->buffers - 1);
}

static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        struct pipe_buffer *buf;
        size_t off;
        int idx;

        if (unlikely(bytes > i->count))
                bytes = i->count;

        if (unlikely(!bytes))
                return 0;

        if (!sanity(i))
                return 0;

        off = i->iov_offset;
        idx = i->idx;
        buf = &pipe->bufs[idx];
        if (off) {
                if (offset == off && buf->page == page) {
                        /* merge with the last one */
                        buf->len += bytes;
                        i->iov_offset += bytes;
                        goto out;
                }
                idx = next_idx(idx, pipe);
                buf = &pipe->bufs[idx];
        }
        if (idx == pipe->curbuf && pipe->nrbufs)
                return 0;
        pipe->nrbufs++;
        buf->ops = &page_cache_pipe_buf_ops;
        get_page(buf->page = page);
        buf->offset = offset;
        buf->len = bytes;
        i->iov_offset = offset + bytes;
        i->idx = idx;
out:
        i->count -= bytes;
        return bytes;
}
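
/*
 * Note that copy_page_to_iter_pipe() copies no data at all: it takes a
 * reference on the page and appends it to the pipe ring as a
 * page_cache_pipe_buf_ops buffer, extending the previous buffer when the
 * new chunk is contiguous with it.  This is the zero-copy path that
 * splice() from a file relies on; the only failure mode is a full pipe
 * ring, in which case 0 is returned.
 */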

/*
 * Fault in one or more iovecs of the given iov_iter, to a maximum length of
 * bytes.  For each iovec, fault in each page that constitutes the iovec.
 *
 * Return 0 on success, or non-zero if the memory could not be accessed (i.e.
 * because it is an invalid address).
 */
int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
{
        size_t skip = i->iov_offset;
        const struct iovec *iov;
        int err;
        struct iovec v;

        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                iterate_iovec(i, bytes, v, iov, skip, ({
                        err = fault_in_pages_readable(v.iov_base, v.iov_len);
                        if (unlikely(err))
                                return err;
                0;}))
        }
        return 0;
}
EXPORT_SYMBOL(iov_iter_fault_in_readable);
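
/*
 * Typical caller pattern (a sketch of a buffered-write path, not code from
 * this file): pre-fault the user pages while it is still safe to sleep,
 * then do the real copy with page faults disabled:
 *
 *	if (iov_iter_fault_in_readable(i, bytes))
 *		return -EFAULT;
 *	...
 *	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
 */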

void iov_iter_init(struct iov_iter *i, int direction,
                        const struct iovec *iov, unsigned long nr_segs,
                        size_t count)
{
        /* It will get better.  Eventually... */
        if (segment_eq(get_fs(), KERNEL_DS)) {
                direction |= ITER_KVEC;
                i->type = direction;
                i->kvec = (struct kvec *)iov;
        } else {
                i->type = direction;
                i->iov = iov;
        }
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_init);

static void memcpy_from_page(char *to, struct page *page, size_t offset, size_t len)
{
        char *from = kmap_atomic(page);
        memcpy(to, from + offset, len);
        kunmap_atomic(from);
}

static void memcpy_to_page(struct page *page, size_t offset, const char *from, size_t len)
{
        char *to = kmap_atomic(page);
        memcpy(to + offset, from, len);
        kunmap_atomic(to);
}

static void memzero_page(struct page *page, size_t offset, size_t len)
{
        char *addr = kmap_atomic(page);
        memset(addr + offset, 0, len);
        kunmap_atomic(addr);
}

static inline bool allocated(struct pipe_buffer *buf)
{
        return buf->ops == &default_pipe_buf_ops;
}

static inline void data_start(const struct iov_iter *i, int *idxp, size_t *offp)
{
        size_t off = i->iov_offset;
        int idx = i->idx;
        if (off && (!allocated(&i->pipe->bufs[idx]) || off == PAGE_SIZE)) {
                idx = next_idx(idx, i->pipe);
                off = 0;
        }
        *idxp = idx;
        *offp = off;
}

static size_t push_pipe(struct iov_iter *i, size_t size,
                        int *idxp, size_t *offp)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t off;
        int idx;
        ssize_t left;

        if (unlikely(size > i->count))
                size = i->count;
        if (unlikely(!size))
                return 0;

        left = size;
        data_start(i, &idx, &off);
        *idxp = idx;
        *offp = off;
        if (off) {
                left -= PAGE_SIZE - off;
                if (left <= 0) {
                        pipe->bufs[idx].len += size;
                        return size;
                }
                pipe->bufs[idx].len = PAGE_SIZE;
                idx = next_idx(idx, pipe);
        }
        while (idx != pipe->curbuf || !pipe->nrbufs) {
                struct page *page = alloc_page(GFP_USER);
                if (!page)
                        break;
                pipe->nrbufs++;
                pipe->bufs[idx].ops = &default_pipe_buf_ops;
                pipe->bufs[idx].page = page;
                pipe->bufs[idx].offset = 0;
                if (left <= PAGE_SIZE) {
                        pipe->bufs[idx].len = left;
                        return size;
                }
                pipe->bufs[idx].len = PAGE_SIZE;
                left -= PAGE_SIZE;
                idx = next_idx(idx, pipe);
        }
        return size - left;
}
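
/*
 * push_pipe() reserves up to "size" bytes of buffer space in the pipe:
 * first by growing the partially filled tail buffer (if it is one of our
 * default-allocated ones), then by allocating fresh pages until the request
 * is satisfied, the ring fills up, or an allocation fails.  The return
 * value is how much space was actually reserved; *idxp/*offp tell the
 * caller where to start writing.
 */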

static size_t copy_pipe_to_iter(const void *addr, size_t bytes,
                                struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        if (unlikely(!n))
                return 0;
        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memcpy_to_page(pipe->bufs[idx].page, off, addr, chunk);
                i->idx = idx;
                i->iov_offset = off + chunk;
                n -= chunk;
                addr += chunk;
        }
        i->count -= bytes;
        return bytes;
}

size_t copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
        const char *from = addr;
        if (unlikely(i->type & ITER_PIPE))
                return copy_pipe_to_iter(addr, bytes, i);
        iterate_and_advance(i, bytes, v,
                __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                               v.iov_len),
                memcpy_to_page(v.bv_page, v.bv_offset,
                               (from += v.bv_len) - v.bv_len, v.bv_len),
                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_to_iter);
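
/*
 * The "(from += v.iov_len) - v.iov_len" idiom above is a post-increment
 * written as an expression: it advances the source pointer by one chunk
 * while still handing the pre-advance value to the copy routine, keeping
 * each STEP a single expression as the iterate_* macros require.  A minimal
 * caller sketch (hypothetical; "i" already set up over the destination):
 *
 *	struct foo_hdr hdr = { 0 };
 *	if (copy_to_iter(&hdr, sizeof(hdr), i) != sizeof(hdr))
 *		return -EFAULT;
 */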

size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                                 v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter);

size_t copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
{
        char *to = addr;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v,
                __copy_from_user_nocache((to += v.iov_len) - v.iov_len,
                                         v.iov_base, v.iov_len),
                memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(copy_from_iter_nocache);

size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_to_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else if (likely(!(i->type & ITER_PIPE)))
                return copy_page_to_iter_iovec(page, offset, bytes, i);
        else
                return copy_page_to_iter_pipe(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_to_iter);

size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                         struct iov_iter *i)
{
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                void *kaddr = kmap_atomic(page);
                size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                kunmap_atomic(kaddr);
                return wanted;
        } else
                return copy_page_from_iter_iovec(page, offset, bytes, i);
}
EXPORT_SYMBOL(copy_page_from_iter);

static size_t pipe_zero(size_t bytes, struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        size_t n, off;
        int idx;

        if (!sanity(i))
                return 0;

        bytes = n = push_pipe(i, bytes, &idx, &off);
        if (unlikely(!n))
                return 0;

        for ( ; n; idx = next_idx(idx, pipe), off = 0) {
                size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
                memzero_page(pipe->bufs[idx].page, off, chunk);
                i->idx = idx;
                i->iov_offset = off + chunk;
                n -= chunk;
        }
        i->count -= bytes;
        return bytes;
}

size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
{
        if (unlikely(i->type & ITER_PIPE))
                return pipe_zero(bytes, i);
        iterate_and_advance(i, bytes, v,
                __clear_user(v.iov_base, v.iov_len),
                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
                memset(v.iov_base, 0, v.iov_len)
        )

        return bytes;
}
EXPORT_SYMBOL(iov_iter_zero);

size_t iov_iter_copy_from_user_atomic(struct page *page,
                struct iov_iter *i, unsigned long offset, size_t bytes)
{
        char *kaddr = kmap_atomic(page), *p = kaddr + offset;
        if (unlikely(i->type & ITER_PIPE)) {
                kunmap_atomic(kaddr);
                WARN_ON(1);
                return 0;
        }
        iterate_all_kinds(i, bytes, v,
                __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                          v.iov_base, v.iov_len),
                memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
                                 v.bv_offset, v.bv_len),
                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
        )
        kunmap_atomic(kaddr);
        return bytes;
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);

static inline void pipe_truncate(struct iov_iter *i)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (pipe->nrbufs) {
                size_t off = i->iov_offset;
                int idx = i->idx;
                int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
                if (off) {
                        pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
                        idx = next_idx(idx, pipe);
                        nrbufs++;
                }
                while (pipe->nrbufs > nrbufs) {
                        pipe_buf_release(pipe, &pipe->bufs[idx]);
                        idx = next_idx(idx, pipe);
                        pipe->nrbufs--;
                }
        }
}

static void pipe_advance(struct iov_iter *i, size_t size)
{
        struct pipe_inode_info *pipe = i->pipe;
        if (unlikely(i->count < size))
                size = i->count;
        if (size) {
                struct pipe_buffer *buf;
                size_t off = i->iov_offset, left = size;
                int idx = i->idx;
                if (off) /* make it relative to the beginning of buffer */
                        left += off - pipe->bufs[idx].offset;
                while (1) {
                        buf = &pipe->bufs[idx];
                        if (left <= buf->len)
                                break;
                        left -= buf->len;
                        idx = next_idx(idx, pipe);
                }
                i->idx = idx;
                i->iov_offset = buf->offset + left;
        }
        i->count -= size;
        /* ... and discard everything past that point */
        pipe_truncate(i);
}

void iov_iter_advance(struct iov_iter *i, size_t size)
{
        if (unlikely(i->type & ITER_PIPE)) {
                pipe_advance(i, size);
                return;
        }
        iterate_and_advance(i, size, v, 0, 0, 0)
}
EXPORT_SYMBOL(iov_iter_advance);

void iov_iter_revert(struct iov_iter *i, size_t unroll)
{
        if (!unroll)
                return;
        i->count += unroll;
        if (unlikely(i->type & ITER_PIPE)) {
                struct pipe_inode_info *pipe = i->pipe;
                int idx = i->idx;
                size_t off = i->iov_offset;
                while (1) {
                        size_t n = off - pipe->bufs[idx].offset;
                        if (unroll < n) {
                                off -= (n - unroll);
                                break;
                        }
                        unroll -= n;
                        if (!unroll && idx == i->start_idx) {
                                off = 0;
                                break;
                        }
                        if (!idx--)
                                idx = pipe->buffers - 1;
                        off = pipe->bufs[idx].offset + pipe->bufs[idx].len;
                }
                i->iov_offset = off;
                i->idx = idx;
                pipe_truncate(i);
                return;
        }
        if (unroll <= i->iov_offset) {
                i->iov_offset -= unroll;
                return;
        }
        unroll -= i->iov_offset;
        if (i->type & ITER_BVEC) {
                const struct bio_vec *bvec = i->bvec;
                while (1) {
                        size_t n = (--bvec)->bv_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->bvec = bvec;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        } else { /* same logic for iovec and kvec */
                const struct iovec *iov = i->iov;
                while (1) {
                        size_t n = (--iov)->iov_len;
                        i->nr_segs++;
                        if (unroll <= n) {
                                i->iov = iov;
                                i->iov_offset = n - unroll;
                                return;
                        }
                        unroll -= n;
                }
        }
}
EXPORT_SYMBOL(iov_iter_revert);
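
/*
 * iov_iter_revert() exists for callers that consumed the iterator and then
 * hit a failure they want to undo.  A hedged sketch of the pattern ("buf"
 * and "process" are illustrative names, not part of this file):
 *
 *	size_t copied = copy_from_iter(buf, len, i);
 *	if (process(buf, copied) < 0) {
 *		iov_iter_revert(i, copied);	// put the bytes back
 *		return -EIO;
 *	}
 */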

/*
 * Return the count of just the current iov_iter segment.
 */
size_t iov_iter_single_seg_count(const struct iov_iter *i)
{
        if (unlikely(i->type & ITER_PIPE))
                return i->count;        // it is a silly place, anyway
        if (i->nr_segs == 1)
                return i->count;
        else if (i->type & ITER_BVEC)
                return min(i->count, i->bvec->bv_len - i->iov_offset);
        else
                return min(i->count, i->iov->iov_len - i->iov_offset);
}
EXPORT_SYMBOL(iov_iter_single_seg_count);

void iov_iter_kvec(struct iov_iter *i, int direction,
                        const struct kvec *kvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_KVEC));
        i->type = direction;
        i->kvec = kvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_kvec);

void iov_iter_bvec(struct iov_iter *i, int direction,
                        const struct bio_vec *bvec, unsigned long nr_segs,
                        size_t count)
{
        BUG_ON(!(direction & ITER_BVEC));
        i->type = direction;
        i->bvec = bvec;
        i->nr_segs = nr_segs;
        i->iov_offset = 0;
        i->count = count;
}
EXPORT_SYMBOL(iov_iter_bvec);

void iov_iter_pipe(struct iov_iter *i, int direction,
                        struct pipe_inode_info *pipe,
                        size_t count)
{
        BUG_ON(direction != ITER_PIPE);
        WARN_ON(pipe->nrbufs == pipe->buffers);
        i->type = direction;
        i->pipe = pipe;
        i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
        i->iov_offset = 0;
        i->count = count;
        i->start_idx = i->idx;
}
EXPORT_SYMBOL(iov_iter_pipe);
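
/*
 * Constructor sketch (a hypothetical caller): a driver reading into a
 * single kernel buffer would set up a kvec-backed iterator like so:
 *
 *	struct kvec kv = { .iov_base = buf, .iov_len = len };
 *	struct iov_iter iter;
 *	iov_iter_kvec(&iter, READ | ITER_KVEC, &kv, 1, len);
 *
 * Pipe iterators additionally record start_idx, so iov_iter_revert() knows
 * how far back it may walk.
 */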

unsigned long iov_iter_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;

        if (!size)
                return 0;

        if (unlikely(i->type & ITER_PIPE)) {
                if (i->iov_offset && allocated(&i->pipe->bufs[i->idx]))
                        return size | i->iov_offset;
                return size;
        }
        iterate_all_kinds(i, size, v,
                (res |= (unsigned long)v.iov_base | v.iov_len, 0),
                res |= v.bv_offset | v.bv_len,
                res |= (unsigned long)v.iov_base | v.iov_len
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_alignment);

unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
{
        unsigned long res = 0;
        size_t size = i->count;
        if (!size)
                return 0;

        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return ~0U;
        }

        iterate_all_kinds(i, size, v,
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0), 0),
                (res |= (!res ? 0 : (unsigned long)v.bv_offset) |
                        (size != v.bv_len ? size : 0)),
                (res |= (!res ? 0 : (unsigned long)v.iov_base) |
                        (size != v.iov_len ? size : 0))
        )
        return res;
}
EXPORT_SYMBOL(iov_iter_gap_alignment);

static inline size_t __pipe_get_pages(struct iov_iter *i,
                                size_t maxsize,
                                struct page **pages,
                                int idx,
                                size_t *start)
{
        struct pipe_inode_info *pipe = i->pipe;
        ssize_t n = push_pipe(i, maxsize, &idx, start);
        if (!n)
                return -EFAULT;

        maxsize = n;
        n += *start;
        while (n > 0) {
                get_page(*pages++ = pipe->bufs[idx].page);
                idx = next_idx(idx, pipe);
                n -= PAGE_SIZE;
        }

        return maxsize;
}

static ssize_t pipe_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        unsigned npages;
        size_t capacity;
        int idx;

        if (!sanity(i))
                return -EFAULT;

        data_start(i, &idx, start);
        /* some of this one + all after this one */
        npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
        capacity = min(npages, maxpages) * PAGE_SIZE - *start;

        return __pipe_get_pages(i, min(maxsize, capacity), pages, idx, start);
}

ssize_t iov_iter_get_pages(struct iov_iter *i,
                   struct page **pages, size_t maxsize, unsigned maxpages,
                   size_t *start)
{
        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages(i, pages, maxsize, maxpages, start);
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                if (len > maxpages * PAGE_SIZE)
                        len = maxpages * PAGE_SIZE;
                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, pages);
                if (unlikely(res < 0))
                        return res;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                get_page(*pages = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages);
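
/*
 * Caller sketch for iov_iter_get_pages() (illustrative only): pin one batch
 * of pages, use them, then drop the references:
 *
 *	struct page *pages[16];
 *	size_t off;
 *	ssize_t got = iov_iter_get_pages(i, pages, maxsize, 16, &off);
 *	if (got > 0) {
 *		int n = DIV_ROUND_UP(off + got, PAGE_SIZE);
 *		... do I/O against pages[0..n-1], starting at offset "off" ...
 *		while (n--)
 *			put_page(pages[n]);
 *	}
 */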

static struct page **get_pages_array(size_t n)
{
        struct page **p = kmalloc(n * sizeof(struct page *), GFP_KERNEL);
        if (!p)
                p = vmalloc(n * sizeof(struct page *));
        return p;
}

static ssize_t pipe_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;
        size_t n;
        int idx;
        int npages;

        if (!sanity(i))
                return -EFAULT;

        data_start(i, &idx, start);
        /* some of this one + all after this one */
        npages = ((i->pipe->curbuf - idx - 1) & (i->pipe->buffers - 1)) + 1;
        n = npages * PAGE_SIZE - *start;
        if (maxsize > n)
                maxsize = n;
        else
                npages = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
        p = get_pages_array(npages);
        if (!p)
                return -ENOMEM;
        n = __pipe_get_pages(i, maxsize, p, idx, start);
        if (n > 0)
                *pages = p;
        else
                kvfree(p);
        return n;
}

ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                   struct page ***pages, size_t maxsize,
                   size_t *start)
{
        struct page **p;

        if (maxsize > i->count)
                maxsize = i->count;

        if (!maxsize)
                return 0;

        if (unlikely(i->type & ITER_PIPE))
                return pipe_get_pages_alloc(i, pages, maxsize, start);
        iterate_all_kinds(i, maxsize, v, ({
                unsigned long addr = (unsigned long)v.iov_base;
                size_t len = v.iov_len + (*start = addr & (PAGE_SIZE - 1));
                int n;
                int res;

                addr &= ~(PAGE_SIZE - 1);
                n = DIV_ROUND_UP(len, PAGE_SIZE);
                p = get_pages_array(n);
                if (!p)
                        return -ENOMEM;
                res = get_user_pages_fast(addr, n, (i->type & WRITE) != WRITE, p);
                if (unlikely(res < 0)) {
                        kvfree(p);
                        return res;
                }
                *pages = p;
                return (res == n ? len : res * PAGE_SIZE) - *start;
        0;}),({
                /* can't be more than PAGE_SIZE */
                *start = v.bv_offset;
                *pages = p = get_pages_array(1);
                if (!p)
                        return -ENOMEM;
                get_page(*p = v.bv_page);
                return v.bv_len;
        }),({
                return -EFAULT;
        })
        )
        return 0;
}
EXPORT_SYMBOL(iov_iter_get_pages_alloc);

size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
                               struct iov_iter *i)
{
        char *to = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);
                return 0;
        }
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_from_user(v.iov_base,
                                               (to += v.iov_len) - v.iov_len,
                                               v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck(p + v.bv_offset,
                                                 (to += v.bv_len) - v.bv_len,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck(v.iov_base,
                                                 (to += v.iov_len) - v.iov_len,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_from_iter);

size_t csum_and_copy_to_iter(const void *addr, size_t bytes, __wsum *csum,
                             struct iov_iter *i)
{
        const char *from = addr;
        __wsum sum, next;
        size_t off = 0;
        sum = *csum;
        if (unlikely(i->type & ITER_PIPE)) {
                WARN_ON(1);     /* for now */
                return 0;
        }
        iterate_and_advance(i, bytes, v, ({
                int err = 0;
                next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
                                             v.iov_base,
                                             v.iov_len, 0, &err);
                if (!err) {
                        sum = csum_block_add(sum, next, off);
                        off += v.iov_len;
                }
                err ? v.iov_len : 0;
        }), ({
                char *p = kmap_atomic(v.bv_page);
                next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
                                                 p + v.bv_offset,
                                                 v.bv_len, 0);
                kunmap_atomic(p);
                sum = csum_block_add(sum, next, off);
                off += v.bv_len;
        }),({
                next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
                                                 v.iov_base,
                                                 v.iov_len, 0);
                sum = csum_block_add(sum, next, off);
                off += v.iov_len;
        })
        )
        *csum = sum;
        return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);

int iov_iter_npages(const struct iov_iter *i, int maxpages)
{
        size_t size = i->count;
        int npages = 0;

        if (!size)
                return 0;

        if (unlikely(i->type & ITER_PIPE)) {
                struct pipe_inode_info *pipe = i->pipe;
                size_t off;
                int idx;

                if (!sanity(i))
                        return 0;

                data_start(i, &idx, &off);
                /* some of this one + all after this one */
                npages = ((pipe->curbuf - idx - 1) & (pipe->buffers - 1)) + 1;
                if (npages >= maxpages)
                        return maxpages;
        } else iterate_all_kinds(i, size, v, ({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        0;}),({
                npages++;
                if (npages >= maxpages)
                        return maxpages;
        }),({
                unsigned long p = (unsigned long)v.iov_base;
                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
                        - p / PAGE_SIZE;
                if (npages >= maxpages)
                        return maxpages;
        })
        )
        return npages;
}
EXPORT_SYMBOL(iov_iter_npages);

const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags)
{
        *new = *old;
        if (unlikely(new->type & ITER_PIPE)) {
                WARN_ON(1);
                return NULL;
        }
        if (new->type & ITER_BVEC)
                return new->bvec = kmemdup(new->bvec,
                                    new->nr_segs * sizeof(struct bio_vec),
                                    flags);
        else
                /* iovec and kvec have identical layout */
                return new->iov = kmemdup(new->iov,
                                   new->nr_segs * sizeof(struct iovec),
                                   flags);
}
EXPORT_SYMBOL(dup_iter);

/**
 * import_iovec() - Copy an array of &struct iovec from userspace
 *     into the kernel, check that it is valid, and initialize a new
 *     &struct iov_iter iterator to access it.
 *
 * @type: One of %READ or %WRITE.
 * @uvector: Pointer to the userspace array.
 * @nr_segs: Number of elements in userspace array.
 * @fast_segs: Number of elements in @iov.
 * @iov: (input and output parameter) Pointer to pointer to (usually small
 *     on-stack) kernel array.
 * @i: Pointer to iterator that will be initialized on success.
 *
 * If the array pointed to by *@iov is large enough to hold all @nr_segs,
 * then this function places %NULL in *@iov on return. Otherwise, a new
 * array will be allocated and the result placed in *@iov. This means that
 * the caller may call kfree() on *@iov regardless of whether the small
 * on-stack array was used or not (and regardless of whether this function
 * returns an error or not).
 *
 * Return: 0 on success or negative error code on error.
 */
int import_iovec(int type, const struct iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
EXPORT_SYMBOL(import_iovec);
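
/*
 * Typical import_iovec() usage, as in a readv()-style syscall (a sketch;
 * "vec", "vlen" and "do_the_io" stand in for the caller's arguments):
 *
 *	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
 *	struct iov_iter iter;
 *	int ret = import_iovec(READ, vec, vlen, ARRAY_SIZE(iovstack),
 *			       &iov, &iter);
 *	if (ret < 0)
 *		return ret;
 *	ret = do_the_io(file, &iter);
 *	kfree(iov);	// safe whether or not the stack array was used
 */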

#ifdef CONFIG_COMPAT
#include <linux/compat.h>

int compat_import_iovec(int type, const struct compat_iovec __user * uvector,
                 unsigned nr_segs, unsigned fast_segs,
                 struct iovec **iov, struct iov_iter *i)
{
        ssize_t n;
        struct iovec *p;
        n = compat_rw_copy_check_uvector(type, uvector, nr_segs, fast_segs,
                                  *iov, &p);
        if (n < 0) {
                if (p != *iov)
                        kfree(p);
                *iov = NULL;
                return n;
        }
        iov_iter_init(i, type, p, nr_segs, n);
        *iov = p == *iov ? NULL : p;
        return 0;
}
#endif

int import_single_range(int rw, void __user *buf, size_t len,
                 struct iovec *iov, struct iov_iter *i)
{
        if (len > MAX_RW_COUNT)
                len = MAX_RW_COUNT;
        if (unlikely(!access_ok(!rw, buf, len)))
                return -EFAULT;

        iov->iov_base = buf;
        iov->iov_len = len;
        iov_iter_init(i, rw, iov, 1, len);
        return 0;
}
EXPORT_SYMBOL(import_single_range);
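
/*
 * import_single_range() is the one-buffer analogue of import_iovec(); the
 * caller supplies the iovec storage, so there is nothing to free.  Sketch
 * of a read()-style path (names illustrative):
 *
 *	struct iovec iov;
 *	struct iov_iter iter;
 *	if (import_single_range(READ, ubuf, len, &iov, &iter))
 *		return -EFAULT;
 *	return do_the_io(file, &iter);
 */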