/*
 * mm/shmem.c
 *
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *               2000 Transmeta Corp.
 *               2000-2001 Christoph Rohland
 *               2000-2001 SAP AG
 *               2002 Red Hat Inc.
 * Copyright (C) 2002-2011 Hugh Dickins.
 * Copyright (C) 2011 Google Inc.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/uio.h>
#include <linux/khugepaged.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/falloc.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <uapi/linux/memfd.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#include "internal.h"

#define BLOCKS_PER_PAGE  (PAGE_SIZE/512)
#define VM_ACCT(size)    (PAGE_ALIGN(size) >> PAGE_SHIFT)
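
/*
 * Example of the granularity: with 4KB pages, VM_ACCT(5000) ==
 * PAGE_ALIGN(5000) >> PAGE_SHIFT == 8192 >> 12 == 2, i.e. a 5000-byte
 * object is charged as two pages against the commit limit.
 */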

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

/* Symlinks up to this size are kmalloc'ed instead of using a swappable page */
#define SHORT_SYMLINK_LEN 128

/*
 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
 * inode->i_private (with i_mutex making sure that it has only one user at
 * a time): we would prefer not to enlarge the shmem inode just for that.
 */
struct shmem_falloc {
        wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
        pgoff_t nr_unswapped;   /* how often writepage refused to swap out */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
        return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
        return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
static int shmem_replace_page(struct page **pagep, gfp_t gfp,
                                struct shmem_inode_info *info, pgoff_t index);
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
                struct page **pagep, enum sgp_type sgp,
                gfp_t gfp, struct mm_struct *fault_mm, int *fault_type);

int shmem_getpage(struct inode *inode, pgoff_t index,
                struct page **pagep, enum sgp_type sgp)
{
        return shmem_getpage_gfp(inode, index, pagep, sgp,
                mapping_gfp_mask(inode->i_mapping), NULL, NULL);
}
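
/*
 * Rough usage sketch (hypothetical caller): look up or allocate page 0
 * of a tmpfs inode; on success the page comes back locked and with a
 * reference held, so the caller must unlock and release it:
 *
 *	struct page *page;
 *	int err = shmem_getpage(inode, 0, &page, SGP_CACHE);
 *	if (!err) {
 *		... use page contents ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */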

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
        return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
        return (flags & VM_NORESERVE) ?
                0 : security_vm_enough_memory_mm(current->mm, VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
        if (!(flags & VM_NORESERVE))
                vm_unacct_memory(VM_ACCT(size));
}

static inline int shmem_reacct_size(unsigned long flags,
                loff_t oldsize, loff_t newsize)
{
        if (!(flags & VM_NORESERVE)) {
                if (VM_ACCT(newsize) > VM_ACCT(oldsize))
                        return security_vm_enough_memory_mm(current->mm,
                                        VM_ACCT(newsize) - VM_ACCT(oldsize));
                else if (VM_ACCT(newsize) < VM_ACCT(oldsize))
                        vm_unacct_memory(VM_ACCT(oldsize) - VM_ACCT(newsize));
        }
        return 0;
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow large sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags, long pages)
{
        if (!(flags & VM_NORESERVE))
                return 0;

        return security_vm_enough_memory_mm(current->mm,
                        pages * VM_ACCT(PAGE_SIZE));
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
        if (flags & VM_NORESERVE)
                vm_unacct_memory(pages * VM_ACCT(PAGE_SIZE));
}

static inline bool shmem_inode_acct_block(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (shmem_acct_block(info->flags, pages))
                return false;

        if (sbinfo->max_blocks) {
                if (percpu_counter_compare(&sbinfo->used_blocks,
                                           sbinfo->max_blocks - pages) > 0)
                        goto unacct;
                percpu_counter_add(&sbinfo->used_blocks, pages);
        }

        return true;

unacct:
        shmem_unacct_blocks(info->flags, pages);
        return false;
}

static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (sbinfo->max_blocks)
                percpu_counter_sub(&sbinfo->used_blocks, pages);
        shmem_unacct_blocks(info->flags, pages);
}
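
/*
 * Expected pairing, as seen in the callers below (e.g.
 * shmem_alloc_and_acct_page()): each successful
 * shmem_inode_acct_block(inode, n) is eventually balanced by
 * shmem_inode_unacct_blocks(inode, n), either on allocation failure
 * or when the pages are finally freed.
 */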

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;
static struct file_system_type shmem_fs_type;

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static int shmem_reserve_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                if (!sbinfo->free_inodes) {
                        spin_unlock(&sbinfo->stat_lock);
                        return -ENOSPC;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }
        return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        if (sbinfo->max_inodes) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        long freed;

        freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
        if (freed > 0) {
                info->alloced -= freed;
                inode->i_blocks -= freed * BLOCKS_PER_PAGE;
                shmem_inode_unacct_blocks(inode, freed);
        }
}
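
/*
 * Worked example: if info->alloced == 10, info->swapped == 2 and
 * nrpages == 6, then the mm has dropped 10 - 2 - 6 = 2 hole pages, so
 * alloced drops to 8 and two blocks are unaccounted.
 */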

bool shmem_charge(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long flags;

        if (!shmem_inode_acct_block(inode, pages))
                return false;

        /* nrpages adjustment first, then shmem_recalc_inode() when balanced */
        inode->i_mapping->nrpages += pages;

        spin_lock_irqsave(&info->lock, flags);
        info->alloced += pages;
        inode->i_blocks += pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);

        return true;
}

void shmem_uncharge(struct inode *inode, long pages)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        unsigned long flags;

        /* nrpages adjustment done by __delete_from_page_cache() or caller */

        spin_lock_irqsave(&info->lock, flags);
        info->alloced -= pages;
        inode->i_blocks -= pages * BLOCKS_PER_PAGE;
        shmem_recalc_inode(inode);
        spin_unlock_irqrestore(&info->lock, flags);

        shmem_inode_unacct_blocks(inode, pages);
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
                        pgoff_t index, void *expected, void *replacement)
{
        void **pslot;
        void *item;

        VM_BUG_ON(!expected);
        VM_BUG_ON(!replacement);
        pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
        if (!pslot)
                return -ENOENT;
        item = radix_tree_deref_slot_protected(pslot, &mapping->tree_lock);
        if (item != expected)
                return -ENOENT;
        radix_tree_replace_slot(pslot, replacement);
        return 0;
}

/*
 * Sometimes, before we decide whether to proceed or to fail, we must check
 * that an entry was not already brought back from swap by a racing thread.
 *
 * Checking page is not enough: by the time a SwapCache page is locked, it
 * might be reused, and again be SwapCache, using the same swap as before.
 */
static bool shmem_confirm_swap(struct address_space *mapping,
                               pgoff_t index, swp_entry_t swap)
{
        void *item;

        rcu_read_lock();
        item = radix_tree_lookup(&mapping->page_tree, index);
        rcu_read_unlock();
        return item == swp_to_radix_entry(swap);
}
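
/*
 * Note: swap slots are stored in the radix tree as exceptional entries
 * (see swp_to_radix_entry()), so comparing the looked-up item against
 * the encoded entry is sufficient; no page reference is needed.
 */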

/*
 * Definitions for "huge tmpfs": tmpfs mounted with the huge= option
 *
 * SHMEM_HUGE_NEVER:
 *      disables huge pages for the mount;
 * SHMEM_HUGE_ALWAYS:
 *      enables huge pages for the mount;
 * SHMEM_HUGE_WITHIN_SIZE:
 *      only allocate huge pages if the page will be fully within i_size,
 *      also respect fadvise()/madvise() hints;
 * SHMEM_HUGE_ADVISE:
 *      only allocate huge pages if requested with fadvise()/madvise();
 */

#define SHMEM_HUGE_NEVER        0
#define SHMEM_HUGE_ALWAYS       1
#define SHMEM_HUGE_WITHIN_SIZE  2
#define SHMEM_HUGE_ADVISE       3

/*
 * Special values.
 * Can only be set via /sys/kernel/mm/transparent_hugepage/shmem_enabled:
 *
 * SHMEM_HUGE_DENY:
 *      disables huge on shm_mnt and all mounts, for emergency use;
 * SHMEM_HUGE_FORCE:
 *      enables huge on shm_mnt and all mounts, w/o needing option, for testing;
 *
 */
#define SHMEM_HUGE_DENY         (-1)
#define SHMEM_HUGE_FORCE        (-2)
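
/*
 * Illustration (per the comments above): the first four values map to
 * the tmpfs mount option, e.g.
 *
 *	mount -t tmpfs -o huge=within_size tmpfs /mnt
 *
 * while "deny" and "force" are reserved for
 * /sys/kernel/mm/transparent_hugepage/shmem_enabled.
 */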

#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
/* ifdef here to avoid bloating shmem.o when not necessary */

int shmem_huge __read_mostly;

#if defined(CONFIG_SYSFS) || defined(CONFIG_TMPFS)
static int shmem_parse_huge(const char *str)
{
        if (!strcmp(str, "never"))
                return SHMEM_HUGE_NEVER;
        if (!strcmp(str, "always"))
                return SHMEM_HUGE_ALWAYS;
        if (!strcmp(str, "within_size"))
                return SHMEM_HUGE_WITHIN_SIZE;
        if (!strcmp(str, "advise"))
                return SHMEM_HUGE_ADVISE;
        if (!strcmp(str, "deny"))
                return SHMEM_HUGE_DENY;
        if (!strcmp(str, "force"))
                return SHMEM_HUGE_FORCE;
        return -EINVAL;
}

static const char *shmem_format_huge(int huge)
{
        switch (huge) {
        case SHMEM_HUGE_NEVER:
                return "never";
        case SHMEM_HUGE_ALWAYS:
                return "always";
        case SHMEM_HUGE_WITHIN_SIZE:
                return "within_size";
        case SHMEM_HUGE_ADVISE:
                return "advise";
        case SHMEM_HUGE_DENY:
                return "deny";
        case SHMEM_HUGE_FORCE:
                return "force";
        default:
                VM_BUG_ON(1);
                return "bad_val";
        }
}
#endif

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        LIST_HEAD(list), *pos, *next;
        LIST_HEAD(to_remove);
        struct inode *inode;
        struct shmem_inode_info *info;
        struct page *page;
        unsigned long batch = sc ? sc->nr_to_scan : 128;
        int removed = 0, split = 0;

        if (list_empty(&sbinfo->shrinklist))
                return SHRINK_STOP;

        spin_lock(&sbinfo->shrinklist_lock);
        list_for_each_safe(pos, next, &sbinfo->shrinklist) {
                info = list_entry(pos, struct shmem_inode_info, shrinklist);

                /* pin the inode */
                inode = igrab(&info->vfs_inode);

                /* inode is about to be evicted */
                if (!inode) {
                        list_del_init(&info->shrinklist);
                        removed++;
                        goto next;
                }

                /* Check if there's anything to gain */
                if (round_up(inode->i_size, PAGE_SIZE) ==
                                round_up(inode->i_size, HPAGE_PMD_SIZE)) {
                        list_move(&info->shrinklist, &to_remove);
                        removed++;
                        goto next;
                }

                list_move(&info->shrinklist, &list);
next:
                if (!--batch)
                        break;
        }
        spin_unlock(&sbinfo->shrinklist_lock);

        list_for_each_safe(pos, next, &to_remove) {
                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;
                list_del_init(&info->shrinklist);
                iput(inode);
        }

        list_for_each_safe(pos, next, &list) {
                int ret;

                info = list_entry(pos, struct shmem_inode_info, shrinklist);
                inode = &info->vfs_inode;

                if (nr_to_split && split >= nr_to_split)
                        goto leave;

                page = find_get_page(inode->i_mapping,
                                (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
                if (!page)
                        goto drop;

                /* No huge page at the end of the file: nothing to split */
                if (!PageTransHuge(page)) {
                        put_page(page);
                        goto drop;
                }

                /*
                 * Leave the inode on the list if we failed to lock
                 * the page at this time.
                 *
                 * Waiting for the lock may lead to deadlock in the
                 * reclaim path.
                 */
                if (!trylock_page(page)) {
                        put_page(page);
                        goto leave;
                }

                ret = split_huge_page(page);
                unlock_page(page);
                put_page(page);

                /* If split failed leave the inode on the list */
                if (ret)
                        goto leave;

                split++;
drop:
                list_del_init(&info->shrinklist);
                removed++;
leave:
                iput(inode);
        }

        spin_lock(&sbinfo->shrinklist_lock);
        list_splice_tail(&list, &sbinfo->shrinklist);
        sbinfo->shrinklist_len -= removed;
        spin_unlock(&sbinfo->shrinklist_lock);

        return split;
}

static long shmem_unused_huge_scan(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        if (!READ_ONCE(sbinfo->shrinklist_len))
                return SHRINK_STOP;

        return shmem_unused_huge_shrink(sbinfo, sc, 0);
}

static long shmem_unused_huge_count(struct super_block *sb,
                struct shrink_control *sc)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        return READ_ONCE(sbinfo->shrinklist_len);
}
#else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */

#define shmem_huge SHMEM_HUGE_DENY

static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
                struct shrink_control *sc, unsigned long nr_to_split)
{
        return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */

/*
 * Like add_to_page_cache_locked, but error if expected item has gone.
 */
static int shmem_add_to_page_cache(struct page *page,
                                   struct address_space *mapping,
                                   pgoff_t index, void *expected)
{
        int error, nr = hpage_nr_pages(page);

        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(index != round_down(index, nr), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
        VM_BUG_ON(expected && PageTransHuge(page));

        page_ref_add(page, nr);
        page->mapping = mapping;
        page->index = index;

        spin_lock_irq(&mapping->tree_lock);
        if (PageTransHuge(page)) {
                void __rcu **results;
                pgoff_t idx;
                int i;

                error = 0;
                if (radix_tree_gang_lookup_slot(&mapping->page_tree,
                                        &results, &idx, index, 1) &&
                                idx < index + HPAGE_PMD_NR) {
                        error = -EEXIST;
                }

                if (!error) {
                        for (i = 0; i < HPAGE_PMD_NR; i++) {
                                error = radix_tree_insert(&mapping->page_tree,
                                                index + i, page + i);
                                VM_BUG_ON(error);
                        }
                        count_vm_event(THP_FILE_ALLOC);
                }
        } else if (!expected) {
                error = radix_tree_insert(&mapping->page_tree, index, page);
        } else {
                error = shmem_radix_tree_replace(mapping, index, expected,
                                                                 page);
        }

        if (!error) {
                mapping->nrpages += nr;
                if (PageTransHuge(page))
                        __inc_node_page_state(page, NR_SHMEM_THPS);
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                __mod_node_page_state(page_pgdat(page), NR_SHMEM, nr);
                spin_unlock_irq(&mapping->tree_lock);
        } else {
                page->mapping = NULL;
                spin_unlock_irq(&mapping->tree_lock);
                page_ref_sub(page, nr);
        }
        return error;
}

/*
 * Like delete_from_page_cache, but substitutes swap for page.
 */
static void shmem_delete_from_page_cache(struct page *page, void *radswap)
{
        struct address_space *mapping = page->mapping;
        int error;

        VM_BUG_ON_PAGE(PageCompound(page), page);

        spin_lock_irq(&mapping->tree_lock);
        error = shmem_radix_tree_replace(mapping, page->index, page, radswap);
        page->mapping = NULL;
        mapping->nrpages--;
        __dec_node_page_state(page, NR_FILE_PAGES);
        __dec_node_page_state(page, NR_SHMEM);
        spin_unlock_irq(&mapping->tree_lock);
        put_page(page);
        BUG_ON(error);
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
                           pgoff_t index, void *radswap)
{
        void *old;

        spin_lock_irq(&mapping->tree_lock);
        old = radix_tree_delete_item(&mapping->page_tree, index, radswap);
        spin_unlock_irq(&mapping->tree_lock);
        if (old != radswap)
                return -ENOENT;
        free_swap_and_cache(radix_to_swp_entry(radswap));
        return 0;
}

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given offsets are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_partial_swap_usage(struct address_space *mapping,
                                                pgoff_t start, pgoff_t end)
{
        struct radix_tree_iter iter;
        void **slot;
        struct page *page;
        unsigned long swapped = 0;

        rcu_read_lock();

        radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
                if (iter.index >= end)
                        break;

                page = radix_tree_deref_slot(slot);

                if (radix_tree_deref_retry(page)) {
                        slot = radix_tree_iter_retry(&iter);
                        continue;
                }

                if (radix_tree_exceptional_entry(page))
                        swapped++;

                if (need_resched()) {
                        cond_resched_rcu();
                        slot = radix_tree_iter_next(&iter);
                }
        }

        rcu_read_unlock();

        return swapped << PAGE_SHIFT;
}
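
/*
 * Example: if three entries in [start, end) are swap entries, this
 * reports 3 << PAGE_SHIFT bytes, i.e. 12288 bytes with 4KB pages.
 */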

/*
 * Determine (in bytes) how many of the shmem object's pages mapped by the
 * given vma are swapped out.
 *
 * This is safe to call without i_mutex or mapping->tree_lock thanks to RCU,
 * as long as the inode doesn't go away and racy results are not a problem.
 */
unsigned long shmem_swap_usage(struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(vma->vm_file);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct address_space *mapping = inode->i_mapping;
        unsigned long swapped;

        /* Be careful as we don't hold info->lock */
        swapped = READ_ONCE(info->swapped);

        /*
         * The easier cases are when the shmem object has nothing in swap, or
         * the vma maps it whole. Then we can simply use the stats that we
         * already track.
         */
        if (!swapped)
                return 0;

        if (!vma->vm_pgoff && vma->vm_end - vma->vm_start >= inode->i_size)
                return swapped << PAGE_SHIFT;

        /* Here comes the more involved part */
        return shmem_partial_swap_usage(mapping,
                        linear_page_index(vma, vma->vm_start),
                        linear_page_index(vma, vma->vm_end));
}

/*
 * SysV IPC SHM_UNLOCK restores Unevictable pages to their evictable lists.
 */
void shmem_unlock_mapping(struct address_space *mapping)
{
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        pgoff_t index = 0;

        pagevec_init(&pvec, 0);
        /*
         * Minor point, but we might as well stop if someone else SHM_LOCKs it.
         */
        while (!mapping_unevictable(mapping)) {
                /*
                 * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
                 * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
                 */
                pvec.nr = find_get_entries(mapping, index,
                                           PAGEVEC_SIZE, pvec.pages, indices);
                if (!pvec.nr)
                        break;
                index = indices[pvec.nr - 1] + 1;
                pagevec_remove_exceptionals(&pvec);
                check_move_unevictable_pages(pvec.pages, pvec.nr);
                pagevec_release(&pvec);
                cond_resched();
        }
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
 */
static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                                                 bool unfalloc)
{
        struct address_space *mapping = inode->i_mapping;
        struct shmem_inode_info *info = SHMEM_I(inode);
        pgoff_t start = (lstart + PAGE_SIZE - 1) >> PAGE_SHIFT;
        pgoff_t end = (lend + 1) >> PAGE_SHIFT;
        unsigned int partial_start = lstart & (PAGE_SIZE - 1);
        unsigned int partial_end = (lend + 1) & (PAGE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t indices[PAGEVEC_SIZE];
        long nr_swaps_freed = 0;
        pgoff_t index;
        int i;

        if (lend == -1)
                end = -1;       /* unsigned, so actually very big */

        pagevec_init(&pvec, 0);
        index = start;
        while (index < end) {
                pvec.nr = find_get_entries(mapping, index,
                        min(end - index, (pgoff_t)PAGEVEC_SIZE),
                        pvec.pages, indices);
                if (!pvec.nr)
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
                                nr_swaps_freed += !shmem_free_swap(mapping,
                                                                index, page);
                                continue;
                        }

                        VM_BUG_ON_PAGE(page_to_pgoff(page) != index, page);

                        if (!trylock_page(page))
                                continue;

                        if (PageTransTail(page)) {
                                /* Middle of THP: zero out the page */
                                clear_highpage(page);
                                unlock_page(page);
                                continue;
                        } else if (PageTransHuge(page)) {
                                if (index == round_down(end, HPAGE_PMD_NR)) {
                                        /*
                                         * Range ends in the middle of THP:
                                         * zero out the page
                                         */
                                        clear_highpage(page);
                                        unlock_page(page);
                                        continue;
                                }
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                        }

                        if (!unfalloc || !PageUptodate(page)) {
                                VM_BUG_ON_PAGE(PageTail(page), page);
                                if (page_mapping(page) == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                cond_resched();
                index++;
        }

        if (partial_start) {
                struct page *page = NULL;
                shmem_getpage(inode, start - 1, &page, SGP_READ);
                if (page) {
                        unsigned int top = PAGE_SIZE;
                        if (start > end) {
                                top = partial_end;
                                partial_end = 0;
                        }
                        zero_user_segment(page, partial_start, top);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (partial_end) {
                struct page *page = NULL;
                shmem_getpage(inode, end, &page, SGP_READ);
                if (page) {
                        zero_user_segment(page, 0, partial_end);
                        set_page_dirty(page);
                        unlock_page(page);
                        put_page(page);
                }
        }
        if (start >= end)
                return;

        index = start;
        while (index < end) {
                cond_resched();

                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
                        /* If all gone or hole-punch or unfalloc, we're done */
                        if (index == start || end != -1)
                                break;
                        /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        index = indices[i];
                        if (index >= end)
                                break;

                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
                                if (shmem_free_swap(mapping, index, page)) {
                                        /* Swap was replaced by page: retry */
                                        index--;
                                        break;
                                }
                                nr_swaps_freed++;
                                continue;
                        }

                        lock_page(page);

                        if (PageTransTail(page)) {
                                /* Middle of THP: zero out the page */
                                clear_highpage(page);
                                unlock_page(page);
                                /*
                                 * Partial THP truncate due to 'start' in
                                 * middle of THP: no need to look at these
                                 * pages again on any !pvec.nr restart.
                                 */
                                if (index != round_down(end, HPAGE_PMD_NR))
                                        start++;
                                continue;
                        } else if (PageTransHuge(page)) {
                                if (index == round_down(end, HPAGE_PMD_NR)) {
                                        /*
                                         * Range ends in the middle of THP:
                                         * zero out the page
                                         */
                                        clear_highpage(page);
                                        unlock_page(page);
                                        continue;
                                }
                                index += HPAGE_PMD_NR - 1;
                                i += HPAGE_PMD_NR - 1;
                        }

                        if (!unfalloc || !PageUptodate(page)) {
                                VM_BUG_ON_PAGE(PageTail(page), page);
                                if (page_mapping(page) == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
                                } else {
                                        /* Page was replaced by swap: retry */
                                        unlock_page(page);
                                        index--;
                                        break;
                                }
                        }
                        unlock_page(page);
                }
                pagevec_remove_exceptionals(&pvec);
                pagevec_release(&pvec);
                index++;
        }

        spin_lock_irq(&info->lock);
        info->swapped -= nr_swaps_freed;
        shmem_recalc_inode(inode);
        spin_unlock_irq(&info->lock);
}

void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
        shmem_undo_range(inode, lstart, lend, false);
        inode->i_ctime = inode->i_mtime = current_time(inode);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);
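
/*
 * Sketch of the byte-range convention (lend is inclusive, and
 * (loff_t)-1 means "to the end of file"): punching out the bytes
 * [start, start + len) of a tmpfs inode is
 *
 *	shmem_truncate_range(inode, start, start + len - 1);
 */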

static int shmem_getattr(struct vfsmount *mnt, struct dentry *dentry,
                         struct kstat *stat)
{
        struct inode *inode = dentry->d_inode;
        struct shmem_inode_info *info = SHMEM_I(inode);

        if (info->alloced - info->swapped != inode->i_mapping->nrpages) {
                spin_lock_irq(&info->lock);
                shmem_recalc_inode(inode);
                spin_unlock_irq(&info->lock);
        }
        generic_fillattr(inode, stat);
        return 0;
}

static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
        int error;

        error = setattr_prepare(dentry, attr);
        if (error)
                return error;

        if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                /* protected by i_mutex */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;

                if (newsize != oldsize) {
                        error = shmem_reacct_size(SHMEM_I(inode)->flags,
                                        oldsize, newsize);
                        if (error)
                                return error;
                        i_size_write(inode, newsize);
                        inode->i_ctime = inode->i_mtime = current_time(inode);
                }
                if (newsize <= oldsize) {
                        loff_t holebegin = round_up(newsize, PAGE_SIZE);
                        if (oldsize > holebegin)
                                unmap_mapping_range(inode->i_mapping,
                                                        holebegin, 0, 1);
                        if (info->alloced)
                                shmem_truncate_range(inode,
                                                        newsize, (loff_t)-1);
                        /* unmap again to remove racily COWed private pages */
                        if (oldsize > holebegin)
                                unmap_mapping_range(inode->i_mapping,
                                                        holebegin, 0, 1);

                        /*
                         * Part of the huge page can be beyond i_size: subject
                         * to shrink under memory pressure.
                         */
                        if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
                                spin_lock(&sbinfo->shrinklist_lock);
                                /*
                                 * _careful to defend against unlocked access to
                                 * ->shrink_list in shmem_unused_huge_shrink()
                                 */
                                if (list_empty_careful(&info->shrinklist)) {
                                        list_add_tail(&info->shrinklist,
                                                        &sbinfo->shrinklist);
                                        sbinfo->shrinklist_len++;
                                }
                                spin_unlock(&sbinfo->shrinklist_lock);
                        }
                }
        }

        setattr_copy(inode, attr);
        if (attr->ia_valid & ATTR_MODE)
                error = posix_acl_chmod(inode, inode->i_mode);
        return error;
}

static void shmem_evict_inode(struct inode *inode)
{
        struct shmem_inode_info *info = SHMEM_I(inode);
        struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);

        if (inode->i_mapping->a_ops == &shmem_aops) {
                shmem_unacct_size(info->flags, inode->i_size);
                inode->i_size = 0;
                shmem_truncate_range(inode, 0, (loff_t)-1);
                if (!list_empty(&info->shrinklist)) {
                        spin_lock(&sbinfo->shrinklist_lock);
                        if (!list_empty(&info->shrinklist)) {
                                list_del_init(&info->shrinklist);
                                sbinfo->shrinklist_len--;
                        }
                        spin_unlock(&sbinfo->shrinklist_lock);
                }
                if (!list_empty(&info->swaplist)) {
                        mutex_lock(&shmem_swaplist_mutex);
                        list_del_init(&info->swaplist);
                        mutex_unlock(&shmem_swaplist_mutex);
                }
        }

        simple_xattrs_free(&info->xattrs);
        WARN_ON(inode->i_blocks);
        shmem_free_inode(inode->i_sb);
        clear_inode(inode);
}

/*
 * If swap found in inode, free it and move page from swapcache to filecache.
 */
static int shmem_unuse_inode(struct shmem_inode_info *info,
                             swp_entry_t swap, struct page **pagep)
{
        struct address_space *mapping = info->vfs_inode.i_mapping;
        void *radswap;
        pgoff_t index;
        gfp_t gfp;
        int error = 0;

        radswap = swp_to_radix_entry(swap);
        index = radix_tree_locate_item(&mapping->page_tree, radswap);
        if (index == -1)
                return -EAGAIN; /* tell shmem_unuse we found nothing */

        /*
         * Move _head_ to start search for next from here.
         * But be careful: shmem_evict_inode checks list_empty without taking
         * mutex, and there's an instant in list_move_tail when info->swaplist
         * would appear empty, if it were the only one on shmem_swaplist.
         */
        if (shmem_swaplist.next != &info->swaplist)
                list_move_tail(&shmem_swaplist, &info->swaplist);

        gfp = mapping_gfp_mask(mapping);
        if (shmem_should_replace_page(*pagep, gfp)) {
                mutex_unlock(&shmem_swaplist_mutex);
                error = shmem_replace_page(pagep, gfp, info, index);
                mutex_lock(&shmem_swaplist_mutex);
                /*
                 * We needed to drop mutex to make that restrictive page
                 * allocation, but the inode might have been freed while we
                 * dropped it: although a racing shmem_evict_inode() cannot
                 * complete without emptying the radix_tree, our page lock
                 * on this swapcache page is not enough to prevent that -
                 * free_swap_and_cache() of our swap entry will only
                 * trylock_page(), removing swap from radix_tree whatever.
                 *
                 * We must not proceed to shmem_add_to_page_cache() if the
                 * inode has been freed, but of course we cannot rely on
                 * inode or mapping or info to check that.  However, we can
                 * safely check if our swap entry is still in use (and here
                 * it can't have got reused for another page): if it's still
                 * in use, then the inode cannot have been freed yet, and we
                 * can safely proceed (if it's no longer in use, that tells
                 * nothing about the inode, but we don't need to unuse swap).
                 */
                if (!page_swapcount(*pagep))
                        error = -ENOENT;
        }

        /*
         * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
         * but also to hold up shmem_evict_inode(): so inode cannot be freed
         * beneath us (pagelock doesn't help until the page is in pagecache).
         */
        if (!error)
                error = shmem_add_to_page_cache(*pagep, mapping, index,
                                                radswap);
        if (error != -ENOMEM) {
                /*
                 * Truncation and eviction use free_swap_and_cache(), which
                 * only does trylock page: if we raced, best clean up here.
                 */
                delete_from_swap_cache(*pagep);
                set_page_dirty(*pagep);
                if (!error) {
                        spin_lock_irq(&info->lock);
                        info->swapped--;
                        spin_unlock_irq(&info->lock);
                        swap_free(swap);
                }
        }
        return error;
}

/*
 * Search through swapped inodes to find and replace swap by page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
        struct list_head *this, *next;
        struct shmem_inode_info *info;
        struct mem_cgroup *memcg;
        int error = 0;

        /*
         * There's a faint possibility that swap page was replaced before
         * caller locked it: caller will come back later with the right page.
         */
        if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
                goto out;

        /*
         * Charge page using GFP_KERNEL while we can wait, before taking
         * the shmem_swaplist_mutex which might hold up shmem_writepage().
         * Charged back to the user (not to caller) when swap account is used.
         */
        error = mem_cgroup_try_charge(page, current->mm, GFP_KERNEL, &memcg,
                        false);
        if (error)
                goto out;
        /* No radix_tree_preload: swap entry keeps a place for page in tree */
        error = -EAGAIN;

        mutex_lock(&shmem_swaplist_mutex);
        list_for_each_safe(this, next, &shmem_swaplist) {
                info = list_entry(this, struct shmem_inode_info, swaplist);
                if (info->swapped)
                        error = shmem_unuse_inode(info, swap, &page);
                else
                        list_del_init(&info->swaplist);
                cond_resched();
                if (error != -EAGAIN)
                        break;
                /* found nothing in this: move on to search the next */
        }
        mutex_unlock(&shmem_swaplist_mutex);

        if (error) {
                if (error != -ENOMEM)
                        error = 0;
                mem_cgroup_cancel_charge(page, memcg, false);
        } else
                mem_cgroup_commit_charge(page, memcg, true, false);
out:
        unlock_page(page);
        put_page(page);
        return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
        struct shmem_inode_info *info;
        struct address_space *mapping;
        struct inode *inode;
        swp_entry_t swap;
        pgoff_t index;

        VM_BUG_ON_PAGE(PageCompound(page), page);
        BUG_ON(!PageLocked(page));
        mapping = page->mapping;
        index = page->index;
        inode = mapping->host;
        info = SHMEM_I(inode);
        if (info->flags & VM_LOCKED)
                goto redirty;
        if (!total_swap_pages)
                goto redirty;

        /*
         * Our capabilities prevent regular writeback or sync from ever calling
         * shmem_writepage; but a stacking filesystem might use ->writepage of
         * its underlying filesystem, in which case tmpfs should write out to
         * swap only in response to memory pressure, and not for the writeback
         * threads or sync.
         */
        if (!wbc->for_reclaim) {
                WARN_ON_ONCE(1);        /* Still happens? Tell us about it! */
                goto redirty;
        }

        /*
         * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
         * value into swapfile.c, the only way we can correctly account for a
         * fallocated page arriving here is now to initialize it and write it.
         *
         * That's okay for a page already fallocated earlier, but if we have
         * not yet completed the fallocation, then (a) we want to keep track
         * of this page in case we have to undo it, and (b) it may not be a
         * good idea to continue anyway, once we're pushing into swap.  So
         * reactivate the page, and let shmem_fallocate() quit when too many.
         */
        if (!PageUptodate(page)) {
                if (inode->i_private) {
                        struct shmem_falloc *shmem_falloc;
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
                            !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
                        else
                                shmem_falloc = NULL;
                        spin_unlock(&inode->i_lock);
                        if (shmem_falloc)
                                goto redirty;
                }
                clear_highpage(page);
                flush_dcache_page(page);
                SetPageUptodate(page);
        }

        swap = get_swap_page();
        if (!swap.val)
                goto redirty;

        if (mem_cgroup_try_charge_swap(page, swap))
                goto free_swap;

        /*
         * Add inode to shmem_unuse()'s list of swapped-out inodes,
         * if it's not already there.  Do it now before the page is
         * moved to swap cache, when its pagelock no longer protects
         * the inode from eviction.  But don't unlock the mutex until
         * we've incremented swapped, because shmem_unuse_inode() will
         * prune a !swapped inode from the swaplist under this mutex.
         */
        mutex_lock(&shmem_swaplist_mutex);
        if (list_empty(&info->swaplist))
                list_add_tail(&info->swaplist, &shmem_swaplist);

        if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
                spin_lock_irq(&info->lock);
                shmem_recalc_inode(inode);
                info->swapped++;
                spin_unlock_irq(&info->lock);

                swap_shmem_alloc(swap);
                shmem_delete_from_page_cache(page, swp_to_radix_entry(swap));

                mutex_unlock(&shmem_swaplist_mutex);
                BUG_ON(page_mapped(page));
                swap_writepage(page, wbc);
                return 0;
        }

        mutex_unlock(&shmem_swaplist_mutex);
free_swap:
        swapcache_free(swap);
redirty:
        set_page_dirty(page);
        if (wbc->for_reclaim)
                return AOP_WRITEPAGE_ACTIVATE;  /* Return with page locked */
        unlock_page(page);
        return 0;
}
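
/*
 * Note on the redirty path above: returning AOP_WRITEPAGE_ACTIVATE
 * hands the still-locked page back to vmscan, which unlocks it and
 * moves it to the active list, sparing it from immediate reclaim
 * retries.
 */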
1331
1332 #if defined(CONFIG_NUMA) && defined(CONFIG_TMPFS)
1333 static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1334 {
1335         char buffer[64];
1336
1337         if (!mpol || mpol->mode == MPOL_DEFAULT)
1338                 return;         /* show nothing */
1339
1340         mpol_to_str(buffer, sizeof(buffer), mpol);
1341
1342         seq_printf(seq, ",mpol=%s", buffer);
1343 }
1344
1345 static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1346 {
1347         struct mempolicy *mpol = NULL;
1348         if (sbinfo->mpol) {
1349                 spin_lock(&sbinfo->stat_lock);  /* prevent replace/use races */
1350                 mpol = sbinfo->mpol;
1351                 mpol_get(mpol);
1352                 spin_unlock(&sbinfo->stat_lock);
1353         }
1354         return mpol;
1355 }
1356 #else /* !CONFIG_NUMA || !CONFIG_TMPFS */
1357 static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
1358 {
1359 }
1360 static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
1361 {
1362         return NULL;
1363 }
1364 #endif /* CONFIG_NUMA && CONFIG_TMPFS */
1365 #ifndef CONFIG_NUMA
1366 #define vm_policy vm_private_data
1367 #endif
1368
1369 static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
1370                 struct shmem_inode_info *info, pgoff_t index)
1371 {
1372         /* Create a pseudo vma that just contains the policy */
1373         vma->vm_start = 0;
1374         /* Bias interleave by inode number to distribute better across nodes */
1375         vma->vm_pgoff = index + info->vfs_inode.i_ino;
1376         vma->vm_ops = NULL;
1377         vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
1378 }
1379
1380 static void shmem_pseudo_vma_destroy(struct vm_area_struct *vma)
1381 {
1382         /* Drop reference taken by mpol_shared_policy_lookup() */
1383         mpol_cond_put(vma->vm_policy);
1384 }
1385
1386 static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
1387                         struct shmem_inode_info *info, pgoff_t index)
1388 {
1389         struct vm_area_struct pvma;
1390         struct page *page;
1391
1392         shmem_pseudo_vma_init(&pvma, info, index);
1393         page = swapin_readahead(swap, gfp, &pvma, 0);
1394         shmem_pseudo_vma_destroy(&pvma);
1395
1396         return page;
1397 }
1398
1399 static struct page *shmem_alloc_hugepage(gfp_t gfp,
1400                 struct shmem_inode_info *info, pgoff_t index)
1401 {
1402         struct vm_area_struct pvma;
1403         struct inode *inode = &info->vfs_inode;
1404         struct address_space *mapping = inode->i_mapping;
1405         pgoff_t idx, hindex;
1406         void __rcu **results;
1407         struct page *page;
1408
1409         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1410                 return NULL;
1411
1412         hindex = round_down(index, HPAGE_PMD_NR);
1413         rcu_read_lock();
1414         if (radix_tree_gang_lookup_slot(&mapping->page_tree, &results, &idx,
1415                                 hindex, 1) && idx < hindex + HPAGE_PMD_NR) {
1416                 rcu_read_unlock();
1417                 return NULL;
1418         }
1419         rcu_read_unlock();
1420
1421         shmem_pseudo_vma_init(&pvma, info, hindex);
1422         page = alloc_pages_vma(gfp | __GFP_COMP | __GFP_NORETRY | __GFP_NOWARN,
1423                         HPAGE_PMD_ORDER, &pvma, 0, numa_node_id(), true);
1424         shmem_pseudo_vma_destroy(&pvma);
1425         if (page)
1426                 prep_transhuge_page(page);
1427         return page;
1428 }
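/*
 * Note on the gang lookup above: if any slot in the naturally aligned
 * group [hindex, hindex + HPAGE_PMD_NR) is already occupied, whether by
 * a page or a swap entry, a huge page cannot be inserted there, so we
 * bail out before paying for a compound allocation.
 */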
1429
1430 static struct page *shmem_alloc_page(gfp_t gfp,
1431                         struct shmem_inode_info *info, pgoff_t index)
1432 {
1433         struct vm_area_struct pvma;
1434         struct page *page;
1435
1436         shmem_pseudo_vma_init(&pvma, info, index);
1437         page = alloc_page_vma(gfp, &pvma, 0);
1438         shmem_pseudo_vma_destroy(&pvma);
1439
1440         return page;
1441 }
1442
1443 static struct page *shmem_alloc_and_acct_page(gfp_t gfp,
1444                 struct inode *inode,
1445                 pgoff_t index, bool huge)
1446 {
1447         struct shmem_inode_info *info = SHMEM_I(inode);
1448         struct page *page;
1449         int nr;
1450         int err = -ENOSPC;
1451
1452         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1453                 huge = false;
1454         nr = huge ? HPAGE_PMD_NR : 1;
1455
1456         if (!shmem_inode_acct_block(inode, nr))
1457                 goto failed;
1458
1459         if (huge)
1460                 page = shmem_alloc_hugepage(gfp, info, index);
1461         else
1462                 page = shmem_alloc_page(gfp, info, index);
1463         if (page) {
1464                 __SetPageLocked(page);
1465                 __SetPageSwapBacked(page);
1466                 return page;
1467         }
1468
1469         err = -ENOMEM;
1470         shmem_inode_unacct_blocks(inode, nr);
1471 failed:
1472         return ERR_PTR(err);
1473 }
1474
1475 /*
1476  * When a page is moved from swapcache to shmem filecache (either by the
1477  * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
1478  * shmem_unuse_inode()), it may have been read in earlier from swap, in
1479  * ignorance of the mapping it belongs to.  If that mapping has special
1480  * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
1481  * we may need to copy to a suitable page before moving to filecache.
1482  *
1483  * In a future release, this may well be extended to respect cpuset and
1484  * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
1485  * but for now it is a simple matter of zone.
1486  */
1487 static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
1488 {
1489         return page_zonenum(page) > gfp_zone(gfp);
1490 }
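/*
 * Illustrative example (assumed values, not from this file): a
 * gma500-style mapping might use gfp = GFP_KERNEL | __GFP_DMA32, making
 * gfp_zone(gfp) ZONE_DMA32; a page swapped in to ZONE_NORMAL then has
 * page_zonenum(page) > gfp_zone(gfp), and must be copied to a page
 * allocated below 4GB before it may enter this mapping's filecache.
 */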
1491
1492 static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1493                                 struct shmem_inode_info *info, pgoff_t index)
1494 {
1495         struct page *oldpage, *newpage;
1496         struct address_space *swap_mapping;
1497         swp_entry_t entry;
1498         pgoff_t swap_index;
1499         int error;
1500
1501         oldpage = *pagep;
1502         entry.val = page_private(oldpage);
1503         swap_index = swp_offset(entry);
1504         swap_mapping = page_mapping(oldpage);
1505
1506         /*
1507          * We have arrived here because our zones are constrained, so don't
1508          * limit chance of success by further cpuset and node constraints.
1509          */
1510         gfp &= ~GFP_CONSTRAINT_MASK;
1511         newpage = shmem_alloc_page(gfp, info, index);
1512         if (!newpage)
1513                 return -ENOMEM;
1514
1515         get_page(newpage);
1516         copy_highpage(newpage, oldpage);
1517         flush_dcache_page(newpage);
1518
1519         __SetPageLocked(newpage);
1520         __SetPageSwapBacked(newpage);
1521         SetPageUptodate(newpage);
1522         set_page_private(newpage, entry.val);
1523         SetPageSwapCache(newpage);
1524
1525         /*
1526          * Our caller will very soon move newpage out of swapcache, but it's
1527          * a nice clean interface for us to replace oldpage by newpage there.
1528          */
1529         spin_lock_irq(&swap_mapping->tree_lock);
1530         error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
1531                                                                    newpage);
1532         if (!error) {
1533                 __inc_node_page_state(newpage, NR_FILE_PAGES);
1534                 __dec_node_page_state(oldpage, NR_FILE_PAGES);
1535         }
1536         spin_unlock_irq(&swap_mapping->tree_lock);
1537
1538         if (unlikely(error)) {
1539                 /*
1540                  * Is this possible?  I think not, now that our callers check
1541                  * both PageSwapCache and page_private after getting page lock;
1542                  * but be defensive.  Reverse old to newpage for clear and free.
1543                  */
1544                 oldpage = newpage;
1545         } else {
1546                 mem_cgroup_migrate(oldpage, newpage);
1547                 lru_cache_add_anon(newpage);
1548                 *pagep = newpage;
1549         }
1550
1551         ClearPageSwapCache(oldpage);
1552         set_page_private(oldpage, 0);
1553
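        /*
         * Two puts: one drops the reference that the swap cache held on
         * oldpage, the other drops the reference our caller's lookup took;
         * newpage keeps the extra reference from get_page() above on
         * behalf of the swap cache it now occupies.
         */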
1554         unlock_page(oldpage);
1555         put_page(oldpage);
1556         put_page(oldpage);
1557         return error;
1558 }
1559
1560 /*
1561  * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1562  *
1563  * If we allocate a new one we do not mark it dirty. That's up to the
1564  * vm. If we swap it in we mark it dirty, since we also free the swap
1565  * entry: a page cannot live in both the swap cache and the page cache.
1566  *
1567  * fault_mm and fault_type are only supplied by shmem_fault:
1568  * otherwise they are NULL.
1569  */
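/*
 * Caller-side sketch (illustrative): most callers go through the
 * shmem_getpage() wrapper, along the lines of
 *
 *	struct page *page;
 *	int err = shmem_getpage(inode, index, &page, SGP_CACHE);
 *	if (err)
 *		return err;
 *	...use the page, which is returned locked...
 *	unlock_page(page);
 *	put_page(page);
 *
 * SGP_READ may legitimately set *pagep to NULL when the index falls in
 * a hole.
 */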
1570 static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
1571         struct page **pagep, enum sgp_type sgp, gfp_t gfp,
1572         struct mm_struct *fault_mm, int *fault_type)
1573 {
1574         struct address_space *mapping = inode->i_mapping;
1575         struct shmem_inode_info *info = SHMEM_I(inode);
1576         struct shmem_sb_info *sbinfo;
1577         struct mm_struct *charge_mm;
1578         struct mem_cgroup *memcg;
1579         struct page *page;
1580         swp_entry_t swap;
1581         enum sgp_type sgp_huge = sgp;
1582         pgoff_t hindex = index;
1583         int error;
1584         int once = 0;
1585         int alloced = 0;
1586
1587         if (index > (MAX_LFS_FILESIZE >> PAGE_SHIFT))
1588                 return -EFBIG;
1589         if (sgp == SGP_NOHUGE || sgp == SGP_HUGE)
1590                 sgp = SGP_CACHE;
1591 repeat:
1592         swap.val = 0;
1593         page = find_lock_entry(mapping, index);
1594         if (radix_tree_exceptional_entry(page)) {
1595                 swap = radix_to_swp_entry(page);
1596                 page = NULL;
1597         }
1598
1599         if (sgp <= SGP_CACHE &&
1600             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1601                 error = -EINVAL;
1602                 goto unlock;
1603         }
1604
1605         if (page && sgp == SGP_WRITE)
1606                 mark_page_accessed(page);
1607
1608         /* fallocated page? */
1609         if (page && !PageUptodate(page)) {
1610                 if (sgp != SGP_READ)
1611                         goto clear;
1612                 unlock_page(page);
1613                 put_page(page);
1614                 page = NULL;
1615         }
1616         if (page || (sgp == SGP_READ && !swap.val)) {
1617                 *pagep = page;
1618                 return 0;
1619         }
1620
1621         /*
1622          * Fast cache lookup did not find it:
1623          * bring it back from swap or allocate.
1624          */
1625         sbinfo = SHMEM_SB(inode->i_sb);
1626         charge_mm = fault_mm ? : current->mm;
1627
1628         if (swap.val) {
1629                 /* Look it up and read it in. */
1630                 page = lookup_swap_cache(swap);
1631                 if (!page) {
1632                         /* Or update major stats only when swapin succeeds?? */
1633                         if (fault_type) {
1634                                 *fault_type |= VM_FAULT_MAJOR;
1635                                 count_vm_event(PGMAJFAULT);
1636                                 mem_cgroup_count_vm_event(fault_mm, PGMAJFAULT);
1637                         }
1638                         /* Here we actually start the io */
1639                         page = shmem_swapin(swap, gfp, info, index);
1640                         if (!page) {
1641                                 error = -ENOMEM;
1642                                 goto failed;
1643                         }
1644                 }
1645
1646                 /* We have to do this with page locked to prevent races */
1647                 lock_page(page);
1648                 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1649                     !shmem_confirm_swap(mapping, index, swap)) {
1650                         error = -EEXIST;        /* try again */
1651                         goto unlock;
1652                 }
1653                 if (!PageUptodate(page)) {
1654                         error = -EIO;
1655                         goto failed;
1656                 }
1657                 wait_on_page_writeback(page);
1658
1659                 if (shmem_should_replace_page(page, gfp)) {
1660                         error = shmem_replace_page(&page, gfp, info, index);
1661                         if (error)
1662                                 goto failed;
1663                 }
1664
1665                 error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1666                                 false);
1667                 if (!error) {
1668                         error = shmem_add_to_page_cache(page, mapping, index,
1669                                                 swp_to_radix_entry(swap));
1670                         /*
1671                          * We already confirmed swap under page lock, and make
1672                          * no memory allocation here, so usually no possibility
1673                          * of error; but free_swap_and_cache() only trylocks a
1674                          * page, so it is just possible that the entry has been
1675                          * truncated or holepunched since swap was confirmed.
1676                          * shmem_undo_range() will have done some of the
1677                          * unaccounting, now delete_from_swap_cache() will do
1678                          * the rest.
1679                          * Reset swap.val? No, leave it so "failed" goes back to
1680                          * "repeat": reading a hole and writing should succeed.
1681                          */
1682                         if (error) {
1683                                 mem_cgroup_cancel_charge(page, memcg, false);
1684                                 delete_from_swap_cache(page);
1685                         }
1686                 }
1687                 if (error)
1688                         goto failed;
1689
1690                 mem_cgroup_commit_charge(page, memcg, true, false);
1691
1692                 spin_lock_irq(&info->lock);
1693                 info->swapped--;
1694                 shmem_recalc_inode(inode);
1695                 spin_unlock_irq(&info->lock);
1696
1697                 if (sgp == SGP_WRITE)
1698                         mark_page_accessed(page);
1699
1700                 delete_from_swap_cache(page);
1701                 set_page_dirty(page);
1702                 swap_free(swap);
1703
1704         } else {
1705                 /* shmem_symlink() */
1706                 if (mapping->a_ops != &shmem_aops)
1707                         goto alloc_nohuge;
1708                 if (shmem_huge == SHMEM_HUGE_DENY || sgp_huge == SGP_NOHUGE)
1709                         goto alloc_nohuge;
1710                 if (shmem_huge == SHMEM_HUGE_FORCE)
1711                         goto alloc_huge;
1712                 switch (sbinfo->huge) {
1713                         loff_t i_size;
1714                         pgoff_t off;
1715                 case SHMEM_HUGE_NEVER:
1716                         goto alloc_nohuge;
1717                 case SHMEM_HUGE_WITHIN_SIZE:
1718                         off = round_up(index, HPAGE_PMD_NR);
1719                         i_size = round_up(i_size_read(inode), PAGE_SIZE);
1720                         if (i_size >= HPAGE_PMD_SIZE &&
1721                                         i_size >> PAGE_SHIFT >= off)
1722                                 goto alloc_huge;
1723                         /* fallthrough */
1724                 case SHMEM_HUGE_ADVISE:
1725                         if (sgp_huge == SGP_HUGE)
1726                                 goto alloc_huge;
1727                         /* TODO: implement fadvise() hints */
1728                         goto alloc_nohuge;
1729                 }
1730
1731 alloc_huge:
1732                 page = shmem_alloc_and_acct_page(gfp, inode, index, true);
1733                 if (IS_ERR(page)) {
1734 alloc_nohuge:           page = shmem_alloc_and_acct_page(gfp, inode,
1735                                         index, false);
1736                 }
1737                 if (IS_ERR(page)) {
1738                         int retry = 5;
1739                         error = PTR_ERR(page);
1740                         page = NULL;
1741                         if (error != -ENOSPC)
1742                                 goto failed;
1743                         /*
1744                          * Try to reclaim some space by splitting a huge page
1745                          * beyond i_size on the filesystem.
1746                          */
1747                         while (retry--) {
1748                                 int ret;
1749                                 ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
1750                                 if (ret == SHRINK_STOP)
1751                                         break;
1752                                 if (ret)
1753                                         goto alloc_nohuge;
1754                         }
1755                         goto failed;
1756                 }
1757
1758                 if (PageTransHuge(page))
1759                         hindex = round_down(index, HPAGE_PMD_NR);
1760                 else
1761                         hindex = index;
1762
1763                 if (sgp == SGP_WRITE)
1764                         __SetPageReferenced(page);
1765
1766                 error = mem_cgroup_try_charge(page, charge_mm, gfp, &memcg,
1767                                 PageTransHuge(page));
1768                 if (error)
1769                         goto unacct;
1770                 error = radix_tree_maybe_preload_order(gfp & GFP_RECLAIM_MASK,
1771                                 compound_order(page));
1772                 if (!error) {
1773                         error = shmem_add_to_page_cache(page, mapping, hindex,
1774                                                         NULL);
1775                         radix_tree_preload_end();
1776                 }
1777                 if (error) {
1778                         mem_cgroup_cancel_charge(page, memcg,
1779                                         PageTransHuge(page));
1780                         goto unacct;
1781                 }
1782                 mem_cgroup_commit_charge(page, memcg, false,
1783                                 PageTransHuge(page));
1784                 lru_cache_add_anon(page);
1785
1786                 spin_lock_irq(&info->lock);
1787                 info->alloced += 1 << compound_order(page);
1788                 inode->i_blocks += BLOCKS_PER_PAGE << compound_order(page);
1789                 shmem_recalc_inode(inode);
1790                 spin_unlock_irq(&info->lock);
1791                 alloced = true;
1792
1793                 if (PageTransHuge(page) &&
1794                                 DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
1795                                 hindex + HPAGE_PMD_NR - 1) {
1796                         /*
1797                          * Part of the huge page is beyond i_size: subject
1798                          * to shrink under memory pressure.
1799                          */
1800                         spin_lock(&sbinfo->shrinklist_lock);
1801                         /*
1802                          * list_empty_careful() defends against unlocked
1803                          * access to ->shrink_list in shmem_unused_huge_shrink()
1804                          */
1805                         if (list_empty_careful(&info->shrinklist)) {
1806                                 list_add_tail(&info->shrinklist,
1807                                                 &sbinfo->shrinklist);
1808                                 sbinfo->shrinklist_len++;
1809                         }
1810                         spin_unlock(&sbinfo->shrinklist_lock);
1811                 }
1812
1813                 /*
1814                  * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
1815                  */
1816                 if (sgp == SGP_FALLOC)
1817                         sgp = SGP_WRITE;
1818 clear:
1819                 /*
1820                  * Let SGP_WRITE caller clear ends if write does not fill page;
1821                  * but SGP_FALLOC on a page fallocated earlier must initialize
1822                  * it now, lest undo on failure cancel our earlier guarantee.
1823                  */
1824                 if (sgp != SGP_WRITE && !PageUptodate(page)) {
1825                         struct page *head = compound_head(page);
1826                         int i;
1827
1828                         for (i = 0; i < (1 << compound_order(head)); i++) {
1829                                 clear_highpage(head + i);
1830                                 flush_dcache_page(head + i);
1831                         }
1832                         SetPageUptodate(head);
1833                 }
1834         }
1835
1836         /* Perhaps the file has been truncated since we checked */
1837         if (sgp <= SGP_CACHE &&
1838             ((loff_t)index << PAGE_SHIFT) >= i_size_read(inode)) {
1839                 if (alloced) {
1840                         ClearPageDirty(page);
1841                         delete_from_page_cache(page);
1842                         spin_lock_irq(&info->lock);
1843                         shmem_recalc_inode(inode);
1844                         spin_unlock_irq(&info->lock);
1845                 }
1846                 error = -EINVAL;
1847                 goto unlock;
1848         }
1849         *pagep = page + index - hindex;
1850         return 0;
1851
1852         /*
1853          * Error recovery.
1854          */
1855 unacct:
1856         shmem_inode_unacct_blocks(inode, 1 << compound_order(page));
1857
1858         if (PageTransHuge(page)) {
1859                 unlock_page(page);
1860                 put_page(page);
1861                 goto alloc_nohuge;
1862         }
1863 failed:
1864         if (swap.val && !shmem_confirm_swap(mapping, index, swap))
1865                 error = -EEXIST;
1866 unlock:
1867         if (page) {
1868                 unlock_page(page);
1869                 put_page(page);
1870         }
1871         if (error == -ENOSPC && !once++) {
1872                 spin_lock_irq(&info->lock);
1873                 shmem_recalc_inode(inode);
1874                 spin_unlock_irq(&info->lock);
1875                 goto repeat;
1876         }
1877         if (error == -EEXIST)   /* from above or from radix_tree_insert */
1878                 goto repeat;
1879         return error;
1880 }
1881
1882 /*
1883  * This is like autoremove_wake_function, but it removes the wait queue
1884  * entry unconditionally - even if something else had already woken the
1885  * target.
1886  */
1887 static int synchronous_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
1888 {
1889         int ret = default_wake_function(wait, mode, sync, key);
1890         list_del_init(&wait->task_list);
1891         return ret;
1892 }
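/*
 * The unconditional list_del_init() matters here because the wait queue
 * head lives on the stack of the task inside shmem_fallocate(): once
 * that task returns, an entry left on the list would dangle.  See the
 * matching finish_wait() under i_lock in shmem_fault() below.
 */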
1893
1894 static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1895 {
1896         struct inode *inode = file_inode(vma->vm_file);
1897         gfp_t gfp = mapping_gfp_mask(inode->i_mapping);
1898         enum sgp_type sgp;
1899         int error;
1900         int ret = VM_FAULT_LOCKED;
1901
1902         /*
1903          * Trinity finds that probing a hole which tmpfs is punching can
1904          * prevent the hole-punch from ever completing: which in turn
1905          * locks writers out with its hold on i_mutex.  So refrain from
1906          * faulting pages into the hole while it's being punched.  Although
1907          * shmem_undo_range() does remove the additions, it may be unable to
1908          * keep up, as each new page needs its own unmap_mapping_range() call,
1909          * and the i_mmap tree grows ever slower to scan if new vmas are added.
1910          *
1911          * It does not matter if we sometimes reach this check just before the
1912          * hole-punch begins, so that one fault then races with the punch:
1913          * we just need to make racing faults a rare case.
1914          *
1915          * The implementation below would be much simpler if we just used a
1916          * standard mutex or completion: but we cannot take i_mutex in fault,
1917          * and bloating every shmem inode for this unlikely case would be sad.
1918          */
1919         if (unlikely(inode->i_private)) {
1920                 struct shmem_falloc *shmem_falloc;
1921
1922                 spin_lock(&inode->i_lock);
1923                 shmem_falloc = inode->i_private;
1924                 if (shmem_falloc &&
1925                     shmem_falloc->waitq &&
1926                     vmf->pgoff >= shmem_falloc->start &&
1927                     vmf->pgoff < shmem_falloc->next) {
1928                         wait_queue_head_t *shmem_falloc_waitq;
1929                         DEFINE_WAIT_FUNC(shmem_fault_wait, synchronous_wake_function);
1930
1931                         ret = VM_FAULT_NOPAGE;
1932                         if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1933                            !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1934                                 /* It's polite to up mmap_sem if we can */
1935                                 up_read(&vma->vm_mm->mmap_sem);
1936                                 ret = VM_FAULT_RETRY;
1937                         }
1938
1939                         shmem_falloc_waitq = shmem_falloc->waitq;
1940                         prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
1941                                         TASK_UNINTERRUPTIBLE);
1942                         spin_unlock(&inode->i_lock);
1943                         schedule();
1944
1945                         /*
1946                          * shmem_falloc_waitq points into the shmem_fallocate()
1947                          * stack of the hole-punching task: shmem_falloc_waitq
1948                          * is usually invalid by the time we reach here, but
1949                          * finish_wait() does not dereference it in that case;
1950                          * though i_lock needed lest racing with wake_up_all().
1951                          */
1952                         spin_lock(&inode->i_lock);
1953                         finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
1954                         spin_unlock(&inode->i_lock);
1955                         return ret;
1956                 }
1957                 spin_unlock(&inode->i_lock);
1958         }
1959
1960         sgp = SGP_CACHE;
1961         if (vma->vm_flags & VM_HUGEPAGE)
1962                 sgp = SGP_HUGE;
1963         else if (vma->vm_flags & VM_NOHUGEPAGE)
1964                 sgp = SGP_NOHUGE;
1965
1966         error = shmem_getpage_gfp(inode, vmf->pgoff, &vmf->page, sgp,
1967                                   gfp, vma->vm_mm, &ret);
1968         if (error)
1969                 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
1970         return ret;
1971 }
1972
1973 unsigned long shmem_get_unmapped_area(struct file *file,
1974                                       unsigned long uaddr, unsigned long len,
1975                                       unsigned long pgoff, unsigned long flags)
1976 {
1977         unsigned long (*get_area)(struct file *,
1978                 unsigned long, unsigned long, unsigned long, unsigned long);
1979         unsigned long addr;
1980         unsigned long offset;
1981         unsigned long inflated_len;
1982         unsigned long inflated_addr;
1983         unsigned long inflated_offset;
1984
1985         if (len > TASK_SIZE)
1986                 return -ENOMEM;
1987
1988         get_area = current->mm->get_unmapped_area;
1989         addr = get_area(file, uaddr, len, pgoff, flags);
1990
1991         if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
1992                 return addr;
1993         if (IS_ERR_VALUE(addr))
1994                 return addr;
1995         if (addr & ~PAGE_MASK)
1996                 return addr;
1997         if (addr > TASK_SIZE - len)
1998                 return addr;
1999
2000         if (shmem_huge == SHMEM_HUGE_DENY)
2001                 return addr;
2002         if (len < HPAGE_PMD_SIZE)
2003                 return addr;
2004         if (flags & MAP_FIXED)
2005                 return addr;
2006         /*
2007          * Our priority is to support MAP_SHARED mapped hugely;
2008          * and support MAP_PRIVATE mapped hugely too, until it is COWed.
2009          * But if caller specified an address hint, respect that as before.
2010          */
2011         if (uaddr)
2012                 return addr;
2013
2014         if (shmem_huge != SHMEM_HUGE_FORCE) {
2015                 struct super_block *sb;
2016
2017                 if (file) {
2018                         VM_BUG_ON(file->f_op != &shmem_file_operations);
2019                         sb = file_inode(file)->i_sb;
2020                 } else {
2021                         /*
2022                          * Called directly from mm/mmap.c, or drivers/char/mem.c
2023                          * for "/dev/zero", to create a shared anonymous object.
2024                          */
2025                         if (IS_ERR(shm_mnt))
2026                                 return addr;
2027                         sb = shm_mnt->mnt_sb;
2028                 }
2029                 if (SHMEM_SB(sb)->huge == SHMEM_HUGE_NEVER)
2030                         return addr;
2031         }
2032
2033         offset = (pgoff << PAGE_SHIFT) & (HPAGE_PMD_SIZE-1);
2034         if (offset && offset + len < 2 * HPAGE_PMD_SIZE)
2035                 return addr;
2036         if ((addr & (HPAGE_PMD_SIZE-1)) == offset)
2037                 return addr;
2038
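        /*
         * Worked example (illustrative): with HPAGE_PMD_SIZE = 2MB and
         * offset = 0, a 4MB request is inflated to 4MB + 2MB - 4kB.  In
         * the worst case the area found below starts 4kB past a 2MB
         * boundary, so aligning forward consumes 2MB - 4kB: exactly the
         * slack added here, leaving the full 4MB usable.
         */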
2039         inflated_len = len + HPAGE_PMD_SIZE - PAGE_SIZE;
2040         if (inflated_len > TASK_SIZE)
2041                 return addr;
2042         if (inflated_len < len)
2043                 return addr;
2044
2045         inflated_addr = get_area(NULL, 0, inflated_len, 0, flags);
2046         if (IS_ERR_VALUE(inflated_addr))
2047                 return addr;
2048         if (inflated_addr & ~PAGE_MASK)
2049                 return addr;
2050
2051         inflated_offset = inflated_addr & (HPAGE_PMD_SIZE-1);
2052         inflated_addr += offset - inflated_offset;
2053         if (inflated_offset > offset)
2054                 inflated_addr += HPAGE_PMD_SIZE;
2055
2056         if (inflated_addr > TASK_SIZE - len)
2057                 return addr;
2058         return inflated_addr;
2059 }
2060
2061 #ifdef CONFIG_NUMA
2062 static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
2063 {
2064         struct inode *inode = file_inode(vma->vm_file);
2065         return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
2066 }
2067
2068 static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
2069                                           unsigned long addr)
2070 {
2071         struct inode *inode = file_inode(vma->vm_file);
2072         pgoff_t index;
2073
2074         index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
2075         return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
2076 }
2077 #endif
2078
2079 int shmem_lock(struct file *file, int lock, struct user_struct *user)
2080 {
2081         struct inode *inode = file_inode(file);
2082         struct shmem_inode_info *info = SHMEM_I(inode);
2083         int retval = -ENOMEM;
2084
2085         spin_lock_irq(&info->lock);
2086         if (lock && !(info->flags & VM_LOCKED)) {
2087                 if (!user_shm_lock(inode->i_size, user))
2088                         goto out_nomem;
2089                 info->flags |= VM_LOCKED;
2090                 mapping_set_unevictable(file->f_mapping);
2091         }
2092         if (!lock && (info->flags & VM_LOCKED) && user) {
2093                 user_shm_unlock(inode->i_size, user);
2094                 info->flags &= ~VM_LOCKED;
2095                 mapping_clear_unevictable(file->f_mapping);
2096         }
2097         retval = 0;
2098
2099 out_nomem:
2100         spin_unlock_irq(&info->lock);
2101         return retval;
2102 }
2103
2104 static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
2105 {
2106         file_accessed(file);
2107         vma->vm_ops = &shmem_vm_ops;
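        /*
         * The test below asks whether the vma covers at least one
         * naturally aligned huge page: round vm_start up and vm_end down
         * to HPAGE_PMD_SIZE boundaries, and only bother khugepaged if
         * something remains in between.
         */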
2108         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
2109                         ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
2110                         (vma->vm_end & HPAGE_PMD_MASK)) {
2111                 khugepaged_enter(vma, vma->vm_flags);
2112         }
2113         return 0;
2114 }
2115
2116 static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
2117                                      umode_t mode, dev_t dev, unsigned long flags)
2118 {
2119         struct inode *inode;
2120         struct shmem_inode_info *info;
2121         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2122
2123         if (shmem_reserve_inode(sb))
2124                 return NULL;
2125
2126         inode = new_inode(sb);
2127         if (inode) {
2128                 inode->i_ino = get_next_ino();
2129                 inode_init_owner(inode, dir, mode);
2130                 inode->i_blocks = 0;
2131                 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
2132                 inode->i_generation = get_seconds();
2133                 info = SHMEM_I(inode);
2134                 memset(info, 0, (char *)inode - (char *)info);
2135                 spin_lock_init(&info->lock);
2136                 info->seals = F_SEAL_SEAL;
2137                 info->flags = flags & VM_NORESERVE;
2138                 INIT_LIST_HEAD(&info->shrinklist);
2139                 INIT_LIST_HEAD(&info->swaplist);
2140                 simple_xattrs_init(&info->xattrs);
2141                 cache_no_acl(inode);
2142
2143                 switch (mode & S_IFMT) {
2144                 default:
2145                         inode->i_op = &shmem_special_inode_operations;
2146                         init_special_inode(inode, mode, dev);
2147                         break;
2148                 case S_IFREG:
2149                         inode->i_mapping->a_ops = &shmem_aops;
2150                         inode->i_op = &shmem_inode_operations;
2151                         inode->i_fop = &shmem_file_operations;
2152                         mpol_shared_policy_init(&info->policy,
2153                                                  shmem_get_sbmpol(sbinfo));
2154                         break;
2155                 case S_IFDIR:
2156                         inc_nlink(inode);
2157                         /* Some things misbehave if size == 0 on a directory */
2158                         inode->i_size = 2 * BOGO_DIRENT_SIZE;
2159                         inode->i_op = &shmem_dir_inode_operations;
2160                         inode->i_fop = &simple_dir_operations;
2161                         break;
2162                 case S_IFLNK:
2163                         /*
2164                          * Must not load anything in the rbtree,
2165                          * mpol_free_shared_policy will not be called.
2166                          */
2167                         mpol_shared_policy_init(&info->policy, NULL);
2168                         break;
2169                 }
2170
2171                 lockdep_annotate_inode_mutex_key(inode);
2172         } else
2173                 shmem_free_inode(sb);
2174         return inode;
2175 }
2176
2177 bool shmem_mapping(struct address_space *mapping)
2178 {
2179         if (!mapping->host)
2180                 return false;
2181
2182         return mapping->host->i_sb->s_op == &shmem_ops;
2183 }
2184
2185 #ifdef CONFIG_TMPFS
2186 static const struct inode_operations shmem_symlink_inode_operations;
2187 static const struct inode_operations shmem_short_symlink_operations;
2188
2189 #ifdef CONFIG_TMPFS_XATTR
2190 static int shmem_initxattrs(struct inode *, const struct xattr *, void *);
2191 #else
2192 #define shmem_initxattrs NULL
2193 #endif
2194
2195 static int
2196 shmem_write_begin(struct file *file, struct address_space *mapping,
2197                         loff_t pos, unsigned len, unsigned flags,
2198                         struct page **pagep, void **fsdata)
2199 {
2200         struct inode *inode = mapping->host;
2201         struct shmem_inode_info *info = SHMEM_I(inode);
2202         pgoff_t index = pos >> PAGE_SHIFT;
2203
2204         /* i_mutex is held by caller */
2205         if (unlikely(info->seals)) {
2206                 if (info->seals & F_SEAL_WRITE)
2207                         return -EPERM;
2208                 if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
2209                         return -EPERM;
2210         }
2211
2212         return shmem_getpage(inode, index, pagep, SGP_WRITE);
2213 }
2214
2215 static int
2216 shmem_write_end(struct file *file, struct address_space *mapping,
2217                         loff_t pos, unsigned len, unsigned copied,
2218                         struct page *page, void *fsdata)
2219 {
2220         struct inode *inode = mapping->host;
2221
2222         if (pos + copied > inode->i_size)
2223                 i_size_write(inode, pos + copied);
2224
2225         if (!PageUptodate(page)) {
2226                 struct page *head = compound_head(page);
2227                 if (PageTransCompound(page)) {
2228                         int i;
2229
2230                         for (i = 0; i < HPAGE_PMD_NR; i++) {
2231                                 if (head + i == page)
2232                                         continue;
2233                                 clear_highpage(head + i);
2234                                 flush_dcache_page(head + i);
2235                         }
2236                 }
2237                 if (copied < PAGE_SIZE) {
2238                         unsigned from = pos & (PAGE_SIZE - 1);
2239                         zero_user_segments(page, 0, from,
2240                                         from + copied, PAGE_SIZE);
2241                 }
2242                 SetPageUptodate(head);
2243         }
2244         set_page_dirty(page);
2245         unlock_page(page);
2246         put_page(page);
2247
2248         return copied;
2249 }
2250
2251 static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
2252 {
2253         struct file *file = iocb->ki_filp;
2254         struct inode *inode = file_inode(file);
2255         struct address_space *mapping = inode->i_mapping;
2256         pgoff_t index;
2257         unsigned long offset;
2258         enum sgp_type sgp = SGP_READ;
2259         int error = 0;
2260         ssize_t retval = 0;
2261         loff_t *ppos = &iocb->ki_pos;
2262
2263         /*
2264          * Might this read be for a stacking filesystem?  Then when reading
2265          * holes of a sparse file, we actually need to allocate those pages,
2266          * and even mark them dirty, so it cannot exceed the max_blocks limit.
2267          */
2268         if (!iter_is_iovec(to))
2269                 sgp = SGP_CACHE;
2270
2271         index = *ppos >> PAGE_SHIFT;
2272         offset = *ppos & ~PAGE_MASK;
2273
2274         for (;;) {
2275                 struct page *page = NULL;
2276                 pgoff_t end_index;
2277                 unsigned long nr, ret;
2278                 loff_t i_size = i_size_read(inode);
2279
2280                 end_index = i_size >> PAGE_SHIFT;
2281                 if (index > end_index)
2282                         break;
2283                 if (index == end_index) {
2284                         nr = i_size & ~PAGE_MASK;
2285                         if (nr <= offset)
2286                                 break;
2287                 }
2288
2289                 error = shmem_getpage(inode, index, &page, sgp);
2290                 if (error) {
2291                         if (error == -EINVAL)
2292                                 error = 0;
2293                         break;
2294                 }
2295                 if (page) {
2296                         if (sgp == SGP_CACHE)
2297                                 set_page_dirty(page);
2298                         unlock_page(page);
2299                 }
2300
2301                 /*
2302                  * We must re-evaluate i_size after getting the page, since
2303                  * reads (unlike writes) run without i_mutex protection against truncate
2304                  */
2305                 nr = PAGE_SIZE;
2306                 i_size = i_size_read(inode);
2307                 end_index = i_size >> PAGE_SHIFT;
2308                 if (index == end_index) {
2309                         nr = i_size & ~PAGE_MASK;
2310                         if (nr <= offset) {
2311                                 if (page)
2312                                         put_page(page);
2313                                 break;
2314                         }
2315                 }
2316                 nr -= offset;
2317
2318                 if (page) {
2319                         /*
2320                          * If users can be writing to this page using arbitrary
2321                          * virtual addresses, take care about potential aliasing
2322                          * before reading the page on the kernel side.
2323                          */
2324                         if (mapping_writably_mapped(mapping))
2325                                 flush_dcache_page(page);
2326                         /*
2327                          * Mark the page accessed if we read the beginning.
2328                          */
2329                         if (!offset)
2330                                 mark_page_accessed(page);
2331                 } else {
2332                         page = ZERO_PAGE(0);
2333                         get_page(page);
2334                 }
2335
2336                 /*
2337                  * Ok, we have the page, and it's up-to-date, so
2338                  * now we can copy it to user space...
2339                  */
2340                 ret = copy_page_to_iter(page, offset, nr, to);
2341                 retval += ret;
2342                 offset += ret;
2343                 index += offset >> PAGE_SHIFT;
2344                 offset &= ~PAGE_MASK;
2345
2346                 put_page(page);
2347                 if (!iov_iter_count(to))
2348                         break;
2349                 if (ret < nr) {
2350                         error = -EFAULT;
2351                         break;
2352                 }
2353                 cond_resched();
2354         }
2355
2356         *ppos = ((loff_t) index << PAGE_SHIFT) + offset;
2357         file_accessed(file);
2358         return retval ? retval : error;
2359 }
2360
2361 /*
2362  * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
2363  */
2364 static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
2365                                     pgoff_t index, pgoff_t end, int whence)
2366 {
2367         struct page *page;
2368         struct pagevec pvec;
2369         pgoff_t indices[PAGEVEC_SIZE];
2370         bool done = false;
2371         int i;
2372
2373         pagevec_init(&pvec, 0);
2374         pvec.nr = 1;            /* start small: we may be there already */
2375         while (!done) {
2376                 pvec.nr = find_get_entries(mapping, index,
2377                                         pvec.nr, pvec.pages, indices);
2378                 if (!pvec.nr) {
2379                         if (whence == SEEK_DATA)
2380                                 index = end;
2381                         break;
2382                 }
2383                 for (i = 0; i < pvec.nr; i++, index++) {
2384                         if (index < indices[i]) {
2385                                 if (whence == SEEK_HOLE) {
2386                                         done = true;
2387                                         break;
2388                                 }
2389                                 index = indices[i];
2390                         }
2391                         page = pvec.pages[i];
2392                         if (page && !radix_tree_exceptional_entry(page)) {
2393                                 if (!PageUptodate(page))
2394                                         page = NULL;
2395                         }
2396                         if (index >= end ||
2397                             (page && whence == SEEK_DATA) ||
2398                             (!page && whence == SEEK_HOLE)) {
2399                                 done = true;
2400                                 break;
2401                         }
2402                 }
2403                 pagevec_remove_exceptionals(&pvec);
2404                 pagevec_release(&pvec);
2405                 pvec.nr = PAGEVEC_SIZE;
2406                 cond_resched();
2407         }
2408         return index;
2409 }
2410
2411 static loff_t shmem_file_llseek(struct file *file, loff_t offset, int whence)
2412 {
2413         struct address_space *mapping = file->f_mapping;
2414         struct inode *inode = mapping->host;
2415         pgoff_t start, end;
2416         loff_t new_offset;
2417
2418         if (whence != SEEK_DATA && whence != SEEK_HOLE)
2419                 return generic_file_llseek_size(file, offset, whence,
2420                                         MAX_LFS_FILESIZE, i_size_read(inode));
2421         inode_lock(inode);
2422         /* We're holding i_mutex so we can access i_size directly */
2423
2424         if (offset < 0 || offset >= inode->i_size)
2425                 offset = -ENXIO;
2426         else {
2427                 start = offset >> PAGE_SHIFT;
2428                 end = (inode->i_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
2429                 new_offset = shmem_seek_hole_data(mapping, start, end, whence);
2430                 new_offset <<= PAGE_SHIFT;
2431                 if (new_offset > offset) {
2432                         if (new_offset < inode->i_size)
2433                                 offset = new_offset;
2434                         else if (whence == SEEK_DATA)
2435                                 offset = -ENXIO;
2436                         else
2437                                 offset = inode->i_size;
2438                 }
2439         }
2440
2441         if (offset >= 0)
2442                 offset = vfs_setpos(file, offset, MAX_LFS_FILESIZE);
2443         inode_unlock(inode);
2444         return offset;
2445 }
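/*
 * Userspace sketch (illustrative): on tmpfs these resolve at PAGE_SIZE
 * granularity via shmem_seek_hole_data(), e.g.
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	 // start of first data run
 *	off_t hole = lseek(fd, data, SEEK_HOLE); // end of that data run
 *
 * with lseek() failing with ENXIO when SEEK_DATA finds nothing before EOF.
 */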
2446
2447 /*
2448  * We need a tag: a new tag would expand every radix_tree_node by 8 bytes,
2449  * so reuse a tag which we firmly believe is never set or cleared on shmem.
2450  */
2451 #define SHMEM_TAG_PINNED        PAGECACHE_TAG_TOWRITE
2452 #define LAST_SCAN               4       /* about 150ms max */
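/*
 * The "about 150ms max" above follows from the backoff in
 * shmem_wait_for_pins(): scan 0 only drains the lru pagevecs, then
 * scans 1..4 sleep (HZ << scan) / 200 jiffies, i.e. roughly
 * 10ms + 20ms + 40ms + 80ms = 150ms in total.
 */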
2453
2454 static void shmem_tag_pins(struct address_space *mapping)
2455 {
2456         struct radix_tree_iter iter;
2457         void **slot;
2458         pgoff_t start;
2459         struct page *page;
2460
2461         lru_add_drain();
2462         start = 0;
2463         rcu_read_lock();
2464
2465         radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
2466                 page = radix_tree_deref_slot(slot);
2467                 if (!page || radix_tree_exception(page)) {
2468                         if (radix_tree_deref_retry(page)) {
2469                                 slot = radix_tree_iter_retry(&iter);
2470                                 continue;
2471                         }
2472                 } else if (page_count(page) - page_mapcount(page) > 1) {
2473                         spin_lock_irq(&mapping->tree_lock);
2474                         radix_tree_tag_set(&mapping->page_tree, iter.index,
2475                                            SHMEM_TAG_PINNED);
2476                         spin_unlock_irq(&mapping->tree_lock);
2477                 }
2478
2479                 if (need_resched()) {
2480                         cond_resched_rcu();
2481                         slot = radix_tree_iter_next(&iter);
2482                 }
2483         }
2484         rcu_read_unlock();
2485 }
2486
2487 /*
2488  * Setting SEAL_WRITE requires us to verify there's no pending writer. However,
2489  * via get_user_pages(), drivers might have some pending I/O without any active
2490  * user-space mappings (e.g., direct-IO, AIO). Therefore, we look at all pages
2491  * and see whether any has an elevated ref-count. If so, we tag those pages and
2492  * wait for the extra references to be dropped.
2493  * The caller must guarantee that no new user will acquire writable references
2494  * to those pages to avoid races.
2495  */
2496 static int shmem_wait_for_pins(struct address_space *mapping)
2497 {
2498         struct radix_tree_iter iter;
2499         void **slot;
2500         pgoff_t start;
2501         struct page *page;
2502         int error, scan;
2503
2504         shmem_tag_pins(mapping);
2505
2506         error = 0;
2507         for (scan = 0; scan <= LAST_SCAN; scan++) {
2508                 if (!radix_tree_tagged(&mapping->page_tree, SHMEM_TAG_PINNED))
2509                         break;
2510
2511                 if (!scan)
2512                         lru_add_drain_all();
2513                 else if (schedule_timeout_killable((HZ << scan) / 200))
2514                         scan = LAST_SCAN;
2515
2516                 start = 0;
2517                 rcu_read_lock();
2518                 radix_tree_for_each_tagged(slot, &mapping->page_tree, &iter,
2519                                            start, SHMEM_TAG_PINNED) {
2520
2521                         page = radix_tree_deref_slot(slot);
2522                         if (radix_tree_exception(page)) {
2523                                 if (radix_tree_deref_retry(page)) {
2524                                         slot = radix_tree_iter_retry(&iter);
2525                                         continue;
2526                                 }
2527
2528                                 page = NULL;
2529                         }
2530
2531                         if (page &&
2532                             page_count(page) - page_mapcount(page) != 1) {
2533                                 if (scan < LAST_SCAN)
2534                                         goto continue_resched;
2535
2536                                 /*
2537                                  * On the last scan, we clean up all those tags
2538                                  * we inserted; but make a note that we still
2539                                  * found pages pinned.
2540                                  */
2541                                 error = -EBUSY;
2542                         }
2543
2544                         spin_lock_irq(&mapping->tree_lock);
2545                         radix_tree_tag_clear(&mapping->page_tree,
2546                                              iter.index, SHMEM_TAG_PINNED);
2547                         spin_unlock_irq(&mapping->tree_lock);
2548 continue_resched:
2549                         if (need_resched()) {
2550                                 cond_resched_rcu();
2551                                 slot = radix_tree_iter_next(&iter);
2552                         }
2553                 }
2554                 rcu_read_unlock();
2555         }
2556
2557         return error;
2558 }
2559
2560 #define F_ALL_SEALS (F_SEAL_SEAL | \
2561                      F_SEAL_SHRINK | \
2562                      F_SEAL_GROW | \
2563                      F_SEAL_WRITE)
2564
2565 int shmem_add_seals(struct file *file, unsigned int seals)
2566 {
2567         struct inode *inode = file_inode(file);
2568         struct shmem_inode_info *info = SHMEM_I(inode);
2569         int error;
2570
2571         /*
2572          * SEALING
2573          * Sealing allows multiple parties to share a shmem-file but restrict
2574          * access to a specific subset of file operations. Seals can only be
2575          * added, but never removed. This way, mutually untrusted parties can
2576          * share common memory regions with a well-defined policy. A malicious
2577          * peer can thus never perform unwanted operations on a shared object.
2578          *
2579          * Seals are only supported on special shmem-files and always affect
2580          * the whole underlying inode. Once a seal is set, it may prevent some
2581          * kinds of access to the file. Currently, the following seals are
2582          * defined:
2583          *   SEAL_SEAL: Prevent further seals from being set on this file
2584          *   SEAL_SHRINK: Prevent the file from shrinking
2585          *   SEAL_GROW: Prevent the file from growing
2586          *   SEAL_WRITE: Prevent write access to the file
2587          *
2588          * As we don't require any trust relationship between two parties, we
2589          * must prevent seals from being removed. Therefore, sealing a file
2590          * only adds a given set of seals to the file, it never touches
2591          * existing seals. Furthermore, the "setting seals"-operation can be
2592          * sealed itself, which basically prevents any further seal from being
2593          * added.
2594          *
2595          * Semantics of sealing are only defined on volatile files. Only
2596          * anonymous shmem files support sealing. More importantly, seals are
2597          * never written to disk. Therefore, there's no plan to support it on
2598          * other file types.
2599          */
2600
2601         if (file->f_op != &shmem_file_operations)
2602                 return -EINVAL;
2603         if (!(file->f_mode & FMODE_WRITE))
2604                 return -EPERM;
2605         if (seals & ~(unsigned int)F_ALL_SEALS)
2606                 return -EINVAL;
2607
2608         inode_lock(inode);
2609
2610         if (info->seals & F_SEAL_SEAL) {
2611                 error = -EPERM;
2612                 goto unlock;
2613         }
2614
2615         if ((seals & F_SEAL_WRITE) && !(info->seals & F_SEAL_WRITE)) {
2616                 error = mapping_deny_writable(file->f_mapping);
2617                 if (error)
2618                         goto unlock;
2619
2620                 error = shmem_wait_for_pins(file->f_mapping);
2621                 if (error) {
2622                         mapping_allow_writable(file->f_mapping);
2623                         goto unlock;
2624                 }
2625         }
2626
2627         info->seals |= seals;
2628         error = 0;
2629
2630 unlock:
2631         inode_unlock(inode);
2632         return error;
2633 }
2634 EXPORT_SYMBOL_GPL(shmem_add_seals);
2635
2636 int shmem_get_seals(struct file *file)
2637 {
2638         if (file->f_op != &shmem_file_operations)
2639                 return -EINVAL;
2640
2641         return SHMEM_I(file_inode(file))->seals;
2642 }
2643 EXPORT_SYMBOL_GPL(shmem_get_seals);
2644
2645 long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg)
2646 {
2647         long error;
2648
2649         switch (cmd) {
2650         case F_ADD_SEALS:
2651                 /* disallow upper 32bit */
2652                 if (arg > UINT_MAX)
2653                         return -EINVAL;
2654
2655                 error = shmem_add_seals(file, arg);
2656                 break;
2657         case F_GET_SEALS:
2658                 error = shmem_get_seals(file);
2659                 break;
2660         default:
2661                 error = -EINVAL;
2662                 break;
2663         }
2664
2665         return error;
2666 }
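/*
 * Userspace sketch (illustrative): seals are typically applied to a
 * memfd, e.g.
 *
 *	int fd = memfd_create("buf", MFD_ALLOW_SEALING);
 *	ftruncate(fd, size);
 *	...fill the buffer...
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW |
 *			       F_SEAL_WRITE | F_SEAL_SEAL);
 *
 * after which no file descriptor to this file can resize or modify it,
 * and no further seals can be added.
 */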
2667
2668 static long shmem_fallocate(struct file *file, int mode, loff_t offset,
2669                                                          loff_t len)
2670 {
2671         struct inode *inode = file_inode(file);
2672         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
2673         struct shmem_inode_info *info = SHMEM_I(inode);
2674         struct shmem_falloc shmem_falloc;
2675         pgoff_t start, index, end;
2676         int error;
2677
2678         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2679                 return -EOPNOTSUPP;
2680
2681         inode_lock(inode);
2682
2683         if (mode & FALLOC_FL_PUNCH_HOLE) {
2684                 struct address_space *mapping = file->f_mapping;
2685                 loff_t unmap_start = round_up(offset, PAGE_SIZE);
2686                 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
2687                 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
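                /*
                 * Worked example (illustrative): offset = 1000, len = 10000
                 * gives unmap_start = 4096 and unmap_end = 8191: only pages
                 * lying wholly inside the hole are unmapped; the partial
                 * pages at either end are zeroed, not freed, by
                 * shmem_truncate_range() below.
                 */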
2688
2689                 /* protected by i_mutex */
2690                 if (info->seals & F_SEAL_WRITE) {
2691                         error = -EPERM;
2692                         goto out;
2693                 }
2694
2695                 shmem_falloc.waitq = &shmem_falloc_waitq;
2696                 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
2697                 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
2698                 spin_lock(&inode->i_lock);
2699                 inode->i_private = &shmem_falloc;
2700                 spin_unlock(&inode->i_lock);
2701
2702                 if ((u64)unmap_end > (u64)unmap_start)
2703                         unmap_mapping_range(mapping, unmap_start,
2704                                             1 + unmap_end - unmap_start, 0);
2705                 shmem_truncate_range(inode, offset, offset + len - 1);
2706                 /* No need to unmap again: hole-punching leaves COWed pages */
2707
2708                 spin_lock(&inode->i_lock);
2709                 inode->i_private = NULL;
2710                 wake_up_all(&shmem_falloc_waitq);
2711                 WARN_ON_ONCE(!list_empty(&shmem_falloc_waitq.task_list));
2712                 spin_unlock(&inode->i_lock);
2713                 error = 0;
2714                 goto out;
2715         }
2716
2717         /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
2718         error = inode_newsize_ok(inode, offset + len);
2719         if (error)
2720                 goto out;
2721
2722         if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
2723                 error = -EPERM;
2724                 goto out;
2725         }
2726
2727         start = offset >> PAGE_SHIFT;
2728         end = (offset + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
2729         /* Try to avoid a swapstorm if len is impossible to satisfy */
2730         if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
2731                 error = -ENOSPC;
2732                 goto out;
2733         }
2734
2735         shmem_falloc.waitq = NULL;
2736         shmem_falloc.start = start;
2737         shmem_falloc.next  = start;
2738         shmem_falloc.nr_falloced = 0;
2739         shmem_falloc.nr_unswapped = 0;
2740         spin_lock(&inode->i_lock);
2741         inode->i_private = &shmem_falloc;
2742         spin_unlock(&inode->i_lock);
2743
2744         for (index = start; index < end; index++) {
2745                 struct page *page;
2746
2747                 /*
2748                  * Good, the fallocate(2) manpage permits EINTR: we may have
2749                  * been interrupted because we are using up too much memory.
2750                  */
2751                 if (signal_pending(current))
2752                         error = -EINTR;
2753                 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
2754                         error = -ENOMEM;
2755                 else
2756                         error = shmem_getpage(inode, index, &page, SGP_FALLOC);
2757                 if (error) {
2758                         /* Remove the !PageUptodate pages we added */
2759                         if (index > start) {
2760                                 shmem_undo_range(inode,
2761                                     (loff_t)start << PAGE_SHIFT,
2762                                     ((loff_t)index << PAGE_SHIFT) - 1, true);
2763                         }
2764                         goto undone;
2765                 }
2766
2767                 /*
2768                  * Inform shmem_writepage() how far we have reached.
2769                  * No need for lock or barrier: we have the page lock.
2770                  */
2771                 shmem_falloc.next++;
2772                 if (!PageUptodate(page))
2773                         shmem_falloc.nr_falloced++;
2774
2775                 /*
2776                  * If !PageUptodate, leave it that way so that freeable pages
2777                  * can be recognized if we need to rollback on error later.
2778                  * But set_page_dirty so that memory pressure will swap rather
2779                  * than free the pages we are allocating (and SGP_CACHE pages
2780                  * might still be clean: we now need to mark those dirty too).
2781                  */
2782                 set_page_dirty(page);
2783                 unlock_page(page);
2784                 put_page(page);
2785                 cond_resched();
2786         }
2787
2788         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
2789                 i_size_write(inode, offset + len);
2790         inode->i_ctime = current_time(inode);
2791 undone:
2792         spin_lock(&inode->i_lock);
2793         inode->i_private = NULL;
2794         spin_unlock(&inode->i_lock);
2795 out:
2796         inode_unlock(inode);
2797         return error;
2798 }
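
/*
 * Illustrative sketch (not part of the kernel build): exercising the two
 * fallocate(2) modes handled above from userspace, assuming fd refers to a
 * tmpfs file.  Note the VFS insists on FALLOC_FL_KEEP_SIZE accompanying
 * FALLOC_FL_PUNCH_HOLE, which is why only those two mode bits can reach
 * shmem_fallocate().
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *
 *	static int exercise_fallocate(int fd)
 *	{
 *		// preallocate 4MiB: pages are instantiated, i_size grows
 *		if (fallocate(fd, 0, 0, 4 << 20))
 *			return -1;
 *		// punch out the second MiB: pages freed, i_size unchanged
 *		return fallocate(fd, FALLOC_FL_PUNCH_HOLE |
 *				 FALLOC_FL_KEEP_SIZE, 1 << 20, 1 << 20);
 *	}
 */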
2799
2800 static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
2801 {
2802         struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
2803
2804         buf->f_type = TMPFS_MAGIC;
2805         buf->f_bsize = PAGE_SIZE;
2806         buf->f_namelen = NAME_MAX;
2807         if (sbinfo->max_blocks) {
2808                 buf->f_blocks = sbinfo->max_blocks;
2809                 buf->f_bavail =
2810                 buf->f_bfree  = sbinfo->max_blocks -
2811                                 percpu_counter_sum(&sbinfo->used_blocks);
2812         }
2813         if (sbinfo->max_inodes) {
2814                 buf->f_files = sbinfo->max_inodes;
2815                 buf->f_ffree = sbinfo->free_inodes;
2816         }
2817         /* else leave those fields 0 like simple_statfs */
2818         return 0;
2819 }
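
/*
 * Illustrative sketch (not part of the kernel build): how the fields
 * filled in above appear to userspace, assuming /dev/shm is a tmpfs mount.
 *
 *	#include <sys/vfs.h>
 *
 *	struct statfs st;
 *	if (statfs("/dev/shm", &st) == 0) {
 *		// st.f_type == TMPFS_MAGIC (0x01021994)
 *		// free bytes = st.f_bfree * st.f_bsize (f_bsize == PAGE_SIZE)
 *	}
 */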
2820
2821 /*
2822  * File creation. Allocate an inode, and we're done.
2823  */
2824 static int
2825 shmem_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
2826 {
2827         struct inode *inode;
2828         int error = -ENOSPC;
2829
2830         inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
2831         if (inode) {
2832                 error = simple_acl_create(dir, inode);
2833                 if (error)
2834                         goto out_iput;
2835                 error = security_inode_init_security(inode, dir,
2836                                                      &dentry->d_name,
2837                                                      shmem_initxattrs, NULL);
2838                 if (error && error != -EOPNOTSUPP)
2839                         goto out_iput;
2840
2841                 error = 0;
2842                 dir->i_size += BOGO_DIRENT_SIZE;
2843                 dir->i_ctime = dir->i_mtime = current_time(dir);
2844                 d_instantiate(dentry, inode);
2845                 dget(dentry); /* Extra count - pin the dentry in core */
2846         }
2847         return error;
2848 out_iput:
2849         iput(inode);
2850         return error;
2851 }
2852
2853 static int
2854 shmem_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
2855 {
2856         struct inode *inode;
2857         int error = -ENOSPC;
2858
2859         inode = shmem_get_inode(dir->i_sb, dir, mode, 0, VM_NORESERVE);
2860         if (inode) {
2861                 error = security_inode_init_security(inode, dir,
2862                                                      NULL,
2863                                                      shmem_initxattrs, NULL);
2864                 if (error && error != -EOPNOTSUPP)
2865                         goto out_iput;
2866                 error = simple_acl_create(dir, inode);
2867                 if (error)
2868                         goto out_iput;
2869                 d_tmpfile(dentry, inode);
2870         }
2871         return error;
2872 out_iput:
2873         iput(inode);
2874         return error;
2875 }
2876
2877 static int shmem_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
2878 {
2879         int error;
2880
2881         if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
2882                 return error;
2883         inc_nlink(dir);
2884         return 0;
2885 }
2886
2887 static int shmem_create(struct inode *dir, struct dentry *dentry, umode_t mode,
2888                 bool excl)
2889 {
2890         return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
2891 }
2892
2893 /*
2894  * Link a file.
2895  */
2896 static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
2897 {
2898         struct inode *inode = d_inode(old_dentry);
2899         int ret = 0;
2900
2901         /*
2902          * No ordinary (disk based) filesystem counts links as inodes;
2903          * but each new link needs a new dentry, pinning lowmem, and
2904          * tmpfs dentries cannot be pruned until they are unlinked.
2905          * But if an O_TMPFILE file is linked into the tmpfs, the
2906          * first link must skip that, to get the accounting right.
2907          */
2908         if (inode->i_nlink) {
2909                 ret = shmem_reserve_inode(inode->i_sb);
2910                 if (ret)
2911                         goto out;
2912         }
2913
2914         dir->i_size += BOGO_DIRENT_SIZE;
2915         inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2916         inc_nlink(inode);
2917         ihold(inode);   /* New dentry reference */
2918         dget(dentry);           /* Extra pinning count for the created dentry */
2919         d_instantiate(dentry, inode);
2920 out:
2921         return ret;
2922 }
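
/*
 * Illustrative sketch (not part of the kernel build): the O_TMPFILE case
 * described above.  Linking the unnamed file through its /proc/self/fd
 * symlink gives a zero-nlink inode its first link, for which shmem_link()
 * deliberately skips shmem_reserve_inode().  Assumes /tmp is a tmpfs mount.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/tmp", O_TMPFILE | O_RDWR, 0600);
 *	char proc[64];
 *	snprintf(proc, sizeof(proc), "/proc/self/fd/%d", fd);
 *	linkat(AT_FDCWD, proc, AT_FDCWD, "/tmp/now-visible",
 *	       AT_SYMLINK_FOLLOW);
 */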
2923
2924 static int shmem_unlink(struct inode *dir, struct dentry *dentry)
2925 {
2926         struct inode *inode = d_inode(dentry);
2927
2928         if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
2929                 shmem_free_inode(inode->i_sb);
2930
2931         dir->i_size -= BOGO_DIRENT_SIZE;
2932         inode->i_ctime = dir->i_ctime = dir->i_mtime = current_time(inode);
2933         drop_nlink(inode);
2934         dput(dentry);   /* Undo the count from "create" - this does all the work */
2935         return 0;
2936 }
2937
2938 static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
2939 {
2940         if (!simple_empty(dentry))
2941                 return -ENOTEMPTY;
2942
2943         drop_nlink(d_inode(dentry));
2944         drop_nlink(dir);
2945         return shmem_unlink(dir, dentry);
2946 }
2947
2948 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2949 {
2950         bool old_is_dir = d_is_dir(old_dentry);
2951         bool new_is_dir = d_is_dir(new_dentry);
2952
2953         if (old_dir != new_dir && old_is_dir != new_is_dir) {
2954                 if (old_is_dir) {
2955                         drop_nlink(old_dir);
2956                         inc_nlink(new_dir);
2957                 } else {
2958                         drop_nlink(new_dir);
2959                         inc_nlink(old_dir);
2960                 }
2961         }
2962         old_dir->i_ctime = old_dir->i_mtime =
2963         new_dir->i_ctime = new_dir->i_mtime =
2964         d_inode(old_dentry)->i_ctime =
2965         d_inode(new_dentry)->i_ctime = current_time(old_dir);
2966
2967         return 0;
2968 }
2969
2970 static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
2971 {
2972         struct dentry *whiteout;
2973         int error;
2974
2975         whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
2976         if (!whiteout)
2977                 return -ENOMEM;
2978
2979         error = shmem_mknod(old_dir, whiteout,
2980                             S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
2981         dput(whiteout);
2982         if (error)
2983                 return error;
2984
2985         /*
2986          * Cheat and hash the whiteout while the old dentry is still in
2987          * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
2988          *
2989          * d_lookup() will consistently find one of them at this point,
2990          * not sure which one, but that isn't even important.
2991          */
2992         d_rehash(whiteout);
2993         return 0;
2994 }
2995
2996 /*
2997  * The VFS layer already does all the dentry stuff for rename,
2998  * we just have to decrement the usage count for the target if
2999  * it exists so that the VFS layer correctly free's it when it
3000  * gets overwritten.
3001  */
3002 static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags)
3003 {
3004         struct inode *inode = d_inode(old_dentry);
3005         int they_are_dirs = S_ISDIR(inode->i_mode);
3006
3007         if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3008                 return -EINVAL;
3009
3010         if (flags & RENAME_EXCHANGE)
3011                 return shmem_exchange(old_dir, old_dentry, new_dir, new_dentry);
3012
3013         if (!simple_empty(new_dentry))
3014                 return -ENOTEMPTY;
3015
3016         if (flags & RENAME_WHITEOUT) {
3017                 int error;
3018
3019                 error = shmem_whiteout(old_dir, old_dentry);
3020                 if (error)
3021                         return error;
3022         }
3023
3024         if (d_really_is_positive(new_dentry)) {
3025                 (void) shmem_unlink(new_dir, new_dentry);
3026                 if (they_are_dirs) {
3027                         drop_nlink(d_inode(new_dentry));
3028                         drop_nlink(old_dir);
3029                 }
3030         } else if (they_are_dirs) {
3031                 drop_nlink(old_dir);
3032                 inc_nlink(new_dir);
3033         }
3034
3035         old_dir->i_size -= BOGO_DIRENT_SIZE;
3036         new_dir->i_size += BOGO_DIRENT_SIZE;
3037         old_dir->i_ctime = old_dir->i_mtime =
3038         new_dir->i_ctime = new_dir->i_mtime =
3039         inode->i_ctime = current_time(old_dir);
3040         return 0;
3041 }
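
/*
 * Illustrative sketch (not part of the kernel build): the three rename
 * flavours accepted above, issued via renameat2(2) (wrapped by glibc 2.28+;
 * older releases need syscall(2)).
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	// atomically swap two entries, handled by shmem_exchange()
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "b", RENAME_EXCHANGE);
 *	// fail with -EEXIST instead of replacing an existing target
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "c", RENAME_NOREPLACE);
 *	// leave a whiteout at the source, handled by shmem_whiteout()
 *	renameat2(AT_FDCWD, "a", AT_FDCWD, "d", RENAME_WHITEOUT);
 */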
3042
3043 static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
3044 {
3045         int error;
3046         int len;
3047         struct inode *inode;
3048         struct page *page;
3049         struct shmem_inode_info *info;
3050
3051         len = strlen(symname) + 1;
3052         if (len > PAGE_SIZE)
3053                 return -ENAMETOOLONG;
3054
3055         inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
3056         if (!inode)
3057                 return -ENOSPC;
3058
3059         error = security_inode_init_security(inode, dir, &dentry->d_name,
3060                                              shmem_initxattrs, NULL);
3061         if (error) {
3062                 if (error != -EOPNOTSUPP) {
3063                         iput(inode);
3064                         return error;
3065                 }
3066                 error = 0;
3067         }
3068
3069         info = SHMEM_I(inode);
3070         inode->i_size = len-1;
3071         if (len <= SHORT_SYMLINK_LEN) {
3072                 inode->i_link = kmemdup(symname, len, GFP_KERNEL);
3073                 if (!inode->i_link) {
3074                         iput(inode);
3075                         return -ENOMEM;
3076                 }
3077                 inode->i_op = &shmem_short_symlink_operations;
3078         } else {
3079                 inode_nohighmem(inode);
3080                 error = shmem_getpage(inode, 0, &page, SGP_WRITE);
3081                 if (error) {
3082                         iput(inode);
3083                         return error;
3084                 }
3085                 inode->i_mapping->a_ops = &shmem_aops;
3086                 inode->i_op = &shmem_symlink_inode_operations;
3087                 memcpy(page_address(page), symname, len);
3088                 SetPageUptodate(page);
3089                 set_page_dirty(page);
3090                 unlock_page(page);
3091                 put_page(page);
3092         }
3093         dir->i_size += BOGO_DIRENT_SIZE;
3094         dir->i_ctime = dir->i_mtime = current_time(dir);
3095         d_instantiate(dentry, inode);
3096         dget(dentry);
3097         return 0;
3098 }
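
/*
 * Worked example of the split above: with SHORT_SYMLINK_LEN = 128, a
 * target such as "/usr/lib/firmware" (18 bytes including the NUL) is
 * kmemdup()'ed into inode->i_link and never touches the page cache,
 * while a 200-byte target is copied into page 0 of the mapping, marked
 * dirty, and may later be swapped out like any other tmpfs page.
 */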
3099
3100 static void shmem_put_link(void *arg)
3101 {
3102         mark_page_accessed(arg);
3103         put_page(arg);
3104 }
3105
3106 static const char *shmem_get_link(struct dentry *dentry,
3107                                   struct inode *inode,
3108                                   struct delayed_call *done)
3109 {
3110         struct page *page = NULL;
3111         int error;
3112         if (!dentry) {
3113                 page = find_get_page(inode->i_mapping, 0);
3114                 if (!page)
3115                         return ERR_PTR(-ECHILD);
3116                 if (!PageUptodate(page)) {
3117                         put_page(page);
3118                         return ERR_PTR(-ECHILD);
3119                 }
3120         } else {
3121                 error = shmem_getpage(inode, 0, &page, SGP_READ);
3122                 if (error)
3123                         return ERR_PTR(error);
3124                 unlock_page(page);
3125         }
3126         set_delayed_call(done, shmem_put_link, page);
3127         return page_address(page);
3128 }
3129
3130 #ifdef CONFIG_TMPFS_XATTR
3131 /*
3132  * Superblocks without xattr inode operations may get some security.* xattr
3133  * support from the LSM "for free". As soon as we have any other xattrs
3134  * like ACLs, we also need to implement the security.* handlers at
3135  * filesystem level, though.
3136  */
3137
3138 /*
3139  * Callback for security_inode_init_security() for acquiring xattrs.
3140  */
3141 static int shmem_initxattrs(struct inode *inode,
3142                             const struct xattr *xattr_array,
3143                             void *fs_info)
3144 {
3145         struct shmem_inode_info *info = SHMEM_I(inode);
3146         const struct xattr *xattr;
3147         struct simple_xattr *new_xattr;
3148         size_t len;
3149
3150         for (xattr = xattr_array; xattr->name != NULL; xattr++) {
3151                 new_xattr = simple_xattr_alloc(xattr->value, xattr->value_len);
3152                 if (!new_xattr)
3153                         return -ENOMEM;
3154
3155                 len = strlen(xattr->name) + 1;
3156                 new_xattr->name = kmalloc(XATTR_SECURITY_PREFIX_LEN + len,
3157                                           GFP_KERNEL);
3158                 if (!new_xattr->name) {
3159                         kfree(new_xattr);
3160                         return -ENOMEM;
3161                 }
3162
3163                 memcpy(new_xattr->name, XATTR_SECURITY_PREFIX,
3164                        XATTR_SECURITY_PREFIX_LEN);
3165                 memcpy(new_xattr->name + XATTR_SECURITY_PREFIX_LEN,
3166                        xattr->name, len);
3167
3168                 simple_xattr_list_add(&info->xattrs, new_xattr);
3169         }
3170
3171         return 0;
3172 }
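
/*
 * Illustrative note: the callback above re-prefixes every LSM-supplied
 * suffix, so an xattr handed in with name "selinux" is stored and later
 * listed as "security.selinux".  A hypothetical userspace view, assuming
 * an SELinux-labelled tmpfs file:
 *
 *	#include <sys/xattr.h>
 *
 *	char label[256];
 *	getxattr("/dev/shm/file", "security.selinux", label, sizeof(label));
 */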
3173
3174 static int shmem_xattr_handler_get(const struct xattr_handler *handler,
3175                                    struct dentry *unused, struct inode *inode,
3176                                    const char *name, void *buffer, size_t size)
3177 {
3178         struct shmem_inode_info *info = SHMEM_I(inode);
3179
3180         name = xattr_full_name(handler, name);
3181         return simple_xattr_get(&info->xattrs, name, buffer, size);
3182 }
3183
3184 static int shmem_xattr_handler_set(const struct xattr_handler *handler,
3185                                    struct dentry *unused, struct inode *inode,
3186                                    const char *name, const void *value,
3187                                    size_t size, int flags)
3188 {
3189         struct shmem_inode_info *info = SHMEM_I(inode);
3190
3191         name = xattr_full_name(handler, name);
3192         return simple_xattr_set(&info->xattrs, name, value, size, flags);
3193 }
3194
3195 static const struct xattr_handler shmem_security_xattr_handler = {
3196         .prefix = XATTR_SECURITY_PREFIX,
3197         .get = shmem_xattr_handler_get,
3198         .set = shmem_xattr_handler_set,
3199 };
3200
3201 static const struct xattr_handler shmem_trusted_xattr_handler = {
3202         .prefix = XATTR_TRUSTED_PREFIX,
3203         .get = shmem_xattr_handler_get,
3204         .set = shmem_xattr_handler_set,
3205 };
3206
3207 static const struct xattr_handler *shmem_xattr_handlers[] = {
3208 #ifdef CONFIG_TMPFS_POSIX_ACL
3209         &posix_acl_access_xattr_handler,
3210         &posix_acl_default_xattr_handler,
3211 #endif
3212         &shmem_security_xattr_handler,
3213         &shmem_trusted_xattr_handler,
3214         NULL
3215 };
3216
3217 static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
3218 {
3219         struct shmem_inode_info *info = SHMEM_I(d_inode(dentry));
3220         return simple_xattr_list(d_inode(dentry), &info->xattrs, buffer, size);
3221 }
3222 #endif /* CONFIG_TMPFS_XATTR */
3223
3224 static const struct inode_operations shmem_short_symlink_operations = {
3225         .readlink       = generic_readlink,
3226         .get_link       = simple_get_link,
3227 #ifdef CONFIG_TMPFS_XATTR
3228         .listxattr      = shmem_listxattr,
3229 #endif
3230 };
3231
3232 static const struct inode_operations shmem_symlink_inode_operations = {
3233         .readlink       = generic_readlink,
3234         .get_link       = shmem_get_link,
3235 #ifdef CONFIG_TMPFS_XATTR
3236         .listxattr      = shmem_listxattr,
3237 #endif
3238 };
3239
3240 static struct dentry *shmem_get_parent(struct dentry *child)
3241 {
3242         return ERR_PTR(-ESTALE);
3243 }
3244
3245 static int shmem_match(struct inode *ino, void *vfh)
3246 {
3247         __u32 *fh = vfh;
3248         __u64 inum = fh[2];
3249         inum = (inum << 32) | fh[1];
3250         return ino->i_ino == inum && fh[0] == ino->i_generation;
3251 }
3252
3253 static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
3254                 struct fid *fid, int fh_len, int fh_type)
3255 {
3256         struct inode *inode;
3257         struct dentry *dentry = NULL;
3258         u64 inum;
3259
3260         if (fh_len < 3)
3261                 return NULL;
3262
3263         inum = fid->raw[2];
3264         inum = (inum << 32) | fid->raw[1];
3265
3266         inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
3267                         shmem_match, fid->raw);
3268         if (inode) {
3269                 dentry = d_find_alias(inode);
3270                 iput(inode);
3271         }
3272
3273         return dentry;
3274 }
3275
3276 static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
3277                                 struct inode *parent)
3278 {
3279         if (*len < 3) {
3280                 *len = 3;
3281                 return FILEID_INVALID;
3282         }
3283
3284         if (inode_unhashed(inode)) {
3285                 /* Unfortunately insert_inode_hash is not idempotent,
3286                  * so as we hash inodes here rather than at creation
3287                  * time, we need a lock to ensure we only try
3288                  * to do it once
3289                  */
3290                 static DEFINE_SPINLOCK(lock);
3291                 spin_lock(&lock);
3292                 if (inode_unhashed(inode))
3293                         __insert_inode_hash(inode,
3294                                             inode->i_ino + inode->i_generation);
3295                 spin_unlock(&lock);
3296         }
3297
3298         fh[0] = inode->i_generation;
3299         fh[1] = inode->i_ino;
3300         fh[2] = ((__u64)inode->i_ino) >> 32;
3301
3302         *len = 3;
3303         return 1;
3304 }
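
/*
 * Handle layout used by the two helpers above: a 64-bit inode number and
 * the generation packed into three __u32 words,
 *
 *	fh[0] = i_generation
 *	fh[1] = i_ino & 0xffffffff	(low word)
 *	fh[2] = i_ino >> 32		(high word)
 *
 * shmem_fh_to_dentry() reverses this as inum = ((u64)fh[2] << 32) | fh[1],
 * and ilookup5() hashes on inum + fh[0], matching the i_ino + i_generation
 * value passed to __insert_inode_hash() above.
 */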
3305
3306 static const struct export_operations shmem_export_ops = {
3307         .get_parent     = shmem_get_parent,
3308         .encode_fh      = shmem_encode_fh,
3309         .fh_to_dentry   = shmem_fh_to_dentry,
3310 };
3311
3312 static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
3313                                bool remount)
3314 {
3315         char *this_char, *value, *rest;
3316         struct mempolicy *mpol = NULL;
3317         uid_t uid;
3318         gid_t gid;
3319
3320         while (options != NULL) {
3321                 this_char = options;
3322                 for (;;) {
3323                         /*
3324                          * NUL-terminate this option: unfortunately,
3325                          * mount options form a comma-separated list,
3326                          * but mpol's nodelist may also contain commas.
3327                          */
3328                         options = strchr(options, ',');
3329                         if (options == NULL)
3330                                 break;
3331                         options++;
3332                         if (!isdigit(*options)) {
3333                                 options[-1] = '\0';
3334                                 break;
3335                         }
3336                 }
3337                 if (!*this_char)
3338                         continue;
3339                 if ((value = strchr(this_char,'=')) != NULL) {
3340                         *value++ = 0;
3341                 } else {
3342                         pr_err("tmpfs: No value for mount option '%s'\n",
3343                                this_char);
3344                         goto error;
3345                 }
3346
3347                 if (!strcmp(this_char,"size")) {
3348                         unsigned long long size;
3349                         size = memparse(value,&rest);
3350                         if (*rest == '%') {
3351                                 size <<= PAGE_SHIFT;
3352                                 size *= totalram_pages;
3353                                 do_div(size, 100);
3354                                 rest++;
3355                         }
3356                         if (*rest)
3357                                 goto bad_val;
3358                         sbinfo->max_blocks =
3359                                 DIV_ROUND_UP(size, PAGE_SIZE);
3360                 } else if (!strcmp(this_char,"nr_blocks")) {
3361                         sbinfo->max_blocks = memparse(value, &rest);
3362                         if (*rest)
3363                                 goto bad_val;
3364                 } else if (!strcmp(this_char,"nr_inodes")) {
3365                         sbinfo->max_inodes = memparse(value, &rest);
3366                         if (*rest)
3367                                 goto bad_val;
3368                 } else if (!strcmp(this_char,"mode")) {
3369                         if (remount)
3370                                 continue;
3371                         sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
3372                         if (*rest)
3373                                 goto bad_val;
3374                 } else if (!strcmp(this_char,"uid")) {
3375                         if (remount)
3376                                 continue;
3377                         uid = simple_strtoul(value, &rest, 0);
3378                         if (*rest)
3379                                 goto bad_val;
3380                         sbinfo->uid = make_kuid(current_user_ns(), uid);
3381                         if (!uid_valid(sbinfo->uid))
3382                                 goto bad_val;
3383                 } else if (!strcmp(this_char,"gid")) {
3384                         if (remount)
3385                                 continue;
3386                         gid = simple_strtoul(value, &rest, 0);
3387                         if (*rest)
3388                                 goto bad_val;
3389                         sbinfo->gid = make_kgid(current_user_ns(), gid);
3390                         if (!gid_valid(sbinfo->gid))
3391                                 goto bad_val;
3392 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3393                 } else if (!strcmp(this_char, "huge")) {
3394                         int huge;
3395                         huge = shmem_parse_huge(value);
3396                         if (huge < 0)
3397                                 goto bad_val;
3398                         if (!has_transparent_hugepage() &&
3399                                         huge != SHMEM_HUGE_NEVER)
3400                                 goto bad_val;
3401                         sbinfo->huge = huge;
3402 #endif
3403 #ifdef CONFIG_NUMA
3404                 } else if (!strcmp(this_char,"mpol")) {
3405                         mpol_put(mpol);
3406                         mpol = NULL;
3407                         if (mpol_parse_str(value, &mpol))
3408                                 goto bad_val;
3409 #endif
3410                 } else {
3411                         pr_err("tmpfs: Bad mount option %s\n", this_char);
3412                         goto error;
3413                 }
3414         }
3415         sbinfo->mpol = mpol;
3416         return 0;
3417
3418 bad_val:
3419         pr_err("tmpfs: Bad value '%s' for mount option '%s'\n",
3420                value, this_char);
3421 error:
3422         mpol_put(mpol);
3423         return 1;
3425 }
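
/*
 * Illustrative sketch (not part of the kernel build): option strings the
 * parser above accepts, as passed through mount(2).  "size" takes the
 * k/m/g suffixes of memparse() or a trailing '%' of totalram_pages.
 *
 *	#include <sys/mount.h>
 *
 *	mount("tmpfs", "/mnt/t", "tmpfs", 0,
 *	      "size=50%,nr_inodes=10240,mode=1777,uid=1000,gid=1000");
 */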
3426
3427 static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
3428 {
3429         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3430         struct shmem_sb_info config = *sbinfo;
3431         unsigned long inodes;
3432         int error = -EINVAL;
3433
3434         config.mpol = NULL;
3435         if (shmem_parse_options(data, &config, true))
3436                 return error;
3437
3438         spin_lock(&sbinfo->stat_lock);
3439         inodes = sbinfo->max_inodes - sbinfo->free_inodes;
3440         if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
3441                 goto out;
3442         if (config.max_inodes < inodes)
3443                 goto out;
3444         /*
3445          * Those tests disallow limited->unlimited while any are in use;
3446          * but we must separately disallow unlimited->limited, because
3447          * in that case we have no record of how much is already in use.
3448          */
3449         if (config.max_blocks && !sbinfo->max_blocks)
3450                 goto out;
3451         if (config.max_inodes && !sbinfo->max_inodes)
3452                 goto out;
3453
3454         error = 0;
3455         sbinfo->huge = config.huge;
3456         sbinfo->max_blocks  = config.max_blocks;
3457         sbinfo->max_inodes  = config.max_inodes;
3458         sbinfo->free_inodes = config.max_inodes - inodes;
3459
3460         /*
3461          * Preserve previous mempolicy unless mpol remount option was specified.
3462          */
3463         if (config.mpol) {
3464                 mpol_put(sbinfo->mpol);
3465                 sbinfo->mpol = config.mpol;     /* transfers initial ref */
3466         }
3467 out:
3468         spin_unlock(&sbinfo->stat_lock);
3469         return error;
3470 }
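
/*
 * Illustrative sketch: a remount that shrinks or grows the limits, which
 * the checks above allow only while staying limited (or unlimited):
 *
 *	#include <sys/mount.h>
 *
 *	mount("tmpfs", "/mnt/t", "tmpfs", MS_REMOUNT, "size=2g");
 */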
3471
3472 static int shmem_show_options(struct seq_file *seq, struct dentry *root)
3473 {
3474         struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb);
3475
3476         if (sbinfo->max_blocks != shmem_default_max_blocks())
3477                 seq_printf(seq, ",size=%luk",
3478                         sbinfo->max_blocks << (PAGE_SHIFT - 10));
3479         if (sbinfo->max_inodes != shmem_default_max_inodes())
3480                 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
3481         if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
3482                 seq_printf(seq, ",mode=%03ho", sbinfo->mode);
3483         if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
3484                 seq_printf(seq, ",uid=%u",
3485                                 from_kuid_munged(&init_user_ns, sbinfo->uid));
3486         if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
3487                 seq_printf(seq, ",gid=%u",
3488                                 from_kgid_munged(&init_user_ns, sbinfo->gid));
3489 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3490         /* Rightly or wrongly, show huge mount option unmasked by shmem_huge */
3491         if (sbinfo->huge)
3492                 seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge));
3493 #endif
3494         shmem_show_mpol(seq, sbinfo->mpol);
3495         return 0;
3496 }
3497
3498 #define MFD_NAME_PREFIX "memfd:"
3499 #define MFD_NAME_PREFIX_LEN (sizeof(MFD_NAME_PREFIX) - 1)
3500 #define MFD_NAME_MAX_LEN (NAME_MAX - MFD_NAME_PREFIX_LEN)
3501
3502 #define MFD_ALL_FLAGS (MFD_CLOEXEC | MFD_ALLOW_SEALING)
3503
3504 SYSCALL_DEFINE2(memfd_create,
3505                 const char __user *, uname,
3506                 unsigned int, flags)
3507 {
3508         struct shmem_inode_info *info;
3509         struct file *file;
3510         int fd, error;
3511         char *name;
3512         long len;
3513
3514         if (flags & ~(unsigned int)MFD_ALL_FLAGS)
3515                 return -EINVAL;
3516
3517         /* length includes terminating zero */
3518         len = strnlen_user(uname, MFD_NAME_MAX_LEN + 1);
3519         if (len <= 0)
3520                 return -EFAULT;
3521         if (len > MFD_NAME_MAX_LEN + 1)
3522                 return -EINVAL;
3523
3524         name = kmalloc(len + MFD_NAME_PREFIX_LEN, GFP_TEMPORARY);
3525         if (!name)
3526                 return -ENOMEM;
3527
3528         strcpy(name, MFD_NAME_PREFIX);
3529         if (copy_from_user(&name[MFD_NAME_PREFIX_LEN], uname, len)) {
3530                 error = -EFAULT;
3531                 goto err_name;
3532         }
3533
3534         /* terminating-zero may have changed after strnlen_user() returned */
3535         if (name[len + MFD_NAME_PREFIX_LEN - 1]) {
3536                 error = -EFAULT;
3537                 goto err_name;
3538         }
3539
3540         fd = get_unused_fd_flags((flags & MFD_CLOEXEC) ? O_CLOEXEC : 0);
3541         if (fd < 0) {
3542                 error = fd;
3543                 goto err_name;
3544         }
3545
3546         file = shmem_file_setup(name, 0, VM_NORESERVE);
3547         if (IS_ERR(file)) {
3548                 error = PTR_ERR(file);
3549                 goto err_fd;
3550         }
3551         info = SHMEM_I(file_inode(file));
3552         file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
3553         file->f_flags |= O_RDWR | O_LARGEFILE;
3554         if (flags & MFD_ALLOW_SEALING)
3555                 info->seals &= ~F_SEAL_SEAL;
3556
3557         fd_install(fd, file);
3558         kfree(name);
3559         return fd;
3560
3561 err_fd:
3562         put_unused_fd(fd);
3563 err_name:
3564         kfree(name);
3565         return error;
3566 }
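
/*
 * Illustrative sketch (not part of the kernel build): typical use of the
 * syscall above, assuming a glibc new enough (2.27+) to expose
 * memfd_create() and the sealing constants.
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int fd = memfd_create("example", MFD_CLOEXEC | MFD_ALLOW_SEALING);
 *	ftruncate(fd, 4096);
 *	// freeze size and contents before handing the fd to a peer
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE);
 *	// the mapping shows up as "/memfd:example" in /proc/<pid>/maps
 */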
3567
3568 #endif /* CONFIG_TMPFS */
3569
3570 static void shmem_put_super(struct super_block *sb)
3571 {
3572         struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
3573
3574         percpu_counter_destroy(&sbinfo->used_blocks);
3575         mpol_put(sbinfo->mpol);
3576         kfree(sbinfo);
3577         sb->s_fs_info = NULL;
3578 }
3579
3580 int shmem_fill_super(struct super_block *sb, void *data, int silent)
3581 {
3582         struct inode *inode;
3583         struct shmem_sb_info *sbinfo;
3584         int err = -ENOMEM;
3585
3586         /* Round up to L1_CACHE_BYTES to resist false sharing */
3587         sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
3588                                 L1_CACHE_BYTES), GFP_KERNEL);
3589         if (!sbinfo)
3590                 return -ENOMEM;
3591
3592         sbinfo->mode = S_IRWXUGO | S_ISVTX;
3593         sbinfo->uid = current_fsuid();
3594         sbinfo->gid = current_fsgid();
3595         sb->s_fs_info = sbinfo;
3596
3597 #ifdef CONFIG_TMPFS
3598         /*
3599          * By default we only allow half of the physical RAM per
3600          * tmpfs instance, limiting inodes to one per page of lowmem;
3601          * but the internal instance is left unlimited.
3602          */
3603         if (!(sb->s_flags & MS_KERNMOUNT)) {
3604                 sbinfo->max_blocks = shmem_default_max_blocks();
3605                 sbinfo->max_inodes = shmem_default_max_inodes();
3606                 if (shmem_parse_options(data, sbinfo, false)) {
3607                         err = -EINVAL;
3608                         goto failed;
3609                 }
3610         } else {
3611                 sb->s_flags |= MS_NOUSER;
3612         }
3613         sb->s_export_op = &shmem_export_ops;
3614         sb->s_flags |= MS_NOSEC;
3615 #else
3616         sb->s_flags |= MS_NOUSER;
3617 #endif
3618
3619         spin_lock_init(&sbinfo->stat_lock);
3620         if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
3621                 goto failed;
3622         sbinfo->free_inodes = sbinfo->max_inodes;
3623         spin_lock_init(&sbinfo->shrinklist_lock);
3624         INIT_LIST_HEAD(&sbinfo->shrinklist);
3625
3626         sb->s_maxbytes = MAX_LFS_FILESIZE;
3627         sb->s_blocksize = PAGE_SIZE;
3628         sb->s_blocksize_bits = PAGE_SHIFT;
3629         sb->s_magic = TMPFS_MAGIC;
3630         sb->s_op = &shmem_ops;
3631         sb->s_time_gran = 1;
3632 #ifdef CONFIG_TMPFS_XATTR
3633         sb->s_xattr = shmem_xattr_handlers;
3634 #endif
3635 #ifdef CONFIG_TMPFS_POSIX_ACL
3636         sb->s_flags |= MS_POSIXACL;
3637 #endif
3638
3639         inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
3640         if (!inode)
3641                 goto failed;
3642         inode->i_uid = sbinfo->uid;
3643         inode->i_gid = sbinfo->gid;
3644         sb->s_root = d_make_root(inode);
3645         if (!sb->s_root)
3646                 goto failed;
3647         return 0;
3648
3649 failed:
3650         shmem_put_super(sb);
3651         return err;
3652 }
3653
3654 static struct kmem_cache *shmem_inode_cachep;
3655
3656 static struct inode *shmem_alloc_inode(struct super_block *sb)
3657 {
3658         struct shmem_inode_info *info;
3659         info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
3660         if (!info)
3661                 return NULL;
3662         return &info->vfs_inode;
3663 }
3664
3665 static void shmem_destroy_callback(struct rcu_head *head)
3666 {
3667         struct inode *inode = container_of(head, struct inode, i_rcu);
3668         if (S_ISLNK(inode->i_mode))
3669                 kfree(inode->i_link);
3670         kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
3671 }
3672
3673 static void shmem_destroy_inode(struct inode *inode)
3674 {
3675         if (S_ISREG(inode->i_mode))
3676                 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
3677         call_rcu(&inode->i_rcu, shmem_destroy_callback);
3678 }
3679
3680 static void shmem_init_inode(void *foo)
3681 {
3682         struct shmem_inode_info *info = foo;
3683         inode_init_once(&info->vfs_inode);
3684 }
3685
3686 static int shmem_init_inodecache(void)
3687 {
3688         shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
3689                                 sizeof(struct shmem_inode_info),
3690                                 0, SLAB_PANIC|SLAB_ACCOUNT, shmem_init_inode);
3691         return 0;
3692 }
3693
3694 static void shmem_destroy_inodecache(void)
3695 {
3696         kmem_cache_destroy(shmem_inode_cachep);
3697 }
3698
3699 static const struct address_space_operations shmem_aops = {
3700         .writepage      = shmem_writepage,
3701         .set_page_dirty = __set_page_dirty_no_writeback,
3702 #ifdef CONFIG_TMPFS
3703         .write_begin    = shmem_write_begin,
3704         .write_end      = shmem_write_end,
3705 #endif
3706 #ifdef CONFIG_MIGRATION
3707         .migratepage    = migrate_page,
3708 #endif
3709         .error_remove_page = generic_error_remove_page,
3710 };
3711
3712 static const struct file_operations shmem_file_operations = {
3713         .mmap           = shmem_mmap,
3714         .get_unmapped_area = shmem_get_unmapped_area,
3715 #ifdef CONFIG_TMPFS
3716         .llseek         = shmem_file_llseek,
3717         .read_iter      = shmem_file_read_iter,
3718         .write_iter     = generic_file_write_iter,
3719         .fsync          = noop_fsync,
3720         .splice_read    = generic_file_splice_read,
3721         .splice_write   = iter_file_splice_write,
3722         .fallocate      = shmem_fallocate,
3723 #endif
3724 };
3725
3726 static const struct inode_operations shmem_inode_operations = {
3727         .getattr        = shmem_getattr,
3728         .setattr        = shmem_setattr,
3729 #ifdef CONFIG_TMPFS_XATTR
3730         .listxattr      = shmem_listxattr,
3731         .set_acl        = simple_set_acl,
3732 #endif
3733 };
3734
3735 static const struct inode_operations shmem_dir_inode_operations = {
3736 #ifdef CONFIG_TMPFS
3737         .create         = shmem_create,
3738         .lookup         = simple_lookup,
3739         .link           = shmem_link,
3740         .unlink         = shmem_unlink,
3741         .symlink        = shmem_symlink,
3742         .mkdir          = shmem_mkdir,
3743         .rmdir          = shmem_rmdir,
3744         .mknod          = shmem_mknod,
3745         .rename         = shmem_rename2,
3746         .tmpfile        = shmem_tmpfile,
3747 #endif
3748 #ifdef CONFIG_TMPFS_XATTR
3749         .listxattr      = shmem_listxattr,
3750 #endif
3751 #ifdef CONFIG_TMPFS_POSIX_ACL
3752         .setattr        = shmem_setattr,
3753         .set_acl        = simple_set_acl,
3754 #endif
3755 };
3756
3757 static const struct inode_operations shmem_special_inode_operations = {
3758 #ifdef CONFIG_TMPFS_XATTR
3759         .listxattr      = shmem_listxattr,
3760 #endif
3761 #ifdef CONFIG_TMPFS_POSIX_ACL
3762         .setattr        = shmem_setattr,
3763         .set_acl        = simple_set_acl,
3764 #endif
3765 };
3766
3767 static const struct super_operations shmem_ops = {
3768         .alloc_inode    = shmem_alloc_inode,
3769         .destroy_inode  = shmem_destroy_inode,
3770 #ifdef CONFIG_TMPFS
3771         .statfs         = shmem_statfs,
3772         .remount_fs     = shmem_remount_fs,
3773         .show_options   = shmem_show_options,
3774 #endif
3775         .evict_inode    = shmem_evict_inode,
3776         .drop_inode     = generic_delete_inode,
3777         .put_super      = shmem_put_super,
3778 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3779         .nr_cached_objects      = shmem_unused_huge_count,
3780         .free_cached_objects    = shmem_unused_huge_scan,
3781 #endif
3782 };
3783
3784 static const struct vm_operations_struct shmem_vm_ops = {
3785         .fault          = shmem_fault,
3786         .map_pages      = filemap_map_pages,
3787 #ifdef CONFIG_NUMA
3788         .set_policy     = shmem_set_policy,
3789         .get_policy     = shmem_get_policy,
3790 #endif
3791 };
3792
3793 static struct dentry *shmem_mount(struct file_system_type *fs_type,
3794         int flags, const char *dev_name, void *data)
3795 {
3796         return mount_nodev(fs_type, flags, data, shmem_fill_super);
3797 }
3798
3799 static struct file_system_type shmem_fs_type = {
3800         .owner          = THIS_MODULE,
3801         .name           = "tmpfs",
3802         .mount          = shmem_mount,
3803         .kill_sb        = kill_litter_super,
3804         .fs_flags       = FS_USERNS_MOUNT,
3805 };
3806
3807 int __init shmem_init(void)
3808 {
3809         int error;
3810
3811         /* If rootfs called this, don't re-init */
3812         if (shmem_inode_cachep)
3813                 return 0;
3814
3815         error = shmem_init_inodecache();
3816         if (error)
3817                 goto out3;
3818
3819         error = register_filesystem(&shmem_fs_type);
3820         if (error) {
3821                 pr_err("Could not register tmpfs\n");
3822                 goto out2;
3823         }
3824
3825         shm_mnt = kern_mount(&shmem_fs_type);
3826         if (IS_ERR(shm_mnt)) {
3827                 error = PTR_ERR(shm_mnt);
3828                 pr_err("Could not kern_mount tmpfs\n");
3829                 goto out1;
3830         }
3831
3832 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3833         if (has_transparent_hugepage() && shmem_huge > SHMEM_HUGE_DENY)
3834                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3835         else
3836                 shmem_huge = 0; /* just in case it was patched */
3837 #endif
3838         return 0;
3839
3840 out1:
3841         unregister_filesystem(&shmem_fs_type);
3842 out2:
3843         shmem_destroy_inodecache();
3844 out3:
3845         shm_mnt = ERR_PTR(error);
3846         return error;
3847 }
3848
3849 #if defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE) && defined(CONFIG_SYSFS)
3850 static ssize_t shmem_enabled_show(struct kobject *kobj,
3851                 struct kobj_attribute *attr, char *buf)
3852 {
3853         int values[] = {
3854                 SHMEM_HUGE_ALWAYS,
3855                 SHMEM_HUGE_WITHIN_SIZE,
3856                 SHMEM_HUGE_ADVISE,
3857                 SHMEM_HUGE_NEVER,
3858                 SHMEM_HUGE_DENY,
3859                 SHMEM_HUGE_FORCE,
3860         };
3861         int i, count;
3862
3863         for (i = 0, count = 0; i < ARRAY_SIZE(values); i++) {
3864                 const char *fmt = shmem_huge == values[i] ? "[%s] " : "%s ";
3865
3866                 count += sprintf(buf + count, fmt,
3867                                 shmem_format_huge(values[i]));
3868         }
3869         buf[count - 1] = '\n';
3870         return count;
3871 }
3872
3873 static ssize_t shmem_enabled_store(struct kobject *kobj,
3874                 struct kobj_attribute *attr, const char *buf, size_t count)
3875 {
3876         char tmp[16];
3877         int huge;
3878
3879         if (count + 1 > sizeof(tmp))
3880                 return -EINVAL;
3881         memcpy(tmp, buf, count);
3882         tmp[count] = '\0';
3883         if (count && tmp[count - 1] == '\n')
3884                 tmp[count - 1] = '\0';
3885
3886         huge = shmem_parse_huge(tmp);
3887         if (huge == -EINVAL)
3888                 return -EINVAL;
3889         if (!has_transparent_hugepage() &&
3890                         huge != SHMEM_HUGE_NEVER && huge != SHMEM_HUGE_DENY)
3891                 return -EINVAL;
3892
3893         shmem_huge = huge;
3894         if (shmem_huge > SHMEM_HUGE_DENY)
3895                 SHMEM_SB(shm_mnt->mnt_sb)->huge = shmem_huge;
3896         return count;
3897 }
3898
3899 struct kobj_attribute shmem_enabled_attr =
3900         __ATTR(shmem_enabled, 0644, shmem_enabled_show, shmem_enabled_store);
3901 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE && CONFIG_SYSFS */
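
/*
 * The attribute defined above is hooked into sysfs (from mm/huge_memory.c)
 * as /sys/kernel/mm/transparent_hugepage/shmem_enabled.  Reading it yields
 * one line such as
 *
 *	always within_size advise [never] deny force
 *
 * with the current setting bracketed; writing any one of those words (with
 * or without a trailing newline) updates shmem_huge via the store hook.
 */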
3902
3903 #ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
3904 bool shmem_huge_enabled(struct vm_area_struct *vma)
3905 {
3906         struct inode *inode = file_inode(vma->vm_file);
3907         struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
3908         loff_t i_size;
3909         pgoff_t off;
3910
3911         if (shmem_huge == SHMEM_HUGE_FORCE)
3912                 return true;
3913         if (shmem_huge == SHMEM_HUGE_DENY)
3914                 return false;
3915         switch (sbinfo->huge) {
3916                 case SHMEM_HUGE_NEVER:
3917                         return false;
3918                 case SHMEM_HUGE_ALWAYS:
3919                         return true;
3920                 case SHMEM_HUGE_WITHIN_SIZE:
3921                         off = round_up(vma->vm_pgoff, HPAGE_PMD_NR);
3922                         i_size = round_up(i_size_read(inode), PAGE_SIZE);
3923                         if (i_size >= HPAGE_PMD_SIZE &&
3924                                         i_size >> PAGE_SHIFT >= off)
3925                                 return true;
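                        /* fall through */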
3926                 case SHMEM_HUGE_ADVISE:
3927                         /* TODO: implement fadvise() hints */
3928                         return (vma->vm_flags & VM_HUGEPAGE);
3929                 default:
3930                         VM_BUG_ON(1);
3931                         return false;
3932         }
3933 }
3934 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
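
/*
 * Worked example of the SHMEM_HUGE_WITHIN_SIZE test above, assuming 4KiB
 * pages and 2MiB PMD huge pages (HPAGE_PMD_NR == 512): for a fault at
 * vm_pgoff 0 on a file with i_size = 3MiB, off = 0 and
 * i_size >> PAGE_SHIFT = 768 >= 0, so a huge page is used.  With
 * i_size = 1MiB the i_size >= HPAGE_PMD_SIZE test fails and control falls
 * through to the SHMEM_HUGE_ADVISE check.
 */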
3935
3936 #else /* !CONFIG_SHMEM */
3937
3938 /*
3939  * tiny-shmem: simple shmemfs and tmpfs using ramfs code
3940  *
3941  * This is intended for small systems where the benefits of the full
3942  * shmem code (swap-backed and resource-limited) are outweighed by
3943  * their complexity. On systems without swap this code should be
3944  * effectively equivalent, but much lighter weight.
3945  */
3946
3947 static struct file_system_type shmem_fs_type = {
3948         .name           = "tmpfs",
3949         .mount          = ramfs_mount,
3950         .kill_sb        = kill_litter_super,
3951         .fs_flags       = FS_USERNS_MOUNT,
3952 };
3953
3954 int __init shmem_init(void)
3955 {
3956         BUG_ON(register_filesystem(&shmem_fs_type) != 0);
3957
3958         shm_mnt = kern_mount(&shmem_fs_type);
3959         BUG_ON(IS_ERR(shm_mnt));
3960
3961         return 0;
3962 }
3963
3964 int shmem_unuse(swp_entry_t swap, struct page *page)
3965 {
3966         return 0;
3967 }
3968
3969 int shmem_lock(struct file *file, int lock, struct user_struct *user)
3970 {
3971         return 0;
3972 }
3973
3974 void shmem_unlock_mapping(struct address_space *mapping)
3975 {
3976 }
3977
3978 #ifdef CONFIG_MMU
3979 unsigned long shmem_get_unmapped_area(struct file *file,
3980                                       unsigned long addr, unsigned long len,
3981                                       unsigned long pgoff, unsigned long flags)
3982 {
3983         return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
3984 }
3985 #endif
3986
3987 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
3988 {
3989         truncate_inode_pages_range(inode->i_mapping, lstart, lend);
3990 }
3991 EXPORT_SYMBOL_GPL(shmem_truncate_range);
3992
3993 #define shmem_vm_ops                            generic_file_vm_ops
3994 #define shmem_file_operations                   ramfs_file_operations
3995 #define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
3996 #define shmem_acct_size(flags, size)            0
3997 #define shmem_unacct_size(flags, size)          do {} while (0)
3998
3999 #endif /* CONFIG_SHMEM */
4000
4001 /* common code */
4002
4003 static const struct dentry_operations anon_ops = {
4004         .d_dname = simple_dname
4005 };
4006
4007 static struct file *__shmem_file_setup(const char *name, loff_t size,
4008                                        unsigned long flags, unsigned int i_flags)
4009 {
4010         struct file *res;
4011         struct inode *inode;
4012         struct path path;
4013         struct super_block *sb;
4014         struct qstr this;
4015
4016         if (IS_ERR(shm_mnt))
4017                 return ERR_CAST(shm_mnt);
4018
4019         if (size < 0 || size > MAX_LFS_FILESIZE)
4020                 return ERR_PTR(-EINVAL);
4021
4022         if (shmem_acct_size(flags, size))
4023                 return ERR_PTR(-ENOMEM);
4024
4025         res = ERR_PTR(-ENOMEM);
4026         this.name = name;
4027         this.len = strlen(name);
4028         this.hash = 0; /* will go */
4029         sb = shm_mnt->mnt_sb;
4030         path.mnt = mntget(shm_mnt);
4031         path.dentry = d_alloc_pseudo(sb, &this);
4032         if (!path.dentry)
4033                 goto put_memory;
4034         d_set_d_op(path.dentry, &anon_ops);
4035
4036         res = ERR_PTR(-ENOSPC);
4037         inode = shmem_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
4038         if (!inode)
4039                 goto put_memory;
4040
4041         inode->i_flags |= i_flags;
4042         d_instantiate(path.dentry, inode);
4043         inode->i_size = size;
4044         clear_nlink(inode);     /* It is unlinked */
4045         res = ERR_PTR(ramfs_nommu_expand_for_mapping(inode, size));
4046         if (IS_ERR(res))
4047                 goto put_path;
4048
4049         res = alloc_file(&path, FMODE_WRITE | FMODE_READ,
4050                   &shmem_file_operations);
4051         if (IS_ERR(res))
4052                 goto put_path;
4053
4054         return res;
4055
4056 put_memory:
4057         shmem_unacct_size(flags, size);
4058 put_path:
4059         path_put(&path);
4060         return res;
4061 }
4062
4063 /**
4064  * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
4065  *      kernel internal.  There will be NO LSM permission checks against the
4066  *      underlying inode.  So users of this interface must do LSM checks at a
4067  *      higher layer.  The users are the big_key and shm implementations.  LSM
4068  *      checks are provided at the key or shm level rather than the inode.
4069  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4070  * @size: size to be set for the file
4071  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4072  */
4073 struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
4074 {
4075         return __shmem_file_setup(name, size, flags, S_PRIVATE);
4076 }
4077
4078 /**
4079  * shmem_file_setup - get an unlinked file living in tmpfs
4080  * @name: name for dentry (to be seen in /proc/<pid>/maps)
4081  * @size: size to be set for the file
4082  * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
4083  */
4084 struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
4085 {
4086         return __shmem_file_setup(name, size, flags, 0);
4087 }
4088 EXPORT_SYMBOL_GPL(shmem_file_setup);
4089
4090 /**
4091  * shmem_zero_setup - setup a shared anonymous mapping
4092  * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
4093  */
4094 int shmem_zero_setup(struct vm_area_struct *vma)
4095 {
4096         struct file *file;
4097         loff_t size = vma->vm_end - vma->vm_start;
4098
4099         /*
4100          * Cloning a new file under mmap_sem leads to a lock ordering conflict
4101          * between XFS directory reading and selinux: since this file is only
4102          * accessible to the user through its mapping, use S_PRIVATE flag to
4103          * bypass file security, in the same way as shmem_kernel_file_setup().
4104          */
4105         file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
4106         if (IS_ERR(file))
4107                 return PTR_ERR(file);
4108
4109         if (vma->vm_file)
4110                 fput(vma->vm_file);
4111         vma->vm_file = file;
4112         vma->vm_ops = &shmem_vm_ops;
4113
4114         if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) &&
4115                         ((vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK) <
4116                         (vma->vm_end & HPAGE_PMD_MASK)) {
4117                 khugepaged_enter(vma, vma->vm_flags);
4118         }
4119
4120         return 0;
4121 }
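
/*
 * Illustrative sketch (not part of the kernel build): shmem_zero_setup()
 * is what backs shared anonymous mappings, reached from do_mmap_pgoff()
 * when no file is supplied:
 *
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *	// p is now backed by the unlinked "dev/zero" tmpfs file above
 */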
4122
4123 /**
4124  * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
4125  * @mapping:    the page's address_space
4126  * @index:      the page index
4127  * @gfp:        the page allocator flags to use if allocating
4128  *
4129  * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
4130  * with any new page allocations done using the specified allocation flags.
4131  * But read_cache_page_gfp() uses the ->readpage() method: which does not
4132  * suit tmpfs, since it may have pages in swapcache, and needs to find those
4133  * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
4134  *
4135  * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
4136  * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
4137  */
4138 struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
4139                                          pgoff_t index, gfp_t gfp)
4140 {
4141 #ifdef CONFIG_SHMEM
4142         struct inode *inode = mapping->host;
4143         struct page *page;
4144         int error;
4145
4146         BUG_ON(mapping->a_ops != &shmem_aops);
4147         error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE,
4148                                   gfp, NULL, NULL);
4149         if (error)
4150                 page = ERR_PTR(error);
4151         else
4152                 unlock_page(page);
4153         return page;
4154 #else
4155         /*
4156          * The tiny !SHMEM case uses ramfs without swap
4157          */
4158         return read_cache_page_gfp(mapping, index, gfp);
4159 #endif
4160 }
4161 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
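
/*
 * Illustrative sketch: how a caller such as a GPU driver might use the
 * helper above, mixing in __GFP_NORETRY | __GFP_NOWARN as the i915 example
 * in the comment describes ("mapping" and "i" are assumed to be in scope).
 *
 *	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
 *	struct page *page = shmem_read_mapping_page_gfp(mapping, i, gfp);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 */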