[sagit-ice-cold/kernel_xiaomi_msm8998.git] mm/huge_memory.c
1 /*
2  *  Copyright (C) 2009  Red Hat, Inc.
3  *
4  *  This work is licensed under the terms of the GNU GPL, version 2. See
5  *  the COPYING file in the top-level directory.
6  */
7
8 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
10 #include <linux/mm.h>
11 #include <linux/sched.h>
12 #include <linux/highmem.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mmu_notifier.h>
15 #include <linux/rmap.h>
16 #include <linux/swap.h>
17 #include <linux/shrinker.h>
18 #include <linux/mm_inline.h>
19 #include <linux/dax.h>
20 #include <linux/kthread.h>
21 #include <linux/khugepaged.h>
22 #include <linux/freezer.h>
23 #include <linux/mman.h>
24 #include <linux/pagemap.h>
25 #include <linux/migrate.h>
26 #include <linux/hashtable.h>
27 #include <linux/userfaultfd_k.h>
28 #include <linux/page_idle.h>
29
30 #include <asm/tlb.h>
31 #include <asm/pgalloc.h>
32 #include "internal.h"
33
34 /*
35  * By default, transparent hugepage support is disabled to avoid risking an
36  * increased memory footprint for applications without a guaranteed benefit.
37  * When transparent hugepage support is enabled, it applies to all mappings,
38  * and khugepaged scans all mappings.
39  * Defrag is invoked by khugepaged hugepage allocations and by page faults
40  * for all hugepage allocations.
41  */
42 unsigned long transparent_hugepage_flags __read_mostly =
43 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
44         (1<<TRANSPARENT_HUGEPAGE_FLAG)|
45 #endif
46 #ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
47         (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
48 #endif
49         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
50         (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
51         (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
52
53 /* default scan 8*512 pte (or vmas) every 10 seconds */
54 static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
55 static unsigned int khugepaged_pages_collapsed;
56 static unsigned int khugepaged_full_scans;
57 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
58 /* during fragmentation poll the hugepage allocator once every minute */
59 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
60 static struct task_struct *khugepaged_thread __read_mostly;
61 static DEFINE_MUTEX(khugepaged_mutex);
62 static DEFINE_SPINLOCK(khugepaged_mm_lock);
63 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
64 /*
65  * By default, collapse hugepages if there is at least one pte mapped,
66  * as would have happened if the vma had been large enough during the
67  * page fault.
68  */
69 static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
70
71 static int khugepaged(void *none);
72 static int khugepaged_slab_init(void);
73 static void khugepaged_slab_exit(void);
74
75 #define MM_SLOTS_HASH_BITS 10
76 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
77
78 static struct kmem_cache *mm_slot_cache __read_mostly;
79
80 /**
81  * struct mm_slot - hash lookup from mm to mm_slot
82  * @hash: hash collision list
83  * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
84  * @mm: the mm that this information is valid for
85  */
86 struct mm_slot {
87         struct hlist_node hash;
88         struct list_head mm_node;
89         struct mm_struct *mm;
90 };
91
92 /**
93  * struct khugepaged_scan - cursor for scanning
94  * @mm_head: the head of the mm list to scan
95  * @mm_slot: the current mm_slot we are scanning
96  * @address: the next address inside that to be scanned
97  *
98  * There is only one khugepaged_scan instance of this cursor structure.
99  */
100 struct khugepaged_scan {
101         struct list_head mm_head;
102         struct mm_slot *mm_slot;
103         unsigned long address;
104 };
105 static struct khugepaged_scan khugepaged_scan = {
106         .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
107 };
108
109
110 static void set_recommended_min_free_kbytes(void)
111 {
112         struct zone *zone;
113         int nr_zones = 0;
114         unsigned long recommended_min;
115
116         for_each_populated_zone(zone)
117                 nr_zones++;
118
119         /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
120         recommended_min = pageblock_nr_pages * nr_zones * 2;
121
122         /*
123          * Make sure that on average at least two pageblocks are almost free
124          * of another type, one for a migratetype to fall back to and a
125          * second to avoid subsequent fallbacks of other types. There are 3
126          * MIGRATE_TYPES we care about.
127          */
128         recommended_min += pageblock_nr_pages * nr_zones *
129                            MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
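        /*
         * Worked example (illustrative values only): with 4 populated zones,
         * pageblock_nr_pages == 512 and MIGRATE_PCPTYPES == 3, the base term
         * and the increment above give 512*4*2 + 512*4*3*3 = 4096 + 18432 =
         * 22528 pages, before the 5%-of-lowmem cap and the pages-to-KiB
         * shift below.
         */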
130
131         /* don't ever allow reserving more than 5% of the lowmem */
132         recommended_min = min(recommended_min,
133                               (unsigned long) nr_free_buffer_pages() / 20);
134         recommended_min <<= (PAGE_SHIFT-10);
135
136         if (recommended_min > min_free_kbytes) {
137                 if (user_min_free_kbytes >= 0)
138                         pr_info("raising min_free_kbytes from %d to %lu "
139                                 "to help transparent hugepage allocations\n",
140                                 min_free_kbytes, recommended_min);
141
142                 min_free_kbytes = recommended_min;
143         }
144         setup_per_zone_wmarks();
145 }
146
147 static int start_stop_khugepaged(void)
148 {
149         int err = 0;
150         if (khugepaged_enabled()) {
151                 if (!khugepaged_thread)
152                         khugepaged_thread = kthread_run(khugepaged, NULL,
153                                                         "khugepaged");
154                 if (IS_ERR(khugepaged_thread)) {
155                         pr_err("khugepaged: kthread_run(khugepaged) failed\n");
156                         err = PTR_ERR(khugepaged_thread);
157                         khugepaged_thread = NULL;
158                         goto fail;
159                 }
160
161                 if (!list_empty(&khugepaged_scan.mm_head))
162                         wake_up_interruptible(&khugepaged_wait);
163
164                 set_recommended_min_free_kbytes();
165         } else if (khugepaged_thread) {
166                 kthread_stop(khugepaged_thread);
167                 khugepaged_thread = NULL;
168         }
169 fail:
170         return err;
171 }
172
173 static atomic_t huge_zero_refcount;
174 struct page *huge_zero_page __read_mostly;
175
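/*
 * Lifecycle of the huge zero page: it is allocated lazily on first use and
 * freed only by the shrinker below. huge_zero_refcount holds one reference
 * per user plus one extra reference that only the shrinker drops, which is
 * why get_huge_zero_page() sets the count to 2 after a fresh allocation.
 */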
176 struct page *get_huge_zero_page(void)
177 {
178         struct page *zero_page;
179 retry:
180         if (likely(atomic_inc_not_zero(&huge_zero_refcount)))
181                 return READ_ONCE(huge_zero_page);
182
183         zero_page = alloc_pages((GFP_TRANSHUGE | __GFP_ZERO) & ~__GFP_MOVABLE,
184                         HPAGE_PMD_ORDER);
185         if (!zero_page) {
186                 count_vm_event(THP_ZERO_PAGE_ALLOC_FAILED);
187                 return NULL;
188         }
189         count_vm_event(THP_ZERO_PAGE_ALLOC);
190         preempt_disable();
191         if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
192                 preempt_enable();
193                 __free_pages(zero_page, compound_order(zero_page));
194                 goto retry;
195         }
196
197         /* We take an additional reference here. It will be put back by the shrinker */
198         atomic_set(&huge_zero_refcount, 2);
199         preempt_enable();
200         return READ_ONCE(huge_zero_page);
201 }
202
203 static void put_huge_zero_page(void)
204 {
205         /*
206          * Counter should never go to zero here. Only shrinker can put
207          * last reference.
208          */
209         BUG_ON(atomic_dec_and_test(&huge_zero_refcount));
210 }
211
212 static unsigned long shrink_huge_zero_page_count(struct shrinker *shrink,
213                                         struct shrink_control *sc)
214 {
215         /* we can free zero page only if last reference remains */
216         return atomic_read(&huge_zero_refcount) == 1 ? HPAGE_PMD_NR : 0;
217 }
218
219 static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
220                                        struct shrink_control *sc)
221 {
222         if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
223                 struct page *zero_page = xchg(&huge_zero_page, NULL);
224                 BUG_ON(zero_page == NULL);
225                 __free_pages(zero_page, compound_order(zero_page));
226                 return HPAGE_PMD_NR;
227         }
228
229         return 0;
230 }
231
232 static struct shrinker huge_zero_page_shrinker = {
233         .count_objects = shrink_huge_zero_page_count,
234         .scan_objects = shrink_huge_zero_page_scan,
235         .seeks = DEFAULT_SEEKS,
236 };
237
238 #ifdef CONFIG_SYSFS
239
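/*
 * Shared helpers for sysfs attributes that expose a three-state policy
 * (always/madvise/never) backed by two flag bits; both the "enabled" and
 * "defrag" attributes below use this pair of show/store routines.
 */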
240 static ssize_t double_flag_show(struct kobject *kobj,
241                                 struct kobj_attribute *attr, char *buf,
242                                 enum transparent_hugepage_flag enabled,
243                                 enum transparent_hugepage_flag req_madv)
244 {
245         if (test_bit(enabled, &transparent_hugepage_flags)) {
246                 VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
247                 return sprintf(buf, "[always] madvise never\n");
248         } else if (test_bit(req_madv, &transparent_hugepage_flags))
249                 return sprintf(buf, "always [madvise] never\n");
250         else
251                 return sprintf(buf, "always madvise [never]\n");
252 }
253 static ssize_t double_flag_store(struct kobject *kobj,
254                                  struct kobj_attribute *attr,
255                                  const char *buf, size_t count,
256                                  enum transparent_hugepage_flag enabled,
257                                  enum transparent_hugepage_flag req_madv)
258 {
259         if (!memcmp("always", buf,
260                     min(sizeof("always")-1, count))) {
261                 set_bit(enabled, &transparent_hugepage_flags);
262                 clear_bit(req_madv, &transparent_hugepage_flags);
263         } else if (!memcmp("madvise", buf,
264                            min(sizeof("madvise")-1, count))) {
265                 clear_bit(enabled, &transparent_hugepage_flags);
266                 set_bit(req_madv, &transparent_hugepage_flags);
267         } else if (!memcmp("never", buf,
268                            min(sizeof("never")-1, count))) {
269                 clear_bit(enabled, &transparent_hugepage_flags);
270                 clear_bit(req_madv, &transparent_hugepage_flags);
271         } else
272                 return -EINVAL;
273
274         return count;
275 }
276
277 static ssize_t enabled_show(struct kobject *kobj,
278                             struct kobj_attribute *attr, char *buf)
279 {
280         return double_flag_show(kobj, attr, buf,
281                                 TRANSPARENT_HUGEPAGE_FLAG,
282                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
283 }
284 static ssize_t enabled_store(struct kobject *kobj,
285                              struct kobj_attribute *attr,
286                              const char *buf, size_t count)
287 {
288         ssize_t ret;
289
290         ret = double_flag_store(kobj, attr, buf, count,
291                                 TRANSPARENT_HUGEPAGE_FLAG,
292                                 TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
293
294         if (ret > 0) {
295                 int err;
296
297                 mutex_lock(&khugepaged_mutex);
298                 err = start_stop_khugepaged();
299                 mutex_unlock(&khugepaged_mutex);
300
301                 if (err)
302                         ret = err;
303         }
304
305         return ret;
306 }
307 static struct kobj_attribute enabled_attr =
308         __ATTR(enabled, 0644, enabled_show, enabled_store);
309
310 static ssize_t single_flag_show(struct kobject *kobj,
311                                 struct kobj_attribute *attr, char *buf,
312                                 enum transparent_hugepage_flag flag)
313 {
314         return sprintf(buf, "%d\n",
315                        !!test_bit(flag, &transparent_hugepage_flags));
316 }
317
318 static ssize_t single_flag_store(struct kobject *kobj,
319                                  struct kobj_attribute *attr,
320                                  const char *buf, size_t count,
321                                  enum transparent_hugepage_flag flag)
322 {
323         unsigned long value;
324         int ret;
325
326         ret = kstrtoul(buf, 10, &value);
327         if (ret < 0)
328                 return ret;
329         if (value > 1)
330                 return -EINVAL;
331
332         if (value)
333                 set_bit(flag, &transparent_hugepage_flags);
334         else
335                 clear_bit(flag, &transparent_hugepage_flags);
336
337         return count;
338 }
339
340 /*
341  * Currently defrag only disables __GFP_NOWAIT for allocation. A blind
342  * __GFP_REPEAT is too aggressive; it's never worth swapping tons of
343  * memory just to allocate one more hugepage.
344  */
345 static ssize_t defrag_show(struct kobject *kobj,
346                            struct kobj_attribute *attr, char *buf)
347 {
348         return double_flag_show(kobj, attr, buf,
349                                 TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
350                                 TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
351 }
352 static ssize_t defrag_store(struct kobject *kobj,
353                             struct kobj_attribute *attr,
354                             const char *buf, size_t count)
355 {
356         return double_flag_store(kobj, attr, buf, count,
357                                  TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
358                                  TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
359 }
360 static struct kobj_attribute defrag_attr =
361         __ATTR(defrag, 0644, defrag_show, defrag_store);
362
363 static ssize_t use_zero_page_show(struct kobject *kobj,
364                 struct kobj_attribute *attr, char *buf)
365 {
366         return single_flag_show(kobj, attr, buf,
367                                 TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
368 }
369 static ssize_t use_zero_page_store(struct kobject *kobj,
370                 struct kobj_attribute *attr, const char *buf, size_t count)
371 {
372         return single_flag_store(kobj, attr, buf, count,
373                                  TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
374 }
375 static struct kobj_attribute use_zero_page_attr =
376         __ATTR(use_zero_page, 0644, use_zero_page_show, use_zero_page_store);
377 #ifdef CONFIG_DEBUG_VM
378 static ssize_t debug_cow_show(struct kobject *kobj,
379                                 struct kobj_attribute *attr, char *buf)
380 {
381         return single_flag_show(kobj, attr, buf,
382                                 TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
383 }
384 static ssize_t debug_cow_store(struct kobject *kobj,
385                                struct kobj_attribute *attr,
386                                const char *buf, size_t count)
387 {
388         return single_flag_store(kobj, attr, buf, count,
389                                  TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
390 }
391 static struct kobj_attribute debug_cow_attr =
392         __ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
393 #endif /* CONFIG_DEBUG_VM */
394
395 static struct attribute *hugepage_attr[] = {
396         &enabled_attr.attr,
397         &defrag_attr.attr,
398         &use_zero_page_attr.attr,
399 #ifdef CONFIG_DEBUG_VM
400         &debug_cow_attr.attr,
401 #endif
402         NULL,
403 };
404
405 static struct attribute_group hugepage_attr_group = {
406         .attrs = hugepage_attr,
407 };
408
409 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
410                                          struct kobj_attribute *attr,
411                                          char *buf)
412 {
413         return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
414 }
415
416 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
417                                           struct kobj_attribute *attr,
418                                           const char *buf, size_t count)
419 {
420         unsigned long msecs;
421         int err;
422
423         err = kstrtoul(buf, 10, &msecs);
424         if (err || msecs > UINT_MAX)
425                 return -EINVAL;
426
427         khugepaged_scan_sleep_millisecs = msecs;
428         wake_up_interruptible(&khugepaged_wait);
429
430         return count;
431 }
432 static struct kobj_attribute scan_sleep_millisecs_attr =
433         __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
434                scan_sleep_millisecs_store);
435
436 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
437                                           struct kobj_attribute *attr,
438                                           char *buf)
439 {
440         return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
441 }
442
443 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
444                                            struct kobj_attribute *attr,
445                                            const char *buf, size_t count)
446 {
447         unsigned long msecs;
448         int err;
449
450         err = kstrtoul(buf, 10, &msecs);
451         if (err || msecs > UINT_MAX)
452                 return -EINVAL;
453
454         khugepaged_alloc_sleep_millisecs = msecs;
455         wake_up_interruptible(&khugepaged_wait);
456
457         return count;
458 }
459 static struct kobj_attribute alloc_sleep_millisecs_attr =
460         __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
461                alloc_sleep_millisecs_store);
462
463 static ssize_t pages_to_scan_show(struct kobject *kobj,
464                                   struct kobj_attribute *attr,
465                                   char *buf)
466 {
467         return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
468 }
469 static ssize_t pages_to_scan_store(struct kobject *kobj,
470                                    struct kobj_attribute *attr,
471                                    const char *buf, size_t count)
472 {
473         int err;
474         unsigned long pages;
475
476         err = kstrtoul(buf, 10, &pages);
477         if (err || !pages || pages > UINT_MAX)
478                 return -EINVAL;
479
480         khugepaged_pages_to_scan = pages;
481
482         return count;
483 }
484 static struct kobj_attribute pages_to_scan_attr =
485         __ATTR(pages_to_scan, 0644, pages_to_scan_show,
486                pages_to_scan_store);
487
488 static ssize_t pages_collapsed_show(struct kobject *kobj,
489                                     struct kobj_attribute *attr,
490                                     char *buf)
491 {
492         return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
493 }
494 static struct kobj_attribute pages_collapsed_attr =
495         __ATTR_RO(pages_collapsed);
496
497 static ssize_t full_scans_show(struct kobject *kobj,
498                                struct kobj_attribute *attr,
499                                char *buf)
500 {
501         return sprintf(buf, "%u\n", khugepaged_full_scans);
502 }
503 static struct kobj_attribute full_scans_attr =
504         __ATTR_RO(full_scans);
505
506 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
507                                       struct kobj_attribute *attr, char *buf)
508 {
509         return single_flag_show(kobj, attr, buf,
510                                 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
511 }
512 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
513                                        struct kobj_attribute *attr,
514                                        const char *buf, size_t count)
515 {
516         return single_flag_store(kobj, attr, buf, count,
517                                  TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
518 }
519 static struct kobj_attribute khugepaged_defrag_attr =
520         __ATTR(defrag, 0644, khugepaged_defrag_show,
521                khugepaged_defrag_store);
522
523 /*
524  * max_ptes_none controls whether khugepaged should collapse hugepages
525  * over unmapped ptes, which can increase the memory footprint of the
526  * vmas. When max_ptes_none is 0, khugepaged will not reduce the
527  * available free memory in the system as it runs. Increasing
528  * max_ptes_none will instead potentially reduce the free memory in the
529  * system during the khugepaged scan.
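 * For example, with HPAGE_PMD_NR == 512 (4K base pages) the default value
 * of 511 lets khugepaged collapse a region as soon as a single pte is
 * mapped, while a value of 0 requires all 512 ptes to be present.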
530  */
531 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
532                                              struct kobj_attribute *attr,
533                                              char *buf)
534 {
535         return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
536 }
537 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
538                                               struct kobj_attribute *attr,
539                                               const char *buf, size_t count)
540 {
541         int err;
542         unsigned long max_ptes_none;
543
544         err = kstrtoul(buf, 10, &max_ptes_none);
545         if (err || max_ptes_none > HPAGE_PMD_NR-1)
546                 return -EINVAL;
547
548         khugepaged_max_ptes_none = max_ptes_none;
549
550         return count;
551 }
552 static struct kobj_attribute khugepaged_max_ptes_none_attr =
553         __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
554                khugepaged_max_ptes_none_store);
555
556 static struct attribute *khugepaged_attr[] = {
557         &khugepaged_defrag_attr.attr,
558         &khugepaged_max_ptes_none_attr.attr,
559         &pages_to_scan_attr.attr,
560         &pages_collapsed_attr.attr,
561         &full_scans_attr.attr,
562         &scan_sleep_millisecs_attr.attr,
563         &alloc_sleep_millisecs_attr.attr,
564         NULL,
565 };
566
567 static struct attribute_group khugepaged_attr_group = {
568         .attrs = khugepaged_attr,
569         .name = "khugepaged",
570 };
571
572 static int __init hugepage_init_sysfs(struct kobject **hugepage_kobj)
573 {
574         int err;
575
576         *hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
577         if (unlikely(!*hugepage_kobj)) {
578                 pr_err("failed to create transparent hugepage kobject\n");
579                 return -ENOMEM;
580         }
581
582         err = sysfs_create_group(*hugepage_kobj, &hugepage_attr_group);
583         if (err) {
584                 pr_err("failed to register transparent hugepage group\n");
585                 goto delete_obj;
586         }
587
588         err = sysfs_create_group(*hugepage_kobj, &khugepaged_attr_group);
589         if (err) {
590                 pr_err("failed to register transparent hugepage group\n");
591                 goto remove_hp_group;
592         }
593
594         return 0;
595
596 remove_hp_group:
597         sysfs_remove_group(*hugepage_kobj, &hugepage_attr_group);
598 delete_obj:
599         kobject_put(*hugepage_kobj);
600         return err;
601 }
602
603 static void __init hugepage_exit_sysfs(struct kobject *hugepage_kobj)
604 {
605         sysfs_remove_group(hugepage_kobj, &khugepaged_attr_group);
606         sysfs_remove_group(hugepage_kobj, &hugepage_attr_group);
607         kobject_put(hugepage_kobj);
608 }
609 #else
610 static inline int hugepage_init_sysfs(struct kobject **hugepage_kobj)
611 {
612         return 0;
613 }
614
615 static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
616 {
617 }
618 #endif /* CONFIG_SYSFS */
619
620 static int __init hugepage_init(void)
621 {
622         int err;
623         struct kobject *hugepage_kobj;
624
625         if (!has_transparent_hugepage()) {
626                 transparent_hugepage_flags = 0;
627                 return -EINVAL;
628         }
629
630         err = hugepage_init_sysfs(&hugepage_kobj);
631         if (err)
632                 goto err_sysfs;
633
634         err = khugepaged_slab_init();
635         if (err)
636                 goto err_slab;
637
638         err = register_shrinker(&huge_zero_page_shrinker);
639         if (err)
640                 goto err_hzp_shrinker;
641
642         /*
643          * By default disable transparent hugepages on smaller systems,
644          * where the extra memory used could hurt more than TLB overhead
645          * is likely to save.  The admin can still enable it through /sys.
646          */
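        /* (512 << (20 - PAGE_SHIFT)) is 512MB expressed in pages. */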
647         if (totalram_pages < (512 << (20 - PAGE_SHIFT))) {
648                 transparent_hugepage_flags = 0;
649                 return 0;
650         }
651
652         err = start_stop_khugepaged();
653         if (err)
654                 goto err_khugepaged;
655
656         return 0;
657 err_khugepaged:
658         unregister_shrinker(&huge_zero_page_shrinker);
659 err_hzp_shrinker:
660         khugepaged_slab_exit();
661 err_slab:
662         hugepage_exit_sysfs(hugepage_kobj);
663 err_sysfs:
664         return err;
665 }
666 subsys_initcall(hugepage_init);
667
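/*
 * Early-boot parsing of the "transparent_hugepage=" command line parameter.
 * Accepted values are "always", "madvise" and "never", mirroring the sysfs
 * "enabled" attribute above; anything else is ignored with a warning.
 */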
668 static int __init setup_transparent_hugepage(char *str)
669 {
670         int ret = 0;
671         if (!str)
672                 goto out;
673         if (!strcmp(str, "always")) {
674                 set_bit(TRANSPARENT_HUGEPAGE_FLAG,
675                         &transparent_hugepage_flags);
676                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
677                           &transparent_hugepage_flags);
678                 ret = 1;
679         } else if (!strcmp(str, "madvise")) {
680                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
681                           &transparent_hugepage_flags);
682                 set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
683                         &transparent_hugepage_flags);
684                 ret = 1;
685         } else if (!strcmp(str, "never")) {
686                 clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
687                           &transparent_hugepage_flags);
688                 clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
689                           &transparent_hugepage_flags);
690                 ret = 1;
691         }
692 out:
693         if (!ret)
694                 pr_warn("transparent_hugepage= cannot parse, ignored\n");
695         return ret;
696 }
697 __setup("transparent_hugepage=", setup_transparent_hugepage);
698
699 pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
700 {
701         if (likely(vma->vm_flags & VM_WRITE))
702                 pmd = pmd_mkwrite(pmd);
703         return pmd;
704 }
705
706 static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
707 {
708         pmd_t entry;
709         entry = mk_pmd(page, prot);
710         entry = pmd_mkhuge(entry);
711         return entry;
712 }
713
714 static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
715                                         struct vm_area_struct *vma,
716                                         unsigned long address, pmd_t *pmd,
717                                         struct page *page, gfp_t gfp,
718                                         unsigned int flags)
719 {
720         struct mem_cgroup *memcg;
721         pgtable_t pgtable;
722         spinlock_t *ptl;
723         unsigned long haddr = address & HPAGE_PMD_MASK;
724
725         VM_BUG_ON_PAGE(!PageCompound(page), page);
726
727         if (mem_cgroup_try_charge(page, mm, gfp, &memcg)) {
728                 put_page(page);
729                 count_vm_event(THP_FAULT_FALLBACK);
730                 return VM_FAULT_FALLBACK;
731         }
732
733         pgtable = pte_alloc_one(mm, haddr);
734         if (unlikely(!pgtable)) {
735                 mem_cgroup_cancel_charge(page, memcg);
736                 put_page(page);
737                 return VM_FAULT_OOM;
738         }
739
740         clear_huge_page(page, haddr, HPAGE_PMD_NR);
741         /*
742          * The memory barrier inside __SetPageUptodate makes sure that
743          * clear_huge_page writes become visible before the set_pmd_at()
744          * write.
745          */
746         __SetPageUptodate(page);
747
748         ptl = pmd_lock(mm, pmd);
749         if (unlikely(!pmd_none(*pmd))) {
750                 spin_unlock(ptl);
751                 mem_cgroup_cancel_charge(page, memcg);
752                 put_page(page);
753                 pte_free(mm, pgtable);
754         } else {
755                 pmd_t entry;
756
757                 /* Deliver the page fault to userland */
758                 if (userfaultfd_missing(vma)) {
759                         int ret;
760
761                         spin_unlock(ptl);
762                         mem_cgroup_cancel_charge(page, memcg);
763                         put_page(page);
764                         pte_free(mm, pgtable);
765                         ret = handle_userfault(vma, address, flags,
766                                                VM_UFFD_MISSING);
767                         VM_BUG_ON(ret & VM_FAULT_FALLBACK);
768                         return ret;
769                 }
770
771                 entry = mk_huge_pmd(page, vma->vm_page_prot);
772                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
773                 page_add_new_anon_rmap(page, vma, haddr);
774                 mem_cgroup_commit_charge(page, memcg, false);
775                 lru_cache_add_active_or_unevictable(page, vma);
776                 pgtable_trans_huge_deposit(mm, pmd, pgtable);
777                 set_pmd_at(mm, haddr, pmd, entry);
778                 add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
779                 atomic_long_inc(&mm->nr_ptes);
780                 spin_unlock(ptl);
781                 count_vm_event(THP_FAULT_ALLOC);
782         }
783
784         return 0;
785 }
786
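/*
 * Build the gfp mask for a huge page allocation: when defrag is disabled,
 * __GFP_RECLAIM is stripped from GFP_TRANSHUGE so the allocation fails
 * fast instead of entering reclaim or compaction.
 */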
787 static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
788 {
789         return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
790 }
791
792 /* Caller must hold page table lock. */
793 static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
794                 struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd,
795                 struct page *zero_page)
796 {
797         pmd_t entry;
798         if (!pmd_none(*pmd))
799                 return false;
800         entry = mk_pmd(zero_page, vma->vm_page_prot);
801         entry = pmd_mkhuge(entry);
802         pgtable_trans_huge_deposit(mm, pmd, pgtable);
803         set_pmd_at(mm, haddr, pmd, entry);
804         atomic_long_inc(&mm->nr_ptes);
805         return true;
806 }
807
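/*
 * Anonymous page fault on a pmd-sized hole: install the huge zero page for
 * read faults when allowed, otherwise allocate and map a new huge page,
 * falling back to regular pages when that is not possible.
 */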
808 int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
809                                unsigned long address, pmd_t *pmd,
810                                unsigned int flags)
811 {
812         gfp_t gfp;
813         struct page *page;
814         unsigned long haddr = address & HPAGE_PMD_MASK;
815
816         if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
817                 return VM_FAULT_FALLBACK;
818         if (unlikely(anon_vma_prepare(vma)))
819                 return VM_FAULT_OOM;
820         if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
821                 return VM_FAULT_OOM;
822         if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
823                         transparent_hugepage_use_zero_page()) {
824                 spinlock_t *ptl;
825                 pgtable_t pgtable;
826                 struct page *zero_page;
827                 bool set;
828                 int ret;
829                 pgtable = pte_alloc_one(mm, haddr);
830                 if (unlikely(!pgtable))
831                         return VM_FAULT_OOM;
832                 zero_page = get_huge_zero_page();
833                 if (unlikely(!zero_page)) {
834                         pte_free(mm, pgtable);
835                         count_vm_event(THP_FAULT_FALLBACK);
836                         return VM_FAULT_FALLBACK;
837                 }
838                 ptl = pmd_lock(mm, pmd);
839                 ret = 0;
840                 set = false;
841                 if (pmd_none(*pmd)) {
842                         if (userfaultfd_missing(vma)) {
843                                 spin_unlock(ptl);
844                                 ret = handle_userfault(vma, address, flags,
845                                                        VM_UFFD_MISSING);
846                                 VM_BUG_ON(ret & VM_FAULT_FALLBACK);
847                         } else {
848                                 set_huge_zero_page(pgtable, mm, vma,
849                                                    haddr, pmd,
850                                                    zero_page);
851                                 spin_unlock(ptl);
852                                 set = true;
853                         }
854                 } else
855                         spin_unlock(ptl);
856                 if (!set) {
857                         pte_free(mm, pgtable);
858                         put_huge_zero_page();
859                 }
860                 return ret;
861         }
862         gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
863         page = alloc_hugepage_vma(gfp, vma, haddr, HPAGE_PMD_ORDER);
864         if (unlikely(!page)) {
865                 count_vm_event(THP_FAULT_FALLBACK);
866                 return VM_FAULT_FALLBACK;
867         }
868         return __do_huge_pmd_anonymous_page(mm, vma, address, pmd, page, gfp,
869                                             flags);
870 }
871
872 static void insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
873                 pmd_t *pmd, unsigned long pfn, pgprot_t prot, bool write)
874 {
875         struct mm_struct *mm = vma->vm_mm;
876         pmd_t entry;
877         spinlock_t *ptl;
878
879         ptl = pmd_lock(mm, pmd);
880         if (pmd_none(*pmd)) {
881                 entry = pmd_mkhuge(pfn_pmd(pfn, prot));
882                 if (write) {
883                         entry = pmd_mkyoung(pmd_mkdirty(entry));
884                         entry = maybe_pmd_mkwrite(entry, vma);
885                 }
886                 set_pmd_at(mm, addr, pmd, entry);
887                 update_mmu_cache_pmd(vma, addr, pmd);
888         }
889         spin_unlock(ptl);
890 }
891
892 int vmf_insert_pfn_pmd(struct vm_area_struct *vma, unsigned long addr,
893                         pmd_t *pmd, unsigned long pfn, bool write)
894 {
895         pgprot_t pgprot = vma->vm_page_prot;
896         /*
897          * If we had pmd_special, we could avoid all these restrictions,
898          * but we need to be consistent with PTEs and architectures that
899          * can't support a 'special' bit.
900          */
901         BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
902         BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
903                                                 (VM_PFNMAP|VM_MIXEDMAP));
904         BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
905         BUG_ON((vma->vm_flags & VM_MIXEDMAP) && pfn_valid(pfn));
906
907         if (addr < vma->vm_start || addr >= vma->vm_end)
908                 return VM_FAULT_SIGBUS;
909         if (track_pfn_insert(vma, &pgprot, pfn))
910                 return VM_FAULT_SIGBUS;
911         insert_pfn_pmd(vma, addr, pmd, pfn, pgprot, write);
912         return VM_FAULT_NOPAGE;
913 }
914
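/*
 * Duplicate a trans-huge pmd into the child mm, called via copy_page_range()
 * (typically at fork()). Returns -EAGAIN when the pmd is not (or no longer)
 * trans-huge so that the caller falls back to copying individual ptes.
 */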
915 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
916                   pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
917                   struct vm_area_struct *vma)
918 {
919         spinlock_t *dst_ptl, *src_ptl;
920         struct page *src_page;
921         pmd_t pmd;
922         pgtable_t pgtable;
923         int ret;
924
925         ret = -ENOMEM;
926         pgtable = pte_alloc_one(dst_mm, addr);
927         if (unlikely(!pgtable))
928                 goto out;
929
930         dst_ptl = pmd_lock(dst_mm, dst_pmd);
931         src_ptl = pmd_lockptr(src_mm, src_pmd);
932         spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
933
934         ret = -EAGAIN;
935         pmd = *src_pmd;
936         if (unlikely(!pmd_trans_huge(pmd))) {
937                 pte_free(dst_mm, pgtable);
938                 goto out_unlock;
939         }
940         /*
941          * When page table lock is held, the huge zero pmd should not be
942          * under splitting, since we don't split the page itself, only the
943          * pmd into a page table.
944          */
945         if (is_huge_zero_pmd(pmd)) {
946                 struct page *zero_page;
947                 /*
948                  * get_huge_zero_page() will never allocate a new page here,
949                  * since we already have a zero page to copy. It just takes a
950                  * reference.
951                  */
952                 zero_page = get_huge_zero_page();
953                 set_huge_zero_page(pgtable, dst_mm, vma, addr, dst_pmd,
954                                 zero_page);
955                 ret = 0;
956                 goto out_unlock;
957         }
958
959         if (unlikely(pmd_trans_splitting(pmd))) {
960                 /* the huge page is being split out from under us */
961                 spin_unlock(src_ptl);
962                 spin_unlock(dst_ptl);
963                 pte_free(dst_mm, pgtable);
964
965                 wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
966                 goto out;
967         }
968         src_page = pmd_page(pmd);
969         VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
970         get_page(src_page);
971         page_dup_rmap(src_page);
972         add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
973
974         pmdp_set_wrprotect(src_mm, addr, src_pmd);
975         pmd = pmd_mkold(pmd_wrprotect(pmd));
976         pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
977         set_pmd_at(dst_mm, addr, dst_pmd, pmd);
978         atomic_long_inc(&dst_mm->nr_ptes);
979
980         ret = 0;
981 out_unlock:
982         spin_unlock(src_ptl);
983         spin_unlock(dst_ptl);
984 out:
985         return ret;
986 }
987
988 void huge_pmd_set_accessed(struct mm_struct *mm,
989                            struct vm_area_struct *vma,
990                            unsigned long address,
991                            pmd_t *pmd, pmd_t orig_pmd,
992                            int dirty)
993 {
994         spinlock_t *ptl;
995         pmd_t entry;
996         unsigned long haddr;
997
998         ptl = pmd_lock(mm, pmd);
999         if (unlikely(!pmd_same(*pmd, orig_pmd)))
1000                 goto unlock;
1001
1002         entry = pmd_mkyoung(orig_pmd);
1003         haddr = address & HPAGE_PMD_MASK;
1004         if (pmdp_set_access_flags(vma, haddr, pmd, entry, dirty))
1005                 update_mmu_cache_pmd(vma, address, pmd);
1006
1007 unlock:
1008         spin_unlock(ptl);
1009 }
1010
1011 /*
1012  * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
1013  * during copy_user_huge_page()'s copy_page_rep(): in the case when
1014  * the source page gets split and a tail freed before copy completes.
1015  * Called under pmd_lock of checked pmd, so safe from splitting itself.
1016  */
1017 static void get_user_huge_page(struct page *page)
1018 {
1019         if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
1020                 struct page *endpage = page + HPAGE_PMD_NR;
1021
1022                 atomic_add(HPAGE_PMD_NR, &page->_count);
1023                 while (++page < endpage)
1024                         get_huge_page_tail(page);
1025         } else {
1026                 get_page(page);
1027         }
1028 }
1029
1030 static void put_user_huge_page(struct page *page)
1031 {
1032         if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
1033                 struct page *endpage = page + HPAGE_PMD_NR;
1034
1035                 while (page < endpage)
1036                         put_page(page++);
1037         } else {
1038                 put_page(page);
1039         }
1040 }
1041
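/*
 * Write-protect fault fallback when a new huge page cannot be allocated:
 * the huge page is copied into HPAGE_PMD_NR individual pages and the pmd
 * is replaced by a regular page table mapping them.
 */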
1042 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
1043                                         struct vm_area_struct *vma,
1044                                         unsigned long address,
1045                                         pmd_t *pmd, pmd_t orig_pmd,
1046                                         struct page *page,
1047                                         unsigned long haddr)
1048 {
1049         struct mem_cgroup *memcg;
1050         spinlock_t *ptl;
1051         pgtable_t pgtable;
1052         pmd_t _pmd;
1053         int ret = 0, i;
1054         struct page **pages;
1055         unsigned long mmun_start;       /* For mmu_notifiers */
1056         unsigned long mmun_end;         /* For mmu_notifiers */
1057
1058         pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
1059                         GFP_KERNEL);
1060         if (unlikely(!pages)) {
1061                 ret |= VM_FAULT_OOM;
1062                 goto out;
1063         }
1064
1065         for (i = 0; i < HPAGE_PMD_NR; i++) {
1066                 pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
1067                                                __GFP_OTHER_NODE,
1068                                                vma, address, page_to_nid(page));
1069                 if (unlikely(!pages[i] ||
1070                              mem_cgroup_try_charge(pages[i], mm, GFP_KERNEL,
1071                                                    &memcg))) {
1072                         if (pages[i])
1073                                 put_page(pages[i]);
1074                         while (--i >= 0) {
1075                                 memcg = (void *)page_private(pages[i]);
1076                                 set_page_private(pages[i], 0);
1077                                 mem_cgroup_cancel_charge(pages[i], memcg);
1078                                 put_page(pages[i]);
1079                         }
1080                         kfree(pages);
1081                         ret |= VM_FAULT_OOM;
1082                         goto out;
1083                 }
1084                 set_page_private(pages[i], (unsigned long)memcg);
1085         }
1086
1087         for (i = 0; i < HPAGE_PMD_NR; i++) {
1088                 copy_user_highpage(pages[i], page + i,
1089                                    haddr + PAGE_SIZE * i, vma);
1090                 __SetPageUptodate(pages[i]);
1091                 cond_resched();
1092         }
1093
1094         mmun_start = haddr;
1095         mmun_end   = haddr + HPAGE_PMD_SIZE;
1096         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1097
1098         ptl = pmd_lock(mm, pmd);
1099         if (unlikely(!pmd_same(*pmd, orig_pmd)))
1100                 goto out_free_pages;
1101         VM_BUG_ON_PAGE(!PageHead(page), page);
1102
1103         pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1104         /* leave pmd empty until pte is filled */
1105
1106         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1107         pmd_populate(mm, &_pmd, pgtable);
1108
1109         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1110                 pte_t *pte, entry;
1111                 entry = mk_pte(pages[i], vma->vm_page_prot);
1112                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1113                 memcg = (void *)page_private(pages[i]);
1114                 set_page_private(pages[i], 0);
1115                 page_add_new_anon_rmap(pages[i], vma, haddr);
1116                 mem_cgroup_commit_charge(pages[i], memcg, false);
1117                 lru_cache_add_active_or_unevictable(pages[i], vma);
1118                 pte = pte_offset_map(&_pmd, haddr);
1119                 VM_BUG_ON(!pte_none(*pte));
1120                 set_pte_at(mm, haddr, pte, entry);
1121                 pte_unmap(pte);
1122         }
1123         kfree(pages);
1124
1125         smp_wmb(); /* make pte visible before pmd */
1126         pmd_populate(mm, pmd, pgtable);
1127         page_remove_rmap(page);
1128         spin_unlock(ptl);
1129
1130         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1131
1132         ret |= VM_FAULT_WRITE;
1133         put_page(page);
1134
1135 out:
1136         return ret;
1137
1138 out_free_pages:
1139         spin_unlock(ptl);
1140         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1141         for (i = 0; i < HPAGE_PMD_NR; i++) {
1142                 memcg = (void *)page_private(pages[i]);
1143                 set_page_private(pages[i], 0);
1144                 mem_cgroup_cancel_charge(pages[i], memcg);
1145                 put_page(pages[i]);
1146         }
1147         kfree(pages);
1148         goto out;
1149 }
1150
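/*
 * Copy-on-write fault on a trans-huge pmd: reuse the page in place if we
 * are the only mapper, otherwise allocate a fresh huge page, falling back
 * to splitting or per-page copies when that allocation fails.
 */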
1151 int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1152                         unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
1153 {
1154         spinlock_t *ptl;
1155         int ret = 0;
1156         struct page *page = NULL, *new_page;
1157         struct mem_cgroup *memcg;
1158         unsigned long haddr;
1159         unsigned long mmun_start;       /* For mmu_notifiers */
1160         unsigned long mmun_end;         /* For mmu_notifiers */
1161         gfp_t huge_gfp;                 /* for allocation and charge */
1162
1163         ptl = pmd_lockptr(mm, pmd);
1164         VM_BUG_ON_VMA(!vma->anon_vma, vma);
1165         haddr = address & HPAGE_PMD_MASK;
1166         if (is_huge_zero_pmd(orig_pmd))
1167                 goto alloc;
1168         spin_lock(ptl);
1169         if (unlikely(!pmd_same(*pmd, orig_pmd)))
1170                 goto out_unlock;
1171
1172         page = pmd_page(orig_pmd);
1173         VM_BUG_ON_PAGE(!PageCompound(page) || !PageHead(page), page);
1174         if (page_mapcount(page) == 1) {
1175                 pmd_t entry;
1176                 entry = pmd_mkyoung(orig_pmd);
1177                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1178                 if (pmdp_set_access_flags(vma, haddr, pmd, entry,  1))
1179                         update_mmu_cache_pmd(vma, address, pmd);
1180                 ret |= VM_FAULT_WRITE;
1181                 goto out_unlock;
1182         }
1183         get_user_huge_page(page);
1184         spin_unlock(ptl);
1185 alloc:
1186         if (transparent_hugepage_enabled(vma) &&
1187             !transparent_hugepage_debug_cow()) {
1188                 huge_gfp = alloc_hugepage_gfpmask(transparent_hugepage_defrag(vma), 0);
1189                 new_page = alloc_hugepage_vma(huge_gfp, vma, haddr, HPAGE_PMD_ORDER);
1190         } else
1191                 new_page = NULL;
1192
1193         if (unlikely(!new_page)) {
1194                 if (!page) {
1195                         split_huge_page_pmd(vma, address, pmd);
1196                         ret |= VM_FAULT_FALLBACK;
1197                 } else {
1198                         ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
1199                                         pmd, orig_pmd, page, haddr);
1200                         if (ret & VM_FAULT_OOM) {
1201                                 split_huge_page(page);
1202                                 ret |= VM_FAULT_FALLBACK;
1203                         }
1204                         put_user_huge_page(page);
1205                 }
1206                 count_vm_event(THP_FAULT_FALLBACK);
1207                 goto out;
1208         }
1209
1210         if (unlikely(mem_cgroup_try_charge(new_page, mm, huge_gfp, &memcg))) {
1211                 put_page(new_page);
1212                 if (page) {
1213                         split_huge_page(page);
1214                         put_user_huge_page(page);
1215                 } else
1216                         split_huge_page_pmd(vma, address, pmd);
1217                 ret |= VM_FAULT_FALLBACK;
1218                 count_vm_event(THP_FAULT_FALLBACK);
1219                 goto out;
1220         }
1221
1222         count_vm_event(THP_FAULT_ALLOC);
1223
1224         if (!page)
1225                 clear_huge_page(new_page, haddr, HPAGE_PMD_NR);
1226         else
1227                 copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
1228         __SetPageUptodate(new_page);
1229
1230         mmun_start = haddr;
1231         mmun_end   = haddr + HPAGE_PMD_SIZE;
1232         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1233
1234         spin_lock(ptl);
1235         if (page)
1236                 put_user_huge_page(page);
1237         if (unlikely(!pmd_same(*pmd, orig_pmd))) {
1238                 spin_unlock(ptl);
1239                 mem_cgroup_cancel_charge(new_page, memcg);
1240                 put_page(new_page);
1241                 goto out_mn;
1242         } else {
1243                 pmd_t entry;
1244                 entry = mk_huge_pmd(new_page, vma->vm_page_prot);
1245                 entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
1246                 pmdp_huge_clear_flush_notify(vma, haddr, pmd);
1247                 page_add_new_anon_rmap(new_page, vma, haddr);
1248                 mem_cgroup_commit_charge(new_page, memcg, false);
1249                 lru_cache_add_active_or_unevictable(new_page, vma);
1250                 set_pmd_at(mm, haddr, pmd, entry);
1251                 update_mmu_cache_pmd(vma, address, pmd);
1252                 if (!page) {
1253                         add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
1254                         put_huge_zero_page();
1255                 } else {
1256                         VM_BUG_ON_PAGE(!PageHead(page), page);
1257                         page_remove_rmap(page);
1258                         put_page(page);
1259                 }
1260                 ret |= VM_FAULT_WRITE;
1261         }
1262         spin_unlock(ptl);
1263 out_mn:
1264         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1265 out:
1266         return ret;
1267 out_unlock:
1268         spin_unlock(ptl);
1269         return ret;
1270 }
1271
1272 /*
1273  * FOLL_FORCE can write to even unwritable pmd's, but only
1274  * after we've gone through a COW cycle and they are dirty.
1275  */
1276 static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
1277 {
1278         return pmd_write(pmd) ||
1279                ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
1280 }
1281
1282 struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
1283                                    unsigned long addr,
1284                                    pmd_t *pmd,
1285                                    unsigned int flags)
1286 {
1287         struct mm_struct *mm = vma->vm_mm;
1288         struct page *page = NULL;
1289
1290         assert_spin_locked(pmd_lockptr(mm, pmd));
1291
1292         if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
1293                 goto out;
1294
1295         /* Avoid dumping huge zero page */
1296         if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
1297                 return ERR_PTR(-EFAULT);
1298
1299         /* Full NUMA hinting faults to serialise migration in fault paths */
1300         if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
1301                 goto out;
1302
1303         page = pmd_page(*pmd);
1304         VM_BUG_ON_PAGE(!PageHead(page), page);
1305         if (flags & FOLL_TOUCH) {
1306                 pmd_t _pmd;
1307                 /*
1308                  * We should set the dirty bit only for FOLL_WRITE but
1309                  * for now the dirty bit in the pmd is meaningless.
1310                  * If the dirty bit ever becomes meaningful and
1311                  * we only set it with FOLL_WRITE, an atomic
1312                  * set_bit will be required on the pmd to set the
1313                  * young bit, instead of the current set_pmd_at.
1314                  */
1315                 _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
1316                 if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
1317                                           pmd, _pmd,  1))
1318                         update_mmu_cache_pmd(vma, addr, pmd);
1319         }
1320         if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
1321                 if (page->mapping && trylock_page(page)) {
1322                         lru_add_drain();
1323                         if (page->mapping)
1324                                 mlock_vma_page(page);
1325                         unlock_page(page);
1326                 }
1327         }
1328         page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
1329         VM_BUG_ON_PAGE(!PageCompound(page), page);
1330         if (flags & FOLL_GET)
1331                 get_page_foll(page);
1332
1333 out:
1334         return page;
1335 }
1336
1337 /* NUMA hinting page fault entry point for trans huge pmds */
1338 int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
1339                                 unsigned long addr, pmd_t pmd, pmd_t *pmdp)
1340 {
1341         spinlock_t *ptl;
1342         struct anon_vma *anon_vma = NULL;
1343         struct page *page;
1344         unsigned long haddr = addr & HPAGE_PMD_MASK;
1345         int page_nid = -1, this_nid = numa_node_id();
1346         int target_nid, last_cpupid = -1;
1347         bool page_locked;
1348         bool migrated = false;
1349         bool was_writable;
1350         int flags = 0;
1351
1352         /* A PROT_NONE fault should not end up here */
1353         BUG_ON(!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)));
1354
1355         ptl = pmd_lock(mm, pmdp);
1356         if (unlikely(!pmd_same(pmd, *pmdp)))
1357                 goto out_unlock;
1358
1359         /*
1360          * If there are potential migrations, wait for completion and retry
1361          * without disrupting NUMA hinting information. Do not relock and
1362          * check_same as the page may no longer be mapped.
1363          */
1364         if (unlikely(pmd_trans_migrating(*pmdp))) {
1365                 page = pmd_page(*pmdp);
1366                 if (!get_page_unless_zero(page))
1367                         goto out_unlock;
1368                 spin_unlock(ptl);
1369                 wait_on_page_locked(page);
1370                 put_page(page);
1371                 goto out;
1372         }
1373
1374         page = pmd_page(pmd);
1375         BUG_ON(is_huge_zero_page(page));
1376         page_nid = page_to_nid(page);
1377         last_cpupid = page_cpupid_last(page);
1378         count_vm_numa_event(NUMA_HINT_FAULTS);
1379         if (page_nid == this_nid) {
1380                 count_vm_numa_event(NUMA_HINT_FAULTS_LOCAL);
1381                 flags |= TNF_FAULT_LOCAL;
1382         }
1383
1384         /* See similar comment in do_numa_page for explanation */
1385         if (!(vma->vm_flags & VM_WRITE))
1386                 flags |= TNF_NO_GROUP;
1387
1388         /*
1389          * Acquire the page lock to serialise THP migrations but avoid dropping
1390          * page_table_lock if at all possible
1391          */
1392         page_locked = trylock_page(page);
1393         target_nid = mpol_misplaced(page, vma, haddr);
1394         if (target_nid == -1) {
1395                 /* If the page was locked, there are no parallel migrations */
1396                 if (page_locked)
1397                         goto clear_pmdnuma;
1398         }
1399
1400         /* Migration could have started since the pmd_trans_migrating check */
1401         if (!page_locked) {
1402                 if (!get_page_unless_zero(page))
1403                         goto out_unlock;
1404                 spin_unlock(ptl);
1405                 wait_on_page_locked(page);
1406                 put_page(page);
1407                 page_nid = -1;
1408                 goto out;
1409         }
1410
1411         /*
1412          * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
1413          * to serialise splits.
1414          */
1415         get_page(page);
1416         spin_unlock(ptl);
1417         anon_vma = page_lock_anon_vma_read(page);
1418
1419         /* Confirm the PMD did not change while page_table_lock was released */
1420         spin_lock(ptl);
1421         if (unlikely(!pmd_same(pmd, *pmdp))) {
1422                 unlock_page(page);
1423                 put_page(page);
1424                 page_nid = -1;
1425                 goto out_unlock;
1426         }
1427
1428         /* Bail if we fail to protect against THP splits for any reason */
1429         if (unlikely(!anon_vma)) {
1430                 put_page(page);
1431                 page_nid = -1;
1432                 goto clear_pmdnuma;
1433         }
1434
1435         /*
1436          * Migrate the THP to the requested node, returns with page unlocked
1437          * and access rights restored.
1438          */
1439         spin_unlock(ptl);
1440         migrated = migrate_misplaced_transhuge_page(mm, vma,
1441                                 pmdp, pmd, addr, page, target_nid);
1442         if (migrated) {
1443                 flags |= TNF_MIGRATED;
1444                 page_nid = target_nid;
1445         } else
1446                 flags |= TNF_MIGRATE_FAIL;
1447
1448         goto out;
1449 clear_pmdnuma:
1450         BUG_ON(!PageLocked(page));
1451         was_writable = pmd_write(pmd);
1452         pmd = pmd_modify(pmd, vma->vm_page_prot);
1453         pmd = pmd_mkyoung(pmd);
1454         if (was_writable)
1455                 pmd = pmd_mkwrite(pmd);
1456         set_pmd_at(mm, haddr, pmdp, pmd);
1457         update_mmu_cache_pmd(vma, addr, pmdp);
1458         unlock_page(page);
1459 out_unlock:
1460         spin_unlock(ptl);
1461
1462 out:
1463         if (anon_vma)
1464                 page_unlock_anon_vma_read(anon_vma);
1465
1466         if (page_nid != -1)
1467                 task_numa_fault(last_cpupid, page_nid, HPAGE_PMD_NR, flags);
1468
1469         return 0;
1470 }
1471
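/*
 * Tear down a huge pmd during zap/munmap. Returns 1 if a huge pmd was
 * cleared (freeing the deposited page table where one exists), 0 if the
 * pmd was not huge and the caller must zap it at the pte level.
 */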
1472 int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1473                  pmd_t *pmd, unsigned long addr)
1474 {
1475         pmd_t orig_pmd;
1476         spinlock_t *ptl;
1477
1478         if (__pmd_trans_huge_lock(pmd, vma, &ptl) != 1)
1479                 return 0;
1480         /*
1481          * For architectures like ppc64 we look at deposited pgtable
1482          * when calling pmdp_huge_get_and_clear. So do the
1483          * pgtable_trans_huge_withdraw after finishing pmdp related
1484          * operations.
1485          */
1486         orig_pmd = pmdp_huge_get_and_clear_full(tlb->mm, addr, pmd,
1487                         tlb->fullmm);
1488         tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1489         if (vma_is_dax(vma)) {
1490                 spin_unlock(ptl);
1491                 if (is_huge_zero_pmd(orig_pmd))
1492                         put_huge_zero_page();
1493         } else if (is_huge_zero_pmd(orig_pmd)) {
1494                 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1495                 atomic_long_dec(&tlb->mm->nr_ptes);
1496                 spin_unlock(ptl);
1497                 put_huge_zero_page();
1498         } else {
1499                 struct page *page = pmd_page(orig_pmd);
1500                 page_remove_rmap(page);
1501                 VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
1502                 add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
1503                 VM_BUG_ON_PAGE(!PageHead(page), page);
1504                 pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
1505                 atomic_long_dec(&tlb->mm->nr_ptes);
1506                 spin_unlock(ptl);
1507                 tlb_remove_page(tlb, page);
1508         }
1509         return 1;
1510 }
1511
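/*
 * Move a huge pmd from old_addr to new_addr for mremap(). Returns 1 if
 * the pmd was relocated under both page table locks, 0 if the caller
 * must fall back to moving the range pte by pte.
 */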
1512 int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
1513                   unsigned long old_addr,
1514                   unsigned long new_addr, unsigned long old_end,
1515                   pmd_t *old_pmd, pmd_t *new_pmd)
1516 {
1517         spinlock_t *old_ptl, *new_ptl;
1518         int ret = 0;
1519         pmd_t pmd;
1520
1521         struct mm_struct *mm = vma->vm_mm;
1522
1523         if ((old_addr & ~HPAGE_PMD_MASK) ||
1524             (new_addr & ~HPAGE_PMD_MASK) ||
1525             old_end - old_addr < HPAGE_PMD_SIZE ||
1526             (new_vma->vm_flags & VM_NOHUGEPAGE))
1527                 goto out;
1528
1529         /*
1530          * The destination pmd shouldn't be established, free_pgtables()
1531          * should have released it.
1532          */
1533         if (WARN_ON(!pmd_none(*new_pmd))) {
1534                 VM_BUG_ON(pmd_trans_huge(*new_pmd));
1535                 goto out;
1536         }
1537
1538         /*
1539          * We don't have to worry about the ordering of src and dst
1540          * ptlocks because exclusive mmap_sem prevents deadlock.
1541          */
1542         ret = __pmd_trans_huge_lock(old_pmd, vma, &old_ptl);
1543         if (ret == 1) {
1544                 new_ptl = pmd_lockptr(mm, new_pmd);
1545                 if (new_ptl != old_ptl)
1546                         spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
1547                 pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
1548                 VM_BUG_ON(!pmd_none(*new_pmd));
1549
1550                 if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
1551                         pgtable_t pgtable;
1552                         pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
1553                         pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
1554                 }
1555                 set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
1556                 if (new_ptl != old_ptl)
1557                         spin_unlock(new_ptl);
1558                 spin_unlock(old_ptl);
1559         }
1560 out:
1561         return ret;
1562 }
1563
1564 /*
1565  * Returns
1566  *  - 0 if PMD could not be locked
1567  *  - 1 if PMD was locked but protections unchanged and TLB flush unnecessary
1568  *  - HPAGE_PMD_NR if protections changed and TLB flush necessary
1569  */
1570 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
1571                 unsigned long addr, pgprot_t newprot, int prot_numa)
1572 {
1573         struct mm_struct *mm = vma->vm_mm;
1574         spinlock_t *ptl;
1575         int ret = 0;
1576
1577         if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
1578                 pmd_t entry;
1579                 bool preserve_write = prot_numa && pmd_write(*pmd);
1580                 ret = 1;
1581
1582                 /*
1583                  * Avoid trapping faults against the zero page. The read-only
1584                  * data is likely to be read-cached on the local CPU and
1585                  * local/remote hits to the zero page are not interesting.
1586                  */
1587                 if (prot_numa && is_huge_zero_pmd(*pmd)) {
1588                         spin_unlock(ptl);
1589                         return ret;
1590                 }
1591
1592                 if (!prot_numa || !pmd_protnone(*pmd)) {
1593                         entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
1594                         entry = pmd_modify(entry, newprot);
1595                         if (preserve_write)
1596                                 entry = pmd_mkwrite(entry);
1597                         ret = HPAGE_PMD_NR;
1598                         set_pmd_at(mm, addr, pmd, entry);
1599                         BUG_ON(!preserve_write && pmd_write(entry));
1600                 }
1601                 spin_unlock(ptl);
1602         }
1603
1604         return ret;
1605 }
1606
1607 /*
1608  * Returns 1 if a given pmd maps a stable (not under splitting) thp.
1609  * Returns -1 if it maps a thp under splitting. Returns 0 otherwise.
1610  *
1611  * Note that if it returns 1, this routine returns without unlocking the page
1612  * table lock, so the caller must unlock it.
1613  */
1614 int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
1615                 spinlock_t **ptl)
1616 {
1617         *ptl = pmd_lock(vma->vm_mm, pmd);
1618         if (likely(pmd_trans_huge(*pmd))) {
1619                 if (unlikely(pmd_trans_splitting(*pmd))) {
1620                         spin_unlock(*ptl);
1621                         wait_split_huge_page(vma->anon_vma, pmd);
1622                         return -1;
1623                 } else {
1624                         /* Thp mapped by 'pmd' is stable, so we can
1625                          * handle it as it is. */
1626                         return 1;
1627                 }
1628         }
1629         spin_unlock(*ptl);
1630         return 0;
1631 }
1632
1633 /*
1634  * This function checks whether a given @page is mapped at @address in the
1635  * virtual address space of @mm.
1636  *
1637  * If it is, the pmd is returned with the page table lock held and passed
1638  * back to the caller via @ptl.
1639  * If it is not, NULL is returned and the page table lock is not held.
1640  */
1641 pmd_t *page_check_address_pmd(struct page *page,
1642                               struct mm_struct *mm,
1643                               unsigned long address,
1644                               enum page_check_address_pmd_flag flag,
1645                               spinlock_t **ptl)
1646 {
1647         pgd_t *pgd;
1648         pud_t *pud;
1649         pmd_t *pmd;
1650
1651         if (address & ~HPAGE_PMD_MASK)
1652                 return NULL;
1653
1654         pgd = pgd_offset(mm, address);
1655         if (!pgd_present(*pgd))
1656                 return NULL;
1657         pud = pud_offset(pgd, address);
1658         if (!pud_present(*pud))
1659                 return NULL;
1660         pmd = pmd_offset(pud, address);
1661
1662         *ptl = pmd_lock(mm, pmd);
1663         if (!pmd_present(*pmd))
1664                 goto unlock;
1665         if (pmd_page(*pmd) != page)
1666                 goto unlock;
1667         /*
1668          * split_vma() may create temporary aliased mappings. There is
1669          * no risk as long as all huge pmd are found and have their
1670          * splitting bit set before __split_huge_page_refcount
1671          * runs. Finding the same huge pmd more than once during the
1672          * same rmap walk is not a problem.
1673          */
1674         if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
1675             pmd_trans_splitting(*pmd))
1676                 goto unlock;
1677         if (pmd_trans_huge(*pmd)) {
1678                 VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
1679                           !pmd_trans_splitting(*pmd));
1680                 return pmd;
1681         }
1682 unlock:
1683         spin_unlock(*ptl);
1684         return NULL;
1685 }
1686
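/*
 * Mark the pmd mapping @page in @vma as splitting so that no new
 * references can be taken through it. Returns 1 if a huge pmd was found
 * and flagged, 0 otherwise.
 */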
1687 static int __split_huge_page_splitting(struct page *page,
1688                                        struct vm_area_struct *vma,
1689                                        unsigned long address)
1690 {
1691         struct mm_struct *mm = vma->vm_mm;
1692         spinlock_t *ptl;
1693         pmd_t *pmd;
1694         int ret = 0;
1695         /* For mmu_notifiers */
1696         const unsigned long mmun_start = address;
1697         const unsigned long mmun_end   = address + HPAGE_PMD_SIZE;
1698
1699         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1700         pmd = page_check_address_pmd(page, mm, address,
1701                         PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG, &ptl);
1702         if (pmd) {
1703                 /*
1704                  * We can't temporarily set the pmd to null in order
1705                  * to split it, the pmd must remain marked huge at all
1706                  * times or the VM won't take the pmd_trans_huge paths
1707                  * and it won't wait on the anon_vma->root->rwsem to
1708                  * serialize against split_huge_page*.
1709                  */
1710                 pmdp_splitting_flush(vma, address, pmd);
1711
1712                 ret = 1;
1713                 spin_unlock(ptl);
1714         }
1715         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1716
1717         return ret;
1718 }
1719
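/*
 * Distribute the head page's refcount, mapcount, flags and mapping to
 * the tail pages and turn the compound page into HPAGE_PMD_NR
 * independent pages. Called with the anon_vma write lock held and all
 * pmds mapping the page already marked as splitting.
 */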
1720 static void __split_huge_page_refcount(struct page *page,
1721                                        struct list_head *list)
1722 {
1723         int i;
1724         struct zone *zone = page_zone(page);
1725         struct lruvec *lruvec;
1726         int tail_count = 0;
1727
1728         /* prevent PageLRU to go away from under us, and freeze lru stats */
1729         spin_lock_irq(&zone->lru_lock);
1730         lruvec = mem_cgroup_page_lruvec(page, zone);
1731
1732         compound_lock(page);
1733         /* complete memcg works before add pages to LRU */
1734         mem_cgroup_split_huge_fixup(page);
1735
1736         for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
1737                 struct page *page_tail = page + i;
1738
1739                 /* tail_page->_mapcount cannot change */
1740                 BUG_ON(page_mapcount(page_tail) < 0);
1741                 tail_count += page_mapcount(page_tail);
1742                 /* check for overflow */
1743                 BUG_ON(tail_count < 0);
1744                 BUG_ON(atomic_read(&page_tail->_count) != 0);
1745                 /*
1746                  * tail_page->_count is zero and not changing from
1747                  * under us. But get_page_unless_zero() may be running
1748                  * from under us on the tail_page. If we used
1749                  * atomic_set() below instead of atomic_add(), we
1750                  * would then run atomic_set() concurrently with
1751                  * get_page_unless_zero(), and atomic_set() is
1752                  * implemented in C not using locked ops. spin_unlock
1753                  * on x86 sometimes uses locked ops because of PPro
1754                  * errata 66, 92, so unless somebody can guarantee
1755                  * atomic_set() here would be safe on all archs (and
1756                  * not only on x86), it's safer to use atomic_add().
1757                  */
1758                 atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
1759                            &page_tail->_count);
1760
1761                 /* after clearing PageTail the gup refcount can be released */
1762                 smp_mb__after_atomic();
1763
1764                 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1765                 page_tail->flags |= (page->flags &
1766                                      ((1L << PG_referenced) |
1767                                       (1L << PG_swapbacked) |
1768                                       (1L << PG_mlocked) |
1769                                       (1L << PG_uptodate) |
1770                                       (1L << PG_active) |
1771                                       (1L << PG_unevictable)));
1772                 page_tail->flags |= (1L << PG_dirty);
1773
1774                 clear_compound_head(page_tail);
1775
1776                 if (page_is_young(page))
1777                         set_page_young(page_tail);
1778                 if (page_is_idle(page))
1779                         set_page_idle(page_tail);
1780
1781                 /*
1782                  * __split_huge_page_splitting() already set the
1783                  * splitting bit in all pmd that could map this
1784                  * hugepage, that will ensure no CPU can alter the
1785                  * mapcount on the head page. The mapcount is only
1786                  * accounted in the head page and it has to be
1787                  * transferred to all tail pages in the below code. So
1788          * for this code to be safe, during the split the mapcount
1789                  * can't change. But that doesn't mean userland can't
1790                  * keep changing and reading the page contents while
1791                  * we transfer the mapcount, so the pmd splitting
1792                  * status is achieved setting a reserved bit in the
1793                  * pmd, not by clearing the present bit.
1794                 */
1795                 page_tail->_mapcount = page->_mapcount;
1796
1797                 BUG_ON(page_tail->mapping);
1798                 page_tail->mapping = page->mapping;
1799
1800                 page_tail->index = page->index + i;
1801                 page_cpupid_xchg_last(page_tail, page_cpupid_last(page));
1802
1803                 BUG_ON(!PageAnon(page_tail));
1804                 BUG_ON(!PageUptodate(page_tail));
1805                 BUG_ON(!PageDirty(page_tail));
1806                 BUG_ON(!PageSwapBacked(page_tail));
1807
1808                 lru_add_page_tail(page, page_tail, lruvec, list);
1809         }
1810         atomic_sub(tail_count, &page->_count);
1811         BUG_ON(atomic_read(&page->_count) <= 0);
1812
1813         __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
1814
1815         ClearPageCompound(page);
1816         compound_unlock(page);
1817         spin_unlock_irq(&zone->lru_lock);
1818
1819         for (i = 1; i < HPAGE_PMD_NR; i++) {
1820                 struct page *page_tail = page + i;
1821                 BUG_ON(page_count(page_tail) <= 0);
1822                 /*
1823                  * Tail pages may be freed if there wasn't any mapping,
1824                  * for example if add_to_swap() is running on an lru page
1825                  * that had its mapping zapped. Freeing these pages
1826                  * requires taking the lru_lock, so we do the put_page
1827                  * of the tail pages after the split is complete.
1828                  */
1829                 put_page(page_tail);
1830         }
1831
1832         /*
1833          * Only the head page (now a regular page) is required
1834          * to be pinned by the caller.
1835          */
1836         BUG_ON(page_count(page) <= 0);
1837 }
1838
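/*
 * Replace the splitting huge pmd at @address with a regular page table
 * mapping the individual subpages. Returns 1 if a splitting pmd was
 * found and rewritten, 0 otherwise.
 */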
1839 static int __split_huge_page_map(struct page *page,
1840                                  struct vm_area_struct *vma,
1841                                  unsigned long address)
1842 {
1843         struct mm_struct *mm = vma->vm_mm;
1844         spinlock_t *ptl;
1845         pmd_t *pmd, _pmd;
1846         int ret = 0, i;
1847         pgtable_t pgtable;
1848         unsigned long haddr;
1849
1850         pmd = page_check_address_pmd(page, mm, address,
1851                         PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG, &ptl);
1852         if (pmd) {
1853                 pgtable = pgtable_trans_huge_withdraw(mm, pmd);
1854                 pmd_populate(mm, &_pmd, pgtable);
1855                 if (pmd_write(*pmd))
1856                         BUG_ON(page_mapcount(page) != 1);
1857
1858                 haddr = address;
1859                 for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
1860                         pte_t *pte, entry;
1861                         BUG_ON(PageCompound(page+i));
1862                         /*
1863                          * Note that NUMA hinting access restrictions are not
1864                          * transferred to avoid any possibility of altering
1865                          * permissions across VMAs.
1866                          */
1867                         entry = mk_pte(page + i, vma->vm_page_prot);
1868                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1869                         if (!pmd_write(*pmd))
1870                                 entry = pte_wrprotect(entry);
1871                         if (!pmd_young(*pmd))
1872                                 entry = pte_mkold(entry);
1873                         pte = pte_offset_map(&_pmd, haddr);
1874                         BUG_ON(!pte_none(*pte));
1875                         set_pte_at(mm, haddr, pte, entry);
1876                         pte_unmap(pte);
1877                 }
1878
1879                 smp_wmb(); /* make pte visible before pmd */
1880                 /*
1881                  * Up to this point the pmd is present and huge and
1882                  * userland has the whole access to the hugepage
1883                  * during the split (which happens in place). If we
1884                  * overwrite the pmd with the not-huge version
1885                  * pointing to the pte here (which of course we could
1886                  * if all CPUs were bug free), userland could trigger
1887                  * a small page size TLB miss on the small sized TLB
1888                  * while the hugepage TLB entry is still established
1889          * in the huge TLB. Some CPUs don't like that. See
1890                  * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
1891          * Erratum 383 on page 93. Intel should be safe but
1892          * also warns that it's only safe if the permission
1893          * and cache attributes of the two entries loaded in
1894          * the two TLBs are identical (which should be the case
1895                  * here). But it is generally safer to never allow
1896                  * small and huge TLB entries for the same virtual
1897                  * address to be loaded simultaneously. So instead of
1898                  * doing "pmd_populate(); flush_pmd_tlb_range();" we first
1899                  * mark the current pmd notpresent (atomically because
1900                  * here the pmd_trans_huge and pmd_trans_splitting
1901                  * must remain set at all times on the pmd until the
1902                  * split is complete for this pmd), then we flush the
1903                  * SMP TLB and finally we write the non-huge version
1904                  * of the pmd entry with pmd_populate.
1905                  */
1906                 pmdp_invalidate(vma, address, pmd);
1907                 pmd_populate(mm, pmd, pgtable);
1908                 ret = 1;
1909                 spin_unlock(ptl);
1910         }
1911
1912         return ret;
1913 }
1914
1915 /* must be called with anon_vma->root->rwsem held */
1916 static void __split_huge_page(struct page *page,
1917                               struct anon_vma *anon_vma,
1918                               struct list_head *list)
1919 {
1920         int mapcount, mapcount2;
1921         pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1922         struct anon_vma_chain *avc;
1923
1924         BUG_ON(!PageHead(page));
1925         BUG_ON(PageTail(page));
1926
1927         mapcount = 0;
1928         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1929                 struct vm_area_struct *vma = avc->vma;
1930                 unsigned long addr = vma_address(page, vma);
1931                 BUG_ON(is_vma_temporary_stack(vma));
1932                 mapcount += __split_huge_page_splitting(page, vma, addr);
1933         }
1934         /*
1935          * It is critical that new vmas are added to the tail of the
1936          * anon_vma list. This guarantees that if copy_huge_pmd() runs
1937          * and establishes a child pmd before
1938          * __split_huge_page_splitting() freezes the parent pmd (so if
1939          * we fail to prevent copy_huge_pmd() from running until the
1940          * whole __split_huge_page() is complete), we will still see
1941          * the newly established pmd of the child later during the
1942          * walk, to be able to set it as pmd_trans_splitting too.
1943          */
1944         if (mapcount != page_mapcount(page)) {
1945                 pr_err("mapcount %d page_mapcount %d\n",
1946                         mapcount, page_mapcount(page));
1947                 BUG();
1948         }
1949
1950         __split_huge_page_refcount(page, list);
1951
1952         mapcount2 = 0;
1953         anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
1954                 struct vm_area_struct *vma = avc->vma;
1955                 unsigned long addr = vma_address(page, vma);
1956                 BUG_ON(is_vma_temporary_stack(vma));
1957                 mapcount2 += __split_huge_page_map(page, vma, addr);
1958         }
1959         if (mapcount != mapcount2) {
1960                 pr_err("mapcount %d mapcount2 %d page_mapcount %d\n",
1961                         mapcount, mapcount2, page_mapcount(page));
1962                 BUG();
1963         }
1964 }
1965
1966 /*
1967  * Split a hugepage into normal pages. This doesn't change the position of the
1968  * head page. If @list is null, tail pages will be added to the LRU list,
1969  * otherwise to @list. Both head and tail pages will inherit mapping, flags,
1970  * and so on from the hugepage.
1971  * Return 0 if the hugepage is split successfully, otherwise return 1.
1972  */
1973 int split_huge_page_to_list(struct page *page, struct list_head *list)
1974 {
1975         struct anon_vma *anon_vma;
1976         int ret = 1;
1977
1978         BUG_ON(is_huge_zero_page(page));
1979         BUG_ON(!PageAnon(page));
1980
1981         /*
1982          * The caller does not necessarily hold an mmap_sem that would prevent
1983          * the anon_vma disappearing, so we first take a reference to it
1984          * and then lock the anon_vma for write. This is similar to
1985          * page_lock_anon_vma_read except the write lock is taken to serialise
1986          * against parallel split or collapse operations.
1987          */
1988         anon_vma = page_get_anon_vma(page);
1989         if (!anon_vma)
1990                 goto out;
1991         anon_vma_lock_write(anon_vma);
1992
1993         ret = 0;
1994         if (!PageCompound(page))
1995                 goto out_unlock;
1996
1997         BUG_ON(!PageSwapBacked(page));
1998         __split_huge_page(page, anon_vma, list);
1999         count_vm_event(THP_SPLIT);
2000
2001         BUG_ON(PageCompound(page));
2002 out_unlock:
2003         anon_vma_unlock_write(anon_vma);
2004         put_anon_vma(anon_vma);
2005 out:
2006         return ret;
2007 }
2008
2009 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
2010
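/*
 * Handle MADV_HUGEPAGE / MADV_NOHUGEPAGE: update the vma flags and, for
 * MADV_HUGEPAGE, register the mm with khugepaged right away instead of
 * waiting for a page fault. Returns -EINVAL for VM_NO_THP mappings.
 */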
2011 int hugepage_madvise(struct vm_area_struct *vma,
2012                      unsigned long *vm_flags, int advice)
2013 {
2014         switch (advice) {
2015         case MADV_HUGEPAGE:
2016 #ifdef CONFIG_S390
2017                 /*
2018                  * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
2019                  * can't handle this properly after s390_enable_sie, so we simply
2020                  * ignore the madvise to prevent qemu from causing a SIGSEGV.
2021                  */
2022                 if (mm_has_pgste(vma->vm_mm))
2023                         return 0;
2024 #endif
2025                 /*
2026                  * Be somewhat over-protective like KSM for now!
2027                  */
2028                 if (*vm_flags & VM_NO_THP)
2029                         return -EINVAL;
2030                 *vm_flags &= ~VM_NOHUGEPAGE;
2031                 *vm_flags |= VM_HUGEPAGE;
2032                 /*
2033                  * If the vma becomes good for khugepaged to scan,
2034                  * register it here without waiting for a page fault that
2035                  * may not happen any time soon.
2036                  */
2037                 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
2038                         return -ENOMEM;
2039                 break;
2040         case MADV_NOHUGEPAGE:
2041                 /*
2042                  * Be somewhat over-protective like KSM for now!
2043                  */
2044                 if (*vm_flags & VM_NO_THP)
2045                         return -EINVAL;
2046                 *vm_flags &= ~VM_HUGEPAGE;
2047                 *vm_flags |= VM_NOHUGEPAGE;
2048                 /*
2049                  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
2050                  * this vma, even if the mm stays registered in khugepaged because
2051                  * it got registered before VM_NOHUGEPAGE was set.
2052                  */
2053                 break;
2054         }
2055
2056         return 0;
2057 }
2058
2059 static int __init khugepaged_slab_init(void)
2060 {
2061         mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
2062                                           sizeof(struct mm_slot),
2063                                           __alignof__(struct mm_slot), 0, NULL);
2064         if (!mm_slot_cache)
2065                 return -ENOMEM;
2066
2067         return 0;
2068 }
2069
2070 static void __init khugepaged_slab_exit(void)
2071 {
2072         kmem_cache_destroy(mm_slot_cache);
2073 }
2074
2075 static inline struct mm_slot *alloc_mm_slot(void)
2076 {
2077         if (!mm_slot_cache)     /* initialization failed */
2078                 return NULL;
2079         return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
2080 }
2081
2082 static inline void free_mm_slot(struct mm_slot *mm_slot)
2083 {
2084         kmem_cache_free(mm_slot_cache, mm_slot);
2085 }
2086
2087 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
2088 {
2089         struct mm_slot *mm_slot;
2090
2091         hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
2092                 if (mm == mm_slot->mm)
2093                         return mm_slot;
2094
2095         return NULL;
2096 }
2097
2098 static void insert_to_mm_slots_hash(struct mm_struct *mm,
2099                                     struct mm_slot *mm_slot)
2100 {
2101         mm_slot->mm = mm;
2102         hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
2103 }
2104
2105 static inline int khugepaged_test_exit(struct mm_struct *mm)
2106 {
2107         return atomic_read(&mm->mm_users) == 0;
2108 }
2109
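/*
 * Register @mm with khugepaged: allocate an mm_slot, add it to the scan
 * list and take a reference on the mm. Returns 0 on success or if the
 * mm was already registered, -ENOMEM on allocation failure.
 */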
2110 int __khugepaged_enter(struct mm_struct *mm)
2111 {
2112         struct mm_slot *mm_slot;
2113         int wakeup;
2114
2115         mm_slot = alloc_mm_slot();
2116         if (!mm_slot)
2117                 return -ENOMEM;
2118
2119         /* __khugepaged_exit() must not run from under us */
2120         VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
2121         if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
2122                 free_mm_slot(mm_slot);
2123                 return 0;
2124         }
2125
2126         spin_lock(&khugepaged_mm_lock);
2127         insert_to_mm_slots_hash(mm, mm_slot);
2128         /*
2129          * Insert just behind the scanning cursor, to let the area settle
2130          * down a little.
2131          */
2132         wakeup = list_empty(&khugepaged_scan.mm_head);
2133         list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
2134         spin_unlock(&khugepaged_mm_lock);
2135
2136         atomic_inc(&mm->mm_count);
2137         if (wakeup)
2138                 wake_up_interruptible(&khugepaged_wait);
2139
2140         return 0;
2141 }
2142
2143 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
2144                                unsigned long vm_flags)
2145 {
2146         unsigned long hstart, hend;
2147         if (!vma->anon_vma)
2148                 /*
2149                  * Not yet faulted in so we will register later in the
2150                  * page fault if needed.
2151                  */
2152                 return 0;
2153         if (vma->vm_ops || (vm_flags & VM_NO_THP))
2154                 /* khugepaged not yet working on file or special mappings */
2155                 return 0;
2156         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2157         hend = vma->vm_end & HPAGE_PMD_MASK;
2158         if (hstart < hend)
2159                 return khugepaged_enter(vma, vm_flags);
2160         return 0;
2161 }
2162
2163 void __khugepaged_exit(struct mm_struct *mm)
2164 {
2165         struct mm_slot *mm_slot;
2166         int free = 0;
2167
2168         spin_lock(&khugepaged_mm_lock);
2169         mm_slot = get_mm_slot(mm);
2170         if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
2171                 hash_del(&mm_slot->hash);
2172                 list_del(&mm_slot->mm_node);
2173                 free = 1;
2174         }
2175         spin_unlock(&khugepaged_mm_lock);
2176
2177         if (free) {
2178                 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2179                 free_mm_slot(mm_slot);
2180                 mmdrop(mm);
2181         } else if (mm_slot) {
2182                 /*
2183                  * This is required to serialize against
2184                  * khugepaged_test_exit() (which is guaranteed to run
2185                  * under mmap_sem read mode). Stop here (after we
2186                  * return, all pagetables will be destroyed) until
2187                  * khugepaged has finished working on the pagetables
2188                  * under the mmap_sem.
2189                  */
2190                 down_write(&mm->mmap_sem);
2191                 up_write(&mm->mmap_sem);
2192         }
2193 }
2194
2195 static void release_pte_page(struct page *page)
2196 {
2197         /* 0 stands for page_is_file_cache(page) == false */
2198         dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
2199         unlock_page(page);
2200         putback_lru_page(page);
2201 }
2202
2203 static void release_pte_pages(pte_t *pte, pte_t *_pte)
2204 {
2205         while (--_pte >= pte) {
2206                 pte_t pteval = *_pte;
2207                 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
2208                         release_pte_page(pte_page(pteval));
2209         }
2210 }
2211
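/*
 * Check and isolate the small pages that are about to be collapsed: each
 * present pte must point to an anonymous, unpinned page that can be
 * locked and taken off the LRU. Returns 1 if every page was isolated and
 * at least one pte was young and one writable, 0 otherwise (releasing
 * any pages already isolated).
 */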
2212 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
2213                                         unsigned long address,
2214                                         pte_t *pte)
2215 {
2216         struct page *page;
2217         pte_t *_pte;
2218         int none_or_zero = 0;
2219         bool referenced = false, writable = false;
2220         for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
2221              _pte++, address += PAGE_SIZE) {
2222                 pte_t pteval = *_pte;
2223                 if (pte_none(pteval) || (pte_present(pteval) &&
2224                                 is_zero_pfn(pte_pfn(pteval)))) {
2225                         if (!userfaultfd_armed(vma) &&
2226                             ++none_or_zero <= khugepaged_max_ptes_none)
2227                                 continue;
2228                         else
2229                                 goto out;
2230                 }
2231                 if (!pte_present(pteval))
2232                         goto out;
2233                 page = vm_normal_page(vma, address, pteval);
2234                 if (unlikely(!page))
2235                         goto out;
2236
2237                 VM_BUG_ON_PAGE(PageCompound(page), page);
2238                 VM_BUG_ON_PAGE(!PageAnon(page), page);
2239                 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
2240
2241                 /*
2242                  * We can do it before isolate_lru_page because the
2243                  * page can't be freed from under us. NOTE: PG_lock
2244                  * is needed to serialize against split_huge_page
2245                  * when invoked from the VM.
2246                  */
2247                 if (!trylock_page(page))
2248                         goto out;
2249
2250                 /*
2251                  * cannot use mapcount: can't collapse if there's a gup pin.
2252                  * The page must only be referenced by the scanned process
2253                  * and page swap cache.
2254                  */
2255                 if (page_count(page) != 1 + !!PageSwapCache(page)) {
2256                         unlock_page(page);
2257                         goto out;
2258                 }
2259                 if (pte_write(pteval)) {
2260                         writable = true;
2261                 } else {
2262                         if (PageSwapCache(page) && !reuse_swap_page(page)) {
2263                                 unlock_page(page);
2264                                 goto out;
2265                         }
2266                         /*
2267                          * Page is not in the swap cache. It can be collapsed
2268                          * into a THP.
2269                          */
2270                 }
2271
2272                 /*
2273                  * Isolate the page to avoid collapsing an hugepage
2274                  * currently in use by the VM.
2275                  */
2276                 if (isolate_lru_page(page)) {
2277                         unlock_page(page);
2278                         goto out;
2279                 }
2280                 /* 0 stands for page_is_file_cache(page) == false */
2281                 inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
2282                 VM_BUG_ON_PAGE(!PageLocked(page), page);
2283                 VM_BUG_ON_PAGE(PageLRU(page), page);
2284
2285                 /* If there is no mapped young pte, don't collapse the page */
2286                 if (pte_young(pteval) ||
2287                     page_is_young(page) || PageReferenced(page) ||
2288                     mmu_notifier_test_young(vma->vm_mm, address))
2289                         referenced = true;
2290         }
2291         if (likely(referenced && writable))
2292                 return 1;
2293 out:
2294         release_pte_pages(pte, _pte);
2295         return 0;
2296 }
2297
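/*
 * Copy the contents of the isolated small pages into the new huge @page,
 * clear the old ptes and drop the old pages. None/zero ptes are
 * materialised as cleared subpages of the huge page.
 */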
2298 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
2299                                       struct vm_area_struct *vma,
2300                                       unsigned long address,
2301                                       spinlock_t *ptl)
2302 {
2303         pte_t *_pte;
2304         for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
2305                 pte_t pteval = *_pte;
2306                 struct page *src_page;
2307
2308                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
2309                         clear_user_highpage(page, address);
2310                         add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
2311                         if (is_zero_pfn(pte_pfn(pteval))) {
2312                                 /*
2313                                  * ptl mostly unnecessary.
2314                                  */
2315                                 spin_lock(ptl);
2316                                 /*
2317                                  * paravirt calls inside pte_clear here are
2318                                  * superfluous.
2319                                  */
2320                                 pte_clear(vma->vm_mm, address, _pte);
2321                                 spin_unlock(ptl);
2322                         }
2323                 } else {
2324                         src_page = pte_page(pteval);
2325                         copy_user_highpage(page, src_page, address, vma);
2326                         VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
2327                         release_pte_page(src_page);
2328                         /*
2329                          * ptl mostly unnecessary, but preempt has to
2330                          * be disabled to update the per-cpu stats
2331                          * inside page_remove_rmap().
2332                          */
2333                         spin_lock(ptl);
2334                         /*
2335                          * paravirt calls inside pte_clear here are
2336                          * superfluous.
2337                          */
2338                         pte_clear(vma->vm_mm, address, _pte);
2339                         page_remove_rmap(src_page);
2340                         spin_unlock(ptl);
2341                         free_page_and_swap_cache(src_page);
2342                 }
2343
2344                 address += PAGE_SIZE;
2345                 page++;
2346         }
2347 }
2348
2349 static void khugepaged_alloc_sleep(void)
2350 {
2351         DEFINE_WAIT(wait);
2352
2353         add_wait_queue(&khugepaged_wait, &wait);
2354         freezable_schedule_timeout_interruptible(
2355                 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
2356         remove_wait_queue(&khugepaged_wait, &wait);
2357 }
2358
2359 static int khugepaged_node_load[MAX_NUMNODES];
2360
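/*
 * With zone_reclaim_mode enabled, avoid collapsing pages spread across
 * nodes that are further apart than RECLAIM_DISTANCE: returns true if
 * scanning of the current pmd should be aborted for node @nid.
 */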
2361 static bool khugepaged_scan_abort(int nid)
2362 {
2363         int i;
2364
2365         /*
2366          * If zone_reclaim_mode is disabled, then no extra effort is made to
2367          * allocate memory locally.
2368          */
2369         if (!zone_reclaim_mode)
2370                 return false;
2371
2372         /* If there is a count for this node already, it must be acceptable */
2373         if (khugepaged_node_load[nid])
2374                 return false;
2375
2376         for (i = 0; i < MAX_NUMNODES; i++) {
2377                 if (!khugepaged_node_load[i])
2378                         continue;
2379                 if (node_distance(nid, i) > RECLAIM_DISTANCE)
2380                         return true;
2381         }
2382         return false;
2383 }
2384
2385 #ifdef CONFIG_NUMA
2386 static int khugepaged_find_target_node(void)
2387 {
2388         static int last_khugepaged_target_node = NUMA_NO_NODE;
2389         int nid, target_node = 0, max_value = 0;
2390
2391         /* find first node with max normal pages hit */
2392         for (nid = 0; nid < MAX_NUMNODES; nid++)
2393                 if (khugepaged_node_load[nid] > max_value) {
2394                         max_value = khugepaged_node_load[nid];
2395                         target_node = nid;
2396                 }
2397
2398         /* do some balancing if several nodes have the same hit record */
2399         if (target_node <= last_khugepaged_target_node)
2400                 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
2401                                 nid++)
2402                         if (max_value == khugepaged_node_load[nid]) {
2403                                 target_node = nid;
2404                                 break;
2405                         }
2406
2407         last_khugepaged_target_node = target_node;
2408         return target_node;
2409 }
2410
2411 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2412 {
2413         if (IS_ERR(*hpage)) {
2414                 if (!*wait)
2415                         return false;
2416
2417                 *wait = false;
2418                 *hpage = NULL;
2419                 khugepaged_alloc_sleep();
2420         } else if (*hpage) {
2421                 put_page(*hpage);
2422                 *hpage = NULL;
2423         }
2424
2425         return true;
2426 }
2427
2428 static struct page *
2429 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
2430                        unsigned long address, int node)
2431 {
2432         VM_BUG_ON_PAGE(*hpage, *hpage);
2433
2434         /*
2435          * Before allocating the hugepage, release the mmap_sem read lock.
2436          * The allocation can potentially take a long time if it involves
2437          * sync compaction, and we do not need to hold the mmap_sem during
2438          * that. We will recheck the vma after taking it again in write mode.
2439          */
2440         up_read(&mm->mmap_sem);
2441
2442         *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
2443         if (unlikely(!*hpage)) {
2444                 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2445                 *hpage = ERR_PTR(-ENOMEM);
2446                 return NULL;
2447         }
2448
2449         count_vm_event(THP_COLLAPSE_ALLOC);
2450         return *hpage;
2451 }
2452 #else
2453 static int khugepaged_find_target_node(void)
2454 {
2455         return 0;
2456 }
2457
2458 static inline struct page *alloc_hugepage(int defrag)
2459 {
2460         return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
2461                            HPAGE_PMD_ORDER);
2462 }
2463
2464 static struct page *khugepaged_alloc_hugepage(bool *wait)
2465 {
2466         struct page *hpage;
2467
2468         do {
2469                 hpage = alloc_hugepage(khugepaged_defrag());
2470                 if (!hpage) {
2471                         count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
2472                         if (!*wait)
2473                                 return NULL;
2474
2475                         *wait = false;
2476                         khugepaged_alloc_sleep();
2477                 } else
2478                         count_vm_event(THP_COLLAPSE_ALLOC);
2479         } while (unlikely(!hpage) && likely(khugepaged_enabled()));
2480
2481         return hpage;
2482 }
2483
2484 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
2485 {
2486         if (!*hpage)
2487                 *hpage = khugepaged_alloc_hugepage(wait);
2488
2489         if (unlikely(!*hpage))
2490                 return false;
2491
2492         return true;
2493 }
2494
2495 static struct page *
2496 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, struct mm_struct *mm,
2497                        unsigned long address, int node)
2498 {
2499         up_read(&mm->mmap_sem);
2500         VM_BUG_ON(!*hpage);
2501
2502         return  *hpage;
2503 }
2504 #endif
2505
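/*
 * Return true if khugepaged may collapse pages in @vma: THP must be
 * enabled for it, it must be anonymous (no vm_ops, already faulted so an
 * anon_vma exists), not a temporary stack and not a VM_NO_THP mapping.
 */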
2506 static bool hugepage_vma_check(struct vm_area_struct *vma)
2507 {
2508         if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
2509             (vma->vm_flags & VM_NOHUGEPAGE))
2510                 return false;
2511
2512         if (!vma->anon_vma || vma->vm_ops)
2513                 return false;
2514         if (is_vma_temporary_stack(vma))
2515                 return false;
2516         return !(vma->vm_flags & VM_NO_THP);
2517 }
2518
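/*
 * Collapse the pmd-sized range at @address into a newly allocated huge
 * page: isolate and copy the small pages, then install the huge pmd.
 * Entered with mmap_sem held for read; the lock is dropped for the
 * allocation, re-taken for write, and released before returning.
 */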
2519 static void collapse_huge_page(struct mm_struct *mm,
2520                                    unsigned long address,
2521                                    struct page **hpage,
2522                                    struct vm_area_struct *vma,
2523                                    int node)
2524 {
2525         pmd_t *pmd, _pmd;
2526         pte_t *pte;
2527         pgtable_t pgtable;
2528         struct page *new_page;
2529         spinlock_t *pmd_ptl, *pte_ptl;
2530         int isolated;
2531         unsigned long hstart, hend;
2532         struct mem_cgroup *memcg;
2533         unsigned long mmun_start;       /* For mmu_notifiers */
2534         unsigned long mmun_end;         /* For mmu_notifiers */
2535         gfp_t gfp;
2536
2537         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2538
2539         /* Only allocate from the target node */
2540         gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
2541                 __GFP_THISNODE;
2542
2543         /* release the mmap_sem read lock. */
2544         new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
2545         if (!new_page)
2546                 return;
2547
2548         if (unlikely(mem_cgroup_try_charge(new_page, mm,
2549                                            gfp, &memcg)))
2550                 return;
2551
2552         /*
2553          * Prevent all access to pagetables with the exception of
2554          * gup_fast later handled by the ptep_clear_flush and the VM
2555          * handled by the anon_vma lock + PG_lock.
2556          */
2557         down_write(&mm->mmap_sem);
2558         if (unlikely(khugepaged_test_exit(mm)))
2559                 goto out;
2560
2561         vma = find_vma(mm, address);
2562         if (!vma)
2563                 goto out;
2564         hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2565         hend = vma->vm_end & HPAGE_PMD_MASK;
2566         if (address < hstart || address + HPAGE_PMD_SIZE > hend)
2567                 goto out;
2568         if (!hugepage_vma_check(vma))
2569                 goto out;
2570         pmd = mm_find_pmd(mm, address);
2571         if (!pmd)
2572                 goto out;
2573
2574         anon_vma_lock_write(vma->anon_vma);
2575
2576         pte = pte_offset_map(pmd, address);
2577         pte_ptl = pte_lockptr(mm, pmd);
2578
2579         mmun_start = address;
2580         mmun_end   = address + HPAGE_PMD_SIZE;
2581         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2582         pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
2583         /*
2584          * After this gup_fast can't run anymore. This also removes
2585          * any huge TLB entry from the CPU so we won't allow
2586          * huge and small TLB entries for the same virtual address
2587          * to avoid the risk of CPU bugs in that area.
2588          */
2589         _pmd = pmdp_collapse_flush(vma, address, pmd);
2590         spin_unlock(pmd_ptl);
2591         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
2592
2593         spin_lock(pte_ptl);
2594         isolated = __collapse_huge_page_isolate(vma, address, pte);
2595         spin_unlock(pte_ptl);
2596
2597         if (unlikely(!isolated)) {
2598                 pte_unmap(pte);
2599                 spin_lock(pmd_ptl);
2600                 BUG_ON(!pmd_none(*pmd));
2601                 /*
2602                  * We can only use set_pmd_at when establishing
2603                  * hugepmds and never for establishing regular pmds that
2604                  * point to regular pagetables. Use pmd_populate for that.
2605                  */
2606                 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
2607                 spin_unlock(pmd_ptl);
2608                 anon_vma_unlock_write(vma->anon_vma);
2609                 goto out;
2610         }
2611
2612         /*
2613          * All pages are isolated and locked so anon_vma rmap
2614          * can't run anymore.
2615          */
2616         anon_vma_unlock_write(vma->anon_vma);
2617
2618         __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
2619         pte_unmap(pte);
2620         __SetPageUptodate(new_page);
2621         pgtable = pmd_pgtable(_pmd);
2622
2623         _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
2624         _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
2625
2626         /*
2627          * spin_lock() below is not the equivalent of smp_wmb(), so
2628          * this is needed to keep the copy_huge_page writes from becoming
2629          * visible after the set_pmd_at() write.
2630          */
2631         smp_wmb();
2632
2633         spin_lock(pmd_ptl);
2634         BUG_ON(!pmd_none(*pmd));
2635         page_add_new_anon_rmap(new_page, vma, address);
2636         mem_cgroup_commit_charge(new_page, memcg, false);
2637         lru_cache_add_active_or_unevictable(new_page, vma);
2638         pgtable_trans_huge_deposit(mm, pmd, pgtable);
2639         set_pmd_at(mm, address, pmd, _pmd);
2640         update_mmu_cache_pmd(vma, address, pmd);
2641         spin_unlock(pmd_ptl);
2642
2643         *hpage = NULL;
2644
2645         khugepaged_pages_collapsed++;
2646 out_up_write:
2647         up_write(&mm->mmap_sem);
2648         return;
2649
2650 out:
2651         mem_cgroup_cancel_charge(new_page, memcg);
2652         goto out_up_write;
2653 }
2654
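/*
 * Scan one pmd-sized range and decide whether it is worth collapsing:
 * enough non-empty, anonymous, unpinned ptes, at least one young and one
 * writable. Returns 1 (after calling collapse_huge_page, which drops
 * mmap_sem) if a collapse was attempted, 0 otherwise.
 */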
2655 static int khugepaged_scan_pmd(struct mm_struct *mm,
2656                                struct vm_area_struct *vma,
2657                                unsigned long address,
2658                                struct page **hpage)
2659 {
2660         pmd_t *pmd;
2661         pte_t *pte, *_pte;
2662         int ret = 0, none_or_zero = 0;
2663         struct page *page;
2664         unsigned long _address;
2665         spinlock_t *ptl;
2666         int node = NUMA_NO_NODE;
2667         bool writable = false, referenced = false;
2668
2669         VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2670
2671         pmd = mm_find_pmd(mm, address);
2672         if (!pmd)
2673                 goto out;
2674
2675         memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2676         pte = pte_offset_map_lock(mm, pmd, address, &ptl);
2677         for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
2678              _pte++, _address += PAGE_SIZE) {
2679                 pte_t pteval = *_pte;
2680                 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
2681                         if (!userfaultfd_armed(vma) &&
2682                             ++none_or_zero <= khugepaged_max_ptes_none)
2683                                 continue;
2684                         else
2685                                 goto out_unmap;
2686                 }
2687                 if (!pte_present(pteval))
2688                         goto out_unmap;
2689                 if (pte_write(pteval))
2690                         writable = true;
2691
2692                 page = vm_normal_page(vma, _address, pteval);
2693                 if (unlikely(!page))
2694                         goto out_unmap;
2695                 /*
2696                  * Record which node the original page is from and save this
2697                  * information to khugepaged_node_load[].
2698                  * Khugepaged will allocate the hugepage from the node that
2699                  * has the max hit record.
2700                  */
2701                 node = page_to_nid(page);
2702                 if (khugepaged_scan_abort(node))
2703                         goto out_unmap;
2704                 khugepaged_node_load[node]++;
2705                 VM_BUG_ON_PAGE(PageCompound(page), page);
2706                 if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
2707                         goto out_unmap;
2708                 /*
2709                  * cannot use mapcount: can't collapse if there's a gup pin.
2710                  * The page must only be referenced by the scanned process
2711                  * and page swap cache.
2712                  */
2713                 if (page_count(page) != 1 + !!PageSwapCache(page))
2714                         goto out_unmap;
2715                 if (pte_young(pteval) ||
2716                     page_is_young(page) || PageReferenced(page) ||
2717                     mmu_notifier_test_young(vma->vm_mm, address))
2718                         referenced = true;
2719         }
2720         if (referenced && writable)
2721                 ret = 1;
2722 out_unmap:
2723         pte_unmap_unlock(pte, ptl);
2724         if (ret) {
2725                 node = khugepaged_find_target_node();
2726                 /* collapse_huge_page will return with the mmap_sem released */
2727                 collapse_huge_page(mm, address, hpage, vma, node);
2728         }
2729 out:
2730         return ret;
2731 }
2732
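/*
 * Drop the mm_slot (and the mm reference it holds) once the mm it tracks
 * has no users left. Called with khugepaged_mm_lock held.
 */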
2733 static void collect_mm_slot(struct mm_slot *mm_slot)
2734 {
2735         struct mm_struct *mm = mm_slot->mm;
2736
2737         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2738
2739         if (khugepaged_test_exit(mm)) {
2740                 /* free mm_slot */
2741                 hash_del(&mm_slot->hash);
2742                 list_del(&mm_slot->mm_node);
2743
2744                 /*
2745                  * Not strictly needed because the mm exited already.
2746                  *
2747                  * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
2748                  */
2749
2750                 /* khugepaged_mm_lock actually not necessary for the below */
2751                 free_mm_slot(mm_slot);
2752                 mmdrop(mm);
2753         }
2754 }
2755
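/*
 * Scan up to @pages ptes across the registered mms, resuming from the
 * previous cursor position. Returns the amount of scan progress made;
 * khugepaged_mm_lock is dropped while scanning and re-taken on return.
 */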
2756 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2757                                             struct page **hpage)
2758         __releases(&khugepaged_mm_lock)
2759         __acquires(&khugepaged_mm_lock)
2760 {
2761         struct mm_slot *mm_slot;
2762         struct mm_struct *mm;
2763         struct vm_area_struct *vma;
2764         int progress = 0;
2765
2766         VM_BUG_ON(!pages);
2767         VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
2768
2769         if (khugepaged_scan.mm_slot)
2770                 mm_slot = khugepaged_scan.mm_slot;
2771         else {
2772                 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2773                                      struct mm_slot, mm_node);
2774                 khugepaged_scan.address = 0;
2775                 khugepaged_scan.mm_slot = mm_slot;
2776         }
2777         spin_unlock(&khugepaged_mm_lock);
2778
2779         mm = mm_slot->mm;
2780         down_read(&mm->mmap_sem);
2781         if (unlikely(khugepaged_test_exit(mm)))
2782                 vma = NULL;
2783         else
2784                 vma = find_vma(mm, khugepaged_scan.address);
2785
2786         progress++;
2787         for (; vma; vma = vma->vm_next) {
2788                 unsigned long hstart, hend;
2789
2790                 cond_resched();
2791                 if (unlikely(khugepaged_test_exit(mm))) {
2792                         progress++;
2793                         break;
2794                 }
2795                 if (!hugepage_vma_check(vma)) {
2796 skip:
2797                         progress++;
2798                         continue;
2799                 }
2800                 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2801                 hend = vma->vm_end & HPAGE_PMD_MASK;
2802                 if (hstart >= hend)
2803                         goto skip;
2804                 if (khugepaged_scan.address > hend)
2805                         goto skip;
2806                 if (khugepaged_scan.address < hstart)
2807                         khugepaged_scan.address = hstart;
2808                 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2809
2810                 while (khugepaged_scan.address < hend) {
2811                         int ret;
2812                         cond_resched();
2813                         if (unlikely(khugepaged_test_exit(mm)))
2814                                 goto breakouterloop;
2815
2816                         VM_BUG_ON(khugepaged_scan.address < hstart ||
2817                                   khugepaged_scan.address + HPAGE_PMD_SIZE >
2818                                   hend);
2819                         ret = khugepaged_scan_pmd(mm, vma,
2820                                                   khugepaged_scan.address,
2821                                                   hpage);
2822                         /* move to next address */
2823                         khugepaged_scan.address += HPAGE_PMD_SIZE;
2824                         progress += HPAGE_PMD_NR;
2825                         if (ret)
2826                                 /* we released mmap_sem so break loop */
2827                                 goto breakouterloop_mmap_sem;
2828                         if (progress >= pages)
2829                                 goto breakouterloop;
2830                 }
2831         }
2832 breakouterloop:
2833         up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
2834 breakouterloop_mmap_sem:
2835
2836         spin_lock(&khugepaged_mm_lock);
2837         VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2838         /*
2839          * Release the current mm_slot if this mm is about to die, or
2840          * if we scanned all vmas of this mm.
2841          */
2842         if (khugepaged_test_exit(mm) || !vma) {
2843                 /*
2844                  * Make sure that if mm_users is reaching zero while
2845                  * khugepaged runs here, khugepaged_exit will find
2846                  * mm_slot not pointing to the exiting mm.
2847                  */
2848                 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2849                         khugepaged_scan.mm_slot = list_entry(
2850                                 mm_slot->mm_node.next,
2851                                 struct mm_slot, mm_node);
2852                         khugepaged_scan.address = 0;
2853                 } else {
2854                         khugepaged_scan.mm_slot = NULL;
2855                         khugepaged_full_scans++;
2856                 }
2857
2858                 collect_mm_slot(mm_slot);
2859         }
2860
2861         return progress;
2862 }
2863
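/* There is work to do when mms are queued for scan and khugepaged is enabled. */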
2864 static int khugepaged_has_work(void)
2865 {
2866         return !list_empty(&khugepaged_scan.mm_head) &&
2867                 khugepaged_enabled();
2868 }
2869
2870 static int khugepaged_wait_event(void)
2871 {
2872         return !list_empty(&khugepaged_scan.mm_head) ||
2873                 kthread_should_stop();
2874 }
2875
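/*
 * One scan pass: preallocate a huge page (sleeping between failed
 * attempts), then walk the mm slots until khugepaged_pages_to_scan pages
 * have been scanned, a full pass over the scan list completes, or the
 * thread is asked to stop or freeze.
 */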
2876 static void khugepaged_do_scan(void)
2877 {
2878         struct page *hpage = NULL;
2879         unsigned int progress = 0, pass_through_head = 0;
2880         unsigned int pages = khugepaged_pages_to_scan;
2881         bool wait = true;
2882
2883         barrier(); /* snapshot khugepaged_pages_to_scan into the local 'pages' */
2884
2885         while (progress < pages) {
2886                 if (!khugepaged_prealloc_page(&hpage, &wait))
2887                         break;
2888
2889                 cond_resched();
2890
2891                 if (unlikely(kthread_should_stop() || try_to_freeze()))
2892                         break;
2893
2894                 spin_lock(&khugepaged_mm_lock);
2895                 if (!khugepaged_scan.mm_slot)
2896                         pass_through_head++;
2897                 if (khugepaged_has_work() &&
2898                     pass_through_head < 2)
2899                         progress += khugepaged_scan_mm_slot(pages - progress,
2900                                                             &hpage);
2901                 else
2902                         progress = pages;
2903                 spin_unlock(&khugepaged_mm_lock);
2904         }
2905
2906         if (!IS_ERR_OR_NULL(hpage))
2907                 put_page(hpage);
2908 }
2909
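/*
 * Sleep between scan passes: a freezable, timed sleep of
 * khugepaged_scan_sleep_millisecs while work remains queued, otherwise
 * wait until new work arrives or the thread is asked to stop.
 */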
2910 static void khugepaged_wait_work(void)
2911 {
2912         if (khugepaged_has_work()) {
2913                 if (!khugepaged_scan_sleep_millisecs)
2914                         return;
2915
2916                 wait_event_freezable_timeout(khugepaged_wait,
2917                                              kthread_should_stop(),
2918                         msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
2919                 return;
2920         }
2921
2922         if (khugepaged_enabled())
2923                 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2924 }
2925
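/*
 * Main loop of the khugepaged kernel thread: alternate scan passes and
 * sleeps until asked to stop, then detach and release the current
 * mm_slot, if any, before exiting.
 */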
2926 static int khugepaged(void *none)
2927 {
2928         struct mm_slot *mm_slot;
2929
2930         set_freezable();
2931         set_user_nice(current, MAX_NICE);
2932
2933         while (!kthread_should_stop()) {
2934                 khugepaged_do_scan();
2935                 khugepaged_wait_work();
2936         }
2937
2938         spin_lock(&khugepaged_mm_lock);
2939         mm_slot = khugepaged_scan.mm_slot;
2940         khugepaged_scan.mm_slot = NULL;
2941         if (mm_slot)
2942                 collect_mm_slot(mm_slot);
2943         spin_unlock(&khugepaged_mm_lock);
2944         return 0;
2945 }
2946
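/*
 * Split a huge zero page mapping: clear the huge pmd, then repopulate the
 * range with HPAGE_PMD_NR special ptes pointing at the normal zero page
 * and drop the huge zero page reference.  Called with the pmd lock held.
 */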
2947 static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
2948                 unsigned long haddr, pmd_t *pmd)
2949 {
2950         struct mm_struct *mm = vma->vm_mm;
2951         pgtable_t pgtable;
2952         pmd_t _pmd;
2953         int i;
2954
2955         pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2956         /* leave the pmd empty until the ptes are filled in */
2957
2958         pgtable = pgtable_trans_huge_withdraw(mm, pmd);
2959         pmd_populate(mm, &_pmd, pgtable);
2960
2961         for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
2962                 pte_t *pte, entry;
2963                 entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
2964                 entry = pte_mkspecial(entry);
2965                 pte = pte_offset_map(&_pmd, haddr);
2966                 VM_BUG_ON(!pte_none(*pte));
2967                 set_pte_at(mm, haddr, pte, entry);
2968                 pte_unmap(pte);
2969         }
2970         smp_wmb(); /* make pte visible before pmd */
2971         pmd_populate(mm, pmd, pgtable);
2972         put_huge_zero_page();
2973 }
2974
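/*
 * Split the huge pmd mapping @address in @vma back to normal ptes: DAX
 * mappings are simply cleared, a huge zero page mapping is rewritten as
 * zero-page ptes, and a regular huge page is split with
 * split_huge_page().  Retries while the pmd remains huge (see the race
 * with do_huge_pmd_wp_page() described below).
 */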
2975 void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
2976                 pmd_t *pmd)
2977 {
2978         spinlock_t *ptl;
2979         struct page *page = NULL;
2980         struct mm_struct *mm = vma->vm_mm;
2981         unsigned long haddr = address & HPAGE_PMD_MASK;
2982         unsigned long mmun_start;       /* For mmu_notifiers */
2983         unsigned long mmun_end;         /* For mmu_notifiers */
2984
2985         BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
2986
2987         mmun_start = haddr;
2988         mmun_end   = haddr + HPAGE_PMD_SIZE;
2989 again:
2990         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
2991         ptl = pmd_lock(mm, pmd);
2992         if (unlikely(!pmd_trans_huge(*pmd)))
2993                 goto unlock;
2994         if (vma_is_dax(vma)) {
2995                 pmd_t _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
2996                 if (is_huge_zero_pmd(_pmd))
2997                         put_huge_zero_page();
2998         } else if (is_huge_zero_pmd(*pmd)) {
2999                 __split_huge_zero_page_pmd(vma, haddr, pmd);
3000         } else {
3001                 page = pmd_page(*pmd);
3002                 VM_BUG_ON_PAGE(!page_count(page), page);
3003                 get_page(page);
3004         }
3005  unlock:
3006         spin_unlock(ptl);
3007         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3008
3009         if (!page)
3010                 return;
3011
3012         split_huge_page(page);
3013         put_page(page);
3014
3015         /*
3016          * We don't always have down_write of mmap_sem here: a racing
3017          * do_huge_pmd_wp_page() might have copied-on-write to another
3018          * huge page before our split_huge_page() got the anon_vma lock.
3019          */
3020         if (unlikely(pmd_trans_huge(*pmd)))
3021                 goto again;
3022 }
3023
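/*
 * split_huge_page_pmd() variant for callers that only have the mm: look
 * up the vma covering @address (which must exist) and split there.
 */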
3024 void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
3025                 pmd_t *pmd)
3026 {
3027         struct vm_area_struct *vma;
3028
3029         vma = find_vma(mm, address);
3030         BUG_ON(vma == NULL);
3031         split_huge_page_pmd(vma, address, pmd);
3032 }
3033
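/*
 * Split any huge pmd mapping @address by walking the page tables down to
 * the pmd.  @address is deliberately not HPAGE_PMD_SIZE aligned here
 * (see the VM_BUG_ON): it is a new vma boundary passed in from
 * vma_adjust_trans_huge() below.
 */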
3034 static void split_huge_page_address(struct mm_struct *mm,
3035                                     unsigned long address)
3036 {
3037         pgd_t *pgd;
3038         pud_t *pud;
3039         pmd_t *pmd;
3040
3041         VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
3042
3043         pgd = pgd_offset(mm, address);
3044         if (!pgd_present(*pgd))
3045                 return;
3046
3047         pud = pud_offset(pgd, address);
3048         if (!pud_present(*pud))
3049                 return;
3050
3051         pmd = pmd_offset(pud, address);
3052         if (!pmd_present(*pmd))
3053                 return;
3054         /*
3055          * Caller holds the mmap_sem write mode, so a huge pmd cannot
3056          * materialize from under us.
3057          */
3058         split_huge_page_pmd_mm(mm, address, pmd);
3059 }
3060
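/*
 * Called while a vma's boundaries are being adjusted: if the new @start,
 * the new @end or the adjusted start of the next vma falls in the middle
 * of a hugepage-sized range, split the corresponding huge pmd so that no
 * huge pmd straddles a vma boundary.
 */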
3061 void vma_adjust_trans_huge(struct vm_area_struct *vma,
3062                              unsigned long start,
3063                              unsigned long end,
3064                              long adjust_next)
3065 {
3066         /*
3067          * If the new start address isn't hpage aligned and it could
3068          * previously have contained a hugepage: check if we need to
3069          * split a huge pmd.
3070          */
3071         if (start & ~HPAGE_PMD_MASK &&
3072             (start & HPAGE_PMD_MASK) >= vma->vm_start &&
3073             (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
3074                 split_huge_page_address(vma->vm_mm, start);
3075
3076         /*
3077          * If the new end address isn't hpage aligned and it could
3078          * previously have contained a hugepage: check if we need to
3079          * split a huge pmd.
3080          */
3081         if (end & ~HPAGE_PMD_MASK &&
3082             (end & HPAGE_PMD_MASK) >= vma->vm_start &&
3083             (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
3084                 split_huge_page_address(vma->vm_mm, end);
3085
3086         /*
3087          * If we're also updating vma->vm_next->vm_start, and the new
3088          * vm_next->vm_start isn't hpage aligned and it could previously
3089          * have contained a hugepage: check if we need to split a huge pmd.
3090          */
3091         if (adjust_next > 0) {
3092                 struct vm_area_struct *next = vma->vm_next;
3093                 unsigned long nstart = next->vm_start;
3094                 nstart += adjust_next << PAGE_SHIFT;
3095                 if (nstart & ~HPAGE_PMD_MASK &&
3096                     (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
3097                     (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
3098                         split_huge_page_address(next->vm_mm, nstart);
3099         }
3100 }