#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
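/*
 * __userpte_alloc_gfp is the default GFP mask for user pte pages: with
 * CONFIG_HIGHPTE on 32-bit it includes __GFP_HIGHMEM, so user page
 * tables may live in highmem; setup_userpte() below can clear that bit
 * again via the "userpte=nohigh" boot option.
 */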
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}
static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);
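/*
 * The ___p*_free_tlb() helpers below hand page-table pages back through
 * the mmu_gather machinery, which defers the actual freeing until the
 * corresponding TLB entries have been flushed.
 */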
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}
#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_page(tlb, page);
}
#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
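/*
 * In other words: with SHARED_KERNEL_PMD (PAE sharing its kernel pmds
 * with swapper_pg_dir), only the user entries below KERNEL_PGD_BOUNDARY
 * are private to each pgd; otherwise all PTRS_PER_PGD entries are.
 */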
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}
struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}
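/*
 * pgd_set_mm() and pgd_page_get_mm() stash the owning mm in the
 * otherwise-unused page->index field of the page backing the pgd; the
 * BUILD_BUG_ON() above checks that a pointer actually fits there. This
 * lets walkers of pgd_list map a pgd page back to its mm.
 */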
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}
static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */
#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
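/*
 * Worked example: on PAE, PTRS_PER_PGD is 4, so with an unshared kernel
 * pmd (e.g. under Xen) all four pmds are preallocated, while with
 * SHARED_KERNEL_PMD only the user pmds below KERNEL_PGD_BOUNDARY are.
 */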
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */
static void free_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}
static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[])
{
	int i;
	bool failed = false;
	gfp_t gfp = PGALLOC_GFP;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
			mm_dec_nr_pmds(mm);
		}
	}
}
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}
/*
 * Xen paravirt assumes that the pgd table is in a single page. The
 * 64-bit kernel makes the same assumption.
 *
 * A kernel with PAE paging that is not running as a Xen domain,
 * however, only needs 32 bytes for its pgd instead of one page.
 */
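/*
 * The 32 bytes follow directly from the PAE layout: the pgd (PDPT) has
 * PTRS_PER_PGD == 4 entries of sizeof(pgd_t) == 8 bytes each, so
 * 4 * 8 == 32 bytes.
 */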
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;
static int __init pgd_cache_init(void)
{
	/*
	 * When the PAE kernel is running as a Xen domain, it does not use
	 * a shared kernel pmd, and that requires a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * When the PAE kernel is not running as a Xen domain, it uses a
	 * shared kernel pmd, which does not require a whole page for the
	 * pgd: 32 bytes are enough. During boot, we create a 32-byte slab
	 * for pgd table allocation.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	return 0;
}
core_initcall(pgd_cache_init);
static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * If SHARED_KERNEL_PMD is not set, the PAE kernel is running as a
	 * Xen domain and we allocate a whole page for the pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_page(PGALLOC_GFP);

	/*
	 * Otherwise the PAE kernel is not running as a Xen domain and we
	 * can allocate the pgd from the 32-byte slab to save memory.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}
static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_page((unsigned long)pgd);
	else
		kmem_cache_free(pgd_cache, pgd);
}

#else	/* !CONFIG_X86_PAE */
static inline pgd_t *_pgd_alloc(void)
{
#ifdef CONFIG_KAISER
	/*
	 * Instead of one PML4, we acquire two PML4s and, thus, an
	 * 8kB-aligned memory block. Therefore, we have to allocate at
	 * least 3 pages; __get_free_pages() rounds that up to 4. We store
	 * the block's base pointer at the beginning of the page that
	 * follows our 8kB-aligned pair in order to correctly free it
	 * afterwards.
	 */
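	/*
	 * Illustrative layout of the order-2 block (a sketch, not part of
	 * the original code):
	 *
	 *	page 0:	normal pgd (PML4)	<- returned pgd
	 *	page 1:	shadow pgd (PML4)
	 *	page 2:	word 0 holds the block base for free_pages()
	 *	page 3:	unused
	 *
	 * If native_get_normal_pgd() selects the second half of the block
	 * instead, the pgd pair starts at page 1 and the base pointer goes
	 * to the start of page 3; in both cases it lives at
	 * pgd + 2 * PAGE_SIZE, which is what _pgd_free() below relies on.
	 */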
	unsigned long pages = __get_free_pages(PGALLOC_GFP, get_order(4*PAGE_SIZE));

	if (!pages)
		return NULL;

	if (native_get_normal_pgd((pgd_t *)pages) == (pgd_t *)pages) {
		*((unsigned long *)(pages + 2 * PAGE_SIZE)) = pages;
		return (pgd_t *)pages;
	} else {
		*((unsigned long *)(pages + 3 * PAGE_SIZE)) = pages;
		return (pgd_t *)(pages + PAGE_SIZE);
	}
#else
	return (pgd_t *)__get_free_page(PGALLOC_GFP);
#endif
}
static inline void _pgd_free(pgd_t *pgd)
{
#ifdef CONFIG_KAISER
	/* Recover the allocation base stashed by _pgd_alloc() above. */
	unsigned long pages = *((unsigned long *)((char *)pgd + 2 * PAGE_SIZE));
	free_pages(pages, get_order(4*PAGE_SIZE));
#else
	free_page((unsigned long)pgd);
#endif
}
#endif	/* CONFIG_X86_PAE */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = _pgd_alloc();

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(mm, pmds);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}
/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
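/*
 * Illustrative flow (a sketch for orientation, not a code path defined
 * in this file): on a write fault, do_wp_page() builds a new pte value
 * with _PAGE_RW and _PAGE_DIRTY set and calls
 * ptep_set_access_flags(vma, address, ptep, entry, 1); since
 * changed && dirty holds, the PTE is actually rewritten.
 */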
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update(vma->vm_mm, address, ptep);
	}

	return changed;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to more permissive. No need to flush the TLB for that,
		 * #PF is architecturally guaranteed to do that and in the
		 * worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}
#endif
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
#endif
/*
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}
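/*
 * Worked example (illustrative, non-PAE 32-bit where PMD_SHIFT == 22):
 * reserve_top_address(16 * 1024 * 1024) yields -reserve == 0xff000000,
 * which is already 4MB-aligned, so __FIXADDR_TOP becomes
 * 0xff000000 - PAGE_SIZE == 0xfefff000, leaving the top 16MB of the
 * virtual address space for the hypervisor.
 */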
int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}
void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
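/*
 * Sketch of the fallback a caller is expected to implement (pseudo-code
 * for illustration; ioremap's huge-vmap path is the actual consumer):
 *
 *	if (!pud_set_huge(pud, addr, prot))
 *		for each pmd in the pud range:
 *			if (!pmd_set_huge(pmd, addr, prot))
 *				map the range with 4K ptes
 */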
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}
/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}
/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}
/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */