xen: make install_p2mtop_page() static
[android-x86/kernel.git] arch/x86/xen/mmu.c
index 413b19b..0096909 100644
@@ -42,6 +42,7 @@
 #include <linux/highmem.h>
 #include <linux/debugfs.h>
 #include <linux/bug.h>
+#include <linux/vmalloc.h>
 #include <linux/module.h>
 #include <linux/gfp.h>
 
 #include <asm/mmu_context.h>
 #include <asm/setup.h>
 #include <asm/paravirt.h>
+#include <asm/e820.h>
 #include <asm/linkage.h>
+#include <asm/page.h>
 
 #include <asm/xen/hypercall.h>
 #include <asm/xen/hypervisor.h>
 
+#include <xen/xen.h>
 #include <xen/page.h>
 #include <xen/interface/xen.h>
 #include <xen/interface/hvm/hvm_op.h>
 #include <xen/interface/version.h>
+#include <xen/interface/memory.h>
 #include <xen/hvc-console.h>
 
 #include "multicalls.h"
 
 #define MMU_UPDATE_HISTO       30
 
+/*
+ * Protects atomic reservation decrease/increase against concurrent increases.
+ * Also protects non-atomic updates of current_pages and driver_pages, and
+ * balloon lists.
+ */
+DEFINE_SPINLOCK(xen_reservation_lock);
+
 #ifdef CONFIG_XEN_DEBUG_FS
 
 static struct {
@@ -126,7 +138,8 @@ static inline void check_zero(void)
  * large enough to allocate page table pages to allocate the rest.
  * Each page can map 2MB.
  */
-static pte_t level1_ident_pgt[PTRS_PER_PTE * 4] __page_aligned_bss;
+#define LEVEL1_IDENT_ENTRIES   (PTRS_PER_PTE * 4)
+static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
 
 #ifdef CONFIG_X86_64
 /* l3 pud for userspace vsyscall mapping */
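(Note: the level1_ident_pgt conversion above, and the similar ones later in this patch, swap a page-aligned static array for a pointer declared with RESERVE_BRK_ARRAY(); the backing storage is then carved out of the early brk region at boot with extend_brk(). A minimal sketch of the pattern, using illustrative names (example_table, example_init) that are not part of the patch:

#define EXAMPLE_ENTRIES 1024

/* Declares "unsigned long *example_table" and reserves brk space for it. */
static RESERVE_BRK_ARRAY(unsigned long, example_table, EXAMPLE_ENTRIES);

static void __init example_init(void)
{
	unsigned i;

	/* Allocate the backing storage from the brk area, page-aligned. */
	example_table = extend_brk(sizeof(*example_table) * EXAMPLE_ENTRIES,
				   PAGE_SIZE);
	for (i = 0; i < EXAMPLE_ENTRIES; i++)
		example_table[i] = ~0UL;
}
)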
@@ -157,27 +170,27 @@ DEFINE_PER_CPU(unsigned long, xen_current_cr3);    /* actual vcpu cr3 */
  */
 #define USER_LIMIT     ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)
 
+static unsigned long max_p2m_pfn __read_mostly = MAX_DOMAIN_PAGES;
 
-#define P2M_ENTRIES_PER_PAGE   (PAGE_SIZE / sizeof(unsigned long))
-#define TOP_ENTRIES            (MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)
+#define P2M_ENTRIES_PER_PAGE           (PAGE_SIZE / sizeof(unsigned long))
+#define TOP_ENTRIES(pages)             ((pages) / P2M_ENTRIES_PER_PAGE)
+#define MAX_TOP_ENTRIES                        TOP_ENTRIES(MAX_DOMAIN_PAGES)
 
 /* Placeholder for holes in the address space */
-static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
-               { [ 0 ... P2M_ENTRIES_PER_PAGE-1 ] = ~0UL };
+static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_ENTRIES_PER_PAGE);
 
  /* Array of pointers to pages containing p2m entries */
-static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
-               { [ 0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };
+static RESERVE_BRK_ARRAY(unsigned long *, p2m_top, MAX_TOP_ENTRIES);
 
 /* Arrays of p2m arrays expressed in mfns used for save/restore */
-static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, MAX_TOP_ENTRIES);
 
-static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
-       __page_aligned_bss;
+static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn_list,
+                        (MAX_TOP_ENTRIES / P2M_ENTRIES_PER_PAGE));
 
 static inline unsigned p2m_top_index(unsigned long pfn)
 {
-       BUG_ON(pfn >= MAX_DOMAIN_PAGES);
+       BUG_ON(pfn >= max_p2m_pfn);
        return pfn / P2M_ENTRIES_PER_PAGE;
 }
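(Note: for reference, on 64-bit with 4 KiB pages and 8-byte entries the two-level p2m lookup arithmetic works out as sketched below; the in-page index, pfn % P2M_ENTRIES_PER_PAGE, is computed by an unchanged helper not shown in this diff. Illustration only:

/*
 * P2M_ENTRIES_PER_PAGE = 4096 / 8 = 512
 *
 * e.g. pfn 0x12345 (74565):
 *   topidx = 74565 / 512 = 145    selects the p2m page, p2m_top[145]
 *   idx    = 74565 % 512 = 325    selects the entry within that page
 *
 * so a lookup such as get_phys_to_machine() resolves to p2m_top[145][325].
 */
)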
 
@@ -191,13 +204,15 @@ void xen_build_mfn_list_list(void)
 {
        unsigned pfn, idx;
 
-       for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
+       for (pfn = 0; pfn < max_p2m_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
 
                p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
        }
 
-       for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
+       for (idx = 0;
+            idx < TOP_ENTRIES(max_p2m_pfn)/P2M_ENTRIES_PER_PAGE;
+            idx++) {
                unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;
                p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
        }
@@ -209,7 +224,7 @@ void xen_setup_mfn_list_list(void)
 
        HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
                virt_to_mfn(p2m_top_mfn_list);
-       HYPERVISOR_shared_info->arch.max_pfn = xen_start_info->nr_pages;
+       HYPERVISOR_shared_info->arch.max_pfn = max_p2m_pfn;
 }
 
 /* Set up p2m_top to point to the domain-builder provided p2m pages */
@@ -218,6 +233,25 @@ void __init xen_build_dynamic_phys_to_machine(void)
        unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
        unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
        unsigned pfn;
+       unsigned i;
+
+       max_p2m_pfn = max_pfn;
+
+       p2m_missing = extend_brk(sizeof(*p2m_missing) * P2M_ENTRIES_PER_PAGE,
+                                PAGE_SIZE);
+       for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
+               p2m_missing[i] = ~0UL;
+
+       p2m_top = extend_brk(sizeof(*p2m_top) * TOP_ENTRIES(max_pfn),
+                            PAGE_SIZE);
+       for (i = 0; i < TOP_ENTRIES(max_pfn); i++)
+               p2m_top[i] = p2m_missing;
+
+       p2m_top_mfn = extend_brk(sizeof(*p2m_top_mfn) * TOP_ENTRIES(max_pfn),
+                                PAGE_SIZE);
+       p2m_top_mfn_list = extend_brk(sizeof(*p2m_top_mfn_list) *
+                                     (TOP_ENTRIES(max_pfn) / P2M_ENTRIES_PER_PAGE),
+                                     PAGE_SIZE);
 
        for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
                unsigned topidx = p2m_top_index(pfn);
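(Note: the extend_brk() calls above size these arrays from the domain's actual page count rather than the compile-time MAX_DOMAIN_PAGES maximum. A worked illustration of the resulting brk usage, assuming 64-bit, 4 KiB pages and a 4 GB domain; the numbers are an example, not from the patch:

/*
 * max_pfn              = 4 GB / 4 KiB  = 1048576
 * TOP_ENTRIES(max_pfn) = 1048576 / 512 = 2048
 *
 * p2m_missing      : 512 entries   * 8 bytes = 4 KiB (one page)
 * p2m_top          : 2048 pointers * 8 bytes = 16 KiB
 * p2m_top_mfn      : 2048 entries  * 8 bytes = 16 KiB
 * p2m_top_mfn_list : (2048 / 512)  * 8 bytes = 32 bytes, page-aligned
 */
)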
@@ -232,7 +266,7 @@ unsigned long get_phys_to_machine(unsigned long pfn)
 {
        unsigned topidx, idx;
 
-       if (unlikely(pfn >= MAX_DOMAIN_PAGES))
+       if (unlikely(pfn >= max_p2m_pfn))
                return INVALID_P2M_ENTRY;
 
        topidx = p2m_top_index(pfn);
@@ -241,8 +275,8 @@ unsigned long get_phys_to_machine(unsigned long pfn)
 }
 EXPORT_SYMBOL_GPL(get_phys_to_machine);
 
-/* install a  new p2m_top page */
-bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
+/* install a new p2m_top page */
+static bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
 {
        unsigned topidx = p2m_top_index(pfn);
        unsigned long **pfnp, *mfnp;
@@ -278,7 +312,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
 {
        unsigned topidx, idx;
 
-       if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
+       if (unlikely(pfn >= max_p2m_pfn)) {
                BUG_ON(mfn != INVALID_P2M_ENTRY);
                return true;
        }
@@ -378,6 +412,28 @@ static bool xen_page_pinned(void *ptr)
        return PagePinned(page);
 }
 
+static bool xen_iomap_pte(pte_t pte)
+{
+       return pte_flags(pte) & _PAGE_IOMAP;
+}
+
+static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval)
+{
+       struct multicall_space mcs;
+       struct mmu_update *u;
+
+       mcs = xen_mc_entry(sizeof(*u));
+       u = mcs.args;
+
+       /* ptep might be kmapped when using 32-bit HIGHPTE */
+       u->ptr = arbitrary_virt_to_machine(ptep).maddr;
+       u->val = pte_val_ma(pteval);
+
+       MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_IO);
+
+       xen_mc_issue(PARAVIRT_LAZY_MMU);
+}
+
 static void xen_extend_mmu_update(const struct mmu_update *update)
 {
        struct multicall_space mcs;
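(Note: the xen_set_iomap_pte() helper added above issues the update against DOMID_IO and treats the pte's frame number as a raw machine frame. A hypothetical caller-side sketch; example_map_io_frame() and its arguments are illustrative and not part of the patch:

static void example_map_io_frame(struct mm_struct *mm, unsigned long vaddr,
				 pte_t *ptep, unsigned long mfn)
{
	/* _PAGE_IOMAP marks the frame number as a raw machine frame. */
	pgprot_t prot = __pgprot(pgprot_val(PAGE_KERNEL) | _PAGE_IOMAP);

	/* Lands in xen_set_pte_at(), which detects xen_iomap_pte()
	   and routes the update through xen_set_iomap_pte(). */
	set_pte_at(mm, vaddr, ptep, pfn_pte(mfn, prot));
}
)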
@@ -454,6 +510,11 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
 void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, pte_t pteval)
 {
+       if (xen_iomap_pte(pteval)) {
+               xen_set_iomap_pte(ptep, pteval);
+               goto out;
+       }
+
        ADD_STATS(set_pte_at, 1);
 //     ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
        ADD_STATS(set_pte_at_current, mm == current->mm);
@@ -524,8 +585,25 @@ static pteval_t pte_pfn_to_mfn(pteval_t val)
        return val;
 }
 
+static pteval_t iomap_pte(pteval_t val)
+{
+       if (val & _PAGE_PRESENT) {
+               unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
+               pteval_t flags = val & PTE_FLAGS_MASK;
+
+               /* We assume the pte frame number is an MFN, so
+                  just use it as-is. */
+               val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
+       }
+
+       return val;
+}
+
 pteval_t xen_pte_val(pte_t pte)
 {
+       if (xen_initial_domain() && (pte.pte & _PAGE_IOMAP))
+               return pte.pte;
+
        return pte_mfn_to_pfn(pte.pte);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
@@ -538,7 +616,22 @@ PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);
 
 pte_t xen_make_pte(pteval_t pte)
 {
-       pte = pte_pfn_to_mfn(pte);
+       phys_addr_t addr = (pte & PTE_PFN_MASK);
+
+       /*
+        * Unprivileged domains are allowed to do IOMAPpings for
+        * PCI passthrough, but not map ISA space.  The ISA
+        * mappings are just dummy local mappings to keep other
+        * parts of the kernel happy.
+        */
+       if (unlikely(pte & _PAGE_IOMAP) &&
+           (xen_initial_domain() || addr >= ISA_END_ADDRESS)) {
+               pte = iomap_pte(pte);
+       } else {
+               pte &= ~_PAGE_IOMAP;
+               pte = pte_pfn_to_mfn(pte);
+       }
+
        return native_make_pte(pte);
 }
 PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
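(Note: to summarise the new branch in xen_make_pte() above, the table below restates the condition; no new behaviour, illustration only:

/*
 * _PAGE_IOMAP  xen_initial_domain()  addr < ISA_END_ADDRESS  result
 * -----------  --------------------  ----------------------  -------------------------------
 * clear        any                   any                     normal pfn -> mfn translation
 * set          yes                   any                     frame kept as an MFN (iomap_pte)
 * set          no                    no                      frame kept as an MFN (iomap_pte)
 * set          no                    yes                     _PAGE_IOMAP cleared, pfn -> mfn
 */
)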
@@ -594,6 +687,11 @@ void xen_set_pud(pud_t *ptr, pud_t val)
 
 void xen_set_pte(pte_t *ptep, pte_t pte)
 {
+       if (xen_iomap_pte(pte)) {
+               xen_set_iomap_pte(ptep, pte);
+               return;
+       }
+
        ADD_STATS(pte_update, 1);
 //     ADD_STATS(pte_update_pinned, xen_page_pinned(ptep));
        ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU);
@@ -610,6 +708,11 @@ void xen_set_pte(pte_t *ptep, pte_t pte)
 #ifdef CONFIG_X86_PAE
 void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
 {
+       if (xen_iomap_pte(pte)) {
+               xen_set_iomap_pte(ptep, pte);
+               return;
+       }
+
        set_64bit((u64 *)ptep, native_pte_val(pte));
 }
 
@@ -936,8 +1039,6 @@ static int xen_pin_page(struct mm_struct *mm, struct page *page,
    read-only, and can be pinned. */
 static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
 {
-       vm_unmap_aliases();
-
        xen_mc_batch();
 
        if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
@@ -1501,7 +1602,6 @@ static void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, unsigned l
        if (PagePinned(virt_to_page(mm->pgd))) {
                SetPagePinned(page);
 
-               vm_unmap_aliases();
                if (!PageHighMem(page)) {
                        make_lowmem_page_readonly(__va(PFN_PHYS((unsigned long)pfn)));
                        if (level == PT_PTE && USE_SPLIT_PTLOCKS)
@@ -1619,6 +1719,9 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
        unsigned ident_pte;
        unsigned long pfn;
 
+       level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES,
+                                     PAGE_SIZE);
+
        ident_pte = 0;
        pfn = 0;
        for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) {
@@ -1629,7 +1732,7 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
                        pte_page = m2v(pmd[pmdidx].pmd);
                else {
                        /* Check for free pte pages */
-                       if (ident_pte == ARRAY_SIZE(level1_ident_pgt))
+                       if (ident_pte == LEVEL1_IDENT_ENTRIES)
                                break;
 
                        pte_page = &level1_ident_pgt[ident_pte];
@@ -1744,13 +1847,15 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
        return pgd;
 }
 #else  /* !CONFIG_X86_64 */
-static pmd_t level2_kernel_pgt[PTRS_PER_PMD] __page_aligned_bss;
+static RESERVE_BRK_ARRAY(pmd_t, level2_kernel_pgt, PTRS_PER_PMD);
 
 __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
                                         unsigned long max_pfn)
 {
        pmd_t *kernel_pmd;
 
+       level2_kernel_pgt = extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
+
        max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
                                  xen_start_info->nr_pt_frames * PAGE_SIZE +
                                  512*1024);
@@ -1812,9 +1917,16 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
                pte = pfn_pte(phys, prot);
                break;
 
-       default:
+       case FIX_PARAVIRT_BOOTMAP:
+               /* This is an MFN, but it isn't an IO mapping from the
+                  IO domain */
                pte = mfn_pte(phys, prot);
                break;
+
+       default:
+               /* By default, set_fixmap is used for hardware mappings */
+               pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP));
+               break;
        }
 
        __native_set_fixmap(idx, pte);
@@ -1940,7 +2052,205 @@ void __init xen_init_mmu_ops(void)
        x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start;
        x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done;
        pv_mmu_ops = xen_mmu_ops;
+
+       vmap_lazy_unmap = false;
+}
+
+/* Protected by xen_reservation_lock. */
+#define MAX_CONTIG_ORDER 9 /* 2MB */
+static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
+
+#define VOID_PTE (mfn_pte(0, __pgprot(0)))
+static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order,
+                               unsigned long *in_frames,
+                               unsigned long *out_frames)
+{
+       int i;
+       struct multicall_space mcs;
+
+       xen_mc_batch();
+       for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) {
+               mcs = __xen_mc_entry(0);
+
+               if (in_frames)
+                       in_frames[i] = virt_to_mfn(vaddr);
+
+               MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0);
+               set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY);
+
+               if (out_frames)
+                       out_frames[i] = virt_to_pfn(vaddr);
+       }
+       xen_mc_issue(0);
+}
+
+/*
+ * Update the pfn-to-mfn mappings for a virtual address range, either to
+ * point to an array of mfns, or contiguously from a single starting
+ * mfn.
+ */
+static void xen_remap_exchanged_ptes(unsigned long vaddr, int order,
+                                    unsigned long *mfns,
+                                    unsigned long first_mfn)
+{
+       unsigned i, limit;
+       unsigned long mfn;
+
+       xen_mc_batch();
+
+       limit = 1u << order;
+       for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) {
+               struct multicall_space mcs;
+               unsigned flags;
+
+               mcs = __xen_mc_entry(0);
+               if (mfns)
+                       mfn = mfns[i];
+               else
+                       mfn = first_mfn + i;
+
+               if (i < (limit - 1))
+                       flags = 0;
+               else {
+                       if (order == 0)
+                               flags = UVMF_INVLPG | UVMF_ALL;
+                       else
+                               flags = UVMF_TLB_FLUSH | UVMF_ALL;
+               }
+
+               MULTI_update_va_mapping(mcs.mc, vaddr,
+                               mfn_pte(mfn, PAGE_KERNEL), flags);
+
+               set_phys_to_machine(virt_to_pfn(vaddr), mfn);
+       }
+
+       xen_mc_issue(0);
+}
+
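(Note: a remark on the flags chosen in xen_remap_exchanged_ptes() above, derived from the code; no new behaviour:

/*
 * Only the final MULTI_update_va_mapping in the batch carries a flush:
 *   order == 0:  UVMF_INVLPG | UVMF_ALL      invalidate that single VA on every vcpu
 *   order  > 0:  UVMF_TLB_FLUSH | UVMF_ALL   full TLB flush on every vcpu
 */
)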
+/*
+ * Perform the hypercall to exchange a region of our pfns to point to
+ * memory with the required contiguous alignment.  Takes the pfns as
+ * input, and populates mfns as output.
+ *
+ * Returns a success code indicating whether the hypervisor was able to
+ * satisfy the request or not.
+ */
+static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in,
+                              unsigned long *pfns_in,
+                              unsigned long extents_out,
+                              unsigned int order_out,
+                              unsigned long *mfns_out,
+                              unsigned int address_bits)
+{
+       long rc;
+       int success;
+
+       struct xen_memory_exchange exchange = {
+               .in = {
+                       .nr_extents   = extents_in,
+                       .extent_order = order_in,
+                       .extent_start = pfns_in,
+                       .domid        = DOMID_SELF
+               },
+               .out = {
+                       .nr_extents   = extents_out,
+                       .extent_order = order_out,
+                       .extent_start = mfns_out,
+                       .address_bits = address_bits,
+                       .domid        = DOMID_SELF
+               }
+       };
+
+       BUG_ON(extents_in << order_in != extents_out << order_out);
+
+       rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
+       success = (exchange.nr_exchanged == extents_in);
+
+       BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
+       BUG_ON(success && (rc != 0));
+
+       return success;
+}
+
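(Note: the BUG_ON in xen_exchange_memory() above encodes the XENMEM_exchange requirement that input and output describe the same amount of memory. Worked numbers for the two callers that follow, shown for order == 9, i.e. 2 MB; illustration only:

/*
 * xen_create_contiguous_region():  in  = 512 extents of order 0
 *                                  out = 1   extent  of order 9
 *                                  512 << 0 == 1 << 9 == 512 frames
 *
 * xen_destroy_contiguous_region(): in  = 1   extent  of order 9
 *                                  out = 512 extents of order 0
 */
)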
+int xen_create_contiguous_region(unsigned long vstart, unsigned int order,
+                                unsigned int address_bits)
+{
+       unsigned long *in_frames = discontig_frames, out_frame;
+       unsigned long  flags;
+       int            success;
+
+       /*
+        * Currently an auto-translated guest will not perform I/O, nor will
+        * it require PAE page directories below 4GB. Therefore any calls to
+        * this function are redundant and can be ignored.
+        */
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return 0;
+
+       if (unlikely(order > MAX_CONTIG_ORDER))
+               return -ENOMEM;
+
+       memset((void *) vstart, 0, PAGE_SIZE << order);
+
+       spin_lock_irqsave(&xen_reservation_lock, flags);
+
+       /* 1. Zap current PTEs, remembering MFNs. */
+       xen_zap_pfn_range(vstart, order, in_frames, NULL);
+
+       /* 2. Get a new contiguous memory extent. */
+       out_frame = virt_to_pfn(vstart);
+       success = xen_exchange_memory(1UL << order, 0, in_frames,
+                                     1, order, &out_frame,
+                                     address_bits);
+
+       /* 3. Map the new extent in place of old pages. */
+       if (success)
+               xen_remap_exchanged_ptes(vstart, order, NULL, out_frame);
+       else
+               xen_remap_exchanged_ptes(vstart, order, in_frames, 0);
+
+       spin_unlock_irqrestore(&xen_reservation_lock, flags);
+
+       return success ? 0 : -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
+
+void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
+{
+       unsigned long *out_frames = discontig_frames, in_frame;
+       unsigned long  flags;
+       int success;
+
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               return;
+
+       if (unlikely(order > MAX_CONTIG_ORDER))
+               return;
+
+       memset((void *) vstart, 0, PAGE_SIZE << order);
+
+       spin_lock_irqsave(&xen_reservation_lock, flags);
+
+       /* 1. Find start MFN of contiguous extent. */
+       in_frame = virt_to_mfn(vstart);
+
+       /* 2. Zap current PTEs. */
+       xen_zap_pfn_range(vstart, order, NULL, out_frames);
+
+       /* 3. Do the exchange for non-contiguous MFNs. */
+       success = xen_exchange_memory(1, order, &in_frame, 1UL << order,
+                                       0, out_frames, 0);
+
+       /* 4. Map new pages in place of old pages. */
+       if (success)
+               xen_remap_exchanged_ptes(vstart, order, out_frames, 0);
+       else
+               xen_remap_exchanged_ptes(vstart, order, NULL, in_frame);
+
+       spin_unlock_irqrestore(&xen_reservation_lock, flags);
 }
+EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
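(Note: a hypothetical usage sketch of the two exported interfaces, for a buffer that a device must see as machine-contiguous. The example_* names are illustrative; the Xen swiotlb/DMA code is the sort of in-tree caller these are intended for:

static void *example_alloc_dma_buffer(unsigned int order,
				      unsigned int address_bits)
{
	unsigned long vstart = __get_free_pages(GFP_KERNEL, order);

	if (!vstart)
		return NULL;

	/* Swap the backing MFNs for one machine-contiguous extent. */
	if (xen_create_contiguous_region(vstart, order, address_bits)) {
		free_pages(vstart, order);
		return NULL;
	}
	return (void *)vstart;
}

static void example_free_dma_buffer(void *buf, unsigned int order)
{
	/* Hand the contiguous extent back before freeing the pages. */
	xen_destroy_contiguous_region((unsigned long)buf, order);
	free_pages((unsigned long)buf, order);
}
)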
 
 #ifdef CONFIG_XEN_PVHVM
 static void xen_hvm_exit_mmap(struct mm_struct *mm)