#endif
}
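+
+/*
+ * Each of these expands to a (base, top, size) triple for the memory
+ * layout printks: size in kB for MLK, MB for MLM, and kB rounded up
+ * to a whole unit for MLK_ROUNDUP.
+ */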
+#define MLK(b, t) (b), (t), (((t) - (b)) >> 10)
+#define MLM(b, t) (b), (t), (((t) - (b)) >> 20)
+#define MLK_ROUNDUP(b, t) (b), (t), (DIV_ROUND_UP(((t) - (b)), SZ_1K))
+
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
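+/*
+ * With CONFIG_ENABLE_VMALLOC_SAVING lowmem need not be virtually
+ * contiguous, so print each lowmem region together with the vmalloc
+ * ranges around it instead of a single vmalloc/lowmem pair.
+ */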
+static void print_vmalloc_lowmem_info(void)
+{
+ struct memblock_region *reg, *prev_reg = NULL;
+
+ pr_notice(
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)high_memory, VMALLOC_END));
+
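+ /*
+ * Walk the memory memblock regions from the highest address down,
+ * printing each lowmem span (clamped to arm_lowmem_limit) and the
+ * vmalloc hole separating it from the previously printed region.
+ */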
+ for_each_memblock_rev(memory, reg) {
+ phys_addr_t start_phys = reg->base;
+ phys_addr_t end_phys = reg->base + reg->size;
+
+ if (start_phys > arm_lowmem_limit)
+ continue;
+
+ if (end_phys > arm_lowmem_limit)
+ end_phys = arm_lowmem_limit;
+
+ if (prev_reg == NULL) {
+ prev_reg = reg;
+
+ pr_notice(
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)__va(start_phys),
+ (unsigned long)__va(end_phys)));
+
+ continue;
+ }
+
+ pr_notice(
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)__va(end_phys),
+ (unsigned long)__va(prev_reg->base)));
+
+ pr_notice(
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
+ MLM((unsigned long)__va(start_phys),
+ (unsigned long)__va(end_phys)));
+
+ /* Remember this region so the next gap is measured against it. */
+ prev_reg = reg;
+ }
+}
+#endif
+
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
mem_init_print_info(NULL);
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
-
pr_notice("Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
" DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
" ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
#endif
- " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#ifdef CONFIG_HIGHMEM
- " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
-#ifdef CONFIG_MODULES
- " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
- " .text : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .init : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .data : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
-
+ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n",
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
- MLK(FIXADDR_START, FIXADDR_END),
+ MLK(FIXADDR_START, FIXADDR_END));
+#ifdef CONFIG_ENABLE_VMALLOC_SAVING
+ print_vmalloc_lowmem_info();
+#else
+ printk(KERN_NOTICE
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
MLM(VMALLOC_START, VMALLOC_END),
- MLM(PAGE_OFFSET, (unsigned long)high_memory),
+ MLM(PAGE_OFFSET, (unsigned long)high_memory));
+#endif
+ printk(KERN_NOTICE
+#ifdef CONFIG_HIGHMEM
+ " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
+#ifdef CONFIG_MODULES
+ " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
+#endif
+ " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
+ " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
#ifdef CONFIG_HIGHMEM
MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
(PAGE_SIZE)),
MLK_ROUNDUP(_sdata, _edata),
MLK_ROUNDUP(__bss_start, __bss_stop));
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
-
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
}
}
+#undef MLK
+#undef MLM
+#undef MLK_ROUNDUP
+
#ifdef CONFIG_ARM_KERNMEM_PERMS
struct section_perm {
unsigned long start;
pmdval_t mask;
pmdval_t prot;
pmdval_t clear;
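+ /* pte-level equivalents, used when the range is mapped with pages. */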
+ pteval_t ptemask;
+ pteval_t pteprot;
+ pteval_t pteclear;
};
static struct section_perm nx_perms[] = {
.end = (unsigned long)_stext,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
+ .ptemask = ~L_PTE_XN,
+ .pteprot = L_PTE_XN,
},
/* Make init RW (set NX). */
{
.end = (unsigned long)_sdata,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
+ .ptemask = ~L_PTE_XN,
+ .pteprot = L_PTE_XN,
},
#ifdef CONFIG_DEBUG_RODATA
/* Make rodata NX (set RO in ro_perms below). */
.end = (unsigned long)__init_begin,
.mask = ~PMD_SECT_XN,
.prot = PMD_SECT_XN,
+ .ptemask = ~L_PTE_XN,
+ .pteprot = L_PTE_XN,
},
#endif
};
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
.clear = PMD_SECT_AP_WRITE,
#endif
+ .ptemask = ~L_PTE_RDONLY,
+ .pteprot = L_PTE_RDONLY,
},
};
#endif
* copied into each mm). During startup, this is the init_mm. Is only
* safe to be called with preemption disabled, as under stop_machine().
*/
+struct pte_data {
+ pteval_t mask;
+ pteval_t val;
+};
+
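+/*
+ * apply_to_page_range() callback: rewrite one pte using the mask and
+ * value carried in the struct pte_data argument.
+ */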
+static int __pte_update(pte_t *ptep, pgtable_t token, unsigned long addr,
+ void *d)
+{
+ struct pte_data *data = d;
+ pte_t pte;
+
+ pte = __pte((pte_val(*ptep) & data->mask) | data->val);
+ set_pte_ext(ptep, pte, 0);
+
+ return 0;
+}
+
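+/*
+ * Rewrite every pte in one section-sized range with the given
+ * mask/prot and flush the corresponding kernel TLB entries.
+ */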
+static inline void pte_update(unsigned long addr, pteval_t mask,
+ pteval_t prot, struct mm_struct *mm)
+{
+ struct pte_data data;
+
+ data.mask = mask;
+ data.val = prot;
+
+ apply_to_page_range(mm, addr, SECTION_SIZE, __pte_update, &data);
+ flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
+}
+
static inline void section_update(unsigned long addr, pmdval_t mask,
pmdval_t prot, struct mm_struct *mm)
{
for (addr = perms[i].start;
addr < perms[i].end;
- addr += SECTION_SIZE)
- section_update(addr, perms[i].mask,
- set ? perms[i].prot : perms[i].clear, mm);
+ addr += SECTION_SIZE) {
+ pmd_t *pmd;
+
+ pmd = pmd_offset(pud_offset(pgd_offset(mm, addr),
+ addr), addr);
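+ /*
+ * A section-mapped pmd is changed directly; a pmd pointing at a
+ * page table is walked and each pte is updated instead.
+ */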
+ if (pmd_bad(*pmd))
+ section_update(addr, perms[i].mask,
+ set ? perms[i].prot : perms[i].clear,
+ mm);
+ else
+ pte_update(addr, perms[i].ptemask,
+ set ? perms[i].pteprot : perms[i].pteclear,
+ mm);
+ }
}
-
}
static void update_sections_early(struct section_perm perms[], int n)