/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>
#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif
int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/* Not usable memory: */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
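/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use page_is_ram() to refuse remapping a pfn that the E820 map reports as
 * normal RAM. The helper name below is hypothetical.
 */
#if 0
static int example_may_remap_pfn(unsigned long pfn)
{
	/* RAM pages are owned by the kernel direct mapping; don't remap. */
	return !page_is_ram(pfn);
}
#endif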
int pagerange_is_ram(unsigned long start, unsigned long end)
{
	int ram_page = 0, not_rampage = 0;
	unsigned long page_nr;

	for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
	     ++page_nr) {
		if (page_is_ram(page_nr))
			ram_page++;
		else
			not_rampage++;

		/* Equal counts mean the range mixes RAM and non-RAM pages */
		if (ram_page == not_rampage)
			return -1;
	}

	return ram_page;
}
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
	     (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
	     pfn++) {
		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}
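/*
 * Worked example of the alignment fix-up above (hypothetical addresses):
 * for phys_addr = 0xfebf1234 and size = 0x100, last_addr is 0xfebf1333,
 * so offset becomes 0x234, phys_addr is rounded down to 0xfebf1000 and
 * size is rounded up to 0x1000. The caller gets vaddr + 0x234 back,
 * pointing at exactly the byte it asked for.
 */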
/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @offset:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes
 * and caching.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
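/*
 * Usage sketch (illustrative, not part of this file): mapping a device's
 * register window uncached and tearing it down again. The physical base
 * address, window size and register offsets below are hypothetical.
 */
#if 0
static int example_map_regs(void)
{
	void __iomem *regs;

	regs = ioremap_nocache(0xfebf0000, 0x1000);	/* hypothetical BAR */
	if (!regs)
		return -ENOMEM;

	writel(1, regs + 0x04);		/* hypothetical control register */
	(void)readl(regs + 0x08);	/* hypothetical status register */

	iounmap(regs);			/* every ioremap needs an iounmap */
	return 0;
}
#endif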
/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @offset:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
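/*
 * Usage sketch (illustrative): a write-combined mapping suits a
 * framebuffer-like aperture where streaming writes dominate and reads are
 * rare. The aperture address and size below are hypothetical; note the
 * fallback to an uncached mapping when PAT is disabled.
 */
#if 0
static void __iomem *example_map_aperture(void)
{
	return ioremap_wc(0xd0000000, 8 * 1024 * 1024);
}
#endif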
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
static void __iomem *ioremap_default(resource_size_t phys_addr,
					unsigned long size)
{
	unsigned long flags;
	void *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
	if (err < 0)
		return NULL;

	ret = (void *) __ioremap_caller(phys_addr, size, flags,
					__builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return (void __iomem *)ret;
}
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
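/*
 * Usage sketch (illustrative): how a /dev/mem-style reader pairs
 * xlate_dev_mem_ptr() with unxlate_dev_mem_ptr(). The helper name and the
 * access pattern below are hypothetical.
 */
#if 0
static int example_peek_phys(unsigned long phys, u8 *val)
{
	void *ptr = xlate_dev_mem_ptr(phys);

	if (!ptr)
		return -EFAULT;
	*val = *(u8 *)ptr;
	unxlate_dev_mem_ptr(phys, ptr);	/* no-op for RAM, iounmap otherwise */
	return 0;
}
#endif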
static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);
static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}
void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
static int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	if (WARN_ON(nesting < 0))
		return;

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
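/*
 * Usage sketch (illustrative): boot-time code mapping a firmware table
 * before the normal ioremap machinery is up. The physical address, length
 * and helper name below are hypothetical. Because mappings share the small
 * FIX_BTMAP fixmap window via a nesting counter, they must be torn down in
 * LIFO order.
 */
#if 0
static u32 __init example_read_firmware_dword(void)
{
	void *p = early_ioremap(0x000f5a60, 16);	/* hypothetical table */
	u32 val = 0;

	if (p) {
		val = *(u32 *)p;
		early_iounmap(p, 16);
	}
	return val;
}
#endif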
void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}