/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/pgalloc.h>

rwlock_t vmlist_lock = RW_LOCK_UNLOCKED;
struct vm_struct * vmlist;
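
/*
 * Unmap one pmd's worth of ptes in [address, address+size) and release
 * the pages that were mapped there.  Reserved and invalid pages are
 * left alone; a swapped-out pte in the kernel page tables is a bug
 * and is reported.
 */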
static inline void free_area_pte(pmd_t * pmd, unsigned long address, unsigned long size)
{
	pte_t * pte;
	unsigned long end;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset(pmd, address);
	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		pte_t page;
		page = ptep_get_and_clear(pte);
		address += PAGE_SIZE;
		pte++;
		if (pte_none(page))
			continue;
		if (pte_present(page)) {
			struct page *ptpage = pte_page(page);
			if (VALID_PAGE(ptpage) && (!PageReserved(ptpage)))
				__free_page(ptpage);
			continue;
		}
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (address < end);
}
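
/*
 * Walk the pmds under one kernel pgd entry and hand each populated
 * range down to free_area_pte().
 */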
static inline void free_area_pmd(pgd_t * dir, unsigned long address, unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, address);
	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		free_area_pte(pmd, address, end - address);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
}
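
/*
 * Tear down the kernel page-table mappings for [address, address+size),
 * freeing the underlying pages.  Caches are flushed before the unmap
 * and the TLB afterwards.
 */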
void vmfree_area_pages(unsigned long address, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	flush_cache_all();
	do {
		free_area_pmd(dir, address, end - address);
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
}
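
/*
 * Fill one pmd's worth of ptes.  With pages == NULL fresh pages are
 * allocated (dropping init_mm.page_table_lock around alloc_page(),
 * which may sleep); otherwise the caller-supplied pages are mapped,
 * with an extra reference taken on each so a later vfree() of the
 * area can drop it safely.
 */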
static inline int alloc_area_pte (pte_t * pte, unsigned long address,
			unsigned long size, int gfp_mask,
			pgprot_t prot, struct page ***pages)
{
	unsigned long end;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		struct page * page;

		if (!pages) {
			spin_unlock(&init_mm.page_table_lock);
			page = alloc_page(gfp_mask);
			spin_lock(&init_mm.page_table_lock);
		} else {
			page = (**pages);
			(*pages)++;

			/* Add a reference to the page so we can free later */
			if (page)
				atomic_inc(&page->count);
		}
		if (!pte_none(*pte))
			printk(KERN_ERR "alloc_area_pte: page already exists\n");
		if (!page)
			return -ENOMEM;
		set_pte(pte, mk_pte(page, prot));
		address += PAGE_SIZE;
		pte++;
	} while (address < end);
	return 0;
}
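
/*
 * Allocate pte tables as needed under one kernel pgd entry and
 * populate them via alloc_area_pte().
 */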
static inline int alloc_area_pmd(pmd_t * pmd, unsigned long address,
			unsigned long size, int gfp_mask,
			pgprot_t prot, struct page ***pages)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		if (alloc_area_pte(pte, address, end - address,
					gfp_mask, prot, pages))
			return -ENOMEM;
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address < end);
	return 0;
}
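
/*
 * Back end shared by vmalloc() and vmap(): map [address, address+size)
 * in the kernel page tables.  On failure the partially built mappings
 * are torn down again before returning -ENOMEM.
 */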
static inline int __vmalloc_area_pages (unsigned long address,
					unsigned long size,
					int gfp_mask,
					pgprot_t prot,
					struct page ***pages)
{
	pgd_t * dir;
	unsigned long start = address;
	unsigned long end = address + size;

	dir = pgd_offset_k(address);
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(&init_mm, dir, address);
		if (!pmd)
			goto err;

		if (alloc_area_pmd(pmd, address, end - address, gfp_mask, prot, pages))
			goto err;	// The kernel NEVER reclaims pmds, so no need to undo pmd_alloc() here

		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_all();
	return 0;
err:
	spin_unlock(&init_mm.page_table_lock);
	flush_cache_all();
	if (address > start)
		vmfree_area_pages(start, address - start);
	return -ENOMEM;
}
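
/*
 * Exported wrapper: map a range with freshly allocated pages
 * (pages == NULL selects the allocating path).
 */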
int vmalloc_area_pages(unsigned long address, unsigned long size,
		       int gfp_mask, pgprot_t prot)
{
	return __vmalloc_area_pages(address, size, gfp_mask, prot, NULL);
}
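
/*
 * Reserve a range of kernel virtual addresses in [VMALLOC_START,
 * VMALLOC_END) and link a vm_struct for it into the sorted vmlist.
 * One extra guard page is added to the size so that neighbouring
 * areas are separated by an unmapped hole.
 */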
struct vm_struct * get_vm_area(unsigned long size, unsigned long flags)
{
	unsigned long addr, next;
	struct vm_struct **p, *tmp, *area;

	area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
	if (!area)
		return NULL;

	size += PAGE_SIZE;
	if (!size) {
		kfree (area);
		return NULL;
	}

	addr = VMALLOC_START;
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
		if ((size + addr) < addr)
			goto out;
		if (size + addr <= (unsigned long) tmp->addr)
			break;
		next = tmp->size + (unsigned long) tmp->addr;
		if (next > addr)
			addr = next;
		if (addr > VMALLOC_END-size)
			goto out;
	}
	area->flags = flags;
	area->addr = (void *)addr;
	area->size = size;
	area->next = *p;
	*p = area;
	write_unlock(&vmlist_lock);
	return area;

out:
	write_unlock(&vmlist_lock);
	kfree(area);
	return NULL;
}
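
/*
 * Common worker for vfree() and the allocation failure paths:
 * unlink the vm_struct for addr from vmlist and, if free_area_pages
 * is set, also unmap and free the pages behind it.
 */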
void __vfree(void * addr, int free_area_pages)
{
	struct vm_struct **p, *tmp;

	if (!addr)
		return;
	if ((PAGE_SIZE-1) & (unsigned long) addr) {
		printk(KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
		return;
	}
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			if (free_area_pages)
				vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
			write_unlock(&vmlist_lock);
			kfree(tmp);
			return;
		}
	}
	write_unlock(&vmlist_lock);
	printk(KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", addr);
}
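
/*
 * Free a virtually contiguous area, including the pages backing it.
 */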
void vfree(void * addr)
{
	__vfree(addr,1);
}
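
/*
 * Allocate a virtually contiguous area of at least size bytes,
 * backed by freshly allocated pages.
 */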
void * __vmalloc (unsigned long size, int gfp_mask, pgprot_t prot)
{
	void * addr;
	struct vm_struct *area;

	size = PAGE_ALIGN(size);
	if (!size || (size >> PAGE_SHIFT) > num_physpages)
		return NULL;
	area = get_vm_area(size, VM_ALLOC);
	if (!area)
		return NULL;
	addr = area->addr;
	if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, gfp_mask,
				 prot, NULL)) {
		__vfree(addr, 0);
		return NULL;
	}
	return addr;
}
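
/*
 * Map an array of caller-supplied pages into a contiguous kernel
 * virtual range.
 */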
void * vmap(struct page **pages, int count,
	    unsigned long flags, pgprot_t prot)
{
	void * addr;
	struct vm_struct *area;
	unsigned long size = count << PAGE_SHIFT;

	if (count <= 0 || count > max_mapnr)
		return NULL;
	area = get_vm_area(size, flags);
	if (!area)
		return NULL;
	addr = area->addr;
	if (__vmalloc_area_pages(VMALLOC_VMADDR(addr), size, 0,
				 prot, &pages)) {
		__vfree(addr, 0);
		return NULL;
	}
	return addr;
}
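
/*
 * Copy up to count bytes from vmalloc space at addr into buf, for
 * readers such as /proc/kcore.  Holes between areas read as zeroes.
 */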
long vread(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			*buf = '\0';
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*buf = *addr;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}
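
/*
 * Counterpart of vread(): copy from buf into vmalloc space at addr,
 * skipping over holes between areas.
 */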
long vwrite(char *buf, char *addr, unsigned long count)
{
	struct vm_struct *tmp;
	char *vaddr, *buf_start = buf;
	unsigned long n;

	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	read_lock(&vmlist_lock);
	for (tmp = vmlist; tmp; tmp = tmp->next) {
		vaddr = (char *) tmp->addr;
		if (addr >= vaddr + tmp->size - PAGE_SIZE)
			continue;
		while (addr < vaddr) {
			if (count == 0)
				goto finished;
			buf++;
			addr++;
			count--;
		}
		n = vaddr + tmp->size - PAGE_SIZE - addr;
		do {
			if (count == 0)
				goto finished;
			*addr = *buf;
			buf++;
			addr++;
			count--;
		} while (--n > 0);
	}
finished:
	read_unlock(&vmlist_lock);
	return buf - buf_start;
}