 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface.  Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks.  This
 * is implemented by using atomic operations and retries on any
 * conflicts.  The disadvantage is that there may be livelocks in
 * extreme cases.  For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available.  If new memory is added to the pool, a lock still has
 * to be taken.  So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers.  So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>
#include <linux/vmalloc.h>
/* size in bytes of a chunk, both end addresses inclusive */
static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
        return chunk->end_addr - chunk->start_addr + 1;
}
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
        unsigned long val, nval;
                if (val & mask_to_set)
        } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
        unsigned long val, nval;
                if ((val & mask_to_clear) != mask_to_clear)
        } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users set the same bit, one user will return the remaining bits,
 * otherwise return 0.
static int bitmap_set_ll(unsigned long *map, int start, int nr)
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_set >= 0) {
                if (set_bits_ll(p, mask_to_set))
                bits_to_set = BITS_PER_LONG;
        mask_to_set &= BITMAP_LAST_WORD_MASK(size);
        if (set_bits_ll(p, mask_to_set))
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly.  Several users
 * can set/clear the same bitmap simultaneously without a lock.  If two
 * users clear the same bit, one user will return the remaining bits,
 * otherwise return 0.
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
        unsigned long *p = map + BIT_WORD(start);
        const int size = start + nr;
        int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
        unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

        while (nr - bits_to_clear >= 0) {
                if (clear_bits_ll(p, mask_to_clear))
                bits_to_clear = BITS_PER_LONG;
                mask_to_clear = ~0UL;
        mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
        if (clear_bits_ll(p, mask_to_clear))
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                spin_lock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
                pool->algo = gen_pool_first_fit;
                pool->data = NULL;
                pool->name = NULL;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);
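/*
 * Usage sketch (illustrative only, not part of this file's API): a driver
 * managing a small on-chip SRAM might create a pool whose smallest
 * allocation unit is 32 bytes (order 5).  The variable names below are
 * hypothetical.
 *
 *      struct gen_pool *sram_pool;
 *
 *      sram_pool = gen_pool_create(5, -1);
 *      if (!sram_pool)
 *              return -ENOMEM;
 */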
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
                 size_t size, int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                BITS_TO_LONGS(nbits) * sizeof(long);

        chunk = vzalloc_node(nbytes, nid);
        if (unlikely(chunk == NULL))
                return -ENOMEM;

        chunk->phys_addr = phys;
        chunk->start_addr = virt;
        chunk->end_addr = virt + size - 1;
        atomic_long_set(&chunk->avail, size);

        spin_lock(&pool->lock);
        list_add_rcu(&chunk->next_chunk, &pool->chunks);
        spin_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
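/*
 * Usage sketch (hypothetical names): once a pool exists, a chunk of
 * already-mapped device memory can be handed to it.  If no physical
 * address is needed, the gen_pool_add() wrapper from <linux/genalloc.h>
 * can be used instead.
 *
 *      ret = gen_pool_add_virt(sram_pool, (unsigned long)sram_virt,
 *                              sram_phys, SZ_64K, -1);
 *      if (ret)
 *              goto err_destroy_pool;
 */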
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        paddr = chunk->phys_addr + (addr - chunk->start_addr);
EXPORT_SYMBOL(gen_pool_virt_to_phys);
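/*
 * Usage sketch (hypothetical names): a buffer carved out of the pool can
 * be translated to the physical address recorded when its chunk was added.
 *
 *      unsigned long vaddr = gen_pool_alloc(sram_pool, 256);
 *      phys_addr_t paddr = gen_pool_virt_to_phys(sram_pool, vaddr);
 */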
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = chunk_size(chunk) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);  /* nothing may still be allocated */

                vfree(chunk);
        }
        kfree_const(pool->name);
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
        struct gen_pool_chunk *chunk;
        unsigned long addr = 0;
        int order = pool->min_alloc_order;
        int nbits, start_bit, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        nbits = (size + (1UL << order) - 1) >> order;
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (size > atomic_long_read(&chunk->avail))
                end_bit = chunk_size(chunk) >> order;
                start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
                if (start_bit >= end_bit)
                remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
                        remain = bitmap_clear_ll(chunk->bits, start_bit,
                addr = chunk->start_addr + ((unsigned long)start_bit << order);
                size = nbits << order;
                atomic_long_sub(size, &chunk->avail);
EXPORT_SYMBOL(gen_pool_alloc);
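/*
 * Usage sketch (hypothetical pool and sizes): allocations come back as
 * pool-local addresses, with 0 meaning failure, and are returned with
 * gen_pool_free().
 *
 *      unsigned long buf;
 *
 *      buf = gen_pool_alloc(sram_pool, 512);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      gen_pool_free(sram_pool, buf, 512);
 */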
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: DMA-view physical address return value.  Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
        vaddr = gen_pool_alloc(pool, size);
                *dma = gen_pool_virt_to_phys(pool, vaddr);
        return (void *)vaddr;
EXPORT_SYMBOL(gen_pool_dma_alloc);
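/*
 * Usage sketch (hypothetical names): when the buffer is handed to a
 * device, the DMA-view address can be obtained in the same call.  The
 * address returned through @dma is derived from the chunk's phys_addr;
 * whether it is directly usable for DMA depends on how the chunk was
 * registered.
 *
 *      dma_addr_t dma;
 *      void *buf;
 *
 *      buf = gen_pool_dma_alloc(sram_pool, 512, &dma);
 *      if (!buf)
 *              return -ENOMEM;
 */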
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool.  Cannot be used in an NMI handler on architectures without an
 * NMI-safe cmpxchg implementation.
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
        BUG_ON(in_nmi());
#endif

        nbits = (size + (1UL << order) - 1) >> order;
        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
                if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
                        BUG_ON(addr + size - 1 > chunk->end_addr);
                        start_bit = (addr - chunk->start_addr) >> order;
                        remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
                        BUG_ON(remain);
                        size = nbits << order;
                        atomic_long_add(size, &chunk->avail);
                        rcu_read_unlock();
                        return;
                }
        }
        rcu_read_unlock();
        BUG_ON(1);
}
EXPORT_SYMBOL(gen_pool_free);
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool.  The @func is
 * called with rcu_read_lock held.
void gen_pool_for_each_chunk(struct gen_pool *pool,
        void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
        void *data)
{
        struct gen_pool_chunk *chunk;

        rcu_read_lock();
        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
                func(pool, chunk, data);
        rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
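/*
 * Usage sketch (hypothetical callback): a driver could dump every chunk
 * for debugging.  The callback runs under rcu_read_lock(), so it must
 * not sleep.
 *
 *      static void show_chunk(struct gen_pool *pool,
 *                             struct gen_pool_chunk *chunk, void *data)
 *      {
 *              pr_info("chunk %lx-%lx\n", chunk->start_addr, chunk->end_addr);
 *      }
 *
 *      gen_pool_for_each_chunk(sram_pool, show_chunk, NULL);
 */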
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool.  Returns
 * true if the entire range is contained in the pool and false otherwise.
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
                        size_t size)
        unsigned long end = start + size - 1;
        struct gen_pool_chunk *chunk;

        list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
                if (start >= chunk->start_addr && start <= chunk->end_addr) {
                        if (end <= chunk->end_addr) {
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
size_t gen_pool_avail(struct gen_pool *pool)
        struct gen_pool_chunk *chunk;

        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                avail += atomic_long_read(&chunk->avail);
EXPORT_SYMBOL_GPL(gen_pool_avail);
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
size_t gen_pool_size(struct gen_pool *pool)
        struct gen_pool_chunk *chunk;

        list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
                size += chunk_size(chunk);
EXPORT_SYMBOL_GPL(gen_pool_size);
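/*
 * Usage sketch (hypothetical pool): the two accessors above are handy
 * for diagnostics.
 *
 *      pr_debug("sram pool: %zu of %zu bytes free\n",
 *               gen_pool_avail(sram_pool), gen_pool_size(sram_pool));
 */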
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
                pool->algo = gen_pool_first_fit;
EXPORT_SYMBOL(gen_pool_set_algo);
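/*
 * Usage sketch (hypothetical pool): switch a pool to the best-fit search
 * defined below; passing NULL would restore the first-fit default.
 *
 *      gen_pool_set_algo(sram_pool, gen_pool_best_fit, NULL);
 */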
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement.  The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
                unsigned long size, unsigned long start,
                unsigned int nr, void *data)
{
        unsigned long align_mask = roundup_pow_of_two(nr) - 1;

        return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bit number to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
                unsigned long start, unsigned int nr, void *data)
{
        unsigned long start_bit = size;
        unsigned long len = size + 1;
        unsigned long index;

        index = bitmap_find_next_zero_area(map, size, start, nr, 0);

        while (index < size) {
                int next_bit = find_next_bit(map, size, index + nr);
                if ((next_bit - index) < len) {
                        len = next_bit - index;
                        start_bit = index;
                        if (next_bit - index == nr)
                                break;
                }
                index = bitmap_find_next_zero_area(map, size,
                                                   next_bit + 1, nr, 0);
        }

        return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
static void devm_gen_pool_release(struct device *dev, void *res)
{
        gen_pool_destroy(*(struct gen_pool **)res);
}
static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
        struct gen_pool **p = res;

        /* NULL data matches only a pool without an assigned name */
        if (!data && !(*p)->name)
                return 1;
        if (!data || !(*p)->name)
                return 0;
        return !strcmp((*p)->name, data);
}
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
        p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
EXPORT_SYMBOL_GPL(gen_pool_get);
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.  The pool will be
 * automatically destroyed by the device management code.
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
                                      int nid, const char *name)
        struct gen_pool **ptr, *pool;
        const char *pool_name = NULL;

        /* Check that genpool to be created is uniquely addressed on device */
        if (gen_pool_get(dev, name))
                return ERR_PTR(-EINVAL);

                pool_name = kstrdup_const(name, GFP_KERNEL);
                        return ERR_PTR(-ENOMEM);

        ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);

        pool = gen_pool_create(min_alloc_order, nid);

        pool->name = pool_name;
        devres_add(dev, ptr);

        kfree_const(pool_name);
        return ERR_PTR(-ENOMEM);
EXPORT_SYMBOL(devm_gen_pool_create);
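/*
 * Usage sketch (hypothetical probe function): with the devm_ variant the
 * pool is destroyed automatically when the device is unbound.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct gen_pool *pool;
 *
 *              pool = devm_gen_pool_create(&pdev->dev, 5, NUMA_NO_NODE,
 *                                          "foo-sram");
 *              if (IS_ERR(pool))
 *                      return PTR_ERR(pool);
 *              ...
 *      }
 */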
#ifdef CONFIG_OF
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
struct gen_pool *of_gen_pool_get(struct device_node *np,
        const char *propname, int index)
{
        struct platform_device *pdev;
        struct device_node *np_pool, *parent;
        const char *name = NULL;
        struct gen_pool *pool = NULL;

        np_pool = of_parse_phandle(np, propname, index);
        if (!np_pool)
                return NULL;

        pdev = of_find_device_by_node(np_pool);
        if (!pdev) {
                /* Check if named gen_pool is created by parent node device */
                parent = of_get_parent(np_pool);
                pdev = of_find_device_by_node(parent);
                of_node_put(parent);

                of_property_read_string(np_pool, "label", &name);
                if (!name)
                        name = np_pool->name;
        }
        if (pdev)
                pool = gen_pool_get(&pdev->dev, name);
        of_node_put(np_pool);

        return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
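/*
 * Usage sketch (hypothetical names): if a device tree node carries a
 * phandle property such as "sram" pointing at the node of the device
 * that owns the pool, a driver can look the pool up with:
 *
 *      struct gen_pool *pool;
 *
 *      pool = of_gen_pool_get(pdev->dev.of_node, "sram", 0);
 *      if (!pool)
 *              return -EPROBE_DEFER;
 */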
#endif /* CONFIG_OF */