// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 */
9 #include <linux/dma-mapping.h>
10 #include <linux/err.h>
11 #include <linux/highmem.h>
13 #include <linux/scatterlist.h>
14 #include <linux/slab.h>
15 #include <linux/vmalloc.h>
/* Number of distinct allocation orders the heap keeps page pools for. */
#define NUM_ORDERS ARRAY_SIZE(orders)

/*
 * High-order allocations are opportunistic: never enter direct reclaim,
 * never retry and never warn on failure — the allocator below falls back
 * to a smaller order instead.
 */
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_RECLAIM;
/* Order-0 allocations keep default reclaim behaviour. */
static gfp_t low_order_gfp_flags = GFP_HIGHUSER | __GFP_ZERO;
/* Orders tried largest-first (1MB, 64KB, 4KB blocks with 4KB pages). */
static const unsigned int orders[] = {8, 4, 0};
25 static int order_to_index(unsigned int order)
29 for (i = 0; i < NUM_ORDERS; i++)
30 if (order == orders[i])
36 static inline unsigned int order_to_size(int order)
38 return PAGE_SIZE << order;
41 struct ion_system_heap {
43 struct ion_page_pool *pools[NUM_ORDERS];
46 static struct page *alloc_buffer_page(struct ion_system_heap *heap,
47 struct ion_buffer *buffer,
50 struct ion_page_pool *pool = heap->pools[order_to_index(order)];
52 return ion_page_pool_alloc(pool);
55 static void free_buffer_page(struct ion_system_heap *heap,
56 struct ion_buffer *buffer, struct page *page)
58 struct ion_page_pool *pool;
59 unsigned int order = compound_order(page);
62 if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
63 __free_pages(page, order);
67 pool = heap->pools[order_to_index(order)];
69 ion_page_pool_free(pool, page);
72 static struct page *alloc_largest_available(struct ion_system_heap *heap,
73 struct ion_buffer *buffer,
75 unsigned int max_order)
80 for (i = 0; i < NUM_ORDERS; i++) {
81 if (size < order_to_size(orders[i]))
83 if (max_order < orders[i])
86 page = alloc_buffer_page(heap, buffer, orders[i]);
96 static int ion_system_heap_allocate(struct ion_heap *heap,
97 struct ion_buffer *buffer,
101 struct ion_system_heap *sys_heap = container_of(heap,
102 struct ion_system_heap,
104 struct sg_table *table;
105 struct scatterlist *sg;
106 struct list_head pages;
107 struct page *page, *tmp_page;
109 unsigned long size_remaining = PAGE_ALIGN(size);
110 unsigned int max_order = orders[0];
112 if (size / PAGE_SIZE > totalram_pages() / 2)
115 INIT_LIST_HEAD(&pages);
116 while (size_remaining > 0) {
117 page = alloc_largest_available(sys_heap, buffer, size_remaining,
121 list_add_tail(&page->lru, &pages);
122 size_remaining -= PAGE_SIZE << compound_order(page);
123 max_order = compound_order(page);
126 table = kmalloc(sizeof(*table), GFP_KERNEL);
130 if (sg_alloc_table(table, i, GFP_KERNEL))
134 list_for_each_entry_safe(page, tmp_page, &pages, lru) {
135 sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
137 list_del(&page->lru);
140 buffer->sg_table = table;
146 list_for_each_entry_safe(page, tmp_page, &pages, lru)
147 free_buffer_page(sys_heap, buffer, page);
151 static void ion_system_heap_free(struct ion_buffer *buffer)
153 struct ion_system_heap *sys_heap = container_of(buffer->heap,
154 struct ion_system_heap,
156 struct sg_table *table = buffer->sg_table;
157 struct scatterlist *sg;
160 /* zero the buffer before goto page pool */
161 if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
162 ion_heap_buffer_zero(buffer);
164 for_each_sg(table->sgl, sg, table->nents, i)
165 free_buffer_page(sys_heap, buffer, sg_page(sg));
166 sg_free_table(table);
170 static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
173 struct ion_page_pool *pool;
174 struct ion_system_heap *sys_heap;
179 sys_heap = container_of(heap, struct ion_system_heap, heap);
184 for (i = 0; i < NUM_ORDERS; i++) {
185 pool = sys_heap->pools[i];
188 nr_total += ion_page_pool_shrink(pool,
193 nr_freed = ion_page_pool_shrink(pool,
196 nr_to_scan -= nr_freed;
197 nr_total += nr_freed;
205 static struct ion_heap_ops system_heap_ops = {
206 .allocate = ion_system_heap_allocate,
207 .free = ion_system_heap_free,
208 .map_kernel = ion_heap_map_kernel,
209 .unmap_kernel = ion_heap_unmap_kernel,
210 .map_user = ion_heap_map_user,
211 .shrink = ion_system_heap_shrink,
214 static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
218 for (i = 0; i < NUM_ORDERS; i++)
220 ion_page_pool_destroy(pools[i]);
223 static int ion_system_heap_create_pools(struct ion_page_pool **pools)
226 gfp_t gfp_flags = low_order_gfp_flags;
228 for (i = 0; i < NUM_ORDERS; i++) {
229 struct ion_page_pool *pool;
232 gfp_flags = high_order_gfp_flags;
234 pool = ion_page_pool_create(gfp_flags, orders[i]);
236 goto err_create_pool;
242 ion_system_heap_destroy_pools(pools);
246 static struct ion_heap *__ion_system_heap_create(void)
248 struct ion_system_heap *heap;
250 heap = kzalloc(sizeof(*heap), GFP_KERNEL);
252 return ERR_PTR(-ENOMEM);
253 heap->heap.ops = &system_heap_ops;
254 heap->heap.type = ION_HEAP_TYPE_SYSTEM;
255 heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;
257 if (ion_system_heap_create_pools(heap->pools))
264 return ERR_PTR(-ENOMEM);
267 static int ion_system_heap_create(void)
269 struct ion_heap *heap;
271 heap = __ion_system_heap_create();
273 return PTR_ERR(heap);
274 heap->name = "ion_system_heap";
276 ion_device_add_heap(heap);
279 device_initcall(ion_system_heap_create);
281 static int ion_system_contig_heap_allocate(struct ion_heap *heap,
282 struct ion_buffer *buffer,
286 int order = get_order(len);
288 struct sg_table *table;
292 page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
296 split_page(page, order);
298 len = PAGE_ALIGN(len);
299 for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
300 __free_page(page + i);
302 table = kmalloc(sizeof(*table), GFP_KERNEL);
308 ret = sg_alloc_table(table, 1, GFP_KERNEL);
312 sg_set_page(table->sgl, page, len, 0);
314 buffer->sg_table = table;
321 for (i = 0; i < len >> PAGE_SHIFT; i++)
322 __free_page(page + i);
327 static void ion_system_contig_heap_free(struct ion_buffer *buffer)
329 struct sg_table *table = buffer->sg_table;
330 struct page *page = sg_page(table->sgl);
331 unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
334 for (i = 0; i < pages; i++)
335 __free_page(page + i);
336 sg_free_table(table);
340 static struct ion_heap_ops kmalloc_ops = {
341 .allocate = ion_system_contig_heap_allocate,
342 .free = ion_system_contig_heap_free,
343 .map_kernel = ion_heap_map_kernel,
344 .unmap_kernel = ion_heap_unmap_kernel,
345 .map_user = ion_heap_map_user,
348 static struct ion_heap *__ion_system_contig_heap_create(void)
350 struct ion_heap *heap;
352 heap = kzalloc(sizeof(*heap), GFP_KERNEL);
354 return ERR_PTR(-ENOMEM);
355 heap->ops = &kmalloc_ops;
356 heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
357 heap->name = "ion_system_contig_heap";
/* Initcall: create the contiguous heap and register it with the ion core. */
static int ion_system_contig_heap_create(void)
{
	struct ion_heap *heap;

	heap = __ion_system_contig_heap_create();
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	ion_device_add_heap(heap);

	return 0;
}
device_initcall(ion_system_contig_heap_create);