// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/staging/android/ion/ion_system_heap.c
 *
 * Copyright (C) 2011 Google, Inc.
 */

#include <asm/page.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ion.h"

#define NUM_ORDERS ARRAY_SIZE(orders)

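/*
 * Allocation orders this heap will try, largest first; with the common
 * 4 KiB PAGE_SIZE, orders 8/4/0 correspond to 1 MiB, 64 KiB and 4 KiB
 * chunks.  High-order attempts drop __GFP_RECLAIM and add __GFP_NORETRY
 * so they fail fast under memory pressure and the allocator can fall
 * back to a smaller order instead of stalling.
 */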
static gfp_t high_order_gfp_flags = (GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN |
				     __GFP_NORETRY) & ~__GFP_RECLAIM;
static gfp_t low_order_gfp_flags  = GFP_HIGHUSER | __GFP_ZERO;
static const unsigned int orders[] = {8, 4, 0};

static int order_to_index(unsigned int order)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (order == orders[i])
			return i;
	BUG();
	return -1;
}

static inline unsigned int order_to_size(int order)
{
	return PAGE_SIZE << order;
}

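/*
 * The system heap keeps one page pool per entry in orders[].  Freed
 * buffers are zeroed and recycled through these pools, so later
 * allocations can often be satisfied without hitting the page
 * allocator at all.
 */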
struct ion_system_heap {
	struct ion_heap heap;
	struct ion_page_pool *pools[NUM_ORDERS];
};

static struct page *alloc_buffer_page(struct ion_system_heap *heap,
				      struct ion_buffer *buffer,
				      unsigned long order)
{
	struct ion_page_pool *pool = heap->pools[order_to_index(order)];

	return ion_page_pool_alloc(pool);
}

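/*
 * compound_order() recovers the order the page was allocated with, so
 * the page can be returned to the matching pool.  Pages freed on behalf
 * of the shrinker skip the pool and go straight back to the system.
 */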
static void free_buffer_page(struct ion_system_heap *heap,
			     struct ion_buffer *buffer, struct page *page)
{
	struct ion_page_pool *pool;
	unsigned int order = compound_order(page);

	/* the shrinker is reclaiming memory: release directly to the system */
	if (buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE) {
		__free_pages(page, order);
		return;
	}

	pool = heap->pools[order_to_index(order)];

	ion_page_pool_free(pool, page);
}

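/*
 * Try orders[] largest-first, skipping any order that is bigger than
 * the remaining size or above max_order; if a high-order attempt fails,
 * simply fall through to the next smaller order.
 */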
static struct page *alloc_largest_available(struct ion_system_heap *heap,
					    struct ion_buffer *buffer,
					    unsigned long size,
					    unsigned int max_order)
{
	struct page *page;
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		if (size < order_to_size(orders[i]))
			continue;
		if (max_order < orders[i])
			continue;

		page = alloc_buffer_page(heap, buffer, orders[i]);
		if (!page)
			continue;

		return page;
	}

	return NULL;
}

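/*
 * Build the buffer out of the largest chunks available.  max_order only
 * ever decreases, so the page list stays sorted largest-first and an
 * order that has already failed is never retried.  Requests larger than
 * half of total RAM are rejected outright.
 */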
static int ion_system_heap_allocate(struct ion_heap *heap,
				    struct ion_buffer *buffer,
				    unsigned long size,
				    unsigned long flags)
{
	struct ion_system_heap *sys_heap = container_of(heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table;
	struct scatterlist *sg;
	struct list_head pages;
	struct page *page, *tmp_page;
	int i = 0;
	unsigned long size_remaining = PAGE_ALIGN(size);
	unsigned int max_order = orders[0];

	if (size / PAGE_SIZE > totalram_pages() / 2)
		return -ENOMEM;

	INIT_LIST_HEAD(&pages);
	while (size_remaining > 0) {
		page = alloc_largest_available(sys_heap, buffer, size_remaining,
					       max_order);
		if (!page)
			goto free_pages;
		list_add_tail(&page->lru, &pages);
		size_remaining -= PAGE_SIZE << compound_order(page);
		max_order = compound_order(page);
		i++;
	}
	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table)
		goto free_pages;

	if (sg_alloc_table(table, i, GFP_KERNEL))
		goto free_table;

	sg = table->sgl;
	list_for_each_entry_safe(page, tmp_page, &pages, lru) {
		sg_set_page(sg, page, PAGE_SIZE << compound_order(page), 0);
		sg = sg_next(sg);
		list_del(&page->lru);
	}

	buffer->sg_table = table;
	return 0;

free_table:
	kfree(table);
free_pages:
	list_for_each_entry_safe(page, tmp_page, &pages, lru)
		free_buffer_page(sys_heap, buffer, page);
	return -ENOMEM;
}

static void ion_system_heap_free(struct ion_buffer *buffer)
{
	struct ion_system_heap *sys_heap = container_of(buffer->heap,
							struct ion_system_heap,
							heap);
	struct sg_table *table = buffer->sg_table;
	struct scatterlist *sg;
	int i;

	/*
	 * Zero the buffer before its pages go back to the page pool so
	 * stale data cannot leak into a later allocation.  Pages being
	 * released on behalf of the shrinker skip this and are returned
	 * straight to the system instead.
	 */
	if (!(buffer->private_flags & ION_PRIV_FLAG_SHRINKER_FREE))
		ion_heap_buffer_zero(buffer);

	for_each_sg(table->sgl, sg, table->nents, i)
		free_buffer_page(sys_heap, buffer, sg_page(sg));
	sg_free_table(table);
	kfree(table);
}

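/*
 * Shrinker callback.  A zero nr_to_scan is a query: each pool is asked
 * how many pages it could release without actually freeing anything.
 * Otherwise the pools are drained in turn until nr_to_scan pages have
 * been given back or every pool is empty.
 */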
static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
				  int nr_to_scan)
{
	struct ion_page_pool *pool;
	struct ion_system_heap *sys_heap;
	int nr_total = 0;
	int i, nr_freed;
	int only_scan = 0;

	sys_heap = container_of(heap, struct ion_system_heap, heap);

	if (!nr_to_scan)
		only_scan = 1;

	for (i = 0; i < NUM_ORDERS; i++) {
		pool = sys_heap->pools[i];

		if (only_scan) {
			nr_total += ion_page_pool_shrink(pool,
							 gfp_mask,
							 nr_to_scan);

		} else {
			nr_freed = ion_page_pool_shrink(pool,
							gfp_mask,
							nr_to_scan);
			nr_to_scan -= nr_freed;
			nr_total += nr_freed;
			if (nr_to_scan <= 0)
				break;
		}
	}
	return nr_total;
}

static struct ion_heap_ops system_heap_ops = {
	.allocate = ion_system_heap_allocate,
	.free = ion_system_heap_free,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
	.shrink = ion_system_heap_shrink,
};

static void ion_system_heap_destroy_pools(struct ion_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++)
		if (pools[i])
			ion_page_pool_destroy(pools[i]);
}

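/*
 * Pick gfp flags per order: orders above 4 get the no-reclaim/no-retry
 * flags, everything else keeps the cheaper default flags.  The flags
 * are re-initialized on each iteration so the descending orders[] table
 * cannot leak the high-order flags into the smaller pools.
 */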
static int ion_system_heap_create_pools(struct ion_page_pool **pools)
{
	int i;

	for (i = 0; i < NUM_ORDERS; i++) {
		struct ion_page_pool *pool;
		gfp_t gfp_flags = low_order_gfp_flags;

		if (orders[i] > 4)
			gfp_flags = high_order_gfp_flags;

		pool = ion_page_pool_create(gfp_flags, orders[i]);
		if (!pool)
			goto err_create_pool;
		pools[i] = pool;
	}
	return 0;

err_create_pool:
	ion_system_heap_destroy_pools(pools);
	return -ENOMEM;
}

static struct ion_heap *__ion_system_heap_create(void)
{
	struct ion_system_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->heap.ops = &system_heap_ops;
	heap->heap.type = ION_HEAP_TYPE_SYSTEM;
	heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE;

	if (ion_system_heap_create_pools(heap->pools))
		goto free_heap;

	return &heap->heap;

free_heap:
	kfree(heap);
	return ERR_PTR(-ENOMEM);
}

static int ion_system_heap_create(void)
{
	struct ion_heap *heap;

	heap = __ion_system_heap_create();
	if (IS_ERR(heap))
		return PTR_ERR(heap);
	heap->name = "ion_system_heap";

	ion_device_add_heap(heap);
	return 0;
}
device_initcall(ion_system_heap_create);

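/*
 * The contiguous variant services the whole request with a single
 * alloc_pages() call.  split_page() converts the high-order block into
 * independent order-0 pages so the tail beyond the page-aligned length
 * can be returned immediately and the rest can be freed one page at a
 * time.
 */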
static int ion_system_contig_heap_allocate(struct ion_heap *heap,
					   struct ion_buffer *buffer,
					   unsigned long len,
					   unsigned long flags)
{
	int order = get_order(len);
	struct page *page;
	struct sg_table *table;
	unsigned long i;
	int ret;

	page = alloc_pages(low_order_gfp_flags | __GFP_NOWARN, order);
	if (!page)
		return -ENOMEM;

	split_page(page, order);

	len = PAGE_ALIGN(len);
	for (i = len >> PAGE_SHIFT; i < (1 << order); i++)
		__free_page(page + i);

	table = kmalloc(sizeof(*table), GFP_KERNEL);
	if (!table) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = sg_alloc_table(table, 1, GFP_KERNEL);
	if (ret)
		goto free_table;

	sg_set_page(table->sgl, page, len, 0);

	buffer->sg_table = table;

	return 0;

free_table:
	kfree(table);
free_pages:
	for (i = 0; i < len >> PAGE_SHIFT; i++)
		__free_page(page + i);

	return ret;
}

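/*
 * The allocation path split the block into order-0 pages, so the free
 * path must release them individually.
 */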
static void ion_system_contig_heap_free(struct ion_buffer *buffer)
{
	struct sg_table *table = buffer->sg_table;
	struct page *page = sg_page(table->sgl);
	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
	unsigned long i;

	for (i = 0; i < pages; i++)
		__free_page(page + i);
	sg_free_table(table);
	kfree(table);
}

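/*
 * No .shrink hook here: the contiguous heap maintains no page pool, so
 * there is nothing cached to hand back under memory pressure.
 */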
static struct ion_heap_ops kmalloc_ops = {
	.allocate = ion_system_contig_heap_allocate,
	.free = ion_system_contig_heap_free,
	.map_kernel = ion_heap_map_kernel,
	.unmap_kernel = ion_heap_unmap_kernel,
	.map_user = ion_heap_map_user,
};

static struct ion_heap *__ion_system_contig_heap_create(void)
{
	struct ion_heap *heap;

	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
	if (!heap)
		return ERR_PTR(-ENOMEM);
	heap->ops = &kmalloc_ops;
	heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG;
	heap->name = "ion_system_contig_heap";
	return heap;
}

static int ion_system_contig_heap_create(void)
{
	struct ion_heap *heap;

	heap = __ion_system_contig_heap_create();
	if (IS_ERR(heap))
		return PTR_ERR(heap);

	ion_device_add_heap(heap);
	return 0;
}
device_initcall(ion_system_contig_heap_create);