/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "amdgpu.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

static struct amdgpu_bo_va_mgr vamgr = {{0}};

int amdgpu_va_range_query(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range type, uint64_t *start, uint64_t *end)
{
	if (type == amdgpu_gpu_va_range_general) {
		*start = dev->dev_info.virtual_address_offset;
		*end = dev->dev_info.virtual_address_max;
		return 0;
	}
	return -EINVAL;
}

static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
			      uint64_t max, uint64_t alignment)
{
	mgr->va_offset = start;
	mgr->va_max = max;
	mgr->va_alignment = alignment;

	list_inithead(&mgr->va_holes);
	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
}

static void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
{
	struct amdgpu_bo_va_hole *hole, *tmp;

	/* Use the _SAFE iterator: each hole is freed while the list is walked. */
	LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
		list_del(&hole->list);
		free(hole);
	}
	pthread_mutex_destroy(&mgr->bo_va_mutex);
}

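/*
 * The VA manager is a single, process-wide instance shared by all devices.
 * It is lazily initialized when the first reference is taken and torn down
 * again once the last reference is dropped via amdgpu_vamgr_reference().
 */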
drm_private struct amdgpu_bo_va_mgr *
amdgpu_vamgr_get_global(struct amdgpu_device *dev)
{
	int ref;
	ref = atomic_inc_return(&vamgr.refcount);

	if (ref == 1)
		amdgpu_vamgr_init(&vamgr, dev->dev_info.virtual_address_offset,
				  dev->dev_info.virtual_address_max,
				  dev->dev_info.virtual_address_alignment);
	return &vamgr;
}

drm_private void
amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
		       struct amdgpu_bo_va_mgr *src)
{
	if (update_references(&(*dst)->refcount, NULL))
		amdgpu_vamgr_deinit(*dst);
	*dst = src;
}

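/*
 * Carve a block of 'size' bytes out of the managed address range using a
 * simple first-fit search over the hole list; any alignment padding in front
 * of the block is kept as a new hole.  Returns the start address of the
 * block, or AMDGPU_INVALID_VA_ADDRESS if the request cannot be satisfied.
 */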
drm_private uint64_t
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
		     uint64_t alignment, uint64_t base_required)
{
	struct amdgpu_bo_va_hole *hole, *n;
	uint64_t offset = 0, waste = 0;

	alignment = MAX2(alignment, mgr->va_alignment);
	size = ALIGN(size, mgr->va_alignment);

	if (base_required % alignment)
		return AMDGPU_INVALID_VA_ADDRESS;

	pthread_mutex_lock(&mgr->bo_va_mutex);
	/* TODO: use a more appropriate way to track the holes */
	/* first look for a hole */
	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
		if (base_required) {
			if (hole->offset > base_required ||
			    (hole->offset + hole->size) < (base_required + size))
				continue;
			waste = base_required - hole->offset;
			offset = base_required;
		} else {
			offset = hole->offset;
			waste = offset % alignment;
			waste = waste ? alignment - waste : 0;
			offset += waste;
			if (offset >= (hole->offset + hole->size)) {
				continue;
			}
		}
		if (!waste && hole->size == size) {
			offset = hole->offset;
			list_del(&hole->list);
			free(hole);
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
		if ((hole->size - waste) > size) {
			if (waste) {
				n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
				n->size = waste;
				n->offset = hole->offset;
				list_add(&n->list, &hole->list);
			}
			hole->size -= (size + waste);
			hole->offset += size + waste;
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
		if ((hole->size - waste) == size) {
			hole->size = waste;
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
	}

	if (base_required) {
		if (base_required < mgr->va_offset) {
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return AMDGPU_INVALID_VA_ADDRESS;
		}
		offset = mgr->va_offset;
		waste = base_required - mgr->va_offset;
	} else {
		offset = mgr->va_offset;
		waste = offset % alignment;
		waste = waste ? alignment - waste : 0;
	}

	if (offset + waste + size > mgr->va_max) {
		pthread_mutex_unlock(&mgr->bo_va_mutex);
		return AMDGPU_INVALID_VA_ADDRESS;
	}

	if (waste) {
		n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
		n->size = waste;
		n->offset = offset;
		list_add(&n->list, &mgr->va_holes);
	}

	offset += waste;
	mgr->va_offset += size + waste;
	pthread_mutex_unlock(&mgr->bo_va_mutex);
	return offset;
}

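/*
 * Return a block to the manager.  If it ends exactly at the current top of
 * the allocated area, the top offset is simply moved down; otherwise a hole
 * is inserted into the hole list (kept sorted by descending offset), merging
 * with adjacent holes whenever possible.
 */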
drm_private void
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
{
	struct amdgpu_bo_va_hole *hole;

	if (va == AMDGPU_INVALID_VA_ADDRESS)
		return;

	size = ALIGN(size, mgr->va_alignment);

	pthread_mutex_lock(&mgr->bo_va_mutex);
	if ((va + size) == mgr->va_offset) {
		mgr->va_offset = va;
		/* Delete uppermost hole if it reaches the new top */
		if (!LIST_IS_EMPTY(&mgr->va_holes)) {
			hole = container_of(mgr->va_holes.next, hole, list);
			if ((hole->offset + hole->size) == va) {
				mgr->va_offset = hole->offset;
				list_del(&hole->list);
				free(hole);
			}
		}
	} else {
		struct amdgpu_bo_va_hole *next;

		hole = container_of(&mgr->va_holes, hole, list);
		LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
			if (next->offset < va)
				break;
			hole = next;
		}

		if (&hole->list != &mgr->va_holes) {
			/* Grow upper hole if it's adjacent */
			if (hole->offset == (va + size)) {
				hole->offset = va;
				hole->size += size;
				/* Merge lower hole if it's adjacent */
				if (next != hole
						&& &next->list != &mgr->va_holes
						&& (next->offset + next->size) == va) {
					next->size += hole->size;
					list_del(&hole->list);
					free(hole);
				}
				goto out;
			}
		}

		/* Grow lower hole if it's adjacent */
		if (next != hole && &next->list != &mgr->va_holes &&
				(next->offset + next->size) == va) {
			next->size += size;
			goto out;
		}

		/* FIXME: on allocation failure we just lose virtual address
		 * space; maybe print a warning.
		 */
		next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
		if (next) {
			next->size = size;
			next->offset = va;
			list_add(&next->list, &hole->list);
		}
	}
out:
	pthread_mutex_unlock(&mgr->bo_va_mutex);
}

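/*
 * Public entry point for reserving a GPU virtual address range.  On success
 * the caller receives the allocated base address plus an opaque handle that
 * is later passed to amdgpu_va_range_free().
 */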
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range va_range_type,
			  uint64_t size,
			  uint64_t va_base_alignment,
			  uint64_t va_base_required,
			  uint64_t *va_base_allocated,
			  amdgpu_va_handle *va_range_handle,
			  uint64_t flags)
{
	va_base_alignment = MAX2(va_base_alignment, dev->vamgr->va_alignment);
	size = ALIGN(size, dev->vamgr->va_alignment);

	*va_base_allocated = amdgpu_vamgr_find_va(dev->vamgr, size,
					va_base_alignment, va_base_required);

	if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
		struct amdgpu_va *va;
		va = calloc(1, sizeof(struct amdgpu_va));
		if (!va) {
			amdgpu_vamgr_free_va(dev->vamgr, *va_base_allocated, size);
			return -ENOMEM;
		}
		va->dev = dev;
		va->address = *va_base_allocated;
		va->size = size;
		va->range = va_range_type;
		*va_range_handle = va;
	} else {
		return -EINVAL;
	}

	return 0;
}

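/* Release a range previously reserved with amdgpu_va_range_alloc(). */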
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
{
	if (!va_range_handle || !va_range_handle->address)
		return 0;
	amdgpu_vamgr_free_va(va_range_handle->dev->vamgr, va_range_handle->address,
			va_range_handle->size);
	free(va_range_handle);
	return 0;
}
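
/*
 * Illustrative sketch (compiled out): one possible way a client could reserve
 * and release a VA range with the API above.  The helper name is made up for
 * illustration, the device handle is assumed to come from
 * amdgpu_device_initialize(), and the 1 MiB size is arbitrary.
 */
#if 0
static int example_reserve_va(amdgpu_device_handle dev)
{
	uint64_t start, end, va_base;
	amdgpu_va_handle handle;
	int r;

	/* Query the general-purpose VA range exposed by the device. */
	r = amdgpu_va_range_query(dev, amdgpu_gpu_va_range_general,
				  &start, &end);
	if (r)
		return r;

	/* Reserve 1 MiB anywhere in that range, using the default alignment. */
	r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
				  1024 * 1024, 0, 0, &va_base, &handle, 0);
	if (r)
		return r;

	/* ... map buffer objects at va_base, e.g. via amdgpu_bo_va_op() ... */

	return amdgpu_va_range_free(handle);
}
#endif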