/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>

#include "amdgpu.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"
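/* A single process-wide VA manager, shared by every device opened in this
 * process.  It is initialized lazily from the first device's reported VA
 * range in amdgpu_vamgr_get_global() and torn down when the last reference
 * is dropped via amdgpu_vamgr_reference(). */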
static struct amdgpu_bo_va_mgr vamgr = {{0}};
int amdgpu_va_range_query(amdgpu_device_handle dev,
                          enum amdgpu_gpu_va_range type,
                          uint64_t *start, uint64_t *end)
{
        if (type == amdgpu_gpu_va_range_general) {
                *start = dev->dev_info.virtual_address_offset;
                *end = dev->dev_info.virtual_address_max;
                return 0;
        }
        return -EINVAL;
}
static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr,
                              struct amdgpu_device *dev)
{
        mgr->va_offset = dev->dev_info.virtual_address_offset;
        mgr->va_max = dev->dev_info.virtual_address_max;
        mgr->va_alignment = dev->dev_info.virtual_address_alignment;

        list_inithead(&mgr->va_holes);
        pthread_mutex_init(&mgr->bo_va_mutex, NULL);
}
static void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
{
        struct amdgpu_bo_va_hole *hole, *tmp;

        /* Use the _SAFE iterator: each hole is freed while walking the list. */
        LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
                list_del(&hole->list);
                free(hole);
        }
        pthread_mutex_destroy(&mgr->bo_va_mutex);
}
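/* Acquire a reference on the global manager, initializing it from the
 * device's reported VA range on first use. */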
struct amdgpu_bo_va_mgr *amdgpu_vamgr_get_global(struct amdgpu_device *dev)
{
        int ref;

        ref = atomic_inc_return(&vamgr.refcount);
        if (ref == 1)
                amdgpu_vamgr_init(&vamgr, dev);

        return &vamgr;
}
void amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
                            struct amdgpu_bo_va_mgr *src)
{
        if (update_references(&(*dst)->refcount, NULL))
                amdgpu_vamgr_deinit(*dst);
        *dst = src;
}
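/*
 * First-fit allocator over the hole list, falling back to bumping the top
 * of the managed range.  A worked example (assuming a 4 KiB va_alignment):
 * a request of size 0x3000 with alignment 0x1000 against a hole covering
 * [0x10000, 0x18000) returns 0x10000 and shrinks the hole to
 * [0x13000, 0x18000).  Returns AMDGPU_INVALID_VA_ADDRESS on failure.
 */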
uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
                              uint64_t alignment, uint64_t base_required)
{
        struct amdgpu_bo_va_hole *hole, *n;
        uint64_t offset = 0, waste = 0;

        alignment = MAX2(alignment, mgr->va_alignment);
        size = ALIGN(size, mgr->va_alignment);

        if (base_required % alignment)
                return AMDGPU_INVALID_VA_ADDRESS;

        pthread_mutex_lock(&mgr->bo_va_mutex);
        /* TODO: use a more appropriate way to track the holes */
        /* first look for a hole */
        LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
                if (base_required) {
                        if (hole->offset > base_required ||
                            (hole->offset + hole->size) < (base_required + size))
                                continue;
                        waste = base_required - hole->offset;
                        offset = base_required;
                } else {
                        offset = hole->offset;
                        waste = offset % alignment;
                        waste = waste ? alignment - waste : 0;
                        offset += waste;
                        if (offset >= (hole->offset + hole->size)) {
                                continue;
                        }
                }
                /* Exact fit: consume the hole entirely. */
                if (!waste && hole->size == size) {
                        offset = hole->offset;
                        list_del(&hole->list);
                        free(hole);
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return offset;
                }
                /* Hole is larger than needed: carve the request out of it,
                 * keeping any alignment waste as a smaller hole below it. */
                if ((hole->size - waste) > size) {
                        if (waste) {
                                n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
                                n->size = waste;
                                n->offset = hole->offset;
                                list_add(&n->list, &hole->list);
                        }
                        hole->size -= (size + waste);
                        hole->offset += size + waste;
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return offset;
                }
                /* The tail of the hole fits exactly: only the waste remains. */
                if ((hole->size - waste) == size) {
                        hole->size = waste;
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return offset;
                }
        }
        /* No hole fits: allocate from the top of the managed range. */
        if (base_required) {
                if (base_required < mgr->va_offset) {
                        pthread_mutex_unlock(&mgr->bo_va_mutex);
                        return AMDGPU_INVALID_VA_ADDRESS;
                }
                offset = mgr->va_offset;
                waste = base_required - mgr->va_offset;
        } else {
                offset = mgr->va_offset;
                waste = offset % alignment;
                waste = waste ? alignment - waste : 0;
        }

        if (offset + waste + size > mgr->va_max) {
                pthread_mutex_unlock(&mgr->bo_va_mutex);
                return AMDGPU_INVALID_VA_ADDRESS;
        }

        /* Record any alignment waste below the returned range as a hole. */
        if (waste) {
                n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
                n->size = waste;
                n->offset = offset;
                list_add(&n->list, &mgr->va_holes);
        }

        offset += waste;
        mgr->va_offset += size + waste;
        pthread_mutex_unlock(&mgr->bo_va_mutex);
        return offset;
}
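/*
 * Return a VA range to the manager.  If the range ends at the current top,
 * the top is simply lowered (absorbing the uppermost hole if it becomes
 * adjacent); otherwise the range is inserted into the hole list, which is
 * kept sorted by descending offset, and merged with any touching holes.
 */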
void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr,
                          uint64_t va, uint64_t size)
{
        struct amdgpu_bo_va_hole *hole;

        if (va == AMDGPU_INVALID_VA_ADDRESS)
                return;

        size = ALIGN(size, mgr->va_alignment);

        pthread_mutex_lock(&mgr->bo_va_mutex);
        if ((va + size) == mgr->va_offset) {
                mgr->va_offset = va;
                /* Delete uppermost hole if it reaches the new top */
                if (!LIST_IS_EMPTY(&mgr->va_holes)) {
                        hole = container_of(mgr->va_holes.next, hole, list);
                        if ((hole->offset + hole->size) == va) {
                                mgr->va_offset = hole->offset;
                                list_del(&hole->list);
                                free(hole);
                        }
                }
        } else {
                struct amdgpu_bo_va_hole *next;

                /* Find the hole just above va; the list is sorted by
                 * descending offset. */
                hole = container_of(&mgr->va_holes, hole, list);
                LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
                        if (next->offset < va)
                                break;
                        hole = next;
                }

                if (&hole->list != &mgr->va_holes) {
                        /* Grow upper hole if it's adjacent */
                        if (hole->offset == (va + size)) {
                                hole->offset = va;
                                hole->size += size;
                                /* Merge lower hole if it's adjacent */
                                if (next != hole &&
                                    &next->list != &mgr->va_holes &&
                                    (next->offset + next->size) == va) {
                                        next->size += hole->size;
                                        list_del(&hole->list);
                                        free(hole);
                                }
                                goto out;
                        }
                }

                /* Grow lower hole if it's adjacent */
                if (next != hole && &next->list != &mgr->va_holes &&
                    (next->offset + next->size) == va) {
                        next->size += size;
                        goto out;
                }

                /* FIXME: on allocation failure we just lose virtual address
                 * space; maybe print a warning. */
                next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
                if (next) {
                        next->size = size;
                        next->offset = va;
                        list_add(&next->list, &hole->list);
                }
        }
out:
        pthread_mutex_unlock(&mgr->bo_va_mutex);
}
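/*
 * Public entry point: carve a VA range out of the device's manager and wrap
 * it in an amdgpu_va handle that amdgpu_va_range_free() can later release.
 */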
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
                          enum amdgpu_gpu_va_range va_range_type,
                          uint64_t size,
                          uint64_t va_base_alignment,
                          uint64_t va_base_required,
                          uint64_t *va_base_allocated,
                          amdgpu_va_handle *va_range_handle)
{
        va_base_alignment = MAX2(va_base_alignment, dev->vamgr->va_alignment);
        size = ALIGN(size, dev->vamgr->va_alignment);

        *va_base_allocated = amdgpu_vamgr_find_va(dev->vamgr, size,
                                                  va_base_alignment,
                                                  va_base_required);

        if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
                struct amdgpu_va *va;

                va = calloc(1, sizeof(struct amdgpu_va));
                if (!va) {
                        amdgpu_vamgr_free_va(dev->vamgr, *va_base_allocated,
                                             size);
                        return -ENOMEM;
                }
                va->dev = dev;
                va->address = *va_base_allocated;
                va->size = size;
                va->range = va_range_type;
                *va_range_handle = va;
        } else {
                return -EINVAL;
        }

        return 0;
}
int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
{
        if (!va_range_handle || !va_range_handle->address)
                return 0;

        amdgpu_vamgr_free_va(va_range_handle->dev->vamgr,
                             va_range_handle->address,
                             va_range_handle->size);
        free(va_range_handle);
        return 0;
}
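
/*
 * Typical usage, sketched with error handling elided (assumes an already
 * initialized amdgpu_device_handle `dev`; the sizes are illustrative):
 *
 *      uint64_t va;
 *      amdgpu_va_handle handle;
 *
 *      amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *                            4096, 4096, 0, &va, &handle);
 *      ... map a buffer object at `va` and use it ...
 *      amdgpu_va_range_free(handle);
 */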