/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>

#include "amdgpu.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"
#include "util_math.h"

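/*
 * A single global VA manager is shared by all devices. It carves GPU
 * virtual address ranges out of the window between the device's
 * virtual_address_offset and virtual_address_max, and keeps freed
 * ranges on the va_holes list for reuse.
 */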
static struct amdgpu_bo_va_mgr vamgr = {{0}};
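
/*
 * amdgpu_va_range_query() reports the GPU VA window usable by
 * userspace. Illustrative usage (assumes `dev` was obtained from
 * amdgpu_device_initialize()):
 *
 *	uint64_t start, end;
 *	if (amdgpu_va_range_query(dev, amdgpu_gpu_va_range_general,
 *				  &start, &end) == 0)
 *		printf("GPU VA space: [0x%" PRIx64 ", 0x%" PRIx64 ")\n",
 *		       start, end);
 */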
int amdgpu_va_range_query(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range type,
			  uint64_t *start, uint64_t *end)
{
	if (type == amdgpu_gpu_va_range_general) {
		*start = dev->dev_info.virtual_address_offset;
		*end = dev->dev_info.virtual_address_max;
		return 0;
	}
	return -EINVAL;
}

static void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
			      uint64_t max, uint64_t alignment)
{
	mgr->va_offset = start;
	mgr->va_max = max;
	mgr->va_alignment = alignment;

	list_inithead(&mgr->va_holes);
	pthread_mutex_init(&mgr->bo_va_mutex, NULL);
}

static void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
{
	struct amdgpu_bo_va_hole *hole, *tmp;

	/* Use the _SAFE variant: each hole is freed while iterating. */
	LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
		list_del(&hole->list);
		free(hole);
	}
	pthread_mutex_destroy(&mgr->bo_va_mutex);
}

drm_private struct amdgpu_bo_va_mgr *
amdgpu_vamgr_get_global(struct amdgpu_device *dev)
{
	int ref;

	ref = atomic_inc_return(&vamgr.refcount);

	/* Initialize the manager on the first reference only. */
	if (ref == 1)
		amdgpu_vamgr_init(&vamgr, dev->dev_info.virtual_address_offset,
				  dev->dev_info.virtual_address_max,
				  dev->dev_info.virtual_address_alignment);
	return &vamgr;
}

drm_private void
amdgpu_vamgr_reference(struct amdgpu_bo_va_mgr **dst,
		       struct amdgpu_bo_va_mgr *src)
{
	/* Drop the old reference; tear down the manager if it was the last. */
	if (update_references(&(*dst)->refcount, NULL))
		amdgpu_vamgr_deinit(*dst);
	*dst = src;
}

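/*
 * Allocate `size` bytes of GPU VA space aligned to `alignment`. If
 * base_required is non-zero, the range must start exactly there.
 * Holes left by earlier frees are scanned first; otherwise the range
 * is carved off the top of the managed space at va_offset. Returns
 * AMDGPU_INVALID_VA_ADDRESS on failure.
 */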
drm_private uint64_t
amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
		     uint64_t alignment, uint64_t base_required)
{
	struct amdgpu_bo_va_hole *hole, *n;
	uint64_t offset = 0, waste = 0;

	alignment = MAX2(alignment, mgr->va_alignment);
	size = ALIGN(size, mgr->va_alignment);

	/* A required base must itself satisfy the alignment. */
	if (base_required % alignment)
		return AMDGPU_INVALID_VA_ADDRESS;

	pthread_mutex_lock(&mgr->bo_va_mutex);
	/* TODO: use a more appropriate way to track the holes */
	/* first look for a hole */
	LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
		if (base_required) {
			if (hole->offset > base_required ||
			    (hole->offset + hole->size) < (base_required + size))
				continue;
			waste = base_required - hole->offset;
			offset = base_required;
		} else {
			offset = hole->offset;
			waste = offset % alignment;
			waste = waste ? alignment - waste : 0;
			offset += waste;
			if (offset >= (hole->offset + hole->size))
				continue;
		}

		/* Exact fit: consume the whole hole. */
		if (!waste && hole->size == size) {
			offset = hole->offset;
			list_del(&hole->list);
			free(hole);
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}

		if ((hole->size - waste) > size) {
			/* Split: keep the alignment waste as its own hole. */
			if (waste) {
				n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
				/* On allocation failure the waste is lost
				 * rather than dereferencing NULL. */
				if (n) {
					n->size = waste;
					n->offset = hole->offset;
					list_add(&n->list, &hole->list);
				}
			}
			hole->size -= (size + waste);
			hole->offset += size + waste;
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}

		if ((hole->size - waste) == size) {
			hole->size = waste;
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return offset;
		}
	}

	/* No suitable hole; allocate from the top of the space. */
	if (base_required) {
		if (base_required < mgr->va_offset) {
			pthread_mutex_unlock(&mgr->bo_va_mutex);
			return AMDGPU_INVALID_VA_ADDRESS;
		}
		offset = mgr->va_offset;
		waste = base_required - mgr->va_offset;
	} else {
		offset = mgr->va_offset;
		waste = offset % alignment;
		waste = waste ? alignment - waste : 0;
	}

	if (offset + waste + size > mgr->va_max) {
		pthread_mutex_unlock(&mgr->bo_va_mutex);
		return AMDGPU_INVALID_VA_ADDRESS;
	}

	/* Record the skipped-over alignment waste as a reusable hole. */
	if (waste) {
		n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
		if (n) {
			n->size = waste;
			n->offset = offset;
			list_add(&n->list, &mgr->va_holes);
		}
	}

	offset += waste;
	mgr->va_offset += size + waste;
	pthread_mutex_unlock(&mgr->bo_va_mutex);

	return offset;
}

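/*
 * Return a VA range to the manager. A range freed from the very top
 * simply lowers va_offset; anything else becomes a hole, merged with
 * adjacent holes where possible. The hole list is kept sorted by
 * descending offset, which the insertion scan below relies on.
 */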
drm_private void
amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
{
	struct amdgpu_bo_va_hole *hole;

	if (va == AMDGPU_INVALID_VA_ADDRESS)
		return;

	size = ALIGN(size, mgr->va_alignment);

	pthread_mutex_lock(&mgr->bo_va_mutex);
	if ((va + size) == mgr->va_offset) {
		mgr->va_offset = va;
		/* Delete uppermost hole if it reaches the new top */
		if (!LIST_IS_EMPTY(&mgr->va_holes)) {
			hole = container_of(mgr->va_holes.next, hole, list);
			if ((hole->offset + hole->size) == va) {
				mgr->va_offset = hole->offset;
				list_del(&hole->list);
				free(hole);
			}
		}
	} else {
		struct amdgpu_bo_va_hole *next;

		/* Find the last hole at or above va (list is sorted by
		 * descending offset). */
		hole = container_of(&mgr->va_holes, hole, list);
		LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
			if (next->offset < va)
				break;
			hole = next;
		}

		if (&hole->list != &mgr->va_holes) {
			/* Grow upper hole if it's adjacent */
			if (hole->offset == (va + size)) {
				hole->offset = va;
				hole->size += size;
				/* Merge lower hole if it's adjacent */
				if (next != hole &&
				    &next->list != &mgr->va_holes &&
				    (next->offset + next->size) == va) {
					next->size += hole->size;
					list_del(&hole->list);
					free(hole);
				}
				goto out;
			}
		}

		/* Grow lower hole if it's adjacent */
		if (next != hole && &next->list != &mgr->va_holes &&
		    (next->offset + next->size) == va) {
			next->size += size;
			goto out;
		}

		/* FIXME: on allocation failure we just lose virtual address
		 * space; maybe print a warning
		 */
		next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
		if (next) {
			next->size = size;
			next->offset = va;
			list_add(&next->list, &hole->list);
		}
	}
out:
	pthread_mutex_unlock(&mgr->bo_va_mutex);
}

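/*
 * Public entry point: reserve a VA range through the device's manager
 * and wrap it in an amdgpu_va handle for amdgpu_va_range_free().
 */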
int amdgpu_va_range_alloc(amdgpu_device_handle dev,
			  enum amdgpu_gpu_va_range va_range_type,
			  uint64_t size,
			  uint64_t va_base_alignment,
			  uint64_t va_base_required,
			  uint64_t *va_base_allocated,
			  amdgpu_va_handle *va_range_handle,
			  uint64_t flags)
{
	va_base_alignment = MAX2(va_base_alignment, dev->vamgr->va_alignment);
	/* Align against the device's manager, not the global instance. */
	size = ALIGN(size, dev->vamgr->va_alignment);

	*va_base_allocated = amdgpu_vamgr_find_va(dev->vamgr, size,
						  va_base_alignment,
						  va_base_required);

	if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
		struct amdgpu_va *va;

		va = calloc(1, sizeof(struct amdgpu_va));
		if (!va) {
			amdgpu_vamgr_free_va(dev->vamgr, *va_base_allocated,
					     size);
			return -ENOMEM;
		}
		va->dev = dev;
		va->address = *va_base_allocated;
		va->size = size;
		va->range = va_range_type;
		*va_range_handle = va;
	} else {
		return -EINVAL;
	}

	return 0;
}

int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
{
	if (!va_range_handle || !va_range_handle->address)
		return 0;

	amdgpu_vamgr_free_va(va_range_handle->dev->vamgr,
			     va_range_handle->address,
			     va_range_handle->size);
	free(va_range_handle);
	return 0;
}
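
/*
 * Illustrative usage of the public API above (error handling trimmed;
 * `dev` is assumed to come from amdgpu_device_initialize()):
 *
 *	uint64_t va_base;
 *	amdgpu_va_handle va_handle;
 *
 *	// Reserve a 4 KiB range: default alignment, no fixed base, no flags.
 *	if (!amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
 *				   4096, 0, 0, &va_base, &va_handle, 0)) {
 *		// ... map a BO at va_base (e.g. with amdgpu_bo_va_op),
 *		// use it, unmap it ...
 *		amdgpu_va_range_free(va_handle);
 *	}
 */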