[android-x86/external-libdrm.git] amdgpu/amdgpu_vamgr.c
index 877e0ba..d25d421 100644
  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  * OTHER DEALINGS IN THE SOFTWARE.
  *
-*/
+ */
 
 #include <stdlib.h>
 #include <string.h>
+#include <errno.h>
 #include "amdgpu.h"
 #include "amdgpu_drm.h"
 #include "amdgpu_internal.h"
 #include "util_math.h"
 
-void amdgpu_vamgr_init(struct amdgpu_device *dev)
+drm_public int amdgpu_va_range_query(amdgpu_device_handle dev,
+                                    enum amdgpu_gpu_va_range type,
+                                    uint64_t *start, uint64_t *end)
 {
-       struct amdgpu_bo_va_mgr *vamgr = &dev->vamgr;
+       if (type != amdgpu_gpu_va_range_general)
+               return -EINVAL;
 
-       vamgr->va_offset = dev->dev_info.virtual_address_offset;
-       vamgr->va_max = dev->dev_info.virtual_address_max;
-       vamgr->va_alignment = dev->dev_info.virtual_address_alignment;
+       *start = dev->dev_info.virtual_address_offset;
+       *end = dev->dev_info.virtual_address_max;
+       return 0;
+}
+
+drm_private void amdgpu_vamgr_init(struct amdgpu_bo_va_mgr *mgr, uint64_t start,
+                                  uint64_t max, uint64_t alignment)
+{
+       struct amdgpu_bo_va_hole *n;
+
+       mgr->va_max = max;
+       mgr->va_alignment = alignment;
 
-       list_inithead(&vamgr->va_holes);
-       pthread_mutex_init(&vamgr->bo_va_mutex, NULL);
+       list_inithead(&mgr->va_holes);
+       pthread_mutex_init(&mgr->bo_va_mutex, NULL);
+       pthread_mutex_lock(&mgr->bo_va_mutex);
+       n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+       n->size = mgr->va_max - start;
+       n->offset = start;
+       list_add(&n->list, &mgr->va_holes);
+       pthread_mutex_unlock(&mgr->bo_va_mutex);
 }
 
-uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
-                               uint64_t size, uint64_t alignment)
+drm_private void amdgpu_vamgr_deinit(struct amdgpu_bo_va_mgr *mgr)
+{
+       struct amdgpu_bo_va_hole *hole, *tmp;
+       LIST_FOR_EACH_ENTRY_SAFE(hole, tmp, &mgr->va_holes, list) {
+               list_del(&hole->list);
+               free(hole);
+       }
+       pthread_mutex_destroy(&mgr->bo_va_mutex);
+}
+
+static drm_private uint64_t
+amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr, uint64_t size,
+                    uint64_t alignment, uint64_t base_required)
 {
        struct amdgpu_bo_va_hole *hole, *n;
        uint64_t offset = 0, waste = 0;
 
+
        alignment = MAX2(alignment, mgr->va_alignment);
        size = ALIGN(size, mgr->va_alignment);
 
+       if (base_required % alignment)
+               return AMDGPU_INVALID_VA_ADDRESS;
+
        pthread_mutex_lock(&mgr->bo_va_mutex);
-       /* TODO: using more appropriate way to track the holes */
-       /* first look for a hole */
-       LIST_FOR_EACH_ENTRY_SAFE(hole, n, &mgr->va_holes, list) {
-               offset = hole->offset;
-               waste = offset % alignment;
-               waste = waste ? alignment - waste : 0;
-               offset += waste;
-               if (offset >= (hole->offset + hole->size)) {
-                       continue;
+       LIST_FOR_EACH_ENTRY_SAFE_REV(hole, n, &mgr->va_holes, list) {
+               if (base_required) {
+                       if (hole->offset > base_required ||
+                           (hole->offset + hole->size) < (base_required + size))
+                               continue;
+                       waste = base_required - hole->offset;
+                       offset = base_required;
+               } else {
+                       offset = hole->offset;
+                       waste = offset % alignment;
+                       waste = waste ? alignment - waste : 0;
+                       offset += waste;
+                       if (offset >= (hole->offset + hole->size)) {
+                               continue;
+                       }
                }
                if (!waste && hole->size == size) {
                        offset = hole->offset;
@@ -69,8 +109,7 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
                }
                if ((hole->size - waste) > size) {
                        if (waste) {
-                               n = calloc(1,
-                                               sizeof(struct amdgpu_bo_va_hole));
+                               n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
                                n->size = waste;
                                n->offset = hole->offset;
                                list_add(&n->list, &hole->list);
@@ -87,90 +126,138 @@ uint64_t amdgpu_vamgr_find_va(struct amdgpu_bo_va_mgr *mgr,
                }
        }
 
-       offset = mgr->va_offset;
-       waste = offset % alignment;
-       waste = waste ? alignment - waste : 0;
-
-       if (offset + waste + size > mgr->va_max) {
-               pthread_mutex_unlock(&mgr->bo_va_mutex);
-               return AMDGPU_INVALID_VA_ADDRESS;
-       }
-
-       if (waste) {
-               n = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-               n->size = waste;
-               n->offset = offset;
-               list_add(&n->list, &mgr->va_holes);
-       }
-       offset += waste;
-       mgr->va_offset += size + waste;
        pthread_mutex_unlock(&mgr->bo_va_mutex);
-       return offset;
+       return AMDGPU_INVALID_VA_ADDRESS;
 }
 
-void amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va,
-                           uint64_t size)
+static drm_private void
+amdgpu_vamgr_free_va(struct amdgpu_bo_va_mgr *mgr, uint64_t va, uint64_t size)
 {
-       struct amdgpu_bo_va_hole *hole;
+       struct amdgpu_bo_va_hole *hole, *next;
+
+       if (va == AMDGPU_INVALID_VA_ADDRESS)
+               return;
 
        size = ALIGN(size, mgr->va_alignment);
 
        pthread_mutex_lock(&mgr->bo_va_mutex);
-       if ((va + size) == mgr->va_offset) {
-               mgr->va_offset = va;
-               /* Delete uppermost hole if it reaches the new top */
-               if (!LIST_IS_EMPTY(&mgr->va_holes)) {
-                       hole = container_of(mgr->va_holes.next, hole, list);
-                       if ((hole->offset + hole->size) == va) {
-                               mgr->va_offset = hole->offset;
+       hole = container_of(&mgr->va_holes, hole, list);
+       LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
+               if (next->offset < va)
+                       break;
+               hole = next;
+       }
+
+       if (&hole->list != &mgr->va_holes) {
+               /* Grow upper hole if it's adjacent */
+               if (hole->offset == (va + size)) {
+                       hole->offset = va;
+                       hole->size += size;
+                       /* Merge lower hole if it's adjacent */
+                       if (next != hole &&
+                           &next->list != &mgr->va_holes &&
+                           (next->offset + next->size) == va) {
+                               next->size += hole->size;
                                list_del(&hole->list);
                                free(hole);
                        }
+                       goto out;
                }
+       }
+
+       /* Grow lower hole if it's adjacent */
+       if (next != hole && &next->list != &mgr->va_holes &&
+           (next->offset + next->size) == va) {
+               next->size += size;
+               goto out;
+       }
+
+       /* FIXME on allocation failure we just lose virtual address space
+        * maybe print a warning
+        */
+       next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
+       if (next) {
+               next->size = size;
+               next->offset = va;
+               list_add(&next->list, &hole->list);
+       }
+
+out:
+       pthread_mutex_unlock(&mgr->bo_va_mutex);
+}
+
+drm_public int amdgpu_va_range_alloc(amdgpu_device_handle dev,
+                                    enum amdgpu_gpu_va_range va_range_type,
+                                    uint64_t size,
+                                    uint64_t va_base_alignment,
+                                    uint64_t va_base_required,
+                                    uint64_t *va_base_allocated,
+                                    amdgpu_va_handle *va_range_handle,
+                                    uint64_t flags)
+{
+       struct amdgpu_bo_va_mgr *vamgr;
+
+       /* Clear the flag when the high VA manager is not initialized */
+       if (flags & AMDGPU_VA_RANGE_HIGH && !dev->vamgr_high_32.va_max)
+               flags &= ~AMDGPU_VA_RANGE_HIGH;
+
+       if (flags & AMDGPU_VA_RANGE_HIGH) {
+               if (flags & AMDGPU_VA_RANGE_32_BIT)
+                       vamgr = &dev->vamgr_high_32;
+               else
+                       vamgr = &dev->vamgr_high;
        } else {
-               struct amdgpu_bo_va_hole *next;
+               if (flags & AMDGPU_VA_RANGE_32_BIT)
+                       vamgr = &dev->vamgr_32;
+               else
+                       vamgr = &dev->vamgr;
+       }
 
-               hole = container_of(&mgr->va_holes, hole, list);
-               LIST_FOR_EACH_ENTRY(next, &mgr->va_holes, list) {
-                       if (next->offset < va)
-                               break;
-                       hole = next;
-               }
+       va_base_alignment = MAX2(va_base_alignment, vamgr->va_alignment);
+       size = ALIGN(size, vamgr->va_alignment);
 
-               if (&hole->list != &mgr->va_holes) {
-                       /* Grow upper hole if it's adjacent */
-                       if (hole->offset == (va + size)) {
-                               hole->offset = va;
-                               hole->size += size;
-                               /* Merge lower hole if it's adjacent */
-                               if (next != hole
-                                               && &next->list != &mgr->va_holes
-                                               && (next->offset + next->size) == va) {
-                                       next->size += hole->size;
-                                       list_del(&hole->list);
-                                       free(hole);
-                               }
-                               goto out;
-                       }
-               }
+       *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
+                                       va_base_alignment, va_base_required);
 
-               /* Grow lower hole if it's adjacent */
-               if (next != hole && &next->list != &mgr->va_holes &&
-                               (next->offset + next->size) == va) {
-                       next->size += size;
-                       goto out;
-               }
+       if (!(flags & AMDGPU_VA_RANGE_32_BIT) &&
+           (*va_base_allocated == AMDGPU_INVALID_VA_ADDRESS)) {
+               /* fallback to 32bit address */
+               if (flags & AMDGPU_VA_RANGE_HIGH)
+                       vamgr = &dev->vamgr_high_32;
+               else
+                       vamgr = &dev->vamgr_32;
+               *va_base_allocated = amdgpu_vamgr_find_va(vamgr, size,
+                                       va_base_alignment, va_base_required);
+       }
 
-               /* FIXME on allocation failure we just lose virtual address space
-                * maybe print a warning
-                */
-               next = calloc(1, sizeof(struct amdgpu_bo_va_hole));
-               if (next) {
-                       next->size = size;
-                       next->offset = va;
-                       list_add(&next->list, &hole->list);
+       if (*va_base_allocated != AMDGPU_INVALID_VA_ADDRESS) {
+               struct amdgpu_va *va;
+               va = calloc(1, sizeof(struct amdgpu_va));
+               if (!va) {
+                       amdgpu_vamgr_free_va(vamgr, *va_base_allocated, size);
+                       return -ENOMEM;
                }
+               va->dev = dev;
+               va->address = *va_base_allocated;
+               va->size = size;
+               va->range = va_range_type;
+               va->vamgr = vamgr;
+               *va_range_handle = va;
+       } else {
+               return -EINVAL;
        }
-out:
-       pthread_mutex_unlock(&mgr->bo_va_mutex);
+
+       return 0;
+}
+
+drm_public int amdgpu_va_range_free(amdgpu_va_handle va_range_handle)
+{
+       if (!va_range_handle || !va_range_handle->address)
+               return 0;
+
+       amdgpu_vamgr_free_va(va_range_handle->vamgr,
+                       va_range_handle->address,
+                       va_range_handle->size);
+       free(va_range_handle);
+       return 0;
 }
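
The diff above adds a small public API for reserving GPU virtual address space: amdgpu_va_range_query(), amdgpu_va_range_alloc() and amdgpu_va_range_free(). As a rough illustration (not part of the patch), a caller might use it as in the sketch below; the helper name example_reserve_va, the chosen size and alignment, and the assumption of an already-initialized amdgpu_device_handle are ours.

/* Illustrative only -- not part of the patch above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include "amdgpu.h"

static int example_reserve_va(amdgpu_device_handle dev)
{
	amdgpu_va_handle va_handle;
	uint64_t start, end, va_base;
	int r;

	/* Query the general-purpose GPU VA range supported by the device. */
	r = amdgpu_va_range_query(dev, amdgpu_gpu_va_range_general,
				  &start, &end);
	if (r)
		return r;
	printf("GPU VA range: 0x%" PRIx64 " - 0x%" PRIx64 "\n", start, end);

	/* Reserve 1 MiB of VA space, 4 KiB aligned, at no particular base. */
	r = amdgpu_va_range_alloc(dev, amdgpu_gpu_va_range_general,
				  1 << 20,	/* size */
				  1 << 12,	/* va_base_alignment */
				  0,		/* va_base_required: let the manager pick */
				  &va_base, &va_handle,
				  0);		/* flags */
	if (r)
		return r;

	/* ... map buffers into the reserved range at va_base here ... */

	/* Release the reservation when it is no longer needed. */
	return amdgpu_va_range_free(va_handle);
}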