static void amdgpu_bo_list_release_rcu(struct kref *ref)
{
- unsigned i;
struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
refcount);
+ struct amdgpu_bo_list_entry *e;
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
+ amdgpu_bo_list_for_each_entry(e, list)
+ amdgpu_bo_unref(&e->robj);
kvfree(list->array);
kfree_rcu(list, rhead);
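The two iterators this patch switches to are not defined in any hunk shown here. A minimal sketch of how they could look in amdgpu_bo_list.h, assuming the list keeps its entries in the same array/num_entries/first_userptr fields that the removed index loops reference (the real definitions may differ):

/* Illustrative sketch only: walk the entry array by pointer instead of
 * by index, covering all entries of the list. */
#define amdgpu_bo_list_for_each_entry(e, list) \
	for (e = &(list)->array[0]; \
	     e != &(list)->array[(list)->num_entries]; \
	     ++e)

/* Same walk, but starting at the first userptr BO, so callers can visit
 * only the userptr entries, as the converted loops below do. */
#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
	for (e = &(list)->array[(list)->first_userptr]; \
	     e != &(list)->array[(list)->num_entries]; \
	     ++e)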
struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
unsigned last_entry = 0, first_userptr = num_entries;
+ struct amdgpu_bo_list_entry *e;
uint64_t total_size = 0;
unsigned i;
int r;
trace_amdgpu_bo_list_set(list, entry->robj);
}
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
+ amdgpu_bo_list_for_each_entry(e, list)
+ amdgpu_bo_unref(&e->robj);
kvfree(list->array);
* concatenated in descending order.
*/
struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+ struct amdgpu_bo_list_entry *e;
unsigned i;
for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
* in the list, the sort mustn't change the ordering of buffers
* with the same priority, i.e. it must be stable.
*/
- for (i = 0; i < list->num_entries; i++) {
- unsigned priority = list->array[i].priority;
+ amdgpu_bo_list_for_each_entry(e, list) {
+ unsigned priority = e->priority;
- if (!list->array[i].robj->parent)
- list_add_tail(&list->array[i].tv.head,
- &bucket[priority]);
+ if (!e->robj->parent)
+ list_add_tail(&e->tv.head, &bucket[priority]);
- list->array[i].user_pages = NULL;
+ e->user_pages = NULL;
}
/* Connect the sorted buckets in the output list. */
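The bucket handling above is what keeps the priority sort stable: each buffer is appended to the bucket matching its priority, so entries of equal priority keep their original order, and the buckets are then spliced together. The splice loop itself is not part of this excerpt; a sketch of how that concatenation typically looks, assuming the function's output list head is named validated:

/* Sketch of the concatenation step (not shown in the hunks above).
 * list_splice() inserts each bucket at the head of the output list and
 * preserves the bucket's internal order, giving the descending, stable
 * concatenation the comments describe. */
for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
	list_splice(&bucket[i], validated);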
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- unsigned i, tries = 10;
struct amdgpu_bo *gds;
struct amdgpu_bo *gws;
struct amdgpu_bo *oa;
+ unsigned tries = 10;
int r;
INIT_LIST_HEAD(&p->validated);
while (1) {
struct list_head need_pages;
- unsigned i;
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
break;
INIT_LIST_HEAD(&need_pages);
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- struct amdgpu_bo *bo;
-
- e = &p->bo_list->array[i];
- bo = e->robj;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
if (p->bo_list) {
struct amdgpu_vm *vm = &fpriv->vm;
- unsigned i;
+ struct amdgpu_bo_list_entry *e;
gds = p->bo_list->gds_obj;
gws = p->bo_list->gws_obj;
oa = p->bo_list->oa_obj;
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
- p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
- }
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
} else {
gds = p->adev->gds.gds_gfx_bo;
gws = p->adev->gds.gws_gfx_bo;
error_free_pages:
if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- e = &p->bo_list->array[i];
-
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
if (!e->user_pages)
continue;
struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
- int i, r;
+ int r;
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
}
if (p->bo_list) {
- for (i = 0; i < p->bo_list->num_entries; i++) {
+ struct amdgpu_bo_list_entry *e;
+
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
struct dma_fence *f;
/* ignore duplicates */
- bo = p->bo_list->array[i].robj;
+ bo = e->robj;
if (!bo)
continue;
- bo_va = p->bo_list->array[i].bo_va;
+ bo_va = e->bo_va;
if (bo_va == NULL)
continue;
return r;
if (amdgpu_vm_debug && p->bo_list) {
+ struct amdgpu_bo_list_entry *e;
+
/* Invalidate all BOs to test for userspace bugs */
- for (i = 0; i < p->bo_list->num_entries; i++) {
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
/* ignore duplicates */
- bo = p->bo_list->array[i].robj;
- if (!bo)
+ if (!e->robj)
continue;
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(adev, e->robj, false);
}
}
struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
enum drm_sched_priority priority;
struct amdgpu_job *job;
- unsigned i;
uint64_t seq;
int r;
amdgpu_mn_lock(p->mn);
if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
+ struct amdgpu_bo_list_entry *e;
+
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
amdgpu_mn_unlock(p->mn);