};
/**
+ * Structure describing fence information
+ *
+ * \sa amdgpu_cs_request, amdgpu_cs_fence,
+ * amdgpu_cs_submit(), amdgpu_cs_query_fence_status()
+ */
+struct amdgpu_cs_fence_info {
+ /** buffer object for the fence */
+ amdgpu_bo_handle handle;
+
+ /** fence offset, in units of sizeof(uint64_t) */
+ uint64_t offset;
+};
+
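
As an illustration (not part of the patch itself), a caller can back the user
fence with a small GTT buffer, much as amdgpu_cs_ctx_create() used to do
internally before this change. A minimal sketch, assuming a valid 'dev' handle
and a single fence slot at offset 0:

        struct amdgpu_bo_alloc_request alloc_buffer = {};
        struct amdgpu_bo_alloc_result info = {};
        struct amdgpu_cs_fence_info fence_info = {};
        void *fence_cpu;        /* CPU view of the fence value */
        int r;

        /* One page of GTT is plenty for a few 64-bit fence slots. */
        alloc_buffer.alloc_size = 4 * 1024;
        alloc_buffer.phys_alignment = 4 * 1024;
        alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;

        r = amdgpu_bo_alloc(dev, &alloc_buffer, &info);
        if (r)
                return r;

        /* Optional: map the BO so the fence value can be polled directly. */
        r = amdgpu_bo_cpu_map(info.buf_handle, &fence_cpu);
        if (r)
                return r;

        fence_info.handle = info.buf_handle;
        fence_info.offset = 0;  /* first uint64_t slot in the buffer */
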
+/**
* Structure describing submission request
*
* \note We could have several IBs per packet, e.g. the CE, CE, DE case for
* gfx. Those IBs will be submitted together as a single entity.
*/
struct amdgpu_cs_ib_info *ibs;
+
+ /**
+ * The returned sequence number for the command submission
+ */
+ uint64_t seq_no;
+
+ /**
+ * The fence information
+ */
+ struct amdgpu_cs_fence_info fence_info;
};
/**
* from the same GPU context to the same ip:ip_instance:ring will be executed in
* order.
*
+ * The caller can specify the user fence buffer/location via fence_info in
+ * the cs_request. The sequence number is returned via the 'seq_no' member
+ * of the ibs_request structure.
*
* \param context - \c [in] GPU Context
* \param flags - \c [in] Global submission flags
- * \param ibs_request - \c [in] Pointer to submission requests.
+ * \param ibs_request - \c [in/out] Pointer to submission requests.
* We could submit to several
* engines/rings simultaneously as an
* 'atomic' operation
* \param number_of_requests - \c [in] Number of submission requests
- * \param fences - \c [out] Pointer to array of data to get
- * fences to identify submission
- * requests. Timestamps are valid
- * in this GPU context and could be used
- * to identify/detect completion of
- * submission request
*
* \return 0 on success\n
* <0 - Negative POSIX Error code
int amdgpu_cs_submit(amdgpu_context_handle context,
uint64_t flags,
struct amdgpu_cs_request *ibs_request,
- uint32_t number_of_requests,
- uint64_t *fences);
+ uint32_t number_of_requests);
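
With the 'fences' array gone, the call pattern matches what the updated tests
below do: submit, then feed the returned seq_no into an amdgpu_cs_fence for
the status query. A minimal sketch, assuming ib_info, bo_list and
context_handle are already set up:

        struct amdgpu_cs_request ibs_request = {};
        struct amdgpu_cs_fence fence_status = {};
        uint32_t expired;
        int r;

        ibs_request.ip_type = AMDGPU_HW_IP_GFX;
        ibs_request.number_of_ibs = 1;
        ibs_request.ibs = &ib_info;
        ibs_request.resources = bo_list;
        ibs_request.fence_info.handle = NULL;   /* no user fence */

        r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
        if (r)
                return r;

        /* The sequence number now comes back in the request itself. */
        fence_status.context = context_handle;
        fence_status.ip_type = AMDGPU_HW_IP_GFX;
        fence_status.fence = ibs_request.seq_no;

        r = amdgpu_cs_query_fence_status(&fence_status,
                                         AMDGPU_TIMEOUT_INFINITE,
                                         0, &expired);
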
/**
* Query status of Command Buffer Submission
int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
amdgpu_context_handle *context)
{
- struct amdgpu_bo_alloc_request alloc_buffer = {};
- struct amdgpu_bo_alloc_result info = {};
struct amdgpu_context *gpu_context;
union drm_amdgpu_ctx args;
int r;
r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
if (r)
- goto error_mutex;
-
- /* Create the fence BO */
- alloc_buffer.alloc_size = 4 * 1024;
- alloc_buffer.phys_alignment = 4 * 1024;
- alloc_buffer.preferred_heap = AMDGPU_GEM_DOMAIN_GTT;
-
- r = amdgpu_bo_alloc(dev, &alloc_buffer, &info);
- if (r)
- goto error_fence_alloc;
- gpu_context->fence_bo = info.buf_handle;
-
- r = amdgpu_bo_cpu_map(gpu_context->fence_bo, &gpu_context->fence_cpu);
- if (r)
- goto error_fence_map;
+ goto error;
/* Create the context */
memset(&args, 0, sizeof(args));
args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
if (r)
- goto error_kernel;
+ goto error;
gpu_context->id = args.out.alloc.ctx_id;
*context = (amdgpu_context_handle)gpu_context;
return 0;
-error_kernel:
- amdgpu_bo_cpu_unmap(gpu_context->fence_bo);
-
-error_fence_map:
- amdgpu_bo_free(gpu_context->fence_bo);
-
-error_fence_alloc:
+error:
pthread_mutex_destroy(&gpu_context->sequence_mutex);
-
-error_mutex:
free(gpu_context);
return r;
}
if (NULL == context)
return -EINVAL;
- r = amdgpu_bo_cpu_unmap(context->fence_bo);
- if (r)
- return r;
-
- r = amdgpu_bo_free(context->fence_bo);
- if (r)
- return r;
-
pthread_mutex_destroy(&context->sequence_mutex);
/* now deal with kernel side */
return r;
}
-static uint32_t amdgpu_cs_fence_index(unsigned ip, unsigned ring)
-{
- return ip * AMDGPU_CS_MAX_RINGS + ring;
-}
-
/**
* Submit command to kernel DRM
* \param dev - \c [in] Device handle
* \sa amdgpu_cs_submit()
*/
static int amdgpu_cs_submit_one(amdgpu_context_handle context,
- struct amdgpu_cs_request *ibs_request,
- uint64_t *fence)
+ struct amdgpu_cs_request *ibs_request)
{
union drm_amdgpu_cs cs;
uint64_t *chunk_array;
struct drm_amdgpu_cs_chunk_data *chunk_data;
struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
uint32_t i, size;
+ bool user_fence;
int r = 0;
if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
return -EINVAL;
if (ibs_request->number_of_ibs > AMDGPU_CS_MAX_IBS_PER_SUBMIT)
return -EINVAL;
+ user_fence = (ibs_request->fence_info.handle != NULL);
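+
+ /* Worst-case chunk count: one chunk per IB, plus one for the
+ * dependencies chunk and, if a user fence buffer was supplied,
+ * one for the fence chunk. */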
- size = ibs_request->number_of_ibs + 2;
+ size = ibs_request->number_of_ibs + (user_fence ? 2 : 1);
chunk_array = alloca(sizeof(uint64_t) * size);
chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
- size = ibs_request->number_of_ibs + 1;
+ size = ibs_request->number_of_ibs + (user_fence ? 1 : 0);
+
chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);
memset(&cs, 0, sizeof(cs));
pthread_mutex_lock(&context->sequence_mutex);
- if (ibs_request->ip_type != AMDGPU_HW_IP_UVD &&
- ibs_request->ip_type != AMDGPU_HW_IP_VCE) {
+ if (user_fence) {
i = cs.in.num_chunks++;
/* fence chunk */
chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];
/* fence bo handle */
- chunk_data[i].fence_data.handle = context->fence_bo->handle;
+ chunk_data[i].fence_data.handle = ibs_request->fence_info.handle->handle;
/* offset */
- chunk_data[i].fence_data.offset = amdgpu_cs_fence_index(
- ibs_request->ip_type, ibs_request->ring);
- chunk_data[i].fence_data.offset *= sizeof(uint64_t);
+ chunk_data[i].fence_data.offset =
+ ibs_request->fence_info.offset * sizeof(uint64_t);
}
if (ibs_request->number_of_dependencies) {
if (r)
goto error_unlock;
- *fence = cs.out.handle;
+ ibs_request->seq_no = cs.out.handle;
error_unlock:
pthread_mutex_unlock(&context->sequence_mutex);
int amdgpu_cs_submit(amdgpu_context_handle context,
uint64_t flags,
struct amdgpu_cs_request *ibs_request,
- uint32_t number_of_requests,
- uint64_t *fences)
+ uint32_t number_of_requests)
{
uint32_t i;
int r;
if (NULL == context)
return -EINVAL;
if (NULL == ibs_request)
return -EINVAL;
- if (NULL == fences)
- return -EINVAL;
r = 0;
for (i = 0; i < number_of_requests; i++) {
- r = amdgpu_cs_submit_one(context, ibs_request, fences);
+ r = amdgpu_cs_submit_one(context, ibs_request);
if (r)
break;
- fences++;
ibs_request++;
}
uint64_t flags,
uint32_t *expired)
{
- amdgpu_context_handle context;
- uint64_t *expired_fence;
- unsigned ip_type, ip_instance;
- uint32_t ring;
bool busy = true;
int r;
if (fence->ring >= AMDGPU_CS_MAX_RINGS)
return -EINVAL;
- context = fence->context;
- ip_type = fence->ip_type;
- ip_instance = fence->ip_instance;
- ring = fence->ring;
- expired_fence = &context->expired_fences[ip_type][ip_instance][ring];
*expired = false;
- pthread_mutex_lock(&context->sequence_mutex);
- if (fence->fence <= *expired_fence) {
- /* This fence value is expired already. */
- pthread_mutex_unlock(&context->sequence_mutex);
- *expired = true;
- return 0;
- }
-
- /* Check the user fence only if the IP supports user fences. */
- if (fence->ip_type != AMDGPU_HW_IP_UVD &&
- fence->ip_type != AMDGPU_HW_IP_VCE) {
- uint64_t *signaled_fence = context->fence_cpu;
- signaled_fence += amdgpu_cs_fence_index(ip_type, ring);
-
- if (fence->fence <= *signaled_fence) {
- /* This fence value is signaled already. */
- *expired_fence = *signaled_fence;
- pthread_mutex_unlock(&context->sequence_mutex);
- *expired = true;
- return 0;
- }
-
- /* Checking the user fence is enough. */
- if (timeout_ns == 0) {
- pthread_mutex_unlock(&context->sequence_mutex);
- return 0;
- }
- }
+ r = amdgpu_ioctl_wait_cs(fence->context, fence->ip_type,
+ fence->ip_instance, fence->ring,
+ fence->fence, timeout_ns, flags, &busy);
- pthread_mutex_unlock(&context->sequence_mutex);
-
- r = amdgpu_ioctl_wait_cs(context, ip_type, ip_instance, ring,
- fence->fence, timeout_ns,
- flags, &busy);
- if (!r && !busy) {
+ if (!r && !busy)
*expired = true;
- pthread_mutex_lock(&context->sequence_mutex);
- /* The thread doesn't hold sequence_mutex. Other thread could
- update *expired_fence already. Check whether there is a
- newerly expired fence. */
- if (fence->fence > *expired_fence)
- *expired_fence = fence->fence;
- pthread_mutex_unlock(&context->sequence_mutex);
- }
return r;
}
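
The CPU-side fast path disappears together with the context-owned fence
buffer. A caller that supplied its own fence_info can still skip the ioctl
for already-signaled fences by polling the mapped fence slot, roughly as the
removed code did. A sketch, assuming 'fence_cpu' is the caller's CPU mapping
of the fence BO (the kernel writes the fence sequence number there):

        uint64_t *signaled = (uint64_t *)fence_cpu + fence_info.offset;

        if (*signaled >= ibs_request.seq_no) {
                /* Already signaled; no need to enter the kernel. */
                *expired = true;
                return 0;
        }
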
/** Mutex for accessing fences and to maintain command submissions
in good sequence. */
pthread_mutex_t sequence_mutex;
- /** Buffer for user fences */
- struct amdgpu_bo *fence_bo;
- void *fence_cpu;
- /** The newest expired fence for the ring of the ip blocks. */
- uint64_t expired_fences[AMDGPU_HW_IP_NUM][AMDGPU_HW_IP_INSTANCE_MAX_COUNT][AMDGPU_CS_MAX_RINGS];
/* context id*/
uint32_t id;
};
ibs_request.number_of_ibs = 2;
ibs_request.ibs = ib_info;
ibs_request.resources = bo_list;
+ ibs_request.fence_info.handle = NULL;
+
+ r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
- r = amdgpu_cs_submit(context_handle, 0,
- &ibs_request, 1, &fence_status.fence);
CU_ASSERT_EQUAL(r, 0);
fence_status.context = context_handle;
fence_status.ip_type = AMDGPU_HW_IP_GFX;
+ fence_status.fence = ibs_request.seq_no;
r = amdgpu_cs_query_fence_status(&fence_status,
AMDGPU_TIMEOUT_INFINITE,
r = amdgpu_cs_ctx_free(context_handle);
CU_ASSERT_EQUAL(r, 0);
}
static void amdgpu_command_submission_gfx_shared_ib(void)
ibs_request.number_of_ibs = 2;
ibs_request.ibs = ib_info;
ibs_request.resources = bo_list;
+ ibs_request.fence_info.handle = NULL;
+
+ r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
- r = amdgpu_cs_submit(context_handle, 0,
- &ibs_request, 1, &fence_status.fence);
CU_ASSERT_EQUAL(r, 0);
fence_status.context = context_handle;
fence_status.ip_type = AMDGPU_HW_IP_GFX;
+ fence_status.fence = ibs_request.seq_no;
r = amdgpu_cs_query_fence_status(&fence_status,
AMDGPU_TIMEOUT_INFINITE,
ibs_request.number_of_ibs = 1;
ibs_request.ibs = &ib_info;
ibs_request.resources = bo_list;
+ ibs_request.fence_info.handle = NULL;
memset(&fence_status, 0, sizeof(struct amdgpu_cs_fence));
- r = amdgpu_cs_submit(context_handle, 0,
- &ibs_request, 1, &fence_status.fence);
+ r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
CU_ASSERT_EQUAL(r, 0);
fence_status.context = context_handle;
fence_status.ip_type = AMDGPU_HW_IP_COMPUTE;
fence_status.ring = instance;
+ fence_status.fence = ibs_request.seq_no;
r = amdgpu_cs_query_fence_status(&fence_status,
AMDGPU_TIMEOUT_INFINITE,
ibs_request->ring = instance;
ibs_request->number_of_ibs = 1;
ibs_request->ibs = ib_info;
+ ibs_request->fence_info.handle = NULL;
memcpy(all_res, resources, sizeof(resources[0]) * res_cnt);
all_res[res_cnt] = ib_result_handle;
CU_ASSERT_NOT_EQUAL(ibs_request, NULL);
/* submit CS */
- r = amdgpu_cs_submit(context_handle, 0,
- ibs_request, 1, &fence_status.fence);
+ r = amdgpu_cs_submit(context_handle, 0, ibs_request, 1);
CU_ASSERT_EQUAL(r, 0);
r = amdgpu_bo_list_destroy(ibs_request->resources);
fence_status.ip_type = AMDGPU_HW_IP_DMA;
fence_status.ring = ibs_request->ring;
fence_status.context = context_handle;
+ fence_status.fence = ibs_request->seq_no;
/* wait for IB accomplished */
r = amdgpu_cs_query_fence_status(&fence_status,
ibs_request.number_of_ibs = 1;
ibs_request.ibs = &ib_info;
- r = amdgpu_cs_submit(context_handle, 0,
- &ibs_request, 1, &fence_status.fence);
+ r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
if (r)
return r;
ibs_request.number_of_ibs = 1;
ibs_request.ibs = &ib_info;
- r = amdgpu_cs_submit(context_handle, 0,
- &ibs_request, 1, &fence_status.fence);
+ r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
if (r)
return r;