/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <errno.h>
#include <pthread.h>
#include <sys/ioctl.h>
#if HAVE_ALLOCA_H
# include <alloca.h>
#endif

#include "xf86drm.h"
#include "amdgpu_drm.h"
#include "amdgpu_internal.h"

static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem);
static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem);

/**
 * Create command submission context
 *
 * \param   dev      - \c [in] Device handle. See #amdgpu_device_initialize()
 * \param   priority - \c [in] Context creation flags. See AMDGPU_CTX_PRIORITY_*
 * \param   context  - \c [out] GPU Context handle
 *
 * \return  0 on success otherwise POSIX Error code
 */
drm_public int amdgpu_cs_ctx_create2(amdgpu_device_handle dev,
				     uint32_t priority,
				     amdgpu_context_handle *context)
{
	struct amdgpu_context *gpu_context;
	union drm_amdgpu_ctx args;
	int i, j, k;
	int r;

	if (!dev || !context)
		return -EINVAL;

	gpu_context = calloc(1, sizeof(struct amdgpu_context));
	if (!gpu_context)
		return -ENOMEM;

	gpu_context->dev = dev;

	r = pthread_mutex_init(&gpu_context->sequence_mutex, NULL);
	if (r)
		goto error;

	/* Create the context */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = priority;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r)
		goto error;

	gpu_context->id = args.out.alloc.ctx_id;
	for (i = 0; i < AMDGPU_HW_IP_NUM; i++)
		for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++)
			for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++)
				list_inithead(&gpu_context->sem_list[i][j][k]);
	*context = (amdgpu_context_handle)gpu_context;

	return 0;

error:
	pthread_mutex_destroy(&gpu_context->sequence_mutex);
	free(gpu_context);
	return r;
}

drm_public int amdgpu_cs_ctx_create(amdgpu_device_handle dev,
				    amdgpu_context_handle *context)
{
	return amdgpu_cs_ctx_create2(dev, AMDGPU_CTX_PRIORITY_NORMAL, context);
}

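/*
 * Usage sketch (illustrative only, not part of the library): a typical
 * lifetime pairs amdgpu_cs_ctx_create() with amdgpu_cs_ctx_free(); the
 * `dev` handle is assumed to come from amdgpu_device_initialize().
 *
 *	amdgpu_context_handle ctx;
 *	int r = amdgpu_cs_ctx_create(dev, &ctx);
 *	if (!r) {
 *		... build and submit command streams against ctx ...
 *		amdgpu_cs_ctx_free(ctx);
 *	}
 */
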
/**
 * Release command submission context
 *
 * \param   dev     - \c [in] amdgpu device handle
 * \param   context - \c [in] amdgpu context handle
 *
 * \return  0 on success otherwise POSIX Error code
 */
drm_public int amdgpu_cs_ctx_free(amdgpu_context_handle context)
{
	union drm_amdgpu_ctx args;
	int i, j, k;
	int r;

	if (!context)
		return -EINVAL;

	pthread_mutex_destroy(&context->sequence_mutex);

	/* now deal with kernel side */
	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_FREE_CTX;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));

	for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
		for (j = 0; j < AMDGPU_HW_IP_INSTANCE_MAX_COUNT; j++) {
			for (k = 0; k < AMDGPU_CS_MAX_RINGS; k++) {
				amdgpu_semaphore_handle sem;
				LIST_FOR_EACH_ENTRY(sem, &context->sem_list[i][j][k], list) {
					list_del(&sem->list);
					amdgpu_cs_reset_sem(sem);
					amdgpu_cs_unreference_sem(sem);
				}
			}
		}
	}

	free(context);
	return r;
}

drm_public int amdgpu_cs_ctx_override_priority(amdgpu_device_handle dev,
					       amdgpu_context_handle context,
					       int master_fd,
					       unsigned priority)
{
	union drm_amdgpu_sched args;
	int r;

	if (!dev || !context || master_fd < 0)
		return -EINVAL;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE;
	args.in.fd = dev->fd;
	args.in.priority = priority;
	args.in.ctx_id = context->id;

	r = drmCommandWrite(master_fd, DRM_AMDGPU_SCHED, &args, sizeof(args));
	if (r)
		return r;

	return 0;
}

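/*
 * Illustrative call (assumes `master_fd` is a DRM-master file descriptor
 * held by e.g. a compositor; priority values come from amdgpu_drm.h):
 *
 *	r = amdgpu_cs_ctx_override_priority(dev, ctx, master_fd,
 *					    AMDGPU_CTX_PRIORITY_HIGH);
 */
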
drm_public int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
					   uint32_t *state, uint32_t *hangs)
{
	union drm_amdgpu_ctx args;
	int r;

	if (!context)
		return -EINVAL;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_QUERY_STATE;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	if (!r) {
		*state = args.out.state.reset_status;
		*hangs = args.out.state.hangs;
	}
	return r;
}

drm_public int amdgpu_cs_query_reset_state2(amdgpu_context_handle context,
					    uint64_t *flags)
{
	union drm_amdgpu_ctx args;
	int r;

	if (!context)
		return -EINVAL;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_QUERY_STATE2;
	args.in.ctx_id = context->id;
	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CTX,
				&args, sizeof(args));
	if (!r)
		*flags = args.out.state.flags;
	return r;
}

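/*
 * Illustrative check (flag names from amdgpu_drm.h): after a failed wait
 * or submission, ask whether this context was affected by a GPU reset.
 *
 *	uint64_t flags;
 *	if (!amdgpu_cs_query_reset_state2(ctx, &flags) &&
 *	    (flags & AMDGPU_CTX_QUERY2_FLAGS_RESET))
 *		... recreate the context and reupload lost resources ...
 */
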
/**
 * Submit command to kernel DRM
 * \param   dev         - \c [in] Device handle
 * \param   context     - \c [in] GPU Context
 * \param   ibs_request - \c [in] Pointer to submission requests
 * \param   fence       - \c [out] return fence for this submission
 *
 * \return  0 on success otherwise POSIX Error code
 * \sa amdgpu_cs_submit()
 */
static int amdgpu_cs_submit_one(amdgpu_context_handle context,
				struct amdgpu_cs_request *ibs_request)
{
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	struct drm_amdgpu_cs_chunk *chunks;
	struct drm_amdgpu_cs_chunk_data *chunk_data;
	struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
	struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
	struct list_head *sem_list;
	amdgpu_semaphore_handle sem, tmp;
	uint32_t i, size, sem_count = 0;
	bool user_fence;
	int r = 0;

	if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ibs_request->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	if (ibs_request->number_of_ibs == 0) {
		ibs_request->seq_no = AMDGPU_NULL_SUBMIT_SEQ;
		return 0;
	}
	user_fence = (ibs_request->fence_info.handle != NULL);

	size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;

	chunk_array = alloca(sizeof(uint64_t) * size);
	chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);

	size = ibs_request->number_of_ibs + (user_fence ? 1 : 0);

	chunk_data = alloca(sizeof(struct drm_amdgpu_cs_chunk_data) * size);

	memset(&cs, 0, sizeof(cs));
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	if (ibs_request->resources)
		cs.in.bo_list_handle = ibs_request->resources->handle;
	cs.in.num_chunks = ibs_request->number_of_ibs;

	/* IB chunks */
	for (i = 0; i < ibs_request->number_of_ibs; i++) {
		struct amdgpu_cs_ib_info *ib;
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_IB;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_ib) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		ib = &ibs_request->ibs[i];

		chunk_data[i].ib_data._pad = 0;
		chunk_data[i].ib_data.va_start = ib->ib_mc_address;
		chunk_data[i].ib_data.ib_bytes = ib->size * 4;
		chunk_data[i].ib_data.ip_type = ibs_request->ip_type;
		chunk_data[i].ib_data.ip_instance = ibs_request->ip_instance;
		chunk_data[i].ib_data.ring = ibs_request->ring;
		chunk_data[i].ib_data.flags = ib->flags;
	}

	pthread_mutex_lock(&context->sequence_mutex);

	if (user_fence) {
		i = cs.in.num_chunks++;

		/* fence chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_FENCE;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_fence) / 4;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)&chunk_data[i];

		/* fence bo handle */
		chunk_data[i].fence_data.handle = ibs_request->fence_info.handle->handle;
		/* offset */
		chunk_data[i].fence_data.offset =
			ibs_request->fence_info.offset * sizeof(uint64_t);
	}

	if (ibs_request->number_of_dependencies) {
		dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) *
			ibs_request->number_of_dependencies);
		if (!dependencies) {
			r = -ENOMEM;
			goto error_unlock;
		}

		for (i = 0; i < ibs_request->number_of_dependencies; ++i) {
			struct amdgpu_cs_fence *info = &ibs_request->dependencies[i];
			struct drm_amdgpu_cs_chunk_dep *dep = &dependencies[i];
			dep->ip_type = info->ip_type;
			dep->ip_instance = info->ip_instance;
			dep->ring = info->ring;
			dep->ctx_id = info->context->id;
			dep->handle = info->fence;
		}

		i = cs.in.num_chunks++;

		/* dependencies chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4
			* ibs_request->number_of_dependencies;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)dependencies;
	}

	sem_list = &context->sem_list[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring];
	LIST_FOR_EACH_ENTRY(sem, sem_list, list)
		sem_count++;
	if (sem_count) {
		sem_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_dep) * sem_count);
		if (!sem_dependencies) {
			r = -ENOMEM;
			goto error_unlock;
		}
		sem_count = 0;
		LIST_FOR_EACH_ENTRY_SAFE(sem, tmp, sem_list, list) {
			struct amdgpu_cs_fence *info = &sem->signal_fence;
			struct drm_amdgpu_cs_chunk_dep *dep = &sem_dependencies[sem_count++];
			dep->ip_type = info->ip_type;
			dep->ip_instance = info->ip_instance;
			dep->ring = info->ring;
			dep->ctx_id = info->context->id;
			dep->handle = info->fence;

			list_del(&sem->list);
			amdgpu_cs_reset_sem(sem);
			amdgpu_cs_unreference_sem(sem);
		}
		i = cs.in.num_chunks++;

		/* dependencies chunk */
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
		chunks[i].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES;
		chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_dep) / 4 * sem_count;
		chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
	}

	r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (r)
		goto error_unlock;

	ibs_request->seq_no = cs.out.handle;
	context->last_seq[ibs_request->ip_type][ibs_request->ip_instance][ibs_request->ring] = ibs_request->seq_no;
error_unlock:
	pthread_mutex_unlock(&context->sequence_mutex);
	free(dependencies);
	free(sem_dependencies);
	return r;
}

drm_public int amdgpu_cs_submit(amdgpu_context_handle context,
				uint64_t flags,
				struct amdgpu_cs_request *ibs_request,
				uint32_t number_of_requests)
{
	uint32_t i;
	int r;

	if (!context || !ibs_request)
		return -EINVAL;

	r = 0;
	for (i = 0; i < number_of_requests; i++) {
		r = amdgpu_cs_submit_one(context, ibs_request);
		if (r)
			break;
		ibs_request++;
	}

	return r;
}

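/*
 * Usage sketch (illustrative; assumes `ib_mc_address`/`ib_size_dw` describe
 * an already-written indirect buffer and `bo_list` covers every buffer it
 * references):
 *
 *	struct amdgpu_cs_ib_info ib = {
 *		.ib_mc_address = ib_mc_address,
 *		.size = ib_size_dw,
 *	};
 *	struct amdgpu_cs_request req = {
 *		.ip_type = AMDGPU_HW_IP_GFX,
 *		.resources = bo_list,
 *		.number_of_ibs = 1,
 *		.ibs = &ib,
 *	};
 *	r = amdgpu_cs_submit(ctx, 0, &req, 1);
 *
 * On success, req.seq_no holds the fence value for this submission.
 */
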
/**
 * Calculate absolute timeout.
 *
 * \param   timeout - \c [in] timeout in nanoseconds.
 *
 * \return  absolute timeout in nanoseconds
 */
drm_private uint64_t amdgpu_cs_calculate_timeout(uint64_t timeout)
{
	int r;

	if (timeout != AMDGPU_TIMEOUT_INFINITE) {
		struct timespec current;
		uint64_t current_ns;

		r = clock_gettime(CLOCK_MONOTONIC, &current);
		if (r) {
			fprintf(stderr, "clock_gettime() returned error (%d)!", errno);
			return AMDGPU_TIMEOUT_INFINITE;
		}

		current_ns = ((uint64_t)current.tv_sec) * 1000000000ull;
		current_ns += current.tv_nsec;
		timeout += current_ns;
		if (timeout < current_ns)
			timeout = AMDGPU_TIMEOUT_INFINITE;
	}
	return timeout;
}

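/*
 * Worked example: with the monotonic clock at 100 s (current_ns =
 * 100000000000) and a relative timeout of 2000000000 ns, the absolute
 * deadline is 102000000000 ns. The `timeout < current_ns` check catches
 * unsigned wraparound, so a relative timeout close to UINT64_MAX degrades
 * to AMDGPU_TIMEOUT_INFINITE rather than to a deadline in the past.
 */
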
static int amdgpu_ioctl_wait_cs(amdgpu_context_handle context,
				unsigned ip,
				unsigned ip_instance,
				uint32_t ring,
				uint64_t handle,
				uint64_t timeout_ns,
				uint64_t flags,
				bool *busy)
{
	amdgpu_device_handle dev = context->dev;
	union drm_amdgpu_wait_cs args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.handle = handle;
	args.in.ip_type = ip;
	args.in.ip_instance = ip_instance;
	args.in.ring = ring;
	args.in.ctx_id = context->id;

	if (flags & AMDGPU_QUERY_FENCE_TIMEOUT_IS_ABSOLUTE)
		args.in.timeout = timeout_ns;
	else
		args.in.timeout = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_CS, &args);
	if (r)
		return -errno;

	*busy = args.out.status;
	return 0;
}

drm_public int amdgpu_cs_query_fence_status(struct amdgpu_cs_fence *fence,
					    uint64_t timeout_ns,
					    uint64_t flags,
					    uint32_t *expired)
{
	bool busy = true;
	int r;

	if (!fence || !expired || !fence->context)
		return -EINVAL;
	if (fence->ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (fence->ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	if (fence->fence == AMDGPU_NULL_SUBMIT_SEQ) {
		*expired = true;
		return 0;
	}

	*expired = false;

	r = amdgpu_ioctl_wait_cs(fence->context, fence->ip_type,
				 fence->ip_instance, fence->ring,
				 fence->fence, timeout_ns, flags, &busy);

	if (!r && !busy)
		*expired = true;

	return r;
}

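/*
 * Usage sketch (illustrative): block for up to one second on the
 * submission recorded in req.seq_no by the example further above.
 *
 *	struct amdgpu_cs_fence fence = {
 *		.context = ctx,
 *		.ip_type = AMDGPU_HW_IP_GFX,
 *		.fence = req.seq_no,
 *	};
 *	uint32_t expired = 0;
 *	r = amdgpu_cs_query_fence_status(&fence, 1000000000, 0, &expired);
 */
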
static int amdgpu_ioctl_wait_fences(struct amdgpu_cs_fence *fences,
				    uint32_t fence_count,
				    bool wait_all,
				    uint64_t timeout_ns,
				    uint32_t *status,
				    uint32_t *first)
{
	struct drm_amdgpu_fence *drm_fences;
	amdgpu_device_handle dev = fences[0].context->dev;
	union drm_amdgpu_wait_fences args;
	uint32_t i;
	int r;

	drm_fences = alloca(sizeof(struct drm_amdgpu_fence) * fence_count);
	for (i = 0; i < fence_count; i++) {
		drm_fences[i].ctx_id = fences[i].context->id;
		drm_fences[i].ip_type = fences[i].ip_type;
		drm_fences[i].ip_instance = fences[i].ip_instance;
		drm_fences[i].ring = fences[i].ring;
		drm_fences[i].seq_no = fences[i].fence;
	}

	memset(&args, 0, sizeof(args));
	args.in.fences = (uint64_t)(uintptr_t)drm_fences;
	args.in.fence_count = fence_count;
	args.in.wait_all = wait_all;
	args.in.timeout_ns = amdgpu_cs_calculate_timeout(timeout_ns);

	r = drmIoctl(dev->fd, DRM_IOCTL_AMDGPU_WAIT_FENCES, &args);
	if (r)
		return -errno;

	*status = args.out.status;

	if (first)
		*first = args.out.first_signaled;

	return 0;
}

drm_public int amdgpu_cs_wait_fences(struct amdgpu_cs_fence *fences,
				     uint32_t fence_count,
				     bool wait_all,
				     uint64_t timeout_ns,
				     uint32_t *status,
				     uint32_t *first)
{
	uint32_t i;

	/* Sanity check */
	if (!fences || !status || !fence_count)
		return -EINVAL;

	for (i = 0; i < fence_count; i++) {
		if (NULL == fences[i].context)
			return -EINVAL;
		if (fences[i].ip_type >= AMDGPU_HW_IP_NUM)
			return -EINVAL;
		if (fences[i].ring >= AMDGPU_CS_MAX_RINGS)
			return -EINVAL;
	}

	*status = 0;

	return amdgpu_ioctl_wait_fences(fences, fence_count, wait_all,
					timeout_ns, status, first);
}

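/*
 * Usage sketch (illustrative): with wait_all == false the call returns as
 * soon as any fence signals and `first` reports its index; with
 * wait_all == true every fence must signal before the call returns.
 *
 *	uint32_t status = 0, first = 0;
 *	r = amdgpu_cs_wait_fences(fences, n, false, AMDGPU_TIMEOUT_INFINITE,
 *				  &status, &first);
 */
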
drm_public int amdgpu_cs_create_semaphore(amdgpu_semaphore_handle *sem)
{
	struct amdgpu_semaphore *gpu_semaphore;

	if (!sem)
		return -EINVAL;

	gpu_semaphore = calloc(1, sizeof(struct amdgpu_semaphore));
	if (!gpu_semaphore)
		return -ENOMEM;

	atomic_set(&gpu_semaphore->refcount, 1);
	*sem = gpu_semaphore;

	return 0;
}

drm_public int amdgpu_cs_signal_semaphore(amdgpu_context_handle ctx,
					  uint32_t ip_type,
					  uint32_t ip_instance,
					  uint32_t ring,
					  amdgpu_semaphore_handle sem)
{
	if (!ctx || !sem)
		return -EINVAL;
	if (ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	/* sem has been signaled */
	if (sem->signal_fence.context)
		return -EINVAL;

	pthread_mutex_lock(&ctx->sequence_mutex);
	sem->signal_fence.context = ctx;
	sem->signal_fence.ip_type = ip_type;
	sem->signal_fence.ip_instance = ip_instance;
	sem->signal_fence.ring = ring;
	sem->signal_fence.fence = ctx->last_seq[ip_type][ip_instance][ring];
	update_references(NULL, &sem->refcount);
	pthread_mutex_unlock(&ctx->sequence_mutex);

	return 0;
}

drm_public int amdgpu_cs_wait_semaphore(amdgpu_context_handle ctx,
					uint32_t ip_type,
					uint32_t ip_instance,
					uint32_t ring,
					amdgpu_semaphore_handle sem)
{
	if (!ctx || !sem)
		return -EINVAL;
	if (ip_type >= AMDGPU_HW_IP_NUM)
		return -EINVAL;
	if (ring >= AMDGPU_CS_MAX_RINGS)
		return -EINVAL;
	/* must signal first */
	if (!sem->signal_fence.context)
		return -EINVAL;

	pthread_mutex_lock(&ctx->sequence_mutex);
	list_add(&sem->list, &ctx->sem_list[ip_type][ip_instance][ring]);
	pthread_mutex_unlock(&ctx->sequence_mutex);

	return 0;
}

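/*
 * Usage sketch (illustrative): make the next compute-ring submission wait
 * for the most recent GFX submission. Signaling captures ctx->last_seq for
 * the source ring; the wait side is folded into the next submission's
 * dependency chunk by amdgpu_cs_submit_one().
 *
 *	amdgpu_semaphore_handle sem;
 *	amdgpu_cs_create_semaphore(&sem);
 *	amdgpu_cs_signal_semaphore(ctx, AMDGPU_HW_IP_GFX, 0, 0, sem);
 *	amdgpu_cs_wait_semaphore(ctx, AMDGPU_HW_IP_COMPUTE, 0, 0, sem);
 *	amdgpu_cs_destroy_semaphore(sem);
 */
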
static int amdgpu_cs_reset_sem(amdgpu_semaphore_handle sem)
{
	if (!sem || !sem->signal_fence.context)
		return -EINVAL;

	sem->signal_fence.context = NULL;
	sem->signal_fence.ip_type = 0;
	sem->signal_fence.ip_instance = 0;
	sem->signal_fence.ring = 0;
	sem->signal_fence.fence = 0;

	return 0;
}

static int amdgpu_cs_unreference_sem(amdgpu_semaphore_handle sem)
{
	if (!sem)
		return -EINVAL;

	if (update_references(&sem->refcount, NULL))
		free(sem);

	return 0;
}

drm_public int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
{
	return amdgpu_cs_unreference_sem(sem);
}

drm_public int amdgpu_cs_create_syncobj2(amdgpu_device_handle dev,
					 uint32_t flags,
					 uint32_t *handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjCreate(dev->fd, flags, handle);
}

drm_public int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
					uint32_t *handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjCreate(dev->fd, 0, handle);
}

drm_public int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
					 uint32_t handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjDestroy(dev->fd, handle);
}

drm_public int amdgpu_cs_syncobj_reset(amdgpu_device_handle dev,
				       const uint32_t *syncobjs,
				       uint32_t syncobj_count)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjReset(dev->fd, syncobjs, syncobj_count);
}

drm_public int amdgpu_cs_syncobj_signal(amdgpu_device_handle dev,
					const uint32_t *syncobjs,
					uint32_t syncobj_count)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjSignal(dev->fd, syncobjs, syncobj_count);
}

drm_public int amdgpu_cs_syncobj_timeline_signal(amdgpu_device_handle dev,
						 const uint32_t *syncobjs,
						 uint64_t *points,
						 uint32_t syncobj_count)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjTimelineSignal(dev->fd, syncobjs,
					points, syncobj_count);
}

drm_public int amdgpu_cs_syncobj_wait(amdgpu_device_handle dev,
				      uint32_t *handles, unsigned num_handles,
				      int64_t timeout_nsec, unsigned flags,
				      uint32_t *first_signaled)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjWait(dev->fd, handles, num_handles, timeout_nsec,
			      flags, first_signaled);
}

drm_public int amdgpu_cs_syncobj_timeline_wait(amdgpu_device_handle dev,
					       uint32_t *handles, uint64_t *points,
					       unsigned num_handles,
					       int64_t timeout_nsec, unsigned flags,
					       uint32_t *first_signaled)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjTimelineWait(dev->fd, handles, points, num_handles,
				      timeout_nsec, flags, first_signaled);
}

drm_public int amdgpu_cs_syncobj_query(amdgpu_device_handle dev,
				       uint32_t *handles, uint64_t *points,
				       unsigned num_handles)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjQuery(dev->fd, handles, points, num_handles);
}

drm_public int amdgpu_cs_syncobj_query2(amdgpu_device_handle dev,
					uint32_t *handles, uint64_t *points,
					unsigned num_handles, uint32_t flags)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjQuery2(dev->fd, handles, points, num_handles, flags);
}

drm_public int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
					uint32_t handle,
					int *shared_fd)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjHandleToFD(dev->fd, handle, shared_fd);
}

drm_public int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
					int shared_fd,
					uint32_t *handle)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjFDToHandle(dev->fd, shared_fd, handle);
}

drm_public int amdgpu_cs_syncobj_export_sync_file(amdgpu_device_handle dev,
						  uint32_t syncobj,
						  int *sync_file_fd)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);
}

drm_public int amdgpu_cs_syncobj_import_sync_file(amdgpu_device_handle dev,
						  uint32_t syncobj,
						  int sync_file_fd)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);
}

drm_public int amdgpu_cs_syncobj_export_sync_file2(amdgpu_device_handle dev,
						   uint32_t syncobj,
						   uint64_t point,
						   uint32_t flags,
						   int *sync_file_fd)
{
	uint32_t binary_handle;
	int ret;

	if (NULL == dev)
		return -EINVAL;

	if (!point)
		return drmSyncobjExportSyncFile(dev->fd, syncobj, sync_file_fd);

	ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
	if (ret)
		return ret;

	ret = drmSyncobjTransfer(dev->fd, binary_handle, 0,
				 syncobj, point, flags);
	if (ret)
		goto out;
	ret = drmSyncobjExportSyncFile(dev->fd, binary_handle, sync_file_fd);
out:
	drmSyncobjDestroy(dev->fd, binary_handle);
	return ret;
}

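/*
 * Design note: a sync file is binary (signaled or not) while a timeline
 * syncobj carries many points, so a specific point cannot be exported
 * directly. The helper above therefore materializes the point into a
 * temporary binary syncobj via drmSyncobjTransfer(), exports that, and
 * destroys the temporary on both the success and error paths.
 */
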
drm_public int amdgpu_cs_syncobj_import_sync_file2(amdgpu_device_handle dev,
						   uint32_t syncobj,
						   uint64_t point,
						   int sync_file_fd)
{
	uint32_t binary_handle;
	int ret;

	if (NULL == dev)
		return -EINVAL;

	if (!point)
		return drmSyncobjImportSyncFile(dev->fd, syncobj, sync_file_fd);

	ret = drmSyncobjCreate(dev->fd, 0, &binary_handle);
	if (ret)
		return ret;
	ret = drmSyncobjImportSyncFile(dev->fd, binary_handle, sync_file_fd);
	if (ret)
		goto out;
	ret = drmSyncobjTransfer(dev->fd, syncobj, point,
				 binary_handle, 0, 0);
out:
	drmSyncobjDestroy(dev->fd, binary_handle);
	return ret;
}

drm_public int amdgpu_cs_syncobj_transfer(amdgpu_device_handle dev,
					  uint32_t dst_handle,
					  uint64_t dst_point,
					  uint32_t src_handle,
					  uint64_t src_point,
					  uint32_t flags)
{
	if (NULL == dev)
		return -EINVAL;

	return drmSyncobjTransfer(dev->fd,
				  dst_handle, dst_point,
				  src_handle, src_point,
				  flags);
}

drm_public int amdgpu_cs_submit_raw(amdgpu_device_handle dev,
				    amdgpu_context_handle context,
				    amdgpu_bo_list_handle bo_list_handle,
				    int num_chunks,
				    struct drm_amdgpu_cs_chunk *chunks,
				    uint64_t *seq_no)
{
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	int i, r;

	if (num_chunks == 0)
		return -EINVAL;

	memset(&cs, 0, sizeof(cs));
	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
	for (i = 0; i < num_chunks; i++)
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	cs.in.bo_list_handle = bo_list_handle ? bo_list_handle->handle : 0;
	cs.in.num_chunks = num_chunks;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (r)
		return r;

	if (seq_no)
		*seq_no = cs.out.handle;
	return 0;
}

drm_public int amdgpu_cs_submit_raw2(amdgpu_device_handle dev,
				     amdgpu_context_handle context,
				     uint32_t bo_list_handle,
				     int num_chunks,
				     struct drm_amdgpu_cs_chunk *chunks,
				     uint64_t *seq_no)
{
	union drm_amdgpu_cs cs;
	uint64_t *chunk_array;
	int i, r;

	memset(&cs, 0, sizeof(cs));
	chunk_array = alloca(sizeof(uint64_t) * num_chunks);
	for (i = 0; i < num_chunks; i++)
		chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
	cs.in.chunks = (uint64_t)(uintptr_t)chunk_array;
	cs.in.ctx_id = context->id;
	cs.in.bo_list_handle = bo_list_handle;
	cs.in.num_chunks = num_chunks;
	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_CS,
				&cs, sizeof(cs));
	if (!r && seq_no)
		*seq_no = cs.out.handle;
	return r;
}

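/*
 * Usage sketch (illustrative): the raw entry points leave chunk
 * construction entirely to the caller. A minimal single-IB submission,
 * assuming `ib_mc_address`/`ib_size_dw` as above and `bo_list` obtained
 * from amdgpu_bo_list_create_raw(), could look like:
 *
 *	struct drm_amdgpu_cs_chunk_ib ib_chunk = {
 *		.va_start = ib_mc_address,
 *		.ib_bytes = ib_size_dw * 4,
 *		.ip_type = AMDGPU_HW_IP_GFX,
 *	};
 *	struct drm_amdgpu_cs_chunk chunk = {
 *		.chunk_id = AMDGPU_CHUNK_ID_IB,
 *		.length_dw = sizeof(ib_chunk) / 4,
 *		.chunk_data = (uint64_t)(uintptr_t)&ib_chunk,
 *	};
 *	uint64_t seq_no;
 *	r = amdgpu_cs_submit_raw2(dev, ctx, bo_list, 1, &chunk, &seq_no);
 */
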
drm_public void amdgpu_cs_chunk_fence_info_to_data(struct amdgpu_cs_fence_info *fence_info,
						   struct drm_amdgpu_cs_chunk_data *data)
{
	data->fence_data.handle = fence_info->handle->handle;
	data->fence_data.offset = fence_info->offset * sizeof(uint64_t);
}

drm_public void amdgpu_cs_chunk_fence_to_dep(struct amdgpu_cs_fence *fence,
					     struct drm_amdgpu_cs_chunk_dep *dep)
{
	dep->ip_type = fence->ip_type;
	dep->ip_instance = fence->ip_instance;
	dep->ring = fence->ring;
	dep->ctx_id = fence->context->id;
	dep->handle = fence->fence;
}

drm_public int amdgpu_cs_fence_to_handle(amdgpu_device_handle dev,
					 struct amdgpu_cs_fence *fence,
					 uint32_t what,
					 uint32_t *out_handle)
{
	union drm_amdgpu_fence_to_handle fth;
	int r;

	memset(&fth, 0, sizeof(fth));
	fth.in.fence.ctx_id = fence->context->id;
	fth.in.fence.ip_type = fence->ip_type;
	fth.in.fence.ip_instance = fence->ip_instance;
	fth.in.fence.ring = fence->ring;
	fth.in.fence.seq_no = fence->fence;
	fth.in.what = what;

	r = drmCommandWriteRead(dev->fd, DRM_AMDGPU_FENCE_TO_HANDLE,
				&fth, sizeof(fth));
	if (r == 0)
		*out_handle = fth.out.handle;
	return r;
}

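/*
 * Usage sketch (illustrative): convert a CS fence into a sync_file fd that
 * can be handed to another process or API (`what` values come from
 * amdgpu_drm.h).
 *
 *	uint32_t sync_file_fd;
 *	r = amdgpu_cs_fence_to_handle(dev, &fence,
 *				      AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD,
 *				      &sync_file_fd);
 */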