1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
4 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Rob Clark <robclark@freedesktop.org>
32 #include "freedreno_ringbuffer.h"
35 /* represents a single cmd buffer in the submit ioctl. Each cmd buffer has
36 * a backing bo, and a reloc table.
39 struct list_head list;
41 struct fd_ringbuffer *ring;
42 struct fd_bo *ring_bo;
45 DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
50 struct msm_ringbuffer {
51 struct fd_ringbuffer base;
55 /* submit ioctl related tables:
56 * Note that bos and cmds are tracked by the parent ringbuffer, since
57 * that is global to the submit ioctl call. The reloc's table is tracked
62 DECLARE_ARRAY(struct drm_msm_gem_submit_bo, bos);
65 DECLARE_ARRAY(struct drm_msm_gem_submit_cmd, cmds);
68 /* should have matching entries in submit.bos: */
69 /* Note, only in parent ringbuffer */
70 DECLARE_ARRAY(struct fd_bo *, bos);
72 /* should have matching entries in submit.cmds: */
73 DECLARE_ARRAY(struct msm_cmd *, cmds);
75 /* List of physical cmdstream buffers (msm_cmd) assocated with this
76 * logical fd_ringbuffer.
78 * Note that this is different from msm_ringbuffer::cmds (which
79 * shadows msm_ringbuffer::submit::cmds for tracking submit ioctl
80 * related stuff, and *only* is tracked in the parent ringbuffer.
81 * And only has "completed" cmd buffers (ie. we already know the
82 * size) added via get_cmd().
84 struct list_head cmd_list;
91 /* maps fd_bo to idx: */
/* downcast fd_ringbuffer to backend-specific msm_ringbuffer (base is first member) */
static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
{
	return (struct msm_ringbuffer *)x;
}
/* forward declarations (needed before the funcs table / flush path): */
static void msm_ringbuffer_unref(struct fd_ringbuffer *ring);
static void msm_ringbuffer_ref(struct fd_ringbuffer *ring);

/* initial cmdstream buffer size for growable rings: */
#define INIT_SIZE 0x1000

/* protects bo2idx()'s per-submit bo dedup state (seqno + hash table): */
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
107 static void ring_cmd_del(struct msm_cmd *cmd)
110 fd_bo_del(cmd->ring_bo);
111 list_del(&cmd->list);
112 to_msm_ringbuffer(cmd->ring)->cmd_count--;
117 static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size)
119 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
120 struct msm_cmd *cmd = calloc(1, sizeof(*cmd));
126 cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, size, 0);
130 list_addtail(&cmd->list, &msm_ring->cmd_list);
131 msm_ring->cmd_count++;
140 static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
142 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
143 assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
144 return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
147 static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
149 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
152 idx = APPEND(&msm_ring->submit, bos);
153 idx = APPEND(msm_ring, bos);
155 msm_ring->submit.bos[idx].flags = 0;
156 msm_ring->submit.bos[idx].handle = bo->handle;
157 msm_ring->submit.bos[idx].presumed = to_msm_bo(bo)->presumed;
159 msm_ring->bos[idx] = fd_bo_ref(bo);
164 /* add (if needed) bo, return idx: */
165 static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t flags)
167 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
168 struct msm_bo *msm_bo = to_msm_bo(bo);
170 pthread_mutex_lock(&idx_lock);
171 if (msm_bo->current_ring_seqno == msm_ring->seqno) {
176 if (!msm_ring->bo_table)
177 msm_ring->bo_table = drmHashCreate();
179 if (!drmHashLookup(msm_ring->bo_table, bo->handle, &val)) {
181 idx = (uint32_t)(uintptr_t)val;
183 idx = append_bo(ring, bo);
184 val = (void *)(uintptr_t)idx;
185 drmHashInsert(msm_ring->bo_table, bo->handle, val);
187 msm_bo->current_ring_seqno = msm_ring->seqno;
190 pthread_mutex_unlock(&idx_lock);
191 if (flags & FD_RELOC_READ)
192 msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_READ;
193 if (flags & FD_RELOC_WRITE)
194 msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_WRITE;
198 static int check_cmd_bo(struct fd_ringbuffer *ring,
199 struct drm_msm_gem_submit_cmd *cmd, struct fd_bo *bo)
201 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
202 return msm_ring->submit.bos[cmd->submit_idx].handle == bo->handle;
205 /* Ensure that submit has corresponding entry in cmds table for the
206 * target cmdstream buffer:
208 * Returns TRUE if new cmd added (else FALSE if it was already in
211 static int get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
212 uint32_t submit_offset, uint32_t size, uint32_t type)
214 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
215 struct drm_msm_gem_submit_cmd *cmd;
218 /* figure out if we already have a cmd buf: */
219 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
220 cmd = &msm_ring->submit.cmds[i];
221 if ((cmd->submit_offset == submit_offset) &&
222 (cmd->size == size) &&
223 (cmd->type == type) &&
224 check_cmd_bo(ring, cmd, target_cmd->ring_bo))
228 /* create cmd buf if not: */
229 i = APPEND(&msm_ring->submit, cmds);
230 APPEND(msm_ring, cmds);
231 msm_ring->cmds[i] = target_cmd;
232 cmd = &msm_ring->submit.cmds[i];
234 cmd->submit_idx = bo2idx(ring, target_cmd->ring_bo, FD_RELOC_READ);
235 cmd->submit_offset = submit_offset;
239 target_cmd->size = size;
244 static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring)
246 return fd_bo_map(current_cmd(ring)->ring_bo);
249 static uint32_t find_next_reloc_idx(struct msm_cmd *msm_cmd,
250 uint32_t start, uint32_t offset)
254 /* a binary search would be more clever.. */
255 for (i = start; i < msm_cmd->nr_relocs; i++) {
256 struct drm_msm_gem_submit_reloc *reloc = &msm_cmd->relocs[i];
257 if (reloc->submit_offset >= offset)
264 static void delete_cmds(struct msm_ringbuffer *msm_ring)
266 struct msm_cmd *cmd, *tmp;
268 LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &msm_ring->cmd_list, list) {
273 static void flush_reset(struct fd_ringbuffer *ring)
275 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
278 for (i = 0; i < msm_ring->nr_bos; i++) {
279 struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
282 msm_bo->current_ring_seqno = 0;
283 fd_bo_del(&msm_bo->base);
286 /* for each of the cmd buffers, clear their reloc's: */
287 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
288 struct msm_cmd *target_cmd = msm_ring->cmds[i];
291 target_cmd->nr_relocs = 0;
294 msm_ring->submit.nr_cmds = 0;
295 msm_ring->submit.nr_bos = 0;
296 msm_ring->nr_cmds = 0;
297 msm_ring->nr_bos = 0;
299 if (msm_ring->bo_table) {
300 drmHashDestroy(msm_ring->bo_table);
301 msm_ring->bo_table = NULL;
304 if (msm_ring->is_growable) {
305 delete_cmds(msm_ring);
307 /* in old mode, just reset the # of relocs: */
308 current_cmd(ring)->nr_relocs = 0;
312 static void finalize_current_cmd(struct fd_ringbuffer *ring, uint32_t *last_start)
314 uint32_t submit_offset, size, type;
315 struct fd_ringbuffer *parent;
318 parent = ring->parent;
319 type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
322 type = MSM_SUBMIT_CMD_BUF;
325 submit_offset = offset_bytes(last_start, ring->start);
326 size = offset_bytes(ring->cur, last_start);
328 get_cmd(parent, current_cmd(ring), submit_offset, size, type);
331 static void dump_submit(struct msm_ringbuffer *msm_ring)
335 for (i = 0; i < msm_ring->submit.nr_bos; i++) {
336 struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
337 ERROR_MSG(" bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
339 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
340 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
341 struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
342 ERROR_MSG(" cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
343 i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
344 for (j = 0; j < cmd->nr_relocs; j++) {
345 struct drm_msm_gem_submit_reloc *r = &relocs[j];
346 ERROR_MSG(" reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
347 ", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
348 r->reloc_idx, r->reloc_offset);
353 static struct drm_msm_gem_submit_reloc *
354 handle_stateobj_relocs(struct fd_ringbuffer *parent, struct fd_ringbuffer *stateobj,
355 struct drm_msm_gem_submit_reloc *orig_relocs, unsigned nr_relocs)
357 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(stateobj);
358 struct drm_msm_gem_submit_reloc *relocs = malloc(nr_relocs * sizeof(*relocs));
361 for (i = 0; i < nr_relocs; i++) {
362 unsigned idx = orig_relocs[i].reloc_idx;
363 struct fd_bo *bo = msm_ring->bos[idx];
366 if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_READ)
367 flags |= FD_RELOC_READ;
368 if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_WRITE)
369 flags |= FD_RELOC_WRITE;
371 relocs[i] = orig_relocs[i];
372 relocs[i].reloc_idx = bo2idx(parent, bo, flags);
378 static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
379 int in_fence_fd, int *out_fence_fd)
381 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
382 struct drm_msm_gem_submit req = {
383 .flags = to_msm_pipe(ring->pipe)->pipe,
384 .queueid = to_msm_pipe(ring->pipe)->queue_id,
389 assert(!ring->parent);
391 if (in_fence_fd != -1) {
392 req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
393 req.fence_fd = in_fence_fd;
397 req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
400 finalize_current_cmd(ring, last_start);
402 /* for each of the cmd's fix up their reloc's: */
403 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
404 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
405 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
406 uint32_t a = find_next_reloc_idx(msm_cmd, 0, cmd->submit_offset);
407 uint32_t b = find_next_reloc_idx(msm_cmd, a, cmd->submit_offset + cmd->size);
408 struct drm_msm_gem_submit_reloc *relocs = &msm_cmd->relocs[a];
409 unsigned nr_relocs = (b > a) ? b - a : 0;
411 /* for reusable stateobjs, the reloc table has reloc_idx that
412 * points into it's own private bos table, rather than the global
413 * bos table used for the submit, so we need to add the stateobj's
414 * bos to the global table and construct new relocs table with
415 * corresponding reloc_idx
417 if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
418 relocs = handle_stateobj_relocs(ring, msm_cmd->ring,
422 cmd->relocs = VOID2U64(relocs);
423 cmd->nr_relocs = nr_relocs;
426 /* needs to be after get_cmd() as that could create bos/cmds table: */
427 req.bos = VOID2U64(msm_ring->submit.bos),
428 req.nr_bos = msm_ring->submit.nr_bos;
429 req.cmds = VOID2U64(msm_ring->submit.cmds),
430 req.nr_cmds = msm_ring->submit.nr_cmds;
432 DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);
434 ret = drmCommandWriteRead(ring->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
437 ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
438 dump_submit(msm_ring);
440 /* update timestamp on all rings associated with submit: */
441 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
442 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
443 msm_cmd->ring->last_timestamp = req.fence;
447 *out_fence_fd = req.fence_fd;
451 /* free dynamically constructed stateobj relocs tables: */
452 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
453 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
454 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
455 if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
456 /* we could have dropped last reference: */
457 msm_ring->cmds[i] = NULL;
459 /* need to drop ring_bo ref prior to unref'ing the ring,
460 * because ring_bo_del assumes it is dropping the *last*
463 fd_bo_del(msm_ring->bos[cmd->submit_idx]);
464 msm_ring->bos[cmd->submit_idx] = NULL;
466 msm_ringbuffer_unref(msm_cmd->ring);
467 free(U642VOID(cmd->relocs));
476 static void msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
478 assert(to_msm_ringbuffer(ring)->is_growable);
479 finalize_current_cmd(ring, ring->last_start);
480 ring_cmd_new(ring, size);
483 static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
488 static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
489 const struct fd_reloc *r)
491 struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
492 struct msm_bo *msm_bo = to_msm_bo(r->bo);
493 struct drm_msm_gem_submit_reloc *reloc;
494 struct msm_cmd *cmd = current_cmd(ring);
495 uint32_t idx = APPEND(cmd, relocs);
498 reloc = &cmd->relocs[idx];
500 reloc->reloc_idx = bo2idx(parent, r->bo, r->flags);
501 reloc->reloc_offset = r->offset;
503 reloc->shift = r->shift;
504 reloc->submit_offset = offset_bytes(ring->cur, ring->start);
506 addr = msm_bo->presumed;
507 if (reloc->shift < 0)
508 addr >>= -reloc->shift;
510 addr <<= reloc->shift;
511 (*ring->cur++) = addr | r->or;
513 if (ring->pipe->gpu_id >= 500) {
514 struct drm_msm_gem_submit_reloc *reloc_hi;
516 /* NOTE: grab reloc_idx *before* APPEND() since that could
517 * realloc() meaning that 'reloc' ptr is no longer valid:
519 uint32_t reloc_idx = reloc->reloc_idx;
521 idx = APPEND(cmd, relocs);
523 reloc_hi = &cmd->relocs[idx];
525 reloc_hi->reloc_idx = reloc_idx;
526 reloc_hi->reloc_offset = r->offset;
527 reloc_hi->or = r->orhi;
528 reloc_hi->shift = r->shift - 32;
529 reloc_hi->submit_offset = offset_bytes(ring->cur, ring->start);
531 addr = msm_bo->presumed >> 32;
532 if (reloc_hi->shift < 0)
533 addr >>= -reloc_hi->shift;
535 addr <<= reloc_hi->shift;
536 (*ring->cur++) = addr | r->orhi;
540 static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
541 struct fd_ringbuffer *target, uint32_t cmd_idx,
542 uint32_t submit_offset, uint32_t size)
544 struct msm_cmd *cmd = NULL;
546 int added_cmd = FALSE;
548 LIST_FOR_EACH_ENTRY(cmd, &to_msm_ringbuffer(target)->cmd_list, list) {
554 assert(cmd && (idx == cmd_idx));
556 if (idx < (to_msm_ringbuffer(target)->cmd_count - 1)) {
557 /* All but the last cmd buffer is fully "baked" (ie. already has
558 * done get_cmd() to add it to the cmds table). But in this case,
559 * the size we get is invalid (since it is calculated from the
564 struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
565 added_cmd = get_cmd(parent, cmd, submit_offset, size,
566 MSM_SUBMIT_CMD_IB_TARGET_BUF);
569 msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
571 .flags = FD_RELOC_READ,
572 .offset = submit_offset,
575 /* Unlike traditional ringbuffers which are deleted as a set (after
576 * being flushed), mesa can't really guarantee that a stateobj isn't
577 * destroyed after emitted but before flush, so we must hold a ref:
579 if (added_cmd && (target->flags & FD_RINGBUFFER_OBJECT)) {
580 msm_ringbuffer_ref(target);
586 static uint32_t msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
588 return to_msm_ringbuffer(ring)->cmd_count;
591 static void msm_ringbuffer_unref(struct fd_ringbuffer *ring)
593 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
595 if (!atomic_dec_and_test(&msm_ring->refcnt))
599 delete_cmds(msm_ring);
601 free(msm_ring->submit.cmds);
602 free(msm_ring->submit.bos);
604 free(msm_ring->cmds);
608 static void msm_ringbuffer_ref(struct fd_ringbuffer *ring)
610 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
611 atomic_inc(&msm_ring->refcnt);
614 static const struct fd_ringbuffer_funcs funcs = {
615 .hostptr = msm_ringbuffer_hostptr,
616 .flush = msm_ringbuffer_flush,
617 .grow = msm_ringbuffer_grow,
618 .reset = msm_ringbuffer_reset,
619 .emit_reloc = msm_ringbuffer_emit_reloc,
620 .emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
621 .cmd_count = msm_ringbuffer_cmd_count,
622 .destroy = msm_ringbuffer_unref,
625 drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
626 uint32_t size, enum fd_ringbuffer_flags flags)
628 struct msm_ringbuffer *msm_ring;
629 struct fd_ringbuffer *ring;
631 msm_ring = calloc(1, sizeof(*msm_ring));
633 ERROR_MSG("allocation failed");
638 assert(pipe->dev->version >= FD_VERSION_UNLIMITED_CMDS);
640 msm_ring->is_growable = TRUE;
643 list_inithead(&msm_ring->cmd_list);
644 msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;
645 atomic_set(&msm_ring->refcnt, 1);
647 ring = &msm_ring->base;
648 ring->funcs = &funcs;
650 ring->pipe = pipe; /* needed in ring_cmd_new() */
652 ring_cmd_new(ring, size);