1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
4 * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Rob Clark <robclark@freedesktop.org>
32 #include "xf86atomic.h"
33 #include "freedreno_ringbuffer.h"
36 /* represents a single cmd buffer in the submit ioctl. Each cmd buffer has
37 * a backing bo, and a reloc table.
/* link in the owning msm_ringbuffer's cmd_list: */
40 struct list_head list;
/* the logical fd_ringbuffer this chunk of cmdstream belongs to: */
42 struct fd_ringbuffer *ring;
/* backing buffer object that holds the actual cmdstream words: */
43 struct fd_bo *ring_bo;
/* per-cmd reloc table, translated into the submit ioctl at flush time: */
46 DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);
50 /* has cmd already been added to parent rb's submit.cmds table? */
51 int is_appended_to_submit;
/* Per-ringbuffer state.  The parent (flushable) ringbuffer additionally
 * accumulates the submit-ioctl-global bos/cmds tables for all child
 * rings/stateobjs referenced from it.
 */
54 struct msm_ringbuffer {
55 struct fd_ringbuffer base;
57 /* submit ioctl related tables:
58 * Note that bos and cmds are tracked by the parent ringbuffer, since
59 * that is global to the submit ioctl call. The reloc's table is tracked
/* table of buffer objects referenced by the submit (kernel-visible form): */
64 DECLARE_ARRAY(struct drm_msm_gem_submit_bo, bos);
/* table of cmdstream buffers for the submit (kernel-visible form): */
67 DECLARE_ARRAY(struct drm_msm_gem_submit_cmd, cmds);
70 /* should have matching entries in submit.bos: */
71 /* Note, only in parent ringbuffer */
72 DECLARE_ARRAY(struct fd_bo *, bos);
74 /* should have matching entries in submit.cmds: */
75 DECLARE_ARRAY(struct msm_cmd *, cmds);
77 /* List of physical cmdstream buffers (msm_cmd) associated with this
78 * logical fd_ringbuffer.
80 * Note that this is different from msm_ringbuffer::cmds (which
81 * shadows msm_ringbuffer::submit::cmds for tracking submit ioctl
82 * related stuff, and *only* is tracked in the parent ringbuffer.
83 * And only has "completed" cmd buffers (ie. we already know the
84 * size) added via get_cmd().
86 struct list_head cmd_list;
91 unsigned offset; /* for sub-allocated stateobj rb's */
95 /* maps fd_bo to idx: */
98 /* maps msm_cmd to drm_msm_gem_submit_cmd in parent rb. Each rb has a
99 * list of msm_cmd's which correspond to each chunk of cmdstream in
100 * a 'growable' rb. For each of those we need to create one
101 * drm_msm_gem_submit_cmd in the parent rb which collects the state
102 * for the submit ioctl. Because we can have multiple IB's to the same
103 * target rb (for example, or same stateobj emitted multiple times), and
104 * because in theory we can have multiple different rb's that have a
105 * reference to a given target, we need a hashtable to track this per
/* Downcast helper: valid because msm_ringbuffer embeds fd_ringbuffer as
 * its first member ('base').
 */
111 static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
113 return (struct msm_ringbuffer *)x;
/* default/initial ring allocation size — presumably one page; confirm usage
 * elsewhere in the file (not visible here):
 */
116 #define INIT_SIZE 0x1000
/* serializes bo2idx(): protects the per-bo current_ring_seqno check/update
 * and the bo_table hashtable insertion across threads:
 */
118 static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
/* Return the newest (tail) cmd buffer on the ring's cmd_list — the one
 * currently being written to.  The list must be non-empty.
 */
120 static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
122 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
123 assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
124 return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
/* Tear down one cmd buffer: drop the ref on its backing bo, unlink it from
 * the owning ring's cmd_list, and keep that ring's cmd_count in sync.
 */
127 static void ring_cmd_del(struct msm_cmd *cmd)
129 fd_bo_del(cmd->ring_bo);
130 list_del(&cmd->list);
131 to_msm_ringbuffer(cmd->ring)->cmd_count--;
/* Allocate a new cmd buffer for 'ring' and append it to the ring's
 * cmd_list.  STREAMING rings sub-allocate out of a shared, pipe-wide
 * suballoc bo (bumping msm_ring->offset) instead of getting a dedicated
 * backing bo, to reduce bo churn for small transient stateobjs.
 */
136 static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size,
137 enum fd_ringbuffer_flags flags)
139 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
140 struct msm_cmd *cmd = calloc(1, sizeof(*cmd));
147 /* TODO separate suballoc buffer for small non-streaming state, using
148 * smaller page-sized backing bo's.
150 if (flags & FD_RINGBUFFER_STREAMING) {
151 struct msm_pipe *msm_pipe = to_msm_pipe(ring->pipe);
152 unsigned suballoc_offset = 0;
153 struct fd_bo *suballoc_bo = NULL;
/* if there is already a suballoc ring, continue packing into its bo: */
155 if (msm_pipe->suballoc_ring) {
156 struct msm_ringbuffer *suballoc_ring = to_msm_ringbuffer(msm_pipe->suballoc_ring);
158 assert(msm_pipe->suballoc_ring->flags & FD_RINGBUFFER_OBJECT);
159 assert(suballoc_ring->cmd_count == 1);
161 suballoc_bo = current_cmd(msm_pipe->suballoc_ring)->ring_bo;
/* next free offset = used size of previous suballoc ring + its base offset: */
163 suballoc_offset = fd_ringbuffer_size(msm_pipe->suballoc_ring) +
164 suballoc_ring->offset;
166 suballoc_offset = ALIGN(suballoc_offset, 0x10);
/* doesn't fit in the current suballoc bo (or there is none) — start a fresh one: */
168 if ((size + suballoc_offset) > suballoc_bo->size) {
174 cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, 0x8000, 0);
175 msm_ring->offset = 0;
/* else: share the existing suballoc bo at the aligned offset: */
177 cmd->ring_bo = fd_bo_ref(suballoc_bo);
178 msm_ring->offset = suballoc_offset;
/* this ring becomes the pipe's new suballoc ring; drop ref on the old one: */
181 if (msm_pipe->suballoc_ring)
182 fd_ringbuffer_del(msm_pipe->suballoc_ring);
184 msm_pipe->suballoc_ring = fd_ringbuffer_ref(ring);
/* non-streaming path: dedicated backing bo of the requested size: */
186 cmd->ring_bo = fd_bo_new_ring(ring->pipe->dev, size, 0);
191 list_addtail(&cmd->list, &msm_ring->cmd_list);
192 msm_ring->cmd_count++;
/* Append 'bo' to both parallel bo tables of the (parent) ring — the
 * kernel-visible submit.bos entry and the libdrm-side bos[] ref table —
 * and return the shared index.  The two APPEND()s are expected to yield
 * the same idx since the tables grow in lockstep.
 */
201 static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
203 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
206 idx = APPEND(&msm_ring->submit, bos);
207 idx = APPEND(msm_ring, bos);
/* flags (READ/WRITE) are OR'd in later by bo2idx(): */
209 msm_ring->submit.bos[idx].flags = 0;
210 msm_ring->submit.bos[idx].handle = bo->handle;
211 msm_ring->submit.bos[idx].presumed = to_msm_bo(bo)->presumed;
/* hold a ref for the lifetime of the submit (released in flush_reset()): */
213 msm_ring->bos[idx] = fd_bo_ref(bo);
218 /* add (if needed) bo, return idx: */
219 static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t flags)
221 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
222 struct msm_bo *msm_bo = to_msm_bo(bo);
/* idx_lock guards both the per-bo seqno fast path and the hashtable: */
224 pthread_mutex_lock(&idx_lock);
/* fast path: bo already added to this submit (seqno matches this ring): */
225 if (msm_bo->current_ring_seqno == msm_ring->seqno) {
/* lazily create handle->idx hashtable on first use per submit: */
230 if (!msm_ring->bo_table)
231 msm_ring->bo_table = drmHashCreate();
/* drmHashLookup returns 0 on hit: */
233 if (!drmHashLookup(msm_ring->bo_table, bo->handle, &val)) {
235 idx = (uint32_t)(uintptr_t)val;
/* miss: append to the submit tables and remember the idx: */
237 idx = append_bo(ring, bo);
238 val = (void *)(uintptr_t)idx;
239 drmHashInsert(msm_ring->bo_table, bo->handle, val);
241 msm_bo->current_ring_seqno = msm_ring->seqno;
244 pthread_mutex_unlock(&idx_lock);
/* accumulate access flags; a bo can be read by one reloc and written by another: */
245 if (flags & FD_RELOC_READ)
246 msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_READ;
247 if (flags & FD_RELOC_WRITE)
248 msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_WRITE;
252 /* Ensure that submit has corresponding entry in cmds table for the
253 * target cmdstream buffer:
255 * Returns TRUE if new cmd added (else FALSE if it was already in
258 static int get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
259 uint32_t submit_offset, uint32_t size, uint32_t type)
261 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
262 struct drm_msm_gem_submit_cmd *cmd;
/* lazily create the msm_cmd -> submit.cmds dedup hashtable: */
266 if (!msm_ring->cmd_table)
267 msm_ring->cmd_table = drmHashCreate();
269 /* figure out if we already have a cmd buf.. short-circuit hash
271 * - target cmd has never been added to submit.cmds
272 * - target cmd is not a streaming stateobj (which unlike longer
273 * lived CSO stateobj, is not expected to be reused with multiple
276 if (target_cmd->is_appended_to_submit &&
277 !(target_cmd->ring->flags & FD_RINGBUFFER_STREAMING) &&
278 !drmHashLookup(msm_ring->cmd_table, (unsigned long)target_cmd, &val)) {
/* dedup hit: sanity-check that the existing entry matches what the
 * caller would have appended:
 */
280 cmd = &msm_ring->submit.cmds[i];
282 assert(cmd->submit_offset == submit_offset);
283 assert(cmd->size == size);
284 assert(cmd->type == type);
285 assert(msm_ring->submit.bos[cmd->submit_idx].handle ==
286 target_cmd->ring_bo->handle);
291 /* create cmd buf if not: */
292 i = APPEND(&msm_ring->submit, cmds);
293 APPEND(msm_ring, cmds);
294 msm_ring->cmds[i] = target_cmd;
295 cmd = &msm_ring->submit.cmds[i];
/* the cmd's backing bo must itself be in the submit's bo table: */
297 cmd->submit_idx = bo2idx(ring, target_cmd->ring_bo, FD_RELOC_READ);
298 cmd->submit_offset = submit_offset;
302 target_cmd->is_appended_to_submit = TRUE;
/* streaming stateobjs are not expected to be reused, so skip dedup tracking: */
304 if (!(target_cmd->ring->flags & FD_RINGBUFFER_STREAMING)) {
305 drmHashInsert(msm_ring->cmd_table, (unsigned long)target_cmd,
309 target_cmd->size = size;
/* CPU-visible pointer to the current cmd buffer, adjusted by the ring's
 * sub-allocation offset (non-zero for suballocated stateobj rb's).
 */
314 static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring)
316 struct msm_cmd *cmd = current_cmd(ring);
317 uint8_t *base = fd_bo_map(cmd->ring_bo);
318 return base + to_msm_ringbuffer(ring)->offset;
/* Destroy every cmd buffer on the ring's cmd_list (safe iteration since
 * entries are unlinked as they are deleted).
 */
321 static void delete_cmds(struct msm_ringbuffer *msm_ring)
323 struct msm_cmd *cmd, *tmp;
325 LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &msm_ring->cmd_list, list) {
/* Reset all per-submit state after a flush: drop bo refs, release refs on
 * stateobj rings that were pulled into the submit, clear the submit
 * tables and the dedup hashtables, and recycle or reset cmd buffers.
 */
330 static void flush_reset(struct fd_ringbuffer *ring)
332 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
/* release the per-submit bo refs taken in append_bo(), and clear each
 * bo's seqno so it will be re-added on the next submit:
 */
335 for (i = 0; i < msm_ring->nr_bos; i++) {
336 struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
339 msm_bo->current_ring_seqno = 0;
340 fd_bo_del(&msm_bo->base);
343 for (i = 0; i < msm_ring->nr_cmds; i++) {
344 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
/* our own cmds are not ref'd; skip: */
346 if (msm_cmd->ring == ring)
/* drop the ref taken when a stateobj was added to the submit: */
349 if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT)
350 fd_ringbuffer_del(msm_cmd->ring);
353 msm_ring->submit.nr_cmds = 0;
354 msm_ring->submit.nr_bos = 0;
355 msm_ring->nr_cmds = 0;
356 msm_ring->nr_bos = 0;
358 if (msm_ring->bo_table) {
359 drmHashDestroy(msm_ring->bo_table);
360 msm_ring->bo_table = NULL;
363 if (msm_ring->cmd_table) {
364 drmHashDestroy(msm_ring->cmd_table);
365 msm_ring->cmd_table = NULL;
368 if (msm_ring->is_growable) {
369 delete_cmds(msm_ring);
371 /* in old mode, just reset the # of relocs: */
372 current_cmd(ring)->nr_relocs = 0;
/* Compute the final offset/size of the current cmd buffer (from last_start
 * up to the write cursor) and register it in the parent's submit.cmds
 * table.  Child rings become IB targets; the parent is the primary buffer.
 */
376 static void finalize_current_cmd(struct fd_ringbuffer *ring, uint32_t *last_start)
378 uint32_t submit_offset, size, type;
379 struct fd_ringbuffer *parent;
382 parent = ring->parent;
383 type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
386 type = MSM_SUBMIT_CMD_BUF;
389 submit_offset = offset_bytes(last_start, ring->start);
390 size = offset_bytes(ring->cur, last_start);
392 get_cmd(parent, current_cmd(ring), submit_offset, size, type);
/* Debug helper: dump the full submit (bos, cmds, and each cmd's reloc
 * table) via ERROR_MSG — used when the submit ioctl fails.
 */
395 static void dump_submit(struct msm_ringbuffer *msm_ring)
399 for (i = 0; i < msm_ring->submit.nr_bos; i++) {
400 struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
401 ERROR_MSG(" bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
403 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
404 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
405 struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
406 ERROR_MSG(" cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
407 i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
408 for (j = 0; j < cmd->nr_relocs; j++) {
409 struct drm_msm_gem_submit_reloc *r = &relocs[j];
410 ERROR_MSG(" reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
411 ", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
412 r->reloc_idx, r->reloc_offset);
/* Translate a stateobj's private reloc table into one valid for the
 * parent submit: the stateobj's relocs index its own private bos table,
 * so each referenced bo is (re-)added to the parent's table via bo2idx()
 * and the reloc_idx rewritten.  Returns a newly malloc'd relocs array;
 * caller owns it (freed after the submit ioctl in msm_ringbuffer_flush).
 */
417 static struct drm_msm_gem_submit_reloc *
418 handle_stateobj_relocs(struct fd_ringbuffer *parent, struct fd_ringbuffer *stateobj,
419 struct drm_msm_gem_submit_reloc *orig_relocs, unsigned nr_relocs)
421 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(stateobj);
422 struct drm_msm_gem_submit_reloc *relocs = malloc(nr_relocs * sizeof(*relocs));
425 for (i = 0; i < nr_relocs; i++) {
426 unsigned idx = orig_relocs[i].reloc_idx;
427 struct fd_bo *bo = msm_ring->bos[idx];
/* reconstruct FD_RELOC_* flags from the stateobj's private bo entry: */
430 if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_READ)
431 flags |= FD_RELOC_READ;
432 if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_WRITE)
433 flags |= FD_RELOC_WRITE;
435 relocs[i] = orig_relocs[i];
436 relocs[i].reloc_idx = bo2idx(parent, bo, flags);
439 /* stateobj rb's could have reloc's to other stateobj rb's which didn't
440 * get propagated to the parent rb at _emit_reloc_ring() time (because
441 * the parent wasn't known then), so fix that up now:
443 for (i = 0; i < msm_ring->nr_cmds; i++) {
444 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
445 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
/* skip the stateobj's own cmd buffers: */
447 if (msm_ring->cmds[i]->ring == stateobj)
450 assert(msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT);
/* newly added to the parent's submit — hold a ref (see flush_reset()): */
452 if (get_cmd(parent, msm_cmd, cmd->submit_offset, cmd->size, cmd->type)) {
453 fd_ringbuffer_ref(msm_cmd->ring);
/* Flush the (parent) ring: finalize the current cmd, fix up all reloc
 * tables (rewriting stateobj relocs into the global bo table), issue the
 * DRM_MSM_GEM_SUBMIT ioctl, propagate the resulting fence, and free any
 * temporary stateobj relocs tables.  in_fence_fd == -1 means no input
 * fence; out_fence_fd (if non-NULL, per the FENCE_FD_OUT path) receives
 * the output fence fd.
 */
460 static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
461 int in_fence_fd, int *out_fence_fd)
463 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
464 struct msm_pipe *msm_pipe = to_msm_pipe(ring->pipe);
465 struct drm_msm_gem_submit req = {
466 .flags = msm_pipe->pipe,
467 .queueid = msm_pipe->queue_id,
/* only the parent ring may be flushed: */
472 assert(!ring->parent);
474 if (in_fence_fd != -1) {
/* explicit in-fence: also disable implicit sync: */
475 req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
476 req.fence_fd = in_fence_fd;
480 req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
483 finalize_current_cmd(ring, last_start);
485 /* for each of the cmds fix up their relocs: */
486 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
487 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
488 struct drm_msm_gem_submit_reloc *relocs = msm_cmd->relocs;
489 unsigned nr_relocs = msm_cmd->nr_relocs;
491 /* for reusable stateobjs, the reloc table has reloc_idx that
492 * points into its own private bos table, rather than the global
493 * bos table used for the submit, so we need to add the stateobj's
494 * bos to the global table and construct new relocs table with
495 * corresponding reloc_idx
497 if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
498 relocs = handle_stateobj_relocs(ring, msm_cmd->ring,
502 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
503 cmd->relocs = VOID2U64(relocs);
504 cmd->nr_relocs = nr_relocs;
507 /* needs to be after get_cmd() as that could create bos/cmds table: */
508 req.bos = VOID2U64(msm_ring->submit.bos),
509 req.nr_bos = msm_ring->submit.nr_bos;
510 req.cmds = VOID2U64(msm_ring->submit.cmds),
511 req.nr_cmds = msm_ring->submit.nr_cmds;
513 DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);
515 ret = drmCommandWriteRead(ring->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
518 ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
519 dump_submit(msm_ring);
521 /* update timestamp on all rings associated with submit: */
522 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
523 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
524 msm_cmd->ring->last_timestamp = req.fence;
528 *out_fence_fd = req.fence_fd;
532 /* free dynamically constructed stateobj relocs tables: */
533 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
534 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
535 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
536 if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
537 free(U642VOID(cmd->relocs));
/* Grow a 'growable' ring: seal the current cmd buffer into the submit
 * (via finalize_current_cmd) and start a fresh cmd buffer of 'size'.
 */
546 static void msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
548 assert(to_msm_ringbuffer(ring)->is_growable);
549 finalize_current_cmd(ring, ring->last_start);
550 ring_cmd_new(ring, size, 0);
553 static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
/* Emit a relocated address into the cmdstream: record a reloc entry in the
 * current cmd buffer (bo indexed in the parent's submit table) and write a
 * best-guess ("presumed") address word at the cursor.  On gpu_id >= 500
 * (64-bit VA) a second reloc/dword is emitted for the high 32 bits.
 */
558 static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
559 const struct fd_reloc *r)
561 struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
562 struct msm_bo *msm_bo = to_msm_bo(r->bo);
563 struct drm_msm_gem_submit_reloc *reloc;
564 struct msm_cmd *cmd = current_cmd(ring);
565 uint32_t idx = APPEND(cmd, relocs);
568 reloc = &cmd->relocs[idx];
570 reloc->reloc_idx = bo2idx(parent, r->bo, r->flags);
571 reloc->reloc_offset = r->offset;
573 reloc->shift = r->shift;
/* offset of the patched dword within the cmd bo (account for suballoc offset): */
574 reloc->submit_offset = offset_bytes(ring->cur, ring->start) +
575 to_msm_ringbuffer(ring)->offset;
/* write the presumed address now; kernel patches it if the bo moved: */
577 addr = msm_bo->presumed;
578 if (reloc->shift < 0)
579 addr >>= -reloc->shift;
581 addr <<= reloc->shift;
582 (*ring->cur++) = addr | r->or;
584 if (ring->pipe->gpu_id >= 500) {
585 struct drm_msm_gem_submit_reloc *reloc_hi;
587 /* NOTE: grab reloc_idx *before* APPEND() since that could
588 * realloc() meaning that 'reloc' ptr is no longer valid:
590 uint32_t reloc_idx = reloc->reloc_idx;
592 idx = APPEND(cmd, relocs);
594 reloc_hi = &cmd->relocs[idx];
596 reloc_hi->reloc_idx = reloc_idx;
597 reloc_hi->reloc_offset = r->offset;
598 reloc_hi->or = r->orhi;
/* shift adjusted by 32 so the kernel patches in the high dword: */
599 reloc_hi->shift = r->shift - 32;
600 reloc_hi->submit_offset = offset_bytes(ring->cur, ring->start) +
601 to_msm_ringbuffer(ring)->offset;
603 addr = msm_bo->presumed >> 32;
604 if (reloc_hi->shift < 0)
605 addr >>= -reloc_hi->shift;
607 addr <<= reloc_hi->shift;
608 (*ring->cur++) = addr | r->orhi;
/* Emit an IB (indirect-branch) reference from 'ring' to cmd buffer
 * 'cmd_idx' of 'target': register the target cmd in the parent's submit
 * (sizing the still-open last cmd from its current write cursor) and emit
 * a read reloc to the target cmd's bo at its submit offset.
 */
612 static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
613 struct fd_ringbuffer *target, uint32_t cmd_idx)
615 struct msm_cmd *cmd = NULL;
616 struct msm_ringbuffer *msm_target = to_msm_ringbuffer(target);
618 int added_cmd = FALSE;
620 uint32_t submit_offset = msm_target->offset;
/* walk the target's cmd_list to find the cmd_idx'th cmd buffer: */
622 LIST_FOR_EACH_ENTRY(cmd, &msm_target->cmd_list, list) {
628 assert(cmd && (idx == cmd_idx));
630 if (idx < (msm_target->cmd_count - 1)) {
631 /* All but the last cmd buffer is fully "baked" (ie. already has
632 * done get_cmd() to add it to the cmds table). But in this case,
633 * the size we get is invalid (since it is calculated from the
638 struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
/* last cmd is still being written — size it from the current cursor: */
639 size = offset_bytes(target->cur, target->start);
640 added_cmd = get_cmd(parent, cmd, submit_offset, size,
641 MSM_SUBMIT_CMD_IB_TARGET_BUF);
644 msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
646 .flags = FD_RELOC_READ,
647 .offset = submit_offset,
650 /* Unlike traditional ringbuffers which are deleted as a set (after
651 * being flushed), mesa can't really guarantee that a stateobj isn't
652 * destroyed after emitted but before flush, so we must hold a ref:
654 if (added_cmd && (target->flags & FD_RINGBUFFER_OBJECT)) {
655 fd_ringbuffer_ref(target);
/* Number of physical cmd buffers (msm_cmd) backing this logical ring. */
661 static uint32_t msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
663 return to_msm_ringbuffer(ring)->cmd_count;
/* Final teardown: delete all cmd buffers, then free the submit tables and
 * the shadow tracking arrays.
 */
666 static void msm_ringbuffer_destroy(struct fd_ringbuffer *ring)
668 struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
671 delete_cmds(msm_ring);
673 free(msm_ring->submit.cmds);
674 free(msm_ring->submit.bos);
676 free(msm_ring->cmds);
/* fd_ringbuffer backend vtable for the MSM kernel driver. */
680 static const struct fd_ringbuffer_funcs funcs = {
681 .hostptr = msm_ringbuffer_hostptr,
682 .flush = msm_ringbuffer_flush,
683 .grow = msm_ringbuffer_grow,
684 .reset = msm_ringbuffer_reset,
685 .emit_reloc = msm_ringbuffer_emit_reloc,
686 .emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
687 .cmd_count = msm_ringbuffer_cmd_count,
688 .destroy = msm_ringbuffer_destroy,
691 drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
692 uint32_t size, enum fd_ringbuffer_flags flags)
694 struct msm_ringbuffer *msm_ring;
695 struct fd_ringbuffer *ring;
697 msm_ring = calloc(1, sizeof(*msm_ring));
699 ERROR_MSG("allocation failed");
704 assert(pipe->dev->version >= FD_VERSION_UNLIMITED_CMDS);
706 msm_ring->is_growable = TRUE;
709 list_inithead(&msm_ring->cmd_list);
710 msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;
712 ring = &msm_ring->base;
713 atomic_set(&ring->refcnt, 1);
715 ring->funcs = &funcs;
717 ring->pipe = pipe; /* needed in ring_cmd_new() */
719 ring_cmd_new(ring, size, flags);