OSDN Git Service

35a7a7d44965b2d404810c9c0a68586ac845b66e
[android-x86/external-libdrm.git] / freedreno / msm / msm_ringbuffer.c
1 /* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */
2
3 /*
4  * Copyright (C) 2013 Rob Clark <robclark@freedesktop.org>
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice (including the next
14  * paragraph) shall be included in all copies or substantial portions of the
15  * Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
20  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
23  * SOFTWARE.
24  *
25  * Authors:
26  *    Rob Clark <robclark@freedesktop.org>
27  */
28
29 #include <assert.h>
30 #include <inttypes.h>
31
32 #include "freedreno_ringbuffer.h"
33 #include "msm_priv.h"
34
35 /* represents a single cmd buffer in the submit ioctl.  Each cmd buffer has
36  * a backing bo, and a reloc table.
37  */
struct msm_cmd {
	struct list_head list;          /* node in msm_ringbuffer::cmd_list */

	struct fd_ringbuffer *ring;     /* logical ring this cmd belongs to */
	struct fd_bo *ring_bo;          /* backing bo holding the cmdstream */

	/* reloc's table: */
	DECLARE_ARRAY(struct drm_msm_gem_submit_reloc, relocs);

	/* size in bytes of the cmdstream, recorded when "finalized" via get_cmd() */
	uint32_t size;
};
49
struct msm_ringbuffer {
	struct fd_ringbuffer base;      /* must be first: cast via to_msm_ringbuffer() */

	/* refcount; rings start at 1 and stateobj rings gain extra refs
	 * while referenced from a not-yet-flushed parent (see
	 * msm_ringbuffer_emit_reloc_ring() / msm_ringbuffer_ref()):
	 */
	atomic_t refcnt;

	/* submit ioctl related tables:
	 * Note that bos and cmds are tracked by the parent ringbuffer, since
	 * that is global to the submit ioctl call.  The reloc's table is tracked
	 * per cmd-buffer.
	 */
	struct {
		/* bo's table: */
		DECLARE_ARRAY(struct drm_msm_gem_submit_bo, bos);

		/* cmd's table: */
		DECLARE_ARRAY(struct drm_msm_gem_submit_cmd, cmds);
	} submit;

	/* should have matching entries in submit.bos: */
	/* Note, only in parent ringbuffer */
	DECLARE_ARRAY(struct fd_bo *, bos);

	/* should have matching entries in submit.cmds: */
	DECLARE_ARRAY(struct msm_cmd *, cmds);

	/* List of physical cmdstream buffers (msm_cmd) assocated with this
	 * logical fd_ringbuffer.
	 *
	 * Note that this is different from msm_ringbuffer::cmds (which
	 * shadows msm_ringbuffer::submit::cmds for tracking submit ioctl
	 * related stuff, and *only* is tracked in the parent ringbuffer.
	 * And only has "completed" cmd buffers (ie. we already know the
	 * size) added via get_cmd().
	 */
	struct list_head cmd_list;

	int is_growable;                /* set when created with size==0 */
	unsigned cmd_count;             /* # of entries in cmd_list */

	/* per-ring sequence number, used to validate msm_bo::idx caching
	 * in bo2idx():
	 */
	unsigned seqno;

	/* maps fd_bo to idx: */
	void *bo_table;
};
94
/* downcast: valid because msm_ringbuffer embeds fd_ringbuffer as its
 * first member:
 */
static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
{
	struct msm_ringbuffer *msm_ring = (struct msm_ringbuffer *)x;
	return msm_ring;
}
99
/* ref/unref are defined near the bottom of the file but needed earlier: */
static void msm_ringbuffer_unref(struct fd_ringbuffer *ring);
static void msm_ringbuffer_ref(struct fd_ringbuffer *ring);

/* initial backing-bo size for "growable" (size==0) ringbuffers: */
#define INIT_SIZE 0x1000

/* protects the msm_bo::idx / bo_table bookkeeping in bo2idx(): */
static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
/* global bo table lock, defined elsewhere in the library: */
drm_private extern pthread_mutex_t table_lock;
107
108 static void ring_bo_del(struct fd_device *dev, struct fd_bo *bo)
109 {
110         int ret;
111
112         assert(atomic_read(&bo->refcnt) == 1);
113
114         pthread_mutex_lock(&table_lock);
115         ret = fd_bo_cache_free(&to_msm_device(dev)->ring_cache, bo);
116         pthread_mutex_unlock(&table_lock);
117
118         if (ret == 0)
119                 return;
120
121         fd_bo_del(bo);
122 }
123
124 static struct fd_bo * ring_bo_new(struct fd_device *dev, uint32_t size)
125 {
126         struct fd_bo *bo;
127
128         bo = fd_bo_cache_alloc(&to_msm_device(dev)->ring_cache, &size, 0);
129         if (bo)
130                 return bo;
131
132         bo = fd_bo_new(dev, size, 0);
133         if (!bo)
134                 return NULL;
135
136         /* keep ringbuffer bo's out of the normal bo cache: */
137         bo->bo_reuse = FALSE;
138
139         return bo;
140 }
141
/* Tear down a cmd buffer: release (or cache) its backing bo, unlink it
 * from its ring's cmd_list, and free it.
 *
 * NOTE(review): assumes cmd has been linked into the ring's cmd_list --
 * calling this on a freshly calloc'd (unlinked) cmd would list_del() a
 * zero-initialized list node.
 */
static void ring_cmd_del(struct msm_cmd *cmd)
{
	if (cmd->ring_bo)
		ring_bo_del(cmd->ring->pipe->dev, cmd->ring_bo);
	list_del(&cmd->list);
	to_msm_ringbuffer(cmd->ring)->cmd_count--;
	free(cmd->relocs);
	free(cmd);
}
151
152 static struct msm_cmd * ring_cmd_new(struct fd_ringbuffer *ring, uint32_t size)
153 {
154         struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
155         struct msm_cmd *cmd = calloc(1, sizeof(*cmd));
156
157         if (!cmd)
158                 return NULL;
159
160         cmd->ring = ring;
161         cmd->ring_bo = ring_bo_new(ring->pipe->dev, size);
162         if (!cmd->ring_bo)
163                 goto fail;
164
165         list_addtail(&cmd->list, &msm_ring->cmd_list);
166         msm_ring->cmd_count++;
167
168         return cmd;
169
170 fail:
171         ring_cmd_del(cmd);
172         return NULL;
173 }
174
175 static struct msm_cmd *current_cmd(struct fd_ringbuffer *ring)
176 {
177         struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
178         assert(!LIST_IS_EMPTY(&msm_ring->cmd_list));
179         return LIST_LAST_ENTRY(&msm_ring->cmd_list, struct msm_cmd, list);
180 }
181
/* Append a bo to both the submit ioctl's bos table and the shadow
 * fd_bo* table, taking a reference that is dropped in flush_reset().
 * Returns the (shared) index into both tables.
 */
static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	uint32_t idx;

	/* the two tables grow in lockstep, so both APPEND()s return the
	 * same index (the second assignment intentionally overwrites):
	 */
	idx = APPEND(&msm_ring->submit, bos);
	idx = APPEND(msm_ring, bos);

	msm_ring->submit.bos[idx].flags = 0;
	msm_ring->submit.bos[idx].handle = bo->handle;
	msm_ring->submit.bos[idx].presumed = to_msm_bo(bo)->presumed;

	/* hold a ref until the submit is flushed (see flush_reset()): */
	msm_ring->bos[idx] = fd_bo_ref(bo);

	return idx;
}
198
199 /* add (if needed) bo, return idx: */
/* Add the bo to the submit's bos table (if not already there) and return
 * its index, OR'ing in the requested read/write access flags.
 */
static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t flags)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	struct msm_bo *msm_bo = to_msm_bo(bo);
	uint32_t idx;
	pthread_mutex_lock(&idx_lock);
	if (msm_bo->current_ring_seqno == msm_ring->seqno) {
		/* fast path: bo was already added to *this* ring's submit,
		 * so its cached idx is still valid:
		 */
		idx = msm_bo->idx;
	} else {
		void *val;

		/* lazily create the handle->idx hash table: */
		if (!msm_ring->bo_table)
			msm_ring->bo_table = drmHashCreate();

		if (!drmHashLookup(msm_ring->bo_table, bo->handle, &val)) {
			/* found */
			idx = (uint32_t)(uintptr_t)val;
		} else {
			idx = append_bo(ring, bo);
			val = (void *)(uintptr_t)idx;
			drmHashInsert(msm_ring->bo_table, bo->handle, val);
		}
		/* cache ring+idx in the bo for the fast path next time: */
		msm_bo->current_ring_seqno = msm_ring->seqno;
		msm_bo->idx = idx;
	}
	pthread_mutex_unlock(&idx_lock);
	/* access flags accumulate across all relocs referencing the bo: */
	if (flags & FD_RELOC_READ)
		msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_READ;
	if (flags & FD_RELOC_WRITE)
		msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_WRITE;
	return idx;
}
232
233 static int check_cmd_bo(struct fd_ringbuffer *ring,
234                 struct drm_msm_gem_submit_cmd *cmd, struct fd_bo *bo)
235 {
236         struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
237         return msm_ring->submit.bos[cmd->submit_idx].handle == bo->handle;
238 }
239
240 /* Ensure that submit has corresponding entry in cmds table for the
241  * target cmdstream buffer:
242  *
243  * Returns TRUE if new cmd added (else FALSE if it was already in
244  * the cmds table)
245  */
246 static int get_cmd(struct fd_ringbuffer *ring, struct msm_cmd *target_cmd,
247                 uint32_t submit_offset, uint32_t size, uint32_t type)
248 {
249         struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
250         struct drm_msm_gem_submit_cmd *cmd;
251         uint32_t i;
252
253         /* figure out if we already have a cmd buf: */
254         for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
255                 cmd = &msm_ring->submit.cmds[i];
256                 if ((cmd->submit_offset == submit_offset) &&
257                                 (cmd->size == size) &&
258                                 (cmd->type == type) &&
259                                 check_cmd_bo(ring, cmd, target_cmd->ring_bo))
260                         return FALSE;
261         }
262
263         /* create cmd buf if not: */
264         i = APPEND(&msm_ring->submit, cmds);
265         APPEND(msm_ring, cmds);
266         msm_ring->cmds[i] = target_cmd;
267         cmd = &msm_ring->submit.cmds[i];
268         cmd->type = type;
269         cmd->submit_idx = bo2idx(ring, target_cmd->ring_bo, FD_RELOC_READ);
270         cmd->submit_offset = submit_offset;
271         cmd->size = size;
272         cmd->pad = 0;
273
274         target_cmd->size = size;
275
276         return TRUE;
277 }
278
279 static void * msm_ringbuffer_hostptr(struct fd_ringbuffer *ring)
280 {
281         return fd_bo_map(current_cmd(ring)->ring_bo);
282 }
283
284 static uint32_t find_next_reloc_idx(struct msm_cmd *msm_cmd,
285                 uint32_t start, uint32_t offset)
286 {
287         uint32_t i;
288
289         /* a binary search would be more clever.. */
290         for (i = start; i < msm_cmd->nr_relocs; i++) {
291                 struct drm_msm_gem_submit_reloc *reloc = &msm_cmd->relocs[i];
292                 if (reloc->submit_offset >= offset)
293                         return i;
294         }
295
296         return i;
297 }
298
299 static void delete_cmds(struct msm_ringbuffer *msm_ring)
300 {
301         struct msm_cmd *cmd, *tmp;
302
303         LIST_FOR_EACH_ENTRY_SAFE(cmd, tmp, &msm_ring->cmd_list, list) {
304                 ring_cmd_del(cmd);
305         }
306 }
307
/* Return the ring to its ready-to-record state after a flush (or on
 * reset/destroy): drop bo refs, clear all submit tables, and discard
 * (growable mode) or recycle (fixed mode) the cmd buffers.
 */
static void flush_reset(struct fd_ringbuffer *ring)
{
	struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
	unsigned i;

	/* drop the refs taken in append_bo(), and invalidate each bo's
	 * cached ring seqno so bo2idx() re-adds it next time:
	 */
	for (i = 0; i < msm_ring->nr_bos; i++) {
		struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
		if (!msm_bo)	/* stateobj entries may be NULL'd in flush */
			continue;
		msm_bo->current_ring_seqno = 0;
		fd_bo_del(&msm_bo->base);
	}

	/* for each of the cmd buffers, clear their reloc's: */
	for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
		struct msm_cmd *target_cmd = msm_ring->cmds[i];
		if (!target_cmd)	/* likewise, may be NULL'd in flush */
			continue;
		target_cmd->nr_relocs = 0;
	}

	/* reset table counts (backing allocations are kept for reuse): */
	msm_ring->submit.nr_cmds = 0;
	msm_ring->submit.nr_bos = 0;
	msm_ring->nr_cmds = 0;
	msm_ring->nr_bos = 0;

	if (msm_ring->bo_table) {
		drmHashDestroy(msm_ring->bo_table);
		msm_ring->bo_table = NULL;
	}

	if (msm_ring->is_growable) {
		delete_cmds(msm_ring);
	} else {
		/* in old mode, just reset the # of relocs: */
		current_cmd(ring)->nr_relocs = 0;
	}
}
346
347 static void finalize_current_cmd(struct fd_ringbuffer *ring, uint32_t *last_start)
348 {
349         uint32_t submit_offset, size, type;
350         struct fd_ringbuffer *parent;
351
352         if (ring->parent) {
353                 parent = ring->parent;
354                 type = MSM_SUBMIT_CMD_IB_TARGET_BUF;
355         } else {
356                 parent = ring;
357                 type = MSM_SUBMIT_CMD_BUF;
358         }
359
360         submit_offset = offset_bytes(last_start, ring->start);
361         size = offset_bytes(ring->cur, last_start);
362
363         get_cmd(parent, current_cmd(ring), submit_offset, size, type);
364 }
365
366 static void dump_submit(struct msm_ringbuffer *msm_ring)
367 {
368         uint32_t i, j;
369
370         for (i = 0; i < msm_ring->submit.nr_bos; i++) {
371                 struct drm_msm_gem_submit_bo *bo = &msm_ring->submit.bos[i];
372                 ERROR_MSG("  bos[%d]: handle=%u, flags=%x", i, bo->handle, bo->flags);
373         }
374         for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
375                 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
376                 struct drm_msm_gem_submit_reloc *relocs = U642VOID(cmd->relocs);
377                 ERROR_MSG("  cmd[%d]: type=%u, submit_idx=%u, submit_offset=%u, size=%u",
378                                 i, cmd->type, cmd->submit_idx, cmd->submit_offset, cmd->size);
379                 for (j = 0; j < cmd->nr_relocs; j++) {
380                         struct drm_msm_gem_submit_reloc *r = &relocs[j];
381                         ERROR_MSG("    reloc[%d]: submit_offset=%u, or=%08x, shift=%d, reloc_idx=%u"
382                                         ", reloc_offset=%"PRIu64, j, r->submit_offset, r->or, r->shift,
383                                         r->reloc_idx, r->reloc_offset);
384                 }
385         }
386 }
387
388 static struct drm_msm_gem_submit_reloc *
389 handle_stateobj_relocs(struct fd_ringbuffer *parent, struct fd_ringbuffer *stateobj,
390                 struct drm_msm_gem_submit_reloc *orig_relocs, unsigned nr_relocs)
391 {
392         struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(stateobj);
393         struct drm_msm_gem_submit_reloc *relocs = malloc(nr_relocs * sizeof(*relocs));
394         unsigned i;
395
396         for (i = 0; i < nr_relocs; i++) {
397                 unsigned idx = orig_relocs[i].reloc_idx;
398                 struct fd_bo *bo = msm_ring->bos[idx];
399                 unsigned flags = 0;
400
401                 if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_READ)
402                         flags |= FD_RELOC_READ;
403                 if (msm_ring->submit.bos[idx].flags & MSM_SUBMIT_BO_WRITE)
404                         flags |= FD_RELOC_WRITE;
405
406                 relocs[i] = orig_relocs[i];
407                 relocs[i].reloc_idx = bo2idx(parent, bo, flags);
408         }
409
410         return relocs;
411 }
412
413 static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start,
414                 int in_fence_fd, int *out_fence_fd)
415 {
416         struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
417         struct drm_msm_gem_submit req = {
418                         .flags = to_msm_pipe(ring->pipe)->pipe,
419                         .queueid = to_msm_pipe(ring->pipe)->queue_id,
420         };
421         uint32_t i;
422         int ret;
423
424         assert(!ring->parent);
425
426         if (in_fence_fd != -1) {
427                 req.flags |= MSM_SUBMIT_FENCE_FD_IN | MSM_SUBMIT_NO_IMPLICIT;
428                 req.fence_fd = in_fence_fd;
429         }
430
431         if (out_fence_fd) {
432                 req.flags |= MSM_SUBMIT_FENCE_FD_OUT;
433         }
434
435         finalize_current_cmd(ring, last_start);
436
437         /* for each of the cmd's fix up their reloc's: */
438         for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
439                 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
440                 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
441                 uint32_t a = find_next_reloc_idx(msm_cmd, 0, cmd->submit_offset);
442                 uint32_t b = find_next_reloc_idx(msm_cmd, a, cmd->submit_offset + cmd->size);
443                 struct drm_msm_gem_submit_reloc *relocs = &msm_cmd->relocs[a];
444                 unsigned nr_relocs = (b > a) ? b - a : 0;
445
446                 /* for reusable stateobjs, the reloc table has reloc_idx that
447                  * points into it's own private bos table, rather than the global
448                  * bos table used for the submit, so we need to add the stateobj's
449                  * bos to the global table and construct new relocs table with
450                  * corresponding reloc_idx
451                  */
452                 if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
453                         relocs = handle_stateobj_relocs(ring, msm_cmd->ring,
454                                         relocs, nr_relocs);
455                 }
456
457                 cmd->relocs = VOID2U64(relocs);
458                 cmd->nr_relocs = nr_relocs;
459         }
460
461         /* needs to be after get_cmd() as that could create bos/cmds table: */
462         req.bos = VOID2U64(msm_ring->submit.bos),
463         req.nr_bos = msm_ring->submit.nr_bos;
464         req.cmds = VOID2U64(msm_ring->submit.cmds),
465         req.nr_cmds = msm_ring->submit.nr_cmds;
466
467         DEBUG_MSG("nr_cmds=%u, nr_bos=%u", req.nr_cmds, req.nr_bos);
468
469         ret = drmCommandWriteRead(ring->pipe->dev->fd, DRM_MSM_GEM_SUBMIT,
470                         &req, sizeof(req));
471         if (ret) {
472                 ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
473                 dump_submit(msm_ring);
474         } else if (!ret) {
475                 /* update timestamp on all rings associated with submit: */
476                 for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
477                         struct msm_cmd *msm_cmd = msm_ring->cmds[i];
478                         msm_cmd->ring->last_timestamp = req.fence;
479                 }
480
481                 if (out_fence_fd) {
482                         *out_fence_fd = req.fence_fd;
483                 }
484         }
485
486         /* free dynamically constructed stateobj relocs tables: */
487         for (i = 0; i < msm_ring->submit.nr_cmds; i++) {
488                 struct drm_msm_gem_submit_cmd *cmd = &msm_ring->submit.cmds[i];
489                 struct msm_cmd *msm_cmd = msm_ring->cmds[i];
490                 if (msm_cmd->ring->flags & FD_RINGBUFFER_OBJECT) {
491                         /* we could have dropped last reference: */
492                         msm_ring->cmds[i] = NULL;
493
494                         /* need to drop ring_bo ref prior to unref'ing the ring,
495                          * because ring_bo_del assumes it is dropping the *last*
496                          * reference:
497                          */
498                         fd_bo_del(msm_ring->bos[cmd->submit_idx]);
499                         msm_ring->bos[cmd->submit_idx] = NULL;
500
501                         msm_ringbuffer_unref(msm_cmd->ring);
502                         free(U642VOID(cmd->relocs));
503                 }
504         }
505
506         flush_reset(ring);
507
508         return ret;
509 }
510
/* Grow a growable ring: finalize the (full) current cmd buffer and
 * start a fresh one of the requested size.
 */
static void msm_ringbuffer_grow(struct fd_ringbuffer *ring, uint32_t size)
{
	/* only rings created with size==0 are growable: */
	assert(to_msm_ringbuffer(ring)->is_growable);
	finalize_current_cmd(ring, ring->last_start);
	/* NOTE(review): ring_cmd_new() can return NULL on OOM, which is not
	 * handled here (void return) -- current_cmd() would then keep
	 * returning the already-full buffer.  Confirm callers' expectations.
	 */
	ring_cmd_new(ring, size);
}
517
/* Discard any recorded-but-unflushed state (thin wrapper so the vtable
 * entry matches the generic reset signature).
 */
static void msm_ringbuffer_reset(struct fd_ringbuffer *ring)
{
	flush_reset(ring);
}
522
/* Emit a relocated address into the cmdstream: append a reloc entry (so
 * the kernel can patch the real address at submit time) and write the
 * presumed address as a placeholder.  On gpu >= 500 (64-bit addresses) a
 * second dword/reloc is emitted for the upper 32 bits.
 */
static void msm_ringbuffer_emit_reloc(struct fd_ringbuffer *ring,
		const struct fd_reloc *r)
{
	/* bos always accumulate in the top-level (submit-owning) ring: */
	struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
	struct msm_bo *msm_bo = to_msm_bo(r->bo);
	struct drm_msm_gem_submit_reloc *reloc;
	struct msm_cmd *cmd = current_cmd(ring);
	uint32_t idx = APPEND(cmd, relocs);
	uint32_t addr;

	reloc = &cmd->relocs[idx];

	reloc->reloc_idx = bo2idx(parent, r->bo, r->flags);
	reloc->reloc_offset = r->offset;
	reloc->or = r->or;
	reloc->shift = r->shift;
	reloc->submit_offset = offset_bytes(ring->cur, ring->start);

	/* write the presumed (possibly stale) address as placeholder;
	 * negative shift means right-shift:
	 */
	addr = msm_bo->presumed;
	if (reloc->shift < 0)
		addr >>= -reloc->shift;
	else
		addr <<= reloc->shift;
	(*ring->cur++) = addr | r->or;

	if (ring->pipe->gpu_id >= 500) {
		struct drm_msm_gem_submit_reloc *reloc_hi;

		/* NOTE: grab reloc_idx *before* APPEND() since that could
		 * realloc() meaning that 'reloc' ptr is no longer valid:
		 */
		uint32_t reloc_idx = reloc->reloc_idx;

		idx = APPEND(cmd, relocs);

		reloc_hi = &cmd->relocs[idx];

		reloc_hi->reloc_idx = reloc_idx;
		reloc_hi->reloc_offset = r->offset;
		reloc_hi->or = r->orhi;
		/* shift - 32 selects the high dword of the 64-bit address: */
		reloc_hi->shift = r->shift - 32;
		reloc_hi->submit_offset = offset_bytes(ring->cur, ring->start);

		addr = msm_bo->presumed >> 32;
		if (reloc_hi->shift < 0)
			addr >>= -reloc_hi->shift;
		else
			addr <<= reloc_hi->shift;
		(*ring->cur++) = addr | r->orhi;
	}
}
574
/* Emit a reference (IB) from 'ring' to the cmd_idx'th cmd buffer of
 * 'target', registering that buffer in the submit's cmds table if
 * needed.  Returns the size in bytes of the referenced cmdstream.
 */
static uint32_t msm_ringbuffer_emit_reloc_ring(struct fd_ringbuffer *ring,
		struct fd_ringbuffer *target, uint32_t cmd_idx,
		uint32_t submit_offset, uint32_t size)
{
	struct msm_cmd *cmd = NULL;
	uint32_t idx = 0;
	int added_cmd = FALSE;

	/* walk to the cmd_idx'th entry of the target's cmd_list: */
	LIST_FOR_EACH_ENTRY(cmd, &to_msm_ringbuffer(target)->cmd_list, list) {
		if (idx == cmd_idx)
			break;
		idx++;
	}

	assert(cmd && (idx == cmd_idx));

	if (idx < (to_msm_ringbuffer(target)->cmd_count - 1)) {
		/* All but the last cmd buffer is fully "baked" (ie. already has
		 * done get_cmd() to add it to the cmds table).  But in this case,
		 * the size we get is invalid (since it is calculated from the
		 * last cmd buffer):
		 */
		size = cmd->size;
	} else {
		struct fd_ringbuffer *parent = ring->parent ? ring->parent : ring;
		added_cmd = get_cmd(parent, cmd, submit_offset, size,
				MSM_SUBMIT_CMD_IB_TARGET_BUF);
	}

	/* emit the address of the target buffer into this ring's cmdstream: */
	msm_ringbuffer_emit_reloc(ring, &(struct fd_reloc){
		.bo = cmd->ring_bo,
		.flags = FD_RELOC_READ,
		.offset = submit_offset,
	});

	/* Unlike traditional ringbuffers which are deleted as a set (after
	 * being flushed), mesa can't really guarantee that a stateobj isn't
	 * destroyed after emitted but before flush, so we must hold a ref:
	 */
	if (added_cmd && (target->flags & FD_RINGBUFFER_OBJECT)) {
		msm_ringbuffer_ref(target);
	}

	return size;
}
620
621 static uint32_t msm_ringbuffer_cmd_count(struct fd_ringbuffer *ring)
622 {
623         return to_msm_ringbuffer(ring)->cmd_count;
624 }
625
626 static void msm_ringbuffer_unref(struct fd_ringbuffer *ring)
627 {
628         struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
629
630         if (!atomic_dec_and_test(&msm_ring->refcnt))
631                 return;
632
633         flush_reset(ring);
634         delete_cmds(msm_ring);
635
636         free(msm_ring->submit.cmds);
637         free(msm_ring->submit.bos);
638         free(msm_ring->bos);
639         free(msm_ring->cmds);
640         free(msm_ring);
641 }
642
643 static void msm_ringbuffer_ref(struct fd_ringbuffer *ring)
644 {
645         struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
646         atomic_inc(&msm_ring->refcnt);
647 }
648
/* vtable hooking this msm backend into the generic fd_ringbuffer API: */
static const struct fd_ringbuffer_funcs funcs = {
		.hostptr = msm_ringbuffer_hostptr,
		.flush = msm_ringbuffer_flush,
		.grow = msm_ringbuffer_grow,
		.reset = msm_ringbuffer_reset,
		.emit_reloc = msm_ringbuffer_emit_reloc,
		.emit_reloc_ring = msm_ringbuffer_emit_reloc_ring,
		.cmd_count = msm_ringbuffer_cmd_count,
		/* destroy just drops a ref; rings are refcounted (stateobjs can
		 * outlive their destroy call until the parent flushes):
		 */
		.destroy = msm_ringbuffer_unref,
};
659
660 drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
661                 uint32_t size, enum fd_ringbuffer_flags flags)
662 {
663         struct msm_ringbuffer *msm_ring;
664         struct fd_ringbuffer *ring;
665
666         msm_ring = calloc(1, sizeof(*msm_ring));
667         if (!msm_ring) {
668                 ERROR_MSG("allocation failed");
669                 return NULL;
670         }
671
672         if (size == 0) {
673                 assert(pipe->dev->version >= FD_VERSION_UNLIMITED_CMDS);
674                 size = INIT_SIZE;
675                 msm_ring->is_growable = TRUE;
676         }
677
678         list_inithead(&msm_ring->cmd_list);
679         msm_ring->seqno = ++to_msm_device(pipe->dev)->ring_cnt;
680         atomic_set(&msm_ring->refcnt, 1);
681
682         ring = &msm_ring->base;
683         ring->funcs = &funcs;
684         ring->size = size;
685         ring->pipe = pipe;   /* needed in ring_cmd_new() */
686
687         ring_cmd_new(ring, size);
688
689         return ring;
690 }