/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <assert.h>
#include <errno.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

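/* One batch of state destined for a single DRM_NOUVEAU_GEM_PUSHBUF ioctl:
 * the buffer objects referenced, the relocations to apply, and the push
 * segments to submit.  Records are chained when a non-immediate pushbuf
 * exceeds the kernel's per-submit limits.
 */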
struct nouveau_pushbuf_krec {
	struct nouveau_pushbuf_krec *next;
	struct drm_nouveau_gem_pushbuf_bo buffer[NOUVEAU_GEM_MAX_BUFFERS];
	struct drm_nouveau_gem_pushbuf_reloc reloc[NOUVEAU_GEM_MAX_RELOCS];
	struct drm_nouveau_gem_pushbuf_push push[NOUVEAU_GEM_MAX_PUSH];
	int nr_buffer;
	int nr_reloc;
	int nr_push;
	uint64_t vram_used;
	uint64_t gart_used;
};

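/* Private pushbuf state behind the public struct nouveau_pushbuf.  Tracks
 * the chain of pending kernel submission records, the backing buffer
 * objects holding command data, and the bufctx objects validated against
 * this pushbuf.
 */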
struct nouveau_pushbuf_priv {
	struct nouveau_pushbuf base;
	struct nouveau_pushbuf_krec *list;
	struct nouveau_pushbuf_krec *krec;
	struct nouveau_list bctx_list;
	struct nouveau_bo *bo;
	uint32_t type;
	uint32_t suffix0;
	uint32_t suffix1;
	uint32_t *ptr;
	uint32_t *bgn;
	int bo_next;
	int bo_nr;
	struct nouveau_bo *bos[];
};

static inline struct nouveau_pushbuf_priv *
nouveau_pushbuf(struct nouveau_pushbuf *push)
{
	return (struct nouveau_pushbuf_priv *)push;
}

static int pushbuf_validate(struct nouveau_pushbuf *, bool);
static int pushbuf_flush(struct nouveau_pushbuf *);

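/* Check whether a buffer with the requested placement domains can be
 * accounted within the current VRAM/GART limits, narrowing *domains and
 * re-placing already-referenced VRAM|GART buffers if that's what it takes.
 * Returns false when only a flush can resolve the placement.
 */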
static bool
pushbuf_kref_fits(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		  uint32_t *domains)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_bo *kbo;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int i;

	/* VRAM is the only valid domain.  GART and VRAM|GART buffers
	 * are all accounted to GART, so if this doesn't fit in VRAM
	 * straight up, a flush is needed.
	 */
	if (*domains == NOUVEAU_GEM_DOMAIN_VRAM) {
		if (krec->vram_used + bo->size > dev->vram_limit)
			return false;

		krec->vram_used += bo->size;
		return true;
	}

	/* GART or VRAM|GART buffer.  Account both of these buffer types
	 * to GART only for the moment, which simplifies things.  If the
	 * buffer can fit already, we're done here.
	 */
	if (krec->gart_used + bo->size <= dev->gart_limit) {
		krec->gart_used += bo->size;
		return true;
	}

	/* Ran out of GART space, if it's a VRAM|GART buffer and it'll
	 * fit into available VRAM, turn it into a VRAM buffer.
	 */
	if ((*domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
	    krec->vram_used + bo->size <= dev->vram_limit) {
		*domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->vram_used += bo->size;
		return true;
	}

	/* Still couldn't fit the buffer in anywhere, so as a last resort
	 * scan the buffer list for VRAM|GART buffers and turn them into
	 * VRAM buffers until we have enough space in GART for this one.
	 */
	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART))
			continue;

		kbo = (void *)(unsigned long)kref->user_priv;
		if (!(kref->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) ||
		    krec->vram_used + kbo->size > dev->vram_limit)
			continue;

		kref->valid_domains &= NOUVEAU_GEM_DOMAIN_VRAM;
		krec->gart_used -= kbo->size;
		krec->vram_used += kbo->size;
		if (krec->gart_used + bo->size <= dev->gart_limit) {
			krec->gart_used += bo->size;
			return true;
		}
	}

	/* Couldn't resolve a placement, need to force a flush */
	return false;
}

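/* Reference a buffer object on the current kernel submission record, or
 * update an existing reference with additional domains/access.  Returns
 * NULL when a flush is required before the buffer can be referenced with
 * the requested flags.
 */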
static struct drm_nouveau_gem_pushbuf_bo *
pushbuf_kref(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t flags)
{
	struct nouveau_device *dev = push->client->device;
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_pushbuf *fpush;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t domains, domains_wr, domains_rd;

	domains = 0;
	if (flags & NOUVEAU_BO_VRAM)
		domains |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (flags & NOUVEAU_BO_GART)
		domains |= NOUVEAU_GEM_DOMAIN_GART;
	domains_wr = domains * !!(flags & NOUVEAU_BO_WR);
	domains_rd = domains * !!(flags & NOUVEAU_BO_RD);

	/* if buffer is referenced on another pushbuf that is owned by the
	 * same client, we need to flush the other pushbuf first to ensure
	 * the correct ordering of commands
	 */
	fpush = cli_push_get(push->client, bo);
	if (fpush && fpush != push)
		pushbuf_flush(fpush);

	kref = cli_kref_get(push->client, bo);
	if (kref) {
		/* possible conflict in memory types - flush and retry */
		if (!(kref->valid_domains & domains))
			return NULL;

		/* VRAM|GART buffer turning into a VRAM buffer.  Make sure
		 * it'll fit in VRAM and force a flush if not.
		 */
		if ((kref->valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
		    (domains == NOUVEAU_GEM_DOMAIN_VRAM)) {
			if (krec->vram_used + bo->size > dev->vram_limit)
				return NULL;

			krec->vram_used += bo->size;
			krec->gart_used -= bo->size;
		}

		kref->valid_domains &= domains;
		kref->write_domains |= domains_wr;
		kref->read_domains |= domains_rd;
	} else {
		if (krec->nr_buffer == NOUVEAU_GEM_MAX_BUFFERS ||
		    !pushbuf_kref_fits(push, bo, &domains))
			return NULL;

		kref = &krec->buffer[krec->nr_buffer++];
		kref->user_priv = (unsigned long)bo;
		kref->handle = bo->handle;
		kref->valid_domains = domains;
		kref->write_domains = domains_wr;
		kref->read_domains = domains_rd;
		kref->presumed.valid = 1;
		kref->presumed.offset = bo->offset;
		if (bo->flags & NOUVEAU_BO_VRAM)
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
		else
			kref->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;

		cli_kref_set(push->client, bo, kref, push);
		atomic_inc(&nouveau_bo(bo)->refcnt);
	}

	return kref;
}

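/* Record a relocation against the current pushbuf position and return the
 * value to write there, computed from the buffer's presumed offset.  The
 * kernel rewrites the word if the buffer moves during validation.
 */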
static uint32_t
pushbuf_krel(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
	     uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_bo *pkref;
	struct drm_nouveau_gem_pushbuf_bo *bkref;
	uint32_t reloc = data;

	pkref = cli_kref_get(push->client, nvpb->bo);
	bkref = cli_kref_get(push->client, bo);
	krel = &krec->reloc[krec->nr_reloc++];

	assert(pkref);
	assert(bkref);
	krel->reloc_bo_index = pkref - krec->buffer;
	krel->reloc_bo_offset = (push->cur - nvpb->ptr) * 4;
	krel->bo_index = bkref - krec->buffer;
	krel->flags = 0;
	krel->data = data;
	krel->vor = vor;
	krel->tor = tor;

	if (flags & NOUVEAU_BO_LOW) {
		reloc = (bkref->presumed.offset + data);
		krel->flags |= NOUVEAU_GEM_RELOC_LOW;
	} else
	if (flags & NOUVEAU_BO_HIGH) {
		reloc = (bkref->presumed.offset + data) >> 32;
		krel->flags |= NOUVEAU_GEM_RELOC_HIGH;
	}
	if (flags & NOUVEAU_BO_OR) {
		if (bkref->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM)
			reloc |= vor;
		else
			reloc |= tor;
		krel->flags |= NOUVEAU_GEM_RELOC_OR;
	}

	return reloc;
}

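/* Dump a kernel submission record, including the raw command words of
 * every push segment, for debugging rejected pushbufs.
 */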
static void
pushbuf_dump(struct nouveau_pushbuf_krec *krec, int krec_id, int chid)
{
	struct drm_nouveau_gem_pushbuf_reloc *krel;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bo *bo;
	uint32_t *bgn, *end;
	int i;

	err("ch%d: krec %d pushes %d bufs %d relocs %d\n", chid,
	    krec_id, krec->nr_push, krec->nr_buffer, krec->nr_reloc);

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		err("ch%d: buf %08x %08x %08x %08x %08x\n", chid, i,
		    kref->handle, kref->valid_domains,
		    kref->read_domains, kref->write_domains);
	}

	krel = krec->reloc;
	for (i = 0; i < krec->nr_reloc; i++, krel++) {
		err("ch%d: rel %08x %08x %08x %08x %08x %08x %08x\n",
		    chid, krel->reloc_bo_index, krel->reloc_bo_offset,
		    krel->bo_index, krel->flags, krel->data,
		    krel->vor, krel->tor);
	}

	kpsh = krec->push;
	for (i = 0; i < krec->nr_push; i++, kpsh++) {
		kref = krec->buffer + kpsh->bo_index;
		bo = (void *)(unsigned long)kref->user_priv;
		bgn = (uint32_t *)((char *)bo->map + kpsh->offset);
		end = bgn + (kpsh->length / 4);

		err("ch%d: psh %08x %010llx %010llx\n", chid, kpsh->bo_index,
		    (unsigned long long)kpsh->offset,
		    (unsigned long long)(kpsh->offset + kpsh->length));
		while (bgn < end)
			err("\t0x%08x\n", *bgn++);
	}
}

static int
pushbuf_submit(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->list;
	struct nouveau_device *dev = push->client->device;
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_gem_pushbuf_bo_presumed *info;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct drm_nouveau_gem_pushbuf req;
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_bo *bo;
	int krec_id = 0;
	int ret = 0, i;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	if (push->kick_notify)
		push->kick_notify(push);

	nouveau_pushbuf_data(push, NULL, 0, 0);

	while (krec && krec->nr_push) {
		req.channel = fifo->channel;
		req.nr_buffers = krec->nr_buffer;
		req.buffers = (uint64_t)(unsigned long)krec->buffer;
		req.nr_relocs = krec->nr_reloc;
		req.nr_push = krec->nr_push;
		req.relocs = (uint64_t)(unsigned long)krec->reloc;
		req.push = (uint64_t)(unsigned long)krec->push;
		req.suffix0 = nvpb->suffix0;
		req.suffix1 = nvpb->suffix1;
		req.vram_available = 0; /* for valgrind */
		req.gart_available = 0;

		if (dbg_on(0))
			pushbuf_dump(krec, krec_id++, fifo->channel);

		ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
					  &req, sizeof(req));
		nvpb->suffix0 = req.suffix0;
		nvpb->suffix1 = req.suffix1;
		dev->vram_limit = (req.vram_available *
				nouveau_device(dev)->vram_limit_percent) / 100;
		dev->gart_limit = (req.gart_available *
				nouveau_device(dev)->gart_limit_percent) / 100;

		if (ret) {
			err("kernel rejected pushbuf: %s\n", strerror(-ret));
			pushbuf_dump(krec, krec_id++, fifo->channel);
			break;
		}

		kref = krec->buffer;
		for (i = 0; i < krec->nr_buffer; i++, kref++) {
			bo = (void *)(unsigned long)kref->user_priv;

			info = &kref->presumed;
			if (!info->valid) {
				bo->flags &= ~NOUVEAU_BO_APER;
				if (info->domain == NOUVEAU_GEM_DOMAIN_VRAM)
					bo->flags |= NOUVEAU_BO_VRAM;
				else
					bo->flags |= NOUVEAU_BO_GART;
				bo->offset = info->offset;
			}

			if (kref->write_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_WR;
			if (kref->read_domains)
				nouveau_bo(bo)->access |= NOUVEAU_BO_RD;
		}

		krec = krec->next;
	}

	return ret;
}

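/* Flush the current kernel record: submit immediately if the pushbuf owns
 * a channel, otherwise chain a fresh record for further command building.
 * Buffer references are dropped and each attached bufctx's current list is
 * merged back onto its pending list so the next validate re-references it.
 */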
static int
pushbuf_flush(struct nouveau_pushbuf *push)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx, *btmp;
	struct nouveau_bo *bo;
	int ret = 0, i;

	if (push->channel) {
		ret = pushbuf_submit(push, push->channel);
	} else {
		nouveau_pushbuf_data(push, NULL, 0, 0);
		krec->next = malloc(sizeof(*krec));
		nvpb->krec = krec->next;
	}

	kref = krec->buffer;
	for (i = 0; i < krec->nr_buffer; i++, kref++) {
		bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		if (push->channel)
			nouveau_bo_ref(NULL, &bo);
	}

	krec = nvpb->krec;
	krec->vram_used = 0;
	krec->gart_used = 0;
	krec->nr_buffer = 0;
	krec->nr_reloc = 0;
	krec->nr_push = 0;

	DRMLISTFOREACHENTRYSAFE(bctx, btmp, &nvpb->bctx_list, head) {
		DRMLISTJOIN(&bctx->current, &bctx->pending);
		DRMINITLISTHEAD(&bctx->current);
		DRMLISTDELINIT(&bctx->head);
	}

	return ret;
}

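/* Back out of a partially-built state: drop buffer references and
 * relocations added after the given marks so the caller can flush and
 * retry cleanly.
 */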
static void
pushbuf_refn_fail(struct nouveau_pushbuf *push, int sref, int srel)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	kref = krec->buffer + sref;
	while (krec->nr_buffer-- > sref) {
		struct nouveau_bo *bo = (void *)(unsigned long)kref->user_priv;
		cli_kref_set(push->client, bo, NULL, NULL);
		nouveau_bo_ref(NULL, &bo);
		kref++;
	}
	krec->nr_buffer = sref;
	krec->nr_reloc = srel;
}

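/* Reference an array of buffers on the pushbuf, backing out and retrying
 * once after a flush if the full set cannot be accommodated.
 */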
static int
pushbuf_refn(struct nouveau_pushbuf *push, bool retry,
	     struct nouveau_pushbuf_refn *refs, int nr)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	int sref = krec->nr_buffer;
	int ret = 0, i;

	for (i = 0; i < nr; i++) {
		kref = pushbuf_kref(push, refs[i].bo, refs[i].flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}
	}

	if (ret) {
		pushbuf_refn_fail(push, sref, krec->nr_reloc);
		if (retry) {
			pushbuf_flush(push);
			nouveau_pushbuf_space(push, 0, 0, 0);
			return pushbuf_refn(push, false, refs, nr);
		}
	}

	return ret;
}

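/* Validate the pushbuf's bufctx: reference every pending bufref's buffer
 * and emit its relocations, retrying once after a flush on failure.
 */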
static int
pushbuf_validate(struct nouveau_pushbuf *push, bool retry)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_bo *kref;
	struct nouveau_bufctx *bctx = push->bufctx;
	struct nouveau_bufref *bref;
	int relocs = bctx ? bctx->relocs * 2 : 0;
	int sref, srel, ret;

	ret = nouveau_pushbuf_space(push, relocs, relocs, 0);
	if (ret || bctx == NULL)
		return ret;

	sref = krec->nr_buffer;
	srel = krec->nr_reloc;

	DRMLISTDEL(&bctx->head);
	DRMLISTADD(&bctx->head, &nvpb->bctx_list);

	DRMLISTFOREACHENTRY(bref, &bctx->pending, thead) {
		kref = pushbuf_kref(push, bref->bo, bref->flags);
		if (!kref) {
			ret = -ENOSPC;
			break;
		}

		if (bref->packet) {
			pushbuf_krel(push, bref->bo, bref->packet, 0, 0, 0);
			*push->cur++ = 0;
			pushbuf_krel(push, bref->bo, bref->data, bref->flags,
				     bref->vor, bref->tor);
			*push->cur++ = 0;
		}
	}

	DRMLISTJOIN(&bctx->pending, &bctx->current);
	DRMINITLISTHEAD(&bctx->pending);

	if (ret) {
		pushbuf_refn_fail(push, sref, srel);
		if (retry) {
			pushbuf_flush(push);
			return pushbuf_validate(push, false);
		}
	}

	return ret;
}

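/* Create a new pushbuf against the given FIFO channel, allocating 'nr'
 * backing buffers of 'size' bytes for command data.  When 'immediate' is
 * set, flushes submit directly to the channel; otherwise submission is
 * deferred until nouveau_pushbuf_kick().
 *
 * Illustrative usage sketch (hypothetical values, error handling omitted):
 *
 *	struct nouveau_pushbuf *push;
 *	nouveau_pushbuf_new(client, chan, 4, 32 * 1024, true, &push);
 *	nouveau_pushbuf_space(push, 8, 0, 0);
 *	*push->cur++ = 0x20010000;	// device-specific method header
 *	nouveau_pushbuf_kick(push, push->channel);
 */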
int
nouveau_pushbuf_new(struct nouveau_client *client, struct nouveau_object *chan,
		    int nr, uint32_t size, bool immediate,
		    struct nouveau_pushbuf **ppush)
{
	struct nouveau_drm *drm = nouveau_drm(&client->device->object);
	struct nouveau_fifo *fifo = chan->data;
	struct nouveau_pushbuf_priv *nvpb;
	struct nouveau_pushbuf *push;
	struct drm_nouveau_gem_pushbuf req = {};
	int ret;

	if (chan->oclass != NOUVEAU_FIFO_CHANNEL_CLASS)
		return -EINVAL;

	/* nop pushbuf call, to get the current "return to main" sequence
	 * we need to append to the pushbuf on early chipsets
	 */
	req.channel = fifo->channel;
	req.nr_push = 0;
	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_PUSHBUF,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvpb = calloc(1, sizeof(*nvpb) + nr * sizeof(*nvpb->bos));
	if (!nvpb)
		return -ENOMEM;

#ifndef SIMULATE
	nvpb->suffix0 = req.suffix0;
	nvpb->suffix1 = req.suffix1;
#else
	nvpb->suffix0 = 0xffffffff;
	nvpb->suffix1 = 0xffffffff;
#endif
	nvpb->krec = calloc(1, sizeof(*nvpb->krec));
	nvpb->list = nvpb->krec;
	if (!nvpb->krec) {
		free(nvpb);
		return -ENOMEM;
	}

	push = &nvpb->base;
	push->client = client;
	push->channel = immediate ? chan : NULL;
	push->flags = NOUVEAU_BO_RD;
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_GART) {
		push->flags |= NOUVEAU_BO_GART;
		nvpb->type = NOUVEAU_BO_GART;
	} else
	if (fifo->pushbuf & NOUVEAU_GEM_DOMAIN_VRAM) {
		push->flags |= NOUVEAU_BO_VRAM;
		nvpb->type = NOUVEAU_BO_VRAM;
	}
	nvpb->type |= NOUVEAU_BO_MAP;

	for (nvpb->bo_nr = 0; nvpb->bo_nr < nr; nvpb->bo_nr++) {
		ret = nouveau_bo_new(client->device, nvpb->type, 0, size,
				     NULL, &nvpb->bos[nvpb->bo_nr]);
		if (ret) {
			nouveau_pushbuf_del(&push);
			return ret;
		}
	}

	DRMINITLISTHEAD(&nvpb->bctx_list);
	*ppush = push;
	return 0;
}

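/* Destroy a pushbuf: drop every outstanding buffer reference from all
 * pending kernel records, free the records, and release the backing
 * buffer objects.
 */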
void
nouveau_pushbuf_del(struct nouveau_pushbuf **ppush)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(*ppush);
	if (nvpb) {
		struct drm_nouveau_gem_pushbuf_bo *kref;
		struct nouveau_pushbuf_krec *krec;
		while ((krec = nvpb->list)) {
			kref = krec->buffer;
			while (krec->nr_buffer--) {
				unsigned long priv = kref++->user_priv;
				struct nouveau_bo *bo = (void *)priv;
				cli_kref_set(nvpb->base.client, bo, NULL, NULL);
				nouveau_bo_ref(NULL, &bo);
			}
			nvpb->list = krec->next;
			free(krec);
		}
		while (nvpb->bo_nr--)
			nouveau_bo_ref(NULL, &nvpb->bos[nvpb->bo_nr]);
		nouveau_bo_ref(NULL, &nvpb->bo);
		free(nvpb);
	}
	*ppush = NULL;
}

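/* Attach a bufctx to the pushbuf; it will be (re)referenced on the next
 * nouveau_pushbuf_validate() call.  Returns the previously attached bufctx
 * so the caller can restore it later.
 */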
struct nouveau_bufctx *
nouveau_pushbuf_bufctx(struct nouveau_pushbuf *push, struct nouveau_bufctx *ctx)
{
	struct nouveau_bufctx *prev = push->bufctx;
	push->bufctx = ctx;
	return prev;
}

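/* Ensure space for 'dwords' of command data, 'relocs' relocations and
 * 'pushes' push segments, flushing and/or cycling to the next backing
 * buffer as required.
 */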
int
nouveau_pushbuf_space(struct nouveau_pushbuf *push,
		      uint32_t dwords, uint32_t relocs, uint32_t pushes)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct nouveau_client *client = push->client;
	struct nouveau_bo *bo = NULL;
	bool flushed = false;
	int ret = 0;

	/* switch to next buffer if insufficient space in the current one */
	if (push->cur + dwords >= push->end) {
		if (nvpb->bo_next < nvpb->bo_nr) {
			nouveau_bo_ref(nvpb->bos[nvpb->bo_next++], &bo);
			if (nvpb->bo_next == nvpb->bo_nr && push->channel)
				nvpb->bo_next = 0;
		} else {
			ret = nouveau_bo_new(client->device, nvpb->type, 0,
					     nvpb->bos[0]->size, NULL, &bo);
			if (ret)
				return ret;
		}
	}

	/* make sure there's always enough space to queue up the pending
	 * data in the pushbuf proper
	 */
	pushes++;

	/* need to flush if we've run out of space on an immediate pushbuf,
	 * if the new buffer won't fit, or if the kernel push/reloc limits
	 * have been hit
	 */
	if ((bo && (push->channel ||
		    !pushbuf_kref(push, bo, push->flags))) ||
	    krec->nr_reloc + relocs >= NOUVEAU_GEM_MAX_RELOCS ||
	    krec->nr_push + pushes >= NOUVEAU_GEM_MAX_PUSH) {
		if (nvpb->bo && krec->nr_buffer)
			pushbuf_flush(push);
		flushed = true;
	}

	/* if necessary, switch to new buffer */
	if (bo) {
		ret = nouveau_bo_map(bo, NOUVEAU_BO_WR, push->client);
		if (ret)
			return ret;

		nouveau_pushbuf_data(push, NULL, 0, 0);
		nouveau_bo_ref(bo, &nvpb->bo);
		nouveau_bo_ref(NULL, &bo);

		nvpb->bgn = nvpb->bo->map;
		nvpb->ptr = nvpb->bgn;
		push->cur = nvpb->bgn;
		push->end = push->cur + (nvpb->bo->size / 4);
		push->end -= 2 + push->rsvd_kick; /* space for suffix */
	}

	pushbuf_kref(push, nvpb->bo, push->flags);
	return flushed ? pushbuf_validate(push, false) : 0;
}

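/* Queue a push segment for submission.  Called with a NULL 'bo' it closes
 * out the segment currently being built in the pushbuf's own buffer,
 * appending the kernel's suffix words if required, by recursing with the
 * pushbuf's backing buffer as the target.
 */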
void
nouveau_pushbuf_data(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		     uint64_t offset, uint64_t length)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(push);
	struct nouveau_pushbuf_krec *krec = nvpb->krec;
	struct drm_nouveau_gem_pushbuf_push *kpsh;
	struct drm_nouveau_gem_pushbuf_bo *kref;

	if (bo != nvpb->bo && nvpb->bgn != push->cur) {
		if (nvpb->suffix0 || nvpb->suffix1) {
			*push->cur++ = nvpb->suffix0;
			*push->cur++ = nvpb->suffix1;
		}

		nouveau_pushbuf_data(push, nvpb->bo,
				     (nvpb->bgn - nvpb->ptr) * 4,
				     (push->cur - nvpb->bgn) * 4);
		nvpb->bgn = push->cur;
	}

	if (bo) {
		kref = cli_kref_get(push->client, bo);
		assert(kref);
		kpsh = &krec->push[krec->nr_push++];
		kpsh->bo_index = kref - krec->buffer;
		kpsh->offset = offset;
		kpsh->length = length;
	}
}

int
nouveau_pushbuf_refn(struct nouveau_pushbuf *push,
		     struct nouveau_pushbuf_refn *refs, int nr)
{
	return pushbuf_refn(push, true, refs, nr);
}

void
nouveau_pushbuf_reloc(struct nouveau_pushbuf *push, struct nouveau_bo *bo,
		      uint32_t data, uint32_t flags, uint32_t vor, uint32_t tor)
{
	*push->cur = pushbuf_krel(push, bo, data, flags, vor, tor);
	push->cur++;
}

int
nouveau_pushbuf_validate(struct nouveau_pushbuf *push)
{
	return pushbuf_validate(push, true);
}

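/* Return the NOUVEAU_BO_RD/WR access flags this pushbuf currently holds
 * on a buffer, or 0 if the buffer isn't referenced by this pushbuf.
 */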
uint32_t
nouveau_pushbuf_refd(struct nouveau_pushbuf *push, struct nouveau_bo *bo)
{
	struct drm_nouveau_gem_pushbuf_bo *kref;
	uint32_t flags = 0;

	if (cli_push_get(push->client, bo) == push) {
		kref = cli_kref_get(push->client, bo);
		assert(kref);
		if (kref->read_domains)
			flags |= NOUVEAU_BO_RD;
		if (kref->write_domains)
			flags |= NOUVEAU_BO_WR;
	}

	return flags;
}

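/* Submit all pending commands to 'chan'.  Deferred pushbufs are submitted
 * directly; immediate pushbufs are flushed to their own channel and then
 * revalidated for further command building.
 */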
int
nouveau_pushbuf_kick(struct nouveau_pushbuf *push, struct nouveau_object *chan)
{
	if (!push->channel)
		return pushbuf_submit(push, chan);

	pushbuf_flush(push);
	return pushbuf_validate(push, false);
}