/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
28 #include "nouveau_private.h"
30 #define PB_BUFMGR_DWORDS (4096 / 2)
31 #define PB_MIN_USER_DWORDS 2048
/*
 * Ensure at least `min` dwords of free space in the channel's push
 * buffer, advancing to the next pre-allocated "call" buffer when the
 * current one cannot satisfy the request.
 * NOTE(review): this extraction dropped lines (return type, braces,
 * `int ret;`, early-return and error paths); comments below describe
 * only the visible statements.
 */
34 nouveau_pushbuf_space(struct nouveau_channel *chan, unsigned min)
36 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
37 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
38 struct nouveau_bo *bo;
/* Never reserve less than the minimum user chunk size. */
41 if (min < PB_MIN_USER_DWORDS)
42 min = PB_MIN_USER_DWORDS;
/* Record how far into the current buffer we already are (in dwords). */
44 nvpb->current_offset = chan->cur - nvpb->pushbuf;
/* +2 keeps room for the two cal_suffix dwords appended at submit time. */
45 if (chan->cur + min + 2 <= chan->end)
/* presumably wraps back to the first buffer once all CALPB_BUFFERS
 * have been consumed — confirm against the dropped lines */
49 if (nvpb->current == CALPB_BUFFERS)
51 bo = nvpb->buffer[nvpb->current];
/* Map the next call buffer writable so commands can be written to it. */
53 ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
/* Usable size in dwords; the final 8 bytes are reserved (suffix pair). */
57 nvpb->size = (bo->size - 8) / 4;
58 nvpb->pushbuf = bo->map;
59 nvpb->current_offset = 0;
/* Point the channel's write cursor at the fresh buffer. */
61 chan->cur = nvpb->pushbuf;
62 chan->end = nvpb->pushbuf + nvpb->size;
/*
 * Drop the references held on all pre-allocated "call" push buffers.
 * NOTE(review): the declaration of `i` and surrounding braces were
 * lost in extraction.
 */
69 nouveau_pushbuf_fini_call(struct nouveau_channel *chan)
71 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
72 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
/* nouveau_bo_ref(NULL, &p) releases the BO and clears the pointer. */
75 for (i = 0; i < CALPB_BUFFERS; i++)
76 nouveau_bo_ref(NULL, &nvpb->buffer[i]);
/*
 * Allocate the ring of "call" push buffers and query the kernel for
 * the CAL suffix dwords by issuing an empty DRM_NOUVEAU_GEM_PUSHBUF
 * ioctl.
 * NOTE(review): several lines (zeroing of `req`, declarations of
 * `flags`/`ret`/`i`, error returns, braces) are missing from this
 * extraction.
 */
81 nouveau_pushbuf_init_call(struct nouveau_channel *chan, int buf_size)
83 struct drm_nouveau_gem_pushbuf req;
84 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
85 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
86 struct nouveau_device *dev = chan->device;
/* Prefer GART placement when the kernel permits it, else VRAM. */
90 if (nvchan->drm.pushbuf_domains & NOUVEAU_GEM_DOMAIN_GART)
91 flags |= NOUVEAU_BO_GART;
93 flags |= NOUVEAU_BO_VRAM;
95 req.channel = chan->id;
/* Zero-push submission: only fetches suffix0/suffix1 from the kernel. */
97 ret = drmCommandWriteRead(nouveau_device(dev)->fd,
98 DRM_NOUVEAU_GEM_PUSHBUF, &req, sizeof(req));
/* Allocate each call buffer CPU-mappable so commands can be copied in. */
102 for (i = 0; i < CALPB_BUFFERS; i++) {
103 ret = nouveau_bo_new(dev, flags | NOUVEAU_BO_MAP,
104 0, buf_size, &nvpb->buffer[i]);
/* On allocation failure, tear down whatever was created so far. */
106 nouveau_pushbuf_fini_call(chan);
/* Cache the jump-suffix dwords appended to every submitted stretch. */
111 nvpb->cal_suffix0 = req.suffix0;
112 nvpb->cal_suffix1 = req.suffix1;
/*
 * One-time pushbuf setup for a channel: create the call buffers,
 * claim initial buffer space, and allocate the buffer/reloc tracking
 * arrays handed to the kernel at flush time.
 * NOTE(review): the error checks between these calls (and the
 * checks on the calloc() results) are presumably in lines dropped by
 * the extraction — confirm against the original file.
 */
117 nouveau_pushbuf_init(struct nouveau_channel *chan, int buf_size)
119 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
120 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
123 ret = nouveau_pushbuf_init_call(chan, buf_size);
/* Maps the first call buffer and initialises chan->cur / chan->end. */
127 ret = nouveau_pushbuf_space(chan, 0);
/* Fixed-capacity, zero-initialised tables for the pushbuf ioctl. */
131 nvpb->buffers = calloc(NOUVEAU_GEM_MAX_BUFFERS,
132 sizeof(struct drm_nouveau_gem_pushbuf_bo));
133 nvpb->relocs = calloc(NOUVEAU_GEM_MAX_RELOCS,
134 sizeof(struct drm_nouveau_gem_pushbuf_reloc));
/*
 * Release the channel's pushbuf resources.  The free() of the
 * buffers/relocs arrays is presumably in lines dropped by the
 * extraction — confirm against the original file.
 */
139 nouveau_pushbuf_fini(struct nouveau_channel *chan)
141 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
142 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
143 nouveau_pushbuf_fini_call(chan);
/*
 * Append a push segment referencing `bo` (byte `offset`, `length`
 * bytes) to the pending submission, placing the BO on the validate
 * list.
 * NOTE(review): the NULL-check on `pbbo` and the assignments of the
 * segment's offset/length appear to be in lines lost to extraction.
 */
149 nouveau_pushbuf_bo_add(struct nouveau_channel *chan, struct nouveau_bo *bo,
150 unsigned offset, unsigned length)
152 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
153 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
154 struct drm_nouveau_gem_pushbuf_push *p = &nvpb->push[nvpb->nr_push++];
155 struct drm_nouveau_gem_pushbuf_bo *pbbo;
156 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
/* Fetch (or create) this BO's entry on the channel's validate list. */
158 pbbo = nouveau_bo_emit_buffer(chan, bo);
/* The GPU reads pushbufs: restrict domains and mark as read. */
161 pbbo->valid_domains &= nvchan->drm.pushbuf_domains;
162 pbbo->read_domains |= nvchan->drm.pushbuf_domains;
/* Balanced by nouveau_pushbuf_bo_unref() after submission/undo. */
163 nvbo->pending_refcnt++;
/* Index of the BO within the buffers[] array passed to the kernel. */
165 p->bo_index = pbbo - nvpb->buffers;
/*
 * Close out the currently-open stretch of the call buffer (appending
 * the kernel-provided jump suffix) and queue it as a push segment;
 * optionally queue an additional caller-supplied segment
 * (`bo`/`offset`/`length`).
 * NOTE(review): braces and the declarations of `ret`/`len` are
 * missing from this extraction.
 */
172 nouveau_pushbuf_submit(struct nouveau_channel *chan, struct nouveau_bo *bo,
173 unsigned offset, unsigned length)
175 struct nouveau_pushbuf_priv *nvpb = &nouveau_channel(chan)->pb;
/* Anything written since current_offset?  Skip if buffer untouched. */
178 if ((AVAIL_RING(chan) + nvpb->current_offset) != nvpb->size) {
/* Terminate the stretch with the 2-dword suffix from the kernel. */
179 if (nvpb->cal_suffix0 || nvpb->cal_suffix1) {
180 *(chan->cur++) = nvpb->cal_suffix0;
181 *(chan->cur++) = nvpb->cal_suffix1;
/* Length (dwords) of the newly written stretch, suffix included. */
184 len = (chan->cur - nvpb->pushbuf) - nvpb->current_offset;
/* Queue the stretch as a push segment; offsets/lengths in bytes. */
186 ret = nouveau_pushbuf_bo_add(chan, nvpb->buffer[nvpb->current],
187 nvpb->current_offset * 4, len * 4);
191 nvpb->current_offset += len;
/* Optional extra user segment; nothing more to do otherwise. */
194 return bo ? nouveau_pushbuf_bo_add(chan, bo, offset, length) : 0;
/*
 * Drop one pending reference on validate-list entry `index`; once the
 * last reference is gone, adopt the BO's new placement from the
 * kernel's reply and take it off the pending list.
 * NOTE(review): the early `return` after the refcount test and the
 * list-removal code are in lines dropped by the extraction.
 */
198 nouveau_pushbuf_bo_unref(struct nouveau_pushbuf_priv *nvpb, int index)
200 struct drm_nouveau_gem_pushbuf_bo *pbbo = &nvpb->buffers[index];
/* user_priv carries the nouveau_bo pointer across the ioctl. */
201 struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;
202 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
/* Still referenced by other push/reloc entries — nothing to do yet. */
204 if (--nvbo->pending_refcnt)
/* Kernel cleared `valid`: the buffer moved, record its new placement. */
207 if (pbbo->presumed.valid == 0) {
208 nvbo->domain = pbbo->presumed.domain;
209 nvbo->offset = pbbo->presumed.offset;
212 nvbo->pending = NULL;
/* Release the reference taken when the BO joined the validate list. */
213 nouveau_bo_ref(NULL, &bo);
215 /* we only ever remove from the tail of the pending lists,
/*
 * Submit all pending push segments to the kernel via the
 * DRM_NOUVEAU_GEM_PUSHBUF ioctl, drop the per-BO references taken
 * while building the lists, reset the pending state, and reserve
 * `min` dwords of space for the next stretch.
 * NOTE(review): the "nothing pending" early-out, `memset` of `req`,
 * the `do {` opening the retry loop, error handling, and some list
 * resets (nr_push/nr_relocs) are in lines dropped by the extraction.
 */
222 nouveau_pushbuf_flush(struct nouveau_channel *chan, unsigned min)
224 struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
225 struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
226 struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
227 struct drm_nouveau_gem_pushbuf req;
/* Close out the current call-buffer stretch first. */
231 ret = nouveau_pushbuf_submit(chan, NULL, 0, 0);
/* Fill the ioctl request from the accumulated pending lists. */
238 req.channel = chan->id;
239 req.nr_push = nvpb->nr_push;
/* Userspace pointers are passed as u64 handles in the DRM ABI. */
240 req.push = (uint64_t)(unsigned long)nvpb->push;
241 req.nr_buffers = nvpb->nr_buffers;
242 req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
243 req.nr_relocs = nvpb->nr_relocs;
244 req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
245 req.suffix0 = nvpb->cal_suffix0;
246 req.suffix1 = nvpb->cal_suffix1;
/* Retry the ioctl while the kernel asks us to (EAGAIN). */
249 ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
251 } while (ret == -EAGAIN);
/* The kernel may hand back updated suffixes and memory totals. */
252 nvpb->cal_suffix0 = req.suffix0;
253 nvpb->cal_suffix1 = req.suffix1;
254 nvdev->base.vm_vram_size = req.vram_available;
255 nvdev->base.vm_gart_size = req.gart_available;
257 /* Update presumed offset/domain for any buffers that moved.
258 * Dereference all buffers on validate list
/* Each reloc pinned two BOs (target and the BO holding the reloc). */
260 for (i = 0; i < nvpb->nr_relocs; i++) {
261 nouveau_pushbuf_bo_unref(nvpb, nvpb->relocs[i].bo_index);
262 nouveau_pushbuf_bo_unref(nvpb, nvpb->relocs[i].reloc_bo_index);
/* Each push segment pinned its source BO once. */
265 for (i = 0; i < nvpb->nr_push; i++)
266 nouveau_pushbuf_bo_unref(nvpb, nvpb->push[i].bo_index);
268 nvpb->nr_buffers = 0;
272 /* Allocate space for next push buffer */
273 if (nouveau_pushbuf_space(chan, min))
/* Let the client (e.g. a state tracker) react to the flush. */
276 if (chan->flush_notify)
277 chan->flush_notify(chan);
/*
 * Record a rollback point ("marker") before emitting a stretch that
 * may need undoing; flushes first when there is not enough ring space
 * (`wait_dwords`) or reloc-table capacity (`wait_relocs`) remaining.
 */
284 nouveau_pushbuf_marker_emit(struct nouveau_channel *chan,
285 unsigned wait_dwords, unsigned wait_relocs)
287 struct nouveau_pushbuf_priv *nvpb = &nouveau_channel(chan)->pb;
/* Not enough dwords left: flush, which also reserves wait_dwords. */
289 if (AVAIL_RING(chan) < wait_dwords)
290 return nouveau_pushbuf_flush(chan, wait_dwords);
/* Reloc table would overflow: flush to empty it first. */
292 if (nvpb->nr_relocs + wait_relocs >= NOUVEAU_GEM_MAX_RELOCS)
293 return nouveau_pushbuf_flush(chan, wait_dwords);
/* Snapshot cursor, offset and list lengths for marker_undo(). */
295 nvpb->marker = chan->cur;
296 nvpb->marker_offset = nvpb->current_offset;
297 nvpb->marker_push = nvpb->nr_push;
298 nvpb->marker_relocs = nvpb->nr_relocs;
/*
 * Roll pushbuf state back to the marker recorded by
 * nouveau_pushbuf_marker_emit(): unreference and discard any relocs
 * and push segments added since, then rewind the write cursor.
 * NOTE(review): a guard for "no marker set" and the `int i;`
 * declaration are presumably in lines dropped by the extraction.
 */
303 nouveau_pushbuf_marker_undo(struct nouveau_channel *chan)
305 struct nouveau_pushbuf_priv *nvpb = &nouveau_channel(chan)->pb;
311 /* undo any relocs/buffers added to the list since last marker */
312 for (i = nvpb->marker_relocs; i < nvpb->nr_relocs; i++) {
313 nouveau_pushbuf_bo_unref(nvpb, nvpb->relocs[i].bo_index);
314 nouveau_pushbuf_bo_unref(nvpb, nvpb->relocs[i].reloc_bo_index);
316 nvpb->nr_relocs = nvpb->marker_relocs;
/* Drop the references taken by the undone push segments. */
318 for (i = nvpb->marker_push; i < nvpb->nr_push; i++)
319 nouveau_pushbuf_bo_unref(nvpb, nvpb->push[i].bo_index);
320 nvpb->nr_push = nvpb->marker_push;
322 /* reset pushbuf back to last marker */
323 chan->cur = nvpb->marker;
324 nvpb->current_offset = nvpb->marker_offset;
329 nouveau_pushbuf_emit_reloc(struct nouveau_channel *chan, void *ptr,
330 struct nouveau_bo *bo, uint32_t data, uint32_t data2,
331 uint32_t flags, uint32_t vor, uint32_t tor)
333 struct nouveau_pushbuf_priv *nvpb = &nouveau_channel(chan)->pb;
336 ret = nouveau_reloc_emit(chan, nvpb->buffer[nvpb->current],
337 (char *)ptr - (char *)nvpb->pushbuf, ptr,
338 bo, data, data2, flags, vor, tor);