nouveau: move reloc code down, nothing to see here
[android-x86/external-libdrm.git] / nouveau / nouveau_pushbuf.c
/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>

#include "nouveau_private.h"

#define PB_BUFMGR_DWORDS   (4096 / 2)
#define PB_MIN_USER_DWORDS  2048

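/* Ensure at least 'min' dwords are available in the current "call"-style
 * push buffer.  If the active buffer cannot hold min + 2 dwords (the two
 * extra dwords are reserved for the cal_suffix0/1 words appended by
 * nouveau_pushbuf_flush()), rotate to the next of the CALPB_BUFFERS GART
 * buffers and point the public nouveau_pushbuf at its mapping.
 */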
static int
nouveau_pushbuf_space_call(struct nouveau_channel *chan, unsigned min)
{
        struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
        struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
        struct nouveau_bo *bo;
        int ret;

        if (min < PB_MIN_USER_DWORDS)
                min = PB_MIN_USER_DWORDS;

        nvpb->current_offset = nvpb->base.cur - nvpb->pushbuf;
        if (nvpb->current_offset + min + 2 <= nvpb->size)
                return 0;

        nvpb->current++;
        if (nvpb->current == CALPB_BUFFERS)
                nvpb->current = 0;
        bo = nvpb->buffer[nvpb->current];

        ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
        if (ret)
                return ret;

        nvpb->size = (bo->size - 8) / 4;
        nvpb->pushbuf = bo->map;
        nvpb->current_offset = 0;

        nvpb->base.channel = chan;
        nvpb->base.remaining = nvpb->size;
        nvpb->base.cur = nvpb->pushbuf;

        nouveau_bo_unmap(bo);
        return 0;
}

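/* Make sure at least 'min' dwords can be written to the push buffer.
 * Call-style channels are handled by nouveau_pushbuf_space_call() above;
 * otherwise a fresh user-space buffer is allocated and the previous one
 * freed, since its contents have already been submitted.
 */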
static int
nouveau_pushbuf_space(struct nouveau_channel *chan, unsigned min)
{
        struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
        struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;

        if (nvpb->use_cal)
                return nouveau_pushbuf_space_call(chan, min);

        if (nvpb->pushbuf) {
                free(nvpb->pushbuf);
                nvpb->pushbuf = NULL;
        }

        nvpb->size = min < PB_MIN_USER_DWORDS ? PB_MIN_USER_DWORDS : min;
        nvpb->pushbuf = malloc(sizeof(uint32_t) * nvpb->size);
        if (!nvpb->pushbuf)
                return -ENOMEM;

        nvpb->base.channel = chan;
        nvpb->base.remaining = nvpb->size;
        nvpb->base.cur = nvpb->pushbuf;

        return 0;
}

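/* Drop the references on the call-style push buffers and switch the
 * channel back to the plain user-space push buffer path.
 */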
static void
nouveau_pushbuf_fini_call(struct nouveau_channel *chan)
{
        struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
        struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
        int i;

        for (i = 0; i < CALPB_BUFFERS; i++)
                nouveau_bo_ref(NULL, &nvpb->buffer[i]);
        nvpb->use_cal = 0;
        nvpb->pushbuf = NULL;
}

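/* Probe whether the kernel supports the "call" submission interface by
 * issuing PUSHBUF_CALL2 with a zero handle, falling back to plain
 * PUSHBUF_CALL, in which case aperture sizes aren't updated at flush time
 * (no_aper_update).  On success, allocate the ring of CALPB_BUFFERS GART
 * buffers that the channel will cycle through.
 */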
static void
nouveau_pushbuf_init_call(struct nouveau_channel *chan)
{
        struct drm_nouveau_gem_pushbuf_call req;
        struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
        struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
        struct nouveau_device *dev = chan->device;
        int i, ret;

        req.channel = chan->id;
        req.handle = 0;
        ret = drmCommandWriteRead(nouveau_device(dev)->fd,
                                  DRM_NOUVEAU_GEM_PUSHBUF_CALL2,
                                  &req, sizeof(req));
        if (ret) {
                ret = drmCommandWriteRead(nouveau_device(dev)->fd,
                                          DRM_NOUVEAU_GEM_PUSHBUF_CALL,
                                          &req, sizeof(req));
                if (ret)
                        return;

                nvpb->no_aper_update = 1;
        }

        for (i = 0; i < CALPB_BUFFERS; i++) {
                ret = nouveau_bo_new(dev, NOUVEAU_BO_GART | NOUVEAU_BO_MAP,
                                     0, CALPB_BUFSZ, &nvpb->buffer[i]);
                if (ret) {
                        nouveau_pushbuf_fini_call(chan);
                        return;
                }
        }

        nvpb->use_cal = 1;
        nvpb->cal_suffix0 = req.suffix0;
        nvpb->cal_suffix1 = req.suffix1;
}

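/* One-time per-channel setup: try the call-style interface first, fall
 * back to a user-space push buffer if that fails, and allocate the
 * buffer/reloc tables that are handed to the kernel at flush time.
 */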
int
nouveau_pushbuf_init(struct nouveau_channel *chan)
{
        struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
        struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
        int ret;

        nouveau_pushbuf_init_call(chan);

        ret = nouveau_pushbuf_space(chan, 0);
        if (ret) {
                if (nvpb->use_cal) {
                        nouveau_pushbuf_fini_call(chan);
                        ret = nouveau_pushbuf_space(chan, 0);
                }

                if (ret)
                        return ret;
        }

        nvpb->buffers = calloc(NOUVEAU_GEM_MAX_BUFFERS,
                               sizeof(struct drm_nouveau_gem_pushbuf_bo));
        nvpb->relocs = calloc(NOUVEAU_GEM_MAX_RELOCS,
                              sizeof(struct drm_nouveau_gem_pushbuf_reloc));

        chan->pushbuf = &nvpb->base;
        return 0;
}

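/* Submit everything queued since the last flush.  For call-style channels
 * the two cal_suffix dwords are appended and a PUSHBUF_CALL(2) ioctl points
 * the kernel at the current GART buffer; otherwise the user-space buffer is
 * handed over via PUSHBUF.  Afterwards, presumed offsets/domains are updated
 * from the kernel's reply, the validate list is dropped, and space for the
 * next batch (at least 'min' dwords) is set up.
 */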
int
nouveau_pushbuf_flush(struct nouveau_channel *chan, unsigned min)
{
        struct nouveau_device_priv *nvdev = nouveau_device(chan->device);
        struct nouveau_channel_priv *nvchan = nouveau_channel(chan);
        struct nouveau_pushbuf_priv *nvpb = &nvchan->pb;
        unsigned i;
        int ret;

        if (nvpb->base.remaining == nvpb->size)
                return 0;

        if (nvpb->use_cal) {
                struct drm_nouveau_gem_pushbuf_call req;

                *(nvpb->base.cur++) = nvpb->cal_suffix0;
                *(nvpb->base.cur++) = nvpb->cal_suffix1;
                if (nvpb->base.remaining > 2) /* space() will fixup if not */
                        nvpb->base.remaining -= 2;

restart_cal:
                req.channel = chan->id;
                req.handle = nvpb->buffer[nvpb->current]->handle;
                req.offset = nvpb->current_offset * 4;
                req.nr_buffers = nvpb->nr_buffers;
                req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
                req.nr_relocs = nvpb->nr_relocs;
                req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
                req.nr_dwords = (nvpb->base.cur - nvpb->pushbuf) -
                                nvpb->current_offset;
                req.suffix0 = nvpb->cal_suffix0;
                req.suffix1 = nvpb->cal_suffix1;
                ret = drmCommandWriteRead(nvdev->fd, nvpb->no_aper_update ?
                                          DRM_NOUVEAU_GEM_PUSHBUF_CALL :
                                          DRM_NOUVEAU_GEM_PUSHBUF_CALL2,
                                          &req, sizeof(req));
                if (ret == -EAGAIN)
                        goto restart_cal;
                nvpb->cal_suffix0 = req.suffix0;
                nvpb->cal_suffix1 = req.suffix1;
                if (!nvpb->no_aper_update) {
                        nvdev->base.vm_vram_size = req.vram_available;
                        nvdev->base.vm_gart_size = req.gart_available;
                }
        } else {
                struct drm_nouveau_gem_pushbuf req;

restart_push:
                req.channel = chan->id;
                req.nr_dwords = nvpb->size - nvpb->base.remaining;
                req.dwords = (uint64_t)(unsigned long)nvpb->pushbuf;
                req.nr_buffers = nvpb->nr_buffers;
                req.buffers = (uint64_t)(unsigned long)nvpb->buffers;
                req.nr_relocs = nvpb->nr_relocs;
                req.relocs = (uint64_t)(unsigned long)nvpb->relocs;
                ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_PUSHBUF,
                                      &req, sizeof(req));
                if (ret == -EAGAIN)
                        goto restart_push;
        }

        /* Update presumed offset/domain for any buffers that moved.
         * Dereference all buffers on validate list
         */
        for (i = 0; i < nvpb->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &nvpb->relocs[i];
                struct drm_nouveau_gem_pushbuf_bo *pbbo =
                        &nvpb->buffers[r->bo_index];
                struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;
                struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

                if (--nvbo->pending_refcnt)
                        continue;

                if (pbbo->presumed_ok == 0) {
                        nvbo->domain = pbbo->presumed_domain;
                        nvbo->offset = pbbo->presumed_offset;
                }

                nvbo->pending = NULL;
                nouveau_bo_ref(NULL, &bo);
        }

        nvpb->nr_buffers = 0;
        nvpb->nr_relocs = 0;

        /* Allocate space for next push buffer.  Don't wrap the call in
         * assert() itself, or it would be compiled out with NDEBUG.
         */
        if (nouveau_pushbuf_space(chan, min))
                assert(0);

        if (chan->flush_notify)
                chan->flush_notify(chan);

        nvpb->marker = 0;
        return ret;
}

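/* Record a marker before emitting a command sequence that needs
 * 'wait_dwords' of ring space and up to 'wait_relocs' relocations.  If
 * either limit cannot be satisfied in the current buffer, the buffer is
 * flushed instead and no marker is recorded.
 */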
int
nouveau_pushbuf_marker_emit(struct nouveau_channel *chan,
                            unsigned wait_dwords, unsigned wait_relocs)
{
        struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);

        if (AVAIL_RING(chan) < wait_dwords)
                return nouveau_pushbuf_flush(chan, wait_dwords);

        if (nvpb->nr_relocs + wait_relocs >= NOUVEAU_GEM_MAX_RELOCS)
                return nouveau_pushbuf_flush(chan, wait_dwords);

        nvpb->marker = nvpb->base.cur - nvpb->pushbuf;
        nvpb->marker_relocs = nvpb->nr_relocs;
        return 0;
}

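/* Roll the push buffer back to the last marker: drop any relocations and
 * buffer references queued since nouveau_pushbuf_marker_emit(), then reset
 * the write pointer.  Does nothing if no marker is set.
 */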
void
nouveau_pushbuf_marker_undo(struct nouveau_channel *chan)
{
        struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
        unsigned i;

        if (!nvpb->marker)
                return;

        /* undo any relocs/buffers added to the list since last marker */
        for (i = nvpb->marker_relocs; i < nvpb->nr_relocs; i++) {
                struct drm_nouveau_gem_pushbuf_reloc *r = &nvpb->relocs[i];
                struct drm_nouveau_gem_pushbuf_bo *pbbo =
                        &nvpb->buffers[r->bo_index];
                struct nouveau_bo *bo = (void *)(unsigned long)pbbo->user_priv;
                struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

                if (--nvbo->pending_refcnt)
                        continue;

                nvbo->pending = NULL;
                nouveau_bo_ref(NULL, &bo);
                nvpb->nr_buffers--;
        }
        nvpb->nr_relocs = nvpb->marker_relocs;

        /* reset pushbuf back to last marker */
        nvpb->base.cur = nvpb->pushbuf + nvpb->marker;
        nvpb->base.remaining = nvpb->size - nvpb->marker;
        nvpb->marker = 0;
}

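/* Compute the dword to write for a relocation, using the buffer's presumed
 * offset: either the low or high 32 bits of (offset + data), optionally
 * OR'd with a VRAM- or GART-specific value.
 */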
static uint32_t
nouveau_pushbuf_calc_reloc(struct drm_nouveau_gem_pushbuf_bo *pbbo,
                           struct drm_nouveau_gem_pushbuf_reloc *r)
{
        uint32_t push = 0;

        if (r->flags & NOUVEAU_GEM_RELOC_LOW)
                push = (pbbo->presumed_offset + r->data);
        else
        if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
                push = (pbbo->presumed_offset + r->data) >> 32;
        else
                push = r->data;

        if (r->flags & NOUVEAU_GEM_RELOC_OR) {
                if (pbbo->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM)
                        push |= r->vor;
                else
                        push |= r->tor;
        }

        return push;
}

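/* Queue a relocation for the dword at 'ptr' against 'bo'.  The buffer is
 * added to the validate list (via nouveau_bo_emit_buffer), its allowed
 * placement domains are narrowed according to 'flags', the relocation is
 * recorded for the kernel, and a value based on the presumed offset is
 * written immediately so the kernel only needs to patch it if the buffer
 * moves.
 */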
int
nouveau_pushbuf_emit_reloc(struct nouveau_channel *chan, void *ptr,
                           struct nouveau_bo *bo, uint32_t data, uint32_t data2,
                           uint32_t flags, uint32_t vor, uint32_t tor)
{
        struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        struct drm_nouveau_gem_pushbuf_reloc *r;
        struct drm_nouveau_gem_pushbuf_bo *pbbo;
        uint32_t domains = 0;

        if (nvpb->nr_relocs >= NOUVEAU_GEM_MAX_RELOCS) {
                fprintf(stderr, "too many relocs!!\n");
                return -ENOMEM;
        }

        if (nvbo->user && (flags & NOUVEAU_BO_WR)) {
                fprintf(stderr, "write to user buffer!!\n");
                return -EINVAL;
        }

        pbbo = nouveau_bo_emit_buffer(chan, bo);
        if (!pbbo) {
                fprintf(stderr, "buffer emit fail :(\n");
                return -ENOMEM;
        }

        nvbo->pending_refcnt++;

        if (flags & NOUVEAU_BO_VRAM)
                domains |= NOUVEAU_GEM_DOMAIN_VRAM;
        if (flags & NOUVEAU_BO_GART)
                domains |= NOUVEAU_GEM_DOMAIN_GART;

        if (!(pbbo->valid_domains & domains)) {
                fprintf(stderr, "no valid domains remain!\n");
                return -EINVAL;
        }
        pbbo->valid_domains &= domains;

        assert(flags & NOUVEAU_BO_RDWR);
        if (flags & NOUVEAU_BO_RD) {
                pbbo->read_domains |= domains;
        }
        if (flags & NOUVEAU_BO_WR) {
                pbbo->write_domains |= domains;
                nvbo->write_marker = 1;
        }

        r = nvpb->relocs + nvpb->nr_relocs++;
        r->bo_index = pbbo - nvpb->buffers;
        r->reloc_index = (uint32_t *)ptr - nvpb->pushbuf;
        r->flags = 0;
        if (flags & NOUVEAU_BO_LOW)
                r->flags |= NOUVEAU_GEM_RELOC_LOW;
        if (flags & NOUVEAU_BO_HIGH)
                r->flags |= NOUVEAU_GEM_RELOC_HIGH;
        if (flags & NOUVEAU_BO_OR)
                r->flags |= NOUVEAU_GEM_RELOC_OR;
        r->data = data;
        r->vor = vor;
        r->tor = tor;

        *(uint32_t *)ptr = (flags & NOUVEAU_BO_DUMMY) ? 0 :
                nouveau_pushbuf_calc_reloc(pbbo, r);
        return 0;
}