2 * Copyright 2005-2006 Stephane Marchesin
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
27 #include "nouveau_drv.h"
28 #include "nouveau_drm.h"
31 /* returns the number of hw fifos */
/* Returns the number of hardware FIFO channels this card provides,
 * selected on dev_priv->card_type.
 * NOTE(review): the switch cases and return statements are missing from
 * this chunk -- restore/confirm the per-generation counts upstream. */
32 int nouveau_fifo_number(drm_device_t* dev)
34 	drm_nouveau_private_t *dev_priv=dev->dev_private;
35 	switch(dev_priv->card_type)
47 /* returns the size of fifo context */
/* Returns the size in bytes of one per-channel FIFO context (RAMFC entry).
 * Newer generations use larger entries: NV40+ is the largest tier, NV17+
 * the middle tier, with a smaller default for older cards.
 * NOTE(review): the actual return values are missing from this chunk --
 * confirm the exact sizes against the original source. */
48 int nouveau_fifo_ctx_size(drm_device_t* dev)
50 	drm_nouveau_private_t *dev_priv=dev->dev_private;
52 	if (dev_priv->card_type >= NV_40)
54 	else if (dev_priv->card_type >= NV_17)
60 /***********************************
61 * functions doing the actual work
62 ***********************************/
64 /* see nv_xaa.c : NVResetGraphics
65  * memory mapped by nv_driver.c : NVMapMem
66  * see nv_driver.c : NVPreInit
/* Programs PFIFO with the instance-memory locations of the three shared
 * tables it uses: RAMHT (object hash table), RAMRO (runout buffer) and
 * RAMFC (per-channel FIFO context).  Offsets are written shifted, as the
 * hardware registers take addresses in units of 256 bytes (>>8) or 64KiB
 * (>>16).  The RAMFC layout differs per card generation, hence the switch.
 * NOTE(review): the case labels and closing braces are missing from this
 * chunk -- the card_type each NV_WRITE belongs to must be confirmed. */
69 static int nouveau_fifo_instmem_configure(drm_device_t *dev)
71 	drm_nouveau_private_t *dev_priv = dev->dev_private;
/* RAMHT: search depth, table size (log2 entries relative to 9) and base. */
73 	NV_WRITE(NV03_PFIFO_RAMHT,
74 			(0x03 << 24) /* search 128 */ |
75 			((dev_priv->ramht_bits - 9) << 16) |
76 			(dev_priv->ramht_offset >> 8)
79 	NV_WRITE(NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);
81 	switch(dev_priv->card_type)
85 			NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
/* NV49/NV4B need an extra enable bit in an otherwise-unnamed register. */
86 			if((dev_priv->chipset == 0x49) || (dev_priv->chipset == 0x4b))
87 				NV_WRITE(0x2230,0x00000001);
90 			NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
96 			NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) |
97 					(1 << 16) /* 64 Bytes entry*/);
98 	/* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
103 			NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
/* One-time PFIFO engine bring-up: resets the engine via PMC, configures
 * the instance-memory tables, then writes safe defaults into the CACHE0/
 * CACHE1 state so channel 0 starts in PIO mode with empty caches.
 * NOTE(review): several lines (braces, error returns) are missing from
 * this chunk of the file. */
110 int nouveau_fifo_init(drm_device_t *dev)
112 	drm_nouveau_private_t *dev_priv = dev->dev_private;
/* Pulse the PFIFO enable bit in PMC to reset the engine. */
115 	NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
116 			~NV_PMC_ENABLE_PFIFO);
117 	NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
118 			 NV_PMC_ENABLE_PFIFO);
/* Keep the caches disabled while reprogramming. */
120 	NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
122 	ret = nouveau_fifo_instmem_configure(dev);
124 		DRM_ERROR("Failed to configure instance memory\n");
128 	/* FIXME remove all the stuff that's done in nouveau_fifo_alloc */
130 	DRM_DEBUG("Setting defaults for remaining PFIFO regs\n");
132 	/* All channels into PIO mode */
133 	NV_WRITE(NV04_PFIFO_MODE, 0x00000000);
135 	NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
136 	NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
137 	/* Channel 0 active, PIO mode */
138 	NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000000);
139 	/* PUT and GET to 0 */
140 	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0x00000000);
141 	NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0x00000000);
142 	/* No cmdbuf object */
143 	NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000);
144 	NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000);
145 	NV_WRITE(NV03_PFIFO_CACHE0_PULL0, 0x00000000);
146 	NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF);
147 	NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF);
148 	NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001);
149 	NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000);
150 	NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
151 	NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000);
/* DMA fetch parameters: trigger threshold, burst size, outstanding reqs. */
153 	NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
154 				      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
155 				      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
157 				      NV_PFIFO_CACHE1_BIG_ENDIAN |
/* Re-enable the CACHE1 pusher and puller. */
161 	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
162 	NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
163 	NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
164 	NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);
/* Initial PGRAPH context state differs between pre-NV10, NV10+ and NV40+. */
167 	if (dev_priv->card_type >= NV_10) {
168 		NV_WRITE(NV10_PGRAPH_CTX_USER, 0x0);
169 		NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
170 		if (dev_priv->card_type >= NV_40)
171 			NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x00002001);
173 			NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10110000);
175 		NV_WRITE(NV04_PGRAPH_CTX_USER, 0x0);
176 		NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
177 		NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10110000);
/* Set the context-switch timeslice and finally turn the caches back on. */
180 	NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x001fffff);
181 	NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
/* Allocates the channel's command (push) buffer and wraps it in a DMA
 * context object (ctxdma) so the GPU can fetch from it.  The ctxdma
 * target depends on where the buffer landed: AGP aperture, VRAM, or --
 * for the NV04 workaround -- raw PCI addresses.  On success the memory
 * block and a pushbuf base of 0 are recorded in the channel struct.
 * NOTE(review): error-return lines and some braces are missing from this
 * chunk of the file. */
186 nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel)
188 	drm_nouveau_private_t *dev_priv = dev->dev_private;
189 	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
190 	struct nouveau_config *config = &dev_priv->config;
191 	struct mem_block *cb;
/* At least one page, and never below the hardware FIFO size. */
192 	int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE);
193 	nouveau_gpuobj_t *pushbuf = NULL;
196 	/* Defaults for unconfigured values */
197 	if (!config->cmdbuf.location)
198 		config->cmdbuf.location = NOUVEAU_MEM_FB;
199 	if (!config->cmdbuf.size || config->cmdbuf.size < cb_min_size)
200 		config->cmdbuf.size = cb_min_size;
202 	cb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
203 			config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
206 		DRM_ERROR("Couldn't allocate DMA command buffer.\n");
207 		return DRM_ERR(ENOMEM);
/* Buffer in AGP: ctxdma addresses are relative to the AGP aperture. */
210 	if (cb->flags & NOUVEAU_MEM_AGP) {
211 		ret = nouveau_gpuobj_dma_new
212 			(dev, channel, NV_CLASS_DMA_IN_MEMORY,
213 			 cb->start - dev_priv->agp_phys,
214 			 cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP,
/* Buffer in VRAM: addresses relative to the framebuffer BAR. */
216 	} else if (dev_priv->card_type != NV_04) {
217 		ret = nouveau_gpuobj_dma_new
218 			(dev, channel, NV_CLASS_DMA_IN_MEMORY,
219 			 cb->start - drm_get_resource_start(dev, 1),
220 			 cb->size, NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM,
223 		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
224 		 * exact reason for existing :) PCI access to cmdbuf in
227 		ret = nouveau_gpuobj_dma_new
228 			(dev, channel, NV_CLASS_DMA_IN_MEMORY,
229 			 cb->start, cb->size, NV_DMA_ACCESS_RO,
230 			 NV_DMA_TARGET_PCI, &pushbuf);
/* ctxdma creation failed: give the memory back before bailing out. */
234 		nouveau_mem_free(dev, cb);
235 		DRM_ERROR("Error creating push buffer ctxdma: %d\n", ret);
239 	if ((ret = nouveau_gpuobj_ref_add(dev, channel, 0, pushbuf,
241 		DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
245 	dev_priv->fifos[channel].pushbuf_base = 0;
246 	dev_priv->fifos[channel].cmdbuf_mem = cb;
250 /* allocates and initializes a fifo for user space consumption */
/* Allocates and initialises one hardware FIFO channel for a userspace
 * client: picks the first unused channel slot, sets up its default
 * GPU objects, command buffer, notifier block, PGRAPH context and RAMFC,
 * then enables DMA mode for the channel.  The channel index is returned
 * through chan_ret.  On any sub-step failure the channel is torn down
 * again via nouveau_fifo_free().
 * NOTE(review): braces and error-return lines are missing from this
 * chunk of the file. */
251 int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp,
252 		       uint32_t vram_handle, uint32_t tt_handle)
255 	drm_nouveau_private_t *dev_priv = dev->dev_private;
256 	nouveau_engine_func_t *engine = &dev_priv->Engine;
257 	struct nouveau_fifo *chan;
261 	 * Alright, here is the full story
262 	 * Nvidia cards have multiple hw fifo contexts (praise them for that,
263 	 * no complicated crash-prone context switches)
264 	 * We allocate a new context for each app and let it write to it directly
265 	 * (woo, full userspace command submission !)
266 	 * When there are no more contexts, you lost
/* Linear scan for the first free channel slot. */
268 	for(channel=0; channel<nouveau_fifo_number(dev); channel++)
269 		if (dev_priv->fifos[channel].used==0)
271 	/* no more fifos. you lost. */
272 	if (channel==nouveau_fifo_number(dev))
273 		return DRM_ERR(EINVAL);
274 	(*chan_ret) = channel;
275 	chan = &dev_priv->fifos[channel];
/* NOTE(review): argument order looks swapped -- memset(ptr, value, size)
 * is the correct signature, this zeroes 0 bytes.  Confirm and fix. */
276 	memset(chan, sizeof(*chan), 0);
278 	DRM_INFO("Allocating FIFO number %d\n", channel);
280 	/* that fifo is used */
284 	/* Setup channel's default objects */
285 	ret = nouveau_gpuobj_channel_init(dev, channel, vram_handle, tt_handle);
287 		nouveau_fifo_free(dev, channel);
291 	/* allocate a command buffer, and create a dma object for the gpu */
292 	ret = nouveau_fifo_cmdbuf_alloc(dev, channel);
294 		nouveau_fifo_free(dev, channel);
298 	/* Allocate space for per-channel fixed notifier memory */
299 	ret = nouveau_notifier_init_channel(dev, channel, filp);
301 		nouveau_fifo_free(dev, channel);
/* Quiesce the GPU before touching global PFIFO state. */
305 	nouveau_wait_for_idle(dev);
307 	/* disable the fifo caches */
308 	NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
309 	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
310 	NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
311 	NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
313 	/* Create a graphics context for new channel */
314 	ret = engine->graph.create_context(dev, channel);
316 		nouveau_fifo_free(dev, channel);
320 	/* Construct inital RAMFC for new channel */
321 	ret = engine->fifo.create_context(dev, channel);
323 		nouveau_fifo_free(dev, channel);
327 	/* enable the fifo dma operation */
328 	NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel));
330 	/* setup channel's default get/put values */
331 	NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base);
332 	NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), chan->pushbuf_base);
334 	/* If this is the first channel, setup PFIFO ourselves. For any
335 	 * other case, the GPU will handle this when it switches contexts.
337 	if (dev_priv->fifo_alloc_count == 0) {
338 		ret = engine->fifo.load_context(dev, channel);
340 			nouveau_fifo_free(dev, channel);
344 		ret = engine->graph.load_context(dev, channel);
346 			nouveau_fifo_free(dev, channel);
350 		/* Temporary hack, to avoid breaking Xv on cards where the
351 		 * initial context value for 0x400710 doesn't have these bits
352 		 * set. Proper fix would be to find which object+method is
353 		 * responsible for modifying this state.
355 		if (dev_priv->chipset >= 0x10) {
357 			tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
358 			NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
359 			tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
360 			NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
/* Re-enable the pusher/puller and the caches now that setup is done. */
364 	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
365 	NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
366 	NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
367 	NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);
369 	/* reenable the fifo caches */
370 	NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
372 	dev_priv->fifo_alloc_count++;
374 	DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
/* Tears down one channel: disables its DMA mode bit, destroys the RAMFC
 * and PGRAPH contexts, frees the command buffer and its ctxdma reference,
 * releases the notifier block and all channel objects, and decrements
 * the allocation count.  Caches are disabled around the register pokes.
 * NOTE(review): braces and the chan->used reset are missing from this
 * chunk of the file. */
379 void nouveau_fifo_free(drm_device_t* dev, int channel)
381 	drm_nouveau_private_t *dev_priv = dev->dev_private;
382 	nouveau_engine_func_t *engine = &dev_priv->Engine;
383 	struct nouveau_fifo *chan = &dev_priv->fifos[channel];
386 	DRM_INFO("%s: freeing fifo %d\n", __func__, channel);
388 	/* disable the fifo caches */
389 	NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
/* Drop this channel back to PIO mode. */
391 	NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel));
392 	// FIXME XXX needs more code
394 	engine->fifo.destroy_context(dev, channel);
396 	/* Cleanup PGRAPH state */
397 	engine->graph.destroy_context(dev, channel);
399 	/* reenable the fifo caches */
400 	NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
402 	/* Deallocate command buffer */
404 	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
406 	if (chan->cmdbuf_mem) {
407 		nouveau_mem_free(dev, chan->cmdbuf_mem);
408 		chan->cmdbuf_mem = NULL;
411 	nouveau_notifier_takedown_channel(dev, channel);
413 	/* Destroy objects belonging to the channel */
414 	nouveau_gpuobj_channel_takedown(dev, channel);
416 	dev_priv->fifo_alloc_count--;
419 /* cleanups all the fifos from filp */
/* Frees every channel owned by the given file handle; called when a
 * client closes the device so its channels don't leak. */
420 void nouveau_fifo_cleanup(drm_device_t* dev, DRMFILE filp)
423 	drm_nouveau_private_t *dev_priv = dev->dev_private;
425 	DRM_DEBUG("clearing FIFO enables from filp\n");
426 	for(i=0;i<nouveau_fifo_number(dev);i++)
427 		if (dev_priv->fifos[i].used && dev_priv->fifos[i].filp==filp)
428 			nouveau_fifo_free(dev,i);
/* Returns non-zero iff `channel` is a valid, in-use channel owned by
 * `filp`.  Used by ioctls to validate that a client may touch a channel.
 * NOTE(review): the early-return lines for the range/used checks are
 * missing from this chunk -- presumably they return 0. */
432 nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel)
434 	drm_nouveau_private_t *dev_priv = dev->dev_private;
436 	if (channel >= nouveau_fifo_number(dev))
438 	if (dev_priv->fifos[channel].used == 0)
440 	return (dev_priv->fifos[channel].filp == filp);
443 /***********************************
444 * ioctls wrapping the functions
445 ***********************************/
/* DRM_NOUVEAU_FIFO_ALLOC ioctl handler: allocates a channel via
 * nouveau_fifo_alloc(), then maps the channel's control registers,
 * command buffer and notifier block into the client's address space and
 * copies the resulting offsets/sizes back to userspace.
 * NOTE(review): error-check lines after the allocs/addmaps are missing
 * from this chunk of the file. */
447 static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS)
450 	drm_nouveau_private_t *dev_priv = dev->dev_private;
451 	struct nouveau_fifo *chan;
452 	drm_nouveau_fifo_alloc_t init;
455 	DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data,
458 	res = nouveau_fifo_alloc(dev, &init.channel, filp,
459 				 init.fb_ctxdma_handle,
460 				 init.tt_ctxdma_handle);
463 	chan = &dev_priv->fifos[init.channel];
465 	init.put_base = chan->pushbuf_base;
467 	/* make the fifo available to user space */
468 	/* first, the fifo control regs */
469 	init.ctrl      = dev_priv->mmio->offset + NV03_FIFO_REGS(init.channel);
470 	init.ctrl_size = NV03_FIFO_REGS_SIZE;
471 	res = drm_addmap(dev, init.ctrl, init.ctrl_size, _DRM_REGISTERS,
476 	/* pass back FIFO map info to the caller */
477 	init.cmdbuf      = chan->cmdbuf_mem->start;
478 	init.cmdbuf_size = chan->cmdbuf_mem->size;
480 	/* and the notifier block */
481 	init.notifier      = chan->notifier_block->start;
482 	init.notifier_size = chan->notifier_block->size;
483 	res = drm_addmap(dev, init.notifier, init.notifier_size, _DRM_REGISTERS,
484 			 0, &chan->notifier_map);
/* Return the filled-in structure (channel id, offsets, sizes) to userspace. */
488 	DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data,
493 /***********************************
494 * finally, the ioctl table
495 ***********************************/
/* Driver ioctl dispatch table, indexed by ioctl number.  All entries
 * require an authenticated client (DRM_AUTH); SETPARAM additionally
 * requires master and root.
 * NOTE(review): the closing `};` of the initializer is missing from this
 * chunk of the file. */
497 drm_ioctl_desc_t nouveau_ioctls[] = {
498 	[DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH},
499 	[DRM_IOCTL_NR(DRM_NOUVEAU_GROBJ_ALLOC)] = {nouveau_ioctl_grobj_alloc, DRM_AUTH},
500 	[DRM_IOCTL_NR(DRM_NOUVEAU_NOTIFIER_ALLOC)] = {nouveau_ioctl_notifier_alloc, DRM_AUTH},
501 	[DRM_IOCTL_NR(DRM_NOUVEAU_MEM_ALLOC)] = {nouveau_ioctl_mem_alloc, DRM_AUTH},
502 	[DRM_IOCTL_NR(DRM_NOUVEAU_MEM_FREE)] = {nouveau_ioctl_mem_free, DRM_AUTH},
503 	[DRM_IOCTL_NR(DRM_NOUVEAU_GETPARAM)] = {nouveau_ioctl_getparam, DRM_AUTH},
504 	[DRM_IOCTL_NR(DRM_NOUVEAU_SETPARAM)] = {nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
/* Number of entries in nouveau_ioctls[], exported to the DRM core. */
507 int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);