
nv40: some more nv67 changes
[android-x86/external-libdrm.git] / shared-core / nouveau_fifo.c
/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"


/* returns the size of a fifo context */
int nouveau_fifo_ctx_size(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->card_type >= NV_40)
                return 128;
        else if (dev_priv->card_type >= NV_17)
                return 64;
        else
                return 32;
}
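/* Note: the sizes above appear to correspond to the per-channel RAMFC entry
 * sizes programmed by nouveau_fifo_instmem_configure() below (e.g. the
 * 64-byte entries selected for NV17/NV20/NV30).
 */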

/***********************************
 * functions doing the actual work
 ***********************************/

/* see nv_xaa.c : NVResetGraphics
 * memory mapped by nv_driver.c : NVMapMem
 * see nv_driver.c : NVPreInit
 */

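/* Tells PFIFO where its data structures live in instance memory.  As a
 * rough summary (background knowledge, not stated in this file): RAMHT is
 * the hash table used to look up objects by handle, RAMRO is the runout
 * area where out-of-range FIFO accesses land, and RAMFC holds the
 * per-channel fifo context entries.
 */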
static int nouveau_fifo_instmem_configure(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

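        /* RAMHT config: judging by the shifts below, the low bits hold the
         * table's offset in instance memory in 256-byte units, bits 16+ its
         * size as log2(entries) relative to a 512-entry table, and bits 24+
         * the search granularity.
         */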
        NV_WRITE(NV03_PFIFO_RAMHT,
                        (0x03 << 24) /* search 128 */ |
                        ((dev_priv->ramht_bits - 9) << 16) |
                        (dev_priv->ramht_offset >> 8)
                        );

        NV_WRITE(NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);

        switch(dev_priv->card_type)
        {
                case NV_40:
                        switch (dev_priv->chipset) {
                        case 0x47:
                        case 0x49:
                        case 0x4b:
                                NV_WRITE(0x2230, 1);
                                break;
                        default:
                                break;
                        }
                        NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
                        break;
                case NV_44:
                        NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
                                        (2 << 16));
                        break;
                case NV_30:
                case NV_20:
                case NV_17:
                        NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) |
                                        (1 << 16) /* 64-byte entries */);
                        /* XXX the nvidia blob sets bits 18, 21, 23 for nv20 & nv30 */
                        break;
                case NV_11:
                case NV_10:
                case NV_04:
                        NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
                        break;
        }

        return 0;
}

int nouveau_fifo_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

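        /* Clearing and re-setting the PFIFO bit in PMC_ENABLE effectively
         * resets the engine before it is reprogrammed below.
         */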
        NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
                        ~NV_PMC_ENABLE_PFIFO);
        NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
                         NV_PMC_ENABLE_PFIFO);

        /* Enable PFIFO error reporting */
        NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
        NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);

        NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);

        ret = nouveau_fifo_instmem_configure(dev);
        if (ret) {
                DRM_ERROR("Failed to configure instance memory\n");
                return ret;
        }

        /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */

        DRM_DEBUG("Setting defaults for remaining PFIFO regs\n");

        /* All channels into PIO mode */
        NV_WRITE(NV04_PFIFO_MODE, 0x00000000);

        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
        /* Channel 0 active, PIO mode */
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000000);
        /* PUT and GET to 0 */
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0x00000000);
        /* No cmdbuf object */
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000);
        NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000);
        NV_WRITE(NV03_PFIFO_CACHE0_PULL0, 0x00000000);
        NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF);
        NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF);
        NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000);

        NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
                                      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
                                      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
                                      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
                                      0x00000000);

        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);

        /* FIXME on NV04 */
        if (dev_priv->card_type >= NV_10) {
                NV_WRITE(NV10_PGRAPH_CTX_USER, 0x0);
                NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
                if (dev_priv->card_type >= NV_40)
                        NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x00002001);
                else
                        NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10110000);
        } else {
                NV_WRITE(NV04_PGRAPH_CTX_USER, 0x0);
                NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
                NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10110000);
        }

        NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x001fffff);
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
        return 0;
}

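/* Builds the DMA object ("ctxdma") that lets PFIFO fetch commands from the
 * channel's push buffer, choosing the target (GART, PCI or VRAM) according
 * to where the buffer was allocated.
 */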
static int
nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct mem_block *pb = chan->pushbuf_mem;
        struct nouveau_gpuobj *pushbuf = NULL;
        int ret;

        if (pb->flags & NOUVEAU_MEM_AGP) {
                ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size,
                                                  NV_DMA_ACCESS_RO,
                                                  &pushbuf,
                                                  &chan->pushbuf_base);
        } else
        if (pb->flags & NOUVEAU_MEM_PCI) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pb->start, pb->size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_PCI_NONLINEAR,
                                             &pushbuf);
                chan->pushbuf_base = 0;
        } else if (dev_priv->card_type != NV_04) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pb->start, pb->size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_VIDMEM, &pushbuf);
                chan->pushbuf_base = 0;
        } else {
                /* NV04 cmdbuf hack, from the original ddx.. not sure of its
                 * exact reason for existing :)  PCI access to cmdbuf in
                 * VRAM.
                 */
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pb->start +
                                               drm_get_resource_start(dev, 1),
                                             pb->size, NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_PCI, &pushbuf);
                chan->pushbuf_base = 0;
        }

        if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf,
                                          &chan->pushbuf))) {
                DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
                if (pushbuf != dev_priv->gart_info.sg_ctxdma)
                        nouveau_gpuobj_del(dev, &pushbuf);
                return ret;
        }

        return 0;
}

static struct mem_block *
nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_config *config = &dev_priv->config;
        struct mem_block *pb;
        int pb_min_size = max(NV03_FIFO_SIZE, PAGE_SIZE);

        /* Defaults for unconfigured values */
        if (!config->cmdbuf.location)
                config->cmdbuf.location = NOUVEAU_MEM_FB;
        if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size)
                config->cmdbuf.size = pb_min_size;

        pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
                               config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
                               (struct drm_file *)-2);
        if (!pb)
                DRM_ERROR("Couldn't allocate DMA push buffer.\n");

        return pb;
}

/* allocates and initializes a fifo for user space consumption */
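/* (Note, inferred from the ioctl below: vram_handle/tt_handle are the
 * fb_ctxdma_handle/tt_ctxdma_handle supplied by userspace, and are passed
 * on to nouveau_gpuobj_channel_init() when the channel's default objects
 * are set up.)
 */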
int
nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
                   struct drm_file *file_priv, struct mem_block *pushbuf,
                   uint32_t vram_handle, uint32_t tt_handle)
{
        int ret;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;
        struct nouveau_channel *chan;
        int channel;

        /*
         * Alright, here is the full story
         * Nvidia cards have multiple hw fifo contexts (praise them for that,
         * no complicated crash-prone context switches)
         * We allocate a new context for each app and let it write to it directly
         * (woo, full userspace command submission !)
         * When there are no more contexts, you lost
         */
        for (channel = 0; channel < engine->fifo.channels; channel++) {
                if (dev_priv->fifos[channel] == NULL)
                        break;
        }

        /* no more fifos. you lost. */
        if (channel == engine->fifo.channels)
                return -EINVAL;

        dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel),
                                              DRM_MEM_DRIVER);
        if (!dev_priv->fifos[channel])
                return -ENOMEM;
        dev_priv->fifo_alloc_count++;
        chan = dev_priv->fifos[channel];
        chan->dev = dev;
        chan->id = channel;
        chan->file_priv = file_priv;
        chan->pushbuf_mem = pushbuf;

        DRM_INFO("Allocating FIFO number %d\n", channel);

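        /* Each channel owns a fixed window of "user" control registers
         * (DMA_PUT/GET, reference counter) whose location and size depend on
         * the card generation; the CHANNEL_ALLOC ioctl below maps this window
         * into userspace.
         */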
        /* Locate channel's user control regs */
        if (dev_priv->card_type < NV_40) {
                chan->user = NV03_USER(channel);
                chan->user_size = NV03_USER_SIZE;
                chan->put = NV03_USER_DMA_PUT(channel);
                chan->get = NV03_USER_DMA_GET(channel);
                chan->ref_cnt = NV03_USER_REF_CNT(channel);
        } else
        if (dev_priv->card_type < NV_50) {
                chan->user = NV40_USER(channel);
                chan->user_size = NV40_USER_SIZE;
                chan->put = NV40_USER_DMA_PUT(channel);
                chan->get = NV40_USER_DMA_GET(channel);
                chan->ref_cnt = NV40_USER_REF_CNT(channel);
        } else {
                chan->user = NV50_USER(channel);
                chan->user_size = NV50_USER_SIZE;
                chan->put = NV50_USER_DMA_PUT(channel);
                chan->get = NV50_USER_DMA_GET(channel);
                chan->ref_cnt = NV50_USER_REF_CNT(channel);
        }

        /* Allocate space for per-channel fixed notifier memory */
        ret = nouveau_notifier_init_channel(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* Setup channel's default objects */
        ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* Create a dma object for the push buffer */
        ret = nouveau_fifo_pushbuf_ctxdma_init(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        nouveau_wait_for_idle(dev);

        /* disable the fifo caches */
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);

        /* Create a graphics context for new channel */
        ret = engine->graph.create_context(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* Construct initial RAMFC for new channel */
        ret = engine->fifo.create_context(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* setup channel's default get/put values
         * XXX: quite possibly extremely pointless..
         */
        NV_WRITE(chan->get, chan->pushbuf_base);
        NV_WRITE(chan->put, chan->pushbuf_base);

        /* If this is the first channel, setup PFIFO ourselves.  For any
         * other case, the GPU will handle this when it switches contexts.
         */
        if (dev_priv->fifo_alloc_count == 1) {
                ret = engine->fifo.load_context(chan);
                if (ret) {
                        nouveau_fifo_free(chan);
                        return ret;
                }

                ret = engine->graph.load_context(chan);
                if (ret) {
                        nouveau_fifo_free(chan);
                        return ret;
                }
        }

        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
                 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);

        /* reenable the fifo caches */
        NV_WRITE(NV03_PFIFO_CACHES, 1);

        DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
        *chan_ret = chan;
        return 0;
}

/* stops a fifo */
void nouveau_fifo_free(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;
        uint64_t t_start;

        DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id);

        /* Disable channel switching.  If this channel isn't currently
         * active, re-enable switching if there are still pending commands.
         * We really should do a manual context switch here, but I'm
         * not sure I trust our ability to do this reliably yet..
         */
        NV_WRITE(NV03_PFIFO_CACHES, 0);
        if (engine->fifo.channel_id(dev) != chan->id &&
            NV_READ(chan->get) != NV_READ(chan->put)) {
                NV_WRITE(NV03_PFIFO_CACHES, 1);
        }

        /* Give the channel a chance to idle, wait 2s (hopefully) */
        t_start = engine->timer.read(dev);
        while (NV_READ(chan->get) != NV_READ(chan->put) ||
               NV_READ(NV03_PFIFO_CACHE1_GET) !=
               NV_READ(NV03_PFIFO_CACHE1_PUT)) {
                if (engine->timer.read(dev) - t_start > 2000000000ULL) {
                        DRM_ERROR("Failed to idle channel %d before destroy. "
                                  "Prepare for strangeness..\n", chan->id);
                        break;
                }
        }

        /*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched
         *     from CACHE1 too?
         */

        /* disable the fifo caches */
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);

        /* stop the fifo, otherwise it could be running and
         * it will crash when removing gpu objects
         *XXX: from real-world evidence, absolutely useless..
         */
        NV_WRITE(chan->get, chan->pushbuf_base);
        NV_WRITE(chan->put, chan->pushbuf_base);

        // FIXME XXX needs more code

        engine->fifo.destroy_context(chan);

        /* Cleanup PGRAPH state */
        engine->graph.destroy_context(chan);

        /* reenable the fifo caches */
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
                 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);

        /* Deallocate push buffer */
        nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
        if (chan->pushbuf_mem) {
                nouveau_mem_free(dev, chan->pushbuf_mem);
                chan->pushbuf_mem = NULL;
        }

        /* Destroy objects belonging to the channel */
        nouveau_gpuobj_channel_takedown(chan);

        nouveau_notifier_takedown_channel(chan);

        dev_priv->fifos[chan->id] = NULL;
        dev_priv->fifo_alloc_count--;
        drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
}

/* cleans up all the fifos belonging to file_priv */
void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;
        int i;

        DRM_DEBUG("clearing FIFO enables from file_priv\n");
        for(i = 0; i < engine->fifo.channels; i++) {
                struct nouveau_channel *chan = dev_priv->fifos[i];

                if (chan && chan->file_priv == file_priv)
                        nouveau_fifo_free(chan);
        }
}

int
nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv,
                   int channel)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;

        if (channel >= engine->fifo.channels)
                return 0;
        if (dev_priv->fifos[channel] == NULL)
                return 0;
        return (dev_priv->fifos[channel]->file_priv == file_priv);
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/

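/* CHANNEL_ALLOC: allocates a channel for the calling client and returns
 * everything userspace needs to drive it: the channel id, pushbuf base, and
 * map handles/sizes for the control registers, the command buffer and the
 * notifier block.
 */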
static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_channel_alloc *init = data;
        struct drm_map_list *entry;
        struct nouveau_channel *chan;
        struct mem_block *pushbuf;
        int res;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

        if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
                return -EINVAL;

        pushbuf = nouveau_fifo_user_pushbuf_alloc(dev);
        if (!pushbuf)
                return -ENOMEM;

        res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf,
                                 init->fb_ctxdma_handle,
                                 init->tt_ctxdma_handle);
        if (res)
                return res;
        init->channel  = chan->id;
        init->put_base = chan->pushbuf_base;

        /* make the fifo available to user space */
        /* first, the fifo control regs */
        init->ctrl = dev_priv->mmio->offset + chan->user;
        init->ctrl_size = chan->user_size;
        res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS,
                         0, &chan->regs);
        if (res != 0)
                return res;

        entry = drm_find_matching_map(dev, chan->regs);
        if (!entry)
                return -EINVAL;
        init->ctrl = entry->user_token;

        /* pass back FIFO map info to the caller */
        init->cmdbuf      = chan->pushbuf_mem->map_handle;
        init->cmdbuf_size = chan->pushbuf_mem->size;

        /* and the notifier block */
        init->notifier      = chan->notifier_block->map_handle;
        init->notifier_size = chan->notifier_block->size;

        return 0;
}

static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv)
{
        struct drm_nouveau_channel_free *cfree = data;
        struct nouveau_channel *chan;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
        NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);

        nouveau_fifo_free(chan);
        return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
        DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);