shared-core/nouveau_fifo.c (android-x86/external-libdrm.git)
/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

/* returns the size of a fifo context */
int nouveau_fifo_ctx_size(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->card_type >= NV_40)
                return 128;
        else if (dev_priv->card_type >= NV_17)
                return 64;
        else
                return 32;
}

/***********************************
 * functions doing the actual work
 ***********************************/

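/* Note added for clarity: PFIFO relies on three instance-memory structures,
 * RAMHT (the handle->object hash table), RAMRO (the "runout" area used when
 * a pusher faults) and RAMFC (the per-channel FIFO context).  The function
 * below only tells the hardware where the driver placed them; their layouts
 * are set up elsewhere by the instmem/gpuobj code.
 */
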
static int nouveau_fifo_instmem_configure(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_WRITE(NV03_PFIFO_RAMHT,
                        (0x03 << 24) /* search 128 */ |
                        ((dev_priv->ramht_bits - 9) << 16) |
                        (dev_priv->ramht_offset >> 8)
                        );

        NV_WRITE(NV03_PFIFO_RAMRO, dev_priv->ramro_offset >> 8);

        switch (dev_priv->card_type) {
                case NV_40:
                        switch (dev_priv->chipset) {
                        case 0x47:
                        case 0x49:
                        case 0x4b:
                                NV_WRITE(0x2230, 1);
                                break;
                        default:
                                break;
                        }
                        NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
                        break;
                case NV_44:
                        NV_WRITE(NV40_PFIFO_RAMFC,
                                 ((nouveau_mem_fb_amount(dev) - 512 * 1024 +
                                   dev_priv->ramfc_offset) >> 16) |
                                 (2 << 16));
                        break;
                case NV_30:
                case NV_20:
                case NV_17:
                        NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset >> 8) |
                                        (1 << 16) /* 64-byte entries */);
                        /* XXX the nvidia blob sets bits 18, 21 and 23 for nv20 & nv30 */
                        break;
                case NV_11:
                case NV_10:
                case NV_04:
                        NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset >> 8);
                        break;
        }

        return 0;
}

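/* Note added for clarity: the init sequence below is, roughly: reset PFIFO
 * by toggling its bit in PMC_ENABLE, enable all PFIFO error interrupts,
 * point the unit at RAMHT/RAMRO/RAMFC, then program safe defaults for the
 * CACHE1 pusher/puller with every channel left in PIO mode.  Per-channel
 * DMA state is programmed later, in nouveau_fifo_alloc().
 */
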
int nouveau_fifo_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
                        ~NV_PMC_ENABLE_PFIFO);
        NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
                         NV_PMC_ENABLE_PFIFO);

        /* Enable PFIFO error reporting */
        NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
        NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);

        NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);

        ret = nouveau_fifo_instmem_configure(dev);
        if (ret) {
                DRM_ERROR("Failed to configure instance memory\n");
                return ret;
        }

        /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */

        DRM_DEBUG("Setting defaults for remaining PFIFO regs\n");

        /* All channels into PIO mode */
        NV_WRITE(NV04_PFIFO_MODE, 0x00000000);

        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
        /* Channel 0 active, PIO mode */
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000000);
        /* PUT and GET to 0 */
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0x00000000);
        /* No cmdbuf object */
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000);
        NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x00000000);
        NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF);
        NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF);
        NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000);

        NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
                                      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
                                      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
                                      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
                                      0x00000000);

        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);

        /* FIXME on NV04 */
        if (dev_priv->card_type >= NV_10) {
                NV_WRITE(NV10_PGRAPH_CTX_USER, 0x0);
                NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retry count */);
                if (dev_priv->card_type >= NV_40)
                        NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x00002001);
                else
                        NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10110000);
        } else {
                NV_WRITE(NV04_PGRAPH_CTX_USER, 0x0);
                NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retry count */);
                NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10110000);
        }

        NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x001fffff);
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
        return 0;
}

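/* Note added for clarity: the helper below wraps the channel's push buffer
 * in a DMA object ("ctxdma") so PFIFO can fetch commands from it, whichever
 * memory it ended up in (AGP, PCI GART, or VRAM).  The NV04 case goes
 * through a PCI-addressed ctxdma even for VRAM; that quirk is inherited
 * from the original DDX and its exact reason is not documented here.
 */
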
static int
nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct mem_block *pb = chan->pushbuf_mem;
        struct nouveau_gpuobj *pushbuf = NULL;
        int ret;

        if (pb->flags & NOUVEAU_MEM_AGP) {
                ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size,
                                                  NV_DMA_ACCESS_RO,
                                                  &pushbuf,
                                                  &chan->pushbuf_base);
        } else
        if (pb->flags & NOUVEAU_MEM_PCI) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pb->start, pb->size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_PCI_NONLINEAR,
                                             &pushbuf);
                chan->pushbuf_base = 0;
        } else if (dev_priv->card_type != NV_04) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pb->start, pb->size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_VIDMEM, &pushbuf);
                chan->pushbuf_base = 0;
        } else {
                /* NV04 cmdbuf hack, from the original ddx.. not sure of its
                 * exact reason for existing :)  PCI access to the cmdbuf in
                 * VRAM.
                 */
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pb->start +
                                               drm_get_resource_start(dev, 1),
                                             pb->size, NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_PCI, &pushbuf);
                chan->pushbuf_base = 0;
        }

        if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf,
                                          &chan->pushbuf))) {
                DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
                if (pushbuf != dev_priv->gart_info.sg_ctxdma)
                        nouveau_gpuobj_del(dev, &pushbuf);
                return ret;
        }

        return 0;
}

static struct mem_block *
nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_config *config = &dev_priv->config;
        struct mem_block *pb;
        int pb_min_size = max(NV03_FIFO_SIZE, PAGE_SIZE);

        /* Defaults for unconfigured values */
        if (!config->cmdbuf.location)
                config->cmdbuf.location = NOUVEAU_MEM_FB;
        if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size)
                config->cmdbuf.size = pb_min_size;

        pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
                               config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
                               (struct drm_file *)-2);
        if (!pb)
                DRM_ERROR("Couldn't allocate DMA push buffer.\n");

        return pb;
}

/* allocates and initializes a fifo for user space consumption */
int
nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
                   struct drm_file *file_priv, struct mem_block *pushbuf,
                   uint32_t vram_handle, uint32_t tt_handle)
{
        int ret;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;
        struct nouveau_channel *chan;
        int channel;

        /*
         * Alright, here is the full story.
         * Nvidia cards have multiple hardware fifo contexts (praise them for
         * that, no complicated crash-prone context switches).
         * We allocate a new context for each app and let it write to it
         * directly (woo, full userspace command submission!).
         * When there are no more contexts, you lose.
         */
        for (channel = 0; channel < engine->fifo.channels; channel++) {
                if (dev_priv->fifos[channel] == NULL)
                        break;
        }

        /* no free fifos left; you lose */
        if (channel == engine->fifo.channels)
                return -EINVAL;

        dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel),
                                              DRM_MEM_DRIVER);
        if (!dev_priv->fifos[channel])
                return -ENOMEM;
        dev_priv->fifo_alloc_count++;
        chan = dev_priv->fifos[channel];
        chan->dev = dev;
        chan->id = channel;
        chan->file_priv = file_priv;
        chan->pushbuf_mem = pushbuf;

        DRM_INFO("Allocating FIFO number %d\n", channel);

        /* Locate channel's user control regs */
        if (dev_priv->card_type < NV_40) {
                chan->user = NV03_USER(channel);
                chan->user_size = NV03_USER_SIZE;
                chan->put = NV03_USER_DMA_PUT(channel);
                chan->get = NV03_USER_DMA_GET(channel);
                chan->ref_cnt = NV03_USER_REF_CNT(channel);
        } else
        if (dev_priv->card_type < NV_50) {
                chan->user = NV40_USER(channel);
                chan->user_size = NV40_USER_SIZE;
                chan->put = NV40_USER_DMA_PUT(channel);
                chan->get = NV40_USER_DMA_GET(channel);
                chan->ref_cnt = NV40_USER_REF_CNT(channel);
        } else {
                chan->user = NV50_USER(channel);
                chan->user_size = NV50_USER_SIZE;
                chan->put = NV50_USER_DMA_PUT(channel);
                chan->get = NV50_USER_DMA_GET(channel);
                chan->ref_cnt = NV50_USER_REF_CNT(channel);
        }

        /* Allocate space for per-channel fixed notifier memory */
        ret = nouveau_notifier_init_channel(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* Setup channel's default objects */
        ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* Create a dma object for the push buffer */
        ret = nouveau_fifo_pushbuf_ctxdma_init(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        nouveau_wait_for_idle(dev);

        /* disable the fifo caches */
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
                 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~0x1);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);

        /* Create a graphics context for the new channel */
        ret = engine->graph.create_context(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* Construct initial RAMFC for the new channel */
        ret = engine->fifo.create_context(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* setup channel's default get/put values
         * XXX: quite possibly extremely pointless..
         */
        NV_WRITE(chan->get, chan->pushbuf_base);
        NV_WRITE(chan->put, chan->pushbuf_base);

        /* If this is the first channel, setup PFIFO ourselves.  For any
         * other case, the GPU will handle this when it switches contexts.
         */
        if (dev_priv->fifo_alloc_count == 1) {
                ret = engine->fifo.load_context(chan);
                if (ret) {
                        nouveau_fifo_free(chan);
                        return ret;
                }

                ret = engine->graph.load_context(chan);
                if (ret) {
                        nouveau_fifo_free(chan);
                        return ret;
                }
        }

        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
                 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);

        /* re-enable the fifo caches */
        NV_WRITE(NV03_PFIFO_CACHES, 1);

        DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
        *chan_ret = chan;
        return 0;
}

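/* Illustrative note (not part of the original code): once a channel has
 * been set up by nouveau_fifo_alloc(), userspace drives it by writing
 * method packets into the mapped push buffer and bumping DMA_PUT through
 * the channel's control regs, roughly:
 *
 *      pushbuf[i++] = (1 << 18) | (subchan << 13) | method;   -- 1-dword packet
 *      pushbuf[i++] = data;
 *      ctrl_regs[DMA_PUT] = chan->pushbuf_base + i * 4;        -- kick PFIFO
 *
 * The exact method-header encoding and control-reg layout vary by
 * generation; see the ddx/Mesa nouveau code for the authoritative format.
 */
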
int
nouveau_channel_idle(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;
        uint32_t caches;
        int idle;

        /* disable the fifo caches while we look at the channel's state */
        caches = NV_READ(NV03_PFIFO_CACHES);
        NV_WRITE(NV03_PFIFO_CACHES, caches & ~1);

        if (engine->fifo.channel_id(dev) != chan->id) {
                struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;

                /* channel not resident in CACHE1, compare the DMA put/get
                 * words saved in its RAMFC instead */
                if (INSTANCE_RD(ramfc, 0) != INSTANCE_RD(ramfc, 1))
                        idle = 0;
                else
                        idle = 1;
        } else {
                idle = (NV_READ(NV04_PFIFO_CACHE1_DMA_GET) ==
                        NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
        }

        NV_WRITE(NV03_PFIFO_CACHES, caches);
        return idle;
}

/* stops and frees a fifo */
void nouveau_fifo_free(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;
        uint64_t t_start;

        DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id);

        /* Give the channel a chance to idle, wait 2s (hopefully) */
        t_start = engine->timer.read(dev);
        while (!nouveau_channel_idle(chan)) {
                if (engine->timer.read(dev) - t_start > 2000000000ULL) {
                        DRM_ERROR("Failed to idle channel %d before destroy. "
                                  "Prepare for strangeness..\n", chan->id);
                        break;
                }
        }

        /*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched
         *     from CACHE1 too?
         */

        /* disable the fifo caches */
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
                 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) & ~0x1);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);

        /* FIXME XXX needs more code */

        engine->fifo.destroy_context(chan);

        /* Cleanup PGRAPH state */
        engine->graph.destroy_context(chan);

        /* re-enable the fifo caches */
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
                 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);

        /* Deallocate push buffer */
        nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
        if (chan->pushbuf_mem) {
                nouveau_mem_free(dev, chan->pushbuf_mem);
                chan->pushbuf_mem = NULL;
        }

        /* Destroy objects belonging to the channel */
        nouveau_gpuobj_channel_takedown(chan);

        nouveau_notifier_takedown_channel(chan);

        dev_priv->fifos[chan->id] = NULL;
        dev_priv->fifo_alloc_count--;
        drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
}

/* cleans up all the fifos belonging to file_priv */
void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;
        int i;

        DRM_DEBUG("clearing FIFO enables from file_priv\n");
        for (i = 0; i < engine->fifo.channels; i++) {
                struct nouveau_channel *chan = dev_priv->fifos[i];

                if (chan && chan->file_priv == file_priv)
                        nouveau_fifo_free(chan);
        }
}

int
nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv,
                   int channel)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;

        if (channel >= engine->fifo.channels)
                return 0;
        if (dev_priv->fifos[channel] == NULL)
                return 0;
        return (dev_priv->fifos[channel]->file_priv == file_priv);
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/

static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_channel_alloc *init = data;
        struct drm_map_list *entry;
        struct nouveau_channel *chan;
        struct mem_block *pushbuf;
        int res;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

        if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
                return -EINVAL;

        pushbuf = nouveau_fifo_user_pushbuf_alloc(dev);
        if (!pushbuf)
                return -ENOMEM;

        res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf,
                                 init->fb_ctxdma_handle,
                                 init->tt_ctxdma_handle);
        if (res)
                return res;
        init->channel  = chan->id;
        init->put_base = chan->pushbuf_base;

        /* make the fifo available to user space */
        /* first, the fifo control regs */
        init->ctrl = dev_priv->mmio->offset + chan->user;
        init->ctrl_size = chan->user_size;
        res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS,
                         0, &chan->regs);
        if (res != 0)
                return res;

        entry = drm_find_matching_map(dev, chan->regs);
        if (!entry)
                return -EINVAL;
        init->ctrl = entry->user_token;

        /* pass back FIFO map info to the caller */
        init->cmdbuf      = chan->pushbuf_mem->map_handle;
        init->cmdbuf_size = chan->pushbuf_mem->size;

        /* and the notifier block */
        init->notifier      = chan->notifier_block->map_handle;
        init->notifier_size = chan->notifier_block->size;

        return 0;
}

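/* Illustrative sketch (not from this tree): a client would normally reach
 * the ioctl above through libdrm, along these lines:
 *
 *      struct drm_nouveau_channel_alloc init = { 0 };
 *      init.fb_ctxdma_handle = vram_handle;    -- handles chosen by the client
 *      init.tt_ctxdma_handle = gart_handle;
 *      if (drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
 *                              &init, sizeof(init)) == 0) {
 *              -- init.ctrl, init.cmdbuf and init.notifier are map handles
 *              -- to hand to drmMap() before starting command submission
 *      }
 *
 * vram_handle/gart_handle are placeholder names for whatever ctxdma handles
 * the client has created; the field names follow struct
 * drm_nouveau_channel_alloc as used in the function above.
 */
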
static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv)
{
        struct drm_nouveau_channel_free *cfree = data;
        struct nouveau_channel *chan;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
        NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);

        nouveau_fifo_free(chan);
        return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
        DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_TILE, nouveau_ioctl_mem_tile, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_SUSPEND, nouveau_ioctl_suspend, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_RESUME, nouveau_ioctl_resume, DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);