shared-core/nouveau_fifo.c
/*
 * Copyright 2005-2006 Stephane Marchesin
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"


/* returns the size of fifo context */
int nouveau_fifo_ctx_size(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->card_type >= NV_40)
                return 128;
        else if (dev_priv->card_type >= NV_17)
                return 64;
        else
                return 32;
}

/***********************************
 * functions doing the actual work
 ***********************************/

/* see nv_xaa.c : NVResetGraphics
 * memory mapped by nv_driver.c : NVMapMem
 * see nv_driver.c : NVPreInit
 */

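/* Point PFIFO at the instance-memory structures it relies on: RAMHT (the
 * object hash table), RAMRO (the runout area) and RAMFC (per-channel FIFO
 * context).  The RAMFC register layout differs per generation, hence the
 * switch on card_type below.
 */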
static int nouveau_fifo_instmem_configure(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

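        /* RAMHT: base offset (>>8), size encoded as (ramht_bits - 9) and
         * the search stride (0x03 == 128 bytes), packed into a single reg.
         */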
        NV_WRITE(NV03_PFIFO_RAMHT,
                        (0x03 << 24) /* search 128 */ |
                        ((dev_priv->ramht_bits - 9) << 16) |
                        (dev_priv->ramht_offset >> 8)
                        );

        NV_WRITE(NV03_PFIFO_RAMRO, dev_priv->ramro_offset>>8);

        switch (dev_priv->card_type)
        {
                case NV_50:
                case NV_40:
                        switch (dev_priv->chipset) {
                        case 0x47:
                        case 0x49:
                        case 0x4b:
                                NV_WRITE(0x2230, 1);
                                break;
                        default:
                                break;
                        }
                        NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
                        break;
                case NV_44:
                        NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
                                        (2 << 16));
                        break;
                case NV_30:
                case NV_20:
                case NV_17:
                        NV_WRITE(NV03_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) |
                                        (1 << 16) /* 64-byte entries */);
                        /* XXX the nvidia blob sets bits 18, 21, 23 for nv20 & nv30 */
                        break;
                case NV_11:
                case NV_10:
                case NV_04:
                        NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
                        break;
        }

        return 0;
}

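/* Master PFIFO setup: reset the unit via PMC, unmask its interrupts, point
 * it at instance memory and program sane defaults with all channels in PIO
 * mode.  Per-channel state is set up later in nouveau_fifo_alloc().
 */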
int nouveau_fifo_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int ret;

        NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
                        ~NV_PMC_ENABLE_PFIFO);
        NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
                         NV_PMC_ENABLE_PFIFO);

        /* Enable PFIFO error reporting */
        NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
        NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);

        NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);

        ret = nouveau_fifo_instmem_configure(dev);
        if (ret) {
                DRM_ERROR("Failed to configure instance memory\n");
                return ret;
        }

        /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */

        DRM_DEBUG("Setting defaults for remaining PFIFO regs\n");

        /* All channels into PIO mode */
        NV_WRITE(NV04_PFIFO_MODE, 0x00000000);

        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
        /* Channel 0 active, PIO mode */
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, 0x00000000);
        /* PUT and GET to 0 */
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, 0x00000000);
        /* No cmdbuf object */
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, 0x00000000);
        NV_WRITE(NV03_PFIFO_CACHE0_PUSH0, 0x00000000);
        NV_WRITE(NV03_PFIFO_CACHE0_PULL0, 0x00000000);
        NV_WRITE(NV04_PFIFO_SIZE, 0x0000FFFF);
        NV_WRITE(NV04_PFIFO_CACHE1_HASH, 0x0000FFFF);
        NV_WRITE(NV04_PFIFO_CACHE0_PULL1, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, 0x00000000);

        NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_112_BYTES |
                                      NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
                                      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |
#ifdef __BIG_ENDIAN
                                      NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
                                      0x00000000);

        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);

        /* FIXME on NV04 */
        if (dev_priv->card_type >= NV_10) {
                NV_WRITE(NV10_PGRAPH_CTX_USER, 0x0);
                NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
                if (dev_priv->card_type >= NV_40)
                        NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x00002001);
                else
                        NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10110000);
        } else {
                NV_WRITE(NV04_PGRAPH_CTX_USER, 0x0);
                NV_WRITE(NV04_PFIFO_DELAY_0, 0xff /* retrycount*/ );
                NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10110000);
        }

        NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x001fffff);
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
        return 0;
}

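/* Build the DMA context object the GPU uses to reach a channel's push
 * buffer.  The target (GART, PCI or VRAM) depends on where the buffer was
 * allocated; NV04 gets a PCI-access workaround inherited from the original
 * DDX.
 */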
static int
nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct mem_block *pb = chan->pushbuf_mem;
        struct nouveau_gpuobj *pushbuf = NULL;
        int ret;

        if (pb->flags & NOUVEAU_MEM_AGP) {
                ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size,
                                                  NV_DMA_ACCESS_RO,
                                                  &pushbuf,
                                                  &chan->pushbuf_base);
        } else
        if (pb->flags & NOUVEAU_MEM_PCI) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pb->start, pb->size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_PCI_NONLINEAR,
                                             &pushbuf);
                chan->pushbuf_base = 0;
        } else if (dev_priv->card_type != NV_04) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pb->start, pb->size,
                                             NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_VIDMEM, &pushbuf);
                chan->pushbuf_base = 0;
        } else {
                /* NV04 cmdbuf hack, from the original ddx.. not sure of its
                 * exact reason for existing :)  PCI access to the cmdbuf in
                 * VRAM.
                 */
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pb->start +
                                               drm_get_resource_start(dev, 1),
                                             pb->size, NV_DMA_ACCESS_RO,
                                             NV_DMA_TARGET_PCI, &pushbuf);
                chan->pushbuf_base = 0;
        }

        if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf,
                                          &chan->pushbuf))) {
                DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
                if (pushbuf != dev_priv->gart_info.sg_ctxdma)
                        nouveau_gpuobj_del(dev, &pushbuf);
                return ret;
        }

        return 0;
}

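/* Allocate the user-visible DMA push buffer, falling back to sane defaults
 * (framebuffer location, minimum size) when the config doesn't specify them.
 */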
static struct mem_block *
nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_config *config = &dev_priv->config;
        struct mem_block *pb;
        int pb_min_size = max(NV03_FIFO_SIZE, PAGE_SIZE);

        /* Defaults for unconfigured values */
        if (!config->cmdbuf.location)
                config->cmdbuf.location = NOUVEAU_MEM_FB;
        if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size)
                config->cmdbuf.size = pb_min_size;

        pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
                               config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
                               (struct drm_file *)-2);
        if (!pb)
                DRM_ERROR("Couldn't allocate DMA push buffer.\n");

        return pb;
}

/* allocates and initializes a fifo for user space consumption */
int
nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
                   struct drm_file *file_priv, struct mem_block *pushbuf,
                   uint32_t vram_handle, uint32_t tt_handle)
{
        int ret;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;
        struct nouveau_channel *chan;
        int channel;

        /*
         * Alright, here is the full story.
         * Nvidia cards have multiple hw fifo contexts (praise them for that,
         * no complicated crash-prone context switches).
         * We allocate a new context for each app and let it write to it
         * directly (woo, full userspace command submission!).
         * When there are no more contexts, you lose.
         */
        for (channel = 0; channel < engine->fifo.channels; channel++) {
                if (dev_priv->fifos[channel] == NULL)
                        break;
        }

        /* no more fifos. you lose. */
        if (channel == engine->fifo.channels)
                return -EINVAL;

        dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel),
                                              DRM_MEM_DRIVER);
        if (!dev_priv->fifos[channel])
                return -ENOMEM;
        dev_priv->fifo_alloc_count++;
        chan = dev_priv->fifos[channel];
        chan->dev = dev;
        chan->id = channel;
        chan->file_priv = file_priv;
        chan->pushbuf_mem = pushbuf;

        DRM_INFO("Allocating FIFO number %d\n", channel);

        /* Locate channel's user control regs */
        if (dev_priv->card_type < NV_40) {
                chan->user = NV03_USER(channel);
                chan->user_size = NV03_USER_SIZE;
                chan->put = NV03_USER_DMA_PUT(channel);
                chan->get = NV03_USER_DMA_GET(channel);
                chan->ref_cnt = NV03_USER_REF_CNT(channel);
        } else
        if (dev_priv->card_type < NV_50) {
                chan->user = NV40_USER(channel);
                chan->user_size = NV40_USER_SIZE;
                chan->put = NV40_USER_DMA_PUT(channel);
                chan->get = NV40_USER_DMA_GET(channel);
                chan->ref_cnt = NV40_USER_REF_CNT(channel);
        } else {
                chan->user = NV50_USER(channel);
                chan->user_size = NV50_USER_SIZE;
                chan->put = NV50_USER_DMA_PUT(channel);
                chan->get = NV50_USER_DMA_GET(channel);
                chan->ref_cnt = NV50_USER_REF_CNT(channel);
        }

        /* Allocate space for per-channel fixed notifier memory */
        ret = nouveau_notifier_init_channel(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* Setup channel's default objects */
        ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* Create a dma object for the push buffer */
        ret = nouveau_fifo_pushbuf_ctxdma_init(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        nouveau_wait_for_idle(dev);

        /* disable the fifo caches */
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);

        /* Create a graphics context for new channel */
        ret = engine->graph.create_context(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* Construct the initial RAMFC for the new channel */
        ret = engine->fifo.create_context(chan);
        if (ret) {
                nouveau_fifo_free(chan);
                return ret;
        }

        /* Set up the channel's default get/put values
         * XXX: quite possibly extremely pointless..
         */
        NV_WRITE(chan->get, chan->pushbuf_base);
        NV_WRITE(chan->put, chan->pushbuf_base);

        /* If this is the first channel, set up PFIFO ourselves.  Otherwise,
         * the GPU will handle this when it switches contexts.
         */
        if (dev_priv->fifo_alloc_count == 1) {
                ret = engine->fifo.load_context(chan);
                if (ret) {
                        nouveau_fifo_free(chan);
                        return ret;
                }

                ret = engine->graph.load_context(chan);
                if (ret) {
                        nouveau_fifo_free(chan);
                        return ret;
                }
        }

        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
                 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);

        /* reenable the fifo caches */
        NV_WRITE(NV03_PFIFO_CACHES, 1);

        DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
        *chan_ret = chan;
        return 0;
}

/* stops a fifo and releases all resources associated with it */
void nouveau_fifo_free(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;
        uint64_t t_start;

        DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id);

        /* Disable channel switching.  If this channel isn't the one
         * currently active and it still has pending commands, re-enable
         * switching so they can drain.
         * We really should do a manual context switch here, but I'm
         * not sure I trust our ability to do this reliably yet..
         */
        NV_WRITE(NV03_PFIFO_CACHES, 0);
        if (engine->fifo.channel_id(dev) != chan->id &&
            NV_READ(chan->get) != NV_READ(chan->put)) {
                NV_WRITE(NV03_PFIFO_CACHES, 1);
        }

        /* Give the channel a chance to idle, wait 2s (hopefully) */
        t_start = engine->timer.read(dev);
        while (NV_READ(chan->get) != NV_READ(chan->put) ||
               NV_READ(NV03_PFIFO_CACHE1_GET) !=
               NV_READ(NV03_PFIFO_CACHE1_PUT)) {
                if (engine->timer.read(dev) - t_start > 2000000000ULL) {
                        DRM_ERROR("Failed to idle channel %d before destroy. "
                                  "Prepare for strangeness..\n", chan->id);
                        break;
                }
        }

        /*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched
         *     from CACHE1 too?
         */

        /* disable the fifo caches */
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1));
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000000);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);

        /* stop the fifo, otherwise it could be running and
         * it will crash when removing gpu objects
         *XXX: from real-world evidence, absolutely useless..
         */
        NV_WRITE(chan->get, chan->pushbuf_base);
        NV_WRITE(chan->put, chan->pushbuf_base);

        // FIXME XXX needs more code

        engine->fifo.destroy_context(chan);

        /* Cleanup PGRAPH state */
        engine->graph.destroy_context(chan);

        /* reenable the fifo caches */
        NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
                 NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
        NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
        NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
        NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);

        /* Deallocate push buffer */
        nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
        if (chan->pushbuf_mem) {
                nouveau_mem_free(dev, chan->pushbuf_mem);
                chan->pushbuf_mem = NULL;
        }

        /* Destroy objects belonging to the channel */
        nouveau_gpuobj_channel_takedown(chan);

        nouveau_notifier_takedown_channel(chan);

        dev_priv->fifos[chan->id] = NULL;
        dev_priv->fifo_alloc_count--;
        drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
}

/* cleans up all the fifos belonging to file_priv */
void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;
        int i;

        DRM_DEBUG("clearing FIFO enables from file_priv\n");
        for (i = 0; i < engine->fifo.channels; i++) {
                struct nouveau_channel *chan = dev_priv->fifos[i];

                if (chan && chan->file_priv == file_priv)
                        nouveau_fifo_free(chan);
        }
}

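/* returns nonzero if the given channel exists and is owned by file_priv */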
int
nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv,
                   int channel)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->Engine;

        if (channel >= engine->fifo.channels)
                return 0;
        if (dev_priv->fifos[channel] == NULL)
                return 0;
        return (dev_priv->fifos[channel]->file_priv == file_priv);
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/

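/* DRM_NOUVEAU_CHANNEL_ALLOC: allocate a push buffer and a channel for the
 * calling client, then hand back the control-register, command buffer and
 * notifier mappings it needs to drive the FIFO directly.
 */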
static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_nouveau_channel_alloc *init = data;
        struct drm_map_list *entry;
        struct nouveau_channel *chan;
        struct mem_block *pushbuf;
        int res;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;

        if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
                return -EINVAL;

        pushbuf = nouveau_fifo_user_pushbuf_alloc(dev);
        if (!pushbuf)
                return -ENOMEM;

        res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf,
                                 init->fb_ctxdma_handle,
                                 init->tt_ctxdma_handle);
        if (res)
                return res;
        init->channel  = chan->id;
        init->put_base = chan->pushbuf_base;

        /* make the fifo available to user space */
        /* first, the fifo control regs */
        init->ctrl = dev_priv->mmio->offset + chan->user;
        init->ctrl_size = chan->user_size;
        res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS,
                         0, &chan->regs);
        if (res != 0)
                return res;

        entry = drm_find_matching_map(dev, chan->regs);
        if (!entry)
                return -EINVAL;
        init->ctrl = entry->user_token;

        /* pass back FIFO map info to the caller */
        init->cmdbuf      = chan->pushbuf_mem->map_handle;
        init->cmdbuf_size = chan->pushbuf_mem->size;

        /* and the notifier block */
        init->notifier      = chan->notifier_block->map_handle;
        init->notifier_size = chan->notifier_block->size;

        return 0;
}

static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
                                   struct drm_file *file_priv)
{
        struct drm_nouveau_channel_free *cfree = data;
        struct nouveau_channel *chan;

        NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
        NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);

        nouveau_fifo_free(chan);
        return 0;
}

/***********************************
 * finally, the ioctl table
 ***********************************/

struct drm_ioctl_desc nouveau_ioctls[] = {
        DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
};

int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
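
/* Illustrative sketch (not part of the driver): roughly how a user-space
 * client might invoke DRM_NOUVEAU_CHANNEL_ALLOC.  It assumes the generic
 * libdrm drmCommandWriteRead() helper and an already-open DRM fd; the
 * ctxdma handles would come from earlier object-allocation ioctls, so the
 * names fd, vram_handle and gart_handle below are placeholders.
 *
 *      struct drm_nouveau_channel_alloc init = { 0 };
 *
 *      init.fb_ctxdma_handle = vram_handle;
 *      init.tt_ctxdma_handle = gart_handle;
 *      if (drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
 *                              &init, sizeof(init)) == 0) {
 *              // init.channel, init.ctrl, init.cmdbuf and init.notifier
 *              // now describe the new FIFO; the ctrl and cmdbuf regions
 *              // can then be mapped (e.g. via drmMap()) and driven directly.
 *      }
 */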