shared-core/nouveau_mem.c

/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Whitwell <keith@tungstengraphics.com>
 */


#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
#include "nouveau_drv.h"

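/* Carve the range [start, start+size) out of free block 'p', splitting any
 * unused space before/after it off into new free blocks, and mark the result
 * as owned by 'filp'.  If a split allocation fails, the remaining (larger)
 * block is handed out instead.
 */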
static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64_t size,
                DRMFILE filp)
{
        /* Maybe cut off the start of an existing block */
        if (start > p->start) {
                struct mem_block *newblock =
                        drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
                if (!newblock)
                        goto out;
                newblock->start = start;
                newblock->size = p->size - (start - p->start);
                newblock->filp = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size -= newblock->size;
                p = newblock;
        }

        /* Maybe cut off the end of an existing block */
        if (size < p->size) {
                struct mem_block *newblock =
                        drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
                if (!newblock)
                        goto out;
                newblock->start = start + size;
                newblock->size = p->size - size;
                newblock->filp = NULL;
                newblock->next = p->next;
                newblock->prev = p;
                p->next->prev = newblock;
                p->next = newblock;
                p->size = size;
        }

out:
        /* Our block is in the middle */
        p->filp = filp;
        return p;
}

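/* First-fit search of the heap for a free block large enough to hold 'size'
 * bytes aligned to 2^align2.
 */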
static struct mem_block *alloc_block(struct mem_block *heap, uint64_t size,
                int align2, DRMFILE filp)
{
        struct mem_block *p;
        uint64_t mask = (1ULL << align2) - 1;

        if (!heap)
                return NULL;

        list_for_each(p, heap) {
                uint64_t start = (p->start + mask) & ~mask;
                if (p->filp == 0 && start + size <= p->start + p->size)
                        return split_block(p, start, size, filp);
        }

        return NULL;
}

static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
{
        struct mem_block *p;

        list_for_each(p, heap)
                if (p->start == start)
                        return p;

        return NULL;
}

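/* Mark a block free again and coalesce it with any free neighbours. */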
static void free_block(struct mem_block *p)
{
        p->filp = NULL;

        /* Assumes a single contiguous range.  Needs a special filp in
         * 'heap' to stop it being subsumed.
         */
        if (p->next->filp == 0) {
                struct mem_block *q = p->next;
                p->size += q->size;
                p->next = q->next;
                p->next->prev = p;
                drm_free(q, sizeof(*q), DRM_MEM_BUFS);
        }

        if (p->prev->filp == 0) {
                struct mem_block *q = p->prev;
                q->size += p->size;
                q->next = p->next;
                q->next->prev = q;
                drm_free(p, sizeof(*q), DRM_MEM_BUFS);
        }
}

/* Initialize.  How to check for an uninitialized heap?
 */
static int init_heap(struct mem_block **heap, uint64_t start, uint64_t size)
{
        struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);

        if (!blocks)
                return DRM_ERR(ENOMEM);

        *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
        if (!*heap) {
                drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
                return DRM_ERR(ENOMEM);
        }

        blocks->start = start;
        blocks->size = size;
        blocks->filp = NULL;
        blocks->next = blocks->prev = *heap;

        memset(*heap, 0, sizeof(**heap));
        (*heap)->filp = (DRMFILE) - 1;
        (*heap)->next = (*heap)->prev = blocks;
        return 0;
}

/*
 * Free all blocks associated with the releasing filp
 */
void nouveau_mem_release(DRMFILE filp, struct mem_block *heap)
{
        struct mem_block *p;

        if (!heap || !heap->next)
                return;

        list_for_each(p, heap) {
                if (p->filp == filp)
                        p->filp = NULL;
        }

        /* Assumes a single contiguous range.  Needs a special filp in
         * 'heap' to stop it being subsumed.
         */
        list_for_each(p, heap) {
                while ((p->filp == 0) && (p->next->filp == 0) && (p->next != heap)) {
                        struct mem_block *q = p->next;
                        p->size += q->size;
                        p->next = q->next;
                        p->next->prev = p;
                        drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
                }
        }
}

/*
 * Cleanup everything
 */
static void nouveau_mem_takedown(struct mem_block **heap)
{
        struct mem_block *p;

        if (!*heap)
                return;

        for (p = (*heap)->next; p != *heap;) {
                struct mem_block *q = p;
                p = p->next;
                drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
        }

        drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
        *heap = NULL;
}

void nouveau_mem_close(struct drm_device *dev)
{
        drm_nouveau_private_t *dev_priv = dev->dev_private;
        nouveau_mem_takedown(&dev_priv->agp_heap);
        nouveau_mem_takedown(&dev_priv->fb_heap);
}

/* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
        drm_nouveau_private_t *dev_priv = dev->dev_private;
        switch (dev_priv->card_type)
        {
                case NV_03:
                        switch (NV_READ(NV03_BOOT_0) & NV03_BOOT_0_RAM_AMOUNT)
                        {
                                case NV03_BOOT_0_RAM_AMOUNT_8MB:
                                case NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM:
                                        return 8*1024*1024;
                                case NV03_BOOT_0_RAM_AMOUNT_4MB:
                                        return 4*1024*1024;
                                case NV03_BOOT_0_RAM_AMOUNT_2MB:
                                        return 2*1024*1024;
                        }
                        break;
                case NV_04:
                case NV_05:
                        if (NV_READ(NV03_BOOT_0) & 0x00000100) {
                                return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024;
                        } else
                        switch (NV_READ(NV03_BOOT_0) & NV03_BOOT_0_RAM_AMOUNT)
                        {
                                case NV04_BOOT_0_RAM_AMOUNT_32MB:
                                        return 32*1024*1024;
                                case NV04_BOOT_0_RAM_AMOUNT_16MB:
                                        return 16*1024*1024;
                                case NV04_BOOT_0_RAM_AMOUNT_8MB:
                                        return 8*1024*1024;
                                case NV04_BOOT_0_RAM_AMOUNT_4MB:
                                        return 4*1024*1024;
                        }
                        break;
                case NV_10:
                case NV_17:
                case NV_20:
                case NV_30:
                case NV_40:
                case NV_44:
                case NV_50:
                default:
                        // XXX won't work on BSD because of pci_read_config_dword
                        if (dev_priv->flags & NV_NFORCE) {
                                uint32_t mem;
                                pci_read_config_dword(dev->pdev, 0x7C, &mem);
                                return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
                        } else if (dev_priv->flags & NV_NFORCE2) {
                                uint32_t mem;
                                pci_read_config_dword(dev->pdev, 0x84, &mem);
                                return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
                        } else {
                                uint64_t mem;
                                mem = (NV_READ(NV04_FIFO_DATA) & NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
                                return mem*1024*1024;
                        }
                        break;
        }

        DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n");
        return 0;
}

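/* Set up the AGP aperture (if present) and the VRAM heaps used to satisfy
 * memory allocations, and add a write-combining MTRR over the framebuffer.
 */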
int nouveau_mem_init(struct drm_device *dev)
{
        drm_nouveau_private_t *dev_priv = dev->dev_private;
        uint32_t fb_size;
        dev_priv->agp_phys = 0;
        dev_priv->fb_phys = 0;

        /* init AGP */
        dev_priv->agp_heap = NULL;
        if (drm_device_is_agp(dev))
        {
                int err;
                drm_agp_info_t info;
                drm_agp_mode_t mode;
                drm_agp_buffer_t agp_req;
                drm_agp_binding_t bind_req;

                err = drm_agp_acquire(dev);
                if (err) {
                        DRM_ERROR("Unable to acquire AGP: %d\n", err);
                        goto no_agp;
                }

                err = drm_agp_info(dev, &info);
                if (err) {
                        DRM_ERROR("Unable to get AGP info: %d\n", err);
                        goto no_agp;
                }

                /* see agp.h for the AGPSTAT_* modes available */
                mode.mode = info.mode;
                err = drm_agp_enable(dev, mode);
                if (err) {
                        DRM_ERROR("Unable to enable AGP: %d\n", err);
                        goto no_agp;
                }

                agp_req.size = info.aperture_size;
                agp_req.type = 0;
                err = drm_agp_alloc(dev, &agp_req);
                if (err) {
                        DRM_ERROR("Unable to alloc AGP: %d\n", err);
                        goto no_agp;
                }

                bind_req.handle = agp_req.handle;
                bind_req.offset = 0;
                err = drm_agp_bind(dev, &bind_req);
                if (err) {
                        DRM_ERROR("Unable to bind AGP: %d\n", err);
                        goto no_agp;
                }

                if (init_heap(&dev_priv->agp_heap, info.aperture_base, info.aperture_size))
                        goto no_agp;

                dev_priv->agp_phys              = info.aperture_base;
                dev_priv->agp_available_size    = info.aperture_size;
        }
no_agp:

        /* setup a mtrr over the FB */
        dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
                                         nouveau_mem_fb_amount(dev),
                                         DRM_MTRR_WC);

        /* Init FB */
        dev_priv->fb_phys = drm_get_resource_start(dev, 1);
        fb_size = nouveau_mem_fb_amount(dev);
        /* On at least NV40, RAMIN is actually at the end of vram.
         * We don't want to allocate this... */
        if (dev_priv->card_type >= NV_40)
                fb_size -= dev_priv->ramin_size;
        dev_priv->fb_available_size = fb_size;
        DRM_DEBUG("Available VRAM: %dKiB\n", fb_size >> 10);

        if (fb_size > 256*1024*1024) {
                /* On cards with > 256Mb, you can't map everything.
                 * So we create a second FB heap for that type of memory */
                if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev, 1), 256*1024*1024))
                        return DRM_ERR(ENOMEM);
                if (init_heap(&dev_priv->fb_nomap_heap, drm_get_resource_start(dev, 1) + 256*1024*1024, fb_size - 256*1024*1024))
                        return DRM_ERR(ENOMEM);
        } else {
                if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev, 1), fb_size))
                        return DRM_ERR(ENOMEM);
                dev_priv->fb_nomap_heap = NULL;
        }

        return 0;
}

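/* Allocate a block of AGP or VRAM memory according to the placement flags,
 * and optionally register a map for it so it can be mapped into userspace.
 */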
struct mem_block *nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp)
{
        struct mem_block *block;
        int type;
        drm_nouveau_private_t *dev_priv = dev->dev_private;

        /*
         * Make things easier on ourselves: all allocations are page-aligned.
         * We need that to map allocated regions into the user space
         */
        if (alignment < PAGE_SHIFT)
                alignment = PAGE_SHIFT;

        /*
         * Warn about 0 sized allocations, but let it go through. It'll return 1 page
         */
        if (size == 0)
                DRM_INFO("warning : 0 byte allocation\n");

        /*
         * Keep alloc size a multiple of the page size to keep drm_addmap() happy
         */
        if (size & (~PAGE_MASK))
                size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;

        if (flags & NOUVEAU_MEM_AGP) {
                type = NOUVEAU_MEM_AGP;
                block = alloc_block(dev_priv->agp_heap, size, alignment, filp);
                if (block) goto alloc_ok;
        }
        if (flags & (NOUVEAU_MEM_FB | NOUVEAU_MEM_FB_ACCEPTABLE)) {
                type = NOUVEAU_MEM_FB;
                if (!(flags & NOUVEAU_MEM_MAPPED)) {
                        block = alloc_block(dev_priv->fb_nomap_heap, size, alignment, filp);
                        if (block) goto alloc_ok;
                }
                block = alloc_block(dev_priv->fb_heap, size, alignment, filp);
                if (block) goto alloc_ok;
        }
        if (flags & NOUVEAU_MEM_AGP_ACCEPTABLE) {
                type = NOUVEAU_MEM_AGP;
                block = alloc_block(dev_priv->agp_heap, size, alignment, filp);
                if (block) goto alloc_ok;
        }

        return NULL;

alloc_ok:
        block->flags = type;

        if (flags & NOUVEAU_MEM_MAPPED)
        {
                int ret;
                block->flags |= NOUVEAU_MEM_MAPPED;

                if (type == NOUVEAU_MEM_AGP)
                        ret = drm_addmap(dev, block->start - dev->agp->base, block->size,
                                        _DRM_AGP, 0, &block->map);
                else
                        ret = drm_addmap(dev, block->start, block->size,
                                        _DRM_FRAME_BUFFER, 0, &block->map);
                if (ret) {
                        free_block(block);
                        return NULL;
                }
        }

        DRM_INFO("allocated 0x%llx\n", block->start);
        return block;
}

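/* Release a block returned by nouveau_mem_alloc(), dropping its map if one
 * was created for it.
 */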
void nouveau_mem_free(struct drm_device *dev, struct mem_block *block)
{
        DRM_INFO("freeing 0x%llx\n", block->start);
        if (block->flags & NOUVEAU_MEM_MAPPED)
                drm_rmmap(dev, block->map);
        free_block(block);
}

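/* Decide how much RAMIN (instance memory) to reserve for this chipset, then
 * clear it, preserving the BIOS image in the first 64KiB.
 */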
static void
nouveau_instmem_determine_amount(struct drm_device *dev)
{
        drm_nouveau_private_t *dev_priv = dev->dev_private;
        int i;

        /* Figure out how much instance memory we need */
        switch (dev_priv->card_type) {
        case NV_40:
                /* We'll want more instance memory than this on some NV4x cards.
                 * There's a 16MB aperture to play with that maps onto the end
                 * of vram.  For now, only reserve a small piece until we know
                 * more about what each chipset requires.
                 */
                dev_priv->ramin_size = (1*1024*1024);
                break;
        default:
                /*XXX: what *are* the limits on <NV40 cards?, and does RAMIN
                 *     exist in vram on those cards as well?
                 */
                dev_priv->ramin_size = (512*1024);
                break;
        }
        DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_size >> 10);

        /* Clear all of it, except the BIOS image that's in the first 64KiB */
        if (dev_priv->ramin) {
                for (i = (64*1024); i < dev_priv->ramin_size; i += 4)
                        DRM_WRITE32(dev_priv->ramin, i, 0x00000000);
        } else {
                for (i = (64*1024); i < dev_priv->ramin_size; i += 4)
                        DRM_WRITE32(dev_priv->mmio, NV_RAMIN + i, 0x00000000);
        }
}

static void
nouveau_instmem_configure_fixed_tables(struct drm_device *dev)
{
        drm_nouveau_private_t *dev_priv = dev->dev_private;

        /* FIFO hash table (RAMHT)
         *   use 4k hash table at RAMIN+0x10000
         *   TODO: extend the hash table
         */
        dev_priv->ramht_offset = 0x10000;
        dev_priv->ramht_bits   = 9;
        dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
        DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
                                                  dev_priv->ramht_size);

        /* FIFO runout table (RAMRO) - 512 bytes at 0x11200 */
        dev_priv->ramro_offset = 0x11200;
        dev_priv->ramro_size   = 512;
        DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
                                                  dev_priv->ramro_size);

        /* FIFO context table (RAMFC)
         *   NV40  : Not sure exactly how to position RAMFC on some cards,
         *           0x30002 seems to position it at RAMIN+0x20000 on these
         *           cards.  RAMFC is 4kb (32 fifos, 128byte entries).
         *   Others: Position RAMFC at RAMIN+0x11400
         */
        switch (dev_priv->card_type)
        {
                case NV_50:
                case NV_40:
                case NV_44:
                        dev_priv->ramfc_offset = 0x20000;
                        dev_priv->ramfc_size   = nouveau_fifo_number(dev) *
                                nouveau_fifo_ctx_size(dev);
                        break;
                case NV_30:
                case NV_20:
                case NV_17:
                case NV_10:
                case NV_04:
                case NV_03:
                default:
                        dev_priv->ramfc_offset = 0x11400;
                        dev_priv->ramfc_size   = nouveau_fifo_number(dev) *
                                nouveau_fifo_ctx_size(dev);
                        break;
        }
        DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
                                                  dev_priv->ramfc_size);
}

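/* Size RAMIN, lay out the fixed tables and create a heap over the rest of
 * the instance memory for dynamic allocations.
 */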
int nouveau_instmem_init(struct drm_device *dev)
{
        drm_nouveau_private_t *dev_priv = dev->dev_private;
        uint32_t offset;
        int ret = 0;

        nouveau_instmem_determine_amount(dev);
        nouveau_instmem_configure_fixed_tables(dev);

        /* Create a heap to manage RAMIN allocations, we don't allocate
         * the space that was reserved for RAMHT/FC/RO.
         */
        offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
        ret = init_heap(&dev_priv->ramin_heap,
                        offset, dev_priv->ramin_size - offset);
        if (ret) {
                dev_priv->ramin_heap = NULL;
                DRM_ERROR("Failed to init RAMIN heap\n");
        }

        return ret;
}

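/* Allocate a block from the RAMIN heap for kernel-internal use. */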
struct mem_block *nouveau_instmem_alloc(struct drm_device *dev,
                                        uint32_t size, uint32_t align)
{
        drm_nouveau_private_t *dev_priv = dev->dev_private;
        struct mem_block *block;

        if (!dev_priv->ramin_heap) {
                DRM_ERROR("instmem alloc called without init\n");
                return NULL;
        }

        block = alloc_block(dev_priv->ramin_heap, size, align, (DRMFILE)-2);
        if (block) {
                block->flags = NOUVEAU_MEM_INSTANCE;
                DRM_DEBUG("instance(size=%d, align=%d) alloc'd at 0x%08x\n",
                                size, (1 << align), (uint32_t)block->start);
        }

        return block;
}

void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block)
{
        if (dev && block) {
                free_block(block);
        }
}

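/* Read/write a 32-bit word of instance memory, going through the RAMIN
 * mapping when one exists, and through the NV_RAMIN window in the MMIO
 * space otherwise.
 */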
uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv,
                             struct mem_block *mem, int index)
{
        uint32_t ofs = (uint32_t)mem->start + (index << 2);

        if (dev_priv->ramin) {
#if defined(__powerpc__)
                return in_be32((void __iomem *)(dev_priv->ramin)->handle + ofs);
#else
                return DRM_READ32(dev_priv->ramin, ofs);
#endif
        } else {
                return NV_READ(NV_RAMIN + ofs);
        }
}

void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv,
                         struct mem_block *mem, int index, uint32_t val)
{
        uint32_t ofs = (uint32_t)mem->start + (index << 2);

        if (dev_priv->ramin) {
#if defined(__powerpc__)
                out_be32((void __iomem *)(dev_priv->ramin)->handle + ofs, val);
#else
                DRM_WRITE32(dev_priv->ramin, ofs, val);
#endif
        } else {
                NV_WRITE(NV_RAMIN + ofs, val);
        }
}

/*
 * Ioctls
 */

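/* Userspace ioctl: allocate a memory block and return its offset and flags. */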
int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_nouveau_private_t *dev_priv = dev->dev_private;
        drm_nouveau_mem_alloc_t alloc;
        struct mem_block *block;

        if (!dev_priv) {
                DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
                return DRM_ERR(EINVAL);
        }

        DRM_COPY_FROM_USER_IOCTL(alloc, (drm_nouveau_mem_alloc_t __user *) data,
                                 sizeof(alloc));

        block = nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, filp);
        if (!block)
                return DRM_ERR(ENOMEM);
        alloc.region_offset = block->start;
        alloc.flags = block->flags;

        DRM_COPY_TO_USER_IOCTL((drm_nouveau_mem_alloc_t __user *) data, alloc, sizeof(alloc));

        return 0;
}

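/* Userspace ioctl: free a block; the offset must name a block owned by the
 * calling file handle.
 */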
int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_nouveau_private_t *dev_priv = dev->dev_private;
        drm_nouveau_mem_free_t memfree;
        struct mem_block *block;

        DRM_COPY_FROM_USER_IOCTL(memfree, (drm_nouveau_mem_free_t __user *) data,
                                 sizeof(memfree));

        block = NULL;
        if (memfree.flags & NOUVEAU_MEM_FB)
                block = find_block(dev_priv->fb_heap, memfree.region_offset);
        else if (memfree.flags & NOUVEAU_MEM_AGP)
                block = find_block(dev_priv->agp_heap, memfree.region_offset);
        if (!block)
                return DRM_ERR(EFAULT);
        if (block->filp != filp)
                return DRM_ERR(EPERM);

        nouveau_mem_free(dev, block);
        return 0;
}