OSDN Git Service

Fix vblank enable/disable callbacks
[android-x86/external-libdrm.git] / shared-core / nouveau_mem.c
1 /*
2  * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
3  * Copyright 2005 Stephane Marchesin
4  *
5  * The Weather Channel (TM) funded Tungsten Graphics to develop the
6  * initial release of the Radeon 8500 driver under the XFree86 license.
7  * This notice must be preserved.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  *
28  * Authors:
29  *    Keith Whitwell <keith@tungstengraphics.com>
30  */
31
32
33 #include "drmP.h"
34 #include "drm.h"
35 #include "drm_sarea.h"
36 #include "nouveau_drv.h"
37
38 static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64_t size,
39                 struct drm_file *file_priv)
40 {
41         /* Maybe cut off the start of an existing block */
42         if (start > p->start) {
43                 struct mem_block *newblock =
44                         drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
45                 if (!newblock)
46                         goto out;
47                 newblock->start = start;
48                 newblock->size = p->size - (start - p->start);
49                 newblock->file_priv = NULL;
50                 newblock->next = p->next;
51                 newblock->prev = p;
52                 p->next->prev = newblock;
53                 p->next = newblock;
54                 p->size -= newblock->size;
55                 p = newblock;
56         }
57
58         /* Maybe cut off the end of an existing block */
59         if (size < p->size) {
60                 struct mem_block *newblock =
61                         drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
62                 if (!newblock)
63                         goto out;
64                 newblock->start = start + size;
65                 newblock->size = p->size - size;
66                 newblock->file_priv = NULL;
67                 newblock->next = p->next;
68                 newblock->prev = p;
69                 p->next->prev = newblock;
70                 p->next = newblock;
71                 p->size = size;
72         }
73
74 out:
75         /* Our block is in the middle */
76         p->file_priv = file_priv;
77         return p;
78 }
79
80 struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap,
81                                           uint64_t size,
82                                           int align2,
83                                           struct drm_file *file_priv)
84 {
85         struct mem_block *p;
86         uint64_t mask = (1 << align2) - 1;
87
88         if (!heap)
89                 return NULL;
90
91         list_for_each(p, heap) {
92                 uint64_t start = (p->start + mask) & ~mask;
93                 if (p->file_priv == 0 && start + size <= p->start + p->size)
94                         return split_block(p, start, size, file_priv);
95         }
96
97         return NULL;
98 }
99
100 static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
101 {
102         struct mem_block *p;
103
104         list_for_each(p, heap)
105                 if (p->start == start)
106                         return p;
107
108         return NULL;
109 }
110
111 void nouveau_mem_free_block(struct mem_block *p)
112 {
113         p->file_priv = NULL;
114
115         /* Assumes a single contiguous range.  Needs a special file_priv in
116          * 'heap' to stop it being subsumed.
117          */
118         if (p->next->file_priv == 0) {
119                 struct mem_block *q = p->next;
120                 p->size += q->size;
121                 p->next = q->next;
122                 p->next->prev = p;
123                 drm_free(q, sizeof(*q), DRM_MEM_BUFS);
124         }
125
126         if (p->prev->file_priv == 0) {
127                 struct mem_block *q = p->prev;
128                 q->size += p->size;
129                 q->next = p->next;
130                 q->next->prev = q;
131                 drm_free(p, sizeof(*q), DRM_MEM_BUFS);
132         }
133 }
134
135 /* Initialize.  How to check for an uninitialized heap?
136  */
137 int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
138                           uint64_t size)
139 {
140         struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
141
142         if (!blocks)
143                 return -ENOMEM;
144
145         *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
146         if (!*heap) {
147                 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
148                 return -ENOMEM;
149         }
150
151         blocks->start = start;
152         blocks->size = size;
153         blocks->file_priv = NULL;
154         blocks->next = blocks->prev = *heap;
155
156         memset(*heap, 0, sizeof(**heap));
157         (*heap)->file_priv = (struct drm_file *) - 1;
158         (*heap)->next = (*heap)->prev = blocks;
159         return 0;
160 }
161
162 /*
163  * Free all blocks associated with the releasing file_priv
164  */
165 void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
166 {
167         struct mem_block *p;
168
169         if (!heap || !heap->next)
170                 return;
171
172         list_for_each(p, heap) {
173                 if (p->file_priv == file_priv)
174                         p->file_priv = NULL;
175         }
176
177         /* Assumes a single contiguous range.  Needs a special file_priv in
178          * 'heap' to stop it being subsumed.
179          */
180         list_for_each(p, heap) {
181                 while ((p->file_priv == 0) && (p->next->file_priv == 0) &&
182                        (p->next!=heap)) {
183                         struct mem_block *q = p->next;
184                         p->size += q->size;
185                         p->next = q->next;
186                         p->next->prev = p;
187                         drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
188                 }
189         }
190 }
191
192 /*
193  * Cleanup everything
194  */
195 void nouveau_mem_takedown(struct mem_block **heap)
196 {
197         struct mem_block *p;
198
199         if (!*heap)
200                 return;
201
202         for (p = (*heap)->next; p != *heap;) {
203                 struct mem_block *q = p;
204                 p = p->next;
205                 drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
206         }
207
208         drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
209         *heap = NULL;
210 }
211
212 void nouveau_mem_close(struct drm_device *dev)
213 {
214         struct drm_nouveau_private *dev_priv = dev->dev_private;
215
216         nouveau_mem_takedown(&dev_priv->agp_heap);
217         nouveau_mem_takedown(&dev_priv->fb_heap);
218         if (dev_priv->pci_heap)
219                 nouveau_mem_takedown(&dev_priv->pci_heap);
220 }
221
222 /*XXX won't work on BSD because of pci_read_config_dword */
223 static uint32_t
224 nouveau_mem_fb_amount_igp(struct drm_device *dev)
225 {
226 #if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
227         struct drm_nouveau_private *dev_priv = dev->dev_private;
228         struct pci_dev *bridge;
229         uint32_t mem;
230
231         bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1));
232         if (!bridge) {
233                 DRM_ERROR("no bridge device\n");
234                 return 0;
235         }
236
237         if (dev_priv->flags&NV_NFORCE) {
238                 pci_read_config_dword(bridge, 0x7C, &mem);
239                 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
240         } else
241         if(dev_priv->flags&NV_NFORCE2) {
242                 pci_read_config_dword(bridge, 0x84, &mem);
243                 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
244         }
245
246         DRM_ERROR("impossible!\n");
247 #else
248         DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n");
249 #endif
250
251         return 0;
252 }
253
/* returns the amount of FB ram in bytes */
/* Detect installed video RAM by decoding chipset-specific registers.
 * NV04/NV05 read NV03_BOOT_0; newer parts read NV04_FIFO_DATA, except
 * nForce/nForce2 IGPs which query the host bridge instead.  Returns 0
 * (with an error message) when the size cannot be determined. */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv=dev->dev_private;
        switch(dev_priv->card_type)
        {
                case NV_04:
                case NV_05:
                        /* NOTE(review): bit 8 of BOOT_0 presumably selects an
                         * alternate RAM-size encoding — confirm against NV04
                         * register documentation. */
                        if (NV_READ(NV03_BOOT_0) & 0x00000100) {
                                return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024;
                        } else
                        switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT)
                        {
                                case NV04_BOOT_0_RAM_AMOUNT_32MB:
                                        return 32*1024*1024;
                                case NV04_BOOT_0_RAM_AMOUNT_16MB:
                                        return 16*1024*1024;
                                case NV04_BOOT_0_RAM_AMOUNT_8MB:
                                        return 8*1024*1024;
                                case NV04_BOOT_0_RAM_AMOUNT_4MB:
                                        return 4*1024*1024;
                        }
                        /* Unrecognized encoding: fall through to the
                         * error report below. */
                        break;
                case NV_10:
                case NV_11:
                case NV_17:
                case NV_20:
                case NV_30:
                case NV_40:
                case NV_44:
                case NV_50:
                default:
                        /* IGPs carve VRAM out of system memory; the size
                         * lives in bridge config space, not in NV registers. */
                        if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
                                return nouveau_mem_fb_amount_igp(dev);
                        } else {
                                uint64_t mem;

                                /* RAM amount in MiB, per the FIFO_DATA
                                 * mask/shift definitions. */
                                mem = (NV_READ(NV04_FIFO_DATA) &
                                       NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
                                      NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
                                return mem*1024*1024;
                        }
                        break;
        }

        DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n");
        return 0;
}
302
303 static int
304 nouveau_mem_init_agp(struct drm_device *dev, int ttm)
305 {
306         struct drm_nouveau_private *dev_priv = dev->dev_private;
307         struct drm_agp_info info;
308         struct drm_agp_mode mode;
309         int ret;
310
311         ret = drm_agp_acquire(dev);
312         if (ret) {
313                 DRM_ERROR("Unable to acquire AGP: %d\n", ret);
314                 return ret;
315         }
316
317         ret = drm_agp_info(dev, &info);
318         if (ret) {
319                 DRM_ERROR("Unable to get AGP info: %d\n", ret);
320                 return ret;
321         }
322
323         /* see agp.h for the AGPSTAT_* modes available */
324         mode.mode = info.mode;
325         ret = drm_agp_enable(dev, mode);
326         if (ret) {
327                 DRM_ERROR("Unable to enable AGP: %d\n", ret);
328                 return ret;
329         }
330
331         if (!ttm) {
332                 struct drm_agp_buffer agp_req;
333                 struct drm_agp_binding bind_req;
334
335                 agp_req.size = info.aperture_size;
336                 agp_req.type = 0;
337                 ret = drm_agp_alloc(dev, &agp_req);
338                 if (ret) {
339                         DRM_ERROR("Unable to alloc AGP: %d\n", ret);
340                                 return ret;
341                 }
342
343                 bind_req.handle = agp_req.handle;
344                 bind_req.offset = 0;
345                 ret = drm_agp_bind(dev, &bind_req);
346                 if (ret) {
347                         DRM_ERROR("Unable to bind AGP: %d\n", ret);
348                         return ret;
349                 }
350         }
351
352         dev_priv->gart_info.type        = NOUVEAU_GART_AGP;
353         dev_priv->gart_info.aper_base   = info.aperture_base;
354         dev_priv->gart_info.aper_size   = info.aperture_size;
355         return 0;
356 }
357
358 #define HACK_OLD_MM
359 int
360 nouveau_mem_init_ttm(struct drm_device *dev)
361 {
362         struct drm_nouveau_private *dev_priv = dev->dev_private;
363         uint32_t vram_size, bar1_size;
364         int ret;
365
366         dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
367         dev_priv->fb_phys = drm_get_resource_start(dev,1);
368         dev_priv->gart_info.type = NOUVEAU_GART_NONE;
369
370         drm_bo_driver_init(dev);
371
372         /* non-mappable vram */
373         dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
374         dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
375         vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
376         bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
377         if (bar1_size < vram_size) {
378                 if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
379                                           bar1_size, vram_size - bar1_size))) {
380                         DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
381                         return ret;
382                 }
383                 vram_size = bar1_size;
384         }
385
386         /* mappable vram */
387 #ifdef HACK_OLD_MM
388         vram_size /= 4;
389 #endif
390         if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size))) {
391                 DRM_ERROR("Failed VRAM mm init: %d\n", ret);
392                 return ret;
393         }
394
395         /* GART */
396 #ifndef __powerpc__
397         if (drm_device_is_agp(dev) && dev->agp) {
398                 if ((ret = nouveau_mem_init_agp(dev, 1)))
399                         DRM_ERROR("Error initialising AGP: %d\n", ret);
400         }
401 #endif
402
403         if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
404                 if ((ret = nouveau_sgdma_init(dev)))
405                         DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
406         }
407
408         if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
409                                   dev_priv->gart_info.aper_size >>
410                                   PAGE_SHIFT))) {
411                 DRM_ERROR("Failed TT mm init: %d\n", ret);
412                 return ret;
413         }
414
415 #ifdef HACK_OLD_MM
416         vram_size <<= PAGE_SHIFT;
417         DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
418         if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3))
419                 return -ENOMEM;
420 #endif
421
422         return 0;
423 }
424
/* Legacy (non-TTM) memory init: set up an MTRR over the framebuffer,
 * create the FB heap(s), then bring up a GART backend (AGP, NV50 SGDMA,
 * or a 16MB PCI scatter-gather fallback) and its heap.  GART failures
 * are reported but not fatal; only FB heap allocation failures abort. */
int nouveau_mem_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t fb_size;
        int ret = 0;

        dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
        dev_priv->fb_phys = 0;
        dev_priv->gart_info.type = NOUVEAU_GART_NONE;

        /* setup a mtrr over the FB */
        /* NOTE(review): fb_mtrr is never removed on the error returns
         * below — confirm teardown handles it. */
        dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
                                         nouveau_mem_fb_amount(dev),
                                         DRM_MTRR_WC);

        /* Init FB */
        dev_priv->fb_phys=drm_get_resource_start(dev,1);
        fb_size = nouveau_mem_fb_amount(dev);
        /* On at least NV40, RAMIN is actually at the end of vram.
         * We don't want to allocate this... */
        if (dev_priv->card_type >= NV_40)
                fb_size -= dev_priv->ramin_rsvd_vram;
        dev_priv->fb_available_size = fb_size;
        DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10);

        if (fb_size>256*1024*1024) {
                /* On cards with > 256Mb, you can't map everything.
                 * So we create a second FB heap for that type of memory */
                if (nouveau_mem_init_heap(&dev_priv->fb_heap,
                                          0, 256*1024*1024))
                        return -ENOMEM;
                if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
                                          256*1024*1024, fb_size-256*1024*1024))
                        return -ENOMEM;
        } else {
                /* Everything fits in the mappable window: one heap only. */
                if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size))
                        return -ENOMEM;
                dev_priv->fb_nomap_heap=NULL;
        }

#ifndef __powerpc__
        /* Init AGP / NV50 PCIEGART */
        if (drm_device_is_agp(dev) && dev->agp) {
                if ((ret = nouveau_mem_init_agp(dev, 0)))
                        DRM_ERROR("Error initialising AGP: %d\n", ret);
        }
#endif

        /*Note: this is *not* just NV50 code, but only used on NV50 for now */
        if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
            dev_priv->card_type >= NV_50) {
                ret = nouveau_sgdma_init(dev);
                if (!ret) {
                        ret = nouveau_sgdma_nottm_hack_init(dev);
                        if (ret)
                                nouveau_sgdma_takedown(dev);
                }

                if (ret)
                        DRM_ERROR("Error initialising SG DMA: %d\n", ret);
        }

        /* A GART backend came up: give it a heap; on heap failure tear
         * the SGDMA backend back down (AGP is left as-is). */
        if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
                if (nouveau_mem_init_heap(&dev_priv->agp_heap,
                                          0, dev_priv->gart_info.aper_size)) {
                        if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
                                nouveau_sgdma_nottm_hack_takedown(dev);
                                nouveau_sgdma_takedown(dev);
                        }
                }
        }

        /* NV04-NV40 PCIEGART */
        if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
                struct drm_scatter_gather sgreq;

                DRM_DEBUG("Allocating sg memory for PCI DMA\n");
                sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone

                if (drm_sg_alloc(dev, &sgreq)) {
                        DRM_ERROR("Unable to allocate %ldMB of scatter-gather"
                                  " pages for PCI DMA!",sgreq.size>>20);
                } else {
                        if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
                                                  dev->sg->pages * PAGE_SIZE)) {
                                DRM_ERROR("Unable to initialize pci_heap!");
                        }
                }
        }

        return 0;
}
517
/* Allocate a page-aligned block from the heap(s) selected by 'flags',
 * trying FB, AGP and PCI in that fixed preference order (mandatory
 * flags first, then the *_ACCEPTABLE fallbacks).  With
 * NOUVEAU_MEM_MAPPED set, the block is also added to the DRM map list
 * so userspace can mmap it via block->map_handle.  Returns the block,
 * or NULL when no heap can satisfy the request or mapping fails. */
struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
                                    uint64_t size, int flags,
                                    struct drm_file *file_priv)
{
        struct mem_block *block;
        int type;
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*
         * Make things easier on ourselves: all allocations are page-aligned.
         * We need that to map allocated regions into the user space
         */
        if (alignment < PAGE_SHIFT)
                alignment = PAGE_SHIFT;

        /*
         * Warn about 0 sized allocations, but let it go through. It'll return 1 page
         */
        if (size == 0)
                DRM_INFO("warning : 0 byte allocation\n");

        /*
         * Keep alloc size a multiple of the page size to keep drm_addmap() happy
         */
        if (size & (~PAGE_MASK))
                size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;


/* Each helper macro tries one heap and jumps to alloc_ok on success,
 * recording the memory type in 'type' for the mapping step below. */
#define NOUVEAU_MEM_ALLOC_AGP {\
                type=NOUVEAU_MEM_AGP;\
                block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
                                                alignment, file_priv); \
                if (block) goto alloc_ok;\
                }

#define NOUVEAU_MEM_ALLOC_PCI {\
                type = NOUVEAU_MEM_PCI;\
                block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \
                                                alignment, file_priv); \
                if ( block ) goto alloc_ok;\
                }

/* FB prefers the non-mappable heap when the caller does not need a
 * CPU mapping, keeping the mappable window free. */
#define NOUVEAU_MEM_ALLOC_FB {\
                type=NOUVEAU_MEM_FB;\
                if (!(flags&NOUVEAU_MEM_MAPPED)) {\
                        block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
                                                        size, alignment, \
                                                        file_priv); \
                        if (block) goto alloc_ok;\
                }\
                block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
                                                alignment, file_priv);\
                if (block) goto alloc_ok;\
                }


        /* Required placements first, then acceptable fallbacks. */
        if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB
        if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP
        if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI
        if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB
        if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP
        if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI


        return NULL;

alloc_ok:
        block->flags=type;

        if (flags&NOUVEAU_MEM_MAPPED)
        {
                struct drm_map_list *entry;
                int ret = 0;
                block->flags|=NOUVEAU_MEM_MAPPED;

                /* AGP heap offsets are aperture-relative; when the GART
                 * is actually SGDMA-backed the map must be of
                 * scatter-gather type instead. */
                if (type == NOUVEAU_MEM_AGP) {
                        if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
                        ret = drm_addmap(dev, block->start, block->size,
                                         _DRM_AGP, 0, &block->map);
                        else
                        ret = drm_addmap(dev, block->start, block->size,
                                         _DRM_SCATTER_GATHER, 0, &block->map);
                }
                else if (type == NOUVEAU_MEM_FB)
                        /* FB offsets are VRAM-relative; add the physical
                         * base for the CPU-visible address. */
                        ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
                                         block->size, _DRM_FRAME_BUFFER,
                                         0, &block->map);
                else if (type == NOUVEAU_MEM_PCI)
                        ret = drm_addmap(dev, block->start, block->size,
                                         _DRM_SCATTER_GATHER, 0, &block->map);

                if (ret) {
                        nouveau_mem_free_block(block);
                        return NULL;
                }

                /* Recover the user token for mmap from the map list. */
                entry = drm_find_matching_map(dev, block->map);
                if (!entry) {
                        nouveau_mem_free_block(block);
                        return NULL;
                }
                block->map_handle = entry->user_token;
        }

        DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n", block->size, block->start, block->flags);
        return block;
}
625
626 void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
627 {
628         DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);
629         if (block->flags&NOUVEAU_MEM_MAPPED)
630                 drm_rmmap(dev, block->map);
631         nouveau_mem_free_block(block);
632 }
633
634 /*
635  * Ioctls
636  */
637
638 int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
639 {
640         struct drm_nouveau_mem_alloc *alloc = data;
641         struct mem_block *block;
642
643         NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
644
645         block=nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
646                                 alloc->flags, file_priv);
647         if (!block)
648                 return -ENOMEM;
649         alloc->map_handle=block->map_handle;
650         alloc->offset=block->start;
651         alloc->flags=block->flags;
652
653         return 0;
654 }
655
656 int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
657 {
658         struct drm_nouveau_private *dev_priv = dev->dev_private;
659         struct drm_nouveau_mem_free *memfree = data;
660         struct mem_block *block;
661
662         NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
663
664         block=NULL;
665         if (memfree->flags & NOUVEAU_MEM_FB)
666                 block = find_block(dev_priv->fb_heap, memfree->offset);
667         else if (memfree->flags & NOUVEAU_MEM_AGP)
668                 block = find_block(dev_priv->agp_heap, memfree->offset);
669         else if (memfree->flags & NOUVEAU_MEM_PCI)
670                 block = find_block(dev_priv->pci_heap, memfree->offset);
671         if (!block)
672                 return -EFAULT;
673         if (block->file_priv != file_priv)
674                 return -EPERM;
675
676         nouveau_mem_free(dev, block);
677         return 0;
678 }