OSDN Git Service

nouveau: NV9x cards exist as well.
[android-x86/external-libdrm.git] / shared-core / nouveau_mem.c
1 /*
2  * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
3  * Copyright 2005 Stephane Marchesin
4  *
5  * The Weather Channel (TM) funded Tungsten Graphics to develop the
6  * initial release of the Radeon 8500 driver under the XFree86 license.
7  * This notice must be preserved.
8  *
9  * Permission is hereby granted, free of charge, to any person obtaining a
10  * copy of this software and associated documentation files (the "Software"),
11  * to deal in the Software without restriction, including without limitation
12  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
13  * and/or sell copies of the Software, and to permit persons to whom the
14  * Software is furnished to do so, subject to the following conditions:
15  *
16  * The above copyright notice and this permission notice (including the next
17  * paragraph) shall be included in all copies or substantial portions of the
18  * Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
23  * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
24  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
25  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
26  * DEALINGS IN THE SOFTWARE.
27  *
28  * Authors:
29  *    Keith Whitwell <keith@tungstengraphics.com>
30  */
31
32
33 #include "drmP.h"
34 #include "drm.h"
35 #include "drm_sarea.h"
36 #include "nouveau_drv.h"
37
38 static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64_t size,
39                 struct drm_file *file_priv)
40 {
41         /* Maybe cut off the start of an existing block */
42         if (start > p->start) {
43                 struct mem_block *newblock =
44                         drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
45                 if (!newblock)
46                         goto out;
47                 newblock->start = start;
48                 newblock->size = p->size - (start - p->start);
49                 newblock->file_priv = NULL;
50                 newblock->next = p->next;
51                 newblock->prev = p;
52                 p->next->prev = newblock;
53                 p->next = newblock;
54                 p->size -= newblock->size;
55                 p = newblock;
56         }
57
58         /* Maybe cut off the end of an existing block */
59         if (size < p->size) {
60                 struct mem_block *newblock =
61                         drm_alloc(sizeof(*newblock), DRM_MEM_BUFS);
62                 if (!newblock)
63                         goto out;
64                 newblock->start = start + size;
65                 newblock->size = p->size - size;
66                 newblock->file_priv = NULL;
67                 newblock->next = p->next;
68                 newblock->prev = p;
69                 p->next->prev = newblock;
70                 p->next = newblock;
71                 p->size = size;
72         }
73
74 out:
75         /* Our block is in the middle */
76         p->file_priv = file_priv;
77         return p;
78 }
79
80 struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap,
81                                           uint64_t size,
82                                           int align2,
83                                           struct drm_file *file_priv)
84 {
85         struct mem_block *p;
86         uint64_t mask = (1 << align2) - 1;
87
88         if (!heap)
89                 return NULL;
90
91         list_for_each(p, heap) {
92                 uint64_t start = (p->start + mask) & ~mask;
93                 if (p->file_priv == 0 && start + size <= p->start + p->size)
94                         return split_block(p, start, size, file_priv);
95         }
96
97         return NULL;
98 }
99
100 static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
101 {
102         struct mem_block *p;
103
104         list_for_each(p, heap)
105                 if (p->start == start)
106                         return p;
107
108         return NULL;
109 }
110
111 void nouveau_mem_free_block(struct mem_block *p)
112 {
113         p->file_priv = NULL;
114
115         /* Assumes a single contiguous range.  Needs a special file_priv in
116          * 'heap' to stop it being subsumed.
117          */
118         if (p->next->file_priv == 0) {
119                 struct mem_block *q = p->next;
120                 p->size += q->size;
121                 p->next = q->next;
122                 p->next->prev = p;
123                 drm_free(q, sizeof(*q), DRM_MEM_BUFS);
124         }
125
126         if (p->prev->file_priv == 0) {
127                 struct mem_block *q = p->prev;
128                 q->size += p->size;
129                 q->next = p->next;
130                 q->next->prev = q;
131                 drm_free(p, sizeof(*q), DRM_MEM_BUFS);
132         }
133 }
134
135 /* Initialize.  How to check for an uninitialized heap?
136  */
137 int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
138                           uint64_t size)
139 {
140         struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
141
142         if (!blocks)
143                 return -ENOMEM;
144
145         *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
146         if (!*heap) {
147                 drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
148                 return -ENOMEM;
149         }
150
151         blocks->start = start;
152         blocks->size = size;
153         blocks->file_priv = NULL;
154         blocks->next = blocks->prev = *heap;
155
156         memset(*heap, 0, sizeof(**heap));
157         (*heap)->file_priv = (struct drm_file *) - 1;
158         (*heap)->next = (*heap)->prev = blocks;
159         return 0;
160 }
161
162 /*
163  * Free all blocks associated with the releasing file_priv
164  */
165 void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
166 {
167         struct mem_block *p;
168
169         if (!heap || !heap->next)
170                 return;
171
172         list_for_each(p, heap) {
173                 if (p->file_priv == file_priv)
174                         p->file_priv = NULL;
175         }
176
177         /* Assumes a single contiguous range.  Needs a special file_priv in
178          * 'heap' to stop it being subsumed.
179          */
180         list_for_each(p, heap) {
181                 while ((p->file_priv == 0) && (p->next->file_priv == 0) &&
182                        (p->next!=heap)) {
183                         struct mem_block *q = p->next;
184                         p->size += q->size;
185                         p->next = q->next;
186                         p->next->prev = p;
187                         drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
188                 }
189         }
190 }
191
192 /*
193  * Cleanup everything
194  */
195 void nouveau_mem_takedown(struct mem_block **heap)
196 {
197         struct mem_block *p;
198
199         if (!*heap)
200                 return;
201
202         for (p = (*heap)->next; p != *heap;) {
203                 struct mem_block *q = p;
204                 p = p->next;
205                 drm_free(q, sizeof(*q), DRM_MEM_DRIVER);
206         }
207
208         drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER);
209         *heap = NULL;
210 }
211
212 void nouveau_mem_close(struct drm_device *dev)
213 {
214         struct drm_nouveau_private *dev_priv = dev->dev_private;
215
216         nouveau_mem_takedown(&dev_priv->agp_heap);
217         nouveau_mem_takedown(&dev_priv->fb_heap);
218         if (dev_priv->pci_heap)
219                 nouveau_mem_takedown(&dev_priv->pci_heap);
220 }
221
222 /*XXX won't work on BSD because of pci_read_config_dword */
223 static uint32_t
224 nouveau_mem_fb_amount_igp(struct drm_device *dev)
225 {
226 #if defined(__linux__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
227         struct drm_nouveau_private *dev_priv = dev->dev_private;
228         struct pci_dev *bridge;
229         uint32_t mem;
230
231         bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1));
232         if (!bridge) {
233                 DRM_ERROR("no bridge device\n");
234                 return 0;
235         }
236
237         if (dev_priv->flags&NV_NFORCE) {
238                 pci_read_config_dword(bridge, 0x7C, &mem);
239                 return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
240         } else
241         if(dev_priv->flags&NV_NFORCE2) {
242                 pci_read_config_dword(bridge, 0x84, &mem);
243                 return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
244         }
245
246         DRM_ERROR("impossible!\n");
247 #else
248         DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n");
249 #endif
250
251         return 0;
252 }
253
/* returns the amount of FB ram in bytes */
/* Decodes the VRAM size from hardware straps/registers:
 *   NV04/NV05: NV03_BOOT_0 RAM_AMOUNT field,
 *   NV10+:     NV04_FIFO_DATA RAM_AMOUNT_MB field (size in MiB),
 *   nForce/nForce2 IGPs: bridge config space via
 *   nouveau_mem_fb_amount_igp().
 * Returns 0 if the size could not be determined.
 */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv=dev->dev_private;
	switch(dev_priv->card_type)
	{
		case NV_04:
		case NV_05:
			/* Bit 8 set: newer encoding, size in bits 15:12
			 * (in 2MiB steps, offset by 2MiB).  Otherwise fall
			 * back to the discrete RAM_AMOUNT codes below. */
			if (NV_READ(NV03_BOOT_0) & 0x00000100) {
				return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024;
			} else
			switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT)
			{
				case NV04_BOOT_0_RAM_AMOUNT_32MB:
					return 32*1024*1024;
				case NV04_BOOT_0_RAM_AMOUNT_16MB:
					return 16*1024*1024;
				case NV04_BOOT_0_RAM_AMOUNT_8MB:
					return 8*1024*1024;
				case NV04_BOOT_0_RAM_AMOUNT_4MB:
					return 4*1024*1024;
			}
			break;
		case NV_10:
		case NV_11:
		case NV_17:
		case NV_20:
		case NV_30:
		case NV_40:
		case NV_44:
		case NV_50:
		default:
			/* IGPs have no dedicated VRAM; ask the bridge. */
			if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
				return nouveau_mem_fb_amount_igp(dev);
			} else {
				uint64_t mem;

				mem = (NV_READ(NV04_FIFO_DATA) &
				       NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
				      NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
				return mem*1024*1024;
			}
			break;
	}

	DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n");
	return 0;
}
302
/* Reset the card's AGP interface via its PCI config-space mirrors in
 * PBUS.  Sequence: save NV_1/NV_19, disable busmastering and the
 * SBA/AGP enable bits, power-cycle PGRAPH if it was running, then
 * restore the saved values.  The write order here is deliberate --
 * do not reorder these register accesses.
 */
static void nouveau_mem_reset_agp(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t saved_pci_nv_1, saved_pci_nv_19, pmc_enable;

	saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1);
	saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19);

	/* clear busmaster bit */
	NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
	/* clear SBA and AGP bits */
	NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);

	/* power cycle pgraph, if enabled */
	pmc_enable = NV_READ(NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		NV_WRITE(NV03_PMC_ENABLE, pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
				NV_PMC_ENABLE_PGRAPH);
	}

	/* and restore (gives effect of resetting AGP) */
	NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19);
	NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
}
328
329 static int
330 nouveau_mem_init_agp(struct drm_device *dev, int ttm)
331 {
332         struct drm_nouveau_private *dev_priv = dev->dev_private;
333         struct drm_agp_info info;
334         struct drm_agp_mode mode;
335         int ret;
336
337         nouveau_mem_reset_agp(dev);
338
339         ret = drm_agp_acquire(dev);
340         if (ret) {
341                 DRM_ERROR("Unable to acquire AGP: %d\n", ret);
342                 return ret;
343         }
344
345         ret = drm_agp_info(dev, &info);
346         if (ret) {
347                 DRM_ERROR("Unable to get AGP info: %d\n", ret);
348                 return ret;
349         }
350
351         /* see agp.h for the AGPSTAT_* modes available */
352         mode.mode = info.mode;
353         ret = drm_agp_enable(dev, mode);
354         if (ret) {
355                 DRM_ERROR("Unable to enable AGP: %d\n", ret);
356                 return ret;
357         }
358
359         if (!ttm) {
360                 struct drm_agp_buffer agp_req;
361                 struct drm_agp_binding bind_req;
362
363                 agp_req.size = info.aperture_size;
364                 agp_req.type = 0;
365                 ret = drm_agp_alloc(dev, &agp_req);
366                 if (ret) {
367                         DRM_ERROR("Unable to alloc AGP: %d\n", ret);
368                                 return ret;
369                 }
370
371                 bind_req.handle = agp_req.handle;
372                 bind_req.offset = 0;
373                 ret = drm_agp_bind(dev, &bind_req);
374                 if (ret) {
375                         DRM_ERROR("Unable to bind AGP: %d\n", ret);
376                         return ret;
377                 }
378         }
379
380         dev_priv->gart_info.type        = NOUVEAU_GART_AGP;
381         dev_priv->gart_info.aper_base   = info.aperture_base;
382         dev_priv->gart_info.aper_size   = info.aperture_size;
383         return 0;
384 }
385
386 #define HACK_OLD_MM
387 int
388 nouveau_mem_init_ttm(struct drm_device *dev)
389 {
390         struct drm_nouveau_private *dev_priv = dev->dev_private;
391         uint32_t vram_size, bar1_size;
392         int ret;
393
394         dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
395         dev_priv->fb_phys = drm_get_resource_start(dev,1);
396         dev_priv->gart_info.type = NOUVEAU_GART_NONE;
397
398         drm_bo_driver_init(dev);
399
400         /* non-mappable vram */
401         dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
402         dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
403         vram_size = dev_priv->fb_available_size >> PAGE_SHIFT;
404         bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT;
405         if (bar1_size < vram_size) {
406                 if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
407                                           bar1_size, vram_size - bar1_size, 1))) {
408                         DRM_ERROR("Failed PRIV0 mm init: %d\n", ret);
409                         return ret;
410                 }
411                 vram_size = bar1_size;
412         }
413
414         /* mappable vram */
415 #ifdef HACK_OLD_MM
416         vram_size /= 4;
417 #endif
418         if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size, 1))) {
419                 DRM_ERROR("Failed VRAM mm init: %d\n", ret);
420                 return ret;
421         }
422
423         /* GART */
424 #if !defined(__powerpc__) && !defined(__ia64__)
425         if (drm_device_is_agp(dev) && dev->agp) {
426                 if ((ret = nouveau_mem_init_agp(dev, 1)))
427                         DRM_ERROR("Error initialising AGP: %d\n", ret);
428         }
429 #endif
430
431         if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
432                 if ((ret = nouveau_sgdma_init(dev)))
433                         DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret);
434         }
435
436         if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0,
437                                   dev_priv->gart_info.aper_size >>
438                                   PAGE_SHIFT, 1))) {
439                 DRM_ERROR("Failed TT mm init: %d\n", ret);
440                 return ret;
441         }
442
443 #ifdef HACK_OLD_MM
444         vram_size <<= PAGE_SHIFT;
445         DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10);
446         if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3))
447                 return -ENOMEM;
448 #endif
449
450         return 0;
451 }
452
/* Old-MM memory initialisation: sets up a write-combining MTRR over
 * the framebuffer, the FB heap(s), AGP or NV50 SGDMA for the GART,
 * a PCI scatter-gather fallback for NV04-NV40, and on G8x the shared
 * VRAM page table.  Returns 0 on success or a negative errno.
 * NOTE(review): the MTRR added below is not removed on the -ENOMEM
 * error paths -- presumably cleaned up at driver unload; confirm.
 */
int nouveau_mem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t fb_size;
	int ret = 0;

	dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
	dev_priv->fb_phys = 0;
	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

	/* setup a mtrr over the FB */
	dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
					 nouveau_mem_fb_amount(dev),
					 DRM_MTRR_WC);

	/* Init FB */
	dev_priv->fb_phys=drm_get_resource_start(dev,1);
	fb_size = nouveau_mem_fb_amount(dev);
	/* On G80, limit VRAM to 512MiB temporarily due to limits in how
	 * we handle VRAM page tables.
	 */
	if (dev_priv->card_type >= NV_50 && fb_size > (512 * 1024 * 1024))
		fb_size = (512 * 1024 * 1024);
	/* On at least NV40, RAMIN is actually at the end of vram.
	 * We don't want to allocate this... */
	if (dev_priv->card_type >= NV_40)
		fb_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_available_size = fb_size;
	DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10);

	if (fb_size>256*1024*1024) {
		/* On cards with > 256Mb, you can't map everything.
		 * So we create a second FB heap for that type of memory */
		if (nouveau_mem_init_heap(&dev_priv->fb_heap,
					  0, 256*1024*1024))
			return -ENOMEM;
		if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
					  256*1024*1024, fb_size-256*1024*1024))
			return -ENOMEM;
	} else {
		if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size))
			return -ENOMEM;
		dev_priv->fb_nomap_heap=NULL;
	}

#if !defined(__powerpc__) && !defined(__ia64__)
	/* Init AGP / NV50 PCIEGART */
	if (drm_device_is_agp(dev) && dev->agp) {
		/* Failure is non-fatal: we fall through to SGDMA/PCI. */
		if ((ret = nouveau_mem_init_agp(dev, 0)))
			DRM_ERROR("Error initialising AGP: %d\n", ret);
	}
#endif

	/* Note: this is *not* just NV50 code, but only used on NV50 for now */
	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
	    dev_priv->card_type >= NV_50) {
		ret = nouveau_sgdma_init(dev);
		if (!ret) {
			ret = nouveau_sgdma_nottm_hack_init(dev);
			if (ret)
				nouveau_sgdma_takedown(dev);
		}

		if (ret)
			DRM_ERROR("Error initialising SG DMA: %d\n", ret);
	}

	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		/* If the GART heap can't be created, tear the SGDMA
		 * backend back down; AGP allocs just become unavailable. */
		if (nouveau_mem_init_heap(&dev_priv->agp_heap,
					  0, dev_priv->gart_info.aper_size)) {
			if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
				nouveau_sgdma_nottm_hack_takedown(dev);
				nouveau_sgdma_takedown(dev);
			}
		}
	}

	/* NV04-NV40 PCIEGART */
	if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
		struct drm_scatter_gather sgreq;

		DRM_DEBUG("Allocating sg memory for PCI DMA\n");
		sgreq.size = 16 << 20; /* 16MiB of PCI scatter-gather zone */

		if (drm_sg_alloc(dev, &sgreq)) {
			DRM_ERROR("Unable to allocate %ldMB of scatter-gather"
				  " pages for PCI DMA!",sgreq.size>>20);
		} else {
			if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
						  dev->sg->pages * PAGE_SIZE)) {
				DRM_ERROR("Unable to initialize pci_heap!");
			}
		}
	}

	/* G8x: Allocate shared page table to map real VRAM pages into */
	if (dev_priv->card_type >= NV_50) {
		/* 512MiB of VRAM / 64KiB pages, 8 bytes per PTE. */
		unsigned size = ((512 * 1024 * 1024) / 65536) * 8;

		ret = nouveau_gpuobj_new(dev, NULL, size, 0,
					 NVOBJ_FLAG_ZERO_ALLOC |
					 NVOBJ_FLAG_ALLOW_NO_REFS,
					 &dev_priv->vm_vram_pt);
		if (ret) {
			DRM_ERROR("Error creating VRAM page table: %d\n", ret);
			return ret;
		}
	}


	return 0;
}
565
566 struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
567                                     uint64_t size, int flags,
568                                     struct drm_file *file_priv)
569 {
570         struct mem_block *block;
571         int type;
572         struct drm_nouveau_private *dev_priv = dev->dev_private;
573
574         /*
575          * Make things easier on ourselves: all allocations are page-aligned.
576          * We need that to map allocated regions into the user space
577          */
578         if (alignment < PAGE_SHIFT)
579                 alignment = PAGE_SHIFT;
580
581         /* Align allocation sizes to 64KiB blocks on G8x.  We use a 64KiB
582          * page size in the GPU VM.
583          */
584         if (flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50)
585                 size = (size + (64 * 1024)) & ~((64 * 1024) - 1);
586
587         /*
588          * Warn about 0 sized allocations, but let it go through. It'll return 1 page
589          */
590         if (size == 0)
591                 DRM_INFO("warning : 0 byte allocation\n");
592
593         /*
594          * Keep alloc size a multiple of the page size to keep drm_addmap() happy
595          */
596         if (size & (~PAGE_MASK))
597                 size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;
598
599
600 #define NOUVEAU_MEM_ALLOC_AGP {\
601                 type=NOUVEAU_MEM_AGP;\
602                 block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
603                                                 alignment, file_priv); \
604                 if (block) goto alloc_ok;\
605                 }
606
607 #define NOUVEAU_MEM_ALLOC_PCI {\
608                 type = NOUVEAU_MEM_PCI;\
609                 block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \
610                                                 alignment, file_priv); \
611                 if ( block ) goto alloc_ok;\
612                 }
613
614 #define NOUVEAU_MEM_ALLOC_FB {\
615                 type=NOUVEAU_MEM_FB;\
616                 if (!(flags&NOUVEAU_MEM_MAPPED)) {\
617                         block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
618                                                         size, alignment, \
619                                                         file_priv); \
620                         if (block) goto alloc_ok;\
621                 }\
622                 block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
623                                                 alignment, file_priv);\
624                 if (block) goto alloc_ok;\
625                 }
626
627
628         if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB
629         if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP
630         if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI
631         if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB
632         if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP
633         if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI
634
635
636         return NULL;
637
638 alloc_ok:
639         block->flags=type;
640
641         /* On G8x, map memory into VM */
642         if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
643             !(flags & NOUVEAU_MEM_NOVM)) {
644                 struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
645                 unsigned offset = block->start;
646                 unsigned count = block->size / 65536;
647
648                 if (!pt) {
649                         DRM_ERROR("vm alloc without vm pt\n");
650                         nouveau_mem_free_block(block);
651                         return NULL;
652                 }
653
654                 while (count--) {
655                         unsigned pte = offset / 65536;
656
657                         INSTANCE_WR(pt, (pte * 2) + 0, offset | 1);
658                         INSTANCE_WR(pt, (pte * 2) + 1, 0x00000000);
659                         offset += 65536;
660                 }
661         } else {
662                 block->flags |= NOUVEAU_MEM_NOVM;
663         }       
664
665         if (flags&NOUVEAU_MEM_MAPPED)
666         {
667                 struct drm_map_list *entry;
668                 int ret = 0;
669                 block->flags|=NOUVEAU_MEM_MAPPED;
670
671                 if (type == NOUVEAU_MEM_AGP) {
672                         if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
673                         ret = drm_addmap(dev, block->start, block->size,
674                                          _DRM_AGP, 0, &block->map);
675                         else
676                         ret = drm_addmap(dev, block->start, block->size,
677                                          _DRM_SCATTER_GATHER, 0, &block->map);
678                 }
679                 else if (type == NOUVEAU_MEM_FB)
680                         ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
681                                          block->size, _DRM_FRAME_BUFFER,
682                                          0, &block->map);
683                 else if (type == NOUVEAU_MEM_PCI)
684                         ret = drm_addmap(dev, block->start, block->size,
685                                          _DRM_SCATTER_GATHER, 0, &block->map);
686
687                 if (ret) {
688                         nouveau_mem_free_block(block);
689                         return NULL;
690                 }
691
692                 entry = drm_find_matching_map(dev, block->map);
693                 if (!entry) {
694                         nouveau_mem_free_block(block);
695                         return NULL;
696                 }
697                 block->map_handle = entry->user_token;
698         }
699
700         DRM_DEBUG("allocated %lld bytes at 0x%llx type=0x%08x\n", block->size, block->start, block->flags);
701         return block;
702 }
703
/* Release a block returned by nouveau_mem_alloc(): removes its
 * userspace mapping (if any), clears the G8x VM PTEs covering it, and
 * returns it to its heap.  'block' must not be used afterwards.
 */
void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);

	if (block->flags&NOUVEAU_MEM_MAPPED)
		drm_rmmap(dev, block->map);

	/* G8x: Remove pages from vm */
	if (block->flags & NOUVEAU_MEM_FB && dev_priv->card_type >= NV_50 &&
	    !(block->flags & NOUVEAU_MEM_NOVM)) {
		struct nouveau_gpuobj *pt = dev_priv->vm_vram_pt;
		unsigned offset = block->start;
		unsigned count = block->size / 65536;

		if (!pt) {
			DRM_ERROR("vm free without vm pt\n");
			goto out_free;
		}

		/* Zero one 2-dword PTE per 64KiB page, mirroring the
		 * mapping loop in nouveau_mem_alloc(). */
		while (count--) {
			unsigned pte = offset / 65536;
			INSTANCE_WR(pt, (pte * 2) + 0, 0);
			INSTANCE_WR(pt, (pte * 2) + 1, 0);
			offset += 65536;
		}
	}

out_free:
	nouveau_mem_free_block(block);
}
736
737 /*
738  * Ioctls
739  */
740
741 int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
742 {
743         struct drm_nouveau_mem_alloc *alloc = data;
744         struct mem_block *block;
745
746         NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
747
748         if (alloc->flags & NOUVEAU_MEM_INTERNAL)
749                 return -EINVAL;
750
751         block=nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
752                                 alloc->flags, file_priv);
753         if (!block)
754                 return -ENOMEM;
755         alloc->map_handle=block->map_handle;
756         alloc->offset=block->start;
757         alloc->flags=block->flags;
758
759         return 0;
760 }
761
762 int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
763 {
764         struct drm_nouveau_private *dev_priv = dev->dev_private;
765         struct drm_nouveau_mem_free *memfree = data;
766         struct mem_block *block;
767
768         NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
769
770         block=NULL;
771         if (memfree->flags & NOUVEAU_MEM_FB)
772                 block = find_block(dev_priv->fb_heap, memfree->offset);
773         else if (memfree->flags & NOUVEAU_MEM_AGP)
774                 block = find_block(dev_priv->agp_heap, memfree->offset);
775         else if (memfree->flags & NOUVEAU_MEM_PCI)
776                 block = find_block(dev_priv->pci_heap, memfree->offset);
777         if (!block)
778                 return -EFAULT;
779         if (block->file_priv != file_priv)
780                 return -EPERM;
781
782         nouveau_mem_free(dev, block);
783         return 0;
784 }