/*
 * Source provenance: OSDN Git Service mirror of android-x86/external-libdrm.git,
 * file bsd-core/drm_bufs.c, from the change "SMPng lock the DRM" (noted there
 * as only a partial locking conversion -- a few code paths remain unlocked).
 */
1 /* drm_bufs.h -- Generic buffer template -*- linux-c -*-
2  * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
3  *
4  * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
6  * All Rights Reserved.
7  *
8  * Permission is hereby granted, free of charge, to any person obtaining a
9  * copy of this software and associated documentation files (the "Software"),
10  * to deal in the Software without restriction, including without limitation
11  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12  * and/or sell copies of the Software, and to permit persons to whom the
13  * Software is furnished to do so, subject to the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the next
16  * paragraph) shall be included in all copies or substantial portions of the
17  * Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
22  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25  * OTHER DEALINGS IN THE SOFTWARE.
26  *
27  * Authors:
28  *    Rickard E. (Rik) Faith <faith@valinux.com>
29  *    Gareth Hughes <gareth@valinux.com>
30  *
31  */
32
33 #include "drmP.h"
34
35 #ifndef __HAVE_PCI_DMA
36 #define __HAVE_PCI_DMA          0
37 #endif
38
39 #ifndef __HAVE_SG
40 #define __HAVE_SG               0
41 #endif
42
43 #ifndef DRIVER_BUF_PRIV_T
44 #define DRIVER_BUF_PRIV_T               u32
45 #endif
46 #ifndef DRIVER_AGP_BUFFERS_MAP
47 #if __HAVE_AGP && __HAVE_DMA
48 #error "You must define DRIVER_AGP_BUFFERS_MAP()"
49 #else
50 #define DRIVER_AGP_BUFFERS_MAP( dev )   NULL
51 #endif
52 #endif
53
54 /*
55  * Compute order.  Can be made faster.
56  */
57 int DRM(order)( unsigned long size )
58 {
59         int order;
60         unsigned long tmp;
61
62         for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );
63
64         if ( size & ~(1 << order) )
65                 ++order;
66
67         return order;
68 }
69
/*
 * DRM_IOCTL_ADD_MAP: create a kernel mapping descriptor for a region the
 * client wants access to (registers, framebuffer, SAREA shared memory,
 * AGP aperture space, or scatter/gather memory) and append it to
 * dev->maplist.  The resolved offset/handle is copied back to userland.
 *
 * Returns 0 on success; EACCES if the fd lacks read/write, ENOMEM on
 * allocation failure, EINVAL on bad type/flags/alignment.
 */
int DRM(addmap)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_map_t request;
	drm_local_map_t *map;
	drm_map_list_entry_t *list;

	if (!(dev->flags & (FREAD|FWRITE)))
		return DRM_ERR(EACCES); /* Require read/write */

	DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(drm_map_t) );

	map = (drm_local_map_t *) DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return DRM_ERR(ENOMEM);

	map->offset = request.offset;
	map->size = request.size;
	map->type = request.type;
	map->flags = request.flags;
	map->mtrr   = -1;	/* -1 = no MTRR slot allocated */
	map->handle = 0;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	/* On BSD, PAGE_MASK is (PAGE_SIZE - 1): both offset and size must be
	 * page aligned.
	 */
	if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}

	switch ( map->type ) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		/* Reject regions whose end wraps around the address space. */
		if ( map->offset + map->size < map->offset ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return DRM_ERR(EINVAL);
		}
#if __REALLY_HAVE_MTRR
		/* Set a write-combining MTRR over framebuffers (or anything
		 * explicitly requesting WC) for faster CPU writes.  MTRR
		 * failure is non-fatal: mapping still proceeds uncached.
		 */
		if ( map->type == _DRM_FRAME_BUFFER ||
		     (map->flags & _DRM_WRITE_COMBINING) ) {
#ifdef __FreeBSD__
			int retcode = 0, act;
			struct mem_range_desc mrdesc;
			mrdesc.mr_base = map->offset;
			mrdesc.mr_len = map->size;
			mrdesc.mr_flags = MDF_WRITECOMBINE;
			act = MEMRANGE_SET_UPDATE;
			bcopy(DRIVER_NAME, &mrdesc.mr_owner, strlen(DRIVER_NAME));
			retcode = mem_range_attr_set(&mrdesc, &act);
			map->mtrr=1;
#elif defined __NetBSD__
			struct mtrr mtrrmap;
			int one = 1;
			mtrrmap.base = map->offset;
			mtrrmap.len = map->size;
			mtrrmap.type = MTRR_TYPE_WC;
			mtrrmap.flags = MTRR_VALID;
			map->mtrr = mtrr_set( &mtrrmap, &one, p, MTRR_GETSET_KERNEL );
#endif
		}
#endif /* __REALLY_HAVE_MTRR */
		DRM_IOREMAP(map, dev);
		break;

	case _DRM_SHM:
		map->handle = (void *)DRM(alloc)(map->size, DRM_MEM_SAREA);
		DRM_DEBUG( "%lu %d %p\n",
			   map->size, DRM(order)( map->size ), map->handle );
		if ( !map->handle ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return DRM_ERR(ENOMEM);
		}
		/* For SHM the "offset" handed back to userland is the kernel
		 * virtual address of the allocation.
		 */
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;
#if __REALLY_HAVE_AGP
	case _DRM_AGP:
		/* Caller supplies an aperture-relative offset; rebase it. */
		map->offset += dev->agp->base;
		map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
		break;
#endif
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
			return DRM_ERR(EINVAL);
		}
		map->offset = map->offset + dev->sg->handle;
		break;

	default:
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}

	list = DRM(calloc)(1, sizeof(*list), DRM_MEM_MAPS);
	if (list == NULL) {
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
		return DRM_ERR(EINVAL);
	}
	list->map = map;

	DRM_LOCK();
	TAILQ_INSERT_TAIL(dev->maplist, list, link);
	DRM_UNLOCK();

	request.offset = map->offset;
	request.size = map->size;
	request.type = map->type;
	request.flags = map->flags;
	request.mtrr   = map->mtrr;
	request.handle = map->handle;

	/* Non-SHM maps have no kernel virtual handle; report the bus/phys
	 * offset as the handle instead.
	 */
	if ( request.type != _DRM_SHM ) {
		request.handle = (void *)request.offset;
	}

	DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, request, sizeof(drm_map_t) );

	return 0;
}
200
201
/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * DRM_IOCTL_RM_MAP: only maps flagged _DRM_REMOVABLE (i.e. SHM maps, see
 * DRM(addmap)) can be removed.  The map is looked up by its user-visible
 * handle, unlinked under DRM_LOCK, and its backing resources released.
 * Returns EINVAL when no removable map matches the handle.
 */

int DRM(rmmap)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_map_list_entry_t *list;
	drm_local_map_t *map;
	drm_map_t request;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );

	DRM_LOCK();
	TAILQ_FOREACH(list, dev->maplist, link) {
		map = list->map;
		if (map->handle == request.handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (list == NULL) {
		DRM_UNLOCK();
		return DRM_ERR(EINVAL);
	}
	TAILQ_REMOVE(dev->maplist, list, link);
	DRM_UNLOCK();

	DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
		/* Undo the write-combining MTRR set up in DRM(addmap);
		 * map->mtrr >= 0 means a slot was (or may have been) taken.
		 */
		if (map->mtrr >= 0) {
			int retcode;
#ifdef __FreeBSD__
			int act;
			struct mem_range_desc mrdesc;
			mrdesc.mr_base = map->offset;
			mrdesc.mr_len = map->size;
			mrdesc.mr_flags = MDF_WRITECOMBINE;
			act = MEMRANGE_SET_REMOVE;
			bcopy(DRIVER_NAME, &mrdesc.mr_owner,
			    strlen(DRIVER_NAME));
			/* NOTE(review): retcode is ignored on FreeBSD --
			 * MTRR removal failure is treated as best-effort. */
			retcode = mem_range_attr_set(&mrdesc, &act);
#elif defined __NetBSD__
			struct mtrr mtrrmap;
			int one = 1;
			mtrrmap.base = map->offset;
			mtrrmap.len = map->size;
			mtrrmap.type = 0;
			mtrrmap.flags = 0;	/* !MTRR_VALID clears the range */
			mtrrmap.owner = p->p_pid;
			retcode = mtrr_set(&mtrrmap, &one, p,
			    MTRR_GETSET_KERNEL);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
#endif
		}
#endif
		DRM(ioremapfree)(map);
		break;
	case _DRM_SHM:
		DRM(free)(map->handle, map->size, DRM_MEM_SAREA);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		/* Nothing extra to release; backing memory is owned by the
		 * AGP/SG subsystems. */
		break;
	}
	DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
	return 0;
}
275
276 #if __HAVE_DMA
277
278
279 static void DRM(cleanup_buf_error)(drm_device_t *dev, drm_buf_entry_t *entry)
280 {
281         int i;
282
283 #if __HAVE_PCI_DMA
284         if (entry->seg_count) {
285                 for (i = 0; i < entry->seg_count; i++) {
286                         if (entry->seglist[i] != NULL)
287                                 DRM(pci_free)(dev, entry->buf_size,
288                                     (void *)entry->seglist[i],
289                                     entry->seglist_bus[i]);
290                 }
291                 DRM(free)(entry->seglist,
292                           entry->seg_count *
293                           sizeof(*entry->seglist),
294                           DRM_MEM_SEGS);
295                 DRM(free)(entry->seglist_bus, entry->seg_count *
296                           sizeof(*entry->seglist_bus), DRM_MEM_SEGS);
297
298                 entry->seg_count = 0;
299         }
300 #endif /* __HAVE_PCI_DMA */
301
302         if (entry->buf_count) {
303                 for (i = 0; i < entry->buf_count; i++) {
304                         DRM(free)(entry->buflist[i].dev_private,
305                             entry->buflist[i].dev_priv_size, DRM_MEM_BUFS);
306                 }
307                 DRM(free)(entry->buflist,
308                           entry->buf_count *
309                           sizeof(*entry->buflist),
310                           DRM_MEM_BUFS);
311
312                 entry->buf_count = 0;
313         }
314 }
315
316 #if __REALLY_HAVE_AGP
317 static int DRM(addbufs_agp)(drm_device_t *dev, drm_buf_desc_t *request)
318 {
319         drm_device_dma_t *dma = dev->dma;
320         drm_buf_entry_t *entry;
321         drm_buf_t *buf;
322         unsigned long offset;
323         unsigned long agp_offset;
324         int count;
325         int order;
326         int size;
327         int alignment;
328         int page_order;
329         int total;
330         int byte_count;
331         int i;
332         drm_buf_t **temp_buflist;
333
334         count = request->count;
335         order = DRM(order)(request->size);
336         size = 1 << order;
337
338         alignment  = (request->flags & _DRM_PAGE_ALIGN)
339                 ? round_page(size) : size;
340         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
341         total = PAGE_SIZE << page_order;
342
343         byte_count = 0;
344         agp_offset = dev->agp->base + request->agp_start;
345
346         DRM_DEBUG( "count:      %d\n",  count );
347         DRM_DEBUG( "order:      %d\n",  order );
348         DRM_DEBUG( "size:       %d\n",  size );
349         DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
350         DRM_DEBUG( "alignment:  %d\n",  alignment );
351         DRM_DEBUG( "page_order: %d\n",  page_order );
352         DRM_DEBUG( "total:      %d\n",  total );
353
354         entry = &dma->bufs[order];
355
356         entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
357                                     DRM_MEM_BUFS );
358         if ( !entry->buflist ) {
359                 return DRM_ERR(ENOMEM);
360         }
361         memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
362
363         entry->buf_size = size;
364         entry->page_order = page_order;
365
366         offset = 0;
367
368         while ( entry->buf_count < count ) {
369                 buf          = &entry->buflist[entry->buf_count];
370                 buf->idx     = dma->buf_count + entry->buf_count;
371                 buf->total   = alignment;
372                 buf->order   = order;
373                 buf->used    = 0;
374
375                 buf->offset  = (dma->byte_count + offset);
376                 buf->bus_address = agp_offset + offset;
377                 buf->address = (void *)(agp_offset + offset);
378                 buf->next    = NULL;
379                 buf->pending = 0;
380                 buf->filp    = NULL;
381
382                 buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
383                 buf->dev_private = DRM(calloc)(1, buf->dev_priv_size,
384                     DRM_MEM_BUFS);
385                 if (buf->dev_private == NULL) {
386                         /* Set count correctly so we free the proper amount. */
387                         entry->buf_count = count;
388                         DRM(cleanup_buf_error)(dev, entry);
389                         return DRM_ERR(ENOMEM);
390                 }
391
392                 offset += alignment;
393                 entry->buf_count++;
394                 byte_count += PAGE_SIZE << page_order;
395         }
396
397         DRM_DEBUG( "byte_count: %d\n", byte_count );
398
399         temp_buflist = DRM(realloc)( dma->buflist,
400                                      dma->buf_count * sizeof(*dma->buflist),
401                                      (dma->buf_count + entry->buf_count)
402                                      * sizeof(*dma->buflist),
403                                      DRM_MEM_BUFS );
404         if (temp_buflist == NULL) {
405                 /* Free the entry because it isn't valid */
406                 DRM(cleanup_buf_error)(dev, entry);
407                 return DRM_ERR(ENOMEM);
408         }
409         dma->buflist = temp_buflist;
410
411         for ( i = 0 ; i < entry->buf_count ; i++ ) {
412                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
413         }
414
415         dma->buf_count += entry->buf_count;
416         dma->byte_count += byte_count;
417
418         DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
419         DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
420
421         request->count = entry->buf_count;
422         request->size = size;
423
424         dma->flags = _DRM_DMA_USE_AGP;
425
426         return 0;
427 }
428 #endif /* __REALLY_HAVE_AGP */
429
430 #if __HAVE_PCI_DMA
431 static int DRM(addbufs_pci)(drm_device_t *dev, drm_buf_desc_t *request)
432 {
433         drm_device_dma_t *dma = dev->dma;
434         int count;
435         int order;
436         int size;
437         int total;
438         int page_order;
439         drm_buf_entry_t *entry;
440         vm_offset_t vaddr;
441         drm_buf_t *buf;
442         int alignment;
443         unsigned long offset;
444         int i;
445         int byte_count;
446         int page_count;
447         unsigned long *temp_pagelist;
448         drm_buf_t **temp_buflist;
449         dma_addr_t bus_addr;
450
451         count = request->count;
452         order = DRM(order)(request->size);
453         size = 1 << order;
454
455         DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
456                    request->count, request->size, size, order );
457
458         alignment = (request->flags & _DRM_PAGE_ALIGN)
459                 ? round_page(size) : size;
460         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
461         total = PAGE_SIZE << page_order;
462
463         entry = &dma->bufs[order];
464
465         entry->buflist = DRM(alloc)(count * sizeof(*entry->buflist),
466             DRM_MEM_BUFS);
467         entry->seglist = DRM(alloc)(count * sizeof(*entry->seglist),
468             DRM_MEM_SEGS);
469         entry->seglist_bus = DRM(alloc)(count * sizeof(*entry->seglist_bus),
470             DRM_MEM_SEGS);
471
472         /* Keep the original pagelist until we know all the allocations
473          * have succeeded
474          */
475         temp_pagelist = DRM(alloc)((dma->page_count + (count << page_order)) *
476             sizeof(*dma->pagelist), DRM_MEM_PAGES);
477
478         if (entry->buflist == NULL || entry->seglist == NULL || 
479             temp_pagelist == NULL) {
480                 DRM(free)(entry->buflist, count * sizeof(*entry->buflist),
481                     DRM_MEM_BUFS);
482                 DRM(free)(entry->seglist, count * sizeof(*entry->seglist),
483                     DRM_MEM_SEGS);
484                 DRM(free)(entry->seglist_bus, count *
485                     sizeof(*entry->seglist_bus), DRM_MEM_SEGS);
486                 return DRM_ERR(ENOMEM);
487         }
488
489         bzero(entry->buflist, count * sizeof(*entry->buflist));
490         bzero(entry->seglist, count * sizeof(*entry->seglist));
491         
492         memcpy(temp_pagelist, dma->pagelist, dma->page_count * 
493             sizeof(*dma->pagelist));
494
495         DRM_DEBUG( "pagelist: %d entries\n",
496                    dma->page_count + (count << page_order) );
497
498         entry->buf_size = size;
499         entry->page_order = page_order;
500         byte_count = 0;
501         page_count = 0;
502
503         while ( entry->buf_count < count ) {
504                 vaddr = (vm_offset_t) DRM(pci_alloc)(dev, size, alignment,
505                     0xfffffffful, &bus_addr);
506                 if (vaddr == NULL) {
507                         /* Set count correctly so we free the proper amount. */
508                         entry->buf_count = count;
509                         entry->seg_count = count;
510                         DRM(cleanup_buf_error)(dev, entry);
511                         DRM(free)(temp_pagelist, (dma->page_count +
512                             (count << page_order)) * sizeof(*dma->pagelist),
513                             DRM_MEM_PAGES);
514                         return DRM_ERR(ENOMEM);
515                 }
516         
517                 entry->seglist_bus[entry->seg_count] = bus_addr;
518                 entry->seglist[entry->seg_count++] = vaddr;
519                 for ( i = 0 ; i < (1 << page_order) ; i++ ) {
520                         DRM_DEBUG( "page %d @ 0x%08lx\n",
521                                    dma->page_count + page_count,
522                                    (long)vaddr + PAGE_SIZE * i );
523                         temp_pagelist[dma->page_count + page_count++] = 
524                             vaddr + PAGE_SIZE * i;
525                 }
526                 for ( offset = 0 ;
527                       offset + size <= total && entry->buf_count < count ;
528                       offset += alignment, ++entry->buf_count ) {
529                         buf          = &entry->buflist[entry->buf_count];
530                         buf->idx     = dma->buf_count + entry->buf_count;
531                         buf->total   = alignment;
532                         buf->order   = order;
533                         buf->used    = 0;
534                         buf->offset  = (dma->byte_count + byte_count + offset);
535                         buf->address = (void *)(vaddr + offset);
536                         buf->bus_address = bus_addr + offset;
537                         buf->next    = NULL;
538                         buf->pending = 0;
539                         buf->filp    = NULL;
540
541                         buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
542                         buf->dev_private = DRM(alloc)(sizeof(DRIVER_BUF_PRIV_T),
543                             DRM_MEM_BUFS);
544                         if (buf->dev_private == NULL) {
545                                 /* Set count correctly so we free the proper amount. */
546                                 entry->buf_count = count;
547                                 entry->seg_count = count;
548                                 DRM(cleanup_buf_error)(dev, entry);
549                                 DRM(free)(temp_pagelist, (dma->page_count + 
550                                     (count << page_order)) *
551                                     sizeof(*dma->pagelist), DRM_MEM_PAGES );
552                                 return DRM_ERR(ENOMEM);
553                         }
554                         bzero(buf->dev_private, buf->dev_priv_size);
555
556                         DRM_DEBUG( "buffer %d @ %p\n",
557                                    entry->buf_count, buf->address );
558                 }
559                 byte_count += PAGE_SIZE << page_order;
560         }
561
562         temp_buflist = DRM(realloc)( dma->buflist,
563                                      dma->buf_count * sizeof(*dma->buflist),
564                                      (dma->buf_count + entry->buf_count)
565                                      * sizeof(*dma->buflist),
566                                      DRM_MEM_BUFS );
567         if (temp_buflist == NULL) {
568                 /* Free the entry because it isn't valid */
569                 DRM(cleanup_buf_error)(dev, entry);
570                 DRM(free)(temp_pagelist, (dma->page_count + 
571                     (count << page_order)) * sizeof(*dma->pagelist),
572                     DRM_MEM_PAGES);
573                 return DRM_ERR(ENOMEM);
574         }
575         dma->buflist = temp_buflist;
576
577         for ( i = 0 ; i < entry->buf_count ; i++ ) {
578                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
579         }
580
581         /* No allocations failed, so now we can replace the orginal pagelist
582          * with the new one.
583          */
584         DRM(free)(dma->pagelist, dma->page_count * sizeof(*dma->pagelist),
585             DRM_MEM_PAGES);
586         dma->pagelist = temp_pagelist;
587
588         dma->buf_count += entry->buf_count;
589         dma->seg_count += entry->seg_count;
590         dma->page_count += entry->seg_count << page_order;
591         dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
592
593         request->count = entry->buf_count;
594         request->size = size;
595
596         return 0;
597
598 }
599 #endif /* __HAVE_PCI_DMA */
600
601 #if __REALLY_HAVE_SG
602 static int DRM(addbufs_sg)(drm_device_t *dev, drm_buf_desc_t *request)
603 {
604         drm_device_dma_t *dma = dev->dma;
605         drm_buf_entry_t *entry;
606         drm_buf_t *buf;
607         unsigned long offset;
608         unsigned long agp_offset;
609         int count;
610         int order;
611         int size;
612         int alignment;
613         int page_order;
614         int total;
615         int byte_count;
616         int i;
617         drm_buf_t **temp_buflist;
618
619         count = request->count;
620         order = DRM(order)(request->size);
621         size = 1 << order;
622
623         alignment  = (request->flags & _DRM_PAGE_ALIGN)
624                 ? round_page(size) : size;
625         page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
626         total = PAGE_SIZE << page_order;
627
628         byte_count = 0;
629         agp_offset = request->agp_start;
630
631         DRM_DEBUG( "count:      %d\n",  count );
632         DRM_DEBUG( "order:      %d\n",  order );
633         DRM_DEBUG( "size:       %d\n",  size );
634         DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
635         DRM_DEBUG( "alignment:  %d\n",  alignment );
636         DRM_DEBUG( "page_order: %d\n",  page_order );
637         DRM_DEBUG( "total:      %d\n",  total );
638
639         entry = &dma->bufs[order];
640
641         entry->buflist = DRM(calloc)(1, count * sizeof(*entry->buflist),
642             DRM_MEM_BUFS);
643         if (entry->buflist == NULL)
644                 return DRM_ERR(ENOMEM);
645
646         entry->buf_size = size;
647         entry->page_order = page_order;
648
649         offset = 0;
650
651         while ( entry->buf_count < count ) {
652                 buf          = &entry->buflist[entry->buf_count];
653                 buf->idx     = dma->buf_count + entry->buf_count;
654                 buf->total   = alignment;
655                 buf->order   = order;
656                 buf->used    = 0;
657
658                 buf->offset  = (dma->byte_count + offset);
659                 buf->bus_address = agp_offset + offset;
660                 buf->address = (void *)(agp_offset + offset + dev->sg->handle);
661                 buf->next    = NULL;
662                 buf->pending = 0;
663                 buf->filp    = NULL;
664
665                 buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
666                 buf->dev_private = DRM(calloc)(1, buf->dev_priv_size,
667                     DRM_MEM_BUFS);
668                 if (buf->dev_private == NULL) {
669                         /* Set count correctly so we free the proper amount. */
670                         entry->buf_count = count;
671                         DRM(cleanup_buf_error)(dev, entry);
672                         return DRM_ERR(ENOMEM);
673                 }
674
675                 DRM_DEBUG( "buffer %d @ %p\n",
676                            entry->buf_count, buf->address );
677
678                 offset += alignment;
679                 entry->buf_count++;
680                 byte_count += PAGE_SIZE << page_order;
681         }
682
683         DRM_DEBUG( "byte_count: %d\n", byte_count );
684
685         temp_buflist = DRM(realloc)( dma->buflist,
686                                      dma->buf_count * sizeof(*dma->buflist),
687                                      (dma->buf_count + entry->buf_count)
688                                      * sizeof(*dma->buflist),
689                                      DRM_MEM_BUFS );
690         if (temp_buflist == NULL) {
691                 /* Free the entry because it isn't valid */
692                 DRM(cleanup_buf_error)(dev, entry);
693                 return DRM_ERR(ENOMEM);
694         }
695         dma->buflist = temp_buflist;
696
697         for ( i = 0 ; i < entry->buf_count ; i++ ) {
698                 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
699         }
700
701         dma->buf_count += entry->buf_count;
702         dma->byte_count += byte_count;
703
704         DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
705         DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
706
707         request->count = entry->buf_count;
708         request->size = size;
709
710         dma->flags = _DRM_DMA_USE_SG;
711
712         return 0;
713 }
714 #endif /* __REALLY_HAVE_SG */
715
/*
 * DRM_IOCTL_ADD_BUFS: validate the request and dispatch to the AGP, SG,
 * or PCI allocation back end based on request.flags.  Allocation is only
 * permitted before the first buffer-using ioctl (dev->buf_use == 0) and
 * only once per buffer order.
 *
 * NOTE: the #if'd dispatch below relies on dangling-else chaining -- the
 * final assignment binds to whichever `else` survives preprocessing, so
 * exactly one back end (or the EINVAL fallback) runs.
 */
int DRM(addbufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_buf_desc_t request;
	int err;
	int order;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

	/* Cap the request to a sane size before deriving the order. */
	if (request.count < 0 || request.count > 4096)
		return DRM_ERR(EINVAL);

	order = DRM(order)(request.size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return DRM_ERR(EINVAL);

	DRM_SPINLOCK(&dev->dma_lock);
	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return DRM_ERR(EBUSY);
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return DRM_ERR(ENOMEM);
	}

#if __REALLY_HAVE_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		err = DRM(addbufs_agp)(dev, &request);
	else
#endif
#if __REALLY_HAVE_SG
	if ( request.flags & _DRM_SG_BUFFER )
		err = DRM(addbufs_sg)(dev, &request);
	else
#endif
#if __HAVE_PCI_DMA
		err = DRM(addbufs_pci)(dev, &request);
#else
		err = DRM_ERR(EINVAL);
#endif
	DRM_SPINUNLOCK(&dev->dma_lock);

	/* The back end updated request.count/size to what was allocated. */
	DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request, sizeof(request));

	return err;
}
765
766 int DRM(infobufs)( DRM_IOCTL_ARGS )
767 {
768         DRM_DEVICE;
769         drm_device_dma_t *dma = dev->dma;
770         drm_buf_info_t request;
771         int i;
772         int count;
773         int retcode = 0;
774
775         DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );
776
777         DRM_SPINLOCK(&dev->dma_lock);
778         ++dev->buf_use;         /* Can't allocate more after this call */
779         DRM_SPINUNLOCK(&dev->dma_lock);
780
781         for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
782                 if ( dma->bufs[i].buf_count ) ++count;
783         }
784
785         DRM_DEBUG( "count = %d\n", count );
786
787         if ( request.count >= count ) {
788                 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
789                         if ( dma->bufs[i].buf_count ) {
790                                 drm_buf_desc_t from;
791
792                                 from.count = dma->bufs[i].buf_count;
793                                 from.size = dma->bufs[i].buf_size;
794                                 from.low_mark = dma->bufs[i].freelist.low_mark;
795                                 from.high_mark = dma->bufs[i].freelist.high_mark;
796
797                                 if (DRM_COPY_TO_USER(&request.list[count], &from,
798                                     sizeof(drm_buf_desc_t)) != 0) {
799                                         retcode = DRM_ERR(EFAULT);
800                                         break;
801                                 }
802
803                                 DRM_DEBUG( "%d %d %d %d %d\n",
804                                            i,
805                                            dma->bufs[i].buf_count,
806                                            dma->bufs[i].buf_size,
807                                            dma->bufs[i].freelist.low_mark,
808                                            dma->bufs[i].freelist.high_mark );
809                                 ++count;
810                         }
811                 }
812         }
813         request.count = count;
814
815         DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );
816
817         return retcode;
818 }
819
820 int DRM(markbufs)( DRM_IOCTL_ARGS )
821 {
822         DRM_DEVICE;
823         drm_device_dma_t *dma = dev->dma;
824         drm_buf_desc_t request;
825         int order;
826
827         DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );
828
829         DRM_DEBUG( "%d, %d, %d\n",
830                    request.size, request.low_mark, request.high_mark );
831         
832
833         order = DRM(order)(request.size);       
834         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
835             request.low_mark < 0 || request.high_mark < 0) {
836                 return DRM_ERR(EINVAL);
837         }
838
839         DRM_SPINLOCK(&dev->dma_lock);
840         if (request.low_mark > dma->bufs[order].buf_count ||
841             request.high_mark > dma->bufs[order].buf_count) {
842                 return DRM_ERR(EINVAL);
843         }
844
845         dma->bufs[order].freelist.low_mark  = request.low_mark;
846         dma->bufs[order].freelist.high_mark = request.high_mark;
847         DRM_SPINUNLOCK(&dev->dma_lock);
848
849         return 0;
850 }
851
852 int DRM(freebufs)( DRM_IOCTL_ARGS )
853 {
854         DRM_DEVICE;
855         drm_device_dma_t *dma = dev->dma;
856         drm_buf_free_t request;
857         int i;
858         int idx;
859         drm_buf_t *buf;
860         int retcode = 0;
861
862         DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );
863
864         DRM_DEBUG( "%d\n", request.count );
865         
866         DRM_SPINLOCK(&dev->dma_lock);
867         for ( i = 0 ; i < request.count ; i++ ) {
868                 if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) {
869                         retcode = DRM_ERR(EFAULT);
870                         break;
871                 }
872                 if ( idx < 0 || idx >= dma->buf_count ) {
873                         DRM_ERROR( "Index %d (of %d max)\n",
874                                    idx, dma->buf_count - 1 );
875                         retcode = DRM_ERR(EINVAL);
876                         break;
877                 }
878                 buf = dma->buflist[idx];
879                 if ( buf->filp != filp ) {
880                         DRM_ERROR("Process %d freeing buffer not owned\n",
881                                    DRM_CURRENTPID);
882                         retcode = DRM_ERR(EINVAL);
883                         break;
884                 }
885                 DRM(free_buffer)( dev, buf );
886         }
887         DRM_SPINUNLOCK(&dev->dma_lock);
888
889         return retcode;
890 }
891
892 int DRM(mapbufs)( DRM_IOCTL_ARGS )
893 {
894         DRM_DEVICE;
895         drm_device_dma_t *dma = dev->dma;
896         int retcode = 0;
897         const int zero = 0;
898         vm_offset_t address;
899         struct vmspace *vms;
900 #ifdef __FreeBSD__
901         vm_ooffset_t foff;
902         vm_size_t size;
903         vm_offset_t vaddr;
904 #endif /* __FreeBSD__ */
905 #ifdef __NetBSD__
906         struct vnode *vn;
907         vm_size_t size;
908         vaddr_t vaddr;
909 #endif /* __NetBSD__ */
910
911         drm_buf_map_t request;
912         int i;
913
914         DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );
915
916 #ifdef __NetBSD__
917         if (!vfinddev(kdev, VCHR, &vn))
918                 return 0;       /* FIXME: Shouldn't this be EINVAL or something? */
919 #endif /* __NetBSD__ */
920
921 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
922         vms = p->td_proc->p_vmspace;
923 #else
924         vms = p->p_vmspace;
925 #endif
926
927         DRM_SPINLOCK(&dev->dma_lock);
928         dev->buf_use++;         /* Can't allocate more after this call */
929         DRM_SPINUNLOCK(&dev->dma_lock);
930
931         if (request.count < dma->buf_count)
932                 goto done;
933
934         if ((__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
935             (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG))) {
936                 drm_local_map_t *map = DRIVER_AGP_BUFFERS_MAP(dev);
937
938                 if (map == NULL) {
939                         retcode = EINVAL;
940                         goto done;
941                 }
942                 size = round_page(map->size);
943                 foff = map->offset;
944         } else {
945                 size = round_page(dma->byte_count),
946                 foff = 0;
947         }
948
949 #ifdef __FreeBSD__
950         vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
951         retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
952             VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
953 #elif defined(__NetBSD__)
954         vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
955         retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
956             UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
957             &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
958 #endif /* __NetBSD__ */
959         if (retcode)
960                 goto done;
961
962         request.virtual = (void *)vaddr;
963
964         for ( i = 0 ; i < dma->buf_count ; i++ ) {
965                 if (DRM_COPY_TO_USER(&request.list[i].idx,
966                     &dma->buflist[i]->idx, sizeof(request.list[0].idx))) {
967                         retcode = EFAULT;
968                         goto done;
969                 }
970                 if (DRM_COPY_TO_USER(&request.list[i].total,
971                     &dma->buflist[i]->total, sizeof(request.list[0].total))) {
972                         retcode = EFAULT;
973                         goto done;
974                 }
975                 if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
976                     sizeof(zero))) {
977                         retcode = EFAULT;
978                         goto done;
979                 }
980                 address = vaddr + dma->buflist[i]->offset; /* *** */
981                 if (DRM_COPY_TO_USER(&request.list[i].address, &address,
982                     sizeof(address))) {
983                         retcode = EFAULT;
984                         goto done;
985                 }
986         }
987
988  done:
989         request.count = dma->buf_count;
990
991         DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
992
993         DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request));
994
995         return DRM_ERR(retcode);
996 }
997
998 #endif /* __HAVE_DMA */