/* drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 * $FreeBSD: src/sys/dev/drm/drm_bufs.h,v 1.4 2003/03/09 02:08:28 anholt Exp $
 */

#include "drmP.h"

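/* Per-driver template defaults: a driver that includes this file may
 * predefine __HAVE_PCI_DMA, __HAVE_SG, DRIVER_BUF_PRIV_T and
 * DRIVER_AGP_BUFFERS_MAP() before inclusion to select which buffer
 * paths are compiled in.
 */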
#ifndef __HAVE_PCI_DMA
#define __HAVE_PCI_DMA          0
#endif

#ifndef __HAVE_SG
#define __HAVE_SG               0
#endif

#ifndef DRIVER_BUF_PRIV_T
#define DRIVER_BUF_PRIV_T               u32
#endif
#ifndef DRIVER_AGP_BUFFERS_MAP
#if __HAVE_AGP && __HAVE_DMA
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
#else
#define DRIVER_AGP_BUFFERS_MAP( dev )   NULL
#endif
#endif

/*
 * Compute the order (base-2 logarithm, rounded up) of a size.
 * Can be made faster.
 */
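/*
 * Example: DRM(order)(1200) returns 11: the shift loop yields
 * floor(log2(1200)) = 10, and because bits below bit 10 are set the
 * result is rounded up to the next power of two, 2^11 = 2048.
 */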
int DRM(order)( unsigned long size )
{
        int order;
        unsigned long tmp;

        for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

        if ( size & ~(1 << order) )
                ++order;

        return order;
}

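/* Ioctl to add a new mapping (registers, framebuffer, SAREA, AGP or
 * scatter/gather memory).  Validates and page-aligns the request, does
 * the type-specific setup (MTRR/ioremap for register and framebuffer
 * ranges, kernel allocation for SHM, offset translation for AGP and SG),
 * then links the map into dev->maplist and copies the result back out.
 */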
int DRM(addmap)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_map_t request;
        drm_local_map_t *map;
        drm_map_list_entry_t *list;

        if (!(dev->flags & (FREAD|FWRITE)))
                return DRM_ERR(EACCES); /* Require read/write */

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(drm_map_t) );

        map = (drm_local_map_t *) DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return DRM_ERR(ENOMEM);

        map->offset = request.offset;
        map->size = request.size;
        map->type = request.type;
        map->flags = request.flags;
        map->mtrr   = -1;
        map->handle = 0;

        /* Only allow shared memory to be removable, since we only keep
         * enough bookkeeping information about shared memory to allow for
         * removal when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return DRM_ERR(EINVAL);
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return DRM_ERR(EINVAL);
        }

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
                if ( map->offset + map->size < map->offset ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return DRM_ERR(EINVAL);
                }
#if __REALLY_HAVE_MTRR
                if ( map->type == _DRM_FRAME_BUFFER ||
                     (map->flags & _DRM_WRITE_COMBINING) ) {
#ifdef __FreeBSD__
                        int retcode = 0, act;
                        struct mem_range_desc mrdesc;
                        mrdesc.mr_base = map->offset;
                        mrdesc.mr_len = map->size;
                        mrdesc.mr_flags = MDF_WRITECOMBINE;
                        act = MEMRANGE_SET_UPDATE;
                        bcopy(DRIVER_NAME, &mrdesc.mr_owner, strlen(DRIVER_NAME));
                        retcode = mem_range_attr_set(&mrdesc, &act);
                        map->mtrr = 1;
#elif defined __NetBSD__
                        struct mtrr mtrrmap;
                        int one = 1;
                        mtrrmap.base = map->offset;
                        mtrrmap.len = map->size;
                        mtrrmap.type = MTRR_TYPE_WC;
                        mtrrmap.flags = MTRR_VALID;
                        map->mtrr = mtrr_set( &mtrrmap, &one, p, MTRR_GETSET_KERNEL );
#endif
                }
#endif /* __REALLY_HAVE_MTRR */
                DRM_IOREMAP(map);
                break;

        case _DRM_SHM:
                map->handle = (void *)DRM(alloc)(map->size, DRM_MEM_SAREA);
                DRM_DEBUG( "%ld %d %p\n",
                           map->size, DRM(order)( map->size ), map->handle );
                if ( !map->handle ) {
                        DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                        return DRM_ERR(ENOMEM);
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
#if __REALLY_HAVE_AGP
        case _DRM_AGP:
                map->offset += dev->agp->base;
                map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                break;
#endif
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                        return DRM_ERR(EINVAL);
                }
                map->offset = map->offset + dev->sg->handle;
                break;

        default:
                DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
                return DRM_ERR(EINVAL);
        }

        list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
                return DRM_ERR(EINVAL);
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        DRM_LOCK;
        TAILQ_INSERT_TAIL(dev->maplist, list, link);
        DRM_UNLOCK;

        request.offset = map->offset;
        request.size = map->size;
        request.type = map->type;
        request.flags = map->flags;
        request.mtrr   = map->mtrr;
        request.handle = map->handle;

        if ( request.type != _DRM_SHM ) {
                request.handle = (void *)request.offset;
        }

        DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, request, sizeof(drm_map_t) );

        return 0;
}


/* Remove a map from the map list and deallocate its resources if the
 * mapping isn't in use.
 */

int DRM(rmmap)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_map_list_entry_t *list;
        drm_local_map_t *map;
        drm_map_t request;
        int found_maps = 0;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );

        DRM_LOCK;
        TAILQ_FOREACH(list, dev->maplist, link) {
                map = list->map;
                if(map->handle == request.handle &&
                   map->flags & _DRM_REMOVABLE) break;
        }

        /* The loop ran off the end of the list, or the list is empty:
         * no matching removable map was found.
         */
        if(list == NULL) {
                DRM_UNLOCK;
                return DRM_ERR(EINVAL);
        }
        TAILQ_REMOVE(dev->maplist, list, link);
        DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);


        if(!found_maps) {
                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
                        if (map->mtrr >= 0) {
                                int retcode;
#ifdef __FreeBSD__
                                int act;
                                struct mem_range_desc mrdesc;
                                mrdesc.mr_base = map->offset;
                                mrdesc.mr_len = map->size;
                                mrdesc.mr_flags = MDF_WRITECOMBINE;
                                act = MEMRANGE_SET_REMOVE;
                                bcopy(DRIVER_NAME, &mrdesc.mr_owner, strlen(DRIVER_NAME));
                                retcode = mem_range_attr_set(&mrdesc, &act);
#elif defined __NetBSD__
                                struct mtrr mtrrmap;
                                int one = 1;
                                mtrrmap.base = map->offset;
                                mtrrmap.len = map->size;
                                mtrrmap.type = 0;
                                mtrrmap.flags = 0;
                                mtrrmap.owner = p->p_pid;
                                retcode = mtrr_set( &mtrrmap, &one, p, MTRR_GETSET_KERNEL);
                                DRM_DEBUG("mtrr_del = %d\n", retcode);
#endif
                        }
#endif
                        DRM(ioremapfree)( map );
                        break;
                case _DRM_SHM:
                        DRM(free)( map->handle, map->size, DRM_MEM_SAREA );
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                }
                DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
        }
        DRM_UNLOCK;
        return 0;
}

#if __HAVE_DMA


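/* Free everything a partially constructed buffer entry holds: any DMA
 * segments, each buffer's driver-private data, and the buffer list
 * itself.  Used to unwind after an allocation failure in the addbufs
 * paths below.
 */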
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        DRM(free)((void *)entry->seglist[i],
                                        entry->buf_size,
                                        DRM_MEM_DMA);
                }
                DRM(free)(entry->seglist,
                          entry->seg_count *
                          sizeof(*entry->seglist),
                          DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if(entry->buf_count) {
                for(i = 0; i < entry->buf_count; i++) {
                        if(entry->buflist[i].dev_private) {
                                DRM(free)(entry->buflist[i].dev_private,
                                          entry->buflist[i].dev_priv_size,
                                          DRM_MEM_BUFS);
                        }
                }
                DRM(free)(entry->buflist,
                          entry->buf_count *
                          sizeof(*entry->buflist),
                          DRM_MEM_BUFS);

#if __HAVE_DMA_FREELIST
                DRM(freelist_destroy)(&entry->freelist);
#endif

                entry->buf_count = 0;
        }
}

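/* Allocate DMA buffers out of the AGP aperture.  The buffers carve up a
 * driver-supplied region starting at request.agp_start; apart from the
 * per-buffer private data, no kernel memory is allocated here, since the
 * buffers simply address the already-bound aperture region.
 */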
#if __REALLY_HAVE_AGP
int DRM(addbufs_agp)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
                return DRM_ERR(EINVAL);
        if ( dev->queue_count )
                return DRM_ERR(EBUSY); /* Not while in use */

        DRM_SPINLOCK( &dev->count_lock );
        if ( dev->buf_use ) {
                DRM_SPINUNLOCK( &dev->count_lock );
                return DRM_ERR(EBUSY);
        }
        atomic_inc( &dev->buf_alloc );
        DRM_SPINUNLOCK( &dev->count_lock );

        DRM_LOCK;
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM); /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(EINVAL);
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM);
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                buf->dma_wait = 0;
                buf->pid     = 0;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        DRM_UNLOCK;
                        atomic_dec( &dev->buf_alloc );
                        return DRM_ERR(ENOMEM);
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

#if __HAVE_DMA_HISTOGRAM
                buf->time_queued = 0;
                buf->time_dispatched = 0;
                buf->time_completed = 0;
                buf->time_freed = 0;
#endif

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        DRM_UNLOCK;

        request.count = entry->buf_count;
        request.size = size;

        DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request, sizeof(request) );

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __REALLY_HAVE_AGP */

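/* Allocate DMA buffers backed by kernel memory.  Each loop iteration
 * allocates one segment of PAGE_SIZE << page_order bytes and packs as
 * many aligned buffers into it as will fit, recording every page in
 * dma->pagelist for the mmap path.
 */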
#if __HAVE_PCI_DMA
int DRM(addbufs_pci)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request.count, request.size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
                return DRM_ERR(EINVAL);
        if ( dev->queue_count )
                return DRM_ERR(EBUSY); /* Not while in use */

        alignment = (request.flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        DRM_SPINLOCK( &dev->count_lock );
        if ( dev->buf_use ) {
                DRM_SPINUNLOCK( &dev->count_lock );
                return DRM_ERR(EBUSY);
        }
        atomic_inc( &dev->buf_alloc );
        DRM_SPINUNLOCK( &dev->count_lock );

        DRM_LOCK;
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM); /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(EINVAL);
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM);
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                DRM(free)( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM);
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        temp_pagelist = DRM(realloc)( dma->pagelist,
                                      dma->page_count * sizeof(*dma->pagelist),
                                      (dma->page_count + (count << page_order))
                                      * sizeof(*dma->pagelist),
                                      DRM_MEM_PAGES );
        if(!temp_pagelist) {
                DRM(free)( entry->buflist,
                           count * sizeof(*entry->buflist),
                           DRM_MEM_BUFS );
                DRM(free)( entry->seglist,
                           count * sizeof(*entry->seglist),
                           DRM_MEM_SEGS );
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM);
        }

        dma->pagelist = temp_pagelist;
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = (unsigned long)DRM(alloc)( size, DRM_MEM_DMA );
                if ( !page ) break;
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        dma->pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        buf->dma_wait = 0;
                        buf->pid     = 0;
#if __HAVE_DMA_HISTOGRAM
                        buf->time_queued     = 0;
                        buf->time_dispatched = 0;
                        buf->time_completed  = 0;
                        buf->time_freed      = 0;
#endif
                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        DRM_UNLOCK;

        request.count = entry->buf_count;
        request.size = size;

        DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request, sizeof(request) );

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __HAVE_PCI_DMA */

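/* Allocate DMA buffers in a scatter/gather region.  This mirrors the
 * AGP path, but buffer addresses are offsets from dev->sg->handle
 * rather than from the AGP aperture base.
 */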
#if __REALLY_HAVE_SG
int DRM(addbufs_sg)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        count = request.count;
        order = DRM(order)( request.size );
        size = 1 << order;

        alignment  = (request.flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request.agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
                return DRM_ERR(EINVAL);
        if ( dev->queue_count ) return DRM_ERR(EBUSY); /* Not while in use */

        DRM_SPINLOCK( &dev->count_lock );
        if ( dev->buf_use ) {
                DRM_SPINUNLOCK( &dev->count_lock );
                return DRM_ERR(EBUSY);
        }
        atomic_inc( &dev->buf_alloc );
        DRM_SPINUNLOCK( &dev->count_lock );

        DRM_LOCK;
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM); /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(EINVAL);
        }

        entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
                                     DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM);
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                buf->dma_wait = 0;
                buf->pid     = 0;

                buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
                buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
                                               DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        DRM(cleanup_buf_error)(entry);
                        DRM_UNLOCK;
                        atomic_dec( &dev->buf_alloc );
                        return DRM_ERR(ENOMEM);
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

#if __HAVE_DMA_HISTOGRAM
                buf->time_queued = 0;
                buf->time_dispatched = 0;
                buf->time_completed = 0;
                buf->time_freed = 0;
#endif
                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = DRM(realloc)( dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                DRM(cleanup_buf_error)(entry);
                DRM_UNLOCK;
                atomic_dec( &dev->buf_alloc );
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
        DRM(freelist_create)( &entry->freelist, entry->buf_count );
        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
        }
#endif
        DRM_UNLOCK;

        request.count = entry->buf_count;
        request.size = size;

        DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request, sizeof(request) );

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __REALLY_HAVE_SG */

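/* Dispatch an addbufs request to the AGP, scatter/gather or PCI
 * implementation according to the request flags and the paths this
 * driver was built with.
 */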
int DRM(addbufs)( DRM_IOCTL_ARGS )
{
        drm_buf_desc_t request;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

#if __REALLY_HAVE_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                return DRM(addbufs_agp)( kdev, cmd, data, flags, p );
        else
#endif
#if __REALLY_HAVE_SG
        if ( request.flags & _DRM_SG_BUFFER )
                return DRM(addbufs_sg)( kdev, cmd, data, flags, p );
        else
#endif
#if __HAVE_PCI_DMA
                return DRM(addbufs_pci)( kdev, cmd, data, flags, p );
#else
                return DRM_ERR(EINVAL);
#endif
}

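/* Report the buffer pools to userspace: for each order that has buffers
 * allocated, copy out the count, size and freelist watermarks.  Also
 * bumps dev->buf_use, so no further allocations can take place.
 */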
int DRM(infobufs)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        int i;
        int count;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_SPINLOCK( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                DRM_SPINUNLOCK( &dev->count_lock );
                return DRM_ERR(EBUSY);
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK( &dev->count_lock );

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( DRM_COPY_TO_USER( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     DRM_COPY_TO_USER( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     DRM_COPY_TO_USER( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     DRM_COPY_TO_USER( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return DRM_ERR(EFAULT);

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );

        return 0;
}

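/* Set the freelist low and high watermarks for the buffer pool whose
 * size matches request.size.
 */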
int DRM(markbufs)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = DRM(order)( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
                return DRM_ERR(EINVAL);
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return DRM_ERR(EINVAL);
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return DRM_ERR(EINVAL);

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

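/* Return a list of buffers, identified by index, to the free pool.
 * A buffer may only be freed by the process that currently owns it.
 */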
int DRM(freebufs)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( DRM_COPY_FROM_USER( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return DRM_ERR(EFAULT);
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return DRM_ERR(EINVAL);
                }
                buf = dma->buflist[idx];
                if ( buf->pid != DRM_CURRENTPID ) {
                        DRM_ERROR( "Process %d freeing buffer owned by %d\n",
                                   DRM_CURRENTPID, buf->pid );
                        return DRM_ERR(EINVAL);
                }
                DRM(free_buffer)( dev, buf );
        }

        return 0;
}

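/* Map the whole DMA buffer region into the calling process's address
 * space: the driver's AGP/SG buffer map when those paths are in use,
 * otherwise the PCI DMA area, via vm_mmap() on FreeBSD or uvm_mmap()
 * on NetBSD.  The index, size and resulting user address of every
 * buffer are then copied out to request.list.
 */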
int DRM(mapbufs)( DRM_IOCTL_ARGS )
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        vm_offset_t virtual, address;
#ifdef __FreeBSD__
#if __FreeBSD_version >= 500000
        struct vmspace *vms = p->td_proc->p_vmspace;
#else
        struct vmspace *vms = p->p_vmspace;
#endif
#endif /* __FreeBSD__ */
#ifdef __NetBSD__
        struct vnode *vn;
        struct vmspace *vms = p->p_vmspace;
#endif /* __NetBSD__ */

        drm_buf_map_t request;
        int i;

        if ( !dma ) return DRM_ERR(EINVAL);

        DRM_SPINLOCK( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                DRM_SPINUNLOCK( &dev->count_lock );
                return DRM_ERR(EBUSY);
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK( &dev->count_lock );

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );

#ifdef __NetBSD__
        if(!vfinddev(kdev, VCHR, &vn))
                return 0;       /* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ */

        if ( request.count >= dma->buf_count ) {
                if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
                     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
                        drm_local_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );

                        if ( !map ) {
                                retcode = EINVAL;
                                goto done;
                        }

#ifdef __FreeBSD__
                        virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
                        retcode = vm_mmap(&vms->vm_map,
                                          &virtual,
                                          round_page(map->size),
                                          PROT_READ|PROT_WRITE, VM_PROT_ALL,
                                          MAP_SHARED,
                                          SLIST_FIRST(&kdev->si_hlist),
                                          (unsigned long)map->offset );
#elif defined(__NetBSD__)
                        virtual = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
                        retcode = uvm_mmap(&vms->vm_map,
                                           (vaddr_t *)&virtual,
                                           round_page(map->size),
                                           UVM_PROT_READ | UVM_PROT_WRITE,
                                           UVM_PROT_ALL, MAP_SHARED,
                                           &vn->v_uobj, map->offset,
                                           p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ */
                } else {
#ifdef __FreeBSD__
                        virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
                        retcode = vm_mmap(&vms->vm_map,
                                          &virtual,
                                          round_page(dma->byte_count),
                                          PROT_READ|PROT_WRITE, VM_PROT_ALL,
                                          MAP_SHARED,
                                          SLIST_FIRST(&kdev->si_hlist),
                                          0);
#elif defined(__NetBSD__)
                        virtual = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
                        retcode = uvm_mmap(&vms->vm_map,
                                           (vaddr_t *)&virtual,
                                           round_page(dma->byte_count),
                                           UVM_PROT_READ | UVM_PROT_WRITE,
                                           UVM_PROT_ALL, MAP_SHARED,
                                           &vn->v_uobj, 0,
                                           p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ */
                }
                if (retcode)
                        goto done;
                request.virtual = (void *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( DRM_COPY_TO_USER( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = EFAULT;
                                goto done;
                        }
                        if ( DRM_COPY_TO_USER( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = EFAULT;
                                goto done;
                        }
                        if ( DRM_COPY_TO_USER( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( DRM_COPY_TO_USER( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;

        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        DRM_COPY_TO_USER_IOCTL( (drm_buf_map_t *)data, request, sizeof(request) );

        return DRM_ERR(retcode);
}

#endif /* __HAVE_DMA */