/* drm_bufs.c -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */

#include "drmP.h"

/*
 * Compute the order (rounded-up log2) of a buffer size, e.g. 12 for a
 * 4096-byte buffer.  Can be made faster.
 */
int drm_order(unsigned long size)
{
        int order;
        unsigned long tmp;

        for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

        if ( size & ~(1ul << order) )
                ++order;

        return order;
}

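/* Register a new mapping with the device: validate the page-aligned
 * offset/size from userland, perform the type-specific setup (ioremap for
 * registers, MTRR for write-combined ranges, a kernel allocation for SHM,
 * offset translation for AGP and scatter/gather), link the map into
 * dev->maplist, and copy the resulting description back to the caller.
 */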
int drm_addmap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_map_t request;
        drm_local_map_t *map;
        drm_map_list_entry_t *list;

        if (!(dev->flags & (FREAD|FWRITE)))
                return DRM_ERR(EACCES); /* Require read/write */

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(drm_map_t) );

        map = (drm_local_map_t *) drm_alloc(sizeof(*map), DRM_MEM_MAPS);
        if ( !map )
                return DRM_ERR(ENOMEM);

        map->offset = request.offset;
        map->size = request.size;
        map->type = request.type;
        map->flags = request.flags;
        map->mtrr = 0;
        map->handle = 0;

        /* Only allow shared memory to be removable since we only keep enough
         * bookkeeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return DRM_ERR(EINVAL);
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return DRM_ERR(EINVAL);
        }
        if (map->offset + map->size < map->offset) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return DRM_ERR(EINVAL);
        }

        switch ( map->type ) {
        case _DRM_REGISTERS:
                drm_core_ioremap(map, dev);
                if (!(map->flags & _DRM_WRITE_COMBINING))
                        break;
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
                        map->mtrr = 1;
                break;
        case _DRM_SHM:
                map->handle = (void *)drm_alloc(map->size, DRM_MEM_SAREA);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, drm_order(map->size), map->handle );
                if ( !map->handle ) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return DRM_ERR(ENOMEM);
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        DRM_LOCK();
                        if (dev->lock.hw_lock != NULL) {
                                DRM_UNLOCK();
                                drm_free(map->handle, map->size, DRM_MEM_SAREA);
                                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                                return DRM_ERR(EBUSY);
                        }
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                        DRM_UNLOCK();
                }
                break;
        case _DRM_AGP:
                map->offset += dev->agp->base;
                map->mtrr   = dev->agp->mtrr; /* for getmap */
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return DRM_ERR(EINVAL);
                }
                map->offset = map->offset + dev->sg->handle;
                break;
        default:
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return DRM_ERR(EINVAL);
        }

        list = drm_calloc(1, sizeof(*list), DRM_MEM_MAPS);
        if (list == NULL) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return DRM_ERR(EINVAL);
        }
        list->map = map;

        DRM_LOCK();
        TAILQ_INSERT_TAIL(dev->maplist, list, link);
        DRM_UNLOCK();

        request.offset = map->offset;
        request.size = map->size;
        request.type = map->type;
        request.flags = map->flags;
        request.mtrr   = map->mtrr;
        request.handle = map->handle;

        if ( request.type != _DRM_SHM ) {
                request.handle = (void *)request.offset;
        }

        DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, request, sizeof(drm_map_t) );

        return 0;
}


/* Remove a map from the maplist and deallocate its resources.  Only maps
 * whose handle matches the request and which were created with the
 * _DRM_REMOVABLE flag are eligible.
 */

int drm_rmmap(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_map_list_entry_t *list;
        drm_local_map_t *map;
        drm_map_t request;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );

        DRM_LOCK();
        TAILQ_FOREACH(list, dev->maplist, link) {
                map = list->map;
                if (map->handle == request.handle &&
                    map->flags & _DRM_REMOVABLE)
                        break;
        }

        /* No match found. */
        if (list == NULL) {
                DRM_UNLOCK();
                return DRM_ERR(EINVAL);
        }
        TAILQ_REMOVE(dev->maplist, list, link);
        DRM_UNLOCK();

        drm_free(list, sizeof(*list), DRM_MEM_MAPS);

        switch (map->type) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
                if (map->mtrr) {
                        int __unused retcode;

                        retcode = drm_mtrr_del(map->offset, map->size,
                            DRM_MTRR_WC);
                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                }
                drm_ioremapfree(map);
                break;
        case _DRM_SHM:
                drm_free(map->handle, map->size, DRM_MEM_SAREA);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        }
        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
        return 0;
}


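/* Release everything a partially constructed buffer entry holds: any DMA
 * segments in its seglist and any per-buffer private data in its buflist.
 * Called on the error paths of the drm_addbufs_* functions below, which
 * set entry->buf_count/seg_count to the full request count first so the
 * loops here cover every allocation that may exist.
 */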
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i] != 0)
                                drm_pci_free(dev, entry->buf_size,
                                    (void *)entry->seglist[i],
                                    entry->seglist_bus[i]);
                }
                drm_free(entry->seglist,
                          entry->seg_count *
                          sizeof(*entry->seglist),
                          DRM_MEM_SEGS);
                drm_free(entry->seglist_bus, entry->seg_count *
                          sizeof(*entry->seglist_bus), DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        drm_free(entry->buflist[i].dev_private,
                            entry->buflist[i].dev_priv_size, DRM_MEM_BUFS);
                }
                drm_free(entry->buflist,
                          entry->buf_count *
                          sizeof(*entry->buflist),
                          DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

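/* Set up request->count DMA buffers in AGP space, 2^order bytes each,
 * starting at dev->agp->base + request->agp_start.  Only bookkeeping is
 * done here: the buffers describe addresses in the AGP aperture and no
 * backing memory is allocated by this function.
 */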
static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        entry = &dma->bufs[order];

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if ( !entry->buflist ) {
                return DRM_ERR(ENOMEM);
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->pending = 0;
                buf->filp    = NULL;

                buf->dev_priv_size = dev->dev_priv_size;
                buf->dev_private = drm_calloc(1, buf->dev_priv_size,
                    DRM_MEM_BUFS);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return DRM_ERR(ENOMEM);
                }

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc(dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        return 0;
}

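/* Allocate request->count DMA buffers in system memory for PCI DMA,
 * backing them with drm_pci_alloc() segments.  The pagelist is rebuilt
 * into a temporary copy first so that dma->pagelist is only replaced
 * once every allocation has succeeded.
 */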
static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        vm_offset_t vaddr;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;
        dma_addr_t bus_addr;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
                   request->count, request->size, size, order );

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        entry = &dma->bufs[order];

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
            DRM_MEM_BUFS);
        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
            DRM_MEM_SEGS);
        entry->seglist_bus = drm_alloc(count * sizeof(*entry->seglist_bus),
            DRM_MEM_SEGS);

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc((dma->page_count + (count << page_order)) *
            sizeof(*dma->pagelist), DRM_MEM_PAGES);

        if (entry->buflist == NULL || entry->seglist == NULL ||
            entry->seglist_bus == NULL || temp_pagelist == NULL) {
                drm_free(entry->buflist, count * sizeof(*entry->buflist),
                    DRM_MEM_BUFS);
                drm_free(entry->seglist, count * sizeof(*entry->seglist),
                    DRM_MEM_SEGS);
                drm_free(entry->seglist_bus, count *
                    sizeof(*entry->seglist_bus), DRM_MEM_SEGS);
                drm_free(temp_pagelist, (dma->page_count +
                    (count << page_order)) * sizeof(*dma->pagelist),
                    DRM_MEM_PAGES);
                return DRM_ERR(ENOMEM);
        }

        bzero(entry->buflist, count * sizeof(*entry->buflist));
        bzero(entry->seglist, count * sizeof(*entry->seglist));

        memcpy(temp_pagelist, dma->pagelist, dma->page_count *
            sizeof(*dma->pagelist));

        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                vaddr = (vm_offset_t)drm_pci_alloc(dev, size, alignment,
                    0xfffffffful, &bus_addr);
                if (vaddr == 0) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free(temp_pagelist, (dma->page_count +
                            (count << page_order)) * sizeof(*dma->pagelist),
                            DRM_MEM_PAGES);
                        return DRM_ERR(ENOMEM);
                }

                entry->seglist_bus[entry->seg_count] = bus_addr;
                entry->seglist[entry->seg_count++] = vaddr;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   (long)vaddr + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++] =
                            vaddr + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(vaddr + offset);
                        buf->bus_address = bus_addr + offset;
                        buf->next    = NULL;
                        buf->pending = 0;
                        buf->filp    = NULL;

                        buf->dev_priv_size = dev->dev_priv_size;
                        buf->dev_private = drm_alloc(buf->dev_priv_size,
                            DRM_MEM_BUFS);
                        if (buf->dev_private == NULL) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                drm_free(temp_pagelist, (dma->page_count +
                                    (count << page_order)) *
                                    sizeof(*dma->pagelist), DRM_MEM_PAGES );
                                return DRM_ERR(ENOMEM);
                        }
                        bzero(buf->dev_private, buf->dev_priv_size);

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist),
                                   DRM_MEM_BUFS);
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                drm_free(temp_pagelist, (dma->page_count +
                    (count << page_order)) * sizeof(*dma->pagelist),
                    DRM_MEM_PAGES);
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original
         * pagelist with the new one.
         */
        drm_free(dma->pagelist, dma->page_count * sizeof(*dma->pagelist),
            DRM_MEM_PAGES);
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        request->count = entry->buf_count;
        request->size = size;

        return 0;
}

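/* Set up request->count DMA buffers inside the scatter/gather area
 * previously registered with the device.  Layout mirrors
 * drm_addbufs_agp(), but offsets are relative to request->agp_start
 * within the SG region and buf->address includes dev->sg->handle.
 */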
static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? round_page(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        entry = &dma->bufs[order];

        entry->buflist = drm_calloc(1, count * sizeof(*entry->buflist),
            DRM_MEM_BUFS);
        if (entry->buflist == NULL)
                return DRM_ERR(ENOMEM);

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->pending = 0;
                buf->filp    = NULL;

                buf->dev_priv_size = dev->dev_priv_size;
                buf->dev_private = drm_calloc(1, buf->dev_priv_size,
                    DRM_MEM_BUFS);
                if (buf->dev_private == NULL) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        return DRM_ERR(ENOMEM);
                }

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc(dma->buflist,
                                     dma->buf_count * sizeof(*dma->buflist),
                                     (dma->buf_count + entry->buf_count)
                                     * sizeof(*dma->buflist),
                                     DRM_MEM_BUFS );
        if (temp_buflist == NULL) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                return DRM_ERR(ENOMEM);
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        return 0;
}

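/* Ioctl handler for adding DMA buffers.  Validates the request, takes
 * dma_lock, and dispatches to the AGP, scatter/gather, or PCI back end
 * above.  Allocation is refused once any buffer-using ioctl has run
 * (dev->buf_use != 0), and at most one allocation is allowed per order.
 */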
int drm_addbufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_buf_desc_t request;
        int err;
        int order;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        if (request.count < 0 || request.count > 4096)
                return DRM_ERR(EINVAL);

        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return DRM_ERR(EINVAL);

        DRM_SPINLOCK(&dev->dma_lock);
        /* No more allocations after first buffer-using ioctl. */
        if (dev->buf_use != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(EBUSY);
        }
        /* No more than one allocation per order */
        if (dev->dma->bufs[order].buf_count != 0) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(ENOMEM);
        }

        if ( request.flags & _DRM_AGP_BUFFER )
                err = drm_addbufs_agp(dev, &request);
        else if ( request.flags & _DRM_SG_BUFFER )
                err = drm_addbufs_sg(dev, &request);
        else
                err = drm_addbufs_pci(dev, &request);
        DRM_SPINUNLOCK(&dev->dma_lock);

        DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request, sizeof(request));

        return err;
}

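/* Report the counts, sizes, and freelist marks of the allocated buffers
 * back to userland, one drm_buf_desc_t per populated order.  Also marks
 * the DMA system in use, freezing further addbufs allocations.
 */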
int drm_infobufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        int i;
        int count;
        int retcode = 0;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );

        DRM_SPINLOCK(&dev->dma_lock);
        ++dev->buf_use;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t from;

                                from.count = dma->bufs[i].buf_count;
                                from.size = dma->bufs[i].buf_size;
                                from.low_mark = dma->bufs[i].freelist.low_mark;
                                from.high_mark = dma->bufs[i].freelist.high_mark;

                                if (DRM_COPY_TO_USER(&request.list[count], &from,
                                    sizeof(drm_buf_desc_t)) != 0) {
                                        retcode = DRM_ERR(EFAULT);
                                        break;
                                }

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );

        return retcode;
}

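/* Set the freelist low/high water marks for the buffer size given in the
 * request.  The marks must lie within the number of buffers actually
 * allocated for that order.
 */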
int drm_markbufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );

        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
            request.low_mark < 0 || request.high_mark < 0) {
                return DRM_ERR(EINVAL);
        }

        DRM_SPINLOCK(&dev->dma_lock);
        if (request.low_mark > dma->bufs[order].buf_count ||
            request.high_mark > dma->bufs[order].buf_count) {
                DRM_SPINUNLOCK(&dev->dma_lock);
                return DRM_ERR(EINVAL);
        }

        dma->bufs[order].freelist.low_mark  = request.low_mark;
        dma->bufs[order].freelist.high_mark = request.high_mark;
        DRM_SPINUNLOCK(&dev->dma_lock);

        return 0;
}

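/* Return a list of buffers, identified by index, to the free pool.  Each
 * buffer must belong to the file handle making the request.
 */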
int drm_freebufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;
        int retcode = 0;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );

        DRM_DEBUG( "%d\n", request.count );

        DRM_SPINLOCK(&dev->dma_lock);
        for ( i = 0 ; i < request.count ; i++ ) {
                if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) {
                        retcode = DRM_ERR(EFAULT);
                        break;
                }
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        retcode = DRM_ERR(EINVAL);
                        break;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                                   DRM_CURRENTPID);
                        retcode = DRM_ERR(EINVAL);
                        break;
                }
                drm_free_buffer(dev, buf);
        }
        DRM_SPINUNLOCK(&dev->dma_lock);

        return retcode;
}

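/* Map the DMA buffers into the calling process's address space with a
 * single mmap of either the AGP/SG buffer map or the PCI buffer pool,
 * then copy the per-buffer index/size/address table out to userland.
 * Also marks the DMA system in use, freezing further allocations.
 */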
int drm_mapbufs(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;
        drm_device_dma_t *dma = dev->dma;
        int retcode = 0;
        const int zero = 0;
        vm_offset_t address;
        struct vmspace *vms;
#ifdef __FreeBSD__
        vm_ooffset_t foff;
        vm_size_t size;
        vm_offset_t vaddr;
#endif /* __FreeBSD__ */
#ifdef __NetBSD__
        struct vnode *vn;
        vm_size_t size;
        vaddr_t vaddr;
#endif /* __NetBSD__ */

        drm_buf_map_t request;
        int i;

        DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );

#ifdef __NetBSD__
        if (!vfinddev(kdev, VCHR, &vn))
                return 0;       /* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ */

#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
        vms = p->td_proc->p_vmspace;
#else
        vms = p->p_vmspace;
#endif

        DRM_SPINLOCK(&dev->dma_lock);
        dev->buf_use++;         /* Can't allocate more after this call */
        DRM_SPINUNLOCK(&dev->dma_lock);

        if (request.count < dma->buf_count)
                goto done;

        if ((dev->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
            (dev->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
                drm_local_map_t *map = dev->agp_buffer_map;

                if (map == NULL) {
                        retcode = EINVAL;
                        goto done;
                }
                size = round_page(map->size);
                foff = map->offset;
        } else {
                size = round_page(dma->byte_count);
                foff = 0;
        }

#ifdef __FreeBSD__
        vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
        retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
            VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
#elif defined(__NetBSD__)
        vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
        retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
            UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
            &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ */
        if (retcode)
                goto done;

        request.virtual = (void *)vaddr;

        for ( i = 0 ; i < dma->buf_count ; i++ ) {
                if (DRM_COPY_TO_USER(&request.list[i].idx,
                    &dma->buflist[i]->idx, sizeof(request.list[0].idx))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request.list[i].total,
                    &dma->buflist[i]->total, sizeof(request.list[0].total))) {
                        retcode = EFAULT;
                        goto done;
                }
                if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
                    sizeof(zero))) {
                        retcode = EFAULT;
                        goto done;
                }
                address = vaddr + dma->buflist[i]->offset; /* *** */
                if (DRM_COPY_TO_USER(&request.list[i].address, &address,
                    sizeof(address))) {
                        retcode = EFAULT;
                        goto done;
                }
        }

 done:
        request.count = dma->buf_count;

        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request));

        return DRM_ERR(retcode);
}