/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 * $FreeBSD: src/sys/dev/drm/drm_bufs.h,v 1.4 2003/03/09 02:08:28 anholt Exp $
 */

#include "dev/drm/drmP.h"

#ifndef __HAVE_PCI_DMA
#define __HAVE_PCI_DMA		0
#endif

#ifndef __HAVE_SG
#define __HAVE_SG		0
#endif

#ifndef DRIVER_BUF_PRIV_T
#define DRIVER_BUF_PRIV_T	u32
#endif
#ifndef DRIVER_AGP_BUFFERS_MAP
#if __HAVE_AGP && __HAVE_DMA
#error "You must define DRIVER_AGP_BUFFERS_MAP()"
#else
#define DRIVER_AGP_BUFFERS_MAP( dev )	NULL
#endif
#endif
/*
 * Compute order.  Can be made faster.
 */
int DRM(order)( unsigned long size )
{
	int order;
	unsigned long tmp;

	for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

	if ( size & ~(1 << order) )
		++order;

	return order;
}
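/* Example: DRM(order)(4096) == 12, since 4096 == 1 << 12 exactly, while
 * DRM(order)(4097) == 13, since any remainder rounds the order up.  The
 * addbufs paths below use this order both as the allocation size
 * (size = 1 << order) and as the index into dma->bufs[].
 */

/* Add a mapping (registers, framebuffer, SAREA, AGP, or scatter/gather
 * memory) to the device's map list on behalf of the DRM_IOCTL_ADD_MAP
 * caller, performing per-type setup such as MTRR write-combining.
 */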
int DRM(addmap)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_map_t request;
	drm_local_map_t *map;
	drm_map_list_entry_t *list;

	if (!(dev->flags & (FREAD|FWRITE)))
		return DRM_ERR(EACCES); /* Require read/write */

	DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(drm_map_t) );

	map = (drm_local_map_t *) DRM(alloc)( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return DRM_ERR(ENOMEM);

	map->offset = request.offset;
	map->size = request.size;
	map->type = request.type;
	map->flags = request.flags;
	map->mtrr = -1;
	map->handle = NULL;

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}

	switch ( map->type ) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
		if ( map->offset + map->size < map->offset ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return DRM_ERR(EINVAL);
		}
#if __REALLY_HAVE_MTRR
		if ( map->type == _DRM_FRAME_BUFFER ||
		     (map->flags & _DRM_WRITE_COMBINING) ) {
#ifdef __FreeBSD__
			int retcode = 0, act;
			struct mem_range_desc mrdesc;
			mrdesc.mr_base = map->offset;
			mrdesc.mr_len = map->size;
			mrdesc.mr_flags = MDF_WRITECOMBINE;
			act = MEMRANGE_SET_UPDATE;
			bcopy(DRIVER_NAME, &mrdesc.mr_owner, strlen(DRIVER_NAME));
			retcode = mem_range_attr_set(&mrdesc, &act);
			map->mtrr = 1;
#elif defined __NetBSD__
			struct mtrr mtrrmap;
			int one = 1;
			mtrrmap.base = map->offset;
			mtrrmap.len = map->size;
			mtrrmap.type = MTRR_TYPE_WC;
			mtrrmap.flags = MTRR_VALID;
			map->mtrr = mtrr_set( &mtrrmap, &one, p, MTRR_GETSET_KERNEL );
#endif
		}
#endif /* __REALLY_HAVE_MTRR */
		DRM_IOREMAP(map);
		break;
	case _DRM_SHM:
		map->handle = (void *)DRM(alloc)(map->size, DRM_MEM_SAREA);
		DRM_DEBUG( "%ld %d %p\n",
			   map->size, DRM(order)( map->size ), map->handle );
		if ( !map->handle ) {
			DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
			return DRM_ERR(ENOMEM);
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;
#if __REALLY_HAVE_AGP
	case _DRM_AGP:
		map->offset += dev->agp->base;
		map->mtrr = dev->agp->agp_mtrr; /* for getmap */
		break;
#endif
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
			return DRM_ERR(EINVAL);
		}
		map->offset = map->offset + dev->sg->handle;
		break;

	default:
		DRM(free)( map, sizeof(*map), DRM_MEM_MAPS );
		return DRM_ERR(EINVAL);
	}
	list = DRM(alloc)(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
		return DRM_ERR(EINVAL);
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	DRM_LOCK;
	TAILQ_INSERT_TAIL(dev->maplist, list, link);
	DRM_UNLOCK;

	request.offset = map->offset;
	request.size = map->size;
	request.type = map->type;
	request.flags = map->flags;
	request.mtrr = map->mtrr;
	request.handle = map->handle;

	if ( request.type != _DRM_SHM ) {
		request.handle = (void *)request.offset;
	}

	DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, request, sizeof(drm_map_t) );

	return 0;
}
/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */

int DRM(rmmap)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_map_list_entry_t *list;
	drm_local_map_t *map;
	drm_map_t request;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );

	DRM_LOCK;
	TAILQ_FOREACH(list, dev->maplist, link) {
		map = list->map;
		if (map->handle == request.handle &&
		    map->flags & _DRM_REMOVABLE) break;
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == NULL) {
		DRM_UNLOCK;
		return DRM_ERR(EINVAL);
	}
	TAILQ_REMOVE(dev->maplist, list, link);
	DRM(free)(list, sizeof(*list), DRM_MEM_MAPS);

	switch (map->type) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if __REALLY_HAVE_MTRR
		if (map->mtrr >= 0) {
			int retcode;
#ifdef __FreeBSD__
			int act;
			struct mem_range_desc mrdesc;
			mrdesc.mr_base = map->offset;
			mrdesc.mr_len = map->size;
			mrdesc.mr_flags = MDF_WRITECOMBINE;
			act = MEMRANGE_SET_REMOVE;
			bcopy(DRIVER_NAME, &mrdesc.mr_owner, strlen(DRIVER_NAME));
			retcode = mem_range_attr_set(&mrdesc, &act);
#elif defined __NetBSD__
			struct mtrr mtrrmap;
			int one = 1;
			mtrrmap.base = map->offset;
			mtrrmap.len = map->size;
			mtrrmap.type = 0;
			mtrrmap.flags = 0;
			mtrrmap.owner = p->p_pid;
			retcode = mtrr_set( &mtrrmap, &one, p, MTRR_GETSET_KERNEL);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
#endif
		}
#endif
		DRM(ioremapfree)( map );
		break;
	case _DRM_SHM:
		DRM(free)( map->handle, map->size, DRM_MEM_SAREA );
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	}
	DRM(free)(map, sizeof(*map), DRM_MEM_MAPS);
	DRM_UNLOCK;

	return 0;
}

#if __HAVE_DMA
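/* Free every segment and buffer in a partially constructed drm_buf_entry_t
 * after a mid-allocation failure, leaving the entry empty so the order
 * bucket can be retried.
 */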
static void DRM(cleanup_buf_error)(drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			DRM(free)((void *)entry->seglist[i],
				  entry->buf_size,
				  DRM_MEM_DMA);
		}
		DRM(free)(entry->seglist,
			  entry->seg_count *
			  sizeof(*entry->seglist),
			  DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				DRM(free)(entry->buflist[i].dev_private,
					  entry->buflist[i].dev_priv_size,
					  DRM_MEM_BUFS);
			}
		}
		DRM(free)(entry->buflist,
			  entry->buf_count *
			  sizeof(*entry->buflist),
			  DRM_MEM_BUFS);

#if __HAVE_DMA_FREELIST
		DRM(freelist_destroy)(&entry->freelist);
#endif

		entry->buf_count = 0;
	}
}
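/* Allocate DMA buffers out of AGP space for DRM_IOCTL_ADD_BUFS.  All
 * buffers for a given power-of-two order are carved out of one AGP
 * region starting at request.agp_start and appended to dma->buflist.
 */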
#if __REALLY_HAVE_AGP
int DRM(addbufs_agp)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if ( !dma ) return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request.agp_start;
357 DRM_DEBUG( "count: %d\n", count );
358 DRM_DEBUG( "order: %d\n", order );
359 DRM_DEBUG( "size: %d\n", size );
360 DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
361 DRM_DEBUG( "alignment: %d\n", alignment );
362 DRM_DEBUG( "page_order: %d\n", page_order );
363 DRM_DEBUG( "total: %d\n", total );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);
	if ( dev->queue_count )
		return DRM_ERR(EBUSY); /* Not while in use */

	DRM_SPINLOCK( &dev->count_lock );
	if ( dev->buf_use ) {
		DRM_SPINUNLOCK( &dev->count_lock );
		return DRM_ERR(EBUSY);
	}
	atomic_inc( &dev->buf_alloc );
	DRM_SPINUNLOCK( &dev->count_lock );

	DRM_LOCK;
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM); /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(EINVAL);
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while ( entry->buf_count < count ) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->dma_wait = 0;
		buf->pid     = 0;

		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
					       DRM_MEM_BUFS );
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			/* Bail out here instead of falling through to the
			 * memset below with a NULL dev_private, mirroring
			 * the scatter/gather path's error handling.
			 */
			DRM_UNLOCK;
			atomic_dec( &dev->buf_alloc );
			return DRM_ERR(ENOMEM);
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

#if __HAVE_DMA_HISTOGRAM
		buf->time_queued = 0;
		buf->time_dispatched = 0;
		buf->time_completed = 0;
		buf->time_freed = 0;
#endif

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	}
#endif

	DRM_UNLOCK;

	request.count = entry->buf_count;
	request.size = size;

	DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request, sizeof(request) );

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec( &dev->buf_alloc );

	return 0;
}
#endif /* __REALLY_HAVE_AGP */
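/* Allocate DMA buffers from system memory for DRM_IOCTL_ADD_BUFS,
 * allocating page-order segments and carving fixed-size buffers out of
 * each segment; every page is also recorded in dma->pagelist.
 */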
#if __HAVE_PCI_DMA
int DRM(addbufs_pci)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if ( !dma ) return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		   request.count, request.size, size,
		   order, dev->queue_count );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);
	if ( dev->queue_count )
		return DRM_ERR(EBUSY); /* Not while in use */

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
	DRM_SPINLOCK( &dev->count_lock );
	if ( dev->buf_use ) {
		DRM_SPINUNLOCK( &dev->count_lock );
		return DRM_ERR(EBUSY);
	}
	atomic_inc( &dev->buf_alloc );
	DRM_SPINUNLOCK( &dev->count_lock );

	DRM_LOCK;
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM); /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(EINVAL);
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->seglist = DRM(alloc)( count * sizeof(*entry->seglist),
				     DRM_MEM_SEGS );
	if ( !entry->seglist ) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist),
			   DRM_MEM_BUFS );
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

	temp_pagelist = DRM(realloc)( dma->pagelist,
				      dma->page_count * sizeof(*dma->pagelist),
				      (dma->page_count + (count << page_order))
				      * sizeof(*dma->pagelist),
				      DRM_MEM_PAGES );
	if (!temp_pagelist) {
		DRM(free)( entry->buflist,
			   count * sizeof(*entry->buflist),
			   DRM_MEM_BUFS );
		DRM(free)( entry->seglist,
			   count * sizeof(*entry->seglist),
			   DRM_MEM_SEGS );
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}

	dma->pagelist = temp_pagelist;
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;
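	/* The outer loop allocates one segment of 1 << page_order pages at
	 * a time; the inner loops record each page in the pagelist and
	 * carve as many aligned buffers out of the segment as will fit.
	 */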
	while ( entry->buf_count < count ) {
		page = (unsigned long)DRM(alloc)( size, DRM_MEM_DMA );
		if ( !page ) break;
		entry->seglist[entry->seg_count++] = page;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			dma->pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf          = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next    = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->dma_wait = 0;
			buf->pid     = 0;
#if __HAVE_DMA_HISTOGRAM
			buf->time_queued = 0;
			buf->time_dispatched = 0;
			buf->time_completed = 0;
			buf->time_freed = 0;
#endif
			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}
	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	}
#endif

	DRM_UNLOCK;

	request.count = entry->buf_count;
	request.size = size;

	DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request, sizeof(request) );

	atomic_dec( &dev->buf_alloc );

	return 0;
}
#endif /* __HAVE_PCI_DMA */
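/* Allocate DMA buffers within a previously created scatter/gather area.
 * Buffer addresses are computed relative to dev->sg->handle, the kernel
 * mapping of the scatter/gather region.
 */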
#if __REALLY_HAVE_SG
int DRM(addbufs_sg)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if ( !dma ) return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

	count = request.count;
	order = DRM(order)( request.size );
	size = 1 << order;

	alignment = (request.flags & _DRM_PAGE_ALIGN)
		? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request.agp_start;

	DRM_DEBUG( "count:      %d\n",  count );
	DRM_DEBUG( "order:      %d\n",  order );
	DRM_DEBUG( "size:       %d\n",  size );
	DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n",  alignment );
	DRM_DEBUG( "page_order: %d\n",  page_order );
	DRM_DEBUG( "total:      %d\n",  total );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);
	if ( dev->queue_count ) return DRM_ERR(EBUSY); /* Not while in use */

	DRM_SPINLOCK( &dev->count_lock );
	if ( dev->buf_use ) {
		DRM_SPINUNLOCK( &dev->count_lock );
		return DRM_ERR(EBUSY);
	}
	atomic_inc( &dev->buf_alloc );
	DRM_SPINUNLOCK( &dev->count_lock );

	DRM_LOCK;
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM); /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(EINVAL);
	}

	entry->buflist = DRM(alloc)( count * sizeof(*entry->buflist),
				     DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;
	while ( entry->buf_count < count ) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next    = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->dma_wait = 0;
		buf->pid     = 0;

		buf->dev_priv_size = sizeof(DRIVER_BUF_PRIV_T);
		buf->dev_private = DRM(alloc)( sizeof(DRIVER_BUF_PRIV_T),
					       DRM_MEM_BUFS );
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			DRM(cleanup_buf_error)(entry);
			DRM_UNLOCK;
			atomic_dec( &dev->buf_alloc );
			return DRM_ERR(ENOMEM);
		}

		memset( buf->dev_private, 0, buf->dev_priv_size );

#if __HAVE_DMA_HISTOGRAM
		buf->time_queued = 0;
		buf->time_dispatched = 0;
		buf->time_completed = 0;
		buf->time_freed = 0;
#endif
		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}
	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = DRM(realloc)( dma->buflist,
				     dma->buf_count * sizeof(*dma->buflist),
				     (dma->buf_count + entry->buf_count)
				     * sizeof(*dma->buflist),
				     DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		DRM(cleanup_buf_error)(entry);
		DRM_UNLOCK;
		atomic_dec( &dev->buf_alloc );
		return DRM_ERR(ENOMEM);
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

#if __HAVE_DMA_FREELIST
	DRM(freelist_create)( &entry->freelist, entry->buf_count );
	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		DRM(freelist_put)( dev, &entry->freelist, &entry->buflist[i] );
	}
#endif

	DRM_UNLOCK;

	request.count = entry->buf_count;
	request.size = size;

	DRM_COPY_TO_USER_IOCTL( (drm_buf_desc_t *)data, request, sizeof(request) );

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec( &dev->buf_alloc );

	return 0;
}
#endif /* __REALLY_HAVE_SG */
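/* Entry point for DRM_IOCTL_ADD_BUFS: dispatch to the AGP, scatter/gather,
 * or PCI allocator based on the request flags.  An illustrative userland
 * invocation (a sketch, not taken from this file) might look like:
 *
 *	drm_buf_desc_t req;
 *	memset( &req, 0, sizeof(req) );
 *	req.count = 32;			// number of buffers
 *	req.size  = 65536;		// bytes each, rounded to 1 << order
 *	req.flags = _DRM_AGP_BUFFER;	// or _DRM_SG_BUFFER, or 0 for PCI
 *	req.agp_start = 0;		// offset within the AGP aperture
 *	ioctl( fd, DRM_IOCTL_ADD_BUFS, &req );
 *
 * Only one allocation is permitted per power-of-two size bucket.
 */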
int DRM(addbufs)( DRM_IOCTL_ARGS )
{
	drm_buf_desc_t request;

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

#if __REALLY_HAVE_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		return DRM(addbufs_agp)( kdev, cmd, data, flags, p );
	else
#endif
#if __REALLY_HAVE_SG
	if ( request.flags & _DRM_SG_BUFFER )
		return DRM(addbufs_sg)( kdev, cmd, data, flags, p );
	else
#endif
#if __HAVE_PCI_DMA
		return DRM(addbufs_pci)( kdev, cmd, data, flags, p );
#else
		return DRM_ERR(EINVAL);
#endif
}
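/* Report, for DRM_IOCTL_INFO_BUFS, how many buffers exist in each
 * power-of-two order bucket, along with their size and freelist
 * watermarks.  This also marks the buffer pool as in use, which blocks
 * any further addbufs calls.
 */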
int DRM(infobufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	int i;
	int count;

	if ( !dma ) return DRM_ERR(EINVAL);

	DRM_SPINLOCK( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		DRM_SPINUNLOCK( &dev->count_lock );
		return DRM_ERR(EBUSY);
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK( &dev->count_lock );

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( DRM_COPY_TO_USER( &to->count,
						       &from->buf_count,
						       sizeof(from->buf_count) ) ||
				     DRM_COPY_TO_USER( &to->size,
						       &from->buf_size,
						       sizeof(from->buf_size) ) ||
				     DRM_COPY_TO_USER( &to->low_mark,
						       &list->low_mark,
						       sizeof(list->low_mark) ) ||
				     DRM_COPY_TO_USER( &to->high_mark,
						       &list->high_mark,
						       sizeof(list->high_mark) ) )
					return DRM_ERR(EFAULT);

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );

	return 0;
}
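/* DRM_IOCTL_MARK_BUFS: set the freelist low and high watermarks for one
 * order bucket, after validating them against the bucket's buffer count.
 */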
int DRM(markbufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if ( !dma ) return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );
	order = DRM(order)( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER )
		return DRM_ERR(EINVAL);
	entry = &dma->bufs[order];

	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
		return DRM_ERR(EINVAL);
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
		return DRM_ERR(EINVAL);

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
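/* DRM_IOCTL_FREE_BUFS: return a list of buffers to the free pool.  Each
 * index is bounds-checked and must belong to the calling process.
 */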
int DRM(freebufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if ( !dma ) return DRM_ERR(EINVAL);

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );

	DRM_DEBUG( "%d\n", request.count );
	for ( i = 0 ; i < request.count ; i++ ) {
		if ( DRM_COPY_FROM_USER( &idx,
					 &request.list[i],
					 sizeof(idx) ) )
			return DRM_ERR(EFAULT);
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			return DRM_ERR(EINVAL);
		}
		buf = dma->buflist[idx];
		if ( buf->pid != DRM_CURRENTPID ) {
			DRM_ERROR( "Process %d freeing buffer owned by %d\n",
				   DRM_CURRENTPID, buf->pid );
			return DRM_ERR(EINVAL);
		}
		DRM(free_buffer)( dev, buf );
	}

	return 0;
}
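/* DRM_IOCTL_MAP_BUFS: map the entire buffer pool (the AGP/SG buffer map,
 * or the PCI DMA region) into the caller's address space with one mmap,
 * then copy out each buffer's index, size, and user address.
 */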
int DRM(mapbufs)( DRM_IOCTL_ARGS )
{
	DRM_DEVICE;
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t virtual, address;
#ifdef __FreeBSD__
#if __FreeBSD_version >= 500000
	struct vmspace *vms = p->td_proc->p_vmspace;
#else
	struct vmspace *vms = p->p_vmspace;
#endif
#endif /* __FreeBSD__ */
#ifdef __NetBSD__
	struct vnode *vn;
	struct vmspace *vms = p->p_vmspace;
#endif /* __NetBSD__ */

	drm_buf_map_t request;
	int i;

	if ( !dma ) return DRM_ERR(EINVAL);

	DRM_SPINLOCK( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		DRM_SPINUNLOCK( &dev->count_lock );
		return DRM_ERR(EBUSY);
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK( &dev->count_lock );

	DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );

#ifdef __NetBSD__
	if (!vfinddev(kdev, VCHR, &vn))
		return 0;	/* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ */
	if ( request.count >= dma->buf_count ) {
		if ( (__HAVE_AGP && (dma->flags & _DRM_DMA_USE_AGP)) ||
		     (__HAVE_SG && (dma->flags & _DRM_DMA_USE_SG)) ) {
			drm_local_map_t *map = DRIVER_AGP_BUFFERS_MAP( dev );

			if ( !map ) {
				retcode = EINVAL;
				goto done;
			}

#ifdef __FreeBSD__
			virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
			retcode = vm_mmap(&vms->vm_map,
					  &virtual,
					  round_page(map->size),
					  PROT_READ|PROT_WRITE, VM_PROT_ALL,
					  MAP_SHARED,
					  SLIST_FIRST(&kdev->si_hlist),
					  (unsigned long)map->offset );
#elif defined(__NetBSD__)
			virtual = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
			retcode = uvm_mmap(&vms->vm_map,
					   (vaddr_t *)&virtual,
					   round_page(map->size),
					   UVM_PROT_READ | UVM_PROT_WRITE,
					   UVM_PROT_ALL, MAP_SHARED,
					   &vn->v_uobj, map->offset,
					   p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ */
		} else {
#ifdef __FreeBSD__
			virtual = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
			retcode = vm_mmap(&vms->vm_map,
					  &virtual,
					  round_page(dma->byte_count),
					  PROT_READ|PROT_WRITE, VM_PROT_ALL,
					  MAP_SHARED,
					  SLIST_FIRST(&kdev->si_hlist),
					  0 );
#elif defined(__NetBSD__)
			virtual = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
			retcode = uvm_mmap(&vms->vm_map,
					   (vaddr_t *)&virtual,
					   round_page(dma->byte_count),
					   UVM_PROT_READ | UVM_PROT_WRITE,
					   UVM_PROT_ALL, MAP_SHARED,
					   &vn->v_uobj, 0,
					   p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif /* __NetBSD__ */
		}
		if (retcode)
			goto done;
		request.virtual = (void *)virtual;

		for ( i = 0 ; i < dma->buf_count ; i++ ) {
			if ( DRM_COPY_TO_USER( &request.list[i].idx,
					       &dma->buflist[i]->idx,
					       sizeof(request.list[0].idx) ) ) {
				retcode = EFAULT;
				goto done;
			}
			if ( DRM_COPY_TO_USER( &request.list[i].total,
					       &dma->buflist[i]->total,
					       sizeof(request.list[0].total) ) ) {
				retcode = EFAULT;
				goto done;
			}
			if ( DRM_COPY_TO_USER( &request.list[i].used,
					       &zero,
					       sizeof(zero) ) ) {
				retcode = EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset; /* *** */
			if ( DRM_COPY_TO_USER( &request.list[i].address,
					       &address,
					       sizeof(address) ) ) {
				retcode = EFAULT;
				goto done;
			}
		}
	}
 done:
	request.count = dma->buf_count;

	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	DRM_COPY_TO_USER_IOCTL( (drm_buf_map_t *)data, request, sizeof(request) );

	return DRM_ERR(retcode);
}

#endif /* __HAVE_DMA */