1 /* drm_bufs.h -- Generic buffer template -*- linux-c -*-
2 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
4 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
5 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
23 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
24 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
25 * OTHER DEALINGS IN THE SOFTWARE.
28 * Rickard E. (Rik) Faith <faith@valinux.com>
29 * Gareth Hughes <gareth@valinux.com>
/*
 * Compute the "order" of an allocation size: the smallest n such that
 * (1UL << n) >= size, i.e. ceil(log2(size)).  Returns 0 for size 0 or 1.
 * Can be made faster.
 */
int drm_order(unsigned long size)
{
	int order;
	unsigned long tmp;

	/* order = floor(log2(size)) for size >= 1 (0 when size == 0). */
	for ( order = 0, tmp = size ; tmp >>= 1 ; ++order );

	/*
	 * Round up when size is not an exact power of two.  The classic
	 * power-of-two test (size & (size - 1)) is used instead of the
	 * original (size & ~(1 << order)): for order >= 31 the latter
	 * left-shifts a signed int by >= its width, which is undefined
	 * behavior.  The two tests agree for every nonzero size.
	 */
	if ( size != 0 && (size & (size - 1)) != 0 )
		++order;

	return order;
}
/*
 * DRM_IOCTL_ADD_MAP handler: validate a userland drm_map_t request,
 * allocate a drm_local_map_t, perform per-type setup (ioremap, MTRR,
 * SAREA allocation, AGP/SG offset fixup), link it on dev->maplist and
 * copy the final offset/handle back to userland.
 *
 * NOTE(review): this listing is elided -- closing braces, several case
 * labels and some NULL checks of the original are not shown here.
 */
51 int drm_addmap(DRM_IOCTL_ARGS)
56 drm_map_list_entry_t *list;
/* The file descriptor must be open for both read and write. */
58 if (!(dev->flags & (FREAD|FWRITE)))
59 return DRM_ERR(EACCES); /* Require read/write */
61 DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(drm_map_t) );
63 map = (drm_local_map_t *) drm_alloc(sizeof(*map), DRM_MEM_MAPS);
65 return DRM_ERR(ENOMEM);
67 map->offset = request.offset;
68 map->size = request.size;
69 map->type = request.type;
70 map->flags = request.flags;
74 /* Only allow shared memory to be removable since we only keep enough
75 * book keeping information about shared memory to allow for removal
76 * when processes fork.
78 if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
79 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
80 return DRM_ERR(EINVAL);
82 DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
83 map->offset, map->size, map->type );
/* Offset and size must be page aligned. */
84 if ( (map->offset & PAGE_MASK) || (map->size & PAGE_MASK) ) {
85 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
86 return DRM_ERR(EINVAL);
/* Reject offset + size arithmetic wrap-around. */
88 if (map->offset + map->size < map->offset) {
89 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
90 return DRM_ERR(EINVAL);
93 switch ( map->type ) {
/* Presumably the _DRM_REGISTERS case (label elided): map the
 * register range into kernel space. */
95 drm_core_ioremap(map, dev);
96 if (!(map->flags & _DRM_WRITE_COMBINING))
99 case _DRM_FRAME_BUFFER:
/* Try to cover the framebuffer with a write-combining MTRR. */
100 if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
/* _DRM_SHM path (label elided): the SAREA is kernel memory
 * allocated here rather than a hardware range. */
104 map->handle = (void *)drm_alloc(map->size, DRM_MEM_SAREA);
105 DRM_DEBUG( "%lu %d %p\n",
106 map->size, drm_order(map->size), map->handle );
107 if ( !map->handle ) {
108 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
109 return DRM_ERR(ENOMEM);
/* For SHM the user-visible offset is the kernel virtual address. */
111 map->offset = (unsigned long)map->handle;
112 if ( map->flags & _DRM_CONTAINS_LOCK ) {
113 /* Prevent a 2nd X Server from creating a 2nd lock */
115 if (dev->lock.hw_lock != NULL) {
117 drm_free(map->handle, map->size, DRM_MEM_SAREA);
118 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
119 return DRM_ERR(EBUSY);
121 dev->lock.hw_lock = map->handle; /* Pointer to lock */
/* _DRM_AGP path (label elided): offsets are relative to the
 * aperture base. */
126 map->offset += dev->agp->base;
127 map->mtrr = dev->agp->mtrr; /* for getmap */
129 case _DRM_SCATTER_GATHER:
/* Requires a scatter/gather area set up beforehand (check elided). */
131 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
132 return DRM_ERR(EINVAL);
134 map->offset = map->offset + dev->sg->handle;
/* Unknown map type: reject. */
138 drm_free( map, sizeof(*map), DRM_MEM_MAPS );
139 return DRM_ERR(EINVAL);
142 list = drm_calloc(1, sizeof(*list), DRM_MEM_MAPS);
/* NOTE(review): allocation failure here returns EINVAL; ENOMEM would
 * be the more accurate errno -- confirm against callers. */
144 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
145 return DRM_ERR(EINVAL);
150 TAILQ_INSERT_TAIL(dev->maplist, list, link);
/* Reflect the final (possibly rewritten) values back to userland. */
153 request.offset = map->offset;
154 request.size = map->size;
155 request.type = map->type;
156 request.flags = map->flags;
157 request.mtrr = map->mtrr;
158 request.handle = map->handle;
/* Non-SHM maps are identified to userland by offset, not pointer. */
160 if ( request.type != _DRM_SHM ) {
161 request.handle = (void *)request.offset;
164 DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, request, sizeof(drm_map_t) );
170 /* Remove a map private from list and deallocate resources if the mapping
/*
 * DRM_IOCTL_RM_MAP handler: look up a map by the handle userland got
 * from drm_addmap(), unlink it from dev->maplist, and release its
 * per-type resources (MTRR, ioremap, SAREA memory).
 *
 * NOTE(review): listing is elided; braces, the switch statement head
 * and some case labels of the original are not shown.
 */
174 int drm_rmmap(DRM_IOCTL_ARGS)
177 drm_map_list_entry_t *list;
178 drm_local_map_t *map;
181 DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );
/* Only maps marked _DRM_REMOVABLE may be removed through this ioctl. */
184 TAILQ_FOREACH(list, dev->maplist, link) {
186 if (map->handle == request.handle &&
187 map->flags & _DRM_REMOVABLE)
191 /* No match found. */
194 return DRM_ERR(EINVAL);
196 TAILQ_REMOVE(dev->maplist, list, link);
199 drm_free(list, sizeof(*list), DRM_MEM_MAPS);
/* Per-type teardown mirrors the setup in drm_addmap(). */
203 case _DRM_FRAME_BUFFER:
205 int __unused retcode;
207 retcode = drm_mtrr_del(map->offset, map->size,
209 DRM_DEBUG("mtrr_del = %d\n", retcode);
211 drm_ioremapfree(map);
/* SHM maps own kernel memory allocated in drm_addmap(). */
214 drm_free(map->handle, map->size, DRM_MEM_SAREA);
217 case _DRM_SCATTER_GATHER:
220 drm_free(map, sizeof(*map), DRM_MEM_MAPS);
/*
 * Tear down a partially-constructed drm_buf_entry_t after a failed
 * drm_addbufs_*() attempt: free every DMA segment and every buffer's
 * driver-private data, then the bookkeeping arrays, and reset the
 * counts so the entry slot can be reused.  Callers set
 * entry->buf_count / entry->seg_count before calling so the proper
 * amounts are freed.
 */
225 static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
229 if (entry->seg_count) {
230 for (i = 0; i < entry->seg_count; i++) {
/* Slots may be zero if allocation stopped part-way through. */
231 if (entry->seglist[i] != 0)
/* Segments were obtained from drm_pci_alloc(). */
232 drm_pci_free(dev, entry->buf_size,
233 (void *)entry->seglist[i],
234 entry->seglist_bus[i]);
236 drm_free(entry->seglist,
238 sizeof(*entry->seglist),
240 drm_free(entry->seglist_bus, entry->seg_count *
241 sizeof(*entry->seglist_bus), DRM_MEM_SEGS);
243 entry->seg_count = 0;
246 if (entry->buf_count) {
/* Free each buffer's driver-private state, then the buflist array. */
247 for (i = 0; i < entry->buf_count; i++) {
248 drm_free(entry->buflist[i].dev_private,
249 entry->buflist[i].dev_priv_size, DRM_MEM_BUFS);
251 drm_free(entry->buflist,
253 sizeof(*entry->buflist),
256 entry->buf_count = 0;
/*
 * Allocate DMA buffers backed by AGP aperture memory for drm_addbufs().
 * Fills dma->bufs[order] with request->count buffers of 2^order bytes
 * carved out of the aperture starting at request->agp_start, then
 * appends them to the device-wide dma->buflist.  Returns 0 or a
 * DRM_ERR code; on failure the partial entry is torn down via
 * drm_cleanup_buf_error().
 *
 * NOTE(review): listing is elided; braces and some declarations of the
 * original are not shown.
 */
260 static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
262 drm_device_dma_t *dma = dev->dma;
263 drm_buf_entry_t *entry;
265 unsigned long offset;
266 unsigned long agp_offset;
275 drm_buf_t **temp_buflist;
277 count = request->count;
278 order = drm_order(request->size);
/* Buffers may be padded out to page boundaries on request. */
281 alignment = (request->flags & _DRM_PAGE_ALIGN)
282 ? round_page(size) : size;
283 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
284 total = PAGE_SIZE << page_order;
/* AGP offsets from userland are relative to the aperture base. */
287 agp_offset = dev->agp->base + request->agp_start;
289 DRM_DEBUG( "count: %d\n", count );
290 DRM_DEBUG( "order: %d\n", order );
291 DRM_DEBUG( "size: %d\n", size );
292 DRM_DEBUG( "agp_offset: 0x%lx\n", agp_offset );
293 DRM_DEBUG( "alignment: %d\n", alignment );
294 DRM_DEBUG( "page_order: %d\n", page_order );
295 DRM_DEBUG( "total: %d\n", total );
/* One entry per order; drm_addbufs() guarantees it is still empty. */
297 entry = &dma->bufs[order];
299 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
301 if ( !entry->buflist ) {
302 return DRM_ERR(ENOMEM);
304 memset( entry->buflist, 0, count * sizeof(*entry->buflist) );
306 entry->buf_size = size;
307 entry->page_order = page_order;
311 while ( entry->buf_count < count ) {
312 buf = &entry->buflist[entry->buf_count];
/* Global index across all orders. */
313 buf->idx = dma->buf_count + entry->buf_count;
314 buf->total = alignment;
318 buf->offset = (dma->byte_count + offset);
319 buf->bus_address = agp_offset + offset;
320 buf->address = (void *)(agp_offset + offset);
/* Per-buffer private area for the driver (zeroed). */
325 buf->dev_priv_size = dev->dev_priv_size;
326 buf->dev_private = drm_calloc(1, buf->dev_priv_size,
328 if (buf->dev_private == NULL) {
329 /* Set count correctly so we free the proper amount. */
330 entry->buf_count = count;
331 drm_cleanup_buf_error(dev, entry);
332 return DRM_ERR(ENOMEM);
337 byte_count += PAGE_SIZE << page_order;
340 DRM_DEBUG( "byte_count: %d\n", byte_count );
/* Grow the flat device-wide buffer list to hold the new entries. */
342 temp_buflist = drm_realloc(dma->buflist,
343 dma->buf_count * sizeof(*dma->buflist),
344 (dma->buf_count + entry->buf_count)
345 * sizeof(*dma->buflist),
347 if (temp_buflist == NULL) {
348 /* Free the entry because it isn't valid */
349 drm_cleanup_buf_error(dev, entry);
350 return DRM_ERR(ENOMEM);
352 dma->buflist = temp_buflist;
354 for ( i = 0 ; i < entry->buf_count ; i++ ) {
355 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
358 dma->buf_count += entry->buf_count;
359 dma->byte_count += byte_count;
361 DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
362 DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
/* Report the actual count/size granted back through the request. */
364 request->count = entry->buf_count;
365 request->size = size;
367 dma->flags = _DRM_DMA_USE_AGP;
/*
 * Allocate DMA buffers backed by PCI-consistent memory for
 * drm_addbufs().  Memory is obtained one segment (2^page_order pages)
 * at a time from drm_pci_alloc() and split into buffers of 'alignment'
 * bytes; kernel virtual and bus addresses are tracked in
 * entry->seglist / entry->seglist_bus and the per-page dma->pagelist.
 * Returns 0 or a DRM_ERR code, unwinding via drm_cleanup_buf_error()
 * on failure.
 *
 * NOTE(review): listing is elided; braces and several declarations of
 * the original are not shown.
 */
372 static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
374 drm_device_dma_t *dma = dev->dma;
380 drm_buf_entry_t *entry;
384 unsigned long offset;
388 unsigned long *temp_pagelist;
389 drm_buf_t **temp_buflist;
392 count = request->count;
393 order = drm_order(request->size);
396 DRM_DEBUG( "count=%d, size=%d (%d), order=%d\n",
397 request->count, request->size, size, order );
399 alignment = (request->flags & _DRM_PAGE_ALIGN)
400 ? round_page(size) : size;
401 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
402 total = PAGE_SIZE << page_order;
404 entry = &dma->bufs[order];
406 entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
408 entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
410 entry->seglist_bus = drm_alloc(count * sizeof(*entry->seglist_bus),
413 /* Keep the original pagelist until we know all the allocations
416 temp_pagelist = drm_alloc((dma->page_count + (count << page_order)) *
417 sizeof(*dma->pagelist), DRM_MEM_PAGES);
/* NOTE(review): entry->seglist_bus is allocated above but NOT included
 * in this NULL check, yet it is dereferenced below and freed in the
 * error path -- a failed seglist_bus allocation would be a NULL
 * dereference.  Looks like a real bug; confirm and add it to the
 * check. */
419 if (entry->buflist == NULL || entry->seglist == NULL ||
420 temp_pagelist == NULL) {
421 drm_free(entry->buflist, count * sizeof(*entry->buflist),
423 drm_free(entry->seglist, count * sizeof(*entry->seglist),
425 drm_free(entry->seglist_bus, count *
426 sizeof(*entry->seglist_bus), DRM_MEM_SEGS);
427 return DRM_ERR(ENOMEM);
430 bzero(entry->buflist, count * sizeof(*entry->buflist));
431 bzero(entry->seglist, count * sizeof(*entry->seglist));
/* Start from a copy of the existing pagelist; swap in only on success. */
433 memcpy(temp_pagelist, dma->pagelist, dma->page_count *
434 sizeof(*dma->pagelist));
436 DRM_DEBUG( "pagelist: %d entries\n",
437 dma->page_count + (count << page_order) );
439 entry->buf_size = size;
440 entry->page_order = page_order;
444 while ( entry->buf_count < count ) {
/* One segment of 2^page_order pages per iteration, below 4 GiB. */
445 vaddr = (vm_offset_t)drm_pci_alloc(dev, size, alignment,
446 0xfffffffful, &bus_addr);
448 /* Set count correctly so we free the proper amount. */
449 entry->buf_count = count;
450 entry->seg_count = count;
451 drm_cleanup_buf_error(dev, entry);
452 drm_free(temp_pagelist, (dma->page_count +
453 (count << page_order)) * sizeof(*dma->pagelist),
455 return DRM_ERR(ENOMEM);
458 entry->seglist_bus[entry->seg_count] = bus_addr;
459 entry->seglist[entry->seg_count++] = vaddr;
/* Record every page of the new segment in the pending pagelist. */
460 for ( i = 0 ; i < (1 << page_order) ; i++ ) {
461 DRM_DEBUG( "page %d @ 0x%08lx\n",
462 dma->page_count + page_count,
463 (long)vaddr + PAGE_SIZE * i );
464 temp_pagelist[dma->page_count + page_count++] =
465 vaddr + PAGE_SIZE * i;
/* Carve the segment into as many aligned buffers as fit. */
468 offset + size <= total && entry->buf_count < count ;
469 offset += alignment, ++entry->buf_count ) {
470 buf = &entry->buflist[entry->buf_count];
471 buf->idx = dma->buf_count + entry->buf_count;
472 buf->total = alignment;
475 buf->offset = (dma->byte_count + byte_count + offset);
476 buf->address = (void *)(vaddr + offset);
477 buf->bus_address = bus_addr + offset;
482 buf->dev_priv_size = dev->dev_priv_size;
483 buf->dev_private = drm_alloc(buf->dev_priv_size,
485 if (buf->dev_private == NULL) {
486 /* Set count correctly so we free the proper amount. */
487 entry->buf_count = count;
488 entry->seg_count = count;
489 drm_cleanup_buf_error(dev, entry);
490 drm_free(temp_pagelist, (dma->page_count +
491 (count << page_order)) *
492 sizeof(*dma->pagelist), DRM_MEM_PAGES );
493 return DRM_ERR(ENOMEM);
495 bzero(buf->dev_private, buf->dev_priv_size);
497 DRM_DEBUG( "buffer %d @ %p\n",
498 entry->buf_count, buf->address );
500 byte_count += PAGE_SIZE << page_order;
/* Grow the flat device-wide buffer list to hold the new entries. */
503 temp_buflist = drm_realloc(dma->buflist,
504 dma->buf_count * sizeof(*dma->buflist),
505 (dma->buf_count + entry->buf_count)
506 * sizeof(*dma->buflist),
508 if (temp_buflist == NULL) {
509 /* Free the entry because it isn't valid */
510 drm_cleanup_buf_error(dev, entry);
511 drm_free(temp_pagelist, (dma->page_count +
512 (count << page_order)) * sizeof(*dma->pagelist),
514 return DRM_ERR(ENOMEM);
516 dma->buflist = temp_buflist;
518 for ( i = 0 ; i < entry->buf_count ; i++ ) {
519 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
522 /* No allocations failed, so now we can replace the orginal pagelist
525 drm_free(dma->pagelist, dma->page_count * sizeof(*dma->pagelist),
527 dma->pagelist = temp_pagelist;
529 dma->buf_count += entry->buf_count;
530 dma->seg_count += entry->seg_count;
531 dma->page_count += entry->seg_count << page_order;
532 dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
/* Report the actual count/size granted back through the request. */
534 request->count = entry->buf_count;
535 request->size = size;
/*
 * Allocate DMA buffers backed by the scatter/gather area for
 * drm_addbufs().  Mirrors drm_addbufs_agp() except that offsets are
 * relative to the SG area (request->agp_start is reused as the SG
 * offset) and the kernel virtual address is dev->sg->handle plus the
 * offset.  Returns 0 or a DRM_ERR code.
 *
 * NOTE(review): listing is elided; braces and some declarations of the
 * original are not shown.
 */
541 static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
543 drm_device_dma_t *dma = dev->dma;
544 drm_buf_entry_t *entry;
546 unsigned long offset;
547 unsigned long agp_offset;
556 drm_buf_t **temp_buflist;
558 count = request->count;
559 order = drm_order(request->size);
562 alignment = (request->flags & _DRM_PAGE_ALIGN)
563 ? round_page(size) : size;
564 page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
565 total = PAGE_SIZE << page_order;
/* For SG, "agp_start" is simply an offset into the SG area. */
568 agp_offset = request->agp_start;
570 DRM_DEBUG( "count: %d\n", count );
571 DRM_DEBUG( "order: %d\n", order );
572 DRM_DEBUG( "size: %d\n", size );
573 DRM_DEBUG( "agp_offset: %ld\n", agp_offset );
574 DRM_DEBUG( "alignment: %d\n", alignment );
575 DRM_DEBUG( "page_order: %d\n", page_order );
576 DRM_DEBUG( "total: %d\n", total );
578 entry = &dma->bufs[order];
580 entry->buflist = drm_calloc(1, count * sizeof(*entry->buflist),
582 if (entry->buflist == NULL)
583 return DRM_ERR(ENOMEM);
585 entry->buf_size = size;
586 entry->page_order = page_order;
590 while ( entry->buf_count < count ) {
591 buf = &entry->buflist[entry->buf_count];
/* Global index across all orders. */
592 buf->idx = dma->buf_count + entry->buf_count;
593 buf->total = alignment;
597 buf->offset = (dma->byte_count + offset);
598 buf->bus_address = agp_offset + offset;
/* Kernel virtual address within the SG mapping. */
599 buf->address = (void *)(agp_offset + offset + dev->sg->handle);
604 buf->dev_priv_size = dev->dev_priv_size;
605 buf->dev_private = drm_calloc(1, buf->dev_priv_size,
607 if (buf->dev_private == NULL) {
608 /* Set count correctly so we free the proper amount. */
609 entry->buf_count = count;
610 drm_cleanup_buf_error(dev, entry);
611 return DRM_ERR(ENOMEM);
614 DRM_DEBUG( "buffer %d @ %p\n",
615 entry->buf_count, buf->address );
619 byte_count += PAGE_SIZE << page_order;
622 DRM_DEBUG( "byte_count: %d\n", byte_count );
/* Grow the flat device-wide buffer list to hold the new entries. */
624 temp_buflist = drm_realloc(dma->buflist,
625 dma->buf_count * sizeof(*dma->buflist),
626 (dma->buf_count + entry->buf_count)
627 * sizeof(*dma->buflist),
629 if (temp_buflist == NULL) {
630 /* Free the entry because it isn't valid */
631 drm_cleanup_buf_error(dev, entry);
632 return DRM_ERR(ENOMEM);
634 dma->buflist = temp_buflist;
636 for ( i = 0 ; i < entry->buf_count ; i++ ) {
637 dma->buflist[i + dma->buf_count] = &entry->buflist[i];
640 dma->buf_count += entry->buf_count;
641 dma->byte_count += byte_count;
643 DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
644 DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );
/* Report the actual count/size granted back through the request. */
646 request->count = entry->buf_count;
647 request->size = size;
649 dma->flags = _DRM_DMA_USE_SG;
/*
 * DRM_IOCTL_ADD_BUFS handler: validate the request, then dispatch to
 * the AGP, SG or PCI allocator based on request.flags.  Allocation is
 * refused once any buffer-using ioctl has run (dev->buf_use) and at
 * most one allocation per order is allowed.  The (possibly adjusted)
 * request is copied back to userland.
 *
 * NOTE(review): listing is elided; braces and the final return are not
 * shown.
 */
654 int drm_addbufs(DRM_IOCTL_ARGS)
657 drm_buf_desc_t request;
661 DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );
/* Sanity-bound the requested buffer count. */
663 if (request.count < 0 || request.count > 4096)
664 return DRM_ERR(EINVAL);
666 order = drm_order(request.size);
667 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
668 return DRM_ERR(EINVAL);
670 DRM_SPINLOCK(&dev->dma_lock);
671 /* No more allocations after first buffer-using ioctl. */
672 if (dev->buf_use != 0) {
673 DRM_SPINUNLOCK(&dev->dma_lock);
674 return DRM_ERR(EBUSY);
676 /* No more than one allocation per order */
677 if (dev->dma->bufs[order].buf_count != 0) {
678 DRM_SPINUNLOCK(&dev->dma_lock);
679 return DRM_ERR(ENOMEM);
/* Dispatch: AGP takes precedence, then SG, else plain PCI memory. */
682 if ( request.flags & _DRM_AGP_BUFFER )
683 err = drm_addbufs_agp(dev, &request);
685 if ( request.flags & _DRM_SG_BUFFER )
686 err = drm_addbufs_sg(dev, &request);
688 err = drm_addbufs_pci(dev, &request);
689 DRM_SPINUNLOCK(&dev->dma_lock);
691 DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request, sizeof(request));
/*
 * DRM_IOCTL_INFO_BUFS handler: report, per order, the count, size and
 * freelist watermarks of the allocated DMA buffers.  If the caller's
 * list is large enough the per-order descriptors are copied out;
 * either way request.count is set to the number of non-empty orders.
 *
 * NOTE(review): listing is elided; braces and the final return are not
 * shown.
 */
696 int drm_infobufs(DRM_IOCTL_ARGS)
699 drm_device_dma_t *dma = dev->dma;
700 drm_buf_info_t request;
705 DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );
707 DRM_SPINLOCK(&dev->dma_lock);
708 ++dev->buf_use; /* Can't allocate more after this call */
709 DRM_SPINUNLOCK(&dev->dma_lock);
/* First pass: count the orders that actually have buffers. */
711 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
712 if ( dma->bufs[i].buf_count ) ++count;
715 DRM_DEBUG( "count = %d\n", count );
/* Second pass: copy a descriptor per populated order, if room. */
717 if ( request.count >= count ) {
718 for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
719 if ( dma->bufs[i].buf_count ) {
722 from.count = dma->bufs[i].buf_count;
723 from.size = dma->bufs[i].buf_size;
724 from.low_mark = dma->bufs[i].freelist.low_mark;
725 from.high_mark = dma->bufs[i].freelist.high_mark;
727 if (DRM_COPY_TO_USER(&request.list[count], &from,
728 sizeof(drm_buf_desc_t)) != 0) {
729 retcode = DRM_ERR(EFAULT);
733 DRM_DEBUG( "%d %d %d %d %d\n",
735 dma->bufs[i].buf_count,
736 dma->bufs[i].buf_size,
737 dma->bufs[i].freelist.low_mark,
738 dma->bufs[i].freelist.high_mark );
743 request.count = count;
745 DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );
750 int drm_markbufs(DRM_IOCTL_ARGS)
753 drm_device_dma_t *dma = dev->dma;
754 drm_buf_desc_t request;
757 DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );
759 DRM_DEBUG( "%d, %d, %d\n",
760 request.size, request.low_mark, request.high_mark );
763 order = drm_order(request.size);
764 if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
765 request.low_mark < 0 || request.high_mark < 0) {
766 return DRM_ERR(EINVAL);
769 DRM_SPINLOCK(&dev->dma_lock);
770 if (request.low_mark > dma->bufs[order].buf_count ||
771 request.high_mark > dma->bufs[order].buf_count) {
772 return DRM_ERR(EINVAL);
775 dma->bufs[order].freelist.low_mark = request.low_mark;
776 dma->bufs[order].freelist.high_mark = request.high_mark;
777 DRM_SPINUNLOCK(&dev->dma_lock);
/*
 * DRM_IOCTL_FREE_BUFS handler: return a list of buffers (by index,
 * read one at a time from userland) to the free list.  A buffer may
 * only be freed by the file handle that owns it.  Runs under
 * dev->dma_lock; error paths fall through to the single unlock below.
 *
 * NOTE(review): listing is elided; braces and the final return are not
 * shown.
 */
782 int drm_freebufs(DRM_IOCTL_ARGS)
785 drm_device_dma_t *dma = dev->dma;
786 drm_buf_free_t request;
792 DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );
794 DRM_DEBUG( "%d\n", request.count );
796 DRM_SPINLOCK(&dev->dma_lock);
797 for ( i = 0 ; i < request.count ; i++ ) {
/* Fetch each index individually from the userland array. */
798 if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) {
799 retcode = DRM_ERR(EFAULT);
802 if ( idx < 0 || idx >= dma->buf_count ) {
803 DRM_ERROR( "Index %d (of %d max)\n",
804 idx, dma->buf_count - 1 );
805 retcode = DRM_ERR(EINVAL);
808 buf = dma->buflist[idx];
/* Ownership check: only the handle that mapped it may free it. */
809 if ( buf->filp != filp ) {
810 DRM_ERROR("Process %d freeing buffer not owned\n",
812 retcode = DRM_ERR(EINVAL);
815 drm_free_buffer(dev, buf);
817 DRM_SPINUNLOCK(&dev->dma_lock);
/*
 * DRM_IOCTL_MAP_BUFS handler: map the entire DMA buffer pool into the
 * calling process's address space in one go (via vm_mmap on FreeBSD /
 * uvm_mmap on NetBSD), then fill in the userland drm_buf_map_t list
 * with each buffer's index, total size, zeroed 'used' field and its
 * user virtual address.
 *
 * NOTE(review): listing is heavily elided; most #if/#endif platform
 * branches, braces and declarations of the original are not shown.
 */
822 int drm_mapbufs(DRM_IOCTL_ARGS)
825 drm_device_dma_t *dma = dev->dma;
834 #endif /* __FreeBSD__ */
839 #endif /* __NetBSD__ */
841 drm_buf_map_t request;
844 DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );
/* NetBSD: resolve the vnode backing this character device. */
847 if (!vfinddev(kdev, VCHR, &vn))
848 return 0; /* FIXME: Shouldn't this be EINVAL or something? */
849 #endif /* __NetBSD__ */
851 #if defined(__FreeBSD__) && __FreeBSD_version >= 500000
852 vms = p->td_proc->p_vmspace;
857 DRM_SPINLOCK(&dev->dma_lock);
858 dev->buf_use++; /* Can't allocate more after this call */
859 DRM_SPINUNLOCK(&dev->dma_lock);
/* Caller's list must have room for every buffer (handling elided). */
861 if (request.count < dma->buf_count)
/* AGP/SG pools are mapped through the agp_buffer_map; plain PCI
 * pools are mapped by total byte count. */
864 if ((dev->use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
865 (dev->use_sg && (dma->flags & _DRM_DMA_USE_SG))) {
866 drm_local_map_t *map = dev->agp_buffer_map;
872 size = round_page(map->size);
875 size = round_page(dma->byte_count),
/* Place the mapping above the process data segment. */
880 vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
881 retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
882 VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
883 #elif defined(__NetBSD__)
884 vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
885 retcode = uvm_mmap(&vms->vm_map, &vaddr, size,
886 UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
887 &vn->v_uobj, foff, p->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
888 #endif /* __NetBSD__ */
892 request.virtual = (void *)vaddr;
/* Describe every buffer to userland, field by field. */
894 for ( i = 0 ; i < dma->buf_count ; i++ ) {
895 if (DRM_COPY_TO_USER(&request.list[i].idx,
896 &dma->buflist[i]->idx, sizeof(request.list[0].idx))) {
900 if (DRM_COPY_TO_USER(&request.list[i].total,
901 &dma->buflist[i]->total, sizeof(request.list[0].total))) {
905 if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
/* User VA of this buffer = base mapping + its pool offset. */
910 address = vaddr + dma->buflist[i]->offset; /* *** */
911 if (DRM_COPY_TO_USER(&request.list[i].address, &address,
919 request.count = dma->buf_count;
921 DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
923 DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request));
925 return DRM_ERR(retcode);