/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
        return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
        return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
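
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * use the two helpers above to report its register BAR before mapping it.
 * The BAR index (1) and the function name are hypothetical.
 */
#if 0
static void example_print_register_bar(drm_device_t *dev)
{
        unsigned long base = drm_get_resource_start(dev, 1);
        unsigned long len = drm_get_resource_len(dev, 1);

        DRM_DEBUG("register BAR at 0x%08lx, 0x%08lx bytes\n", base, len);
}
#endif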
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
                                             drm_local_map_t *map)
{
        drm_map_list_t *entry;

        list_for_each_entry(entry, &dev->maplist, head) {
                if (entry->map && map->type == entry->map->type &&
                    ((entry->map->offset == map->offset) ||
                     (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) {
                        return entry;
                }
        }

        return NULL;
}
static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
                          unsigned long user_token, int hashed_handle)
{
        int use_hashed_handle;

#if (BITS_PER_LONG == 64)
        use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
        use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

        if (!use_hashed_handle) {
                int ret;

                hash->key = user_token >> PAGE_SHIFT;
                ret = drm_ht_insert_item(&dev->map_hash, hash);
                if (ret != -EINVAL)
                        return ret;
        }
        return drm_ht_just_insert_please(&dev->map_hash, hash,
                                         user_token, 32 - PAGE_SHIFT - 3,
                                         0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
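
/*
 * Worked example for drm_map_handle(), assuming PAGE_SHIFT == 12: on a
 * 64-bit kernel a user_token of 0xffff880000001000 has bits set above bit
 * 31, so use_hashed_handle is true and the handle is drawn from the
 * 32 - 12 - 3 = 17-bit range starting at DRM_MAP_HASH_OFFSET >> PAGE_SHIFT.
 * A token of 0xe0000000 fits in 32 bits, so the map is keyed directly at
 * 0xe0000000 >> 12 = 0xe0000, falling back to a hashed handle only if that
 * key is already taken.
 */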
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
                           unsigned int size, drm_map_type_t type,
                           drm_map_flags_t flags, drm_map_list_t ** maplist)
{
        drm_map_t *map;
        drm_map_list_t *list;
        drm_dma_handle_t *dmah;
        unsigned long user_token;
        int ret;

        map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
        if (!map)
                return -ENOMEM;

        map->offset = offset;
        map->size = size;
        map->flags = flags;
        map->type = type;

        /* Only allow shared memory to be removable, since we only keep
         * enough bookkeeping information about shared memory to allow for
         * removal when processes fork.
         */
        if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                  map->offset, map->size, map->type);
        if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        map->mtrr = -1;
        map->handle = NULL;

        switch (map->type) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
                if (map->offset + (map->size - 1) < map->offset ||
                    map->offset < virt_to_phys(high_memory)) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                /* Some drivers preinitialize some maps, without the X Server
                 * needing to be aware of it.  Therefore, we just return success
                 * when the server tries to create a duplicate map.
                 */
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size,
                                          list->map->size);
                                list->map->size = map->size;
                        }

                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        *maplist = list;
                        return 0;
                }

                if (drm_core_has_MTRR(dev)) {
                        if (map->type == _DRM_FRAME_BUFFER ||
                            (map->flags & _DRM_WRITE_COMBINING)) {
                                map->mtrr = mtrr_add(map->offset, map->size,
                                                     MTRR_TYPE_WRCOMB, 1);
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = ioremap(map->offset, map->size);
                break;
        case _DRM_SHM:
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size, list->map->size);
                                list->map->size = map->size;
                        }

                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        *maplist = list;
                        return 0;
                }
                map->handle = vmalloc_user(map->size);
                DRM_DEBUG("%lu %d %p\n",
                          map->size, drm_order(map->size), map->handle);
                if (!map->handle) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree(map->handle);
                                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                                return -EBUSY;
                        }
                        dev->sigdata.lock = dev->lock.hw_lock = map->handle;    /* Pointer to lock */
                }
                break;
        case _DRM_AGP: {
                drm_agp_mem_t *entry;
                int valid = 0;

                if (!drm_core_has_AGP(dev)) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                /* Note: dev->agp->base may actually be 0 when the DRM
                 * is not in control of AGP space.  But if user space is,
                 * it should already have added the AGP base itself.
                 */
                map->offset += dev->agp->base;
                map->mtrr = dev->agp->agp_mtrr; /* for getmap */

                /* This assumes the DRM is in total control of AGP space.
                 * That is not always the case, as AGP can be in the control
                 * of user space (i.e. the i810 driver).  In that case this
                 * loop gets skipped, so we double-check that dev->agp->memory
                 * is actually set, as well as being invalid, before EPERM'ing.
                 */
                list_for_each_entry(entry, &dev->agp->memory, head) {
                        if ((map->offset >= entry->bound) &&
                            (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
                                valid = 1;
                                break;
                        }
                }
                if (!list_empty(&dev->agp->memory) && !valid) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EPERM;
                }
                DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);

                break;
        }
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += (unsigned long)dev->sg->virtual;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
                 * As we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64-bit variable first. */
                dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
                if (!dmah) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = (unsigned long)dmah->busaddr;
                kfree(dmah);
                break;
        default:
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if (!list) {
                if (map->type == _DRM_REGISTERS)
                        iounmap(map->handle);
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        mutex_lock(&dev->struct_mutex);
        list_add(&list->head, &dev->maplist);

        /* Assign a 32-bit handle */
        /* We do it here so that dev->struct_mutex protects the increment */
        user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
                map->offset;
        ret = drm_map_handle(dev, &list->hash, user_token, 0);
        if (ret) {
                if (map->type == _DRM_REGISTERS)
                        iounmap(map->handle);
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                drm_free(list, sizeof(*list), DRM_MEM_MAPS);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        list->user_token = list->hash.key << PAGE_SHIFT;
        mutex_unlock(&dev->struct_mutex);

        *maplist = list;
        return 0;
}
int drm_addmap(drm_device_t * dev, unsigned int offset,
               unsigned int size, drm_map_type_t type,
               drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
        drm_map_list_t *list;
        int rc;

        rc = drm_addmap_core(dev, offset, size, type, flags, &list);
        if (!rc)
                *map_ptr = list->map;
        return rc;
}
EXPORT_SYMBOL(drm_addmap);
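
/*
 * Illustrative sketch (not part of the original file): a typical in-kernel
 * caller maps its register BAR at load time.  The BAR index and function
 * name are hypothetical.
 */
#if 0
static int example_map_mmio(drm_device_t *dev)
{
        drm_local_map_t *mmio;
        int ret;

        ret = drm_addmap(dev, drm_get_resource_start(dev, 1),
                         drm_get_resource_len(dev, 1),
                         _DRM_REGISTERS, _DRM_READ_ONLY, &mmio);
        if (ret)
                return ret;

        DRM_DEBUG("MMIO mapped at %p\n", mmio->handle);
        return 0;
}
#endif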
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t map;
        drm_map_list_t *maplist;
        drm_map_t __user *argp = (void __user *)arg;
        int err;

        if (!(filp->f_mode & 3))
                return -EACCES; /* Require read/write */

        if (copy_from_user(&map, argp, sizeof(map))) {
                return -EFAULT;
        }

        if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
                return -EPERM;

        err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
                              &maplist);

        if (err)
                return err;

        if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
                return -EFAULT;

        /* avoid a warning on 64-bit; this cast isn't very nice, but the API
         * is already set, so it's too late to change */
        if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
                return -EFAULT;
        return 0;
}
/**
 * Remove a (private) map from the list and deallocate its resources if the
 * mapping isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's being used, and frees any associated resources (such
 * as MTRRs) if it's not.
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
        drm_map_list_t *r_list = NULL, *list_t;
        drm_dma_handle_t dmah;
        int found = 0;

        /* Find the list entry for the map and remove it */
        list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
                if (r_list->map == map) {
                        list_del(&r_list->head);
                        drm_ht_remove_key(&dev->map_hash,
                                          r_list->user_token >> PAGE_SHIFT);
                        drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
                        found = 1;
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (!found)
                return -EINVAL;

        switch (map->type) {
        case _DRM_REGISTERS:
                iounmap(map->handle);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                        int retcode;
                        retcode = mtrr_del(map->mtrr, map->offset, map->size);
                        DRM_DEBUG("mtrr_del=%d\n", retcode);
                }
                break;
        case _DRM_SHM:
                vfree(map->handle);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                dmah.vaddr = map->handle;
                dmah.busaddr = map->offset;
                dmah.size = map->size;
                __drm_pci_free(dev, &dmah);
                break;
        }
        drm_free(map, sizeof(*map), DRM_MEM_MAPS);

        return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_rmmap_locked(dev, map);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_rmmap);
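
/*
 * Illustrative sketch (not part of the original file): tearing down the map
 * created in the drm_addmap() sketch above.  drm_rmmap() takes
 * dev->struct_mutex itself; callers already holding it must use
 * drm_rmmap_locked() instead.
 */
#if 0
static void example_unmap_mmio(drm_device_t *dev, drm_local_map_t *mmio)
{
        if (mmio)
                drm_rmmap(dev, mmio);   /* iounmaps, drops MTRRs, frees the entry */
}
#endif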
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t request;
        drm_local_map_t *map = NULL;
        drm_map_list_t *r_list;
        int ret;

        if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
                return -EFAULT;
        }

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry(r_list, &dev->maplist, head) {
                if (r_list->map &&
                    r_list->user_token == (unsigned long)request.handle &&
                    r_list->map->flags & _DRM_REMOVABLE) {
                        map = r_list->map;
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list_empty(&dev->maplist) || !map) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        if (!map) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }

        ret = drm_rmmap_locked(dev, map);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_pci_free(dev, entry->seglist[i]);
                        }
                }
                drm_free(entry->seglist,
                         entry->seg_count *
                         sizeof(*entry->seglist), DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                         entry->buflist[i].dev_priv_size,
                                         DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                         entry->buf_count *
                         sizeof(*entry->buflist), DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_agp_mem_t *agp_entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i, valid;
        drm_buf_t **temp_buflist;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %lx\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        /* Make sure buffers are located in AGP memory that we own */
        valid = 0;
        list_for_each_entry(agp_entry, &dev->agp->memory, head) {
                if ((agp_offset >= agp_entry->bound) &&
                    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
                        valid = 1;
                        break;
                }
        }
        if (!list_empty(&dev->agp->memory) && !valid) {
                DRM_DEBUG("zone invalid\n");
                return -EINVAL;
        }
        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);
        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;
        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);
        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif                          /* __OS_HAS_AGP */
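
/*
 * Illustrative sketch (not part of the original file): how an AGP driver
 * might request a pool of page-aligned 64KB DMA buffers.  The count and
 * agp_start values are hypothetical; on success drm_addbufs_agp() rewrites
 * request.count and request.size with what was actually allocated.
 */
#if 0
static int example_pool_agp_bufs(drm_device_t *dev)
{
        drm_buf_desc_t request;

        memset(&request, 0, sizeof(request));
        request.count = 32;
        request.size = 64 * 1024;
        request.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
        request.agp_start = 0;  /* byte offset into the AGP aperture */

        return drm_addbufs_agp(dev, &request);
}
#endif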
int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        drm_dma_handle_t *dmah;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                  request->count, request->size, size, order, dev->queue_count);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);
        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
                                   DRM_MEM_SEGS);
        if (!entry->seglist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->seglist, 0, count * sizeof(*entry->seglist));

        /* Keep the original pagelist until we know all the allocations
         * have succeeded.
         */
        temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
                                  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
        if (!temp_pagelist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
                drm_free(entry->seglist,
                         count * sizeof(*entry->seglist), DRM_MEM_SEGS);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;
        while (entry->buf_count < count) {

                dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);

                if (!dmah) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free(temp_pagelist,
                                 (dma->page_count + (count << page_order))
                                 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++]
                            = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf = &entry->buflist[entry->buf_count];
                        buf->idx = dma->buf_count + entry->buf_count;
                        buf->total = alignment;
                        buf->order = order;
                        buf->used = 0;
                        buf->offset = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head(&buf->dma_wait);
                        buf->filp = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc(buf->dev_priv_size,
                                                     DRM_MEM_BUFS);
                        if (!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                drm_free(temp_pagelist,
                                         (dma->page_count +
                                          (count << page_order))
                                         * sizeof(*dma->pagelist),
                                         DRM_MEM_PAGES);
                                mutex_unlock(&dev->struct_mutex);
                                atomic_dec(&dev->buf_alloc);
                                return -ENOMEM;
                        }
                        memset(buf->dev_private, 0, buf->dev_priv_size);

                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }
        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                drm_free(temp_pagelist,
                         (dma->page_count + (count << page_order))
                         * sizeof(*dma->pagelist), DRM_MEM_PAGES);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        if (request->flags & _DRM_PCI_BUFFER_RO)
                dma->flags = _DRM_DMA_USE_PCI_RO;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);
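
/*
 * Illustrative sketch (not part of the original file): the PCI variant,
 * requesting consistent-memory buffers of one page each.  Values are
 * hypothetical; note that drm_addbufs_pci() requires CAP_SYS_ADMIN.
 */
#if 0
static int example_pool_pci_bufs(drm_device_t *dev)
{
        drm_buf_desc_t request;

        memset(&request, 0, sizeof(request));
        request.count = 16;
        request.size = PAGE_SIZE;
        request.flags = _DRM_PCI_BUFFER_RO;     /* clients map the pool read-only */

        return drm_addbufs_pci(dev, &request);
}
#endif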
static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;
        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset
                                        + (unsigned long)dev->sg->virtual);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }

                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);
        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count: %d\n", count);
        DRM_DEBUG("order: %d\n", order);
        DRM_DEBUG("size: %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment: %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total: %d\n", total);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;
        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);
        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_FB;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_addbufs_fb);
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, dispatches the call either to addbufs_agp(), addbufs_sg()
 * or addbufs_pci() for AGP, scatter-gather or consistent PCI memory
 * respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
                           sizeof(request)))
                return -EFAULT;

#if __OS_HAS_AGP
        if (request.flags & _DRM_AGP_BUFFER)
                ret = drm_addbufs_agp(dev, &request);
        else
#endif
        if (request.flags & _DRM_SG_BUFFER)
                ret = drm_addbufs_sg(dev, &request);
        else if (request.flags & _DRM_FB_BUFFER)
                ret = drm_addbufs_fb(dev, &request);
        else
                ret = drm_addbufs_pci(dev, &request);

        if (ret == 0) {
                if (copy_to_user((void __user *)arg, &request,
                                 sizeof(request))) {
                        ret = -EFAULT;
                }
        }
        return ret;
}
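
/*
 * Illustrative user-space sketch (not part of the original file): the same
 * drm_buf_desc_t round-trips through the ioctl above.  Assumes libdrm's
 * headers and an already-opened DRM fd; DRM_IOCTL_ADD_BUFS is the ioctl
 * number this handler is wired to.
 */
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

static int example_user_addbufs(int fd)
{
        drm_buf_desc_t request;

        memset(&request, 0, sizeof(request));
        request.count = 16;
        request.size = 4096;
        request.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;

        /* On return, request.count holds the number actually allocated. */
        return ioctl(fd, DRM_IOCTL_ADD_BUFS, &request);
}
#endif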
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        if (copy_from_user(&request, argp, sizeof(request)))
                return -EFAULT;

        for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                if (dma->bufs[i].buf_count)
                        ++count;
        }

        DRM_DEBUG("count = %d\n", count);

        if (request.count >= count) {
                for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
                        if (dma->bufs[i].buf_count) {
                                drm_buf_desc_t __user *to =
                                    &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if (copy_to_user(&to->count,
                                                 &from->buf_count,
                                                 sizeof(from->buf_count)) ||
                                    copy_to_user(&to->size,
                                                 &from->buf_size,
                                                 sizeof(from->buf_size)) ||
                                    copy_to_user(&to->low_mark,
                                                 &list->low_mark,
                                                 sizeof(list->low_mark)) ||
                                    copy_to_user(&to->high_mark,
                                                 &list->high_mark,
                                                 sizeof(list->high_mark)))
                                        return -EFAULT;

                                DRM_DEBUG("%d %d %d %d %d\n",
                                          i,
                                          dma->bufs[i].buf_count,
                                          dma->bufs[i].buf_size,
                                          dma->bufs[i].freelist.low_mark,
                                          dma->bufs[i].freelist.high_mark);
                                ++count;
                        }
                }
        }
        request.count = count;

        if (copy_to_user(argp, &request, sizeof(request)))
                return -EFAULT;

        return 0;
}
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (copy_from_user(&request,
                           (drm_buf_desc_t __user *) arg, sizeof(request)))
                return -EFAULT;

        DRM_DEBUG("%d, %d, %d\n",
                  request.size, request.low_mark, request.high_mark);
        order = drm_order(request.size);
        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        entry = &dma->bufs[order];

        if (request.low_mark < 0 || request.low_mark > entry->buf_count)
                return -EINVAL;
        if (request.high_mark < 0 || request.high_mark > entry->buf_count)
                return -EINVAL;

        entry->freelist.low_mark = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (copy_from_user(&request,
                           (drm_buf_free_t __user *) arg, sizeof(request)))
                return -EFAULT;

        DRM_DEBUG("%d\n", request.count);
        for (i = 0; i < request.count; i++) {
                if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
                        return -EFAULT;
                if (idx < 0 || idx >= dma->buf_count) {
                        DRM_ERROR("Index %d (of %d max)\n",
                                  idx, dma->buf_count - 1);
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if (buf->filp != filp) {
                        DRM_ERROR("Process %d freeing buffer not owned\n",
                                  current->pid);
                        return -EINVAL;
                }
                drm_free_buffer(dev, buf);
        }

        return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with do_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls do_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        spin_lock(&dev->count_lock);
        if (atomic_read(&dev->buf_alloc)) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock(&dev->count_lock);

        if (copy_from_user(&request, argp, sizeof(request)))
                return -EFAULT;

        if (request.count >= dma->buf_count) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))
                    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
                        && (dma->flags & _DRM_DMA_USE_FB))) {
                        drm_map_t *map = dev->agp_buffer_map;
                        unsigned long token = dev->agp_buffer_token;

                        if (!map) {
                                retcode = -EINVAL;
                                goto done;
                        }

                        down_write(&current->mm->mmap_sem);
                        virtual = do_mmap(filp, 0, map->size,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED, token);
                        up_write(&current->mm->mmap_sem);
                } else {
                        down_write(&current->mm->mmap_sem);
                        virtual = do_mmap(filp, 0, dma->byte_count,
                                          PROT_READ | PROT_WRITE,
                                          MAP_SHARED, 0);
                        up_write(&current->mm->mmap_sem);
                }
                if (virtual > -1024UL) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for (i = 0; i < dma->buf_count; i++) {
                        if (copy_to_user(&request.list[i].idx,
                                         &dma->buflist[i]->idx,
                                         sizeof(request.list[0].idx))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].total,
                                         &dma->buflist[i]->total,
                                         sizeof(request.list[0].total))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if (copy_to_user(&request.list[i].used,
                                         &zero, sizeof(zero))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset;    /* *** */
                        if (copy_to_user(&request.list[i].address,
                                         &address, sizeof(address))) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
      done:
        request.count = dma->buf_count;
        DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);

        if (copy_to_user(argp, &request, sizeof(request)))
                return -EFAULT;

        return retcode;
}
/**
 * Compute the size order.  Returns the exponent of the smallest power of two
 * that is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order(unsigned long size)
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;

        if (size & (size - 1))
                ++order;

        return order;
}
EXPORT_SYMBOL(drm_order);
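
/*
 * Worked examples for drm_order(): drm_order(4096) == 12, since 4096 is
 * exactly 2^12; drm_order(4097) == 13, because a non-power-of-two size is
 * rounded up to the next power of two; drm_order(1) == 0.
 */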