/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 *
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */
/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a NoOp on x86? Also
 * FIXME: What happens if this one is called and a pending blit has previously done
 * the same DMA mappings?
 */
#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_dmablit.h"

#include <linux/pagemap.h>
#define VIA_PGDN(x)	(((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)	(((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)	((unsigned long)(x) >> PAGE_SHIFT)
typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;
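/*
 * Each descriptor describes one DMA-mapped chunk: mem_addr holds the bus
 * address returned by dma_map_page(), dev_addr the matching frame-buffer
 * address, size the chunk length in bytes, and next the bus address of the
 * following descriptor, with VIA_DMA_DPR_EC marking the end of the chain.
 * This mirrors how via_map_blit_for_device() below fills the fields in;
 * that it is also the exact layout the blit engine parses is an assumption
 * carried over from the hardware programming in this file.
 */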
/*
 * Unmap a DMA mapping.
 */

static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
		descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
		desc_ptr--;
	}
}
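/*
 * Note the traversal order above: 'next' follows the chain from chain_start
 * in the order the hardware reads it, while desc_ptr walks the descriptor
 * pages backwards, matching the reverse order in which
 * via_map_blit_for_device() built the chain.
 */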
/*
 * If mode == 0, count how many descriptors are needed.
 * If mode == 1, map the DMA pages for the device and build and map the
 * descriptor chain as well.
 * Descriptors are run in reverse order by the hardware because we are not
 * allowed to update the 'next' field without syncing calls when the
 * descriptor is already mapped.
 */
static void
via_map_blit_for_device(struct pci_dev *pdev,
			const drm_via_dmablit_t *xfer,
			drm_via_sg_info_t *vsg,
			int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1)
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;

		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;
				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}

		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}
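/*
 * Worked example of the two-pass scheme (numbers hypothetical): a blit of
 * 64 lines with line_length == 1024, each line straddling a page boundary,
 * emits two chunks per line, so the mode == 0 pass sets vsg->num_desc to
 * 128. via_alloc_desc_pages() then reserves the descriptor memory, and the
 * mode == 1 pass fills in and maps those 128 descriptors.
 */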
/*
 * Function that frees up all resources for a blit. It is usable even if the
 * blit info has only been partially built as long as the status enum is consistent
 * with the actual status of the used resources.
 */
static void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
	case dr_via_pages_locked:
		for (i = 0; i < vsg->num_pages; ++i) {
			if (NULL != (page = vsg->pages[i])) {
				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
	case dr_via_pages_alloc:
		vfree(vsg->pages);
	default:
		vsg->state = dr_via_sg_init;
	}
	if (vsg->bounce_buffer) {
		vfree(vsg->bounce_buffer);
		vsg->bounce_buffer = NULL;
	}
	vsg->free_on_sequence = 0;
}
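/*
 * The switch above intentionally falls through from case to case, tearing
 * resources down in the reverse order of their creation: device mappings,
 * then descriptor pages, then the locked user pages, and finally the page
 * pointer array. This is what makes the function safe for partially built
 * blits, as long as vsg->state reflects how far the build actually got.
 */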
/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
		  VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	DRM_WRITEMEMORYBARRIER();
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
	VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04);
}
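/*
 * The trailing VIA_READ is presumably there to flush the posted register
 * writes above, so that the engine has seen the start command before we
 * return; this rationale is an assumption, not documented in this file.
 */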
/*
 * Obtain a page pointer array and lock all pages into system memory. A segmentation violation will
 * occur here if the calling user does not have access to the submitted address.
 */

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride - 1)) -
		first_pfn + 1;

	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return DRM_ERR(ENOMEM);
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
			     vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE),
			     0, vsg->pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0)
			return ret;
		vsg->state = dr_via_pages_locked;
		return DRM_ERR(EINVAL);
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}
/*
 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 * quite large for some blits, and pages don't need to be contiguous.
 */
static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;

	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
		vsg->descriptors_per_page;

	if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
		return DRM_ERR(ENOMEM);

	memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] =
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return DRM_ERR(ENOMEM);
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}
static void
via_abort_dmablit(drm_device_t *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

static void
via_dmablit_engine_off(drm_device_t *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
/*
 * The dmablit part of the IRQ handler. Only reasonably fast things should be
 * done here. The rest, like unmapping and freeing memory for done blits, is
 * done in a separate workqueue task. Basically the task of the interrupt
 * handler is to submit a new blit to the engine, while the workqueue task
 * takes care of processing associated with the old blit.
 */
void
via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq) {
		spin_lock(&blitq->blit_lock);
	} else {
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	done_transfer = blitq->is_active &&
		((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {
		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS)
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);
	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}

	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + DRM_HZ;
			if (!timer_pending(&blitq->poll_timer)) {
				blitq->poll_timer.expires = jiffies + 1;
				add_timer(&blitq->poll_timer);
			}
		} else {
			if (timer_pending(&blitq->poll_timer)) {
				del_timer(&blitq->poll_timer);
			}
			via_dmablit_engine_off(dev, engine);
		}
	}

	if (from_irq) {
		spin_unlock(&blitq->blit_lock);
	} else {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	}
}
/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS) {
			slot -= VIA_NUM_BLIT_SLOTS;
		}
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}
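/*
 * Worked example of the wraparound test above, with hypothetical values:
 * handles are unsigned 32-bit, so for done_blit_handle == 0x10,
 * cur_blit_handle == 0x12 and handle == 0x11 we get
 * (0x10 - 0x11) == 0xffffffff > (1 << 23) and (0x12 - 0x11) == 1 <= (1 << 23),
 * i.e. the blit is still queued or active. The same arithmetic remains
 * correct when the handle counters wrap past 2^32.
 */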
/*
 * Sync. Wait for at least three seconds for the blit to be performed.
 */

static int
via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);

	return ret;
}
/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * IRQs, it will shorten the latency somewhat.
 */
static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	drm_device_t *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!timer_pending(&blitq->poll_timer)) {
		blitq->poll_timer.expires = jiffies + 1;
		add_timer(&blitq->poll_timer);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);
	}
}
/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only and may not be called on each interrupt.
 */
static void
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
via_dmablit_workqueue(void *data)
#else
via_dmablit_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
#else
	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
#endif
	drm_device_t *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;

	DRM_DEBUG("Workqueue task called for blit engine %ld\n", (unsigned long)
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
			blitq->serviced = 0;

		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;

		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAKEUP(&blitq->busy_queue);

		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
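/*
 * The blit lock is dropped around via_free_sg_info() above on purpose:
 * unmapping and freeing a blit is comparatively slow work that doesn't need
 * the lock, and blitq->serviced has already been advanced, so the same slot
 * cannot be released twice.
 */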
/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */
void
via_init_dmablit(drm_device_t *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);

	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j) {
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
		}
		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
		INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
#else
		INIT_WORK(&blitq->wq, via_dmablit_workqueue);
#endif
		init_timer(&blitq->poll_timer);
		blitq->poll_timer.function = &via_dmablit_timer;
		blitq->poll_timer.data = (unsigned long) blitq;
	}
}
/*
 * Build all info and do all mappings required for a blit.
 */

static int
via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;

	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * Below check is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and don't want to incorporate the
	 * extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return DRM_ERR(EINVAL);
	}

	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrary large number of pages, since that causes a
	 * DOS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * We allow a negative fb stride to allow flipping of images in
	 * transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive however. VIA is contacted
	 * about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return DRM_ERR(EINVAL);
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return DRM_ERR(EINVAL);
	}
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);

	return 0;
}
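/*
 * Example of the stride-collapse rewrite above: a 100-line transfer with
 * mem_stride == fb_stride == line_length == 640 is rewritten as a single
 * 64000-byte line (num_lines == 1), which needs far fewer descriptors while
 * transferring exactly the same bytes.
 */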
/*
 * Reserve one free slot in the blit queue. Will wait for one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
		if (ret) {
			return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
		}

		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}
/*
 * Hand back a free slot if we changed our mind.
 */

static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	DRM_WAKEUP(&blitq->busy_queue);
}
/*
 * Grab a free slot. Build blit info and queue a blit.
 */

static int
via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return DRM_ERR(EINVAL);
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
		return ret;
	}
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return DRM_ERR(ENOMEM);
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS)
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle;

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}
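/*
 * On success, xfer->sync.sync_handle and xfer->sync.engine identify the
 * queued blit; user space hands them back through the sync IOCTL below,
 * which ends up in via_dmablit_sync().
 */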
/*
 * Sync on a previously submitted blit. Note that the X server uses signals
 * extensively, and that there is a very high probability that this IOCTL
 * will be interrupted by a signal. In that case it returns with -EAGAIN
 * for the signal to be delivered.
 * The caller should then reissue the IOCTL. This is similar to what is being
 * done for drmGetLock().
 */
int
via_dma_blit_sync( DRM_IOCTL_ARGS )
{
	drm_via_blitsync_t sync;
	int err;
	DRM_DEVICE;

	DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));

	if (sync.engine >= VIA_NUM_BLIT_ENGINES)
		return DRM_ERR(EINVAL);

	err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);

	if (DRM_ERR(EINTR) == err)
		err = DRM_ERR(EAGAIN);

	return err;
}
/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be
 * interrupted by a signal while waiting for a free slot in the blit queue.
 * In that case it returns with -EAGAIN and should be reissued. See the above
 * IOCTL code.
 */
int
via_dma_blit( DRM_IOCTL_ARGS )
{
	drm_via_dmablit_t xfer;
	int err;
	DRM_DEVICE;

	DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));

	err = via_dmablit(dev, &xfer);

	DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));

	return err;
}
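/*
 * A minimal sketch of the intended user-space calling sequence, assuming
 * the DRM_VIA_DMA_BLIT / DRM_VIA_BLIT_SYNC command numbers from via_drm.h
 * and a libdrm-style drmCommandWriteRead(); all field values are made up
 * for illustration:
 *
 *	drm_via_dmablit_t xfer;
 *	memset(&xfer, 0, sizeof(xfer));
 *	xfer.num_lines = 480;
 *	xfer.line_length = 640 * 4;
 *	xfer.mem_stride = 640 * 4;
 *	xfer.fb_stride = 640 * 4;
 *	xfer.mem_addr = user_buffer;	// 16-byte aligned
 *	xfer.fb_addr = fb_offset;	// 4-byte aligned
 *	xfer.to_fb = 1;
 *	while (drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT, &xfer,
 *				   sizeof(xfer)) == -EAGAIN)
 *		;
 *	while (drmCommandWriteRead(fd, DRM_VIA_BLIT_SYNC, &xfer.sync,
 *				   sizeof(xfer.sync)) == -EAGAIN)
 *		;
 */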