2 * Copyright © 2008 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
30 #include "drm_compat.h"
33 #include <linux/swap.h>
36 i915_gem_object_set_domain(struct drm_gem_object *obj,
37 uint32_t read_domains,
38 uint32_t write_domain);
40 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
43 uint32_t read_domains,
44 uint32_t write_domain);
46 i915_gem_set_domain(struct drm_gem_object *obj,
47 struct drm_file *file_priv,
48 uint32_t read_domains,
49 uint32_t write_domain);
50 static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
51 static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
52 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
55 i915_gem_init_ioctl(struct drm_device *dev, void *data,
56 struct drm_file *file_priv)
58 drm_i915_private_t *dev_priv = dev->dev_private;
59 struct drm_i915_gem_init *args = data;
61 mutex_lock(&dev->struct_mutex);
63 if (args->gtt_start >= args->gtt_end ||
64 (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
65 (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
66 mutex_unlock(&dev->struct_mutex);
70 drm_memrange_init(&dev_priv->mm.gtt_space, args->gtt_start,
71 args->gtt_end - args->gtt_start);
73 dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
75 mutex_unlock(&dev->struct_mutex);
82 * Creates a new mm object and returns a handle to it.
85 i915_gem_create_ioctl(struct drm_device *dev, void *data,
86 struct drm_file *file_priv)
88 struct drm_i915_gem_create *args = data;
89 struct drm_gem_object *obj;
92 args->size = roundup(args->size, PAGE_SIZE);
94 /* Allocate the new object */
95 obj = drm_gem_object_alloc(dev, args->size);
99 ret = drm_gem_handle_create(file_priv, obj, &handle);
100 mutex_lock(&dev->struct_mutex);
101 drm_gem_object_handle_unreference(obj);
102 mutex_unlock(&dev->struct_mutex);
107 args->handle = handle;
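/*
 * For reference, a minimal user-space sketch of driving this ioctl (added
 * note, not part of this file; assumes the <drm/i915_drm.h> definitions and
 * an open DRM fd, error handling omitted):
 *
 *	struct drm_i915_gem_create create = { .size = 4096 };
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);
 *	// create.handle now names the new, page-aligned object
 */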
113 * Reads data from the object referenced by handle.
115 * On error, the contents of *data are undefined.
118 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
119 struct drm_file *file_priv)
121 struct drm_i915_gem_pread *args = data;
122 struct drm_gem_object *obj;
123 struct drm_i915_gem_object *obj_priv;
128 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
131 obj_priv = obj->driver_private;
133 /* Bounds check source.
135 * XXX: This could use review for overflow issues...
137 if (args->offset > obj->size || args->size > obj->size ||
138 args->offset + args->size > obj->size) {
139 drm_gem_object_unreference(obj);
143 mutex_lock(&dev->struct_mutex);
145 ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
146 I915_GEM_DOMAIN_CPU, 0);
148 drm_gem_object_unreference(obj);
149 mutex_unlock(&dev->struct_mutex);
152 offset = args->offset;
154 read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
155 args->size, &offset);
156 if (read != args->size) {
157 drm_gem_object_unreference(obj);
158 mutex_unlock(&dev->struct_mutex);
165 drm_gem_object_unreference(obj);
166 mutex_unlock(&dev->struct_mutex);
171 #include "drm_compat.h"
174 i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
175 struct drm_i915_gem_pwrite *args,
176 struct drm_file *file_priv)
178 struct drm_i915_gem_object *obj_priv = obj->driver_private;
181 char __user *user_data;
186 unsigned long unwritten;
188 user_data = (char __user *) (uintptr_t) args->data_ptr;
190 if (!access_ok(VERIFY_READ, user_data, remain))
194 mutex_lock(&dev->struct_mutex);
195 ret = i915_gem_object_pin(obj, 0);
197 mutex_unlock(&dev->struct_mutex);
200 ret = i915_gem_set_domain(obj, file_priv,
201 I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
205 obj_priv = obj->driver_private;
206 offset = obj_priv->gtt_offset + args->offset;
210 /* Operation in this page
213 * o = offset within page
216 i = offset >> PAGE_SHIFT;
217 o = offset & (PAGE_SIZE-1);
219 if ((o + l) > PAGE_SIZE)
222 pfn = (dev->agp->base >> PAGE_SHIFT) + i;
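/*
 * Added note: i is the page index within the aperture, o the byte offset
 * inside that page, and l the number of bytes copied this pass (clamped so
 * a single copy never crosses a page boundary).  pfn therefore names a page
 * of the GTT aperture itself, so the copy below reaches the object through
 * the GTT rather than through the CPU view of its backing pages.
 */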
224 #ifdef DRM_KMAP_ATOMIC_PROT_PFN
225 /* kmap_atomic can't map IO pages on non-HIGHMEM kernels
227 vaddr = kmap_atomic_prot_pfn(pfn, KM_USER0,
228 __pgprot(__PAGE_KERNEL));
230 DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
231 i, o, l, pfn, vaddr);
233 unwritten = __copy_from_user_inatomic_nocache(vaddr + o,
235 kunmap_atomic(vaddr, KM_USER0);
240 vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
242 DRM_INFO("pwrite slow i %d o %d l %d "
243 "pfn %ld vaddr %p\n",
244 i, o, l, pfn, vaddr);
250 unwritten = __copy_from_user(vaddr + o, user_data, l);
252 DRM_INFO("unwritten %ld\n", unwritten);
265 #if WATCH_PWRITE && 1
266 i915_gem_clflush_object(obj);
267 i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
268 i915_gem_clflush_object(obj);
272 i915_gem_object_unpin(obj);
273 mutex_unlock(&dev->struct_mutex);
279 i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
280 struct drm_i915_gem_pwrite *args,
281 struct drm_file *file_priv)
287 mutex_lock(&dev->struct_mutex);
289 ret = i915_gem_set_domain(obj, file_priv,
290 I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
292 mutex_unlock(&dev->struct_mutex);
296 offset = args->offset;
298 written = vfs_write(obj->filp,
299 (char __user *)(uintptr_t) args->data_ptr,
300 args->size, &offset);
301 if (written != args->size) {
302 mutex_unlock(&dev->struct_mutex);
309 mutex_unlock(&dev->struct_mutex);
315 * Writes data to the object referenced by handle.
317 * On error, the contents of the buffer that were to be modified are undefined.
320 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
321 struct drm_file *file_priv)
323 struct drm_i915_gem_pwrite *args = data;
324 struct drm_gem_object *obj;
325 struct drm_i915_gem_object *obj_priv;
328 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
331 obj_priv = obj->driver_private;
333 /* Bounds check destination.
335 * XXX: This could use review for overflow issues...
337 if (args->offset > obj->size || args->size > obj->size ||
338 args->offset + args->size > obj->size) {
339 drm_gem_object_unreference(obj);
343 /* We can only do the GTT pwrite on untiled buffers, as otherwise
344 * it would end up going through the fenced access, and we'll get
345 * different detiling behavior between reading and writing.
346 * pread/pwrite currently are reading and writing from the CPU
347 * perspective, requiring manual detiling by the client.
349 if (obj_priv->tiling_mode == I915_TILING_NONE &&
351 ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
353 ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
357 DRM_INFO("pwrite failed %d\n", ret);
360 drm_gem_object_unreference(obj);
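/*
 * User-space sketch of the corresponding call (added note, not part of this
 * file; assumes <drm/i915_drm.h> and an open DRM fd, error handling omitted):
 *
 *	struct drm_i915_gem_pwrite pwrite = {
 *		.handle   = handle,
 *		.offset   = 0,
 *		.size     = len,
 *		.data_ptr = (uintptr_t)buf,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
 */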
366 * Called when user space prepares to use an object
369 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
370 struct drm_file *file_priv)
372 struct drm_i915_gem_set_domain *args = data;
373 struct drm_gem_object *obj;
376 if (!(dev->driver->driver_features & DRIVER_GEM))
379 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
383 mutex_lock(&dev->struct_mutex);
384 ret = i915_gem_set_domain(obj, file_priv,
385 args->read_domains, args->write_domain);
386 drm_gem_object_unreference(obj);
387 mutex_unlock(&dev->struct_mutex);
392 * Called when user space has done writes to this buffer
395 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
396 struct drm_file *file_priv)
398 struct drm_i915_gem_sw_finish *args = data;
399 struct drm_gem_object *obj;
400 struct drm_i915_gem_object *obj_priv;
403 if (!(dev->driver->driver_features & DRIVER_GEM))
406 mutex_lock(&dev->struct_mutex);
407 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
409 mutex_unlock(&dev->struct_mutex);
414 DRM_INFO("%s: sw_finish %d (%p)\n",
415 __func__, args->handle, obj);
417 obj_priv = obj->driver_private;
419 /* Pinned buffers may be scanout, so flush the cache */
420 if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
421 i915_gem_clflush_object(obj);
422 drm_agp_chipset_flush(dev);
424 drm_gem_object_unreference(obj);
425 mutex_unlock(&dev->struct_mutex);
430 * Maps the contents of an object, returning the address it is mapped
433 * While the mapping holds a reference on the contents of the object, it doesn't
434 * imply a ref on the object itself.
437 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
438 struct drm_file *file_priv)
440 struct drm_i915_gem_mmap *args = data;
441 struct drm_gem_object *obj;
445 if (!(dev->driver->driver_features & DRIVER_GEM))
448 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
452 offset = args->offset;
454 down_write(&current->mm->mmap_sem);
455 addr = do_mmap(obj->filp, 0, args->size,
456 PROT_READ | PROT_WRITE, MAP_SHARED,
458 up_write(&current->mm->mmap_sem);
459 mutex_lock(&dev->struct_mutex);
460 drm_gem_object_unreference(obj);
461 mutex_unlock(&dev->struct_mutex);
462 if (IS_ERR((void *)addr))
465 args->addr_ptr = (uint64_t) addr;
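/*
 * User-space sketch (added note, not part of this file; assumes
 * <drm/i915_drm.h>, error handling omitted):
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size   = size,
 *	};
 *
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
 *	void *ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 */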
471 i915_gem_object_free_page_list(struct drm_gem_object *obj)
473 struct drm_i915_gem_object *obj_priv = obj->driver_private;
474 int page_count = obj->size / PAGE_SIZE;
477 if (obj_priv->page_list == NULL)
481 for (i = 0; i < page_count; i++)
482 if (obj_priv->page_list[i] != NULL) {
484 set_page_dirty(obj_priv->page_list[i]);
485 mark_page_accessed(obj_priv->page_list[i]);
486 page_cache_release(obj_priv->page_list[i]);
490 drm_free(obj_priv->page_list,
491 page_count * sizeof(struct page *),
493 obj_priv->page_list = NULL;
497 i915_gem_object_move_to_active(struct drm_gem_object *obj)
499 struct drm_device *dev = obj->dev;
500 drm_i915_private_t *dev_priv = dev->dev_private;
501 struct drm_i915_gem_object *obj_priv = obj->driver_private;
503 /* Add a reference if we're newly entering the active list. */
504 if (!obj_priv->active) {
505 drm_gem_object_reference(obj);
506 obj_priv->active = 1;
508 /* Move from whatever list we were on to the tail of execution. */
509 list_move_tail(&obj_priv->list,
510 &dev_priv->mm.active_list);
515 i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
517 struct drm_device *dev = obj->dev;
518 drm_i915_private_t *dev_priv = dev->dev_private;
519 struct drm_i915_gem_object *obj_priv = obj->driver_private;
521 i915_verify_inactive(dev, __FILE__, __LINE__);
522 if (obj_priv->pin_count != 0)
523 list_del_init(&obj_priv->list);
525 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
527 if (obj_priv->active) {
528 obj_priv->active = 0;
529 drm_gem_object_unreference(obj);
531 i915_verify_inactive(dev, __FILE__, __LINE__);
535 * Creates a new sequence number, emitting a write of it to the status page
536 * plus an interrupt, which will trigger i915_user_interrupt_handler.
538 * Must be called with struct_lock held.
540 * Returned sequence numbers are nonzero on success.
543 i915_add_request(struct drm_device *dev, uint32_t flush_domains)
545 drm_i915_private_t *dev_priv = dev->dev_private;
546 struct drm_i915_gem_request *request;
551 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
555 /* Grab the seqno we're going to make this request be, and bump the
556 * next (skipping 0 so it can be the reserved no-seqno value).
558 seqno = dev_priv->mm.next_gem_seqno;
559 dev_priv->mm.next_gem_seqno++;
560 if (dev_priv->mm.next_gem_seqno == 0)
561 dev_priv->mm.next_gem_seqno++;
564 OUT_RING(CMD_STORE_DWORD_IDX);
565 OUT_RING(I915_GEM_HWS_INDEX << STORE_DWORD_INDEX_SHIFT);
568 OUT_RING(GFX_OP_USER_INTERRUPT);
571 DRM_DEBUG("%d\n", seqno);
573 request->seqno = seqno;
574 request->emitted_jiffies = jiffies;
575 request->flush_domains = flush_domains;
576 was_empty = list_empty(&dev_priv->mm.request_list);
577 list_add_tail(&request->list, &dev_priv->mm.request_list);
580 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
585 * Command execution barrier
587 * Ensures that all commands in the ring are finished
588 * before signalling the CPU
591 i915_retire_commands(struct drm_device *dev)
593 drm_i915_private_t *dev_priv = dev->dev_private;
594 uint32_t cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
595 uint32_t flush_domains = 0;
598 /* The sampler always gets flushed on i965 (sigh) */
600 flush_domains |= I915_GEM_DOMAIN_SAMPLER;
603 OUT_RING(0); /* noop */
605 return flush_domains;
609 * Moves buffers associated only with the given active seqno from the active
610 * to inactive list, potentially freeing them.
613 i915_gem_retire_request(struct drm_device *dev,
614 struct drm_i915_gem_request *request)
616 drm_i915_private_t *dev_priv = dev->dev_private;
618 if (request->flush_domains != 0) {
619 struct drm_i915_gem_object *obj_priv, *next;
621 /* First clear any buffers that were only waiting for a flush
622 * matching the one just retired.
625 list_for_each_entry_safe(obj_priv, next,
626 &dev_priv->mm.flushing_list, list) {
627 struct drm_gem_object *obj = obj_priv->obj;
629 if (obj->write_domain & request->flush_domains) {
630 obj->write_domain = 0;
631 i915_gem_object_move_to_inactive(obj);
637 /* Move any buffers on the active list that are no longer referenced
638 * by the ringbuffer to the flushing/inactive lists as appropriate.
640 while (!list_empty(&dev_priv->mm.active_list)) {
641 struct drm_gem_object *obj;
642 struct drm_i915_gem_object *obj_priv;
644 obj_priv = list_first_entry(&dev_priv->mm.active_list,
645 struct drm_i915_gem_object,
649 /* If the seqno being retired doesn't match the oldest in the
650 * list, then the oldest in the list must still be newer than
653 if (obj_priv->last_rendering_seqno != request->seqno)
656 DRM_INFO("%s: retire %d moves to inactive list %p\n",
657 __func__, request->seqno, obj);
660 if (obj->write_domain != 0) {
661 list_move_tail(&obj_priv->list,
662 &dev_priv->mm.flushing_list);
664 i915_gem_object_move_to_inactive(obj);
670 * Returns true if seq1 is later than seq2.
673 i915_seqno_passed(uint32_t seq1, uint32_t seq2)
675 return (int32_t)(seq1 - seq2) >= 0;
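/*
 * Added note: the signed subtraction keeps this comparison correct across
 * the 32-bit wrap, e.g. seq1 == 2 and seq2 == 0xfffffffe differ by 4, so
 * seq1 is still recognized as the later sequence number.
 */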
679 i915_get_gem_seqno(struct drm_device *dev)
681 drm_i915_private_t *dev_priv = dev->dev_private;
683 return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
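/*
 * Added note: READ_HWSP returns the value the ring last stored into the
 * hardware status page; i915_add_request() emits CMD_STORE_DWORD_IDX to
 * I915_GEM_HWS_INDEX followed by GFX_OP_USER_INTERRUPT, so this is the most
 * recent seqno the GPU has actually completed.
 */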
687 * This function clears the request list as sequence numbers are passed.
690 i915_gem_retire_requests(struct drm_device *dev)
692 drm_i915_private_t *dev_priv = dev->dev_private;
695 seqno = i915_get_gem_seqno(dev);
697 while (!list_empty(&dev_priv->mm.request_list)) {
698 struct drm_i915_gem_request *request;
699 uint32_t retiring_seqno;
701 request = list_first_entry(&dev_priv->mm.request_list,
702 struct drm_i915_gem_request,
704 retiring_seqno = request->seqno;
706 if (i915_seqno_passed(seqno, retiring_seqno) ||
707 dev_priv->mm.wedged) {
708 i915_gem_retire_request(dev, request);
710 list_del(&request->list);
711 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
718 i915_gem_retire_work_handler(struct work_struct *work)
720 drm_i915_private_t *dev_priv;
721 struct drm_device *dev;
723 dev_priv = container_of(work, drm_i915_private_t,
724 mm.retire_work.work);
727 mutex_lock(&dev->struct_mutex);
728 i915_gem_retire_requests(dev);
729 if (!list_empty(&dev_priv->mm.request_list))
730 schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
731 mutex_unlock(&dev->struct_mutex);
735 * Waits for a sequence number to be signaled, and cleans up the
736 * request and object lists appropriately for that event.
739 i915_wait_request(struct drm_device *dev, uint32_t seqno)
741 drm_i915_private_t *dev_priv = dev->dev_private;
746 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
747 dev_priv->mm.waiting_gem_seqno = seqno;
748 i915_user_irq_on(dev_priv);
749 ret = wait_event_interruptible(dev_priv->irq_queue,
750 i915_seqno_passed(i915_get_gem_seqno(dev),
752 dev_priv->mm.wedged);
753 i915_user_irq_off(dev_priv);
754 dev_priv->mm.waiting_gem_seqno = 0;
756 if (dev_priv->mm.wedged)
760 DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
761 __func__, ret, seqno, i915_get_gem_seqno(dev));
763 /* Directly dispatch request retiring. While we have the work queue
764 * to handle this, the waiter on a request often wants an associated
765 * buffer to have made it to the inactive list, and we would need
766 * a separate wait queue to handle that.
769 i915_gem_retire_requests(dev);
775 i915_gem_flush(struct drm_device *dev,
776 uint32_t invalidate_domains,
777 uint32_t flush_domains)
779 drm_i915_private_t *dev_priv = dev->dev_private;
784 DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
785 invalidate_domains, flush_domains);
788 if (flush_domains & I915_GEM_DOMAIN_CPU)
789 drm_agp_chipset_flush(dev);
791 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
792 I915_GEM_DOMAIN_GTT)) {
796 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
797 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
798 * also flushed at 2d versus 3d pipeline switches.
802 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
803 * MI_READ_FLUSH is set, and is always flushed on 965.
805 * I915_GEM_DOMAIN_COMMAND may not exist?
807 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
808 * invalidated when MI_EXE_FLUSH is set.
810 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
811 * invalidated with every MI_FLUSH.
815 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
816 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
817 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
818 * are flushed at any MI_FLUSH.
821 cmd = CMD_MI_FLUSH | MI_NO_WRITE_FLUSH;
822 if ((invalidate_domains|flush_domains) &
823 I915_GEM_DOMAIN_RENDER)
824 cmd &= ~MI_NO_WRITE_FLUSH;
825 if (!IS_I965G(dev)) {
827 * On the 965, the sampler cache always gets flushed
828 * and this bit is reserved.
830 if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
831 cmd |= MI_READ_FLUSH;
833 if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
837 DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
841 OUT_RING(0); /* noop */
847 * Ensures that all rendering to the object has completed and the object is
848 * safe to unbind from the GTT or access from the CPU.
851 i915_gem_object_wait_rendering(struct drm_gem_object *obj)
853 struct drm_device *dev = obj->dev;
854 struct drm_i915_gem_object *obj_priv = obj->driver_private;
857 /* If there are writes queued to the buffer, flush and
858 * create a new seqno to wait for.
860 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
861 uint32_t write_domain = obj->write_domain;
863 DRM_INFO("%s: flushing object %p from write domain %08x\n",
864 __func__, obj, write_domain);
866 i915_gem_flush(dev, 0, write_domain);
867 obj->write_domain = 0;
869 i915_gem_object_move_to_active(obj);
870 obj_priv->last_rendering_seqno = i915_add_request(dev,
872 BUG_ON(obj_priv->last_rendering_seqno == 0);
874 DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
877 /* If there is rendering queued on the buffer being evicted, wait for
880 if (obj_priv->active) {
882 DRM_INFO("%s: object %p wait for seqno %08x\n",
883 __func__, obj, obj_priv->last_rendering_seqno);
885 ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
894 * Unbinds an object from the GTT aperture.
897 i915_gem_object_unbind(struct drm_gem_object *obj)
899 struct drm_device *dev = obj->dev;
900 struct drm_i915_gem_object *obj_priv = obj->driver_private;
904 DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
905 DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
907 if (obj_priv->gtt_space == NULL)
910 if (obj_priv->pin_count != 0) {
911 DRM_ERROR("Attempting to unbind pinned buffer\n");
915 /* Wait for any rendering to complete
917 ret = i915_gem_object_wait_rendering(obj);
919 DRM_ERROR("wait_rendering failed: %d\n", ret);
923 /* Move the object to the CPU domain to ensure that
924 * any possible CPU writes while it's not in the GTT
925 * are flushed when we go to remap it. This will
926 * also ensure that all pending GPU writes are finished
929 ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
930 I915_GEM_DOMAIN_CPU);
932 DRM_ERROR("set_domain failed: %d\n", ret);
936 if (obj_priv->agp_mem != NULL) {
937 drm_unbind_agp(obj_priv->agp_mem);
938 drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
939 obj_priv->agp_mem = NULL;
942 BUG_ON(obj_priv->active);
944 i915_gem_object_free_page_list(obj);
946 if (obj_priv->gtt_space) {
947 atomic_dec(&dev->gtt_count);
948 atomic_sub(obj->size, &dev->gtt_memory);
950 drm_memrange_put_block(obj_priv->gtt_space);
951 obj_priv->gtt_space = NULL;
954 /* Remove ourselves from the LRU list if present. */
955 if (!list_empty(&obj_priv->list))
956 list_del_init(&obj_priv->list);
962 i915_gem_evict_something(struct drm_device *dev)
964 drm_i915_private_t *dev_priv = dev->dev_private;
965 struct drm_gem_object *obj;
966 struct drm_i915_gem_object *obj_priv;
970 /* If there's an inactive buffer available now, grab it
973 if (!list_empty(&dev_priv->mm.inactive_list)) {
974 obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
975 struct drm_i915_gem_object,
978 BUG_ON(obj_priv->pin_count != 0);
980 DRM_INFO("%s: evicting %p\n", __func__, obj);
982 BUG_ON(obj_priv->active);
984 /* Wait on the rendering and unbind the buffer. */
985 ret = i915_gem_object_unbind(obj);
989 /* If we didn't get anything, but the ring is still processing
990 * things, wait for one of those things to finish and hopefully
991 * leave us a buffer to evict.
993 if (!list_empty(&dev_priv->mm.request_list)) {
994 struct drm_i915_gem_request *request;
996 request = list_first_entry(&dev_priv->mm.request_list,
997 struct drm_i915_gem_request,
1000 ret = i915_wait_request(dev, request->seqno);
1004 /* if waiting caused an object to become inactive,
1005 * then loop around and wait for it. Otherwise, we
1006 * assume that waiting freed and unbound something,
1007 * so there should now be some space in the GTT
1009 if (!list_empty(&dev_priv->mm.inactive_list))
1014 /* If we didn't have anything on the request list but there
1015 * are buffers awaiting a flush, emit one and try again.
1016 * When we wait on it, those buffers waiting for that flush
1017 * will get moved to inactive.
1019 if (!list_empty(&dev_priv->mm.flushing_list)) {
1020 obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
1021 struct drm_i915_gem_object,
1023 obj = obj_priv->obj;
1028 i915_add_request(dev, obj->write_domain);
1034 DRM_ERROR("inactive empty %d request empty %d "
1035 "flushing empty %d\n",
1036 list_empty(&dev_priv->mm.inactive_list),
1037 list_empty(&dev_priv->mm.request_list),
1038 list_empty(&dev_priv->mm.flushing_list));
1039 /* If we didn't do any of the above, there's nothing to be done
1040 * and we just can't fit it in.
1048 i915_gem_object_get_page_list(struct drm_gem_object *obj)
1050 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1052 struct address_space *mapping;
1053 struct inode *inode;
1057 if (obj_priv->page_list)
1060 /* Get the list of pages out of our struct file. They'll be pinned
1061 * at this point until we release them.
1063 page_count = obj->size / PAGE_SIZE;
1064 BUG_ON(obj_priv->page_list != NULL);
1065 obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
1067 if (obj_priv->page_list == NULL) {
1068 DRM_ERROR("Failed to allocate page list\n");
1072 inode = obj->filp->f_path.dentry->d_inode;
1073 mapping = inode->i_mapping;
1074 for (i = 0; i < page_count; i++) {
1075 page = find_get_page(mapping, i);
1076 if (page == NULL || !PageUptodate(page)) {
1078 page_cache_release(page);
1081 ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL);
1084 DRM_ERROR("shmem_getpage failed: %d\n", ret);
1085 i915_gem_object_free_page_list(obj);
1090 obj_priv->page_list[i] = page;
1096 * Finds free space in the GTT aperture and binds the object there.
1099 i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1101 struct drm_device *dev = obj->dev;
1102 drm_i915_private_t *dev_priv = dev->dev_private;
1103 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1104 struct drm_memrange_node *free_space;
1105 int page_count, ret;
1108 alignment = PAGE_SIZE;
1109 if (alignment & (PAGE_SIZE - 1)) {
1110 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1115 free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space,
1118 if (free_space != NULL) {
1119 obj_priv->gtt_space =
1120 drm_memrange_get_block(free_space, obj->size,
1122 if (obj_priv->gtt_space != NULL) {
1123 obj_priv->gtt_space->private = obj;
1124 obj_priv->gtt_offset = obj_priv->gtt_space->start;
1127 if (obj_priv->gtt_space == NULL) {
1128 /* If the gtt is empty and we're still having trouble
1129 * fitting our object in, we're out of memory.
1132 DRM_INFO("%s: GTT full, evicting something\n", __func__);
1134 if (list_empty(&dev_priv->mm.inactive_list) &&
1135 list_empty(&dev_priv->mm.flushing_list) &&
1136 list_empty(&dev_priv->mm.active_list)) {
1137 DRM_ERROR("GTT full, but LRU list empty\n");
1141 ret = i915_gem_evict_something(dev);
1143 DRM_ERROR("Failed to evict a buffer %d\n", ret);
1150 DRM_INFO("Binding object of size %d at 0x%08x\n",
1151 obj->size, obj_priv->gtt_offset);
1153 ret = i915_gem_object_get_page_list(obj);
1155 drm_memrange_put_block(obj_priv->gtt_space);
1156 obj_priv->gtt_space = NULL;
1160 page_count = obj->size / PAGE_SIZE;
1161 /* Create an AGP memory structure pointing at our pages, and bind it
1164 obj_priv->agp_mem = drm_agp_bind_pages(dev,
1165 obj_priv->page_list,
1167 obj_priv->gtt_offset);
1168 if (obj_priv->agp_mem == NULL) {
1169 i915_gem_object_free_page_list(obj);
1170 drm_memrange_put_block(obj_priv->gtt_space);
1171 obj_priv->gtt_space = NULL;
1174 atomic_inc(&dev->gtt_count);
1175 atomic_add(obj->size, &dev->gtt_memory);
1177 /* Assert that the object is not currently in any GPU domain. As it
1178 * wasn't in the GTT, there shouldn't be any way it could have been in
1181 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1182 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
1188 i915_gem_clflush_object(struct drm_gem_object *obj)
1190 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1192 /* If we don't have a page list set up, then we're not pinned
1193 * to GPU, and we can ignore the cache flush because it'll happen
1194 * again at bind time.
1196 if (obj_priv->page_list == NULL)
1199 drm_ttm_cache_flush(obj_priv->page_list, obj->size / PAGE_SIZE);
1203 * Set the next domain for the specified object. This
1204 * may not actually perform the necessary flushing/invalidating though,
1205 * as that may want to be batched with other set_domain operations
1207 * This is (we hope) the only really tricky part of gem. The goal
1208 * is fairly simple -- track which caches hold bits of the object
1209 * and make sure they remain coherent. A few concrete examples may
1210 * help to explain how it works. For shorthand, we use the notation
1211 * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
1212 * a pair of read and write domain masks.
1214 * Case 1: the batch buffer
1220 * 5. Unmapped from GTT
1223 * Let's take these a step at a time
1226 * Pages allocated from the kernel may still have
1227 * cache contents, so we set them to (CPU, CPU) always.
1228 * 2. Written by CPU (using pwrite)
1229 * The pwrite function calls set_domain (CPU, CPU) and
1230 * this function does nothing (as nothing changes)
1232 * This function asserts that the object is not
1233 * currently in any GPU-based read or write domains
1235 * i915_gem_execbuffer calls set_domain (COMMAND, 0).
1236 * As write_domain is zero, this function adds in the
1237 * current read domains (CPU+COMMAND, 0).
1238 * flush_domains is set to CPU.
1239 * invalidate_domains is set to COMMAND
1240 * clflush is run to get data out of the CPU caches
1241 * then i915_dev_set_domain calls i915_gem_flush to
1242 * emit an MI_FLUSH and drm_agp_chipset_flush
1243 * 5. Unmapped from GTT
1244 * i915_gem_object_unbind calls set_domain (CPU, CPU)
1245 * flush_domains and invalidate_domains end up both zero
1246 * so no flushing/invalidating happens
1250 * Case 2: The shared render buffer
1254 * 3. Read/written by GPU
1255 * 4. set_domain to (CPU,CPU)
1256 * 5. Read/written by CPU
1257 * 6. Read/written by GPU
1260 * Same as last example, (CPU, CPU)
1262 * Nothing changes (assertions find that it is not in the GPU)
1263 * 3. Read/written by GPU
1264 * execbuffer calls set_domain (RENDER, RENDER)
1265 * flush_domains gets CPU
1266 * invalidate_domains gets GPU
1268 * MI_FLUSH and drm_agp_chipset_flush
1269 * 4. set_domain (CPU, CPU)
1270 * flush_domains gets GPU
1271 * invalidate_domains gets CPU
1272 * wait_rendering (obj) to make sure all drawing is complete.
1273 * This will include an MI_FLUSH to get the data from GPU
1275 * clflush (obj) to invalidate the CPU cache
1276 * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
1277 * 5. Read/written by CPU
1278 * cache lines are loaded and dirtied
1279 * 6. Read/written by GPU
1280 * Same as last GPU access
1282 * Case 3: The constant buffer
1287 * 4. Updated (written) by CPU again
1296 * flush_domains = CPU
1297 * invalidate_domains = RENDER
1300 * drm_agp_chipset_flush
1301 * 4. Updated (written) by CPU again
1303 * flush_domains = 0 (no previous write domain)
1304 * invalidate_domains = 0 (no new read domains)
1307 * flush_domains = CPU
1308 * invalidate_domains = RENDER
1311 * drm_agp_chipset_flush
1314 i915_gem_object_set_domain(struct drm_gem_object *obj,
1315 uint32_t read_domains,
1316 uint32_t write_domain)
1318 struct drm_device *dev = obj->dev;
1319 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1320 uint32_t invalidate_domains = 0;
1321 uint32_t flush_domains = 0;
1325 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1327 obj->read_domains, read_domains,
1328 obj->write_domain, write_domain);
1331 * If the object isn't moving to a new write domain,
1332 * let the object stay in multiple read domains
1334 if (write_domain == 0)
1335 read_domains |= obj->read_domains;
1337 obj_priv->dirty = 1;
1340 * Flush the current write domain if
1341 * the new read domains don't match. Invalidate
1342 * any read domains which differ from the old
1345 if (obj->write_domain && obj->write_domain != read_domains) {
1346 flush_domains |= obj->write_domain;
1347 invalidate_domains |= read_domains & ~obj->write_domain;
1350 * Invalidate any read caches which may have
1351 * stale data. That is, any new read domains.
1353 invalidate_domains |= read_domains & ~obj->read_domains;
1354 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
1356 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
1357 __func__, flush_domains, invalidate_domains);
1360 * If we're invalidating the CPU cache and flushing a GPU cache,
1361 * then pause for rendering so that the GPU caches will be
1362 * flushed before the cpu cache is invalidated
1364 if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
1365 (flush_domains & ~(I915_GEM_DOMAIN_CPU |
1366 I915_GEM_DOMAIN_GTT))) {
1367 ret = i915_gem_object_wait_rendering(obj);
1371 i915_gem_clflush_object(obj);
1374 if ((write_domain | flush_domains) != 0)
1375 obj->write_domain = write_domain;
1377 /* If we're invalidating the CPU domain, clear the per-page CPU
1378 * domain list as well.
1380 if (obj_priv->page_cpu_valid != NULL &&
1381 (obj->read_domains & I915_GEM_DOMAIN_CPU) &&
1382 ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) {
1383 memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
1385 obj->read_domains = read_domains;
1387 dev->invalidate_domains |= invalidate_domains;
1388 dev->flush_domains |= flush_domains;
1390 DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
1392 obj->read_domains, obj->write_domain,
1393 dev->invalidate_domains, dev->flush_domains);
1399 * Set the read/write domain on a range of the object.
1401 * Currently only implemented for CPU reads, otherwise drops to normal
1402 * i915_gem_object_set_domain().
1405 i915_gem_object_set_domain_range(struct drm_gem_object *obj,
1408 uint32_t read_domains,
1409 uint32_t write_domain)
1411 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1414 if (obj->read_domains & I915_GEM_DOMAIN_CPU)
1417 if (read_domains != I915_GEM_DOMAIN_CPU ||
1419 return i915_gem_object_set_domain(obj,
1420 read_domains, write_domain);
1422 /* Wait on any GPU rendering to the object to be flushed. */
1423 if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
1424 ret = i915_gem_object_wait_rendering(obj);
1429 if (obj_priv->page_cpu_valid == NULL) {
1430 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
1434 /* Flush the cache on any pages that are still invalid from the CPU's
1437 for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
1438 if (obj_priv->page_cpu_valid[i])
1441 drm_ttm_cache_flush(obj_priv->page_list + i, 1);
1443 obj_priv->page_cpu_valid[i] = 1;
1450 * Once all of the objects have been set in the proper domain,
1451 * perform the necessary flush and invalidate operations.
1453 * Returns the write domains flushed, for use in flush tracking.
1456 i915_gem_dev_set_domain(struct drm_device *dev)
1458 uint32_t flush_domains = dev->flush_domains;
1461 * Now that all the buffers are synced to the proper domains,
1462 * flush and invalidate the collected domains
1464 if (dev->invalidate_domains | dev->flush_domains) {
1466 DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
1468 dev->invalidate_domains,
1469 dev->flush_domains);
1472 dev->invalidate_domains,
1473 dev->flush_domains);
1474 dev->invalidate_domains = 0;
1475 dev->flush_domains = 0;
1478 return flush_domains;
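/*
 * Added worked example, following "Case 3" above: a constant buffer last
 * written by the CPU and now read by the render engine leaves
 * flush_domains = CPU and invalidate_domains = RENDER accumulated on the
 * device, and the single i915_gem_flush() call here emits the MI_FLUSH and
 * chipset flush that satisfy both at once.
 */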
1482 * Pin an object to the GTT and evaluate the relocations landing in it.
1485 i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
1486 struct drm_file *file_priv,
1487 struct drm_i915_gem_exec_object *entry)
1489 struct drm_device *dev = obj->dev;
1490 struct drm_i915_gem_relocation_entry reloc;
1491 struct drm_i915_gem_relocation_entry __user *relocs;
1492 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1494 uint32_t last_reloc_offset = -1;
1495 void *reloc_page = NULL;
1497 /* Choose the GTT offset for our buffer and put it there. */
1498 ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
1502 entry->offset = obj_priv->gtt_offset;
1504 relocs = (struct drm_i915_gem_relocation_entry __user *)
1505 (uintptr_t) entry->relocs_ptr;
1506 /* Apply the relocations, using the GTT aperture to avoid cache
1507 * flushing requirements.
1509 for (i = 0; i < entry->relocation_count; i++) {
1510 struct drm_gem_object *target_obj;
1511 struct drm_i915_gem_object *target_obj_priv;
1512 uint32_t reloc_val, reloc_offset, *reloc_entry;
1515 ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
1517 i915_gem_object_unpin(obj);
1521 target_obj = drm_gem_object_lookup(obj->dev, file_priv,
1522 reloc.target_handle);
1523 if (target_obj == NULL) {
1524 i915_gem_object_unpin(obj);
1527 target_obj_priv = target_obj->driver_private;
1529 /* The target buffer should have appeared before us in the
1530 * exec_object list, so it should have a GTT space bound by now.
1532 if (target_obj_priv->gtt_space == NULL) {
1533 DRM_ERROR("No GTT space found for object %d\n",
1534 reloc.target_handle);
1535 drm_gem_object_unreference(target_obj);
1536 i915_gem_object_unpin(obj);
1540 if (reloc.offset > obj->size - 4) {
1541 DRM_ERROR("Relocation beyond object bounds: "
1542 "obj %p target %d offset %d size %d.\n",
1543 obj, reloc.target_handle,
1544 (int) reloc.offset, (int) obj->size);
1545 drm_gem_object_unreference(target_obj);
1546 i915_gem_object_unpin(obj);
1549 if (reloc.offset & 3) {
1550 DRM_ERROR("Relocation not 4-byte aligned: "
1551 "obj %p target %d offset %d.\n",
1552 obj, reloc.target_handle,
1553 (int) reloc.offset);
1554 drm_gem_object_unreference(target_obj);
1555 i915_gem_object_unpin(obj);
1559 if (reloc.write_domain && target_obj->pending_write_domain &&
1560 reloc.write_domain != target_obj->pending_write_domain) {
1561 DRM_ERROR("Write domain conflict: "
1562 "obj %p target %d offset %d "
1563 "new %08x old %08x\n",
1564 obj, reloc.target_handle,
1567 target_obj->pending_write_domain);
1568 drm_gem_object_unreference(target_obj);
1569 i915_gem_object_unpin(obj);
1574 DRM_INFO("%s: obj %p offset %08x target %d "
1575 "read %08x write %08x gtt %08x "
1576 "presumed %08x delta %08x\n",
1580 (int) reloc.target_handle,
1581 (int) reloc.read_domains,
1582 (int) reloc.write_domain,
1583 (int) target_obj_priv->gtt_offset,
1584 (int) reloc.presumed_offset,
1588 target_obj->pending_read_domains |= reloc.read_domains;
1589 target_obj->pending_write_domain |= reloc.write_domain;
1591 /* If the relocation already has the right value in it, no
1592 * more work needs to be done.
1594 if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
1595 drm_gem_object_unreference(target_obj);
1599 /* Now that we're going to actually write some data in,
1600 * make sure that any rendering using this buffer's contents
1603 i915_gem_object_wait_rendering(obj);
1605 /* As we're writing through the gtt, flush
1606 * any CPU writes before we write the relocations
1608 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
1609 i915_gem_clflush_object(obj);
1610 drm_agp_chipset_flush(dev);
1611 obj->write_domain = 0;
1614 /* Map the page containing the relocation we're going to
1617 reloc_offset = obj_priv->gtt_offset + reloc.offset;
1618 if (reloc_page == NULL ||
1619 (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
1620 (reloc_offset & ~(PAGE_SIZE - 1))) {
1621 if (reloc_page != NULL)
1622 iounmap(reloc_page);
1624 reloc_page = ioremap(dev->agp->base +
1625 (reloc_offset & ~(PAGE_SIZE - 1)),
1627 last_reloc_offset = reloc_offset;
1628 if (reloc_page == NULL) {
1629 drm_gem_object_unreference(target_obj);
1630 i915_gem_object_unpin(obj);
1635 reloc_entry = (uint32_t *)((char *)reloc_page +
1636 (reloc_offset & (PAGE_SIZE - 1)));
1637 reloc_val = target_obj_priv->gtt_offset + reloc.delta;
1640 DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
1641 obj, (unsigned int) reloc.offset,
1642 readl(reloc_entry), reloc_val);
1644 writel(reloc_val, reloc_entry);
1646 /* Write the updated presumed offset for this entry back out
1649 reloc.presumed_offset = target_obj_priv->gtt_offset;
1650 ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
1652 drm_gem_object_unreference(target_obj);
1653 i915_gem_object_unpin(obj);
1657 drm_gem_object_unreference(target_obj);
1660 if (reloc_page != NULL)
1661 iounmap(reloc_page);
1665 i915_gem_dump_object(obj, 128, __func__, ~0);
1670 /** Dispatch a batchbuffer to the ring
1673 i915_dispatch_gem_execbuffer(struct drm_device *dev,
1674 struct drm_i915_gem_execbuffer *exec,
1675 uint64_t exec_offset)
1677 drm_i915_private_t *dev_priv = dev->dev_private;
1678 struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
1679 (uintptr_t) exec->cliprects_ptr;
1680 int nbox = exec->num_cliprects;
1682 uint32_t exec_start, exec_len;
1685 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
1686 exec_len = (uint32_t) exec->batch_len;
1688 if ((exec_start | exec_len) & 0x7) {
1689 DRM_ERROR("alignment\n");
1696 count = nbox ? nbox : 1;
1698 for (i = 0; i < count; i++) {
1700 int ret = i915_emit_box(dev, boxes, i,
1701 exec->DR1, exec->DR4);
1706 if (IS_I830(dev) || IS_845G(dev)) {
1708 OUT_RING(MI_BATCH_BUFFER);
1709 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1710 OUT_RING(exec_start + exec_len - 4);
1715 if (IS_I965G(dev)) {
1716 OUT_RING(MI_BATCH_BUFFER_START |
1718 MI_BATCH_NON_SECURE_I965);
1719 OUT_RING(exec_start);
1721 OUT_RING(MI_BATCH_BUFFER_START |
1723 OUT_RING(exec_start | MI_BATCH_NON_SECURE);
1729 /* XXX breadcrumb */
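/*
 * Added note: 830/845-class hardware takes MI_BATCH_BUFFER with explicit
 * start and end addresses, while later parts use MI_BATCH_BUFFER_START with
 * only the start address (and the 965-specific non-secure bit), which is why
 * the dispatch above is split three ways.
 */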
1733 /* Throttle our rendering by waiting until the ring has completed our requests
1734 * emitted over 20 msec ago.
1736 * This should get us reasonable parallelism between CPU and GPU but also
1737 * relatively low latency when blocking on a particular request to finish.
1740 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
1742 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1746 mutex_lock(&dev->struct_mutex);
1747 seqno = i915_file_priv->mm.last_gem_throttle_seqno;
1748 i915_file_priv->mm.last_gem_throttle_seqno =
1749 i915_file_priv->mm.last_gem_seqno;
1751 ret = i915_wait_request(dev, seqno);
1752 mutex_unlock(&dev->struct_mutex);
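/*
 * Added note: the seqno waited on here is the one recorded by the previous
 * throttle call, so a client that throttles once per frame runs roughly one
 * throttle interval behind the GPU instead of being fully serialized.
 */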
1757 i915_gem_execbuffer(struct drm_device *dev, void *data,
1758 struct drm_file *file_priv)
1760 drm_i915_private_t *dev_priv = dev->dev_private;
1761 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
1762 struct drm_i915_gem_execbuffer *args = data;
1763 struct drm_i915_gem_exec_object *exec_list = NULL;
1764 struct drm_gem_object **object_list = NULL;
1765 struct drm_gem_object *batch_obj;
1766 int ret, i, pinned = 0;
1767 uint64_t exec_offset;
1768 uint32_t seqno, flush_domains;
1771 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
1772 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
1775 /* Copy in the exec list from userland */
1776 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
1778 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
1780 if (exec_list == NULL || object_list == NULL) {
1781 DRM_ERROR("Failed to allocate exec or object list "
1783 args->buffer_count);
1787 ret = copy_from_user(exec_list,
1788 (struct drm_i915_relocation_entry __user *)
1789 (uintptr_t) args->buffers_ptr,
1790 sizeof(*exec_list) * args->buffer_count);
1792 DRM_ERROR("copy %d exec entries failed %d\n",
1793 args->buffer_count, ret);
1797 mutex_lock(&dev->struct_mutex);
1799 i915_verify_inactive(dev, __FILE__, __LINE__);
1801 if (dev_priv->mm.wedged) {
1802 DRM_ERROR("Execbuf while wedged\n");
1803 mutex_unlock(&dev->struct_mutex);
1807 if (dev_priv->mm.suspended) {
1808 DRM_ERROR("Execbuf while VT-switched.\n");
1809 mutex_unlock(&dev->struct_mutex);
1813 /* Zero the global flush/invalidate flags. These
1814 * will be modified as each object is bound to the
1817 dev->invalidate_domains = 0;
1818 dev->flush_domains = 0;
1820 /* Look up object handles and perform the relocations */
1821 for (i = 0; i < args->buffer_count; i++) {
1822 object_list[i] = drm_gem_object_lookup(dev, file_priv,
1823 exec_list[i].handle);
1824 if (object_list[i] == NULL) {
1825 DRM_ERROR("Invalid object handle %d at index %d\n",
1826 exec_list[i].handle, i);
1831 object_list[i]->pending_read_domains = 0;
1832 object_list[i]->pending_write_domain = 0;
1833 ret = i915_gem_object_pin_and_relocate(object_list[i],
1837 DRM_ERROR("object bind and relocate failed %d\n", ret);
1843 /* Set the pending read domains for the batch buffer to COMMAND */
1844 batch_obj = object_list[args->buffer_count-1];
1845 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1846 batch_obj->pending_write_domain = 0;
1848 i915_verify_inactive(dev, __FILE__, __LINE__);
1850 for (i = 0; i < args->buffer_count; i++) {
1851 struct drm_gem_object *obj = object_list[i];
1852 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1854 if (obj_priv->gtt_space == NULL) {
1855 /* We evicted the buffer in the process of validating
1856 * our set of buffers. We could try to recover by
1857 * kicking everything out and trying again from
1864 /* make sure all previous memory operations have passed */
1865 ret = i915_gem_object_set_domain(obj,
1866 obj->pending_read_domains,
1867 obj->pending_write_domain);
1872 i915_verify_inactive(dev, __FILE__, __LINE__);
1874 /* Flush/invalidate caches and chipset buffer */
1875 flush_domains = i915_gem_dev_set_domain(dev);
1877 i915_verify_inactive(dev, __FILE__, __LINE__);
1880 for (i = 0; i < args->buffer_count; i++) {
1881 i915_gem_object_check_coherency(object_list[i],
1882 exec_list[i].handle);
1886 exec_offset = exec_list[args->buffer_count - 1].offset;
1889 i915_gem_dump_object(object_list[args->buffer_count - 1],
1895 /* Exec the batchbuffer */
1896 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
1898 DRM_ERROR("dispatch failed %d\n", ret);
1903 * Ensure that the commands in the batch buffer are
1904 * finished before the interrupt fires
1906 flush_domains |= i915_retire_commands(dev);
1908 i915_verify_inactive(dev, __FILE__, __LINE__);
1911 * Get a seqno representing the execution of the current buffer,
1912 * which we can wait on. We would like to mitigate these interrupts,
1913 * likely by only creating seqnos occasionally (so that we have
1914 * *some* interrupts representing completion of buffers that we can
1915 * wait on when trying to clear up gtt space).
1917 seqno = i915_add_request(dev, flush_domains);
1919 i915_file_priv->mm.last_gem_seqno = seqno;
1920 for (i = 0; i < args->buffer_count; i++) {
1921 struct drm_gem_object *obj = object_list[i];
1922 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1924 i915_gem_object_move_to_active(obj);
1925 obj_priv->last_rendering_seqno = seqno;
1927 DRM_INFO("%s: move to exec list %p\n", __func__, obj);
1931 i915_dump_lru(dev, __func__);
1934 i915_verify_inactive(dev, __FILE__, __LINE__);
1936 /* Copy the new buffer offsets back to the user's exec list. */
1937 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
1938 (uintptr_t) args->buffers_ptr,
1940 sizeof(*exec_list) * args->buffer_count);
1942 DRM_ERROR("failed to copy %d exec entries "
1943 "back to user (%d)\n",
1944 args->buffer_count, ret);
1946 if (object_list != NULL) {
1947 for (i = 0; i < pinned; i++)
1948 i915_gem_object_unpin(object_list[i]);
1950 for (i = 0; i < args->buffer_count; i++)
1951 drm_gem_object_unreference(object_list[i]);
1953 mutex_unlock(&dev->struct_mutex);
1956 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
1958 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
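/*
 * Added note: the execbuffer flow above is, in short: copy in the exec list,
 * pin and relocate every object, resolve per-object and device domains,
 * dispatch the batch (always the last object in the list), then emit a
 * request so the objects stay on the active list until the GPU retires them.
 */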
1965 i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
1967 struct drm_device *dev = obj->dev;
1968 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1971 i915_verify_inactive(dev, __FILE__, __LINE__);
1972 if (obj_priv->gtt_space == NULL) {
1973 ret = i915_gem_object_bind_to_gtt(obj, alignment);
1975 DRM_ERROR("Failure to bind: %d", ret);
1979 obj_priv->pin_count++;
1981 /* If the object is not active and not pending a flush,
1982 * remove it from the inactive list
1984 if (obj_priv->pin_count == 1) {
1985 atomic_inc(&dev->pin_count);
1986 atomic_add(obj->size, &dev->pin_memory);
1987 if (!obj_priv->active &&
1988 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
1989 I915_GEM_DOMAIN_GTT)) == 0 &&
1990 !list_empty(&obj_priv->list))
1991 list_del_init(&obj_priv->list);
1993 i915_verify_inactive(dev, __FILE__, __LINE__);
1999 i915_gem_object_unpin(struct drm_gem_object *obj)
2001 struct drm_device *dev = obj->dev;
2002 drm_i915_private_t *dev_priv = dev->dev_private;
2003 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2005 i915_verify_inactive(dev, __FILE__, __LINE__);
2006 obj_priv->pin_count--;
2007 BUG_ON(obj_priv->pin_count < 0);
2008 BUG_ON(obj_priv->gtt_space == NULL);
2010 /* If the object is no longer pinned, and is
2011 * neither active nor being flushed, then stick it on
2014 if (obj_priv->pin_count == 0) {
2015 if (!obj_priv->active &&
2016 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
2017 I915_GEM_DOMAIN_GTT)) == 0)
2018 list_move_tail(&obj_priv->list,
2019 &dev_priv->mm.inactive_list);
2020 atomic_dec(&dev->pin_count);
2021 atomic_sub(obj->size, &dev->pin_memory);
2023 i915_verify_inactive(dev, __FILE__, __LINE__);
2027 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2028 struct drm_file *file_priv)
2030 struct drm_i915_gem_pin *args = data;
2031 struct drm_gem_object *obj;
2032 struct drm_i915_gem_object *obj_priv;
2035 mutex_lock(&dev->struct_mutex);
2037 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2039 DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
2041 mutex_unlock(&dev->struct_mutex);
2044 obj_priv = obj->driver_private;
2046 ret = i915_gem_object_pin(obj, args->alignment);
2048 drm_gem_object_unreference(obj);
2049 mutex_unlock(&dev->struct_mutex);
2053 /* XXX - flush the CPU caches for pinned objects
2054 * as the X server doesn't manage domains yet
2056 if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
2057 i915_gem_clflush_object(obj);
2058 drm_agp_chipset_flush(dev);
2059 obj->write_domain = 0;
2061 args->offset = obj_priv->gtt_offset;
2062 drm_gem_object_unreference(obj);
2063 mutex_unlock(&dev->struct_mutex);
2069 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
2070 struct drm_file *file_priv)
2072 struct drm_i915_gem_pin *args = data;
2073 struct drm_gem_object *obj;
2075 mutex_lock(&dev->struct_mutex);
2077 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2079 DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
2081 mutex_unlock(&dev->struct_mutex);
2085 i915_gem_object_unpin(obj);
2087 drm_gem_object_unreference(obj);
2088 mutex_unlock(&dev->struct_mutex);
2093 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2094 struct drm_file *file_priv)
2096 struct drm_i915_gem_busy *args = data;
2097 struct drm_gem_object *obj;
2098 struct drm_i915_gem_object *obj_priv;
2100 mutex_lock(&dev->struct_mutex);
2101 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
2103 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
2105 mutex_unlock(&dev->struct_mutex);
2109 obj_priv = obj->driver_private;
2110 args->busy = obj_priv->active;
2112 drm_gem_object_unreference(obj);
2113 mutex_unlock(&dev->struct_mutex);
2118 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
2119 struct drm_file *file_priv)
2121 return i915_gem_ring_throttle(dev, file_priv);
2124 int i915_gem_init_object(struct drm_gem_object *obj)
2126 struct drm_i915_gem_object *obj_priv;
2128 obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
2129 if (obj_priv == NULL)
2133 * We've just allocated pages from the kernel,
2134 * so they've just been written by the CPU with
2135 * zeros. They'll need to be clflushed before we
2136 * use them with the GPU.
2138 obj->write_domain = I915_GEM_DOMAIN_CPU;
2139 obj->read_domains = I915_GEM_DOMAIN_CPU;
2141 obj->driver_private = obj_priv;
2142 obj_priv->obj = obj;
2143 INIT_LIST_HEAD(&obj_priv->list);
2147 void i915_gem_free_object(struct drm_gem_object *obj)
2149 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2151 while (obj_priv->pin_count > 0)
2152 i915_gem_object_unpin(obj);
2154 i915_gem_object_unbind(obj);
2156 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2157 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
2161 i915_gem_set_domain(struct drm_gem_object *obj,
2162 struct drm_file *file_priv,
2163 uint32_t read_domains,
2164 uint32_t write_domain)
2166 struct drm_device *dev = obj->dev;
2168 uint32_t flush_domains;
2170 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
2172 ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
2175 flush_domains = i915_gem_dev_set_domain(obj->dev);
2177 if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
2178 (void) i915_add_request(dev, flush_domains);
2183 /** Unbinds all objects that are on the given buffer list. */
2185 i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2187 struct drm_gem_object *obj;
2188 struct drm_i915_gem_object *obj_priv;
2191 while (!list_empty(head)) {
2192 obj_priv = list_first_entry(head,
2193 struct drm_i915_gem_object,
2195 obj = obj_priv->obj;
2197 if (obj_priv->pin_count != 0) {
2198 DRM_ERROR("Pinned object in unbind list\n");
2199 mutex_unlock(&dev->struct_mutex);
2203 ret = i915_gem_object_unbind(obj);
2205 DRM_ERROR("Error unbinding object in LeaveVT: %d\n",
2207 mutex_unlock(&dev->struct_mutex);
2217 i915_gem_idle(struct drm_device *dev)
2219 drm_i915_private_t *dev_priv = dev->dev_private;
2220 uint32_t seqno, cur_seqno, last_seqno;
2223 if (dev_priv->mm.suspended)
2226 /* Hack! Don't let anybody do execbuf while we don't control the chip.
2227 * We need to replace this with a semaphore, or something.
2229 dev_priv->mm.suspended = 1;
2231 i915_kernel_lost_context(dev);
2233 /* Flush the GPU along with all non-CPU write domains
2235 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
2236 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
2237 seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
2238 I915_GEM_DOMAIN_GTT));
2241 mutex_unlock(&dev->struct_mutex);
2245 dev_priv->mm.waiting_gem_seqno = seqno;
2249 cur_seqno = i915_get_gem_seqno(dev);
2250 if (i915_seqno_passed(cur_seqno, seqno))
2252 if (last_seqno == cur_seqno) {
2253 if (stuck++ > 100) {
2254 DRM_ERROR("hardware wedged\n");
2255 dev_priv->mm.wedged = 1;
2256 DRM_WAKEUP(&dev_priv->irq_queue);
2261 last_seqno = cur_seqno;
2263 dev_priv->mm.waiting_gem_seqno = 0;
2265 i915_gem_retire_requests(dev);
2267 /* Active and flushing should now be empty as we've
2268 * waited for a sequence higher than any pending execbuffer
2270 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2271 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2273 /* Request should now be empty as we've also waited
2274 * for the last request in the list
2276 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2278 /* Move all buffers out of the GTT. */
2279 i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
2281 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2282 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2283 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2284 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2289 i915_gem_init_hws(struct drm_device *dev)
2291 drm_i915_private_t *dev_priv = dev->dev_private;
2292 struct drm_gem_object *obj;
2293 struct drm_i915_gem_object *obj_priv;
2296 /* If we need a physical address for the status page, it's already
2297 * initialized at driver load time.
2299 if (!I915_NEED_GFX_HWS(dev))
2302 obj = drm_gem_object_alloc(dev, 4096);
2304 DRM_ERROR("Failed to allocate status page\n");
2307 obj_priv = obj->driver_private;
2309 ret = i915_gem_object_pin(obj, 4096);
2311 drm_gem_object_unreference(obj);
2315 dev_priv->status_gfx_addr = obj_priv->gtt_offset;
2316 dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset;
2317 dev_priv->hws_map.size = 4096;
2318 dev_priv->hws_map.type = 0;
2319 dev_priv->hws_map.flags = 0;
2320 dev_priv->hws_map.mtrr = 0;
2322 drm_core_ioremap(&dev_priv->hws_map, dev);
2323 if (dev_priv->hws_map.handle == NULL) {
2324 DRM_ERROR("Failed to map status page.\n");
2325 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2326 drm_gem_object_unreference(obj);
2329 dev_priv->hws_obj = obj;
2330 dev_priv->hw_status_page = dev_priv->hws_map.handle;
2331 memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
2332 I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
2333 DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
2339 i915_gem_init_ringbuffer(struct drm_device *dev)
2341 drm_i915_private_t *dev_priv = dev->dev_private;
2342 struct drm_gem_object *obj;
2343 struct drm_i915_gem_object *obj_priv;
2346 ret = i915_gem_init_hws(dev);
2350 obj = drm_gem_object_alloc(dev, 128 * 1024);
2352 DRM_ERROR("Failed to allocate ringbuffer\n");
2355 obj_priv = obj->driver_private;
2357 ret = i915_gem_object_pin(obj, 4096);
2359 drm_gem_object_unreference(obj);
2363 /* Set up the kernel mapping for the ring. */
2364 dev_priv->ring.Size = obj->size;
2365 dev_priv->ring.tail_mask = obj->size - 1;
2367 dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
2368 dev_priv->ring.map.size = obj->size;
2369 dev_priv->ring.map.type = 0;
2370 dev_priv->ring.map.flags = 0;
2371 dev_priv->ring.map.mtrr = 0;
2373 drm_core_ioremap(&dev_priv->ring.map, dev);
2374 if (dev_priv->ring.map.handle == NULL) {
2375 DRM_ERROR("Failed to map ringbuffer.\n");
2376 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2377 drm_gem_object_unreference(obj);
2380 dev_priv->ring.ring_obj = obj;
2381 dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
2383 /* Stop the ring if it's running. */
2384 I915_WRITE(LP_RING + RING_LEN, 0);
2385 I915_WRITE(LP_RING + RING_HEAD, 0);
2386 I915_WRITE(LP_RING + RING_TAIL, 0);
2387 I915_WRITE(LP_RING + RING_START, 0);
2389 /* Initialize the ring. */
2390 I915_WRITE(LP_RING + RING_START, obj_priv->gtt_offset);
2391 I915_WRITE(LP_RING + RING_LEN,
2392 ((obj->size - 4096) & RING_NR_PAGES) |
2396 /* Update our cache of the ring state */
2397 i915_kernel_lost_context(dev);
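/*
 * Added note on the RING_LEN write above: the length field is believed to
 * encode the ring size in 4 KiB pages minus one, hence (obj->size - 4096)
 * masked with RING_NR_PAGES.
 */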
2403 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
2405 drm_i915_private_t *dev_priv = dev->dev_private;
2407 if (dev_priv->ring.ring_obj == NULL)
2410 drm_core_ioremapfree(&dev_priv->ring.map, dev);
2412 i915_gem_object_unpin(dev_priv->ring.ring_obj);
2413 drm_gem_object_unreference(dev_priv->ring.ring_obj);
2414 dev_priv->ring.ring_obj = NULL;
2415 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
2417 if (dev_priv->hws_obj != NULL) {
2418 i915_gem_object_unpin(dev_priv->hws_obj);
2419 drm_gem_object_unreference(dev_priv->hws_obj);
2420 dev_priv->hws_obj = NULL;
2421 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
2423 /* Write high address into HWS_PGA when disabling. */
2424 I915_WRITE(HWS_PGA, 0x1ffff000);
2429 i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
2430 struct drm_file *file_priv)
2432 drm_i915_private_t *dev_priv = dev->dev_private;
2435 if (dev_priv->mm.wedged) {
2436 DRM_ERROR("Reenabling wedged hardware, good luck\n");
2437 dev_priv->mm.wedged = 0;
2440 ret = i915_gem_init_ringbuffer(dev);
2444 mutex_lock(&dev->struct_mutex);
2445 BUG_ON(!list_empty(&dev_priv->mm.active_list));
2446 BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
2447 BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
2448 BUG_ON(!list_empty(&dev_priv->mm.request_list));
2449 dev_priv->mm.suspended = 0;
2450 mutex_unlock(&dev->struct_mutex);
2455 i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
2456 struct drm_file *file_priv)
2460 mutex_lock(&dev->struct_mutex);
2461 ret = i915_gem_idle(dev);
2463 i915_gem_cleanup_ringbuffer(dev);
2464 mutex_unlock(&dev->struct_mutex);
2470 i915_gem_lastclose(struct drm_device *dev)
2473 drm_i915_private_t *dev_priv = dev->dev_private;
2475 mutex_lock(&dev->struct_mutex);
2477 if (dev_priv->ring.ring_obj != NULL) {
2478 ret = i915_gem_idle(dev);
2480 DRM_ERROR("failed to idle hardware: %d\n", ret);
2482 i915_gem_cleanup_ringbuffer(dev);
2485 mutex_unlock(&dev->struct_mutex);
2488 void i915_gem_load(struct drm_device *dev)
2490 drm_i915_private_t *dev_priv = dev->dev_private;
2492 INIT_LIST_HEAD(&dev_priv->mm.active_list);
2493 INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
2494 INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
2495 INIT_LIST_HEAD(&dev_priv->mm.request_list);
2496 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
2497 i915_gem_retire_work_handler);
2498 dev_priv->mm.next_gem_seqno = 1;
2500 i915_gem_detect_bit_6_swizzle(dev);