2 * Copyright © 2008,2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Chris Wilson <chris@chris-wilson.co.uk>
30 #include <drm/i915_drm.h>
32 #include "i915_trace.h"
33 #include "intel_drv.h"
34 #include <linux/dma_remapping.h>
35 #include <linux/uaccess.h>
37 #define __EXEC_OBJECT_HAS_PIN (1<<31)
38 #define __EXEC_OBJECT_HAS_FENCE (1<<30)
39 #define __EXEC_OBJECT_NEEDS_MAP (1<<29)
40 #define __EXEC_OBJECT_NEEDS_BIAS (1<<28)
42 #define BATCH_OFFSET_BIAS (256*1024)
45 struct list_head vmas;
48 struct i915_vma *lut[0];
49 struct hlist_head buckets[0];
53 static struct eb_vmas *
54 eb_create(struct drm_i915_gem_execbuffer2 *args)
56 struct eb_vmas *eb = NULL;
58 if (args->flags & I915_EXEC_HANDLE_LUT) {
59 unsigned size = args->buffer_count;
60 size *= sizeof(struct i915_vma *);
61 size += sizeof(struct eb_vmas);
62 eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
66 unsigned size = args->buffer_count;
67 unsigned count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
68 BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
69 while (count > 2*size)
71 eb = kzalloc(count*sizeof(struct hlist_head) +
72 sizeof(struct eb_vmas),
79 eb->and = -args->buffer_count;
81 INIT_LIST_HEAD(&eb->vmas);
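/*
 * Illustrative sizing sketch (a hypothetical helper, not part of this file):
 * how the hash-bucket count chosen above shrinks for small batches, assuming
 * a 4 KiB PAGE_SIZE and an 8-byte struct hlist_head on a 64-bit build.
 *
 *	static unsigned int eb_example_bucket_count(unsigned int buffer_count)
 *	{
 *		unsigned int count = 4096 / 8 / 2;	// 256 buckets to start with
 *
 *		while (count > 2 * buffer_count)
 *			count >>= 1;			// e.g. 20 buffers -> 32 buckets
 *
 *		return count;	// count - 1 becomes the mask used as "handle & eb->and"
 *	}
 */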
86 eb_reset(struct eb_vmas *eb)
89 memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
93 eb_lookup_vmas(struct eb_vmas *eb,
94 struct drm_i915_gem_exec_object2 *exec,
95 const struct drm_i915_gem_execbuffer2 *args,
96 struct i915_address_space *vm,
97 struct drm_file *file)
99 struct drm_i915_gem_object *obj;
100 struct list_head objects;
103 INIT_LIST_HEAD(&objects);
104 spin_lock(&file->table_lock);
105 /* Grab a reference to the object and release the lock so we can look up
106 * or create the VMA without using GFP_ATOMIC. */
107 for (i = 0; i < args->buffer_count; i++) {
108 obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
110 spin_unlock(&file->table_lock);
111 DRM_DEBUG("Invalid object handle %d at index %d\n",
117 if (!list_empty(&obj->obj_exec_link)) {
118 spin_unlock(&file->table_lock);
119 DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
120 obj, exec[i].handle, i);
125 drm_gem_object_reference(&obj->base);
126 list_add_tail(&obj->obj_exec_link, &objects);
128 spin_unlock(&file->table_lock);
131 while (!list_empty(&objects)) {
132 struct i915_vma *vma;
134 obj = list_first_entry(&objects,
135 struct drm_i915_gem_object,
139 * NOTE: We can leak any vmas created here when something fails
140 * later on. But that's no issue since vma_unbind can deal with
141 * vmas that are not actually bound. And since only
142 * lookup_or_create exists as an interface to get at the vma
143 * from the (obj, vm) pair, we don't run the risk of creating
144 * duplicated vmas for the same vm.
146 vma = i915_gem_obj_lookup_or_create_vma(obj, vm);
148 DRM_DEBUG("Failed to lookup VMA\n");
153 /* Transfer ownership from the objects list to the vmas list. */
154 list_add_tail(&vma->exec_list, &eb->vmas);
155 list_del_init(&obj->obj_exec_link);
157 vma->exec_entry = &exec[i];
161 uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
162 vma->exec_handle = handle;
163 hlist_add_head(&vma->exec_node,
164 &eb->buckets[handle & eb->and]);
173 while (!list_empty(&objects)) {
174 obj = list_first_entry(&objects,
175 struct drm_i915_gem_object,
177 list_del_init(&obj->obj_exec_link);
178 drm_gem_object_unreference(&obj->base);
181 * Objects already transferred to the vmas list will be unreferenced by
188 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
191 if (handle >= -eb->and)
193 return eb->lut[handle];
195 struct hlist_head *head;
196 struct hlist_node *node;
198 head = &eb->buckets[handle & eb->and];
199 hlist_for_each(node, head) {
200 struct i915_vma *vma;
202 vma = hlist_entry(node, struct i915_vma, exec_node);
203 if (vma->exec_handle == handle)
211 i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
213 struct drm_i915_gem_exec_object2 *entry;
214 struct drm_i915_gem_object *obj = vma->obj;
216 if (!drm_mm_node_allocated(&vma->node))
219 entry = vma->exec_entry;
221 if (entry->flags & __EXEC_OBJECT_HAS_FENCE)
222 i915_gem_object_unpin_fence(obj);
224 if (entry->flags & __EXEC_OBJECT_HAS_PIN)
227 entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
230 static void eb_destroy(struct eb_vmas *eb)
232 while (!list_empty(&eb->vmas)) {
233 struct i915_vma *vma;
235 vma = list_first_entry(&eb->vmas,
238 list_del_init(&vma->exec_list);
239 i915_gem_execbuffer_unreserve_vma(vma);
240 drm_gem_object_unreference(&vma->obj->base);
245 static inline int use_cpu_reloc(struct drm_i915_gem_object *obj)
247 return (HAS_LLC(obj->base.dev) ||
248 obj->base.write_domain == I915_GEM_DOMAIN_CPU ||
249 obj->cache_level != I915_CACHE_NONE);
253 relocate_entry_cpu(struct drm_i915_gem_object *obj,
254 struct drm_i915_gem_relocation_entry *reloc,
255 uint64_t target_offset)
257 struct drm_device *dev = obj->base.dev;
258 uint32_t page_offset = offset_in_page(reloc->offset);
259 uint64_t delta = reloc->delta + target_offset;
263 ret = i915_gem_object_set_to_cpu_domain(obj, true);
267 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
268 reloc->offset >> PAGE_SHIFT));
269 *(uint32_t *)(vaddr + page_offset) = lower_32_bits(delta);
271 if (INTEL_INFO(dev)->gen >= 8) {
272 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
274 if (page_offset == 0) {
275 kunmap_atomic(vaddr);
276 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
277 (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
280 *(uint32_t *)(vaddr + page_offset) = upper_32_bits(delta);
283 kunmap_atomic(vaddr);
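/*
 * Worked example of the write performed above (a sketch, not driver code):
 * the value patched into the batch is the target's final GPU address plus
 * the user-supplied delta. On gen8+ it is a 64-bit quantity whose two
 * halves may land on different pages, hence the second kmap_atomic().
 *
 *	uint64_t value = target_offset + reloc->delta;
 *	uint32_t lo = lower_32_bits(value);	// written at reloc->offset
 *	uint32_t hi = upper_32_bits(value);	// gen8+: written at reloc->offset + 4
 */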
289 relocate_entry_gtt(struct drm_i915_gem_object *obj,
290 struct drm_i915_gem_relocation_entry *reloc,
291 uint64_t target_offset)
293 struct drm_device *dev = obj->base.dev;
294 struct drm_i915_private *dev_priv = dev->dev_private;
295 uint64_t delta = reloc->delta + target_offset;
297 void __iomem *reloc_page;
300 ret = i915_gem_object_set_to_gtt_domain(obj, true);
304 ret = i915_gem_object_put_fence(obj);
308 /* Map the page containing the relocation we're going to perform. */
309 offset = i915_gem_obj_ggtt_offset(obj);
310 offset += reloc->offset;
311 reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
313 iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
315 if (INTEL_INFO(dev)->gen >= 8) {
316 offset += sizeof(uint32_t);
318 if (offset_in_page(offset) == 0) {
319 io_mapping_unmap_atomic(reloc_page);
321 io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
325 iowrite32(upper_32_bits(delta),
326 reloc_page + offset_in_page(offset));
329 io_mapping_unmap_atomic(reloc_page);
335 clflush_write32(void *addr, uint32_t value)
337 /* This is not a fast path, so KISS. */
338 drm_clflush_virt_range(addr, sizeof(uint32_t));
339 *(uint32_t *)addr = value;
340 drm_clflush_virt_range(addr, sizeof(uint32_t));
344 relocate_entry_clflush(struct drm_i915_gem_object *obj,
345 struct drm_i915_gem_relocation_entry *reloc,
346 uint64_t target_offset)
348 struct drm_device *dev = obj->base.dev;
349 uint32_t page_offset = offset_in_page(reloc->offset);
350 uint64_t delta = (int)reloc->delta + target_offset;
354 ret = i915_gem_object_set_to_gtt_domain(obj, true);
358 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
359 reloc->offset >> PAGE_SHIFT));
360 clflush_write32(vaddr + page_offset, lower_32_bits(delta));
362 if (INTEL_INFO(dev)->gen >= 8) {
363 page_offset = offset_in_page(page_offset + sizeof(uint32_t));
365 if (page_offset == 0) {
366 kunmap_atomic(vaddr);
367 vaddr = kmap_atomic(i915_gem_object_get_page(obj,
368 (reloc->offset + sizeof(uint32_t)) >> PAGE_SHIFT));
371 clflush_write32(vaddr + page_offset, upper_32_bits(delta));
374 kunmap_atomic(vaddr);
380 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
382 struct drm_i915_gem_relocation_entry *reloc)
384 struct drm_device *dev = obj->base.dev;
385 struct drm_gem_object *target_obj;
386 struct drm_i915_gem_object *target_i915_obj;
387 struct i915_vma *target_vma;
388 uint64_t target_offset;
391 /* we already hold a reference to all valid objects */
392 target_vma = eb_get_vma(eb, reloc->target_handle);
393 if (unlikely(target_vma == NULL))
395 target_i915_obj = target_vma->obj;
396 target_obj = &target_vma->obj->base;
398 target_offset = target_vma->node.start;
400 /* Sandybridge PPGTT erratum: we need a global GTT mapping for MI and
401 * pipe_control writes because the GPU doesn't properly redirect them
402 * through the PPGTT for non-secure batchbuffers. */
403 if (unlikely(IS_GEN6(dev) &&
404 reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION)) {
405 ret = i915_vma_bind(target_vma, target_i915_obj->cache_level,
407 if (WARN_ONCE(ret, "Unexpected failure to bind target VMA!"))
411 /* Validate that the target is in a valid r/w GPU domain */
412 if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
413 DRM_DEBUG("reloc with multiple write domains: "
414 "obj %p target %d offset %d "
415 "read %08x write %08x",
416 obj, reloc->target_handle,
419 reloc->write_domain);
422 if (unlikely((reloc->write_domain | reloc->read_domains)
423 & ~I915_GEM_GPU_DOMAINS)) {
424 DRM_DEBUG("reloc with read/write non-GPU domains: "
425 "obj %p target %d offset %d "
426 "read %08x write %08x",
427 obj, reloc->target_handle,
430 reloc->write_domain);
434 target_obj->pending_read_domains |= reloc->read_domains;
435 target_obj->pending_write_domain |= reloc->write_domain;
437 /* If the relocation already has the right value in it, no
438 * more work needs to be done.
440 if (target_offset == reloc->presumed_offset)
443 /* Check that the relocation address is valid... */
444 if (unlikely(reloc->offset >
445 obj->base.size - (INTEL_INFO(dev)->gen >= 8 ? 8 : 4))) {
446 DRM_DEBUG("Relocation beyond object bounds: "
447 "obj %p target %d offset %d size %d.\n",
448 obj, reloc->target_handle,
450 (int) obj->base.size);
453 if (unlikely(reloc->offset & 3)) {
454 DRM_DEBUG("Relocation not 4-byte aligned: "
455 "obj %p target %d offset %d.\n",
456 obj, reloc->target_handle,
457 (int) reloc->offset);
461 /* We can't wait for rendering with pagefaults disabled */
462 if (obj->active && pagefault_disabled())
465 if (use_cpu_reloc(obj))
466 ret = relocate_entry_cpu(obj, reloc, target_offset);
467 else if (obj->map_and_fenceable)
468 ret = relocate_entry_gtt(obj, reloc, target_offset);
469 else if (cpu_has_clflush)
470 ret = relocate_entry_clflush(obj, reloc, target_offset);
472 WARN_ONCE(1, "Impossible case in relocation handling\n");
479 /* and update the user's relocation entry */
480 reloc->presumed_offset = target_offset;
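/*
 * For reference, a minimal userspace-side relocation entry that satisfies
 * the checks above (an illustrative sketch; handles and offsets are made up):
 *
 *	struct drm_i915_gem_relocation_entry reloc = {
 *		.target_handle   = target_bo_handle,	// LUT index if I915_EXEC_HANDLE_LUT
 *		.offset          = 0x100,		// 4-byte aligned, within the object
 *		.delta           = 0,			// added to the target's GPU address
 *		.presumed_offset = -1,			// mismatch forces the kernel to patch
 *		.read_domains    = I915_GEM_DOMAIN_RENDER,
 *		.write_domain    = 0,			// at most one, GPU domains only
 *	};
 */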
486 i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
489 #define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
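/*
 * Illustrative arithmetic (assuming the 32-byte drm_i915_gem_relocation_entry
 * of this uapi): N_RELOC(512) == 512 / 32 == 16, so the code below copies and
 * patches user relocations through the stack buffer in chunks of at most 16
 * entries at a time.
 */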
490 struct drm_i915_gem_relocation_entry stack_reloc[N_RELOC(512)];
491 struct drm_i915_gem_relocation_entry __user *user_relocs;
492 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
495 user_relocs = u64_to_user_ptr(entry->relocs_ptr);
497 remain = entry->relocation_count;
499 struct drm_i915_gem_relocation_entry *r = stack_reloc;
501 if (count > ARRAY_SIZE(stack_reloc))
502 count = ARRAY_SIZE(stack_reloc);
505 if (__copy_from_user_inatomic(r, user_relocs, count*sizeof(r[0])))
509 u64 offset = r->presumed_offset;
511 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, r);
515 if (r->presumed_offset != offset &&
516 __put_user(r->presumed_offset, &user_relocs->presumed_offset)) {
530 i915_gem_execbuffer_relocate_vma_slow(struct i915_vma *vma,
532 struct drm_i915_gem_relocation_entry *relocs)
534 const struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
537 for (i = 0; i < entry->relocation_count; i++) {
538 ret = i915_gem_execbuffer_relocate_entry(vma->obj, eb, &relocs[i]);
547 i915_gem_execbuffer_relocate(struct eb_vmas *eb)
549 struct i915_vma *vma;
552 /* This is the fast path and we cannot handle a pagefault whilst
553 * holding the struct mutex lest the user pass in the relocations
554 * contained within a mmapped bo. In such a case the page
555 * fault handler would call i915_gem_fault() and we would try to
556 * acquire the struct mutex again. Obviously this is bad and so
557 * lockdep complains vehemently.
560 list_for_each_entry(vma, &eb->vmas, exec_list) {
561 ret = i915_gem_execbuffer_relocate_vma(vma, eb);
570 static bool only_mappable_for_reloc(unsigned int flags)
572 return (flags & (EXEC_OBJECT_NEEDS_FENCE | __EXEC_OBJECT_NEEDS_MAP)) ==
573 __EXEC_OBJECT_NEEDS_MAP;
577 i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
578 struct intel_engine_cs *ring,
581 struct drm_i915_gem_object *obj = vma->obj;
582 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
587 if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
590 if (!drm_mm_node_allocated(&vma->node)) {
591 /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
592 * limit address to the first 4GBs for unflagged objects.
594 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0)
595 flags |= PIN_ZONE_4G;
596 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP)
597 flags |= PIN_GLOBAL | PIN_MAPPABLE;
598 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS)
599 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
600 if ((flags & PIN_MAPPABLE) == 0)
604 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
605 if ((ret == -ENOSPC || ret == -E2BIG) &&
606 only_mappable_for_reloc(entry->flags))
607 ret = i915_gem_object_pin(obj, vma->vm,
609 flags & ~PIN_MAPPABLE);
613 entry->flags |= __EXEC_OBJECT_HAS_PIN;
615 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
616 ret = i915_gem_object_get_fence(obj);
620 if (i915_gem_object_pin_fence(obj))
621 entry->flags |= __EXEC_OBJECT_HAS_FENCE;
624 if (entry->offset != vma->node.start) {
625 entry->offset = vma->node.start;
629 if (entry->flags & EXEC_OBJECT_WRITE) {
630 obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
631 obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
638 need_reloc_mappable(struct i915_vma *vma)
640 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
642 if (entry->relocation_count == 0)
645 if (!i915_is_ggtt(vma->vm))
648 /* See also use_cpu_reloc() */
649 if (HAS_LLC(vma->obj->base.dev))
652 if (vma->obj->base.write_domain == I915_GEM_DOMAIN_CPU)
659 eb_vma_misplaced(struct i915_vma *vma)
661 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
662 struct drm_i915_gem_object *obj = vma->obj;
664 WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
665 !i915_is_ggtt(vma->vm));
667 if (entry->alignment &&
668 vma->node.start & (entry->alignment - 1))
671 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS &&
672 vma->node.start < BATCH_OFFSET_BIAS)
675 /* avoid costly ping-pong once a batch bo has ended up non-mappable */
676 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable)
677 return !only_mappable_for_reloc(entry->flags);
679 if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 &&
680 (vma->node.start + vma->node.size - 1) >> 32)
687 i915_gem_execbuffer_reserve(struct intel_engine_cs *ring,
688 struct list_head *vmas,
689 struct intel_context *ctx,
692 struct drm_i915_gem_object *obj;
693 struct i915_vma *vma;
694 struct i915_address_space *vm;
695 struct list_head ordered_vmas;
696 bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
699 i915_gem_retire_requests_ring(ring);
701 vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
703 INIT_LIST_HEAD(&ordered_vmas);
704 while (!list_empty(vmas)) {
705 struct drm_i915_gem_exec_object2 *entry;
706 bool need_fence, need_mappable;
708 vma = list_first_entry(vmas, struct i915_vma, exec_list);
710 entry = vma->exec_entry;
712 if (ctx->flags & CONTEXT_NO_ZEROMAP)
713 entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
715 if (!has_fenced_gpu_access)
716 entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
718 entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
719 obj->tiling_mode != I915_TILING_NONE;
720 need_mappable = need_fence || need_reloc_mappable(vma);
723 entry->flags |= __EXEC_OBJECT_NEEDS_MAP;
724 list_move(&vma->exec_list, &ordered_vmas);
726 list_move_tail(&vma->exec_list, &ordered_vmas);
728 obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
729 obj->base.pending_write_domain = 0;
731 list_splice(&ordered_vmas, vmas);
733 /* Attempt to pin all of the buffers into the GTT.
734 * This is done in 3 phases:
736 * 1a. Unbind all objects that do not match the GTT constraints for
737 * the execbuffer (fenceable, mappable, alignment etc).
738 * 1b. Increment pin count for already bound objects.
739 * 2. Bind new objects.
740 * 3. Decrement pin count.
742 * This avoids unnecessary unbinding of later objects in order to make
743 * room for the earlier objects *unless* we need to defragment.
749 /* Unbind any ill-fitting objects or pin. */
750 list_for_each_entry(vma, vmas, exec_list) {
751 if (!drm_mm_node_allocated(&vma->node))
754 if (eb_vma_misplaced(vma))
755 ret = i915_vma_unbind(vma);
757 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
762 /* Bind fresh objects */
763 list_for_each_entry(vma, vmas, exec_list) {
764 if (drm_mm_node_allocated(&vma->node))
767 ret = i915_gem_execbuffer_reserve_vma(vma, ring, need_relocs);
773 if (ret != -ENOSPC || retry++)
776 /* Decrement pin count for bound objects */
777 list_for_each_entry(vma, vmas, exec_list)
778 i915_gem_execbuffer_unreserve_vma(vma);
780 ret = i915_gem_evict_vm(vm, true);
787 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
788 struct drm_i915_gem_execbuffer2 *args,
789 struct drm_file *file,
790 struct intel_engine_cs *ring,
792 struct drm_i915_gem_exec_object2 *exec,
793 struct intel_context *ctx)
795 struct drm_i915_gem_relocation_entry *reloc;
796 struct i915_address_space *vm;
797 struct i915_vma *vma;
801 unsigned count = args->buffer_count;
803 vm = list_first_entry(&eb->vmas, struct i915_vma, exec_list)->vm;
805 /* We may process another execbuffer during the unlock... */
806 while (!list_empty(&eb->vmas)) {
807 vma = list_first_entry(&eb->vmas, struct i915_vma, exec_list);
808 list_del_init(&vma->exec_list);
809 i915_gem_execbuffer_unreserve_vma(vma);
810 drm_gem_object_unreference(&vma->obj->base);
813 mutex_unlock(&dev->struct_mutex);
816 for (i = 0; i < count; i++)
817 total += exec[i].relocation_count;
819 reloc_offset = drm_malloc_ab(count, sizeof(*reloc_offset));
820 reloc = drm_malloc_ab(total, sizeof(*reloc));
821 if (reloc == NULL || reloc_offset == NULL) {
822 drm_free_large(reloc);
823 drm_free_large(reloc_offset);
824 mutex_lock(&dev->struct_mutex);
829 for (i = 0; i < count; i++) {
830 struct drm_i915_gem_relocation_entry __user *user_relocs;
831 u64 invalid_offset = (u64)-1;
834 user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
836 if (copy_from_user(reloc+total, user_relocs,
837 exec[i].relocation_count * sizeof(*reloc))) {
839 mutex_lock(&dev->struct_mutex);
843 /* As we do not update the known relocation offsets after
844 * relocating (due to the complexities in lock handling),
845 * we need to mark them as invalid now so that we force the
846 * relocation processing next time. Otherwise, if the target
847 * object were evicted and then rebound into its old
848 * presumed_offset before the next execbuffer, we would
849 * make the mistake of assuming that the relocations were
850 * still valid.
852 for (j = 0; j < exec[i].relocation_count; j++) {
853 if (__copy_to_user(&user_relocs[j].presumed_offset,
855 sizeof(invalid_offset))) {
857 mutex_lock(&dev->struct_mutex);
862 reloc_offset[i] = total;
863 total += exec[i].relocation_count;
866 ret = i915_mutex_lock_interruptible(dev);
868 mutex_lock(&dev->struct_mutex);
872 /* reacquire the objects */
874 ret = eb_lookup_vmas(eb, exec, args, vm, file);
878 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
879 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
883 list_for_each_entry(vma, &eb->vmas, exec_list) {
884 int offset = vma->exec_entry - exec;
885 ret = i915_gem_execbuffer_relocate_vma_slow(vma, eb,
886 reloc + reloc_offset[offset]);
891 /* Leave the user relocations as they are; this is the painfully slow path,
892 * and we want to avoid the complication of dropping the lock whilst
893 * having buffers reserved in the aperture and so causing spurious
894 * ENOSPC for random operations.
898 drm_free_large(reloc);
899 drm_free_large(reloc_offset);
904 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
905 struct list_head *vmas)
907 const unsigned other_rings = ~intel_ring_flag(req->ring);
908 struct i915_vma *vma;
909 uint32_t flush_domains = 0;
910 bool flush_chipset = false;
913 list_for_each_entry(vma, vmas, exec_list) {
914 struct drm_i915_gem_object *obj = vma->obj;
916 if (obj->active & other_rings) {
917 ret = i915_gem_object_sync(obj, req->ring, &req);
922 if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
923 flush_chipset |= i915_gem_clflush_object(obj, false);
925 flush_domains |= obj->base.write_domain;
929 i915_gem_chipset_flush(req->ring->dev);
931 if (flush_domains & I915_GEM_DOMAIN_GTT)
934 /* Unconditionally invalidate gpu caches and ensure that we do flush
935 * any residual writes from the previous batch.
937 return intel_ring_invalidate_all_caches(req);
941 i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
943 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
946 /* Kernel clipping was a DRI1 misfeature */
947 if (exec->num_cliprects || exec->cliprects_ptr)
950 if (exec->DR4 == 0xffffffff) {
951 DRM_DEBUG("UXA submitting garbage DR4, fixing up\n");
954 if (exec->DR1 || exec->DR4)
957 if ((exec->batch_start_offset | exec->batch_len) & 0x7)
964 validate_exec_list(struct drm_device *dev,
965 struct drm_i915_gem_exec_object2 *exec,
968 unsigned relocs_total = 0;
969 unsigned relocs_max = UINT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
970 unsigned invalid_flags;
973 invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
974 if (USES_FULL_PPGTT(dev))
975 invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
977 for (i = 0; i < count; i++) {
978 char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
979 int length; /* limited by fault_in_pages_readable() */
981 if (exec[i].flags & invalid_flags)
984 if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
987 /* First check for malicious input causing overflow in
988 * the worst case where we need to allocate the entire
989 * relocation tree as a single array.
991 if (exec[i].relocation_count > relocs_max - relocs_total)
993 relocs_total += exec[i].relocation_count;
995 length = exec[i].relocation_count *
996 sizeof(struct drm_i915_gem_relocation_entry);
998 * We must check that the entire relocation array is safe
999 * to read, but since we may need to update the presumed
1000 * offsets during execution, check for full write access.
1002 if (!access_ok(VERIFY_WRITE, ptr, length))
1005 if (likely(!i915.prefault_disable)) {
1006 if (fault_in_multipages_readable(ptr, length))
1014 static struct intel_context *
1015 i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
1016 struct intel_engine_cs *ring, const u32 ctx_id)
1018 struct intel_context *ctx = NULL;
1019 struct i915_ctx_hang_stats *hs;
1021 if (ring->id != RCS && ctx_id != DEFAULT_CONTEXT_HANDLE)
1022 return ERR_PTR(-EINVAL);
1024 ctx = i915_gem_context_get(file->driver_priv, ctx_id);
1028 hs = &ctx->hang_stats;
1030 DRM_DEBUG("Context %u tried to submit while banned\n", ctx_id);
1031 return ERR_PTR(-EIO);
1034 if (i915.enable_execlists && !ctx->engine[ring->id].state) {
1035 int ret = intel_lr_context_deferred_alloc(ctx, ring);
1037 DRM_DEBUG("Could not create LRC %u: %d\n", ctx_id, ret);
1038 return ERR_PTR(ret);
1046 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
1047 struct drm_i915_gem_request *req)
1049 struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
1050 struct i915_vma *vma;
1052 list_for_each_entry(vma, vmas, exec_list) {
1053 struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
1054 struct drm_i915_gem_object *obj = vma->obj;
1055 u32 old_read = obj->base.read_domains;
1056 u32 old_write = obj->base.write_domain;
1058 obj->dirty = 1; /* be paranoid */
1059 obj->base.write_domain = obj->base.pending_write_domain;
1060 if (obj->base.write_domain == 0)
1061 obj->base.pending_read_domains |= obj->base.read_domains;
1062 obj->base.read_domains = obj->base.pending_read_domains;
1064 i915_vma_move_to_active(vma, req);
1065 if (obj->base.write_domain) {
1066 i915_gem_request_assign(&obj->last_write_req, req);
1068 intel_fb_obj_invalidate(obj, ORIGIN_CS);
1070 /* update for the implicit flush after a batch */
1071 obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
1073 if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
1074 i915_gem_request_assign(&obj->last_fenced_req, req);
1075 if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
1076 struct drm_i915_private *dev_priv = to_i915(ring->dev);
1077 list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
1078 &dev_priv->mm.fence_list);
1082 trace_i915_gem_object_change_domain(obj, old_read, old_write);
1087 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
1089 /* Unconditionally force add_request to emit a full flush. */
1090 params->ring->gpu_caches_dirty = true;
1092 /* Add a breadcrumb for the completion of the batch buffer */
1093 __i915_add_request(params->request, params->batch_obj, true);
1097 i915_reset_gen7_sol_offsets(struct drm_device *dev,
1098 struct drm_i915_gem_request *req)
1100 struct intel_engine_cs *ring = req->ring;
1101 struct drm_i915_private *dev_priv = dev->dev_private;
1104 if (!IS_GEN7(dev) || ring != &dev_priv->ring[RCS]) {
1105 DRM_DEBUG("sol reset is gen7/rcs only\n");
1109 ret = intel_ring_begin(req, 4 * 3);
1113 for (i = 0; i < 4; i++) {
1114 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1115 intel_ring_emit(ring, GEN7_SO_WRITE_OFFSET(i));
1116 intel_ring_emit(ring, 0);
1119 intel_ring_advance(ring);
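/*
 * Descriptive note (added for clarity): intel_ring_begin(req, 4 * 3) above
 * reserves twelve dwords, i.e. four 3-dword MI_LOAD_REGISTER_IMM(1) packets,
 * one per GEN7_SO_WRITE_OFFSET(i) register being cleared to zero.
 */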
1124 static struct i915_vma*
1125 shadow_batch_pin(struct drm_i915_gem_object *obj, struct i915_address_space *vm)
1127 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
1128 struct i915_address_space *pin_vm = vm;
1133 * PPGTT-backed shadow buffers must be mapped RO, to prevent
1134 * post-scan tampering.
1136 if (CMDPARSER_USES_GGTT(dev_priv)) {
1138 pin_vm = &dev_priv->gtt.base;
1139 } else if (vm->has_read_only) {
1143 DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
1144 return ERR_PTR(-EINVAL);
1147 ret = i915_gem_object_pin(obj, pin_vm, 0, flags);
1149 return ERR_PTR(ret);
1151 return i915_gem_obj_to_vma(obj, pin_vm);
1154 static struct drm_i915_gem_object*
1155 i915_gem_execbuffer_parse(struct intel_context *ctx,
1156 struct intel_engine_cs *ring,
1157 struct drm_i915_gem_exec_object2 *shadow_exec_entry,
1159 struct i915_address_space *vm,
1160 struct drm_i915_gem_object *batch_obj,
1161 u32 batch_start_offset,
1164 struct drm_i915_gem_object *shadow_batch_obj;
1165 struct i915_vma *vma;
1166 struct i915_vma *user_vma = list_entry(eb->vmas.prev,
1167 typeof(*user_vma), exec_list);
1169 u64 shadow_batch_start;
1172 shadow_batch_obj = i915_gem_batch_pool_get(&ring->batch_pool,
1173 PAGE_ALIGN(batch_len));
1174 if (IS_ERR(shadow_batch_obj))
1175 return shadow_batch_obj;
1177 vma = shadow_batch_pin(shadow_batch_obj, vm);
1183 batch_start = user_vma->node.start + batch_start_offset;
1185 shadow_batch_start = vma->node.start;
1187 ret = i915_parse_cmds(ctx,
1194 shadow_batch_start);
1196 WARN_ON(vma->pin_count == 0);
1201 i915_gem_object_unpin_pages(shadow_batch_obj);
1203 memset(shadow_exec_entry, 0, sizeof(*shadow_exec_entry));
1205 vma->exec_entry = shadow_exec_entry;
1206 vma->exec_entry->flags = __EXEC_OBJECT_HAS_PIN;
1207 drm_gem_object_reference(&shadow_batch_obj->base);
1208 list_add_tail(&vma->exec_list, &eb->vmas);
1210 shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
1212 return shadow_batch_obj;
1215 i915_gem_object_unpin_pages(shadow_batch_obj);
1218 * Unsafe GGTT-backed buffers can still be submitted safely
1220 * For PPGTT backing, however, we have no choice but to forcibly
1221 * reject unsafe buffers.
1223 if (CMDPARSER_USES_GGTT(batch_obj->base.dev) && (ret == -EACCES))
1226 return ERR_PTR(ret);
1230 i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
1231 struct drm_i915_gem_execbuffer2 *args,
1232 struct list_head *vmas)
1234 struct drm_device *dev = params->dev;
1235 struct intel_engine_cs *ring = params->ring;
1236 struct drm_i915_private *dev_priv = dev->dev_private;
1237 u64 exec_start, exec_len;
1242 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
1246 ret = i915_switch_context(params->request);
1250 WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id),
1251 "%s didn't clear reload\n", ring->name);
1253 instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
1254 instp_mask = I915_EXEC_CONSTANTS_MASK;
1255 switch (instp_mode) {
1256 case I915_EXEC_CONSTANTS_REL_GENERAL:
1257 case I915_EXEC_CONSTANTS_ABSOLUTE:
1258 case I915_EXEC_CONSTANTS_REL_SURFACE:
1259 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) {
1260 DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
1264 if (instp_mode != dev_priv->relative_constants_mode) {
1265 if (INTEL_INFO(dev)->gen < 4) {
1266 DRM_DEBUG("no rel constants on pre-gen4\n");
1270 if (INTEL_INFO(dev)->gen > 5 &&
1271 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
1272 DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
1276 /* The HW changed the meaning on this bit on gen6 */
1277 if (INTEL_INFO(dev)->gen >= 6)
1278 instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
1282 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
1286 if (ring == &dev_priv->ring[RCS] &&
1287 instp_mode != dev_priv->relative_constants_mode) {
1288 ret = intel_ring_begin(params->request, 4);
1292 intel_ring_emit(ring, MI_NOOP);
1293 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
1294 intel_ring_emit(ring, INSTPM);
1295 intel_ring_emit(ring, instp_mask << 16 | instp_mode);
1296 intel_ring_advance(ring);
1298 dev_priv->relative_constants_mode = instp_mode;
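/*
 * Descriptive note (added for clarity): INSTPM is a masked register, so the
 * value emitted above carries the write-enable mask in the upper 16 bits and
 * the new mode bits in the lower 16, i.e. (instp_mask << 16 | instp_mode).
 */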
1301 if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
1302 ret = i915_reset_gen7_sol_offsets(dev, params->request);
1307 exec_len = args->batch_len;
1308 exec_start = params->batch_obj_vm_offset +
1309 params->args_batch_start_offset;
1311 ret = ring->dispatch_execbuffer(params->request,
1312 exec_start, exec_len,
1313 params->dispatch_flags);
1317 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
1319 i915_gem_execbuffer_move_to_active(vmas, params->request);
1320 i915_gem_execbuffer_retire_commands(params);
1326 * Find one BSD ring to dispatch the corresponding BSD command.
1327 * The Ring ID is returned.
1329 static int gen8_dispatch_bsd_ring(struct drm_device *dev,
1330 struct drm_file *file)
1332 struct drm_i915_private *dev_priv = dev->dev_private;
1333 struct drm_i915_file_private *file_priv = file->driver_priv;
1335 /* Check whether the file_priv already uses a BSD ring */
1336 if (file_priv->bsd_ring)
1337 return file_priv->bsd_ring->id;
1339 /* If not, use the ping-pong mechanism to select one ring */
1342 mutex_lock(&dev->struct_mutex);
1343 if (dev_priv->mm.bsd_ring_dispatch_index == 0) {
1345 dev_priv->mm.bsd_ring_dispatch_index = 1;
1348 dev_priv->mm.bsd_ring_dispatch_index = 0;
1350 file_priv->bsd_ring = &dev_priv->ring[ring_id];
1351 mutex_unlock(&dev->struct_mutex);
1356 static struct drm_i915_gem_object *
1357 eb_get_batch(struct eb_vmas *eb)
1359 struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
1362 * SNA is doing fancy tricks with compressing batch buffers, which leads
1363 * to negative relocation deltas. Usually that works out ok since the
1364 * relocate address is still positive, except when the batch is placed
1365 * very low in the GTT. Ensure this doesn't happen.
1367 * Note that actual hangs have only been observed on gen7, but for
1368 * paranoia do it everywhere.
1370 vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
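/*
 * Worked example of the bias (illustrative, values made up): BATCH_OFFSET_BIAS
 * is 256 KiB, so a batch flagged with __EXEC_OBJECT_NEEDS_BIAS is pinned at a
 * GTT offset of at least 0x40000. A compressed batch relocating with, say,
 * delta == -0x1000 then still resolves to a positive GPU address
 * (0x40000 - 0x1000 == 0x3f000) instead of wrapping below zero.
 */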
1375 static inline bool use_cmdparser(const struct intel_engine_cs *ring,
1378 return ring->requires_cmd_parser ||
1379 (ring->using_cmd_parser && batch_len && USES_PPGTT(ring->dev));
1383 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
1384 struct drm_file *file,
1385 struct drm_i915_gem_execbuffer2 *args,
1386 struct drm_i915_gem_exec_object2 *exec)
1388 struct drm_i915_private *dev_priv = dev->dev_private;
1390 struct drm_i915_gem_object *batch_obj;
1391 struct drm_i915_gem_exec_object2 shadow_exec_entry;
1392 struct intel_engine_cs *ring;
1393 struct intel_context *ctx;
1394 struct i915_address_space *vm;
1395 struct i915_execbuffer_params params_master; /* XXX: will be removed later */
1396 struct i915_execbuffer_params *params = ¶ms_master;
1397 const u32 ctx_id = i915_execbuffer2_get_context_id(*args);
1402 if (!i915_gem_check_execbuffer(args))
1405 ret = validate_exec_list(dev, exec, args->buffer_count);
1410 if (args->flags & I915_EXEC_SECURE) {
1411 /* Return -EPERM to trigger fallback code on old binaries. */
1412 if (!HAS_SECURE_BATCHES(dev_priv))
1415 if (!file->is_master || !capable(CAP_SYS_ADMIN))
1418 dispatch_flags |= I915_DISPATCH_SECURE;
1420 if (args->flags & I915_EXEC_IS_PINNED)
1421 dispatch_flags |= I915_DISPATCH_PINNED;
1423 if ((args->flags & I915_EXEC_RING_MASK) > LAST_USER_RING) {
1424 DRM_DEBUG("execbuf with unknown ring: %d\n",
1425 (int)(args->flags & I915_EXEC_RING_MASK));
1429 if (((args->flags & I915_EXEC_RING_MASK) != I915_EXEC_BSD) &&
1430 ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
1431 DRM_DEBUG("execbuf with non bsd ring but with invalid "
1432 "bsd dispatch flags: %d\n", (int)(args->flags));
1436 if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_DEFAULT)
1437 ring = &dev_priv->ring[RCS];
1438 else if ((args->flags & I915_EXEC_RING_MASK) == I915_EXEC_BSD) {
1439 if (HAS_BSD2(dev)) {
1442 switch (args->flags & I915_EXEC_BSD_MASK) {
1443 case I915_EXEC_BSD_DEFAULT:
1444 ring_id = gen8_dispatch_bsd_ring(dev, file);
1445 ring = &dev_priv->ring[ring_id];
1447 case I915_EXEC_BSD_RING1:
1448 ring = &dev_priv->ring[VCS];
1450 case I915_EXEC_BSD_RING2:
1451 ring = &dev_priv->ring[VCS2];
1454 DRM_DEBUG("execbuf with unknown bsd ring: %d\n",
1455 (int)(args->flags & I915_EXEC_BSD_MASK));
1459 ring = &dev_priv->ring[VCS];
1461 ring = &dev_priv->ring[(args->flags & I915_EXEC_RING_MASK) - 1];
1463 if (!intel_ring_initialized(ring)) {
1464 DRM_DEBUG("execbuf with invalid ring: %d\n",
1465 (int)(args->flags & I915_EXEC_RING_MASK));
1469 if (args->buffer_count < 1) {
1470 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1474 if (args->flags & I915_EXEC_RESOURCE_STREAMER) {
1475 if (!HAS_RESOURCE_STREAMER(dev)) {
1476 DRM_DEBUG("RS is only allowed for Haswell, Gen8 and above\n");
1479 if (ring->id != RCS) {
1480 DRM_DEBUG("RS is not available on %s\n",
1485 dispatch_flags |= I915_DISPATCH_RS;
1488 intel_runtime_pm_get(dev_priv);
1490 ret = i915_mutex_lock_interruptible(dev);
1494 ctx = i915_gem_validate_context(dev, file, ring, ctx_id);
1496 mutex_unlock(&dev->struct_mutex);
1501 i915_gem_context_reference(ctx);
1504 vm = &ctx->ppgtt->base;
1506 vm = &dev_priv->gtt.base;
1508 memset(¶ms_master, 0x00, sizeof(params_master));
1510 eb = eb_create(args);
1512 i915_gem_context_unreference(ctx);
1513 mutex_unlock(&dev->struct_mutex);
1518 /* Look up object handles */
1519 ret = eb_lookup_vmas(eb, exec, args, vm, file);
1523 /* take note of the batch buffer before we might reorder the lists */
1524 batch_obj = eb_get_batch(eb);
1526 /* Move the objects en-masse into the GTT, evicting if necessary. */
1527 need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
1528 ret = i915_gem_execbuffer_reserve(ring, &eb->vmas, ctx, &need_relocs);
1532 /* The objects are in their final locations, apply the relocations. */
1534 ret = i915_gem_execbuffer_relocate(eb);
1536 if (ret == -EFAULT) {
1537 ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
1539 BUG_ON(!mutex_is_locked(&dev->struct_mutex));
1545 /* Set the pending read domains for the batch buffer to COMMAND */
1546 if (batch_obj->base.pending_write_domain) {
1547 DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
1552 params->args_batch_start_offset = args->batch_start_offset;
1553 if (use_cmdparser(ring, args->batch_len)) {
1554 struct drm_i915_gem_object *parsed_batch_obj;
1556 u32 batch_off = args->batch_start_offset;
1557 u32 batch_len = args->batch_len;
1559 batch_len = batch_obj->base.size - batch_off;
1561 parsed_batch_obj = i915_gem_execbuffer_parse(ctx, ring,
1567 if (IS_ERR(parsed_batch_obj)) {
1568 ret = PTR_ERR(parsed_batch_obj);
1573 * parsed_batch_obj == batch_obj means batch not fully parsed:
1574 * Accept, but don't promote to secure.
1576 if (parsed_batch_obj != batch_obj) {
1577 if (CMDPARSER_USES_GGTT(dev_priv))
1578 dispatch_flags |= I915_DISPATCH_SECURE;
1579 params->args_batch_start_offset = 0;
1580 batch_obj = parsed_batch_obj;
1584 batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
1586 /* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
1587 * batch" bit. Hence we need to pin secure batches into the global gtt.
1588 * hsw should have this fixed, but bdw mucks it up again. */
1589 if (dispatch_flags & I915_DISPATCH_SECURE) {
1591 * So on first glance it looks freaky that we pin the batch here
1592 * outside of the reservation loop. But:
1593 * - The batch is already pinned into the relevant ppgtt, so we
1594 * already have the backing storage fully allocated.
1595 * - No other BO uses the global gtt (well contexts, but meh),
1596 * so we don't really have issues with multiple objects not
1597 * fitting due to fragmentation.
1598 * So this is actually safe.
1600 ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
1604 params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
1606 params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
1608 /* Allocate a request for this batch buffer nice and early. */
1609 ret = i915_gem_request_alloc(ring, ctx, ¶ms->request);
1611 goto err_batch_unpin;
1613 ret = i915_gem_request_add_to_client(params->request, file);
1615 goto err_batch_unpin;
1618 * Save assorted stuff away to pass through to *_submission().
1619 * NB: This data should be 'persistent' and not local as it will be
1620 * kept around beyond the duration of the IOCTL once the GPU
1621 * scheduler arrives.
1624 params->file = file;
1625 params->ring = ring;
1626 params->dispatch_flags = dispatch_flags;
1627 params->batch_obj = batch_obj;
1630 ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
1634 * FIXME: We crucially rely upon the active tracking for the (ppgtt)
1635 * batch vma for correctness. To be less ugly and less fragile, this
1636 * needs to be adjusted to also track the ggtt batch vma properly as
1639 if (dispatch_flags & I915_DISPATCH_SECURE)
1640 i915_gem_object_ggtt_unpin(batch_obj);
1643 /* the request owns the ref now */
1644 i915_gem_context_unreference(ctx);
1648 * If the request was created but not successfully submitted then it
1649 * must be freed again. If it was submitted then it is being tracked
1650 * on the active request list and no clean up is required here.
1652 if (ret && params->request)
1653 i915_gem_request_cancel(params->request);
1655 mutex_unlock(&dev->struct_mutex);
1658 /* intel_gpu_busy should also get a ref, so it will be freed when the device
1659 * is really idle. */
1660 intel_runtime_pm_put(dev_priv);
1665 * Legacy execbuffer just creates an exec2 list from the original exec object
1666 * list array and passes it to the real function.
1669 i915_gem_execbuffer(struct drm_device *dev, void *data,
1670 struct drm_file *file)
1672 struct drm_i915_gem_execbuffer *args = data;
1673 struct drm_i915_gem_execbuffer2 exec2;
1674 struct drm_i915_gem_exec_object *exec_list = NULL;
1675 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1678 if (args->buffer_count < 1) {
1679 DRM_DEBUG("execbuf with %d buffers\n", args->buffer_count);
1683 /* Copy in the exec list from userland */
1684 exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
1685 exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
1686 if (exec_list == NULL || exec2_list == NULL) {
1687 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1688 args->buffer_count);
1689 drm_free_large(exec_list);
1690 drm_free_large(exec2_list);
1693 ret = copy_from_user(exec_list,
1694 u64_to_user_ptr(args->buffers_ptr),
1695 sizeof(*exec_list) * args->buffer_count);
1697 DRM_DEBUG("copy %d exec entries failed %d\n",
1698 args->buffer_count, ret);
1699 drm_free_large(exec_list);
1700 drm_free_large(exec2_list);
1704 for (i = 0; i < args->buffer_count; i++) {
1705 exec2_list[i].handle = exec_list[i].handle;
1706 exec2_list[i].relocation_count = exec_list[i].relocation_count;
1707 exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
1708 exec2_list[i].alignment = exec_list[i].alignment;
1709 exec2_list[i].offset = exec_list[i].offset;
1710 if (INTEL_INFO(dev)->gen < 4)
1711 exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
1713 exec2_list[i].flags = 0;
1716 exec2.buffers_ptr = args->buffers_ptr;
1717 exec2.buffer_count = args->buffer_count;
1718 exec2.batch_start_offset = args->batch_start_offset;
1719 exec2.batch_len = args->batch_len;
1720 exec2.DR1 = args->DR1;
1721 exec2.DR4 = args->DR4;
1722 exec2.num_cliprects = args->num_cliprects;
1723 exec2.cliprects_ptr = args->cliprects_ptr;
1724 exec2.flags = I915_EXEC_RENDER;
1725 i915_execbuffer2_set_context_id(exec2, 0);
1727 ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
1729 struct drm_i915_gem_exec_object __user *user_exec_list =
1730 u64_to_user_ptr(args->buffers_ptr);
1732 /* Copy the new buffer offsets back to the user's exec list. */
1733 for (i = 0; i < args->buffer_count; i++) {
1734 ret = __copy_to_user(&user_exec_list[i].offset,
1735 &exec2_list[i].offset,
1736 sizeof(user_exec_list[i].offset));
1739 DRM_DEBUG("failed to copy %d exec entries "
1740 "back to user (%d)\n",
1741 args->buffer_count, ret);
1747 drm_free_large(exec_list);
1748 drm_free_large(exec2_list);
1753 i915_gem_execbuffer2(struct drm_device *dev, void *data,
1754 struct drm_file *file)
1756 struct drm_i915_gem_execbuffer2 *args = data;
1757 struct drm_i915_gem_exec_object2 *exec2_list = NULL;
1760 if (args->buffer_count < 1 ||
1761 args->buffer_count > UINT_MAX / sizeof(*exec2_list)) {
1762 DRM_DEBUG("execbuf2 with %d buffers\n", args->buffer_count);
1766 if (args->rsvd2 != 0) {
1767 DRM_DEBUG("dirty rsvd2 field\n");
1771 exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
1772 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
1773 if (exec2_list == NULL)
1774 exec2_list = drm_malloc_ab(sizeof(*exec2_list),
1775 args->buffer_count);
1776 if (exec2_list == NULL) {
1777 DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
1778 args->buffer_count);
1781 ret = copy_from_user(exec2_list,
1782 u64_to_user_ptr(args->buffers_ptr),
1783 sizeof(*exec2_list) * args->buffer_count);
1785 DRM_DEBUG("copy %d exec entries failed %d\n",
1786 args->buffer_count, ret);
1787 drm_free_large(exec2_list);
1791 ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
1793 /* Copy the new buffer offsets back to the user's exec list. */
1794 struct drm_i915_gem_exec_object2 __user *user_exec_list =
1795 u64_to_user_ptr(args->buffers_ptr);
1798 for (i = 0; i < args->buffer_count; i++) {
1799 ret = __copy_to_user(&user_exec_list[i].offset,
1800 &exec2_list[i].offset,
1801 sizeof(user_exec_list[i].offset));
1804 DRM_DEBUG("failed to copy %d exec entries "
1806 args->buffer_count);
1812 drm_free_large(exec2_list);
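/*
 * For reference only: a minimal userspace sketch of driving the execbuffer2
 * ioctl handled above. This is not part of the driver; it assumes the libdrm
 * uapi headers are on the include path, "fd" is an open i915 DRM fd, and
 * "batch_handle" names a GEM buffer already filled with commands ending in
 * MI_BATCH_BUFFER_END. Error handling is omitted.
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <drm/i915_drm.h>
 *
 *	static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len)
 *	{
 *		struct drm_i915_gem_exec_object2 obj;
 *		struct drm_i915_gem_execbuffer2 execbuf;
 *
 *		memset(&obj, 0, sizeof(obj));
 *		obj.handle = batch_handle;		// last entry is the batch
 *
 *		memset(&execbuf, 0, sizeof(execbuf));
 *		execbuf.buffers_ptr = (uintptr_t)&obj;
 *		execbuf.buffer_count = 1;
 *		execbuf.batch_len = batch_len;		// 8-byte aligned, see the checks above
 *		execbuf.flags = I915_EXEC_RENDER;	// render ring, default context
 *		i915_execbuffer2_set_context_id(execbuf, 0);
 *
 *		return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 *	}
 */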