/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */
#include <linux/prime_numbers.h>

#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
#include "gem/i915_gem_region.h"
#include "huge_gem_object.h"
#include "i915_selftest.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_mmap.h"

struct tile {
	unsigned int width;
	unsigned int height;
	unsigned int stride;
	unsigned int size;
	unsigned int tiling;
	unsigned int swizzle;
};
static u64 swizzle_bit(unsigned int bit, u64 offset)
{
	return (offset & BIT_ULL(bit)) >> (bit - 6);
}
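/*
 * Convert a linear byte offset into the object into the byte offset at which
 * the same data lands in the tiled (and bit-6 swizzled) backing store,
 * mirroring what the fence/GTT hardware does.
 *
 * Illustrative example (parameter values assumed, not taken from the test
 * itself): with X-tiling, stride = 2048, tile width = 512, height = 8 and
 * size = 12 (a 4096-byte tile), a linear offset of 5000 decomposes into
 * row y = 2, column x = 904, and lands at 2 * 512 + (1 << 12) + 392 = 5512
 * within the first row of tiles, before any swizzle is applied.
 */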
static u64 tiled_offset(const struct tile *tile, u64 v)
{
	u64 x, y;

	if (tile->tiling == I915_TILING_NONE)
		return v;

	y = div64_u64_rem(v, tile->stride, &x);
	v = div64_u64_rem(y, tile->height, &y) * tile->stride * tile->height;

	if (tile->tiling == I915_TILING_X) {
		v += y * tile->width;
		v += div64_u64_rem(x, tile->width, &x) << tile->size;
		v += x;
	} else if (tile->width == 128) {
		const unsigned int ytile_span = 16;
		const unsigned int ytile_height = 512;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	} else {
		const unsigned int ytile_span = 32;
		const unsigned int ytile_height = 256;

		v += y * ytile_span;
		v += div64_u64_rem(x, ytile_span, &x) * ytile_height;
		v += x;
	}

	switch (tile->swizzle) {
	case I915_BIT_6_SWIZZLE_9:
		v ^= swizzle_bit(9, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v);
		break;
	case I915_BIT_6_SWIZZLE_9_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(11, v);
		break;
	case I915_BIT_6_SWIZZLE_9_10_11:
		v ^= swizzle_bit(9, v) ^ swizzle_bit(10, v) ^ swizzle_bit(11, v);
		break;
	}

	return v;
}
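/*
 * Pick a single random page, map it through a partial GGTT view, write a
 * marker via the GTT mmap, then read the backing store back on the CPU and
 * use tiled_offset() to predict where that write should have landed.
 */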
static int check_partial_mapping(struct drm_i915_gem_object *obj,
				 const struct tile *tile,
				 struct rnd_state *prng)
{
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_ggtt_view view;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	page = i915_prandom_u32_max_state(npages, prng);
	view = compute_partial_view(obj, page, MIN_CHUNK_PAGES);

	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma)) {
		pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(vma));
		return PTR_ERR(vma);
	}

	n = page - view.partial.offset;
	GEM_BUG_ON(n >= view.partial.size);

	io = i915_vma_pin_iomap(vma);
	if (IS_ERR(io)) {
		pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
		       page, (int)PTR_ERR(io));
		return PTR_ERR(io);
	}

	iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
	i915_vma_unpin_iomap(vma);

	offset = tiled_offset(tile, page << PAGE_SHIFT);
	if (offset >= obj->base.size)
		return 0;

	intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

	p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
	cpu = kmap(p) + offset_in_page(offset);
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	if (*cpu != (u32)page) {
		pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
		       page, n,
		       view.partial.offset,
		       view.partial.size,
		       vma->size >> PAGE_SHIFT,
		       tile->tiling ? tile_row_pages(obj) : 0,
		       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
		       offset >> PAGE_SHIFT,
		       (unsigned int)offset_in_page(offset),
		       offset,
		       (u32)page, *cpu);
		return -EINVAL;
	}

	*cpu = 0;
	drm_clflush_virt_range(cpu, sizeof(*cpu));
	kunmap(p);

	return 0;
}
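/*
 * Same check as above, but instead of a single random page this walks the
 * prime-numbered pages of the object exhaustively until end_time expires.
 */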
static int check_partial_mappings(struct drm_i915_gem_object *obj,
				  const struct tile *tile,
				  unsigned long end_time)
{
	const unsigned int nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct i915_vma *vma;

	err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride);
	if (err) {
		pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n",
		       tile->tiling, tile->stride, err);
		return err;
	}

	GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling);
	GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, true);
	i915_gem_object_unlock(obj);
	if (err) {
		pr_err("Failed to flush to GTT write domain; err=%d\n", err);
		return err;
	}

	for_each_prime_number_from(page, 1, npages) {
		struct i915_ggtt_view view =
			compute_partial_view(obj, page, MIN_CHUNK_PAGES);

		GEM_BUG_ON(view.partial.size > nreal);

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			pr_err("Failed to pin partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(vma));
			return PTR_ERR(vma);
		}

		n = page - view.partial.offset;
		GEM_BUG_ON(n >= view.partial.size);

		io = i915_vma_pin_iomap(vma);
		if (IS_ERR(io)) {
			pr_err("Failed to iomap partial view: offset=%lu; err=%d\n",
			       page, (int)PTR_ERR(io));
			return PTR_ERR(io);
		}

		iowrite32(page, io + n * PAGE_SIZE / sizeof(*io));
		i915_vma_unpin_iomap(vma);

		offset = tiled_offset(tile, page << PAGE_SHIFT);
		if (offset >= obj->base.size)
			continue;

		intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt);

		p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT);
		cpu = kmap(p) + offset_in_page(offset);
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		if (*cpu != (u32)page) {
			pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n",
			       page, n,
			       view.partial.offset,
			       view.partial.size,
			       vma->size >> PAGE_SHIFT,
			       tile->tiling ? tile_row_pages(obj) : 0,
			       vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride,
			       offset >> PAGE_SHIFT,
			       (unsigned int)offset_in_page(offset),
			       offset,
			       (u32)page, *cpu);
			return -EINVAL;
		}

		*cpu = 0;
		drm_clflush_virt_range(cpu, sizeof(*cpu));
		kunmap(p);

		if (igt_timeout(end_time,
				"%s: timed out after tiling=%d stride=%d\n",
				__func__, tile->tiling, tile->stride))
			return -EINTR;
	}

	return 0;
}
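/*
 * Fill in the tile geometry (width, height and size) appropriate for the
 * platform and tiling mode, and return the maximum pitch, in tile widths,
 * that a fence register can cover.
 */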
static unsigned int
setup_tile_size(struct tile *tile, struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) <= 2) {
		tile->height = 16;
		tile->width = 128;
		tile->size = 11;
	} else if (tile->tiling == I915_TILING_Y &&
		   HAS_128_BYTE_Y_TILING(i915)) {
		tile->height = 32;
		tile->width = 128;
		tile->size = 12;
	} else {
		tile->height = 8;
		tile->width = 512;
		tile->size = 12;
	}

	if (INTEL_GEN(i915) < 4)
		return 8192 / tile->width;
	else if (INTEL_GEN(i915) < 7)
		return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width;
	else
		return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width;
}
static int igt_partial_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * We want to check the page mapping and fencing of a large object
	 * mmapped through the GTT. The object we create is larger than can
	 * possibly be mmapped as a whole, and so we must use partial GGTT vma.
	 * We then check that a write through each partial GGTT vma ends up
	 * in the right set of pages within the object, and with the expected
	 * tiling, which we verify by manual swizzling.
	 */

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
	tile.tiling = I915_TILING_NONE;

	err = check_partial_mappings(obj, &tile, end);
	if (err && err != -EINTR)
		goto out_unlock;

	for (tiling = I915_TILING_X; tiling <= I915_TILING_Y; tiling++) {
		unsigned int max_pitch;

		if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
			/*
			 * The swizzling pattern is actually unknown as it
			 * varies based on physical address of each page.
			 * See i915_gem_detect_bit_6_swizzle().
			 */
			break;

		tile.tiling = tiling;
		switch (tiling) {
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		GEM_BUG_ON(tile.swizzle == I915_BIT_6_SWIZZLE_UNKNOWN);
		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		max_pitch = setup_tile_size(&tile, i915);

		for (pitch = max_pitch; pitch; pitch >>= 1) {
			tile.stride = tile.width * pitch;
			err = check_partial_mappings(obj, &tile, end);

			if (pitch > 2 && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch - 1);
				err = check_partial_mappings(obj, &tile, end);
			}

			if (pitch < max_pitch && INTEL_GEN(i915) >= 4) {
				tile.stride = tile.width * (pitch + 1);
				err = check_partial_mappings(obj, &tile, end);
			}
		}

		if (INTEL_GEN(i915) >= 4) {
			for_each_prime_number(pitch, max_pitch) {
				tile.stride = tile.width * pitch;
				err = check_partial_mappings(obj, &tile, end);
			}
		}
	}

out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}
static int igt_smoke_tiling(void *arg)
{
	const unsigned int nreal = 1 << 12; /* largest tile row x2 */
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;
	I915_RND_STATE(prng);

	if (!i915_ggtt_has_aperture(&i915->ggtt))
		return 0;

	/*
	 * igt_partial_tiling() does an exhaustive check of partial tiling
	 * chunking, but will undoubtedly run out of time. Here, we do a
	 * randomised search and hope that over many 1s runs with different
	 * seeds we will do a thorough check.
	 *
	 * Remember to look at the st_seed if we see a flip-flop in BAT!
	 */

	if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES)
		return 0;

	obj = huge_gem_object(i915,
			      nreal << PAGE_SHIFT,
			      (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT);

	err = i915_gem_object_pin_pages(obj);
	if (err) {
		pr_err("Failed to allocate %u pages (%lu total), err=%d\n",
		       nreal, obj->base.size / PAGE_SIZE, err);
		goto out;
	}

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	count = 0;
	do {
		tile.tiling =
			i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng);
		switch (tile.tiling) {
		case I915_TILING_NONE:
			tile.swizzle = I915_BIT_6_SWIZZLE_NONE;
			break;
		case I915_TILING_X:
			tile.swizzle = i915->ggtt.bit_6_swizzle_x;
			break;
		case I915_TILING_Y:
			tile.swizzle = i915->ggtt.bit_6_swizzle_y;
			break;
		}

		if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 ||
		    tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17)
			continue;

		if (tile.tiling != I915_TILING_NONE) {
			unsigned int max_pitch = setup_tile_size(&tile, i915);

			tile.stride =
				i915_prandom_u32_max_state(max_pitch, &prng);
			tile.stride = (1 + tile.stride) * tile.width;
			if (INTEL_GEN(i915) < 4)
				tile.stride = rounddown_pow_of_two(tile.stride);
		}

		err = check_partial_mapping(obj, &tile, &prng);
		if (err)
			break;

		count++;
	} while (!__igt_timeout(end, NULL));

	pr_info("%s: Completed %lu trials\n", __func__, count);

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}
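/*
 * Keep the object busy on every uabi engine by submitting a dummy request
 * against it, then drop our reference so that the object stays alive only
 * through its active reference until those requests are retired.
 */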
static int make_obj_busy(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct intel_engine_cs *engine;

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);

		err = i915_vma_pin(vma, 0, 0, PIN_USER);

		rq = intel_engine_create_kernel_request(engine);

		i915_vma_lock(vma);
		err = i915_request_await_object(rq, vma->obj, true);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq,
						      EXEC_OBJECT_WRITE);
		i915_vma_unlock(vma);

		i915_request_add(rq);
		i915_vma_unpin(vma);
	}

	i915_gem_object_put(obj); /* leave it only alive via its active ref */
	return 0;
}
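/*
 * Try to attach a GTT mmap offset to a freshly created object of the given
 * size and check that the attempt succeeds or fails with the expected error.
 */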
static bool assert_mmap_offset(struct drm_i915_private *i915,
			       unsigned long size,
			       int expected)
{
	struct drm_i915_gem_object *obj;
	struct i915_mmap_offset *mmo;

	obj = i915_gem_object_create_internal(i915, size);

	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
	i915_gem_object_put(obj);

	return PTR_ERR_OR_ZERO(mmo) == expected;
}
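/*
 * The exhaustion test below wants full control over when objects are retired
 * and freed, so park the background retire worker and the shrinker while it
 * runs and restore them (after flushing outstanding work) on exit.
 */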
static void disable_retire_worker(struct drm_i915_private *i915)
{
	i915_gem_driver_unregister__shrinker(i915);
	intel_gt_pm_get(&i915->gt);
	cancel_delayed_work_sync(&i915->gt.requests.retire_work);
}

static void restore_retire_worker(struct drm_i915_private *i915)
{
	igt_flush_test(i915);
	intel_gt_pm_put(&i915->gt);
	i915_gem_driver_register__shrinker(i915);
}

static void mmap_offset_lock(struct drm_i915_private *i915)
	__acquires(&i915->drm.vma_offset_manager->vm_lock)
{
	write_lock(&i915->drm.vma_offset_manager->vm_lock);
}

static void mmap_offset_unlock(struct drm_i915_private *i915)
	__releases(&i915->drm.vma_offset_manager->vm_lock)
{
	write_unlock(&i915->drm.vma_offset_manager->vm_lock);
}
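/*
 * Shrink the mmap offset (fake offset) address space to a single page, then
 * check that a one-page object can claim it, that anything larger is
 * rejected with -ENOSPC, and that the hole can be reclaimed again once the
 * busy objects occupying it die.
 */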
static int igt_mmap_offset_exhaustion(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_mm *mm = &i915->drm.vma_offset_manager->vm_addr_space_mm;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *hole, *next;
	struct i915_mmap_offset *mmo;

	/* Disable background reaper */
	disable_retire_worker(i915);
	GEM_BUG_ON(!i915->gt.awake);
	intel_gt_retire_requests(&i915->gt);
	i915_gem_drain_freed_objects(i915);

	/* Trim the device mmap space to only a page */
	mmap_offset_lock(i915);
	loop = 1; /* PAGE_SIZE units */
	list_for_each_entry_safe(hole, next, &mm->hole_stack, hole_stack) {
		struct drm_mm_node *resv;

		resv = kzalloc(sizeof(*resv), GFP_NOWAIT);

		resv->start = drm_mm_hole_node_start(hole) + loop;
		resv->size = hole->hole_size - loop;
		resv->color = -1ul;

		pr_debug("Reserving hole [%llx + %llx]\n",
			 resv->start, resv->size);

		err = drm_mm_reserve_node(mm, resv);
		if (err) {
			pr_err("Failed to trim VMA manager, err=%d\n", err);
			goto out_park;
		}
	}
	GEM_BUG_ON(!list_is_singular(&mm->hole_stack));
	mmap_offset_unlock(i915);

	if (!assert_mmap_offset(i915, PAGE_SIZE, 0)) {
		pr_err("Unable to insert object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	if (!assert_mmap_offset(i915, 2 * PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting too large object into single page hole\n");
		err = -EINVAL;
		goto out;
	}

	/* Fill the hole, further allocation attempts should then fail */
	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);

	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
	if (IS_ERR(mmo)) {
		pr_err("Unable to insert object into reclaimed hole\n");
		err = PTR_ERR(mmo);
		goto err_obj;
	}

	if (!assert_mmap_offset(i915, PAGE_SIZE, -ENOSPC)) {
		pr_err("Unexpectedly succeeded in inserting object into no holes!\n");
		err = -EINVAL;
		goto err_obj;
	}

	i915_gem_object_put(obj);

	/* Now fill with busy dead objects that we expect to reap */
	for (loop = 0; loop < 3; loop++) {
		if (intel_gt_is_wedged(&i915->gt))
			break;

		obj = i915_gem_object_create_internal(i915, PAGE_SIZE);

		err = make_obj_busy(obj);
		if (err) {
			pr_err("[loop %d] Failed to busy the object\n", loop);
			goto err_obj;
		}
	}

out:
	mmap_offset_lock(i915);
out_park:
	drm_mm_for_each_node_safe(hole, next, mm) {
		if (hole->color != -1ul)
			continue;

		drm_mm_remove_node(hole);
		kfree(hole);
	}
	mmap_offset_unlock(i915);
	restore_retire_worker(i915);
	return err;
err_obj:
	i915_gem_object_put(obj);
	goto out;
}
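/*
 * gtt_set()/wc_set() fill the object with POISON_INUSE through a GGTT iomap
 * or a WC CPU map respectively, while gtt_check()/wc_check() verify that the
 * POISON_FREE pattern written via the userspace mmap reached the backing
 * store.
 */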
static int gtt_set(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);

	memset_io(map, POISON_INUSE, obj->base.size);
	i915_vma_unpin_iomap(vma);

	intel_gt_pm_put(vma->vm->gt);
	return 0;
}

static int gtt_check(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	intel_gt_pm_get(vma->vm->gt);
	map = i915_vma_pin_iomap(vma);

	if (memchr_inv((void __force *)map, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (GTT)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}
	i915_vma_unpin_iomap(vma);

	intel_gt_pm_put(vma->vm->gt);
	return err;
}

static int wc_set(struct drm_i915_gem_object *obj)
{
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, POISON_INUSE, obj->base.size);
	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	return 0;
}

static int wc_check(struct drm_i915_gem_object *obj)
{
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	if (memchr_inv(vaddr, POISON_FREE, obj->base.size)) {
		pr_err("%s: Write via mmap did not land in backing store (WC)\n",
		       obj->mm.region->name);
		err = -EINVAL;
	}

	i915_gem_object_unpin_map(obj);
	return err;
}
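/*
 * An object can only be mapped through the aperture if the GGTT has one, and
 * only through the CPU mmap types if it is backed by struct pages or iomem.
 */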
static bool can_mmap(struct drm_i915_gem_object *obj, enum i915_mmap_type type)
{
	if (type == I915_MMAP_TYPE_GTT &&
	    !i915_ggtt_has_aperture(&to_i915(obj->base.dev)->ggtt))
		return false;

	if (type != I915_MMAP_TYPE_GTT &&
	    !i915_gem_object_type_has(obj,
				      I915_GEM_OBJECT_HAS_STRUCT_PAGE |
				      I915_GEM_OBJECT_HAS_IOMEM))
		return false;

	return true;
}

#define expand32(x) (((x) << 0) | ((x) << 8) | ((x) << 16) | ((x) << 24))
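/*
 * expand32() replicates a byte pattern into all four bytes of a u32, e.g.
 * expand32(POISON_INUSE) turns 0x5a into 0x5a5a5a5a.
 *
 * __igt_mmap() maps the object into userspace via the requested mmap type,
 * checks that the poison pattern written through the kernel mapping is
 * visible there, overwrites it from userspace and then verifies that the new
 * pattern reached the backing store.
 */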
static int __igt_mmap(struct drm_i915_private *i915,
		      struct drm_i915_gem_object *obj,
		      enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;
	struct vm_area_struct *area;

	if (!can_mmap(obj, type))
		return 0;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	pr_debug("igt_mmap(%s, %d) @ %lx\n", obj->mm.region->name, type, addr);

	area = find_vma(current->mm, addr);
	if (!area) {
		pr_err("%s: Did not create a vm_area_struct for the mmap\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	if (area->vm_private_data != mmo) {
		pr_err("%s: vm_area_struct did not point back to our mmap_offset object!\n",
		       obj->mm.region->name);
		err = -EINVAL;
		goto out_unmap;
	}

	for (i = 0; i < obj->base.size / sizeof(u32); i++) {
		u32 __user *ux = u64_to_user_ptr((u64)(addr + i * sizeof(*ux)));

		if (get_user(x, ux)) {
			pr_err("%s: Unable to read from mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}

		if (x != expand32(POISON_INUSE)) {
			pr_err("%s: Read incorrect value from mmap, offset:%zd, found:%x, expected:%x\n",
			       obj->mm.region->name,
			       i * sizeof(x), x, expand32(POISON_INUSE));
			err = -EINVAL;
			goto out_unmap;
		}

		x = expand32(POISON_FREE);
		if (put_user(x, ux)) {
			pr_err("%s: Unable to write to mmap, offset:%zd\n",
			       obj->mm.region->name, i * sizeof(x));
			err = -EFAULT;
			goto out_unmap;
		}
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	err = gtt_check(obj);
out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}
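/*
 * Exercise __igt_mmap() for both mmap types across every memory region and a
 * selection of object sizes.
 */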
static int igt_mmap(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		unsigned long sizes[] = {
			PAGE_SIZE,
			mr->min_page_size,
			SZ_4M,
		};

		for (i = 0; i < ARRAY_SIZE(sizes); i++) {
			struct drm_i915_gem_object *obj;

			obj = i915_gem_object_create_region(mr, sizes[i], 0);
			if (obj == ERR_PTR(-ENODEV))
				continue;

			err = __igt_mmap(i915, obj, I915_MMAP_TYPE_GTT);
			if (err == 0)
				err = __igt_mmap(i915, obj, I915_MMAP_TYPE_WC);

			i915_gem_object_put(obj);
			if (err)
				return err;
		}
	}

	return 0;
}
static int __igt_mmap_gpu(struct drm_i915_private *i915,
			  struct drm_i915_gem_object *obj,
			  enum i915_mmap_type type)
{
	struct intel_engine_cs *engine;
	struct i915_mmap_offset *mmo;

	/*
	 * Verify that the mmap access into the backing store aligns with
	 * that of the GPU, i.e. that mmap is indeed writing into the same
	 * page as being read by the GPU.
	 */

	if (!can_mmap(obj, type))
		return 0;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	ux = u64_to_user_ptr((u64)addr);
	bbe = MI_BATCH_BUFFER_END;
	if (put_user(bbe, ux)) {
		pr_err("%s: Unable to write to mmap\n", obj->mm.region->name);
		err = -EFAULT;
		goto out_unmap;
	}

	if (type == I915_MMAP_TYPE_GTT)
		intel_gt_flush_ggtt_writes(&i915->gt);

	for_each_uabi_engine(engine, i915) {
		struct i915_request *rq;
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			goto out_unmap;
		}

		err = i915_vma_pin(vma, 0, 0, PIN_USER);
		if (err)
			goto out_unmap;

		rq = i915_request_create(engine->kernel_context);

		i915_vma_lock(vma);
		err = i915_request_await_object(rq, vma->obj, false);
		if (err == 0)
			err = i915_vma_move_to_active(vma, rq, 0);
		i915_vma_unlock(vma);

		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
		i915_request_get(rq);
		i915_request_add(rq);

		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			struct drm_printer p =
				drm_info_printer(engine->i915->drm.dev);

			pr_err("%s(%s, %s): Failed to execute batch\n",
			       __func__, engine->name, obj->mm.region->name);
			intel_engine_dump(engine, &p,
					  "%s\n", engine->name);

			intel_gt_set_wedged(engine->gt);
			err = -EIO;
		}
		i915_request_put(rq);

		i915_vma_unpin(vma);
		if (err)
			goto out_unmap;
	}

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}

static int igt_mmap_gpu(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_gpu(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}
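/*
 * Callbacks for apply_to_page_range(): walk the user mapping and complain if
 * a PTE that should be populated is missing, or if one that should have been
 * revoked is still present.
 */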
static int check_present_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (!pte_present(*pte) || pte_none(*pte)) {
		pr_err("missing PTE:%lx\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_absent_pte(pte_t *pte, unsigned long addr, void *data)
{
	if (pte_present(*pte) && !pte_none(*pte)) {
		pr_err("present PTE:%lx; expected to be revoked\n",
		       (addr - (unsigned long)data) >> PAGE_SHIFT);
		return -EINVAL;
	}

	return 0;
}

static int check_present(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_present_pte, (void *)addr);
}

static int check_absent(unsigned long addr, unsigned long len)
{
	return apply_to_page_range(current->mm, addr, len,
				   check_absent_pte, (void *)addr);
}
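/*
 * Touch every page of the freshly created user mapping (including the very
 * last byte) so that all of its PTEs are populated before we inspect them.
 */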
static int prefault_range(u64 start, u64 len)
{
	const char __user *addr, *end;
	char __maybe_unused c;

	addr = u64_to_user_ptr(start);
	end = addr + len;

	for (; addr < end; addr += PAGE_SIZE) {
		err = __get_user(c, addr);
		if (err)
			return err;
	}

	return __get_user(c, end - 1);
}
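/*
 * Map the object, prefault it, and then check that the userspace PTEs are
 * torn down again once the object is unbound from the GGTT (or its pages are
 * released, for the CPU mmap types).
 */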
static int __igt_mmap_revoke(struct drm_i915_private *i915,
			     struct drm_i915_gem_object *obj,
			     enum i915_mmap_type type)
{
	struct i915_mmap_offset *mmo;

	if (!can_mmap(obj, type))
		return 0;

	mmo = mmap_offset_attach(obj, type, NULL);
	if (IS_ERR(mmo))
		return PTR_ERR(mmo);

	addr = igt_mmap_node(i915, &mmo->vma_node, 0, PROT_WRITE, MAP_SHARED);
	if (IS_ERR_VALUE(addr))
		return addr;

	err = prefault_range(addr, obj->base.size);
	if (err)
		goto out_unmap;

	GEM_BUG_ON(mmo->mmap_type == I915_MMAP_TYPE_GTT &&
		   !atomic_read(&obj->bind_count));

	err = check_present(addr, obj->base.size);
	if (err) {
		pr_err("%s: was not present\n", obj->mm.region->name);
		goto out_unmap;
	}

	/*
	 * After unbinding the object from the GGTT, its address may be reused
	 * for other objects. Ergo we have to revoke the previous mmap PTE
	 * access as it no longer points to the same object.
	 */
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err) {
		pr_err("Failed to unbind object!\n");
		goto out_unmap;
	}
	GEM_BUG_ON(atomic_read(&obj->bind_count));

	if (type != I915_MMAP_TYPE_GTT) {
		__i915_gem_object_put_pages(obj);
		if (i915_gem_object_has_pages(obj)) {
			pr_err("Failed to put-pages object!\n");
			err = -EINVAL;
			goto out_unmap;
		}
	}

	err = check_absent(addr, obj->base.size);
	if (err)
		pr_err("%s: was not absent\n", obj->mm.region->name);

out_unmap:
	vm_munmap(addr, obj->base.size);
	return err;
}
static int igt_mmap_revoke(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_memory_region *mr;
	enum intel_region_id id;

	for_each_memory_region(mr, i915, id) {
		struct drm_i915_gem_object *obj;

		obj = i915_gem_object_create_region(mr, PAGE_SIZE, 0);
		if (obj == ERR_PTR(-ENODEV))
			continue;
		if (IS_ERR(obj))
			return PTR_ERR(obj);

		err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_GTT);
		if (err == 0)
			err = __igt_mmap_revoke(i915, obj, I915_MMAP_TYPE_WC);

		i915_gem_object_put(obj);
		if (err)
			return err;
	}

	return 0;
}
int i915_gem_mman_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_partial_tiling),
		SUBTEST(igt_smoke_tiling),
		SUBTEST(igt_mmap_offset_exhaustion),
		SUBTEST(igt_mmap),
		SUBTEST(igt_mmap_revoke),
		SUBTEST(igt_mmap_gpu),
	};

	return i915_subtests(tests, i915);
}