/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/list_sort.h>
#include <linux/prime_numbers.h>

#include "../i915_selftest.h"
#include "i915_random.h"

#include "mock_context.h"
#include "mock_drm.h"
#include "mock_gem_device.h"

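/*
 * Selftests for the GTT and PPGTT address space management. The hole
 * exercisers below (fill, walk, pot, drunk, lowlevel, shrink) each take an
 * address space and a hole within it, then try to fill, stride across or
 * randomly populate that hole, checking that insertions land where
 * requested and that the range can be unbound or cleared again afterwards.
 */
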
static void cleanup_freed_objects(struct drm_i915_private *i915)
{
	/*
	 * As we may hold onto the struct_mutex for inordinate lengths of
	 * time, the NMI khungtaskd detector may fire for the free objects
	 * worker.
	 */
	mutex_unlock(&i915->drm.struct_mutex);

	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
}

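/*
 * The "fake" objects below carry no real backing storage: their
 * scatterlists all point at a single, biased pfn (PFN_BIAS), so huge GTT
 * ranges can be populated without allocating memory. They are marked
 * I915_MADV_DONTNEED, leaving the shrinker free to discard them.
 */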
static void fake_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}

static int fake_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
#define PFN_BIAS 0x1000
	struct sg_table *pages;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	typeof(obj->base.size) rem;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return -ENOMEM;

	rem = round_up(obj->base.size, BIT(31)) >> 31;
	if (sg_alloc_table(pages, rem, GFP)) {
		kfree(pages);
		return -ENOMEM;
	}

	sg_page_sizes = 0;
	rem = obj->base.size;
	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
		unsigned long len = min_t(typeof(rem), rem, BIT(31));

		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
		sg_dma_address(sg) = page_to_phys(sg_page(sg));
		sg_dma_len(sg) = len;
		sg_page_sizes |= len;

		rem -= len;
	}

	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);

	return 0;
#undef GFP
}

static void fake_put_pages(struct drm_i915_gem_object *obj,
			   struct sg_table *pages)
{
	fake_free_pages(obj, pages);
	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops fake_ops = {
	.flags = I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = fake_get_pages,
	.put_pages = fake_put_pages,
};

static struct drm_i915_gem_object *
fake_dma_object(struct drm_i915_private *i915, u64 size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		goto err;

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &fake_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = I915_CACHE_NONE;

	/* Preallocate the "backing storage" */
	if (i915_gem_object_pin_pages(obj))
		goto err_obj;

	i915_gem_object_unpin_pages(obj);
	return obj;

err_obj:
	i915_gem_object_put(obj);
err:
	return ERR_PTR(-ENOMEM);
}

static int igt_ppgtt_alloc(void *arg)
{
	struct drm_i915_private *dev_priv = arg;
	struct i915_hw_ppgtt *ppgtt;
	u64 size, last;
	int err = 0;

	/* Allocate a ppgtt and try to fill the entire range */

	if (!USES_PPGTT(dev_priv))
		return 0;

	ppgtt = __hw_ppgtt_create(dev_priv);
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	if (!ppgtt->vm.allocate_va_range)
		goto err_ppgtt_cleanup;

	/* Check we can allocate the entire range */
	for (size = 4096;
	     size <= ppgtt->vm.total;
	     size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
					size, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}

		ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
	}

	/* Check we can incrementally allocate the entire range */
	for (last = 0, size = 4096;
	     size <= ppgtt->vm.total;
	     last = size, size <<= 2) {
		err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
						  last, size - last);
		if (err) {
			if (err == -ENOMEM) {
				pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
					last, size - last, ilog2(size));
				err = 0; /* virtual space too large! */
			}
			goto err_ppgtt_cleanup;
		}
	}

err_ppgtt_cleanup:
	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt->vm.cleanup(&ppgtt->vm);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	kfree(ppgtt);
	return err;
}

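/*
 * lowlevel_hole() bypasses the vma/drm_mm machinery and drives the address
 * space directly: va ranges are allocated, populated through
 * vm->insert_entries() using a mock vma, and cleared again with
 * vm->clear_range(), at randomised offsets within the hole.
 */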
static int lowlevel_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
	I915_RND_STATE(seed_prng);
	struct i915_vma mock_vma;

	memset(&mock_vma, 0, sizeof(struct i915_vma));

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		I915_RND_SUBSTATE(prng, seed_prng);
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);

		order = i915_random_order(count, &prng);
		} while (count >>= 1);

		GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
		GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));

		GEM_BUG_ON(obj->base.size != BIT_ULL(size));

		if (i915_gem_object_pin_pages(obj)) {
			i915_gem_object_put(obj);

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);

			if (igt_timeout(end_time,
					"%s timed out before %d/%d\n",
					__func__, n, count)) {
				hole_end = hole_start; /* quit */

			if (vm->allocate_va_range &&
			    vm->allocate_va_range(vm, addr, BIT_ULL(size)))

			mock_vma.pages = obj->mm.pages;
			mock_vma.node.size = BIT_ULL(size);
			mock_vma.node.start = addr;

			intel_runtime_pm_get(i915);
			vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
			intel_runtime_pm_put(i915);

		i915_random_reorder(order, count, &prng);
		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
			vm->clear_range(vm, addr, BIT_ULL(size));

		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

		cleanup_freed_objects(i915);

static void close_object_list(struct list_head *objects,
			      struct i915_address_space *vm)
{
	struct drm_i915_gem_object *obj, *on;
	int ignored;

	list_for_each_entry_safe(obj, on, objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, vm, NULL);
		if (!IS_ERR(vma))
			ignored = i915_vma_unbind(vma);
		/* Only ppgtt vma may be closed before the object is freed */
		if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&obj->st_link);
		i915_gem_object_put(obj);
	}
}

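/*
 * fill_hole() packs the hole with objects of prime-strided sizes, binding
 * them at fixed offsets working inwards from both the top and the bottom
 * edge, and checks that every vma landed where it was asked to be before
 * unbinding everything again.
 */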
static int fill_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
	const u64 hole_size = hole_end - hole_start;
	struct drm_i915_gem_object *obj;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
	const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
	unsigned long npages, prime, flags;
	struct i915_vma *vma;

	/* Try binding many VMA working inwards from either edge */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	for_each_prime_number_from(prime, 2, max_step) {
		for (npages = 1; npages <= max_pages; npages *= prime) {
			const u64 full_size = npages << PAGE_SHIFT;
				{ "top-down", hole_end, -1, },
				{ "bottom-up", hole_start, 1, },

			obj = fake_dma_object(i915, full_size);

			list_add(&obj->st_link, &objects);

			/* Align differing sized objects against the edges, and
			 * check we don't walk off into the void when binding
			 * them into the GTT.
			 */
			for (p = phases; p->name; p++) {

				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

					if (offset < hole_start + obj->base.size)
					offset -= obj->base.size;

					err = i915_vma_pin(vma, 0, 0, offset | flags);
						pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),

					if (offset + obj->base.size > hole_end)
					offset += obj->base.size;

				list_for_each_entry(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

					if (offset < hole_start + obj->base.size)
					offset -= obj->base.size;

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size,

					err = i915_vma_unbind(vma);
						pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,

					if (offset + obj->base.size > hole_end)
					offset += obj->base.size;

				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

					if (offset < hole_start + obj->base.size)
					offset -= obj->base.size;

					err = i915_vma_pin(vma, 0, 0, offset | flags);
						pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
						       __func__, p->name, err, npages, prime, offset);

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),

					if (offset + obj->base.size > hole_end)
					offset += obj->base.size;

				list_for_each_entry_reverse(obj, &objects, st_link) {
					vma = i915_vma_instance(obj, vm, NULL);

					if (offset < hole_start + obj->base.size)
					offset -= obj->base.size;

					if (!drm_mm_node_allocated(&vma->node) ||
					    i915_vma_misplaced(vma, 0, 0, offset | flags)) {
						pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
						       __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),

					err = i915_vma_unbind(vma);
						pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
						       __func__, p->name, vma->node.start, vma->node.size,

					if (offset + obj->base.size > hole_end)
					offset += obj->base.size;

			if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
					__func__, npages, prime)) {

			close_object_list(&objects, vm);
			cleanup_freed_objects(i915);

	close_object_list(&objects, vm);

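/*
 * walk_hole() binds a single object, for a range of prime sizes, at every
 * offset it fits within the hole, checking placement and unbinding as it
 * steps along.
 */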
static int walk_hole(struct drm_i915_private *i915,
		     struct i915_address_space *vm,
		     u64 hole_start, u64 hole_end,
		     unsigned long end_time)
	const u64 hole_size = hole_end - hole_start;
	const unsigned long max_pages =
		min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);

	/* Try binding a single VMA in different positions within the hole */

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	for_each_prime_number_from(size, 1, max_pages) {
		struct drm_i915_gem_object *obj;
		struct i915_vma *vma;

		obj = fake_dma_object(i915, size << PAGE_SHIFT);

		vma = i915_vma_instance(obj, vm, NULL);

		for (addr = hole_start;
		     addr + obj->base.size < hole_end;
		     addr += obj->base.size) {
			err = i915_vma_pin(vma, 0, 0, addr | flags);
				pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
				       __func__, addr, vma->size,
				       hole_start, hole_end, err);

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);

			err = i915_vma_unbind(vma);
				pr_err("%s unbind failed at %llx + %llx with err=%d\n",
				       __func__, addr, vma->size, err);

			GEM_BUG_ON(drm_mm_node_allocated(&vma->node));

			if (igt_timeout(end_time,
					"%s timed out at %llx\n",

		if (!i915_vma_is_ggtt(vma))

		i915_gem_object_put(obj);

		cleanup_freed_objects(i915);

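/*
 * pot_hole() pins a two-page object so that it straddles every
 * power-of-two (pot) boundary within the hole, checking each placement
 * before unbinding; presumably this probes the transitions between levels
 * of the page-table hierarchy cheaply.
 */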
static int pot_hole(struct drm_i915_private *i915,
		    struct i915_address_space *vm,
		    u64 hole_start, u64 hole_end,
		    unsigned long end_time)
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);

	vma = i915_vma_instance(obj, vm, NULL);

	/* Insert a pair of pages across every pot boundary within the hole */
	for (pot = fls64(hole_end - 1) - 1;
	     pot > ilog2(2 * I915_GTT_PAGE_SIZE);
		u64 step = BIT_ULL(pot);

		for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
		     addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
			err = i915_vma_pin(vma, 0, 0, addr | flags);
				pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
				       hole_start, hole_end,

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, vma->size);
				err = i915_vma_unbind(vma);

			err = i915_vma_unbind(vma);

		if (igt_timeout(end_time,
				"%s timed out after %d/%d\n",
				__func__, pot, fls64(hole_end - 1) - 1)) {

	if (!i915_vma_is_ggtt(vma))

	i915_gem_object_put(obj);

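/*
 * drunk_hole() is the randomised variant: objects of increasing size are
 * pinned at shuffled offsets throughout the hole via the normal
 * i915_vma_pin() path, then unbound again.
 */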
static int drunk_hole(struct drm_i915_private *i915,
		      struct i915_address_space *vm,
		      u64 hole_start, u64 hole_end,
		      unsigned long end_time)
	I915_RND_STATE(prng);

	flags = PIN_OFFSET_FIXED | PIN_USER;
	if (i915_is_ggtt(vm))

	/* Keep creating larger objects until one cannot fit into the hole */
	for (size = 12; (hole_end - hole_start) >> size; size++) {
		struct drm_i915_gem_object *obj;
		unsigned int *order, count, n;
		struct i915_vma *vma;

		hole_size = (hole_end - hole_start) >> size;
		if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
			hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
		count = hole_size >> 1;
			pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
				 __func__, hole_start, hole_end, size, hole_size);

		order = i915_random_order(count, &prng);
		} while (count >>= 1);

		/* Ignore allocation failures (i.e. don't report them as
		 * a test failure) as we are purposefully allocating very
		 * large objects without checking that we have sufficient
		 * memory. We expect to hit -ENOMEM.
		 */
		obj = fake_dma_object(i915, BIT_ULL(size));

		vma = i915_vma_instance(obj, vm, NULL);

		GEM_BUG_ON(vma->size != BIT_ULL(size));

		for (n = 0; n < count; n++) {
			u64 addr = hole_start + order[n] * BIT_ULL(size);

			err = i915_vma_pin(vma, 0, 0, addr | flags);
				pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
				       hole_start, hole_end,

			if (!drm_mm_node_allocated(&vma->node) ||
			    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
				pr_err("%s incorrect at %llx + %llx\n",
				       __func__, addr, BIT_ULL(size));
				err = i915_vma_unbind(vma);

			err = i915_vma_unbind(vma);

			if (igt_timeout(end_time,
					"%s timed out after %d/%d\n",
					__func__, n, count)) {

		if (!i915_vma_is_ggtt(vma))

		i915_gem_object_put(obj);

		cleanup_freed_objects(i915);

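/*
 * __shrink_hole() packs the hole from start to end with ever larger
 * objects; shrink_hole() reruns it with fault injection enabled on the
 * address space so that page-table allocations fail at prime intervals,
 * exercising the error and shrinker paths.
 */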
static int __shrink_hole(struct drm_i915_private *i915,
			 struct i915_address_space *vm,
			 u64 hole_start, u64 hole_end,
			 unsigned long end_time)
	struct drm_i915_gem_object *obj;
	unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
	unsigned int order = 12;

	/* Keep creating larger objects until one cannot fit into the hole */
	for (addr = hole_start; addr < hole_end; ) {
		struct i915_vma *vma;
		u64 size = BIT_ULL(order++);

		size = min(size, hole_end - addr);
		obj = fake_dma_object(i915, size);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, vm, NULL);

		GEM_BUG_ON(vma->size != size);

		err = i915_vma_pin(vma, 0, 0, addr | flags);
			pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
			       __func__, addr, size, hole_start, hole_end, err);

		if (!drm_mm_node_allocated(&vma->node) ||
		    i915_vma_misplaced(vma, 0, 0, addr | flags)) {
			pr_err("%s incorrect at %llx + %llx\n",
			       __func__, addr, size);
			err = i915_vma_unbind(vma);

		if (igt_timeout(end_time,
				"%s timed out at offset %llx [%llx - %llx]\n",
				__func__, addr, hole_start, hole_end)) {

	close_object_list(&objects, vm);
	cleanup_freed_objects(i915);

static int shrink_hole(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
{
	unsigned long prime;
	int err;

	vm->fault_attr.probability = 999;
	atomic_set(&vm->fault_attr.times, -1);

	for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
		vm->fault_attr.interval = prime;
		err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
		if (err)
			break;
	}

	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

	return err;
}

static int shrink_boom(struct drm_i915_private *i915,
		       struct i915_address_space *vm,
		       u64 hole_start, u64 hole_end,
		       unsigned long end_time)
	unsigned int sizes[] = { SZ_2M, SZ_1G };
	struct drm_i915_gem_object *purge;
	struct drm_i915_gem_object *explode;

	/*
	 * Catch the case which shrink_hole seems to miss. The setup here
	 * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
	 * ensuring that all vma associated with the respective pd/pdp are
	 * unpinned at the time.
	 */

	for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
		unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
		unsigned int size = sizes[i];
		struct i915_vma *vma;

		purge = fake_dma_object(i915, size);
			return PTR_ERR(purge);

		vma = i915_vma_instance(purge, vm, NULL);

		err = i915_vma_pin(vma, 0, 0, flags);

		/* Should now be ripe for purging */

		explode = fake_dma_object(i915, size);
		if (IS_ERR(explode)) {
			err = PTR_ERR(explode);

		vm->fault_attr.probability = 100;
		vm->fault_attr.interval = 1;
		atomic_set(&vm->fault_attr.times, -1);

		vma = i915_vma_instance(explode, vm, NULL);

		err = i915_vma_pin(vma, 0, 0, flags | size);

		i915_gem_object_put(purge);
		i915_gem_object_put(explode);

		memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
		cleanup_freed_objects(i915);

	i915_gem_object_put(explode);
	i915_gem_object_put(purge);
	memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));

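/*
 * The exercise_*() helpers below run a hole exerciser over a freshly
 * created full ppgtt, over every hole currently present in the ggtt, or
 * over the ppgtt of a mocked context, respectively.
 */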
static int exercise_ppgtt(struct drm_i915_private *dev_priv,
			  int (*func)(struct drm_i915_private *i915,
				      struct i915_address_space *vm,
				      u64 hole_start, u64 hole_end,
				      unsigned long end_time))
{
	struct drm_file *file;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	if (!USES_FULL_PPGTT(dev_priv))
		return 0;

	file = mock_file(dev_priv);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&dev_priv->drm.struct_mutex);
	ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
	if (IS_ERR(ppgtt)) {
		err = PTR_ERR(ppgtt);
		goto out_unlock;
	}
	GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
	GEM_BUG_ON(ppgtt->vm.closed);

	err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

	i915_ppgtt_close(&ppgtt->vm);
	i915_ppgtt_put(ppgtt);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	mock_file_free(dev_priv, file);
	return err;
}

static int igt_ppgtt_fill(void *arg)
{
	return exercise_ppgtt(arg, fill_hole);
}

static int igt_ppgtt_walk(void *arg)
{
	return exercise_ppgtt(arg, walk_hole);
}

static int igt_ppgtt_pot(void *arg)
{
	return exercise_ppgtt(arg, pot_hole);
}

static int igt_ppgtt_drunk(void *arg)
{
	return exercise_ppgtt(arg, drunk_hole);
}

static int igt_ppgtt_lowlevel(void *arg)
{
	return exercise_ppgtt(arg, lowlevel_hole);
}

static int igt_ppgtt_shrink(void *arg)
{
	return exercise_ppgtt(arg, shrink_hole);
}

static int igt_ppgtt_shrink_boom(void *arg)
{
	return exercise_ppgtt(arg, shrink_boom);
}

static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
{
	struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
	struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);

	if (a->start < b->start)
		return -1;
	else
		return 1;
}

static int exercise_ggtt(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	u64 hole_start, hole_end, last = 0;
	struct drm_mm_node *node;
	IGT_TIMEOUT(end_time);
	int err = 0;

	mutex_lock(&i915->drm.struct_mutex);
restart:
	list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
	drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
		if (hole_start < last)
			continue;

		if (ggtt->vm.mm.color_adjust)
			ggtt->vm.mm.color_adjust(node, 0,
						 &hole_start, &hole_end);
		if (hole_start >= hole_end)
			continue;

		err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
		if (err)
			break;

		/* As we have manipulated the drm_mm, the list may be corrupt */
		last = hole_end;
		goto restart;
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int igt_ggtt_fill(void *arg)
{
	return exercise_ggtt(arg, fill_hole);
}

static int igt_ggtt_walk(void *arg)
{
	return exercise_ggtt(arg, walk_hole);
}

static int igt_ggtt_pot(void *arg)
{
	return exercise_ggtt(arg, pot_hole);
}

static int igt_ggtt_drunk(void *arg)
{
	return exercise_ggtt(arg, drunk_hole);
}

static int igt_ggtt_lowlevel(void *arg)
{
	return exercise_ggtt(arg, lowlevel_hole);
}

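/*
 * igt_ggtt_page() reserves a temporary node in the mappable aperture,
 * points each page of it at the same dma address with insert_page(), then
 * writes and reads back distinct dwords through the GGTT iomap in random
 * order to check that every PTE points where expected.
 */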
static int igt_ggtt_page(void *arg)
	const unsigned int count = PAGE_SIZE/sizeof(u32);
	I915_RND_STATE(prng);
	struct drm_i915_private *i915 = arg;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node tmp;
	unsigned int *order, n;

	mutex_lock(&i915->drm.struct_mutex);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);

	err = i915_gem_object_pin_pages(obj);

	memset(&tmp, 0, sizeof(tmp));
	err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
					  count * PAGE_SIZE, 0,
					  I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,

	intel_runtime_pm_get(i915);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + n * PAGE_SIZE;

		ggtt->vm.insert_page(&ggtt->vm,
				     i915_gem_object_get_dma_address(obj, 0),
				     offset, I915_CACHE_NONE, 0);

	order = i915_random_order(count, &prng);

	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		iowrite32(n, vaddr + n);
		io_mapping_unmap_atomic(vaddr);

	i915_gem_flush_ggtt_writes(i915);

	i915_random_reorder(order, count, &prng);
	for (n = 0; n < count; n++) {
		u64 offset = tmp.start + order[n] * PAGE_SIZE;

		vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
		val = ioread32(vaddr + n);
		io_mapping_unmap_atomic(vaddr);

			pr_err("insert page failed: found %d, expected %d\n",

	ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
	intel_runtime_pm_put(i915);
	drm_mm_remove_node(&tmp);

	i915_gem_object_unpin_pages(obj);

	i915_gem_object_put(obj);

	mutex_unlock(&i915->drm.struct_mutex);

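/*
 * track_vma_bind() fakes the bookkeeping normally performed by the bind
 * paths (bind count, pinned pages, vm link) so that the mock GGTT tests
 * below can exercise reservation, insertion and eviction without a real
 * backend.
 */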
static void track_vma_bind(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;

	obj->bind_count++; /* track for eviction later */
	__i915_gem_object_pin_pages(obj);

	vma->pages = obj->mm.pages;
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
}

static int exercise_mock(struct drm_i915_private *i915,
			 int (*func)(struct drm_i915_private *i915,
				     struct i915_address_space *vm,
				     u64 hole_start, u64 hole_end,
				     unsigned long end_time))
{
	struct i915_gem_context *ctx;
	struct i915_hw_ppgtt *ppgtt;
	IGT_TIMEOUT(end_time);
	int err;

	ctx = mock_context(i915, "mock");
	if (!ctx)
		return -ENOMEM;

	ppgtt = ctx->ppgtt;
	GEM_BUG_ON(!ppgtt);

	err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time);

	mock_context_close(ctx);
	return err;
}

static int igt_mock_fill(void *arg)
{
	return exercise_mock(arg, fill_hole);
}

static int igt_mock_walk(void *arg)
{
	return exercise_mock(arg, walk_hole);
}

static int igt_mock_pot(void *arg)
{
	return exercise_mock(arg, pot_hole);
}

static int igt_mock_drunk(void *arg)
{
	return exercise_mock(arg, drunk_hole);
}

static int igt_gtt_reserve(void *arg)
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;

	/* i915_gem_gtt_reserve() tries to reserve the precise range
	 * for the node, and evicts if it has to. So our test checks that
	 * it can give us the requested space and prevent overlaps.
	 */

	/* Start by filling the GGTT */
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);

		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
			pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);

	/* Now we start forcing evictions */
	for (total = I915_GTT_PAGE_SIZE;
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);

		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
			pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != total ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       total, 2*I915_GTT_PAGE_SIZE);

	/* And then try at random */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);

		err = i915_vma_unbind(vma);
			pr_err("i915_vma_unbind failed with err=%d!\n", err);

		offset = random_offset(0, i915->ggtt.vm.total,
				       2*I915_GTT_PAGE_SIZE,
				       I915_GTT_MIN_ALIGNMENT);

		err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
			pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset ||
		    vma->node.size != 2*I915_GTT_PAGE_SIZE) {
			pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
			       vma->node.start, vma->node.size,
			       offset, 2*I915_GTT_PAGE_SIZE);

	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

static int igt_gtt_insert(void *arg)
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj, *on;
	struct drm_mm_node tmp = {};
	const struct invalid_insert {
	} invalid_insert[] = {
			i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
			0, i915->ggtt.vm.total,

			2*I915_GTT_PAGE_SIZE, 0,
			0, I915_GTT_PAGE_SIZE,

			-(u64)I915_GTT_PAGE_SIZE, 0,
			0, 4*I915_GTT_PAGE_SIZE,

			-(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
			0, 4*I915_GTT_PAGE_SIZE,

			I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
			I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,

	/* i915_gem_gtt_insert() tries to allocate some free space in the GTT
	 * to the node, evicting if required.
	 */

	/* Check a couple of obviously invalid requests */
	for (ii = invalid_insert; ii->size; ii++) {
		err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
					  ii->size, ii->alignment,
					  I915_COLOR_UNEVICTABLE,
		if (err != -ENOSPC) {
			pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n",
			       ii->size, ii->alignment, ii->start, ii->end,

	/* Start by filling the GGTT */
	     total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     total += I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);

		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.vm.total,
		if (err == -ENOSPC) {
			/* maxed out the GGTT space */
			i915_gem_object_put(obj);

			pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);

		track_vma_bind(vma);
		__i915_vma_pin(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	list_for_each_entry(obj, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);

		if (!drm_mm_node_allocated(&vma->node)) {
			pr_err("VMA was unexpectedly evicted!\n");

		__i915_vma_unpin(vma);

	/* If we then reinsert, we should find the same hole */
	list_for_each_entry_safe(obj, on, &objects, st_link) {
		struct i915_vma *vma;

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		offset = vma->node.start;

		err = i915_vma_unbind(vma);
			pr_err("i915_vma_unbind failed with err=%d!\n", err);

		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.vm.total,
			pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
		if (vma->node.start != offset) {
			pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
			       offset, vma->node.start);

	/* And then force evictions */
	     total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
	     total += 2*I915_GTT_PAGE_SIZE) {
		struct i915_vma *vma;

		obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);

		err = i915_gem_object_pin_pages(obj);
			i915_gem_object_put(obj);

		list_add(&obj->st_link, &objects);

		vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);

		err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
					  obj->base.size, 0, obj->cache_level,
					  0, i915->ggtt.vm.total,
			pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
			       total, i915->ggtt.vm.total, err);

		track_vma_bind(vma);

		GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	list_for_each_entry_safe(obj, on, &objects, st_link) {
		i915_gem_object_unpin_pages(obj);
		i915_gem_object_put(obj);

int i915_gem_gtt_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_mock_drunk),
		SUBTEST(igt_mock_walk),
		SUBTEST(igt_mock_pot),
		SUBTEST(igt_mock_fill),
		SUBTEST(igt_gtt_reserve),
		SUBTEST(igt_gtt_insert),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	mutex_lock(&i915->drm.struct_mutex);
	err = i915_subtests(tests, i915);
	mutex_unlock(&i915->drm.struct_mutex);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_ppgtt_alloc),
		SUBTEST(igt_ppgtt_lowlevel),
		SUBTEST(igt_ppgtt_drunk),
		SUBTEST(igt_ppgtt_walk),
		SUBTEST(igt_ppgtt_pot),
		SUBTEST(igt_ppgtt_fill),
		SUBTEST(igt_ppgtt_shrink),
		SUBTEST(igt_ppgtt_shrink_boom),
		SUBTEST(igt_ggtt_lowlevel),
		SUBTEST(igt_ggtt_drunk),
		SUBTEST(igt_ggtt_walk),
		SUBTEST(igt_ggtt_pot),
		SUBTEST(igt_ggtt_fill),
		SUBTEST(igt_ggtt_page),
	};

	GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));

	return i915_subtests(tests, i915);
}