/*
 * SPDX-License-Identifier: MIT
 * Copyright © 2017 Intel Corporation
 */
#include <linux/prime_numbers.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_reset.h"
#include "i915_selftest.h"

#include "gem/selftests/igt_gem_utils.h"
#include "selftests/i915_random.h"
#include "selftests/igt_flush_test.h"
#include "selftests/igt_live_test.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"
#include "selftests/mock_drm.h"
#include "selftests/mock_gem_device.h"

#include "huge_gem_object.h"
#include "igt_gem_utils.h"

#define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))

static int live_nop_switch(void *arg)
{
	const unsigned int nctx = 1024;
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	struct i915_gem_context **ctx;
	enum intel_engine_id id;
	struct igt_live_test t;
	struct drm_file *file;
	unsigned long n;
	int err = -ENODEV;

	/*
	 * Create as many contexts as we can feasibly get away with
	 * and check we can switch between them rapidly.
	 *
	 * Serves as a very simple stress test for submission and HW switching
	 * between contexts.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		err = -ENOMEM;
		goto out_unlock;
	}

	for (n = 0; n < nctx; n++) {
		ctx[n] = live_context(i915, file);
		if (IS_ERR(ctx[n])) {
			err = PTR_ERR(ctx[n]);
			goto out_unlock;
		}
	}

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;
		unsigned long end_time, prime;
		ktime_t times[2] = {};

		times[0] = ktime_get_raw();
		for (n = 0; n < nctx; n++) {
			rq = igt_request_alloc(ctx[n], engine);
			if (IS_ERR(rq)) {
				err = PTR_ERR(rq);
				goto out_unlock;
			}
			i915_request_add(rq);
		}
		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
			pr_err("Failed to populate %d contexts\n", nctx);
			intel_gt_set_wedged(&i915->gt);
			err = -EIO;
			goto out_unlock;
		}

		times[1] = ktime_get_raw();

		pr_info("Populated %d contexts on %s in %lluns\n",
			nctx, engine->name, ktime_to_ns(times[1] - times[0]));

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		end_time = jiffies + i915_selftest.timeout_jiffies;
		for_each_prime_number_from(prime, 2, 8192) {
			times[1] = ktime_get_raw();

			for (n = 0; n < prime; n++) {
				rq = igt_request_alloc(ctx[n % nctx], engine);
				if (IS_ERR(rq)) {
					err = PTR_ERR(rq);
					goto out_unlock;
				}

				/*
				 * This space is left intentionally blank.
				 *
				 * We do not actually want to perform any
				 * action with this request, we just want
				 * to measure the latency in allocation
				 * and submission of our breadcrumbs -
				 * ensuring that the bare request is sufficient
				 * for the system to work (i.e. proper HEAD
				 * tracking of the rings, interrupt handling,
				 * etc). It also gives us the lowest bounds
				 * for latency.
				 */

				i915_request_add(rq);
			}
			if (i915_request_wait(rq, 0, HZ / 5) < 0) {
				pr_err("Switching between %ld contexts timed out\n",
				       prime);
				intel_gt_set_wedged(&i915->gt);
				break;
			}

			times[1] = ktime_sub(ktime_get_raw(), times[1]);
			if (prime == 2)
				times[0] = times[1];

			if (__igt_timeout(end_time, NULL))
				break;
		}

		err = igt_live_test_end(&t);
		if (err)
			goto out_unlock;

		pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
			engine->name,
			ktime_to_ns(times[0]),
			prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
	}

out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	mock_file_free(i915, file);
	return err;
}

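/*
 * Each parallel_switch worker owns one pinned context per slot: ce[0] on
 * the template context and ce[1] on a clone. The kthread below ping-pongs
 * requests between the two, forcing back-to-back context switches on a
 * single engine while every other engine does the same.
 */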
struct parallel_switch {
	struct task_struct *tsk;
	struct intel_context *ce[2];
};

static int __live_parallel_switch1(void *data)
{
	struct parallel_switch *arg = data;
	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
	IGT_TIMEOUT(end_time);
	unsigned long count;

	count = 0;
	do {
		struct i915_request *rq = NULL;
		int err, n;

		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
			i915_request_put(rq);

			mutex_lock(&i915->drm.struct_mutex);
			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				mutex_unlock(&i915->drm.struct_mutex);
				return PTR_ERR(rq);
			}

			i915_request_get(rq);
			i915_request_add(rq);
			mutex_unlock(&i915->drm.struct_mutex);
		}

		err = 0;
		if (i915_request_wait(rq, 0, HZ / 5) < 0)
			err = -ETIME;
		i915_request_put(rq);
		if (err)
			return err;

		count++;
	} while (!__igt_timeout(end_time, NULL));

	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
	return 0;
}

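/*
 * Unlike __live_parallel_switch1, which waits for each pair of requests
 * to complete before submitting the next (sync), this variant keeps
 * submitting without waiting, measuring how many switches we can queue up.
 */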
static int __live_parallel_switchN(void *data)
{
	struct parallel_switch *arg = data;
	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
	IGT_TIMEOUT(end_time);
	unsigned long count;
	int n;

	count = 0;
	do {
		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
			struct i915_request *rq;

			mutex_lock(&i915->drm.struct_mutex);
			rq = i915_request_create(arg->ce[n]);
			if (IS_ERR(rq)) {
				mutex_unlock(&i915->drm.struct_mutex);
				return PTR_ERR(rq);
			}

			i915_request_add(rq);
			mutex_unlock(&i915->drm.struct_mutex);
		}

		count++;
	} while (!__igt_timeout(end_time, NULL));

	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
	return 0;
}

static int live_parallel_switch(void *arg)
{
	struct drm_i915_private *i915 = arg;
	static int (* const func[])(void *arg) = {
		__live_parallel_switch1,
		__live_parallel_switchN,
		NULL,
	};
	struct parallel_switch *data = NULL;
	struct i915_gem_engines *engines;
	struct i915_gem_engines_iter it;
	int (* const *fn)(void *arg);
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct drm_file *file;
	int n, m, count = 0;
	int err = 0;

	/*
	 * Check we can process switches on all engines simultaneously.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_locked;
	}

	engines = i915_gem_context_lock_engines(ctx);
	count = engines->num_engines;

	data = kcalloc(count, sizeof(*data), GFP_KERNEL);
	if (!data) {
		i915_gem_context_unlock_engines(ctx);
		err = -ENOMEM;
		goto out_locked;
	}

	m = 0; /* Use the first context as our template for the engines */
	for_each_gem_engine(ce, engines, it) {
		err = intel_context_pin(ce);
		if (err) {
			i915_gem_context_unlock_engines(ctx);
			goto out_locked;
		}
		data[m++].ce[0] = intel_context_get(ce);
	}
	i915_gem_context_unlock_engines(ctx);

	/* Clone the same set of engines into the other contexts */
	for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
		ctx = live_context(i915, file);
		if (IS_ERR(ctx)) {
			err = PTR_ERR(ctx);
			goto out_locked;
		}

		for (m = 0; m < count; m++) {
			if (!data[m].ce[0])
				continue;

			ce = intel_context_create(ctx, data[m].ce[0]->engine);
			if (IS_ERR(ce)) {
				err = PTR_ERR(ce);
				goto out_locked;
			}

			err = intel_context_pin(ce);
			if (err) {
				intel_context_put(ce);
				goto out_locked;
			}

			data[m].ce[n] = ce;
		}
	}

	mutex_unlock(&i915->drm.struct_mutex);

	for (fn = func; !err && *fn; fn++) {
		struct igt_live_test t;

		mutex_lock(&i915->drm.struct_mutex);
		err = igt_live_test_begin(&t, i915, __func__, "");
		mutex_unlock(&i915->drm.struct_mutex);
		if (err)
			break;

		for (n = 0; n < count; n++) {
			if (!data[n].ce[0])
				continue;

			data[n].tsk = kthread_run(*fn, &data[n],
						  "igt/parallel:%s",
						  data[n].ce[0]->engine->name);
			if (IS_ERR(data[n].tsk)) {
				err = PTR_ERR(data[n].tsk);
				break;
			}
			get_task_struct(data[n].tsk);
		}

		for (n = 0; n < count; n++) {
			int status;

			if (IS_ERR_OR_NULL(data[n].tsk))
				continue;

			status = kthread_stop(data[n].tsk);
			if (status && !err)
				err = status;

			put_task_struct(data[n].tsk);
			data[n].tsk = NULL;
		}

		mutex_lock(&i915->drm.struct_mutex);
		if (igt_live_test_end(&t))
			err = -EIO;
		mutex_unlock(&i915->drm.struct_mutex);
	}

	mutex_lock(&i915->drm.struct_mutex);
out_locked:
	for (n = 0; n < count; n++) {
		for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
			if (!data[n].ce[m])
				continue;

			intel_context_unpin(data[n].ce[m]);
			intel_context_put(data[n].ce[m]);
		}
	}
	mutex_unlock(&i915->drm.struct_mutex);
	kfree(data);
	mock_file_free(i915, file);
	return err;
}

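/*
 * A huge_gem_object is backed by fewer physical pages than its GTT size:
 * real_page_count() is the number of distinct physical pages, while
 * fake_page_count() is the (larger) number of GTT pages mapped onto them.
 */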
static unsigned long real_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
}

static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
{
	return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
}

static int gpu_fill(struct intel_context *ce,
		    struct drm_i915_gem_object *obj,
		    unsigned int dw)
{
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(obj->base.size > ce->vm->total);
	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
	if (err)
		return err;

	/*
	 * Within the GTT the huge object maps every page onto
	 * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
	 * We set the nth dword within the page using the nth
	 * mapping via the GTT - this should exercise the GTT mapping
	 * whilst checking that each context provides a unique view
	 * into the object.
	 */
	err = igt_gpu_fill_dw(ce, vma,
			      (dw * real_page_count(obj)) << PAGE_SHIFT |
			      (dw * sizeof(u32)),
			      real_page_count(obj),
			      dw);
	i915_vma_unpin(vma);

	return err;
}

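/*
 * cpu_fill() seeds every dword with a known poison (the tests use
 * STACK_MAGIC) so that cpu_check() can later tell dwords written by the
 * GPU (each expected to hold its dword index) apart from untouched ones.
 */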
static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
{
	const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
	unsigned int n, m, need_flush;
	int err;

	err = i915_gem_object_prepare_write(obj, &need_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		for (m = 0; m < DW_PER_PAGE; m++)
			map[m] = value;
		if (!has_llc)
			drm_clflush_virt_range(map, PAGE_SIZE);
		kunmap_atomic(map);
	}

	i915_gem_object_finish_access(obj);
	obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
	obj->write_domain = 0;
	return 0;
}

static noinline int cpu_check(struct drm_i915_gem_object *obj,
			      unsigned int idx, unsigned int max)
{
	unsigned int n, m, needs_flush;
	int err;

	err = i915_gem_object_prepare_read(obj, &needs_flush);
	if (err)
		return err;

	for (n = 0; n < real_page_count(obj); n++) {
		u32 *map;

		map = kmap_atomic(i915_gem_object_get_page(obj, n));
		if (needs_flush & CLFLUSH_BEFORE)
			drm_clflush_virt_range(map, PAGE_SIZE);

		for (m = 0; m < max; m++) {
			if (map[m] != m) {
				pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
				       __builtin_return_address(0), idx,
				       n, real_page_count(obj), m, max,
				       map[m], m);
				err = -EINVAL;
				goto out_unmap;
			}
		}

		for (; m < DW_PER_PAGE; m++) {
			if (map[m] != STACK_MAGIC) {
				pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
				       __builtin_return_address(0), idx, n, m,
				       map[m], STACK_MAGIC);
				err = -EINVAL;
				goto out_unmap;
			}
		}

out_unmap:
		kunmap_atomic(map);
		if (err)
			break;
	}

	i915_gem_object_finish_access(obj);
	return err;
}

static int file_add_object(struct drm_file *file,
			   struct drm_i915_gem_object *obj)
{
	int err;

	GEM_BUG_ON(obj->base.handle_count);

	/* tie the object to the drm_file for easy reaping */
	err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
	if (err < 0)
		return err;

	i915_gem_object_get(obj);
	obj->base.handle_count++;
	return 0;
}

static struct drm_i915_gem_object *
create_test_object(struct i915_address_space *vm,
		   struct drm_file *file,
		   struct list_head *objects)
{
	struct drm_i915_gem_object *obj;
	u64 size;
	int err;

	/* Keep in GEM's good graces */
	i915_retire_requests(vm->i915);

	size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
	size = round_down(size, DW_PER_PAGE * PAGE_SIZE);

	obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
	if (IS_ERR(obj))
		return obj;

	err = file_add_object(file, obj);
	i915_gem_object_put(obj);
	if (err)
		return ERR_PTR(err);

	err = cpu_fill(obj, STACK_MAGIC);
	if (err) {
		pr_err("Failed to fill object with cpu, err=%d\n",
		       err);
		return ERR_PTR(err);
	}

	list_add_tail(&obj->st_link, objects);
	return obj;
}

static unsigned long max_dwords(struct drm_i915_gem_object *obj)
{
	unsigned long npages = fake_page_count(obj);

	GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
	return npages / DW_PER_PAGE;
}

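/*
 * The throttle helpers keep a small ring of in-flight requests (tq[]).
 * throttle() waits for the oldest entry before queuing a new request on
 * @ce, bounding how much work the tests can build up ahead of the GPU;
 * throttle_release() drops whatever references remain in the ring.
 */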
static void throttle_release(struct i915_request **q, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (IS_ERR_OR_NULL(q[i]))
			continue;

		i915_request_put(fetch_and_zero(&q[i]));
	}
}

static int throttle(struct intel_context *ce,
		    struct i915_request **q, int count)
{
	int i;

	if (!IS_ERR_OR_NULL(q[0])) {
		if (i915_request_wait(q[0],
				      I915_WAIT_INTERRUPTIBLE,
				      MAX_SCHEDULE_TIMEOUT) < 0)
			return -EINTR;

		i915_request_put(q[0]);
	}

	for (i = 0; i < count - 1; i++)
		q[i] = q[i + 1];

	q[i] = intel_context_create_request(ce);
	if (IS_ERR(q[i]))
		return PTR_ERR(q[i]);

	i915_request_get(q[i]);
	i915_request_add(q[i]);

	return 0;
}

static int igt_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = -ENODEV;

	/*
	 * Create a few different contexts (with different mm) and write
	 * through each ctx/mm using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	for_each_engine(engine, i915, id) {
		struct drm_i915_gem_object *obj = NULL;
		unsigned long ncontexts, ndwords, dw;
		struct i915_request *tq[5] = {};
		struct igt_live_test t;
		struct drm_file *file;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		if (!engine->context_size)
			continue; /* No logical context support in HW */

		file = mock_file(i915);
		if (IS_ERR(file))
			return PTR_ERR(file);

		mutex_lock(&i915->drm.struct_mutex);

		err = igt_live_test_begin(&t, i915, __func__, engine->name);
		if (err)
			goto out_unlock;

		ncontexts = 0;
		ndwords = 0;
		dw = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_unlock;
			}

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_unlock;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_unlock;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}

		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				break;

			dw += rem;
		}

out_unlock:
		throttle_release(tq, ARRAY_SIZE(tq));
		if (igt_live_test_end(&t))
			err = -EIO;
		mutex_unlock(&i915->drm.struct_mutex);

		mock_file_free(i915, file);
		if (err)
			return err;

		i915_gem_drain_freed_objects(i915);
	}

	return 0;
}

static int igt_shared_ctx_exec(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_request *tq[5] = {};
	struct i915_gem_context *parent;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct igt_live_test t;
	struct drm_file *file;
	int err = 0;

	/*
	 * Create a few different contexts with the same mm and write
	 * through each ctx using the GPU making sure those writes end
	 * up in the expected pages of our obj.
	 */

	if (!DRIVER_CAPS(i915)->has_logical_contexts)
		return 0;

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	parent = live_context(i915, file);
	if (IS_ERR(parent)) {
		err = PTR_ERR(parent);
		goto out_unlock;
	}

	if (!parent->vm) { /* not full-ppgtt; nothing to share */
		err = 0;
		goto out_unlock;
	}

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	for_each_engine(engine, i915, id) {
		unsigned long ncontexts, ndwords, dw;
		struct drm_i915_gem_object *obj = NULL;
		IGT_TIMEOUT(end_time);
		LIST_HEAD(objects);

		if (!intel_engine_can_store_dword(engine))
			continue;

		dw = 0;
		ndwords = 0;
		ncontexts = 0;
		while (!time_after(jiffies, end_time)) {
			struct i915_gem_context *ctx;
			struct intel_context *ce;

			ctx = kernel_context(i915);
			if (IS_ERR(ctx)) {
				err = PTR_ERR(ctx);
				goto out_test;
			}

			__assign_ppgtt(ctx, parent->vm);

			ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
			GEM_BUG_ON(IS_ERR(ce));

			if (!obj) {
				obj = create_test_object(parent->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					intel_context_put(ce);
					kernel_context_close(ctx);
					goto out_test;
				}
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				intel_context_put(ce);
				kernel_context_close(ctx);
				goto out_test;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}

			ndwords++;
			ncontexts++;

			intel_context_put(ce);
			kernel_context_close(ctx);
		}
		pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
			ncontexts, engine->name, ndwords);

		ncontexts = dw = 0;
		list_for_each_entry(obj, &objects, st_link) {
			unsigned int rem =
				min_t(unsigned int, ndwords - dw, max_dwords(obj));

			err = cpu_check(obj, ncontexts++, rem);
			if (err)
				goto out_test;

			dw += rem;
		}

		mutex_unlock(&i915->drm.struct_mutex);
		i915_gem_drain_freed_objects(i915);
		mutex_lock(&i915->drm.struct_mutex);
	}
out_test:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

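/*
 * Build a small batch that reads back R_PWR_CLK_STATE (RPCS) with
 * MI_STORE_REGISTER_MEM into the target vma, so we can inspect the
 * slice/subslice configuration that was live when the batch executed.
 */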
static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj;
	u32 *cmd;
	int err;

	if (INTEL_GEN(vma->vm->i915) < 8)
		return ERR_PTR(-EINVAL);

	obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
	*cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
	*cmd++ = lower_32_bits(vma->node.start);
	*cmd++ = upper_32_bits(vma->node.start);
	*cmd = MI_BATCH_BUFFER_END;

	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(vma->vm->gt);

	vma = i915_vma_instance(obj, vma->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		goto err;

	return vma;

err:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

static int
emit_rpcs_query(struct drm_i915_gem_object *obj,
		struct intel_context *ce,
		struct i915_request **rq_out)
{
	struct i915_request *rq;
	struct i915_vma *batch;
	struct i915_vma *vma;
	int err;

	GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));

	vma = i915_vma_instance(obj, ce->vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_gtt_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		return err;

	err = i915_vma_pin(vma, 0, 0, PIN_USER);
	if (err)
		return err;

	batch = rpcs_query_batch(vma);
	if (IS_ERR(batch)) {
		err = PTR_ERR(batch);
		goto err_vma;
	}

	rq = i915_request_create(ce);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_batch;
	}

	err = rq->engine->emit_bb_start(rq,
					batch->node.start, batch->node.size,
					0);
	if (err)
		goto skip_request;

	i915_vma_lock(batch);
	err = i915_request_await_object(rq, batch->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(batch, rq, 0);
	i915_vma_unlock(batch);
	if (err)
		goto skip_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(batch);
	i915_vma_close(batch);
	i915_vma_put(batch);

	i915_vma_unpin(vma);

	*rq_out = i915_request_get(rq);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_batch:
	i915_vma_unpin(batch);
err_vma:
	i915_vma_unpin(vma);
	return err;
}

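/*
 * Flags for the SSEU subtests: TEST_BUSY holds a spinner on the engine
 * while reconfiguring, TEST_RESET additionally resets the busy engine,
 * and TEST_IDLE re-reads the RPCS after the GPU has idled.
 */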
#define TEST_IDLE	BIT(0)
#define TEST_BUSY	BIT(1)
#define TEST_RESET	BIT(2)

static int
__sseu_prepare(const char *name,
	       unsigned int flags,
	       struct intel_context *ce,
	       struct igt_spinner **spin)
{
	struct i915_request *rq;
	int ret;

	*spin = NULL;
	if (!(flags & (TEST_BUSY | TEST_RESET)))
		return 0;

	*spin = kzalloc(sizeof(**spin), GFP_KERNEL);
	if (!*spin)
		return -ENOMEM;

	ret = igt_spinner_init(*spin, ce->engine->gt);
	if (ret)
		goto err_free;

	rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto err_fini;
	}

	i915_request_add(rq);

	if (!igt_wait_for_spinner(*spin, rq)) {
		pr_err("%s: Spinner failed to start!\n", name);
		ret = -ETIMEDOUT;
		goto err_end;
	}

	return 0;

err_end:
	igt_spinner_end(*spin);
err_fini:
	igt_spinner_fini(*spin);
err_free:
	kfree(fetch_and_zero(spin));
	return ret;
}

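/*
 * Submit an RPCS query in @ce's context (ending @spin first, if given, so
 * the query can execute), wait for it, and decode the enabled slice count
 * from the value the GPU wrote back. Returns the count or a -errno.
 */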
static int
__read_slice_count(struct intel_context *ce,
		   struct drm_i915_gem_object *obj,
		   struct igt_spinner *spin,
		   u32 *rpcs)
{
	struct i915_request *rq = NULL;
	u32 s_mask, s_shift;
	unsigned int cnt;
	u32 *buf, val;
	long ret;

	ret = emit_rpcs_query(obj, ce, &rq);
	if (ret)
		return ret;

	if (spin)
		igt_spinner_end(spin);

	ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);
	if (ret < 0)
		return ret;

	buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	if (INTEL_GEN(ce->engine->i915) >= 11) {
		s_mask = GEN11_RPCS_S_CNT_MASK;
		s_shift = GEN11_RPCS_S_CNT_SHIFT;
	} else {
		s_mask = GEN8_RPCS_S_CNT_MASK;
		s_shift = GEN8_RPCS_S_CNT_SHIFT;
	}

	val = *buf;
	cnt = (val & s_mask) >> s_shift;
	*rpcs = val;

	i915_gem_object_unpin_map(obj);

	return cnt;
}

static int
__check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
	     const char *prefix, const char *suffix)
{
	if (slices == expected)
		return 0;

	if (slices < 0) {
		pr_err("%s: %s read slice count failed with %d%s\n",
		       name, prefix, slices, suffix);
		return slices;
	}

	pr_err("%s: %s slice count %d is not %u%s\n",
	       name, prefix, slices, expected, suffix);

	pr_info("RPCS=0x%x; %u%sx%u%s\n",
		rpcs, slices,
		(rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
		(rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
		(rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");

	return -EINVAL;
}

static int
__sseu_finish(const char *name,
	      unsigned int flags,
	      struct intel_context *ce,
	      struct drm_i915_gem_object *obj,
	      unsigned int expected,
	      struct igt_spinner *spin)
{
	unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
	u32 rpcs = 0;
	int ret = 0;

	if (flags & TEST_RESET) {
		ret = intel_engine_reset(ce->engine, "sseu");
		if (ret)
			goto out;
	}

	ret = __read_slice_count(ce, obj,
				 flags & TEST_RESET ? NULL : spin, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
	if (ret)
		goto out;

	ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
	ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");

out:
	if (spin)
		igt_spinner_end(spin);

	if ((flags & TEST_IDLE) && ret == 0) {
		ret = i915_gem_wait_for_idle(ce->engine->i915,
					     0, MAX_SCHEDULE_TIMEOUT);
		if (ret)
			return ret;

		ret = __read_slice_count(ce, obj, NULL, &rpcs);
		ret = __check_rpcs(name, rpcs, ret, expected,
				   "Context", " after idle!");
	}

	return ret;
}

static int
__sseu_test(const char *name,
	    unsigned int flags,
	    struct intel_context *ce,
	    struct drm_i915_gem_object *obj,
	    struct intel_sseu sseu)
{
	struct igt_spinner *spin = NULL;
	int ret;

	ret = __sseu_prepare(name, flags, ce, &spin);
	if (ret)
		return ret;

	ret = __intel_context_reconfigure_sseu(ce, sseu);
	if (ret)
		goto out_spin;

	ret = __sseu_finish(name, flags, ce, obj,
			    hweight32(sseu.slice_mask), spin);

out_spin:
	if (spin) {
		igt_spinner_end(spin);
		igt_spinner_fini(spin);
		kfree(spin);
	}
	return ret;
}

static int
__igt_ctx_sseu(struct drm_i915_private *i915,
	       const char *name,
	       unsigned int flags)
{
	struct intel_engine_cs *engine = i915->engine[RCS0];
	struct drm_i915_gem_object *obj;
	struct i915_gem_context *ctx;
	struct intel_context *ce;
	struct intel_sseu pg_sseu;
	struct drm_file *file;
	int ret;

	if (INTEL_GEN(i915) < 9 || !engine)
		return 0;

	if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
		return 0;

	if (hweight32(engine->sseu.slice_mask) < 2)
		return 0;

	/*
	 * Gen11 VME friendly power-gated configuration with half enabled
	 * sub-slices.
	 */
	pg_sseu = engine->sseu;
	pg_sseu.slice_mask = 1;
	pg_sseu.subslice_mask =
		~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));

	pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
		name, flags, hweight32(engine->sseu.slice_mask),
		hweight32(pg_sseu.slice_mask));

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (flags & TEST_RESET)
		igt_global_reset_lock(&i915->gt);

	mutex_lock(&i915->drm.struct_mutex);

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		ret = PTR_ERR(ctx);
		goto out_unlock;
	}
	i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj)) {
		ret = PTR_ERR(obj);
		goto out_unlock;
	}

	ce = i915_gem_context_get_engine(ctx, RCS0);
	if (IS_ERR(ce)) {
		ret = PTR_ERR(ce);
		goto out_put;
	}

	ret = intel_context_pin(ce);
	if (ret)
		goto out_ce;

	/* First set the default mask. */
	ret = __sseu_test(name, flags, ce, obj, engine->sseu);
	if (ret)
		goto out_fail;

	/* Then set a power-gated configuration. */
	ret = __sseu_test(name, flags, ce, obj, pg_sseu);
	if (ret)
		goto out_fail;

	/* Back to defaults. */
	ret = __sseu_test(name, flags, ce, obj, engine->sseu);
	if (ret)
		goto out_fail;

	/* One last power-gated configuration for the road. */
	ret = __sseu_test(name, flags, ce, obj, pg_sseu);

out_fail:
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		ret = -EIO;

	intel_context_unpin(ce);
out_ce:
	intel_context_put(ce);
out_put:
	i915_gem_object_put(obj);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);

	if (flags & TEST_RESET)
		igt_global_reset_unlock(&i915->gt);

	mock_file_free(i915, file);

	if (ret)
		pr_err("%s: Failed with %d!\n", name, ret);

	return ret;
}

static int igt_ctx_sseu(void *arg)
{
	struct {
		const char *name;
		unsigned int flags;
	} *phase, phases[] = {
		{ .name = "basic", .flags = 0 },
		{ .name = "idle", .flags = TEST_IDLE },
		{ .name = "busy", .flags = TEST_BUSY },
		{ .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
		{ .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
		{ .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
	};
	unsigned int i;
	int ret = 0;

	for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
	     i++, phase++)
		ret = __igt_ctx_sseu(arg, phase->name, phase->flags);

	return ret;
}

static int igt_ctx_readonly(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct drm_i915_gem_object *obj = NULL;
	struct i915_request *tq[5] = {};
	struct i915_address_space *vm;
	struct i915_gem_context *ctx;
	unsigned long idx, ndwords, dw;
	struct igt_live_test t;
	struct drm_file *file;
	I915_RND_STATE(prng);
	IGT_TIMEOUT(end_time);
	LIST_HEAD(objects);
	int err = -ENODEV;

	/*
	 * Create a few read-only objects (with the occasional writable object)
	 * and try to write into these objects, checking that the GPU discards
	 * any write to a read-only object.
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	ctx = live_context(i915, file);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto out_unlock;
	}

	vm = ctx->vm ?: &i915->ggtt.alias->vm;
	if (!vm || !vm->has_read_only) {
		err = 0;
		goto out_unlock;
	}

	ndwords = 0;
	dw = 0;
	while (!time_after(jiffies, end_time)) {
		struct i915_gem_engines_iter it;
		struct intel_context *ce;

		for_each_gem_engine(ce,
				    i915_gem_context_lock_engines(ctx), it) {
			if (!intel_engine_can_store_dword(ce->engine))
				continue;

			if (!obj) {
				obj = create_test_object(ce->vm, file, &objects);
				if (IS_ERR(obj)) {
					err = PTR_ERR(obj);
					i915_gem_context_unlock_engines(ctx);
					goto out_unlock;
				}

				if (prandom_u32_state(&prng) & 1)
					i915_gem_object_set_readonly(obj);
			}

			err = gpu_fill(ce, obj, dw);
			if (err) {
				pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
				       ndwords, dw, max_dwords(obj),
				       ce->engine->name, ctx->hw_id,
				       yesno(!!ctx->vm), err);
				i915_gem_context_unlock_engines(ctx);
				goto out_unlock;
			}

			err = throttle(ce, tq, ARRAY_SIZE(tq));
			if (err) {
				i915_gem_context_unlock_engines(ctx);
				goto out_unlock;
			}

			if (++dw == max_dwords(obj)) {
				obj = NULL;
				dw = 0;
			}
			ndwords++;
		}
		i915_gem_context_unlock_engines(ctx);
	}
	pr_info("Submitted %lu dwords (across %u engines)\n",
		ndwords, RUNTIME_INFO(i915)->num_engines);

	dw = 0;
	idx = 0;
	list_for_each_entry(obj, &objects, st_link) {
		unsigned int rem =
			min_t(unsigned int, ndwords - dw, max_dwords(obj));
		unsigned int num_writes;

		num_writes = rem;
		if (i915_gem_object_is_readonly(obj))
			num_writes = 0;

		err = cpu_check(obj, idx++, num_writes);
		if (err)
			break;

		dw += rem;
	}

out_unlock:
	throttle_release(tq, ARRAY_SIZE(tq));
	if (igt_live_test_end(&t))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

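/*
 * Confirm nothing is bound at the chosen scratch offset; the batches in
 * write_to_scratch()/read_from_scratch() rely on the target address being
 * unpopulated so a stray write or read cannot hit a real buffer.
 */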
static int check_scratch(struct i915_gem_context *ctx, u64 offset)
{
	struct drm_mm_node *node =
		__drm_mm_interval_first(&ctx->vm->mm,
					offset, offset + sizeof(u32) - 1);
	if (!node || node->start > offset)
		return 0;

	GEM_BUG_ON(offset >= node->start + node->size);

	pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
	       upper_32_bits(offset), lower_32_bits(offset));
	return -EINVAL;
}

static int write_to_scratch(struct i915_gem_context *ctx,
			    struct intel_engine_cs *engine,
			    u64 offset, u32 value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*cmd++ = MI_STORE_DWORD_IMM_GEN4;
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
	} else {
		*cmd++ = 0;
		*cmd++ = offset;
	}
	*cmd++ = value;
	*cmd = MI_BATCH_BUFFER_END;
	__i915_gem_object_flush_map(obj, 0, 64);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(engine->gt);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto err;

	err = check_scratch(ctx, offset);
	if (err)
		goto err_unpin;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, false);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, 0);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);
	i915_vma_put(vma);

	i915_request_add(rq);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return err;
}

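/*
 * Read the dword at @offset in the context's GTT by bouncing it through
 * a general purpose register (RCS_GPR0): MI_LOAD_REGISTER_MEM from the
 * scratch address, then MI_STORE_REGISTER_MEM into our result buffer.
 */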
static int read_from_scratch(struct i915_gem_context *ctx,
			     struct intel_engine_cs *engine,
			     u64 offset, u32 *value)
{
	struct drm_i915_private *i915 = ctx->i915;
	struct drm_i915_gem_object *obj;
	const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
	const u32 result = 0x100;
	struct i915_request *rq;
	struct i915_vma *vma;
	u32 *cmd;
	int err;

	GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	memset(cmd, POISON_INUSE, PAGE_SIZE);
	if (INTEL_GEN(i915) >= 8) {
		*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = lower_32_bits(offset);
		*cmd++ = upper_32_bits(offset);
		*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
		*cmd++ = 0;
	} else {
		*cmd++ = MI_LOAD_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = offset;
		*cmd++ = MI_STORE_REGISTER_MEM;
		*cmd++ = RCS_GPR0;
		*cmd++ = result;
	}
	*cmd = MI_BATCH_BUFFER_END;

	i915_gem_object_flush_map(obj);
	i915_gem_object_unpin_map(obj);

	intel_gt_chipset_flush(engine->gt);

	vma = i915_vma_instance(obj, ctx->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
	if (err)
		goto err;

	err = check_scratch(ctx, offset);
	if (err)
		goto err_unpin;

	rq = igt_request_alloc(ctx, engine);
	if (IS_ERR(rq)) {
		err = PTR_ERR(rq);
		goto err_unpin;
	}

	err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
	if (err)
		goto err_request;

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (err == 0)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);
	if (err)
		goto skip_request;

	i915_vma_unpin(vma);
	i915_vma_close(vma);

	i915_request_add(rq);

	i915_gem_object_lock(obj);
	err = i915_gem_object_set_to_cpu_domain(obj, false);
	i915_gem_object_unlock(obj);
	if (err)
		goto err;

	cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto err;
	}

	*value = cmd[result / sizeof(*cmd)];
	i915_gem_object_unpin_map(obj);
	i915_gem_object_put(obj);

	return 0;

skip_request:
	i915_request_skip(rq, err);
err_request:
	i915_request_add(rq);
err_unpin:
	i915_vma_unpin(vma);
err:
	i915_gem_object_put(obj);
	return err;
}

static int igt_vm_isolation(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx_a, *ctx_b;
	struct intel_engine_cs *engine;
	struct igt_live_test t;
	struct drm_file *file;
	I915_RND_STATE(prng);
	unsigned long count;
	enum intel_engine_id id;
	u64 vm_total;
	int err;

	if (INTEL_GEN(i915) < 7)
		return 0;

	/*
	 * The simple goal here is that a write into one context is not
	 * observed in a second (separate page tables and scratch).
	 */

	file = mock_file(i915);
	if (IS_ERR(file))
		return PTR_ERR(file);

	mutex_lock(&i915->drm.struct_mutex);

	err = igt_live_test_begin(&t, i915, __func__, "");
	if (err)
		goto out_unlock;

	ctx_a = live_context(i915, file);
	if (IS_ERR(ctx_a)) {
		err = PTR_ERR(ctx_a);
		goto out_unlock;
	}

	ctx_b = live_context(i915, file);
	if (IS_ERR(ctx_b)) {
		err = PTR_ERR(ctx_b);
		goto out_unlock;
	}

	/* We can only test vm isolation if the vms are distinct */
	if (ctx_a->vm == ctx_b->vm)
		goto out_unlock;

	vm_total = ctx_a->vm->total;
	GEM_BUG_ON(ctx_b->vm->total != vm_total);
	vm_total -= I915_GTT_PAGE_SIZE;

	count = 0;
	for_each_engine(engine, i915, id) {
		IGT_TIMEOUT(end_time);
		unsigned long this = 0;

		if (!intel_engine_can_store_dword(engine))
			continue;

		while (!__igt_timeout(end_time, NULL)) {
			u32 value = 0xc5c5c5c5;
			u64 offset;

			div64_u64_rem(i915_prandom_u64_state(&prng),
				      vm_total, &offset);
			offset = round_down(offset, alignof_dword);
			offset += I915_GTT_PAGE_SIZE;

			err = write_to_scratch(ctx_a, engine,
					       offset, 0xdeadbeef);
			if (err == 0)
				err = read_from_scratch(ctx_b, engine,
							offset, &value);
			if (err)
				goto out_unlock;

			if (value) {
				pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
				       engine->name, value,
				       upper_32_bits(offset),
				       lower_32_bits(offset),
				       this);
				err = -EINVAL;
				goto out_unlock;
			}

			this++;
		}
		count += this;
	}
	pr_info("Checked %lu scratch offsets across %d engines\n",
		count, RUNTIME_INFO(i915)->num_engines);

out_unlock:
	if (igt_live_test_end(&t))
		err = -EIO;
	mutex_unlock(&i915->drm.struct_mutex);

	mock_file_free(i915, file);
	return err;
}

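/*
 * Engine filter for context_barrier_task(): skip engines whose context
 * state was never allocated, i.e. those this context has not used.
 */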
static bool skip_unused_engines(struct intel_context *ce, void *data)
{
	return !ce->state;
}

static void mock_barrier_task(void *data)
{
	unsigned int *counter = data;

	++*counter;
}

static int mock_context_barrier(void *arg)
{
#undef pr_fmt
#define pr_fmt(x) "context_barrier_task():" # x
	struct drm_i915_private *i915 = arg;
	struct i915_gem_context *ctx;
	struct i915_request *rq;
	unsigned int counter;
	int err;

	/*
	 * The context barrier provides us with a callback after it emits
	 * a request; useful for retiring old state after loading new.
	 */

	mutex_lock(&i915->drm.struct_mutex);

	ctx = mock_context(i915, "mock");
	if (!ctx) {
		err = -ENOMEM;
		goto unlock;
	}

	counter = 0;
	err = context_barrier_task(ctx, 0,
				   NULL, NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately with 0 engines\n");
		err = -EINVAL;
		goto out;
	}

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	if (counter == 0) {
		pr_err("Did not retire immediately for all unused engines\n");
		err = -EINVAL;
		goto out;
	}

	rq = igt_request_alloc(ctx, i915->engine[RCS0]);
	if (IS_ERR(rq)) {
		pr_err("Request allocation failed!\n");
		err = PTR_ERR(rq);
		goto out;
	}
	i915_request_add(rq);

	counter = 0;
	context_barrier_inject_fault = BIT(RCS0);
	err = context_barrier_task(ctx, ALL_ENGINES,
				   NULL, NULL, mock_barrier_task, &counter);
	context_barrier_inject_fault = 0;
	if (err == -ENXIO)
		err = 0;
	else
		pr_err("Did not hit fault injection!\n");
	if (counter != 0) {
		pr_err("Invoked callback on error!\n");
		err = -EIO;
	}
	if (err)
		goto out;

	counter = 0;
	err = context_barrier_task(ctx, ALL_ENGINES,
				   skip_unused_engines,
				   NULL, mock_barrier_task, &counter);
	if (err) {
		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
		goto out;
	}
	mock_device_flush(i915);
	if (counter == 0) {
		pr_err("Did not retire on each active engine\n");
		err = -EINVAL;
		goto out;
	}

out:
	mock_context_close(ctx);
unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return err;
#undef pr_fmt
#define pr_fmt(x) x
}

int i915_gem_context_mock_selftests(void)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(mock_context_barrier),
	};
	struct drm_i915_private *i915;
	int err;

	i915 = mock_gem_device();
	if (!i915)
		return -ENOMEM;

	err = i915_subtests(tests, i915);

	drm_dev_put(&i915->drm);
	return err;
}

int i915_gem_context_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_nop_switch),
		SUBTEST(live_parallel_switch),
		SUBTEST(igt_ctx_exec),
		SUBTEST(igt_ctx_readonly),
		SUBTEST(igt_ctx_sseu),
		SUBTEST(igt_shared_ctx_exec),
		SUBTEST(igt_vm_isolation),
	};

	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_live_subtests(tests, i915);
}