/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "gt/intel_engine_pm.h"
#include "i915_selftest.h"

#include "gem/selftests/mock_context.h"
#include "selftests/igt_reset.h"
#include "selftests/igt_spinner.h"

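/* Shared state for the MOCS selftests: expected tables plus a scratch buffer */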
struct live_mocs {
	struct drm_i915_mocs_table mocs;
	struct drm_i915_mocs_table l3cc;
	struct i915_vma *scratch;
	void *vaddr;
};

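/* Submit the request and wait (up to HZ/5) for it to complete */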
static int request_add_sync(struct i915_request *rq, int err)
{
	i915_request_get(rq);
	i915_request_add(rq);
	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

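/* Submit the request and wait for the spinner payload to start executing */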
static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
{
	int err = 0;

	i915_request_get(rq);
	i915_request_add(rq);
	if (spin && !igt_wait_for_spinner(spin, rq))
		err = -ETIME;
	i915_request_put(rq);

	return err;
}

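/*
 * Allocate a CPU-cached page and pin it into the global GTT so the
 * command streamer can write register values into it with SRM.
 */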
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		i915_gem_object_put(obj);
		return vma;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err) {
		i915_gem_object_put(obj);
		return ERR_PTR(err);
	}

	return vma;
}

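/* Capture the expected MOCS/L3CC tables and map the scratch buffer */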
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
	struct drm_i915_mocs_table table;
	unsigned int flags;
	int err;

	memset(arg, 0, sizeof(*arg));

	flags = get_mocs_settings(gt->i915, &table);
	if (!flags)
		return -EINVAL;

	if (flags & HAS_RENDER_L3CC)
		arg->l3cc = table;

	if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
		arg->mocs = table;

	arg->scratch = create_scratch(gt);
	if (IS_ERR(arg->scratch))
		return PTR_ERR(arg->scratch);

	arg->vaddr = i915_gem_object_pin_map(arg->scratch->obj, I915_MAP_WB);
	if (IS_ERR(arg->vaddr)) {
		err = PTR_ERR(arg->vaddr);
		goto err_scratch;
	}

	return 0;

err_scratch:
	i915_vma_unpin_and_release(&arg->scratch, 0);
	return err;
}

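/* I915_VMA_RELEASE_MAP also drops the CPU mapping taken by pin_map() above */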
static void live_mocs_fini(struct live_mocs *arg)
{
	i915_vma_unpin_and_release(&arg->scratch, I915_VMA_RELEASE_MAP);
}

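/* Emit one MI_STORE_REGISTER_MEM (SRM) per register into the scratch page */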
static int read_regs(struct i915_request *rq,
		     u32 addr, unsigned int count,
		     u32 *offset)
{
	unsigned int i;
	u32 *cs;

	GEM_BUG_ON(!IS_ALIGNED(*offset, sizeof(u32)));

	cs = intel_ring_begin(rq, 4 * count);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	for (i = 0; i < count; i++) {
		*cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
		*cs++ = addr;
		*cs++ = *offset;
		*cs++ = 0;

		addr += sizeof(u32);
		*offset += sizeof(u32);
	}

	intel_ring_advance(rq, cs);

	return 0;
}

static int read_mocs_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr;

	if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915))
		addr = global_mocs_offset();
	else
		addr = mocs_offset(rq->engine);

	return read_regs(rq, addr, table->n_entries, offset);
}

static int read_l3cc_table(struct i915_request *rq,
			   const struct drm_i915_mocs_table *table,
			   u32 *offset)
{
	u32 addr = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));

	return read_regs(rq, addr, (table->n_entries + 1) / 2, offset);
}

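/* Compare each value read back by the CS against the expected MOCS entry */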
static int check_mocs_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	unsigned int i;
	u32 expect;

	for_each_mocs(expect, table, i) {
		if (**vaddr != expect) {
			pr_err("%s: Invalid MOCS[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
	}

	return 0;
}

static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
	/*
	 * Registers in this range are affected by the MCR selector
	 * which only controls CPU initiated MMIO. Routing does not
	 * work for CS access so we cannot verify them on this path.
	 */
	return INTEL_GEN(i915) >= 8 && offset >= 0xb000 && offset <= 0xb4ff;
}

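/* L3CC registers pack two table entries per u32; skip any in the MCR range */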
static int check_l3cc_table(struct intel_engine_cs *engine,
			    const struct drm_i915_mocs_table *table,
			    u32 **vaddr)
{
	/* Can we read the MCR range 0xb00 directly? See intel_workarounds! */
	u32 reg = i915_mmio_reg_offset(GEN9_LNCFCMOCS(0));
	unsigned int i;
	u32 expect;

	for_each_l3cc(expect, table, i) {
		if (!mcr_range(engine->i915, reg) && **vaddr != expect) {
			pr_err("%s: Invalid L3CC[%d] entry, found %08x, expected %08x\n",
			       engine->name, i, **vaddr, expect);
			return -EINVAL;
		}
		++*vaddr;
		reg += sizeof(u32);
	}

	return 0;
}

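/*
 * Poison the scratch page, read both tables back via the engine using SRM,
 * then verify the stored values against the expected tables from the CPU.
 */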
static int check_mocs_engine(struct live_mocs *arg,
			     struct intel_context *ce)
{
	struct i915_vma *vma = arg->scratch;
	struct i915_request *rq;
	u32 offset;
	u32 *vaddr;
	int err;

	memset32(arg->vaddr, STACK_MAGIC, PAGE_SIZE / sizeof(u32));

	rq = intel_context_create_request(ce);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_vma_lock(vma);
	err = i915_request_await_object(rq, vma->obj, true);
	if (!err)
		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
	i915_vma_unlock(vma);

	/* Read the mocs tables back using SRM */
	offset = i915_ggtt_offset(vma);
	if (!err)
		err = read_mocs_table(rq, &arg->mocs, &offset);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = read_l3cc_table(rq, &arg->l3cc, &offset);
	offset -= i915_ggtt_offset(vma);
	GEM_BUG_ON(offset > PAGE_SIZE);

	err = request_add_sync(rq, err);
	if (err)
		return err;

	/* Compare the results against the expected tables */
	vaddr = arg->vaddr;
	err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
	if (!err && ce->engine->class == RENDER_CLASS)
		err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
	if (err)
		return err;

	GEM_BUG_ON(arg->vaddr + offset != vaddr);
	return 0;
}

static int live_mocs_kernel(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Basic check the system is configured with the expected mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		intel_engine_pm_get(engine);
		err = check_mocs_engine(&mocs, engine->kernel_context);
		intel_engine_pm_put(engine);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

static int live_mocs_clean(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Every new context should see the same mocs table */

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		err = check_mocs_engine(&mocs, ce);
		intel_context_put(ce);
		if (err)
			break;
	}

	live_mocs_fini(&mocs);
	return err;
}

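/* Reset the engine while a spinner request keeps it busy */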
static int active_engine_reset(struct intel_context *ce,
			       const char *reason)
{
	struct igt_spinner spin;
	struct i915_request *rq;
	int err;

	err = igt_spinner_init(&spin, ce->engine->gt);
	if (err)
		return err;

	rq = igt_spinner_create_request(&spin, ce, MI_NOOP);
	if (IS_ERR(rq)) {
		igt_spinner_fini(&spin);
		return PTR_ERR(rq);
	}

	err = request_add_spin(rq, &spin);
	if (err == 0)
		err = intel_engine_reset(ce->engine, reason);

	igt_spinner_end(&spin);
	igt_spinner_fini(&spin);

	return err;
}

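/* Check the tables survive an idle reset, an active reset and a GT reset */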
static int __live_mocs_reset(struct live_mocs *mocs,
			     struct intel_context *ce)
{
	int err;

	err = intel_engine_reset(ce->engine, "mocs");
	if (err)
		return err;

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	err = active_engine_reset(ce, "mocs");
	if (err)
		return err;

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	intel_gt_reset(ce->engine->gt, ce->engine->mask, "mocs");

	err = check_mocs_engine(mocs, ce);
	if (err)
		return err;

	return 0;
}

static int live_mocs_reset(void *arg)
{
	struct intel_gt *gt = arg;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	struct live_mocs mocs;
	int err;

	/* Check the mocs setup is retained over per-engine and global resets */

	if (!intel_has_reset_engine(gt))
		return 0;

	err = live_mocs_init(&mocs, gt);
	if (err)
		return err;

	igt_global_reset_lock(gt);
	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		ce = intel_context_create(engine);
		if (IS_ERR(ce)) {
			err = PTR_ERR(ce);
			break;
		}

		intel_engine_pm_get(engine);
		err = __live_mocs_reset(&mocs, ce);
		intel_engine_pm_put(engine);

		intel_context_put(ce);
		if (err)
			break;
	}
	igt_global_reset_unlock(gt);

	live_mocs_fini(&mocs);
	return err;
}

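/* Entry point; skip quietly on platforms without a MOCS table */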
int intel_mocs_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_mocs_kernel),
		SUBTEST(live_mocs_clean),
		SUBTEST(live_mocs_reset),
	};
	struct drm_i915_mocs_table table;

	if (!get_mocs_settings(i915, &table))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}