
drm/i915/selftests: Exercise context switching in parallel
drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2017 Intel Corporation
5  */
6
7 #include <linux/prime_numbers.h>
8
9 #include "gem/i915_gem_pm.h"
10 #include "gt/intel_gt.h"
11 #include "gt/intel_reset.h"
12 #include "i915_selftest.h"
13
14 #include "gem/selftests/igt_gem_utils.h"
15 #include "selftests/i915_random.h"
16 #include "selftests/igt_flush_test.h"
17 #include "selftests/igt_live_test.h"
18 #include "selftests/igt_reset.h"
19 #include "selftests/igt_spinner.h"
20 #include "selftests/mock_drm.h"
21 #include "selftests/mock_gem_device.h"
22
23 #include "huge_gem_object.h"
24 #include "igt_gem_utils.h"
25
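/*
 * With 4KiB pages this evaluates to 1024 dwords per page. The huge test
 * objects created below are backed by DW_PER_PAGE * PAGE_SIZE bytes of real
 * storage, so they expose exactly DW_PER_PAGE real pages for the per-dword
 * GPU writes to land in.
 */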
26 #define DW_PER_PAGE (PAGE_SIZE / sizeof(u32))
27
28 static int live_nop_switch(void *arg)
29 {
30         const unsigned int nctx = 1024;
31         struct drm_i915_private *i915 = arg;
32         struct intel_engine_cs *engine;
33         struct i915_gem_context **ctx;
34         enum intel_engine_id id;
35         struct igt_live_test t;
36         struct drm_file *file;
37         unsigned long n;
38         int err = -ENODEV;
39
40         /*
41          * Create as many contexts as we can feasibly get away with
42          * and check we can switch between them rapidly.
43          *
44          * Serves as a very simple stress test for submission and HW switching
45          * between contexts.
46          */
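        /*
         * The loop below runs in two phases per engine: first one request is
         * submitted on each of the nctx contexts to populate them, then
         * batches of 'prime' empty requests are submitted round-robin across
         * the contexts and timed, so the per-request cost approximates the
         * bare context-switch latency.
         */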
47
48         if (!DRIVER_CAPS(i915)->has_logical_contexts)
49                 return 0;
50
51         file = mock_file(i915);
52         if (IS_ERR(file))
53                 return PTR_ERR(file);
54
55         mutex_lock(&i915->drm.struct_mutex);
56
57         ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL);
58         if (!ctx) {
59                 err = -ENOMEM;
60                 goto out_unlock;
61         }
62
63         for (n = 0; n < nctx; n++) {
64                 ctx[n] = live_context(i915, file);
65                 if (IS_ERR(ctx[n])) {
66                         err = PTR_ERR(ctx[n]);
67                         goto out_unlock;
68                 }
69         }
70
71         for_each_engine(engine, i915, id) {
72                 struct i915_request *rq;
73                 unsigned long end_time, prime;
74                 ktime_t times[2] = {};
75
76                 times[0] = ktime_get_raw();
77                 for (n = 0; n < nctx; n++) {
78                         rq = igt_request_alloc(ctx[n], engine);
79                         if (IS_ERR(rq)) {
80                                 err = PTR_ERR(rq);
81                                 goto out_unlock;
82                         }
83                         i915_request_add(rq);
84                 }
85                 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
86                         pr_err("Failed to populate %d contexts\n", nctx);
87                         intel_gt_set_wedged(&i915->gt);
88                         err = -EIO;
89                         goto out_unlock;
90                 }
91
92                 times[1] = ktime_get_raw();
93
94                 pr_info("Populated %d contexts on %s in %lluns\n",
95                         nctx, engine->name, ktime_to_ns(times[1] - times[0]));
96
97                 err = igt_live_test_begin(&t, i915, __func__, engine->name);
98                 if (err)
99                         goto out_unlock;
100
101                 end_time = jiffies + i915_selftest.timeout_jiffies;
102                 for_each_prime_number_from(prime, 2, 8192) {
103                         times[1] = ktime_get_raw();
104
105                         for (n = 0; n < prime; n++) {
106                                 rq = igt_request_alloc(ctx[n % nctx], engine);
107                                 if (IS_ERR(rq)) {
108                                         err = PTR_ERR(rq);
109                                         goto out_unlock;
110                                 }
111
112                                 /*
113                                  * This space is left intentionally blank.
114                                  *
115                                  * We do not actually want to perform any
116                                  * action with this request, we just want
117                                  * to measure the latency in allocation
118                                  * and submission of our breadcrumbs -
119                                  * ensuring that the bare request is sufficient
120                                  * for the system to work (i.e. proper HEAD
121                                  * tracking of the rings, interrupt handling,
122                                  * etc). It also gives us the lowest bounds
123                                  * for latency.
124                                  */
125
126                                 i915_request_add(rq);
127                         }
128                         if (i915_request_wait(rq, 0, HZ / 5) < 0) {
129                                 pr_err("Switching between %ld contexts timed out\n",
130                                        prime);
131                                 intel_gt_set_wedged(&i915->gt);
132                                 break;
133                         }
134
135                         times[1] = ktime_sub(ktime_get_raw(), times[1]);
136                         if (prime == 2)
137                                 times[0] = times[1];
138
139                         if (__igt_timeout(end_time, NULL))
140                                 break;
141                 }
142
143                 err = igt_live_test_end(&t);
144                 if (err)
145                         goto out_unlock;
146
147                 pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n",
148                         engine->name,
149                         ktime_to_ns(times[0]),
150                         prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1));
151         }
152
153 out_unlock:
154         mutex_unlock(&i915->drm.struct_mutex);
155         mock_file_free(i915, file);
156         return err;
157 }
158
159 struct parallel_switch {
160         struct task_struct *tsk;
161         struct intel_context *ce[2];
162 };
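/*
 * One parallel_switch slot is set up per engine: ce[0] and ce[1] are pinned
 * contexts for that engine taken from two different GEM contexts, and tsk is
 * the kthread that submits requests alternately to the pair. The switch1
 * variant waits for the final request of each round ("sync"), while the
 * switchN variant keeps queueing without ever waiting ("many").
 */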
163
164 static int __live_parallel_switch1(void *data)
165 {
166         struct parallel_switch *arg = data;
167         struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
168         IGT_TIMEOUT(end_time);
169         unsigned long count;
170
171         count = 0;
172         do {
173                 struct i915_request *rq = NULL;
174                 int err, n;
175
176                 for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
177                         i915_request_put(rq);
178
179                         mutex_lock(&i915->drm.struct_mutex);
180                         rq = i915_request_create(arg->ce[n]);
181                         if (IS_ERR(rq)) {
182                                 mutex_unlock(&i915->drm.struct_mutex);
183                                 return PTR_ERR(rq);
184                         }
185
186                         i915_request_get(rq);
187                         i915_request_add(rq);
188                         mutex_unlock(&i915->drm.struct_mutex);
189                 }
190
191                 err = 0;
192                 if (i915_request_wait(rq, 0, HZ / 5) < 0)
193                         err = -ETIME;
194                 i915_request_put(rq);
195                 if (err)
196                         return err;
197
198                 count++;
199         } while (!__igt_timeout(end_time, NULL));
200
201         pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
202         return 0;
203 }
204
205 static int __live_parallel_switchN(void *data)
206 {
207         struct parallel_switch *arg = data;
208         struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
209         IGT_TIMEOUT(end_time);
210         unsigned long count;
211         int n;
212
213         count = 0;
214         do {
215                 for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
216                         struct i915_request *rq;
217
218                         mutex_lock(&i915->drm.struct_mutex);
219                         rq = i915_request_create(arg->ce[n]);
220                         if (IS_ERR(rq)) {
221                                 mutex_unlock(&i915->drm.struct_mutex);
222                                 return PTR_ERR(rq);
223                         }
224
225                         i915_request_add(rq);
226                         mutex_unlock(&i915->drm.struct_mutex);
227                 }
228
229                 count++;
230         } while (!__igt_timeout(end_time, NULL));
231
232         pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
233         return 0;
234 }
235
236 static int live_parallel_switch(void *arg)
237 {
238         struct drm_i915_private *i915 = arg;
239         static int (* const func[])(void *arg) = {
240                 __live_parallel_switch1,
241                 __live_parallel_switchN,
242                 NULL,
243         };
244         struct parallel_switch *data = NULL;
245         struct i915_gem_engines *engines;
246         struct i915_gem_engines_iter it;
247         int (* const *fn)(void *arg);
248         struct i915_gem_context *ctx;
249         struct intel_context *ce;
250         struct drm_file *file;
251         int n, m, count;
252         int err = 0;
253
254         /*
255          * Check we can process switches on all engines simultaneously.
256          */
257
258         if (!DRIVER_CAPS(i915)->has_logical_contexts)
259                 return 0;
260
261         file = mock_file(i915);
262         if (IS_ERR(file))
263                 return PTR_ERR(file);
264
265         mutex_lock(&i915->drm.struct_mutex);
266
267         ctx = live_context(i915, file);
268         if (IS_ERR(ctx)) {
269                 err = PTR_ERR(ctx);
270                 goto out_locked;
271         }
272
273         engines = i915_gem_context_lock_engines(ctx);
274         count = engines->num_engines;
275
276         data = kcalloc(count, sizeof(*data), GFP_KERNEL);
277         if (!data) {
278                 i915_gem_context_unlock_engines(ctx);
279                 err = -ENOMEM;
280                 goto out_locked;
281         }
282
283         m = 0; /* Use the first context as our template for the engines */
284         for_each_gem_engine(ce, engines, it) {
285                 err = intel_context_pin(ce);
286                 if (err) {
287                         i915_gem_context_unlock_engines(ctx);
288                         goto out_locked;
289                 }
290                 data[m++].ce[0] = intel_context_get(ce);
291         }
292         i915_gem_context_unlock_engines(ctx);
293
294         /* Clone the same set of engines into the other contexts */
295         for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
296                 ctx = live_context(i915, file);
297                 if (IS_ERR(ctx)) {
298                         err = PTR_ERR(ctx);
299                         goto out_locked;
300                 }
301
302                 for (m = 0; m < count; m++) {
303                         if (!data[m].ce[0])
304                                 continue;
305
306                         ce = intel_context_create(ctx, data[m].ce[0]->engine);
307                         if (IS_ERR(ce)) {
                                    err = PTR_ERR(ce);
308                                 goto out_locked;
                            }
309
310                         err = intel_context_pin(ce);
311                         if (err) {
312                                 intel_context_put(ce);
313                                 goto out_locked;
314                         }
315
316                         data[m].ce[n] = ce;
317                 }
318         }
319
320         mutex_unlock(&i915->drm.struct_mutex);
321
322         for (fn = func; !err && *fn; fn++) {
323                 struct igt_live_test t;
324                 int n;
325
326                 mutex_lock(&i915->drm.struct_mutex);
327                 err = igt_live_test_begin(&t, i915, __func__, "");
328                 mutex_unlock(&i915->drm.struct_mutex);
329                 if (err)
330                         break;
331
332                 for (n = 0; n < count; n++) {
333                         if (!data[n].ce[0])
334                                 continue;
335
336                         data[n].tsk = kthread_run(*fn, &data[n],
337                                                   "igt/parallel:%s",
338                                                   data[n].ce[0]->engine->name);
339                         if (IS_ERR(data[n].tsk)) {
340                                 err = PTR_ERR(data[n].tsk);
341                                 break;
342                         }
343                         get_task_struct(data[n].tsk);
344                 }
345
346                 for (n = 0; n < count; n++) {
347                         int status;
348
349                         if (IS_ERR_OR_NULL(data[n].tsk))
350                                 continue;
351
352                         status = kthread_stop(data[n].tsk);
353                         if (status && !err)
354                                 err = status;
355
356                         put_task_struct(data[n].tsk);
357                         data[n].tsk = NULL;
358                 }
359
360                 mutex_lock(&i915->drm.struct_mutex);
361                 if (igt_live_test_end(&t))
362                         err = -EIO;
363                 mutex_unlock(&i915->drm.struct_mutex);
364         }
365
366         mutex_lock(&i915->drm.struct_mutex);
367 out_locked:
368         for (n = 0; n < count; n++) {
369                 for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
370                         if (!data[n].ce[m])
371                                 continue;
372
373                         intel_context_unpin(data[n].ce[m]);
374                         intel_context_put(data[n].ce[m]);
375                 }
376         }
377         mutex_unlock(&i915->drm.struct_mutex);
378         kfree(data);
379         mock_file_free(i915, file);
380         return err;
381 }
382
383 static unsigned long real_page_count(struct drm_i915_gem_object *obj)
384 {
385         return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
386 }
387
388 static unsigned long fake_page_count(struct drm_i915_gem_object *obj)
389 {
390         return huge_gem_object_dma_size(obj) >> PAGE_SHIFT;
391 }
392
393 static int gpu_fill(struct intel_context *ce,
394                     struct drm_i915_gem_object *obj,
395                     unsigned int dw)
396 {
397         struct i915_vma *vma;
398         int err;
399
400         GEM_BUG_ON(obj->base.size > ce->vm->total);
401         GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
402
403         vma = i915_vma_instance(obj, ce->vm, NULL);
404         if (IS_ERR(vma))
405                 return PTR_ERR(vma);
406
407         err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER);
408         if (err)
409                 return err;
410
411         /*
412          * Within the GTT the huge object maps every page onto
413          * its 1024 real pages (using phys_pfn = dma_pfn % 1024).
414          * We set the nth dword within the page using the nth
415          * mapping via the GTT - this should exercise the GTT mapping
416          * whilst checking that each context provides a unique view
417          * into the object.
418          */
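        /*
         * For example, with dw == 2 the fill starts at GTT page
         * 2 * real_page_count(obj), byte offset 8 within the page; assuming
         * igt_gpu_fill_dw() steps one page per dword it writes, this stores
         * the value 2 into dword 2 of every real page, each time through a
         * different GTT mapping.
         */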
419         err = igt_gpu_fill_dw(ce, vma,
420                               (dw * real_page_count(obj)) << PAGE_SHIFT |
421                               (dw * sizeof(u32)),
422                               real_page_count(obj),
423                               dw);
424         i915_vma_unpin(vma);
425
426         return err;
427 }
428
429 static int cpu_fill(struct drm_i915_gem_object *obj, u32 value)
430 {
431         const bool has_llc = HAS_LLC(to_i915(obj->base.dev));
432         unsigned int n, m, need_flush;
433         int err;
434
435         err = i915_gem_object_prepare_write(obj, &need_flush);
436         if (err)
437                 return err;
438
439         for (n = 0; n < real_page_count(obj); n++) {
440                 u32 *map;
441
442                 map = kmap_atomic(i915_gem_object_get_page(obj, n));
443                 for (m = 0; m < DW_PER_PAGE; m++)
444                         map[m] = value;
445                 if (!has_llc)
446                         drm_clflush_virt_range(map, PAGE_SIZE);
447                 kunmap_atomic(map);
448         }
449
450         i915_gem_object_finish_access(obj);
451         obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU;
452         obj->write_domain = 0;
453         return 0;
454 }
455
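/*
 * cpu_check() verifies the pattern left behind by cpu_fill() + gpu_fill():
 * for m < max, dword m of every real page must hold the value m written by
 * the GPU, and every dword beyond max must still hold the STACK_MAGIC value
 * from the initial CPU fill.
 */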
456 static noinline int cpu_check(struct drm_i915_gem_object *obj,
457                               unsigned int idx, unsigned int max)
458 {
459         unsigned int n, m, needs_flush;
460         int err;
461
462         err = i915_gem_object_prepare_read(obj, &needs_flush);
463         if (err)
464                 return err;
465
466         for (n = 0; n < real_page_count(obj); n++) {
467                 u32 *map;
468
469                 map = kmap_atomic(i915_gem_object_get_page(obj, n));
470                 if (needs_flush & CLFLUSH_BEFORE)
471                         drm_clflush_virt_range(map, PAGE_SIZE);
472
473                 for (m = 0; m < max; m++) {
474                         if (map[m] != m) {
475                                 pr_err("%pS: Invalid value at object %d page %d/%ld, offset %d/%d: found %x expected %x\n",
476                                        __builtin_return_address(0), idx,
477                                        n, real_page_count(obj), m, max,
478                                        map[m], m);
479                                 err = -EINVAL;
480                                 goto out_unmap;
481                         }
482                 }
483
484                 for (; m < DW_PER_PAGE; m++) {
485                         if (map[m] != STACK_MAGIC) {
486                                 pr_err("%pS: Invalid value at object %d page %d, offset %d: found %x expected %x (uninitialised)\n",
487                                        __builtin_return_address(0), idx, n, m,
488                                        map[m], STACK_MAGIC);
489                                 err = -EINVAL;
490                                 goto out_unmap;
491                         }
492                 }
493
494 out_unmap:
495                 kunmap_atomic(map);
496                 if (err)
497                         break;
498         }
499
500         i915_gem_object_finish_access(obj);
501         return err;
502 }
503
504 static int file_add_object(struct drm_file *file,
505                             struct drm_i915_gem_object *obj)
506 {
507         int err;
508
509         GEM_BUG_ON(obj->base.handle_count);
510
511         /* tie the object to the drm_file for easy reaping */
512         err = idr_alloc(&file->object_idr, &obj->base, 1, 0, GFP_KERNEL);
513         if (err < 0)
514                 return err;
515
516         i915_gem_object_get(obj);
517         obj->base.handle_count++;
518         return 0;
519 }
520
521 static struct drm_i915_gem_object *
522 create_test_object(struct i915_address_space *vm,
523                    struct drm_file *file,
524                    struct list_head *objects)
525 {
526         struct drm_i915_gem_object *obj;
527         u64 size;
528         int err;
529
530         /* Keep in GEM's good graces */
531         i915_retire_requests(vm->i915);
532
533         size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE);
534         size = round_down(size, DW_PER_PAGE * PAGE_SIZE);
535
536         obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size);
537         if (IS_ERR(obj))
538                 return obj;
539
540         err = file_add_object(file, obj);
541         i915_gem_object_put(obj);
542         if (err)
543                 return ERR_PTR(err);
544
545         err = cpu_fill(obj, STACK_MAGIC);
546         if (err) {
547                 pr_err("Failed to fill object with cpu, err=%d\n",
548                        err);
549                 return ERR_PTR(err);
550         }
551
552         list_add_tail(&obj->st_link, objects);
553         return obj;
554 }
555
556 static unsigned long max_dwords(struct drm_i915_gem_object *obj)
557 {
558         unsigned long npages = fake_page_count(obj);
559
560         GEM_BUG_ON(!IS_ALIGNED(npages, DW_PER_PAGE));
561         return npages / DW_PER_PAGE;
562 }
563
564 static void throttle_release(struct i915_request **q, int count)
565 {
566         int i;
567
568         for (i = 0; i < count; i++) {
569                 if (IS_ERR_OR_NULL(q[i]))
570                         continue;
571
572                 i915_request_put(fetch_and_zero(&q[i]));
573         }
574 }
575
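/*
 * throttle() bounds how far the fill loops below may run ahead of the GPU:
 * it waits for the oldest request in q[], shifts the queue down and appends
 * a freshly submitted request on the given context at the tail, so at most
 * 'count' throttle requests are ever outstanding.
 */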
576 static int throttle(struct intel_context *ce,
577                     struct i915_request **q, int count)
578 {
579         int i;
580
581         if (!IS_ERR_OR_NULL(q[0])) {
582                 if (i915_request_wait(q[0],
583                                       I915_WAIT_INTERRUPTIBLE,
584                                       MAX_SCHEDULE_TIMEOUT) < 0)
585                         return -EINTR;
586
587                 i915_request_put(q[0]);
588         }
589
590         for (i = 0; i < count - 1; i++)
591                 q[i] = q[i + 1];
592
593         q[i] = intel_context_create_request(ce);
594         if (IS_ERR(q[i]))
595                 return PTR_ERR(q[i]);
596
597         i915_request_get(q[i]);
598         i915_request_add(q[i]);
599
600         return 0;
601 }
602
603 static int igt_ctx_exec(void *arg)
604 {
605         struct drm_i915_private *i915 = arg;
606         struct intel_engine_cs *engine;
607         enum intel_engine_id id;
608         int err = -ENODEV;
609
610         /*
611          * Create a few different contexts (with different mm) and write
612          * through each ctx/mm using the GPU making sure those writes end
613          * up in the expected pages of our obj.
614          */
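        /*
         * Bookkeeping throughout the engine loop below: dw is the next dword
         * index to fill in the current object and wraps (starting a fresh
         * object) once it reaches max_dwords(obj), while ndwords counts every
         * write so the final cpu_check() pass knows how many dwords of each
         * object on the list should have been touched.
         */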
615
616         if (!DRIVER_CAPS(i915)->has_logical_contexts)
617                 return 0;
618
619         for_each_engine(engine, i915, id) {
620                 struct drm_i915_gem_object *obj = NULL;
621                 unsigned long ncontexts, ndwords, dw;
622                 struct i915_request *tq[5] = {};
623                 struct igt_live_test t;
624                 struct drm_file *file;
625                 IGT_TIMEOUT(end_time);
626                 LIST_HEAD(objects);
627
628                 if (!intel_engine_can_store_dword(engine))
629                         continue;
630
631                 if (!engine->context_size)
632                         continue; /* No logical context support in HW */
633
634                 file = mock_file(i915);
635                 if (IS_ERR(file))
636                         return PTR_ERR(file);
637
638                 mutex_lock(&i915->drm.struct_mutex);
639
640                 err = igt_live_test_begin(&t, i915, __func__, engine->name);
641                 if (err)
642                         goto out_unlock;
643
644                 ncontexts = 0;
645                 ndwords = 0;
646                 dw = 0;
647                 while (!time_after(jiffies, end_time)) {
648                         struct i915_gem_context *ctx;
649                         struct intel_context *ce;
650
651                         ctx = kernel_context(i915);
652                         if (IS_ERR(ctx)) {
653                                 err = PTR_ERR(ctx);
654                                 goto out_unlock;
655                         }
656
657                         ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
658                         GEM_BUG_ON(IS_ERR(ce));
659
660                         if (!obj) {
661                                 obj = create_test_object(ce->vm, file, &objects);
662                                 if (IS_ERR(obj)) {
663                                         err = PTR_ERR(obj);
664                                         intel_context_put(ce);
665                                         kernel_context_close(ctx);
666                                         goto out_unlock;
667                                 }
668                         }
669
670                         err = gpu_fill(ce, obj, dw);
671                         if (err) {
672                                 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
673                                        ndwords, dw, max_dwords(obj),
674                                        engine->name, ctx->hw_id,
675                                        yesno(!!ctx->vm), err);
676                                 intel_context_put(ce);
677                                 kernel_context_close(ctx);
678                                 goto out_unlock;
679                         }
680
681                         err = throttle(ce, tq, ARRAY_SIZE(tq));
682                         if (err) {
683                                 intel_context_put(ce);
684                                 kernel_context_close(ctx);
685                                 goto out_unlock;
686                         }
687
688                         if (++dw == max_dwords(obj)) {
689                                 obj = NULL;
690                                 dw = 0;
691                         }
692
693                         ndwords++;
694                         ncontexts++;
695
696                         intel_context_put(ce);
697                         kernel_context_close(ctx);
698                 }
699
700                 pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
701                         ncontexts, engine->name, ndwords);
702
703                 ncontexts = dw = 0;
704                 list_for_each_entry(obj, &objects, st_link) {
705                         unsigned int rem =
706                                 min_t(unsigned int, ndwords - dw, max_dwords(obj));
707
708                         err = cpu_check(obj, ncontexts++, rem);
709                         if (err)
710                                 break;
711
712                         dw += rem;
713                 }
714
715 out_unlock:
716                 throttle_release(tq, ARRAY_SIZE(tq));
717                 if (igt_live_test_end(&t))
718                         err = -EIO;
719                 mutex_unlock(&i915->drm.struct_mutex);
720
721                 mock_file_free(i915, file);
722                 if (err)
723                         return err;
724
725                 i915_gem_drain_freed_objects(i915);
726         }
727
728         return 0;
729 }
730
731 static int igt_shared_ctx_exec(void *arg)
732 {
733         struct drm_i915_private *i915 = arg;
734         struct i915_request *tq[5] = {};
735         struct i915_gem_context *parent;
736         struct intel_engine_cs *engine;
737         enum intel_engine_id id;
738         struct igt_live_test t;
739         struct drm_file *file;
740         int err = 0;
741
742         /*
743          * Create a few different contexts with the same mm and write
744          * through each ctx using the GPU making sure those writes end
745          * up in the expected pages of our obj.
746          */
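        /*
         * Unlike igt_ctx_exec(), every kernel_context created in the loop
         * below is pointed at the parent context's ppgtt via __assign_ppgtt(),
         * so all the GPU writes land in one shared address space while the
         * logical context being executed keeps changing.
         */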
747         if (!DRIVER_CAPS(i915)->has_logical_contexts)
748                 return 0;
749
750         file = mock_file(i915);
751         if (IS_ERR(file))
752                 return PTR_ERR(file);
753
754         mutex_lock(&i915->drm.struct_mutex);
755
756         parent = live_context(i915, file);
757         if (IS_ERR(parent)) {
758                 err = PTR_ERR(parent);
759                 goto out_unlock;
760         }
761
762         if (!parent->vm) { /* not full-ppgtt; nothing to share */
763                 err = 0;
764                 goto out_unlock;
765         }
766
767         err = igt_live_test_begin(&t, i915, __func__, "");
768         if (err)
769                 goto out_unlock;
770
771         for_each_engine(engine, i915, id) {
772                 unsigned long ncontexts, ndwords, dw;
773                 struct drm_i915_gem_object *obj = NULL;
774                 IGT_TIMEOUT(end_time);
775                 LIST_HEAD(objects);
776
777                 if (!intel_engine_can_store_dword(engine))
778                         continue;
779
780                 dw = 0;
781                 ndwords = 0;
782                 ncontexts = 0;
783                 while (!time_after(jiffies, end_time)) {
784                         struct i915_gem_context *ctx;
785                         struct intel_context *ce;
786
787                         ctx = kernel_context(i915);
788                         if (IS_ERR(ctx)) {
789                                 err = PTR_ERR(ctx);
790                                 goto out_test;
791                         }
792
793                         __assign_ppgtt(ctx, parent->vm);
794
795                         ce = i915_gem_context_get_engine(ctx, engine->legacy_idx);
796                         GEM_BUG_ON(IS_ERR(ce));
797
798                         if (!obj) {
799                                 obj = create_test_object(parent->vm, file, &objects);
800                                 if (IS_ERR(obj)) {
801                                         err = PTR_ERR(obj);
802                                         intel_context_put(ce);
803                                         kernel_context_close(ctx);
804                                         goto out_test;
805                                 }
806                         }
807
808                         err = gpu_fill(ce, obj, dw);
809                         if (err) {
810                                 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
811                                        ndwords, dw, max_dwords(obj),
812                                        engine->name, ctx->hw_id,
813                                        yesno(!!ctx->vm), err);
814                                 intel_context_put(ce);
815                                 kernel_context_close(ctx);
816                                 goto out_test;
817                         }
818
819                         err = throttle(ce, tq, ARRAY_SIZE(tq));
820                         if (err) {
821                                 intel_context_put(ce);
822                                 kernel_context_close(ctx);
823                                 goto out_test;
824                         }
825
826                         if (++dw == max_dwords(obj)) {
827                                 obj = NULL;
828                                 dw = 0;
829                         }
830
831                         ndwords++;
832                         ncontexts++;
833
834                         intel_context_put(ce);
835                         kernel_context_close(ctx);
836                 }
837                 pr_info("Submitted %lu contexts to %s, filling %lu dwords\n",
838                         ncontexts, engine->name, ndwords);
839
840                 ncontexts = dw = 0;
841                 list_for_each_entry(obj, &objects, st_link) {
842                         unsigned int rem =
843                                 min_t(unsigned int, ndwords - dw, max_dwords(obj));
844
845                         err = cpu_check(obj, ncontexts++, rem);
846                         if (err)
847                                 goto out_test;
848
849                         dw += rem;
850                 }
851
852                 mutex_unlock(&i915->drm.struct_mutex);
853                 i915_gem_drain_freed_objects(i915);
854                 mutex_lock(&i915->drm.struct_mutex);
855         }
856 out_test:
857         throttle_release(tq, ARRAY_SIZE(tq));
858         if (igt_live_test_end(&t))
859                 err = -EIO;
860 out_unlock:
861         mutex_unlock(&i915->drm.struct_mutex);
862
863         mock_file_free(i915, file);
864         return err;
865 }
866
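/*
 * Build a batch that uses MI_STORE_REGISTER_MEM to sample
 * GEN8_R_PWR_CLK_STATE into the first dword of the supplied vma; reading
 * that dword back on the CPU tells us which slice/subslice configuration
 * the context was actually running with when the batch executed.
 */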
867 static struct i915_vma *rpcs_query_batch(struct i915_vma *vma)
868 {
869         struct drm_i915_gem_object *obj;
870         u32 *cmd;
871         int err;
872
873         if (INTEL_GEN(vma->vm->i915) < 8)
874                 return ERR_PTR(-EINVAL);
875
876         obj = i915_gem_object_create_internal(vma->vm->i915, PAGE_SIZE);
877         if (IS_ERR(obj))
878                 return ERR_CAST(obj);
879
880         cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
881         if (IS_ERR(cmd)) {
882                 err = PTR_ERR(cmd);
883                 goto err;
884         }
885
886         *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
887         *cmd++ = i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE);
888         *cmd++ = lower_32_bits(vma->node.start);
889         *cmd++ = upper_32_bits(vma->node.start);
890         *cmd = MI_BATCH_BUFFER_END;
891
892         __i915_gem_object_flush_map(obj, 0, 64);
893         i915_gem_object_unpin_map(obj);
894
895         intel_gt_chipset_flush(vma->vm->gt);
896
897         vma = i915_vma_instance(obj, vma->vm, NULL);
898         if (IS_ERR(vma)) {
899                 err = PTR_ERR(vma);
900                 goto err;
901         }
902
903         err = i915_vma_pin(vma, 0, 0, PIN_USER);
904         if (err)
905                 goto err;
906
907         return vma;
908
909 err:
910         i915_gem_object_put(obj);
911         return ERR_PTR(err);
912 }
913
914 static int
915 emit_rpcs_query(struct drm_i915_gem_object *obj,
916                 struct intel_context *ce,
917                 struct i915_request **rq_out)
918 {
919         struct i915_request *rq;
920         struct i915_vma *batch;
921         struct i915_vma *vma;
922         int err;
923
924         GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine));
925
926         vma = i915_vma_instance(obj, ce->vm, NULL);
927         if (IS_ERR(vma))
928                 return PTR_ERR(vma);
929
930         i915_gem_object_lock(obj);
931         err = i915_gem_object_set_to_gtt_domain(obj, false);
932         i915_gem_object_unlock(obj);
933         if (err)
934                 return err;
935
936         err = i915_vma_pin(vma, 0, 0, PIN_USER);
937         if (err)
938                 return err;
939
940         batch = rpcs_query_batch(vma);
941         if (IS_ERR(batch)) {
942                 err = PTR_ERR(batch);
943                 goto err_vma;
944         }
945
946         rq = i915_request_create(ce);
947         if (IS_ERR(rq)) {
948                 err = PTR_ERR(rq);
949                 goto err_batch;
950         }
951
952         err = rq->engine->emit_bb_start(rq,
953                                         batch->node.start, batch->node.size,
954                                         0);
955         if (err)
956                 goto err_request;
957
958         i915_vma_lock(batch);
959         err = i915_request_await_object(rq, batch->obj, false);
960         if (err == 0)
961                 err = i915_vma_move_to_active(batch, rq, 0);
962         i915_vma_unlock(batch);
963         if (err)
964                 goto skip_request;
965
966         i915_vma_lock(vma);
967         err = i915_request_await_object(rq, vma->obj, true);
968         if (err == 0)
969                 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
970         i915_vma_unlock(vma);
971         if (err)
972                 goto skip_request;
973
974         i915_vma_unpin(batch);
975         i915_vma_close(batch);
976         i915_vma_put(batch);
977
978         i915_vma_unpin(vma);
979
980         *rq_out = i915_request_get(rq);
981
982         i915_request_add(rq);
983
984         return 0;
985
986 skip_request:
987         i915_request_skip(rq, err);
988 err_request:
989         i915_request_add(rq);
990 err_batch:
991         i915_vma_unpin(batch);
992         i915_vma_put(batch);
993 err_vma:
994         i915_vma_unpin(vma);
995
996         return err;
997 }
998
999 #define TEST_IDLE       BIT(0)
1000 #define TEST_BUSY       BIT(1)
1001 #define TEST_RESET      BIT(2)
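/*
 * TEST_BUSY and TEST_RESET start a spinner on the context before the SSEU is
 * reconfigured; TEST_RESET additionally performs an engine reset before the
 * read-back, and TEST_IDLE re-reads the configuration once more after waiting
 * for the GPU to idle. igt_ctx_sseu() combines these into its table of phases.
 */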
1002
1003 static int
1004 __sseu_prepare(const char *name,
1005                unsigned int flags,
1006                struct intel_context *ce,
1007                struct igt_spinner **spin)
1008 {
1009         struct i915_request *rq;
1010         int ret;
1011
1012         *spin = NULL;
1013         if (!(flags & (TEST_BUSY | TEST_RESET)))
1014                 return 0;
1015
1016         *spin = kzalloc(sizeof(**spin), GFP_KERNEL);
1017         if (!*spin)
1018                 return -ENOMEM;
1019
1020         ret = igt_spinner_init(*spin, ce->engine->gt);
1021         if (ret)
1022                 goto err_free;
1023
1024         rq = igt_spinner_create_request(*spin, ce, MI_NOOP);
1025         if (IS_ERR(rq)) {
1026                 ret = PTR_ERR(rq);
1027                 goto err_fini;
1028         }
1029
1030         i915_request_add(rq);
1031
1032         if (!igt_wait_for_spinner(*spin, rq)) {
1033                 pr_err("%s: Spinner failed to start!\n", name);
1034                 ret = -ETIMEDOUT;
1035                 goto err_end;
1036         }
1037
1038         return 0;
1039
1040 err_end:
1041         igt_spinner_end(*spin);
1042 err_fini:
1043         igt_spinner_fini(*spin);
1044 err_free:
1045         kfree(fetch_and_zero(spin));
1046         return ret;
1047 }
1048
1049 static int
1050 __read_slice_count(struct intel_context *ce,
1051                    struct drm_i915_gem_object *obj,
1052                    struct igt_spinner *spin,
1053                    u32 *rpcs)
1054 {
1055         struct i915_request *rq = NULL;
1056         u32 s_mask, s_shift;
1057         unsigned int cnt;
1058         u32 *buf, val;
1059         long ret;
1060
1061         ret = emit_rpcs_query(obj, ce, &rq);
1062         if (ret)
1063                 return ret;
1064
1065         if (spin)
1066                 igt_spinner_end(spin);
1067
1068         ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
1069         i915_request_put(rq);
1070         if (ret < 0)
1071                 return ret;
1072
1073         buf = i915_gem_object_pin_map(obj, I915_MAP_WB);
1074         if (IS_ERR(buf)) {
1075                 ret = PTR_ERR(buf);
1076                 return ret;
1077         }
1078
1079         if (INTEL_GEN(ce->engine->i915) >= 11) {
1080                 s_mask = GEN11_RPCS_S_CNT_MASK;
1081                 s_shift = GEN11_RPCS_S_CNT_SHIFT;
1082         } else {
1083                 s_mask = GEN8_RPCS_S_CNT_MASK;
1084                 s_shift = GEN8_RPCS_S_CNT_SHIFT;
1085         }
1086
1087         val = *buf;
1088         cnt = (val & s_mask) >> s_shift;
1089         *rpcs = val;
1090
1091         i915_gem_object_unpin_map(obj);
1092
1093         return cnt;
1094 }
1095
1096 static int
1097 __check_rpcs(const char *name, u32 rpcs, int slices, unsigned int expected,
1098              const char *prefix, const char *suffix)
1099 {
1100         if (slices == expected)
1101                 return 0;
1102
1103         if (slices < 0) {
1104                 pr_err("%s: %s read slice count failed with %d%s\n",
1105                        name, prefix, slices, suffix);
1106                 return slices;
1107         }
1108
1109         pr_err("%s: %s slice count %d is not %u%s\n",
1110                name, prefix, slices, expected, suffix);
1111
1112         pr_info("RPCS=0x%x; %u%sx%u%s\n",
1113                 rpcs, slices,
1114                 (rpcs & GEN8_RPCS_S_CNT_ENABLE) ? "*" : "",
1115                 (rpcs & GEN8_RPCS_SS_CNT_MASK) >> GEN8_RPCS_SS_CNT_SHIFT,
1116                 (rpcs & GEN8_RPCS_SS_CNT_ENABLE) ? "*" : "");
1117
1118         return -EINVAL;
1119 }
1120
1121 static int
1122 __sseu_finish(const char *name,
1123               unsigned int flags,
1124               struct intel_context *ce,
1125               struct drm_i915_gem_object *obj,
1126               unsigned int expected,
1127               struct igt_spinner *spin)
1128 {
1129         unsigned int slices = hweight32(ce->engine->sseu.slice_mask);
1130         u32 rpcs = 0;
1131         int ret = 0;
1132
1133         if (flags & TEST_RESET) {
1134                 ret = intel_engine_reset(ce->engine, "sseu");
1135                 if (ret)
1136                         goto out;
1137         }
1138
1139         ret = __read_slice_count(ce, obj,
1140                                  flags & TEST_RESET ? NULL : spin, &rpcs);
1141         ret = __check_rpcs(name, rpcs, ret, expected, "Context", "!");
1142         if (ret)
1143                 goto out;
1144
1145         ret = __read_slice_count(ce->engine->kernel_context, obj, NULL, &rpcs);
1146         ret = __check_rpcs(name, rpcs, ret, slices, "Kernel context", "!");
1147
1148 out:
1149         if (spin)
1150                 igt_spinner_end(spin);
1151
1152         if ((flags & TEST_IDLE) && ret == 0) {
1153                 ret = i915_gem_wait_for_idle(ce->engine->i915,
1154                                              0, MAX_SCHEDULE_TIMEOUT);
1155                 if (ret)
1156                         return ret;
1157
1158                 ret = __read_slice_count(ce, obj, NULL, &rpcs);
1159                 ret = __check_rpcs(name, rpcs, ret, expected,
1160                                    "Context", " after idle!");
1161         }
1162
1163         return ret;
1164 }
1165
1166 static int
1167 __sseu_test(const char *name,
1168             unsigned int flags,
1169             struct intel_context *ce,
1170             struct drm_i915_gem_object *obj,
1171             struct intel_sseu sseu)
1172 {
1173         struct igt_spinner *spin = NULL;
1174         int ret;
1175
1176         ret = __sseu_prepare(name, flags, ce, &spin);
1177         if (ret)
1178                 return ret;
1179
1180         ret = __intel_context_reconfigure_sseu(ce, sseu);
1181         if (ret)
1182                 goto out_spin;
1183
1184         ret = __sseu_finish(name, flags, ce, obj,
1185                             hweight32(sseu.slice_mask), spin);
1186
1187 out_spin:
1188         if (spin) {
1189                 igt_spinner_end(spin);
1190                 igt_spinner_fini(spin);
1191                 kfree(spin);
1192         }
1193         return ret;
1194 }
1195
1196 static int
1197 __igt_ctx_sseu(struct drm_i915_private *i915,
1198                const char *name,
1199                unsigned int flags)
1200 {
1201         struct intel_engine_cs *engine = i915->engine[RCS0];
1202         struct drm_i915_gem_object *obj;
1203         struct i915_gem_context *ctx;
1204         struct intel_context *ce;
1205         struct intel_sseu pg_sseu;
1206         struct drm_file *file;
1207         int ret;
1208
1209         if (INTEL_GEN(i915) < 9 || !engine)
1210                 return 0;
1211
1212         if (!RUNTIME_INFO(i915)->sseu.has_slice_pg)
1213                 return 0;
1214
1215         if (hweight32(engine->sseu.slice_mask) < 2)
1216                 return 0;
1217
1218         /*
1219          * Gen11 VME friendly power-gated configuration with half enabled
1220          * sub-slices.
1221          */
1222         pg_sseu = engine->sseu;
1223         pg_sseu.slice_mask = 1;
1224         pg_sseu.subslice_mask =
1225                 ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2));
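        /*
         * e.g. an engine reporting eight subslices (subslice_mask 0xff) ends
         * up with pg_sseu.slice_mask == 0x1 and pg_sseu.subslice_mask == 0xf:
         * a single slice with half of its subslices enabled.
         */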
1226
1227         pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n",
1228                 name, flags, hweight32(engine->sseu.slice_mask),
1229                 hweight32(pg_sseu.slice_mask));
1230
1231         file = mock_file(i915);
1232         if (IS_ERR(file))
1233                 return PTR_ERR(file);
1234
1235         if (flags & TEST_RESET)
1236                 igt_global_reset_lock(&i915->gt);
1237
1238         mutex_lock(&i915->drm.struct_mutex);
1239
1240         ctx = live_context(i915, file);
1241         if (IS_ERR(ctx)) {
1242                 ret = PTR_ERR(ctx);
1243                 goto out_unlock;
1244         }
1245         i915_gem_context_clear_bannable(ctx); /* to reset and beyond! */
1246
1247         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1248         if (IS_ERR(obj)) {
1249                 ret = PTR_ERR(obj);
1250                 goto out_unlock;
1251         }
1252
1253         ce = i915_gem_context_get_engine(ctx, RCS0);
1254         if (IS_ERR(ce)) {
1255                 ret = PTR_ERR(ce);
1256                 goto out_put;
1257         }
1258
1259         ret = intel_context_pin(ce);
1260         if (ret)
1261                 goto out_context;
1262
1263         /* First set the default mask. */
1264         ret = __sseu_test(name, flags, ce, obj, engine->sseu);
1265         if (ret)
1266                 goto out_fail;
1267
1268         /* Then set a power-gated configuration. */
1269         ret = __sseu_test(name, flags, ce, obj, pg_sseu);
1270         if (ret)
1271                 goto out_fail;
1272
1273         /* Back to defaults. */
1274         ret = __sseu_test(name, flags, ce, obj, engine->sseu);
1275         if (ret)
1276                 goto out_fail;
1277
1278         /* One last power-gated configuration for the road. */
1279         ret = __sseu_test(name, flags, ce, obj, pg_sseu);
1280         if (ret)
1281                 goto out_fail;
1282
1283 out_fail:
1284         if (igt_flush_test(i915, I915_WAIT_LOCKED))
1285                 ret = -EIO;
1286
1287         intel_context_unpin(ce);
1288 out_context:
1289         intel_context_put(ce);
1290 out_put:
1291         i915_gem_object_put(obj);
1292
1293 out_unlock:
1294         mutex_unlock(&i915->drm.struct_mutex);
1295
1296         if (flags & TEST_RESET)
1297                 igt_global_reset_unlock(&i915->gt);
1298
1299         mock_file_free(i915, file);
1300
1301         if (ret)
1302                 pr_err("%s: Failed with %d!\n", name, ret);
1303
1304         return ret;
1305 }
1306
1307 static int igt_ctx_sseu(void *arg)
1308 {
1309         struct {
1310                 const char *name;
1311                 unsigned int flags;
1312         } *phase, phases[] = {
1313                 { .name = "basic", .flags = 0 },
1314                 { .name = "idle", .flags = TEST_IDLE },
1315                 { .name = "busy", .flags = TEST_BUSY },
1316                 { .name = "busy-reset", .flags = TEST_BUSY | TEST_RESET },
1317                 { .name = "busy-idle", .flags = TEST_BUSY | TEST_IDLE },
1318                 { .name = "reset-idle", .flags = TEST_RESET | TEST_IDLE },
1319         };
1320         unsigned int i;
1321         int ret = 0;
1322
1323         for (i = 0, phase = phases; ret == 0 && i < ARRAY_SIZE(phases);
1324              i++, phase++)
1325                 ret = __igt_ctx_sseu(arg, phase->name, phase->flags);
1326
1327         return ret;
1328 }
1329
1330 static int igt_ctx_readonly(void *arg)
1331 {
1332         struct drm_i915_private *i915 = arg;
1333         struct drm_i915_gem_object *obj = NULL;
1334         struct i915_request *tq[5] = {};
1335         struct i915_address_space *vm;
1336         struct i915_gem_context *ctx;
1337         unsigned long idx, ndwords, dw;
1338         struct igt_live_test t;
1339         struct drm_file *file;
1340         I915_RND_STATE(prng);
1341         IGT_TIMEOUT(end_time);
1342         LIST_HEAD(objects);
1343         int err = -ENODEV;
1344
1345         /*
1346          * Create a few read-only objects (with the occasional writable object)
1347          * and try to write into these objects, checking that the GPU discards
1348          * any write to a read-only object.
1349          */
1350
1351         file = mock_file(i915);
1352         if (IS_ERR(file))
1353                 return PTR_ERR(file);
1354
1355         mutex_lock(&i915->drm.struct_mutex);
1356
1357         err = igt_live_test_begin(&t, i915, __func__, "");
1358         if (err)
1359                 goto out_unlock;
1360
1361         ctx = live_context(i915, file);
1362         if (IS_ERR(ctx)) {
1363                 err = PTR_ERR(ctx);
1364                 goto out_unlock;
1365         }
1366
1367         vm = ctx->vm ?: &i915->ggtt.alias->vm;
1368         if (!vm || !vm->has_read_only) {
1369                 err = 0;
1370                 goto out_unlock;
1371         }
1372
1373         ndwords = 0;
1374         dw = 0;
1375         while (!time_after(jiffies, end_time)) {
1376                 struct i915_gem_engines_iter it;
1377                 struct intel_context *ce;
1378
1379                 for_each_gem_engine(ce,
1380                                     i915_gem_context_lock_engines(ctx), it) {
1381                         if (!intel_engine_can_store_dword(ce->engine))
1382                                 continue;
1383
1384                         if (!obj) {
1385                                 obj = create_test_object(ce->vm, file, &objects);
1386                                 if (IS_ERR(obj)) {
1387                                         err = PTR_ERR(obj);
1388                                         i915_gem_context_unlock_engines(ctx);
1389                                         goto out_unlock;
1390                                 }
1391
1392                                 if (prandom_u32_state(&prng) & 1)
1393                                         i915_gem_object_set_readonly(obj);
1394                         }
1395
1396                         err = gpu_fill(ce, obj, dw);
1397                         if (err) {
1398                                 pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n",
1399                                        ndwords, dw, max_dwords(obj),
1400                                        ce->engine->name, ctx->hw_id,
1401                                        yesno(!!ctx->vm), err);
1402                                 i915_gem_context_unlock_engines(ctx);
1403                                 goto out_unlock;
1404                         }
1405
1406                         err = throttle(ce, tq, ARRAY_SIZE(tq));
1407                         if (err) {
1408                                 i915_gem_context_unlock_engines(ctx);
1409                                 goto out_unlock;
1410                         }
1411
1412                         if (++dw == max_dwords(obj)) {
1413                                 obj = NULL;
1414                                 dw = 0;
1415                         }
1416                         ndwords++;
1417                 }
1418                 i915_gem_context_unlock_engines(ctx);
1419         }
1420         pr_info("Submitted %lu dwords (across %u engines)\n",
1421                 ndwords, RUNTIME_INFO(i915)->num_engines);
1422
1423         dw = 0;
1424         idx = 0;
1425         list_for_each_entry(obj, &objects, st_link) {
1426                 unsigned int rem =
1427                         min_t(unsigned int, ndwords - dw, max_dwords(obj));
1428                 unsigned int num_writes;
1429
1430                 num_writes = rem;
1431                 if (i915_gem_object_is_readonly(obj))
1432                         num_writes = 0;
1433
1434                 err = cpu_check(obj, idx++, num_writes);
1435                 if (err)
1436                         break;
1437
1438                 dw += rem;
1439         }
1440
1441 out_unlock:
1442         throttle_release(tq, ARRAY_SIZE(tq));
1443         if (igt_live_test_end(&t))
1444                 err = -EIO;
1445         mutex_unlock(&i915->drm.struct_mutex);
1446
1447         mock_file_free(i915, file);
1448         return err;
1449 }
1450
1451 static int check_scratch(struct i915_gem_context *ctx, u64 offset)
1452 {
1453         struct drm_mm_node *node =
1454                 __drm_mm_interval_first(&ctx->vm->mm,
1455                                         offset, offset + sizeof(u32) - 1);
1456         if (!node || node->start > offset)
1457                 return 0;
1458
1459         GEM_BUG_ON(offset >= node->start + node->size);
1460
1461         pr_err("Target offset 0x%08x_%08x overlaps with a node in the mm!\n",
1462                upper_32_bits(offset), lower_32_bits(offset));
1463         return -EINVAL;
1464 }
1465
1466 static int write_to_scratch(struct i915_gem_context *ctx,
1467                             struct intel_engine_cs *engine,
1468                             u64 offset, u32 value)
1469 {
1470         struct drm_i915_private *i915 = ctx->i915;
1471         struct drm_i915_gem_object *obj;
1472         struct i915_request *rq;
1473         struct i915_vma *vma;
1474         u32 *cmd;
1475         int err;
1476
1477         GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
1478
1479         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1480         if (IS_ERR(obj))
1481                 return PTR_ERR(obj);
1482
1483         cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1484         if (IS_ERR(cmd)) {
1485                 err = PTR_ERR(cmd);
1486                 goto err;
1487         }
1488
1489         *cmd++ = MI_STORE_DWORD_IMM_GEN4;
1490         if (INTEL_GEN(i915) >= 8) {
1491                 *cmd++ = lower_32_bits(offset);
1492                 *cmd++ = upper_32_bits(offset);
1493         } else {
1494                 *cmd++ = 0;
1495                 *cmd++ = offset;
1496         }
1497         *cmd++ = value;
1498         *cmd = MI_BATCH_BUFFER_END;
1499         __i915_gem_object_flush_map(obj, 0, 64);
1500         i915_gem_object_unpin_map(obj);
1501
1502         intel_gt_chipset_flush(engine->gt);
1503
1504         vma = i915_vma_instance(obj, ctx->vm, NULL);
1505         if (IS_ERR(vma)) {
1506                 err = PTR_ERR(vma);
1507                 goto err;
1508         }
1509
1510         err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
1511         if (err)
1512                 goto err;
1513
1514         err = check_scratch(ctx, offset);
1515         if (err)
1516                 goto err_unpin;
1517
1518         rq = igt_request_alloc(ctx, engine);
1519         if (IS_ERR(rq)) {
1520                 err = PTR_ERR(rq);
1521                 goto err_unpin;
1522         }
1523
1524         err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
1525         if (err)
1526                 goto err_request;
1527
1528         i915_vma_lock(vma);
1529         err = i915_request_await_object(rq, vma->obj, false);
1530         if (err == 0)
1531                 err = i915_vma_move_to_active(vma, rq, 0);
1532         i915_vma_unlock(vma);
1533         if (err)
1534                 goto skip_request;
1535
1536         i915_vma_unpin(vma);
1537         i915_vma_close(vma);
1538         i915_vma_put(vma);
1539
1540         i915_request_add(rq);
1541
1542         return 0;
1543
1544 skip_request:
1545         i915_request_skip(rq, err);
1546 err_request:
1547         i915_request_add(rq);
1548 err_unpin:
1549         i915_vma_unpin(vma);
1550 err:
1551         i915_gem_object_put(obj);
1552         return err;
1553 }
1554
1555 static int read_from_scratch(struct i915_gem_context *ctx,
1556                              struct intel_engine_cs *engine,
1557                              u64 offset, u32 *value)
1558 {
1559         struct drm_i915_private *i915 = ctx->i915;
1560         struct drm_i915_gem_object *obj;
1561         const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
1562         const u32 result = 0x100;
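        /*
         * The batch below is pinned at a fixed GTT offset of 0, so 'result'
         * (0x100) is a byte offset inside the batch object itself: the value
         * loaded from 'offset' is parked in GPR0 and then stored back at
         * 'result', where the CPU reads it out as cmd[result / sizeof(*cmd)].
         */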
1563         struct i915_request *rq;
1564         struct i915_vma *vma;
1565         u32 *cmd;
1566         int err;
1567
1568         GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
1569
1570         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1571         if (IS_ERR(obj))
1572                 return PTR_ERR(obj);
1573
1574         cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1575         if (IS_ERR(cmd)) {
1576                 err = PTR_ERR(cmd);
1577                 goto err;
1578         }
1579
1580         memset(cmd, POISON_INUSE, PAGE_SIZE);
1581         if (INTEL_GEN(i915) >= 8) {
1582                 *cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
1583                 *cmd++ = RCS_GPR0;
1584                 *cmd++ = lower_32_bits(offset);
1585                 *cmd++ = upper_32_bits(offset);
1586                 *cmd++ = MI_STORE_REGISTER_MEM_GEN8;
1587                 *cmd++ = RCS_GPR0;
1588                 *cmd++ = result;
1589                 *cmd++ = 0;
1590         } else {
1591                 *cmd++ = MI_LOAD_REGISTER_MEM;
1592                 *cmd++ = RCS_GPR0;
1593                 *cmd++ = offset;
1594                 *cmd++ = MI_STORE_REGISTER_MEM;
1595                 *cmd++ = RCS_GPR0;
1596                 *cmd++ = result;
1597         }
1598         *cmd = MI_BATCH_BUFFER_END;
1599
1600         i915_gem_object_flush_map(obj);
1601         i915_gem_object_unpin_map(obj);
1602
1603         intel_gt_chipset_flush(engine->gt);
1604
1605         vma = i915_vma_instance(obj, ctx->vm, NULL);
1606         if (IS_ERR(vma)) {
1607                 err = PTR_ERR(vma);
1608                 goto err;
1609         }
1610
1611         err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
1612         if (err)
1613                 goto err;
1614
1615         err = check_scratch(ctx, offset);
1616         if (err)
1617                 goto err_unpin;
1618
1619         rq = igt_request_alloc(ctx, engine);
1620         if (IS_ERR(rq)) {
1621                 err = PTR_ERR(rq);
1622                 goto err_unpin;
1623         }
1624
1625         err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
1626         if (err)
1627                 goto err_request;
1628
1629         i915_vma_lock(vma);
1630         err = i915_request_await_object(rq, vma->obj, true);
1631         if (err == 0)
1632                 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
1633         i915_vma_unlock(vma);
1634         if (err)
1635                 goto skip_request;
1636
1637         i915_vma_unpin(vma);
1638         i915_vma_close(vma);
1639
1640         i915_request_add(rq);
1641
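             /*
              * Moving the object to the CPU domain waits for the GPU write
              * above, so the stored value is coherent before we map it.
              */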
1642         i915_gem_object_lock(obj);
1643         err = i915_gem_object_set_to_cpu_domain(obj, false);
1644         i915_gem_object_unlock(obj);
1645         if (err)
1646                 goto err;
1647
1648         cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
1649         if (IS_ERR(cmd)) {
1650                 err = PTR_ERR(cmd);
1651                 goto err;
1652         }
1653
1654         *value = cmd[result / sizeof(*cmd)];
1655         i915_gem_object_unpin_map(obj);
1656         i915_gem_object_put(obj);
1657
1658         return 0;
1659
1660 skip_request:
1661         i915_request_skip(rq, err);
1662 err_request:
1663         i915_request_add(rq);
1664 err_unpin:
1665         i915_vma_unpin(vma);
1666 err:
1667         i915_gem_object_put(obj);
1668         return err;
1669 }
1670
1671 static int igt_vm_isolation(void *arg)
1672 {
1673         struct drm_i915_private *i915 = arg;
1674         struct i915_gem_context *ctx_a, *ctx_b;
1675         struct intel_engine_cs *engine;
1676         struct igt_live_test t;
1677         struct drm_file *file;
1678         I915_RND_STATE(prng);
1679         unsigned long count;
1680         unsigned int id;
1681         u64 vm_total;
1682         int err;
1683
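             /*
              * The scratch read/write below relies on per-context address
              * spaces and on loading/storing registers from a user batch,
              * so older hardware is simply skipped.
              */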
1684         if (INTEL_GEN(i915) < 7)
1685                 return 0;
1686
1687         /*
1688          * The simple goal here is that a write into one context is not
1689          * observed in a second (separate page tables and scratch).
1690          */
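             /*
              * For each engine that can store a dword, pick random offsets
              * in the ppGTT, write a magic value through ctx_a and read the
              * same offset back through ctx_b; anything other than zero
              * means the two contexts are not isolated.
              */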
1691
1692         file = mock_file(i915);
1693         if (IS_ERR(file))
1694                 return PTR_ERR(file);
1695
1696         mutex_lock(&i915->drm.struct_mutex);
1697
1698         err = igt_live_test_begin(&t, i915, __func__, "");
1699         if (err)
1700                 goto out_unlock;
1701
1702         ctx_a = live_context(i915, file);
1703         if (IS_ERR(ctx_a)) {
1704                 err = PTR_ERR(ctx_a);
1705                 goto out_unlock;
1706         }
1707
1708         ctx_b = live_context(i915, file);
1709         if (IS_ERR(ctx_b)) {
1710                 err = PTR_ERR(ctx_b);
1711                 goto out_unlock;
1712         }
1713
1714         /* We can only test vm isolation if the vms are distinct */
1715         if (ctx_a->vm == ctx_b->vm)
1716                 goto out_unlock;
1717
1718         vm_total = ctx_a->vm->total;
1719         GEM_BUG_ON(ctx_b->vm->total != vm_total);
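             /*
              * Reserve the first GTT page: the random offsets below are
              * bumped up by one page to satisfy the GEM_BUG_ON() in the
              * scratch helpers.
              */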
1720         vm_total -= I915_GTT_PAGE_SIZE;
1721
1722         count = 0;
1723         for_each_engine(engine, i915, id) {
1724                 IGT_TIMEOUT(end_time);
1725                 unsigned long this = 0;
1726
1727                 if (!intel_engine_can_store_dword(engine))
1728                         continue;
1729
1730                 while (!__igt_timeout(end_time, NULL)) {
1731                         u32 value = 0xc5c5c5c5;
1732                         u64 offset;
1733
1734                         div64_u64_rem(i915_prandom_u64_state(&prng),
1735                                       vm_total, &offset);
1736                         offset = round_down(offset, alignof_dword);
1737                         offset += I915_GTT_PAGE_SIZE;
1738
1739                         err = write_to_scratch(ctx_a, engine,
1740                                                offset, 0xdeadbeef);
1741                         if (err == 0)
1742                                 err = read_from_scratch(ctx_b, engine,
1743                                                         offset, &value);
1744                         if (err)
1745                                 goto out_unlock;
1746
1747                         if (value) {
1748                                 pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
1749                                        engine->name, value,
1750                                        upper_32_bits(offset),
1751                                        lower_32_bits(offset),
1752                                        this);
1753                                 err = -EINVAL;
1754                                 goto out_unlock;
1755                         }
1756
1757                         this++;
1758                 }
1759                 count += this;
1760         }
1761         pr_info("Checked %lu scratch offsets across %d engines\n",
1762                 count, RUNTIME_INFO(i915)->num_engines);
1763
1764 out_unlock:
1765         if (igt_live_test_end(&t))
1766                 err = -EIO;
1767         mutex_unlock(&i915->drm.struct_mutex);
1768
1769         mock_file_free(i915, file);
1770         return err;
1771 }
1772
1773 static bool skip_unused_engines(struct intel_context *ce, void *data)
1774 {
1775         return !ce->state;
1776 }
1777
1778 static void mock_barrier_task(void *data)
1779 {
1780         unsigned int *counter = data;
1781
1782         ++*counter;
1783 }
1784
1785 static int mock_context_barrier(void *arg)
1786 {
1787 #undef pr_fmt
1788 #define pr_fmt(x) "context_barrier_task():" # x
1789         struct drm_i915_private *i915 = arg;
1790         struct i915_gem_context *ctx;
1791         struct i915_request *rq;
1792         unsigned int counter;
1793         int err;
1794
1795         /*
1796          * The context barrier provides us with a callback after it emits
1797          * a request; useful for retiring old state after loading new.
1798          */
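             /*
              * Each context_barrier_task() call below supplies an engine
              * mask, an optional skip predicate and a callback plus data to
              * run once the barrier has been emitted; with no engines
              * selected the callback is expected to fire immediately.
              */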
1799
1800         mutex_lock(&i915->drm.struct_mutex);
1801
1802         ctx = mock_context(i915, "mock");
1803         if (!ctx) {
1804                 err = -ENOMEM;
1805                 goto unlock;
1806         }
1807
1808         counter = 0;
1809         err = context_barrier_task(ctx, 0,
1810                                    NULL, NULL, mock_barrier_task, &counter);
1811         if (err) {
1812                 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1813                 goto out;
1814         }
1815         if (counter == 0) {
1816                 pr_err("Did not retire immediately with 0 engines\n");
1817                 err = -EINVAL;
1818                 goto out;
1819         }
1820
1821         counter = 0;
1822         err = context_barrier_task(ctx, ALL_ENGINES,
1823                                    skip_unused_engines,
1824                                    NULL,
1825                                    mock_barrier_task,
1826                                    &counter);
1827         if (err) {
1828                 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1829                 goto out;
1830         }
1831         if (counter == 0) {
1832                 pr_err("Did not retire immediately for all unused engines\n");
1833                 err = -EINVAL;
1834                 goto out;
1835         }
1836
1837         rq = igt_request_alloc(ctx, i915->engine[RCS0]);
1838         if (IS_ERR(rq)) {
1839                 pr_err("Request allocation failed!\n");
1840                 goto out;
                     err = PTR_ERR(rq);
1841                 goto out;
1842         i915_request_add(rq);
1843
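             /*
              * With a request now outstanding on RCS0, inject a fault into
              * the barrier for that engine and check that the error is
              * reported without the completion callback ever running.
              */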
1844         counter = 0;
1845         context_barrier_inject_fault = BIT(RCS0);
1846         err = context_barrier_task(ctx, ALL_ENGINES,
1847                                    NULL, NULL, mock_barrier_task, &counter);
1848         context_barrier_inject_fault = 0;
1849         if (err == -ENXIO)
1850                 err = 0;
1851         else
1852                 pr_err("Did not hit fault injection!\n");
1853         if (counter != 0) {
1854                 pr_err("Invoked callback on error!\n");
1855                 err = -EIO;
1856         }
1857         if (err)
1858                 goto out;
1859
1860         counter = 0;
1861         err = context_barrier_task(ctx, ALL_ENGINES,
1862                                    skip_unused_engines,
1863                                    NULL,
1864                                    mock_barrier_task,
1865                                    &counter);
1866         if (err) {
1867                 pr_err("Failed at line %d, err=%d\n", __LINE__, err);
1868                 goto out;
1869         }
1870         mock_device_flush(i915);
1871         if (counter == 0) {
1872                 pr_err("Did not retire on each active engine\n");
1873                 err = -EINVAL;
1874                 goto out;
1875         }
1876
1877 out:
1878         mock_context_close(ctx);
1879 unlock:
1880         mutex_unlock(&i915->drm.struct_mutex);
1881         return err;
1882 #undef pr_fmt
1883 #define pr_fmt(x) x
1884 }
1885
1886 int i915_gem_context_mock_selftests(void)
1887 {
1888         static const struct i915_subtest tests[] = {
1889                 SUBTEST(mock_context_barrier),
1890         };
1891         struct drm_i915_private *i915;
1892         int err;
1893
1894         i915 = mock_gem_device();
1895         if (!i915)
1896                 return -ENOMEM;
1897
1898         err = i915_subtests(tests, i915);
1899
1900         drm_dev_put(&i915->drm);
1901         return err;
1902 }
1903
1904 int i915_gem_context_live_selftests(struct drm_i915_private *i915)
1905 {
1906         static const struct i915_subtest tests[] = {
1907                 SUBTEST(live_nop_switch),
1908                 SUBTEST(live_parallel_switch),
1909                 SUBTEST(igt_ctx_exec),
1910                 SUBTEST(igt_ctx_readonly),
1911                 SUBTEST(igt_ctx_sseu),
1912                 SUBTEST(igt_shared_ctx_exec),
1913                 SUBTEST(igt_vm_isolation),
1914         };
1915
1916         if (intel_gt_is_wedged(&i915->gt))
1917                 return 0;
1918
1919         return i915_live_subtests(tests, i915);
1920 }