
drm/i915/gtt: Pull global wc page stash under its own locking
[android-x86/kernel.git] drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
1 /*
2  * Copyright © 2016 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  */
24
25 #include <linux/list_sort.h>
26 #include <linux/prime_numbers.h>
27
28 #include "../i915_selftest.h"
29 #include "i915_random.h"
30
31 #include "mock_context.h"
32 #include "mock_drm.h"
33 #include "mock_gem_device.h"
34
35 static void cleanup_freed_objects(struct drm_i915_private *i915)
36 {
37         /*
38          * As we may hold onto the struct_mutex for inordinate lengths of
39          * time, the NMI khungtaskd detector may fire for the free objects
40          * worker.
41          */
42         mutex_unlock(&i915->drm.struct_mutex);
43
44         i915_gem_drain_freed_objects(i915);
45
46         mutex_lock(&i915->drm.struct_mutex);
47 }
48
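/*
 * Fake backing storage: instead of allocating real pages, build a
 * scatterlist whose entries all point at a single biased pfn. This lets
 * the selftests create arbitrarily large "objects" for exercising GTT
 * address-space management without consuming system memory.
 */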
49 static void fake_free_pages(struct drm_i915_gem_object *obj,
50                             struct sg_table *pages)
51 {
52         sg_free_table(pages);
53         kfree(pages);
54 }
55
56 static int fake_get_pages(struct drm_i915_gem_object *obj)
57 {
58 #define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
59 #define PFN_BIAS 0x1000
60         struct sg_table *pages;
61         struct scatterlist *sg;
62         unsigned int sg_page_sizes;
63         typeof(obj->base.size) rem;
64
65         pages = kmalloc(sizeof(*pages), GFP);
66         if (!pages)
67                 return -ENOMEM;
68
69         rem = round_up(obj->base.size, BIT(31)) >> 31;
70         if (sg_alloc_table(pages, rem, GFP)) {
71                 kfree(pages);
72                 return -ENOMEM;
73         }
74
75         sg_page_sizes = 0;
76         rem = obj->base.size;
77         for (sg = pages->sgl; sg; sg = sg_next(sg)) {
78                 unsigned long len = min_t(typeof(rem), rem, BIT(31));
79
80                 GEM_BUG_ON(!len);
81                 sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
82                 sg_dma_address(sg) = page_to_phys(sg_page(sg));
83                 sg_dma_len(sg) = len;
84                 sg_page_sizes |= len;
85
86                 rem -= len;
87         }
88         GEM_BUG_ON(rem);
89
90         obj->mm.madv = I915_MADV_DONTNEED;
91
92         __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
93
94         return 0;
95 #undef GFP
96 }
97
98 static void fake_put_pages(struct drm_i915_gem_object *obj,
99                            struct sg_table *pages)
100 {
101         fake_free_pages(obj, pages);
102         obj->mm.dirty = false;
103         obj->mm.madv = I915_MADV_WILLNEED;
104 }
105
106 static const struct drm_i915_gem_object_ops fake_ops = {
107         .flags = I915_GEM_OBJECT_IS_SHRINKABLE,
108         .get_pages = fake_get_pages,
109         .put_pages = fake_put_pages,
110 };
111
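/*
 * Create a GEM object backed by the fake ops above: it reports the
 * requested size but carries no real backing storage.
 */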
112 static struct drm_i915_gem_object *
113 fake_dma_object(struct drm_i915_private *i915, u64 size)
114 {
115         struct drm_i915_gem_object *obj;
116
117         GEM_BUG_ON(!size);
118         GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
119
120         if (overflows_type(size, obj->base.size))
121                 return ERR_PTR(-E2BIG);
122
123         obj = i915_gem_object_alloc(i915);
124         if (!obj)
125                 goto err;
126
127         drm_gem_private_object_init(&i915->drm, &obj->base, size);
128         i915_gem_object_init(obj, &fake_ops);
129
130         obj->write_domain = I915_GEM_DOMAIN_CPU;
131         obj->read_domains = I915_GEM_DOMAIN_CPU;
132         obj->cache_level = I915_CACHE_NONE;
133
134         /* Preallocate the "backing storage" */
135         if (i915_gem_object_pin_pages(obj))
136                 goto err_obj;
137
138         i915_gem_object_unpin_pages(obj);
139         return obj;
140
141 err_obj:
142         i915_gem_object_put(obj);
143 err:
144         return ERR_PTR(-ENOMEM);
145 }
146
147 static int igt_ppgtt_alloc(void *arg)
148 {
149         struct drm_i915_private *dev_priv = arg;
150         struct i915_hw_ppgtt *ppgtt;
151         u64 size, last;
152         int err = 0;
153
154         /* Allocate a ppgtt and try to fill the entire range */
155
156         if (!USES_PPGTT(dev_priv))
157                 return 0;
158
159         ppgtt = __hw_ppgtt_create(dev_priv);
160         if (IS_ERR(ppgtt))
161                 return PTR_ERR(ppgtt);
162
163         if (!ppgtt->vm.allocate_va_range)
164                 goto err_ppgtt_cleanup;
165
166         /* Check we can allocate the entire range */
167         for (size = 4096;
168              size <= ppgtt->vm.total;
169              size <<= 2) {
170                 err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
171                 if (err) {
172                         if (err == -ENOMEM) {
173                                 pr_info("[1] Ran out of memory for va_range [0 + %llx] [bit %d]\n",
174                                         size, ilog2(size));
175                                 err = 0; /* virtual space too large! */
176                         }
177                         goto err_ppgtt_cleanup;
178                 }
179
180                 cond_resched();
181
182                 ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
183         }
184
185         /* Check we can incrementally allocate the entire range */
186         for (last = 0, size = 4096;
187              size <= ppgtt->vm.total;
188              last = size, size <<= 2) {
189                 err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
190                                                   last, size - last);
191                 if (err) {
192                         if (err == -ENOMEM) {
193                                 pr_info("[2] Ran out of memory for va_range [%llx + %llx] [bit %d]\n",
194                                         last, size - last, ilog2(size));
195                                 err = 0; /* virtual space too large! */
196                         }
197                         goto err_ppgtt_cleanup;
198                 }
199
200                 cond_resched();
201         }
202
203 err_ppgtt_cleanup:
204         mutex_lock(&dev_priv->drm.struct_mutex);
205         ppgtt->vm.cleanup(&ppgtt->vm);
206         mutex_unlock(&dev_priv->drm.struct_mutex);
207         kfree(ppgtt);
208         return err;
209 }
210
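/*
 * Exercise the low-level address-space hooks (allocate_va_range,
 * insert_entries, clear_range) directly, using a throwaway on-stack
 * mock vma instead of the full vma machinery, for every power-of-two
 * object size that fits within the hole.
 */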
211 static int lowlevel_hole(struct drm_i915_private *i915,
212                          struct i915_address_space *vm,
213                          u64 hole_start, u64 hole_end,
214                          unsigned long end_time)
215 {
216         I915_RND_STATE(seed_prng);
217         unsigned int size;
218         struct i915_vma mock_vma;
219
220         memset(&mock_vma, 0, sizeof(struct i915_vma));
221
222         /* Keep creating larger objects until one cannot fit into the hole */
223         for (size = 12; (hole_end - hole_start) >> size; size++) {
224                 I915_RND_SUBSTATE(prng, seed_prng);
225                 struct drm_i915_gem_object *obj;
226                 unsigned int *order, count, n;
227                 u64 hole_size;
228
229                 hole_size = (hole_end - hole_start) >> size;
230                 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
231                         hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
232                 count = hole_size >> 1;
233                 if (!count) {
234                         pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
235                                  __func__, hole_start, hole_end, size, hole_size);
236                         break;
237                 }
238
239                 do {
240                         order = i915_random_order(count, &prng);
241                         if (order)
242                                 break;
243                 } while (count >>= 1);
244                 if (!count)
245                         return -ENOMEM;
246                 GEM_BUG_ON(!order);
247
248                 GEM_BUG_ON(count * BIT_ULL(size) > vm->total);
249                 GEM_BUG_ON(hole_start + count * BIT_ULL(size) > hole_end);
250
251                 /* Ignore allocation failures (i.e. don't report them as
252                  * a test failure) as we are purposefully allocating very
253                  * large objects without checking that we have sufficient
254                  * memory. We expect to hit -ENOMEM.
255                  */
256
257                 obj = fake_dma_object(i915, BIT_ULL(size));
258                 if (IS_ERR(obj)) {
259                         kfree(order);
260                         break;
261                 }
262
263                 GEM_BUG_ON(obj->base.size != BIT_ULL(size));
264
265                 if (i915_gem_object_pin_pages(obj)) {
266                         i915_gem_object_put(obj);
267                         kfree(order);
268                         break;
269                 }
270
271                 for (n = 0; n < count; n++) {
272                         u64 addr = hole_start + order[n] * BIT_ULL(size);
273
274                         GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
275
276                         if (igt_timeout(end_time,
277                                         "%s timed out before %d/%d\n",
278                                         __func__, n, count)) {
279                                 hole_end = hole_start; /* quit */
280                                 break;
281                         }
282
283                         if (vm->allocate_va_range &&
284                             vm->allocate_va_range(vm, addr, BIT_ULL(size)))
285                                 break;
286
287                         mock_vma.pages = obj->mm.pages;
288                         mock_vma.node.size = BIT_ULL(size);
289                         mock_vma.node.start = addr;
290
291                         intel_runtime_pm_get(i915);
292                         vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0);
293                         intel_runtime_pm_put(i915);
294                 }
295                 count = n;
296
297                 i915_random_reorder(order, count, &prng);
298                 for (n = 0; n < count; n++) {
299                         u64 addr = hole_start + order[n] * BIT_ULL(size);
300
301                         GEM_BUG_ON(addr + BIT_ULL(size) > vm->total);
302                         vm->clear_range(vm, addr, BIT_ULL(size));
303                 }
304
305                 i915_gem_object_unpin_pages(obj);
306                 i915_gem_object_put(obj);
307
308                 kfree(order);
309
310                 cleanup_freed_objects(i915);
311         }
312
313         return 0;
314 }
315
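/* Unbind and release every object (and its vma) on the list. */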
316 static void close_object_list(struct list_head *objects,
317                               struct i915_address_space *vm)
318 {
319         struct drm_i915_gem_object *obj, *on;
320         int ignored;
321
322         list_for_each_entry_safe(obj, on, objects, st_link) {
323                 struct i915_vma *vma;
324
325                 vma = i915_vma_instance(obj, vm, NULL);
326                 if (!IS_ERR(vma))
327                         ignored = i915_vma_unbind(vma);
328                 /* Only ppgtt vma may be closed before the object is freed */
329                 if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
330                         i915_vma_close(vma);
331
332                 list_del(&obj->st_link);
333                 i915_gem_object_put(obj);
334         }
335 }
336
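/*
 * Fill the hole with many differently sized objects, packing them
 * against both edges (top-down and bottom-up), and verify each vma
 * lands exactly where requested before unbinding it again.
 */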
337 static int fill_hole(struct drm_i915_private *i915,
338                      struct i915_address_space *vm,
339                      u64 hole_start, u64 hole_end,
340                      unsigned long end_time)
341 {
342         const u64 hole_size = hole_end - hole_start;
343         struct drm_i915_gem_object *obj;
344         const unsigned long max_pages =
345                 min_t(u64, ULONG_MAX - 1, hole_size/2 >> PAGE_SHIFT);
346         const unsigned long max_step = max(int_sqrt(max_pages), 2UL);
347         unsigned long npages, prime, flags;
348         struct i915_vma *vma;
349         LIST_HEAD(objects);
350         int err;
351
352         /* Try binding many VMA working inwards from either edge */
353
354         flags = PIN_OFFSET_FIXED | PIN_USER;
355         if (i915_is_ggtt(vm))
356                 flags |= PIN_GLOBAL;
357
358         for_each_prime_number_from(prime, 2, max_step) {
359                 for (npages = 1; npages <= max_pages; npages *= prime) {
360                         const u64 full_size = npages << PAGE_SHIFT;
361                         const struct {
362                                 const char *name;
363                                 u64 offset;
364                                 int step;
365                         } phases[] = {
366                                 { "top-down", hole_end, -1, },
367                                 { "bottom-up", hole_start, 1, },
368                                 { }
369                         }, *p;
370
371                         obj = fake_dma_object(i915, full_size);
372                         if (IS_ERR(obj))
373                                 break;
374
375                         list_add(&obj->st_link, &objects);
376
377                         /* Align differing sized objects against the edges, and
378                          * check we don't walk off into the void when binding
379                          * them into the GTT.
380                          */
381                         for (p = phases; p->name; p++) {
382                                 u64 offset;
383
384                                 offset = p->offset;
385                                 list_for_each_entry(obj, &objects, st_link) {
386                                         vma = i915_vma_instance(obj, vm, NULL);
387                                         if (IS_ERR(vma))
388                                                 continue;
389
390                                         if (p->step < 0) {
391                                                 if (offset < hole_start + obj->base.size)
392                                                         break;
393                                                 offset -= obj->base.size;
394                                         }
395
396                                         err = i915_vma_pin(vma, 0, 0, offset | flags);
397                                         if (err) {
398                                                 pr_err("%s(%s) pin (forward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
399                                                        __func__, p->name, err, npages, prime, offset);
400                                                 goto err;
401                                         }
402
403                                         if (!drm_mm_node_allocated(&vma->node) ||
404                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
405                                                 pr_err("%s(%s) (forward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
406                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
407                                                        offset);
408                                                 err = -EINVAL;
409                                                 goto err;
410                                         }
411
412                                         i915_vma_unpin(vma);
413
414                                         if (p->step > 0) {
415                                                 if (offset + obj->base.size > hole_end)
416                                                         break;
417                                                 offset += obj->base.size;
418                                         }
419                                 }
420
421                                 offset = p->offset;
422                                 list_for_each_entry(obj, &objects, st_link) {
423                                         vma = i915_vma_instance(obj, vm, NULL);
424                                         if (IS_ERR(vma))
425                                                 continue;
426
427                                         if (p->step < 0) {
428                                                 if (offset < hole_start + obj->base.size)
429                                                         break;
430                                                 offset -= obj->base.size;
431                                         }
432
433                                         if (!drm_mm_node_allocated(&vma->node) ||
434                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
435                                                 pr_err("%s(%s) (forward) moved vma.node=%llx + %llx, expected offset %llx\n",
436                                                        __func__, p->name, vma->node.start, vma->node.size,
437                                                        offset);
438                                                 err = -EINVAL;
439                                                 goto err;
440                                         }
441
442                                         err = i915_vma_unbind(vma);
443                                         if (err) {
444                                                 pr_err("%s(%s) (forward) unbind of vma.node=%llx + %llx failed with err=%d\n",
445                                                        __func__, p->name, vma->node.start, vma->node.size,
446                                                        err);
447                                                 goto err;
448                                         }
449
450                                         if (p->step > 0) {
451                                                 if (offset + obj->base.size > hole_end)
452                                                         break;
453                                                 offset += obj->base.size;
454                                         }
455                                 }
456
457                                 offset = p->offset;
458                                 list_for_each_entry_reverse(obj, &objects, st_link) {
459                                         vma = i915_vma_instance(obj, vm, NULL);
460                                         if (IS_ERR(vma))
461                                                 continue;
462
463                                         if (p->step < 0) {
464                                                 if (offset < hole_start + obj->base.size)
465                                                         break;
466                                                 offset -= obj->base.size;
467                                         }
468
469                                         err = i915_vma_pin(vma, 0, 0, offset | flags);
470                                         if (err) {
471                                                 pr_err("%s(%s) pin (backward) failed with err=%d on size=%lu pages (prime=%lu), offset=%llx\n",
472                                                        __func__, p->name, err, npages, prime, offset);
473                                                 goto err;
474                                         }
475
476                                         if (!drm_mm_node_allocated(&vma->node) ||
477                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
478                                                 pr_err("%s(%s) (backward) insert failed: vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
479                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
480                                                        offset);
481                                                 err = -EINVAL;
482                                                 goto err;
483                                         }
484
485                                         i915_vma_unpin(vma);
486
487                                         if (p->step > 0) {
488                                                 if (offset + obj->base.size > hole_end)
489                                                         break;
490                                                 offset += obj->base.size;
491                                         }
492                                 }
493
494                                 offset = p->offset;
495                                 list_for_each_entry_reverse(obj, &objects, st_link) {
496                                         vma = i915_vma_instance(obj, vm, NULL);
497                                         if (IS_ERR(vma))
498                                                 continue;
499
500                                         if (p->step < 0) {
501                                                 if (offset < hole_start + obj->base.size)
502                                                         break;
503                                                 offset -= obj->base.size;
504                                         }
505
506                                         if (!drm_mm_node_allocated(&vma->node) ||
507                                             i915_vma_misplaced(vma, 0, 0, offset | flags)) {
508                                                 pr_err("%s(%s) (backward) moved vma.node=%llx + %llx [allocated? %d], expected offset %llx\n",
509                                                        __func__, p->name, vma->node.start, vma->node.size, drm_mm_node_allocated(&vma->node),
510                                                        offset);
511                                                 err = -EINVAL;
512                                                 goto err;
513                                         }
514
515                                         err = i915_vma_unbind(vma);
516                                         if (err) {
517                                                 pr_err("%s(%s) (backward) unbind of vma.node=%llx + %llx failed with err=%d\n",
518                                                        __func__, p->name, vma->node.start, vma->node.size,
519                                                        err);
520                                                 goto err;
521                                         }
522
523                                         if (p->step > 0) {
524                                                 if (offset + obj->base.size > hole_end)
525                                                         break;
526                                                 offset += obj->base.size;
527                                         }
528                                 }
529                         }
530
531                         if (igt_timeout(end_time, "%s timed out (npages=%lu, prime=%lu)\n",
532                                         __func__, npages, prime)) {
533                                 err = -EINTR;
534                                 goto err;
535                         }
536                 }
537
538                 close_object_list(&objects, vm);
539                 cleanup_freed_objects(i915);
540         }
541
542         return 0;
543
544 err:
545         close_object_list(&objects, vm);
546         return err;
547 }
548
549 static int walk_hole(struct drm_i915_private *i915,
550                      struct i915_address_space *vm,
551                      u64 hole_start, u64 hole_end,
552                      unsigned long end_time)
553 {
554         const u64 hole_size = hole_end - hole_start;
555         const unsigned long max_pages =
556                 min_t(u64, ULONG_MAX - 1, hole_size >> PAGE_SHIFT);
557         unsigned long flags;
558         u64 size;
559
560         /* Try binding a single VMA in different positions within the hole */
561
562         flags = PIN_OFFSET_FIXED | PIN_USER;
563         if (i915_is_ggtt(vm))
564                 flags |= PIN_GLOBAL;
565
566         for_each_prime_number_from(size, 1, max_pages) {
567                 struct drm_i915_gem_object *obj;
568                 struct i915_vma *vma;
569                 u64 addr;
570                 int err = 0;
571
572                 obj = fake_dma_object(i915, size << PAGE_SHIFT);
573                 if (IS_ERR(obj))
574                         break;
575
576                 vma = i915_vma_instance(obj, vm, NULL);
577                 if (IS_ERR(vma)) {
578                         err = PTR_ERR(vma);
579                         goto err_put;
580                 }
581
582                 for (addr = hole_start;
583                      addr + obj->base.size < hole_end;
584                      addr += obj->base.size) {
585                         err = i915_vma_pin(vma, 0, 0, addr | flags);
586                         if (err) {
587                                 pr_err("%s bind failed at %llx + %llx [hole %llx - %llx] with err=%d\n",
588                                        __func__, addr, vma->size,
589                                        hole_start, hole_end, err);
590                                 goto err_close;
591                         }
592                         i915_vma_unpin(vma);
593
594                         if (!drm_mm_node_allocated(&vma->node) ||
595                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
596                                 pr_err("%s incorrect at %llx + %llx\n",
597                                        __func__, addr, vma->size);
598                                 err = -EINVAL;
599                                 goto err_close;
600                         }
601
602                         err = i915_vma_unbind(vma);
603                         if (err) {
604                                 pr_err("%s unbind failed at %llx + %llx with err=%d\n",
605                                        __func__, addr, vma->size, err);
606                                 goto err_close;
607                         }
608
609                         GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
610
611                         if (igt_timeout(end_time,
612                                         "%s timed out at %llx\n",
613                                         __func__, addr)) {
614                                 err = -EINTR;
615                                 goto err_close;
616                         }
617                 }
618
619 err_close:
620                 if (!i915_vma_is_ggtt(vma))
621                         i915_vma_close(vma);
622 err_put:
623                 i915_gem_object_put(obj);
624                 if (err)
625                         return err;
626
627                 cleanup_freed_objects(i915);
628         }
629
630         return 0;
631 }
632
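/*
 * Place a two-page object so that it straddles every power-of-two
 * boundary within the hole, checking that binding at those offsets
 * neither fails nor lands in the wrong place.
 */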
633 static int pot_hole(struct drm_i915_private *i915,
634                     struct i915_address_space *vm,
635                     u64 hole_start, u64 hole_end,
636                     unsigned long end_time)
637 {
638         struct drm_i915_gem_object *obj;
639         struct i915_vma *vma;
640         unsigned long flags;
641         unsigned int pot;
642         int err = 0;
643
644         flags = PIN_OFFSET_FIXED | PIN_USER;
645         if (i915_is_ggtt(vm))
646                 flags |= PIN_GLOBAL;
647
648         obj = i915_gem_object_create_internal(i915, 2 * I915_GTT_PAGE_SIZE);
649         if (IS_ERR(obj))
650                 return PTR_ERR(obj);
651
652         vma = i915_vma_instance(obj, vm, NULL);
653         if (IS_ERR(vma)) {
654                 err = PTR_ERR(vma);
655                 goto err_obj;
656         }
657
658         /* Insert a pair of pages across every pot boundary within the hole */
659         for (pot = fls64(hole_end - 1) - 1;
660              pot > ilog2(2 * I915_GTT_PAGE_SIZE);
661              pot--) {
662                 u64 step = BIT_ULL(pot);
663                 u64 addr;
664
665                 for (addr = round_up(hole_start + I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
666                      addr <= round_down(hole_end - 2*I915_GTT_PAGE_SIZE, step) - I915_GTT_PAGE_SIZE;
667                      addr += step) {
668                         err = i915_vma_pin(vma, 0, 0, addr | flags);
669                         if (err) {
670                                 pr_err("%s failed to pin object at %llx in hole [%llx - %llx], with err=%d\n",
671                                        __func__,
672                                        addr,
673                                        hole_start, hole_end,
674                                        err);
675                                 goto err;
676                         }
677
678                         if (!drm_mm_node_allocated(&vma->node) ||
679                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
680                                 pr_err("%s incorrect at %llx + %llx\n",
681                                        __func__, addr, vma->size);
682                                 i915_vma_unpin(vma);
683                                 err = i915_vma_unbind(vma);
684                                 err = -EINVAL;
685                                 goto err;
686                         }
687
688                         i915_vma_unpin(vma);
689                         err = i915_vma_unbind(vma);
690                         GEM_BUG_ON(err);
691                 }
692
693                 if (igt_timeout(end_time,
694                                 "%s timed out after %d/%d\n",
695                                 __func__, pot, fls64(hole_end - 1) - 1)) {
696                         err = -EINTR;
697                         goto err;
698                 }
699         }
700
701 err:
702         if (!i915_vma_is_ggtt(vma))
703                 i915_vma_close(vma);
704 err_obj:
705         i915_gem_object_put(obj);
706         return err;
707 }
708
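/*
 * Like lowlevel_hole, but via the normal vma pin/unbind paths: bind a
 * single object at randomly ordered offsets throughout the hole, for
 * each power-of-two object size.
 */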
709 static int drunk_hole(struct drm_i915_private *i915,
710                       struct i915_address_space *vm,
711                       u64 hole_start, u64 hole_end,
712                       unsigned long end_time)
713 {
714         I915_RND_STATE(prng);
715         unsigned int size;
716         unsigned long flags;
717
718         flags = PIN_OFFSET_FIXED | PIN_USER;
719         if (i915_is_ggtt(vm))
720                 flags |= PIN_GLOBAL;
721
722         /* Keep creating larger objects until one cannot fit into the hole */
723         for (size = 12; (hole_end - hole_start) >> size; size++) {
724                 struct drm_i915_gem_object *obj;
725                 unsigned int *order, count, n;
726                 struct i915_vma *vma;
727                 u64 hole_size;
728                 int err = -ENODEV;
729
730                 hole_size = (hole_end - hole_start) >> size;
731                 if (hole_size > KMALLOC_MAX_SIZE / sizeof(u32))
732                         hole_size = KMALLOC_MAX_SIZE / sizeof(u32);
733                 count = hole_size >> 1;
734                 if (!count) {
735                         pr_debug("%s: hole is too small [%llx - %llx] >> %d: %lld\n",
736                                  __func__, hole_start, hole_end, size, hole_size);
737                         break;
738                 }
739
740                 do {
741                         order = i915_random_order(count, &prng);
742                         if (order)
743                                 break;
744                 } while (count >>= 1);
745                 if (!count)
746                         return -ENOMEM;
747                 GEM_BUG_ON(!order);
748
749                 /* Ignore allocation failures (i.e. don't report them as
750                  * a test failure) as we are purposefully allocating very
751                  * large objects without checking that we have sufficient
752                  * memory. We expect to hit -ENOMEM.
753                  */
754
755                 obj = fake_dma_object(i915, BIT_ULL(size));
756                 if (IS_ERR(obj)) {
757                         kfree(order);
758                         break;
759                 }
760
761                 vma = i915_vma_instance(obj, vm, NULL);
762                 if (IS_ERR(vma)) {
763                         err = PTR_ERR(vma);
764                         goto err_obj;
765                 }
766
767                 GEM_BUG_ON(vma->size != BIT_ULL(size));
768
769                 for (n = 0; n < count; n++) {
770                         u64 addr = hole_start + order[n] * BIT_ULL(size);
771
772                         err = i915_vma_pin(vma, 0, 0, addr | flags);
773                         if (err) {
774                                 pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
775                                        __func__,
776                                        addr, BIT_ULL(size),
777                                        hole_start, hole_end,
778                                        err);
779                                 goto err;
780                         }
781
782                         if (!drm_mm_node_allocated(&vma->node) ||
783                             i915_vma_misplaced(vma, 0, 0, addr | flags)) {
784                                 pr_err("%s incorrect at %llx + %llx\n",
785                                        __func__, addr, BIT_ULL(size));
786                                 i915_vma_unpin(vma);
787                                 err = i915_vma_unbind(vma);
788                                 err = -EINVAL;
789                                 goto err;
790                         }
791
792                         i915_vma_unpin(vma);
793                         err = i915_vma_unbind(vma);
794                         GEM_BUG_ON(err);
795
796                         if (igt_timeout(end_time,
797                                         "%s timed out after %d/%d\n",
798                                         __func__, n, count)) {
799                                 err = -EINTR;
800                                 goto err;
801                         }
802                 }
803
804 err:
805                 if (!i915_vma_is_ggtt(vma))
806                         i915_vma_close(vma);
807 err_obj:
808                 i915_gem_object_put(obj);
809                 kfree(order);
810                 if (err)
811                         return err;
812
813                 cleanup_freed_objects(i915);
814         }
815
816         return 0;
817 }
818
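/*
 * Fill the hole with progressively larger objects, doubling the size at
 * each step, until the range is exhausted. Used by shrink_hole() below
 * with fault injection enabled.
 */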
819 static int __shrink_hole(struct drm_i915_private *i915,
820                          struct i915_address_space *vm,
821                          u64 hole_start, u64 hole_end,
822                          unsigned long end_time)
823 {
824         struct drm_i915_gem_object *obj;
825         unsigned long flags = PIN_OFFSET_FIXED | PIN_USER;
826         unsigned int order = 12;
827         LIST_HEAD(objects);
828         int err = 0;
829         u64 addr;
830
831         /* Keep creating larger objects until one cannot fit into the hole */
832         for (addr = hole_start; addr < hole_end; ) {
833                 struct i915_vma *vma;
834                 u64 size = BIT_ULL(order++);
835
836                 size = min(size, hole_end - addr);
837                 obj = fake_dma_object(i915, size);
838                 if (IS_ERR(obj)) {
839                         err = PTR_ERR(obj);
840                         break;
841                 }
842
843                 list_add(&obj->st_link, &objects);
844
845                 vma = i915_vma_instance(obj, vm, NULL);
846                 if (IS_ERR(vma)) {
847                         err = PTR_ERR(vma);
848                         break;
849                 }
850
851                 GEM_BUG_ON(vma->size != size);
852
853                 err = i915_vma_pin(vma, 0, 0, addr | flags);
854                 if (err) {
855                         pr_err("%s failed to pin object at %llx + %llx in hole [%llx - %llx], with err=%d\n",
856                                __func__, addr, size, hole_start, hole_end, err);
857                         break;
858                 }
859
860                 if (!drm_mm_node_allocated(&vma->node) ||
861                     i915_vma_misplaced(vma, 0, 0, addr | flags)) {
862                         pr_err("%s incorrect at %llx + %llx\n",
863                                __func__, addr, size);
864                         i915_vma_unpin(vma);
865                         err = i915_vma_unbind(vma);
866                         err = -EINVAL;
867                         break;
868                 }
869
870                 i915_vma_unpin(vma);
871                 addr += size;
872
873                 if (igt_timeout(end_time,
874                                 "%s timed out at offset %llx [%llx - %llx]\n",
875                                 __func__, addr, hole_start, hole_end)) {
876                         err = -EINTR;
877                         break;
878                 }
879         }
880
881         close_object_list(&objects, vm);
882         cleanup_freed_objects(i915);
883         return err;
884 }
885
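/*
 * Re-run __shrink_hole() while injecting allocation failures into the
 * address space (vm->fault_attr) at every prime interval, to exercise
 * the error unwind paths of the page-table allocators.
 */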
886 static int shrink_hole(struct drm_i915_private *i915,
887                        struct i915_address_space *vm,
888                        u64 hole_start, u64 hole_end,
889                        unsigned long end_time)
890 {
891         unsigned long prime;
892         int err;
893
894         vm->fault_attr.probability = 999;
895         atomic_set(&vm->fault_attr.times, -1);
896
897         for_each_prime_number_from(prime, 0, ULONG_MAX - 1) {
898                 vm->fault_attr.interval = prime;
899                 err = __shrink_hole(i915, vm, hole_start, hole_end, end_time);
900                 if (err)
901                         break;
902         }
903
904         memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
905
906         return err;
907 }
908
909 static int shrink_boom(struct drm_i915_private *i915,
910                        struct i915_address_space *vm,
911                        u64 hole_start, u64 hole_end,
912                        unsigned long end_time)
913 {
914         unsigned int sizes[] = { SZ_2M, SZ_1G };
915         struct drm_i915_gem_object *purge;
916         struct drm_i915_gem_object *explode;
917         int err;
918         int i;
919
920         /*
921          * Catch the case which shrink_hole seems to miss. The setup here
922          * requires invoking the shrinker as we do the alloc_pt/alloc_pd, while
923          * ensuring that all vma associated with the respective pd/pdp are
924          * unpinned at the time.
925          */
926
927         for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
928                 unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
929                 unsigned int size = sizes[i];
930                 struct i915_vma *vma;
931
932                 purge = fake_dma_object(i915, size);
933                 if (IS_ERR(purge))
934                         return PTR_ERR(purge);
935
936                 vma = i915_vma_instance(purge, vm, NULL);
937                 if (IS_ERR(vma)) {
938                         err = PTR_ERR(vma);
939                         goto err_purge;
940                 }
941
942                 err = i915_vma_pin(vma, 0, 0, flags);
943                 if (err)
944                         goto err_purge;
945
946                 /* Should now be ripe for purging */
947                 i915_vma_unpin(vma);
948
949                 explode = fake_dma_object(i915, size);
950                 if (IS_ERR(explode)) {
951                         err = PTR_ERR(explode);
952                         goto err_purge;
953                 }
954
955                 vm->fault_attr.probability = 100;
956                 vm->fault_attr.interval = 1;
957                 atomic_set(&vm->fault_attr.times, -1);
958
959                 vma = i915_vma_instance(explode, vm, NULL);
960                 if (IS_ERR(vma)) {
961                         err = PTR_ERR(vma);
962                         goto err_explode;
963                 }
964
965                 err = i915_vma_pin(vma, 0, 0, flags | size);
966                 if (err)
967                         goto err_explode;
968
969                 i915_vma_unpin(vma);
970
971                 i915_gem_object_put(purge);
972                 i915_gem_object_put(explode);
973
974                 memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
975                 cleanup_freed_objects(i915);
976         }
977
978         return 0;
979
980 err_explode:
981         i915_gem_object_put(explode);
982 err_purge:
983         i915_gem_object_put(purge);
984         memset(&vm->fault_attr, 0, sizeof(vm->fault_attr));
985         return err;
986 }
987
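/*
 * Run one of the hole exercisers over the entire range of a freshly
 * created full ppgtt, holding struct_mutex throughout.
 */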
988 static int exercise_ppgtt(struct drm_i915_private *dev_priv,
989                           int (*func)(struct drm_i915_private *i915,
990                                       struct i915_address_space *vm,
991                                       u64 hole_start, u64 hole_end,
992                                       unsigned long end_time))
993 {
994         struct drm_file *file;
995         struct i915_hw_ppgtt *ppgtt;
996         IGT_TIMEOUT(end_time);
997         int err;
998
999         if (!USES_FULL_PPGTT(dev_priv))
1000                 return 0;
1001
1002         file = mock_file(dev_priv);
1003         if (IS_ERR(file))
1004                 return PTR_ERR(file);
1005
1006         mutex_lock(&dev_priv->drm.struct_mutex);
1007         ppgtt = i915_ppgtt_create(dev_priv, file->driver_priv);
1008         if (IS_ERR(ppgtt)) {
1009                 err = PTR_ERR(ppgtt);
1010                 goto out_unlock;
1011         }
1012         GEM_BUG_ON(offset_in_page(ppgtt->vm.total));
1013         GEM_BUG_ON(ppgtt->vm.closed);
1014
1015         err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
1016
1017         i915_ppgtt_close(&ppgtt->vm);
1018         i915_ppgtt_put(ppgtt);
1019 out_unlock:
1020         mutex_unlock(&dev_priv->drm.struct_mutex);
1021
1022         mock_file_free(dev_priv, file);
1023         return err;
1024 }
1025
1026 static int igt_ppgtt_fill(void *arg)
1027 {
1028         return exercise_ppgtt(arg, fill_hole);
1029 }
1030
1031 static int igt_ppgtt_walk(void *arg)
1032 {
1033         return exercise_ppgtt(arg, walk_hole);
1034 }
1035
1036 static int igt_ppgtt_pot(void *arg)
1037 {
1038         return exercise_ppgtt(arg, pot_hole);
1039 }
1040
1041 static int igt_ppgtt_drunk(void *arg)
1042 {
1043         return exercise_ppgtt(arg, drunk_hole);
1044 }
1045
1046 static int igt_ppgtt_lowlevel(void *arg)
1047 {
1048         return exercise_ppgtt(arg, lowlevel_hole);
1049 }
1050
1051 static int igt_ppgtt_shrink(void *arg)
1052 {
1053         return exercise_ppgtt(arg, shrink_hole);
1054 }
1055
1056 static int igt_ppgtt_shrink_boom(void *arg)
1057 {
1058         return exercise_ppgtt(arg, shrink_boom);
1059 }
1060
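/* Order the drm_mm hole list by ascending start address. */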
1061 static int sort_holes(void *priv, struct list_head *A, struct list_head *B)
1062 {
1063         struct drm_mm_node *a = list_entry(A, typeof(*a), hole_stack);
1064         struct drm_mm_node *b = list_entry(B, typeof(*b), hole_stack);
1065
1066         if (a->start < b->start)
1067                 return -1;
1068         else
1069                 return 1;
1070 }
1071
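/*
 * Run a hole exerciser over every hole currently present in the global
 * GTT. The hole list is re-sorted and the walk restarted after each
 * hole, since the exerciser itself modifies the drm_mm.
 */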
1072 static int exercise_ggtt(struct drm_i915_private *i915,
1073                          int (*func)(struct drm_i915_private *i915,
1074                                      struct i915_address_space *vm,
1075                                      u64 hole_start, u64 hole_end,
1076                                      unsigned long end_time))
1077 {
1078         struct i915_ggtt *ggtt = &i915->ggtt;
1079         u64 hole_start, hole_end, last = 0;
1080         struct drm_mm_node *node;
1081         IGT_TIMEOUT(end_time);
1082         int err = 0;
1083
1084         mutex_lock(&i915->drm.struct_mutex);
1085 restart:
1086         list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes);
1087         drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) {
1088                 if (hole_start < last)
1089                         continue;
1090
1091                 if (ggtt->vm.mm.color_adjust)
1092                         ggtt->vm.mm.color_adjust(node, 0,
1093                                                  &hole_start, &hole_end);
1094                 if (hole_start >= hole_end)
1095                         continue;
1096
1097                 err = func(i915, &ggtt->vm, hole_start, hole_end, end_time);
1098                 if (err)
1099                         break;
1100
1101                 /* As we have manipulated the drm_mm, the list may be corrupt */
1102                 last = hole_end;
1103                 goto restart;
1104         }
1105         mutex_unlock(&i915->drm.struct_mutex);
1106
1107         return err;
1108 }
1109
1110 static int igt_ggtt_fill(void *arg)
1111 {
1112         return exercise_ggtt(arg, fill_hole);
1113 }
1114
1115 static int igt_ggtt_walk(void *arg)
1116 {
1117         return exercise_ggtt(arg, walk_hole);
1118 }
1119
1120 static int igt_ggtt_pot(void *arg)
1121 {
1122         return exercise_ggtt(arg, pot_hole);
1123 }
1124
1125 static int igt_ggtt_drunk(void *arg)
1126 {
1127         return exercise_ggtt(arg, drunk_hole);
1128 }
1129
1130 static int igt_ggtt_lowlevel(void *arg)
1131 {
1132         return exercise_ggtt(arg, lowlevel_hole);
1133 }
1134
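/*
 * Smoke test for ggtt insert_page(): map the same backing page at many
 * GGTT offsets within the mappable aperture, write a distinct value
 * through each mapping, then read the values back in a different
 * random order and check them.
 */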
1135 static int igt_ggtt_page(void *arg)
1136 {
1137         const unsigned int count = PAGE_SIZE/sizeof(u32);
1138         I915_RND_STATE(prng);
1139         struct drm_i915_private *i915 = arg;
1140         struct i915_ggtt *ggtt = &i915->ggtt;
1141         struct drm_i915_gem_object *obj;
1142         struct drm_mm_node tmp;
1143         unsigned int *order, n;
1144         int err;
1145
1146         mutex_lock(&i915->drm.struct_mutex);
1147
1148         obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
1149         if (IS_ERR(obj)) {
1150                 err = PTR_ERR(obj);
1151                 goto out_unlock;
1152         }
1153
1154         err = i915_gem_object_pin_pages(obj);
1155         if (err)
1156                 goto out_free;
1157
1158         memset(&tmp, 0, sizeof(tmp));
1159         err = drm_mm_insert_node_in_range(&ggtt->vm.mm, &tmp,
1160                                           count * PAGE_SIZE, 0,
1161                                           I915_COLOR_UNEVICTABLE,
1162                                           0, ggtt->mappable_end,
1163                                           DRM_MM_INSERT_LOW);
1164         if (err)
1165                 goto out_unpin;
1166
1167         intel_runtime_pm_get(i915);
1168
1169         for (n = 0; n < count; n++) {
1170                 u64 offset = tmp.start + n * PAGE_SIZE;
1171
1172                 ggtt->vm.insert_page(&ggtt->vm,
1173                                      i915_gem_object_get_dma_address(obj, 0),
1174                                      offset, I915_CACHE_NONE, 0);
1175         }
1176
1177         order = i915_random_order(count, &prng);
1178         if (!order) {
1179                 err = -ENOMEM;
1180                 goto out_remove;
1181         }
1182
1183         for (n = 0; n < count; n++) {
1184                 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1185                 u32 __iomem *vaddr;
1186
1187                 vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1188                 iowrite32(n, vaddr + n);
1189                 io_mapping_unmap_atomic(vaddr);
1190         }
1191         i915_gem_flush_ggtt_writes(i915);
1192
1193         i915_random_reorder(order, count, &prng);
1194         for (n = 0; n < count; n++) {
1195                 u64 offset = tmp.start + order[n] * PAGE_SIZE;
1196                 u32 __iomem *vaddr;
1197                 u32 val;
1198
1199                 vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
1200                 val = ioread32(vaddr + n);
1201                 io_mapping_unmap_atomic(vaddr);
1202
1203                 if (val != n) {
1204                         pr_err("insert page failed: found %d, expected %d\n",
1205                                val, n);
1206                         err = -EINVAL;
1207                         break;
1208                 }
1209         }
1210
1211         kfree(order);
1212 out_remove:
1213         ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size);
1214         intel_runtime_pm_put(i915);
1215         drm_mm_remove_node(&tmp);
1216 out_unpin:
1217         i915_gem_object_unpin_pages(obj);
1218 out_free:
1219         i915_gem_object_put(obj);
1220 out_unlock:
1221         mutex_unlock(&i915->drm.struct_mutex);
1222         return err;
1223 }
1224
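/*
 * Mimic enough of the bookkeeping normally done when binding a vma so
 * that nodes inserted directly via i915_gem_gtt_reserve/insert appear
 * bound (and evictable) to the rest of the driver.
 */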
1225 static void track_vma_bind(struct i915_vma *vma)
1226 {
1227         struct drm_i915_gem_object *obj = vma->obj;
1228
1229         obj->bind_count++; /* track for eviction later */
1230         __i915_gem_object_pin_pages(obj);
1231
1232         vma->pages = obj->mm.pages;
1233         list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1234 }
1235
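/*
 * Run a hole exerciser over the entire ppgtt of a mock context on the
 * mock device.
 */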
1236 static int exercise_mock(struct drm_i915_private *i915,
1237                          int (*func)(struct drm_i915_private *i915,
1238                                      struct i915_address_space *vm,
1239                                      u64 hole_start, u64 hole_end,
1240                                      unsigned long end_time))
1241 {
1242         struct i915_gem_context *ctx;
1243         struct i915_hw_ppgtt *ppgtt;
1244         IGT_TIMEOUT(end_time);
1245         int err;
1246
1247         ctx = mock_context(i915, "mock");
1248         if (!ctx)
1249                 return -ENOMEM;
1250
1251         ppgtt = ctx->ppgtt;
1252         GEM_BUG_ON(!ppgtt);
1253
1254         err = func(i915, &ppgtt->vm, 0, ppgtt->vm.total, end_time);
1255
1256         mock_context_close(ctx);
1257         return err;
1258 }
1259
1260 static int igt_mock_fill(void *arg)
1261 {
1262         return exercise_mock(arg, fill_hole);
1263 }
1264
1265 static int igt_mock_walk(void *arg)
1266 {
1267         return exercise_mock(arg, walk_hole);
1268 }
1269
1270 static int igt_mock_pot(void *arg)
1271 {
1272         return exercise_mock(arg, pot_hole);
1273 }
1274
1275 static int igt_mock_drunk(void *arg)
1276 {
1277         return exercise_mock(arg, drunk_hole);
1278 }
1279
1280 static int igt_gtt_reserve(void *arg)
1281 {
1282         struct drm_i915_private *i915 = arg;
1283         struct drm_i915_gem_object *obj, *on;
1284         LIST_HEAD(objects);
1285         u64 total;
1286         int err = -ENODEV;
1287
1288         /* i915_gem_gtt_reserve() tries to reserve the precise range
1289          * for the node, and evicts if it has to. So our test checks that
1290          * it can give us the requested space and prevent overlaps.
1291          */
1292
1293         /* Start by filling the GGTT */
1294         for (total = 0;
1295              total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1296              total += 2*I915_GTT_PAGE_SIZE) {
1297                 struct i915_vma *vma;
1298
1299                 obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
1300                 if (IS_ERR(obj)) {
1301                         err = PTR_ERR(obj);
1302                         goto out;
1303                 }
1304
1305                 err = i915_gem_object_pin_pages(obj);
1306                 if (err) {
1307                         i915_gem_object_put(obj);
1308                         goto out;
1309                 }
1310
1311                 list_add(&obj->st_link, &objects);
1312
1313                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1314                 if (IS_ERR(vma)) {
1315                         err = PTR_ERR(vma);
1316                         goto out;
1317                 }
1318
1319                 err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
1320                                            obj->base.size,
1321                                            total,
1322                                            obj->cache_level,
1323                                            0);
1324                 if (err) {
1325                         pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n",
1326                                total, i915->ggtt.vm.total, err);
1327                         goto out;
1328                 }
1329                 track_vma_bind(vma);
1330
1331                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1332                 if (vma->node.start != total ||
1333                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1334                         pr_err("i915_gem_gtt_reserve (pass 1) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1335                                vma->node.start, vma->node.size,
1336                                total, 2*I915_GTT_PAGE_SIZE);
1337                         err = -EINVAL;
1338                         goto out;
1339                 }
1340         }
1341
1342         /* Now we start forcing evictions */
1343         for (total = I915_GTT_PAGE_SIZE;
1344              total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1345              total += 2*I915_GTT_PAGE_SIZE) {
1346                 struct i915_vma *vma;
1347
1348                 obj = i915_gem_object_create_internal(i915, 2*PAGE_SIZE);
1349                 if (IS_ERR(obj)) {
1350                         err = PTR_ERR(obj);
1351                         goto out;
1352                 }
1353
1354                 err = i915_gem_object_pin_pages(obj);
1355                 if (err) {
1356                         i915_gem_object_put(obj);
1357                         goto out;
1358                 }
1359
1360                 list_add(&obj->st_link, &objects);
1361
1362                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1363                 if (IS_ERR(vma)) {
1364                         err = PTR_ERR(vma);
1365                         goto out;
1366                 }
1367
1368                 err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
1369                                            obj->base.size,
1370                                            total,
1371                                            obj->cache_level,
1372                                            0);
1373                 if (err) {
1374                         pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n",
1375                                total, i915->ggtt.vm.total, err);
1376                         goto out;
1377                 }
1378                 track_vma_bind(vma);
1379
1380                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1381                 if (vma->node.start != total ||
1382                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1383                         pr_err("i915_gem_gtt_reserve (pass 2) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1384                                vma->node.start, vma->node.size,
1385                                total, 2*I915_GTT_PAGE_SIZE);
1386                         err = -EINVAL;
1387                         goto out;
1388                 }
1389         }
1390
1391         /* And then try at random */
1392         list_for_each_entry_safe(obj, on, &objects, st_link) {
1393                 struct i915_vma *vma;
1394                 u64 offset;
1395
1396                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1397                 if (IS_ERR(vma)) {
1398                         err = PTR_ERR(vma);
1399                         goto out;
1400                 }
1401
1402                 err = i915_vma_unbind(vma);
1403                 if (err) {
1404                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1405                         goto out;
1406                 }
1407
1408                 offset = random_offset(0, i915->ggtt.vm.total,
1409                                        2*I915_GTT_PAGE_SIZE,
1410                                        I915_GTT_MIN_ALIGNMENT);
1411
1412                 err = i915_gem_gtt_reserve(&i915->ggtt.vm, &vma->node,
1413                                            obj->base.size,
1414                                            offset,
1415                                            obj->cache_level,
1416                                            0);
1417                 if (err) {
1418                         pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n",
1419                                offset, i915->ggtt.vm.total, err);
1420                         goto out;
1421                 }
1422                 track_vma_bind(vma);
1423
1424                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1425                 if (vma->node.start != offset ||
1426                     vma->node.size != 2*I915_GTT_PAGE_SIZE) {
1427                         pr_err("i915_gem_gtt_reserve (pass 3) placement failed, found (%llx + %llx), expected (%llx + %lx)\n",
1428                                vma->node.start, vma->node.size,
1429                                offset, 2*I915_GTT_PAGE_SIZE);
1430                         err = -EINVAL;
1431                         goto out;
1432                 }
1433         }
1434
1435 out:
1436         list_for_each_entry_safe(obj, on, &objects, st_link) {
1437                 i915_gem_object_unpin_pages(obj);
1438                 i915_gem_object_put(obj);
1439         }
1440         return err;
1441 }
1442
1443 static int igt_gtt_insert(void *arg)
1444 {
1445         struct drm_i915_private *i915 = arg;
1446         struct drm_i915_gem_object *obj, *on;
1447         struct drm_mm_node tmp = {};
1448         const struct invalid_insert {
1449                 u64 size;
1450                 u64 alignment;
1451                 u64 start, end;
1452         } invalid_insert[] = {
1453                 {
1454                         i915->ggtt.vm.total + I915_GTT_PAGE_SIZE, 0,
1455                         0, i915->ggtt.vm.total,
1456                 },
1457                 {
1458                         2*I915_GTT_PAGE_SIZE, 0,
1459                         0, I915_GTT_PAGE_SIZE,
1460                 },
1461                 {
1462                         -(u64)I915_GTT_PAGE_SIZE, 0,
1463                         0, 4*I915_GTT_PAGE_SIZE,
1464                 },
1465                 {
1466                         -(u64)2*I915_GTT_PAGE_SIZE, 2*I915_GTT_PAGE_SIZE,
1467                         0, 4*I915_GTT_PAGE_SIZE,
1468                 },
1469                 {
1470                         I915_GTT_PAGE_SIZE, I915_GTT_MIN_ALIGNMENT << 1,
1471                         I915_GTT_MIN_ALIGNMENT, I915_GTT_MIN_ALIGNMENT << 1,
1472                 },
1473                 {}
1474         }, *ii;
1475         LIST_HEAD(objects);
1476         u64 total;
1477         int err = -ENODEV;
1478
1479         /* i915_gem_gtt_insert() tries to allocate some free space in the GTT
1480          * for the node, evicting if required.
1481          */
1482
1483         /* Check a couple of obviously invalid requests */
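             /*
              * Every entry in invalid_insert[] is impossible to satisfy: the
              * request is larger than the whole GGTT, larger than its
              * [start, end) range, wraps around u64, or asks for an alignment
              * with no valid slot inside the range.
              */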
1484         for (ii = invalid_insert; ii->size; ii++) {
1485                 err = i915_gem_gtt_insert(&i915->ggtt.vm, &tmp,
1486                                           ii->size, ii->alignment,
1487                                           I915_COLOR_UNEVICTABLE,
1488                                           ii->start, ii->end,
1489                                           0);
1490                 if (err != -ENOSPC) {
1491                         pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) did not return -ENOSPC (err=%d)\n",
1492                                ii->size, ii->alignment, ii->start, ii->end,
1493                                err);
1494                         return -EINVAL;
1495                 }
1496         }
1497
1498         /* Start by filling the GGTT */
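             /*
              * Each freshly inserted vma is pinned so that the later
              * insertions in this pass cannot evict it again.
              */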
1499         for (total = 0;
1500              total + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1501              total += I915_GTT_PAGE_SIZE) {
1502                 struct i915_vma *vma;
1503
1504                 obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
1505                 if (IS_ERR(obj)) {
1506                         err = PTR_ERR(obj);
1507                         goto out;
1508                 }
1509
1510                 err = i915_gem_object_pin_pages(obj);
1511                 if (err) {
1512                         i915_gem_object_put(obj);
1513                         goto out;
1514                 }
1515
1516                 list_add(&obj->st_link, &objects);
1517
1518                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1519                 if (IS_ERR(vma)) {
1520                         err = PTR_ERR(vma);
1521                         goto out;
1522                 }
1523
1524                 err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
1525                                           obj->base.size, 0, obj->cache_level,
1526                                           0, i915->ggtt.vm.total,
1527                                           0);
1528                 if (err == -ENOSPC) {
1529                         /* Maxed out the GGTT space; leave the pinned object
1530                          * on the list so the cleanup at out: releases it. */
1531                         break;
1532                 }
1533                 if (err) {
1534                         pr_err("i915_gem_gtt_insert (pass 1) failed at %llu/%llu with err=%d\n",
1535                                total, i915->ggtt.vm.total, err);
1536                         goto out;
1537                 }
1538                 track_vma_bind(vma);
1539                 __i915_vma_pin(vma);
1540
1541                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1542         }
1543
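             /* Check that filling the GGTT did not evict anything, then unpin */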
1544         list_for_each_entry(obj, &objects, st_link) {
1545                 struct i915_vma *vma;
1546
1547                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1548                 if (IS_ERR(vma)) {
1549                         err = PTR_ERR(vma);
1550                         goto out;
1551                 }
1552
1553                 if (!drm_mm_node_allocated(&vma->node)) {
1554                         pr_err("VMA was unexpectedly evicted!\n");
1555                         err = -EINVAL;
1556                         goto out;
1557                 }
1558
1559                 __i915_vma_unpin(vma);
1560         }
1561
1562         /* If we then reinsert, we should find the same hole */
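             /*
              * Everything else stays bound, so after unbinding one vma the
              * only hole big enough for it is the slot it just vacated.
              */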
1563         list_for_each_entry_safe(obj, on, &objects, st_link) {
1564                 struct i915_vma *vma;
1565                 u64 offset;
1566
1567                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1568                 if (IS_ERR(vma)) {
1569                         err = PTR_ERR(vma);
1570                         goto out;
1571                 }
1572
1573                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1574                 offset = vma->node.start;
1575
1576                 err = i915_vma_unbind(vma);
1577                 if (err) {
1578                         pr_err("i915_vma_unbind failed with err=%d!\n", err);
1579                         goto out;
1580                 }
1581
1582                 err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
1583                                           obj->base.size, 0, obj->cache_level,
1584                                           0, i915->ggtt.vm.total,
1585                                           0);
1586                 if (err) {
1587                         pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n",
1588                                total, i915->ggtt.vm.total, err);
1589                         goto out;
1590                 }
1591                 track_vma_bind(vma);
1592
1593                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1594                 if (vma->node.start != offset) {
1595                         pr_err("i915_gem_gtt_insert did not return node to its previous location (the only hole), expected address %llx, found %llx\n",
1596                                offset, vma->node.start);
1597                         err = -EINVAL;
1598                         goto out;
1599                 }
1600         }
1601
1602         /* And then force evictions */
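             /*
              * The GGTT is still fully populated by the (now unpinned)
              * single-page nodes, so every 2-page insertion must evict
              * something to make room.
              */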
1603         for (total = 0;
1604              total + 2*I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
1605              total += 2*I915_GTT_PAGE_SIZE) {
1606                 struct i915_vma *vma;
1607
1608                 obj = i915_gem_object_create_internal(i915, 2*I915_GTT_PAGE_SIZE);
1609                 if (IS_ERR(obj)) {
1610                         err = PTR_ERR(obj);
1611                         goto out;
1612                 }
1613
1614                 err = i915_gem_object_pin_pages(obj);
1615                 if (err) {
1616                         i915_gem_object_put(obj);
1617                         goto out;
1618                 }
1619
1620                 list_add(&obj->st_link, &objects);
1621
1622                 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1623                 if (IS_ERR(vma)) {
1624                         err = PTR_ERR(vma);
1625                         goto out;
1626                 }
1627
1628                 err = i915_gem_gtt_insert(&i915->ggtt.vm, &vma->node,
1629                                           obj->base.size, 0, obj->cache_level,
1630                                           0, i915->ggtt.vm.total,
1631                                           0);
1632                 if (err) {
1633                         pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n",
1634                                total, i915->ggtt.vm.total, err);
1635                         goto out;
1636                 }
1637                 track_vma_bind(vma);
1638
1639                 GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
1640         }
1641
1642 out:
1643         list_for_each_entry_safe(obj, on, &objects, st_link) {
1644                 i915_gem_object_unpin_pages(obj);
1645                 i915_gem_object_put(obj);
1646         }
1647         return err;
1648 }
1649
1650 int i915_gem_gtt_mock_selftests(void)
1651 {
1652         static const struct i915_subtest tests[] = {
1653                 SUBTEST(igt_mock_drunk),
1654                 SUBTEST(igt_mock_walk),
1655                 SUBTEST(igt_mock_pot),
1656                 SUBTEST(igt_mock_fill),
1657                 SUBTEST(igt_gtt_reserve),
1658                 SUBTEST(igt_gtt_insert),
1659         };
1660         struct drm_i915_private *i915;
1661         int err;
1662
1663         i915 = mock_gem_device();
1664         if (!i915)
1665                 return -ENOMEM;
1666
1667         mutex_lock(&i915->drm.struct_mutex);
1668         err = i915_subtests(tests, i915);
1669         mutex_unlock(&i915->drm.struct_mutex);
1670
1671         drm_dev_put(&i915->drm);
1672         return err;
1673 }
1674
1675 int i915_gem_gtt_live_selftests(struct drm_i915_private *i915)
1676 {
1677         static const struct i915_subtest tests[] = {
1678                 SUBTEST(igt_ppgtt_alloc),
1679                 SUBTEST(igt_ppgtt_lowlevel),
1680                 SUBTEST(igt_ppgtt_drunk),
1681                 SUBTEST(igt_ppgtt_walk),
1682                 SUBTEST(igt_ppgtt_pot),
1683                 SUBTEST(igt_ppgtt_fill),
1684                 SUBTEST(igt_ppgtt_shrink),
1685                 SUBTEST(igt_ppgtt_shrink_boom),
1686                 SUBTEST(igt_ggtt_lowlevel),
1687                 SUBTEST(igt_ggtt_drunk),
1688                 SUBTEST(igt_ggtt_walk),
1689                 SUBTEST(igt_ggtt_pot),
1690                 SUBTEST(igt_ggtt_fill),
1691                 SUBTEST(igt_ggtt_page),
1692         };
1693
1694         GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total));
1695
1696         return i915_subtests(tests, i915);
1697 }