
drm/i915: Hold reference to intel_frontbuffer as we track activity
[tomoyo/tomoyo-test1.git] drivers/gpu/drm/i915/gem/i915_gem_clflush.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2016 Intel Corporation
 */

#include "display/intel_frontbuffer.h"

#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_sw_fence_work.h"
#include "i915_trace.h"

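/*
 * A clflush pairs the asynchronous fence work with the object whose
 * CPU cachelines are to be flushed; the work holds a reference to the
 * object until the flush has been executed or abandoned.
 */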
struct clflush {
        struct dma_fence_work base;
        struct drm_i915_gem_object *obj;
};

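/*
 * Flush the CPU cachelines for all of the object's backing pages, then
 * tell the display engine that any CPU writes to a frontbuffer have
 * now reached memory.
 */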
static void __do_clflush(struct drm_i915_gem_object *obj)
{
        GEM_BUG_ON(!i915_gem_object_has_pages(obj));
        drm_clflush_sg(obj->mm.pages);

        i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
}

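/*
 * Executed from the dma_fence_work once all prerequisite fences have
 * signaled: pin the backing pages, flush them, then drop the object
 * reference taken when the work was created.
 */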
static int clflush_work(struct dma_fence_work *base)
{
        struct clflush *clflush = container_of(base, typeof(*clflush), base);
        struct drm_i915_gem_object *obj = fetch_and_zero(&clflush->obj);
        int err;

        err = i915_gem_object_pin_pages(obj);
        if (err)
                goto put;

        __do_clflush(obj);
        i915_gem_object_unpin_pages(obj);

put:
        i915_gem_object_put(obj);
        return err;
}

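/*
 * If the work never ran (e.g. the fence was cancelled on error),
 * clflush->obj still owns the creation reference; release it here.
 */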
static void clflush_release(struct dma_fence_work *base)
{
        struct clflush *clflush = container_of(base, typeof(*clflush), base);

        if (clflush->obj)
                i915_gem_object_put(clflush->obj);
}

static const struct dma_fence_work_ops clflush_ops = {
        .name = "clflush",
        .work = clflush_work,
        .release = clflush_release,
};

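/*
 * Allocate and initialise the fence work, taking a reference to the
 * object on its behalf. Note the reference cycle this creates: the
 * object's reservation will hold the fence, and the fence holds the
 * object; it is broken once the work has run or been released.
 */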
static struct clflush *clflush_work_create(struct drm_i915_gem_object *obj)
{
        struct clflush *clflush;

        GEM_BUG_ON(!obj->cache_dirty);

        clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
        if (!clflush)
                return NULL;

        dma_fence_work_init(&clflush->base, &clflush_ops);
        clflush->obj = i915_gem_object_get(obj); /* obj <-> clflush cycle */

        return clflush;
}

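/*
 * i915_gem_clflush_object - flush the object's CPU cachelines
 * @obj: the object to flush; the caller must hold the object lock
 * @flags: I915_CLFLUSH_FORCE flushes even objects coherent for reads;
 *         I915_CLFLUSH_SYNC performs the flush immediately instead of
 *         queueing it as asynchronous fence work
 *
 * Returns false if the object did not need flushing, true otherwise.
 */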
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
                             unsigned int flags)
{
        struct clflush *clflush;

        assert_object_held(obj);

        /*
         * Stolen memory is always coherent with the GPU as it is explicitly
         * marked as wc by the system, or the system is cache-coherent.
         * Similarly, we only access struct pages through the CPU cache, so
         * anything not backed by physical memory we consider to be always
         * coherent and to not need clflushing.
         */
        if (!i915_gem_object_has_struct_page(obj)) {
                obj->cache_dirty = false;
                return false;
        }

        /*
         * If the GPU is snooping the contents of the CPU cache,
         * we do not need to manually clear the CPU cache lines. However,
         * the caches are only snooped when the render cache is
         * flushed/invalidated. As we always have to emit invalidations
         * and flushes when moving into and out of the RENDER domain, correct
         * snooping behaviour occurs naturally as the result of our domain
         * tracking.
         */
        if (!(flags & I915_CLFLUSH_FORCE) &&
            obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
                return false;

        trace_i915_gem_object_clflush(obj);

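        /*
         * Unless a synchronous flush was requested, queue the flush as
         * fence work: wait for all fences already in the reservation
         * object, perform the clflush, and install ourselves as the new
         * exclusive fence so that subsequent users wait for the flush.
         */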
        clflush = NULL;
        if (!(flags & I915_CLFLUSH_SYNC))
                clflush = clflush_work_create(obj);
        if (clflush) {
                i915_sw_fence_await_reservation(&clflush->base.chain,
                                                obj->base.resv, NULL, true,
                                                I915_FENCE_TIMEOUT,
                                                I915_FENCE_GFP);
                dma_resv_add_excl_fence(obj->base.resv, &clflush->base.dma);
                dma_fence_work_commit(&clflush->base);
        } else if (obj->mm.pages) {
                __do_clflush(obj);
        } else {
                GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
        }

        obj->cache_dirty = false;
        return true;
}