/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Please try to maintain the following order within this file unless it makes
 * sense to do otherwise. From top to bottom:
 * 1. typedefs
 * 2. #defines, and macros
 * 3. structure definitions
 * 4. function prototypes
 *
 * Within each section, please try to order by generation in ascending order,
 * from top to bottom (i.e. gen6 on the top, gen8 on the bottom).
 */

#ifndef __I915_GEM_GTT_H__
#define __I915_GEM_GTT_H__

#include <linux/io-mapping.h>
#include <linux/mm.h>

#include "i915_gem_timeline.h"
#include "i915_gem_request.h"

#define I915_GTT_PAGE_SIZE 4096UL
#define I915_GTT_MIN_ALIGNMENT I915_GTT_PAGE_SIZE

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_file_private;
struct drm_i915_fence_reg;

typedef uint32_t gen6_pte_t;
typedef uint64_t gen8_pte_t;
typedef uint64_t gen8_pde_t;
typedef uint64_t gen8_ppgtt_pdpe_t;
typedef uint64_t gen8_ppgtt_pml4e_t;

#define ggtt_total_entries(ggtt) ((ggtt)->base.total >> PAGE_SHIFT)

/* gen6-hsw has bit 11-4 for physical addr bit 39-32 */
#define GEN6_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0xff0))
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PDE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
#define GEN6_PTE_CACHE_LLC (2 << 1)
#define GEN6_PTE_UNCACHED (1 << 1)
#define GEN6_PTE_VALID (1 << 0)

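/*
 * Worked example (illustrative only, not part of the driver): for a 40-bit
 * physical address such as 0x12_3456_7000 (bits 39:32 == 0x12),
 *
 *	GEN6_PTE_ADDR_ENCODE(0x1234567000ULL) | GEN6_PTE_VALID
 *
 * yields 0x1234567121, i.e. 0x34567121 once truncated to a 32-bit
 * gen6_pte_t: bits 31:12 carry addr[31:12], bits 11:4 carry 0x12 and
 * bit 0 marks the entry valid.
 */
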
#define I915_PTES(pte_len) (PAGE_SIZE / (pte_len))
#define I915_PTE_MASK(pte_len) (I915_PTES(pte_len) - 1)
#define I915_PDES 512
#define I915_PDE_MASK (I915_PDES - 1)
#define NUM_PTE(pde_shift) (1 << (pde_shift - PAGE_SHIFT))

#define GEN6_PTES I915_PTES(sizeof(gen6_pte_t))
#define GEN6_PD_SIZE (I915_PDES * PAGE_SIZE)
#define GEN6_PD_ALIGN (PAGE_SIZE * 16)
#define GEN6_PDE_SHIFT 22
#define GEN6_PDE_VALID (1 << 0)

#define GEN7_PTE_CACHE_L3_LLC (3 << 1)

#define BYT_PTE_SNOOPED_BY_CPU_CACHES (1 << 2)
#define BYT_PTE_WRITEABLE (1 << 1)

/* Cacheability Control is a 4-bit value. The low three bits are stored in bits
 * 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits) ((((bits) & 0x7) << 1) | \
					(((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x8)
#define HSW_WB_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE3 HSW_CACHEABILITY_CONTROL(0x7)
#define HSW_WT_ELLC_LLC_AGE0 HSW_CACHEABILITY_CONTROL(0x6)
#define HSW_PTE_UNCACHED (0)
#define HSW_GTT_ADDR_ENCODE(addr) ((addr) | (((addr) >> 28) & 0x7f0))
#define HSW_PTE_ADDR_ENCODE(addr) HSW_GTT_ADDR_ENCODE(addr)

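/*
 * Worked example (illustrative only): HSW_WB_ELLC_LLC_AGE3 passes the 4-bit
 * cacheability value 0x8 through HSW_CACHEABILITY_CONTROL(). The low three
 * bits (0x0) land in PTE bits 3:1 and the fourth bit lands in PTE bit 11,
 * so the macro expands to 0x800; HSW_WB_ELLC_LLC_AGE0 (0xb) expands to
 * 0x806 (bits 2:1 == 0x3 plus bit 11).
 */
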
/* GEN8 legacy style address is defined as a 3 level page table:
 * 31:30 | 29:21 | 20:12 | 11:0
 * PDPE  |  PDE  |  PTE  | offset
 * The difference as compared to normal x86 3 level page table is the PDPEs are
 * programmed via register.
 *
 * GEN8 48b legacy style address is defined as a 4 level page table:
 * 47:39 | 38:30 | 29:21 | 20:12 | 11:0
 * PML4E | PDPE  |  PDE  |  PTE  | offset
 */
#define GEN8_PML4ES_PER_PML4 512
#define GEN8_PML4E_SHIFT 39
#define GEN8_PML4E_MASK (GEN8_PML4ES_PER_PML4 - 1)
#define GEN8_PDPE_SHIFT 30
/* NB: GEN8_PDPE_MASK is untrue for 32b platforms, but it has no impact on 32b page
 * tables */
#define GEN8_PDPE_MASK 0x1ff
#define GEN8_PDE_SHIFT 21
#define GEN8_PDE_MASK 0x1ff
#define GEN8_PTE_SHIFT 12
#define GEN8_PTE_MASK 0x1ff
#define GEN8_LEGACY_PDPES 4
#define GEN8_PTES I915_PTES(sizeof(gen8_pte_t))

#define I915_PDPES_PER_PDP(dev_priv) (USES_FULL_48BIT_PPGTT(dev_priv) ?\
				      GEN8_PML4ES_PER_PML4 : GEN8_LEGACY_PDPES)

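/*
 * Worked example (illustrative only) of the 48b layout described above,
 * using the gen8_*_index() helpers defined later in this file: for the GPU
 * virtual address 0x80_8060_4000,
 *
 *	gen8_pml4e_index(addr) == 1	(addr >> 39, masked to 9 bits)
 *	gen8_pdpe_index(addr)  == 2	(addr >> 30, masked to 9 bits)
 *	gen8_pde_index(addr)   == 3	(addr >> 21, masked to 9 bits)
 *	gen8_pte_index(addr)   == 4	(addr >> 12, masked to 9 bits)
 */
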
#define PPAT_UNCACHED_INDEX (_PAGE_PWT | _PAGE_PCD)
#define PPAT_CACHED_PDE_INDEX 0 /* WB LLC */
#define PPAT_CACHED_INDEX _PAGE_PAT /* WB LLCeLLC */
#define PPAT_DISPLAY_ELLC_INDEX _PAGE_PCD /* WT eLLC */

#define CHV_PPAT_SNOOP (1<<6)
#define GEN8_PPAT_AGE(x) (x<<4)
#define GEN8_PPAT_LLCeLLC (3<<2)
#define GEN8_PPAT_LLCELLC (2<<2)
#define GEN8_PPAT_LLC (1<<2)
#define GEN8_PPAT_WB (3<<0)
#define GEN8_PPAT_WT (2<<0)
#define GEN8_PPAT_WC (1<<0)
#define GEN8_PPAT_UC (0<<0)
#define GEN8_PPAT_ELLC_OVERRIDE (0<<2)
#define GEN8_PPAT(i, x) ((uint64_t) (x) << ((i) * 8))

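/*
 * Illustrative sketch only (the PPAT itself is programmed from the .c file;
 * this just shows how the macros compose): a write-back, LLC/eLLC cached,
 * age-3 entry in PPAT slot 0 would be built as
 *
 *	GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLCeLLC | GEN8_PPAT_AGE(3))
 *
 * i.e. the 8-bit value 0x3f placed in bits 7:0 of the 64-bit PPAT value.
 */
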
struct sg_table;

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED,
	I915_GGTT_VIEW_PARTIAL,
};

struct intel_rotation_info {
	struct {
		/* tiles */
		unsigned int width, height, stride, offset;
	} plane[2];
};

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;

	union {
		struct {
			u64 offset;
			unsigned int size;
		} partial;
		struct intel_rotation_info rotated;
	} params;
};

extern const struct i915_ggtt_view i915_ggtt_view_normal;
extern const struct i915_ggtt_view i915_ggtt_view_rotated;

enum i915_cache_level;

struct i915_page_dma {
	struct page *page;
	union {
		dma_addr_t daddr;

		/* For gen6/gen7 only. This is the offset in the GGTT
		 * where the page directory entries for PPGTT begin
		 */
		uint32_t ggtt_offset;
	};
};

#define px_base(px) (&(px)->base)
#define px_page(px) (px_base(px)->page)
#define px_dma(px) (px_base(px)->daddr)

struct i915_page_table {
	struct i915_page_dma base;

	unsigned long *used_ptes;
};

struct i915_page_directory {
	struct i915_page_dma base;

	unsigned long *used_pdes;
	struct i915_page_table *page_table[I915_PDES]; /* PDEs */
};

struct i915_page_directory_pointer {
	struct i915_page_dma base;

	unsigned long *used_pdpes;
	struct i915_page_directory **page_directory;
};

struct i915_pml4 {
	struct i915_page_dma base;

	DECLARE_BITMAP(used_pml4es, GEN8_PML4ES_PER_PML4);
	struct i915_page_directory_pointer *pdps[GEN8_PML4ES_PER_PML4];
};

struct i915_address_space {
	struct drm_mm mm;
	struct i915_gem_timeline timeline;
	struct drm_i915_private *i915;
	/* Every address space belongs to a struct file - except for the global
	 * GTT that is owned by the driver (and so @file is set to NULL). In
	 * principle, no information should leak from one context to another
	 * (or between files/processes etc) unless explicitly shared by the
	 * owner. Tracking the owner is important in order to free up per-file
	 * objects along with the file, to aid resource tracking, and to
	 * assign blame.
	 */
	struct drm_i915_file_private *file;
	struct list_head global_link;
	u64 start;	/* Start offset always 0 for dri2 */
	u64 total;	/* size addr space maps (ex. 2GB for ggtt) */

	bool closed;

	struct i915_page_dma scratch_page;
	struct i915_page_table *scratch_pt;
	struct i915_page_directory *scratch_pd;
	struct i915_page_directory_pointer *scratch_pdp; /* GEN8+ & 48b PPGTT */

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_read_req
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_read_req is NULL while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/**
	 * List of vma that have been unbound.
	 *
	 * A reference is not held on the buffer while on this list.
	 */
	struct list_head unbound_list;

	/* FIXME: Need a more generic return type */
	gen6_pte_t (*pte_encode)(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags); /* Create a valid PTE */
	/* flags for pte_encode */
#define PTE_READ_ONLY (1<<0)
	int (*allocate_va_range)(struct i915_address_space *vm,
				 uint64_t start,
				 uint64_t length);
	void (*clear_range)(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length);
	void (*insert_page)(struct i915_address_space *vm,
			    dma_addr_t addr,
			    uint64_t offset,
			    enum i915_cache_level cache_level,
			    u32 flags);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       uint64_t start,
			       enum i915_cache_level cache_level, u32 flags);
	void (*cleanup)(struct i915_address_space *vm);
	/** Unmap an object from an address space. This usually consists of
	 * setting the valid PTE entries to a reserved scratch page. */
	void (*unbind_vma)(struct i915_vma *vma);
	/* Map an object into an address space with the given cache flags. */
	int (*bind_vma)(struct i915_vma *vma,
			enum i915_cache_level cache_level,
			u32 flags);
};

#define i915_is_ggtt(V) (!(V)->file)

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_ggtt {
	struct i915_address_space base;
	struct io_mapping mappable;	/* Mapping to our CPU mappable region */

	phys_addr_t mappable_base;	/* PA of our GMADR */
	u64 mappable_end;		/* End offset that we can CPU map */

	/* Stolen memory is segmented in hardware with different portions
	 * offlimits to certain functions.
	 *
	 * The drm_mm is initialised to the total accessible range, as found
	 * from the PCI config. On Broadwell+, this is further restricted to
	 * avoid the first page! The upper end of stolen memory is reserved for
	 * hardware functions and similarly removed from the accessible range.
	 */
	u32 stolen_size;		/* Total size of stolen memory */
	u32 stolen_usable_size;		/* Total size minus reserved ranges */
	u32 stolen_reserved_base;
	u32 stolen_reserved_size;

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;
	void (*invalidate)(struct drm_i915_private *dev_priv);

	struct drm_mm_node error_capture;
};

struct i915_hw_ppgtt {
	struct i915_address_space base;
	struct kref ref;
	struct drm_mm_node node;
	unsigned long pd_dirty_rings;
	union {
		struct i915_pml4 pml4;		/* GEN8+ & 48b PPGTT */
		struct i915_page_directory_pointer pdp;	/* GEN8+ */
		struct i915_page_directory pd;		/* GEN6-7 */
	};

	gen6_pte_t __iomem *pd_addr;

	int (*enable)(struct i915_hw_ppgtt *ppgtt);
	int (*switch_mm)(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req);
	void (*debug_dump)(struct i915_hw_ppgtt *ppgtt, struct seq_file *m);
};

/*
 * gen6_for_each_pde() iterates over every pde from start until start+length.
 * If start and start+length are not perfectly divisible, the macro will round
 * down and up as needed. Start=0 and length=2G effectively iterates over
 * every PDE in the system. The macro modifies ALL its parameters except 'pd',
 * so each of the other parameters should preferably be a simple variable, or
 * at most an lvalue with no side-effects!
 */
#define gen6_for_each_pde(pt, pd, start, length, iter) \
	for (iter = gen6_pde_index(start); \
	     length > 0 && iter < I915_PDES && \
	     (pt = (pd)->page_table[iter], true); \
	     ({ u32 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT); \
		temp = min(temp - start, length); \
		start += temp, length -= temp; }), ++iter)

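/*
 * Illustrative usage sketch (not part of the driver; 'ppgtt', 'start' and
 * 'length' are assumed to come from the caller):
 *
 *	struct i915_page_table *pt;
 *	u32 pde;
 *
 *	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
 *		setup_page_table(pt);
 *
 * where setup_page_table() stands in for whatever per-PDE work the caller
 * does. Note that 'start' and 'length' are consumed by the walk, as the
 * comment above warns.
 */
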
#define gen6_for_all_pdes(pt, pd, iter) \
	for (iter = 0; \
	     iter < I915_PDES && \
	     (pt = (pd)->page_table[iter], true); \
	     ++iter)

static inline uint32_t i915_pte_index(uint64_t address, uint32_t pde_shift)
{
	const uint32_t mask = NUM_PTE(pde_shift) - 1;

	return (address >> PAGE_SHIFT) & mask;
}

/* Helper to count the number of PTEs within the given length. This count
 * does not cross a page table boundary, so the max value would be
 * GEN6_PTES for GEN6, and GEN8_PTES for GEN8.
 */
static inline uint32_t i915_pte_count(uint64_t addr, size_t length,
				      uint32_t pde_shift)
{
	const uint64_t mask = ~((1ULL << pde_shift) - 1);
	uint64_t end;

	WARN_ON(length == 0);
	WARN_ON(offset_in_page(addr|length));

	end = addr + length;

	if ((addr & mask) != (end & mask))
		return NUM_PTE(pde_shift) - i915_pte_index(addr, pde_shift);

	return i915_pte_index(end, pde_shift) - i915_pte_index(addr, pde_shift);
}

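/*
 * Worked example (illustrative only): with GEN6_PDE_SHIFT (22) a single page
 * table spans 4MB, so i915_pte_count(0, 2 * PAGE_SIZE, GEN6_PDE_SHIFT)
 * returns 2, while a 2-page range starting one page below a 4MB boundary is
 * clamped at that boundary and returns 1, because the count never crosses
 * into the next page table.
 */
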
static inline uint32_t i915_pde_index(uint64_t addr, uint32_t shift)
{
	return (addr >> shift) & I915_PDE_MASK;
}

static inline uint32_t gen6_pte_index(uint32_t addr)
{
	return i915_pte_index(addr, GEN6_PDE_SHIFT);
}

static inline size_t gen6_pte_count(uint32_t addr, uint32_t length)
{
	return i915_pte_count(addr, length, GEN6_PDE_SHIFT);
}

static inline uint32_t gen6_pde_index(uint32_t addr)
{
	return i915_pde_index(addr, GEN6_PDE_SHIFT);
}

/* Equivalent to the gen6 version: iterates over every pde from start until
 * start + length. On gen8+ it simply iterates over every page directory
 * entry in a page directory.
 */
#define gen8_for_each_pde(pt, pd, start, length, iter) \
	for (iter = gen8_pde_index(start); \
	     length > 0 && iter < I915_PDES && \
	     (pt = (pd)->page_table[iter], true); \
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT); \
		temp = min(temp - start, length); \
		start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pdpe(pd, pdp, start, length, iter) \
	for (iter = gen8_pdpe_index(start); \
	     length > 0 && iter < I915_PDPES_PER_PDP(dev) && \
	     (pd = (pdp)->page_directory[iter], true); \
	     ({ u64 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT); \
		temp = min(temp - start, length); \
		start += temp, length -= temp; }), ++iter)

#define gen8_for_each_pml4e(pdp, pml4, start, length, iter) \
	for (iter = gen8_pml4e_index(start); \
	     length > 0 && iter < GEN8_PML4ES_PER_PML4 && \
	     (pdp = (pml4)->pdps[iter], true); \
	     ({ u64 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT); \
		temp = min(temp - start, length); \
		start += temp, length -= temp; }), ++iter)

static inline uint32_t gen8_pte_index(uint64_t address)
{
	return i915_pte_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pde_index(uint64_t address)
{
	return i915_pde_index(address, GEN8_PDE_SHIFT);
}

static inline uint32_t gen8_pdpe_index(uint64_t address)
{
	return (address >> GEN8_PDPE_SHIFT) & GEN8_PDPE_MASK;
}

static inline uint32_t gen8_pml4e_index(uint64_t address)
{
	return (address >> GEN8_PML4E_SHIFT) & GEN8_PML4E_MASK;
}

static inline size_t gen8_pte_count(uint64_t address, uint64_t length)
{
	return i915_pte_count(address, length, GEN8_PDE_SHIFT);
}

static inline dma_addr_t
i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
{
	return test_bit(n, ppgtt->pdp.used_pdpes) ?
		px_dma(ppgtt->pdp.page_directory[n]) :
		px_dma(ppgtt->base.scratch_pd);
}

static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, base);
}

int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv);
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv);
void i915_ggtt_enable_guc(struct drm_i915_private *i915);
void i915_ggtt_disable_guc(struct drm_i915_private *i915);
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv);
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv);

int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv);
void i915_ppgtt_release(struct kref *kref);
struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
					struct drm_i915_file_private *fpriv,
					const char *name);
void i915_ppgtt_close(struct i915_address_space *vm);
static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_get(&ppgtt->ref);
}

static inline void i915_ppgtt_put(struct i915_hw_ppgtt *ppgtt)
{
	if (ppgtt)
		kref_put(&ppgtt->ref, i915_ppgtt_release);
}

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv);
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv);
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv);

int __must_check i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
					    struct sg_table *pages);
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages);

int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags);

int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags);

/* Flags used by pin/bind&friends. */
#define PIN_NONBLOCK BIT(0)
#define PIN_MAPPABLE BIT(1)
#define PIN_ZONE_4G BIT(2)
#define PIN_NONFAULT BIT(3)

#define PIN_MBZ BIT(5) /* I915_VMA_PIN_OVERFLOW */
#define PIN_GLOBAL BIT(6) /* I915_VMA_GLOBAL_BIND */
#define PIN_USER BIT(7) /* I915_VMA_LOCAL_BIND */
#define PIN_UPDATE BIT(8)

#define PIN_HIGH BIT(9)
#define PIN_OFFSET_BIAS BIT(10)
#define PIN_OFFSET_FIXED BIT(11)
#define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE)

#endif