/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef ANV_PRIVATE_H
#define ANV_PRIVATE_H

#ifdef HAVE_VALGRIND
#include <valgrind.h>
#include <memcheck.h>
#define VG(x) x
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
#else
#define VG(x)
#endif
#include "common/gen_device_info.h"
#include "blorp/blorp.h"
#include "brw_compiler.h"
#include "util/macros.h"
#include "util/list.h"
#include "util/u_vector.h"
#include "util/vk_alloc.h"

/* Pre-declarations needed for WSI entrypoints */
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_entrypoints.h"
#include "brw_context.h"

#include "wsi_common.h"
#define MAX_VBS         32
#define MAX_SETS         8
#define MAX_RTS          8
#define MAX_VIEWPORTS   16
#define MAX_SCISSORS    16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16
#define MAX_IMAGES 8
#define MAX_SAMPLES_LOG2 4 /* SKL supports 16 samples */

#define anv_noreturn __attribute__((__noreturn__))
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   return v - (v % a);
}

static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}
static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX2(n >> levels, 1);
}
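/* Usage sketch (illustrative helper, not part of the driver): walk down a
 * mip chain with anv_minify and pad the result with align_u32, which
 * requires a power-of-two alignment. The 64-byte pad here is an arbitrary
 * example value.
 */
static inline uint32_t
anv_example_aligned_mip_width(uint32_t width, uint32_t level)
{
   /* Each mip level halves the dimension, but it never drops below 1. */
   uint32_t w = anv_minify(width, level);

   /* Round up to a 64-byte multiple, e.g. for a cacheline-aligned copy. */
   return align_u32(w, 64);
}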
static inline float
anv_clamp_f(float f, float min, float max)
{
   assert(min < max);

   if (f < min)
      return min;
   else if (f > max)
      return max;
   else
      return f;
}

static inline bool
anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
{
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;
      return true;
   } else {
      return false;
   }
}
#define for_each_bit(b, dword)                          \
   for (uint32_t __dword = (dword);                     \
        (b) = __builtin_ffs(__dword) - 1, __dword;      \
        __dword &= ~(1 << (b)))
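/* Usage sketch (illustrative helper, not part of the driver): for_each_bit
 * visits each set bit of a mask from low to high, with "b" receiving the
 * bit index on every iteration.
 */
static inline uint32_t
anv_example_count_bits(uint32_t mask)
{
   uint32_t b, count = 0;
   for_each_bit(b, mask)
      count++;
   return count;
}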
#define typed_memcpy(dest, src, count) ({ \
   STATIC_ASSERT(sizeof(*src) == sizeof(*dest)); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

/* Define no kernel as 1, since that's an illegal offset for a kernel */
#define NO_KERNEL 1

struct anv_common {
   VkStructureType sType;
   const void *pNext;
};
/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */

VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL)
#define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__)
#else
#define vk_error(error) error
#define vk_errorf(error, format, ...) error
#endif
void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);

/**
 * Print a FINISHME message, including its source location, at most once per
 * call site.
 */
#define anv_finishme(format, ...) ({ \
   static bool reported = false; \
   if (!reported) { \
      __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__); \
      reported = true; \
   } \
})
/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif

/**
 * If a block of code is annotated with anv_validate, then the block runs only
 * in debug builds.
 */
#ifdef DEBUG
#define anv_validate if (1)
#else
#define anv_validate if (0)
#endif
void anv_abortf(const char *format, ...) anv_noreturn anv_printflike(1, 2);
void anv_abortfv(const char *format, va_list va) anv_noreturn;

#define stub_return(v) \
   do { \
      anv_finishme("stub %s", __func__); \
      return (v); \
   } while (0)

#define stub() \
   do { \
      anv_finishme("stub %s", __func__); \
      return; \
   } while (0)
/* A dynamically growable, circular buffer. Elements are added at head and
 * removed from tail. head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array. This way,
 * the number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */

struct anv_bo {
   uint32_t gem_handle;

   /* Index into the current validation list. This is used by the
    * validation list building algorithm to track which buffers are already
    * in the validation list so that we can ensure uniqueness.
    */
   uint32_t index;

   /* Last known offset. This value is provided by the kernel when we
    * execbuf and is used as the presumed offset for the next bunch of
    * relocations.
    */
   uint64_t offset;

   uint64_t size;
   void *map;

   /* We need to set the WRITE flag on winsys bos so GEM will know we're
    * writing to them and synchronize uses on other rings (e.g. if the display
    * server uses the blitter ring).
    */
   bool is_winsys_bo;
};

static inline void
anv_bo_init(struct anv_bo *bo, uint32_t gem_handle, uint64_t size)
{
   bo->gem_handle = gem_handle;
   bo->index = 0;
   bo->offset = -1;
   bo->size = size;
   bo->map = NULL;
   bo->is_winsys_bo = false;
}
/* Represents a lock-free linked list of "free" things. This is used by
 * both the block pool and the state pools. Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   struct {
      /* An offset into the pool at which the first free block lives */
      int32_t offset;

      /* A simple count that is incremented every time the head changes. */
      uint32_t count;
   };
   uint64_t u64;
};

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
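/* Sketch of why the head carries a generation counter (a simplified mirror
 * of the push logic in anv_allocator.c, assuming each free element stores
 * its "next" offset in its first four bytes): with a bare 32-bit head, the
 * head element could be popped and re-pushed between our read and our CAS
 * and the stale compare would still succeed (the ABA problem). Comparing
 * offset and count together as one 64-bit value defeats that.
 */
static inline void
anv_example_free_list_push(union anv_free_list *list, void *map,
                           int32_t offset)
{
   union anv_free_list current, old, new;
   int32_t *next_ptr = map + offset;

   old = *list;
   do {
      current = old;
      *next_ptr = current.offset;     /* link the new element to the old head */
      new.offset = offset;            /* the new element becomes the head */
      new.count = current.count + 1;  /* bump the generation on every change */
      old.u64 = __sync_val_compare_and_swap(&list->u64, current.u64, new.u64);
   } while (old.u64 != current.u64);
}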
struct anv_block_state {
   union {
      struct {
         uint32_t next;
         uint32_t end;
      };
      uint64_t u64;
   };
};

struct anv_block_pool {
   struct anv_device *device;

   struct anv_bo bo;

   /* The offset from the start of the bo to the "center" of the block
    * pool. Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool. This pointer may or may not
    * point to the actual beginning of the block pool memory. If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_bo_offset
    */
   void *map;
   int fd;

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct u_vector mmap_cleanups;

   uint32_t block_size;

   union anv_free_list free_list;
   struct anv_block_state state;

   union anv_free_list back_free_list;
   struct anv_block_state back_state;
};
/* Block pools are backed by a fixed-size 4GiB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ull << 32)

/* The center of the block pool is also the middle of the memfd. This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)
static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}
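/* Illustrative sketch (not part of the driver) of the two-sided addressing
 * scheme: front allocations return offsets >= 0 and back allocations return
 * offsets < 0, both relative to the pool's center, so a single expression
 * recovers the CPU pointer for either kind.
 */
static inline void *
anv_example_block_pool_map(struct anv_block_pool *pool, int32_t offset)
{
   /* Valid for front (non-negative) and back (negative) offsets alike,
    * because map already points at bo.map + center_bo_offset.
    */
   return pool->map + offset;
}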
struct anv_state {
   int32_t offset;
   uint32_t alloc_size;
   void *map;
};

struct anv_fixed_size_state_pool {
   size_t state_size;
   union anv_free_list free_list;
   struct anv_block_state block;
};

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 17

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2 + 1)

struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];
};
struct anv_state_stream_block;

struct anv_state_stream {
   struct anv_block_pool *block_pool;

   /* The current working block */
   struct anv_state_stream_block *block;

   /* Offset at which the current block starts */
   uint32_t start;

   /* Offset at which to allocate the next state */
   uint32_t next;

   /* Offset at which the current block ends */
   uint32_t end;
};
#define CACHELINE_SIZE 64
#define CACHELINE_MASK 63

static inline void
anv_clflush_range(void *start, size_t size)
{
   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
   void *end = start + size;

   __builtin_ia32_mfence();
   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}

static inline void
anv_state_clflush(struct anv_state state)
{
   anv_clflush_range(state.map, state.alloc_size);
}
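/* Usage sketch (illustrative helper): on non-LLC platforms, CPU writes to a
 * mapped anv_state are not coherent with the GPU, so a freshly packed state
 * must be flushed before the GPU consumes it. This mirrors the pattern used
 * by the anv_state_pool_emit macro further down.
 */
static inline void
anv_example_publish_state(struct anv_state state, bool device_has_llc)
{
   /* The caller packs or memcpys into state.map before this point. */
   if (!device_has_llc)
      anv_state_clflush(state);
}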
void anv_block_pool_init(struct anv_block_pool *pool,
                         struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);
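/* Usage sketch (illustrative helper): a state stream is the usual way to
 * grab short-lived GPU state while recording a command buffer. The returned
 * anv_state is CPU-mapped at state.map; the caller packs into it. The 64s
 * below are arbitrary example values for a small, cacheline-aligned chunk.
 */
static inline struct anv_state
anv_example_alloc_temp_state(struct anv_state_stream *stream)
{
   return anv_state_stream_alloc(stream, 64, 64);
}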
/**
 * Implements a pool of re-usable BOs. The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
struct anv_bo_pool {
   struct anv_device *device;

   void *free_list[16];
};

void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
                           uint32_t size);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
struct anv_scratch_bo {
   bool exists;
   struct anv_bo bo;
};

struct anv_scratch_pool {
   /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
   struct anv_scratch_bo bos[16][MESA_SHADER_STAGES];
};

void anv_scratch_pool_init(struct anv_device *device,
                           struct anv_scratch_pool *pool);
void anv_scratch_pool_finish(struct anv_device *device,
                             struct anv_scratch_pool *pool);
struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
                                      struct anv_scratch_pool *pool,
                                      gl_shader_stage stage,
                                      unsigned per_thread_scratch);
extern struct anv_dispatch_table dtable;

#define VK_ICD_WSI_PLATFORM_MAX 5

struct anv_physical_device {
   VK_LOADER_DATA _loader_data;

   struct anv_instance * instance;
   uint32_t chipset_id;
   char path[20];
   const char * name;
   struct gen_device_info info;
   uint64_t aperture_size;
   struct brw_compiler * compiler;
   struct isl_device isl_dev;
   int cmd_parser_version;

   uint32_t eu_total;
   uint32_t subslice_total;

   struct wsi_device wsi_device;
};
struct anv_instance {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   uint32_t apiVersion;
   int physicalDeviceCount;
   struct anv_physical_device physicalDevice;
};

VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);
struct anv_queue {
   VK_LOADER_DATA _loader_data;

   struct anv_device * device;

   struct anv_state_pool * pool;
};
struct anv_pipeline_cache {
   struct anv_device * device;
   pthread_mutex_t mutex;

   struct hash_table * cache;
};

struct anv_pipeline_bind_map;

void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device,
                             bool cache_enabled);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
struct anv_shader_bin *
anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                          const void *key, uint32_t key_size);
struct anv_shader_bin *
anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                 const void *key_data, uint32_t key_size,
                                 const void *kernel_data, uint32_t kernel_size,
                                 const struct brw_stage_prog_data *prog_data,
                                 uint32_t prog_data_size,
                                 const struct anv_pipeline_bind_map *bind_map);
struct anv_device {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   struct anv_instance * instance;
   uint32_t chipset_id;
   struct gen_device_info info;
   struct isl_device isl_dev;
   int context_id;
   int fd;
   bool can_chain_batches;
   bool robust_buffer_access;

   struct anv_bo_pool batch_bo_pool;

   struct anv_block_pool dynamic_state_block_pool;
   struct anv_state_pool dynamic_state_pool;

   struct anv_block_pool instruction_block_pool;
   struct anv_state_pool instruction_state_pool;

   struct anv_block_pool surface_state_block_pool;
   struct anv_state_pool surface_state_pool;

   struct anv_bo workaround_bo;

   struct anv_pipeline_cache blorp_shader_cache;
   struct blorp_context blorp;

   struct anv_state border_colors;

   struct anv_queue queue;

   struct anv_scratch_pool scratch_pool;

   uint32_t default_mocs;

   pthread_mutex_t mutex;
   pthread_cond_t queue_submit;
};
void anv_device_get_cache_uuid(void *uuid);

void anv_device_init_blorp(struct anv_device *device);
void anv_device_finish_blorp(struct anv_device *device);

VkResult anv_device_execbuf(struct anv_device *device,
                            struct drm_i915_gem_execbuffer2 *execbuf,
                            struct anv_bo **execbuf_bos);

void* anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, size_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_get_param(int fd, uint32_t param);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);

VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);
struct anv_reloc_list {
   size_t num_relocs;
   size_t array_length;
   struct drm_i915_gem_relocation_entry * relocs;
   struct anv_bo ** reloc_bos;
};

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,
                            uint32_t delta);
struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head link;

   struct anv_bo bo;

   /* Bytes actually consumed in this batch BO */
   size_t length;

   struct anv_reloc_list relocs;
};

struct anv_batch {
   const VkAllocationCallbacks * alloc;

   void * start;
   void * end;
   void * next;

   struct anv_reloc_list * relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);
   void * user_data;
};
void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
VkResult anv_device_submit_simple_batch(struct anv_device *device,
                                        struct anv_batch *batch);
struct anv_address {
   struct anv_bo *bo;
   uint32_t offset;
};

static inline uint64_t
_anv_combine_address(struct anv_batch *batch, void *location,
                     const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);

      return anv_batch_emit_reloc(batch, location, address.bo, address.offset + delta);
   }
}

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch
#define __gen_combine_address _anv_combine_address
/* Wrapper macros needed to work around preprocessor argument issues. In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num
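/* Worked expansion (illustrative), for an emit macro invoked as
 * anv_batch_emit(batch, GENX(3DSTATE_PS), ps) with GENX(cmd) expanding to
 * GEN8_##cmd:
 *
 *   - If the emit macro used "cmd ## _length" directly, cmd would be an
 *     operand of ## and would be pasted *unexpanded*, gluing "_length"
 *     onto the ")" of GENX(3DSTATE_PS) and producing a preprocessor error.
 *
 *   - Routed through the helper, cmd is fully expanded to GEN8_3DSTATE_PS
 *     first, and __anv_cmd_length(GEN8_3DSTATE_PS) then pastes cleanly to
 *     GEN8_3DSTATE_PS_length.
 */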
#define anv_pack_struct(dst, struc, ...) do {                              \
      struct struc __template = {                                          \
         __VA_ARGS__                                                       \
      };                                                                   \
      __anv_cmd_pack(struc)(NULL, dst, &__template);                       \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
   } while (0)
#define anv_batch_emitn(batch, n, cmd, ...) ({             \
      void *__dst = anv_batch_emit_dwords(batch, n);       \
      struct cmd __template = {                            \
         __anv_cmd_header(cmd),                            \
         .DWordLength = n - __anv_cmd_length_bias(cmd),    \
         __VA_ARGS__                                       \
      };                                                   \
      __anv_cmd_pack(cmd)(batch, __dst, &__template);      \
      __dst;                                               \
   })
#define anv_batch_emit_merge(batch, dwords0, dwords1)                   \
   do {                                                                 \
      uint32_t *dw;                                                     \
                                                                        \
      STATIC_ASSERT(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1));        \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0));         \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++)                \
         dw[i] = (dwords0)[i] | (dwords1)[i];                           \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));   \
   } while (0)
#define anv_batch_emit(batch, cmd, name)                                \
   for (struct cmd name = { __anv_cmd_header(cmd) },                    \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd));    \
        __builtin_expect(_dst != NULL, 1);                              \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name);                     \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL;                                                 \
         }))
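/* Usage sketch (illustrative; real callers live in gen-specific files
 * where GENX() resolves, such as genX_cmd_buffer.c):
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *       pc.StallAtPixelScoreboard     = true;
 *    }
 *
 * The for-based macro runs its body exactly once; the third clause then
 * packs "pc" into the freshly allocated dwords and NULLs _dst so the loop
 * terminates.
 */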
#define anv_state_pool_emit(pool, cmd, align, ...) ({                   \
      const uint32_t __size = __anv_cmd_length(cmd) * 4;                \
      struct anv_state __state =                                        \
         anv_state_pool_alloc((pool), __size, align);                   \
      struct cmd __template = {                                         \
         __VA_ARGS__                                                    \
      };                                                                \
      __anv_cmd_pack(cmd)(NULL, __state.map, &__template);              \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(__state.map, __anv_cmd_length(cmd) * 4)); \
      if (!(pool)->block_pool->device->info.has_llc)                    \
         anv_state_clflush(__state);                                    \
      __state;                                                          \
   })
#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) {  \
   .GraphicsDataTypeGFDT = 0,                                  \
   .LLCCacheabilityControlLLCCC = 0,                           \
   .L3CacheabilityControlL3CC = 1,                             \
}

#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) {  \
   .LLCeLLCCacheabilityControlLLCCC = 0,                         \
   .L3CacheabilityControlL3CC = 1,                               \
}

#define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) {  \
   .MemoryTypeLLCeLLCCacheabilityControl = WB,                 \
   .TargetCache = L3DefertoPATforLLCeLLCselection,             \
}

/* Skylake: MOCS is now an index into an array of 62 different caching
 * configurations programmed by the kernel.
 */
#define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) {  \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */                 \
   .IndextoMOCSTables = 2                                      \
}

#define GEN9_MOCS_PTE {                          \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */   \
   .IndextoMOCSTables = 1                        \
}
struct anv_device_memory {
   struct anv_bo bo;
   uint32_t type_index;
   VkDeviceSize map_size;
   void * map;
};

/**
 * Header for Vertex URB Entry (VUE)
 */
struct anv_vue_header {
   uint32_t Reserved;
   uint32_t RTAIndex; /* RenderTargetArrayIndex */
   uint32_t ViewportIndex;
   float PointWidth;
};
struct anv_descriptor_set_binding_layout {
   /* The type of the descriptors in this binding */
   VkDescriptorType type;

   /* Number of array elements in this binding */
   uint16_t array_size;

   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;

   /* Index into the dynamic state array for a dynamic buffer */
   int16_t dynamic_offset_index;

   /* Index into the descriptor set buffer views */
   int16_t buffer_index;

   struct {
      /* Index into the binding table for the associated surface */
      int16_t surface_index;

      /* Index into the sampler table for the associated sampler */
      int16_t sampler_index;

      /* Index into the image table for the associated image */
      int16_t image_index;
   } stage[MESA_SHADER_STAGES];

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct anv_sampler **immutable_samplers;
};
struct anv_descriptor_set_layout {
   /* Number of bindings in this descriptor set */
   uint16_t binding_count;

   /* Total size of the descriptor set with room for all array entries */
   uint16_t size;

   /* Shader stages affected by this descriptor set */
   uint16_t shader_stages;

   /* Number of buffers in this descriptor set */
   uint16_t buffer_count;

   /* Number of dynamic offsets used by this descriptor set */
   uint16_t dynamic_offset_count;

   /* Bindings in this descriptor set */
   struct anv_descriptor_set_binding_layout binding[0];
};
struct anv_descriptor {
   VkDescriptorType type;

   union {
      struct {
         struct anv_image_view *image_view;
         struct anv_sampler *sampler;
      };

      struct anv_buffer_view *buffer_view;
   };
};

struct anv_descriptor_set {
   const struct anv_descriptor_set_layout *layout;
   uint32_t size;
   uint32_t buffer_count;
   struct anv_buffer_view *buffer_views;
   struct anv_descriptor descriptors[0];
};
struct anv_descriptor_pool {
   uint32_t size;
   uint32_t next;
   uint32_t free_list;

   struct anv_state_stream surface_state_stream;
   void *surface_state_free_list;

   char data[0];
};

VkResult
anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set);

void
anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set);

#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
struct anv_pipeline_binding {
   /* The descriptor set this surface corresponds to. The special value of
    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
    * to a color attachment and not a regular descriptor.
    */
   uint8_t set;

   /* Binding in the descriptor set */
   uint8_t binding;

   /* Index in the binding */
   uint8_t index;
};

struct anv_pipeline_layout {
   struct {
      struct anv_descriptor_set_layout *layout;
      uint32_t dynamic_offset_start;
   } set[MAX_SETS];

   uint32_t num_sets;

   struct {
      bool has_dynamic_offsets;
   } stage[MESA_SHADER_STAGES];

   unsigned char sha1[20];
};
struct anv_buffer {
   struct anv_device * device;
   VkDeviceSize size;

   VkBufferUsageFlags usage;

   /* Set when bound */
   struct anv_bo * bo;
   VkDeviceSize offset;
};
enum anv_cmd_dirty_bits {
   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
   ANV_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
   ANV_CMD_DIRTY_DYNAMIC_ALL = (1 << 9) - 1,
   ANV_CMD_DIRTY_PIPELINE = 1 << 9,
   ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
   ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,
};
typedef uint32_t anv_cmd_dirty_mask_t;
enum anv_pipe_bits {
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
   ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
   ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
   ANV_PIPE_CS_STALL_BIT = (1 << 20),

   /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
    * a flush has happened but not a CS stall. The next time we do any sort
    * of invalidation we need to insert a CS stall at that time. Otherwise,
    * we would have to CS stall on every flush which could be bad.
    */
   ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),
};
#define ANV_PIPE_FLUSH_BITS ( \
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)

#define ANV_PIPE_STALL_BITS ( \
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
   ANV_PIPE_DEPTH_STALL_BIT | \
   ANV_PIPE_CS_STALL_BIT)

#define ANV_PIPE_INVALIDATE_BITS ( \
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
struct anv_vertex_binding {
   struct anv_buffer * buffer;
   VkDeviceSize offset;
};

struct anv_push_constants {
   /* Current allocated size of this push constants data structure.
    * Because a decent chunk of it may not be used (images on SKL, for
    * instance), we won't actually allocate the entire structure up-front.
    */
   uint32_t size;

   /* Push constant data provided by the client through vkPushConstants */
   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];

   /* Our hardware only provides zero-based vertex and instance id so, in
    * order to satisfy the Vulkan requirements, we may have to push one or
    * both of these into the shader.
    */
   uint32_t base_vertex;
   uint32_t base_instance;

   /* Offsets and ranges for dynamically bound buffers */
   struct {
      uint32_t offset;
      uint32_t range;
   } dynamic[MAX_DYNAMIC_BUFFERS];

   /* Image data for image_load_store on pre-SKL */
   struct brw_image_param images[MAX_IMAGES];
};
struct anv_dynamic_state {
   struct {
      uint32_t count;
      VkViewport viewports[MAX_VIEWPORTS];
   } viewport;

   struct {
      uint32_t count;
      VkRect2D scissors[MAX_SCISSORS];
   } scissor;

   float blend_constants[4];

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_compare_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_write_mask;

   struct {
      uint32_t front;
      uint32_t back;
   } stencil_reference;
};
extern const struct anv_dynamic_state default_dynamic_state;

void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                            const struct anv_dynamic_state *src,
                            uint32_t copy_mask);

/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct anv_attachment_state {
   struct anv_state color_rt_state;

   VkImageAspectFlags pending_clear_aspects;
   VkClearValue clear_value;
};
/** State required while building cmd buffer */
struct anv_cmd_state {
   /* PIPELINE_SELECT.PipelineSelection */
   uint32_t current_pipeline;
   const struct gen_l3_config * current_l3_config;
   anv_cmd_dirty_mask_t dirty;
   anv_cmd_dirty_mask_t compute_dirty;
   enum anv_pipe_bits pending_pipe_bits;
   uint32_t num_workgroups_offset;
   struct anv_bo * num_workgroups_bo;
   VkShaderStageFlags descriptors_dirty;
   VkShaderStageFlags push_constants_dirty;
   uint32_t scratch_size;
   struct anv_pipeline * pipeline;
   struct anv_pipeline * compute_pipeline;
   struct anv_framebuffer * framebuffer;
   struct anv_render_pass * pass;
   struct anv_subpass * subpass;
   VkRect2D render_area;
   uint32_t restart_index;
   struct anv_vertex_binding vertex_bindings[MAX_VBS];
   struct anv_descriptor_set * descriptors[MAX_SETS];
   VkShaderStageFlags push_constant_stages;
   struct anv_push_constants * push_constants[MESA_SHADER_STAGES];
   struct anv_state binding_tables[MESA_SHADER_STAGES];
   struct anv_state samplers[MESA_SHADER_STAGES];
   struct anv_dynamic_state dynamic;

   /**
    * Array length is anv_cmd_state::pass::attachment_count. Array content is
    * valid only when recording a render pass instance.
    */
   struct anv_attachment_state * attachments;

   /**
    * Surface states for color render targets. These are stored in a single
    * flat array. For depth-stencil attachments, the surface state is simply
    * left blank.
    */
   struct anv_state render_pass_states;

   /**
    * A null surface state of the right size to match the framebuffer. This
    * is one of the states in render_pass_states.
    */
   struct anv_state null_surface_state;

   struct anv_buffer * index_buffer;
   uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
   uint32_t index_offset;
};
struct anv_cmd_pool {
   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;
};
#define ANV_CMD_BUFFER_BATCH_SIZE 8192

enum anv_cmd_buffer_exec_mode {
   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
};
struct anv_cmd_buffer {
   VK_LOADER_DATA _loader_data;

   struct anv_device * device;

   struct anv_cmd_pool * pool;
   struct list_head pool_link;

   struct anv_batch batch;

   /* Fields required for the actual chain of anv_batch_bo's.
    *
    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
    */
   struct list_head batch_bos;
   enum anv_cmd_buffer_exec_mode exec_mode;

   /* A vector of anv_batch_bo pointers for every batch or surface buffer
    * referenced by this command buffer
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector seen_bbos;

   /* A vector of int32_t's for every block of binding tables.
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct u_vector bt_blocks;

   struct anv_reloc_list surface_relocs;
   /** Last seen surface state block pool center bo offset */
   uint32_t last_ss_pool_center;

   /* Serial for tracking buffer completion */
   uint32_t serial;

   /* Stream objects for storing temporary data */
   struct anv_state_stream surface_state_stream;
   struct anv_state_stream dynamic_state_stream;

   VkCommandBufferUsageFlags usage_flags;
   VkCommandBufferLevel level;

   struct anv_cmd_state state;
};
VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);
VkResult anv_cmd_buffer_execbuf(struct anv_device *device,
                                struct anv_cmd_buffer *cmd_buffer);

VkResult anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer);

VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size);
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                             const void *data, uint32_t size, uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                              uint32_t *a, uint32_t *b,
                                              uint32_t dwords, uint32_t alignment);

struct anv_address
anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset);
struct anv_state
anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);
struct anv_state
anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment);

VkResult
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
                                         bool depth_clamp_enable);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                      struct anv_render_pass *pass,
                                      struct anv_framebuffer *framebuffer,
                                      const VkClearValue *clear_values);

void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage);
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);

const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);

struct anv_state
anv_cmd_buffer_alloc_blorp_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                         uint32_t num_entries,
                                         uint32_t *state_offset);

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);
enum anv_fence_state {
   /** Indicates that this is a new (or newly reset) fence */
   ANV_FENCE_STATE_RESET,

   /** Indicates that this fence has been submitted to the GPU but is still
    * (as far as we know) in use by the GPU.
    */
   ANV_FENCE_STATE_SUBMITTED,

   ANV_FENCE_STATE_SIGNALED,
};

struct anv_fence {
   struct anv_bo bo;
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   enum anv_fence_state state;
};

struct anv_event {
   uint64_t semaphore;
   struct anv_state state;
};
struct anv_shader_module {
   unsigned char sha1[20];
   uint32_t size;
   char data[0];
};

void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     const struct anv_pipeline_layout *pipeline_layout,
                     const VkSpecializationInfo *spec_info);
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits)                         \
   for (gl_shader_stage stage,                                       \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK);    \
        stage = __builtin_ffs(__tmp) - 1, __tmp;                     \
        __tmp &= ~(1 << (stage)))
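/* Usage sketch (illustrative helper, not part of the driver):
 * anv_foreach_stage iterates the mesa stage indices whose bits are set in a
 * VkShaderStageFlags-style mask, already narrowed by ANV_STAGE_MASK.
 */
static inline uint32_t
anv_example_count_stages(VkShaderStageFlags stages)
{
   uint32_t count = 0;
   anv_foreach_stage(s, stages)
      count++;
   return count;
}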
struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;

   struct anv_pipeline_binding * surface_to_descriptor;
   struct anv_pipeline_binding * sampler_to_descriptor;
};

struct anv_shader_bin_key {
   uint32_t size;
   uint8_t data[0];
};

struct anv_shader_bin {
   uint32_t ref_cnt;

   const struct anv_shader_bin_key *key;

   struct anv_state kernel;
   uint32_t kernel_size;

   const struct brw_stage_prog_data *prog_data;
   uint32_t prog_data_size;

   struct anv_pipeline_bind_map bind_map;

   /* Prog data follows, then params, then the key, all aligned to 8 bytes */
};
struct anv_shader_bin *
anv_shader_bin_create(struct anv_device *device,
                      const void *key, uint32_t key_size,
                      const void *kernel, uint32_t kernel_size,
                      const struct brw_stage_prog_data *prog_data,
                      uint32_t prog_data_size, const void *prog_data_param,
                      const struct anv_pipeline_bind_map *bind_map);

void
anv_shader_bin_destroy(struct anv_device *device, struct anv_shader_bin *shader);

static inline void
anv_shader_bin_ref(struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   __sync_fetch_and_add(&shader->ref_cnt, 1);
}

static inline void
anv_shader_bin_unref(struct anv_device *device, struct anv_shader_bin *shader)
{
   assert(shader->ref_cnt >= 1);
   if (__sync_fetch_and_add(&shader->ref_cnt, -1) == 1)
      anv_shader_bin_destroy(device, shader);
}
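/* Ownership sketch (illustrative): every holder of an anv_shader_bin
 * pointer owns a reference. A pipeline that caches a shader would do,
 * schematically:
 *
 *    pipeline->shaders[stage] = bin;
 *    anv_shader_bin_ref(bin);           // pipeline now co-owns the shader
 *    ...
 *    anv_shader_bin_unref(device, bin); // on pipeline destruction
 *
 * Since the counter uses GCC __sync atomics, ref/unref need no external
 * locking.
 */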
struct anv_pipeline {
   struct anv_device * device;
   struct anv_batch batch;
   uint32_t batch_data[512];
   struct anv_reloc_list batch_relocs;
   uint32_t dynamic_state_mask;
   struct anv_dynamic_state dynamic_state;

   struct anv_pipeline_layout * layout;

   bool needs_data_cache;

   struct anv_shader_bin * shaders[MESA_SHADER_STAGES];

   const struct gen_l3_config * l3_config;
   uint32_t total_size;

   VkShaderStageFlags active_stages;
   struct anv_state blend_state;

   uint32_t binding_stride[MAX_VBS];
   bool instancing_enable[MAX_VBS];
   bool primitive_restart;

   uint32_t cs_right_mask;

   bool depth_clamp_enable;

   struct {
      uint32_t depth_stencil_state[3];
   } gen7;

   struct {
      uint32_t wm_depth_stencil[3];
   } gen8;

   struct {
      uint32_t wm_depth_stencil[4];
   } gen9;

   uint32_t interface_descriptor_data[8];
};
static inline bool
anv_pipeline_has_stage(const struct anv_pipeline *pipeline,
                       gl_shader_stage stage)
{
   return (pipeline->active_stages & mesa_to_vk_shader_stage(stage)) != 0;
}

#define ANV_DECL_GET_PROG_DATA_FUNC(prefix, stage)                   \
static inline const struct brw_##prefix##_prog_data *                \
get_##prefix##_prog_data(struct anv_pipeline *pipeline)              \
{                                                                    \
   if (anv_pipeline_has_stage(pipeline, stage)) {                    \
      return (const struct brw_##prefix##_prog_data *)               \
             pipeline->shaders[stage]->prog_data;                    \
   } else {                                                          \
      return NULL;                                                   \
   }                                                                 \
}

ANV_DECL_GET_PROG_DATA_FUNC(vs, MESA_SHADER_VERTEX)
ANV_DECL_GET_PROG_DATA_FUNC(gs, MESA_SHADER_GEOMETRY)
ANV_DECL_GET_PROG_DATA_FUNC(wm, MESA_SHADER_FRAGMENT)
ANV_DECL_GET_PROG_DATA_FUNC(cs, MESA_SHADER_COMPUTE)
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc);

VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);
struct anv_format {
   enum isl_format isl_format:16;
   struct isl_swizzle swizzle;
};

struct anv_format
anv_get_format(const struct gen_device_info *devinfo, VkFormat format,
               VkImageAspectFlags aspect, VkImageTiling tiling);

static inline enum isl_format
anv_get_isl_format(const struct gen_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
}

void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm);
/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   /** Valid only if isl_surf::size > 0. */
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */
   uint32_t offset;
};

struct anv_image {
   /* The original VkFormat provided by the client. This may not match any
    * of the actual surface formats.
    */
   VkFormat vk_format;
   VkImageAspectFlags aspects;
   VkExtent3D extent;
   uint32_t levels;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling; /**< VkImageCreateInfo::tiling */
   /* Set when bound */
   struct anv_bo * bo;
   VkDeviceSize offset;

   /**
    * For each foo, anv_image::foo_surface is valid if and only if
    * anv_image::aspects has a foo aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces. From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage. To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * VkImage.
    */
   union {
      struct anv_surface color_surface;

      struct {
         struct anv_surface depth_surface;
         struct anv_surface stencil_surface;
      };
   };

   struct anv_surface aux_surface;
};
static inline uint32_t
anv_get_layerCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
          image->array_size - range->baseArrayLayer : range->layerCount;
}

static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
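/* Usage sketch (illustrative helper, not part of the driver): these
 * wrappers exist because a VkImageSubresourceRange may specify
 * VK_REMAINING_MIP_LEVELS or VK_REMAINING_ARRAY_LAYERS instead of explicit
 * counts.
 */
static inline bool
anv_example_range_covers_whole_image(const struct anv_image *image,
                                     const VkImageSubresourceRange *range)
{
   return range->baseMipLevel == 0 &&
          anv_get_levelCount(image, range) == image->levels &&
          range->baseArrayLayer == 0 &&
          anv_get_layerCount(image, range) == image->array_size;
}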
struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */

   struct isl_view isl;

   VkImageAspectFlags aspect_mask;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   /** RENDER_SURFACE_STATE when using image as a sampler surface. */
   struct anv_state sampler_surface_state;

   /** RENDER_SURFACE_STATE when using image as a storage image. */
   struct anv_state storage_surface_state;

   struct brw_image_param storage_image_param;
};
struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;

   /** An opt-in bitmask which filters an ISL-mapping of the Vulkan tiling. */
   isl_tiling_flags_t isl_tiling_flags;
};

VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks* alloc,
                          VkImage *pImage);

const struct anv_surface *
anv_image_get_surface_for_aspect_mask(const struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);
static inline bool
anv_image_has_hiz(const struct anv_image *image)
{
   /* We must check the aspect because anv_image::aux_surface may be used for
    * any type of auxiliary surface, not just HiZ.
    */
   return (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
          image->aux_surface.isl.size > 0;
}
struct anv_buffer_view {
   enum isl_format format; /**< VkBufferViewCreateInfo::format */
   struct anv_bo *bo;
   uint32_t offset; /**< Offset into bo. */
   uint64_t range; /**< VkBufferViewCreateInfo::range */

   struct anv_state surface_state;
   struct anv_state storage_surface_state;

   struct brw_image_param storage_image_param;
};

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}

static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,
                                   uint32_t stride);

void anv_image_view_fill_image_param(struct anv_device *device,
                                     struct anv_image_view *view,
                                     struct brw_image_param *param);
void anv_buffer_view_fill_image_param(struct anv_device *device,
                                      struct anv_buffer_view *view,
                                      struct brw_image_param *param);
struct anv_sampler {
   uint32_t state[4];
};

struct anv_framebuffer {
   uint32_t width;
   uint32_t height;
   uint32_t layers;

   uint32_t attachment_count;
   struct anv_image_view * attachments[0];
};
struct anv_subpass {
   uint32_t input_count;
   uint32_t * input_attachments;
   uint32_t color_count;
   uint32_t * color_attachments;
   uint32_t * resolve_attachments;
   uint32_t depth_stencil_attachment;

   /** Subpass has at least one resolve attachment */
   bool has_resolve;
};

struct anv_render_pass_attachment {
   VkFormat format;
   uint32_t samples;
   VkAttachmentLoadOp load_op;
   VkAttachmentStoreOp store_op;
   VkAttachmentLoadOp stencil_load_op;
};
struct anv_render_pass {
   uint32_t attachment_count;
   uint32_t subpass_count;
   uint32_t * subpass_attachments;
   struct anv_render_pass_attachment * attachments;
   struct anv_subpass subpasses[0];
};

struct anv_query_pool_slot {
   uint64_t begin;
   uint64_t end;
   uint64_t available;
};

struct anv_query_pool {
   VkQueryType type;
   uint32_t slots;
   struct anv_bo bo;
};
void *anv_lookup_entrypoint(const struct gen_device_info *devinfo,
                            const char *name);

void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, VkImageAspectFlagBits aspect,
                           const char *filename);
enum anv_dump_action {
   ANV_DUMP_FRAMEBUFFERS_BIT = 0x1,
};

void anv_dump_start(struct anv_device *device, enum anv_dump_action actions);
void anv_dump_finish(void);

void anv_dump_add_framebuffer(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_framebuffer *fb);
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType)             \
                                                                  \
   static inline struct __anv_type *                              \
   __anv_type ## _from_handle(__VkType _handle)                   \
   {                                                              \
      return (struct __anv_type *) _handle;                       \
   }                                                              \
                                                                  \
   static inline __VkType                                         \
   __anv_type ## _to_handle(struct __anv_type *_obj)              \
   {                                                              \
      return (__VkType) _obj;                                     \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType)     \
                                                                  \
   static inline struct __anv_type *                              \
   __anv_type ## _from_handle(__VkType _handle)                   \
   {                                                              \
      return (struct __anv_type *)(uintptr_t) _handle;            \
   }                                                              \
                                                                  \
   static inline __VkType                                         \
   __anv_type ## _to_handle(struct __anv_type *_obj)              \
   {                                                              \
      return (__VkType)(uintptr_t) _obj;                          \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
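/* Usage sketch (illustrative): entrypoints open with these casts to
 * recover driver structs from dispatchable and non-dispatchable handles,
 * along the lines of:
 *
 *    void anv_DestroySampler(VkDevice _device, VkSampler _sampler,
 *                            const VkAllocationCallbacks *pAllocator)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
 *       ...
 *    }
 */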
ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
#define ANV_DEFINE_STRUCT_CASTS(__anv_type, __VkType)                      \
                                                                           \
   static inline const __VkType *                                         \
   __anv_type ## _to_ ## __VkType(const struct __anv_type *__anv_obj)      \
   {                                                                       \
      return (const __VkType *) __anv_obj;                                 \
   }

#define ANV_COMMON_TO_STRUCT(__VkType, __vk_name, __common_name) \
   const __VkType *__vk_name = anv_common_to_ ## __VkType(__common_name)

ANV_DEFINE_STRUCT_CASTS(anv_common, VkMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkBufferMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkImageMemoryBarrier)
/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#endif

#endif /* ANV_PRIVATE_H */