/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))

#include "brw_device_info.h"
#include "brw_compiler.h"
#include "util/macros.h"
#include "util/list.h"

/* Pre-declarations needed for WSI entrypoints */
typedef struct xcb_connection_t xcb_connection_t;
typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;

#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>

#include "anv_entrypoints.h"
#include "brw_context.h"

#define MAX_VIEWPORTS 16
#define MAX_SCISSORS 16
#define MAX_PUSH_CONSTANTS_SIZE 128
#define MAX_DYNAMIC_BUFFERS 16

#define MAX_SAMPLES_LOG2 4 /* SKL supports 16 samples */

#define anv_noreturn __attribute__((__noreturn__))
#define anv_printflike(a, b) __attribute__((__format__(__printf__, a, b)))

#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline uint64_t
align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

static inline int32_t
align_i32(int32_t v, int32_t a)
{
   assert(a != 0 && a == (a & -a));
   return (v + a - 1) & ~(a - 1);
}

/** Alignment must be a power of 2. */
static inline bool
anv_is_aligned(uintmax_t n, uintmax_t a)
{
   assert(a == (a & -a));
   return (n & (a - 1)) == 0;
}
static inline uint32_t
anv_minify(uint32_t n, uint32_t levels)
{
   if (unlikely(n == 0))
      return 0;
   else
      return MAX(n >> levels, 1);
}
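/* A minimal usage sketch (not part of the original header) showing how the
 * alignment and minification helpers above behave for a few concrete values;
 * the call sites are hypothetical.
 *
 *    align_u32(13, 8)      == 16    // round 13 up to the next multiple of 8
 *    align_u32(16, 8)      == 16    // already-aligned values are unchanged
 *    anv_is_aligned(16, 8) == true
 *    anv_is_aligned(20, 8) == false
 *    anv_minify(64, 2)     == 16    // width of mip level 2 of a 64-wide image
 *    anv_minify(64, 7)     == 1     // dimensions never minify below 1
 */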
anv_clamp_f(float f, float min, float max)

anv_clear_mask(uint32_t *inout_mask, uint32_t clear_mask)
   if (*inout_mask & clear_mask) {
      *inout_mask &= ~clear_mask;

#define for_each_bit(b, dword) \
   for (uint32_t __dword = (dword); \
        (b) = __builtin_ffs(__dword) - 1, __dword; \
        __dword &= ~(1 << (b)))
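/* A minimal sketch (not part of the original header) of how for_each_bit is
 * typically used: iterate over the set bits of a bitmask, for example a
 * VkShaderStageFlags value.  "stages" and "s" are hypothetical names.
 *
 *    uint32_t s;
 *    uint32_t stages = VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
 *    for_each_bit(s, stages) {
 *       // body runs once with s == 0 (vertex) and once with s == 4 (fragment)
 *    }
 */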
#define typed_memcpy(dest, src, count) ({ \
   static_assert(sizeof(*src) == sizeof(*dest), ""); \
   memcpy((dest), (src), (count) * sizeof(*(src))); \
})

#define zero(x) (memset(&(x), 0, sizeof(x)))

/* Define no kernel as 1, since that's an illegal offset for a kernel */

   VkStructureType sType;

/* Whenever we generate an error, pass it through this function. Useful for
 * debugging, where we can break on it. Only call at error site, not when
 * propagating errors. Might be useful to plug in a stack trace here.
 */
VkResult __vk_errorf(VkResult error, const char *file, int line, const char *format, ...);

#ifdef DEBUG
#define vk_error(error) __vk_errorf(error, __FILE__, __LINE__, NULL);
#define vk_errorf(error, format, ...) __vk_errorf(error, __FILE__, __LINE__, format, ## __VA_ARGS__);
#else
#define vk_error(error) error
#define vk_errorf(error, format, ...) error
#endif

void __anv_finishme(const char *file, int line, const char *format, ...)
   anv_printflike(3, 4);
void anv_loge(const char *format, ...) anv_printflike(1, 2);
void anv_loge_v(const char *format, va_list va);

/**
 * Print a FINISHME message, including its source location.
 */
#define anv_finishme(format, ...) \
   __anv_finishme(__FILE__, __LINE__, format, ##__VA_ARGS__);

/* A non-fatal assert. Useful for debugging. */
#ifdef DEBUG
#define anv_assert(x) ({ \
   if (unlikely(!(x))) \
      fprintf(stderr, "%s:%d ASSERT: %s\n", __FILE__, __LINE__, #x); \
})
#else
#define anv_assert(x)
#endif
/**
 * If a block of code is annotated with anv_validate, then the block runs only
 * in debug builds.
 */
#ifdef DEBUG
#define anv_validate if (1)
#else
#define anv_validate if (0)
#endif
void anv_abortf(const char *format, ...) anv_noreturn anv_printflike(1, 2);
void anv_abortfv(const char *format, va_list va) anv_noreturn;

#define stub_return(v) \
      anv_finishme("stub %s", __func__); \

      anv_finishme("stub %s", __func__); \
/**
 * A dynamically growable, circular buffer.  Elements are added at head and
 * removed from tail.  head and tail are free-running uint32_t indices and we
 * only compute the modulo with size when accessing the array.  This way,
 * number of bytes in the queue is always head - tail, even in case of
 * wraparound.
 */
   uint32_t element_size;

int anv_vector_init(struct anv_vector *queue, uint32_t element_size, uint32_t size);
void *anv_vector_add(struct anv_vector *queue);
void *anv_vector_remove(struct anv_vector *queue);

static inline int
anv_vector_length(struct anv_vector *queue)
{
   return (queue->head - queue->tail) / queue->element_size;
}

static inline void *
anv_vector_head(struct anv_vector *vector)
{
   assert(vector->tail < vector->head);
   return (void *)((char *)vector->data +
                   ((vector->head - vector->element_size) &
                    (vector->size - 1)));
}

static inline void *
anv_vector_tail(struct anv_vector *vector)
{
   return (void *)((char *)vector->data + (vector->tail & (vector->size - 1)));
}

anv_vector_finish(struct anv_vector *queue)

#define anv_vector_foreach(elem, queue) \
   static_assert(__builtin_types_compatible_p(__typeof__(queue), struct anv_vector *), ""); \
   for (uint32_t __anv_vector_offset = (queue)->tail; \
        elem = (queue)->data + (__anv_vector_offset & ((queue)->size - 1)), __anv_vector_offset < (queue)->head; \
        __anv_vector_offset += (queue)->element_size)
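/* A minimal usage sketch (not part of the original header) of the anv_vector
 * ring-buffer API declared above.  The element type and the byte sizes are
 * hypothetical; only the init/add/remove/foreach/finish calls mirror the
 * declarations above.
 *
 *    struct anv_vector v;
 *    anv_vector_init(&v, sizeof(uint64_t), 4096);   // sizes are in bytes
 *
 *    uint64_t *slot = anv_vector_add(&v);           // append at head
 *    if (slot)
 *       *slot = 42;
 *
 *    uint64_t *oldest = anv_vector_remove(&v);      // pop from tail
 *
 *    void *elem;
 *    anv_vector_foreach(elem, &v) {
 *       // visits every element still between tail and head
 *    }
 *
 *    anv_vector_finish(&v);
 */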
/* Index into the current validation list.  This is used by the
 * validation list building algorithm to track which buffers are already
 * in the validation list so that we can ensure uniqueness.
 */

/* Last known offset.  This value is provided by the kernel when we
 * execbuf and is used as the presumed offset for the next bunch of
 * relocations.
 */

/* We need to set the WRITE flag on winsys bos so GEM will know we're
 * writing to them and synchronize uses on other rings (e.g. if the display
 * server uses the blitter ring).
 */

/* Represents a lock-free linked list of "free" things.  This is used by
 * both the block pool and the state pools.  Unfortunately, in order to
 * solve the ABA problem, we can't use a single uint32_t head.
 */
union anv_free_list {
   /* A simple count that is incremented every time the head changes. */

#define ANV_FREE_LIST_EMPTY ((union anv_free_list) { { 1, 0 } })
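/* A rough illustration (not part of the original header) of the ABA problem
 * the {offset, count} pair guards against.  The field and helper names below
 * (offset, count, u64, next_of) are assumptions for the sketch; the real
 * push/pop live in the allocator implementation.
 *
 *    union anv_free_list old, new;
 *    do {
 *       old = pool->free_list;                // e.g. head points at node A
 *       new.offset = next_of(old.offset);     // node after A
 *       new.count = old.count + 1;            // bump the generation count
 *    } while (!__sync_bool_compare_and_swap(&pool->free_list.u64,
 *                                           old.u64, new.u64));
 *
 * With a bare 32-bit head, another thread could pop A and B and push A back
 * between the read and the compare-and-swap; the CAS would still succeed
 * with a stale "next" value.  The free-running count makes that stale CAS
 * fail, because the count no longer matches even when the offset does.
 */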
struct anv_block_state {

struct anv_block_pool {
   struct anv_device *device;

   /* The offset from the start of the bo to the "center" of the block
    * pool.  Pointers to allocated blocks are given by
    * bo.map + center_bo_offset + offsets.
    */
   uint32_t center_bo_offset;

   /* Current memory map of the block pool.  This pointer may or may not
    * point to the actual beginning of the block pool memory.  If
    * anv_block_pool_alloc_back has ever been called, then this pointer
    * will point to the "center" position of the buffer and all offsets
    * (negative or positive) given out by the block pool alloc functions
    * will be valid relative to this pointer.
    *
    * In particular, map == bo.map + center_offset
    */

   /**
    * Array of mmaps and gem handles owned by the block pool, reclaimed when
    * the block pool is destroyed.
    */
   struct anv_vector mmap_cleanups;

   union anv_free_list free_list;
   struct anv_block_state state;

   union anv_free_list back_free_list;
   struct anv_block_state back_state;
/* Block pools are backed by a fixed-size 4GB memfd */
#define BLOCK_POOL_MEMFD_SIZE (1ull << 32)
/* The center of the block pool is also the middle of the memfd.  This may
 * change in the future if we decide differently for some reason.
 */
#define BLOCK_POOL_MEMFD_CENTER (BLOCK_POOL_MEMFD_SIZE / 2)

static inline uint32_t
anv_block_pool_size(struct anv_block_pool *pool)
{
   return pool->state.end + pool->back_state.end;
}
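/* A worked example (not part of the original header) of how center_bo_offset
 * relates block-pool offsets to addresses; the numbers are made up.
 *
 * Suppose center_bo_offset is 1 MiB.  Then:
 *
 *    anv_block_pool_alloc()      returns offsets  0, +block_size, ...
 *    anv_block_pool_alloc_back() returns offsets -block_size, -2*block_size, ...
 *
 * In both cases the CPU pointer for an offset is pool->map + offset, i.e.
 * pool->bo.map + center_bo_offset + offset, so "back" allocations land just
 * below the center and "front" allocations just above it.  The footprint
 * reported by anv_block_pool_size() is the sum of the growth in both
 * directions (state.end + back_state.end).
 */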
struct anv_fixed_size_state_pool {
   union anv_free_list free_list;
   struct anv_block_state block;

#define ANV_MIN_STATE_SIZE_LOG2 6
#define ANV_MAX_STATE_SIZE_LOG2 10

#define ANV_STATE_BUCKETS (ANV_MAX_STATE_SIZE_LOG2 - ANV_MIN_STATE_SIZE_LOG2)

struct anv_state_pool {
   struct anv_block_pool *block_pool;
   struct anv_fixed_size_state_pool buckets[ANV_STATE_BUCKETS];

struct anv_state_stream_block;

struct anv_state_stream {
   struct anv_block_pool *block_pool;

   /* The current working block */
   struct anv_state_stream_block *block;

   /* Offset at which the current block starts */

   /* Offset at which to allocate the next state */

   /* Offset at which the current block ends */

#define CACHELINE_SIZE 64
#define CACHELINE_MASK 63
static inline void
anv_clflush_range(void *start, size_t size)
{
   void *p = (void *) (((uintptr_t) start) & ~CACHELINE_MASK);
   void *end = start + size;

   __builtin_ia32_mfence();
   while (p < end) {
      __builtin_ia32_clflush(p);
      p += CACHELINE_SIZE;
   }
}

static inline void
anv_state_clflush(struct anv_state state)
{
   anv_clflush_range(state.map, state.alloc_size);
}
void anv_block_pool_init(struct anv_block_pool *pool,
                         struct anv_device *device, uint32_t block_size);
void anv_block_pool_finish(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc(struct anv_block_pool *pool);
int32_t anv_block_pool_alloc_back(struct anv_block_pool *pool);
void anv_block_pool_free(struct anv_block_pool *pool, int32_t offset);
void anv_state_pool_init(struct anv_state_pool *pool,
                         struct anv_block_pool *block_pool);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_state anv_state_pool_alloc(struct anv_state_pool *pool,
                                      size_t state_size, size_t alignment);
void anv_state_pool_free(struct anv_state_pool *pool, struct anv_state state);
void anv_state_stream_init(struct anv_state_stream *stream,
                           struct anv_block_pool *block_pool);
void anv_state_stream_finish(struct anv_state_stream *stream);
struct anv_state anv_state_stream_alloc(struct anv_state_stream *stream,
                                        uint32_t size, uint32_t alignment);

/**
 * Implements a pool of re-usable BOs.  The interface is identical to that
 * of block_pool except that each block is its own BO.
 */
   struct anv_device *device;

void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);

void *anv_resolve_entrypoint(uint32_t index);

extern struct anv_dispatch_table dtable;
#define ANV_CALL(func) ({ \
   if (dtable.func == NULL) { \
      size_t idx = offsetof(struct anv_dispatch_table, func) / sizeof(void *); \
      dtable.entrypoints[idx] = anv_resolve_entrypoint(idx); \
   } \
   dtable.func; \
})
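/* A minimal usage sketch (not part of the original header): ANV_CALL resolves
 * a dispatch-table entry on first use and evaluates to the function pointer,
 * so a call site reads like a normal Vulkan entrypoint call.  The entrypoint
 * and arguments below are hypothetical.
 *
 *    ANV_CALL(CmdPipelineBarrier)(commandBuffer, srcStageMask, dstStageMask,
 *                                 0, 0, NULL, 0, NULL, 0, NULL);
 */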
static inline void *
anv_alloc(const VkAllocationCallbacks *alloc,
          size_t size, size_t align,
          VkSystemAllocationScope scope)
{
   return alloc->pfnAllocation(alloc->pUserData, size, align, scope);
}

static inline void *
anv_realloc(const VkAllocationCallbacks *alloc,
            void *ptr, size_t size, size_t align,
            VkSystemAllocationScope scope)
{
   return alloc->pfnReallocation(alloc->pUserData, ptr, size, align, scope);
}

static inline void
anv_free(const VkAllocationCallbacks *alloc, void *data)
{
   alloc->pfnFree(alloc->pUserData, data);
}
static inline void *
anv_alloc2(const VkAllocationCallbacks *parent_alloc,
           const VkAllocationCallbacks *alloc,
           size_t size, size_t align,
           VkSystemAllocationScope scope)
{
   if (alloc)
      return anv_alloc(alloc, size, align, scope);
   else
      return anv_alloc(parent_alloc, size, align, scope);
}

static inline void
anv_free2(const VkAllocationCallbacks *parent_alloc,
          const VkAllocationCallbacks *alloc,
          void *data)
{
   if (alloc)
      anv_free(alloc, data);
   else
      anv_free(parent_alloc, data);
}
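/* A minimal usage sketch (not part of the original header) of the allocation
 * helpers above: anv_alloc2/anv_free2 prefer the object-level allocator and
 * fall back to the parent (e.g. device) allocator when the caller passed
 * NULL, mirroring how Vulkan treats pAllocator.  "struct anv_thing" is a
 * hypothetical type.
 *
 *    struct anv_thing *thing =
 *       anv_alloc2(&device->alloc, pAllocator, sizeof(*thing), 8,
 *                  VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
 *    if (thing == NULL)
 *       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
 *    ...
 *    anv_free2(&device->alloc, pAllocator, thing);
 */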
struct anv_wsi_interface;
#define VK_ICD_WSI_PLATFORM_MAX 5

struct anv_physical_device {
   VK_LOADER_DATA _loader_data;

   struct anv_instance * instance;
   const struct brw_device_info * info;
   uint64_t aperture_size;
   struct brw_compiler * compiler;
   struct isl_device isl_dev;
   int cmd_parser_version;

   struct anv_wsi_interface * wsi[VK_ICD_WSI_PLATFORM_MAX];

struct anv_instance {
   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   int physicalDeviceCount;
   struct anv_physical_device physicalDevice;

VkResult anv_init_wsi(struct anv_physical_device *physical_device);
void anv_finish_wsi(struct anv_physical_device *physical_device);

struct anv_meta_state {
   VkAllocationCallbacks alloc;

   /**
    * Use array element `i` for images with `2^i` samples.
    */
   /**
    * Pipeline N is used to clear color attachment N of the current
    * subpass.
    *
    * HACK: We use one pipeline per color attachment to work around the
    * compiler's inability to dynamically set the render target index of
    * the render target write message.
    */
   struct anv_pipeline *color_pipelines[MAX_RTS];

   struct anv_pipeline *depth_only_pipeline;
   struct anv_pipeline *stencil_only_pipeline;
   struct anv_pipeline *depthstencil_pipeline;
   } clear[1 + MAX_SAMPLES_LOG2];

   VkRenderPass render_pass;

   /** Pipeline that blits from a 1D image. */
   VkPipeline pipeline_1d_src;

   /** Pipeline that blits from a 2D image. */
   VkPipeline pipeline_2d_src;

   /** Pipeline that blits from a 3D image. */
   VkPipeline pipeline_3d_src;

   VkPipelineLayout pipeline_layout;
   VkDescriptorSetLayout ds_layout;

   VkRenderPass render_pass;

   VkPipelineLayout img_p_layout;
   VkDescriptorSetLayout img_ds_layout;
   VkPipelineLayout buf_p_layout;
   VkDescriptorSetLayout buf_ds_layout;

   /* Pipelines indexed by source and destination type.  See the
    * blit2d_src_type and blit2d_dst_type enums in anv_meta_blit2d.c to
    * see what these mean.
    */
   VkPipeline pipelines[2][3];

   /** Pipeline [i] resolves an image with 2^(i+1) samples. */
   VkPipeline pipelines[MAX_SAMPLES_LOG2];

   VkPipelineLayout pipeline_layout;
   VkDescriptorSetLayout ds_layout;

   VK_LOADER_DATA _loader_data;

   struct anv_device * device;

   struct anv_state_pool * pool;

struct anv_pipeline_cache {
   struct anv_device * device;
   struct anv_state_stream program_stream;
   pthread_mutex_t mutex;

   uint32_t kernel_count;
   uint32_t * hash_table;

struct anv_pipeline_bind_map;

void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
                             struct anv_device *device);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
uint32_t anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
                                   const unsigned char *sha1,
                                   const struct brw_stage_prog_data **prog_data,
                                   struct anv_pipeline_bind_map *map);
uint32_t anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
                                          const unsigned char *sha1,
                                          const struct brw_stage_prog_data **prog_data,
                                          size_t prog_data_size,
                                          struct anv_pipeline_bind_map *map);

   VK_LOADER_DATA _loader_data;

   VkAllocationCallbacks alloc;

   struct anv_instance * instance;
   struct brw_device_info info;
   struct isl_device isl_dev;
   bool can_chain_batches;
   bool robust_buffer_access;

   struct anv_bo_pool batch_bo_pool;

   struct anv_block_pool dynamic_state_block_pool;
   struct anv_state_pool dynamic_state_pool;

   struct anv_block_pool instruction_block_pool;
   struct anv_pipeline_cache default_pipeline_cache;

   struct anv_block_pool surface_state_block_pool;
   struct anv_state_pool surface_state_pool;

   struct anv_bo workaround_bo;

   struct anv_meta_state meta_state;

   struct anv_state border_colors;

   struct anv_queue queue;

   struct anv_block_pool scratch_block_pool;

   uint32_t default_mocs;

   pthread_mutex_t mutex;

void anv_device_get_cache_uuid(void *uuid);

void* anv_gem_mmap(struct anv_device *device,
                   uint32_t gem_handle, uint64_t offset, uint64_t size, uint32_t flags);
void anv_gem_munmap(void *p, uint64_t size);
uint32_t anv_gem_create(struct anv_device *device, size_t size);
void anv_gem_close(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_userptr(struct anv_device *device, void *mem, size_t size);
int anv_gem_wait(struct anv_device *device, uint32_t gem_handle, int64_t *timeout_ns);
int anv_gem_execbuffer(struct anv_device *device,
                       struct drm_i915_gem_execbuffer2 *execbuf);
int anv_gem_set_tiling(struct anv_device *device, uint32_t gem_handle,
                       uint32_t stride, uint32_t tiling);
int anv_gem_create_context(struct anv_device *device);
int anv_gem_destroy_context(struct anv_device *device, int context);
int anv_gem_get_param(int fd, uint32_t param);
bool anv_gem_get_bit6_swizzle(int fd, uint32_t tiling);
int anv_gem_get_aperture(int fd, uint64_t *size);
int anv_gem_handle_to_fd(struct anv_device *device, uint32_t gem_handle);
uint32_t anv_gem_fd_to_handle(struct anv_device *device, int fd);
int anv_gem_set_caching(struct anv_device *device, uint32_t gem_handle, uint32_t caching);
int anv_gem_set_domain(struct anv_device *device, uint32_t gem_handle,
                       uint32_t read_domains, uint32_t write_domain);

VkResult anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size);

struct anv_reloc_list {
   struct drm_i915_gem_relocation_entry * relocs;
   struct anv_bo ** reloc_bos;

VkResult anv_reloc_list_init(struct anv_reloc_list *list,
                             const VkAllocationCallbacks *alloc);
void anv_reloc_list_finish(struct anv_reloc_list *list,
                           const VkAllocationCallbacks *alloc);

uint64_t anv_reloc_list_add(struct anv_reloc_list *list,
                            const VkAllocationCallbacks *alloc,
                            uint32_t offset, struct anv_bo *target_bo,

struct anv_batch_bo {
   /* Link in the anv_cmd_buffer.owned_batch_bos list */
   struct list_head link;

   /* Bytes actually consumed in this batch BO */

   /* Last seen surface state block pool bo offset */
   uint32_t last_ss_pool_bo_offset;

   struct anv_reloc_list relocs;

   const VkAllocationCallbacks * alloc;

   struct anv_reloc_list * relocs;

   /* This callback is called (with the associated user data) in the event
    * that the batch runs out of space.
    */
   VkResult (*extend_cb)(struct anv_batch *, void *);

void *anv_batch_emit_dwords(struct anv_batch *batch, int num_dwords);
void anv_batch_emit_batch(struct anv_batch *batch, struct anv_batch *other);
uint64_t anv_batch_emit_reloc(struct anv_batch *batch,
                              void *location, struct anv_bo *bo, uint32_t offset);
VkResult anv_device_submit_simple_batch(struct anv_device *device,
                                        struct anv_batch *batch);

#define __gen_address_type struct anv_address
#define __gen_user_data struct anv_batch

static inline uint64_t
__gen_combine_address(struct anv_batch *batch, void *location,
                      const struct anv_address address, uint32_t delta)
{
   if (address.bo == NULL) {
      return address.offset + delta;
   } else {
      assert(batch->start <= location && location < batch->end);
      return anv_batch_emit_reloc(batch, location,
                                  address.bo, address.offset + delta);
   }
}
/* Wrapper macros needed to work around preprocessor argument issues.  In
 * particular, arguments don't get pre-evaluated if they are concatenated.
 * This means that, if you pass GENX(3DSTATE_PS) into the emit macro, the
 * GENX macro won't get evaluated if the emit macro contains "cmd ## foo".
 * We can work around this easily enough with these helpers.
 */
#define __anv_cmd_length(cmd) cmd ## _length
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
#define __anv_reg_num(reg) reg ## _num

#define anv_pack_struct(dst, struc, ...) do { \
      struct struc __template = { \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(struc)(NULL, dst, &__template); \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
   } while (0)
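/* A minimal usage sketch (not part of the original header): anv_pack_struct
 * packs a designated-initializer template into caller-provided dwords.  The
 * example reuses the GEN9 MOCS struct and field named further down in this
 * file; treating "_length" as the generated dword count follows the
 * __anv_cmd_length convention above.
 *
 *    uint32_t mocs[GEN9_MEMORY_OBJECT_CONTROL_STATE_length];
 *    anv_pack_struct(mocs, GEN9_MEMORY_OBJECT_CONTROL_STATE,
 *                    .IndextoMOCSTables = 2);
 */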
#define anv_batch_emitn(batch, n, cmd, ...) ({ \
      void *__dst = anv_batch_emit_dwords(batch, n); \
      struct cmd __template = { \
         __anv_cmd_header(cmd), \
         .DWordLength = n - __anv_cmd_length_bias(cmd), \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(cmd)(batch, __dst, &__template); \
      __dst; \
   })

#define anv_batch_emit_merge(batch, dwords0, dwords1) \
   do { \
      uint32_t *dw; \
      \
      static_assert(ARRAY_SIZE(dwords0) == ARRAY_SIZE(dwords1), "mismatch merge"); \
      dw = anv_batch_emit_dwords((batch), ARRAY_SIZE(dwords0)); \
      for (uint32_t i = 0; i < ARRAY_SIZE(dwords0); i++) \
         dw[i] = (dwords0)[i] | (dwords1)[i]; \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4)); \
   } while (0)

#define anv_batch_emit(batch, cmd, name) \
   for (struct cmd name = { __anv_cmd_header(cmd) }, \
        *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
        __builtin_expect(_dst != NULL, 1); \
        ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
           VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
           _dst = NULL; \
         }))
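/* A minimal usage sketch (not part of the original header) of the
 * anv_batch_emit for-loop macro: the body fills in the named template and the
 * packed dwords are written to the batch when the loop "iterates" once.  The
 * field names are taken from the genxml headers and are illustrative.
 *
 *    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
 *       pc.CommandStreamerStallEnable = true;
 *       pc.StallAtPixelScoreboard     = true;
 *    }
 */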
#define anv_state_pool_emit(pool, cmd, align, ...) ({ \
      const uint32_t __size = __anv_cmd_length(cmd) * 4; \
      struct anv_state __state = \
         anv_state_pool_alloc((pool), __size, align); \
      struct cmd __template = { \
         __VA_ARGS__ \
      }; \
      __anv_cmd_pack(cmd)(NULL, __state.map, &__template); \
      VG(VALGRIND_CHECK_MEM_IS_DEFINED(__state.map, __anv_cmd_length(cmd) * 4)); \
      if (!(pool)->block_pool->device->info.has_llc) \
         anv_state_clflush(__state); \
      __state; \
   })

#define GEN7_MOCS (struct GEN7_MEMORY_OBJECT_CONTROL_STATE) { \
   .GraphicsDataTypeGFDT = 0, \
   .LLCCacheabilityControlLLCCC = 0, \
   .L3CacheabilityControlL3CC = 1, \
}

#define GEN75_MOCS (struct GEN75_MEMORY_OBJECT_CONTROL_STATE) { \
   .LLCeLLCCacheabilityControlLLCCC = 0, \
   .L3CacheabilityControlL3CC = 1, \
}

#define GEN8_MOCS (struct GEN8_MEMORY_OBJECT_CONTROL_STATE) { \
   .MemoryTypeLLCeLLCCacheabilityControl = WB, \
   .TargetCache = L3DefertoPATforLLCeLLCselection, \
}

/* Skylake: MOCS is now an index into an array of 62 different caching
 * configurations programmed by the kernel.
 */
#define GEN9_MOCS (struct GEN9_MEMORY_OBJECT_CONTROL_STATE) { \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
   .IndextoMOCSTables = 2 \
}

#define GEN9_MOCS_PTE { \
   /* TC=LLC/eLLC, LeCC=WB, LRUM=3, L3CC=WB */ \
   .IndextoMOCSTables = 1 \
}
struct anv_device_memory {
   VkDeviceSize map_size;

/**
 * Header for Vertex URB Entry (VUE)
 */
struct anv_vue_header {
   uint32_t RTAIndex; /* RenderTargetArrayIndex */
   uint32_t ViewportIndex;

struct anv_descriptor_set_binding_layout {
   /* Number of array elements in this binding */
   /* Index into the flattened descriptor set */
   uint16_t descriptor_index;

   /* Index into the dynamic state array for a dynamic buffer */
   int16_t dynamic_offset_index;

   /* Index into the descriptor set buffer views */
   int16_t buffer_index;

      /* Index into the binding table for the associated surface */
      int16_t surface_index;

      /* Index into the sampler table for the associated sampler */
      int16_t sampler_index;

      /* Index into the image table for the associated image */
   } stage[MESA_SHADER_STAGES];

   /* Immutable samplers (or NULL if no immutable samplers) */
   struct anv_sampler **immutable_samplers;

struct anv_descriptor_set_layout {
   /* Number of bindings in this descriptor set */
   uint16_t binding_count;

   /* Total size of the descriptor set with room for all array entries */

   /* Shader stages affected by this descriptor set */
   uint16_t shader_stages;

   /* Number of buffers in this descriptor set */
   uint16_t buffer_count;

   /* Number of dynamic offsets used by this descriptor set */
   uint16_t dynamic_offset_count;

   /* Bindings in this descriptor set */
   struct anv_descriptor_set_binding_layout binding[0];

struct anv_descriptor {
   VkDescriptorType type;

      struct anv_image_view *image_view;
      struct anv_sampler *sampler;

      struct anv_buffer_view *buffer_view;

struct anv_descriptor_set {
   const struct anv_descriptor_set_layout *layout;
   uint32_t buffer_count;
   struct anv_buffer_view *buffer_views;
   struct anv_descriptor descriptors[0];

struct anv_descriptor_pool {
   struct anv_state_stream surface_state_stream;
   void *surface_state_free_list;

anv_descriptor_set_create(struct anv_device *device,
                          struct anv_descriptor_pool *pool,
                          const struct anv_descriptor_set_layout *layout,
                          struct anv_descriptor_set **out_set);

anv_descriptor_set_destroy(struct anv_device *device,
                           struct anv_descriptor_pool *pool,
                           struct anv_descriptor_set *set);

#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT16_MAX

struct anv_pipeline_binding {
   /* The descriptor set this surface corresponds to.  The special value of
    * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
    * to a color attachment and not a regular descriptor.
    */

   /* Offset into the descriptor set or attachment list. */

struct anv_pipeline_layout {
      struct anv_descriptor_set_layout *layout;
      uint32_t dynamic_offset_start;

      bool has_dynamic_offsets;
   } stage[MESA_SHADER_STAGES];

   struct anv_device * device;
   VkBufferUsageFlags usage;

   /* Set when bound */
   VkDeviceSize offset;

enum anv_cmd_dirty_bits {
   ANV_CMD_DIRTY_DYNAMIC_VIEWPORT = 1 << 0, /* VK_DYNAMIC_STATE_VIEWPORT */
   ANV_CMD_DIRTY_DYNAMIC_SCISSOR = 1 << 1, /* VK_DYNAMIC_STATE_SCISSOR */
   ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH = 1 << 2, /* VK_DYNAMIC_STATE_LINE_WIDTH */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS = 1 << 3, /* VK_DYNAMIC_STATE_DEPTH_BIAS */
   ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS = 1 << 4, /* VK_DYNAMIC_STATE_BLEND_CONSTANTS */
   ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS = 1 << 5, /* VK_DYNAMIC_STATE_DEPTH_BOUNDS */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK = 1 << 6, /* VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK = 1 << 7, /* VK_DYNAMIC_STATE_STENCIL_WRITE_MASK */
   ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE = 1 << 8, /* VK_DYNAMIC_STATE_STENCIL_REFERENCE */
   ANV_CMD_DIRTY_DYNAMIC_ALL = (1 << 9) - 1,
   ANV_CMD_DIRTY_PIPELINE = 1 << 9,
   ANV_CMD_DIRTY_INDEX_BUFFER = 1 << 10,
   ANV_CMD_DIRTY_RENDER_TARGETS = 1 << 11,

typedef uint32_t anv_cmd_dirty_mask_t;

enum anv_pipe_bits {
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
   ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
   ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
   ANV_PIPE_CS_STALL_BIT = (1 << 20),

   /* This bit does not exist directly in PIPE_CONTROL.  Instead it means that
    * a flush has happened but not a CS stall.  The next time we do any sort
    * of invalidation we need to insert a CS stall at that time.  Otherwise,
    * we would have to CS stall on every flush which could be bad.
    */
   ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),

#define ANV_PIPE_FLUSH_BITS ( \
   ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)

#define ANV_PIPE_STALL_BITS ( \
   ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
   ANV_PIPE_DEPTH_STALL_BIT | \
   ANV_PIPE_CS_STALL_BIT)

#define ANV_PIPE_INVALIDATE_BITS ( \
   ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
   ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
   ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)

struct anv_vertex_binding {
   struct anv_buffer * buffer;
   VkDeviceSize offset;

struct anv_push_constants {
   /* Current allocated size of this push constants data structure.
    * Because a decent chunk of it may not be used (images on SKL, for
    * instance), we won't actually allocate the entire structure up-front.
    */

   /* Push constant data provided by the client through vkPushConstants */
   uint8_t client_data[MAX_PUSH_CONSTANTS_SIZE];

   /* Our hardware only provides zero-based vertex and instance id so, in
    * order to satisfy the Vulkan requirements, we may have to push one or
    * both of these into the shader.
    */
   uint32_t base_vertex;
   uint32_t base_instance;

   /* Offsets and ranges for dynamically bound buffers */
   } dynamic[MAX_DYNAMIC_BUFFERS];

   /* Image data for image_load_store on pre-SKL */
   struct brw_image_param images[MAX_IMAGES];

struct anv_dynamic_state {
      VkViewport viewports[MAX_VIEWPORTS];

      VkRect2D scissors[MAX_SCISSORS];

   float blend_constants[4];

   } stencil_compare_mask;

   } stencil_write_mask;

   } stencil_reference;

extern const struct anv_dynamic_state default_dynamic_state;

void anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                            const struct anv_dynamic_state *src,
                            uint32_t copy_mask);

/**
 * Attachment state when recording a renderpass instance.
 *
 * The clear value is valid only if there exists a pending clear.
 */
struct anv_attachment_state {
   VkImageAspectFlags pending_clear_aspects;
   VkClearValue clear_value;

/** State required while building cmd buffer */
struct anv_cmd_state {
   /* PIPELINE_SELECT.PipelineSelection */
   uint32_t current_pipeline;
   const struct anv_l3_config * current_l3_config;
   anv_cmd_dirty_mask_t dirty;
   anv_cmd_dirty_mask_t compute_dirty;
   enum anv_pipe_bits pending_pipe_bits;
   uint32_t num_workgroups_offset;
   struct anv_bo *num_workgroups_bo;
   VkShaderStageFlags descriptors_dirty;
   VkShaderStageFlags push_constants_dirty;
   uint32_t scratch_size;
   struct anv_pipeline * pipeline;
   struct anv_pipeline * compute_pipeline;
   struct anv_framebuffer * framebuffer;
   struct anv_render_pass * pass;
   struct anv_subpass * subpass;
   VkRect2D render_area;
   uint32_t restart_index;
   struct anv_vertex_binding vertex_bindings[MAX_VBS];
   struct anv_descriptor_set * descriptors[MAX_SETS];
   VkShaderStageFlags push_constant_stages;
   struct anv_push_constants * push_constants[MESA_SHADER_STAGES];
   struct anv_state binding_tables[MESA_SHADER_STAGES];
   struct anv_state samplers[MESA_SHADER_STAGES];
   struct anv_dynamic_state dynamic;

   /**
    * Array length is anv_cmd_state::pass::attachment_count.  Array content is
    * valid only when recording a render pass instance.
    */
   struct anv_attachment_state * attachments;

      struct anv_buffer * index_buffer;
      uint32_t index_type; /**< 3DSTATE_INDEX_BUFFER.IndexFormat */
      uint32_t index_offset;

struct anv_cmd_pool {
   VkAllocationCallbacks alloc;
   struct list_head cmd_buffers;

#define ANV_CMD_BUFFER_BATCH_SIZE 8192

enum anv_cmd_buffer_exec_mode {
   ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
   ANV_CMD_BUFFER_EXEC_MODE_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
   ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
   ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,

struct anv_cmd_buffer {
   VK_LOADER_DATA _loader_data;

   struct anv_device * device;

   struct anv_cmd_pool * pool;
   struct list_head pool_link;

   struct anv_batch batch;

   /* Fields required for the actual chain of anv_batch_bo's.
    *
    * These fields are initialized by anv_cmd_buffer_init_batch_bo_chain().
    */
   struct list_head batch_bos;
   enum anv_cmd_buffer_exec_mode exec_mode;

   /* A vector of anv_batch_bo pointers for every batch or surface buffer
    * referenced by this command buffer
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct anv_vector seen_bbos;

   /* A vector of int32_t's for every block of binding tables.
    *
    * initialized by anv_cmd_buffer_init_batch_bo_chain()
    */
   struct anv_vector bt_blocks;

   struct anv_reloc_list surface_relocs;

   /* Information needed for execbuf
    *
    * These fields are generated by anv_cmd_buffer_prepare_execbuf().
    */
      struct drm_i915_gem_execbuffer2 execbuf;

      struct drm_i915_gem_exec_object2 * objects;
      struct anv_bo ** bos;

      /* Allocated length of the 'objects' and 'bos' arrays */
      uint32_t array_length;

   /* Serial for tracking buffer completion */

   /* Stream objects for storing temporary data */
   struct anv_state_stream surface_state_stream;
   struct anv_state_stream dynamic_state_stream;

   VkCommandBufferUsageFlags usage_flags;
   VkCommandBufferLevel level;

   struct anv_cmd_state state;

VkResult anv_cmd_buffer_init_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_reset_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_end_batch_buffer(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_add_secondary(struct anv_cmd_buffer *primary,
                                  struct anv_cmd_buffer *secondary);
void anv_cmd_buffer_prepare_execbuf(struct anv_cmd_buffer *cmd_buffer);

VkResult anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                           unsigned stage, struct anv_state *bt_state);
VkResult anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                                      unsigned stage, struct anv_state *state);
uint32_t gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer);
void gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,

struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                             const void *data, uint32_t size, uint32_t alignment);
struct anv_state anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                                              uint32_t *a, uint32_t *b,
                                              uint32_t dwords, uint32_t alignment);

anv_cmd_buffer_surface_base_address(struct anv_cmd_buffer *cmd_buffer);

anv_cmd_buffer_alloc_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t entries, uint32_t *state_offset);

anv_cmd_buffer_alloc_surface_state(struct anv_cmd_buffer *cmd_buffer);

anv_cmd_buffer_alloc_dynamic_state(struct anv_cmd_buffer *cmd_buffer,
                                   uint32_t size, uint32_t alignment);

anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);

void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                     const VkRenderPassBeginInfo *info);

void anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
                                struct anv_subpass *subpass);

anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage);

anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_clear_subpass(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_buffer_resolve_subpass(struct anv_cmd_buffer *cmd_buffer);

const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer);

void anv_cmd_buffer_dump(struct anv_cmd_buffer *cmd_buffer);

   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];

   struct anv_state state;

struct anv_shader_module {
   struct nir_shader * nir;

   unsigned char sha1[20];

void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     const VkSpecializationInfo *spec_info);
static inline gl_shader_stage
vk_to_mesa_shader_stage(VkShaderStageFlagBits vk_stage)
{
   assert(__builtin_popcount(vk_stage) == 1);
   return ffs(vk_stage) - 1;
}

static inline VkShaderStageFlagBits
mesa_to_vk_shader_stage(gl_shader_stage mesa_stage)
{
   return (1 << mesa_stage);
}

#define ANV_STAGE_MASK ((1 << MESA_SHADER_STAGES) - 1)

#define anv_foreach_stage(stage, stage_bits) \
   for (gl_shader_stage stage, \
        __tmp = (gl_shader_stage)((stage_bits) & ANV_STAGE_MASK); \
        stage = __builtin_ffs(__tmp) - 1, __tmp; \
        __tmp &= ~(1 << (stage)))
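/* A minimal usage sketch (not part of the original header): anv_foreach_stage
 * walks the gl_shader_stage values present in a VkShaderStageFlags mask,
 * relying on the 1:1 bit mapping established by vk_to_mesa_shader_stage
 * above.  "dirty" is just a name chosen for the sketch.
 *
 *    VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty;
 *    anv_foreach_stage(s, dirty) {
 *       // s is MESA_SHADER_VERTEX, MESA_SHADER_FRAGMENT, ... for each set bit
 *    }
 */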
struct anv_pipeline_bind_map {
   uint32_t surface_count;
   uint32_t sampler_count;
   uint32_t image_count;
   uint32_t attachment_count;

   struct anv_pipeline_binding * surface_to_descriptor;
   struct anv_pipeline_binding * sampler_to_descriptor;
   uint32_t * surface_to_attachment;

struct anv_pipeline {
   struct anv_device * device;
   struct anv_batch batch;
   uint32_t batch_data[512];
   struct anv_reloc_list batch_relocs;
   uint32_t dynamic_state_mask;
   struct anv_dynamic_state dynamic_state;

   struct anv_pipeline_layout * layout;
   struct anv_pipeline_bind_map bindings[MESA_SHADER_STAGES];

   bool needs_data_cache;

   const struct brw_stage_prog_data * prog_data[MESA_SHADER_STAGES];
   uint32_t scratch_start[MESA_SHADER_STAGES];
   uint32_t total_scratch;

      uint32_t start[MESA_SHADER_GEOMETRY + 1];
      uint32_t size[MESA_SHADER_GEOMETRY + 1];
      uint32_t entries[MESA_SHADER_GEOMETRY + 1];
   const struct anv_l3_config * l3_config;
   uint32_t total_size;

   VkShaderStageFlags active_stages;
   struct anv_state blend_state;

   uint32_t binding_stride[MAX_VBS];
   bool instancing_enable[MAX_VBS];
   bool primitive_restart;

   uint32_t cs_thread_width_max;
   uint32_t cs_right_mask;

      uint32_t depth_stencil_state[3];

      uint32_t wm_depth_stencil[3];

      uint32_t wm_depth_stencil[4];
static inline const struct brw_vs_prog_data *
get_vs_prog_data(struct anv_pipeline *pipeline)
{
   return (const struct brw_vs_prog_data *) pipeline->prog_data[MESA_SHADER_VERTEX];
}

static inline const struct brw_gs_prog_data *
get_gs_prog_data(struct anv_pipeline *pipeline)
{
   return (const struct brw_gs_prog_data *) pipeline->prog_data[MESA_SHADER_GEOMETRY];
}

static inline const struct brw_wm_prog_data *
get_wm_prog_data(struct anv_pipeline *pipeline)
{
   return (const struct brw_wm_prog_data *) pipeline->prog_data[MESA_SHADER_FRAGMENT];
}

static inline const struct brw_cs_prog_data *
get_cs_prog_data(struct anv_pipeline *pipeline)
{
   return (const struct brw_cs_prog_data *) pipeline->prog_data[MESA_SHADER_COMPUTE];
}
struct anv_graphics_pipeline_create_info {
   /**
    * If non-negative, overrides the color attachment count of the pipeline's
    * subpass.
    */
   int8_t color_attachment_count;

anv_pipeline_init(struct anv_pipeline *pipeline, struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const struct anv_graphics_pipeline_create_info *extra,
                  const VkAllocationCallbacks *alloc);

anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info);

anv_graphics_pipeline_create(VkDevice device,
                             VkPipelineCache cache,
                             const VkGraphicsPipelineCreateInfo *pCreateInfo,
                             const struct anv_graphics_pipeline_create_info *extra,
                             const VkAllocationCallbacks *alloc,
                             VkPipeline *pPipeline);

struct anv_format_swizzle {
   enum isl_channel_select r:4;
   enum isl_channel_select g:4;
   enum isl_channel_select b:4;
   enum isl_channel_select a:4;

   enum isl_format isl_format:16;
   struct anv_format_swizzle swizzle;

anv_get_format(const struct brw_device_info *devinfo, VkFormat format,
               VkImageAspectFlags aspect, VkImageTiling tiling);
static inline enum isl_format
anv_get_isl_format(const struct brw_device_info *devinfo, VkFormat vk_format,
                   VkImageAspectFlags aspect, VkImageTiling tiling)
{
   return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
}
anv_compute_urb_partition(struct anv_pipeline *pipeline);

anv_setup_pipeline_l3_config(struct anv_pipeline *pipeline);

/**
 * Subsurface of an anv_image.
 */
struct anv_surface {
   struct isl_surf isl;

   /**
    * Offset from VkImage's base address, as bound by vkBindImageMemory().
    */

   /* The original VkFormat provided by the client.  This may not match any
    * of the actual surface formats.
    */

   VkImageAspectFlags aspects;
   uint32_t array_size;
   uint32_t samples; /**< VkImageCreateInfo::samples */
   VkImageUsageFlags usage; /**< Superset of VkImageCreateInfo::usage. */
   VkImageTiling tiling; /** VkImageCreateInfo::tiling */

   /* Set when bound */
   VkDeviceSize offset;

   /**
    * For each foo, anv_image::foo_surface is valid if and only if
    * anv_image::aspects has a foo aspect.
    *
    * The hardware requires that the depth buffer and stencil buffer be
    * separate surfaces.  From Vulkan's perspective, though, depth and stencil
    * reside in the same VkImage.  To satisfy both the hardware and Vulkan, we
    * allocate the depth and stencil buffers as separate surfaces in the same
    * bo.
    */
      struct anv_surface color_surface;

      struct anv_surface depth_surface;
      struct anv_surface stencil_surface;
static inline uint32_t
anv_get_layerCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->layerCount == VK_REMAINING_ARRAY_LAYERS ?
          image->array_size - range->baseArrayLayer : range->layerCount;
}

static inline uint32_t
anv_get_levelCount(const struct anv_image *image,
                   const VkImageSubresourceRange *range)
{
   return range->levelCount == VK_REMAINING_MIP_LEVELS ?
          image->levels - range->baseMipLevel : range->levelCount;
}
struct anv_image_view {
   const struct anv_image *image; /**< VkImageViewCreateInfo::image */

   uint32_t offset; /**< Offset into bo. */

   VkImageAspectFlags aspect_mask;
   uint32_t base_layer;
   VkExtent3D extent; /**< Extent of VkImageViewCreateInfo::baseMipLevel. */

   /** RENDER_SURFACE_STATE when using image as a color render target. */
   struct anv_state color_rt_surface_state;

   /** RENDER_SURFACE_STATE when using image as a sampler surface. */
   struct anv_state sampler_surface_state;

   /** RENDER_SURFACE_STATE when using image as a storage image. */
   struct anv_state storage_surface_state;

   struct brw_image_param storage_image_param;

struct anv_image_create_info {
   const VkImageCreateInfo *vk_info;
   isl_tiling_flags_t isl_tiling_flags;

VkResult anv_image_create(VkDevice _device,
                          const struct anv_image_create_info *info,
                          const VkAllocationCallbacks* alloc,

struct anv_surface *
anv_image_get_surface_for_aspect_mask(struct anv_image *image,
                                      VkImageAspectFlags aspect_mask);

void anv_image_view_init(struct anv_image_view *view,
                         struct anv_device *device,
                         const VkImageViewCreateInfo* pCreateInfo,
                         struct anv_cmd_buffer *cmd_buffer,
                         VkImageUsageFlags usage_mask);

struct anv_buffer_view {
   enum isl_format format; /**< VkBufferViewCreateInfo::format */

   uint32_t offset; /**< Offset into bo. */
   uint64_t range; /**< VkBufferViewCreateInfo::range */

   struct anv_state surface_state;
   struct anv_state storage_surface_state;

   struct brw_image_param storage_image_param;

void anv_buffer_view_init(struct anv_buffer_view *view,
                          struct anv_device *device,
                          const VkBufferViewCreateInfo* pCreateInfo,
                          struct anv_cmd_buffer *cmd_buffer);

anv_isl_format_for_descriptor_type(VkDescriptorType type);
static inline struct VkExtent3D
anv_sanitize_image_extent(const VkImageType imageType,
                          const struct VkExtent3D imageExtent)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkExtent3D) { imageExtent.width, 1, 1 };
   case VK_IMAGE_TYPE_2D:
      return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
   case VK_IMAGE_TYPE_3D:
      return imageExtent;
   default:
      unreachable("invalid image type");
   }
}
static inline struct VkOffset3D
anv_sanitize_image_offset(const VkImageType imageType,
                          const struct VkOffset3D imageOffset)
{
   switch (imageType) {
   case VK_IMAGE_TYPE_1D:
      return (VkOffset3D) { imageOffset.x, 0, 0 };
   case VK_IMAGE_TYPE_2D:
      return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
   case VK_IMAGE_TYPE_3D:
      return imageOffset;
   default:
      unreachable("invalid image type");
   }
}
void anv_fill_buffer_surface_state(struct anv_device *device,
                                   struct anv_state state,
                                   enum isl_format format,
                                   uint32_t offset, uint32_t range,

void anv_image_view_fill_image_param(struct anv_device *device,
                                     struct anv_image_view *view,
                                     struct brw_image_param *param);
void anv_buffer_view_fill_image_param(struct anv_device *device,
                                      struct anv_buffer_view *view,
                                      struct brw_image_param *param);

struct anv_sampler {

struct anv_framebuffer {
   uint32_t attachment_count;
   struct anv_image_view * attachments[0];

struct anv_subpass {
   uint32_t input_count;
   uint32_t * input_attachments;
   uint32_t color_count;
   uint32_t * color_attachments;
   uint32_t * resolve_attachments;
   uint32_t depth_stencil_attachment;

   /** Subpass has at least one resolve attachment */

struct anv_render_pass_attachment {
   VkAttachmentLoadOp load_op;
   VkAttachmentLoadOp stencil_load_op;

struct anv_render_pass {
   uint32_t attachment_count;
   uint32_t subpass_count;
   uint32_t * subpass_attachments;
   struct anv_render_pass_attachment * attachments;
   struct anv_subpass subpasses[0];

extern struct anv_render_pass anv_meta_dummy_renderpass;

struct anv_query_pool_slot {

struct anv_query_pool {

VkResult anv_device_init_meta(struct anv_device *device);
void anv_device_finish_meta(struct anv_device *device);

void *anv_lookup_entrypoint(const char *name);

void anv_dump_image_to_ppm(struct anv_device *device,
                           struct anv_image *image, unsigned miplevel,
                           unsigned array_layer, const char *filename);
#define ANV_DEFINE_HANDLE_CASTS(__anv_type, __VkType) \
   static inline struct __anv_type * \
   __anv_type ## _from_handle(__VkType _handle) \
   { \
      return (struct __anv_type *) _handle; \
   } \
   static inline __VkType \
   __anv_type ## _to_handle(struct __anv_type *_obj) \
   { \
      return (__VkType) _obj; \
   }

#define ANV_DEFINE_NONDISP_HANDLE_CASTS(__anv_type, __VkType) \
   static inline struct __anv_type * \
   __anv_type ## _from_handle(__VkType _handle) \
   { \
      return (struct __anv_type *)(uintptr_t) _handle; \
   } \
   static inline __VkType \
   __anv_type ## _to_handle(struct __anv_type *_obj) \
   { \
      return (__VkType)(uintptr_t) _obj; \
   }

#define ANV_FROM_HANDLE(__anv_type, __name, __handle) \
   struct __anv_type *__name = __anv_type ## _from_handle(__handle)
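/* A minimal usage sketch (not part of the original header) of the handle-cast
 * helpers above, as typically used at the top of an entrypoint.
 * "anv_SomeEntrypoint" is a hypothetical name.
 *
 *    VkResult anv_SomeEntrypoint(VkDevice _device, VkBuffer _buffer)
 *    {
 *       ANV_FROM_HANDLE(anv_device, device, _device);
 *       ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
 *       ...
 *       return VK_SUCCESS;
 *    }
 *
 * The matching *_to_handle() functions convert back when returning a newly
 * created object to the application.
 */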
ANV_DEFINE_HANDLE_CASTS(anv_cmd_buffer, VkCommandBuffer)
ANV_DEFINE_HANDLE_CASTS(anv_device, VkDevice)
ANV_DEFINE_HANDLE_CASTS(anv_instance, VkInstance)
ANV_DEFINE_HANDLE_CASTS(anv_physical_device, VkPhysicalDevice)
ANV_DEFINE_HANDLE_CASTS(anv_queue, VkQueue)

ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_cmd_pool, VkCommandPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer, VkBuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_buffer_view, VkBufferView)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_pool, VkDescriptorPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set, VkDescriptorSet)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_descriptor_set_layout, VkDescriptorSetLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_device_memory, VkDeviceMemory)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_fence, VkFence)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_event, VkEvent)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_framebuffer, VkFramebuffer)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image, VkImage)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_image_view, VkImageView);
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_cache, VkPipelineCache)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline, VkPipeline)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_pipeline_layout, VkPipelineLayout)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_query_pool, VkQueryPool)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_render_pass, VkRenderPass)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_sampler, VkSampler)
ANV_DEFINE_NONDISP_HANDLE_CASTS(anv_shader_module, VkShaderModule)
#define ANV_DEFINE_STRUCT_CASTS(__anv_type, __VkType) \
   static inline const __VkType * \
   __anv_type ## _to_ ## __VkType(const struct __anv_type *__anv_obj) \
   { \
      return (const __VkType *) __anv_obj; \
   }

#define ANV_COMMON_TO_STRUCT(__VkType, __vk_name, __common_name) \
   const __VkType *__vk_name = anv_common_to_ ## __VkType(__common_name)

ANV_DEFINE_STRUCT_CASTS(anv_common, VkMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkBufferMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkImageMemoryBarrier)
/* Gen-specific function declarations */
#ifdef genX
#  include "anv_genX.h"
#else
#  define genX(x) gen7_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen75_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen8_##x
#  include "anv_genX.h"
#  undef genX
#  define genX(x) gen9_##x
#  include "anv_genX.h"
#  undef genX
#endif