typedef uint32_t xcb_visualid_t;
typedef uint32_t xcb_window_t;
-#define VK_USE_PLATFORM_XCB_KHR
-#define VK_USE_PLATFORM_WAYLAND_KHR
+struct anv_l3_config;
-#define VK_PROTOTYPES
#include <vulkan/vulkan.h>
#include <vulkan/vulkan_intel.h>
#include <vulkan/vk_icd.h>
/* Maximum of two values.  NOTE: evaluates each argument twice, so do not
 * pass expressions with side effects (e.g. MAX(i++, j)).
 */
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Round v down to the nearest multiple of a.  Unlike align_u32(), the
 * alignment need not be a power of two; it must be non-zero (asserted,
 * matching align_u32()'s precondition check) since v % a would otherwise
 * divide by zero.
 */
static inline uint32_t
align_down_npot_u32(uint32_t v, uint32_t a)
{
   assert(a != 0);
   return v - (v % a);
}
+static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
assert(a != 0 && a == (a & -a));
struct anv_bo_pool {
struct anv_device *device;
- uint32_t bo_size;
-
- void *free_list;
+ void *free_list[16];
};
-void anv_bo_pool_init(struct anv_bo_pool *pool,
- struct anv_device *device, uint32_t block_size);
+void anv_bo_pool_init(struct anv_bo_pool *pool, struct anv_device *device);
void anv_bo_pool_finish(struct anv_bo_pool *pool);
-VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo);
+VkResult anv_bo_pool_alloc(struct anv_bo_pool *pool, struct anv_bo *bo,
+ uint32_t size);
void anv_bo_pool_free(struct anv_bo_pool *pool, const struct anv_bo *bo);
+struct anv_scratch_pool {
+ /* Indexed by Per-Thread Scratch Space number (the hardware value) and stage */
+ struct anv_bo bos[16][MESA_SHADER_STAGES];
+};
+
+void anv_scratch_pool_init(struct anv_device *device,
+ struct anv_scratch_pool *pool);
+void anv_scratch_pool_finish(struct anv_device *device,
+ struct anv_scratch_pool *pool);
+struct anv_bo *anv_scratch_pool_alloc(struct anv_device *device,
+ struct anv_scratch_pool *pool,
+ gl_shader_stage stage,
+ unsigned per_thread_scratch);
void *anv_resolve_entrypoint(uint32_t index);
anv_free(parent_alloc, data);
}
+struct anv_wsi_interaface;
+
+#define VK_ICD_WSI_PLATFORM_MAX 5
+
struct anv_physical_device {
VK_LOADER_DATA _loader_data;
struct anv_instance * instance;
uint32_t chipset_id;
- const char * path;
+ char path[20];
const char * name;
const struct brw_device_info * info;
uint64_t aperture_size;
struct brw_compiler * compiler;
struct isl_device isl_dev;
-};
-
-struct anv_wsi_interaface;
+ int cmd_parser_version;
-#define VK_ICD_WSI_PLATFORM_MAX 5
+ struct anv_wsi_interface * wsi[VK_ICD_WSI_PLATFORM_MAX];
+};
struct anv_instance {
VK_LOADER_DATA _loader_data;
uint32_t apiVersion;
int physicalDeviceCount;
struct anv_physical_device physicalDevice;
-
- struct anv_wsi_interface * wsi[VK_ICD_WSI_PLATFORM_MAX];
};
-VkResult anv_init_wsi(struct anv_instance *instance);
-void anv_finish_wsi(struct anv_instance *instance);
+VkResult anv_init_wsi(struct anv_physical_device *physical_device);
+void anv_finish_wsi(struct anv_physical_device *physical_device);
struct anv_meta_state {
VkAllocationCallbacks alloc;
} blit;
struct {
+ VkRenderPass render_pass;
+
+ VkPipelineLayout img_p_layout;
+ VkDescriptorSetLayout img_ds_layout;
+ VkPipelineLayout buf_p_layout;
+ VkDescriptorSetLayout buf_ds_layout;
+
+ /* Pipelines indexed by source and destination type. See the
+ * blit2d_src_type and blit2d_dst_type enums in anv_meta_blit2d.c to
+ * see what these mean.
+ */
+ VkPipeline pipelines[2][3];
+ } blit2d;
+
+ struct {
/** Pipeline [i] resolves an image with 2^(i+1) samples. */
VkPipeline pipelines[MAX_SAMPLES_LOG2];
uint32_t * hash_table;
};
+struct anv_pipeline_bind_map;
+
void anv_pipeline_cache_init(struct anv_pipeline_cache *cache,
struct anv_device *device);
void anv_pipeline_cache_finish(struct anv_pipeline_cache *cache);
uint32_t anv_pipeline_cache_search(struct anv_pipeline_cache *cache,
- const unsigned char *sha1, void *prog_data);
+ const unsigned char *sha1,
+ const struct brw_stage_prog_data **prog_data,
+ struct anv_pipeline_bind_map *map);
uint32_t anv_pipeline_cache_upload_kernel(struct anv_pipeline_cache *cache,
const unsigned char *sha1,
const void *kernel,
size_t kernel_size,
- const void *prog_data,
- size_t prog_data_size);
+ const struct brw_stage_prog_data **prog_data,
+ size_t prog_data_size,
+ struct anv_pipeline_bind_map *map);
struct anv_device {
VK_LOADER_DATA _loader_data;
struct isl_device isl_dev;
int context_id;
int fd;
+ bool can_chain_batches;
+ bool robust_buffer_access;
struct anv_bo_pool batch_bo_pool;
struct anv_queue queue;
- struct anv_block_pool scratch_block_pool;
+ struct anv_scratch_pool scratch_pool;
uint32_t default_mocs;
pthread_mutex_t mutex;
};
-VkResult gen7_init_device_state(struct anv_device *device);
-VkResult gen75_init_device_state(struct anv_device *device);
-VkResult gen8_init_device_state(struct anv_device *device);
-VkResult gen9_init_device_state(struct anv_device *device);
-
void anv_device_get_cache_uuid(void *uuid);
#define __anv_cmd_length_bias(cmd) cmd ## _length_bias
#define __anv_cmd_header(cmd) cmd ## _header
#define __anv_cmd_pack(cmd) cmd ## _pack
+#define __anv_reg_num(reg) reg ## _num
-#define anv_batch_emit(batch, cmd, ...) do { \
- void *__dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
- struct cmd __template = { \
- __anv_cmd_header(cmd), \
+#define anv_pack_struct(dst, struc, ...) do { \
+ struct struc __template = { \
__VA_ARGS__ \
}; \
- __anv_cmd_pack(cmd)(batch, __dst, &__template); \
- VG(VALGRIND_CHECK_MEM_IS_DEFINED(__dst, __anv_cmd_length(cmd) * 4)); \
+ __anv_cmd_pack(struc)(NULL, dst, &__template); \
+ VG(VALGRIND_CHECK_MEM_IS_DEFINED(dst, __anv_cmd_length(struc) * 4)); \
} while (0)
#define anv_batch_emitn(batch, n, cmd, ...) ({ \
VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, ARRAY_SIZE(dwords0) * 4));\
} while (0)
+/* Emit a command of type `cmd` into `batch`.  Expands to a single-iteration
+ * for loop: the init clause allocates the dwords and declares `name` (header
+ * dword pre-filled) for the caller's body to fill in; the increment clause
+ * packs `name` into the batch after the body completes, then sets _dst to
+ * NULL so the loop terminates after one pass.  If the dword allocation
+ * fails (_dst == NULL) the body is skipped entirely.
+ */
+#define anv_batch_emit(batch, cmd, name) \
+ for (struct cmd name = { __anv_cmd_header(cmd) }, \
+ *_dst = anv_batch_emit_dwords(batch, __anv_cmd_length(cmd)); \
+ __builtin_expect(_dst != NULL, 1); \
+ ({ __anv_cmd_pack(cmd)(batch, _dst, &name); \
+ VG(VALGRIND_CHECK_MEM_IS_DEFINED(_dst, __anv_cmd_length(cmd) * 4)); \
+ _dst = NULL; \
+ }))
+
#define anv_state_pool_emit(pool, cmd, align, ...) ({ \
const uint32_t __size = __anv_cmd_length(cmd) * 4; \
struct anv_state __state = \
};
struct anv_descriptor_set_binding_layout {
+#ifndef NDEBUG
+ /* The type of the descriptors in this binding */
+ VkDescriptorType type;
+#endif
+
/* Number of array elements in this binding */
uint16_t array_size;
struct anv_descriptor_pool *pool,
struct anv_descriptor_set *set);
+#define ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS UINT8_MAX
+
struct anv_pipeline_binding {
- /* The descriptor set this surface corresponds to */
- uint16_t set;
+ /* The descriptor set this surface corresponds to. The special value of
+ * ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS indicates that the offset refers
+ * to a color attachment and not a regular descriptor.
+ */
+ uint8_t set;
+
+ /* Binding in the descriptor set */
+ uint8_t binding;
- /* Offset into the descriptor set */
- uint16_t offset;
+ /* Index in the binding */
+ uint8_t index;
};
struct anv_pipeline_layout {
struct {
bool has_dynamic_offsets;
} stage[MESA_SHADER_STAGES];
+
+ unsigned char sha1[20];
};
struct anv_buffer {
};
typedef uint32_t anv_cmd_dirty_mask_t;
+/* Deferred PIPE_CONTROL work.  Bits accumulate in
+ * anv_cmd_state::pending_pipe_bits and are emitted later.  The hardware
+ * bits apparently mirror the corresponding PIPE_CONTROL fields (see the
+ * NEEDS_CS_STALL comment below), which is why the values are
+ * non-contiguous.
+ */
+enum anv_pipe_bits {
+ ANV_PIPE_DEPTH_CACHE_FLUSH_BIT = (1 << 0),
+ ANV_PIPE_STALL_AT_SCOREBOARD_BIT = (1 << 1),
+ ANV_PIPE_STATE_CACHE_INVALIDATE_BIT = (1 << 2),
+ ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT = (1 << 3),
+ ANV_PIPE_VF_CACHE_INVALIDATE_BIT = (1 << 4),
+ ANV_PIPE_DATA_CACHE_FLUSH_BIT = (1 << 5),
+ ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT = (1 << 10),
+ ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT = (1 << 11),
+ ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT = (1 << 12),
+ ANV_PIPE_DEPTH_STALL_BIT = (1 << 13),
+ ANV_PIPE_CS_STALL_BIT = (1 << 20),
+
+ /* This bit does not exist directly in PIPE_CONTROL. Instead it means that
+ * a flush has happened but not a CS stall. The next time we do any sort
+ * of invalidation we need to insert a CS stall at that time. Otherwise,
+ * we would have to CS stall on every flush which could be bad.
+ */
+ ANV_PIPE_NEEDS_CS_STALL_BIT = (1 << 21),
+};
+
+/* All bits that flush (write back) a cache. */
+#define ANV_PIPE_FLUSH_BITS ( \
+ ANV_PIPE_DEPTH_CACHE_FLUSH_BIT | \
+ ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
+ ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
+
+/* All bits that stall some part of the pipeline. */
+#define ANV_PIPE_STALL_BITS ( \
+ ANV_PIPE_STALL_AT_SCOREBOARD_BIT | \
+ ANV_PIPE_DEPTH_STALL_BIT | \
+ ANV_PIPE_CS_STALL_BIT)
+
+/* All bits that invalidate a cache.  NOTE(review): DATA_CACHE_FLUSH_BIT
+ * appearing in the *invalidate* set looks intentional (a flush is used where
+ * no separate invalidate exists) -- confirm against the PIPE_CONTROL
+ * emission code.
+ */
+#define ANV_PIPE_INVALIDATE_BITS ( \
+ ANV_PIPE_STATE_CACHE_INVALIDATE_BIT | \
+ ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT | \
+ ANV_PIPE_VF_CACHE_INVALIDATE_BIT | \
+ ANV_PIPE_DATA_CACHE_FLUSH_BIT | \
+ ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT | \
+ ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT)
+
struct anv_vertex_binding {
struct anv_buffer * buffer;
VkDeviceSize offset;
struct anv_cmd_state {
/* PIPELINE_SELECT.PipelineSelection */
uint32_t current_pipeline;
- uint32_t current_l3_config;
+ const struct anv_l3_config * current_l3_config;
uint32_t vb_dirty;
anv_cmd_dirty_mask_t dirty;
anv_cmd_dirty_mask_t compute_dirty;
+ enum anv_pipe_bits pending_pipe_bits;
uint32_t num_workgroups_offset;
struct anv_bo *num_workgroups_bo;
VkShaderStageFlags descriptors_dirty;
struct anv_framebuffer * framebuffer;
struct anv_render_pass * pass;
struct anv_subpass * subpass;
+ VkRect2D render_area;
uint32_t restart_index;
struct anv_vertex_binding vertex_bindings[MAX_VBS];
struct anv_descriptor_set * descriptors[MAX_SETS];
+ VkShaderStageFlags push_constant_stages;
struct anv_push_constants * push_constants[MESA_SHADER_STAGES];
struct anv_state binding_tables[MESA_SHADER_STAGES];
struct anv_state samplers[MESA_SHADER_STAGES];
enum anv_cmd_buffer_exec_mode {
ANV_CMD_BUFFER_EXEC_MODE_PRIMARY,
ANV_CMD_BUFFER_EXEC_MODE_EMIT,
+ ANV_CMD_BUFFER_EXEC_MODE_GROW_AND_EMIT,
ANV_CMD_BUFFER_EXEC_MODE_CHAIN,
ANV_CMD_BUFFER_EXEC_MODE_COPY_AND_CHAIN,
};
unsigned stage, struct anv_state *bt_state);
VkResult anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
unsigned stage, struct anv_state *state);
-uint32_t gen7_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer);
-void gen7_cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
- uint32_t stages);
+uint32_t anv_cmd_buffer_flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer);
struct anv_state anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
const void *data, uint32_t size, uint32_t alignment);
anv_cmd_buffer_new_binding_table_block(struct anv_cmd_buffer *cmd_buffer);
void gen8_cmd_buffer_emit_viewport(struct anv_cmd_buffer *cmd_buffer);
+void gen8_cmd_buffer_emit_depth_viewport(struct anv_cmd_buffer *cmd_buffer,
+ bool depth_clamp_enable);
void gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer);
-void gen7_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
-void gen75_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
-void gen8_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
-void gen9_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
-
void anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer);
void anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
const VkRenderPassBeginInfo *info);
-void gen7_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
- struct anv_subpass *subpass);
-void gen75_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
- struct anv_subpass *subpass);
-void gen8_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
- struct anv_subpass *subpass);
-void gen9_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
- struct anv_subpass *subpass);
void anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
struct anv_subpass *subpass);
-void gen7_flush_pipeline_select_3d(struct anv_cmd_buffer *cmd_buffer);
-void gen75_flush_pipeline_select_3d(struct anv_cmd_buffer *cmd_buffer);
-void gen8_flush_pipeline_select_3d(struct anv_cmd_buffer *cmd_buffer);
-void gen9_flush_pipeline_select_3d(struct anv_cmd_buffer *cmd_buffer);
-
-void gen7_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer);
-void gen75_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer);
-void gen8_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer);
-void gen9_cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer);
-
-void gen7_cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer);
-void gen75_cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer);
-void gen8_cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer);
-void gen9_cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer);
-
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage);
void anv_hash_shader(unsigned char *hash, const void *key, size_t key_size,
struct anv_shader_module *module,
const char *entrypoint,
+ const struct anv_pipeline_layout *pipeline_layout,
const VkSpecializationInfo *spec_info);
static inline gl_shader_stage
uint32_t surface_count;
uint32_t sampler_count;
uint32_t image_count;
+ uint32_t attachment_count;
struct anv_pipeline_binding * surface_to_descriptor;
struct anv_pipeline_binding * sampler_to_descriptor;
+ uint32_t * surface_to_attachment;
};
struct anv_pipeline {
struct anv_pipeline_bind_map bindings[MESA_SHADER_STAGES];
bool use_repclear;
+ bool needs_data_cache;
- struct brw_vs_prog_data vs_prog_data;
- struct brw_wm_prog_data wm_prog_data;
- struct brw_gs_prog_data gs_prog_data;
- struct brw_cs_prog_data cs_prog_data;
- bool writes_point_size;
- struct brw_stage_prog_data * prog_data[MESA_SHADER_STAGES];
- uint32_t scratch_start[MESA_SHADER_STAGES];
- uint32_t total_scratch;
+ const struct brw_stage_prog_data * prog_data[MESA_SHADER_STAGES];
struct {
- uint8_t push_size[MESA_SHADER_FRAGMENT + 1];
uint32_t start[MESA_SHADER_GEOMETRY + 1];
uint32_t size[MESA_SHADER_GEOMETRY + 1];
uint32_t entries[MESA_SHADER_GEOMETRY + 1];
+ const struct anv_l3_config * l3_config;
+ uint32_t total_size;
} urb;
VkShaderStageFlags active_stages;
struct anv_state blend_state;
uint32_t vs_simd8;
uint32_t vs_vec4;
- uint32_t ps_simd8;
- uint32_t ps_simd16;
uint32_t ps_ksp0;
- uint32_t ps_ksp2;
- uint32_t ps_grf_start0;
- uint32_t ps_grf_start2;
uint32_t gs_kernel;
uint32_t cs_simd;
bool primitive_restart;
uint32_t topology;
- uint32_t cs_thread_width_max;
uint32_t cs_right_mask;
+ bool depth_clamp_enable;
+
struct {
uint32_t sf[7];
uint32_t depth_stencil_state[3];
} gen9;
};
+static inline const struct brw_vs_prog_data *
+get_vs_prog_data(struct anv_pipeline *pipeline)
+{
+ return (const struct brw_vs_prog_data *) pipeline->prog_data[MESA_SHADER_VERTEX];
+}
+
+static inline const struct brw_gs_prog_data *
+get_gs_prog_data(struct anv_pipeline *pipeline)
+{
+ return (const struct brw_gs_prog_data *) pipeline->prog_data[MESA_SHADER_GEOMETRY];
+}
+
+static inline const struct brw_wm_prog_data *
+get_wm_prog_data(struct anv_pipeline *pipeline)
+{
+ return (const struct brw_wm_prog_data *) pipeline->prog_data[MESA_SHADER_FRAGMENT];
+}
+
+static inline const struct brw_cs_prog_data *
+get_cs_prog_data(struct anv_pipeline *pipeline)
+{
+ return (const struct brw_cs_prog_data *) pipeline->prog_data[MESA_SHADER_COMPUTE];
+}
+
struct anv_graphics_pipeline_create_info {
/**
* If non-negative, overrides the color attachment count of the pipeline's
int8_t color_attachment_count;
bool use_repclear;
- bool disable_viewport;
- bool disable_scissor;
bool disable_vs;
bool use_rectlist;
};
const VkAllocationCallbacks *alloc,
VkPipeline *pPipeline);
-VkResult
-gen7_graphics_pipeline_create(VkDevice _device,
- struct anv_pipeline_cache *cache,
- const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
- const VkAllocationCallbacks *alloc,
- VkPipeline *pPipeline);
-
-VkResult
-gen75_graphics_pipeline_create(VkDevice _device,
- struct anv_pipeline_cache *cache,
- const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
- const VkAllocationCallbacks *alloc,
- VkPipeline *pPipeline);
-
-VkResult
-gen8_graphics_pipeline_create(VkDevice _device,
- struct anv_pipeline_cache *cache,
- const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
- const VkAllocationCallbacks *alloc,
- VkPipeline *pPipeline);
-VkResult
-gen9_graphics_pipeline_create(VkDevice _device,
- struct anv_pipeline_cache *cache,
- const VkGraphicsPipelineCreateInfo *pCreateInfo,
- const struct anv_graphics_pipeline_create_info *extra,
- const VkAllocationCallbacks *alloc,
- VkPipeline *pPipeline);
-VkResult
-gen7_compute_pipeline_create(VkDevice _device,
- struct anv_pipeline_cache *cache,
- const VkComputePipelineCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc,
- VkPipeline *pPipeline);
-VkResult
-gen75_compute_pipeline_create(VkDevice _device,
- struct anv_pipeline_cache *cache,
- const VkComputePipelineCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc,
- VkPipeline *pPipeline);
-
-VkResult
-gen8_compute_pipeline_create(VkDevice _device,
- struct anv_pipeline_cache *cache,
- const VkComputePipelineCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc,
- VkPipeline *pPipeline);
-VkResult
-gen9_compute_pipeline_create(VkDevice _device,
- struct anv_pipeline_cache *cache,
- const VkComputePipelineCreateInfo *pCreateInfo,
- const VkAllocationCallbacks *alloc,
- VkPipeline *pPipeline);
-
struct anv_format_swizzle {
- unsigned r:2;
- unsigned g:2;
- unsigned b:2;
- unsigned a:2;
+ enum isl_channel_select r:4;
+ enum isl_channel_select g:4;
+ enum isl_channel_select b:4;
+ enum isl_channel_select a:4;
};
struct anv_format {
- const VkFormat vk_format;
- const char *name;
- enum isl_format isl_format; /**< RENDER_SURFACE_STATE.SurfaceFormat */
- const struct isl_format_layout *isl_layout;
+ enum isl_format isl_format:16;
struct anv_format_swizzle swizzle;
- bool has_depth;
- bool has_stencil;
};
-const struct anv_format *
-anv_format_for_vk_format(VkFormat format);
-
-enum isl_format
-anv_get_isl_format(VkFormat format, VkImageAspectFlags aspect,
- VkImageTiling tiling, struct anv_format_swizzle *swizzle);
+struct anv_format
+anv_get_format(const struct brw_device_info *devinfo, VkFormat format,
+ VkImageAspectFlags aspect, VkImageTiling tiling);
-static inline bool
-anv_format_is_color(const struct anv_format *format)
+static inline enum isl_format
+anv_get_isl_format(const struct brw_device_info *devinfo, VkFormat vk_format,
+ VkImageAspectFlags aspect, VkImageTiling tiling)
{
- return !format->has_depth && !format->has_stencil;
+ return anv_get_format(devinfo, vk_format, aspect, tiling).isl_format;
}
-static inline bool
-anv_format_is_depth_or_stencil(const struct anv_format *format)
-{
- return format->has_depth || format->has_stencil;
-}
+void
+anv_compute_urb_partition(struct anv_pipeline *pipeline);
+
+void
+anv_setup_pipeline_l3_config(struct anv_pipeline *pipeline);
/**
* Subsurface of an anv_image.
* of the actual surface formats.
*/
VkFormat vk_format;
- const struct anv_format *format;
+ VkImageAspectFlags aspects;
VkExtent3D extent;
uint32_t levels;
uint32_t array_size;
* Image subsurfaces
*
* For each foo, anv_image::foo_surface is valid if and only if
- * anv_image::format has a foo aspect.
+ * anv_image::aspects has a foo aspect.
*
* The hardware requires that the depth buffer and stencil buffer be
* separate surfaces. From Vulkan's perspective, though, depth and stencil
struct anv_device *device,
const VkImageViewCreateInfo* pCreateInfo,
struct anv_cmd_buffer *cmd_buffer,
- uint32_t offset,
VkImageUsageFlags usage_mask);
-void
-anv_fill_image_surface_state(struct anv_device *device, struct anv_state state,
- struct anv_image_view *iview,
- const VkImageViewCreateInfo *pCreateInfo,
- VkImageUsageFlagBits usage);
-void
-gen7_fill_image_surface_state(struct anv_device *device, void *state_map,
- struct anv_image_view *iview,
- const VkImageViewCreateInfo *pCreateInfo,
- VkImageUsageFlagBits usage);
-void
-gen75_fill_image_surface_state(struct anv_device *device, void *state_map,
- struct anv_image_view *iview,
- const VkImageViewCreateInfo *pCreateInfo,
- VkImageUsageFlagBits usage);
-void
-gen8_fill_image_surface_state(struct anv_device *device, void *state_map,
- struct anv_image_view *iview,
- const VkImageViewCreateInfo *pCreateInfo,
- VkImageUsageFlagBits usage);
-void
-gen9_fill_image_surface_state(struct anv_device *device, void *state_map,
- struct anv_image_view *iview,
- const VkImageViewCreateInfo *pCreateInfo,
- VkImageUsageFlagBits usage);
-
struct anv_buffer_view {
enum isl_format format; /**< VkBufferViewCreateInfo::format */
struct anv_bo *bo;
struct brw_image_param storage_image_param;
};
-const struct anv_format *
-anv_format_for_descriptor_type(VkDescriptorType type);
+void anv_buffer_view_init(struct anv_buffer_view *view,
+ struct anv_device *device,
+ const VkBufferViewCreateInfo* pCreateInfo,
+ struct anv_cmd_buffer *cmd_buffer);
+
+enum isl_format
+anv_isl_format_for_descriptor_type(VkDescriptorType type);
+
+static inline struct VkExtent3D
+anv_sanitize_image_extent(const VkImageType imageType,
+ const struct VkExtent3D imageExtent)
+{
+ switch (imageType) {
+ case VK_IMAGE_TYPE_1D:
+ return (VkExtent3D) { imageExtent.width, 1, 1 };
+ case VK_IMAGE_TYPE_2D:
+ return (VkExtent3D) { imageExtent.width, imageExtent.height, 1 };
+ case VK_IMAGE_TYPE_3D:
+ return imageExtent;
+ default:
+ unreachable("invalid image type");
+ }
+}
+
+static inline struct VkOffset3D
+anv_sanitize_image_offset(const VkImageType imageType,
+ const struct VkOffset3D imageOffset)
+{
+ switch (imageType) {
+ case VK_IMAGE_TYPE_1D:
+ return (VkOffset3D) { imageOffset.x, 0, 0 };
+ case VK_IMAGE_TYPE_2D:
+ return (VkOffset3D) { imageOffset.x, imageOffset.y, 0 };
+ case VK_IMAGE_TYPE_3D:
+ return imageOffset;
+ default:
+ unreachable("invalid image type");
+ }
+}
+
void anv_fill_buffer_surface_state(struct anv_device *device,
struct anv_state state,
uint32_t offset, uint32_t range,
uint32_t stride);
-void gen7_fill_buffer_surface_state(void *state, enum isl_format format,
- uint32_t offset, uint32_t range,
- uint32_t stride);
-void gen75_fill_buffer_surface_state(void *state, enum isl_format format,
- uint32_t offset, uint32_t range,
- uint32_t stride);
-void gen8_fill_buffer_surface_state(void *state, enum isl_format format,
- uint32_t offset, uint32_t range,
- uint32_t stride);
-void gen9_fill_buffer_surface_state(void *state, enum isl_format format,
- uint32_t offset, uint32_t range,
- uint32_t stride);
-
void anv_image_view_fill_image_param(struct anv_device *device,
struct anv_image_view *view,
struct brw_image_param *param);
};
struct anv_render_pass_attachment {
- const struct anv_format *format;
+ VkFormat format;
uint32_t samples;
VkAttachmentLoadOp load_op;
VkAttachmentLoadOp stencil_load_op;
ANV_DEFINE_STRUCT_CASTS(anv_common, VkBufferMemoryBarrier)
ANV_DEFINE_STRUCT_CASTS(anv_common, VkImageMemoryBarrier)
+/* Gen-specific function declarations */
+#ifdef genX
+# include "anv_genX.h"
+#else
+# define genX(x) gen7_##x
+# include "anv_genX.h"
+# undef genX
+# define genX(x) gen75_##x
+# include "anv_genX.h"
+# undef genX
+# define genX(x) gen8_##x
+# include "anv_genX.h"
+# undef genX
+# define genX(x) gen9_##x
+# include "anv_genX.h"
+# undef genX
+#endif
+
#ifdef __cplusplus
}
#endif