#include "si_shader.h"
#include "sid.h"
+#include "util/u_format.h"
+#include "util/u_math.h"
#include "util/u_memory.h"
+#include "util/u_suballoc.h"
#include "util/u_upload_mgr.h"
-/* NULL image and buffer descriptor.
+/* NULL image and buffer descriptor for textures (alpha = 1) and images
+ * (alpha = 0).
*
* For images, all fields must be zero except for the swizzle, which
* supports arbitrary combinations of 0s and 1s. The texture type must be
* any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
*
* This is the only reason why the buffer descriptor must be in words [4:7].
*/
-static uint32_t null_descriptor[8] = {
+static uint32_t null_texture_descriptor[8] = {
0,
0,
0,
S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
/* the rest must contain zeros, which is also used by the buffer
* descriptor */
};
+static uint32_t null_image_descriptor[8] = {
+ 0,
+ 0,
+ 0,
+ S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
+ /* the rest must contain zeros, which is also used by the buffer
+ * descriptor */
+};
+
static void si_init_descriptors(struct si_descriptors *desc,
unsigned shader_userdata_index,
unsigned element_dw_size,
- unsigned num_elements)
+ unsigned num_elements,
+ const uint32_t *null_descriptor,
+ unsigned *ce_offset)
{
int i;
desc->list = CALLOC(num_elements, element_dw_size * 4);
desc->element_dw_size = element_dw_size;
desc->num_elements = num_elements;
- desc->list_dirty = true; /* upload the list before the next draw */
+ /* Mark all elements dirty, so the whole list is uploaded before the next draw. */
+ desc->dirty_mask = num_elements == 32 ? ~0u : (1u << num_elements) - 1;
desc->shader_userdata_offset = shader_userdata_index * 4;
+ if (ce_offset) {
+ desc->ce_offset = *ce_offset;
+
+ /* make sure that ce_offset stays 32 byte aligned */
+ *ce_offset += align(element_dw_size * num_elements * 4, 32);
+ }
+
/* Initialize the array to NULL descriptors if the element size is 8. */
- if (element_dw_size % 8 == 0)
+ if (null_descriptor) {
+ assert(element_dw_size % 8 == 0);
for (i = 0; i < num_elements * element_dw_size / 8; i++)
- memcpy(desc->list + i*8, null_descriptor,
- sizeof(null_descriptor));
+ memcpy(desc->list + i * 8, null_descriptor,
+ 8 * 4);
+ }
}
static void si_release_descriptors(struct si_descriptors *desc)
FREE(desc->list);
}
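+
+/* Dump the CE RAM range [ce_offset, ce_offset + size) into a newly
+ * suballocated GPU buffer, so that shaders can read the descriptors from
+ * memory, and return that buffer together with the offset of the data.
+ */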
+static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
+ unsigned *out_offset, struct r600_resource **out_buf) {
+ uint64_t va;
+
+ u_suballocator_alloc(sctx->ce_suballocator, size, out_offset,
+ (struct pipe_resource**)out_buf);
+ if (!*out_buf)
+ return false;
+
+ va = (*out_buf)->gpu_address + *out_offset;
+
+ radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
+ radeon_emit(sctx->ce_ib, ce_offset);
+ radeon_emit(sctx->ce_ib, size / 4);
+ radeon_emit(sctx->ce_ib, va);
+ radeon_emit(sctx->ce_ib, va >> 32);
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
+ RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
+
+ sctx->ce_need_synchronization = true;
+ return true;
+}
+
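+/* Reload the most recently uploaded descriptor list back into CE RAM.
+ * This is needed after a flush, because CE RAM contents are not preserved
+ * across command streams.
+ */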
+static void si_ce_reinitialize_descriptors(struct si_context *sctx,
+ struct si_descriptors *desc)
+{
+ if (desc->buffer) {
+ struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
+ unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
+ uint64_t va = buffer->gpu_address + desc->buffer_offset;
+ struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
+
+ if (!ib)
+ ib = sctx->ce_ib;
+
+ list_size = align(list_size, 32);
+
+ radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
+ radeon_emit(ib, va);
+ radeon_emit(ib, va >> 32);
+ radeon_emit(ib, list_size / 4);
+ radeon_emit(ib, desc->ce_offset);
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
+ RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
+ }
+ desc->ce_ram_dirty = false;
+}
+
+void si_ce_reinitialize_all_descriptors(struct si_context *sctx)
+{
+ for (int i = 0; i < SI_NUM_SHADERS; i++) {
+ si_ce_reinitialize_descriptors(sctx, &sctx->const_buffers[i].desc);
+ si_ce_reinitialize_descriptors(sctx, &sctx->shader_buffers[i].desc);
+ si_ce_reinitialize_descriptors(sctx, &sctx->samplers[i].views.desc);
+ si_ce_reinitialize_descriptors(sctx, &sctx->images[i].desc);
+ }
+ si_ce_reinitialize_descriptors(sctx, &sctx->rw_buffers.desc);
+}
+
+void si_ce_enable_loads(struct radeon_winsys_cs *ib)
+{
+ radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
+ radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
+ CONTEXT_CONTROL_LOAD_CE_RAM(1));
+ radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
+}
+
static bool si_upload_descriptors(struct si_context *sctx,
- struct si_descriptors *desc)
+ struct si_descriptors *desc,
+ struct r600_atom * atom)
{
unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
- void *ptr;
- if (!desc->list_dirty)
+ if (!desc->dirty_mask)
return true;
- u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
- &desc->buffer_offset,
- (struct pipe_resource**)&desc->buffer, &ptr);
- if (!desc->buffer)
- return false; /* skip the draw call */
+ if (sctx->ce_ib) {
+ uint32_t const* list = (uint32_t const*)desc->list;
- util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
+ if (desc->ce_ram_dirty)
+ si_ce_reinitialize_descriptors(sctx, desc);
+
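+ /* Write only the dirty ranges of the descriptor list into CE RAM. */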
+ while (desc->dirty_mask) {
+ int begin, count;
+ u_bit_scan_consecutive_range(&desc->dirty_mask, &begin,
+ &count);
+
+ begin *= desc->element_dw_size;
+ count *= desc->element_dw_size;
+
+ radeon_emit(sctx->ce_ib,
+ PKT3(PKT3_WRITE_CONST_RAM, count, 0));
+ radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
+ radeon_emit_array(sctx->ce_ib, list + begin, count);
+ }
+
+ if (!si_ce_upload(sctx, desc->ce_offset, list_size,
+ &desc->buffer_offset, &desc->buffer))
+ return false;
+ } else {
+ void *ptr;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
- RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
+ u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
+ &desc->buffer_offset,
+ (struct pipe_resource**)&desc->buffer, &ptr);
+ if (!desc->buffer)
+ return false; /* skip the draw call */
- desc->list_dirty = false;
+ util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
+ RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
+ }
desc->pointer_dirty = true;
- si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
+ desc->dirty_mask = 0;
+
+ if (atom)
+ si_mark_atom_dirty(sctx, atom);
+
return true;
}
{
int i;
- for (i = 0; i < Elements(views->views); i++) {
+ for (i = 0; i < ARRAY_SIZE(views->views); i++) {
pipe_sampler_view_reference(&views->views[i], NULL);
}
si_release_descriptors(&views->desc);
}
static void si_sampler_view_add_buffer(struct si_context *sctx,
- struct pipe_resource *resource)
+ struct pipe_resource *resource,
+ enum radeon_bo_usage usage)
{
struct r600_resource *rres = (struct r600_resource*)resource;
if (!resource)
return;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres,
- RADEON_USAGE_READ,
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres, usage,
r600_get_sampler_view_priority(rres));
}
static void si_sampler_views_begin_new_cs(struct si_context *sctx,
struct si_sampler_views *views)
{
- uint64_t mask = views->desc.enabled_mask;
+ unsigned mask = views->desc.enabled_mask;
/* Add buffers to the CS. */
while (mask) {
- int i = u_bit_scan64(&mask);
+ int i = u_bit_scan(&mask);
- si_sampler_view_add_buffer(sctx, views->views[i]->texture);
+ si_sampler_view_add_buffer(sctx, views->views[i]->texture,
+ RADEON_USAGE_READ);
}
+ views->desc.ce_ram_dirty = true;
+
if (!views->desc.buffer)
return;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
if (view) {
struct r600_texture *rtex = (struct r600_texture *)view->texture;
- si_sampler_view_add_buffer(sctx, view->texture);
+ si_sampler_view_add_buffer(sctx, view->texture,
+ RADEON_USAGE_READ);
pipe_sampler_view_reference(&views->views[slot], view);
memcpy(views->desc.list + slot * 16, rview->state, 8*4);
} else {
/* Disable FMASK and bind sampler state in [12:15]. */
memcpy(views->desc.list + slot*16 + 8,
- null_descriptor, 4*4);
+ null_texture_descriptor, 4*4);
if (views->sampler_states[slot])
memcpy(views->desc.list + slot*16 + 12,
views->sampler_states[slot], 4*4);
}
- views->desc.enabled_mask |= 1llu << slot;
+ views->desc.enabled_mask |= 1u << slot;
} else {
pipe_sampler_view_reference(&views->views[slot], NULL);
- memcpy(views->desc.list + slot*16, null_descriptor, 8*4);
+ memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
/* Only clear the lower dwords of FMASK. */
- memcpy(views->desc.list + slot*16 + 8, null_descriptor, 4*4);
- views->desc.enabled_mask &= ~(1llu << slot);
+ memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
+ views->desc.enabled_mask &= ~(1u << slot);
}
- views->desc.list_dirty = true;
+ views->desc.dirty_mask |= 1u << slot;
}
static bool is_compressed_colortex(struct r600_texture *rtex)
unsigned slot = start + i;
if (!views || !views[i]) {
- samplers->depth_texture_mask &= ~(1 << slot);
- samplers->compressed_colortex_mask &= ~(1 << slot);
+ samplers->depth_texture_mask &= ~(1u << slot);
+ samplers->compressed_colortex_mask &= ~(1u << slot);
si_set_sampler_view(sctx, &samplers->views, slot, NULL);
continue;
}
(struct r600_texture*)views[i]->texture;
if (rtex->is_depth && !rtex->is_flushing_texture) {
- samplers->depth_texture_mask |= 1 << slot;
+ samplers->depth_texture_mask |= 1u << slot;
} else {
- samplers->depth_texture_mask &= ~(1 << slot);
+ samplers->depth_texture_mask &= ~(1u << slot);
}
if (is_compressed_colortex(rtex)) {
- samplers->compressed_colortex_mask |= 1 << slot;
+ samplers->compressed_colortex_mask |= 1u << slot;
} else {
- samplers->compressed_colortex_mask &= ~(1 << slot);
+ samplers->compressed_colortex_mask &= ~(1u << slot);
}
} else {
- samplers->depth_texture_mask &= ~(1 << slot);
- samplers->compressed_colortex_mask &= ~(1 << slot);
+ samplers->depth_texture_mask &= ~(1u << slot);
+ samplers->compressed_colortex_mask &= ~(1u << slot);
}
}
}
static void
si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
{
- uint64_t mask = samplers->views.desc.enabled_mask;
+ unsigned mask = samplers->views.desc.enabled_mask;
while (mask) {
- int i = u_bit_scan64(&mask);
+ int i = u_bit_scan(&mask);
struct pipe_resource *res = samplers->views.views[i]->texture;
if (res && res->target != PIPE_BUFFER) {
struct r600_texture *rtex = (struct r600_texture *)res;
if (is_compressed_colortex(rtex)) {
- samplers->compressed_colortex_mask |= 1 << i;
+ samplers->compressed_colortex_mask |= 1u << i;
+ } else {
+ samplers->compressed_colortex_mask &= ~(1u << i);
+ }
+ }
+ }
+}
+
+/* IMAGE VIEWS */
+
+static void
+si_release_image_views(struct si_images_info *images)
+{
+ unsigned i;
+
+ for (i = 0; i < SI_NUM_IMAGES; ++i) {
+ struct pipe_image_view *view = &images->views[i];
+
+ pipe_resource_reference(&view->resource, NULL);
+ }
+
+ si_release_descriptors(&images->desc);
+}
+
+static void
+si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
+{
+ uint mask = images->desc.enabled_mask;
+
+ /* Add buffers to the CS. */
+ while (mask) {
+ int i = u_bit_scan(&mask);
+ struct pipe_image_view *view = &images->views[i];
+
+ assert(view->resource);
+
+ si_sampler_view_add_buffer(sctx, view->resource,
+ RADEON_USAGE_READWRITE);
+ }
+
+ images->desc.ce_ram_dirty = true;
+
+ if (images->desc.buffer) {
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ images->desc.buffer,
+ RADEON_USAGE_READ,
+ RADEON_PRIO_DESCRIPTORS);
+ }
+}
+
+static void
+si_disable_shader_image(struct si_images_info *images, unsigned slot)
+{
+ if (images->desc.enabled_mask & (1u << slot)) {
+ pipe_resource_reference(&images->views[slot].resource, NULL);
+ images->compressed_colortex_mask &= ~(1 << slot);
+
+ memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
+ images->desc.enabled_mask &= ~(1u << slot);
+ images->desc.dirty_mask |= 1u << slot;
+ }
+}
+
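+/* A writable buffer image may be stored to anywhere within the bound range,
+ * so mark that whole range as containing valid data.
+ */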
+static void
+si_mark_image_range_valid(struct pipe_image_view *view)
+{
+ struct r600_resource *res = (struct r600_resource *)view->resource;
+ const struct util_format_description *desc;
+ unsigned stride;
+
+ assert(res && res->b.b.target == PIPE_BUFFER);
+
+ desc = util_format_description(view->format);
+ stride = desc->block.bits / 8;
+
+ util_range_add(&res->valid_buffer_range,
+ stride * (view->u.buf.first_element),
+ stride * (view->u.buf.last_element + 1));
+}
+
+static void
+si_set_shader_images(struct pipe_context *pipe, unsigned shader,
+ unsigned start_slot, unsigned count,
+ struct pipe_image_view *views)
+{
+ struct si_context *ctx = (struct si_context *)pipe;
+ struct si_screen *screen = ctx->screen;
+ struct si_images_info *images = &ctx->images[shader];
+ unsigned i, slot;
+
+ assert(shader < SI_NUM_SHADERS);
+
+ if (!count)
+ return;
+
+ assert(start_slot + count <= SI_NUM_IMAGES);
+
+ for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
+ struct r600_resource *res;
+
+ if (!views || !views[i].resource) {
+ si_disable_shader_image(images, slot);
+ continue;
+ }
+
+ res = (struct r600_resource *)views[i].resource;
+ util_copy_image_view(&images->views[slot], &views[i]);
+
+ si_sampler_view_add_buffer(ctx, &res->b.b,
+ RADEON_USAGE_READWRITE);
+
+ if (res->b.b.target == PIPE_BUFFER) {
+ if (views[i].access & PIPE_IMAGE_ACCESS_WRITE)
+ si_mark_image_range_valid(&views[i]);
+
+ si_make_buffer_descriptor(screen, res,
+ views[i].format,
+ views[i].u.buf.first_element,
+ views[i].u.buf.last_element,
+ images->desc.list + slot * 8);
+ images->compressed_colortex_mask &= ~(1 << slot);
+ } else {
+ static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
+ struct r600_texture *tex = (struct r600_texture *)res;
+ unsigned level;
+ unsigned width, height, depth;
+
+ assert(!tex->is_depth);
+ assert(tex->fmask.size == 0);
+
+ if (tex->dcc_offset &&
+ views[i].access & PIPE_IMAGE_ACCESS_WRITE)
+ r600_texture_disable_dcc(&screen->b, tex);
+
+ if (is_compressed_colortex(tex)) {
+ images->compressed_colortex_mask |= 1 << slot;
+ } else {
+ images->compressed_colortex_mask &= ~(1 << slot);
+ }
+
+ /* Always force the base level to the selected level.
+ *
+ * This is required for 3D textures, where otherwise
+ * selecting a single slice for non-layered bindings
+ * fails. It doesn't hurt the other targets.
+ */
+ level = views[i].u.tex.level;
+ width = u_minify(res->b.b.width0, level);
+ height = u_minify(res->b.b.height0, level);
+ depth = u_minify(res->b.b.depth0, level);
+
+ si_make_texture_descriptor(screen, tex, false, res->b.b.target,
+ views[i].format, swizzle,
+ level, 0, 0,
+ views[i].u.tex.first_layer, views[i].u.tex.last_layer,
+ width, height, depth,
+ images->desc.list + slot * 8,
+ NULL);
+ }
+
+ images->desc.enabled_mask |= 1u << slot;
+ images->desc.dirty_mask |= 1u << slot;
+ }
+}
+
+static void
+si_images_update_compressed_colortex_mask(struct si_images_info *images)
+{
+ unsigned mask = images->desc.enabled_mask;
+
+ while (mask) {
+ int i = u_bit_scan(&mask);
+ struct pipe_resource *res = images->views[i].resource;
+
+ if (res && res->target != PIPE_BUFFER) {
+ struct r600_texture *rtex = (struct r600_texture *)res;
+
+ if (is_compressed_colortex(rtex)) {
+ images->compressed_colortex_mask |= 1 << i;
} else {
- samplers->compressed_colortex_mask &= ~(1 << i);
+ images->compressed_colortex_mask &= ~(1 << i);
}
}
}
continue;
memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
- desc->list_dirty = true;
+ desc->dirty_mask |= 1u << slot;
}
}
unsigned num_buffers,
unsigned shader_userdata_index,
enum radeon_bo_usage shader_usage,
- enum radeon_bo_priority priority)
+ enum radeon_bo_priority priority,
+ unsigned *ce_offset)
{
buffers->shader_usage = shader_usage;
buffers->priority = priority;
buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
- num_buffers);
+ num_buffers, NULL, ce_offset);
}
static void si_release_buffer_resources(struct si_buffer_resources *buffers)
static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
struct si_buffer_resources *buffers)
{
- uint64_t mask = buffers->desc.enabled_mask;
+ unsigned mask = buffers->desc.enabled_mask;
/* Add buffers to the CS. */
while (mask) {
- int i = u_bit_scan64(&mask);
+ int i = u_bit_scan(&mask);
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffers->buffers[i],
buffers->shader_usage, buffers->priority);
}
+ buffers->desc.ce_ram_dirty = true;
+
if (!buffers->desc.buffer)
return;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
for (i = 0; i < count; i++) {
int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
- if (vb >= Elements(sctx->vertex_buffer))
+ if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
continue;
if (!sctx->vertex_buffer[vb].buffer)
continue;
unsigned offset;
uint32_t *desc = &ptr[i*4];
- if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
+ if (ve->vertex_buffer_index >= ARRAY_SIZE(sctx->vertex_buffer)) {
memset(desc, 0, 16);
continue;
}
u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
(struct pipe_resource**)rbuffer, &tmp);
- if (rbuffer)
+ if (*rbuffer)
util_memcpy_cpu_to_le32(tmp, ptr, size);
}
-static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
- struct pipe_constant_buffer *input)
+void si_set_constant_buffer(struct si_context *sctx,
+ struct si_buffer_resources *buffers,
+ uint slot, struct pipe_constant_buffer *input)
{
- struct si_context *sctx = (struct si_context *)ctx;
- struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
-
- if (shader >= SI_NUM_SHADERS)
- return;
-
assert(slot < buffers->desc.num_elements);
pipe_resource_reference(&buffers->buffers[slot], NULL);
input->buffer_size, &buffer_offset);
if (!buffer) {
/* Just unbind on failure. */
- si_set_constant_buffer(ctx, shader, slot, NULL);
+ si_set_constant_buffer(sctx, buffers, slot, NULL);
return;
}
va = r600_resource(buffer)->gpu_address + buffer_offset;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
- buffers->desc.enabled_mask |= 1llu << slot;
+ buffers->desc.enabled_mask |= 1u << slot;
} else {
/* Clear the descriptor. */
memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
- buffers->desc.enabled_mask &= ~(1llu << slot);
+ buffers->desc.enabled_mask &= ~(1u << slot);
}
- buffers->desc.list_dirty = true;
+ buffers->desc.dirty_mask |= 1u << slot;
+}
+
+static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
+ uint shader, uint slot,
+ struct pipe_constant_buffer *input)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+
+ if (shader >= SI_NUM_SHADERS)
+ return;
+
+ si_set_constant_buffer(sctx, &sctx->const_buffers[shader], slot, input);
+}
+
+/* SHADER BUFFERS */
+
+static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
+ unsigned start_slot, unsigned count,
+ struct pipe_shader_buffer *sbuffers)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
+ unsigned i;
+
+ assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
+
+ for (i = 0; i < count; ++i) {
+ struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
+ struct r600_resource *buf;
+ unsigned slot = start_slot + i;
+ uint32_t *desc = buffers->desc.list + slot * 4;
+ uint64_t va;
+
+ if (!sbuffer || !sbuffer->buffer) {
+ pipe_resource_reference(&buffers->buffers[slot], NULL);
+ memset(desc, 0, sizeof(uint32_t) * 4);
+ buffers->desc.enabled_mask &= ~(1u << slot);
+ buffers->desc.dirty_mask |= 1u << slot;
+ continue;
+ }
+
+ buf = (struct r600_resource *)sbuffer->buffer;
+ va = buf->gpu_address + sbuffer->buffer_offset;
+
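+ /* Build a raw buffer descriptor: 64-bit base address, zero stride,
+ * size in bytes, and a 32-bit data format. */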
+ desc[0] = va;
+ desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
+ S_008F04_STRIDE(0);
+ desc[2] = sbuffer->buffer_size;
+ desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
+ S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
+ S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
+ S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
+ S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
+ S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
+
+ pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buf,
+ buffers->shader_usage, buffers->priority);
+ buffers->desc.enabled_mask |= 1u << slot;
+ buffers->desc.dirty_mask |= 1u << slot;
+ }
}
/* RING BUFFERS */
-void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
+void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
struct pipe_resource *buffer,
unsigned stride, unsigned num_records,
bool add_tid, bool swizzle,
unsigned element_size, unsigned index_stride, uint64_t offset)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
-
- if (shader >= SI_NUM_SHADERS)
- return;
+ struct si_buffer_resources *buffers = &sctx->rw_buffers;
/* The stride field in the resource descriptor has 14 bits */
assert(stride < (1 << 14));
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
- buffers->desc.enabled_mask |= 1llu << slot;
+ buffers->desc.enabled_mask |= 1u << slot;
} else {
/* Clear the descriptor. */
memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
- buffers->desc.enabled_mask &= ~(1llu << slot);
+ buffers->desc.enabled_mask &= ~(1u << slot);
}
- buffers->desc.list_dirty = true;
+ buffers->desc.dirty_mask |= 1u << slot;
}
/* STREAMOUT BUFFERS */
const unsigned *offsets)
{
struct si_context *sctx = (struct si_context *)ctx;
- struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
+ struct si_buffer_resources *buffers = &sctx->rw_buffers;
unsigned old_num_targets = sctx->b.streamout.num_targets;
unsigned i, bufidx;
* and most other clients can use TC L2 as well, we don't need
* to flush it.
*
- * The only case which requires flushing it is VGT DMA index
- * fetching, which is a rare case. Thus, flag the TC L2
- * dirtiness in the resource and handle it when index fetching
- * is used.
+ * The only cases which require flushing it are VGT DMA index
+ * fetching (on <= CIK) and indirect draw data, which are rare
+ * cases. Thus, flag the TC L2 dirtiness in the resource and
+ * handle it at draw call time.
*/
for (i = 0; i < sctx->b.streamout.num_targets; i++)
if (sctx->b.streamout.targets[i])
SI_CONTEXT_VS_PARTIAL_FLUSH;
}
+ /* All readers of the streamout targets need to be finished before we can
+ * start writing to the targets.
+ */
+ if (num_targets)
+ sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
+ SI_CONTEXT_CS_PARTIAL_FLUSH;
+
/* Streamout buffers must be bound in 2 places:
* 1) in VGT by setting the VGT_STRMOUT registers
* 2) as shader resources
/* Set the shader resources.*/
for (i = 0; i < num_targets; i++) {
- bufidx = SI_SO_BUF_OFFSET + i;
+ bufidx = SI_VS_STREAMOUT_BUF0 + i;
if (targets[i]) {
struct pipe_resource *buffer = targets[i]->buffer;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
(struct r600_resource*)buffer,
buffers->shader_usage, buffers->priority);
- buffers->desc.enabled_mask |= 1llu << bufidx;
+ buffers->desc.enabled_mask |= 1u << bufidx;
} else {
/* Clear the descriptor and unset the resource. */
memset(buffers->desc.list + bufidx*4, 0,
sizeof(uint32_t) * 4);
pipe_resource_reference(&buffers->buffers[bufidx],
NULL);
- buffers->desc.enabled_mask &= ~(1llu << bufidx);
+ buffers->desc.enabled_mask &= ~(1u << bufidx);
}
+ buffers->desc.dirty_mask |= 1u << bufidx;
}
for (; i < old_num_targets; i++) {
- bufidx = SI_SO_BUF_OFFSET + i;
+ bufidx = SI_VS_STREAMOUT_BUF0 + i;
/* Clear the descriptor and unset the resource. */
memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
pipe_resource_reference(&buffers->buffers[bufidx], NULL);
- buffers->desc.enabled_mask &= ~(1llu << bufidx);
+ buffers->desc.enabled_mask &= ~(1u << bufidx);
+ buffers->desc.dirty_mask |= 1u << bufidx;
}
-
- buffers->desc.list_dirty = true;
}
static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
S_008F04_BASE_ADDRESS_HI(va >> 32);
}
+/* INTERNAL CONST BUFFERS */
+
+static void si_set_polygon_stipple(struct pipe_context *ctx,
+ const struct pipe_poly_stipple *state)
+{
+ struct si_context *sctx = (struct si_context *)ctx;
+ struct pipe_constant_buffer cb = {};
+ unsigned stipple[32];
+ int i;
+
+ for (i = 0; i < 32; i++)
+ stipple[i] = util_bitreverse(state->stipple[i]);
+
+ cb.user_buffer = stipple;
+ cb.buffer_size = sizeof(stipple);
+
+ si_set_constant_buffer(sctx, &sctx->rw_buffers,
+ SI_PS_CONST_POLY_STIPPLE, &cb);
+}
+
/* TEXTURE METADATA ENABLE/DISABLE */
/* CMASK can be enabled (for fast clear) and disabled (for texture export)
{
for (int i = 0; i < SI_NUM_SHADERS; ++i) {
si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
+ si_images_update_compressed_colortex_mask(&sctx->images[i]);
}
}
/* BUFFER DISCARD/INVALIDATION */
+/** Reset descriptors of buffer resources after \p buf has been invalidated. */
+static void si_reset_buffer_resources(struct si_context *sctx,
+ struct si_buffer_resources *buffers,
+ struct pipe_resource *buf,
+ uint64_t old_va)
+{
+ unsigned mask = buffers->desc.enabled_mask;
+
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+ if (buffers->buffers[i] == buf) {
+ si_desc_reset_buffer_offset(&sctx->b.b,
+ buffers->desc.list + i*4,
+ old_va, buf);
+ buffers->desc.dirty_mask |= 1u << i;
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ (struct r600_resource *)buf,
+ buffers->shader_usage,
+ buffers->priority);
+ }
+ }
+}
+
/* Reallocate a buffer and update all resource bindings where the buffer is
* bound.
*
/* Reallocate the buffer in the same pipe_resource. */
r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
- alignment, TRUE);
+ alignment);
/* We changed the buffer, now we need to bind it where the old one
* was bound. This consists of 2 things:
for (i = 0; i < num_elems; i++) {
int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
- if (vb >= Elements(sctx->vertex_buffer))
+ if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
continue;
if (!sctx->vertex_buffer[vb].buffer)
continue;
}
}
- /* Read/Write buffers. */
- for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
- struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
- uint64_t mask = buffers->desc.enabled_mask;
+ /* Streamout buffers. (other internal buffers can't be invalidated) */
+ for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
+ struct si_buffer_resources *buffers = &sctx->rw_buffers;
- while (mask) {
- i = u_bit_scan64(&mask);
- if (buffers->buffers[i] == buf) {
- si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
- old_va, buf);
- buffers->desc.list_dirty = true;
+ if (buffers->buffers[i] != buf)
+ continue;
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- rbuffer, buffers->shader_usage,
- buffers->priority);
-
- if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
- /* Update the streamout state. */
- if (sctx->b.streamout.begin_emitted) {
- r600_emit_streamout_end(&sctx->b);
- }
- sctx->b.streamout.append_bitmask =
- sctx->b.streamout.enabled_mask;
- r600_streamout_buffers_dirty(&sctx->b);
- }
- }
- }
+ si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
+ old_va, buf);
+ buffers->desc.dirty_mask |= 1u << i;
+
+ radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
+ rbuffer, buffers->shader_usage,
+ buffers->priority);
+
+ /* Update the streamout state. */
+ if (sctx->b.streamout.begin_emitted)
+ r600_emit_streamout_end(&sctx->b);
+ sctx->b.streamout.append_bitmask =
+ sctx->b.streamout.enabled_mask;
+ r600_streamout_buffers_dirty(&sctx->b);
}
- /* Constant buffers. */
+ /* Constant and shader buffers. */
for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
- struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
- uint64_t mask = buffers->desc.enabled_mask;
-
- while (mask) {
- unsigned i = u_bit_scan64(&mask);
- if (buffers->buffers[i] == buf) {
- si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
- old_va, buf);
- buffers->desc.list_dirty = true;
-
- radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
- rbuffer, buffers->shader_usage,
- buffers->priority);
- }
- }
+ si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
+ buf, old_va);
+ si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
+ buf, old_va);
}
/* Texture buffers - update virtual addresses in sampler view descriptors. */
/* Texture buffers - update bindings. */
for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
struct si_sampler_views *views = &sctx->samplers[shader].views;
- uint64_t mask = views->desc.enabled_mask;
+ unsigned mask = views->desc.enabled_mask;
while (mask) {
- unsigned i = u_bit_scan64(&mask);
+ unsigned i = u_bit_scan(&mask);
if (views->views[i]->texture == buf) {
si_desc_reset_buffer_offset(ctx,
views->desc.list +
i * 16 + 4,
old_va, buf);
- views->desc.list_dirty = true;
+ views->desc.dirty_mask |= 1u << i;
radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
rbuffer, RADEON_USAGE_READ,
}
}
}
+
+ /* Shader images */
+ for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
+ struct si_images_info *images = &sctx->images[shader];
+ unsigned mask = images->desc.enabled_mask;
+
+ while (mask) {
+ unsigned i = u_bit_scan(&mask);
+
+ if (images->views[i].resource == buf) {
+ if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
+ si_mark_image_range_valid(&images->views[i]);
+
+ si_desc_reset_buffer_offset(
+ ctx, images->desc.list + i * 8 + 4,
+ old_va, buf);
+ images->desc.dirty_mask |= 1u << i;
+
+ radeon_add_to_buffer_list(
+ &sctx->b, &sctx->b.gfx, rbuffer,
+ RADEON_USAGE_READWRITE,
+ RADEON_PRIO_SAMPLER_BUFFER);
+ }
+ }
+ }
}
/* SHADER USER DATA */
unsigned shader)
{
sctx->const_buffers[shader].desc.pointer_dirty = true;
- sctx->rw_buffers[shader].desc.pointer_dirty = true;
+ sctx->shader_buffers[shader].desc.pointer_dirty = true;
sctx->samplers[shader].views.desc.pointer_dirty = true;
+ sctx->images[shader].desc.pointer_dirty = true;
if (shader == PIPE_SHADER_VERTEX)
sctx->vertex_buffers.pointer_dirty = true;
for (i = 0; i < SI_NUM_SHADERS; i++) {
si_mark_shader_pointers_dirty(sctx, i);
}
+ sctx->rw_buffers.desc.pointer_dirty = true;
}
/* Set a base register address for user data constants in the given shader.
desc->pointer_dirty = keep_dirty;
}
-void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
+void si_emit_graphics_shader_userdata(struct si_context *sctx,
+ struct r600_atom *atom)
{
unsigned i;
uint32_t *sh_base = sctx->shader_userdata.sh_base;
- if (sctx->gs_shader.cso) {
- /* The VS copy shader needs these for clipping, streamout, and rings. */
- unsigned vs_base = R_00B130_SPI_SHADER_USER_DATA_VS_0;
- unsigned i = PIPE_SHADER_VERTEX;
-
- si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, vs_base, true);
- si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, vs_base, true);
-
- if (sctx->tes_shader.cso) {
- /* The TESSEVAL shader needs this for the ESGS ring buffer. */
- si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc,
- R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
- }
- } else if (sctx->tes_shader.cso) {
- /* The TESSEVAL shader needs this for streamout. */
- si_emit_shader_pointer(sctx, &sctx->rw_buffers[PIPE_SHADER_VERTEX].desc,
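+ /* The rw_buffers list (rings, streamout buffers, polygon stipple) is
+ * shared by all graphics stages, so emit its pointer for every
+ * stage's user-data base. */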
+ if (sctx->rw_buffers.desc.pointer_dirty) {
+ si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
+ R_00B030_SPI_SHADER_USER_DATA_PS_0, true);
+ si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
+ si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
+ R_00B230_SPI_SHADER_USER_DATA_GS_0, true);
+ si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
+ R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
+ si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
+ R_00B430_SPI_SHADER_USER_DATA_HS_0, true);
+ sctx->rw_buffers.desc.pointer_dirty = false;
}
- for (i = 0; i < SI_NUM_SHADERS; i++) {
+ for (i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
unsigned base = sh_base[i];
if (!base)
continue;
- if (i != PIPE_SHADER_TESS_EVAL)
- si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, base, false);
-
si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
+ si_emit_shader_pointer(sctx, &sctx->shader_buffers[i].desc, base, false);
si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
+ si_emit_shader_pointer(sctx, &sctx->images[i].desc, base, false);
}
si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
}
+void si_emit_compute_shader_userdata(struct si_context *sctx)
+{
+ unsigned base = R_00B900_COMPUTE_USER_DATA_0;
+
+ si_emit_shader_pointer(sctx, &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc,
+ base, false);
+ si_emit_shader_pointer(sctx, &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc,
+ base, false);
+ si_emit_shader_pointer(sctx, &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc,
+ base, false);
+ si_emit_shader_pointer(sctx, &sctx->images[PIPE_SHADER_COMPUTE].desc,
+ base, false);
+}
+
/* INIT/DEINIT/UPLOAD */
void si_init_all_descriptors(struct si_context *sctx)
{
int i;
+ unsigned ce_offset = 0;
for (i = 0; i < SI_NUM_SHADERS; i++) {
si_init_buffer_resources(&sctx->const_buffers[i],
SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
- RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
- si_init_buffer_resources(&sctx->rw_buffers[i],
- SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
- RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT);
+ RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
+ &ce_offset);
+ si_init_buffer_resources(&sctx->shader_buffers[i],
+ SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
+ RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
+ &ce_offset);
si_init_descriptors(&sctx->samplers[i].views.desc,
- SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS);
+ SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
+ null_texture_descriptor, &ce_offset);
+
+ si_init_descriptors(&sctx->images[i].desc,
+ SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
+ null_image_descriptor, &ce_offset);
}
+ si_init_buffer_resources(&sctx->rw_buffers,
+ SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
+ RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT,
+ &ce_offset);
si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
- 4, SI_NUM_VERTEX_BUFFERS);
+ 4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
+
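+ /* All descriptor lists suballocated from CE RAM must fit within its 32K size. */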
+ assert(ce_offset <= 32768);
/* Set pipe_context functions. */
sctx->b.b.bind_sampler_states = si_bind_sampler_states;
- sctx->b.b.set_constant_buffer = si_set_constant_buffer;
+ sctx->b.b.set_shader_images = si_set_shader_images;
+ sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
+ sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
+ sctx->b.b.set_shader_buffers = si_set_shader_buffers;
sctx->b.b.set_sampler_views = si_set_sampler_views;
sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
sctx->b.invalidate_buffer = si_invalidate_buffer;
/* Shader user data. */
si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
- si_emit_shader_userdata);
+ si_emit_graphics_shader_userdata);
/* Set default and immutable mappings. */
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
}
-bool si_upload_shader_descriptors(struct si_context *sctx)
+bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
{
int i;
for (i = 0; i < SI_NUM_SHADERS; i++) {
- if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc) ||
- !si_upload_descriptors(sctx, &sctx->rw_buffers[i].desc) ||
- !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc))
+ if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc,
+ &sctx->shader_userdata.atom) ||
+ !si_upload_descriptors(sctx, &sctx->shader_buffers[i].desc,
+ &sctx->shader_userdata.atom) ||
+ !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc,
+ &sctx->shader_userdata.atom) ||
+ !si_upload_descriptors(sctx, &sctx->images[i].desc,
+ &sctx->shader_userdata.atom))
return false;
}
- return si_upload_vertex_buffer_descriptors(sctx);
+ return si_upload_descriptors(sctx, &sctx->rw_buffers.desc,
+ &sctx->shader_userdata.atom) &&
+ si_upload_vertex_buffer_descriptors(sctx);
+}
+
+bool si_upload_compute_shader_descriptors(struct si_context *sctx)
+{
+ /* Does not update rw_buffers as that is not needed for compute shaders
+ * and the input buffer uses the same SGPRs anyway.
+ */
+ return si_upload_descriptors(sctx,
+ &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
+ si_upload_descriptors(sctx,
+ &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
+ si_upload_descriptors(sctx,
+ &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc, NULL) &&
+ si_upload_descriptors(sctx,
+ &sctx->images[PIPE_SHADER_COMPUTE].desc, NULL);
}
void si_release_all_descriptors(struct si_context *sctx)
for (i = 0; i < SI_NUM_SHADERS; i++) {
si_release_buffer_resources(&sctx->const_buffers[i]);
- si_release_buffer_resources(&sctx->rw_buffers[i]);
+ si_release_buffer_resources(&sctx->shader_buffers[i]);
si_release_sampler_views(&sctx->samplers[i].views);
+ si_release_image_views(&sctx->images[i]);
}
+ si_release_buffer_resources(&sctx->rw_buffers);
si_release_descriptors(&sctx->vertex_buffers);
}
for (i = 0; i < SI_NUM_SHADERS; i++) {
si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
- si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
+ si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
+ si_image_views_begin_new_cs(sctx, &sctx->images[i]);
}
+ si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
si_vertex_buffers_begin_new_cs(sctx);
si_shader_userdata_begin_new_cs(sctx);
}