#include "main/state.h"
#include "main/enums.h"
#include "main/macros.h"
+#include "main/transformfeedback.h"
#include "tnl/tnl.h"
#include "vbo/vbo_context.h"
#include "swrast/swrast.h"
}
+/**
+ * The hardware is capable of removing dangling vertices on its own; however,
+ * prior to Gen6, we sometimes convert quads into trifans (and quad strips
+ * into tristrips), since pre-Gen6 hardware requires a GS to render quads.
+ * This function manually trims dangling vertices from a draw call involving
+ * quads so that those dangling vertices won't get drawn when we convert to
+ * trifans/tristrips.
+ */
static GLuint trim(GLenum prim, GLuint length)
{
if (prim == GL_QUAD_STRIP)
start_vertex_location += brw->vb.start_vertex_bias;
}
- verts_per_instance = trim(prim->mode, prim->count);
+ /* We only need to trim the primitive count on pre-Gen6. */
+ if (intel->gen < 6)
+ verts_per_instance = trim(prim->mode, prim->count);
+ else
+ verts_per_instance = prim->count;
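
For context, the arithmetic this kind of trimming performs (the body of trim() lies outside this hunk) looks roughly like the sketch below; the helper name is illustrative, not part of the patch. For example, a GL_QUADS draw of 7 vertices trims to 4, and a GL_QUAD_STRIP draw of 5 trims to 4.

static GLuint trim_sketch(GLenum prim, GLuint length)
{
   /* Quad strips need at least 4 vertices and grow in steps of 2. */
   if (prim == GL_QUAD_STRIP)
      return length > 3 ? (length - length % 2) : 0;
   /* Quads come in groups of exactly 4 vertices. */
   else if (prim == GL_QUADS)
      return length - length % 4;
   else
      return length;
}
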
/* If nothing to emit, just return. */
if (verts_per_instance == 0)
OUT_BATCH(verts_per_instance);
OUT_BATCH(start_vertex_location);
OUT_BATCH(prim->num_instances);
- OUT_BATCH(0); // start instance location
+ OUT_BATCH(prim->base_instance);
OUT_BATCH(base_vertex_location);
ADVANCE_BATCH();
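
The dword replaced here is the one the old code's comment called the start instance location; instead of a hard-coded 0 it now carries the draw call's base instance (GL_ARB_base_instance). A hypothetical usage sketch of the GL side, with placeholder names, is:

static void
draw_with_base_instance(GLsizei num_indices, GLsizei num_instances,
                        GLuint base_inst)
{
   /* base_inst reaches the driver as prim->base_instance, which is what now
    * gets written into the start-instance dword instead of 0. */
   glDrawElementsInstancedBaseInstance(GL_TRIANGLES, num_indices,
                                       GL_UNSIGNED_INT, NULL,
                                       num_instances, base_inst);
}
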
start_vertex_location += brw->vb.start_vertex_bias;
}
- verts_per_instance = trim(prim->mode, prim->count);
+ verts_per_instance = prim->count;
/* If nothing to emit, just return. */
if (verts_per_instance == 0)
OUT_BATCH(verts_per_instance);
OUT_BATCH(start_vertex_location);
OUT_BATCH(prim->num_instances);
- OUT_BATCH(0); // start instance location
+ OUT_BATCH(prim->base_instance);
OUT_BATCH(base_vertex_location);
ADVANCE_BATCH();
static void brw_merge_inputs( struct brw_context *brw,
const struct gl_client_array *arrays[])
{
- struct brw_vertex_info old = brw->vb.info;
GLuint i;
for (i = 0; i < brw->vb.nr_buffers; i++) {
}
brw->vb.nr_buffers = 0;
- memset(&brw->vb.info, 0, sizeof(brw->vb.info));
-
for (i = 0; i < VERT_ATTRIB_MAX; i++) {
brw->vb.inputs[i].buffer = -1;
brw->vb.inputs[i].glarray = arrays[i];
brw->vb.inputs[i].attrib = (gl_vert_attrib) i;
-
- if (arrays[i]->StrideB != 0)
- brw->vb.info.sizes[i/16] |= (brw->vb.inputs[i].glarray->Size - 1) <<
- ((i%16) * 2);
}
-
- /* Raise statechanges if input sizes have changed. */
- if (memcmp(brw->vb.info.sizes, old.sizes, sizeof(old.sizes)) != 0)
- brw->state.dirty.brw |= BRW_NEW_INPUT_DIMENSIONS;
}
/*
/* Resolve the depth buffer's HiZ buffer. */
depth_irb = intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_DEPTH);
- if (depth_irb && depth_irb->mt) {
+ if (depth_irb)
intel_renderbuffer_resolve_hiz(intel, depth_irb);
- }
/* Resolve depth buffer of each enabled depth texture. */
for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
brw_update_primitive_count(struct brw_context *brw,
const struct _mesa_prim *prim)
{
- uint32_t count = count_tessellated_primitives(prim);
- brw->sol.primitives_generated += count;
- if (brw->intel.ctx.TransformFeedback.CurrentObject->Active &&
- !brw->intel.ctx.TransformFeedback.CurrentObject->Paused) {
+ uint32_t count
+ = vbo_count_tessellated_primitives(prim->mode, prim->count,
+ prim->num_instances);
+ if (_mesa_is_xfb_active_and_unpaused(&brw->intel.ctx)) {
/* Update brw->sol.svbi_0_max_index to reflect the amount by which the
* hardware is going to increment SVBI 0 when this drawing operation
* occurs. This is necessary because the kernel does not (yet) save and
(brw->sol.svbi_0_max_index - brw->sol.svbi_0_starting_index) / verts;
uint32_t primitives_written = MIN2 (space_avail, count);
brw->sol.svbi_0_starting_index += verts * primitives_written;
-
- /* And update the TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN query. */
- brw->sol.primitives_written += primitives_written;
}
}
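
The MIN2() clamp a few lines above is the heart of this bookkeeping: the number of primitives transform feedback actually records is limited by the space remaining in the SVBI 0 range. A standalone sketch of that clamp, using a hypothetical helper name and the MIN2 macro from main/macros.h, is:

static uint32_t
clamp_xfb_primitives(uint32_t svbi_0_starting_index,
                     uint32_t svbi_0_max_index,
                     uint32_t verts_per_prim,
                     uint32_t prims_generated)
{
   /* e.g. with room for 10 more vertices and triangle primitives (3 verts
    * each), space_avail is 3, so at most 3 generated primitives are written. */
   uint32_t space_avail =
      (svbi_0_max_index - svbi_0_starting_index) / verts_per_prim;

   return MIN2(space_avail, prims_generated);
}
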
*/
brw_validate_textures( brw );
- /* Resolves must occur after updating state and finalizing textures but
- * before setting up any hardware state for this draw call.
+ intel_prepare_render(intel);
+
+ /* This workaround has to happen outside of brw_upload_state() because it
+ * may flush the batchbuffer for a blit, affecting the state flags.
+ */
+ brw_workaround_depthstencil_alignment(brw, 0);
+
+ /* Resolves must occur after updating renderbuffers, updating context state,
+ * and finalizing textures but before setting up any hardware state for
+ * this draw call.
*/
brw_predraw_resolve_buffers(brw);
brw->vb.max_index = max_index;
brw->state.dirty.brw |= BRW_NEW_VERTICES;
- /* Have to validate state quite late. Will rebuild tnl_program,
- * which depends on varying information.
- *
- * Note this is where brw->vs->prog_data.inputs_read is calculated,
- * so can't access it earlier.
- */
-
- intel_prepare_render(intel);
-
for (i = 0; i < nr_prims; i++) {
int estimated_max_prim_size;
intel_batchbuffer_require_space(intel, estimated_max_prim_size, false);
intel_batchbuffer_save_state(intel);
- brw->num_instances = prim->num_instances;
+ if (brw->num_instances != prim->num_instances) {
+ brw->num_instances = prim->num_instances;
+ brw->state.dirty.brw |= BRW_NEW_VERTICES;
+ }
+ if (brw->basevertex != prim->basevertex) {
+ brw->basevertex = prim->basevertex;
+ brw->state.dirty.brw |= BRW_NEW_VERTICES;
+ }
if (intel->gen < 6)
brw_set_prim(brw, &prim[i]);
else
if (brw->state.dirty.brw) {
intel->no_batch_wrap = true;
brw_upload_state(brw);
-
- if (unlikely(brw->intel.Fallback)) {
- intel->no_batch_wrap = false;
- retval = false;
- goto out;
- }
}
if (intel->gen >= 7)
if (intel->always_flush_batch)
intel_batchbuffer_flush(intel);
- out:
brw_state_cache_check_size(brw);
brw_postdraw_set_buffers_need_resolve(brw);
GLuint max_index,
struct gl_transform_feedback_object *tfb_vertcount )
{
+ struct intel_context *intel = intel_context(ctx);
const struct gl_client_array **arrays = ctx->Array._DrawArrays;
- bool retval;
if (!_mesa_check_conditional_render(ctx))
return;
return;
}
- if (!vbo_all_varyings_in_vbos(arrays)) {
- if (!index_bounds_valid)
- vbo_get_minmax_indices(ctx, prim, ib, &min_index, &max_index, nr_prims);
-
- /* Decide if we want to rebase. If so we end up recursing once
- * only into this function.
- */
- if (min_index != 0 && !vbo_any_varyings_in_vbos(arrays)) {
- vbo_rebase_prims(ctx, arrays,
- prim, nr_prims,
- ib, min_index, max_index,
- brw_draw_prims );
- return;
- }
- }
-
- /* Make a first attempt at drawing:
+ /* If we're going to have to upload any of the user's vertex arrays, then
+ * get the minimum and maximum of their index buffer so we know what range
+ * to upload.
*/
- retval = brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
+ if (!vbo_all_varyings_in_vbos(arrays) && !index_bounds_valid)
+ vbo_get_minmax_indices(ctx, prim, ib, &min_index, &max_index, nr_prims);
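
vbo_get_minmax_indices() fills in min_index/max_index when the caller hasn't already validated the bounds; a rough sketch of the equivalent scan over a single unsigned-int index array (hypothetical helper, not part of the patch) is:

static void
scan_index_bounds(const GLuint *indices, GLuint count,
                  GLuint *min_index, GLuint *max_index)
{
   GLuint lo = ~0u, hi = 0;

   for (GLuint i = 0; i < count; i++) {
      if (indices[i] < lo) lo = indices[i];
      if (indices[i] > hi) hi = indices[i];
   }

   /* With these bounds the driver knows which range of the user's vertex
    * arrays has to be uploaded. */
   *min_index = lo;
   *max_index = hi;
}
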
- /* Otherwise, we really are out of memory. Pass the drawing
- * command to the software tnl module and which will in turn call
- * swrast to do the drawing.
+ /* Do GL_SELECT and GL_FEEDBACK rendering using swrast, even though it
+ * won't support all the extensions we support.
*/
- if (!retval) {
- _swsetup_Wakeup(ctx);
- _tnl_wakeup(ctx);
+ if (ctx->RenderMode != GL_RENDER) {
+ perf_debug("%s render mode not supported in hardware\n",
+ _mesa_lookup_enum_by_nr(ctx->RenderMode));
+ _swsetup_Wakeup(ctx);
+ _tnl_wakeup(ctx);
_tnl_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
+ return;
}
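
For reference, the swrast/tnl path above is taken whenever the application leaves GL_RENDER mode; a hypothetical snippet that would land in it (selection-mode picking) looks like:

static void
pick_objects(void)
{
   GLuint select_buf[64];

   glSelectBuffer(64, select_buf);
   glRenderMode(GL_SELECT);
   /* ... draws issued here go through _tnl_draw_prims() above ... */
   GLint hits = glRenderMode(GL_RENDER);
   (void) hits;
}
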
+ /* Try drawing with the hardware, but don't do anything else if we can't
+ * manage it. swrast doesn't support our featureset, so we can't fall back
+ * to it.
+ */
+ brw_try_draw_prims(ctx, arrays, prim, nr_prims, ib, min_index, max_index);
}
void brw_draw_init( struct brw_context *brw )