struct brw_vertex_element *element,
GLuint dst_stride)
{
+ struct intel_context *intel = &brw->intel;
GLuint size = element->count * dst_stride;
get_space(brw, size, &element->bo, &element->offset);
if (dst_stride == element->glarray->StrideB) {
- dri_bo_subdata(element->bo,
- element->offset,
- size,
- element->glarray->Ptr);
+ if (intel->intelScreen->kernel_exec_fencing) {
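+ /* The kernel is managing fence registers for us here, so it should
+  * be safe to write straight through a GTT mapping of the buffer
+  * rather than staging the data with dri_bo_subdata().
+  */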
+ drm_intel_gem_bo_map_gtt(element->bo);
+ memcpy((char *)element->bo->virtual + element->offset,
+ element->glarray->Ptr, size);
+ drm_intel_gem_bo_unmap_gtt(element->bo);
+ } else {
+ dri_bo_subdata(element->bo,
+ element->offset,
+ size,
+ element->glarray->Ptr);
+ }
} else {
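+ /* The source stride doesn't match the stride we want in the VBO,
+  * so repack the array one element at a time.
+  */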
- void *data;
char *dest;
const unsigned char *src = element->glarray->Ptr;
int i;
- data = _mesa_malloc(dst_stride * element->count);
- dest = data;
- for (i = 0; i < element->count; i++) {
- memcpy(dest, src, dst_stride);
- src += element->glarray->StrideB;
- dest += dst_stride;
- }
+ if (intel->intelScreen->kernel_exec_fencing) {
+ drm_intel_gem_bo_map_gtt(element->bo);
+ dest = element->bo->virtual;
+ dest += element->offset;
+
+ for (i = 0; i < element->count; i++) {
+ memcpy(dest, src, dst_stride);
+ src += element->glarray->StrideB;
+ dest += dst_stride;
+ }
+
+ drm_intel_gem_bo_unmap_gtt(element->bo);
+ } else {
+ void *data;
+
+ data = _mesa_malloc(dst_stride * element->count);
+ dest = data;
+ for (i = 0; i < element->count; i++) {
+ memcpy(dest, src, dst_stride);
+ src += element->glarray->StrideB;
+ dest += dst_stride;
+ }
- dri_bo_subdata(element->bo,
- element->offset,
- size,
- data);
- _mesa_free(data);
+ dri_bo_subdata(element->bo,
+ element->offset,
+ size,
+ data);
+
+ _mesa_free(data);
+ }
}
}
{
GLcontext *ctx = &brw->intel.ctx;
struct intel_context *intel = intel_context(ctx);
- GLuint tmp = brw->vs.prog_data->inputs_read;
+ GLbitfield vs_inputs = brw->vs.prog_data->inputs_read;
GLuint i;
const unsigned char *ptr = NULL;
GLuint interleave = 0;
unsigned int min_index = brw->vb.min_index;
unsigned int max_index = brw->vb.max_index;
- struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
- GLuint nr_enabled = 0;
-
struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
GLuint nr_uploads = 0;
if (0)
_mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);
/* Accumulate the list of enabled arrays. */
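+ /* _mesa_ffsll() returns the 1-based position of the lowest set bit,
+  * so this walks inputs_read from lowest to highest attribute,
+  * clearing each bit as it is recorded.
+  */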
- while (tmp) {
- GLuint i = _mesa_ffsll(tmp)-1;
+ brw->vb.nr_enabled = 0;
+ while (vs_inputs) {
+ GLuint i = _mesa_ffsll(vs_inputs) - 1;
struct brw_vertex_element *input = &brw->vb.inputs[i];
- tmp &= ~(1<<i);
- enabled[nr_enabled++] = input;
+ vs_inputs &= ~(1 << i);
+ brw->vb.enabled[brw->vb.nr_enabled++] = input;
}
/* XXX: In the rare cases where this happens we fall back all
* the way to software rasterization, although a tnl fallback
* would be sufficient.  I don't know of *any* real world
* cases with > 17 vertex attributes enabled, so it probably
* isn't an issue at this point.
*/
- if (nr_enabled >= BRW_VEP_MAX) {
+ if (brw->vb.nr_enabled >= BRW_VEP_MAX) {
intel->Fallback = 1;
return;
}
- for (i = 0; i < nr_enabled; i++) {
- struct brw_vertex_element *input = enabled[i];
+ for (i = 0; i < brw->vb.nr_enabled; i++) {
+ struct brw_vertex_element *input = brw->vb.enabled[i];
input->element_size = get_size(input->glarray->Type) * input->glarray->Size;
input->count = input->glarray->StrideB ? max_index + 1 - min_index : 1;
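+ /* StrideB == 0 means a constant (zero-stride) array, which only
+  * needs a single element uploaded.
+  */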
dri_bo_reference(input->bo);
input->offset = (unsigned long)input->glarray->Ptr;
input->stride = input->glarray->StrideB;
+
+ /* This is a common place to reach if the user mistakenly supplies
+ * a pointer in place of a VBO offset. If we just let it go through,
+ * we may end up dereferencing a pointer beyond the bounds of the
+ * GTT. We would hope that the VBO's max_index would save us, but
+ * Mesa appears to hand us min/max values not clipped to the
+ * array object's _MaxElement, and _MaxElement frequently appears
+ * to be wrong anyway.
+ *
+ * The VBO spec allows application termination in this case, and it's
+ * probably a service to the poor programmer to do so rather than
+ * trying to just not render.
+ */
+ assert(input->offset < input->bo->size);
} else {
if (input->bo != NULL) {
/* Already-uploaded vertex data is present from a previous
brw_prepare_query_begin(brw);
- for (i = 0; i < nr_enabled; i++) {
- struct brw_vertex_element *input = enabled[i];
+ for (i = 0; i < brw->vb.nr_enabled; i++) {
+ struct brw_vertex_element *input = brw->vb.enabled[i];
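+ /* Make sure each enabled array's BO is accounted for in the
+  * batchbuffer's aperture-space check.
+  */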
brw_add_validated_bo(brw, input->bo);
}
{
GLcontext *ctx = &brw->intel.ctx;
struct intel_context *intel = intel_context(ctx);
- GLuint tmp = brw->vs.prog_data->inputs_read;
- struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
GLuint i;
- GLuint nr_enabled = 0;
- /* Accumulate the list of enabled arrays. */
- while (tmp) {
- i = _mesa_ffsll(tmp)-1;
- struct brw_vertex_element *input = &brw->vb.inputs[i];
+ brw_emit_query_begin(brw);
- tmp &= ~(1<<i);
- enabled[nr_enabled++] = input;
+ /* If the VS doesn't read any inputs (calculating vertex position from
+ * a state variable for some reason, for example), emit a single pad
+ * VERTEX_ELEMENT struct and bail.
+ *
+ * The stale VBs stay in place, but they don't do anything unless
+ * a VE loads from them.
+ */
+ if (brw->vb.nr_enabled == 0) {
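+ /* The pad element sources no vertex buffer: components 0-2 are
+  * stored as constant 0.0 and component 3 as constant 1.0, so a VS
+  * input that is somehow read anyway sees (0, 0, 0, 1).
+  */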
+ BEGIN_BATCH(3, IGNORE_CLIPRECTS);
+ OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
+ OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
+ BRW_VE0_VALID |
+ (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
+ (0 << BRW_VE0_SRC_OFFSET_SHIFT));
+ OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
+ (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
+ ADVANCE_BATCH();
+ return;
}
- brw_emit_query_begin(brw);
-
/* Now emit VB and VEP state packets.
*
* This still defines a hardware VB for each input, even if they
* are interleaved or from the same VBO. TBD if this makes a
* performance difference.
*/
- BEGIN_BATCH(1 + nr_enabled * 4, IGNORE_CLIPRECTS);
+ BEGIN_BATCH(1 + brw->vb.nr_enabled * 4, IGNORE_CLIPRECTS);
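+ /* The DWord length field in a 3D state packet header is the total
+  * packet size in DWords minus two, hence the "- 2" below.
+  */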
OUT_BATCH((CMD_VERTEX_BUFFER << 16) |
- ((1 + nr_enabled * 4) - 2));
+ ((1 + brw->vb.nr_enabled * 4) - 2));
- for (i = 0; i < nr_enabled; i++) {
- struct brw_vertex_element *input = enabled[i];
+ for (i = 0; i < brw->vb.nr_enabled; i++) {
+ struct brw_vertex_element *input = brw->vb.enabled[i];
OUT_BATCH((i << BRW_VB0_INDEX_SHIFT) |
BRW_VB0_ACCESS_VERTEXDATA |
}
ADVANCE_BATCH();
- BEGIN_BATCH(1 + nr_enabled * 2, IGNORE_CLIPRECTS);
- OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + nr_enabled * 2) - 2));
- for (i = 0; i < nr_enabled; i++) {
- struct brw_vertex_element *input = enabled[i];
+ BEGIN_BATCH(1 + brw->vb.nr_enabled * 2, IGNORE_CLIPRECTS);
+ OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + brw->vb.nr_enabled * 2) - 2));
+ for (i = 0; i < brw->vb.nr_enabled; i++) {
+ struct brw_vertex_element *input = brw->vb.enabled[i];
uint32_t format = get_surface_type(input->glarray->Type,
input->glarray->Size,
input->glarray->Format,
/* Straight upload
*/
- dri_bo_subdata(bo, offset, ib_size, index_buffer->ptr);
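+ /* Same choice as for vertex data: write through a GTT mapping when
+  * the kernel manages fencing, otherwise fall back to subdata.
+  */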
+ if (intel->intelScreen->kernel_exec_fencing) {
+ drm_intel_gem_bo_map_gtt(bo);
+ memcpy((char *)bo->virtual + offset, index_buffer->ptr, ib_size);
+ drm_intel_gem_bo_unmap_gtt(bo);
+ } else {
+ dri_bo_subdata(bo, offset, ib_size, index_buffer->ptr);
+ }
} else {
offset = (GLuint) (unsigned long) index_buffer->ptr;
if (index_buffer == NULL)
return;
- ib_size = get_size(index_buffer->type) * index_buffer->count;
+ ib_size = get_size(index_buffer->type) * index_buffer->count - 1;
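+ /* The index buffer packet's ending address is inclusive (it points
+  * at the last valid byte of the buffer), hence the "- 1".
+  */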
/* Emit the indexbuffer packet:
*/