2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
/* NOTE(review): original line numbers are embedded in this text and many
 * lines are elided (e.g. the closing of the comment below and several atom
 * entries) — confirm against the complete file before editing. */
39 /* This is used to initialize brw->state.atoms[]. We could use this
40 * list directly except for a single atom, brw_constant_buffer, which
41 * has a .dirty value which changes according to the parameters of the
42 * current fragment and vertex programs, and so cannot be a static
/* Atom list for the original gen4/gen5 path; each entry's prepare/emit
 * runs in order, so the "must do before X" ordering notes are load-bearing. */
45 static const struct brw_tracked_state *gen4_atoms[] =
56 /* Once all the programs are done, we know how large urb entry
57 * sizes need to be and can decide if we need to change the urb
61 &brw_recalculate_urb_fence,
65 &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
66 &brw_wm_constants, /* Before wm_surfaces and constant_buffer */
68 &brw_vs_surfaces, /* must do before unit */
69 &brw_wm_constant_surface, /* must do before wm surfaces/bind bo */
70 &brw_wm_surfaces, /* must do before samplers and unit */
71 &brw_wm_binding_table,
77 &brw_vs_unit, /* always required, enabled or not */
84 &brw_state_base_address,
86 &brw_binding_table_pointers,
87 &brw_blend_constant_color,
92 &brw_polygon_stipple_offset,
95 &brw_aa_line_parameters,
/* NOTE(review): remaining entries and the closing brace of the array are
 * elided in this view. */
/* Atom list selected when intel->gen >= 6 (Sandybridge) by
 * brw_validate_state()/brw_upload_state(); ordering constraints are noted
 * inline on the entries that have them.
 * NOTE(review): unlike gen4_atoms this array is not declared `static` —
 * confirm whether external linkage is intentional. Entries and the
 * closing brace are elided in this view. */
107 const struct brw_tracked_state *gen6_atoms[] =
119 /* Command packets: */
120 &brw_invarient_state,
122 &gen6_viewport_state, /* must do after *_vp stages */
125 &gen6_blend_state, /* must do before cc unit */
126 &gen6_color_calc_state, /* must do before cc unit */
127 &gen6_depth_stencil_state, /* must do before cc unit */
128 &gen6_cc_state_pointers,
130 &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
131 &brw_wm_constants, /* Before wm_surfaces and constant_buffer */
132 &gen6_wm_constants, /* Before wm_state */
134 &brw_vs_surfaces, /* must do before unit */
135 &brw_wm_constant_surface, /* must do before wm surfaces/bind bo */
136 &brw_wm_surfaces, /* must do before samplers and unit */
137 &brw_wm_binding_table,
149 &gen6_scissor_state_pointers,
151 &brw_state_base_address,
153 &gen6_binding_table_pointers,
157 &brw_polygon_stipple,
158 &brw_polygon_stipple_offset,
161 &brw_aa_line_parameters,
/* One-time per-context initialization for the state-upload module:
 * delegates to brw_init_caches() to set up the program/state caches.
 * NOTE(review): the function's braces are elided in this view. */
170 void brw_init_state( struct brw_context *brw )
172 brw_init_caches(brw);
/* Tear down what brw_init_state() created: the state caches, plus the
 * batch cache. NOTE(review): braces elided in this view. */
176 void brw_destroy_state( struct brw_context *brw )
178 brw_destroy_caches(brw);
179 brw_destroy_batch_cache(brw);
182 /***********************************************************************
/* Returns true if any dirty bit set in `a` is also set in `b` — i.e. the
 * state change described by `a` is relevant to an atom whose interest
 * mask is `b`.
 * NOTE(review): the (a->brw & b->brw) term (original line ~189) is elided
 * in this view; the real predicate tests all three flag words. */
185 static GLboolean check_state( const struct brw_state_flags *a,
186 const struct brw_state_flags *b )
188 return ((a->mesa & b->mesa) ||
190 (a->cache & b->cache));
/* OR the dirty bits of `b` into `a`; used by the debug ordering check in
 * brw_upload_state() to accumulate which flags have been examined so far.
 * NOTE(review): the mesa/brw accumulation lines are elided in this view;
 * only the cache word is visible. */
193 static void accumulate_state( struct brw_state_flags *a,
194 const struct brw_state_flags *b )
198 a->cache |= b->cache;
/* result = a ^ b for each flag word: the set of dirty bits that differ
 * between two snapshots. The debug path in brw_upload_state() uses this
 * to detect an atom generating state that an earlier atom already
 * consumed (an ordering bug in the atom list). */
202 static void xor_states( struct brw_state_flags *result,
203 const struct brw_state_flags *a,
204 const struct brw_state_flags *b )
206 result->mesa = a->mesa ^ b->mesa;
207 result->brw = a->brw ^ b->brw;
208 result->cache = a->cache ^ b->cache;
/* Drop the references on every buffer object accumulated during the
 * previous validate pass. Slots are NULLed defensively so a stale entry
 * cannot be unreferenced twice, then the count is reset for the next pass.
 * NOTE(review): return type, braces, and the declaration of `i` are elided
 * in this view. */
212 brw_clear_validated_bos(struct brw_context *brw)
216 /* Clear the last round of validated bos */
217 for (i = 0; i < brw->state.validated_bo_count; i++) {
218 drm_intel_bo_unreference(brw->state.validated_bos[i]);
219 brw->state.validated_bos[i] = NULL;
221 brw->state.validated_bo_count = 0;
/* Debug-statistics record: one dirty-flag bit, its printable name, and a
 * hit counter updated by brw_update_dirty_count(). Struct members are
 * elided in this view. DEFINE_BIT stringizes the flag macro so the tables
 * below stay in sync with the flag definitions automatically. */
224 struct dirty_bit_map {
230 #define DEFINE_BIT(name) {name, #name, 0}
/* Name table for core-Mesa _NEW_* dirty flags (state->mesa word), used by
 * the DEBUG_STATE statistics in brw_upload_state().
 * NOTE(review): the scanners stop at a zero .bit entry; the terminating
 * {0, 0, 0} row appears elided in this view. */
232 static struct dirty_bit_map mesa_bits[] = {
233 DEFINE_BIT(_NEW_MODELVIEW),
234 DEFINE_BIT(_NEW_PROJECTION),
235 DEFINE_BIT(_NEW_TEXTURE_MATRIX),
236 DEFINE_BIT(_NEW_COLOR),
237 DEFINE_BIT(_NEW_DEPTH),
238 DEFINE_BIT(_NEW_EVAL),
239 DEFINE_BIT(_NEW_FOG),
240 DEFINE_BIT(_NEW_HINT),
241 DEFINE_BIT(_NEW_LIGHT),
242 DEFINE_BIT(_NEW_LINE),
243 DEFINE_BIT(_NEW_PIXEL),
244 DEFINE_BIT(_NEW_POINT),
245 DEFINE_BIT(_NEW_POLYGON),
246 DEFINE_BIT(_NEW_POLYGONSTIPPLE),
247 DEFINE_BIT(_NEW_SCISSOR),
248 DEFINE_BIT(_NEW_STENCIL),
249 DEFINE_BIT(_NEW_TEXTURE),
250 DEFINE_BIT(_NEW_TRANSFORM),
251 DEFINE_BIT(_NEW_VIEWPORT),
252 DEFINE_BIT(_NEW_PACKUNPACK),
253 DEFINE_BIT(_NEW_ARRAY),
254 DEFINE_BIT(_NEW_RENDERMODE),
255 DEFINE_BIT(_NEW_BUFFERS),
256 DEFINE_BIT(_NEW_MULTISAMPLE),
257 DEFINE_BIT(_NEW_TRACK_MATRIX),
258 DEFINE_BIT(_NEW_PROGRAM),
259 DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
/* Name table for driver-internal BRW_NEW_* dirty flags (state->brw word).
 * NOTE(review): zero terminator row elided in this view. */
263 static struct dirty_bit_map brw_bits[] = {
264 DEFINE_BIT(BRW_NEW_URB_FENCE),
265 DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
266 DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
267 DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
268 DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
269 DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
270 DEFINE_BIT(BRW_NEW_PRIMITIVE),
271 DEFINE_BIT(BRW_NEW_CONTEXT),
272 DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
273 DEFINE_BIT(BRW_NEW_PSP),
274 DEFINE_BIT(BRW_NEW_WM_SURFACES),
275 DEFINE_BIT(BRW_NEW_BINDING_TABLE),
276 DEFINE_BIT(BRW_NEW_INDICES),
277 DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
278 DEFINE_BIT(BRW_NEW_VERTICES),
279 DEFINE_BIT(BRW_NEW_BATCH),
280 DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
281 DEFINE_BIT(BRW_NEW_NR_WM_SURFACES),
282 DEFINE_BIT(BRW_NEW_NR_VS_SURFACES),
283 DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
284 DEFINE_BIT(BRW_NEW_WM_CONSTBUF),
/* Name table for CACHE_NEW_* dirty flags (state->cache word) — bits set
 * when a state-cache entry (program / unit state) is (re)uploaded.
 * NOTE(review): zero terminator row elided in this view. */
288 static struct dirty_bit_map cache_bits[] = {
289 DEFINE_BIT(CACHE_NEW_BLEND_STATE),
290 DEFINE_BIT(CACHE_NEW_CC_VP),
291 DEFINE_BIT(CACHE_NEW_CC_UNIT),
292 DEFINE_BIT(CACHE_NEW_WM_PROG),
293 DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
294 DEFINE_BIT(CACHE_NEW_SAMPLER),
295 DEFINE_BIT(CACHE_NEW_WM_UNIT),
296 DEFINE_BIT(CACHE_NEW_SF_PROG),
297 DEFINE_BIT(CACHE_NEW_SF_VP),
298 DEFINE_BIT(CACHE_NEW_SF_UNIT),
299 DEFINE_BIT(CACHE_NEW_VS_UNIT),
300 DEFINE_BIT(CACHE_NEW_VS_PROG),
301 DEFINE_BIT(CACHE_NEW_GS_UNIT),
302 DEFINE_BIT(CACHE_NEW_GS_PROG),
303 DEFINE_BIT(CACHE_NEW_CLIP_VP),
304 DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
305 DEFINE_BIT(CACHE_NEW_CLIP_PROG),
/* For each flag set in `bits`, bump the matching entry's counter in
 * `bit_map`. Scans at most 32 entries and stops at the zero-.bit
 * terminator row.
 * NOTE(review): return type, braces, and loop-body statements (the break
 * and the counter increment) are elided in this view. */
311 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
315 for (i = 0; i < 32; i++) {
316 if (bit_map[i].bit == 0)
319 if (bit_map[i].bit & bits)
/* Dump the accumulated per-flag hit counters to stderr, one line per
 * table entry: "0x<bit>: <count> (<name>)". Same 32-entry /
 * zero-terminator scan as brw_update_dirty_count().
 * NOTE(review): return type, braces, and parts of the loop body are elided
 * in this view; the `bits` parameter's use is not visible here — confirm. */
325 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
329 for (i = 0; i < 32; i++) {
330 if (bit_map[i].bit == 0)
333 fprintf(stderr, "0x%08x: %12d (%s)\n",
334 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
338 /***********************************************************************
/* Prepare pass, run before space is reserved in the batchbuffer:
 *  - fold Mesa's accumulated NewGLState into brw->state.dirty and pick
 *    the atom list for this hardware generation (gen6 vs gen4);
 *  - notice fragment/vertex program changes and set the corresponding
 *    BRW_NEW_* dirty bits;
 *  - run each interested atom's prepare stage (buffer validation),
 *    bailing out early if a software fallback is flagged.
 * NOTE(review): many branch bodies, braces, and declarations (i,
 * num_atoms, the atoms assignment, the emit_state_always and
 * early-return paths) are elided in this view — do not assume the
 * visible lines are contiguous. */
341 void brw_validate_state( struct brw_context *brw )
343 struct gl_context *ctx = &brw->intel.ctx;
344 struct intel_context *intel = &brw->intel;
345 struct brw_state_flags *state = &brw->state.dirty;
347 const struct brw_tracked_state **atoms;
/* Release BOs validated by the previous frame/pass before collecting new ones. */
350 brw_clear_validated_bos(brw);
352 state->mesa |= brw->intel.NewGLState;
353 brw->intel.NewGLState = 0;
/* The batchbuffer itself is always part of the validation list. */
355 brw_add_validated_bo(brw, intel->batch->buf);
357 if (intel->gen >= 6) {
359 num_atoms = ARRAY_SIZE(gen6_atoms);
362 num_atoms = ARRAY_SIZE(gen4_atoms);
365 if (brw->emit_state_always) {
/* Track program changes ourselves: Mesa's _NEW_PROGRAM is too coarse. */
371 if (brw->fragment_program != ctx->FragmentProgram._Current) {
372 brw->fragment_program = ctx->FragmentProgram._Current;
373 brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
376 if (brw->vertex_program != ctx->VertexProgram._Current) {
377 brw->vertex_program = ctx->VertexProgram._Current;
378 brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
/* Nothing dirty at all -> nothing to do (early-out body elided). */
381 if (state->mesa == 0 &&
/* A new context invalidates everything cached in the old batch. */
386 if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
387 brw_clear_batch_cache(brw);
389 brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */
391 /* do prepare stage for all atoms */
392 for (i = 0; i < num_atoms; i++) {
393 const struct brw_tracked_state *atom = atoms[i];
/* An atom may request software fallback; stop preparing if so. */
395 if (brw->intel.Fallback)
398 if (check_state(state, &atom->dirty)) {
405 intel_check_front_buffer_rendering(intel);
407 /* Make sure that the textures which are referenced by the current
408 * brw fragment program are actually present/valid.
409 * If this fails, we can experience GPU lock-ups.
412 const struct brw_fragment_program *fp;
413 fp = brw_fragment_program_const(brw->fragment_program);
/* Every texture unit the FP samples must actually be enabled. */
415 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
416 == fp->tex_units_used);
/* Emit pass, run after brw_validate_state() once batch space is assured:
 * walks the same gen-specific atom list and runs the emit stage of every
 * atom whose dirty mask intersects the accumulated flags, then clears the
 * dirty state (unless a software fallback was triggered, in which case the
 * flags are kept for the retry). Under INTEL_DEBUG a shadow walk asserts
 * that no atom consumes state an earlier atom already produced — i.e. the
 * atom list is correctly ordered.
 * NOTE(review): declarations of i/num_atoms, the atoms assignment, the
 * emit calls inside both loops, and several closing braces are elided in
 * this view — the visible lines are not contiguous. */
422 void brw_upload_state(struct brw_context *brw)
424 struct intel_context *intel = &brw->intel;
425 struct brw_state_flags *state = &brw->state.dirty;
/* Persists across calls: throttles the DEBUG_STATE dump below to 1-in-1000. */
427 static int dirty_count = 0;
428 const struct brw_tracked_state **atoms;
431 if (intel->gen >= 6) {
433 num_atoms = ARRAY_SIZE(gen6_atoms);
436 num_atoms = ARRAY_SIZE(gen4_atoms);
439 brw_clear_validated_bos(brw);
441 if (unlikely(INTEL_DEBUG)) {
442 /* Debug version which enforces various sanity checks on the
443 * state flags which are generated and checked to help ensure
444 * state atoms are ordered correctly in the list.
446 struct brw_state_flags examined, prev;
447 memset(&examined, 0, sizeof(examined));
/* NOTE(review): `prev` initialization (copy of *state) elided in this view. */
450 for (i = 0; i < num_atoms; i++) {
451 const struct brw_tracked_state *atom = atoms[i];
452 struct brw_state_flags generated;
/* An atom that listens to no flags would never run — catch it here. */
454 assert(atom->dirty.mesa ||
458 if (brw->intel.Fallback)
461 if (check_state(state, &atom->dirty)) {
467 accumulate_state(&examined, &atom->dirty);
469 /* generated = (prev ^ state)
470 * if (examined & generated)
473 xor_states(&generated, &prev, state);
/* Fires if this atom generated flags an earlier atom already examined:
 * that earlier atom ran against stale state -> list-ordering bug. */
474 assert(!check_state(&examined, &generated));
479 for (i = 0; i < num_atoms; i++) {
480 const struct brw_tracked_state *atom = atoms[i];
482 if (brw->intel.Fallback)
485 if (check_state(state, &atom->dirty)) {
/* Optional statistics: count which flags trigger uploads, print periodically. */
493 if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
494 brw_update_dirty_count(mesa_bits, state->mesa);
495 brw_update_dirty_count(brw_bits, state->brw);
496 brw_update_dirty_count(cache_bits, state->cache);
497 if (dirty_count++ % 1000 == 0) {
498 brw_print_dirty_count(mesa_bits, state->mesa);
499 brw_print_dirty_count(brw_bits, state->brw);
500 brw_print_dirty_count(cache_bits, state->cache);
501 fprintf(stderr, "\n");
/* Keep the dirty flags on fallback so the state is re-emitted on retry. */
505 if (!brw->intel.Fallback)
506 memset(state, 0, sizeof(*state));