2 * Mesa 3-D graphics library
4 * Copyright (C) 2013 LunarG, Inc.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
25 * Chia-I Wu <olv@lunarg.com>
28 #include "util/u_dual_blend.h"
29 #include "util/u_prim.h"
30 #include "intel_reg.h"
32 #include "ilo_context.h"
34 #include "ilo_gpe_gen6.h"
35 #include "ilo_shader.h"
36 #include "ilo_state.h"
37 #include "ilo_3d_pipeline.h"
38 #include "ilo_3d_pipeline_gen6.h"
41 * This should be called before any depth stall flush (including those
42 * produced by non-pipelined state commands) or cache flush on GEN6.
44 * \see intel_emit_post_sync_nonzero_flush()
47 gen6_wa_pipe_control_post_sync(struct ilo_3d_pipeline *p,
48 bool caller_post_sync)
50 assert(p->dev->gen == ILO_GEN(6));
53 if (p->state.has_gen6_wa_pipe_control)
56 p->state.has_gen6_wa_pipe_control = true;
59 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
61 * "Pipe-control with CS-stall bit set must be sent BEFORE the
62 * pipe-control with a post-sync op and no write-cache flushes."
64 * The workaround below necessitates this workaround.
66 p->gen6_PIPE_CONTROL(p->dev,
67 PIPE_CONTROL_CS_STALL |
68 PIPE_CONTROL_STALL_AT_SCOREBOARD,
69 NULL, 0, false, p->cp);
71 /* the caller will emit the post-sync op */
76 * From the Sandy Bridge PRM, volume 2 part 1, page 60:
78 * "Before any depth stall flush (including those produced by
79 * non-pipelined state commands), software needs to first send a
80 * PIPE_CONTROL with no bits set except Post-Sync Operation != 0."
82 * "Before a PIPE_CONTROL with Write Cache Flush Enable =1, a
83 * PIPE_CONTROL with any non-zero post-sync-op is required."
85 p->gen6_PIPE_CONTROL(p->dev,
86 PIPE_CONTROL_WRITE_IMMEDIATE,
87 p->workaround_bo, 0, false, p->cp);
91 gen6_wa_pipe_control_wm_multisample_flush(struct ilo_3d_pipeline *p)
93 assert(p->dev->gen == ILO_GEN(6));
95 gen6_wa_pipe_control_post_sync(p, false);
98 * From the Sandy Bridge PRM, volume 2 part 1, page 305:
100 * "Driver must guarentee that all the caches in the depth pipe are
101 * flushed before this command (3DSTATE_MULTISAMPLE) is parsed. This
102 * requires driver to send a PIPE_CONTROL with a CS stall along with a
103 * Depth Flush prior to this command."
105 p->gen6_PIPE_CONTROL(p->dev,
106 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
107 PIPE_CONTROL_CS_STALL,
112 gen6_wa_pipe_control_wm_depth_flush(struct ilo_3d_pipeline *p)
114 assert(p->dev->gen == ILO_GEN(6));
116 gen6_wa_pipe_control_post_sync(p, false);
119 * According to intel_emit_depth_stall_flushes() of classic i965, we need
120 * to emit a sequence of PIPE_CONTROLs prior to emitting depth related
123 p->gen6_PIPE_CONTROL(p->dev,
124 PIPE_CONTROL_DEPTH_STALL,
125 NULL, 0, false, p->cp);
127 p->gen6_PIPE_CONTROL(p->dev,
128 PIPE_CONTROL_DEPTH_CACHE_FLUSH,
129 NULL, 0, false, p->cp);
131 p->gen6_PIPE_CONTROL(p->dev,
132 PIPE_CONTROL_DEPTH_STALL,
133 NULL, 0, false, p->cp);
137 gen6_wa_pipe_control_wm_max_threads_stall(struct ilo_3d_pipeline *p)
139 assert(p->dev->gen == ILO_GEN(6));
141 /* the post-sync workaround should cover this already */
142 if (p->state.has_gen6_wa_pipe_control)
146 * From the Sandy Bridge PRM, volume 2 part 1, page 274:
148 * "A PIPE_CONTROL command, with only the Stall At Pixel Scoreboard
149 * field set (DW1 Bit 1), must be issued prior to any change to the
150 * value in this field (Maximum Number of Threads in 3DSTATE_WM)"
152 p->gen6_PIPE_CONTROL(p->dev,
153 PIPE_CONTROL_STALL_AT_SCOREBOARD,
154 NULL, 0, false, p->cp);
159 gen6_wa_pipe_control_vs_const_flush(struct ilo_3d_pipeline *p)
161 assert(p->dev->gen == ILO_GEN(6));
163 gen6_wa_pipe_control_post_sync(p, false);
166 * According to upload_vs_state() of classic i965, we need to emit
167 * PIPE_CONTROL after 3DSTATE_CONSTANT_VS so that the command is kept being
168 * buffered by VS FF, to the point that the FF dies.
170 p->gen6_PIPE_CONTROL(p->dev,
171 PIPE_CONTROL_DEPTH_STALL |
172 PIPE_CONTROL_INSTRUCTION_FLUSH |
173 PIPE_CONTROL_STATE_CACHE_INVALIDATE,
174 NULL, 0, false, p->cp);
/* test whether the given state is marked dirty in this session */
#define DIRTY(state) (session->pipe_dirty & ILO_DIRTY_ ## state)
180 gen6_pipeline_common_select(struct ilo_3d_pipeline *p,
181 const struct ilo_context *ilo,
182 struct gen6_pipeline_session *session)
184 /* PIPELINE_SELECT */
185 if (session->hw_ctx_changed) {
186 if (p->dev->gen == ILO_GEN(6))
187 gen6_wa_pipe_control_post_sync(p, false);
189 p->gen6_PIPELINE_SELECT(p->dev, 0x0, p->cp);
194 gen6_pipeline_common_sip(struct ilo_3d_pipeline *p,
195 const struct ilo_context *ilo,
196 struct gen6_pipeline_session *session)
199 if (session->hw_ctx_changed) {
200 if (p->dev->gen == ILO_GEN(6))
201 gen6_wa_pipe_control_post_sync(p, false);
203 p->gen6_STATE_SIP(p->dev, 0, p->cp);
208 gen6_pipeline_common_base_address(struct ilo_3d_pipeline *p,
209 const struct ilo_context *ilo,
210 struct gen6_pipeline_session *session)
212 /* STATE_BASE_ADDRESS */
213 if (session->state_bo_changed || session->instruction_bo_changed) {
214 if (p->dev->gen == ILO_GEN(6))
215 gen6_wa_pipe_control_post_sync(p, false);
217 p->gen6_STATE_BASE_ADDRESS(p->dev,
218 NULL, p->cp->bo, p->cp->bo, NULL, ilo->shader_cache->bo,
222 * From the Sandy Bridge PRM, volume 1 part 1, page 28:
224 * "The following commands must be reissued following any change to
225 * the base addresses:
227 * * 3DSTATE_BINDING_TABLE_POINTERS
228 * * 3DSTATE_SAMPLER_STATE_POINTERS
229 * * 3DSTATE_VIEWPORT_STATE_POINTERS
230 * * 3DSTATE_CC_POINTERS
231 * * MEDIA_STATE_POINTERS"
233 * 3DSTATE_SCISSOR_STATE_POINTERS is not on the list, but it is
234 * reasonable to also reissue the command. Same to PCB.
236 session->viewport_state_changed = true;
238 session->cc_state_blend_changed = true;
239 session->cc_state_dsa_changed = true;
240 session->cc_state_cc_changed = true;
242 session->scissor_state_changed = true;
244 session->binding_table_vs_changed = true;
245 session->binding_table_gs_changed = true;
246 session->binding_table_fs_changed = true;
248 session->sampler_state_vs_changed = true;
249 session->sampler_state_gs_changed = true;
250 session->sampler_state_fs_changed = true;
252 session->pcb_state_vs_changed = true;
253 session->pcb_state_gs_changed = true;
254 session->pcb_state_fs_changed = true;
259 gen6_pipeline_common_urb(struct ilo_3d_pipeline *p,
260 const struct ilo_context *ilo,
261 struct gen6_pipeline_session *session)
264 if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS) || DIRTY(GS)) {
265 const struct ilo_shader *vs = (ilo->vs) ? ilo->vs->shader : NULL;
266 const struct ilo_shader *gs = (ilo->gs) ? ilo->gs->shader : NULL;
267 const bool gs_active = (gs || (vs && vs->stream_output));
268 int vs_entry_size, gs_entry_size;
269 int vs_total_size, gs_total_size;
271 vs_entry_size = (vs) ? vs->out.count : 0;
274 * As indicated by 2e712e41db0c0676e9f30fc73172c0e8de8d84d4, VF and VS
275 * share VUE handles. The VUE allocation size must be large enough to
276 * store either VF outputs (number of VERTEX_ELEMENTs) and VS outputs.
278 * I am not sure if the PRM explicitly states that VF and VS share VUE
279 * handles. But here is a citation that implies so:
281 * From the Sandy Bridge PRM, volume 2 part 1, page 44:
283 * "Once a FF stage that spawn threads has sufficient input to
284 * initiate a thread, it must guarantee that it is safe to request
285 * the thread initiation. For all these FF stages, this check is
288 * - The availability of output URB entries:
289 * - VS: As the input URB entries are overwritten with the
290 * VS-generated output data, output URB availability isn't a
293 if (vs_entry_size < ilo->ve->count)
294 vs_entry_size = ilo->ve->count;
296 gs_entry_size = (gs) ? gs->out.count :
297 (vs && vs->stream_output) ? vs_entry_size : 0;
300 vs_entry_size *= sizeof(float) * 4;
301 gs_entry_size *= sizeof(float) * 4;
302 vs_total_size = ilo->dev->urb_size;
306 gs_total_size = vs_total_size;
312 p->gen6_3DSTATE_URB(p->dev, vs_total_size, gs_total_size,
313 vs_entry_size, gs_entry_size, p->cp);
316 * From the Sandy Bridge PRM, volume 2 part 1, page 27:
318 * "Because of a urb corruption caused by allocating a previous
319 * gsunit's urb entry to vsunit software is required to send a
320 * "GS NULL Fence" (Send URB fence with VS URB size == 1 and GS URB
321 * size == 0) plus a dummy DRAW call before any case where VS will
322 * be taking over GS URB space."
324 if (p->state.gs.active && !gs_active)
325 ilo_3d_pipeline_emit_flush_gen6(p);
327 p->state.gs.active = gs_active;
332 gen6_pipeline_common_pointers_1(struct ilo_3d_pipeline *p,
333 const struct ilo_context *ilo,
334 struct gen6_pipeline_session *session)
336 /* 3DSTATE_VIEWPORT_STATE_POINTERS */
337 if (session->viewport_state_changed) {
338 p->gen6_3DSTATE_VIEWPORT_STATE_POINTERS(p->dev,
339 p->state.CLIP_VIEWPORT,
340 p->state.SF_VIEWPORT,
341 p->state.CC_VIEWPORT, p->cp);
346 gen6_pipeline_common_pointers_2(struct ilo_3d_pipeline *p,
347 const struct ilo_context *ilo,
348 struct gen6_pipeline_session *session)
350 /* 3DSTATE_CC_STATE_POINTERS */
351 if (session->cc_state_blend_changed ||
352 session->cc_state_dsa_changed ||
353 session->cc_state_cc_changed) {
354 p->gen6_3DSTATE_CC_STATE_POINTERS(p->dev,
355 p->state.BLEND_STATE,
356 p->state.DEPTH_STENCIL_STATE,
357 p->state.COLOR_CALC_STATE, p->cp);
360 /* 3DSTATE_SAMPLER_STATE_POINTERS */
361 if (session->sampler_state_vs_changed ||
362 session->sampler_state_gs_changed ||
363 session->sampler_state_fs_changed) {
364 p->gen6_3DSTATE_SAMPLER_STATE_POINTERS(p->dev,
365 p->state.vs.SAMPLER_STATE,
367 p->state.wm.SAMPLER_STATE, p->cp);
372 gen6_pipeline_common_pointers_3(struct ilo_3d_pipeline *p,
373 const struct ilo_context *ilo,
374 struct gen6_pipeline_session *session)
376 /* 3DSTATE_SCISSOR_STATE_POINTERS */
377 if (session->scissor_state_changed) {
378 p->gen6_3DSTATE_SCISSOR_STATE_POINTERS(p->dev,
379 p->state.SCISSOR_RECT, p->cp);
382 /* 3DSTATE_BINDING_TABLE_POINTERS */
383 if (session->binding_table_vs_changed ||
384 session->binding_table_gs_changed ||
385 session->binding_table_fs_changed) {
386 p->gen6_3DSTATE_BINDING_TABLE_POINTERS(p->dev,
387 p->state.vs.BINDING_TABLE_STATE,
388 p->state.gs.BINDING_TABLE_STATE,
389 p->state.wm.BINDING_TABLE_STATE, p->cp);
394 gen6_pipeline_vf(struct ilo_3d_pipeline *p,
395 const struct ilo_context *ilo,
396 struct gen6_pipeline_session *session)
398 /* 3DSTATE_INDEX_BUFFER */
399 if (DIRTY(INDEX_BUFFER)) {
400 p->gen6_3DSTATE_INDEX_BUFFER(p->dev,
401 &ilo->ib.state, session->info->primitive_restart, p->cp);
404 /* 3DSTATE_VERTEX_BUFFERS */
405 if (DIRTY(VERTEX_BUFFERS) || DIRTY(VERTEX_ELEMENTS)) {
406 p->gen6_3DSTATE_VERTEX_BUFFERS(p->dev,
407 ilo->vb.states, ilo->vb.enabled_mask, ilo->ve, p->cp);
410 /* 3DSTATE_VERTEX_ELEMENTS */
411 if (DIRTY(VERTEX_ELEMENTS) || DIRTY(VS)) {
412 const struct ilo_ve_state *ve = ilo->ve;
413 bool last_velement_edgeflag = false;
414 bool prepend_generate_ids = false;
417 const struct ilo_shader_info *info = &ilo->vs->info;
419 if (info->edgeflag_in >= 0) {
420 /* we rely on the state tracker here */
421 assert(info->edgeflag_in == ve->count - 1);
422 last_velement_edgeflag = true;
425 prepend_generate_ids = (info->has_instanceid || info->has_vertexid);
428 p->gen6_3DSTATE_VERTEX_ELEMENTS(p->dev, ve,
429 last_velement_edgeflag, prepend_generate_ids, p->cp);
434 gen6_pipeline_vf_statistics(struct ilo_3d_pipeline *p,
435 const struct ilo_context *ilo,
436 struct gen6_pipeline_session *session)
438 /* 3DSTATE_VF_STATISTICS */
439 if (session->hw_ctx_changed)
440 p->gen6_3DSTATE_VF_STATISTICS(p->dev, false, p->cp);
444 gen6_pipeline_vf_draw(struct ilo_3d_pipeline *p,
445 const struct ilo_context *ilo,
446 struct gen6_pipeline_session *session)
449 p->gen6_3DPRIMITIVE(p->dev, session->info, false, p->cp);
450 p->state.has_gen6_wa_pipe_control = false;
454 gen6_pipeline_vs(struct ilo_3d_pipeline *p,
455 const struct ilo_context *ilo,
456 struct gen6_pipeline_session *session)
458 const bool emit_3dstate_vs = (DIRTY(VS) || DIRTY(VERTEX_SAMPLERS));
459 const bool emit_3dstate_constant_vs = session->pcb_state_vs_changed;
462 * the classic i965 does this in upload_vs_state(), citing a spec that I
465 if (emit_3dstate_vs && p->dev->gen == ILO_GEN(6))
466 gen6_wa_pipe_control_post_sync(p, false);
468 /* 3DSTATE_CONSTANT_VS */
469 if (emit_3dstate_constant_vs) {
470 p->gen6_3DSTATE_CONSTANT_VS(p->dev,
471 &p->state.vs.PUSH_CONSTANT_BUFFER,
472 &p->state.vs.PUSH_CONSTANT_BUFFER_size,
477 if (emit_3dstate_vs) {
478 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
479 const int num_samplers = ilo->sampler[PIPE_SHADER_VERTEX].count;
481 p->gen6_3DSTATE_VS(p->dev, vs, num_samplers, p->cp);
484 if (emit_3dstate_constant_vs && p->dev->gen == ILO_GEN(6))
485 gen6_wa_pipe_control_vs_const_flush(p);
489 gen6_pipeline_gs(struct ilo_3d_pipeline *p,
490 const struct ilo_context *ilo,
491 struct gen6_pipeline_session *session)
493 /* 3DSTATE_CONSTANT_GS */
494 if (session->pcb_state_gs_changed)
495 p->gen6_3DSTATE_CONSTANT_GS(p->dev, NULL, NULL, 0, p->cp);
498 if (DIRTY(GS) || DIRTY(VS) || session->prim_changed) {
499 const struct ilo_shader *gs = (ilo->gs)? ilo->gs->shader : NULL;
500 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
501 const int num_vertices = u_vertices_per_prim(session->reduced_prim);
504 assert(!gs->pcb.clip_state_size);
506 p->gen6_3DSTATE_GS(p->dev, gs, vs,
507 (vs) ? vs->cache_offset + vs->gs_offsets[num_vertices - 1] : 0,
513 gen6_pipeline_update_max_svbi(struct ilo_3d_pipeline *p,
514 const struct ilo_context *ilo,
515 struct gen6_pipeline_session *session)
517 if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
518 const struct pipe_stream_output_info *so_info =
519 (ilo->gs) ? &ilo->gs->info.stream_output :
520 (ilo->vs) ? &ilo->vs->info.stream_output : NULL;
521 unsigned max_svbi = 0xffffffff;
524 for (i = 0; i < so_info->num_outputs; i++) {
525 const int output_buffer = so_info->output[i].output_buffer;
526 const struct pipe_stream_output_target *so =
527 ilo->so.states[output_buffer];
528 const int struct_size = so_info->stride[output_buffer] * 4;
529 const int elem_size = so_info->output[i].num_components * 4;
537 buf_size = so->buffer_size - so_info->output[i].dst_offset * 4;
539 count = buf_size / struct_size;
540 if (buf_size % struct_size >= elem_size)
543 if (count < max_svbi)
547 if (p->state.so_max_vertices != max_svbi) {
548 p->state.so_max_vertices = max_svbi;
557 gen6_pipeline_gs_svbi(struct ilo_3d_pipeline *p,
558 const struct ilo_context *ilo,
559 struct gen6_pipeline_session *session)
561 const bool emit = gen6_pipeline_update_max_svbi(p, ilo, session);
563 /* 3DSTATE_GS_SVB_INDEX */
565 if (p->dev->gen == ILO_GEN(6))
566 gen6_wa_pipe_control_post_sync(p, false);
568 p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
569 0, p->state.so_num_vertices, p->state.so_max_vertices,
572 if (session->hw_ctx_changed) {
576 * From the Sandy Bridge PRM, volume 2 part 1, page 148:
578 * "If a buffer is not enabled then the SVBI must be set to 0x0
579 * in order to not cause overflow in that SVBI."
581 * "If a buffer is not enabled then the MaxSVBI must be set to
582 * 0xFFFFFFFF in order to not cause overflow in that SVBI."
584 for (i = 1; i < 4; i++) {
585 p->gen6_3DSTATE_GS_SVB_INDEX(p->dev,
586 i, 0, 0xffffffff, false, p->cp);
593 gen6_pipeline_clip(struct ilo_3d_pipeline *p,
594 const struct ilo_context *ilo,
595 struct gen6_pipeline_session *session)
598 if (DIRTY(RASTERIZER) || DIRTY(FS) ||
599 DIRTY(VIEWPORT) || DIRTY(FRAMEBUFFER)) {
600 bool enable_guardband = true;
604 * We do not do 2D clipping yet. Guard band test should only be enabled
605 * when the viewport is larger than the framebuffer.
607 for (i = 0; i < ilo->viewport.count; i++) {
608 const struct ilo_viewport_cso *vp = &ilo->viewport.cso[i];
610 if (vp->min_x > 0.0f || vp->max_x < ilo->fb.state.width ||
611 vp->min_y > 0.0f || vp->max_y < ilo->fb.state.height) {
612 enable_guardband = false;
617 p->gen6_3DSTATE_CLIP(p->dev,
618 &ilo->rasterizer->state,
619 (ilo->fs && ilo->fs->shader->in.has_linear_interp),
620 enable_guardband, 1, p->cp);
625 gen6_pipeline_sf(struct ilo_3d_pipeline *p,
626 const struct ilo_context *ilo,
627 struct gen6_pipeline_session *session)
630 if (DIRTY(RASTERIZER) || DIRTY(VS) || DIRTY(GS) || DIRTY(FS)) {
631 const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
632 const struct ilo_shader *last_sh =
633 (ilo->gs)? ilo->gs->shader :
634 (ilo->vs)? ilo->vs->shader : NULL;
636 p->gen6_3DSTATE_SF(p->dev,
637 &ilo->rasterizer->state, fs, last_sh, p->cp);
642 gen6_pipeline_sf_rect(struct ilo_3d_pipeline *p,
643 const struct ilo_context *ilo,
644 struct gen6_pipeline_session *session)
646 /* 3DSTATE_DRAWING_RECTANGLE */
647 if (DIRTY(FRAMEBUFFER)) {
648 if (p->dev->gen == ILO_GEN(6))
649 gen6_wa_pipe_control_post_sync(p, false);
651 p->gen6_3DSTATE_DRAWING_RECTANGLE(p->dev, 0, 0,
652 ilo->fb.state.width, ilo->fb.state.height, p->cp);
657 gen6_pipeline_wm(struct ilo_3d_pipeline *p,
658 const struct ilo_context *ilo,
659 struct gen6_pipeline_session *session)
661 /* 3DSTATE_CONSTANT_PS */
662 if (session->pcb_state_fs_changed)
663 p->gen6_3DSTATE_CONSTANT_PS(p->dev, NULL, NULL, 0, p->cp);
666 if (DIRTY(FS) || DIRTY(FRAGMENT_SAMPLERS) ||
667 DIRTY(BLEND) || DIRTY(DEPTH_STENCIL_ALPHA) ||
669 const struct ilo_shader *fs = (ilo->fs)? ilo->fs->shader : NULL;
670 const int num_samplers = ilo->sampler[PIPE_SHADER_FRAGMENT].count;
671 const bool dual_blend = ilo->blend->dual_blend;
672 const bool cc_may_kill = (ilo->dsa->alpha.enabled ||
673 ilo->blend->alpha_to_coverage);
676 assert(!fs->pcb.clip_state_size);
678 if (p->dev->gen == ILO_GEN(6) && session->hw_ctx_changed)
679 gen6_wa_pipe_control_wm_max_threads_stall(p);
681 p->gen6_3DSTATE_WM(p->dev, fs, num_samplers,
682 &ilo->rasterizer->state, dual_blend, cc_may_kill, p->cp);
687 gen6_pipeline_wm_multisample(struct ilo_3d_pipeline *p,
688 const struct ilo_context *ilo,
689 struct gen6_pipeline_session *session)
691 /* 3DSTATE_MULTISAMPLE and 3DSTATE_SAMPLE_MASK */
692 if (DIRTY(SAMPLE_MASK) || DIRTY(FRAMEBUFFER)) {
693 const uint32_t *packed_sample_pos;
695 packed_sample_pos = (ilo->fb.num_samples > 1) ?
696 &p->packed_sample_position_4x : &p->packed_sample_position_1x;
698 if (p->dev->gen == ILO_GEN(6)) {
699 gen6_wa_pipe_control_post_sync(p, false);
700 gen6_wa_pipe_control_wm_multisample_flush(p);
703 p->gen6_3DSTATE_MULTISAMPLE(p->dev,
704 ilo->fb.num_samples, packed_sample_pos,
705 ilo->rasterizer->state.half_pixel_center, p->cp);
707 p->gen6_3DSTATE_SAMPLE_MASK(p->dev,
708 (ilo->fb.num_samples > 1) ? ilo->sample_mask : 0x1, p->cp);
713 gen6_pipeline_wm_depth(struct ilo_3d_pipeline *p,
714 const struct ilo_context *ilo,
715 struct gen6_pipeline_session *session)
717 /* 3DSTATE_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS */
718 if (DIRTY(FRAMEBUFFER)) {
719 if (p->dev->gen == ILO_GEN(6)) {
720 gen6_wa_pipe_control_post_sync(p, false);
721 gen6_wa_pipe_control_wm_depth_flush(p);
724 p->gen6_3DSTATE_DEPTH_BUFFER(p->dev, ilo->fb.state.zsbuf, p->cp);
727 p->gen6_3DSTATE_CLEAR_PARAMS(p->dev, 0, p->cp);
732 gen6_pipeline_wm_raster(struct ilo_3d_pipeline *p,
733 const struct ilo_context *ilo,
734 struct gen6_pipeline_session *session)
736 /* 3DSTATE_POLY_STIPPLE_PATTERN and 3DSTATE_POLY_STIPPLE_OFFSET */
737 if ((DIRTY(RASTERIZER) || DIRTY(POLY_STIPPLE)) &&
738 ilo->rasterizer->state.poly_stipple_enable) {
739 if (p->dev->gen == ILO_GEN(6))
740 gen6_wa_pipe_control_post_sync(p, false);
742 p->gen6_3DSTATE_POLY_STIPPLE_PATTERN(p->dev,
743 &ilo->poly_stipple, p->cp);
745 p->gen6_3DSTATE_POLY_STIPPLE_OFFSET(p->dev, 0, 0, p->cp);
748 /* 3DSTATE_LINE_STIPPLE */
749 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_stipple_enable) {
750 if (p->dev->gen == ILO_GEN(6))
751 gen6_wa_pipe_control_post_sync(p, false);
753 p->gen6_3DSTATE_LINE_STIPPLE(p->dev,
754 ilo->rasterizer->state.line_stipple_pattern,
755 ilo->rasterizer->state.line_stipple_factor + 1, p->cp);
758 /* 3DSTATE_AA_LINE_PARAMETERS */
759 if (DIRTY(RASTERIZER) && ilo->rasterizer->state.line_smooth) {
760 if (p->dev->gen == ILO_GEN(6))
761 gen6_wa_pipe_control_post_sync(p, false);
763 p->gen6_3DSTATE_AA_LINE_PARAMETERS(p->dev, p->cp);
768 gen6_pipeline_state_viewports(struct ilo_3d_pipeline *p,
769 const struct ilo_context *ilo,
770 struct gen6_pipeline_session *session)
772 /* SF_CLIP_VIEWPORT and CC_VIEWPORT */
773 if (p->dev->gen >= ILO_GEN(7) && DIRTY(VIEWPORT)) {
774 p->state.SF_CLIP_VIEWPORT = p->gen7_SF_CLIP_VIEWPORT(p->dev,
775 ilo->viewport.cso, ilo->viewport.count, p->cp);
777 p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
778 ilo->viewport.cso, ilo->viewport.count, p->cp);
780 session->viewport_state_changed = true;
782 /* SF_VIEWPORT, CLIP_VIEWPORT, and CC_VIEWPORT */
783 else if (DIRTY(VIEWPORT)) {
784 p->state.CLIP_VIEWPORT = p->gen6_CLIP_VIEWPORT(p->dev,
785 ilo->viewport.cso, ilo->viewport.count, p->cp);
787 p->state.SF_VIEWPORT = p->gen6_SF_VIEWPORT(p->dev,
788 ilo->viewport.cso, ilo->viewport.count, p->cp);
790 p->state.CC_VIEWPORT = p->gen6_CC_VIEWPORT(p->dev,
791 ilo->viewport.cso, ilo->viewport.count, p->cp);
793 session->viewport_state_changed = true;
798 gen6_pipeline_state_cc(struct ilo_3d_pipeline *p,
799 const struct ilo_context *ilo,
800 struct gen6_pipeline_session *session)
803 if (DIRTY(BLEND) || DIRTY(FRAMEBUFFER) || DIRTY(DEPTH_STENCIL_ALPHA)) {
804 p->state.BLEND_STATE = p->gen6_BLEND_STATE(p->dev,
805 ilo->blend, &ilo->fb, &ilo->dsa->alpha, p->cp);
807 session->cc_state_blend_changed = true;
810 /* COLOR_CALC_STATE */
811 if (DIRTY(DEPTH_STENCIL_ALPHA) || DIRTY(STENCIL_REF) || DIRTY(BLEND_COLOR)) {
812 p->state.COLOR_CALC_STATE =
813 p->gen6_COLOR_CALC_STATE(p->dev, &ilo->stencil_ref,
814 ilo->dsa->alpha.ref_value, &ilo->blend_color, p->cp);
816 session->cc_state_cc_changed = true;
819 /* DEPTH_STENCIL_STATE */
820 if (DIRTY(DEPTH_STENCIL_ALPHA)) {
821 p->state.DEPTH_STENCIL_STATE =
822 p->gen6_DEPTH_STENCIL_STATE(p->dev, ilo->dsa, p->cp);
824 session->cc_state_dsa_changed = true;
829 gen6_pipeline_state_scissors(struct ilo_3d_pipeline *p,
830 const struct ilo_context *ilo,
831 struct gen6_pipeline_session *session)
834 if (DIRTY(SCISSOR) || DIRTY(VIEWPORT)) {
835 /* there should be as many scissors as there are viewports */
836 p->state.SCISSOR_RECT = p->gen6_SCISSOR_RECT(p->dev,
837 &ilo->scissor, ilo->viewport.count, p->cp);
839 session->scissor_state_changed = true;
844 gen6_pipeline_state_surfaces_rt(struct ilo_3d_pipeline *p,
845 const struct ilo_context *ilo,
846 struct gen6_pipeline_session *session)
848 /* SURFACE_STATEs for render targets */
849 if (DIRTY(FRAMEBUFFER)) {
850 const int offset = ILO_WM_DRAW_SURFACE(0);
851 uint32_t *surface_state = &p->state.wm.SURFACE_STATE[offset];
854 for (i = 0; i < ilo->fb.state.nr_cbufs; i++) {
855 const struct ilo_surface_cso *surface =
856 (const struct ilo_surface_cso *) ilo->fb.state.cbufs[i];
858 assert(surface && surface->is_rt);
860 p->gen6_SURFACE_STATE(p->dev, &surface->u.rt, true, p->cp);
864 * Upload at least one render target, as
865 * brw_update_renderbuffer_surfaces() does. I don't know why.
868 struct ilo_view_surface null_surface;
870 ilo_gpe_init_view_surface_null(p->dev,
871 ilo->fb.state.width, ilo->fb.state.height,
872 1, 0, &null_surface);
875 p->gen6_SURFACE_STATE(p->dev, &null_surface, true, p->cp);
880 memset(&surface_state[i], 0, (ILO_MAX_DRAW_BUFFERS - i) * 4);
882 if (i && session->num_surfaces[PIPE_SHADER_FRAGMENT] < offset + i)
883 session->num_surfaces[PIPE_SHADER_FRAGMENT] = offset + i;
885 session->binding_table_fs_changed = true;
890 gen6_pipeline_state_surfaces_so(struct ilo_3d_pipeline *p,
891 const struct ilo_context *ilo,
892 struct gen6_pipeline_session *session)
894 const struct ilo_shader_state *vs = ilo->vs;
895 const struct ilo_shader_state *gs = ilo->gs;
896 const struct pipe_stream_output_target **so_targets =
897 (const struct pipe_stream_output_target **) ilo->so.states;
898 const int num_so_targets = ilo->so.count;
900 if (p->dev->gen != ILO_GEN(6))
903 /* SURFACE_STATEs for stream output targets */
904 if (DIRTY(VS) || DIRTY(GS) || DIRTY(STREAM_OUTPUT_TARGETS)) {
905 const struct pipe_stream_output_info *so_info =
906 (gs) ? &gs->info.stream_output :
907 (vs) ? &vs->info.stream_output : NULL;
908 const int offset = ILO_GS_SO_SURFACE(0);
909 uint32_t *surface_state = &p->state.gs.SURFACE_STATE[offset];
912 for (i = 0; so_info && i < so_info->num_outputs; i++) {
913 const int target = so_info->output[i].output_buffer;
914 const struct pipe_stream_output_target *so_target =
915 (target < num_so_targets) ? so_targets[target] : NULL;
918 surface_state[i] = p->gen6_so_SURFACE_STATE(p->dev,
919 so_target, so_info, i, p->cp);
922 surface_state[i] = 0;
926 memset(&surface_state[i], 0, (ILO_MAX_SO_BINDINGS - i) * 4);
928 if (i && session->num_surfaces[PIPE_SHADER_GEOMETRY] < offset + i)
929 session->num_surfaces[PIPE_SHADER_GEOMETRY] = offset + i;
931 session->binding_table_gs_changed = true;
936 gen6_pipeline_state_surfaces_view(struct ilo_3d_pipeline *p,
937 const struct ilo_context *ilo,
939 struct gen6_pipeline_session *session)
941 const struct pipe_sampler_view * const *views =
942 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
943 const int num_views = ilo->view[shader_type].count;
944 uint32_t *surface_state;
948 /* SURFACE_STATEs for sampler views */
949 switch (shader_type) {
950 case PIPE_SHADER_VERTEX:
951 if (DIRTY(VERTEX_SAMPLER_VIEWS)) {
952 offset = ILO_VS_TEXTURE_SURFACE(0);
953 surface_state = &p->state.vs.SURFACE_STATE[offset];
955 session->binding_table_vs_changed = true;
961 case PIPE_SHADER_FRAGMENT:
962 if (DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
963 offset = ILO_WM_TEXTURE_SURFACE(0);
964 surface_state = &p->state.wm.SURFACE_STATE[offset];
966 session->binding_table_fs_changed = true;
980 for (i = 0; i < num_views; i++) {
982 const struct ilo_view_cso *cso =
983 (const struct ilo_view_cso *) views[i];
986 p->gen6_SURFACE_STATE(p->dev, &cso->surface, false, p->cp);
989 surface_state[i] = 0;
993 memset(&surface_state[i], 0, (ILO_MAX_SAMPLER_VIEWS - i) * 4);
995 if (i && session->num_surfaces[shader_type] < offset + i)
996 session->num_surfaces[shader_type] = offset + i;
1000 gen6_pipeline_state_surfaces_const(struct ilo_3d_pipeline *p,
1001 const struct ilo_context *ilo,
1003 struct gen6_pipeline_session *session)
1005 const struct ilo_cbuf_cso *buffers = ilo->cbuf[shader_type].cso;
1006 const int num_buffers = ilo->cbuf[shader_type].count;
1007 uint32_t *surface_state;
1011 /* SURFACE_STATEs for constant buffers */
1012 switch (shader_type) {
1013 case PIPE_SHADER_VERTEX:
1014 if (DIRTY(CONSTANT_BUFFER)) {
1015 offset = ILO_VS_CONST_SURFACE(0);
1016 surface_state = &p->state.vs.SURFACE_STATE[offset];
1018 session->binding_table_vs_changed = true;
1024 case PIPE_SHADER_FRAGMENT:
1025 if (DIRTY(CONSTANT_BUFFER)) {
1026 offset = ILO_WM_CONST_SURFACE(0);
1027 surface_state = &p->state.wm.SURFACE_STATE[offset];
1029 session->binding_table_fs_changed = true;
1043 for (i = 0; i < num_buffers; i++) {
1044 if (buffers[i].resource) {
1045 const struct ilo_view_surface *surf = &buffers[i].surface;
1048 p->gen6_SURFACE_STATE(p->dev, surf, false, p->cp);
1051 surface_state[i] = 0;
1055 memset(&surface_state[i], 0, (ILO_MAX_CONST_BUFFERS - i) * 4);
1057 if (i && session->num_surfaces[shader_type] < offset + i)
1058 session->num_surfaces[shader_type] = offset + i;
1062 gen6_pipeline_state_binding_tables(struct ilo_3d_pipeline *p,
1063 const struct ilo_context *ilo,
1065 struct gen6_pipeline_session *session)
1067 uint32_t *binding_table_state, *surface_state;
1068 int *binding_table_state_size, size;
1071 /* BINDING_TABLE_STATE */
1072 switch (shader_type) {
1073 case PIPE_SHADER_VERTEX:
1074 surface_state = p->state.vs.SURFACE_STATE;
1075 binding_table_state = &p->state.vs.BINDING_TABLE_STATE;
1076 binding_table_state_size = &p->state.vs.BINDING_TABLE_STATE_size;
1078 skip = !session->binding_table_vs_changed;
1080 case PIPE_SHADER_GEOMETRY:
1081 surface_state = p->state.gs.SURFACE_STATE;
1082 binding_table_state = &p->state.gs.BINDING_TABLE_STATE;
1083 binding_table_state_size = &p->state.gs.BINDING_TABLE_STATE_size;
1085 skip = !session->binding_table_gs_changed;
1087 case PIPE_SHADER_FRAGMENT:
1088 surface_state = p->state.wm.SURFACE_STATE;
1089 binding_table_state = &p->state.wm.BINDING_TABLE_STATE;
1090 binding_table_state_size = &p->state.wm.BINDING_TABLE_STATE_size;
1092 skip = !session->binding_table_fs_changed;
1103 * If we have seemingly less SURFACE_STATEs than before, it could be that
1104 * we did not touch those reside at the tail in this upload. Loop over
1105 * them to figure out the real number of SURFACE_STATEs.
1107 for (size = *binding_table_state_size;
1108 size > session->num_surfaces[shader_type]; size--) {
1109 if (surface_state[size - 1])
1112 if (size < session->num_surfaces[shader_type])
1113 size = session->num_surfaces[shader_type];
1115 *binding_table_state = p->gen6_BINDING_TABLE_STATE(p->dev,
1116 surface_state, size, p->cp);
1117 *binding_table_state_size = size;
1121 gen6_pipeline_state_samplers(struct ilo_3d_pipeline *p,
1122 const struct ilo_context *ilo,
1124 struct gen6_pipeline_session *session)
1126 const struct ilo_sampler_cso * const *samplers =
1127 ilo->sampler[shader_type].cso;
1128 const struct pipe_sampler_view * const *views =
1129 (const struct pipe_sampler_view **) ilo->view[shader_type].states;
1130 const int num_samplers = ilo->sampler[shader_type].count;
1131 const int num_views = ilo->view[shader_type].count;
1132 uint32_t *sampler_state, *border_color_state;
1133 bool emit_border_color = false;
1136 /* SAMPLER_BORDER_COLOR_STATE and SAMPLER_STATE */
1137 switch (shader_type) {
1138 case PIPE_SHADER_VERTEX:
1139 if (DIRTY(VERTEX_SAMPLERS) || DIRTY(VERTEX_SAMPLER_VIEWS)) {
1140 sampler_state = &p->state.vs.SAMPLER_STATE;
1141 border_color_state = p->state.vs.SAMPLER_BORDER_COLOR_STATE;
1143 if (DIRTY(VERTEX_SAMPLERS))
1144 emit_border_color = true;
1146 session->sampler_state_vs_changed = true;
1152 case PIPE_SHADER_FRAGMENT:
1153 if (DIRTY(FRAGMENT_SAMPLERS) || DIRTY(FRAGMENT_SAMPLER_VIEWS)) {
1154 sampler_state = &p->state.wm.SAMPLER_STATE;
1155 border_color_state = p->state.wm.SAMPLER_BORDER_COLOR_STATE;
1157 if (DIRTY(FRAGMENT_SAMPLERS))
1158 emit_border_color = true;
1160 session->sampler_state_fs_changed = true;
1174 if (emit_border_color) {
1177 for (i = 0; i < num_samplers; i++) {
1178 border_color_state[i] = (samplers[i]) ?
1179 p->gen6_SAMPLER_BORDER_COLOR_STATE(p->dev,
1180 samplers[i], p->cp) : 0;
1184 /* should we take the minimum of num_samplers and num_views? */
1185 *sampler_state = p->gen6_SAMPLER_STATE(p->dev,
1188 MIN2(num_samplers, num_views), p->cp);
/*
 * Upload the push constant buffer (PCB) for the VS.  On this path the PCB
 * carries the user clip planes (ilo->clip), which is why it is re-emitted
 * whenever either the VS or the clip state is dirty.
 */
1192 gen6_pipeline_state_pcb(struct ilo_3d_pipeline *p,
1193 const struct ilo_context *ilo,
1194 struct gen6_pipeline_session *session)
1196 /* push constant buffer for VS */
1197 if (DIRTY(VS) || DIRTY(CLIP)) {
1198 const struct ilo_shader *vs = (ilo->vs)? ilo->vs->shader : NULL;
/* only allocate a PCB when the shader actually reads clip states */
1200 if (vs && vs->pcb.clip_state_size) {
1203 p->state.vs.PUSH_CONSTANT_BUFFER_size = vs->pcb.clip_state_size;
1204 p->state.vs.PUSH_CONSTANT_BUFFER =
1205 p->gen6_push_constant_buffer(p->dev,
1206 p->state.vs.PUSH_CONSTANT_BUFFER_size, &pcb, p->cp);
/* fill the freshly-allocated buffer with the user clip planes
 * (pcb is the CPU pointer returned via the &pcb out-parameter above) */
1208 memcpy(pcb, &ilo->clip, vs->pcb.clip_state_size);
/* no clip states pushed: record an empty PCB */
1211 p->state.vs.PUSH_CONSTANT_BUFFER_size = 0;
1212 p->state.vs.PUSH_CONSTANT_BUFFER = 0;
1215 session->pcb_state_vs_changed = true;
/*
 * Emit all GEN6 3D pipeline commands for a draw, in a fixed order.
 * Called through session->emit_draw_commands.
 */
1222 gen6_pipeline_commands(struct ilo_3d_pipeline *p,
1223 const struct ilo_context *ilo,
1224 struct gen6_pipeline_session *session)
1227 * We try to keep the order of the commands match, as closely as possible,
1228 * that of the classic i965 driver. It allows us to compare the command
/* invariant / common state first */
1231 gen6_pipeline_common_select(p, ilo, session);
1232 gen6_pipeline_gs_svbi(p, ilo, session);
1233 gen6_pipeline_common_sip(p, ilo, session);
1234 gen6_pipeline_vf_statistics(p, ilo, session);
1235 gen6_pipeline_common_base_address(p, ilo, session);
1236 gen6_pipeline_common_pointers_1(p, ilo, session);
1237 gen6_pipeline_common_urb(p, ilo, session);
1238 gen6_pipeline_common_pointers_2(p, ilo, session);
1239 gen6_pipeline_wm_multisample(p, ilo, session);
/* shader stages, front to back of the pipeline */
1240 gen6_pipeline_vs(p, ilo, session);
1241 gen6_pipeline_gs(p, ilo, session);
1242 gen6_pipeline_clip(p, ilo, session);
1243 gen6_pipeline_sf(p, ilo, session);
1244 gen6_pipeline_wm(p, ilo, session);
/* depth/raster state, then the draw itself */
1245 gen6_pipeline_common_pointers_3(p, ilo, session);
1246 gen6_pipeline_wm_depth(p, ilo, session);
1247 gen6_pipeline_wm_raster(p, ilo, session);
1248 gen6_pipeline_sf_rect(p, ilo, session);
1249 gen6_pipeline_vf(p, ilo, session);
1250 gen6_pipeline_vf_draw(p, ilo, session);
/*
 * Upload all dynamic and surface states for a draw.  Called through
 * session->emit_draw_states, before gen6_pipeline_commands().
 */
1254 gen6_pipeline_states(struct ilo_3d_pipeline *p,
1255 const struct ilo_context *ilo,
1256 struct gen6_pipeline_session *session)
1260 gen6_pipeline_state_viewports(p, ilo, session);
1261 gen6_pipeline_state_cc(p, ilo, session);
1262 gen6_pipeline_state_scissors(p, ilo, session);
1263 gen6_pipeline_state_pcb(p, ilo, session);
1266 * upload all SURAFCE_STATEs together so that we know there are minimal
1269 gen6_pipeline_state_surfaces_rt(p, ilo, session);
1270 gen6_pipeline_state_surfaces_so(p, ilo, session);
1271 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1272 gen6_pipeline_state_surfaces_view(p, ilo, shader_type, session);
1273 gen6_pipeline_state_surfaces_const(p, ilo, shader_type, session);
1276 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1277 gen6_pipeline_state_samplers(p, ilo, shader_type, session);
1278 /* this must be called after all SURFACE_STATEs are uploaded */
1279 gen6_pipeline_state_binding_tables(p, ilo, shader_type, session);
/*
 * Initialize a gen6_pipeline_session for a draw: snapshot the dirty flags,
 * the reduced primitive type, and which invalidations (HW context, state bo,
 * kernel bo) occurred since the last draw.
 */
1284 gen6_pipeline_prepare(const struct ilo_3d_pipeline *p,
1285 const struct ilo_context *ilo,
1286 const struct pipe_draw_info *info,
1287 struct gen6_pipeline_session *session)
1289 memset(session, 0, sizeof(*session));
1290 session->info = info;
1291 session->pipe_dirty = ilo->dirty;
1292 session->reduced_prim = u_reduced_prim(info->mode);
1294 /* available space before the session */
1295 session->init_cp_space = ilo_cp_space(p->cp);
1297 session->hw_ctx_changed =
1298 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_HW);
1300 if (session->hw_ctx_changed) {
1301 /* these should be enough to make everything uploaded */
1302 session->state_bo_changed = true;
1303 session->instruction_bo_changed = true;
1304 session->prim_changed = true;
/* no full HW-context invalidation: derive the individual flags */
1307 session->state_bo_changed =
1308 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_STATE_BO);
1309 session->instruction_bo_changed =
1310 (p->invalidate_flags & ILO_3D_PIPELINE_INVALIDATE_KERNEL_BO);
/* primitive type change is tracked against the last emitted draw */
1311 session->prim_changed = (p->state.reduced_prim != session->reduced_prim);
/*
 * Emit the states then the commands for a draw, widening the dirty set to
 * ILO_DIRTY_ALL when the state bo or the HW context was invalidated.
 * NOTE(review): the assignments back to ilo->dirty below appear to be the
 * else-branches of the preceding ifs (branch keywords elided in this view).
 */
1316 gen6_pipeline_draw(struct ilo_3d_pipeline *p,
1317 const struct ilo_context *ilo,
1318 struct gen6_pipeline_session *session)
1320 /* force all states to be uploaded if the state bo changed */
1321 if (session->state_bo_changed)
1322 session->pipe_dirty = ILO_DIRTY_ALL;
1324 session->pipe_dirty = ilo->dirty;
1326 session->emit_draw_states(p, ilo, session);
1328 /* force all commands to be uploaded if the HW context changed */
1329 if (session->hw_ctx_changed)
1330 session->pipe_dirty = ILO_DIRTY_ALL;
1332 session->pipe_dirty = ilo->dirty;
1334 session->emit_draw_commands(p, ilo, session);
/*
 * Finish a draw session: assert that the batch space actually consumed does
 * not exceed what ilo_3d_pipeline_estimate_size() promised, and remember the
 * reduced primitive type for the prim_changed check of the next draw.
 */
1338 gen6_pipeline_end(struct ilo_3d_pipeline *p,
1339 const struct ilo_context *ilo,
1340 struct gen6_pipeline_session *session)
1344 /* sanity check size estimation */
1345 used = session->init_cp_space - ilo_cp_space(p->cp);
1346 estimate = ilo_3d_pipeline_estimate_size(p, ILO_3D_PIPELINE_DRAW, ilo);
1347 assert(used <= estimate);
1349 p->state.reduced_prim = session->reduced_prim;
/*
 * GEN6 entry point for emitting a draw: prepare the session, plug in the
 * GEN6 state/command emitters, run the draw, then finalize.
 */
1353 ilo_3d_pipeline_emit_draw_gen6(struct ilo_3d_pipeline *p,
1354 const struct ilo_context *ilo,
1355 const struct pipe_draw_info *info)
1357 struct gen6_pipeline_session session;
1359 gen6_pipeline_prepare(p, ilo, info, &session);
/* the session indirects through these so GEN7 can reuse this flow */
1361 session.emit_draw_states = gen6_pipeline_states;
1362 session.emit_draw_commands = gen6_pipeline_commands;
1364 gen6_pipeline_draw(p, ilo, &session);
1365 gen6_pipeline_end(p, ilo, &session);
/*
 * Emit a full pipeline flush: instruction, render, depth, and texture
 * caches flushed, VF cache invalidated, with a CS stall and no post-sync
 * write.  On GEN6 proper the post-sync workaround PIPE_CONTROL is emitted
 * first (see gen6_wa_pipe_control_post_sync()).
 */
1369 ilo_3d_pipeline_emit_flush_gen6(struct ilo_3d_pipeline *p)
1371 if (p->dev->gen == ILO_GEN(6))
1372 gen6_wa_pipe_control_post_sync(p, false);
1374 p->gen6_PIPE_CONTROL(p->dev,
1375 PIPE_CONTROL_INSTRUCTION_FLUSH |
1376 PIPE_CONTROL_WRITE_FLUSH |
1377 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1378 PIPE_CONTROL_VF_CACHE_INVALIDATE |
1379 PIPE_CONTROL_TC_FLUSH |
1380 PIPE_CONTROL_NO_WRITE |
1381 PIPE_CONTROL_CS_STALL,
1382 0, 0, false, p->cp)
/*
 * Emit a PIPE_CONTROL that writes the 64-bit pipeline timestamp to
 * bo[index], via the global GTT.  The caller here is itself the post-sync
 * op, hence caller_post_sync = true for the GEN6 workaround.
 */
1386 ilo_3d_pipeline_emit_write_timestamp_gen6(struct ilo_3d_pipeline *p,
1387 struct intel_bo *bo, int index)
1389 if (p->dev->gen == ILO_GEN(6))
1390 gen6_wa_pipe_control_post_sync(p, true);
1392 p->gen6_PIPE_CONTROL(p->dev,
1393 PIPE_CONTROL_WRITE_TIMESTAMP,
1394 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
/*
 * Emit a PIPE_CONTROL that stalls at the depth stage and writes the 64-bit
 * depth count (PS_DEPTH_COUNT) to bo[index], via the global GTT.  Used for
 * occlusion queries.
 */
1399 ilo_3d_pipeline_emit_write_depth_count_gen6(struct ilo_3d_pipeline *p,
1400 struct intel_bo *bo, int index)
1402 if (p->dev->gen == ILO_GEN(6))
1403 gen6_wa_pipe_control_post_sync(p, false);
1405 p->gen6_PIPE_CONTROL(p->dev,
1406 PIPE_CONTROL_DEPTH_STALL |
1407 PIPE_CONTROL_WRITE_DEPTH_COUNT,
1408 bo, index * sizeof(uint64_t) | PIPE_CONTROL_GLOBAL_GTT_WRITE,
/*
 * Return a worst-case batch-space estimate (in dwords) for the commands a
 * single draw may emit.  Iterates every GEN6 command type, deciding how many
 * instances of each a draw can produce (the per-case count assignments are
 * elided in this view), and sums the per-command size estimates.
 */
1413 gen6_pipeline_estimate_commands(const struct ilo_3d_pipeline *p,
1414 const struct ilo_gpe_gen6 *gen6,
1415 const struct ilo_context *ilo)
1418 enum ilo_gpe_gen6_command cmd;
1423 for (cmd = 0; cmd < ILO_GPE_GEN6_COMMAND_COUNT; cmd++) {
1427 case ILO_GPE_GEN6_PIPE_CONTROL:
1428 /* for the workaround */
1430 /* another one after 3DSTATE_URB */
1432 /* and another one after 3DSTATE_CONSTANT_VS */
1435 case ILO_GPE_GEN6_3DSTATE_GS_SVB_INDEX:
1436 /* there are 4 SVBIs */
1439 case ILO_GPE_GEN6_3DSTATE_VERTEX_BUFFERS:
1442 case ILO_GPE_GEN6_3DSTATE_VERTEX_ELEMENTS:
1445 case ILO_GPE_GEN6_MEDIA_VFE_STATE:
1446 case ILO_GPE_GEN6_MEDIA_CURBE_LOAD:
1447 case ILO_GPE_GEN6_MEDIA_INTERFACE_DESCRIPTOR_LOAD:
1448 case ILO_GPE_GEN6_MEDIA_GATEWAY_STATE:
1449 case ILO_GPE_GEN6_MEDIA_STATE_FLUSH:
1450 case ILO_GPE_GEN6_MEDIA_OBJECT_WALKER:
1451 /* media commands */
/* accumulate the estimated size of `count` instances of this command */
1460 size += gen6->estimate_command_size(p->dev, cmd, count);
/*
 * Return a worst-case batch-space estimate for the dynamic and surface
 * states a single draw may upload.  Fixed-count states (viewports, CC,
 * scissor, binding tables) are summed once into the function-local static
 * cache `static_size`; context-dependent states (SURFACE_STATEs, samplers,
 * the VS push constant buffer) are re-estimated per call.
 */
1467 gen6_pipeline_estimate_states(const struct ilo_3d_pipeline *p,
1468 const struct ilo_gpe_gen6 *gen6,
1469 const struct ilo_context *ilo)
1471 static int static_size;
1472 int shader_type, count, size;
1476 enum ilo_gpe_gen6_state state;
1478 } static_states[] = {
1480 { ILO_GPE_GEN6_SF_VIEWPORT, 1 },
1481 { ILO_GPE_GEN6_CLIP_VIEWPORT, 1 },
1482 { ILO_GPE_GEN6_CC_VIEWPORT, 1 },
1484 { ILO_GPE_GEN6_COLOR_CALC_STATE, 1 },
1485 { ILO_GPE_GEN6_BLEND_STATE, ILO_MAX_DRAW_BUFFERS },
1486 { ILO_GPE_GEN6_DEPTH_STENCIL_STATE, 1 },
1488 { ILO_GPE_GEN6_SCISSOR_RECT, 1 },
1489 /* binding table (vs, gs, fs) */
1490 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_VS_SURFACES },
1491 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_GS_SURFACES },
1492 { ILO_GPE_GEN6_BINDING_TABLE_STATE, ILO_MAX_WM_SURFACES },
/* computed once; presumably guarded by a !static_size check (elided) */
1496 for (i = 0; i < Elements(static_states); i++) {
1497 static_size += gen6->estimate_state_size(p->dev,
1498 static_states[i].state,
1499 static_states[i].count);
1506 * render targets (fs)
1507 * stream outputs (gs)
1508 * sampler views (vs, fs)
1509 * constant buffers (vs, fs)
/* count every SURFACE_STATE the bound context can require */
1511 count = ilo->fb.state.nr_cbufs;
1514 count += ilo->gs->info.stream_output.num_outputs;
1516 count += ilo->vs->info.stream_output.num_outputs;
1518 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1519 count += ilo->view[shader_type].count;
1520 count += ilo->cbuf[shader_type].count;
1524 size += gen6->estimate_state_size(p->dev,
1525 ILO_GPE_GEN6_SURFACE_STATE, count);
1528 /* samplers (vs, fs) */
1529 for (shader_type = 0; shader_type < PIPE_SHADER_TYPES; shader_type++) {
1530 count = ilo->sampler[shader_type].count;
1532 size += gen6->estimate_state_size(p->dev,
1533 ILO_GPE_GEN6_SAMPLER_BORDER_COLOR_STATE, count);
1534 size += gen6->estimate_state_size(p->dev,
1535 ILO_GPE_GEN6_SAMPLER_STATE, count);
/* push constant buffer for the VS (clip states), when the shader uses it */
1540 if (ilo->vs && ilo->vs->shader->pcb.clip_state_size) {
1541 const int pcb_size = ilo->vs->shader->pcb.clip_state_size;
1543 size += gen6->estimate_state_size(p->dev,
1544 ILO_GPE_GEN6_PUSH_CONSTANT_BUFFER, pcb_size);
/*
 * GEN6 implementation of p->estimate_size: return the worst-case batch
 * space an action will consume.  For a draw, `arg` is the ilo_context; the
 * flush/timestamp/depth-count cases are a fixed multiple of one
 * PIPE_CONTROL (the extra ones account for the GEN6 post-sync workaround).
 */
1551 ilo_3d_pipeline_estimate_size_gen6(struct ilo_3d_pipeline *p,
1552 enum ilo_3d_pipeline_action action,
1555 const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();
1559 case ILO_3D_PIPELINE_DRAW:
1561 const struct ilo_context *ilo = arg;
1563 size = gen6_pipeline_estimate_commands(p, gen6, ilo) +
1564 gen6_pipeline_estimate_states(p, gen6, ilo);
1567 case ILO_3D_PIPELINE_FLUSH:
1568 size = gen6->estimate_command_size(p->dev,
1569 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1571 case ILO_3D_PIPELINE_WRITE_TIMESTAMP:
1572 size = gen6->estimate_command_size(p->dev,
1573 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 2;
1575 case ILO_3D_PIPELINE_WRITE_DEPTH_COUNT:
1576 size = gen6->estimate_command_size(p->dev,
1577 ILO_GPE_GEN6_PIPE_CONTROL, 1) * 3;
1580 assert(!"unknown 3D pipeline action");
/*
 * Hook up the GEN6 implementations of the pipeline vtable: the top-level
 * entry points (estimate/draw/flush/timestamp/depth-count) and every
 * command/state emitter, pulled from the shared GEN6 GPE table so the
 * pipeline code can call them through p->gen6_* without knowing the GPE.
 */
1589 ilo_3d_pipeline_init_gen6(struct ilo_3d_pipeline *p)
1591 const struct ilo_gpe_gen6 *gen6 = ilo_gpe_gen6_get();
1593 p->estimate_size = ilo_3d_pipeline_estimate_size_gen6;
1594 p->emit_draw = ilo_3d_pipeline_emit_draw_gen6;
1595 p->emit_flush = ilo_3d_pipeline_emit_flush_gen6;
1596 p->emit_write_timestamp = ilo_3d_pipeline_emit_write_timestamp_gen6;
1597 p->emit_write_depth_count = ilo_3d_pipeline_emit_write_depth_count_gen6;
/* alias each p->gen6_<name> emitter to the GPE's emit_<name> */
1599 #define GEN6_USE(p, name, from) \
1600 p->gen6_ ## name = from->emit_ ## name
1601 GEN6_USE(p, STATE_BASE_ADDRESS, gen6);
1602 GEN6_USE(p, STATE_SIP, gen6);
1603 GEN6_USE(p, PIPELINE_SELECT, gen6);
1604 GEN6_USE(p, 3DSTATE_BINDING_TABLE_POINTERS, gen6);
1605 GEN6_USE(p, 3DSTATE_SAMPLER_STATE_POINTERS, gen6);
1606 GEN6_USE(p, 3DSTATE_URB, gen6);
1607 GEN6_USE(p, 3DSTATE_VERTEX_BUFFERS, gen6);
1608 GEN6_USE(p, 3DSTATE_VERTEX_ELEMENTS, gen6);
1609 GEN6_USE(p, 3DSTATE_INDEX_BUFFER, gen6);
1610 GEN6_USE(p, 3DSTATE_VF_STATISTICS, gen6);
1611 GEN6_USE(p, 3DSTATE_VIEWPORT_STATE_POINTERS, gen6);
1612 GEN6_USE(p, 3DSTATE_CC_STATE_POINTERS, gen6);
1613 GEN6_USE(p, 3DSTATE_SCISSOR_STATE_POINTERS, gen6);
1614 GEN6_USE(p, 3DSTATE_VS, gen6);
1615 GEN6_USE(p, 3DSTATE_GS, gen6);
1616 GEN6_USE(p, 3DSTATE_CLIP, gen6);
1617 GEN6_USE(p, 3DSTATE_SF, gen6);
1618 GEN6_USE(p, 3DSTATE_WM, gen6);
1619 GEN6_USE(p, 3DSTATE_CONSTANT_VS, gen6);
1620 GEN6_USE(p, 3DSTATE_CONSTANT_GS, gen6);
1621 GEN6_USE(p, 3DSTATE_CONSTANT_PS, gen6);
1622 GEN6_USE(p, 3DSTATE_SAMPLE_MASK, gen6);
1623 GEN6_USE(p, 3DSTATE_DRAWING_RECTANGLE, gen6);
1624 GEN6_USE(p, 3DSTATE_DEPTH_BUFFER, gen6);
1625 GEN6_USE(p, 3DSTATE_POLY_STIPPLE_OFFSET, gen6);
1626 GEN6_USE(p, 3DSTATE_POLY_STIPPLE_PATTERN, gen6);
1627 GEN6_USE(p, 3DSTATE_LINE_STIPPLE, gen6);
1628 GEN6_USE(p, 3DSTATE_AA_LINE_PARAMETERS, gen6);
1629 GEN6_USE(p, 3DSTATE_GS_SVB_INDEX, gen6);
1630 GEN6_USE(p, 3DSTATE_MULTISAMPLE, gen6);
1631 GEN6_USE(p, 3DSTATE_STENCIL_BUFFER, gen6);
1632 GEN6_USE(p, 3DSTATE_HIER_DEPTH_BUFFER, gen6);
1633 GEN6_USE(p, 3DSTATE_CLEAR_PARAMS, gen6);
1634 GEN6_USE(p, PIPE_CONTROL, gen6);
1635 GEN6_USE(p, 3DPRIMITIVE, gen6);
1636 GEN6_USE(p, INTERFACE_DESCRIPTOR_DATA, gen6);
1637 GEN6_USE(p, SF_VIEWPORT, gen6);
1638 GEN6_USE(p, CLIP_VIEWPORT, gen6);
1639 GEN6_USE(p, CC_VIEWPORT, gen6);
1640 GEN6_USE(p, COLOR_CALC_STATE, gen6);
1641 GEN6_USE(p, BLEND_STATE, gen6);
1642 GEN6_USE(p, DEPTH_STENCIL_STATE, gen6);
1643 GEN6_USE(p, SCISSOR_RECT, gen6);
1644 GEN6_USE(p, BINDING_TABLE_STATE, gen6);
1645 GEN6_USE(p, SURFACE_STATE, gen6);
1646 GEN6_USE(p, so_SURFACE_STATE, gen6);
1647 GEN6_USE(p, SAMPLER_STATE, gen6);
1648 GEN6_USE(p, SAMPLER_BORDER_COLOR_STATE, gen6);
1649 GEN6_USE(p, push_constant_buffer, gen6);