/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"
#include "util/ralloc.h"

static void
vc4_dump_program(struct vc4_compile *c)
{
        fprintf(stderr, "%s prog %d/%d QPU:\n",
                qir_get_stage_name(c->stage),
                c->program_id, c->variant_id);

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}
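
/* Appends a raw 64-bit QPU instruction to the compile's list of
 * instructions to be scheduled, allocating it under the compile context.
 */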
static void
queue(struct vc4_compile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = rzalloc(c, struct queued_qpu_inst);
        q->inst = inst;
        list_addtail(&q->link, &c->qpu_inst_list);
}
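
/* Returns a pointer to the most recently queued instruction, so callers can
 * patch condition codes, signals, or pack/unpack fields into it.
 */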
static uint64_t *
last_inst(struct vc4_compile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)c->qpu_inst_list.prev;
        return &q->inst;
}

static void
set_last_cond_add(struct vc4_compile *c, uint32_t cond)
{
        *last_inst(c) = qpu_set_cond_add(*last_inst(c), cond);
}

static void
set_last_cond_mul(struct vc4_compile *c, uint32_t cond)
{
        *last_inst(c) = qpu_set_cond_mul(*last_inst(c), cond);
}

/**
 * Some special registers can be read from either file, which lets us resolve
 * raddr conflicts without extra MOVs.
 */
static bool
swap_file(struct qpu_reg *src)
{
        switch (src->addr) {
        case QPU_R_UNIF:
        case QPU_R_VARY:
                if (src->mux == QPU_MUX_SMALL_IMM) {
                        return false;
                } else {
                        if (src->mux == QPU_MUX_A)
                                src->mux = QPU_MUX_B;
                        else
                                src->mux = QPU_MUX_A;
                        return true;
                }

        default:
                return false;
        }
}

/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 * even though instructions have only one field for the register file source
 * of each operand.
 *
 * In that case, we need to move one to a temporary that can be used in the
 * instruction, instead.  We reserve ra31/rb31 for this purpose.
 */
static void
fixup_raddr_conflict(struct vc4_compile *c,
                     struct qpu_reg dst,
                     struct qpu_reg *src0, struct qpu_reg *src1,
                     struct qinst *inst, uint64_t *unpack)
{
        uint32_t mux0 = src0->mux == QPU_MUX_SMALL_IMM ? QPU_MUX_B : src0->mux;
        uint32_t mux1 = src1->mux == QPU_MUX_SMALL_IMM ? QPU_MUX_B : src1->mux;
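
        /* There's no conflict if at least one source is an accumulator
         * (which doesn't consume a raddr), if the sources live in different
         * register files, or if they are reads of the very same register.
         */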
        if (mux0 <= QPU_MUX_R5 ||
            mux0 != mux1 ||
            (src0->addr == src1->addr &&
             src0->mux == src1->mux)) {
                return;
        }

        if (swap_file(src0) || swap_file(src1))
                return;

        if (mux0 == QPU_MUX_A) {
                /* Make sure we use the same type of MOV as the instruction,
                 * in case of unpacks.
                 */
                if (qir_is_float_input(inst))
                        queue(c, qpu_a_FMAX(qpu_rb(31), *src0, *src0));
                else
                        queue(c, qpu_a_MOV(qpu_rb(31), *src0));

                /* If we had an unpack on this A-file source, we need to put
                 * it into this MOV, not into the later move from regfile B.
                 */
                if (inst->src[0].pack) {
                        *last_inst(c) |= *unpack;
                        *unpack = 0;
                }
                *src0 = qpu_rb(31);
        } else {
                queue(c, qpu_a_MOV(qpu_ra(31), *src0));
                *src0 = qpu_ra(31);
        }
}

static void
set_last_dst_pack(struct vc4_compile *c, struct qinst *inst)
{
        bool had_pm = *last_inst(c) & QPU_PM;
        bool had_ws = *last_inst(c) & QPU_WS;
        uint32_t unpack = QPU_GET_FIELD(*last_inst(c), QPU_UNPACK);

        if (!inst->dst.pack)
                return;

        *last_inst(c) |= QPU_SET_FIELD(inst->dst.pack, QPU_PACK);

        if (qir_is_mul(inst)) {
                assert(!unpack || had_pm);
                *last_inst(c) |= QPU_PM;
        } else {
                assert(!unpack || !had_pm);
                assert(!had_ws); /* dst must be a-file to pack. */
        }
}
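
/* Copies an r4-landing result (SFU, TLB color load, TMU) into the
 * destination, or, when the destination was allocated to r4 itself, emits a
 * MOV to a NOP register just so a requested flags update (sf) still happens.
 */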
static void
handle_r4_qpu_write(struct vc4_compile *c, struct qinst *qinst,
                    struct qpu_reg dst)
{
        if (dst.mux != QPU_MUX_R4)
                queue(c, qpu_a_MOV(dst, qpu_r4()));
        else if (qinst->sf)
                queue(c, qpu_a_MOV(qpu_ra(QPU_W_NOP), qpu_r4()));
}

void
vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
{
        struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
        uint32_t inputs_remaining = c->num_inputs;
        uint32_t vpm_read_fifo_count = 0;
        uint32_t vpm_read_offset = 0;
        int last_vpm_read_index = -1;

        list_inithead(&c->qpu_inst_list);

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                /* There's a 4-entry FIFO for VPMVCD reads, each of which can
                 * load up to 16 dwords (4 vec4s) per vertex.
                 */
                while (inputs_remaining) {
                        uint32_t num_entries = MIN2(inputs_remaining, 16);
                        queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                                 vpm_read_offset |
                                                 0x00001a00 |
                                                 ((num_entries & 0xf) << 20)));
                        inputs_remaining -= num_entries;
                        vpm_read_offset += num_entries;
                        vpm_read_fifo_count++;
                }
                assert(vpm_read_fifo_count <= 4);
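
                /* The 0x00001a00 base in these VPM setup words appears to
                 * select horizontal, 32-bit-wide accesses with a stride of
                 * one vector; only the address and element-count fields
                 * differ between the read and write setup.
                 */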
                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }

        list_for_each_entry(struct qinst, qinst, &c->instructions, link) {
#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

                static const struct {
                        uint32_t op;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name}
#define M(name) [QOP_##name] = {QPU_M_##name}
                        A(FADD), A(FSUB), A(FMIN), A(FMAX),
                        A(FMINABS), A(FMAXABS), A(FTOI), A(ITOF),
                        A(ADD), A(SUB), A(SHL), A(SHR), A(ASR),
                        A(MIN), A(MAX), A(AND), A(OR), A(XOR), A(NOT),

                        M(FMUL), M(V8MULD), M(V8MIN), M(V8MAX),
                        M(V8ADDS), M(V8SUBS), M(MUL24),

                        /* If we replicate src[0] out to src[1], this works
                         * out the same as a MOV.
                         */
                        [QOP_MOV] = { QPU_A_OR },
                        [QOP_FMOV] = { QPU_A_FMAX },
                        [QOP_MMOV] = { QPU_M_V8MIN },
                };

                uint64_t unpack = 0;
                struct qpu_reg src[4];
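                /* Gather the sources, folding any needed register-unpack
                 * mode into "unpack" so it can be ORed into the instruction
                 * we finally emit.
                 */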
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                src[i] = temp_registers[index];
                                if (qinst->src[i].pack) {
                                        assert(!unpack ||
                                               unpack == qinst->src[i].pack);
                                        unpack = QPU_SET_FIELD(qinst->src[i].pack,
                                                               QPU_UNPACK);
                                        if (src[i].mux == QPU_MUX_R4)
                                                unpack |= QPU_PM;
                                }
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        case QFILE_SMALL_IMM:
                                src[i].mux = QPU_MUX_SMALL_IMM;
                                src[i].addr = qpu_encode_small_immediate(qinst->src[i].index);
                                /* This should only have returned a valid
                                 * small immediate field, not ~0 for failure.
                                 */
                                assert(src[i].addr <= 47);
                                break;
                        case QFILE_VPM:
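                                /* The VPM read FIFO pops entries in order,
                                 * so QIR must not have reordered reads of
                                 * successive vertex attributes.
                                 */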
                                assert((int)qinst->src[i].index >=
                                       last_vpm_read_index);
                                (void)last_vpm_read_index;
                                last_vpm_read_index = qinst->src[i].index;
                                src[i] = qpu_ra(QPU_R_VPM);
                                break;
                        }
                }

                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;
                case QFILE_TEMP:
                        dst = temp_registers[qinst->dst.index];
                        break;
                case QFILE_VPM:
                        dst = qpu_ra(QPU_W_VPM);
                        break;
                case QFILE_VARY:
                case QFILE_UNIF:
                case QFILE_SMALL_IMM:
                        assert(!"not reached");
                        break;
                }
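
                /* Most ALU translations below fold qinst->cond into the
                 * instruction's add/mul condition field; cases that can't do
                 * so must leave handled_qinst_cond false so the assert at
                 * the bottom of the loop catches unexpected conditions.
                 */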
                bool handled_qinst_cond = false;

                switch (qinst->op) {
                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]) | unpack);
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]) | unpack);
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]) | unpack);
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]) | unpack);
                                break;
                        }

                        handle_r4_qpu_write(c, qinst, dst);
                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_REV_FLAG:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_MS_REV_FLAGS)));
                        break;

                case QOP_MS_MASK:
                        src[1] = qpu_ra(QPU_R_MS_REV_FLAGS);
                        fixup_raddr_conflict(c, dst, &src[0], &src[1],
                                             qinst, &unpack);
                        queue(c, qpu_a_AND(qpu_ra(QPU_W_MS_FLAGS),
                                           src[0], src[1]) | unpack);
                        break;

                case QOP_FRAG_Z:
                case QOP_FRAG_W:
                        /* QOP_FRAG_Z/W don't emit instructions, just allocate
                         * the register to the Z/W payload.
                         */
                        break;

                case QOP_TLB_STENCIL_SETUP:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_STENCIL_SETUP),
                                           src[0]) | unpack);
                        break;

                case QOP_TLB_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z),
                                           src[0]) | unpack);
                        set_last_cond_add(c, qinst->cond);
                        handled_qinst_cond = true;
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);
                        handle_r4_qpu_write(c, qinst, dst);
                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]) | unpack);
                        set_last_cond_add(c, qinst->cond);
                        handled_qinst_cond = true;
                        break;

                case QOP_TLB_COLOR_WRITE_MS:
                        queue(c, qpu_a_MOV(qpu_tlbc_ms(), src[0]));
                        set_last_cond_add(c, qinst->cond);
                        handled_qinst_cond = true;
                        break;

                case QOP_VARY_ADD_C:
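                        /* Reading a varying leaves the (A*x + B*y) part of
                         * the interpolation in the register and loads r5
                         * with the C coefficient; the FADD completes it.
                         */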
                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()) | unpack);
                        break;

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]) | unpack);
                        break;

                case QOP_TEX_DIRECT:
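                        /* Direct texture fetch: compute the absolute TMU
                         * address by adding the offset to the base pointer,
                         * and write it to TMU0_S to kick off the lookup.
                         */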
                        fixup_raddr_conflict(c, dst, &src[0], &src[1],
                                             qinst, &unpack);
                        queue(c, qpu_a_ADD(qpu_rb(QPU_W_TMU0_S),
                                           src[0], src[1]) | unpack);
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);
                        handle_r4_qpu_write(c, qinst, dst);
                        break;

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* Skip emitting the MOV if it's a no-op. */
                        if (qir_is_raw_mov(qinst) &&
                            dst.mux == src[0].mux && dst.addr == src[0].addr) {
                                break;
                        }

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, dst, &src[0], &src[1],
                                             qinst, &unpack);

                        if (qir_is_mul(qinst)) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]) | unpack);
                                set_last_cond_mul(c, qinst->cond);
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst,
                                                    src[0], src[1]) | unpack);
                                set_last_cond_add(c, qinst->cond);
                        }
                        handled_qinst_cond = true;
                        set_last_dst_pack(c, qinst);

                        break;
                }

                assert(qinst->cond == QPU_COND_ALWAYS ||
                       handled_qinst_cond);

                if (qinst->sf)
                        *last_inst(c) |= QPU_SF;
        }

        uint32_t cycles = qpu_schedule_instructions(c);
        uint32_t inst_count_at_schedule_time = c->qpu_inst_count;

        /* thread end can't have VPM write or read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_VPM) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have uniform read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_UNIF ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_UNIF) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have TLB operations */
        if (qpu_inst_is_tlb(c->qpu_insts[c->qpu_inst_count - 1]))
                qpu_serialize_one_inst(c, qpu_NOP());

        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
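        /* The thread-end signal has two delay slots that still execute, so
         * pad with NOPs to keep them harmless.
         */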
        qpu_serialize_one_inst(c, qpu_NOP());
        qpu_serialize_one_inst(c, qpu_NOP());

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }
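
        /* Instructions added after scheduling (the trailing NOPs and any
         * workaround NOPs above) each cost a cycle that the scheduler's
         * estimate didn't include.
         */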
        cycles += c->qpu_inst_count - inst_count_at_schedule_time;

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d estimated cycles\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        cycles);
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);

        free(temp_registers);
}