/*
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <inttypes.h>

#include "vc4_context.h"
#include "vc4_qir.h"
#include "vc4_qpu.h"
#include "util/ralloc.h"
static void
vc4_dump_program(struct vc4_compile *c)
{
        fprintf(stderr, "%s prog %d/%d QPU:\n",
                qir_get_stage_name(c->stage),
                c->program_id, c->variant_id);

        for (int i = 0; i < c->qpu_inst_count; i++) {
                fprintf(stderr, "0x%016"PRIx64" ", c->qpu_insts[i]);
                vc4_qpu_disasm(&c->qpu_insts[i], 1);
                fprintf(stderr, "\n");
        }
}
static void
queue(struct vc4_compile *c, uint64_t inst)
{
        struct queued_qpu_inst *q = rzalloc(c, struct queued_qpu_inst);
        q->inst = inst;
        list_addtail(&q->link, &c->qpu_inst_list);
}
static uint64_t *
last_inst(struct vc4_compile *c)
{
        struct queued_qpu_inst *q =
                (struct queued_qpu_inst *)c->qpu_inst_list.prev;
        return &q->inst;
}
static void
set_last_cond_add(struct vc4_compile *c, uint32_t cond)
{
        *last_inst(c) = qpu_set_cond_add(*last_inst(c), cond);
}
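/* A note on the emission pattern used throughout this file (illustrative
 * sketch, not from the original source): instructions are built as raw
 * 64-bit encodings and queued, then condition or flag bits are patched onto
 * the most recently queued instruction:
 *
 *     queue(c, qpu_a_MOV(dst, src));
 *     *last_inst(c) |= QPU_SF;            // also update the flags
 *     set_last_cond_add(c, QPU_COND_ZS);  // predicate the add-pipe op
 */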
/**
 * Some special registers can be read from either file, which lets us resolve
 * raddr conflicts without extra MOVs.
 */
static bool
swap_file(struct qpu_reg *src)
{
        switch (src->addr) {
        case QPU_R_UNIF:
        case QPU_R_VARY:
                if (src->mux == QPU_MUX_SMALL_IMM) {
                        return false;
                } else {
                        if (src->mux == QPU_MUX_A)
                                src->mux = QPU_MUX_B;
                        else
                                src->mux = QPU_MUX_A;
                        return true;
                }

        default:
                return false;
        }
}
/**
 * This is used to resolve the fact that we might register-allocate two
 * different operands of an instruction to the same physical register file
 * even though instructions have only one field for the register file source
 * of each operand.
 *
 * In that case, we need to move one to a temporary that can be used in the
 * instruction, instead.  We reserve ra31/rb31 for this purpose.
 */
static void
fixup_raddr_conflict(struct vc4_compile *c,
                     struct qpu_reg dst,
                     struct qpu_reg *src0, struct qpu_reg *src1)
{
        uint32_t mux0 = src0->mux == QPU_MUX_SMALL_IMM ? QPU_MUX_B : src0->mux;
        uint32_t mux1 = src1->mux == QPU_MUX_SMALL_IMM ? QPU_MUX_B : src1->mux;

        if (mux0 <= QPU_MUX_R5 ||
            mux0 != mux1 ||
            (src0->addr == src1->addr &&
             src0->mux == src1->mux)) {
                return;
        }

        if (swap_file(src0) || swap_file(src1))
                return;

        if (mux0 == QPU_MUX_A) {
                queue(c, qpu_a_MOV(qpu_rb(31), *src0));
                *src0 = qpu_rb(31);
        } else {
                queue(c, qpu_a_MOV(qpu_ra(31), *src0));
                *src0 = qpu_ra(31);
        }
}
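/* Illustrative example (not from the original source): if register
 * allocation put both operands of an ADD in file A, say ra1 and ra2, the
 * single raddr_a field can't encode both, so we emit:
 *
 *     mov rb31, ra1
 *     add dst, rb31, ra2
 *
 * after which src0 names rb31 and the conflict is gone.
 */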
void
vc4_generate_code(struct vc4_context *vc4, struct vc4_compile *c)
{
        struct qpu_reg *temp_registers = vc4_register_allocate(vc4, c);
        bool discard = false;
        uint32_t inputs_remaining = c->num_inputs;
        uint32_t vpm_read_fifo_count = 0;
        uint32_t vpm_read_offset = 0;
        int last_vpm_read_index = -1;
        /* Map from the QIR ops enum order to QPU unpack bits. */
        static const uint32_t unpack_map[] = {
                QPU_UNPACK_8A,
                QPU_UNPACK_8B,
                QPU_UNPACK_8C,
                QPU_UNPACK_8D,
                QPU_UNPACK_16A_TO_F32,
                QPU_UNPACK_16B_TO_F32,
        };
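        /* The map is indexed by (qinst->op - QOP_UNPACK_8A_F), and likewise
         * for the _I ops, so for example QOP_UNPACK_8C_F selects
         * QPU_UNPACK_8C (illustrative note, assuming the QIR unpack ops are
         * declared in the same 8A/8B/8C/8D/16A/16B order used here).
         */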

        list_inithead(&c->qpu_inst_list);

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                /* There's a 4-entry FIFO for VPMVCD reads, each of which can
                 * load up to 16 dwords (4 vec4s) per vertex.
                 */
                while (inputs_remaining) {
                        uint32_t num_entries = MIN2(inputs_remaining, 16);
                        queue(c, qpu_load_imm_ui(qpu_vrsetup(),
                                                 vpm_read_offset |
                                                 0x00001a00 |
                                                 ((num_entries & 0xf) << 20)));
                        inputs_remaining -= num_entries;
                        vpm_read_offset += num_entries;
                        vpm_read_fifo_count++;
                }
                assert(vpm_read_fifo_count <= 4);

                queue(c, qpu_load_imm_ui(qpu_vwsetup(), 0x00001a00));
                break;
        case QSTAGE_FRAG:
                break;
        }
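        /* Worked example (illustrative, not from the original source): a
         * shader reading 24 input dwords gets two VPM read setups, one for
         * 16 entries and one for the remaining 8, consuming two of the four
         * FIFO slots.  Note that (16 & 0xf) == 0 is how the maximum of 16
         * entries is encoded in the 4-bit field.
         */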

        list_for_each_entry(struct qinst, qinst, &c->instructions, link) {
#if 0
                fprintf(stderr, "translating qinst to qpu: ");
                qir_dump_inst(qinst);
                fprintf(stderr, "\n");
#endif

                static const struct {
                        uint32_t op;
                } translate[] = {
#define A(name) [QOP_##name] = {QPU_A_##name}
#define M(name) [QOP_##name] = {QPU_M_##name}
                        A(FADD),
                        A(FSUB),
                        A(FMIN),
                        A(FMAX),
                        A(FMINABS),
                        A(FMAXABS),
                        A(FTOI),
                        A(ITOF),

                        A(ADD),
                        A(SUB),
                        A(SHL),
                        A(SHR),
                        A(ASR),
                        A(MIN),
                        A(MAX),
                        A(AND),
                        A(OR),
                        A(XOR),
                        A(NOT),

                        M(FMUL),
                        M(V8MULD),
                        M(V8MIN),
                        M(V8MAX),
                        M(V8ADDS),
                        M(V8SUBS),
                        M(MUL24),
                };
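                /* Illustrative note (not in the original): the designated
                 * initializers give a direct QIR-to-QPU opcode map, e.g.
                 * translate[QOP_FADD].op == QPU_A_FADD, while entries that
                 * were never initialized read as 0, which the default case
                 * below asserts against.
                 */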

                struct qpu_reg src[4];
                for (int i = 0; i < qir_get_op_nsrc(qinst->op); i++) {
                        int index = qinst->src[i].index;
                        switch (qinst->src[i].file) {
                        case QFILE_NULL:
                                src[i] = qpu_rn(0);
                                break;
                        case QFILE_TEMP:
                                src[i] = temp_registers[index];
                                break;
                        case QFILE_UNIF:
                                src[i] = qpu_unif();
                                break;
                        case QFILE_VARY:
                                src[i] = qpu_vary();
                                break;
                        case QFILE_SMALL_IMM:
                                src[i].mux = QPU_MUX_SMALL_IMM;
                                src[i].addr = qpu_encode_small_immediate(qinst->src[i].index);
                                /* This should only have returned a valid
                                 * small immediate field, not ~0 for failure.
                                 */
                                assert(src[i].addr <= 47);
                                break;
                        case QFILE_VPM:
                                assert((int)qinst->src[i].index >=
                                       last_vpm_read_index);
                                (void)last_vpm_read_index;
                                last_vpm_read_index = qinst->src[i].index;
                                src[i] = qpu_ra(QPU_R_VPM);
                                break;
                        }
                }

                struct qpu_reg dst;
                switch (qinst->dst.file) {
                case QFILE_NULL:
                        dst = qpu_ra(QPU_W_NOP);
                        break;
                case QFILE_TEMP:
                        dst = temp_registers[qinst->dst.index];
                        break;
                case QFILE_VPM:
                        dst = qpu_ra(QPU_W_VPM);
                        break;
                case QFILE_VARY:
                case QFILE_UNIF:
                case QFILE_SMALL_IMM:
                        assert(!"not reached");
                        break;
                }

                switch (qinst->op) {
                case QOP_MOV:
                        /* Skip emitting the MOV if it's a no-op. */
                        if (dst.mux == QPU_MUX_A || dst.mux == QPU_MUX_B ||
                            dst.mux != src[0].mux || dst.addr != src[0].addr) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                        }
                        break;
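                /* In other words (clarifying note, not in the original):
                 * only a MOV from an accumulator to that same accumulator,
                 * e.g. "mov r2, r2", is dropped; writes to the A/B register
                 * files are always emitted.
                 */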

                case QOP_SEL_X_0_ZS:
                case QOP_SEL_X_0_ZC:
                case QOP_SEL_X_0_NS:
                case QOP_SEL_X_0_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_0_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_XOR(dst, qpu_r0(), qpu_r0()));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_0_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;
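                /* Each select lowers to two predicated instructions: a MOV
                 * of src[0] under the requested condition, then an
                 * XOR-with-self (a zero) under the opposite condition.  The
                 * "^ 1" works because the QPU_COND_* codes pair up as ZS/ZC
                 * and NS/NC (clarifying note, not in the original).
                 */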

                case QOP_SEL_X_Y_ZS:
                case QOP_SEL_X_Y_ZC:
                case QOP_SEL_X_Y_NS:
                case QOP_SEL_X_Y_NC:
                        queue(c, qpu_a_MOV(dst, src[0]));
                        set_last_cond_add(c, qinst->op - QOP_SEL_X_Y_ZS +
                                          QPU_COND_ZS);

                        queue(c, qpu_a_MOV(dst, src[1]));
                        set_last_cond_add(c, ((qinst->op - QOP_SEL_X_Y_ZS) ^
                                              1) + QPU_COND_ZS);
                        break;

                case QOP_RCP:
                case QOP_RSQ:
                case QOP_EXP2:
                case QOP_LOG2:
                        switch (qinst->op) {
                        case QOP_RCP:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIP),
                                                   src[0]));
                                break;
                        case QOP_RSQ:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_RECIPSQRT),
                                                   src[0]));
                                break;
                        case QOP_EXP2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_EXP),
                                                   src[0]));
                                break;
                        case QOP_LOG2:
                                queue(c, qpu_a_MOV(qpu_rb(QPU_W_SFU_LOG),
                                                   src[0]));
                                break;
                        default:
                                abort();
                        }

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));

                        break;
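                /* Writing an operand to one of the SFU registers kicks off
                 * the special function unit, and its result always lands in
                 * the r4 accumulator, so a copy out of r4 is only needed
                 * when the destination wasn't allocated there (clarifying
                 * note, not in the original).
                 */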

                case QOP_PACK_8888_F:
                        queue(c, qpu_m_MOV(dst, src[0]));
                        *last_inst(c) |= QPU_PM;
                        *last_inst(c) |= QPU_SET_FIELD(QPU_PACK_MUL_8888,
                                                       QPU_PACK);
                        break;

                case QOP_PACK_8A_F:
                case QOP_PACK_8B_F:
                case QOP_PACK_8C_F:
                case QOP_PACK_8D_F:
                        queue(c,
                              qpu_m_MOV(dst, src[0]) |
                              QPU_PM |
                              QPU_SET_FIELD(QPU_PACK_MUL_8A +
                                            qinst->op - QOP_PACK_8A_F,
                                            QPU_PACK));
                        break;

                case QOP_FRAG_X:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_ra(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_Y:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_XY_PIXEL_COORD)));
                        break;

                case QOP_FRAG_REV_FLAG:
                        queue(c, qpu_a_ITOF(dst,
                                            qpu_rb(QPU_R_MS_REV_FLAGS)));
                        break;

                case QOP_FRAG_Z:
                case QOP_FRAG_W:
                        /* QOP_FRAG_Z/W don't emit instructions, just allocate
                         * the register to the Z/W payload.
                         */
                        break;

                case QOP_TLB_DISCARD_SETUP:
                        discard = true;
                        queue(c, qpu_a_MOV(src[0], src[0]));
                        *last_inst(c) |= QPU_SF;
                        break;
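                /* The self-MOV with QPU_SF set updates the Z condition flags
                 * from the discard condition; the TLB Z and color writes
                 * below are then predicated on ZS so that discarded pixels
                 * never reach the tilebuffer (clarifying note, not in the
                 * original).
                 */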

                case QOP_TLB_STENCIL_SETUP:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_STENCIL_SETUP), src[0]));
                        break;

                case QOP_TLB_Z_WRITE:
                        queue(c, qpu_a_MOV(qpu_ra(QPU_W_TLB_Z), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_TLB_COLOR_READ:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_COLOR_LOAD);

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_TLB_COLOR_WRITE:
                        queue(c, qpu_a_MOV(qpu_tlbc(), src[0]));
                        if (discard) {
                                set_last_cond_add(c, QPU_COND_ZS);
                        }
                        break;

                case QOP_VARY_ADD_C:
                        queue(c, qpu_a_FADD(dst, src[0], qpu_r5()));
                        break;

                case QOP_TEX_S:
                case QOP_TEX_T:
                case QOP_TEX_R:
                case QOP_TEX_B:
                        queue(c, qpu_a_MOV(qpu_rb(QPU_W_TMU0_S +
                                                  (qinst->op - QOP_TEX_S)),
                                           src[0]));
                        break;

                case QOP_TEX_DIRECT:
                        fixup_raddr_conflict(c, dst, &src[0], &src[1]);
                        queue(c, qpu_a_ADD(qpu_rb(QPU_W_TMU0_S), src[0], src[1]));
                        break;

                case QOP_TEX_RESULT:
                        queue(c, qpu_NOP());
                        *last_inst(c) = qpu_set_sig(*last_inst(c),
                                                    QPU_SIG_LOAD_TMU0);

                        if (dst.mux != QPU_MUX_R4)
                                queue(c, qpu_a_MOV(dst, qpu_r4()));
                        break;

                case QOP_UNPACK_8A_F:
                case QOP_UNPACK_8B_F:
                case QOP_UNPACK_8C_F:
                case QOP_UNPACK_8D_F:
                case QOP_UNPACK_16A_F:
                case QOP_UNPACK_16B_F: {
                        if (src[0].mux == QPU_MUX_R4) {
                                queue(c, qpu_a_MOV(dst, src[0]));
                                *last_inst(c) |= QPU_PM;
                                *last_inst(c) |= QPU_SET_FIELD(QPU_UNPACK_8A +
                                                               (qinst->op -
                                                                QOP_UNPACK_8A_F),
                                                               QPU_UNPACK);
                        } else {
                                assert(src[0].mux == QPU_MUX_A);

                                /* Since we're setting the pack bits, if the
                                 * destination is in A it would get re-packed.
                                 */
                                queue(c, qpu_a_FMAX((dst.mux == QPU_MUX_A ?
                                                     qpu_rb(31) : dst),
                                                    src[0], src[0]) |
                                      QPU_SET_FIELD(unpack_map[qinst->op -
                                                               QOP_UNPACK_8A_F],
                                                    QPU_UNPACK));

                                if (dst.mux == QPU_MUX_A) {
                                        queue(c, qpu_a_MOV(dst, qpu_rb(31)));
                                }
                        }
                        break;
                }
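                /* Two unpack paths (clarifying note, not in the original):
                 * with the source in r4 the PM bit selects r4 unpack mode,
                 * so a plain MOV suffices; with the source in regfile A, the
                 * unpack field applies to the A-file read, and the result is
                 * routed through rb31 when the destination is also in A so
                 * the write isn't re-interpreted by the pack unit.
                 */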

                case QOP_UNPACK_8A_I:
                case QOP_UNPACK_8B_I:
                case QOP_UNPACK_8C_I:
                case QOP_UNPACK_8D_I:
                case QOP_UNPACK_16A_I:
                case QOP_UNPACK_16B_I: {
                        assert(src[0].mux == QPU_MUX_A);

                        /* Since we're setting the pack bits, if the
                         * destination is in A it would get re-packed.
                         */
                        queue(c, qpu_a_MOV((dst.mux == QPU_MUX_A ?
                                            qpu_rb(31) : dst), src[0]));
                        *last_inst(c) |= QPU_SET_FIELD(unpack_map[qinst->op -
                                                                  QOP_UNPACK_8A_I],
                                                       QPU_UNPACK);

                        if (dst.mux == QPU_MUX_A) {
                                queue(c, qpu_a_MOV(dst, qpu_rb(31)));
                        }
                        break;
                }

                default:
                        assert(qinst->op < ARRAY_SIZE(translate));
                        assert(translate[qinst->op].op != 0); /* NOPs */

                        /* If we have only one source, put it in the second
                         * argument slot as well so that we don't take up
                         * another raddr just to get unused data.
                         */
                        if (qir_get_op_nsrc(qinst->op) == 1)
                                src[1] = src[0];

                        fixup_raddr_conflict(c, dst, &src[0], &src[1]);

                        if (qir_is_mul(qinst)) {
                                queue(c, qpu_m_alu2(translate[qinst->op].op,
                                                    dst, src[0], src[1]));
                                if (qinst->dst.pack) {
                                        *last_inst(c) |= QPU_PM;
                                        *last_inst(c) |= QPU_SET_FIELD(qinst->dst.pack,
                                                                       QPU_PACK);
                                }
                        } else {
                                queue(c, qpu_a_alu2(translate[qinst->op].op,
                                                    dst, src[0], src[1]));
                                if (qinst->dst.pack) {
                                        assert(dst.mux == QPU_MUX_A);
                                        *last_inst(c) |= QPU_SET_FIELD(qinst->dst.pack,
                                                                       QPU_PACK);
                                }
                        }
                        break;
                }

                if (qinst->sf) {
                        assert(!qir_is_multi_instruction(qinst));
                        *last_inst(c) |= QPU_SF;
                }
        }

        qpu_schedule_instructions(c);

        /* thread end can't have VPM write or read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_ADD) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_WADDR_MUL) == QPU_W_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_VPM ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_VPM) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have uniform read */
        if (QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_A) == QPU_R_UNIF ||
            QPU_GET_FIELD(c->qpu_insts[c->qpu_inst_count - 1],
                          QPU_RADDR_B) == QPU_R_UNIF) {
                qpu_serialize_one_inst(c, qpu_NOP());
        }

        /* thread end can't have TLB operations */
        if (qpu_inst_is_tlb(c->qpu_insts[c->qpu_inst_count - 1]))
                qpu_serialize_one_inst(c, qpu_NOP());

        c->qpu_insts[c->qpu_inst_count - 1] =
                qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                            QPU_SIG_PROG_END);
        qpu_serialize_one_inst(c, qpu_NOP());
        qpu_serialize_one_inst(c, qpu_NOP());
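        /* The two NOPs serve as the PROG_END signal's delay slots: the
         * signal takes effect only after the two following instructions
         * execute (clarifying note, not in the original).
         */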

        switch (c->stage) {
        case QSTAGE_VERT:
        case QSTAGE_COORD:
                break;
        case QSTAGE_FRAG:
                c->qpu_insts[c->qpu_inst_count - 1] =
                        qpu_set_sig(c->qpu_insts[c->qpu_inst_count - 1],
                                    QPU_SIG_SCOREBOARD_UNLOCK);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QPU)
                vc4_dump_program(c);

        vc4_qpu_validate(c->qpu_insts, c->qpu_inst_count);

        free(temp_registers);
}