1 /**************************************************************************
3 * Copyright 2009 VMware, Inc.
4 * Copyright 2007-2008 Tungsten Graphics, Inc., Cedar Park, Texas.
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 **************************************************************************/
31 * TGSI to LLVM IR translation -- SoA.
33 * @author Jose Fonseca <jfonseca@vmware.com>
35 * Based on tgsi_sse2.c code written by Michal Krol, Keith Whitwell,
36 * Brian Paul, and others.
39 #include "pipe/p_config.h"
40 #include "pipe/p_shader_tokens.h"
41 #include "util/u_debug.h"
42 #include "util/u_math.h"
43 #include "util/u_memory.h"
44 #include "tgsi/tgsi_dump.h"
45 #include "tgsi/tgsi_exec.h"
46 #include "tgsi/tgsi_info.h"
47 #include "tgsi/tgsi_parse.h"
48 #include "tgsi/tgsi_util.h"
49 #include "tgsi/tgsi_scan.h"
50 #include "lp_bld_tgsi_action.h"
51 #include "lp_bld_type.h"
52 #include "lp_bld_const.h"
53 #include "lp_bld_arit.h"
54 #include "lp_bld_bitarit.h"
55 #include "lp_bld_gather.h"
56 #include "lp_bld_init.h"
57 #include "lp_bld_logic.h"
58 #include "lp_bld_swizzle.h"
59 #include "lp_bld_flow.h"
60 #include "lp_bld_quad.h"
61 #include "lp_bld_tgsi.h"
62 #include "lp_bld_limits.h"
63 #include "lp_bld_debug.h"
64 #include "lp_bld_printf.h"
65 #include "lp_bld_sample.h"
66 #include "lp_bld_struct.h"
/**
 * Initialize the per-shader execution-mask state: clear the cond/loop/call
 * stacks, set every mask vector to all-ones (all SIMD lanes active), and
 * allocate an i32 "loop limiter" counter used to bound runaway loops.
 * NOTE(review): this excerpt is elided (braces and the limiter's initial
 * store are missing) -- verify against upstream lp_bld_tgsi_soa.c.
 */
69 static void lp_exec_mask_init(struct lp_exec_mask *mask, struct lp_build_context *bld)
71 LLVMTypeRef int_type = LLVMInt32TypeInContext(bld->gallivm->context);
72 LLVMBuilderRef builder = bld->gallivm->builder;
/* No masking active yet; all stacks empty. */
75 mask->has_mask = FALSE;
76 mask->ret_in_main = FALSE;
77 mask->cond_stack_size = 0;
78 mask->loop_stack_size = 0;
79 mask->call_stack_size = 0;
81 mask->int_vec_type = lp_build_int_vec_type(bld->gallivm, mask->bld->type);
/* All-ones == every lane enabled for every mask kind. */
82 mask->exec_mask = mask->ret_mask = mask->break_mask = mask->cont_mask = mask->cond_mask =
83 LLVMConstAllOnes(mask->int_vec_type);
85 mask->loop_limiter = lp_build_alloca(bld->gallivm, int_type, "looplimiter");
/* Seed the limiter with the maximum permitted iteration count. */
89 LLVMConstInt(int_type, LP_MAX_TGSI_LOOP_ITERATIONS, false),
/**
 * Recompute exec_mask from the component masks: inside a loop it is the AND
 * of cond/break/cont masks; outside it is just cond_mask; the ret mask is
 * ANDed in when inside a subroutine or after a return in main.  has_mask
 * records whether any masking is currently in effect.
 * NOTE(review): excerpt is elided (else branches, AND operands missing) --
 * verify against upstream.
 */
93 static void lp_exec_mask_update(struct lp_exec_mask *mask)
95 LLVMBuilderRef builder = mask->bld->gallivm->builder;
97 if (mask->loop_stack_size) {
98 /*for loops we need to update the entire mask at runtime */
100 assert(mask->break_mask);
101 tmp = LLVMBuildAnd(builder,
105 mask->exec_mask = LLVMBuildAnd(builder,
/* No loop active: the conditional mask alone drives execution. */
110 mask->exec_mask = mask->cond_mask;
112 if (mask->call_stack_size || mask->ret_in_main) {
113 mask->exec_mask = LLVMBuildAnd(builder,
/* Any non-empty stack (or a ret in main) means lanes may be disabled. */
119 mask->has_mask = (mask->cond_stack_size > 0 ||
120 mask->loop_stack_size > 0 ||
121 mask->call_stack_size > 0 ||
/**
 * Enter an IF: push the current cond_mask, then AND it with the new
 * condition vector 'val' so only lanes passing the condition stay live.
 * NOTE(review): excerpt is elided (second parameter, AND operands missing).
 */
125 static void lp_exec_mask_cond_push(struct lp_exec_mask *mask,
128 LLVMBuilderRef builder = mask->bld->gallivm->builder;
130 assert(mask->cond_stack_size < LP_MAX_TGSI_NESTING);
131 if (mask->cond_stack_size == 0) {
/* At top level the cond mask must still be "all lanes on". */
132 assert(mask->cond_mask == LLVMConstAllOnes(mask->int_vec_type));
134 mask->cond_stack[mask->cond_stack_size++] = mask->cond_mask;
135 assert(LLVMTypeOf(val) == mask->int_vec_type);
136 mask->cond_mask = LLVMBuildAnd(builder,
140 lp_exec_mask_update(mask);
/**
 * Enter an ELSE: invert the current cond_mask relative to the parent mask
 * saved on the cond stack, so the complementary set of lanes runs.
 * NOTE(review): excerpt is elided (AND operands missing).
 */
143 static void lp_exec_mask_cond_invert(struct lp_exec_mask *mask)
145 LLVMBuilderRef builder = mask->bld->gallivm->builder;
146 LLVMValueRef prev_mask;
147 LLVMValueRef inv_mask;
149 assert(mask->cond_stack_size);
150 prev_mask = mask->cond_stack[mask->cond_stack_size - 1];
151 if (mask->cond_stack_size == 1) {
/* Outermost IF: the saved parent mask must be all-ones. */
152 assert(prev_mask == LLVMConstAllOnes(mask->int_vec_type));
155 inv_mask = LLVMBuildNot(builder, mask->cond_mask, "");
/* New mask = lanes live in the parent but NOT in the taken IF branch. */
157 mask->cond_mask = LLVMBuildAnd(builder,
160 lp_exec_mask_update(mask);
/**
 * Leave an IF/ELSE (ENDIF): restore the parent cond_mask from the stack
 * and recompute the combined execution mask.
 */
163 static void lp_exec_mask_cond_pop(struct lp_exec_mask *mask)
165 assert(mask->cond_stack_size);
166 mask->cond_mask = mask->cond_stack[--mask->cond_stack_size];
167 lp_exec_mask_update(mask);
/**
 * Enter a loop (BGNLOOP): save loop state on the loop stack, store the
 * break mask to memory (it must survive the loop back-edge), start a new
 * basic block for the loop body, and reload the break mask inside it.
 * NOTE(review): excerpt is elided (braces and some statements missing).
 */
170 static void lp_exec_bgnloop(struct lp_exec_mask *mask)
172 LLVMBuilderRef builder = mask->bld->gallivm->builder;
174 if (mask->loop_stack_size == 0) {
/* Outermost loop: loop state must be pristine. */
175 assert(mask->loop_block == NULL);
176 assert(mask->cont_mask == LLVMConstAllOnes(mask->int_vec_type));
177 assert(mask->break_mask == LLVMConstAllOnes(mask->int_vec_type));
178 assert(mask->break_var == NULL);
181 assert(mask->loop_stack_size < LP_MAX_TGSI_NESTING);
/* Push the enclosing loop's state so ENDLOOP can restore it. */
183 mask->loop_stack[mask->loop_stack_size].loop_block = mask->loop_block;
184 mask->loop_stack[mask->loop_stack_size].cont_mask = mask->cont_mask;
185 mask->loop_stack[mask->loop_stack_size].break_mask = mask->break_mask;
186 mask->loop_stack[mask->loop_stack_size].break_var = mask->break_var;
187 ++mask->loop_stack_size;
/* break_mask lives in an alloca so it survives across the back-edge. */
189 mask->break_var = lp_build_alloca(mask->bld->gallivm, mask->int_vec_type, "");
190 LLVMBuildStore(builder, mask->break_mask, mask->break_var);
192 mask->loop_block = lp_build_insert_new_block(mask->bld->gallivm, "bgnloop");
194 LLVMBuildBr(builder, mask->loop_block);
195 LLVMPositionBuilderAtEnd(builder, mask->loop_block);
197 mask->break_mask = LLVMBuildLoad(builder, mask->break_var, "");
199 lp_exec_mask_update(mask);
/**
 * BRK: permanently disable (in break_mask) every lane currently executing,
 * by ANDing break_mask with the negation of the current exec_mask.
 * NOTE(review): excerpt is elided (Not/And operands missing).
 */
202 static void lp_exec_break(struct lp_exec_mask *mask)
204 LLVMBuilderRef builder = mask->bld->gallivm->builder;
205 LLVMValueRef exec_mask = LLVMBuildNot(builder,
209 mask->break_mask = LLVMBuildAnd(builder,
211 exec_mask, "break_full");
213 lp_exec_mask_update(mask);
/**
 * Conditional break (e.g. BREAKC): like lp_exec_break but only lanes for
 * which 'cond' is true are removed from break_mask.
 * NOTE(review): excerpt is elided (cond parameter line missing).
 */
216 static void lp_exec_break_condition(struct lp_exec_mask *mask,
219 LLVMBuilderRef builder = mask->bld->gallivm->builder;
220 LLVMValueRef exec_mask = LLVMBuildNot(builder,
/* Restrict the break to lanes where the condition holds. */
224 exec_mask = LLVMBuildAnd(builder, exec_mask, cond, "");
226 mask->break_mask = LLVMBuildAnd(builder,
228 exec_mask, "break_full");
230 lp_exec_mask_update(mask);
/**
 * CONT: disable (in cont_mask) every lane currently executing until the
 * loop's next iteration; restored at ENDLOOP.
 * NOTE(review): excerpt is elided (Not/And operands missing).
 */
233 static void lp_exec_continue(struct lp_exec_mask *mask)
235 LLVMBuilderRef builder = mask->bld->gallivm->builder;
236 LLVMValueRef exec_mask = LLVMBuildNot(builder,
240 mask->cont_mask = LLVMBuildAnd(builder,
244 lp_exec_mask_update(mask);
/**
 * ENDLOOP: restore the continue mask, persist the break mask, decrement the
 * loop limiter, and emit the back-edge branch: loop again while any lane is
 * still active AND the limiter is positive; otherwise fall through to the
 * "endloop" block and pop the enclosing loop's state.
 * NOTE(review): excerpt is elided (several builder-call operands missing).
 */
248 static void lp_exec_endloop(struct gallivm_state *gallivm,
249 struct lp_exec_mask *mask)
251 LLVMBuilderRef builder = mask->bld->gallivm->builder;
252 LLVMBasicBlockRef endloop;
253 LLVMTypeRef int_type = LLVMInt32TypeInContext(mask->bld->gallivm->context);
/* Integer wide enough to hold the whole mask vector as one scalar,
 * so "any lane active" becomes a single compare against zero. */
254 LLVMTypeRef reg_type = LLVMIntTypeInContext(gallivm->context,
255 mask->bld->type.width *
256 mask->bld->type.length);
257 LLVMValueRef i1cond, i2cond, icond, limiter;
259 assert(mask->break_mask);
262 * Restore the cont_mask, but don't pop
264 assert(mask->loop_stack_size);
265 mask->cont_mask = mask->loop_stack[mask->loop_stack_size - 1].cont_mask;
266 lp_exec_mask_update(mask);
269 * Unlike the continue mask, the break_mask must be preserved across loop
272 LLVMBuildStore(builder, mask->break_mask, mask->break_var);
274 /* Decrement the loop limiter */
275 limiter = LLVMBuildLoad(builder, mask->loop_limiter, "");
277 limiter = LLVMBuildSub(
280 LLVMConstInt(int_type, 1, false),
283 LLVMBuildStore(builder, limiter, mask->loop_limiter);
285 /* i1cond = (mask != 0) */
286 i1cond = LLVMBuildICmp(
289 LLVMBuildBitCast(builder, mask->exec_mask, reg_type, ""),
290 LLVMConstNull(reg_type), "");
292 /* i2cond = (looplimiter > 0) */
293 i2cond = LLVMBuildICmp(
297 LLVMConstNull(int_type), "");
299 /* if( i1cond && i2cond ) */
300 icond = LLVMBuildAnd(builder, i1cond, i2cond, "");
302 endloop = lp_build_insert_new_block(mask->bld->gallivm, "endloop");
304 LLVMBuildCondBr(builder,
305 icond, mask->loop_block, endloop);
307 LLVMPositionBuilderAtEnd(builder, endloop);
/* Pop the enclosing loop's saved state. */
309 assert(mask->loop_stack_size);
310 --mask->loop_stack_size;
311 mask->loop_block = mask->loop_stack[mask->loop_stack_size].loop_block;
312 mask->cont_mask = mask->loop_stack[mask->loop_stack_size].cont_mask;
313 mask->break_mask = mask->loop_stack[mask->loop_stack_size].break_mask;
314 mask->break_var = mask->loop_stack[mask->loop_stack_size].break_var;
316 lp_exec_mask_update(mask);
319 /* stores val into an address pointed to by dst.
320 * mask->exec_mask is used to figure out which bits of val
321 * should be stored into the address
322 * (0 means don't store this bit, 1 means do store).
 *
 * NOTE(review): excerpt is elided (pred/val/dst parameter lines and the
 * select operands are missing) -- verify against upstream.
 */
324 static void lp_exec_mask_store(struct lp_exec_mask *mask,
325 struct lp_build_context *bld_store,
330 LLVMBuilderRef builder = mask->bld->gallivm->builder;
332 /* Mix the predicate and execution mask */
333 if (mask->has_mask) {
335 pred = LLVMBuildAnd(builder, pred, mask->exec_mask, "");
337 pred = mask->exec_mask;
/* Masked path: read-modify-write via a per-lane select. */
342 LLVMValueRef real_val, dst_val;
344 dst_val = LLVMBuildLoad(builder, dst, "");
345 real_val = lp_build_select(bld_store,
349 LLVMBuildStore(builder, real_val, dst);
/* Unmasked path: plain store. */
351 LLVMBuildStore(builder, val, dst);
/**
 * CAL: push the return pc and the current ret_mask onto the call stack.
 * NOTE(review): excerpt is elided (parameters and the pc update missing).
 */
354 static void lp_exec_mask_call(struct lp_exec_mask *mask,
358 assert(mask->call_stack_size < LP_MAX_TGSI_NESTING);
359 mask->call_stack[mask->call_stack_size].pc = *pc;
360 mask->call_stack[mask->call_stack_size].ret_mask = mask->ret_mask;
361 mask->call_stack_size++;
/**
 * RET: if executed unconditionally in main, it is a no-op (handled by the
 * elided early-return); otherwise remove the currently-active lanes from
 * ret_mask so they skip the rest of the subroutine (or of main).
 * NOTE(review): excerpt is elided (early return, Not/And operands missing).
 */
365 static void lp_exec_mask_ret(struct lp_exec_mask *mask, int *pc)
367 LLVMBuilderRef builder = mask->bld->gallivm->builder;
368 LLVMValueRef exec_mask;
370 if (mask->cond_stack_size == 0 &&
371 mask->loop_stack_size == 0 &&
372 mask->call_stack_size == 0) {
373 /* returning from main() */
378 if (mask->call_stack_size == 0) {
380 * This requires special handling since we need to ensure
381 * we don't drop the mask even if we have no call stack
382 * (e.g. after a ret in a if clause after the endif)
384 mask->ret_in_main = TRUE;
387 exec_mask = LLVMBuildNot(builder,
391 mask->ret_mask = LLVMBuildAnd(builder,
393 exec_mask, "ret_full");
395 lp_exec_mask_update(mask);
/* BGNSUB hook -- body not visible in this excerpt (presumably empty;
 * verify against upstream). */
398 static void lp_exec_mask_bgnsub(struct lp_exec_mask *mask)
/**
 * ENDSUB: pop the call stack, restoring the caller's pc and ret_mask,
 * then recompute the execution mask.
 */
402 static void lp_exec_mask_endsub(struct lp_exec_mask *mask, int *pc)
404 assert(mask->call_stack_size);
405 mask->call_stack_size--;
406 *pc = mask->call_stack[mask->call_stack_size].pc;
407 mask->ret_mask = mask->call_stack[mask->call_stack_size].ret_mask;
408 lp_exec_mask_update(mask);
413 * Return pointer to a temporary register channel (src or dest).
414 * Note that indirect addressing cannot be handled here.
415 * \param index which temporary register
416 * \param chan which channel of the temp register.
419 lp_get_temp_ptr_soa(struct lp_build_tgsi_soa_context *bld,
423 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
/* Temps may live either in one flat alloca array (when the shader uses
 * indirect temp addressing) or in per-reg/per-chan allocas. */
425 if (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
426 LLVMValueRef lindex = lp_build_const_int32(bld->bld_base.base.gallivm, index * 4 + chan);
427 return LLVMBuildGEP(builder, bld->temps_array, &lindex, 1, "");
430 return bld->temps[index][chan];
435 * Return pointer to a output register channel (src or dest).
436 * Note that indirect addressing cannot be handled here.
437 * \param index which output register
438 * \param chan which channel of the output register.
441 lp_get_output_ptr(struct lp_build_tgsi_soa_context *bld,
445 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
/* Mirrors lp_get_temp_ptr_soa: flat array when outputs are addressed
 * indirectly, otherwise per-reg/per-chan allocas. */
447 if (bld->indirect_files & (1 << TGSI_FILE_OUTPUT)) {
448 LLVMValueRef lindex = lp_build_const_int32(bld->bld_base.base.gallivm,
450 return LLVMBuildGEP(builder, bld->outputs_array, &lindex, 1, "");
453 return bld->outputs[index][chan];
458 * If we have indirect addressing in outputs copy our alloca array
459 * to the outputs slots specified by the caller to make sure
460 * our outputs are delivered consistently via the same interface.
463 gather_outputs(struct lp_build_tgsi_soa_context * bld)
465 if ((bld->indirect_files & (1 << TGSI_FILE_OUTPUT))) {
466 unsigned index, chan;
467 assert(bld->bld_base.info->num_outputs <=
468 bld->bld_base.info->file_max[TGSI_FILE_OUTPUT] + 1);
/* Re-point each outputs[index][chan] at the matching slot of the
 * flat outputs_array alloca. */
469 for (index = 0; index < bld->bld_base.info->num_outputs; ++index) {
470 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
471 bld->outputs[index][chan] = lp_get_output_ptr(bld, index, chan);
/**
 * Gather one scalar per lane from base_ptr at the per-lane offsets in
 * 'indexes', assembling them into a result vector.
 *
479 * XXX the lp_build_gather() function should be capable of doing this
480 * with a little work.
483 build_gather(struct lp_build_context *bld,
484 LLVMValueRef base_ptr,
485 LLVMValueRef indexes)
487 LLVMBuilderRef builder = bld->gallivm->builder;
488 LLVMValueRef res = bld->undef;
492 * Loop over elements of index_vec, load scalar value, insert it into 'res'.
494 for (i = 0; i < bld->type.length; i++) {
495 LLVMValueRef ii = lp_build_const_int32(bld->gallivm, i);
496 LLVMValueRef index = LLVMBuildExtractElement(builder,
498 LLVMValueRef scalar_ptr = LLVMBuildGEP(builder, base_ptr,
499 &index, 1, "gather_ptr");
500 LLVMValueRef scalar = LLVMBuildLoad(builder, scalar_ptr, "");
502 res = LLVMBuildInsertElement(builder, res, scalar, ii, "");
510 * Scatter/store vector.
 *
 * Store each lane of 'values' to base_ptr at the per-lane offsets in
 * 'indexes', honoring the combined predicate/execution mask (a masked
 * lane keeps the destination's old value via a per-element select).
 * NOTE(review): excerpt is elided (pred/values parameters and some
 * control flow missing) -- verify against upstream.
513 emit_mask_scatter(struct lp_build_tgsi_soa_context *bld,
514 LLVMValueRef base_ptr,
515 LLVMValueRef indexes,
517 struct lp_exec_mask *mask,
520 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
521 LLVMBuilderRef builder = gallivm->builder;
524 /* Mix the predicate and execution mask */
525 if (mask->has_mask) {
527 pred = LLVMBuildAnd(builder, pred, mask->exec_mask, "");
530 pred = mask->exec_mask;
535 * Loop over elements of index_vec, store scalar value.
537 for (i = 0; i < bld->bld_base.base.type.length; i++) {
538 LLVMValueRef ii = lp_build_const_int32(gallivm, i);
539 LLVMValueRef index = LLVMBuildExtractElement(builder, indexes, ii, "");
540 LLVMValueRef scalar_ptr = LLVMBuildGEP(builder, base_ptr, &index, 1, "scatter_ptr");
541 LLVMValueRef val = LLVMBuildExtractElement(builder, values, ii, "scatter_val");
542 LLVMValueRef scalar_pred = pred ?
543 LLVMBuildExtractElement(builder, pred, ii, "scatter_pred") : NULL;
/* Debug trace of each scattered element (guard elided in excerpt). */
546 lp_build_printf(gallivm, "scatter %d: val %f at %d %p\n",
547 ii, val, index, scalar_ptr);
/* Predicated path: blend new value with existing memory contents. */
550 LLVMValueRef real_val, dst_val;
551 dst_val = LLVMBuildLoad(builder, scalar_ptr, "");
552 real_val = lp_build_select(&bld->elem_bld, scalar_pred, val, dst_val);
553 LLVMBuildStore(builder, real_val, scalar_ptr);
556 LLVMBuildStore(builder, val, scalar_ptr);
563 * Read the current value of the ADDR register, convert the floats to
564 * ints, add the base index and return the vector of offsets.
565 * The offsets will be used to index into the constant buffer or
566 * temporary register file.
 *
 * NOTE(review): excerpt is elided (default case, return statement,
 * max-index computation operands missing) -- verify against upstream.
569 get_indirect_index(struct lp_build_tgsi_soa_context *bld,
570 unsigned reg_file, unsigned reg_index,
571 const struct tgsi_ind_register *indirect_reg)
573 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
574 struct lp_build_context *uint_bld = &bld->bld_base.uint_bld;
575 /* always use X component of address register */
576 unsigned swizzle = indirect_reg->Swizzle;
579 LLVMValueRef max_index;
582 assert(bld->indirect_files & (1 << reg_file));
584 base = lp_build_const_int_vec(bld->bld_base.base.gallivm, uint_bld->type, reg_index);
/* The relative offset may come from an ADDR register or a TEMP. */
587 switch (indirect_reg->File) {
588 case TGSI_FILE_ADDRESS:
589 rel = LLVMBuildLoad(builder,
590 bld->addr[indirect_reg->Index][swizzle],
592 /* ADDR LLVM values already have LLVM integer type. */
594 case TGSI_FILE_TEMPORARY:
595 rel = lp_get_temp_ptr_soa(bld, indirect_reg->Index, swizzle);
596 rel = LLVMBuildLoad(builder, rel, "load temp reg");
597 /* TEMP LLVM values always have LLVM float type, but for indirection, the
598 * value actually stored is expected to be an integer */
599 rel = LLVMBuildBitCast(builder, rel, uint_bld->vec_type, "");
603 rel = uint_bld->zero;
606 index = lp_build_add(uint_bld, base, rel);
/* Clamp to the register file's declared maximum to keep the gather
 * from reading out of bounds. */
608 max_index = lp_build_const_int_vec(bld->bld_base.base.gallivm,
610 bld->bld_base.info->file_max[reg_file]);
612 assert(!uint_bld->type.sign);
613 index = lp_build_min(uint_bld, index, max_index);
/**
 * Map a TGSI operand type to the matching build context (float, uint or
 * int), used to pick the vector type a fetched value is bitcast to.
 * NOTE(review): tail of this function (DOUBLE/default handling, return)
 * is elided in this excerpt.
 */
618 static struct lp_build_context *
619 stype_to_fetch(struct lp_build_tgsi_context * bld_base,
620 enum tgsi_opcode_type stype)
622 struct lp_build_context *bld_fetch;
625 case TGSI_TYPE_FLOAT:
626 case TGSI_TYPE_UNTYPED:
627 bld_fetch = &bld_base->base;
629 case TGSI_TYPE_UNSIGNED:
630 bld_fetch = &bld_base->uint_bld;
632 case TGSI_TYPE_SIGNED:
633 bld_fetch = &bld_base->int_bld;
636 case TGSI_TYPE_DOUBLE:
/* Fetch one channel of a constant-buffer operand (function name line is
 * elided in this excerpt; presumably emit_fetch_constant -- verify).
 * Indirect addressing gathers per-lane; direct addressing loads one
 * scalar and broadcasts it, then bitcasts to the requested int type. */
647 struct lp_build_tgsi_context * bld_base,
648 const struct tgsi_full_src_register * reg,
649 enum tgsi_opcode_type stype,
652 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
653 struct gallivm_state *gallivm = bld_base->base.gallivm;
654 LLVMBuilderRef builder = gallivm->builder;
655 struct lp_build_context *uint_bld = &bld_base->uint_bld;
656 LLVMValueRef indirect_index = NULL;
657 unsigned dimension = 0;
658 LLVMValueRef dimension_index;
659 LLVMValueRef consts_ptr;
662 /* XXX: Handle fetching xyzw components as a vector */
663 assert(swizzle != ~0);
665 if (reg->Register.Dimension) {
666 assert(!reg->Dimension.Indirect);
667 dimension = reg->Dimension.Index;
668 assert(dimension < LP_MAX_TGSI_CONST_BUFFERS);
/* Select the constant buffer for this dimension. */
671 dimension_index = lp_build_const_int32(gallivm, dimension);
672 consts_ptr = lp_build_array_get(gallivm, bld->consts_ptr, dimension_index);
674 if (reg->Register.Indirect) {
675 indirect_index = get_indirect_index(bld,
681 if (reg->Register.Indirect) {
682 LLVMValueRef swizzle_vec =
683 lp_build_const_int_vec(bld->bld_base.base.gallivm, uint_bld->type, swizzle);
684 LLVMValueRef index_vec; /* index into the const buffer */
686 /* index_vec = indirect_index * 4 + swizzle */
687 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
688 index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
690 /* Gather values from the constant buffer */
691 res = build_gather(&bld_base->base, consts_ptr, index_vec);
/* Direct case: one load + broadcast. */
694 LLVMValueRef index; /* index into the const buffer */
695 LLVMValueRef scalar, scalar_ptr;
697 index = lp_build_const_int32(gallivm, reg->Register.Index*4 + swizzle);
699 scalar_ptr = LLVMBuildGEP(builder, consts_ptr,
701 scalar = LLVMBuildLoad(builder, scalar_ptr, "");
702 res = lp_build_broadcast_scalar(&bld_base->base, scalar);
/* Constants are stored as floats; reinterpret for integer opcodes. */
705 if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED) {
706 struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
707 res = LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
/**
 * Fetch one channel of an immediate operand: look up the pre-built
 * constant vector and reinterpret it for integer-typed opcodes.
 * NOTE(review): tail (return) is elided in this excerpt.
 */
713 emit_fetch_immediate(
714 struct lp_build_tgsi_context * bld_base,
715 const struct tgsi_full_src_register * reg,
716 enum tgsi_opcode_type stype,
719 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
720 LLVMValueRef res = bld->immediates[reg->Register.Index][swizzle];
/* Immediates are LLVM constants, so a const bitcast suffices. */
723 if (stype == TGSI_TYPE_UNSIGNED) {
724 res = LLVMConstBitCast(res, bld_base->uint_bld.vec_type);
725 } else if (stype == TGSI_TYPE_SIGNED) {
726 res = LLVMConstBitCast(res, bld_base->int_bld.vec_type);
/* Fetch one channel of a shader input (function name line is elided in
 * this excerpt; presumably emit_fetch_input -- verify).  Handles the
 * indirect case with a gather from the flat inputs array, the direct
 * indirect-file case with a load, and the common case with a cached
 * per-reg/per-chan value. */
733 struct lp_build_tgsi_context * bld_base,
734 const struct tgsi_full_src_register * reg,
735 enum tgsi_opcode_type stype,
738 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
739 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
740 LLVMBuilderRef builder = gallivm->builder;
741 struct lp_build_context *uint_bld = &bld_base->uint_bld;
742 LLVMValueRef indirect_index = NULL;
745 if (reg->Register.Indirect) {
746 indirect_index = get_indirect_index(bld,
752 if (reg->Register.Indirect) {
753 LLVMValueRef swizzle_vec =
754 lp_build_const_int_vec(gallivm, uint_bld->type, swizzle);
755 LLVMValueRef length_vec =
756 lp_build_const_int_vec(gallivm, uint_bld->type, bld->bld_base.base.type.length);
757 LLVMValueRef index_vec; /* index into the const buffer */
758 LLVMValueRef inputs_array;
759 LLVMTypeRef float4_ptr_type;
761 /* index_vec = (indirect_index * 4 + swizzle) * length */
762 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
763 index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
764 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
766 /* cast inputs_array pointer to float* */
767 float4_ptr_type = LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
768 inputs_array = LLVMBuildBitCast(builder, bld->inputs_array,
769 float4_ptr_type, "");
771 /* Gather values from the temporary register array */
772 res = build_gather(&bld_base->base, inputs_array, index_vec);
774 if (bld->indirect_files & (1 << TGSI_FILE_INPUT)) {
775 LLVMValueRef lindex = lp_build_const_int32(gallivm,
776 reg->Register.Index * 4 + swizzle);
777 LLVMValueRef input_ptr = LLVMBuildGEP(builder,
778 bld->inputs_array, &lindex, 1, "");
779 res = LLVMBuildLoad(builder, input_ptr, "");
/* Common case: inputs are already available as LLVM values. */
782 res = bld->inputs[reg->Register.Index][swizzle];
/* Inputs are floats in memory; reinterpret for integer opcodes. */
788 if (stype == TGSI_TYPE_UNSIGNED) {
789 res = LLVMBuildBitCast(builder, res, bld_base->uint_bld.vec_type, "");
790 } else if (stype == TGSI_TYPE_SIGNED) {
791 res = LLVMBuildBitCast(builder, res, bld_base->int_bld.vec_type, "");
/* Fetch one channel of a geometry-shader input via the gs_iface callback
 * (function name line is elided in this excerpt; presumably
 * emit_fetch_gs_input -- verify).  Both the attribute and the vertex
 * index may be direct constants or indirect values. */
800 struct lp_build_tgsi_context * bld_base,
801 const struct tgsi_full_src_register * reg,
802 enum tgsi_opcode_type stype,
805 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
806 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
807 LLVMBuilderRef builder = gallivm->builder;
808 LLVMValueRef attrib_index = NULL;
809 LLVMValueRef vertex_index = NULL;
810 LLVMValueRef swizzle_index = lp_build_const_int32(gallivm, swizzle);
813 if (reg->Register.Indirect) {
814 attrib_index = get_indirect_index(bld,
819 attrib_index = lp_build_const_int32(gallivm, reg->Register.Index);
822 if (reg->Dimension.Indirect) {
823 vertex_index = get_indirect_index(bld,
825 reg->Dimension.Index,
828 vertex_index = lp_build_const_int32(gallivm, reg->Dimension.Index);
/* Delegate the actual load to the geometry-shader interface. */
832 res = bld->gs_iface->fetch_input(bld->gs_iface, bld_base,
833 vertex_index, attrib_index,
/* Reinterpret the fetched float vector for integer opcodes. */
838 if (stype == TGSI_TYPE_UNSIGNED) {
839 res = LLVMBuildBitCast(builder, res, bld_base->uint_bld.vec_type, "");
840 } else if (stype == TGSI_TYPE_SIGNED) {
841 res = LLVMBuildBitCast(builder, res, bld_base->int_bld.vec_type, "");
/**
 * Fetch one channel of a TEMP operand: gather from the flat temps array
 * when addressed indirectly, otherwise load the per-reg/per-chan alloca;
 * bitcast to int/uint for integer-typed opcodes.
 * NOTE(review): excerpt is elided (gather index arguments, return missing).
 */
848 emit_fetch_temporary(
849 struct lp_build_tgsi_context * bld_base,
850 const struct tgsi_full_src_register * reg,
851 enum tgsi_opcode_type stype,
854 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
855 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
856 LLVMBuilderRef builder = gallivm->builder;
857 struct lp_build_context *uint_bld = &bld_base->uint_bld;
858 LLVMValueRef indirect_index = NULL;
861 if (reg->Register.Indirect) {
862 indirect_index = get_indirect_index(bld,
868 if (reg->Register.Indirect) {
869 LLVMValueRef swizzle_vec =
870 lp_build_const_int_vec(bld->bld_base.base.gallivm, uint_bld->type, swizzle);
871 LLVMValueRef length_vec =
872 lp_build_const_int_vec(bld->bld_base.base.gallivm, uint_bld->type,
873 bld->bld_base.base.type.length);
874 LLVMValueRef index_vec; /* index into the const buffer */
875 LLVMValueRef temps_array;
876 LLVMTypeRef float4_ptr_type;
878 /* index_vec = (indirect_index * 4 + swizzle) * length */
879 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
880 index_vec = lp_build_add(uint_bld, index_vec, swizzle_vec);
881 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
883 /* cast temps_array pointer to float* */
884 float4_ptr_type = LLVMPointerType(LLVMFloatTypeInContext(bld->bld_base.base.gallivm->context), 0);
885 temps_array = LLVMBuildBitCast(builder, bld->temps_array,
886 float4_ptr_type, "");
888 /* Gather values from the temporary register array */
889 res = build_gather(&bld_base->base, temps_array, index_vec);
/* Direct case: load the channel's alloca. */
892 LLVMValueRef temp_ptr;
893 temp_ptr = lp_get_temp_ptr_soa(bld, reg->Register.Index, swizzle);
894 res = LLVMBuildLoad(builder, temp_ptr, "");
897 if (stype == TGSI_TYPE_SIGNED || stype == TGSI_TYPE_UNSIGNED) {
898 struct lp_build_context *bld_fetch = stype_to_fetch(bld_base, stype);
899 res = LLVMBuildBitCast(builder, res, bld_fetch->vec_type, "");
/**
 * Fetch a system value (INSTANCEID / VERTEXID / PRIMID) from the
 * system_values struct, broadcasting where needed, then bitcast the result
 * to the type the opcode expects if it differs from the value's own type.
 * NOTE(review): excerpt is elided (break statements, return missing).
 */
906 emit_fetch_system_value(
907 struct lp_build_tgsi_context * bld_base,
908 const struct tgsi_full_src_register * reg,
909 enum tgsi_opcode_type stype,
912 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
913 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
914 const struct tgsi_shader_info *info = bld->bld_base.info;
915 LLVMBuilderRef builder = gallivm->builder;
917 enum tgsi_opcode_type atype; // Actual type of the value
919 assert(!reg->Register.Indirect);
921 switch (info->system_value_semantic_name[reg->Register.Index]) {
922 case TGSI_SEMANTIC_INSTANCEID:
/* instance_id is a scalar; broadcast it across all lanes. */
923 res = lp_build_broadcast_scalar(&bld_base->uint_bld, bld->system_values.instance_id);
924 atype = TGSI_TYPE_UNSIGNED;
927 case TGSI_SEMANTIC_VERTEXID:
928 res = bld->system_values.vertex_id;
929 atype = TGSI_TYPE_UNSIGNED;
932 case TGSI_SEMANTIC_PRIMID:
933 res = bld->system_values.prim_id;
934 atype = TGSI_TYPE_UNSIGNED;
938 assert(!"unexpected semantic in emit_fetch_system_value");
939 res = bld_base->base.zero;
940 atype = TGSI_TYPE_FLOAT;
/* Reinterpret only when the stored type differs from the wanted one. */
944 if (atype != stype) {
945 if (stype == TGSI_TYPE_FLOAT) {
946 res = LLVMBuildBitCast(builder, res, bld_base->base.vec_type, "");
947 } else if (stype == TGSI_TYPE_UNSIGNED) {
948 res = LLVMBuildBitCast(builder, res, bld_base->uint_bld.vec_type, "");
949 } else if (stype == TGSI_TYPE_SIGNED) {
950 res = LLVMBuildBitCast(builder, res, bld_base->int_bld.vec_type, "");
958 * Register fetch with derivatives.
 *
 * NOTE(review): only a fragment of this function is visible (name line,
 * most parameters and the guards around ddx/ddy are elided) -- computes
 * screen-space derivatives of 'src' into *ddx / *ddy; verify upstream.
962 struct lp_build_tgsi_soa_context *bld,
971 /* TODO: use interpolation coeffs for inputs */
974 *ddx = lp_build_ddx(&bld->bld_base.base, src);
977 *ddy = lp_build_ddy(&bld->bld_base.base, src);
/**
 * Build the per-channel predicate masks for a predicated instruction:
 * loads each referenced predicate-register channel once (caching by
 * swizzle in 'unswizzled'), converts it to an integer mask via a
 * float-vs-zero compare, and applies the optional negate.
 * NOTE(review): excerpt is elided (pred output parameter, early-out body,
 * compare function argument and the final pred[chan] store are missing).
 */
985 emit_fetch_predicate(
986 struct lp_build_tgsi_soa_context *bld,
987 const struct tgsi_full_instruction *inst,
990 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
992 unsigned char swizzles[4];
993 LLVMValueRef unswizzled[4] = {NULL, NULL, NULL, NULL};
/* No predicate on this instruction: every channel is unmasked. */
997 if (!inst->Instruction.Predicate) {
998 TGSI_FOR_EACH_CHANNEL( chan ) {
1004 swizzles[0] = inst->Predicate.SwizzleX;
1005 swizzles[1] = inst->Predicate.SwizzleY;
1006 swizzles[2] = inst->Predicate.SwizzleZ;
1007 swizzles[3] = inst->Predicate.SwizzleW;
1009 index = inst->Predicate.Index;
1010 assert(index < LP_MAX_TGSI_PREDS);
1012 TGSI_FOR_EACH_CHANNEL( chan ) {
1013 unsigned swizzle = swizzles[chan];
1016 * Only fetch the predicate register channels that are actually listed
1019 if (!unswizzled[swizzle]) {
1020 value = LLVMBuildLoad(builder,
1021 bld->preds[index][swizzle], "");
1024 * Convert the value to an integer mask.
1026 * TODO: Short-circuit this comparison -- a D3D setp_xx instructions
1027 * is needlessly causing two comparisons due to storing the intermediate
1028 * result as float vector instead of an integer mask vector.
1030 value = lp_build_compare(bld->bld_base.base.gallivm,
1031 bld->bld_base.base.type,
1034 bld->bld_base.base.zero);
1035 if (inst->Predicate.Negate) {
1036 value = LLVMBuildNot(builder, value, "");
1039 unswizzled[swizzle] = value;
1041 value = unswizzled[swizzle];
/* Store one channel of an instruction result to its destination register
 * (function name line is elided in this excerpt; presumably
 * emit_store_chan -- verify).  Picks the store build context from the
 * opcode's destination type, applies saturation, then dispatches on the
 * destination file: OUTPUT and TEMPORARY support indirect addressing via
 * a masked scatter into the flat alloca arrays; ADDRESS and PREDICATE are
 * direct masked stores. */
1053 struct lp_build_tgsi_context *bld_base,
1054 const struct tgsi_full_instruction *inst,
1056 unsigned chan_index,
1060 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1061 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1062 LLVMBuilderRef builder = gallivm->builder;
1063 const struct tgsi_full_dst_register *reg = &inst->Dst[index];
1064 struct lp_build_context *uint_bld = &bld_base->uint_bld;
1065 LLVMValueRef indirect_index = NULL;
1066 struct lp_build_context *bld_store;
1067 enum tgsi_opcode_type dtype = tgsi_opcode_infer_dst_type(inst->Instruction.Opcode);
/* Choose the build context matching the destination's data type. */
1071 case TGSI_TYPE_FLOAT:
1072 case TGSI_TYPE_UNTYPED:
1073 bld_store = &bld_base->base;
1075 case TGSI_TYPE_UNSIGNED:
1076 bld_store = &bld_base->uint_bld;
1078 case TGSI_TYPE_SIGNED:
1079 bld_store = &bld_base->int_bld;
1081 case TGSI_TYPE_DOUBLE:
1082 case TGSI_TYPE_VOID:
/* Apply the instruction's saturation mode, if any. */
1088 switch( inst->Instruction.Saturate ) {
1092 case TGSI_SAT_ZERO_ONE:
1093 value = lp_build_max(&bld->bld_base.base, value, bld->bld_base.base.zero);
1094 value = lp_build_min(&bld->bld_base.base, value, bld->bld_base.base.one);
1097 case TGSI_SAT_MINUS_PLUS_ONE:
1098 value = lp_build_max(&bld->bld_base.base, value, lp_build_const_vec(bld->bld_base.base.gallivm, bld->bld_base.base.type, -1.0));
1099 value = lp_build_min(&bld->bld_base.base, value, bld->bld_base.base.one);
1106 if (reg->Register.Indirect) {
1107 indirect_index = get_indirect_index(bld,
1109 reg->Register.Index,
1112 assert(reg->Register.Index <=
1113 bld->bld_base.info->file_max[reg->Register.File]);
1116 switch( reg->Register.File ) {
1117 case TGSI_FILE_OUTPUT:
1118 if (reg->Register.Indirect) {
1119 LLVMValueRef chan_vec =
1120 lp_build_const_int_vec(gallivm, uint_bld->type, chan_index);
1121 LLVMValueRef length_vec =
1122 lp_build_const_int_vec(gallivm, uint_bld->type, bld->bld_base.base.type.length);
1123 LLVMValueRef index_vec; /* indexes into the temp registers */
1124 LLVMValueRef outputs_array;
1125 LLVMValueRef pixel_offsets;
1126 LLVMTypeRef float_ptr_type;
1129 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1130 pixel_offsets = uint_bld->undef;
1131 for (i = 0; i < bld->bld_base.base.type.length; i++) {
1132 LLVMValueRef ii = lp_build_const_int32(gallivm, i);
1133 pixel_offsets = LLVMBuildInsertElement(builder, pixel_offsets,
1137 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1138 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
1139 index_vec = lp_build_add(uint_bld, index_vec, chan_vec);
1140 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
1141 index_vec = lp_build_add(uint_bld, index_vec, pixel_offsets);
1144 LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
1145 outputs_array = LLVMBuildBitCast(builder, bld->outputs_array,
1146 float_ptr_type, "");
1148 /* Scatter store values into temp registers */
1149 emit_mask_scatter(bld, outputs_array, index_vec, value,
1150 &bld->exec_mask, pred);
/* Direct output store (masked). */
1153 LLVMValueRef out_ptr = lp_get_output_ptr(bld, reg->Register.Index,
1155 lp_exec_mask_store(&bld->exec_mask, bld_store, pred, value, out_ptr);
1159 case TGSI_FILE_TEMPORARY:
1160 if (reg->Register.Indirect) {
1161 LLVMValueRef chan_vec =
1162 lp_build_const_int_vec(gallivm, uint_bld->type, chan_index);
1163 LLVMValueRef length_vec =
1164 lp_build_const_int_vec(gallivm, uint_bld->type,
1165 bld->bld_base.base.type.length);
1166 LLVMValueRef index_vec; /* indexes into the temp registers */
1167 LLVMValueRef temps_array;
1168 LLVMValueRef pixel_offsets;
1169 LLVMTypeRef float_ptr_type;
1172 /* build pixel offset vector: {0, 1, 2, 3, ...} */
1173 pixel_offsets = uint_bld->undef;
1174 for (i = 0; i < bld->bld_base.base.type.length; i++) {
1175 LLVMValueRef ii = lp_build_const_int32(gallivm, i);
1176 pixel_offsets = LLVMBuildInsertElement(builder, pixel_offsets,
1180 /* index_vec = (indirect_index * 4 + chan_index) * length + offsets */
1181 index_vec = lp_build_shl_imm(uint_bld, indirect_index, 2);
1182 index_vec = lp_build_add(uint_bld, index_vec, chan_vec);
1183 index_vec = lp_build_mul(uint_bld, index_vec, length_vec);
1184 index_vec = lp_build_add(uint_bld, index_vec, pixel_offsets);
1187 LLVMPointerType(LLVMFloatTypeInContext(gallivm->context), 0);
1188 temps_array = LLVMBuildBitCast(builder, bld->temps_array,
1189 float_ptr_type, "");
1191 /* Scatter store values into temp registers */
1192 emit_mask_scatter(bld, temps_array, index_vec, value,
1193 &bld->exec_mask, pred);
/* Direct temp store: view the float alloca as an int pointer when the
 * destination type is integer, so no value conversion happens. */
1196 LLVMValueRef temp_ptr;
1199 case TGSI_TYPE_UNSIGNED:
1200 case TGSI_TYPE_SIGNED: {
1201 LLVMTypeRef itype = bld_base->int_bld.vec_type;
1202 LLVMTypeRef ivtype = LLVMPointerType(itype, 0);
1203 LLVMValueRef tint_ptr = lp_get_temp_ptr_soa(bld, reg->Register.Index,
1205 LLVMValueRef temp_value_ptr;
1207 temp_ptr = LLVMBuildBitCast(builder, tint_ptr, ivtype, "");
1208 temp_value_ptr = LLVMBuildBitCast(builder, value, itype, "");
1209 value = temp_value_ptr;
1213 case TGSI_TYPE_FLOAT:
1214 case TGSI_TYPE_UNTYPED:
1215 temp_ptr = lp_get_temp_ptr_soa(bld, reg->Register.Index,
1220 lp_exec_mask_store(&bld->exec_mask, bld_store, pred, value, temp_ptr);
1224 case TGSI_FILE_ADDRESS:
1225 assert(dtype == TGSI_TYPE_SIGNED);
1226 assert(LLVMTypeOf(value) == bld_base->base.int_vec_type);
1227 lp_exec_mask_store(&bld->exec_mask, bld_store, pred, value,
1228 bld->addr[reg->Register.Index][chan_index]);
1231 case TGSI_FILE_PREDICATE:
1232 lp_exec_mask_store(&bld->exec_mask, bld_store, pred, value,
1233 bld->preds[reg->Register.Index][chan_index]);
/*
 * Store one instruction's result values dst[0..3] into its destination
 * register: fetch per-channel predicates, then write each channel that is
 * enabled in the destination write mask via emit_store_chan().
 */
1243 struct lp_build_tgsi_context * bld_base,
1244 const struct tgsi_full_instruction * inst,
1245 const struct tgsi_opcode_info * info,
1246 LLVMValueRef dst[4])
1249 unsigned chan_index;
1250 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1253 LLVMValueRef pred[TGSI_NUM_CHANNELS];
1255 emit_fetch_predicate( bld, inst, pred );
/* Only write-mask-enabled channels of destination 0 are stored. */
1257 TGSI_FOR_EACH_DST0_ENABLED_CHANNEL( inst, chan_index ) {
1258 emit_store_chan(bld_base, inst, 0, chan_index, pred[chan_index], dst[chan_index]);
1264 * High-level instruction translators.
/*
 * Translate an old-style TGSI texture opcode (TEX/TXB/TXL/TXD/TXP) into a
 * call to the bound sampler code generator.
 *
 * Coordinates come from src0; the sampler unit from src1 (src3 for the
 * explicit-derivative variant).  'modifier' selects lod-bias, explicit-lod,
 * explicit-derivative or projected behavior.  Results are written to
 * texel[0..3]; if no sampler generator was supplied, undef is returned for
 * every channel.
 */
1268 emit_tex( struct lp_build_tgsi_soa_context *bld,
1269 const struct tgsi_full_instruction *inst,
1270 enum lp_build_tex_modifier modifier,
1271 LLVMValueRef *texel)
1274 LLVMValueRef lod_bias, explicit_lod;
1275 LLVMValueRef oow = NULL;
1276 LLVMValueRef coords[4];
1277 LLVMValueRef offsets[3] = { NULL };
1278 struct lp_derivatives derivs;
1279 struct lp_derivatives *deriv_ptr = NULL;
1280 unsigned num_coords;
1284 if (!bld->sampler) {
1285 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
1286 for (i = 0; i < 4; i++) {
1287 texel[i] = bld->bld_base.base.undef;
/* Coordinate count (and derivative dims) depend on the texture target. */
1292 switch (inst->Texture.Texture) {
1293 case TGSI_TEXTURE_1D:
1297 case TGSI_TEXTURE_1D_ARRAY:
1301 case TGSI_TEXTURE_2D:
1302 case TGSI_TEXTURE_RECT:
1306 case TGSI_TEXTURE_SHADOW1D:
1307 case TGSI_TEXTURE_SHADOW1D_ARRAY:
1311 case TGSI_TEXTURE_SHADOW2D:
1312 case TGSI_TEXTURE_SHADOWRECT:
1313 case TGSI_TEXTURE_2D_ARRAY:
1314 case TGSI_TEXTURE_CUBE:
1318 case TGSI_TEXTURE_3D:
1322 case TGSI_TEXTURE_SHADOW2D_ARRAY:
1323 case TGSI_TEXTURE_SHADOWCUBE:
1332 /* Note lod and especially projected are illegal in a LOT of cases */
1333 if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS) {
1334 assert(num_coords < 4);
/* For TXB/TXL the bias/lod is packed into src0.w. */
1335 lod_bias = lp_build_emit_fetch( &bld->bld_base, inst, 0, 3 );
1336 explicit_lod = NULL;
1338 else if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
1339 assert(num_coords < 4);
1341 explicit_lod = lp_build_emit_fetch( &bld->bld_base, inst, 0, 3 );
1345 explicit_lod = NULL;
1348 if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED) {
1349 assert(num_coords < 4);
/* TXP: divide every coordinate by src0.w (multiply by its reciprocal). */
1350 oow = lp_build_emit_fetch( &bld->bld_base, inst, 0, 3 );
1351 oow = lp_build_rcp(&bld->bld_base.base, oow);
1354 for (i = 0; i < num_coords; i++) {
1355 coords[i] = lp_build_emit_fetch( &bld->bld_base, inst, 0, i );
1356 if (modifier == LP_BLD_TEX_MODIFIER_PROJECTED)
1357 coords[i] = lp_build_mul(&bld->bld_base.base, coords[i], oow);
1359 for (i = num_coords; i < 4; i++) {
1360 coords[i] = bld->bld_base.base.undef;
1363 if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV) {
/* TXD: derivatives come from src1/src2, sampler unit from src3. */
1365 for (dim = 0; dim < dims; ++dim) {
1366 derivs.ddx[dim] = lp_build_emit_fetch( &bld->bld_base, inst, 1, dim );
1367 derivs.ddy[dim] = lp_build_emit_fetch( &bld->bld_base, inst, 2, dim );
1369 deriv_ptr = &derivs;
1370 unit = inst->Src[3].Register.Index;
1372 unit = inst->Src[1].Register.Index;
1375 /* some advanced gather instructions (txgo) would require 4 offsets */
1376 if (inst->Texture.NumOffsets == 1) {
1378 for (dim = 0; dim < dims; dim++) {
1379 offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim );
1383 bld->sampler->emit_fetch_texel(bld->sampler,
1384 bld->bld_base.base.gallivm,
1385 bld->bld_base.base.type,
1391 lod_bias, explicit_lod,
/*
 * Translate a new-style SAMPLE* opcode.  Unlike classic TEX opcodes the
 * texture unit comes from src1 and the sampler unit from src2, and the
 * texture target is taken from the declared sampler view rather than the
 * instruction itself.  The boolean parameter between 'modifier' and 'texel'
 * selects shadow-compare sampling (the compare value is fetched from
 * src3.x below).  Results go to texel[0..3]; undef when no sampler
 * generator was supplied.
 */
1396 emit_sample(struct lp_build_tgsi_soa_context *bld,
1397 const struct tgsi_full_instruction *inst,
1398 enum lp_build_tex_modifier modifier,
1400 LLVMValueRef *texel)
1402 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1403 unsigned texture_unit, sampler_unit;
1404 LLVMValueRef lod_bias, explicit_lod;
1405 LLVMValueRef coords[4];
1406 LLVMValueRef offsets[3] = { NULL };
1407 struct lp_derivatives derivs;
1408 struct lp_derivatives *deriv_ptr = NULL;
1409 unsigned num_coords, dims;
1412 if (!bld->sampler) {
1413 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
1414 for (i = 0; i < 4; i++) {
1415 texel[i] = bld->bld_base.base.undef;
1421 * unlike old-style tex opcodes the texture/sampler indices
1422 * always come from src1 and src2 respectively.
1424 texture_unit = inst->Src[1].Register.Index;
1425 sampler_unit = inst->Src[2].Register.Index;
1428 * Note inst->Texture.Texture will contain the number of offsets,
1429 * however the target information is NOT there and comes from the
1430 * declared sampler views instead.
1432 switch (bld->sv[texture_unit].Resource) {
1433 case TGSI_TEXTURE_1D:
1437 case TGSI_TEXTURE_1D_ARRAY:
1441 case TGSI_TEXTURE_2D:
1442 case TGSI_TEXTURE_RECT:
1446 case TGSI_TEXTURE_2D_ARRAY:
1447 case TGSI_TEXTURE_CUBE:
1451 case TGSI_TEXTURE_3D:
1455 case TGSI_TEXTURE_CUBE_ARRAY:
1465 * unlike old-style tex opcodes the texture/sampler indices
1466 * always come from src1 and src2 respectively.
/* NOTE(review): these two assignments duplicate the identical ones above
 * (internal lines 1424-1425); harmless but redundant -- consider removing. */
1468 texture_unit = inst->Src[1].Register.Index;
1469 sampler_unit = inst->Src[2].Register.Index;
1471 if (modifier == LP_BLD_TEX_MODIFIER_LOD_BIAS) {
1472 lod_bias = lp_build_emit_fetch( &bld->bld_base, inst, 3, 0 );
1473 explicit_lod = NULL;
1475 else if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_LOD) {
1477 explicit_lod = lp_build_emit_fetch( &bld->bld_base, inst, 3, 0 );
1479 else if (modifier == LP_BLD_TEX_MODIFIER_LOD_ZERO) {
1481 /* XXX might be better to explicitly pass the level zero information */
1482 explicit_lod = lp_build_const_vec(gallivm, bld->bld_base.base.type, 0.0F);
1486 explicit_lod = NULL;
1489 for (i = 0; i < num_coords; i++) {
1490 coords[i] = lp_build_emit_fetch( &bld->bld_base, inst, 0, i );
1492 for (i = num_coords; i < 4; i++) {
1493 coords[i] = bld->bld_base.base.undef;
1496 * XXX: whack shadow comparison value into place.
1497 * Should probably fix the interface for separate value
1498 * (it will not work for cube arrays if it is part of coords).
1501 unsigned c_coord = num_coords > 2 ? 3 : 2;
1502 assert(num_coords < 4);
1503 coords[c_coord] = lp_build_emit_fetch( &bld->bld_base, inst, 3, 0 );
1506 if (modifier == LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV) {
/* SAMPLE_D: derivatives come from src3/src4. */
1508 for (dim = 0; dim < dims; ++dim) {
1509 derivs.ddx[dim] = lp_build_emit_fetch( &bld->bld_base, inst, 3, dim );
1510 derivs.ddy[dim] = lp_build_emit_fetch( &bld->bld_base, inst, 4, dim );
1512 deriv_ptr = &derivs;
1515 /* some advanced gather instructions (txgo) would require 4 offsets */
1516 if (inst->Texture.NumOffsets == 1) {
1518 for (dim = 0; dim < dims; dim++) {
1519 offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim );
1523 bld->sampler->emit_fetch_texel(bld->sampler,
1524 bld->bld_base.base.gallivm,
1525 bld->bld_base.base.type,
1527 texture_unit, sampler_unit,
1531 lod_bias, explicit_lod,
/*
 * Translate a texel-fetch opcode (TXF / SAMPLE_I): unfiltered lookup with
 * integer coordinates and an explicit LOD from src0.w (except for buffer
 * textures, which have no mip levels).  The elided trailing boolean
 * parameter selects whether the target comes from the declared sampler
 * view or from the instruction -- see the two 'target =' branches below.
 */
1536 emit_fetch_texels( struct lp_build_tgsi_soa_context *bld,
1537 const struct tgsi_full_instruction *inst,
1538 LLVMValueRef *texel,
1541 unsigned unit, target;
1542 LLVMValueRef coord_undef = LLVMGetUndef(bld->bld_base.base.int_vec_type);
1543 LLVMValueRef explicit_lod = NULL;
1544 LLVMValueRef coords[3];
1545 LLVMValueRef offsets[3] = { NULL };
1546 unsigned num_coords;
1550 if (!bld->sampler) {
1551 _debug_printf("warning: found texture instruction but no sampler generator supplied\n");
1552 for (i = 0; i < 4; i++) {
1553 texel[i] = coord_undef;
1558 unit = inst->Src[1].Register.Index;
1561 target = bld->sv[unit].Resource;
1564 target = inst->Texture.Texture;
1568 case TGSI_TEXTURE_1D:
1569 case TGSI_TEXTURE_BUFFER:
1573 case TGSI_TEXTURE_1D_ARRAY:
1577 case TGSI_TEXTURE_2D:
1578 case TGSI_TEXTURE_RECT:
1582 case TGSI_TEXTURE_2D_ARRAY:
1586 case TGSI_TEXTURE_3D:
1595 /* always have lod except for buffers ? */
1596 if (target != TGSI_TEXTURE_BUFFER) {
1597 explicit_lod = lp_build_emit_fetch( &bld->bld_base, inst, 0, 3 );
1600 for (i = 0; i < num_coords; i++) {
1601 coords[i] = lp_build_emit_fetch( &bld->bld_base, inst, 0, i );
1603 for (i = num_coords; i < 3; i++) {
1604 coords[i] = coord_undef;
1607 if (inst->Texture.NumOffsets == 1) {
1609 for (dim = 0; dim < dims; dim++) {
1610 offsets[dim] = lp_build_emit_fetch_texoffset(&bld->bld_base, inst, 0, dim );
1614 bld->sampler->emit_fetch_texel(bld->sampler,
1615 bld->bld_base.base.gallivm,
1616 bld->bld_base.base.type,
/*
 * Translate TXQ / SVIEWINFO: query the texture's dimensions into
 * sizes_out[].  'is_sviewinfo' distinguishes the new-style opcode (target
 * from the declared sampler view) from classic TXQ (target from the
 * instruction).  The LOD argument (src0.x) is only fetched for targets
 * with mip levels; buffer/rect targets pass NULL.
 */
1627 emit_size_query( struct lp_build_tgsi_soa_context *bld,
1628 const struct tgsi_full_instruction *inst,
1629 LLVMValueRef *sizes_out,
1630 boolean is_sviewinfo)
1632 LLVMValueRef explicit_lod;
1635 unsigned unit = inst->Src[1].Register.Index;
1639 target = bld->sv[unit].Resource;
1642 target = inst->Texture.Texture;
/* These targets have no mip pyramid, hence no LOD operand. */
1645 case TGSI_TEXTURE_BUFFER:
1646 case TGSI_TEXTURE_RECT:
1647 case TGSI_TEXTURE_SHADOWRECT:
1655 if (!bld->sampler) {
1656 _debug_printf("warning: found texture query instruction but no sampler generator supplied\n");
1657 for (i = 0; i < 4; i++)
1658 sizes_out[i] = bld->bld_base.int_bld.undef;
1663 explicit_lod = lp_build_emit_fetch( &bld->bld_base, inst, 0, 0 );
1665 explicit_lod = NULL;
1667 bld->sampler->emit_size_query(bld->sampler,
1668 bld->bld_base.base.gallivm,
1669 bld->bld_base.int_bld.type,
/*
 * Heuristic: look ahead up to 5 instructions from 'pc' and report whether
 * the shader ends (TGSI_OPCODE_END) before any texturing, call, or
 * control-flow opcode occurs.  Callers (emit_kil / emit_kilp) use a TRUE
 * result to skip an extra lp_build_mask_check, since nothing expensive
 * remains that early termination could save.
 */
1677 near_end_of_shader(struct lp_build_tgsi_soa_context *bld,
1682 for (i = 0; i < 5; i++) {
1685 if (pc + i >= bld->bld_base.info->num_instructions)
1688 opcode = bld->bld_base.instructions[pc + i].Instruction.Opcode;
1690 if (opcode == TGSI_OPCODE_END)
/* Any of these before END means the remaining work is worth skipping. */
1693 if (opcode == TGSI_OPCODE_TEX ||
1694 opcode == TGSI_OPCODE_TXP ||
1695 opcode == TGSI_OPCODE_TXD ||
1696 opcode == TGSI_OPCODE_TXB ||
1697 opcode == TGSI_OPCODE_TXL ||
1698 opcode == TGSI_OPCODE_TXF ||
1699 opcode == TGSI_OPCODE_TXQ ||
1700 opcode == TGSI_OPCODE_CAL ||
1701 opcode == TGSI_OPCODE_CALLNZ ||
1702 opcode == TGSI_OPCODE_IF ||
1703 opcode == TGSI_OPCODE_IFC ||
1704 opcode == TGSI_OPCODE_BGNLOOP ||
1705 opcode == TGSI_OPCODE_SWITCH)
1715 * Kill fragment if any of the src register values are negative.
1719 struct lp_build_tgsi_soa_context *bld,
1720 const struct tgsi_full_instruction *inst,
1723 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
1724 const struct tgsi_full_src_register *reg = &inst->Src[0];
1725 LLVMValueRef terms[TGSI_NUM_CHANNELS];
1727 unsigned chan_index;
1729 memset(&terms, 0, sizeof terms);
/* Fetch each distinct (post-swizzle) source component exactly once. */
1731 TGSI_FOR_EACH_CHANNEL( chan_index ) {
1734 /* Unswizzle channel */
1735 swizzle = tgsi_util_get_full_src_register_swizzle( reg, chan_index );
1737 /* Check if the component has not been already tested. */
1738 assert(swizzle < TGSI_NUM_CHANNELS);
1739 if( !terms[swizzle] )
1740 /* TODO: change the comparison operator instead of setting the sign */
1741 terms[swizzle] = lp_build_emit_fetch(&bld->bld_base, inst, 0, chan_index );
/* AND together the per-channel "keep" masks (term >= 0 -> lane stays alive). */
1745 TGSI_FOR_EACH_CHANNEL( chan_index ) {
1746 if(terms[chan_index]) {
1747 LLVMValueRef chan_mask;
1750 * If term < 0 then mask = 0 else mask = ~0.
1752 chan_mask = lp_build_cmp(&bld->bld_base.base, PIPE_FUNC_GEQUAL, terms[chan_index], bld->bld_base.base.zero);
1755 mask = LLVMBuildAnd(builder, mask, chan_mask, "");
1762 lp_build_mask_update(bld->mask, mask);
/* Skip the (relatively costly) mask check if the shader is about to end. */
1764 if (!near_end_of_shader(bld, pc))
1765 lp_build_mask_check(bld->mask);
1771 * Predicated fragment kill.
1772 * XXX Actually, we do an unconditional kill (as in tgsi_exec.c).
1773 * The only predication is the execution mask which will apply if
1774 * we're inside a loop or conditional.
1777 emit_kilp(struct lp_build_tgsi_soa_context *bld,
1780 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
1783 /* For those channels which are "alive", disable fragment shader
/* Inside divergent control flow only the currently-active lanes are
 * killed: invert the exec mask to get the lanes to keep. */
1786 if (bld->exec_mask.has_mask) {
1787 mask = LLVMBuildNot(builder, bld->exec_mask.exec_mask, "kilp");
/* No exec mask: unconditional kill of all lanes (mask of zeros). */
1790 LLVMValueRef zero = LLVMConstNull(bld->bld_base.base.int_vec_type);
1794 lp_build_mask_update(bld->mask, mask);
1796 if (!near_end_of_shader(bld, pc))
1797 lp_build_mask_check(bld->mask);
1802 * Emit code which will dump the value of all the temporary registers
1806 emit_dump_temps(struct lp_build_tgsi_soa_context *bld)
1808 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1809 LLVMBuilderRef builder = gallivm->builder;
1810 LLVMValueRef temp_ptr;
1811 LLVMValueRef i0 = lp_build_const_int32(gallivm, 0);
1812 LLVMValueRef i1 = lp_build_const_int32(gallivm, 1);
1813 LLVMValueRef i2 = lp_build_const_int32(gallivm, 2);
1814 LLVMValueRef i3 = lp_build_const_int32(gallivm, 3);
1816 int n = bld->bld_base.info->file_max[TGSI_FILE_TEMPORARY];
/* NOTE(review): file_max[] holds the highest declared index, so
 * 'index < n' appears to skip the last temp -- '<= n' looks intended;
 * debug-only code, verify before changing. */
1818 for (index = 0; index < n; index++) {
1819 LLVMValueRef idx = lp_build_const_int32(gallivm, index);
1820 LLVMValueRef v[4][4], res;
1823 lp_build_printf(gallivm, "TEMP[%d]:\n", idx);
/* Extract the first four lanes of each channel's SoA vector. */
1825 for (chan = 0; chan < 4; chan++) {
1826 temp_ptr = lp_get_temp_ptr_soa(bld, index, chan);
1827 res = LLVMBuildLoad(builder, temp_ptr, "");
1828 v[chan][0] = LLVMBuildExtractElement(builder, res, i0, "");
1829 v[chan][1] = LLVMBuildExtractElement(builder, res, i1, "");
1830 v[chan][2] = LLVMBuildExtractElement(builder, res, i2, "");
1831 v[chan][3] = LLVMBuildExtractElement(builder, res, i3, "");
1834 lp_build_printf(gallivm, " X: %f %f %f %f\n",
1835 v[0][0], v[0][1], v[0][2], v[0][3]);
1836 lp_build_printf(gallivm, " Y: %f %f %f %f\n",
1837 v[1][0], v[1][1], v[1][2], v[1][3]);
1838 lp_build_printf(gallivm, " Z: %f %f %f %f\n",
1839 v[2][0], v[2][1], v[2][2], v[2][3]);
1840 lp_build_printf(gallivm, " W: %f %f %f %f\n",
1841 v[3][0], v[3][1], v[3][2], v[3][3]);
/*
 * Process a TGSI declaration: allocate per-register, per-channel storage.
 * Registers in files addressed indirectly are NOT alloca'd here -- those
 * live in flat arrays set up by emit_prologue() instead (see the
 * indirect_files checks below).
 */
1848 lp_emit_declaration_soa(
1849 struct lp_build_tgsi_context *bld_base,
1850 const struct tgsi_full_declaration *decl)
1852 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
1853 struct gallivm_state *gallivm = bld->bld_base.base.gallivm;
1854 LLVMTypeRef vec_type = bld->bld_base.base.vec_type;
1855 const unsigned first = decl->Range.First;
1856 const unsigned last = decl->Range.Last;
1859 for (idx = first; idx <= last; ++idx) {
1860 assert(last <= bld->bld_base.info->file_max[decl->Declaration.File]);
1861 switch (decl->Declaration.File) {
1862 case TGSI_FILE_TEMPORARY:
1863 assert(idx < LP_MAX_TGSI_TEMPS);
1864 if (!(bld->indirect_files & (1 << TGSI_FILE_TEMPORARY))) {
1865 for (i = 0; i < TGSI_NUM_CHANNELS; i++)
1866 bld->temps[idx][i] = lp_build_alloca(gallivm, vec_type, "temp");
1870 case TGSI_FILE_OUTPUT:
1871 if (!(bld->indirect_files & (1 << TGSI_FILE_OUTPUT))) {
1872 for (i = 0; i < TGSI_NUM_CHANNELS; i++)
1873 bld->outputs[idx][i] = lp_build_alloca(gallivm,
1874 vec_type, "output");
1878 case TGSI_FILE_ADDRESS:
1879 /* ADDR registers are only allocated with an integer LLVM IR type,
1880 * as they are guaranteed to always have integers.
1881 * XXX: Not sure if this exception is worthwhile (or the whole idea of
1882 * an ADDR register for that matter).
1884 assert(idx < LP_MAX_TGSI_ADDRS);
1885 for (i = 0; i < TGSI_NUM_CHANNELS; i++)
1886 bld->addr[idx][i] = lp_build_alloca(gallivm, bld_base->base.int_vec_type, "addr");
1889 case TGSI_FILE_PREDICATE:
1890 assert(idx < LP_MAX_TGSI_PREDS);
1891 for (i = 0; i < TGSI_NUM_CHANNELS; i++)
1892 bld->preds[idx][i] = lp_build_alloca(gallivm, vec_type,
1896 case TGSI_FILE_SAMPLER_VIEW:
1898 * The target stored here MUST match whatever there actually
1899 * is in the set sampler views (what about return type?).
1901 assert(idx < PIPE_MAX_SHADER_SAMPLER_VIEWS);
1902 bld->sv[idx] = decl->SamplerView;
1906 /* don't need to declare other vars */
/*
 * Record a TGSI immediate: build a constant vector for each component and
 * append it to bld->immediates[].  Integer immediates are built in their
 * own type and bitcast to the float vector type so that all immediate
 * storage is uniform; unused trailing components are filled with undef.
 */
1913 void lp_emit_immediate_soa(
1914 struct lp_build_tgsi_context *bld_base,
1915 const struct tgsi_full_immediate *imm)
1917 struct lp_build_tgsi_soa_context *bld = lp_soa_context(bld_base);
1918 struct gallivm_state * gallivm = bld_base->base.gallivm;
1920 /* simply copy the immediate values into the next immediates[] slot */
/* NrTokens counts the header token too, hence the -1. */
1922 const uint size = imm->Immediate.NrTokens - 1;
1924 assert(bld->num_immediates < LP_MAX_TGSI_IMMEDIATES);
1925 switch (imm->Immediate.DataType) {
1926 case TGSI_IMM_FLOAT32:
1927 for( i = 0; i < size; ++i )
1928 bld->immediates[bld->num_immediates][i] =
1929 lp_build_const_vec(gallivm, bld_base->base.type, imm->u[i].Float);
1932 case TGSI_IMM_UINT32:
1933 for( i = 0; i < size; ++i ) {
1934 LLVMValueRef tmp = lp_build_const_vec(gallivm, bld_base->uint_bld.type, imm->u[i].Uint);
1935 bld->immediates[bld->num_immediates][i] =
1936 LLVMConstBitCast(tmp, bld_base->base.vec_type);
1940 case TGSI_IMM_INT32:
1941 for( i = 0; i < size; ++i ) {
1942 LLVMValueRef tmp = lp_build_const_vec(gallivm, bld_base->int_bld.type, imm->u[i].Int);
1943 bld->immediates[bld->num_immediates][i] =
1944 LLVMConstBitCast(tmp, bld_base->base.vec_type);
1949 for( i = size; i < 4; ++i )
1950 bld->immediates[bld->num_immediates][i] = bld_base->base.undef;
1952 bld->num_immediates++;
/* Derivative opcode action: fills only the third output slot of
 * emit_fetch_deriv (likely DDX; the sibling handler fills the fourth). */
1957 const struct lp_build_tgsi_action * action,
1958 struct lp_build_tgsi_context * bld_base,
1959 struct lp_build_emit_data * emit_data)
1961 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1963 emit_fetch_deriv(bld, emit_data->args[0], NULL,
1964 &emit_data->output[emit_data->chan], NULL);
/* Derivative opcode action: fills only the fourth output slot of
 * emit_fetch_deriv (likely DDY; the sibling handler fills the third). */
1969 const struct lp_build_tgsi_action * action,
1970 struct lp_build_tgsi_context * bld_base,
1971 struct lp_build_emit_data * emit_data)
1973 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1975 emit_fetch_deriv(bld, emit_data->args[0], NULL, NULL,
1976 &emit_data->output[emit_data->chan]);
/* KILP action: unconditional (exec-mask-predicated) fragment kill.
 * pc - 1 points at the current instruction for the end-of-shader check. */
1981 const struct lp_build_tgsi_action * action,
1982 struct lp_build_tgsi_context * bld_base,
1983 struct lp_build_emit_data * emit_data)
1985 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1987 emit_kilp(bld, bld_base->pc - 1);
/* KIL action: kill lanes whose src0 components are negative. */
1992 const struct lp_build_tgsi_action * action,
1993 struct lp_build_tgsi_context * bld_base,
1994 struct lp_build_emit_data * emit_data)
1996 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
1998 emit_kil(bld, emit_data->inst, bld_base->pc - 1);
/* TEX action: plain texture sample, no modifier. */
2003 const struct lp_build_tgsi_action * action,
2004 struct lp_build_tgsi_context * bld_base,
2005 struct lp_build_emit_data * emit_data)
2007 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2009 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE, emit_data->output);
/* TXB action: texture sample with LOD bias (from src0.w in emit_tex). */
2014 const struct lp_build_tgsi_action * action,
2015 struct lp_build_tgsi_context * bld_base,
2016 struct lp_build_emit_data * emit_data)
2018 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2020 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_BIAS,
/* TXD action: texture sample with explicit derivatives (src1/src2). */
2026 const struct lp_build_tgsi_action * action,
2027 struct lp_build_tgsi_context * bld_base,
2028 struct lp_build_emit_data * emit_data)
2030 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2032 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV,
/* TXL action: texture sample with explicit LOD (from src0.w in emit_tex). */
2038 const struct lp_build_tgsi_action * action,
2039 struct lp_build_tgsi_context * bld_base,
2040 struct lp_build_emit_data * emit_data)
2042 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2044 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD,
/* TXP action: projected texture sample (coords divided by src0.w). */
2050 const struct lp_build_tgsi_action * action,
2051 struct lp_build_tgsi_context * bld_base,
2052 struct lp_build_emit_data * emit_data)
2054 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2056 emit_tex(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_PROJECTED,
/* TXQ action: classic texture size query (is_sviewinfo = FALSE). */
2062 const struct lp_build_tgsi_action * action,
2063 struct lp_build_tgsi_context * bld_base,
2064 struct lp_build_emit_data * emit_data)
2066 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2068 emit_size_query(bld, emit_data->inst, emit_data->output, FALSE);
/* TXF action: texel fetch; FALSE selects the classic (non-sampler-view)
 * target lookup in emit_fetch_texels. */
2073 const struct lp_build_tgsi_action * action,
2074 struct lp_build_tgsi_context * bld_base,
2075 struct lp_build_emit_data * emit_data)
2077 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2079 emit_fetch_texels(bld, emit_data->inst, emit_data->output, FALSE);
/* Texel-fetch action with TRUE flag -- presumably SAMPLE_I (target taken
 * from the declared sampler view); confirm against the action table. */
2084 const struct lp_build_tgsi_action * action,
2085 struct lp_build_tgsi_context * bld_base,
2086 struct lp_build_emit_data * emit_data)
2088 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2090 emit_fetch_texels(bld, emit_data->inst, emit_data->output, TRUE);
/* SAMPLE action: plain sample, no modifier, no shadow compare. */
2095 const struct lp_build_tgsi_action * action,
2096 struct lp_build_tgsi_context * bld_base,
2097 struct lp_build_emit_data * emit_data)
2099 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2101 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
2102 FALSE, emit_data->output);
/* SAMPLE_B action: sample with LOD bias, no shadow compare. */
2107 const struct lp_build_tgsi_action * action,
2108 struct lp_build_tgsi_context * bld_base,
2109 struct lp_build_emit_data * emit_data)
2111 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2113 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_BIAS,
2114 FALSE, emit_data->output);
/* Sample action with TRUE compare flag -- presumably SAMPLE_C (shadow
 * comparison, value from src3.x in emit_sample); confirm. */
2119 const struct lp_build_tgsi_action * action,
2120 struct lp_build_tgsi_context * bld_base,
2121 struct lp_build_emit_data * emit_data)
2123 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2125 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_NONE,
2126 TRUE, emit_data->output);
/* SAMPLE_C_LZ action: shadow compare at mip level zero. */
2131 const struct lp_build_tgsi_action * action,
2132 struct lp_build_tgsi_context * bld_base,
2133 struct lp_build_emit_data * emit_data)
2135 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2137 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_LOD_ZERO,
2138 TRUE, emit_data->output);
/* SAMPLE_D action: sample with explicit derivatives (src3/src4). */
2143 const struct lp_build_tgsi_action * action,
2144 struct lp_build_tgsi_context * bld_base,
2145 struct lp_build_emit_data * emit_data)
2147 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2149 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_DERIV,
2150 FALSE, emit_data->output);
/* SAMPLE_L action: sample with explicit LOD (src3.x). */
2155 const struct lp_build_tgsi_action * action,
2156 struct lp_build_tgsi_context * bld_base,
2157 struct lp_build_emit_data * emit_data)
2159 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2161 emit_sample(bld, emit_data->inst, LP_BLD_TEX_MODIFIER_EXPLICIT_LOD,
2162 FALSE, emit_data->output);
/* SVIEWINFO action: size query via the declared sampler view (TRUE). */
2167 const struct lp_build_tgsi_action * action,
2168 struct lp_build_tgsi_context * bld_base,
2169 struct lp_build_emit_data * emit_data)
2171 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2173 emit_size_query(bld, emit_data->inst, emit_data->output, TRUE);
/*
 * Build an integer vector of ones with lanes zeroed where execution is
 * currently disabled (exec mask ANDed in, plus the fragment/shader mask).
 * Used by the GS EMIT/ENDPRIM handlers to bump per-lane counters only for
 * active lanes.
 */
2177 mask_to_one_vec(struct lp_build_tgsi_context *bld_base)
2179 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2180 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
2181 LLVMValueRef one_vec = bld_base->int_bld.one;
2182 struct lp_exec_mask *exec_mask = &bld->exec_mask;
2184 if (exec_mask->has_mask) {
2185 one_vec = LLVMBuildAnd(builder, one_vec, exec_mask->exec_mask, "");
2187 one_vec = LLVMBuildAnd(builder, one_vec,
2188 lp_build_mask_value(bld->mask), "");
/*
 * GS EMIT action: gather the current output registers, hand the vertex to
 * the geometry-shader interface, and advance the per-lane emitted-vertex
 * counters by one for every active lane.  Marks a primitive as pending so
 * the epilogue can flush it if no explicit ENDPRIM follows.
 */
2194 const struct lp_build_tgsi_action * action,
2195 struct lp_build_tgsi_context * bld_base,
2196 struct lp_build_emit_data * emit_data)
2198 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2199 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
2201 if (bld->gs_iface->emit_vertex) {
2202 LLVMValueRef masked_ones = mask_to_one_vec(bld_base);
2203 gather_outputs(bld);
2204 bld->gs_iface->emit_vertex(bld->gs_iface, &bld->bld_base,
2206 bld->total_emitted_vertices_vec);
2207 bld->emitted_vertices_vec =
2208 LLVMBuildAdd(builder, bld->emitted_vertices_vec, masked_ones, "");
2209 bld->total_emitted_vertices_vec =
2210 LLVMBuildAdd(builder, bld->total_emitted_vertices_vec, masked_ones, "");
2211 bld->pending_end_primitive = TRUE;
/*
 * GS ENDPRIM action: close the primitive being accumulated -- notify the
 * geometry-shader interface with the current vertex/primitive counts, bump
 * the per-lane primitive counter for active lanes, and reset the per-
 * primitive vertex count.
 */
2218 const struct lp_build_tgsi_action * action,
2219 struct lp_build_tgsi_context * bld_base,
2220 struct lp_build_emit_data * emit_data)
2222 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2223 LLVMBuilderRef builder = bld->bld_base.base.gallivm->builder;
2225 if (bld->gs_iface->end_primitive) {
2226 LLVMValueRef masked_ones = mask_to_one_vec(bld_base);
2227 bld->gs_iface->end_primitive(bld->gs_iface, &bld->bld_base,
2228 bld->emitted_vertices_vec,
2229 bld->emitted_prims_vec);
2230 bld->emitted_prims_vec =
2231 LLVMBuildAdd(builder, bld->emitted_prims_vec, masked_ones, "");
2232 bld->emitted_vertices_vec = bld_base->uint_bld.zero;
2233 bld->pending_end_primitive = FALSE;
/* CAL action: subroutine call to the instruction's label. */
2239 const struct lp_build_tgsi_action * action,
2240 struct lp_build_tgsi_context * bld_base,
2241 struct lp_build_emit_data * emit_data)
2243 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2245 lp_exec_mask_call(&bld->exec_mask, emit_data->inst->Label.Label,
/* RET action: return from subroutine (updates pc via the exec mask). */
2251 const struct lp_build_tgsi_action * action,
2252 struct lp_build_tgsi_context * bld_base,
2253 struct lp_build_emit_data * emit_data)
2255 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2257 lp_exec_mask_ret(&bld->exec_mask, &bld_base->pc);
/* BRK action: break out of the innermost loop/switch. */
2262 const struct lp_build_tgsi_action * action,
2263 struct lp_build_tgsi_context * bld_base,
2264 struct lp_build_emit_data * emit_data)
2266 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2268 lp_exec_break(&bld->exec_mask);
/* BREAKC action: conditional break -- lanes where src0 != 0 leave the loop. */
2273 const struct lp_build_tgsi_action * action,
2274 struct lp_build_tgsi_context * bld_base,
2275 struct lp_build_emit_data * emit_data)
2278 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2280 tmp = lp_build_cmp(&bld_base->base, PIPE_FUNC_NOTEQUAL,
2281 emit_data->args[0], bld->bld_base.base.zero);
2283 lp_exec_break_condition(&bld->exec_mask, tmp);
/* IF action: push a condition mask -- lanes where src0 != 0 stay active. */
2288 const struct lp_build_tgsi_action * action,
2289 struct lp_build_tgsi_context * bld_base,
2290 struct lp_build_emit_data * emit_data)
2293 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2295 tmp = lp_build_cmp(&bld_base->base, PIPE_FUNC_NOTEQUAL,
2296 emit_data->args[0], bld->bld_base.base.zero);
2297 lp_exec_mask_cond_push(&bld->exec_mask, tmp);
/* BGNLOOP action: open a loop scope on the execution mask. */
2302 const struct lp_build_tgsi_action * action,
2303 struct lp_build_tgsi_context * bld_base,
2304 struct lp_build_emit_data * emit_data)
2306 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2308 lp_exec_bgnloop(&bld->exec_mask);
/* BGNSUB action: open a subroutine scope on the execution mask. */
2313 const struct lp_build_tgsi_action * action,
2314 struct lp_build_tgsi_context * bld_base,
2315 struct lp_build_emit_data * emit_data)
2317 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2319 lp_exec_mask_bgnsub(&bld->exec_mask);
/* ELSE action: invert the current condition mask. */
2324 const struct lp_build_tgsi_action * action,
2325 struct lp_build_tgsi_context * bld_base,
2326 struct lp_build_emit_data * emit_data)
2328 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2330 lp_exec_mask_cond_invert(&bld->exec_mask);
/* ENDIF action: pop the condition mask pushed by IF/UIF. */
2335 const struct lp_build_tgsi_action * action,
2336 struct lp_build_tgsi_context * bld_base,
2337 struct lp_build_emit_data * emit_data)
2339 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2341 lp_exec_mask_cond_pop(&bld->exec_mask);
/* ENDLOOP action: close the loop scope (emits the back-edge branch). */
2346 const struct lp_build_tgsi_action * action,
2347 struct lp_build_tgsi_context * bld_base,
2348 struct lp_build_emit_data * emit_data)
2350 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2352 lp_exec_endloop(bld_base->base.gallivm, &bld->exec_mask);
/* ENDSUB action: close the subroutine scope and restore pc. */
2357 const struct lp_build_tgsi_action * action,
2358 struct lp_build_tgsi_context * bld_base,
2359 struct lp_build_emit_data * emit_data)
2361 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2363 lp_exec_mask_endsub(&bld->exec_mask, &bld_base->pc);
/* CONT action: continue to the next iteration of the innermost loop. */
2368 const struct lp_build_tgsi_action * action,
2369 struct lp_build_tgsi_context * bld_base,
2370 struct lp_build_emit_data * emit_data)
2372 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2374 lp_exec_continue(&bld->exec_mask);
2377 /* XXX: Refactor and move it to lp_bld_tgsi_action.c
2379 * XXX: What do the comments about xmm registers mean? Maybe they are left over
2380 * from old code, but there is no garauntee that LLVM will use those registers
2383 * XXX: There should be no calls to lp_build_emit_fetch in this function. This
2384 * should be handled by the emit_data->fetch_args function. */
/*
 * NRM / NRM4 action: normalize the 3- or 4-component vector in src0.
 * Accumulates d = dot(src, src) over 'dims' components, takes r = rsqrt(d),
 * and writes dst.c = src.c * r for each enabled destination channel.
 * tmp4..tmp7 are expected to hold the fetched source channels by the time
 * the final multiplies run (assigned alongside the fetches above them).
 * For NRM (dims == 3) an enabled dst.w is simply set to 1.0.
 */
2387 const struct lp_build_tgsi_action * action,
2388 struct lp_build_tgsi_context * bld_base,
2389 struct lp_build_emit_data * emit_data)
2391 LLVMValueRef tmp0, tmp1;
2392 LLVMValueRef tmp4 = NULL;
2393 LLVMValueRef tmp5 = NULL;
2394 LLVMValueRef tmp6 = NULL;
2395 LLVMValueRef tmp7 = NULL;
2396 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2398 uint dims = (emit_data->inst->Instruction.Opcode == TGSI_OPCODE_NRM) ? 3 : 4;
/* Skip all the arithmetic when no normalized channel is actually written. */
2400 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_X) ||
2401 TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Y) ||
2402 TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Z) ||
2403 (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_W) && dims == 4)) {
2405 /* NOTE: Cannot use xmm regs 2/3 here (see emit_rsqrt() above). */
2408 /* xmm0 = src.x * src.x */
2409 tmp0 = lp_build_emit_fetch(&bld->bld_base, emit_data->inst, 0, TGSI_CHAN_X);
2410 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_X)) {
2413 tmp0 = lp_build_mul( &bld->bld_base.base, tmp0, tmp0);
2416 /* xmm0 = xmm0 + src.y * src.y */
2417 tmp1 = lp_build_emit_fetch(&bld->bld_base, emit_data->inst, 0, TGSI_CHAN_Y);
2418 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Y)) {
2421 tmp1 = lp_build_mul( &bld->bld_base.base, tmp1, tmp1);
2422 tmp0 = lp_build_add( &bld->bld_base.base, tmp0, tmp1);
2425 /* xmm0 = xmm0 + src.z * src.z */
2426 tmp1 = lp_build_emit_fetch(&bld->bld_base, emit_data->inst, 0, TGSI_CHAN_Z);
2427 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Z)) {
2430 tmp1 = lp_build_mul( &bld->bld_base.base, tmp1, tmp1);
2431 tmp0 = lp_build_add( &bld->bld_base.base, tmp0, tmp1);
2435 /* xmm0 = xmm0 + src.w * src.w */
2436 tmp1 = lp_build_emit_fetch(&bld->bld_base, emit_data->inst, 0, TGSI_CHAN_W);
2437 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_W)) {
2440 tmp1 = lp_build_mul( &bld->bld_base.base, tmp1, tmp1);
2441 tmp0 = lp_build_add( &bld->bld_base.base, tmp0, tmp1);
2443 /* xmm1 = 1 / sqrt(xmm0) */
2444 tmp1 = lp_build_rsqrt( &bld->bld_base.base, tmp0);
2445 /* dst.x = xmm1 * src.x */
2446 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_X)) {
2447 emit_data->output[TGSI_CHAN_X] = lp_build_mul( &bld->bld_base.base, tmp4, tmp1);
2449 /* dst.y = xmm1 * src.y */
2450 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Y)) {
2451 emit_data->output[TGSI_CHAN_Y] = lp_build_mul( &bld->bld_base.base, tmp5, tmp1);
2454 /* dst.z = xmm1 * src.z */
2455 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_Z)) {
2456 emit_data->output[TGSI_CHAN_Z] = lp_build_mul( &bld->bld_base.base, tmp6, tmp1);
2458 /* dst.w = xmm1 * src.w */
/* Fixed: the guard previously tested TGSI_CHAN_X, so dst.w was gated on
 * the X write-mask bit instead of W's (every parallel branch above tests
 * its own channel).  With only W enabled, dst.w was never written. */
2459 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_W) && dims == 4) {
2460 emit_data->output[TGSI_CHAN_W] = lp_build_mul( &bld->bld_base.base, tmp7, tmp1);
/* 3-component NRM: an enabled dst.w is defined to be 1.0, not normalized. */
2465 if (TGSI_IS_DST0_CHANNEL_ENABLED(emit_data->inst, TGSI_CHAN_W) && dims == 3) {
2466 emit_data->output[TGSI_CHAN_W] = bld->bld_base.base.one;
/*
 * Shader prologue: set up storage that must exist before any instruction
 * is translated.  Allocates flat float-vector arrays for temp/output/input
 * files that are addressed indirectly (size = (file_max+1) * 4 channels),
 * copies the input values into the alloca'd input array so indirect reads
 * can index it, and zeroes the geometry-shader emit counters.
 */
2470 static void emit_prologue(struct lp_build_tgsi_context * bld_base)
2472 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
2473 struct gallivm_state * gallivm = bld_base->base.gallivm;
2475 if (bld->indirect_files & (1 << TGSI_FILE_TEMPORARY)) {
2476 LLVMValueRef array_size =
2477 lp_build_const_int32(gallivm,
2478 bld_base->info->file_max[TGSI_FILE_TEMPORARY] * 4 + 4);
2479 bld->temps_array = lp_build_array_alloca(gallivm,
2480 bld_base->base.vec_type, array_size,
2484 if (bld->indirect_files & (1 << TGSI_FILE_OUTPUT)) {
2485 LLVMValueRef array_size =
2486 lp_build_const_int32(gallivm,
2487 bld_base->info->file_max[TGSI_FILE_OUTPUT] * 4 + 4);
2488 bld->outputs_array = lp_build_array_alloca(gallivm,
2489 bld_base->base.vec_type, array_size,
2493 /* If we have indirect addressing in inputs we need to copy them into
2494 * our alloca array to be able to iterate over them */
2495 if (bld->indirect_files & (1 << TGSI_FILE_INPUT) && !bld->gs_iface) {
2496 unsigned index, chan;
2497 LLVMTypeRef vec_type = bld_base->base.vec_type;
2498 LLVMValueRef array_size = lp_build_const_int32(gallivm,
2499 bld_base->info->file_max[TGSI_FILE_INPUT]*4 + 4);
2500 bld->inputs_array = lp_build_array_alloca(gallivm,
2501 vec_type, array_size,
2504 assert(bld_base->info->num_inputs
2505 <= bld_base->info->file_max[TGSI_FILE_INPUT] + 1);
/* Flat layout: element (index * 4 + chan) holds input[index].chan. */
2507 for (index = 0; index < bld_base->info->num_inputs; ++index) {
2508 for (chan = 0; chan < TGSI_NUM_CHANNELS; ++chan) {
2509 LLVMValueRef lindex =
2510 lp_build_const_int32(gallivm, index * 4 + chan);
2511 LLVMValueRef input_ptr =
2512 LLVMBuildGEP(gallivm->builder, bld->inputs_array,
2514 LLVMValueRef value = bld->inputs[index][chan];
2516 LLVMBuildStore(gallivm->builder, value, input_ptr);
2521 if (bld->gs_iface) {
2522 struct lp_build_context *uint_bld = &bld->bld_base.uint_bld;
2523 bld->emitted_prims_vec = uint_bld->zero;
2524 bld->emitted_vertices_vec = uint_bld->zero;
2525 bld->total_emitted_vertices_vec = uint_bld->zero;
/*
 * emit_epilogue - run once after the whole TGSI token stream has been
 * translated.  Dumps temporaries (debug), and for geometry shaders
 * flushes any primitive still being accumulated, then hands the final
 * vertex/primitive counts to the gs_iface epilogue; for non-GS shaders
 * it copies the (possibly alloca-backed) outputs into the caller's
 * output slots via gather_outputs().
 *
 * NOTE(review): this extracted chunk is missing some original lines
 * (closing braces / an intervening argument line — see the gaps in the
 * embedded line numbers).  Code is left byte-identical; comments only.
 */
2529 static void emit_epilogue(struct lp_build_tgsi_context * bld_base)
2531 struct lp_build_tgsi_soa_context * bld = lp_soa_context(bld_base);
/* Debug aid: emit code that prints temp registers at shader end. */
2535 emit_dump_temps(bld);
2538 /* If we have indirect addressing in outputs we need to copy our alloca array
2539 * to the outputs slots specified by the caller */
2540 if (bld->gs_iface) {
2541 /* flush the accumulated vertices as a primitive */
2542 if (bld->pending_end_primitive) {
2543 end_primitive(NULL, bld_base, NULL);
2544 bld->pending_end_primitive = FALSE;
/* Report final emit totals back through the GS interface. */
2547 bld->gs_iface->gs_epilogue(bld->gs_iface,
2549 bld->total_emitted_vertices_vec,
2550 bld->emitted_prims_vec);
2552 gather_outputs(bld);
2557 lp_build_tgsi_soa(struct gallivm_state *gallivm,
2558 const struct tgsi_token *tokens,
2559 struct lp_type type,
2560 struct lp_build_mask_context *mask,
2561 LLVMValueRef consts_ptr,
2562 const struct lp_bld_tgsi_system_values *system_values,
2563 const LLVMValueRef *pos,
2564 const LLVMValueRef (*inputs)[TGSI_NUM_CHANNELS],
2565 LLVMValueRef (*outputs)[TGSI_NUM_CHANNELS],
2566 struct lp_build_sampler_soa *sampler,
2567 const struct tgsi_shader_info *info,
2568 const struct lp_build_tgsi_gs_iface *gs_iface)
2570 struct lp_build_tgsi_soa_context bld;
2572 struct lp_type res_type;
2574 assert(type.length <= LP_MAX_VECTOR_LENGTH);
2575 memset(&res_type, 0, sizeof res_type);
2576 res_type.width = type.width;
2577 res_type.length = type.length;
2580 /* Setup build context */
2581 memset(&bld, 0, sizeof bld);
2582 lp_build_context_init(&bld.bld_base.base, gallivm, type);
2583 lp_build_context_init(&bld.bld_base.uint_bld, gallivm, lp_uint_type(type));
2584 lp_build_context_init(&bld.bld_base.int_bld, gallivm, lp_int_type(type));
2585 lp_build_context_init(&bld.elem_bld, gallivm, lp_elem_type(type));
2588 bld.inputs = inputs;
2589 bld.outputs = outputs;
2590 bld.consts_ptr = consts_ptr;
2591 bld.sampler = sampler;
2592 bld.bld_base.info = info;
2593 bld.indirect_files = info->indirect_files;
2595 bld.bld_base.soa = TRUE;
2596 bld.bld_base.emit_fetch_funcs[TGSI_FILE_CONSTANT] = emit_fetch_constant;
2597 bld.bld_base.emit_fetch_funcs[TGSI_FILE_IMMEDIATE] = emit_fetch_immediate;
2598 bld.bld_base.emit_fetch_funcs[TGSI_FILE_INPUT] = emit_fetch_input;
2599 bld.bld_base.emit_fetch_funcs[TGSI_FILE_TEMPORARY] = emit_fetch_temporary;
2600 bld.bld_base.emit_fetch_funcs[TGSI_FILE_SYSTEM_VALUE] = emit_fetch_system_value;
2601 bld.bld_base.emit_store = emit_store;
2603 bld.bld_base.emit_declaration = lp_emit_declaration_soa;
2604 bld.bld_base.emit_immediate = lp_emit_immediate_soa;
2606 bld.bld_base.emit_prologue = emit_prologue;
2607 bld.bld_base.emit_epilogue = emit_epilogue;
2609 /* Set opcode actions */
2610 lp_set_default_actions_cpu(&bld.bld_base);
2612 bld.bld_base.op_actions[TGSI_OPCODE_BGNLOOP].emit = bgnloop_emit;
2613 bld.bld_base.op_actions[TGSI_OPCODE_BGNSUB].emit = bgnsub_emit;
2614 bld.bld_base.op_actions[TGSI_OPCODE_BRK].emit = brk_emit;
2615 bld.bld_base.op_actions[TGSI_OPCODE_BREAKC].emit = breakc_emit;
2616 bld.bld_base.op_actions[TGSI_OPCODE_CAL].emit = cal_emit;
2617 bld.bld_base.op_actions[TGSI_OPCODE_CONT].emit = cont_emit;
2618 bld.bld_base.op_actions[TGSI_OPCODE_DDX].emit = ddx_emit;
2619 bld.bld_base.op_actions[TGSI_OPCODE_DDY].emit = ddy_emit;
2620 bld.bld_base.op_actions[TGSI_OPCODE_ELSE].emit = else_emit;
2621 bld.bld_base.op_actions[TGSI_OPCODE_ENDIF].emit = endif_emit;
2622 bld.bld_base.op_actions[TGSI_OPCODE_ENDLOOP].emit = endloop_emit;
2623 bld.bld_base.op_actions[TGSI_OPCODE_ENDSUB].emit = endsub_emit;
2624 bld.bld_base.op_actions[TGSI_OPCODE_IF].emit = if_emit;
2625 bld.bld_base.op_actions[TGSI_OPCODE_KIL].emit = kil_emit;
2626 bld.bld_base.op_actions[TGSI_OPCODE_KILP].emit = kilp_emit;
2627 bld.bld_base.op_actions[TGSI_OPCODE_NRM].emit = nrm_emit;
2628 bld.bld_base.op_actions[TGSI_OPCODE_NRM4].emit = nrm_emit;
2629 bld.bld_base.op_actions[TGSI_OPCODE_RET].emit = ret_emit;
2630 bld.bld_base.op_actions[TGSI_OPCODE_TEX].emit = tex_emit;
2631 bld.bld_base.op_actions[TGSI_OPCODE_TXB].emit = txb_emit;
2632 bld.bld_base.op_actions[TGSI_OPCODE_TXD].emit = txd_emit;
2633 bld.bld_base.op_actions[TGSI_OPCODE_TXL].emit = txl_emit;
2634 bld.bld_base.op_actions[TGSI_OPCODE_TXP].emit = txp_emit;
2635 bld.bld_base.op_actions[TGSI_OPCODE_TXQ].emit = txq_emit;
2636 bld.bld_base.op_actions[TGSI_OPCODE_TXF].emit = txf_emit;
2637 /* DX10 sampling ops */
2638 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE].emit = sample_emit;
2639 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_B].emit = sample_b_emit;
2640 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_C].emit = sample_c_emit;
2641 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_C_LZ].emit = sample_c_lz_emit;
2642 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_D].emit = sample_d_emit;
2643 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_I].emit = sample_i_emit;
2644 bld.bld_base.op_actions[TGSI_OPCODE_SAMPLE_L].emit = sample_l_emit;
2645 bld.bld_base.op_actions[TGSI_OPCODE_SVIEWINFO].emit = sviewinfo_emit;
2648 /* inputs are always indirect with gs */
2649 bld.indirect_files |= (1 << TGSI_FILE_INPUT);
2650 bld.gs_iface = gs_iface;
2651 bld.pending_end_primitive = FALSE;
2652 bld.bld_base.emit_fetch_funcs[TGSI_FILE_INPUT] = emit_fetch_gs_input;
2653 bld.bld_base.op_actions[TGSI_OPCODE_EMIT].emit = emit_vertex;
2654 bld.bld_base.op_actions[TGSI_OPCODE_ENDPRIM].emit = end_primitive;
2657 lp_exec_mask_init(&bld.exec_mask, &bld.bld_base.base);
2659 bld.system_values = *system_values;
2661 lp_build_tgsi_llvm(&bld.bld_base, tokens);
2664 LLVMBasicBlockRef block = LLVMGetInsertBlock(gallivm->builder);
2665 LLVMValueRef function = LLVMGetBasicBlockParent(block);
2666 debug_printf("11111111111111111111111111111 \n");
2667 tgsi_dump(tokens, 0);
2668 lp_debug_dump_value(function);
2669 debug_printf("2222222222222222222222222222 \n");
2673 LLVMModuleRef module = LLVMGetGlobalParent(
2674 LLVMGetBasicBlockParent(LLVMGetInsertBlock(gallivm->builder)));
2675 LLVMDumpModule(module);