// SwiftShader Software Renderer
//
// Copyright(c) 2005-2013 TransGaming Inc.
//
// All rights reserved. No part of this software may be copied, distributed, transmitted,
// transcribed, stored in a retrieval system, translated into any human or computer
// language by any means, or disclosed to third parties without the explicit written
// agreement of TransGaming Inc. Without such an agreement, no rights or licenses, express
// or implied, including but not limited to any patent rights, are granted to you.
12 #include "OutputASM.h"
\r
13 #include "Common/Math.hpp"
\r
15 #include "common/debug.h"
\r
16 #include "InfoSink.h"
\r
18 #include "libGLESv2/Shader.h"
\r
20 #include <GLES2/gl2.h>
\r
21 #include <GLES2/gl2ext.h>
\r
22 #include <GLES3/gl3.h>
\r
26 // Integer to TString conversion
\r
30 sprintf(buffer, "%d", i);
\r
34 class Temporary : public TIntermSymbol
\r
37 Temporary(OutputASM *assembler) : TIntermSymbol(TSymbolTableLevel::nextUniqueId(), "tmp", TType(EbtFloat, EbpHigh, EvqTemporary, 4, 1, false)), assembler(assembler)
\r
43 assembler->freeTemporary(this);
\r
47 OutputASM *const assembler;
\r
50 class Constant : public TIntermConstantUnion
\r
53 Constant(float x, float y, float z, float w) : TIntermConstantUnion(constants, TType(EbtFloat, EbpHigh, EvqConstExpr, 4, 1, false))
\r
55 constants[0].setFConst(x);
\r
56 constants[1].setFConst(y);
\r
57 constants[2].setFConst(z);
\r
58 constants[3].setFConst(w);
\r
61 Constant(bool b) : TIntermConstantUnion(constants, TType(EbtBool, EbpHigh, EvqConstExpr, 1, 1, false))
\r
63 constants[0].setBConst(b);
\r
66 Constant(int i) : TIntermConstantUnion(constants, TType(EbtInt, EbpHigh, EvqConstExpr, 1, 1, false))
\r
68 constants[0].setIConst(i);
\r
76 ConstantUnion constants[4];
\r
79 Uniform::Uniform(GLenum type, GLenum precision, const std::string &name, int arraySize, int registerIndex, int blockId, const BlockMemberInfo& blockMemberInfo) :
\r
80 type(type), precision(precision), name(name), arraySize(arraySize), registerIndex(registerIndex), blockId(blockId), blockInfo(blockMemberInfo)
\r
84 UniformBlock::UniformBlock(const std::string& name, unsigned int dataSize, unsigned int arraySize,
\r
85 TLayoutBlockStorage layout, bool isRowMajorLayout, int registerIndex, int blockId) :
\r
86 name(name), dataSize(dataSize), arraySize(arraySize), layout(layout),
\r
87 isRowMajorLayout(isRowMajorLayout), registerIndex(registerIndex), blockId(blockId)
\r
91 BlockLayoutEncoder::BlockLayoutEncoder(bool rowMajor)
\r
92 : mCurrentOffset(0), isRowMajor(rowMajor)
\r
96 BlockMemberInfo BlockLayoutEncoder::encodeType(const TType &type)
\r
101 getBlockLayoutInfo(type, type.getArraySize(), isRowMajor, &arrayStride, &matrixStride);
\r
103 const BlockMemberInfo memberInfo(static_cast<int>(mCurrentOffset * BytesPerComponent),
\r
104 static_cast<int>(arrayStride * BytesPerComponent),
\r
105 static_cast<int>(matrixStride * BytesPerComponent),
\r
108 advanceOffset(type, type.getArraySize(), isRowMajor, arrayStride, matrixStride);
\r
114 size_t BlockLayoutEncoder::getBlockRegister(const BlockMemberInfo &info)
\r
116 return (info.offset / BytesPerComponent) / ComponentsPerRegister;
\r
120 size_t BlockLayoutEncoder::getBlockRegisterElement(const BlockMemberInfo &info)
\r
122 return (info.offset / BytesPerComponent) % ComponentsPerRegister;
\r
125 void BlockLayoutEncoder::nextRegister()
\r
127 mCurrentOffset = sw::align(mCurrentOffset, ComponentsPerRegister);
\r
130 Std140BlockEncoder::Std140BlockEncoder(bool rowMajor) : BlockLayoutEncoder(rowMajor)
\r
134 void Std140BlockEncoder::enterAggregateType()
\r
139 void Std140BlockEncoder::exitAggregateType()
\r
144 void Std140BlockEncoder::getBlockLayoutInfo(const TType &type, unsigned int arraySize, bool isRowMajorMatrix, int *arrayStrideOut, int *matrixStrideOut)
\r
146 size_t baseAlignment = 0;
\r
147 int matrixStride = 0;
\r
148 int arrayStride = 0;
\r
150 if(type.isMatrix())
\r
152 baseAlignment = ComponentsPerRegister;
\r
153 matrixStride = ComponentsPerRegister;
\r
157 const int numRegisters = isRowMajorMatrix ? type.getSecondarySize() : type.getNominalSize();
\r
158 arrayStride = ComponentsPerRegister * numRegisters;
\r
161 else if(arraySize > 0)
\r
163 baseAlignment = ComponentsPerRegister;
\r
164 arrayStride = ComponentsPerRegister;
\r
168 const int numComponents = type.getElementSize();
\r
169 baseAlignment = (numComponents == 3 ? 4u : static_cast<size_t>(numComponents));
\r
172 mCurrentOffset = sw::align(mCurrentOffset, baseAlignment);
\r
174 *matrixStrideOut = matrixStride;
\r
175 *arrayStrideOut = arrayStride;
\r
178 void Std140BlockEncoder::advanceOffset(const TType &type, unsigned int arraySize, bool isRowMajorMatrix, int arrayStride, int matrixStride)
\r
182 mCurrentOffset += arrayStride * arraySize;
\r
184 else if(type.isMatrix())
\r
186 ASSERT(matrixStride == ComponentsPerRegister);
\r
187 const int numRegisters = isRowMajorMatrix ? type.getSecondarySize() : type.getNominalSize();
\r
188 mCurrentOffset += ComponentsPerRegister * numRegisters;
\r
192 mCurrentOffset += type.getElementSize();
\r
196 Attribute::Attribute()
\r
203 Attribute::Attribute(GLenum type, const std::string &name, int arraySize, int location, int registerIndex)
\r
207 this->arraySize = arraySize;
\r
208 this->location = location;
\r
209 this->registerIndex = registerIndex;
\r
212 sw::PixelShader *Shader::getPixelShader() const
\r
217 sw::VertexShader *Shader::getVertexShader() const
\r
222 OutputASM::TextureFunction::TextureFunction(const TString& nodeName) : method(IMPLICIT), proj(false), offset(false)
\r
224 TString name = TFunction::unmangleName(nodeName);
\r
226 if(name == "texture2D" || name == "textureCube" || name == "texture" || name == "texture3D")
\r
230 else if(name == "texture2DProj" || name == "textureProj")
\r
235 else if(name == "texture2DLod" || name == "textureCubeLod" || name == "textureLod")
\r
239 else if(name == "texture2DProjLod" || name == "textureProjLod")
\r
244 else if(name == "textureSize")
\r
248 else if(name == "textureOffset")
\r
253 else if(name == "textureProjOffset")
\r
259 else if(name == "textureLodOffset")
\r
264 else if(name == "textureProjLodOffset")
\r
270 else if(name == "texelFetch")
\r
274 else if(name == "texelFetchOffset")
\r
279 else if(name == "textureGrad")
\r
283 else if(name == "textureGradOffset")
\r
288 else if(name == "textureProjGrad")
\r
293 else if(name == "textureProjGradOffset")
\r
299 else UNREACHABLE(0);
\r
302 OutputASM::OutputASM(TParseContext &context, Shader *shaderObject) : TIntermTraverser(true, true, true), shaderObject(shaderObject), mContext(context)
\r
310 shader = shaderObject->getShader();
\r
311 pixelShader = shaderObject->getPixelShader();
\r
312 vertexShader = shaderObject->getVertexShader();
\r
315 functionArray.push_back(Function(0, "main(", 0, 0));
\r
316 currentFunction = 0;
\r
317 outputQualifier = EvqOutput; // Set outputQualifier to any value other than EvqFragColor or EvqFragData
\r
320 OutputASM::~OutputASM()
\r
324 void OutputASM::output()
\r
328 emitShader(GLOBAL);
\r
330 if(functionArray.size() > 1) // Only call main() when there are other functions
\r
332 Instruction *callMain = emit(sw::Shader::OPCODE_CALL);
\r
333 callMain->dst.type = sw::Shader::PARAMETER_LABEL;
\r
334 callMain->dst.index = 0; // main()
\r
336 emit(sw::Shader::OPCODE_RET);
\r
339 emitShader(FUNCTION);
\r
343 void OutputASM::emitShader(Scope scope)
\r
346 currentScope = GLOBAL;
\r
347 mContext.getTreeRoot()->traverse(this);
\r
350 void OutputASM::freeTemporary(Temporary *temporary)
\r
352 free(temporaries, temporary);
\r
355 sw::Shader::Opcode OutputASM::getOpcode(sw::Shader::Opcode op, TIntermTyped *in) const
\r
357 TBasicType baseType = in->getType().getBasicType();
\r
361 case sw::Shader::OPCODE_NEG:
\r
366 return sw::Shader::OPCODE_INEG;
\r
371 case sw::Shader::OPCODE_ADD:
\r
376 return sw::Shader::OPCODE_IADD;
\r
381 case sw::Shader::OPCODE_SUB:
\r
386 return sw::Shader::OPCODE_ISUB;
\r
391 case sw::Shader::OPCODE_MUL:
\r
396 return sw::Shader::OPCODE_IMUL;
\r
401 case sw::Shader::OPCODE_DIV:
\r
405 return sw::Shader::OPCODE_IDIV;
\r
407 return sw::Shader::OPCODE_UDIV;
\r
412 case sw::Shader::OPCODE_IMOD:
\r
413 return baseType == EbtUInt ? sw::Shader::OPCODE_UMOD : op;
\r
414 case sw::Shader::OPCODE_ISHR:
\r
415 return baseType == EbtUInt ? sw::Shader::OPCODE_USHR : op;
\r
416 case sw::Shader::OPCODE_MIN:
\r
420 return sw::Shader::OPCODE_IMIN;
\r
422 return sw::Shader::OPCODE_UMIN;
\r
427 case sw::Shader::OPCODE_MAX:
\r
431 return sw::Shader::OPCODE_IMAX;
\r
433 return sw::Shader::OPCODE_UMAX;
\r
443 void OutputASM::visitSymbol(TIntermSymbol *symbol)
\r
445 // Vertex varyings don't have to be actively used to successfully link
\r
446 // against pixel shaders that use them. So make sure they're declared.
\r
447 if(symbol->getQualifier() == EvqVaryingOut || symbol->getQualifier() == EvqInvariantVaryingOut || symbol->getQualifier() == EvqVertexOut)
\r
449 if(symbol->getBasicType() != EbtInvariant) // Typeless declarations are not new varyings
\r
451 declareVarying(symbol, -1);
\r
456 bool OutputASM::visitBinary(Visit visit, TIntermBinary *node)
\r
458 if(currentScope != emitScope)
\r
463 TIntermTyped *result = node;
\r
464 TIntermTyped *left = node->getLeft();
\r
465 TIntermTyped *right = node->getRight();
\r
466 const TType &leftType = left->getType();
\r
467 const TType &rightType = right->getType();
\r
468 const TType &resultType = node->getType();
\r
470 switch(node->getOp())
\r
473 if(visit == PostVisit)
\r
475 assignLvalue(left, right);
\r
476 copy(result, right);
\r
479 case EOpInitialize:
\r
480 if(visit == PostVisit)
\r
485 case EOpMatrixTimesScalarAssign:
\r
486 if(visit == PostVisit)
\r
488 for(int i = 0; i < leftType.getNominalSize(); i++)
\r
490 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, left, right);
\r
491 mul->dst.index += i;
\r
492 argument(mul->src[0], left, i);
\r
495 assignLvalue(left, result);
\r
498 case EOpVectorTimesMatrixAssign:
\r
499 if(visit == PostVisit)
\r
501 int size = leftType.getNominalSize();
\r
503 for(int i = 0; i < size; i++)
\r
505 Instruction *dot = emit(sw::Shader::OPCODE_DP(size), result, left, right);
\r
506 dot->dst.mask = 1 << i;
\r
507 argument(dot->src[1], right, i);
\r
510 assignLvalue(left, result);
\r
513 case EOpMatrixTimesMatrixAssign:
\r
514 if(visit == PostVisit)
\r
516 int dim = leftType.getNominalSize();
\r
518 for(int i = 0; i < dim; i++)
\r
520 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, left, right);
\r
521 mul->dst.index += i;
\r
522 argument(mul->src[1], right, i);
\r
523 mul->src[1].swizzle = 0x00;
\r
525 for(int j = 1; j < dim; j++)
\r
527 Instruction *mad = emit(sw::Shader::OPCODE_MAD, result, left, right, result);
\r
528 mad->dst.index += i;
\r
529 argument(mad->src[0], left, j);
\r
530 argument(mad->src[1], right, i);
\r
531 mad->src[1].swizzle = j * 0x55;
\r
532 argument(mad->src[2], result, i);
\r
536 assignLvalue(left, result);
\r
539 case EOpIndexDirect:
\r
540 if(visit == PostVisit)
\r
542 int index = right->getAsConstantUnion()->getIConst(0);
\r
544 if(result->isMatrix() || result->isStruct() || result->isInterfaceBlock())
\r
546 ASSERT(left->isArray());
\r
547 copy(result, left, index * left->elementRegisterCount());
\r
549 else if(result->isRegister())
\r
551 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, left);
\r
553 if(left->isRegister())
\r
555 mov->src[0].swizzle = index;
\r
557 else if(left->isArray())
\r
559 argument(mov->src[0], left, index * left->elementRegisterCount());
\r
561 else if(left->isMatrix())
\r
563 ASSERT(index < left->getNominalSize()); // FIXME: Report semantic error
\r
564 argument(mov->src[0], left, index);
\r
566 else UNREACHABLE(0);
\r
568 else UNREACHABLE(0);
\r
571 case EOpIndexIndirect:
\r
572 if(visit == PostVisit)
\r
574 if(left->isArray() || left->isMatrix())
\r
576 for(int index = 0; index < result->totalRegisterCount(); index++)
\r
578 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, left);
\r
579 mov->dst.index += index;
\r
580 mov->dst.mask = writeMask(result, index);
\r
581 argument(mov->src[0], left, index);
\r
583 if(left->totalRegisterCount() > 1)
\r
585 sw::Shader::SourceParameter relativeRegister;
\r
586 argument(relativeRegister, right);
\r
588 mov->src[0].rel.type = relativeRegister.type;
\r
589 mov->src[0].rel.index = relativeRegister.index;
\r
590 mov->src[0].rel.scale = result->totalRegisterCount();
\r
591 mov->src[0].rel.deterministic = !(vertexShader && left->getQualifier() == EvqUniform);
\r
595 else if(left->isRegister())
\r
597 emit(sw::Shader::OPCODE_EXTRACT, result, left, right);
\r
599 else UNREACHABLE(0);
\r
602 case EOpIndexDirectStruct:
\r
603 case EOpIndexDirectInterfaceBlock:
\r
604 if(visit == PostVisit)
\r
606 ASSERT(leftType.isStruct() || (leftType.isInterfaceBlock()));
\r
608 const TFieldList& fields = (node->getOp() == EOpIndexDirectStruct) ?
\r
609 leftType.getStruct()->fields() :
\r
610 leftType.getInterfaceBlock()->fields();
\r
611 int index = right->getAsConstantUnion()->getIConst(0);
\r
612 int fieldOffset = 0;
\r
614 for(int i = 0; i < index; i++)
\r
616 fieldOffset += fields[i]->type()->totalRegisterCount();
\r
619 copy(result, left, fieldOffset);
\r
622 case EOpVectorSwizzle:
\r
623 if(visit == PostVisit)
\r
626 TIntermAggregate *components = right->getAsAggregate();
\r
630 TIntermSequence &sequence = components->getSequence();
\r
633 for(TIntermSequence::iterator sit = sequence.begin(); sit != sequence.end(); sit++)
\r
635 TIntermConstantUnion *element = (*sit)->getAsConstantUnion();
\r
639 int i = element->getUnionArrayPointer()[0].getIConst();
\r
640 swizzle |= i << (component * 2);
\r
643 else UNREACHABLE(0);
\r
646 else UNREACHABLE(0);
\r
648 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, left);
\r
649 mov->src[0].swizzle = swizzle;
\r
652 case EOpAddAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_ADD, result), result, left, left, right); break;
\r
653 case EOpAdd: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_ADD, result), result, left, right); break;
\r
654 case EOpSubAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_SUB, result), result, left, left, right); break;
\r
655 case EOpSub: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_SUB, result), result, left, right); break;
\r
656 case EOpMulAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_MUL, result), result, left, left, right); break;
\r
657 case EOpMul: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_MUL, result), result, left, right); break;
\r
658 case EOpDivAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_DIV, result), result, left, left, right); break;
\r
659 case EOpDiv: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_DIV, result), result, left, right); break;
\r
660 case EOpIModAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_IMOD, result), result, left, left, right); break;
\r
661 case EOpIMod: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_IMOD, result), result, left, right); break;
\r
662 case EOpBitShiftLeftAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_SHL, result, left, left, right); break;
\r
663 case EOpBitShiftLeft: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_SHL, result, left, right); break;
\r
664 case EOpBitShiftRightAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_ISHR, result), result, left, left, right); break;
\r
665 case EOpBitShiftRight: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_ISHR, result), result, left, right); break;
\r
666 case EOpBitwiseAndAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_AND, result, left, left, right); break;
\r
667 case EOpBitwiseAnd: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_AND, result, left, right); break;
\r
668 case EOpBitwiseXorAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_XOR, result, left, left, right); break;
\r
669 case EOpBitwiseXor: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_XOR, result, left, right); break;
\r
670 case EOpBitwiseOrAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_OR, result, left, left, right); break;
\r
671 case EOpBitwiseOr: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_OR, result, left, right); break;
\r
673 if(visit == PostVisit)
\r
675 emitBinary(sw::Shader::OPCODE_EQ, result, left, right);
\r
677 for(int index = 1; index < left->totalRegisterCount(); index++)
\r
679 Temporary equal(this);
\r
680 Instruction *eq = emit(sw::Shader::OPCODE_EQ, &equal, left, right);
\r
681 argument(eq->src[0], left, index);
\r
682 argument(eq->src[1], right, index);
\r
683 emit(sw::Shader::OPCODE_AND, result, result, &equal);
\r
688 if(visit == PostVisit)
\r
690 emitBinary(sw::Shader::OPCODE_NE, result, left, right);
\r
692 for(int index = 1; index < left->totalRegisterCount(); index++)
\r
694 Temporary notEqual(this);
\r
695 Instruction *eq = emit(sw::Shader::OPCODE_NE, ¬Equal, left, right);
\r
696 argument(eq->src[0], left, index);
\r
697 argument(eq->src[1], right, index);
\r
698 emit(sw::Shader::OPCODE_OR, result, result, ¬Equal);
\r
702 case EOpLessThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LT, result, left, right); break;
\r
703 case EOpGreaterThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GT, result, left, right); break;
\r
704 case EOpLessThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LE, result, left, right); break;
\r
705 case EOpGreaterThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GE, result, left, right); break;
\r
706 case EOpVectorTimesScalarAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_MUL, left), result, left, left, right); break;
\r
707 case EOpVectorTimesScalar: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_MUL, left), result, left, right); break;
\r
708 case EOpMatrixTimesScalar:
\r
709 if(visit == PostVisit)
\r
711 for(int i = 0; i < leftType.getNominalSize(); i++)
\r
713 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, left, right);
\r
714 mul->dst.index += i;
\r
715 argument(mul->src[0], left, i);
\r
719 case EOpVectorTimesMatrix:
\r
720 if(visit == PostVisit)
\r
722 sw::Shader::Opcode dpOpcode = sw::Shader::OPCODE_DP(leftType.getNominalSize());
\r
724 int size = rightType.getNominalSize();
\r
725 for(int i = 0; i < size; i++)
\r
727 Instruction *dot = emit(dpOpcode, result, left, right);
\r
728 dot->dst.mask = 1 << i;
\r
729 argument(dot->src[1], right, i);
\r
733 case EOpMatrixTimesVector:
\r
734 if(visit == PostVisit)
\r
736 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, left, right);
\r
737 mul->src[1].swizzle = 0x00;
\r
739 int size = rightType.getNominalSize();
\r
740 for(int i = 1; i < size; i++)
\r
742 Instruction *mad = emit(sw::Shader::OPCODE_MAD, result, left, right, result);
\r
743 argument(mad->src[0], left, i);
\r
744 mad->src[1].swizzle = i * 0x55;
\r
748 case EOpMatrixTimesMatrix:
\r
749 if(visit == PostVisit)
\r
751 int dim = leftType.getNominalSize();
\r
753 int size = rightType.getNominalSize();
\r
754 for(int i = 0; i < size; i++)
\r
756 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, left, right);
\r
757 mul->dst.index += i;
\r
758 argument(mul->src[1], right, i);
\r
759 mul->src[1].swizzle = 0x00;
\r
761 for(int j = 1; j < dim; j++)
\r
763 Instruction *mad = emit(sw::Shader::OPCODE_MAD, result, left, right, result);
\r
764 mad->dst.index += i;
\r
765 argument(mad->src[0], left, j);
\r
766 argument(mad->src[1], right, i);
\r
767 mad->src[1].swizzle = j * 0x55;
\r
768 argument(mad->src[2], result, i);
\r
774 if(trivial(right, 6))
\r
776 if(visit == PostVisit)
\r
778 emit(sw::Shader::OPCODE_OR, result, left, right);
\r
781 else // Short-circuit evaluation
\r
783 if(visit == InVisit)
\r
785 emit(sw::Shader::OPCODE_MOV, result, left);
\r
786 Instruction *ifnot = emit(sw::Shader::OPCODE_IF, 0, result);
\r
787 ifnot->src[0].modifier = sw::Shader::MODIFIER_NOT;
\r
789 else if(visit == PostVisit)
\r
791 emit(sw::Shader::OPCODE_MOV, result, right);
\r
792 emit(sw::Shader::OPCODE_ENDIF);
\r
796 case EOpLogicalXor: if(visit == PostVisit) emit(sw::Shader::OPCODE_XOR, result, left, right); break;
\r
797 case EOpLogicalAnd:
\r
798 if(trivial(right, 6))
\r
800 if(visit == PostVisit)
\r
802 emit(sw::Shader::OPCODE_AND, result, left, right);
\r
805 else // Short-circuit evaluation
\r
807 if(visit == InVisit)
\r
809 emit(sw::Shader::OPCODE_MOV, result, left);
\r
810 emit(sw::Shader::OPCODE_IF, 0, result);
\r
812 else if(visit == PostVisit)
\r
814 emit(sw::Shader::OPCODE_MOV, result, right);
\r
815 emit(sw::Shader::OPCODE_ENDIF);
\r
819 default: UNREACHABLE(node->getOp());
\r
825 void OutputASM::emitDeterminant(TIntermTyped *result, TIntermTyped *arg, int size, int col, int row, int outCol, int outRow)
\r
829 case 1: // Used for cofactor computation only
\r
831 // For a 2x2 matrix, the cofactor is simply a transposed move or negate
\r
832 bool isMov = (row == col);
\r
833 sw::Shader::Opcode op = isMov ? sw::Shader::OPCODE_MOV : sw::Shader::OPCODE_NEG;
\r
834 Instruction *mov = emit(op, result, arg);
\r
835 mov->src[0].index += isMov ? 1 - row : row;
\r
836 mov->src[0].swizzle = 0x55 * (isMov ? 1 - col : col);
\r
837 mov->dst.index += outCol;
\r
838 mov->dst.mask = 1 << outRow;
\r
843 static const unsigned int swizzle[3] = { 0x99, 0x88, 0x44 }; // xy?? : yzyz, xzxz, xyxy
\r
845 bool isCofactor = (col >= 0) && (row >= 0);
\r
846 int col0 = (isCofactor && (col <= 0)) ? 1 : 0;
\r
847 int col1 = (isCofactor && (col <= 1)) ? 2 : 1;
\r
848 bool negate = isCofactor && ((col & 0x01) ^ (row & 0x01));
\r
850 Instruction *det = emit(sw::Shader::OPCODE_DET2, result, arg, arg);
\r
851 det->src[0].index += negate ? col1 : col0;
\r
852 det->src[1].index += negate ? col0 : col1;
\r
853 det->src[0].swizzle = det->src[1].swizzle = swizzle[isCofactor ? row : 2];
\r
854 det->dst.index += outCol;
\r
855 det->dst.mask = 1 << outRow;
\r
860 static const unsigned int swizzle[4] = { 0xF9, 0xF8, 0xF4, 0xE4 }; // xyz? : yzww, xzww, xyww, xyzw
\r
862 bool isCofactor = (col >= 0) && (row >= 0);
\r
863 int col0 = (isCofactor && (col <= 0)) ? 1 : 0;
\r
864 int col1 = (isCofactor && (col <= 1)) ? 2 : 1;
\r
865 int col2 = (isCofactor && (col <= 2)) ? 3 : 2;
\r
866 bool negate = isCofactor && ((col & 0x01) ^ (row & 0x01));
\r
868 Instruction *det = emit(sw::Shader::OPCODE_DET3, result, arg, arg, arg);
\r
869 det->src[0].index += col0;
\r
870 det->src[1].index += negate ? col2 : col1;
\r
871 det->src[2].index += negate ? col1 : col2;
\r
872 det->src[0].swizzle = det->src[1].swizzle = det->src[2].swizzle = swizzle[isCofactor ? row : 3];
\r
873 det->dst.index += outCol;
\r
874 det->dst.mask = 1 << outRow;
\r
879 Instruction *det = emit(sw::Shader::OPCODE_DET4, result, arg, arg, arg, arg);
\r
880 det->src[1].index += 1;
\r
881 det->src[2].index += 2;
\r
882 det->src[3].index += 3;
\r
883 det->dst.index += outCol;
\r
884 det->dst.mask = 1 << outRow;
\r
893 bool OutputASM::visitUnary(Visit visit, TIntermUnary *node)
\r
895 if(currentScope != emitScope)
\r
900 TIntermTyped *result = node;
\r
901 TIntermTyped *arg = node->getOperand();
\r
902 TBasicType basicType = arg->getType().getBasicType();
\r
910 if(basicType == EbtInt || basicType == EbtUInt)
\r
916 one_value.f = 1.0f;
\r
919 Constant one(one_value.f, one_value.f, one_value.f, one_value.f);
\r
920 Constant rad(1.74532925e-2f, 1.74532925e-2f, 1.74532925e-2f, 1.74532925e-2f);
\r
921 Constant deg(5.72957795e+1f, 5.72957795e+1f, 5.72957795e+1f, 5.72957795e+1f);
\r
923 switch(node->getOp())
\r
926 if(visit == PostVisit)
\r
928 sw::Shader::Opcode negOpcode = getOpcode(sw::Shader::OPCODE_NEG, arg);
\r
929 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
931 Instruction *neg = emit(negOpcode, result, arg);
\r
932 neg->dst.index += index;
\r
933 argument(neg->src[0], arg, index);
\r
937 case EOpVectorLogicalNot: if(visit == PostVisit) emit(sw::Shader::OPCODE_NOT, result, arg); break;
\r
938 case EOpLogicalNot: if(visit == PostVisit) emit(sw::Shader::OPCODE_NOT, result, arg); break;
\r
939 case EOpPostIncrement:
\r
940 if(visit == PostVisit)
\r
944 sw::Shader::Opcode addOpcode = getOpcode(sw::Shader::OPCODE_ADD, arg);
\r
945 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
947 Instruction *add = emit(addOpcode, arg, arg, &one);
\r
948 add->dst.index += index;
\r
949 argument(add->src[0], arg, index);
\r
952 assignLvalue(arg, arg);
\r
955 case EOpPostDecrement:
\r
956 if(visit == PostVisit)
\r
960 sw::Shader::Opcode subOpcode = getOpcode(sw::Shader::OPCODE_SUB, arg);
\r
961 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
963 Instruction *sub = emit(subOpcode, arg, arg, &one);
\r
964 sub->dst.index += index;
\r
965 argument(sub->src[0], arg, index);
\r
968 assignLvalue(arg, arg);
\r
971 case EOpPreIncrement:
\r
972 if(visit == PostVisit)
\r
974 sw::Shader::Opcode addOpcode = getOpcode(sw::Shader::OPCODE_ADD, arg);
\r
975 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
977 Instruction *add = emit(addOpcode, result, arg, &one);
\r
978 add->dst.index += index;
\r
979 argument(add->src[0], arg, index);
\r
982 assignLvalue(arg, result);
\r
985 case EOpPreDecrement:
\r
986 if(visit == PostVisit)
\r
988 sw::Shader::Opcode subOpcode = getOpcode(sw::Shader::OPCODE_SUB, arg);
\r
989 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
991 Instruction *sub = emit(subOpcode, result, arg, &one);
\r
992 sub->dst.index += index;
\r
993 argument(sub->src[0], arg, index);
\r
996 assignLvalue(arg, result);
\r
999 case EOpRadians: if(visit == PostVisit) emit(sw::Shader::OPCODE_MUL, result, arg, &rad); break;
\r
1000 case EOpDegrees: if(visit == PostVisit) emit(sw::Shader::OPCODE_MUL, result, arg, °); break;
\r
1001 case EOpSin: if(visit == PostVisit) emit(sw::Shader::OPCODE_SIN, result, arg); break;
\r
1002 case EOpCos: if(visit == PostVisit) emit(sw::Shader::OPCODE_COS, result, arg); break;
\r
1003 case EOpTan: if(visit == PostVisit) emit(sw::Shader::OPCODE_TAN, result, arg); break;
\r
1004 case EOpAsin: if(visit == PostVisit) emit(sw::Shader::OPCODE_ASIN, result, arg); break;
\r
1005 case EOpAcos: if(visit == PostVisit) emit(sw::Shader::OPCODE_ACOS, result, arg); break;
\r
1006 case EOpAtan: if(visit == PostVisit) emit(sw::Shader::OPCODE_ATAN, result, arg); break;
\r
1007 case EOpSinh: if(visit == PostVisit) emit(sw::Shader::OPCODE_SINH, result, arg); break;
\r
1008 case EOpCosh: if(visit == PostVisit) emit(sw::Shader::OPCODE_COSH, result, arg); break;
\r
1009 case EOpTanh: if(visit == PostVisit) emit(sw::Shader::OPCODE_TANH, result, arg); break;
\r
1010 case EOpAsinh: if(visit == PostVisit) emit(sw::Shader::OPCODE_ASINH, result, arg); break;
\r
1011 case EOpAcosh: if(visit == PostVisit) emit(sw::Shader::OPCODE_ACOSH, result, arg); break;
\r
1012 case EOpAtanh: if(visit == PostVisit) emit(sw::Shader::OPCODE_ATANH, result, arg); break;
\r
1013 case EOpExp: if(visit == PostVisit) emit(sw::Shader::OPCODE_EXP, result, arg); break;
\r
1014 case EOpLog: if(visit == PostVisit) emit(sw::Shader::OPCODE_LOG, result, arg); break;
\r
1015 case EOpExp2: if(visit == PostVisit) emit(sw::Shader::OPCODE_EXP2, result, arg); break;
\r
1016 case EOpLog2: if(visit == PostVisit) emit(sw::Shader::OPCODE_LOG2, result, arg); break;
\r
1017 case EOpSqrt: if(visit == PostVisit) emit(sw::Shader::OPCODE_SQRT, result, arg); break;
\r
1018 case EOpInverseSqrt: if(visit == PostVisit) emit(sw::Shader::OPCODE_RSQ, result, arg); break;
\r
1019 case EOpAbs: if(visit == PostVisit) emit(sw::Shader::OPCODE_ABS, result, arg); break;
\r
1020 case EOpSign: if(visit == PostVisit) emit(sw::Shader::OPCODE_SGN, result, arg); break;
\r
1021 case EOpFloor: if(visit == PostVisit) emit(sw::Shader::OPCODE_FLOOR, result, arg); break;
\r
1022 case EOpTrunc: if(visit == PostVisit) emit(sw::Shader::OPCODE_TRUNC, result, arg); break;
\r
1023 case EOpRound: if(visit == PostVisit) emit(sw::Shader::OPCODE_ROUND, result, arg); break;
\r
1024 case EOpRoundEven: if(visit == PostVisit) emit(sw::Shader::OPCODE_ROUNDEVEN, result, arg); break;
\r
1025 case EOpCeil: if(visit == PostVisit) emit(sw::Shader::OPCODE_CEIL, result, arg, result); break;
\r
1026 case EOpFract: if(visit == PostVisit) emit(sw::Shader::OPCODE_FRC, result, arg); break;
\r
1027 case EOpIsNan: if(visit == PostVisit) emit(sw::Shader::OPCODE_ISNAN, result, arg); break;
\r
1028 case EOpIsInf: if(visit == PostVisit) emit(sw::Shader::OPCODE_ISINF, result, arg); break;
\r
1029 case EOpLength: if(visit == PostVisit) emit(sw::Shader::OPCODE_LEN(dim(arg)), result, arg); break;
\r
1030 case EOpNormalize: if(visit == PostVisit) emit(sw::Shader::OPCODE_NRM(dim(arg)), result, arg); break;
\r
1031 case EOpDFdx: if(visit == PostVisit) emit(sw::Shader::OPCODE_DFDX, result, arg); break;
\r
1032 case EOpDFdy: if(visit == PostVisit) emit(sw::Shader::OPCODE_DFDY, result, arg); break;
\r
1033 case EOpFwidth: if(visit == PostVisit) emit(sw::Shader::OPCODE_FWIDTH, result, arg); break;
\r
1034 case EOpAny: if(visit == PostVisit) emit(sw::Shader::OPCODE_ANY, result, arg); break;
\r
1035 case EOpAll: if(visit == PostVisit) emit(sw::Shader::OPCODE_ALL, result, arg); break;
\r
1036 case EOpFloatBitsToInt: if(visit == PostVisit) emit(sw::Shader::OPCODE_FLOATBITSTOINT, result, arg); break;
\r
1037 case EOpFloatBitsToUint: if(visit == PostVisit) emit(sw::Shader::OPCODE_FLOATBITSTOUINT, result, arg); break;
\r
1038 case EOpIntBitsToFloat: if(visit == PostVisit) emit(sw::Shader::OPCODE_INTBITSTOFLOAT, result, arg); break;
\r
1039 case EOpUintBitsToFloat: if(visit == PostVisit) emit(sw::Shader::OPCODE_UINTBITSTOFLOAT, result, arg); break;
\r
1040 case EOpPackSnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_PACKSNORM2x16, result, arg); break;
\r
1041 case EOpPackUnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_PACKUNORM2x16, result, arg); break;
\r
1042 case EOpPackHalf2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_PACKHALF2x16, result, arg); break;
\r
1043 case EOpUnpackSnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_UNPACKSNORM2x16, result, arg); break;
\r
1044 case EOpUnpackUnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_UNPACKUNORM2x16, result, arg); break;
\r
1045 case EOpUnpackHalf2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_UNPACKHALF2x16, result, arg); break;
\r
1046 case EOpTranspose:
\r
1047 if(visit == PostVisit)
\r
1049 int numCols = arg->getNominalSize();
\r
1050 int numRows = arg->getSecondarySize();
\r
1051 for(int i = 0; i < numCols; ++i)
\r
1053 for(int j = 0; j < numRows; ++j)
\r
1055 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, arg);
\r
1056 mov->src[0].index += i;
\r
1057 mov->src[0].swizzle = 0x55 * j;
\r
1058 mov->dst.index += j;
\r
1059 mov->dst.mask = 1 << i;
\r
1064 case EOpDeterminant:
\r
1065 if(visit == PostVisit)
\r
1067 int size = arg->getNominalSize();
\r
1068 ASSERT(size == arg->getSecondarySize());
\r
1070 emitDeterminant(result, arg, size);
\r
1074 if(visit == PostVisit)
\r
1076 int size = arg->getNominalSize();
\r
1077 ASSERT(size == arg->getSecondarySize());
\r
1079 // Compute transposed matrix of cofactors
\r
1080 for(int i = 0; i < size; ++i)
\r
1082 for(int j = 0; j < size; ++j)
\r
1084 // For a 2x2 matrix, the cofactor is simply a transposed move or negate
\r
1085 // For a 3x3 or 4x4 matrix, the cofactor is a transposed determinant
\r
1086 emitDeterminant(result, arg, size - 1, j, i, i, j);
\r
1090 // Compute 1 / determinant
\r
1091 Temporary invDet(this);
\r
1092 emitDeterminant(&invDet, arg, size);
\r
1093 Constant one(1.0f, 1.0f, 1.0f, 1.0f);
\r
1094 Instruction *div = emit(sw::Shader::OPCODE_DIV, &invDet, &one, &invDet);
\r
1095 div->src[1].swizzle = 0x00; // xxxx
\r
1097 // Divide transposed matrix of cofactors by determinant
\r
1098 for(int i = 0; i < size; ++i)
\r
1100 Instruction *div = emit(sw::Shader::OPCODE_MUL, result, result, &invDet);
\r
1101 div->src[0].index += i;
\r
1102 div->dst.index += i;
\r
1106 default: UNREACHABLE(node->getOp());
\r
// Emits assembly for aggregate AST nodes: sequences, function definitions and
// calls, texture sampling built-ins, constructors, and component-wise
// built-in functions. Returns true to continue traversal into the children.
1112 bool OutputASM::visitAggregate(Visit visit, TIntermAggregate *node)
// Skip nodes outside the scope currently being emitted; function definitions
// and sequences must still be traversed to reach the right scope.
1114 if(currentScope != emitScope && node->getOp() != EOpFunction && node->getOp() != EOpSequence)
1119 Constant zero(0.0f, 0.0f, 0.0f, 0.0f);
1121 TIntermTyped *result = node;
1122 const TType &resultType = node->getType();
1123 TIntermSequence &arg = node->getSequence();
1124 int argumentCount = arg.size();
1126 switch(node->getOp())
1128 case EOpSequence: break;
1129 case EOpDeclaration: break;
1130 case EOpPrototype: break;
// (initializing declaration) copy the right-hand side into the result
1132 if(visit == PostVisit)
1134 copy(result, arg[1]);
// Function definition: emit a label on the function-emission pass; register
// the function during the global pass.
1138 if(visit == PreVisit)
1140 const TString &name = node->getName();
1142 if(emitScope == FUNCTION)
1144 if(functionArray.size() > 1) // No need for a label when there's only main()
1146 Instruction *label = emit(sw::Shader::OPCODE_LABEL);
1147 label->dst.type = sw::Shader::PARAMETER_LABEL;
1149 const Function *function = findFunction(name);
1150 ASSERT(function); // Should have been added during global pass
1151 label->dst.index = function->label;
1152 currentFunction = function->label;
1155 else if(emitScope == GLOBAL)
1157 if(name != "main(")
1159 TIntermSequence &arguments = node->getSequence()[0]->getAsAggregate()->getSequence();
1160 functionArray.push_back(Function(functionArray.size(), name, &arguments, node));
1163 else UNREACHABLE(emitScope);
1165 currentScope = FUNCTION;
1167 else if(visit == PostVisit)
1169 if(emitScope == FUNCTION)
1171 if(functionArray.size() > 1) // No need to return when there's only main()
1173 emit(sw::Shader::OPCODE_RET);
1177 currentScope = GLOBAL;
// User-defined call: copy in-arguments, CALL, then copy back the return value
// and out-arguments. Non-user-defined calls fall through to texture handling.
1180 case EOpFunctionCall:
1181 if(visit == PostVisit)
1183 if(node->isUserDefined())
1185 const TString &name = node->getName();
1186 const Function *function = findFunction(name);
1190 mContext.error(node->getLine(), "function definition not found", name.c_str());
1194 TIntermSequence &arguments = *function->arg;
// Copy in/inout/const arguments into the callee's parameter registers.
1196 for(int i = 0; i < argumentCount; i++)
1198 TIntermTyped *in = arguments[i]->getAsTyped();
1200 if(in->getQualifier() == EvqIn ||
1201 in->getQualifier() == EvqInOut ||
1202 in->getQualifier() == EvqConstReadOnly)
1208 Instruction *call = emit(sw::Shader::OPCODE_CALL);
1209 call->dst.type = sw::Shader::PARAMETER_LABEL;
1210 call->dst.index = function->label;
1212 if(function->ret && function->ret->getType().getBasicType() != EbtVoid)
1214 copy(result, function->ret);
// Copy out/inout parameters back into the caller's argument expressions.
1217 for(int i = 0; i < argumentCount; i++)
1219 TIntermTyped *argument = arguments[i]->getAsTyped();
1220 TIntermTyped *out = arg[i]->getAsTyped();
1222 if(argument->getQualifier() == EvqOut ||
1223 argument->getQualifier() == EvqInOut)
1225 copy(out, argument);
// Built-in texture functions, dispatched on the sampling method.
1231 const TextureFunction textureFunction(node->getName());
1232 switch(textureFunction.method)
1234 case TextureFunction::IMPLICIT:
1236 TIntermTyped *t = arg[1]->getAsTyped();
1238 TIntermNode* offset = textureFunction.offset ? arg[2] : 0;
1240 if(argumentCount == 2 || (textureFunction.offset && argumentCount == 3))
1242 Instruction *tex = emit(textureFunction.offset ? sw::Shader::OPCODE_TEXOFFSET : sw::Shader::OPCODE_TEX,
1243 result, arg[1], arg[0], offset);
1244 if(textureFunction.proj)
1246 tex->project = true;
// Replicate the projection component into the unused coordinate slots.
1248 switch(t->getNominalSize())
1250 case 2: tex->src[0].swizzle = 0x54; break; // xyyy
1251 case 3: tex->src[0].swizzle = 0xA4; break; // xyzz
1252 case 4: break; // xyzw
1254 UNREACHABLE(t->getNominalSize());
1259 else if(argumentCount == 3 || (textureFunction.offset && argumentCount == 4)) // bias
1261 Temporary proj(this);
1262 if(textureFunction.proj)
// Projective: divide xy by the last coordinate component (0x55 * n replicates it).
1264 Instruction *div = emit(sw::Shader::OPCODE_DIV, &proj, arg[1], arg[1]);
1265 div->dst.mask = 0x3;
1267 switch(t->getNominalSize())
1272 div->src[1].swizzle = 0x55 * (t->getNominalSize() - 1);
1275 UNREACHABLE(t->getNominalSize());
1281 emit(sw::Shader::OPCODE_MOV, &proj, arg[1]);
// Place the LOD bias argument into the w component of the coordinate register.
1284 Instruction *bias = emit(sw::Shader::OPCODE_MOV, &proj, arg[textureFunction.offset ? 3 : 2]);
1285 bias->dst.mask = 0x8;
1287 Instruction *tex = emit(textureFunction.offset ? sw::Shader::OPCODE_TEXOFFSET : sw::Shader::OPCODE_TEX,
1288 result, &proj, arg[0], offset); // FIXME: Implement an efficient TEXLDB instruction
1291 else UNREACHABLE(argumentCount);
1294 case TextureFunction::LOD:
1296 TIntermTyped *t = arg[1]->getAsTyped();
1297 Temporary proj(this);
1299 if(textureFunction.proj)
1301 Instruction *div = emit(sw::Shader::OPCODE_DIV, &proj, arg[1], arg[1]);
1302 div->dst.mask = 0x3;
1304 switch(t->getNominalSize())
1309 div->src[1].swizzle = 0x55 * (t->getNominalSize() - 1);
1312 UNREACHABLE(t->getNominalSize());
1318 emit(sw::Shader::OPCODE_MOV, &proj, arg[1]);
// Explicit LOD goes into the w component.
1321 Instruction *lod = emit(sw::Shader::OPCODE_MOV, &proj, arg[2]);
1322 lod->dst.mask = 0x8;
1324 emit(textureFunction.offset ? sw::Shader::OPCODE_TEXLDLOFFSET : sw::Shader::OPCODE_TEXLDL,
1325 result, &proj, arg[0], textureFunction.offset ? arg[3] : 0);
1328 case TextureFunction::FETCH:
1330 TIntermTyped *t = arg[1]->getAsTyped();
1332 if(argumentCount == 3 || (textureFunction.offset && argumentCount == 4))
1334 TIntermNode* offset = textureFunction.offset ? arg[3] : 0;
1336 emit(textureFunction.offset ? sw::Shader::OPCODE_TEXELFETCHOFFSET : sw::Shader::OPCODE_TEXELFETCH,
1337 result, arg[1], arg[0], arg[2], offset);
1339 else UNREACHABLE(argumentCount);
1342 case TextureFunction::GRAD:
1344 TIntermTyped *t = arg[1]->getAsTyped();
1346 if(argumentCount == 4 || (textureFunction.offset && argumentCount == 5))
1348 Temporary uvwb(this);
1350 if(textureFunction.proj)
1352 Instruction *div = emit(sw::Shader::OPCODE_DIV, &uvwb, arg[1], arg[1]);
1353 div->dst.mask = 0x3;
1355 switch(t->getNominalSize())
1360 div->src[1].swizzle = 0x55 * (t->getNominalSize() - 1);
1363 UNREACHABLE(t->getNominalSize());
1369 emit(sw::Shader::OPCODE_MOV, &uvwb, arg[1]);
1372 TIntermNode* offset = textureFunction.offset ? arg[4] : 0;
1374 emit(textureFunction.offset ? sw::Shader::OPCODE_TEXGRADOFFSET : sw::Shader::OPCODE_TEXGRAD,
1375 result, &uvwb, arg[0], arg[2], arg[3], offset);
1377 else UNREACHABLE(argumentCount);
1380 case TextureFunction::SIZE:
1381 emit(sw::Shader::OPCODE_TEXSIZE, result, arg[1], arg[0]);
1384 UNREACHABLE(textureFunction.method);
1389 case EOpParameters:
// Scalar/vector constructors: copy each argument's components into the result,
// advancing 'component' through the destination vector.
1391 case EOpConstructFloat:
1392 case EOpConstructVec2:
1393 case EOpConstructVec3:
1394 case EOpConstructVec4:
1395 case EOpConstructBool:
1396 case EOpConstructBVec2:
1397 case EOpConstructBVec3:
1398 case EOpConstructBVec4:
1399 case EOpConstructInt:
1400 case EOpConstructIVec2:
1401 case EOpConstructIVec3:
1402 case EOpConstructIVec4:
1403 case EOpConstructUInt:
1404 case EOpConstructUVec2:
1405 case EOpConstructUVec3:
1406 case EOpConstructUVec4:
1407 if(visit == PostVisit)
1409 int component = 0;
1411 for(int i = 0; i < argumentCount; i++)
1413 TIntermTyped *argi = arg[i]->getAsTyped();
1414 int size = argi->getNominalSize();
1416 if(!argi->isMatrix())
1418 Instruction *mov = emitCast(result, argi);
1419 mov->dst.mask = (0xF << component) & 0xF;
1420 mov->src[0].swizzle = readSwizzle(argi, size) << (component * 2);
1422 component += size;
// (matrix argument) copy column by column until the result vector is full
1428 while(component < resultType.getNominalSize())
1430 Instruction *mov = emitCast(result, argi);
1431 mov->dst.mask = (0xF << component) & 0xF;
1432 mov->src[0].index += column;
1433 mov->src[0].swizzle = readSwizzle(argi, size) << (component * 2);
1436 component += size;
// Matrix constructors: diagonal matrix from a scalar, resize from another
// matrix, or pack columns from a list of scalars/vectors.
1442 case EOpConstructMat2:
1443 case EOpConstructMat2x3:
1444 case EOpConstructMat2x4:
1445 case EOpConstructMat3x2:
1446 case EOpConstructMat3:
1447 case EOpConstructMat3x4:
1448 case EOpConstructMat4x2:
1449 case EOpConstructMat4x3:
1450 case EOpConstructMat4:
1451 if(visit == PostVisit)
1453 TIntermTyped *arg0 = arg[0]->getAsTyped();
1454 const int outCols = result->getNominalSize();
1455 const int outRows = result->getSecondarySize();
1457 if(arg0->isScalar() && arg.size() == 1) // Construct scale matrix
1459 for(int i = 0; i < outCols; i++)
// Zero the column, then write the scalar on the diagonal (mask 1 << i).
1461 Instruction *init = emit(sw::Shader::OPCODE_MOV, result, &zero);
1462 init->dst.index += i;
1463 Instruction *mov = emitCast(result, arg0);
1464 mov->dst.index += i;
1465 mov->dst.mask = 1 << i;
1466 ASSERT(mov->src[0].swizzle == 0x00);
1469 else if(arg0->isMatrix())
1471 const int inCols = arg0->getNominalSize();
1472 const int inRows = arg0->getSecondarySize();
1474 for(int i = 0; i < outCols; i++)
1476 if(i >= inCols || outRows > inRows)
1478 // Initialize to identity matrix
1479 Constant col((i == 0 ? 1.0f : 0.0f), (i == 1 ? 1.0f : 0.0f), (i == 2 ? 1.0f : 0.0f), (i == 3 ? 1.0f : 0.0f))
1480 Instruction *mov = emitCast(result, &col);
1481 mov->dst.index += i;
// (overlapping part) copy the source column's rows
1486 Instruction *mov = emitCast(result, arg0);
1487 mov->dst.index += i;
1488 mov->dst.mask = 0xF >> (4 - inRows);
1489 argument(mov->src[0], arg0, i);
// (component list) pack the arguments column-major into the matrix
1498 for(int i = 0; i < argumentCount; i++)
1500 TIntermTyped *argi = arg[i]->getAsTyped();
1501 int size = argi->getNominalSize();
1504 while(element < size)
1506 Instruction *mov = emitCast(result, argi);
1507 mov->dst.index += column;
1508 mov->dst.mask = (0xF << row) & 0xF;
1509 mov->src[0].swizzle = (readSwizzle(argi, size) << (row * 2)) + 0x55 * element;
// Advance the column/row/element cursors past what was just written.
1511 int end = row + size - element;
1512 column = end >= outRows ? column + 1 : column;
1513 element = element + outRows - row;
1514 row = end >= outRows ? 0 : end;
// Struct constructor: copy each argument into its field's register range.
1520 case EOpConstructStruct:
1521 if(visit == PostVisit)
1524 for(int i = 0; i < argumentCount; i++)
1526 TIntermTyped *argi = arg[i]->getAsTyped();
1527 int size = argi->totalRegisterCount();
1529 for(int index = 0; index < size; index++)
1531 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, argi);
1532 mov->dst.index += index + offset;
1533 mov->dst.mask = writeMask(result, offset + index);
1534 argument(mov->src[0], argi, index);
// Component-wise relational and math built-ins.
1541 case EOpLessThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LT, result, arg[0], arg[1]); break;
1542 case EOpGreaterThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GT, result, arg[0], arg[1]); break;
1543 case EOpLessThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LE, result, arg[0], arg[1]); break;
1544 case EOpGreaterThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GE, result, arg[0], arg[1]); break;
1545 case EOpVectorEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_EQ, result, arg[0], arg[1]); break;
1546 case EOpVectorNotEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_NE, result, arg[0], arg[1]); break;
1547 case EOpMod: if(visit == PostVisit) emit(sw::Shader::OPCODE_MOD, result, arg[0], arg[1]); break;
1548 case EOpPow: if(visit == PostVisit) emit(sw::Shader::OPCODE_POW, result, arg[0], arg[1]); break;
1549 case EOpAtan: if(visit == PostVisit) emit(sw::Shader::OPCODE_ATAN2, result, arg[0], arg[1]); break;
1550 case EOpMin: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_MIN, result), result, arg[0], arg[1]); break;
1551 case EOpMax: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_MAX, result), result, arg[0], arg[1]); break;
// (clamp) implemented as max followed by min
1553 if(visit == PostVisit)
1555 emit(getOpcode(sw::Shader::OPCODE_MAX, result), result, arg[0], arg[1]);
1556 emit(getOpcode(sw::Shader::OPCODE_MIN, result), result, result, arg[2]);
1559 case EOpMix: if(visit == PostVisit) emit(sw::Shader::OPCODE_LRP, result, arg[2], arg[1], arg[0]); break;
1560 case EOpStep: if(visit == PostVisit) emit(sw::Shader::OPCODE_STEP, result, arg[0], arg[1]); break;
1561 case EOpSmoothStep: if(visit == PostVisit) emit(sw::Shader::OPCODE_SMOOTH, result, arg[0], arg[1], arg[2]); break;
1562 case EOpDistance: if(visit == PostVisit) emit(sw::Shader::OPCODE_DIST(dim(arg[0])), result, arg[0], arg[1]); break;
1563 case EOpDot: if(visit == PostVisit) emit(sw::Shader::OPCODE_DP(dim(arg[0])), result, arg[0], arg[1]); break;
1564 case EOpCross: if(visit == PostVisit) emit(sw::Shader::OPCODE_CRS, result, arg[0], arg[1]); break;
1565 case EOpFaceForward: if(visit == PostVisit) emit(sw::Shader::OPCODE_FORWARD(dim(arg[0])), result, arg[0], arg[1], arg[2]); break;
1566 case EOpReflect: if(visit == PostVisit) emit(sw::Shader::OPCODE_REFLECT(dim(arg[0])), result, arg[0], arg[1]); break;
1567 case EOpRefract: if(visit == PostVisit) emit(sw::Shader::OPCODE_REFRACT(dim(arg[0])), result, arg[0], arg[1], arg[2]); break;
// (matrixCompMult) component-wise multiply, one MUL per column
1569 if(visit == PostVisit)
1571 TIntermTyped *arg0 = arg[0]->getAsTyped();
1572 TIntermTyped *arg1 = arg[1]->getAsTyped();
1573 ASSERT((arg0->getNominalSize() == arg1->getNominalSize()) && (arg0->getSecondarySize() == arg1->getSecondarySize()));
1575 int size = arg0->getNominalSize();
1576 for(int i = 0; i < size; i++)
1578 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, arg[0], arg[1]);
1579 mul->dst.index += i;
1580 argument(mul->src[0], arg[0], i);
1581 argument(mul->src[1], arg[1], i);
// outerProduct(u, v): column i of the result is u scaled by v[i] (0x55 * i replicates v[i]).
1585 case EOpOuterProduct:
1586 if(visit == PostVisit)
1588 for(int i = 0; i < dim(arg[1]); i++)
1590 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, arg[0], arg[1]);
1591 mul->dst.index += i;
1592 mul->src[1].swizzle = 0x55 * i;
1596 default: UNREACHABLE(node->getOp());
// Emits code for ?: expressions and if/else statements. Compile-time-constant
// conditions are folded; trivial ternaries use SELECT; everything else
// becomes IF/ELSE/ENDIF blocks.
1602 bool OutputASM::visitSelection(Visit visit, TIntermSelection *node)
1604 if(currentScope != emitScope)
1609 TIntermTyped *condition = node->getCondition();
1610 TIntermNode *trueBlock = node->getTrueBlock();
1611 TIntermNode *falseBlock = node->getFalseBlock();
1612 TIntermConstantUnion *constantCondition = condition->getAsConstantUnion();
1614 condition->traverse(this);
1616 if(node->usesTernaryOperator())
1618 if(constantCondition)
// Constant condition: emit only the taken branch and copy its value out.
1620 bool trueCondition = constantCondition->getUnionArrayPointer()->getBConst();
1624 trueBlock->traverse(this);
1625 copy(node, trueBlock);
1629 falseBlock->traverse(this);
1630 copy(node, falseBlock);
1633 else if(trivial(node, 6)) // Fast to compute both potential results and no side effects
// Evaluate both sides unconditionally, then SELECT between them.
1635 trueBlock->traverse(this);
1636 falseBlock->traverse(this);
1637 emit(sw::Shader::OPCODE_SELECT, node, condition, trueBlock, falseBlock);
// Non-trivial ternary: fall back to IF/ELSE with a copy on each branch.
1641 emit(sw::Shader::OPCODE_IF, 0, condition);
1645 trueBlock->traverse(this);
1646 copy(node, trueBlock);
1651 emit(sw::Shader::OPCODE_ELSE);
1652 falseBlock->traverse(this);
1653 copy(node, falseBlock);
1656 emit(sw::Shader::OPCODE_ENDIF);
1659 else // if/else statement
1661 if(constantCondition)
// Constant condition: only the taken branch produces code.
1663 bool trueCondition = constantCondition->getUnionArrayPointer()->getBConst();
1669 trueBlock->traverse(this);
1676 falseBlock->traverse(this);
1682 emit(sw::Shader::OPCODE_IF, 0, condition);
1686 trueBlock->traverse(this);
1691 emit(sw::Shader::OPCODE_ELSE);
1692 falseBlock->traverse(this);
1695 emit(sw::Shader::OPCODE_ENDIF);
// Emits loop code. Loops with a small statically-known trip count (and no
// discontinuous control flow) are unrolled; do-while is emulated with a WHILE
// on an 'iterate' flag; all other loops become WHILE/ENDWHILE.
1702 bool OutputASM::visitLoop(Visit visit, TIntermLoop *node)
1704 if(currentScope != emitScope)
// loopCount() returns the statically-determined iteration count, when known.
1709 unsigned int iterations = loopCount(node);
1711 if(iterations == 0)
1716 bool unroll = (iterations <= 4);
// Loops containing break/continue/discard cannot be unrolled.
1720 DetectLoopDiscontinuity detectLoopDiscontinuity;
1721 unroll = !detectLoopDiscontinuity.traverse(node);
1724 TIntermNode *init = node->getInit();
1725 TIntermTyped *condition = node->getCondition();
1726 TIntermTyped *expression = node->getExpression();
1727 TIntermNode *body = node->getBody();
1729 if(node->getType() == ELoopDoWhile)
// do-while: loop on an always-initially-true flag, re-evaluating the
// condition into the flag at the end of each pass.
1731 Temporary iterate(this);
1732 Constant True(true);
1733 emit(sw::Shader::OPCODE_MOV, &iterate, &True);
1735 emit(sw::Shader::OPCODE_WHILE, 0, &iterate); // FIXME: Implement real do-while
1739 body->traverse(this);
1742 emit(sw::Shader::OPCODE_TEST);
1744 condition->traverse(this);
1745 emit(sw::Shader::OPCODE_MOV, &iterate, condition);
1747 emit(sw::Shader::OPCODE_ENDWHILE);
1753 init->traverse(this);
// Unrolled path: repeat the body (and increment expression) 'iterations' times.
1758 for(unsigned int i = 0; i < iterations; i++)
1760 // condition->traverse(this); // Condition could contain statements, but not in an unrollable loop
1764 body->traverse(this);
1769 expression->traverse(this);
// General while/for: evaluate the condition before WHILE and again at the tail.
1777 condition->traverse(this);
1780 emit(sw::Shader::OPCODE_WHILE, 0, condition);
1784 body->traverse(this);
1787 emit(sw::Shader::OPCODE_TEST);
1791 expression->traverse(this);
1796 condition->traverse(this);
1799 emit(sw::Shader::OPCODE_ENDWHILE);
// Emits flow-control statements: discard, break, continue and return.
1806 bool OutputASM::visitBranch(Visit visit, TIntermBranch *node)
1808 if(currentScope != emitScope)
1813 switch(node->getFlowOp())
1815 case EOpKill: if(visit == PostVisit) emit(sw::Shader::OPCODE_DISCARD); break;
1816 case EOpBreak: if(visit == PostVisit) emit(sw::Shader::OPCODE_BREAK); break;
1817 case EOpContinue: if(visit == PostVisit) emit(sw::Shader::OPCODE_CONTINUE); break;
// (return) copy the value into the current function's return register, then LEAVE.
1819 if(visit == PostVisit)
1821 TIntermTyped *value = node->getExpression();
1825 copy(functionArray[currentFunction].ret, value);
1828 emit(sw::Shader::OPCODE_LEAVE);
1831 default: UNREACHABLE(node->getFlowOp());
1837 bool OutputASM::isSamplerRegister(TIntermTyped *operand)
\r
1839 return operand && isSamplerRegister(operand->getType());
\r
1842 bool OutputASM::isSamplerRegister(const TType &type)
\r
1844 // A sampler register's qualifiers can be:
\r
1845 // - EvqUniform: The sampler uniform is used as is in the code (default case).
\r
1846 // - EvqTemporary: The sampler is indexed. It's still a sampler register.
\r
1847 // - EvqIn (and other similar types): The sampler has been passed as a function argument. At this point,
\r
1848 // the sampler has been copied and is no longer a sampler register.
\r
1849 return IsSampler(type.getBasicType()) && (type.getQualifier() == EvqUniform || type.getQualifier() == EvqTemporary);
\r
// Central instruction factory: creates an Instruction for 'op', fills in the
// destination from 'dst' (offset by 'index' for multi-register operands) and
// up to five source parameters, appends it to the shader, and returns it so
// callers can patch fields (mask, swizzle, control) afterwards.
1852 Instruction *OutputASM::emit(sw::Shader::Opcode op, TIntermTyped *dst, TIntermNode *src0, TIntermNode *src1, TIntermNode *src2, TIntermNode *src3, TIntermNode *src4, int index)
1854 if(isSamplerRegister(dst))
1856 op = sw::Shader::OPCODE_NULL; // Can't assign to a sampler, but this is hit when indexing sampler arrays
1859 Instruction *instruction = new Instruction(op);
// Destination fields: register type, index (plus multi-register offset),
// write mask, and whether the destination holds integer data.
1863 instruction->dst.type = registerType(dst);
1864 instruction->dst.index = registerIndex(dst) + index;
1865 instruction->dst.mask = writeMask(dst);
1866 instruction->dst.integer = (dst->getBasicType() == EbtInt);
// Resolve each source operand at the same register offset.
1869 argument(instruction->src[0], src0, index);
1870 argument(instruction->src[1], src1, index);
1871 argument(instruction->src[2], src2, index);
1872 argument(instruction->src[3], src3, index);
1873 argument(instruction->src[4], src4, index);
1875 shader->append(instruction);
1877 return instruction;
// Emits a type conversion from src's basic type to dst's, selecting the
// matching conversion opcode. Falls through to a plain MOV when no
// conversion applies (same type, or non-scalar aggregate copy).
1880 Instruction *OutputASM::emitCast(TIntermTyped *dst, TIntermTyped *src)
1882 switch(src->getBasicType())
// (bool source)
1885 switch(dst->getBasicType())
1887 case EbtInt: return emit(sw::Shader::OPCODE_B2I, dst, src);
1888 case EbtUInt: return emit(sw::Shader::OPCODE_B2U, dst, src);
1889 case EbtFloat: return emit(sw::Shader::OPCODE_B2F, dst, src);
// (int source)
1894 switch(dst->getBasicType())
1896 case EbtBool: return emit(sw::Shader::OPCODE_I2B, dst, src);
1897 case EbtFloat: return emit(sw::Shader::OPCODE_I2F, dst, src);
// (uint source)
1902 switch(dst->getBasicType())
1904 case EbtBool: return emit(sw::Shader::OPCODE_U2B, dst, src);
1905 case EbtFloat: return emit(sw::Shader::OPCODE_U2F, dst, src);
// (float source)
1910 switch(dst->getBasicType())
1912 case EbtBool: return emit(sw::Shader::OPCODE_F2B, dst, src);
1913 case EbtInt: return emit(sw::Shader::OPCODE_F2I, dst, src);
1914 case EbtUInt: return emit(sw::Shader::OPCODE_F2U, dst, src);
// Identity or unhandled combination: a MOV suffices.
1922 return emit(sw::Shader::OPCODE_MOV, dst, src);
1925 void OutputASM::emitBinary(sw::Shader::Opcode op, TIntermTyped *dst, TIntermNode *src0, TIntermNode *src1, TIntermNode *src2)
\r
1927 for(int index = 0; index < dst->elementRegisterCount(); index++)
\r
1929 emit(op, dst, src0, src1, src2, 0, 0, index);
\r
// Emits a compound assignment: computes result = src0 op src1, then stores
// the result through the l-value 'lhs'.
1933 void OutputASM::emitAssign(sw::Shader::Opcode op, TIntermTyped *result, TIntermTyped *lhs, TIntermTyped *src0, TIntermTyped *src1)
1935 emitBinary(op, result, src0, src1);
1936 assignLvalue(lhs, result);
// Emits a component-wise comparison with the given control mode (LT, GT, EQ,
// ...), choosing the signed-integer, unsigned or float compare opcode from
// the left operand's basic type.
1939 void OutputASM::emitCmp(sw::Shader::Control cmpOp, TIntermTyped *dst, TIntermNode *left, TIntermNode *right, int index)
1941 sw::Shader::Opcode opcode;
1942 switch(left->getAsTyped()->getBasicType())
// (signed integer operands)
1946 opcode = sw::Shader::OPCODE_ICMP;
// (unsigned integer operands)
1949 opcode = sw::Shader::OPCODE_UCMP;
// (float operands)
1952 opcode = sw::Shader::OPCODE_CMP;
1956 Instruction *cmp = emit(opcode, dst, left, right);
1957 cmp->control = cmpOp;
// Re-resolve the sources at the requested register offset.
1958 argument(cmp->src[0], left, index);
1959 argument(cmp->src[1], right, index);
// Returns the number of scalar components preceding the given register
// offset within 'type'. Used to locate a register's data inside a flattened
// constant union (see OutputASM::argument).
1962 int componentCount(const TType &type, int registers)
1964 if(registers == 0)
// Arrays: skip whole elements, then recurse into the remainder.
1969 if(type.isArray() && registers >= type.elementRegisterCount())
1971 int index = registers / type.elementRegisterCount();
1972 registers -= index * type.elementRegisterCount();
1973 return index * type.getElementSize() + componentCount(type, registers);
// Structs and interface blocks: walk fields until the offset lands inside one.
1976 if(type.isStruct() || type.isInterfaceBlock())
1978 const TFieldList& fields = type.getStruct() ? type.getStruct()->fields() : type.getInterfaceBlock()->fields();
1981 for(TFieldList::const_iterator field = fields.begin(); field != fields.end(); field++)
1983 const TType &fieldType = *((*field)->type());
1985 if(fieldType.totalRegisterCount() <= registers)
1987 registers -= fieldType.totalRegisterCount();
1988 elements += fieldType.getObjectSize();
1990 else // Register within this field
1992 return elements + componentCount(fieldType, registers);
// Matrices: each register is one column of registerSize() components.
1996 else if(type.isMatrix())
1998 return registers * type.registerSize();
// Returns the number of components in the register at the given offset within
// 'type' (e.g. a mat3 column yields 3). Companion to componentCount above.
2005 int registerSize(const TType &type, int registers)
2007 if(registers == 0)
// At offset 0 of a struct, the size is that of the first field's first register.
2009 if(type.isStruct())
2011 return registerSize(*((*(type.getStruct()->fields().begin()))->type()), 0);
2014 return type.registerSize();
// Arrays: reduce to an offset within a single element, then recurse.
2017 if(type.isArray() && registers >= type.elementRegisterCount())
2019 int index = registers / type.elementRegisterCount();
2020 registers -= index * type.elementRegisterCount();
2021 return registerSize(type, registers);
// Structs and interface blocks: find the field containing the offset.
2024 if(type.isStruct() || type.isInterfaceBlock())
2026 const TFieldList& fields = type.getStruct() ? type.getStruct()->fields() : type.getInterfaceBlock()->fields();
2029 for(TFieldList::const_iterator field = fields.begin(); field != fields.end(); field++)
2031 const TType &fieldType = *((*field)->type());
2033 if(fieldType.totalRegisterCount() <= registers)
2035 registers -= fieldType.totalRegisterCount();
2036 elements += fieldType.getObjectSize();
2038 else // Register within this field
2040 return registerSize(fieldType, registers);
// Matrices: every column register has the same size as register 0.
2044 else if(type.isMatrix())
2046 return registerSize(type, 0);
// Fills in a source parameter from an AST node at the given register offset:
// register type and index, embedded literal values for constants, relative
// addressing for indexed samplers, and the read swizzle.
// NOTE(review): '¶meter' below is a mojibake of '&parameter' (an HTML
// entity corruption, '&para' -> paragraph sign) — restore before compiling.
2053 void OutputASM::argument(sw::Shader::SourceParameter ¶meter, TIntermNode *argument, int index)
2057 TIntermTyped *arg = argument->getAsTyped();
2058 const TType &type = arg->getType();
// Clamp the offset to the operand's last register.
2059 index = (index >= arg->totalRegisterCount()) ? arg->totalRegisterCount() - 1 : index;
2061 int size = registerSize(type, index);
2063 parameter.type = registerType(arg);
// Constant expressions are embedded in the instruction as float literals.
2065 if(arg->getQualifier() == EvqConstExpr)
2067 int component = componentCount(type, index);
2068 ConstantUnion *constants = arg->getAsConstantUnion()->getUnionArrayPointer();
2070 for(int i = 0; i < 4; i++)
2072 if(size == 1) // Replicate
2074 parameter.value[i] = constants[component + 0].getAsFloat();
// (components within the register's size) copy each one
2078 parameter.value[i] = constants[component + i].getAsFloat();
// (padding beyond the register's size)
2082 parameter.value[i] = 0.0f;
2088 parameter.index = registerIndex(arg) + index;
// Sampler operands may be indexed; resolve direct and indirect indices here.
2090 if(isSamplerRegister(arg))
2092 TIntermBinary *binary = argument->getAsBinaryNode();
2096 TIntermTyped *left = binary->getLeft();
2097 TIntermTyped *right = binary->getRight();
2099 switch(binary->getOp())
2101 case EOpIndexDirect:
2102 parameter.index += right->getAsConstantUnion()->getIConst(0);
2104 case EOpIndexIndirect:
// Dynamic index: address the sampler array relatively through the index register.
2105 if(left->getArraySize() > 1)
2107 parameter.rel.type = registerType(binary->getRight());
2108 parameter.rel.index = registerIndex(binary->getRight());
2109 parameter.rel.scale = 1;
2110 parameter.rel.deterministic = true;
2113 case EOpIndexDirectStruct:
2114 case EOpIndexDirectInterfaceBlock:
2115 parameter.index += right->getAsConstantUnion()->getIConst(0);
2118 UNREACHABLE(binary->getOp());
// Samplers have no components to swizzle.
2124 if(!IsSampler(arg->getBasicType()))
2126 parameter.swizzle = readSwizzle(arg, size);
2131 void OutputASM::copy(TIntermTyped *dst, TIntermNode *src, int offset)
\r
2133 for(int index = 0; index < dst->totalRegisterCount(); index++)
\r
2135 Instruction *mov = emit(sw::Shader::OPCODE_MOV, dst, src);
\r
2136 mov->dst.index += index;
\r
2137 mov->dst.mask = writeMask(dst, index);
\r
2138 argument(mov->src[0], src, offset + index);
\r
// Extracts one selector from a packed swizzle. A swizzle packs four 2-bit
// component selectors (x=0, y=1, z=2, w=3) into one byte, element 0 in the
// lowest bits; e.g. 0xE4 (11 10 01 00) is the identity swizzle .xyzw.
int swizzleElement(int swizzle, int index)
{
	const int shift = index * 2;

	return (swizzle >> shift) & 0x03;
}
// Composes two packed swizzles: component i of the result selects
// left[right[i]], i.e. applying the right swizzle first, then the left.
// Each swizzle packs four 2-bit selectors, element 0 in the lowest bits.
int swizzleSwizzle(int leftSwizzle, int rightSwizzle)
{
	int combined = 0;

	for(int component = 0; component < 4; component++)
	{
		const int selector = (rightSwizzle >> (component * 2)) & 0x03;   // element chosen by the right swizzle
		const int element = (leftSwizzle >> (selector * 2)) & 0x03;      // remapped through the left swizzle

		combined |= element << (component * 2);
	}

	return combined;
}
// Stores 'src' through the l-value 'dst', handling swizzled destinations,
// dynamically indexed scalars (via INSERT), and multi-register moves.
2155 void OutputASM::assignLvalue(TIntermTyped *dst, TIntermTyped *src)
// Shape check between the value and the l-value.
// NOTE(review): the first comparison below tests dst->getNominalSize()
// against itself and is therefore always false; by symmetry with the matrix
// check it was presumably meant to be
// src->getNominalSize() != dst->getNominalSize() — confirm and fix.
2158 ((src->isVector() && (!dst->isVector() || (dst->getNominalSize() != dst->getNominalSize()))) ||
2159 (src->isMatrix() && (!dst->isMatrix() || (src->getNominalSize() != dst->getNominalSize()) || (src->getSecondarySize() != dst->getSecondarySize())))))
2161 return mContext.error(src->getLine(), "Result type should match the l-value type in compound assignment", src->isVector() ? "vector" : "matrix");
2164 TIntermBinary *binary = dst->getAsBinaryNode();
// Dynamically indexed scalar within a register: needs an INSERT instruction.
2166 if(binary && binary->getOp() == EOpIndexIndirect && dst->isScalar())
2168 Instruction *insert = new Instruction(sw::Shader::OPCODE_INSERT);
2170 Temporary address(this);
2171 lvalue(insert->dst, address, dst);
// INSERT reads the destination register, the new value, and the component index.
2173 insert->src[0].type = insert->dst.type;
2174 insert->src[0].index = insert->dst.index;
2175 insert->src[0].rel = insert->dst.rel;
2176 argument(insert->src[1], src);
2177 argument(insert->src[2], binary->getRight());
2179 shader->append(insert);
// General case: one MOV per destination register, with the source swizzled
// through the l-value's accumulated swizzle.
2183 for(int offset = 0; offset < dst->totalRegisterCount(); offset++)
2185 Instruction *mov = new Instruction(sw::Shader::OPCODE_MOV);
2187 Temporary address(this);
2188 int swizzle = lvalue(mov->dst, address, dst);
2189 mov->dst.index += offset;
2193 mov->dst.mask = writeMask(dst, offset);
2196 argument(mov->src[0], src, offset);
2197 mov->src[0].swizzle = swizzleSwizzle(mov->src[0].swizzle, swizzle);
2199 shader->append(mov);
// Resolves an l-value expression into a destination parameter (register type,
// index, write mask, relative addressing), using 'address' as a scratch
// register for dynamic indices. Returns the accumulated swizzle of the
// l-value chain, which callers apply to the stored value.
2204 int OutputASM::lvalue(sw::Shader::DestinationParameter &dst, Temporary &address, TIntermTyped *node)
2206 TIntermTyped *result = node;
2207 TIntermBinary *binary = node->getAsBinaryNode();
2208 TIntermSymbol *symbol = node->getAsSymbolNode();
// Binary l-values: resolve the left side first, then apply the index/swizzle.
2212 TIntermTyped *left = binary->getLeft();
2213 TIntermTyped *right = binary->getRight();
2215 int leftSwizzle = lvalue(dst, address, left); // Resolve the l-value of the left side
2217 switch(binary->getOp())
2219 case EOpIndexDirect:
2221 int rightIndex = right->getAsConstantUnion()->getIConst(0);
// Indexing a vector component: narrow the write mask to that one element.
2223 if(left->isRegister())
2225 int leftMask = dst.mask;
2228 while((leftMask & dst.mask) == 0)
2230 dst.mask = dst.mask << 1;
2233 int element = swizzleElement(leftSwizzle, rightIndex);
2234 dst.mask = 1 << element;
// Indexing an array element or matrix column: advance the register index.
2238 else if(left->isArray() || left->isMatrix())
2240 dst.index += rightIndex * result->totalRegisterCount();
2243 else UNREACHABLE(0);
2246 case EOpIndexIndirect:
2248 if(left->isRegister())
2250 // Requires INSERT instruction (handled by calling function)
2252 else if(left->isArray() || left->isMatrix())
2254 int scale = result->totalRegisterCount();
2256 if(dst.rel.type == sw::Shader::PARAMETER_VOID) // Use the index register as the relative address directly
2258 if(left->totalRegisterCount() > 1)
2260 sw::Shader::SourceParameter relativeRegister;
2261 argument(relativeRegister, right);
2263 dst.rel.index = relativeRegister.index;
2264 dst.rel.type = relativeRegister.type;
2265 dst.rel.scale = scale;
// Dynamic uniform indexing in a vertex shader may diverge between instances.
2266 dst.rel.deterministic = !(vertexShader && left->getQualifier() == EvqUniform);
2269 else if(dst.rel.index != registerIndex(&address)) // Move the previous index register to the address register
// address = previousIndex * previousScale (+ right when scale == 1)
2273 Constant oldScale((int)dst.rel.scale);
2274 Instruction *mad = emit(sw::Shader::OPCODE_IMAD, &address, &address, &oldScale, right);
2275 mad->src[0].index = dst.rel.index;
2276 mad->src[0].type = dst.rel.type;
2280 Constant oldScale((int)dst.rel.scale);
2281 Instruction *mul = emit(sw::Shader::OPCODE_IMUL, &address, &address, &oldScale);
2282 mul->src[0].index = dst.rel.index;
2283 mul->src[0].type = dst.rel.type;
// address += right * scale
2285 Constant newScale(scale);
2286 emit(sw::Shader::OPCODE_IMAD, &address, right, &newScale, &address);
// From here on the address register holds the relative index with scale 1.
2289 dst.rel.type = sw::Shader::PARAMETER_TEMP;
2290 dst.rel.index = registerIndex(&address);
2291 dst.rel.scale = 1;
2293 else // Just add the new index to the address register
2297 emit(sw::Shader::OPCODE_IADD, &address, &address, right);
// (scale != 1) address += right * scale
2301 Constant newScale(scale);
2302 emit(sw::Shader::OPCODE_IMAD, &address, right, &newScale, &address);
2306 else UNREACHABLE(0);
2309 case EOpIndexDirectStruct:
2310 case EOpIndexDirectInterfaceBlock:
// Member access: add the register offset of the preceding fields.
2312 const TFieldList& fields = (binary->getOp() == EOpIndexDirectStruct) ?
2313 left->getType().getStruct()->fields() :
2314 left->getType().getInterfaceBlock()->fields();
2315 int index = right->getAsConstantUnion()->getIConst(0);
2316 int fieldOffset = 0;
2318 for(int i = 0; i < index; i++)
2320 fieldOffset += fields[i]->type()->totalRegisterCount();
2323 dst.type = registerType(left);
2324 dst.index += fieldOffset;
2325 dst.mask = writeMask(right);
// Swizzle l-value: intersect the write mask and compose the swizzle.
2330 case EOpVectorSwizzle:
2332 ASSERT(left->isRegister());
2334 int leftMask = dst.mask;
2337 int rightMask = 0;
2339 TIntermSequence &sequence = right->getAsAggregate()->getSequence();
2341 for(unsigned int i = 0; i < sequence.size(); i++)
2343 int index = sequence[i]->getAsConstantUnion()->getIConst(0);
2345 int element = swizzleElement(leftSwizzle, index);
2346 rightMask = rightMask | (1 << element);
2347 swizzle = swizzle | swizzleElement(leftSwizzle, i) << (element * 2);
2350 dst.mask = leftMask & rightMask;
2356 UNREACHABLE(binary->getOp()); // Not an l-value operator
// Plain symbol: its own register is the destination.
2362 dst.type = registerType(symbol);
2363 dst.index = registerIndex(symbol);
2364 dst.mask = writeMask(symbol);
2371 sw::Shader::ParameterType OutputASM::registerType(TIntermTyped *operand)
\r
2373 if(isSamplerRegister(operand))
\r
2375 return sw::Shader::PARAMETER_SAMPLER;
\r
2378 const TQualifier qualifier = operand->getQualifier();
\r
2379 if((EvqFragColor == qualifier) || (EvqFragData == qualifier))
\r
2381 if(((EvqFragData == qualifier) && (EvqFragColor == outputQualifier)) ||
\r
2382 ((EvqFragColor == qualifier) && (EvqFragData == outputQualifier)))
\r
2384 mContext.error(operand->getLine(), "static assignment to both gl_FragData and gl_FragColor", "");
\r
2386 outputQualifier = qualifier;
\r
2391 case EvqTemporary: return sw::Shader::PARAMETER_TEMP;
\r
2392 case EvqGlobal: return sw::Shader::PARAMETER_TEMP;
\r
2393 case EvqConstExpr: return sw::Shader::PARAMETER_FLOAT4LITERAL; // All converted to float
\r
2394 case EvqAttribute: return sw::Shader::PARAMETER_INPUT;
\r
2395 case EvqVaryingIn: return sw::Shader::PARAMETER_INPUT;
\r
2396 case EvqVaryingOut: return sw::Shader::PARAMETER_OUTPUT;
\r
2397 case EvqVertexIn: return sw::Shader::PARAMETER_INPUT;
\r
2398 case EvqFragmentOut: return sw::Shader::PARAMETER_COLOROUT;
\r
2399 case EvqVertexOut: return sw::Shader::PARAMETER_OUTPUT;
\r
2400 case EvqFragmentIn: return sw::Shader::PARAMETER_INPUT;
\r
2401 case EvqInvariantVaryingIn: return sw::Shader::PARAMETER_INPUT; // FIXME: Guarantee invariance at the backend
\r
2402 case EvqInvariantVaryingOut: return sw::Shader::PARAMETER_OUTPUT; // FIXME: Guarantee invariance at the backend
\r
2403 case EvqSmooth: return sw::Shader::PARAMETER_OUTPUT;
\r
2404 case EvqFlat: return sw::Shader::PARAMETER_OUTPUT;
\r
2405 case EvqCentroidOut: return sw::Shader::PARAMETER_OUTPUT;
\r
2406 case EvqSmoothIn: return sw::Shader::PARAMETER_INPUT;
\r
2407 case EvqFlatIn: return sw::Shader::PARAMETER_INPUT;
\r
2408 case EvqCentroidIn: return sw::Shader::PARAMETER_INPUT;
\r
2409 case EvqUniform: return sw::Shader::PARAMETER_CONST;
\r
2410 case EvqIn: return sw::Shader::PARAMETER_TEMP;
\r
2411 case EvqOut: return sw::Shader::PARAMETER_TEMP;
\r
2412 case EvqInOut: return sw::Shader::PARAMETER_TEMP;
\r
2413 case EvqConstReadOnly: return sw::Shader::PARAMETER_TEMP;
\r
2414 case EvqPosition: return sw::Shader::PARAMETER_OUTPUT;
\r
2415 case EvqPointSize: return sw::Shader::PARAMETER_OUTPUT;
\r
2416 case EvqInstanceID: return sw::Shader::PARAMETER_MISCTYPE;
\r
2417 case EvqFragCoord: return sw::Shader::PARAMETER_MISCTYPE;
\r
2418 case EvqFrontFacing: return sw::Shader::PARAMETER_MISCTYPE;
\r
2419 case EvqPointCoord: return sw::Shader::PARAMETER_INPUT;
\r
2420 case EvqFragColor: return sw::Shader::PARAMETER_COLOROUT;
\r
2421 case EvqFragData: return sw::Shader::PARAMETER_COLOROUT;
\r
2422 case EvqFragDepth: return sw::Shader::PARAMETER_DEPTHOUT;
\r
2423 default: UNREACHABLE(qualifier);
\r
2426 return sw::Shader::PARAMETER_VOID;
\r
2429 unsigned int OutputASM::registerIndex(TIntermTyped *operand)
\r
2431 if(isSamplerRegister(operand))
\r
2433 return samplerRegister(operand);
\r
2436 switch(operand->getQualifier())
\r
2438 case EvqTemporary: return temporaryRegister(operand);
\r
2439 case EvqGlobal: return temporaryRegister(operand);
\r
2440 case EvqConstExpr: UNREACHABLE(EvqConstExpr);
\r
2441 case EvqAttribute: return attributeRegister(operand);
\r
2442 case EvqVaryingIn: return varyingRegister(operand);
\r
2443 case EvqVaryingOut: return varyingRegister(operand);
\r
2444 case EvqVertexIn: return attributeRegister(operand);
\r
2445 case EvqFragmentOut: return fragmentOutputRegister(operand);
\r
2446 case EvqVertexOut: return varyingRegister(operand);
\r
2447 case EvqFragmentIn: return varyingRegister(operand);
\r
2448 case EvqInvariantVaryingIn: return varyingRegister(operand);
\r
2449 case EvqInvariantVaryingOut: return varyingRegister(operand);
\r
2450 case EvqSmooth: return varyingRegister(operand);
\r
2451 case EvqFlat: return varyingRegister(operand);
\r
2452 case EvqCentroidOut: return varyingRegister(operand);
\r
2453 case EvqSmoothIn: return varyingRegister(operand);
\r
2454 case EvqFlatIn: return varyingRegister(operand);
\r
2455 case EvqCentroidIn: return varyingRegister(operand);
\r
2456 case EvqUniform: return uniformRegister(operand);
\r
2457 case EvqIn: return temporaryRegister(operand);
\r
2458 case EvqOut: return temporaryRegister(operand);
\r
2459 case EvqInOut: return temporaryRegister(operand);
\r
2460 case EvqConstReadOnly: return temporaryRegister(operand);
\r
2461 case EvqPosition: return varyingRegister(operand);
\r
2462 case EvqPointSize: return varyingRegister(operand);
\r
2463 case EvqInstanceID: vertexShader->instanceIdDeclared = true; return 0;
\r
2464 case EvqFragCoord: pixelShader->vPosDeclared = true; return 0;
\r
2465 case EvqFrontFacing: pixelShader->vFaceDeclared = true; return 1;
\r
2466 case EvqPointCoord: return varyingRegister(operand);
\r
2467 case EvqFragColor: return 0;
\r
2468 case EvqFragData: return 0;
\r
2469 case EvqFragDepth: return 0;
\r
2470 default: UNREACHABLE(operand->getQualifier());
\r
2476 int OutputASM::writeMask(TIntermTyped *destination, int index)
\r
2478 if(destination->getQualifier() == EvqPointSize)
\r
2480 return 0x2; // Point size stored in the y component
\r
2483 return 0xF >> (4 - registerSize(destination->getType(), index));
\r
2486 int OutputASM::readSwizzle(TIntermTyped *argument, int size)
\r
2488 if(argument->getQualifier() == EvqPointSize)
\r
2490 return 0x55; // Point size stored in the y component
\r
2493 static const unsigned char swizzleSize[5] = {0x00, 0x00, 0x54, 0xA4, 0xE4}; // (void), xxxx, xyyy, xyzz, xyzw
\r
2495 return swizzleSize[size];
\r
2498 // Conservatively checks whether an expression is fast to compute and has no side effects
\r
2499 bool OutputASM::trivial(TIntermTyped *expression, int budget)
\r
2501 if(!expression->isRegister())
\r
2506 return cost(expression, budget) >= 0;
\r
2509 // Returns the remaining computing budget (if < 0 the expression is too expensive or has side effects)
// NOTE(review): this capture is missing the original braces, blank lines, some
// case labels and default/return paths (original line numbers jump, e.g.
// 2535 -> 2539). Only comments are added here; the surviving code is untouched.

2510 int OutputASM::cost(TIntermNode *expression, int budget)

// Symbols and constants cost nothing: they are plain register reads.
2517 if(expression->getAsSymbolNode())

2521 else if(expression->getAsConstantUnion())

2525 else if(expression->getAsBinaryNode())

2527 TIntermBinary *binary = expression->getAsBinaryNode();

2529 switch(binary->getOp())

// Pure addressing/swizzling operators are free — only the left operand is charged.
2531 case EOpVectorSwizzle:

2532 case EOpIndexDirect:

2533 case EOpIndexDirectStruct:

2534 case EOpIndexDirectInterfaceBlock:

2535 return cost(binary->getLeft(), budget - 0);

// Other binary operators (cases lost from this capture) charge one unit and
// recurse into both operands.
2539 return cost(binary->getLeft(), cost(binary->getRight(), budget - 1));

2544 else if(expression->getAsUnaryNode())

2546 TIntermUnary *unary = expression->getAsUnaryNode();

2548 switch(unary->getOp())

// Cheap unary operators (labels lost from this capture) cost one unit.
2552 return cost(unary->getOperand(), budget - 1);

2557 else if(expression->getAsSelectionNode())

2559 TIntermSelection *selection = expression->getAsSelectionNode();

// Only the ternary operator form of selection is costed; if/else statements
// presumably fall through to the expensive path — TODO confirm against upstream.
2561 if(selection->usesTernaryOperator())

2563 TIntermTyped *condition = selection->getCondition();

2564 TIntermNode *trueBlock = selection->getTrueBlock();

2565 TIntermNode *falseBlock = selection->getFalseBlock();

2566 TIntermConstantUnion *constantCondition = condition->getAsConstantUnion();

// A compile-time constant condition selects exactly one branch, so only
// that branch is charged.
2568 if(constantCondition)

2570 bool trueCondition = constantCondition->getUnionArrayPointer()->getBConst();

2574 return cost(trueBlock, budget - 0);

2578 return cost(falseBlock, budget - 0);

// Non-constant condition: both branches may be evaluated (select), plus
// two units for the comparison/select itself.
2583 return cost(trueBlock, cost(falseBlock, budget - 2));
\r
2591 const Function *OutputASM::findFunction(const TString &name)
\r
2593 for(unsigned int f = 0; f < functionArray.size(); f++)
\r
2595 if(functionArray[f].name == name)
\r
2597 return &functionArray[f];
\r
2604 int OutputASM::temporaryRegister(TIntermTyped *temporary)
\r
2606 return allocate(temporaries, temporary);
\r
// Assigns a register range to a varying and fills in the semantic tables of
// the pixel or vertex shader accordingly.
// NOTE(review): braces, the allocation guard (`if(var == -1)` style), the
// pixel/vertex dispatch lines and the error-path return values are missing
// from this capture (original numbering jumps, e.g. 2611 -> 2615). Only
// comments are added; surviving code is untouched.
2609 int OutputASM::varyingRegister(TIntermTyped *varying)

2611 int var = lookup(varyings, varying);

// First encounter: allocate registers and declare semantics.
2615 var = allocate(varyings, varying);

2616 int componentCount = varying->registerSize();

2617 int registerCount = varying->totalRegisterCount();

// Pixel-shader side: varyings are inputs, capped at MAX_INPUT_VARYINGS.
2621 if((var + registerCount) > sw::PixelShader::MAX_INPUT_VARYINGS)

2623 mContext.error(varying->getLine(), "Varyings packing failed: Too many varyings", "fragment shader");

2627 if(varying->getQualifier() == EvqPointCoord)

2629 ASSERT(varying->isRegister());

// gl_PointCoord uses the TEXCOORD semantic, one component per declared channel.
2630 if(componentCount >= 1) pixelShader->semantic[var][0] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);

2631 if(componentCount >= 2) pixelShader->semantic[var][1] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);

2632 if(componentCount >= 3) pixelShader->semantic[var][2] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);

2633 if(componentCount >= 4) pixelShader->semantic[var][3] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);

// User varyings use the COLOR semantic across every register they occupy.
2637 for(int i = 0; i < varying->totalRegisterCount(); i++)

2639 if(componentCount >= 1) pixelShader->semantic[var + i][0] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);

2640 if(componentCount >= 2) pixelShader->semantic[var + i][1] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);

2641 if(componentCount >= 3) pixelShader->semantic[var + i][2] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);

2642 if(componentCount >= 4) pixelShader->semantic[var + i][3] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);

// Vertex-shader side: varyings are outputs, capped at MAX_OUTPUT_VARYINGS.
2646 else if(vertexShader)

2648 if((var + registerCount) > sw::VertexShader::MAX_OUTPUT_VARYINGS)

2650 mContext.error(varying->getLine(), "Varyings packing failed: Too many varyings", "vertex shader");

// gl_Position: POSITION semantic on all four components.
2654 if(varying->getQualifier() == EvqPosition)

2656 ASSERT(varying->isRegister());

2657 vertexShader->output[var][0] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);

2658 vertexShader->output[var][1] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);

2659 vertexShader->output[var][2] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);

2660 vertexShader->output[var][3] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);

2661 vertexShader->positionRegister = var;

// gl_PointSize: PSIZE semantic, register remembered for the rasterizer.
2663 else if(varying->getQualifier() == EvqPointSize)

2665 ASSERT(varying->isRegister());

2666 vertexShader->output[var][0] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);

2667 vertexShader->output[var][1] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);

2668 vertexShader->output[var][2] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);

2669 vertexShader->output[var][3] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);

2670 vertexShader->pointSizeRegister = var;

2674 // Semantic indexes for user varyings will be assigned during program link to match the pixel shader

// Neither pixel nor vertex shader active — cannot happen.
2677 else UNREACHABLE(0);

// Make the varying visible to the program linker.
2679 declareVarying(varying, var);
\r
// Records 'varying' (with its assigned register 'reg') in the shader object's
// varying list for program linking. Re-declaring an already-listed varying
// must agree with any register previously assigned (see the ASSERT).
// NOTE(review): braces and the lines between the ASSERT and the push_back
// (original 2697-2707, presumably assigning v->reg and returning early) are
// missing from this capture. Only comments are added; code is untouched.
2685 void OutputASM::declareVarying(TIntermTyped *varying, int reg)

2687 if(varying->getQualifier() != EvqPointCoord)   // gl_PointCoord does not need linking

2689 const TType &type = varying->getType();

2690 const char *name = varying->getAsSymbolNode()->getSymbol().c_str();

2691 VaryingList &activeVaryings = shaderObject->varyings;

2693 // Check if this varying has been declared before without having a register assigned

2694 for(VaryingList::iterator v = activeVaryings.begin(); v != activeVaryings.end(); v++)

2696 if(v->name == name)

// A varying may be listed with reg < 0 (declared, not yet placed); a second
// declaration must either supply the same register or fill in the missing one.
2700 ASSERT(v->reg < 0 || v->reg == reg);

// Not seen before: append a fresh entry.
2708 activeVaryings.push_back(glsl::Varying(glVariableType(type), name, varying->getArraySize(), reg, 0));
\r
2712 int OutputASM::uniformRegister(TIntermTyped *uniform)
\r
2714 const TType &type = uniform->getType();
\r
2715 ASSERT(!IsSampler(type.getBasicType()));
\r
2716 TInterfaceBlock *block = type.getAsInterfaceBlock();
\r
2717 TIntermSymbol *symbol = uniform->getAsSymbolNode();
\r
2718 ASSERT(symbol || block);
\r
2720 if(symbol || block)
\r
2722 int index = lookup(uniforms, uniform);
\r
2726 index = allocate(uniforms, uniform);
\r
2727 const TString &name = symbol ? symbol->getSymbol() : block->name();
\r
2729 declareUniform(type, name, index);
\r
2738 int OutputASM::attributeRegister(TIntermTyped *attribute)
\r
2740 ASSERT(!attribute->isArray());
\r
2742 int index = lookup(attributes, attribute);
\r
2746 TIntermSymbol *symbol = attribute->getAsSymbolNode();
\r
2751 index = allocate(attributes, attribute);
\r
2752 const TType &type = attribute->getType();
\r
2753 int registerCount = attribute->totalRegisterCount();
\r
2755 if(vertexShader && (index + registerCount) <= sw::VertexShader::MAX_INPUT_ATTRIBUTES)
\r
2757 for(int i = 0; i < registerCount; i++)
\r
2759 vertexShader->input[index + i] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, index + i);
\r
2763 ActiveAttributes &activeAttributes = shaderObject->activeAttributes;
\r
2765 const char *name = symbol->getSymbol().c_str();
\r
2766 activeAttributes.push_back(Attribute(glVariableType(type), name, type.getArraySize(), type.getLayoutQualifier().location, index));
\r
2773 int OutputASM::fragmentOutputRegister(TIntermTyped *fragmentOutput)
\r
2775 return allocate(fragmentOutputs, fragmentOutput);
\r
2778 int OutputASM::samplerRegister(TIntermTyped *sampler)
\r
2780 ASSERT(IsSampler(sampler->getType().getBasicType()));
\r
2781 TIntermSymbol *symbol = sampler->getAsSymbolNode();
\r
2782 TIntermBinary *binary = sampler->getAsBinaryNode();
\r
2786 return samplerRegister(symbol);
\r
2790 ASSERT(binary->getOp() == EOpIndexDirect || binary->getOp() == EOpIndexIndirect ||
\r
2791 binary->getOp() == EOpIndexDirectStruct || binary->getOp() == EOpIndexDirectInterfaceBlock);
\r
2793 return samplerRegister(binary->getLeft()); // Index added later
\r
2795 else UNREACHABLE(0);
\r
2800 int OutputASM::samplerRegister(TIntermSymbol *sampler)
\r
2802 const TType &type = sampler->getType();
\r
2803 ASSERT(IsSampler(type.getBasicType()) || type.getStruct()); // Structures can contain samplers
\r
2805 int index = lookup(samplers, sampler);
\r
2809 index = allocate(samplers, sampler);
\r
2811 if(sampler->getQualifier() == EvqUniform)
\r
2813 const char *name = sampler->getSymbol().c_str();
\r
2814 declareUniform(type, name, index);
\r
// Finds the register index of 'variable' in 'list'. Matches first by pointer
// identity, then by interface-block name, then by symbol id (the same logical
// variable can appear as distinct AST nodes).
// NOTE(review): braces, the null-guards on list entries and the return
// statements inside the match branches (plus the final not-found return,
// presumably -1) are missing from this capture (numbering jumps, e.g.
// 2832 -> 2836). Only comments are added; surviving code is untouched.
2821 int OutputASM::lookup(VariableArray &list, TIntermTyped *variable)

2823 for(unsigned int i = 0; i < list.size(); i++)

2825 if(list[i] == variable)

2827 return i; // Pointer match

2831 TIntermSymbol *varSymbol = variable->getAsSymbolNode();

2832 TInterfaceBlock *varBlock = variable->getType().getAsInterfaceBlock();

// Interface blocks match by name; the ASSERTs verify the two declarations
// are structurally identical.
2836 for(unsigned int i = 0; i < list.size(); i++)

2840 TInterfaceBlock *listBlock = list[i]->getType().getAsInterfaceBlock();

2844 if(listBlock->name() == varBlock->name())

2846 ASSERT(listBlock->arraySize() == varBlock->arraySize());

2847 ASSERT(listBlock->fields() == varBlock->fields());

2848 ASSERT(listBlock->blockStorage() == varBlock->blockStorage());

2849 ASSERT(listBlock->matrixPacking() == varBlock->matrixPacking());

// Plain symbols match by unique id; name/type/qualifier must then agree.
2857 else if(varSymbol)

2859 for(unsigned int i = 0; i < list.size(); i++)

2863 TIntermSymbol *listSymbol = list[i]->getAsSymbolNode();

2867 if(listSymbol->getId() == varSymbol->getId())

2869 ASSERT(listSymbol->getSymbol() == varSymbol->getSymbol());

2870 ASSERT(listSymbol->getType() == varSymbol->getType());

2871 ASSERT(listSymbol->getQualifier() == varSymbol->getQualifier());
\r
// Returns the register index of 'variable' in 'list', allocating a contiguous
// run of totalRegisterCount() slots if it is not present. Uses first-fit over
// freed (null) slots before growing the list.
// NOTE(review): braces, the `index == -1` guard, the free-slot test and the
// return statements are missing from this capture (numbering jumps, e.g.
// 2885 -> 2889). Only comments are added; surviving code is untouched.
2883 int OutputASM::allocate(VariableArray &list, TIntermTyped *variable)

2885 int index = lookup(list, variable);

2889 unsigned int registerCount = variable->totalRegisterCount();

// First-fit scan: look for registerCount consecutive free (null) entries.
2891 for(unsigned int i = 0; i < list.size(); i++)

2895 unsigned int j = 1;

2896 for( ; j < registerCount && (i + j) < list.size(); j++)

// An occupied slot inside the candidate run ends this attempt.
2898 if(list[i + j] != 0)

2904 if(j == registerCount) // Found free slots

// Claim the whole run for this variable.
2906 for(unsigned int j = 0; j < registerCount; j++)

2908 list[i + j] = variable;

// No gap was large enough: append at the end.
2916 index = list.size();

2918 for(unsigned int i = 0; i < registerCount; i++)

2920 list.push_back(variable);
\r
// Releases the register slots held by 'variable' in 'list'.
// NOTE(review): everything after the lookup (original lines 2930-2936 —
// presumably nulling out the occupied entries when index >= 0) is missing
// from this capture; the surviving code is untouched.
2927 void OutputASM::free(VariableArray &list, TIntermTyped *variable)

2929 int index = lookup(list, variable);
\r
// Recursively publishes a uniform (scalar/vector/matrix, struct, or interface
// block) into the shader object's active-uniform tables, computing block
// layout offsets via 'encoder' when inside a uniform block.
// NOTE(review): braces, blank lines and several guard lines are missing from
// this capture (numbering jumps, e.g. 2943 -> 2947). Only comments are added;
// surviving code is untouched.
2937 void OutputASM::declareUniform(const TType &type, const TString &name, int registerIndex, int blockId, BlockLayoutEncoder* encoder)

2939 const TStructure *structure = type.getStruct();

2940 const TInterfaceBlock *block = (type.isInterfaceBlock() || (blockId == -1)) ? type.getInterfaceBlock() : nullptr;

2941 ActiveUniforms &activeUniforms = shaderObject->activeUniforms;

// Leaf case: a basic-typed uniform (not a struct, not a block).
2943 if(!structure && !block)

// Member of a uniform block: link it back to its block's field list.
2947 shaderObject->activeUniformBlocks[blockId].fields.push_back(activeUniforms.size());

2949 BlockMemberInfo blockInfo = encoder ? encoder->encodeType(type) : BlockMemberInfo::getDefaultBlockInfo();

2950 int regIndex = encoder ? registerIndex + BlockLayoutEncoder::getBlockRegister(blockInfo) : registerIndex;

2951 activeUniforms.push_back(Uniform(glVariableType(type), glVariablePrecision(type), name.c_str(), type.getArraySize(),

2952 regIndex, blockId, blockInfo));

// Samplers additionally get one declaration per occupied register.
2954 if(isSamplerRegister(type))

2956 for(int i = 0; i < type.totalRegisterCount(); i++)

2958 shader->declareSampler(regIndex + i);

// Interface block: register the block itself, then recurse into its fields
// with a fresh std140 layout encoder.
2964 ActiveUniformBlocks &activeUniformBlocks = shaderObject->activeUniformBlocks;

2965 blockId = activeUniformBlocks.size();

2966 bool isRowMajor = block->matrixPacking() == EmpRowMajor;

2967 const TString &blockName = block->name();

2968 activeUniformBlocks.push_back(UniformBlock(blockName.c_str(), 0, block->arraySize(),

2969 block->blockStorage(), isRowMajor, registerIndex, blockId));

2971 const TFieldList& fields = block->fields();

2972 Std140BlockEncoder currentBlockEncoder(isRowMajor);

2973 for(size_t i = 0; i < fields.size(); i++)

2975 const TType &fieldType = *(fields[i]->type());

2976 const TString &fieldName = fields[i]->name();

2977 const TString uniformName = block->hasInstanceName() ? blockName + "." + fieldName : fieldName;

// NOTE(review): "¤tBlockEncoder" below looks like HTML-entity garbling of
// "&currentBlockEncoder" ("&curren" -> ¤) — confirm against upstream source.
2979 declareUniform(fieldType, uniformName, registerIndex, blockId, ¤tBlockEncoder);

2981 activeUniformBlocks[blockId].dataSize = currentBlockEncoder.getBlockSize();

// Struct (or struct array) case: expand each field, advancing the register
// index by each field's register footprint.
2985 int fieldRegisterIndex = registerIndex;

2987 const TFieldList& fields = structure->fields();

// Arrays of structs expand element by element, as "name[i].field".
2988 if(type.isArray() && (structure || type.isInterfaceBlock()))

2990 for(int i = 0; i < type.getArraySize(); i++)

2994 encoder->enterAggregateType();

2996 for(size_t j = 0; j < fields.size(); j++)

2998 const TType &fieldType = *(fields[j]->type());

2999 const TString &fieldName = fields[j]->name();

3000 const TString uniformName = name + "[" + str(i) + "]." + fieldName;

3002 declareUniform(fieldType, uniformName, fieldRegisterIndex, blockId, encoder);

3005 int registerCount = fieldType.totalRegisterCount();

3006 fieldRegisterIndex += registerCount;

3011 encoder->exitAggregateType();

// Single struct: expand fields as "name.field".
3019 encoder->enterAggregateType();

3021 for(size_t i = 0; i < fields.size(); i++)

3023 const TType &fieldType = *(fields[i]->type());

3024 const TString &fieldName = fields[i]->name();

3025 const TString uniformName = name + "." + fieldName;

3027 declareUniform(fieldType, uniformName, fieldRegisterIndex, blockId, encoder);

3030 int registerCount = fieldType.totalRegisterCount();

3031 fieldRegisterIndex += registerCount;

3036 encoder->exitAggregateType();
\r
3042 GLenum OutputASM::glVariableType(const TType &type)
\r
3044 switch(type.getBasicType())
\r
3047 if(type.isScalar())
\r
3051 else if(type.isVector())
\r
3053 switch(type.getNominalSize())
\r
3055 case 2: return GL_FLOAT_VEC2;
\r
3056 case 3: return GL_FLOAT_VEC3;
\r
3057 case 4: return GL_FLOAT_VEC4;
\r
3058 default: UNREACHABLE(type.getNominalSize());
\r
3061 else if(type.isMatrix())
\r
3063 switch(type.getNominalSize())
\r
3066 switch(type.getSecondarySize())
\r
3068 case 2: return GL_FLOAT_MAT2;
\r
3069 case 3: return GL_FLOAT_MAT2x3;
\r
3070 case 4: return GL_FLOAT_MAT2x4;
\r
3071 default: UNREACHABLE(type.getSecondarySize());
\r
3074 switch(type.getSecondarySize())
\r
3076 case 2: return GL_FLOAT_MAT3x2;
\r
3077 case 3: return GL_FLOAT_MAT3;
\r
3078 case 4: return GL_FLOAT_MAT3x4;
\r
3079 default: UNREACHABLE(type.getSecondarySize());
\r
3082 switch(type.getSecondarySize())
\r
3084 case 2: return GL_FLOAT_MAT4x2;
\r
3085 case 3: return GL_FLOAT_MAT4x3;
\r
3086 case 4: return GL_FLOAT_MAT4;
\r
3087 default: UNREACHABLE(type.getSecondarySize());
\r
3089 default: UNREACHABLE(type.getNominalSize());
\r
3092 else UNREACHABLE(0);
\r
3095 if(type.isScalar())
\r
3099 else if(type.isVector())
\r
3101 switch(type.getNominalSize())
\r
3103 case 2: return GL_INT_VEC2;
\r
3104 case 3: return GL_INT_VEC3;
\r
3105 case 4: return GL_INT_VEC4;
\r
3106 default: UNREACHABLE(type.getNominalSize());
\r
3109 else UNREACHABLE(0);
\r
3112 if(type.isScalar())
\r
3114 return GL_UNSIGNED_INT;
\r
3116 else if(type.isVector())
\r
3118 switch(type.getNominalSize())
\r
3120 case 2: return GL_UNSIGNED_INT_VEC2;
\r
3121 case 3: return GL_UNSIGNED_INT_VEC3;
\r
3122 case 4: return GL_UNSIGNED_INT_VEC4;
\r
3123 default: UNREACHABLE(type.getNominalSize());
\r
3126 else UNREACHABLE(0);
\r
3129 if(type.isScalar())
\r
3133 else if(type.isVector())
\r
3135 switch(type.getNominalSize())
\r
3137 case 2: return GL_BOOL_VEC2;
\r
3138 case 3: return GL_BOOL_VEC3;
\r
3139 case 4: return GL_BOOL_VEC4;
\r
3140 default: UNREACHABLE(type.getNominalSize());
\r
3143 else UNREACHABLE(0);
\r
3145 case EbtSampler2D:
\r
3146 return GL_SAMPLER_2D;
\r
3147 case EbtISampler2D:
\r
3148 return GL_INT_SAMPLER_2D;
\r
3149 case EbtUSampler2D:
\r
3150 return GL_UNSIGNED_INT_SAMPLER_2D;
\r
3151 case EbtSamplerCube:
\r
3152 return GL_SAMPLER_CUBE;
\r
3153 case EbtISamplerCube:
\r
3154 return GL_INT_SAMPLER_CUBE;
\r
3155 case EbtUSamplerCube:
\r
3156 return GL_UNSIGNED_INT_SAMPLER_CUBE;
\r
3157 case EbtSamplerExternalOES:
\r
3158 return GL_SAMPLER_EXTERNAL_OES;
\r
3159 case EbtSampler3D:
\r
3160 return GL_SAMPLER_3D_OES;
\r
3161 case EbtISampler3D:
\r
3162 return GL_INT_SAMPLER_3D;
\r
3163 case EbtUSampler3D:
\r
3164 return GL_UNSIGNED_INT_SAMPLER_3D;
\r
3165 case EbtSampler2DArray:
\r
3166 return GL_SAMPLER_2D_ARRAY;
\r
3167 case EbtISampler2DArray:
\r
3168 return GL_INT_SAMPLER_2D_ARRAY;
\r
3169 case EbtUSampler2DArray:
\r
3170 return GL_UNSIGNED_INT_SAMPLER_2D_ARRAY;
\r
3171 case EbtSampler2DShadow:
\r
3172 return GL_SAMPLER_2D_SHADOW;
\r
3173 case EbtSamplerCubeShadow:
\r
3174 return GL_SAMPLER_CUBE_SHADOW;
\r
3175 case EbtSampler2DArrayShadow:
\r
3176 return GL_SAMPLER_2D_ARRAY_SHADOW;
\r
3178 UNREACHABLE(type.getBasicType());
\r
3185 GLenum OutputASM::glVariablePrecision(const TType &type)
\r
3187 if(type.getBasicType() == EbtFloat)
\r
3189 switch(type.getPrecision())
\r
3191 case EbpHigh: return GL_HIGH_FLOAT;
\r
3192 case EbpMedium: return GL_MEDIUM_FLOAT;
\r
3193 case EbpLow: return GL_LOW_FLOAT;
\r
3194 case EbpUndefined:
\r
3195 // Should be defined as the default precision by the parser
\r
3196 default: UNREACHABLE(type.getPrecision());
\r
3199 else if(type.getBasicType() == EbtInt)
\r
3201 switch(type.getPrecision())
\r
3203 case EbpHigh: return GL_HIGH_INT;
\r
3204 case EbpMedium: return GL_MEDIUM_INT;
\r
3205 case EbpLow: return GL_LOW_INT;
\r
3206 case EbpUndefined:
\r
3207 // Should be defined as the default precision by the parser
\r
3208 default: UNREACHABLE(type.getPrecision());
\r
3212 // Other types (boolean, sampler) don't have a precision
\r
3216 int OutputASM::dim(TIntermNode *v)
\r
3218 TIntermTyped *vector = v->getAsTyped();
\r
3219 ASSERT(vector && vector->isRegister());
\r
3220 return vector->getNominalSize();
\r
3223 int OutputASM::dim2(TIntermNode *m)
\r
3225 TIntermTyped *matrix = m->getAsTyped();
\r
3226 ASSERT(matrix && matrix->isMatrix() && !matrix->isArray());
\r
3227 return matrix->getSecondarySize();
\r
3230 // Returns ~0u if no loop count could be determined
// NOTE(review): braces, several guard lines and some statements (e.g. the
// index binding around original line 3264, the <= to < limit adjustment after
// 3338, and the clamping of non-positive iteration counts after 3346) are
// missing from this capture. Only comments are added; surviving code is
// untouched.
3231 unsigned int OutputASM::loopCount(TIntermLoop *node)

3233 // Parse loops of the form:

3234 // for(int index = initial; index [comparator] limit; index += increment)

3235 TIntermSymbol *index = 0;

3236 TOperator comparator = EOpNull;

3239 int increment = 0;

3241 // Parse index name and intial value

3242 if(node->getInit())

3244 TIntermAggregate *init = node->getInit()->getAsAggregate();

3248 TIntermSequence &sequence = init->getSequence();

3249 TIntermTyped *variable = sequence[0]->getAsTyped();

3251 if(variable && variable->getQualifier() == EvqTemporary)

3253 TIntermBinary *assign = variable->getAsBinaryNode();

3255 if(assign->getOp() == EOpInitialize)

3257 TIntermSymbol *symbol = assign->getLeft()->getAsSymbolNode();

3258 TIntermConstantUnion *constant = assign->getRight()->getAsConstantUnion();

3260 if(symbol && constant)

// Only a scalar integer initializer qualifies.
3262 if(constant->getBasicType() == EbtInt && constant->getNominalSize() == 1)

3265 initial = constant->getUnionArrayPointer()[0].getIConst();

3273 // Parse comparator and limit value

3274 if(index && node->getCondition())

3276 TIntermBinary *test = node->getCondition()->getAsBinaryNode();

// The condition must compare the same loop index against a constant.
3278 if(test && test->getLeft()->getAsSymbolNode()->getId() == index->getId())

3280 TIntermConstantUnion *constant = test->getRight()->getAsConstantUnion();

3284 if(constant->getBasicType() == EbtInt && constant->getNominalSize() == 1)

3286 comparator = test->getOp();

3287 limit = constant->getUnionArrayPointer()[0].getIConst();

3293 // Parse increment

3294 if(index && comparator != EOpNull && node->getExpression())

3296 TIntermBinary *binaryTerminal = node->getExpression()->getAsBinaryNode();

3297 TIntermUnary *unaryTerminal = node->getExpression()->getAsUnaryNode();

// index += c / index -= c forms.
3299 if(binaryTerminal)

3301 TOperator op = binaryTerminal->getOp();

3302 TIntermConstantUnion *constant = binaryTerminal->getRight()->getAsConstantUnion();

3306 if(constant->getBasicType() == EbtInt && constant->getNominalSize() == 1)

3308 int value = constant->getUnionArrayPointer()[0].getIConst();

3312 case EOpAddAssign: increment = value; break;

3313 case EOpSubAssign: increment = -value; break;

3314 default: UNIMPLEMENTED();

// index++ / index-- forms (pre or post).
3319 else if(unaryTerminal)

3321 TOperator op = unaryTerminal->getOp();

3325 case EOpPostIncrement: increment = 1; break;

3326 case EOpPostDecrement: increment = -1; break;

3327 case EOpPreIncrement: increment = 1; break;

3328 case EOpPreDecrement: increment = -1; break;

3329 default: UNIMPLEMENTED();

// With index, comparator and a non-zero increment known, compute the count.
3334 if(index && comparator != EOpNull && increment != 0)

// Normalize <= to < (the limit adjustment line is lost from this capture).
3336 if(comparator == EOpLessThanEqual)

3338 comparator = EOpLessThan;

3342 if(comparator == EOpLessThan)

3344 int iterations = (limit - initial) / increment;

3346 if(iterations <= 0)

3351 return iterations;

3353 else UNIMPLEMENTED(); // Falls through
\r
// Walks the subtree rooted at 'node' and reports whether it contains a
// control-flow discontinuity inside a loop (see visitBranch).
// NOTE(review): one statement before the flag reset (original line 3361,
// presumably resetting a loop-depth counter) is missing from this capture.
// Only comments are added; surviving code is untouched.
3359 bool DetectLoopDiscontinuity::traverse(TIntermNode *node)

3362 loopDiscontinuity = false;

3364 node->traverse(this);

3366 return loopDiscontinuity;
\r
// Tracks loop nesting while traversing: entered on PreVisit, left on
// PostVisit.
// NOTE(review): the statements inside both branches (original 3372-3381,
// presumably incrementing/decrementing a loop-depth counter and returning
// true) are missing from this capture. Only comments are added; surviving
// code is untouched.
3369 bool DetectLoopDiscontinuity::visitLoop(Visit visit, TIntermLoop *loop)

3371 if(visit == PreVisit)

3375 else if(visit == PostVisit)
\r
// Flags branch statements (the case labels are lost from this capture —
// presumably break/continue/return inside a loop) as loop discontinuities.
// Returns false to stop traversal once a discontinuity is found.
// NOTE(review): braces, early-return bodies and the switch case labels
// (original 3386-3401) are missing; only comments are added, surviving code
// is untouched.
3383 bool DetectLoopDiscontinuity::visitBranch(Visit visit, TIntermBranch *node)

// Already found one: no need to keep looking.
3385 if(loopDiscontinuity)

3395 switch(node->getFlowOp())

3402 loopDiscontinuity = true;

3404 default: UNREACHABLE(node->getFlowOp());

3407 return !loopDiscontinuity;
\r
3410 bool DetectLoopDiscontinuity::visitAggregate(Visit visit, TIntermAggregate *node)
\r
3412 return !loopDiscontinuity;
\r