1 // SwiftShader Software Renderer
\r
3 // Copyright(c) 2005-2013 TransGaming Inc.
\r
5 // All rights reserved. No part of this software may be copied, distributed, transmitted,
\r
6 // transcribed, stored in a retrieval system, translated into any human or computer
\r
7 // language by any means, or disclosed to third parties without the explicit written
\r
8 // agreement of TransGaming Inc. Without such an agreement, no rights or licenses, express
\r
9 // or implied, including but not limited to any patent rights, are granted to you.
\r
12 #include "OutputASM.h"
\r
13 #include "Common/Math.hpp"
\r
15 #include "common/debug.h"
\r
16 #include "InfoSink.h"
\r
18 #include "libGLESv2/Shader.h"
\r
20 #include <GLES2/gl2.h>
\r
21 #include <GLES2/gl2ext.h>
\r
22 #include <GLES3/gl3.h>
\r
26 // Integer to TString conversion
\r
30 sprintf(buffer, "%d", i);
\r
34 class Temporary : public TIntermSymbol
\r
37 Temporary(OutputASM *assembler) : TIntermSymbol(TSymbolTableLevel::nextUniqueId(), "tmp", TType(EbtFloat, EbpHigh, EvqTemporary, 4, 1, false)), assembler(assembler)
\r
43 assembler->freeTemporary(this);
\r
47 OutputASM *const assembler;
\r
50 class Constant : public TIntermConstantUnion
\r
53 Constant(float x, float y, float z, float w) : TIntermConstantUnion(constants, TType(EbtFloat, EbpHigh, EvqConstExpr, 4, 1, false))
\r
55 constants[0].setFConst(x);
\r
56 constants[1].setFConst(y);
\r
57 constants[2].setFConst(z);
\r
58 constants[3].setFConst(w);
\r
61 Constant(bool b) : TIntermConstantUnion(constants, TType(EbtBool, EbpHigh, EvqConstExpr, 1, 1, false))
\r
63 constants[0].setBConst(b);
\r
66 Constant(int i) : TIntermConstantUnion(constants, TType(EbtInt, EbpHigh, EvqConstExpr, 1, 1, false))
\r
68 constants[0].setIConst(i);
\r
76 ConstantUnion constants[4];
\r
79 Uniform::Uniform(GLenum type, GLenum precision, const std::string &name, int arraySize, int registerIndex, int blockId, const BlockMemberInfo& blockMemberInfo) :
\r
80 type(type), precision(precision), name(name), arraySize(arraySize), registerIndex(registerIndex), blockId(blockId), blockInfo(blockMemberInfo)
\r
84 UniformBlock::UniformBlock(const std::string& name, unsigned int dataSize, unsigned int arraySize,
\r
85 TLayoutBlockStorage layout, bool isRowMajorLayout, int registerIndex, int blockId) :
\r
86 name(name), dataSize(dataSize), arraySize(arraySize), layout(layout),
\r
87 isRowMajorLayout(isRowMajorLayout), registerIndex(registerIndex), blockId(blockId)
\r
91 BlockLayoutEncoder::BlockLayoutEncoder(bool rowMajor)
\r
92 : mCurrentOffset(0), isRowMajor(rowMajor)
\r
96 BlockMemberInfo BlockLayoutEncoder::encodeType(const TType &type)
\r
101 getBlockLayoutInfo(type, type.getArraySize(), isRowMajor, &arrayStride, &matrixStride);
\r
103 const BlockMemberInfo memberInfo(static_cast<int>(mCurrentOffset * BytesPerComponent),
\r
104 static_cast<int>(arrayStride * BytesPerComponent),
\r
105 static_cast<int>(matrixStride * BytesPerComponent),
\r
106 (matrixStride > 0) && isRowMajor);
\r
108 advanceOffset(type, type.getArraySize(), isRowMajor, arrayStride, matrixStride);
\r
114 size_t BlockLayoutEncoder::getBlockRegister(const BlockMemberInfo &info)
\r
116 return (info.offset / BytesPerComponent) / ComponentsPerRegister;
\r
120 size_t BlockLayoutEncoder::getBlockRegisterElement(const BlockMemberInfo &info)
\r
122 return (info.offset / BytesPerComponent) % ComponentsPerRegister;
\r
125 void BlockLayoutEncoder::nextRegister()
\r
127 mCurrentOffset = sw::align(mCurrentOffset, ComponentsPerRegister);
\r
130 Std140BlockEncoder::Std140BlockEncoder(bool rowMajor) : BlockLayoutEncoder(rowMajor)
\r
134 void Std140BlockEncoder::enterAggregateType()
\r
139 void Std140BlockEncoder::exitAggregateType()
\r
144 void Std140BlockEncoder::getBlockLayoutInfo(const TType &type, unsigned int arraySize, bool isRowMajorMatrix, int *arrayStrideOut, int *matrixStrideOut)
\r
146 size_t baseAlignment = 0;
\r
147 int matrixStride = 0;
\r
148 int arrayStride = 0;
\r
150 if(type.isMatrix())
\r
152 baseAlignment = ComponentsPerRegister;
\r
153 matrixStride = ComponentsPerRegister;
\r
157 const int numRegisters = isRowMajorMatrix ? type.getSecondarySize() : type.getNominalSize();
\r
158 arrayStride = ComponentsPerRegister * numRegisters;
\r
161 else if(arraySize > 0)
\r
163 baseAlignment = ComponentsPerRegister;
\r
164 arrayStride = ComponentsPerRegister;
\r
168 const int numComponents = type.getElementSize();
\r
169 baseAlignment = (numComponents == 3 ? 4u : static_cast<size_t>(numComponents));
\r
172 mCurrentOffset = sw::align(mCurrentOffset, baseAlignment);
\r
174 *matrixStrideOut = matrixStride;
\r
175 *arrayStrideOut = arrayStride;
\r
178 void Std140BlockEncoder::advanceOffset(const TType &type, unsigned int arraySize, bool isRowMajorMatrix, int arrayStride, int matrixStride)
\r
182 mCurrentOffset += arrayStride * arraySize;
\r
184 else if(type.isMatrix())
\r
186 ASSERT(matrixStride == ComponentsPerRegister);
\r
187 const int numRegisters = isRowMajorMatrix ? type.getSecondarySize() : type.getNominalSize();
\r
188 mCurrentOffset += ComponentsPerRegister * numRegisters;
\r
192 mCurrentOffset += type.getElementSize();
\r
196 Attribute::Attribute()
\r
203 Attribute::Attribute(GLenum type, const std::string &name, int arraySize, int location, int registerIndex)
\r
207 this->arraySize = arraySize;
\r
208 this->location = location;
\r
209 this->registerIndex = registerIndex;
\r
212 sw::PixelShader *Shader::getPixelShader() const
\r
217 sw::VertexShader *Shader::getVertexShader() const
\r
222 OutputASM::TextureFunction::TextureFunction(const TString& nodeName) : method(IMPLICIT), proj(false), offset(false)
\r
224 TString name = TFunction::unmangleName(nodeName);
\r
226 if(name == "texture2D" || name == "textureCube" || name == "texture" || name == "texture3D")
\r
230 else if(name == "texture2DProj" || name == "textureProj")
\r
235 else if(name == "texture2DLod" || name == "textureCubeLod" || name == "textureLod")
\r
239 else if(name == "texture2DProjLod" || name == "textureProjLod")
\r
244 else if(name == "textureSize")
\r
248 else if(name == "textureOffset")
\r
253 else if(name == "textureProjOffset")
\r
259 else if(name == "textureLodOffset")
\r
264 else if(name == "textureProjLodOffset")
\r
270 else if(name == "texelFetch")
\r
274 else if(name == "texelFetchOffset")
\r
279 else if(name == "textureGrad")
\r
283 else if(name == "textureGradOffset")
\r
288 else if(name == "textureProjGrad")
\r
293 else if(name == "textureProjGradOffset")
\r
299 else UNREACHABLE(0);
\r
302 OutputASM::OutputASM(TParseContext &context, Shader *shaderObject) : TIntermTraverser(true, true, true), shaderObject(shaderObject), mContext(context)
\r
310 shader = shaderObject->getShader();
\r
311 pixelShader = shaderObject->getPixelShader();
\r
312 vertexShader = shaderObject->getVertexShader();
\r
315 functionArray.push_back(Function(0, "main(", 0, 0));
\r
316 currentFunction = 0;
\r
317 outputQualifier = EvqOutput; // Set outputQualifier to any value other than EvqFragColor or EvqFragData
\r
320 OutputASM::~OutputASM()
\r
324 void OutputASM::output()
\r
328 emitShader(GLOBAL);
\r
330 if(functionArray.size() > 1) // Only call main() when there are other functions
\r
332 Instruction *callMain = emit(sw::Shader::OPCODE_CALL);
\r
333 callMain->dst.type = sw::Shader::PARAMETER_LABEL;
\r
334 callMain->dst.index = 0; // main()
\r
336 emit(sw::Shader::OPCODE_RET);
\r
339 emitShader(FUNCTION);
\r
343 void OutputASM::emitShader(Scope scope)
\r
346 currentScope = GLOBAL;
\r
347 mContext.getTreeRoot()->traverse(this);
\r
350 void OutputASM::freeTemporary(Temporary *temporary)
\r
352 free(temporaries, temporary);
\r
355 sw::Shader::Opcode OutputASM::getOpcode(sw::Shader::Opcode op, TIntermTyped *in) const
\r
357 TBasicType baseType = in->getType().getBasicType();
\r
361 case sw::Shader::OPCODE_NEG:
\r
366 return sw::Shader::OPCODE_INEG;
\r
371 case sw::Shader::OPCODE_ABS:
\r
375 return sw::Shader::OPCODE_IABS;
\r
380 case sw::Shader::OPCODE_SGN:
\r
384 return sw::Shader::OPCODE_ISGN;
\r
389 case sw::Shader::OPCODE_ADD:
\r
394 return sw::Shader::OPCODE_IADD;
\r
399 case sw::Shader::OPCODE_SUB:
\r
404 return sw::Shader::OPCODE_ISUB;
\r
409 case sw::Shader::OPCODE_MUL:
\r
414 return sw::Shader::OPCODE_IMUL;
\r
419 case sw::Shader::OPCODE_DIV:
\r
423 return sw::Shader::OPCODE_IDIV;
\r
425 return sw::Shader::OPCODE_UDIV;
\r
430 case sw::Shader::OPCODE_IMOD:
\r
431 return baseType == EbtUInt ? sw::Shader::OPCODE_UMOD : op;
\r
432 case sw::Shader::OPCODE_ISHR:
\r
433 return baseType == EbtUInt ? sw::Shader::OPCODE_USHR : op;
\r
434 case sw::Shader::OPCODE_MIN:
\r
438 return sw::Shader::OPCODE_IMIN;
\r
440 return sw::Shader::OPCODE_UMIN;
\r
445 case sw::Shader::OPCODE_MAX:
\r
449 return sw::Shader::OPCODE_IMAX;
\r
451 return sw::Shader::OPCODE_UMAX;
\r
461 void OutputASM::visitSymbol(TIntermSymbol *symbol)
\r
463 // Vertex varyings don't have to be actively used to successfully link
\r
464 // against pixel shaders that use them. So make sure they're declared.
\r
465 if(symbol->getQualifier() == EvqVaryingOut || symbol->getQualifier() == EvqInvariantVaryingOut || symbol->getQualifier() == EvqVertexOut)
\r
467 if(symbol->getBasicType() != EbtInvariant) // Typeless declarations are not new varyings
\r
469 declareVarying(symbol, -1);
\r
473 TInterfaceBlock* block = symbol->getType().getInterfaceBlock();
\r
474 // OpenGL ES 3.0.4 spec, section 2.12.6 Uniform Variables:
\r
475 // "All members of a named uniform block declared with a shared or std140 layout qualifier
\r
476 // are considered active, even if they are not referenced in any shader in the program.
\r
477 // The uniform block itself is also considered active, even if no member of the block is referenced."
\r
478 if(block && ((block->blockStorage() == EbsShared) || (block->blockStorage() == EbsStd140)))
\r
480 uniformRegister(symbol);
\r
484 bool OutputASM::visitBinary(Visit visit, TIntermBinary *node)
\r
486 if(currentScope != emitScope)
\r
491 TIntermTyped *result = node;
\r
492 TIntermTyped *left = node->getLeft();
\r
493 TIntermTyped *right = node->getRight();
\r
494 const TType &leftType = left->getType();
\r
495 const TType &rightType = right->getType();
\r
496 const TType &resultType = node->getType();
\r
498 switch(node->getOp())
\r
501 if(visit == PostVisit)
\r
503 assignLvalue(left, right);
\r
504 copy(result, right);
\r
507 case EOpInitialize:
\r
508 if(visit == PostVisit)
\r
513 case EOpMatrixTimesScalarAssign:
\r
514 if(visit == PostVisit)
\r
516 for(int i = 0; i < leftType.getNominalSize(); i++)
\r
518 emit(sw::Shader::OPCODE_MUL, result, i, left, i, right);
\r
521 assignLvalue(left, result);
\r
524 case EOpVectorTimesMatrixAssign:
\r
525 if(visit == PostVisit)
\r
527 int size = leftType.getNominalSize();
\r
529 for(int i = 0; i < size; i++)
\r
531 Instruction *dot = emit(sw::Shader::OPCODE_DP(size), result, 0, left, 0, right, i);
\r
532 dot->dst.mask = 1 << i;
\r
535 assignLvalue(left, result);
\r
538 case EOpMatrixTimesMatrixAssign:
\r
539 if(visit == PostVisit)
\r
541 int dim = leftType.getNominalSize();
\r
543 for(int i = 0; i < dim; i++)
\r
545 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, i, left, 0, right, i);
\r
546 mul->src[1].swizzle = 0x00;
\r
548 for(int j = 1; j < dim; j++)
\r
550 Instruction *mad = emit(sw::Shader::OPCODE_MAD, result, i, left, j, right, i, result, i);
\r
551 mad->src[1].swizzle = j * 0x55;
\r
555 assignLvalue(left, result);
\r
558 case EOpIndexDirect:
\r
559 if(visit == PostVisit)
\r
561 int index = right->getAsConstantUnion()->getIConst(0);
\r
563 if(result->isMatrix() || result->isStruct() || result->isInterfaceBlock())
\r
565 ASSERT(left->isArray());
\r
566 copy(result, left, index * left->elementRegisterCount());
\r
568 else if(result->isRegister())
\r
571 if(left->isRegister())
\r
575 else if(left->isArray())
\r
577 srcIndex = index * left->elementRegisterCount();
\r
579 else if(left->isMatrix())
\r
581 ASSERT(index < left->getNominalSize()); // FIXME: Report semantic error
\r
584 else UNREACHABLE(0);
\r
586 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, 0, left, srcIndex);
\r
588 if(left->isRegister())
\r
590 mov->src[0].swizzle = index;
\r
593 else UNREACHABLE(0);
\r
596 case EOpIndexIndirect:
\r
597 if(visit == PostVisit)
\r
599 if(left->isArray() || left->isMatrix())
\r
601 for(int index = 0; index < result->totalRegisterCount(); index++)
\r
603 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, index, left, index);
\r
604 mov->dst.mask = writeMask(result, index);
\r
606 if(left->totalRegisterCount() > 1)
\r
608 sw::Shader::SourceParameter relativeRegister;
\r
609 argument(relativeRegister, right);
\r
611 mov->src[0].rel.type = relativeRegister.type;
\r
612 mov->src[0].rel.index = relativeRegister.index;
\r
613 mov->src[0].rel.scale = result->totalRegisterCount();
\r
614 mov->src[0].rel.deterministic = !(vertexShader && left->getQualifier() == EvqUniform);
\r
618 else if(left->isRegister())
\r
620 emit(sw::Shader::OPCODE_EXTRACT, result, left, right);
\r
622 else UNREACHABLE(0);
\r
625 case EOpIndexDirectStruct:
\r
626 case EOpIndexDirectInterfaceBlock:
\r
627 if(visit == PostVisit)
\r
629 ASSERT(leftType.isStruct() || (leftType.isInterfaceBlock()));
\r
631 const TFieldList& fields = (node->getOp() == EOpIndexDirectStruct) ?
\r
632 leftType.getStruct()->fields() :
\r
633 leftType.getInterfaceBlock()->fields();
\r
634 int index = right->getAsConstantUnion()->getIConst(0);
\r
635 int fieldOffset = 0;
\r
637 for(int i = 0; i < index; i++)
\r
639 fieldOffset += fields[i]->type()->totalRegisterCount();
\r
642 copy(result, left, fieldOffset);
\r
645 case EOpVectorSwizzle:
\r
646 if(visit == PostVisit)
\r
649 TIntermAggregate *components = right->getAsAggregate();
\r
653 TIntermSequence &sequence = components->getSequence();
\r
656 for(TIntermSequence::iterator sit = sequence.begin(); sit != sequence.end(); sit++)
\r
658 TIntermConstantUnion *element = (*sit)->getAsConstantUnion();
\r
662 int i = element->getUnionArrayPointer()[0].getIConst();
\r
663 swizzle |= i << (component * 2);
\r
666 else UNREACHABLE(0);
\r
669 else UNREACHABLE(0);
\r
671 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, left);
\r
672 mov->src[0].swizzle = swizzle;
\r
675 case EOpAddAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_ADD, result), result, left, left, right); break;
\r
676 case EOpAdd: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_ADD, result), result, left, right); break;
\r
677 case EOpSubAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_SUB, result), result, left, left, right); break;
\r
678 case EOpSub: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_SUB, result), result, left, right); break;
\r
679 case EOpMulAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_MUL, result), result, left, left, right); break;
\r
680 case EOpMul: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_MUL, result), result, left, right); break;
\r
681 case EOpDivAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_DIV, result), result, left, left, right); break;
\r
682 case EOpDiv: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_DIV, result), result, left, right); break;
\r
683 case EOpIModAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_IMOD, result), result, left, left, right); break;
\r
684 case EOpIMod: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_IMOD, result), result, left, right); break;
\r
685 case EOpBitShiftLeftAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_SHL, result, left, left, right); break;
\r
686 case EOpBitShiftLeft: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_SHL, result, left, right); break;
\r
687 case EOpBitShiftRightAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_ISHR, result), result, left, left, right); break;
\r
688 case EOpBitShiftRight: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_ISHR, result), result, left, right); break;
\r
689 case EOpBitwiseAndAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_AND, result, left, left, right); break;
\r
690 case EOpBitwiseAnd: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_AND, result, left, right); break;
\r
691 case EOpBitwiseXorAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_XOR, result, left, left, right); break;
\r
692 case EOpBitwiseXor: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_XOR, result, left, right); break;
\r
693 case EOpBitwiseOrAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_OR, result, left, left, right); break;
\r
694 case EOpBitwiseOr: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_OR, result, left, right); break;
\r
696 if(visit == PostVisit)
\r
698 emitBinary(sw::Shader::OPCODE_EQ, result, left, right);
\r
700 for(int index = 1; index < left->totalRegisterCount(); index++)
\r
702 Temporary equal(this);
\r
703 emit(sw::Shader::OPCODE_EQ, &equal, 0, left, index, right, index);
\r
704 emit(sw::Shader::OPCODE_AND, result, result, &equal);
\r
709 if(visit == PostVisit)
\r
711 emitBinary(sw::Shader::OPCODE_NE, result, left, right);
\r
713 for(int index = 1; index < left->totalRegisterCount(); index++)
\r
715 Temporary notEqual(this);
\r
716 emit(sw::Shader::OPCODE_NE, ¬Equal, 0, left, index, right, index);
\r
717 emit(sw::Shader::OPCODE_OR, result, result, ¬Equal);
\r
721 case EOpLessThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LT, result, left, right); break;
\r
722 case EOpGreaterThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GT, result, left, right); break;
\r
723 case EOpLessThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LE, result, left, right); break;
\r
724 case EOpGreaterThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GE, result, left, right); break;
\r
725 case EOpVectorTimesScalarAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_MUL, left), result, left, left, right); break;
\r
726 case EOpVectorTimesScalar: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_MUL, left), result, left, right); break;
\r
727 case EOpMatrixTimesScalar:
\r
728 if(visit == PostVisit)
\r
730 if(left->isMatrix())
\r
732 for(int i = 0; i < leftType.getNominalSize(); i++)
\r
734 emit(sw::Shader::OPCODE_MUL, result, i, left, i, right, 0);
\r
737 else if(right->isMatrix())
\r
739 for(int i = 0; i < rightType.getNominalSize(); i++)
\r
741 emit(sw::Shader::OPCODE_MUL, result, i, left, 0, right, i);
\r
744 else UNREACHABLE(0);
\r
747 case EOpVectorTimesMatrix:
\r
748 if(visit == PostVisit)
\r
750 sw::Shader::Opcode dpOpcode = sw::Shader::OPCODE_DP(leftType.getNominalSize());
\r
752 int size = rightType.getNominalSize();
\r
753 for(int i = 0; i < size; i++)
\r
755 Instruction *dot = emit(dpOpcode, result, 0, left, 0, right, i);
\r
756 dot->dst.mask = 1 << i;
\r
760 case EOpMatrixTimesVector:
\r
761 if(visit == PostVisit)
\r
763 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, left, right);
\r
764 mul->src[1].swizzle = 0x00;
\r
766 int size = rightType.getNominalSize();
\r
767 for(int i = 1; i < size; i++)
\r
769 Instruction *mad = emit(sw::Shader::OPCODE_MAD, result, 0, left, i, right, 0, result);
\r
770 mad->src[1].swizzle = i * 0x55;
\r
774 case EOpMatrixTimesMatrix:
\r
775 if(visit == PostVisit)
\r
777 int dim = leftType.getNominalSize();
\r
779 int size = rightType.getNominalSize();
\r
780 for(int i = 0; i < size; i++)
\r
782 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, i, left, 0, right, i);
\r
783 mul->src[1].swizzle = 0x00;
\r
785 for(int j = 1; j < dim; j++)
\r
787 Instruction *mad = emit(sw::Shader::OPCODE_MAD, result, i, left, j, right, i, result, i);
\r
788 mad->src[1].swizzle = j * 0x55;
\r
794 if(trivial(right, 6))
\r
796 if(visit == PostVisit)
\r
798 emit(sw::Shader::OPCODE_OR, result, left, right);
\r
801 else // Short-circuit evaluation
\r
803 if(visit == InVisit)
\r
805 emit(sw::Shader::OPCODE_MOV, result, left);
\r
806 Instruction *ifnot = emit(sw::Shader::OPCODE_IF, 0, result);
\r
807 ifnot->src[0].modifier = sw::Shader::MODIFIER_NOT;
\r
809 else if(visit == PostVisit)
\r
811 emit(sw::Shader::OPCODE_MOV, result, right);
\r
812 emit(sw::Shader::OPCODE_ENDIF);
\r
816 case EOpLogicalXor: if(visit == PostVisit) emit(sw::Shader::OPCODE_XOR, result, left, right); break;
\r
817 case EOpLogicalAnd:
\r
818 if(trivial(right, 6))
\r
820 if(visit == PostVisit)
\r
822 emit(sw::Shader::OPCODE_AND, result, left, right);
\r
825 else // Short-circuit evaluation
\r
827 if(visit == InVisit)
\r
829 emit(sw::Shader::OPCODE_MOV, result, left);
\r
830 emit(sw::Shader::OPCODE_IF, 0, result);
\r
832 else if(visit == PostVisit)
\r
834 emit(sw::Shader::OPCODE_MOV, result, right);
\r
835 emit(sw::Shader::OPCODE_ENDIF);
\r
839 default: UNREACHABLE(node->getOp());
\r
845 void OutputASM::emitDeterminant(TIntermTyped *result, TIntermTyped *arg, int size, int col, int row, int outCol, int outRow)
\r
849 case 1: // Used for cofactor computation only
\r
851 // For a 2x2 matrix, the cofactor is simply a transposed move or negate
\r
852 bool isMov = (row == col);
\r
853 sw::Shader::Opcode op = isMov ? sw::Shader::OPCODE_MOV : sw::Shader::OPCODE_NEG;
\r
854 Instruction *mov = emit(op, result, outCol, arg, isMov ? 1 - row : row);
\r
855 mov->src[0].swizzle = 0x55 * (isMov ? 1 - col : col);
\r
856 mov->dst.mask = 1 << outRow;
\r
861 static const unsigned int swizzle[3] = { 0x99, 0x88, 0x44 }; // xy?? : yzyz, xzxz, xyxy
\r
863 bool isCofactor = (col >= 0) && (row >= 0);
\r
864 int col0 = (isCofactor && (col <= 0)) ? 1 : 0;
\r
865 int col1 = (isCofactor && (col <= 1)) ? 2 : 1;
\r
866 bool negate = isCofactor && ((col & 0x01) ^ (row & 0x01));
\r
868 Instruction *det = emit(sw::Shader::OPCODE_DET2, result, outCol, arg, negate ? col1 : col0, arg, negate ? col0 : col1);
\r
869 det->src[0].swizzle = det->src[1].swizzle = swizzle[isCofactor ? row : 2];
\r
870 det->dst.mask = 1 << outRow;
\r
875 static const unsigned int swizzle[4] = { 0xF9, 0xF8, 0xF4, 0xE4 }; // xyz? : yzww, xzww, xyww, xyzw
\r
877 bool isCofactor = (col >= 0) && (row >= 0);
\r
878 int col0 = (isCofactor && (col <= 0)) ? 1 : 0;
\r
879 int col1 = (isCofactor && (col <= 1)) ? 2 : 1;
\r
880 int col2 = (isCofactor && (col <= 2)) ? 3 : 2;
\r
881 bool negate = isCofactor && ((col & 0x01) ^ (row & 0x01));
\r
883 Instruction *det = emit(sw::Shader::OPCODE_DET3, result, outCol, arg, col0, arg, negate ? col2 : col1, arg, negate ? col1 : col2);
\r
884 det->src[0].swizzle = det->src[1].swizzle = det->src[2].swizzle = swizzle[isCofactor ? row : 3];
\r
885 det->dst.mask = 1 << outRow;
\r
890 Instruction *det = emit(sw::Shader::OPCODE_DET4, result, outCol, arg, 0, arg, 1, arg, 2, arg, 3);
\r
891 det->dst.mask = 1 << outRow;
\r
900 bool OutputASM::visitUnary(Visit visit, TIntermUnary *node)
\r
902 if(currentScope != emitScope)
\r
907 TIntermTyped *result = node;
\r
908 TIntermTyped *arg = node->getOperand();
\r
909 TBasicType basicType = arg->getType().getBasicType();
\r
917 if(basicType == EbtInt || basicType == EbtUInt)
\r
923 one_value.f = 1.0f;
\r
926 Constant one(one_value.f, one_value.f, one_value.f, one_value.f);
\r
927 Constant rad(1.74532925e-2f, 1.74532925e-2f, 1.74532925e-2f, 1.74532925e-2f);
\r
928 Constant deg(5.72957795e+1f, 5.72957795e+1f, 5.72957795e+1f, 5.72957795e+1f);
\r
930 switch(node->getOp())
\r
933 if(visit == PostVisit)
\r
935 sw::Shader::Opcode negOpcode = getOpcode(sw::Shader::OPCODE_NEG, arg);
\r
936 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
938 emit(negOpcode, result, index, arg, index);
\r
942 case EOpVectorLogicalNot: if(visit == PostVisit) emit(sw::Shader::OPCODE_NOT, result, arg); break;
\r
943 case EOpLogicalNot: if(visit == PostVisit) emit(sw::Shader::OPCODE_NOT, result, arg); break;
\r
944 case EOpPostIncrement:
\r
945 if(visit == PostVisit)
\r
949 sw::Shader::Opcode addOpcode = getOpcode(sw::Shader::OPCODE_ADD, arg);
\r
950 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
952 emit(addOpcode, arg, index, arg, index, &one);
\r
955 assignLvalue(arg, arg);
\r
958 case EOpPostDecrement:
\r
959 if(visit == PostVisit)
\r
963 sw::Shader::Opcode subOpcode = getOpcode(sw::Shader::OPCODE_SUB, arg);
\r
964 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
966 emit(subOpcode, arg, index, arg, index, &one);
\r
969 assignLvalue(arg, arg);
\r
972 case EOpPreIncrement:
\r
973 if(visit == PostVisit)
\r
975 sw::Shader::Opcode addOpcode = getOpcode(sw::Shader::OPCODE_ADD, arg);
\r
976 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
978 emit(addOpcode, result, index, arg, index, &one);
\r
981 assignLvalue(arg, result);
\r
984 case EOpPreDecrement:
\r
985 if(visit == PostVisit)
\r
987 sw::Shader::Opcode subOpcode = getOpcode(sw::Shader::OPCODE_SUB, arg);
\r
988 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
990 emit(subOpcode, result, index, arg, index, &one);
\r
993 assignLvalue(arg, result);
\r
996 case EOpRadians: if(visit == PostVisit) emit(sw::Shader::OPCODE_MUL, result, arg, &rad); break;
\r
997 case EOpDegrees: if(visit == PostVisit) emit(sw::Shader::OPCODE_MUL, result, arg, °); break;
\r
998 case EOpSin: if(visit == PostVisit) emit(sw::Shader::OPCODE_SIN, result, arg); break;
\r
999 case EOpCos: if(visit == PostVisit) emit(sw::Shader::OPCODE_COS, result, arg); break;
\r
1000 case EOpTan: if(visit == PostVisit) emit(sw::Shader::OPCODE_TAN, result, arg); break;
\r
1001 case EOpAsin: if(visit == PostVisit) emit(sw::Shader::OPCODE_ASIN, result, arg); break;
\r
1002 case EOpAcos: if(visit == PostVisit) emit(sw::Shader::OPCODE_ACOS, result, arg); break;
\r
1003 case EOpAtan: if(visit == PostVisit) emit(sw::Shader::OPCODE_ATAN, result, arg); break;
\r
1004 case EOpSinh: if(visit == PostVisit) emit(sw::Shader::OPCODE_SINH, result, arg); break;
\r
1005 case EOpCosh: if(visit == PostVisit) emit(sw::Shader::OPCODE_COSH, result, arg); break;
\r
1006 case EOpTanh: if(visit == PostVisit) emit(sw::Shader::OPCODE_TANH, result, arg); break;
\r
1007 case EOpAsinh: if(visit == PostVisit) emit(sw::Shader::OPCODE_ASINH, result, arg); break;
\r
1008 case EOpAcosh: if(visit == PostVisit) emit(sw::Shader::OPCODE_ACOSH, result, arg); break;
\r
1009 case EOpAtanh: if(visit == PostVisit) emit(sw::Shader::OPCODE_ATANH, result, arg); break;
\r
1010 case EOpExp: if(visit == PostVisit) emit(sw::Shader::OPCODE_EXP, result, arg); break;
\r
1011 case EOpLog: if(visit == PostVisit) emit(sw::Shader::OPCODE_LOG, result, arg); break;
\r
1012 case EOpExp2: if(visit == PostVisit) emit(sw::Shader::OPCODE_EXP2, result, arg); break;
\r
1013 case EOpLog2: if(visit == PostVisit) emit(sw::Shader::OPCODE_LOG2, result, arg); break;
\r
1014 case EOpSqrt: if(visit == PostVisit) emit(sw::Shader::OPCODE_SQRT, result, arg); break;
\r
1015 case EOpInverseSqrt: if(visit == PostVisit) emit(sw::Shader::OPCODE_RSQ, result, arg); break;
\r
1016 case EOpAbs: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_ABS, result), result, arg); break;
\r
1017 case EOpSign: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_SGN, result), result, arg); break;
\r
1018 case EOpFloor: if(visit == PostVisit) emit(sw::Shader::OPCODE_FLOOR, result, arg); break;
\r
1019 case EOpTrunc: if(visit == PostVisit) emit(sw::Shader::OPCODE_TRUNC, result, arg); break;
\r
1020 case EOpRound: if(visit == PostVisit) emit(sw::Shader::OPCODE_ROUND, result, arg); break;
\r
1021 case EOpRoundEven: if(visit == PostVisit) emit(sw::Shader::OPCODE_ROUNDEVEN, result, arg); break;
\r
1022 case EOpCeil: if(visit == PostVisit) emit(sw::Shader::OPCODE_CEIL, result, arg, result); break;
\r
1023 case EOpFract: if(visit == PostVisit) emit(sw::Shader::OPCODE_FRC, result, arg); break;
\r
1024 case EOpIsNan: if(visit == PostVisit) emit(sw::Shader::OPCODE_ISNAN, result, arg); break;
\r
1025 case EOpIsInf: if(visit == PostVisit) emit(sw::Shader::OPCODE_ISINF, result, arg); break;
\r
1026 case EOpLength: if(visit == PostVisit) emit(sw::Shader::OPCODE_LEN(dim(arg)), result, arg); break;
\r
1027 case EOpNormalize: if(visit == PostVisit) emit(sw::Shader::OPCODE_NRM(dim(arg)), result, arg); break;
\r
1028 case EOpDFdx: if(visit == PostVisit) emit(sw::Shader::OPCODE_DFDX, result, arg); break;
\r
1029 case EOpDFdy: if(visit == PostVisit) emit(sw::Shader::OPCODE_DFDY, result, arg); break;
\r
1030 case EOpFwidth: if(visit == PostVisit) emit(sw::Shader::OPCODE_FWIDTH, result, arg); break;
\r
1031 case EOpAny: if(visit == PostVisit) emit(sw::Shader::OPCODE_ANY, result, arg); break;
\r
1032 case EOpAll: if(visit == PostVisit) emit(sw::Shader::OPCODE_ALL, result, arg); break;
\r
1033 case EOpFloatBitsToInt: if(visit == PostVisit) emit(sw::Shader::OPCODE_FLOATBITSTOINT, result, arg); break;
\r
1034 case EOpFloatBitsToUint: if(visit == PostVisit) emit(sw::Shader::OPCODE_FLOATBITSTOUINT, result, arg); break;
\r
1035 case EOpIntBitsToFloat: if(visit == PostVisit) emit(sw::Shader::OPCODE_INTBITSTOFLOAT, result, arg); break;
\r
1036 case EOpUintBitsToFloat: if(visit == PostVisit) emit(sw::Shader::OPCODE_UINTBITSTOFLOAT, result, arg); break;
\r
1037 case EOpPackSnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_PACKSNORM2x16, result, arg); break;
\r
1038 case EOpPackUnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_PACKUNORM2x16, result, arg); break;
\r
1039 case EOpPackHalf2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_PACKHALF2x16, result, arg); break;
\r
1040 case EOpUnpackSnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_UNPACKSNORM2x16, result, arg); break;
\r
1041 case EOpUnpackUnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_UNPACKUNORM2x16, result, arg); break;
\r
1042 case EOpUnpackHalf2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_UNPACKHALF2x16, result, arg); break;
\r
1043 case EOpTranspose:
\r
1044 if(visit == PostVisit)
\r
1046 int numCols = arg->getNominalSize();
\r
1047 int numRows = arg->getSecondarySize();
\r
1048 for(int i = 0; i < numCols; ++i)
\r
1050 for(int j = 0; j < numRows; ++j)
\r
1052 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, j, arg, i);
\r
1053 mov->src[0].swizzle = 0x55 * j;
\r
1054 mov->dst.mask = 1 << i;
\r
1059 case EOpDeterminant:
\r
1060 if(visit == PostVisit)
\r
1062 int size = arg->getNominalSize();
\r
1063 ASSERT(size == arg->getSecondarySize());
\r
1065 emitDeterminant(result, arg, size);
\r
1069 if(visit == PostVisit)
\r
1071 int size = arg->getNominalSize();
\r
1072 ASSERT(size == arg->getSecondarySize());
\r
1074 // Compute transposed matrix of cofactors
\r
1075 for(int i = 0; i < size; ++i)
\r
1077 for(int j = 0; j < size; ++j)
\r
1079 // For a 2x2 matrix, the cofactor is simply a transposed move or negate
\r
1080 // For a 3x3 or 4x4 matrix, the cofactor is a transposed determinant
\r
1081 emitDeterminant(result, arg, size - 1, j, i, i, j);
\r
1085 // Compute 1 / determinant
\r
1086 Temporary invDet(this);
\r
1087 emitDeterminant(&invDet, arg, size);
\r
1088 Constant one(1.0f, 1.0f, 1.0f, 1.0f);
\r
1089 Instruction *div = emit(sw::Shader::OPCODE_DIV, &invDet, &one, &invDet);
\r
1090 div->src[1].swizzle = 0x00; // xxxx
\r
1092 // Divide transposed matrix of cofactors by determinant
\r
1093 for(int i = 0; i < size; ++i)
\r
1095 emit(sw::Shader::OPCODE_MUL, result, i, result, i, &invDet);
\r
1099 default: UNREACHABLE(node->getOp());
\r
// Traverser callback for aggregate AST nodes: sequences, function definitions and
// calls, built-in texture functions, constructors, and component-wise built-ins.
// Emits shader assembly via emit()/emitCast()/copy(). Returns true to continue
// traversal into children, false to stop (falls off the visible span — the
// return statements are in lines dropped from this extraction).
1105 bool OutputASM::visitAggregate(Visit visit, TIntermAggregate *node)
// Skip nodes outside the scope currently being emitted (global vs. function pass),
// except scoping constructs that must always be entered.
1107 if(currentScope != emitScope && node->getOp() != EOpFunction && node->getOp() != EOpSequence)
1112 Constant zero(0.0f, 0.0f, 0.0f, 0.0f);
1114 TIntermTyped *result = node;
1115 const TType &resultType = node->getType();
1116 TIntermSequence &arg = node->getSequence();
1117 int argumentCount = arg.size();
1119 switch(node->getOp())
1121 case EOpSequence: break;
1122 case EOpDeclaration: break;
1123 case EOpInvariantDeclaration: break;
1124 case EOpPrototype: break;
// NOTE(review): this PostVisit copy presumably belongs to EOpComma (result is the
// right operand, arg[1]) — the case label was lost in extraction; confirm.
1126 if(visit == PostVisit)
1128 copy(result, arg[1]);
// EOpFunction (label lost in extraction): on PreVisit emit the function label and
// switch into function scope; on PostVisit emit RET and return to global scope.
1132 if(visit == PreVisit)
1134 const TString &name = node->getName();
1136 if(emitScope == FUNCTION)
1138 if(functionArray.size() > 1) // No need for a label when there's only main()
1140 Instruction *label = emit(sw::Shader::OPCODE_LABEL);
1141 label->dst.type = sw::Shader::PARAMETER_LABEL;
1143 const Function *function = findFunction(name);
1144 ASSERT(function); // Should have been added during global pass
1145 label->dst.index = function->label;
1146 currentFunction = function->label;
1149 else if(emitScope == GLOBAL)
// During the global pass, register every non-main function for later lookup.
1151 if(name != "main(")
1153 TIntermSequence &arguments = node->getSequence()[0]->getAsAggregate()->getSequence();
1154 functionArray.push_back(Function(functionArray.size(), name, &arguments, node));
1157 else UNREACHABLE(emitScope);
1159 currentScope = FUNCTION;
1161 else if(visit == PostVisit)
1163 if(emitScope == FUNCTION)
1165 if(functionArray.size() > 1) // No need to return when there's only main()
1167 emit(sw::Shader::OPCODE_RET);
1171 currentScope = GLOBAL;
1174 case EOpFunctionCall:
1175 if(visit == PostVisit)
1177 if(node->isUserDefined())
1179 const TString &name = node->getName();
1180 const Function *function = findFunction(name);
1184 mContext.error(node->getLine(), "function definition not found", name.c_str());
1188 TIntermSequence &arguments = *function->arg;
// Copy actual parameters into the formal in/inout parameter registers before the call.
1190 for(int i = 0; i < argumentCount; i++)
1192 TIntermTyped *in = arguments[i]->getAsTyped();
1194 if(in->getQualifier() == EvqIn ||
1195 in->getQualifier() == EvqInOut ||
1196 in->getQualifier() == EvqConstReadOnly)
1202 Instruction *call = emit(sw::Shader::OPCODE_CALL);
1203 call->dst.type = sw::Shader::PARAMETER_LABEL;
1204 call->dst.index = function->label;
1206 if(function->ret && function->ret->getType().getBasicType() != EbtVoid)
1208 copy(result, function->ret);
// Copy out/inout formal parameters back to the caller's arguments after the call.
1211 for(int i = 0; i < argumentCount; i++)
1213 TIntermTyped *argument = arguments[i]->getAsTyped();
1214 TIntermTyped *out = arg[i]->getAsTyped();
1216 if(argument->getQualifier() == EvqOut ||
1217 argument->getQualifier() == EvqInOut)
1219 copy(out, argument);
// Built-in texture lookup: arg[0] is the sampler, arg[1] the coordinate.
1225 const TextureFunction textureFunction(node->getName());
1226 switch(textureFunction.method)
1228 case TextureFunction::IMPLICIT:
1230 TIntermTyped *t = arg[1]->getAsTyped();
1232 TIntermNode* offset = textureFunction.offset ? arg[2] : 0;
1234 if(argumentCount == 2 || (textureFunction.offset && argumentCount == 3))
1236 Instruction *tex = emit(textureFunction.offset ? sw::Shader::OPCODE_TEXOFFSET : sw::Shader::OPCODE_TEX,
1237 result, arg[1], arg[0], offset);
1238 if(textureFunction.proj)
// Projective lookup: replicate the last coordinate component into .w via swizzle.
1240 tex->project = true;
1242 switch(t->getNominalSize())
1244 case 2: tex->src[0].swizzle = 0x54; break; // xyyy
1245 case 3: tex->src[0].swizzle = 0xA4; break; // xyzz
1246 case 4: break; // xyzw
1248 UNREACHABLE(t->getNominalSize());
1253 else if(argumentCount == 3 || (textureFunction.offset && argumentCount == 4)) // bias
// Bias variant: build the coordinate in a temporary with the bias packed into .w.
1255 Temporary proj(this);
1256 if(textureFunction.proj)
1258 Instruction *div = emit(sw::Shader::OPCODE_DIV, &proj, arg[1], arg[1]);
1259 div->dst.mask = 0x3;
1261 switch(t->getNominalSize())
// Divide .xy by the last coordinate component (0x55 * n replicates component n).
1266 div->src[1].swizzle = 0x55 * (t->getNominalSize() - 1);
1269 UNREACHABLE(t->getNominalSize());
1275 emit(sw::Shader::OPCODE_MOV, &proj, arg[1]);
1278 Instruction *bias = emit(sw::Shader::OPCODE_MOV, &proj, arg[textureFunction.offset ? 3 : 2]);
1279 bias->dst.mask = 0x8;
1281 Instruction *tex = emit(textureFunction.offset ? sw::Shader::OPCODE_TEXOFFSET : sw::Shader::OPCODE_TEX,
1282 result, &proj, arg[0], offset); // FIXME: Implement an efficient TEXLDB instruction
1285 else UNREACHABLE(argumentCount);
1288 case TextureFunction::LOD:
// Explicit-LOD lookup: lod value is packed into .w of the coordinate temporary.
1290 TIntermTyped *t = arg[1]->getAsTyped();
1291 Temporary proj(this);
1293 if(textureFunction.proj)
1295 Instruction *div = emit(sw::Shader::OPCODE_DIV, &proj, arg[1], arg[1]);
1296 div->dst.mask = 0x3;
1298 switch(t->getNominalSize())
1303 div->src[1].swizzle = 0x55 * (t->getNominalSize() - 1);
1306 UNREACHABLE(t->getNominalSize());
1312 emit(sw::Shader::OPCODE_MOV, &proj, arg[1]);
1315 Instruction *lod = emit(sw::Shader::OPCODE_MOV, &proj, arg[2]);
1316 lod->dst.mask = 0x8;
1318 emit(textureFunction.offset ? sw::Shader::OPCODE_TEXLDLOFFSET : sw::Shader::OPCODE_TEXLDL,
1319 result, &proj, arg[0], textureFunction.offset ? arg[3] : 0);
1322 case TextureFunction::FETCH:
// texelFetch: arg[2] is the integer lod, optional arg[3] the texel offset.
1324 TIntermTyped *t = arg[1]->getAsTyped();
1326 if(argumentCount == 3 || (textureFunction.offset && argumentCount == 4))
1328 TIntermNode* offset = textureFunction.offset ? arg[3] : 0;
1330 emit(textureFunction.offset ? sw::Shader::OPCODE_TEXELFETCHOFFSET : sw::Shader::OPCODE_TEXELFETCH,
1331 result, arg[1], arg[0], arg[2], offset);
1333 else UNREACHABLE(argumentCount);
1336 case TextureFunction::GRAD:
// textureGrad: arg[2]/arg[3] are dPdx/dPdy, optional arg[4] the offset.
1338 TIntermTyped *t = arg[1]->getAsTyped();
1340 if(argumentCount == 4 || (textureFunction.offset && argumentCount == 5))
1342 Temporary uvwb(this);
1344 if(textureFunction.proj)
1346 Instruction *div = emit(sw::Shader::OPCODE_DIV, &uvwb, arg[1], arg[1]);
1347 div->dst.mask = 0x3;
1349 switch(t->getNominalSize())
1354 div->src[1].swizzle = 0x55 * (t->getNominalSize() - 1);
1357 UNREACHABLE(t->getNominalSize());
1363 emit(sw::Shader::OPCODE_MOV, &uvwb, arg[1]);
1366 TIntermNode* offset = textureFunction.offset ? arg[4] : 0;
1368 emit(textureFunction.offset ? sw::Shader::OPCODE_TEXGRADOFFSET : sw::Shader::OPCODE_TEXGRAD,
1369 result, &uvwb, arg[0], arg[2], arg[3], offset);
1371 else UNREACHABLE(argumentCount);
1374 case TextureFunction::SIZE:
1375 emit(sw::Shader::OPCODE_TEXSIZE, result, arg[1], arg[0]);
1378 UNREACHABLE(textureFunction.method);
1383 case EOpParameters:
// Scalar/vector constructors: pack the arguments' components into the result.
1385 case EOpConstructFloat:
1386 case EOpConstructVec2:
1387 case EOpConstructVec3:
1388 case EOpConstructVec4:
1389 case EOpConstructBool:
1390 case EOpConstructBVec2:
1391 case EOpConstructBVec3:
1392 case EOpConstructBVec4:
1393 case EOpConstructInt:
1394 case EOpConstructIVec2:
1395 case EOpConstructIVec3:
1396 case EOpConstructIVec4:
1397 case EOpConstructUInt:
1398 case EOpConstructUVec2:
1399 case EOpConstructUVec3:
1400 case EOpConstructUVec4:
1401 if(visit == PostVisit)
1403 int component = 0;
1405 for(int i = 0; i < argumentCount; i++)
1407 TIntermTyped *argi = arg[i]->getAsTyped();
1408 int size = argi->getNominalSize();
1410 if(!argi->isMatrix())
1412 Instruction *mov = emitCast(result, argi);
1413 mov->dst.mask = (0xF << component) & 0xF;
1414 mov->src[0].swizzle = readSwizzle(argi, size) << (component * 2);
1416 component += size;
// Matrix argument: consume it column by column until the result is full.
1422 while(component < resultType.getNominalSize())
1424 Instruction *mov = emitCast(result, 0, argi, column);
1425 mov->dst.mask = (0xF << component) & 0xF;
1426 mov->src[0].swizzle = readSwizzle(argi, size) << (component * 2);
1429 component += size;
// Matrix constructors: three input shapes are handled below.
1435 case EOpConstructMat2:
1436 case EOpConstructMat2x3:
1437 case EOpConstructMat2x4:
1438 case EOpConstructMat3x2:
1439 case EOpConstructMat3:
1440 case EOpConstructMat3x4:
1441 case EOpConstructMat4x2:
1442 case EOpConstructMat4x3:
1443 case EOpConstructMat4:
1444 if(visit == PostVisit)
1446 TIntermTyped *arg0 = arg[0]->getAsTyped();
1447 const int outCols = result->getNominalSize();
1448 const int outRows = result->getSecondarySize();
1450 if(arg0->isScalar() && arg.size() == 1) // Construct scale matrix
// Zero each column, then write the scalar onto the diagonal element.
1452 for(int i = 0; i < outCols; i++)
1454 Instruction *init = emit(sw::Shader::OPCODE_MOV, result, i, &zero);
1455 Instruction *mov = emitCast(result, i, arg0, 0);
1456 mov->dst.mask = 1 << i;
1457 ASSERT(mov->src[0].swizzle == 0x00);
1460 else if(arg0->isMatrix())
// Matrix-from-matrix: copy overlapping columns, pad with identity elsewhere.
1462 const int inCols = arg0->getNominalSize();
1463 const int inRows = arg0->getSecondarySize();
1465 for(int i = 0; i < outCols; i++)
1467 if(i >= inCols || outRows > inRows)
1469 // Initialize to identity matrix
1470 Constant col((i == 0 ? 1.0f : 0.0f), (i == 1 ? 1.0f : 0.0f), (i == 2 ? 1.0f : 0.0f), (i == 3 ? 1.0f : 0.0f))
1471 Instruction *mov = emitCast(result, i, &col, 0);
1476 Instruction *mov = emitCast(result, i, arg0, i);
1477 mov->dst.mask = 0xF >> (4 - inRows);
// General case: fill the matrix column-major from a list of scalars/vectors,
// tracking the current column/row and how much of argi has been consumed.
1486 for(int i = 0; i < argumentCount; i++)
1488 TIntermTyped *argi = arg[i]->getAsTyped();
1489 int size = argi->getNominalSize();
1492 while(element < size)
1494 Instruction *mov = emitCast(result, column, argi, 0);
1495 mov->dst.mask = (0xF << row) & 0xF;
1496 mov->src[0].swizzle = (readSwizzle(argi, size) << (row * 2)) + 0x55 * element;
1498 int end = row + size - element;
1499 column = end >= outRows ? column + 1 : column;
1500 element = element + outRows - row;
1501 row = end >= outRows ? 0 : end;
1507 case EOpConstructStruct:
// Struct constructor: copy each member, register by register, at its offset.
1508 if(visit == PostVisit)
1511 for(int i = 0; i < argumentCount; i++)
1513 TIntermTyped *argi = arg[i]->getAsTyped();
1514 int size = argi->totalRegisterCount();
1516 for(int index = 0; index < size; index++)
1518 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, index + offset, argi, index);
1519 mov->dst.mask = writeMask(result, offset + index);
// Component-wise relational and math built-ins map 1:1 onto single opcodes.
1526 case EOpLessThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LT, result, arg[0], arg[1]); break;
1527 case EOpGreaterThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GT, result, arg[0], arg[1]); break;
1528 case EOpLessThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LE, result, arg[0], arg[1]); break;
1529 case EOpGreaterThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GE, result, arg[0], arg[1]); break;
1530 case EOpVectorEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_EQ, result, arg[0], arg[1]); break;
1531 case EOpVectorNotEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_NE, result, arg[0], arg[1]); break;
1532 case EOpMod: if(visit == PostVisit) emit(sw::Shader::OPCODE_MOD, result, arg[0], arg[1]); break;
1533 case EOpPow: if(visit == PostVisit) emit(sw::Shader::OPCODE_POW, result, arg[0], arg[1]); break;
1534 case EOpAtan: if(visit == PostVisit) emit(sw::Shader::OPCODE_ATAN2, result, arg[0], arg[1]); break;
1535 case EOpMin: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_MIN, result), result, arg[0], arg[1]); break;
1536 case EOpMax: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_MAX, result), result, arg[0], arg[1]); break;
// clamp(x, lo, hi) == min(max(x, lo), hi)
1538 if(visit == PostVisit)
1540 emit(getOpcode(sw::Shader::OPCODE_MAX, result), result, arg[0], arg[1]);
1541 emit(getOpcode(sw::Shader::OPCODE_MIN, result), result, result, arg[2]);
1544 case EOpMix: if(visit == PostVisit) emit(sw::Shader::OPCODE_LRP, result, arg[2], arg[1], arg[0]); break;
1545 case EOpStep: if(visit == PostVisit) emit(sw::Shader::OPCODE_STEP, result, arg[0], arg[1]); break;
1546 case EOpSmoothStep: if(visit == PostVisit) emit(sw::Shader::OPCODE_SMOOTH, result, arg[0], arg[1], arg[2]); break;
1547 case EOpDistance: if(visit == PostVisit) emit(sw::Shader::OPCODE_DIST(dim(arg[0])), result, arg[0], arg[1]); break;
1548 case EOpDot: if(visit == PostVisit) emit(sw::Shader::OPCODE_DP(dim(arg[0])), result, arg[0], arg[1]); break;
1549 case EOpCross: if(visit == PostVisit) emit(sw::Shader::OPCODE_CRS, result, arg[0], arg[1]); break;
1550 case EOpFaceForward: if(visit == PostVisit) emit(sw::Shader::OPCODE_FORWARD(dim(arg[0])), result, arg[0], arg[1], arg[2]); break;
1551 case EOpReflect: if(visit == PostVisit) emit(sw::Shader::OPCODE_REFLECT(dim(arg[0])), result, arg[0], arg[1]); break;
1552 case EOpRefract: if(visit == PostVisit) emit(sw::Shader::OPCODE_REFRACT(dim(arg[0])), result, arg[0], arg[1], arg[2]); break;
// matrixCompMult: component-wise multiply, one MUL per column.
1554 if(visit == PostVisit)
1556 TIntermTyped *arg0 = arg[0]->getAsTyped();
1557 TIntermTyped *arg1 = arg[1]->getAsTyped();
1558 ASSERT((arg0->getNominalSize() == arg1->getNominalSize()) && (arg0->getSecondarySize() == arg1->getSecondarySize()));
1560 int size = arg0->getNominalSize();
1561 for(int i = 0; i < size; i++)
1563 emit(sw::Shader::OPCODE_MUL, result, i, arg[0], i, arg[1], i);
1567 case EOpOuterProduct:
// outerProduct(c, r): column i of the result is c * r[i].
1568 if(visit == PostVisit)
1570 for(int i = 0; i < dim(arg[1]); i++)
1572 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, i, arg[0], 0, arg[1]);
1573 mul->src[1].swizzle = 0x55 * i;
1577 default: UNREACHABLE(node->getOp());
\r
// Traverser callback for selection nodes: the ternary operator (?:) and
// if/else statements. Constant conditions are folded at compile time;
// cheap side-effect-free ternaries become a SELECT; everything else uses
// structured IF/ELSE/ENDIF control-flow opcodes.
1583 bool OutputASM::visitSelection(Visit visit, TIntermSelection *node)
1585 if(currentScope != emitScope)
1590 TIntermTyped *condition = node->getCondition();
1591 TIntermNode *trueBlock = node->getTrueBlock();
1592 TIntermNode *falseBlock = node->getFalseBlock();
1593 TIntermConstantUnion *constantCondition = condition->getAsConstantUnion();
1595 condition->traverse(this);
1597 if(node->usesTernaryOperator())
1599 if(constantCondition)
// Compile-time constant condition: emit only the taken branch.
1601 bool trueCondition = constantCondition->getUnionArrayPointer()->getBConst();
1605 trueBlock->traverse(this);
1606 copy(node, trueBlock);
1610 falseBlock->traverse(this);
1611 copy(node, falseBlock);
1614 else if(trivial(node, 6)) // Fast to compute both potential results and no side effects
1616 trueBlock->traverse(this);
1617 falseBlock->traverse(this);
1618 emit(sw::Shader::OPCODE_SELECT, node, condition, trueBlock, falseBlock);
// General ternary: real branching, copying the taken branch's value into node.
1622 emit(sw::Shader::OPCODE_IF, 0, condition);
1626 trueBlock->traverse(this);
1627 copy(node, trueBlock);
1632 emit(sw::Shader::OPCODE_ELSE);
1633 falseBlock->traverse(this);
1634 copy(node, falseBlock);
1637 emit(sw::Shader::OPCODE_ENDIF);
1640 else // if/else statement
1642 if(constantCondition)
// Dead branch elimination for statically-known conditions.
1644 bool trueCondition = constantCondition->getUnionArrayPointer()->getBConst();
1650 trueBlock->traverse(this);
1657 falseBlock->traverse(this);
1663 emit(sw::Shader::OPCODE_IF, 0, condition);
1667 trueBlock->traverse(this);
1672 emit(sw::Shader::OPCODE_ELSE);
1673 falseBlock->traverse(this);
1676 emit(sw::Shader::OPCODE_ENDIF);
\r
// Traverser callback for loop nodes. Loops with a statically-known, small
// iteration count (and no break/continue/discontinuity) are fully unrolled;
// do-while is lowered onto WHILE with an explicit iterate flag; everything
// else becomes WHILE/ENDWHILE with a TEST of the condition per iteration.
1683 bool OutputASM::visitLoop(Visit visit, TIntermLoop *node)
1685 if(currentScope != emitScope)
// loopCount() returns the statically-determined trip count, if analyzable.
1690 unsigned int iterations = loopCount(node);
1692 if(iterations == 0)
1697 bool unroll = (iterations <= 4);
// Loops containing break/continue (discontinuities) cannot be unrolled.
1701 DetectLoopDiscontinuity detectLoopDiscontinuity;
1702 unroll = !detectLoopDiscontinuity.traverse(node);
1705 TIntermNode *init = node->getInit();
1706 TIntermTyped *condition = node->getCondition();
1707 TIntermTyped *expression = node->getExpression();
1708 TIntermNode *body = node->getBody();
1709 Constant True(true);
1711 if(node->getType() == ELoopDoWhile)
// do-while: run WHILE on a flag that starts true and is re-evaluated at
// the end of each iteration, so the body always executes at least once.
1713 Temporary iterate(this);
1714 emit(sw::Shader::OPCODE_MOV, &iterate, &True);
1716 emit(sw::Shader::OPCODE_WHILE, 0, &iterate); // FIXME: Implement real do-while
1720 body->traverse(this);
1723 emit(sw::Shader::OPCODE_TEST);
1725 condition->traverse(this);
1726 emit(sw::Shader::OPCODE_MOV, &iterate, condition);
1728 emit(sw::Shader::OPCODE_ENDWHILE);
1734 init->traverse(this);
// Unrolled path: emit body (and increment expression) 'iterations' times.
1739 for(unsigned int i = 0; i < iterations; i++)
1741 // condition->traverse(this); // Condition could contain statements, but not in an unrollable loop
1745 body->traverse(this);
1750 expression->traverse(this);
// Non-unrolled while/for: evaluate the condition (or substitute constant true).
1758 condition->traverse(this);
1762 condition = &True;
1765 emit(sw::Shader::OPCODE_WHILE, 0, condition);
1769 body->traverse(this);
1772 emit(sw::Shader::OPCODE_TEST);
1776 expression->traverse(this);
1781 condition->traverse(this);
1784 emit(sw::Shader::OPCODE_ENDWHILE);
\r
// Traverser callback for branch nodes: discard, break, continue and return.
// Each maps to a single control-flow opcode; return additionally copies the
// returned value into the current function's return register before LEAVE.
1791 bool OutputASM::visitBranch(Visit visit, TIntermBranch *node)
1793 if(currentScope != emitScope)
1798 switch(node->getFlowOp())
1800 case EOpKill: if(visit == PostVisit) emit(sw::Shader::OPCODE_DISCARD); break;
1801 case EOpBreak: if(visit == PostVisit) emit(sw::Shader::OPCODE_BREAK); break;
1802 case EOpContinue: if(visit == PostVisit) emit(sw::Shader::OPCODE_CONTINUE); break;
// EOpReturn (case label lost in extraction): copy value, then leave function.
1804 if(visit == PostVisit)
1806 TIntermTyped *value = node->getExpression();
1810 copy(functionArray[currentFunction].ret, value);
1813 emit(sw::Shader::OPCODE_LEAVE);
1816 default: UNREACHABLE(node->getFlowOp());
\r
// Convenience overload: true if the operand is non-null and its type
// designates a sampler register (see the TType overload below).
1822 bool OutputASM::isSamplerRegister(TIntermTyped *operand)
1824 return operand && isSamplerRegister(operand->getType());
\r
// Determines whether a type denotes an actual sampler register, as opposed
// to a copied sampler value (e.g. a sampler function parameter).
1827 bool OutputASM::isSamplerRegister(const TType &type)
1829 // A sampler register's qualifiers can be:
1830 // - EvqUniform: The sampler uniform is used as is in the code (default case).
1831 // - EvqTemporary: The sampler is indexed. It's still a sampler register.
1832 // - EvqIn (and other similar types): The sampler has been passed as a function argument. At this point,
1833 // the sampler has been copied and is no longer a sampler register.
1834 return IsSampler(type.getBasicType()) && (type.getQualifier() == EvqUniform || type.getQualifier() == EvqTemporary)
\r
// Convenience overload of emit(): all destination/source register offsets
// default to 0. See the full overload below for the actual emission logic.
1837 Instruction *OutputASM::emit(sw::Shader::Opcode op, TIntermTyped *dst, TIntermNode *src0, TIntermNode *src1, TIntermNode *src2, TIntermNode *src3, TIntermNode *src4)
1839 return emit(op, dst, 0, src0, 0, src1, 0, src2, 0, src3, 0, src4, 0);
\r
// Creates one shader instruction, fills in its destination parameter from
// 'dst' (+dstIndex register offset) and up to five source parameters via
// argument(), appends it to the active shader, and returns it so callers
// can tweak masks/swizzles. Null dst/srcN arguments are permitted.
1842 Instruction *OutputASM::emit(sw::Shader::Opcode op, TIntermTyped *dst, int dstIndex, TIntermNode *src0, int index0, TIntermNode *src1, int index1,
1843 TIntermNode *src2, int index2, TIntermNode *src3, int index3, TIntermNode *src4, int index4)
1845 if(isSamplerRegister(dst))
1847 op = sw::Shader::OPCODE_NULL; // Can't assign to a sampler, but this is hit when indexing sampler arrays
1850 Instruction *instruction = new Instruction(op);
// Destination fields are only filled in when a destination is present.
1854 instruction->dst.type = registerType(dst);
1855 instruction->dst.index = registerIndex(dst) + dstIndex;
1856 instruction->dst.mask = writeMask(dst);
1857 instruction->dst.integer = (dst->getBasicType() == EbtInt);
1860 argument(instruction->src[0], src0, index0);
1861 argument(instruction->src[1], src1, index1);
1862 argument(instruction->src[2], src2, index2);
1863 argument(instruction->src[3], src3, index3);
1864 argument(instruction->src[4], src4, index4);
// Ownership of 'instruction' transfers to the shader's instruction list.
1866 shader->append(instruction);
1868 return instruction;
\r
// Convenience overload: cast/copy src into dst at register offset 0.
1871 Instruction *OutputASM::emitCast(TIntermTyped *dst, TIntermTyped *src)
1873 return emitCast(dst, 0, src, 0);
\r
// Emits a type conversion from src's basic type to dst's basic type,
// selecting the matching conversion opcode (B2F, I2F, F2I, ...). When both
// sides already share a basic type, a plain MOV is emitted instead.
1876 Instruction *OutputASM::emitCast(TIntermTyped *dst, int dstIndex, TIntermTyped *src, int srcIndex)
1878 switch(src->getBasicType())
1881 switch(dst->getBasicType())
// bool -> int/uint/float. Note bool->uint reuses B2I: same bit pattern result.
1883 case EbtInt: return emit(sw::Shader::OPCODE_B2I, dst, dstIndex, src, srcIndex);
1884 case EbtUInt: return emit(sw::Shader::OPCODE_B2I, dst, dstIndex, src, srcIndex);
1885 case EbtFloat: return emit(sw::Shader::OPCODE_B2F, dst, dstIndex, src, srcIndex);
// int -> bool/float.
1890 switch(dst->getBasicType())
1892 case EbtBool: return emit(sw::Shader::OPCODE_I2B, dst, dstIndex, src, srcIndex);
1893 case EbtFloat: return emit(sw::Shader::OPCODE_I2F, dst, dstIndex, src, srcIndex);
// uint -> bool/float. uint->bool reuses I2B (zero test is sign-agnostic).
1898 switch(dst->getBasicType())
1900 case EbtBool: return emit(sw::Shader::OPCODE_I2B, dst, dstIndex, src, srcIndex);
1901 case EbtFloat: return emit(sw::Shader::OPCODE_U2F, dst, dstIndex, src, srcIndex);
// float -> bool/int/uint.
1906 switch(dst->getBasicType())
1908 case EbtBool: return emit(sw::Shader::OPCODE_F2B, dst, dstIndex, src, srcIndex);
1909 case EbtInt: return emit(sw::Shader::OPCODE_F2I, dst, dstIndex, src, srcIndex);
1910 case EbtUInt: return emit(sw::Shader::OPCODE_F2U, dst, dstIndex, src, srcIndex);
// No conversion needed: same basic type on both sides.
1918 ASSERT(src->getBasicType() == dst->getBasicType());
1920 return emit(sw::Shader::OPCODE_MOV, dst, dstIndex, src, srcIndex);
\r
// Emits 'op' once per register of the (possibly multi-register) element,
// pairing up the corresponding register of each source operand.
1923 void OutputASM::emitBinary(sw::Shader::Opcode op, TIntermTyped *dst, TIntermNode *src0, TIntermNode *src1, TIntermNode *src2)
1925 for(int index = 0; index < dst->elementRegisterCount(); index++)
1927 emit(op, dst, index, src0, index, src1, index, src2, index);
\r
// Compound assignment (e.g. +=): compute the binary operation into the
// expression's result register, then store it back into the l-value.
1931 void OutputASM::emitAssign(sw::Shader::Opcode op, TIntermTyped *result, TIntermTyped *lhs, TIntermTyped *src0, TIntermTyped *src1)
1933 emitBinary(op, result, src0, src1);
1934 assignLvalue(lhs, result);
\r
// Emits a comparison with relation 'cmpOp' (LT/GT/EQ/...), choosing the
// signed-int, unsigned-int or float compare opcode from the operand type.
1937 void OutputASM::emitCmp(sw::Shader::Control cmpOp, TIntermTyped *dst, TIntermNode *left, TIntermNode *right, int index)
1939 sw::Shader::Opcode opcode;
1940 switch(left->getAsTyped()->getBasicType())
1944 opcode = sw::Shader::OPCODE_ICMP;
1947 opcode = sw::Shader::OPCODE_UCMP;
// Default case (float and bool presumably fall through here — the case
// labels were lost in extraction).
1950 opcode = sw::Shader::OPCODE_CMP;
1954 Instruction *cmp = emit(opcode, dst, 0, left, index, right, index);
1955 cmp->control = cmpOp;
\r
// Returns the number of scalar components stored before register number
// 'registers' within an object of the given type, recursing through array
// elements and struct/interface-block fields until the register lands
// inside a leaf field.
1958 int componentCount(const TType &type, int registers)
1960 if(registers == 0)
// Arrays: skip whole elements, then recurse into the remainder.
1965 if(type.isArray() && registers >= type.elementRegisterCount())
1967 int index = registers / type.elementRegisterCount();
1968 registers -= index * type.elementRegisterCount();
1969 return index * type.getElementSize() + componentCount(type, registers);
// Structs/interface blocks: accumulate whole fields, recurse into the one
// containing the target register.
1972 if(type.isStruct() || type.isInterfaceBlock())
1974 const TFieldList& fields = type.getStruct() ? type.getStruct()->fields() : type.getInterfaceBlock()->fields();
1977 for(TFieldList::const_iterator field = fields.begin(); field != fields.end(); field++)
1979 const TType &fieldType = *((*field)->type());
1981 if(fieldType.totalRegisterCount() <= registers)
1983 registers -= fieldType.totalRegisterCount();
1984 elements += fieldType.getObjectSize();
1986 else // Register within this field
1988 return elements + componentCount(fieldType, registers);
// Matrices: one register per column, each registerSize() components wide.
1992 else if(type.isMatrix())
1994 return registers * type.registerSize();
\r
// Returns the number of scalar components in register number 'registers' of
// an object of the given type (i.e. the width of that particular register),
// recursing through arrays and struct/interface-block fields.
2001 int registerSize(const TType &type, int registers)
2003 if(registers == 0)
// Register 0 of a struct/block is the first register of its first field.
2005 if(type.isStruct())
2007 return registerSize(*((*(type.getStruct()->fields().begin()))->type()), 0);
2009 else if(type.isInterfaceBlock())
2011 return registerSize(*((*(type.getInterfaceBlock()->fields().begin()))->type()), 0);
2014 return type.registerSize();
// Arrays: every element has the same layout, so reduce modulo element size.
2017 if(type.isArray() && registers >= type.elementRegisterCount())
2019 int index = registers / type.elementRegisterCount();
2020 registers -= index * type.elementRegisterCount();
2021 return registerSize(type, registers);
// Structs/interface blocks: find the field that contains the register.
2024 if(type.isStruct() || type.isInterfaceBlock())
2026 const TFieldList& fields = type.getStruct() ? type.getStruct()->fields() : type.getInterfaceBlock()->fields();
2029 for(TFieldList::const_iterator field = fields.begin(); field != fields.end(); field++)
2031 const TType &fieldType = *((*field)->type());
2033 if(fieldType.totalRegisterCount() <= registers)
2035 registers -= fieldType.totalRegisterCount();
2036 elements += fieldType.getObjectSize();
2038 else // Register within this field
2040 return registerSize(fieldType, registers);
// Matrices: all columns have the same width as register 0.
2044 else if(type.isMatrix())
2046 return registerSize(type, 0);
\r
// Fills in one source parameter of an instruction from an AST operand.
// Constants are inlined as literal float4 values; everything else gets a
// register type/index, relative addressing for indexed sampler arrays, and
// a read swizzle sized to the register's component count.
2053 void OutputASM::argument(sw::Shader::SourceParameter &parameter, TIntermNode *argument, int index)
2057 TIntermTyped *arg = argument->getAsTyped();
2058 const TType &type = arg->getType();
// Clamp the register offset to the operand's last register.
2059 index = (index >= arg->totalRegisterCount()) ? arg->totalRegisterCount() - 1 : index;
2061 int size = registerSize(type, index);
2063 parameter.type = registerType(arg);
2065 if(arg->getQualifier() == EvqConstExpr)
// Constant operand: embed the (float-converted) component values directly.
2067 int component = componentCount(type, index);
2068 ConstantUnion *constants = arg->getAsConstantUnion()->getUnionArrayPointer();
2070 for(int i = 0; i < 4; i++)
2072 if(size == 1) // Replicate
2074 parameter.value[i] = constants[component + 0].getAsFloat();
2078 parameter.value[i] = constants[component + i].getAsFloat();
2082 parameter.value[i] = 0.0f;
2088 parameter.index = registerIndex(arg) + index;
2090 if(isSamplerRegister(arg))
// Indexed sampler: resolve the array subscript into the sampler index,
// using relative addressing for dynamic subscripts.
2092 TIntermBinary *binary = argument->getAsBinaryNode();
2096 TIntermTyped *left = binary->getLeft();
2097 TIntermTyped *right = binary->getRight();
2099 switch(binary->getOp())
2101 case EOpIndexDirect:
2102 parameter.index += right->getAsConstantUnion()->getIConst(0);
2104 case EOpIndexIndirect:
2105 if(left->getArraySize() > 1)
2107 parameter.rel.type = registerType(binary->getRight());
2108 parameter.rel.index = registerIndex(binary->getRight());
2109 parameter.rel.scale = 1;
2110 parameter.rel.deterministic = true;
2113 case EOpIndexDirectStruct:
2114 case EOpIndexDirectInterfaceBlock:
2115 parameter.index += right->getAsConstantUnion()->getIConst(0);
2118 UNREACHABLE(binary->getOp());
// Samplers carry no component swizzle; all other operands do.
2124 if(!IsSampler(arg->getBasicType()))
2126 parameter.swizzle = readSwizzle(arg, size);
\r
// Copies src to dst register by register (starting at src register 'offset'),
// restricting each MOV to dst's per-register write mask.
2131 void OutputASM::copy(TIntermTyped *dst, TIntermNode *src, int offset)
2133 for(int index = 0; index < dst->totalRegisterCount(); index++)
2135 Instruction *mov = emit(sw::Shader::OPCODE_MOV, dst, index, src, offset + index);
2136 mov->dst.mask = writeMask(dst, index);
\r
// Extracts the 2-bit component selector at position 'index' (0..3) from a
// packed 4x2-bit swizzle code (bits [2i+1:2i] select the source component
// for destination component i).
int swizzleElement(int swizzle, int index)
{
	const int shift = index * 2;

	return (swizzle >> shift) & 0x03;
}
\r
// Composes two packed 4x2-bit swizzle codes: the result selects, for each
// destination component i, the component of the original value obtained by
// first applying rightSwizzle at i and then leftSwizzle at that selector.
int swizzleSwizzle(int leftSwizzle, int rightSwizzle)
{
	int composed = 0;

	for(int component = 0; component < 4; component++)
	{
		// Inline selector extraction (equivalent to swizzleElement).
		int rightElement = (rightSwizzle >> (component * 2)) & 0x03;
		int leftElement = (leftSwizzle >> (rightElement * 2)) & 0x03;

		composed |= leftElement << (component * 2);
	}

	return composed;
}
\r
2153 void OutputASM::assignLvalue(TIntermTyped *dst, TIntermTyped *src)
\r
2156 ((src->isVector() && (!dst->isVector() || (dst->getNominalSize() != dst->getNominalSize()))) ||
\r
2157 (src->isMatrix() && (!dst->isMatrix() || (src->getNominalSize() != dst->getNominalSize()) || (src->getSecondarySize() != dst->getSecondarySize())))))
\r
2159 return mContext.error(src->getLine(), "Result type should match the l-value type in compound assignment", src->isVector() ? "vector" : "matrix");
\r
2162 TIntermBinary *binary = dst->getAsBinaryNode();
\r
2164 if(binary && binary->getOp() == EOpIndexIndirect && binary->getLeft()->isVector() && dst->isScalar())
\r
2166 Instruction *insert = new Instruction(sw::Shader::OPCODE_INSERT);
\r
2168 Temporary address(this);
\r
2169 lvalue(insert->dst, address, dst);
\r
2171 insert->src[0].type = insert->dst.type;
\r
2172 insert->src[0].index = insert->dst.index;
\r
2173 insert->src[0].rel = insert->dst.rel;
\r
2174 argument(insert->src[1], src);
\r
2175 argument(insert->src[2], binary->getRight());
\r
2177 shader->append(insert);
\r
2181 for(int offset = 0; offset < dst->totalRegisterCount(); offset++)
\r
2183 Instruction *mov = new Instruction(sw::Shader::OPCODE_MOV);
\r
2185 Temporary address(this);
\r
2186 int swizzle = lvalue(mov->dst, address, dst);
\r
2187 mov->dst.index += offset;
\r
2191 mov->dst.mask = writeMask(dst, offset);
\r
2194 argument(mov->src[0], src, offset);
\r
2195 mov->src[0].swizzle = swizzleSwizzle(mov->src[0].swizzle, swizzle);
\r
2197 shader->append(mov);
\r
// Resolves an l-value expression into a destination parameter (register
// type/index/mask plus relative addressing), recursing through indexing and
// swizzle nodes. 'address' is a scratch register used to accumulate dynamic
// indices. Returns the accumulated component swizzle that callers must
// compose onto their source operands.
2202 int OutputASM::lvalue(sw::Shader::DestinationParameter &dst, Temporary &address, TIntermTyped *node)
2204 TIntermTyped *result = node;
2205 TIntermBinary *binary = node->getAsBinaryNode();
2206 TIntermSymbol *symbol = node->getAsSymbolNode();
2210 TIntermTyped *left = binary->getLeft();
2211 TIntermTyped *right = binary->getRight();
2213 int leftSwizzle = lvalue(dst, address, left);   // Resolve the l-value of the left side
2215 switch(binary->getOp())
2217 case EOpIndexDirect:
2219 int rightIndex = right->getAsConstantUnion()->getIConst(0);
2221 if(left->isRegister())
// Constant component index into a vector: narrow the write mask to the
// selected component (mapped through the accumulated swizzle).
2223 int leftMask = dst.mask;
2226 while((leftMask & dst.mask) == 0)
2228 dst.mask = dst.mask << 1;
2231 int element = swizzleElement(leftSwizzle, rightIndex);
2232 dst.mask = 1 << element;
2236 else if(left->isArray() || left->isMatrix())
// Constant array/column index: advance by whole registers.
2238 dst.index += rightIndex * result->totalRegisterCount();
2241 else UNREACHABLE(0);
2244 case EOpIndexIndirect:
2246 if(left->isRegister())
2248 // Requires INSERT instruction (handled by calling function)
2250 else if(left->isArray() || left->isMatrix())
2252 int scale = result->totalRegisterCount();
2254 if(dst.rel.type == sw::Shader::PARAMETER_VOID)   // Use the index register as the relative address directly
2256 if(left->totalRegisterCount() > 1)
2258 sw::Shader::SourceParameter relativeRegister;
2259 argument(relativeRegister, right);
2261 dst.rel.index = relativeRegister.index;
2262 dst.rel.type = relativeRegister.type;
2263 dst.rel.scale = scale;
// Uniform indexing in a vertex shader may be non-deterministic per-lane.
2264 dst.rel.deterministic = !(vertexShader && left->getQualifier() == EvqUniform);
2267 else if(dst.rel.index != registerIndex(&address))   // Move the previous index register to the address register
// Fold the previous relative index into the address register:
// address = previous * oldScale (+ right * newScale, per branch below).
2271 Constant oldScale((int)dst.rel.scale);
2272 Instruction *mad = emit(sw::Shader::OPCODE_IMAD, &address, &address, &oldScale, right);
2273 mad->src[0].index = dst.rel.index;
2274 mad->src[0].type = dst.rel.type;
2278 Constant oldScale((int)dst.rel.scale);
2279 Instruction *mul = emit(sw::Shader::OPCODE_IMUL, &address, &address, &oldScale);
2280 mul->src[0].index = dst.rel.index;
2281 mul->src[0].type = dst.rel.type;
2283 Constant newScale(scale);
2284 emit(sw::Shader::OPCODE_IMAD, &address, right, &newScale, &address);
2287 dst.rel.type = sw::Shader::PARAMETER_TEMP;
2288 dst.rel.index = registerIndex(&address);
2289 dst.rel.scale = 1;
2291 else   // Just add the new index to the address register
2295 emit(sw::Shader::OPCODE_IADD, &address, &address, right);
2299 Constant newScale(scale);
2300 emit(sw::Shader::OPCODE_IMAD, &address, right, &newScale, &address);
2304 else UNREACHABLE(0);
2307 case EOpIndexDirectStruct:
2308 case EOpIndexDirectInterfaceBlock:
// Field selection: skip the registers of all preceding fields.
2310 const TFieldList& fields = (binary->getOp() == EOpIndexDirectStruct) ?
2311 left->getType().getStruct()->fields() :
2312 left->getType().getInterfaceBlock()->fields();
2313 int index = right->getAsConstantUnion()->getIConst(0);
2314 int fieldOffset = 0;
2316 for(int i = 0; i < index; i++)
2318 fieldOffset += fields[i]->type()->totalRegisterCount();
2321 dst.type = registerType(left);
2322 dst.index += fieldOffset;
2323 dst.mask = writeMask(right);
2328 case EOpVectorSwizzle:
// Swizzled l-value: compute the writable mask and the composed swizzle.
2330 ASSERT(left->isRegister());
2332 int leftMask = dst.mask;
2335 int rightMask = 0;
2337 TIntermSequence &sequence = right->getAsAggregate()->getSequence();
2339 for(unsigned int i = 0; i < sequence.size(); i++)
2341 int index = sequence[i]->getAsConstantUnion()->getIConst(0);
2343 int element = swizzleElement(leftSwizzle, index);
2344 rightMask = rightMask | (1 << element);
2345 swizzle = swizzle | swizzleElement(leftSwizzle, i) << (element * 2);
2348 dst.mask = leftMask & rightMask;
2354 UNREACHABLE(binary->getOp());   // Not an l-value operator
// Base case: a plain symbol maps directly to its register.
2360 dst.type = registerType(symbol);
2361 dst.index = registerIndex(symbol);
2362 dst.mask = writeMask(symbol);
\r
// Maps an operand to its shader register file, mainly from its qualifier.
// Also enforces the GLSL rule that a fragment shader may not statically
// write both gl_FragColor and gl_FragData.
2369 sw::Shader::ParameterType OutputASM::registerType(TIntermTyped *operand)
2371 if(isSamplerRegister(operand))
2373 return sw::Shader::PARAMETER_SAMPLER;
2376 const TQualifier qualifier = operand->getQualifier();
2377 if((EvqFragColor == qualifier) || (EvqFragData == qualifier))
// Writing one output form after the other was used is a compile error.
2379 if(((EvqFragData == qualifier) && (EvqFragColor == outputQualifier)) ||
2380 ((EvqFragColor == qualifier) && (EvqFragData == outputQualifier)))
2382 mContext.error(operand->getLine(), "static assignment to both gl_FragData and gl_FragColor", "");
2384 outputQualifier = qualifier;
2389 case EvqTemporary: return sw::Shader::PARAMETER_TEMP;
2390 case EvqGlobal: return sw::Shader::PARAMETER_TEMP;
2391 case EvqConstExpr: return sw::Shader::PARAMETER_FLOAT4LITERAL; // All converted to float
2392 case EvqAttribute: return sw::Shader::PARAMETER_INPUT;
2393 case EvqVaryingIn: return sw::Shader::PARAMETER_INPUT;
2394 case EvqVaryingOut: return sw::Shader::PARAMETER_OUTPUT;
2395 case EvqVertexIn: return sw::Shader::PARAMETER_INPUT;
2396 case EvqFragmentOut: return sw::Shader::PARAMETER_COLOROUT;
2397 case EvqVertexOut: return sw::Shader::PARAMETER_OUTPUT;
2398 case EvqFragmentIn: return sw::Shader::PARAMETER_INPUT;
2399 case EvqInvariantVaryingIn: return sw::Shader::PARAMETER_INPUT;    // FIXME: Guarantee invariance at the backend
2400 case EvqInvariantVaryingOut: return sw::Shader::PARAMETER_OUTPUT;   // FIXME: Guarantee invariance at the backend
2401 case EvqSmooth: return sw::Shader::PARAMETER_OUTPUT;
2402 case EvqFlat: return sw::Shader::PARAMETER_OUTPUT;
2403 case EvqCentroidOut: return sw::Shader::PARAMETER_OUTPUT;
2404 case EvqSmoothIn: return sw::Shader::PARAMETER_INPUT;
2405 case EvqFlatIn: return sw::Shader::PARAMETER_INPUT;
2406 case EvqCentroidIn: return sw::Shader::PARAMETER_INPUT;
2407 case EvqUniform: return sw::Shader::PARAMETER_CONST;
// Function parameters and locals all live in temporaries.
2408 case EvqIn: return sw::Shader::PARAMETER_TEMP;
2409 case EvqOut: return sw::Shader::PARAMETER_TEMP;
2410 case EvqInOut: return sw::Shader::PARAMETER_TEMP;
2411 case EvqConstReadOnly: return sw::Shader::PARAMETER_TEMP;
2412 case EvqPosition: return sw::Shader::PARAMETER_OUTPUT;
2413 case EvqPointSize: return sw::Shader::PARAMETER_OUTPUT;
2414 case EvqInstanceID: return sw::Shader::PARAMETER_MISCTYPE;
2415 case EvqFragCoord: return sw::Shader::PARAMETER_MISCTYPE;
2416 case EvqFrontFacing: return sw::Shader::PARAMETER_MISCTYPE;
2417 case EvqPointCoord: return sw::Shader::PARAMETER_INPUT;
2418 case EvqFragColor: return sw::Shader::PARAMETER_COLOROUT;
2419 case EvqFragData: return sw::Shader::PARAMETER_COLOROUT;
2420 case EvqFragDepth: return sw::Shader::PARAMETER_DEPTHOUT;
2421 default: UNREACHABLE(qualifier);
2424 return sw::Shader::PARAMETER_VOID;
\r
2427 unsigned int OutputASM::registerIndex(TIntermTyped *operand)
\r
2429 if(isSamplerRegister(operand))
\r
2431 return samplerRegister(operand);
\r
2434 switch(operand->getQualifier())
\r
2436 case EvqTemporary: return temporaryRegister(operand);
\r
2437 case EvqGlobal: return temporaryRegister(operand);
\r
2438 case EvqConstExpr: UNREACHABLE(EvqConstExpr);
\r
2439 case EvqAttribute: return attributeRegister(operand);
\r
2440 case EvqVaryingIn: return varyingRegister(operand);
\r
2441 case EvqVaryingOut: return varyingRegister(operand);
\r
2442 case EvqVertexIn: return attributeRegister(operand);
\r
2443 case EvqFragmentOut: return fragmentOutputRegister(operand);
\r
2444 case EvqVertexOut: return varyingRegister(operand);
\r
2445 case EvqFragmentIn: return varyingRegister(operand);
\r
2446 case EvqInvariantVaryingIn: return varyingRegister(operand);
\r
2447 case EvqInvariantVaryingOut: return varyingRegister(operand);
\r
2448 case EvqSmooth: return varyingRegister(operand);
\r
2449 case EvqFlat: return varyingRegister(operand);
\r
2450 case EvqCentroidOut: return varyingRegister(operand);
\r
2451 case EvqSmoothIn: return varyingRegister(operand);
\r
2452 case EvqFlatIn: return varyingRegister(operand);
\r
2453 case EvqCentroidIn: return varyingRegister(operand);
\r
2454 case EvqUniform: return uniformRegister(operand);
\r
2455 case EvqIn: return temporaryRegister(operand);
\r
2456 case EvqOut: return temporaryRegister(operand);
\r
2457 case EvqInOut: return temporaryRegister(operand);
\r
2458 case EvqConstReadOnly: return temporaryRegister(operand);
\r
2459 case EvqPosition: return varyingRegister(operand);
\r
2460 case EvqPointSize: return varyingRegister(operand);
\r
2461 case EvqInstanceID: vertexShader->instanceIdDeclared = true; return 0;
\r
2462 case EvqFragCoord: pixelShader->vPosDeclared = true; return 0;
\r
2463 case EvqFrontFacing: pixelShader->vFaceDeclared = true; return 1;
\r
2464 case EvqPointCoord: return varyingRegister(operand);
\r
2465 case EvqFragColor: return 0;
\r
2466 case EvqFragData: return 0;
\r
2467 case EvqFragDepth: return 0;
\r
2468 default: UNREACHABLE(operand->getQualifier());
\r
2474 int OutputASM::writeMask(TIntermTyped *destination, int index)
\r
2476 if(destination->getQualifier() == EvqPointSize)
\r
2478 return 0x2; // Point size stored in the y component
\r
2481 return 0xF >> (4 - registerSize(destination->getType(), index));
\r
2484 int OutputASM::readSwizzle(TIntermTyped *argument, int size)
\r
2486 if(argument->getQualifier() == EvqPointSize)
\r
2488 return 0x55; // Point size stored in the y component
\r
2491 static const unsigned char swizzleSize[5] = {0x00, 0x00, 0x54, 0xA4, 0xE4}; // (void), xxxx, xyyy, xyzz, xyzw
\r
2493 return swizzleSize[size];
\r
2496 // Conservatively checks whether an expression is fast to compute and has no side effects
\r
2497 bool OutputASM::trivial(TIntermTyped *expression, int budget)
\r
2499 if(!expression->isRegister())
\r
2504 return cost(expression, budget) >= 0;
\r
2507 // Returns the remaining computing budget (if < 0 the expression is too expensive or has side effects)

// NOTE(review): this dump is missing structural lines (braces, the initial budget
// check, several case labels and default branches); the leading numbers on each
// line are extraction artifacts, not code.
2508 int OutputASM::cost(TIntermNode *expression, int budget)

// Symbols and constants are leaf nodes; they consume none of the budget.
2515 if(expression->getAsSymbolNode())

2519 else if(expression->getAsConstantUnion())

2523 else if(expression->getAsBinaryNode())

2525 TIntermBinary *binary = expression->getAsBinaryNode();

2527 switch(binary->getOp())

// Swizzles and direct indexing are free: only the indexed operand is charged.
2529 case EOpVectorSwizzle:

2530 case EOpIndexDirect:

2531 case EOpIndexDirectStruct:

2532 case EOpIndexDirectInterfaceBlock:

2533 return cost(binary->getLeft(), budget - 0);

// Presumably the cheap arithmetic operators: one budget unit plus the cost of
// both operands — the case labels on the missing lines 2534-2536 would confirm.
2537 return cost(binary->getLeft(), cost(binary->getRight(), budget - 1));

2542 else if(expression->getAsUnaryNode())

2544 TIntermUnary *unary = expression->getAsUnaryNode();

2546 switch(unary->getOp())

// Cheap unary operators: one budget unit plus the operand's cost.
2550 return cost(unary->getOperand(), budget - 1);

2555 else if(expression->getAsSelectionNode())

2557 TIntermSelection *selection = expression->getAsSelectionNode();

// Only ternary selections are costed as expressions here.
2559 if(selection->usesTernaryOperator())

2561 TIntermTyped *condition = selection->getCondition();

2562 TIntermNode *trueBlock = selection->getTrueBlock();

2563 TIntermNode *falseBlock = selection->getFalseBlock();

2564 TIntermConstantUnion *constantCondition = condition->getAsConstantUnion();

// A compile-time constant condition selects exactly one branch, so only the
// taken branch contributes to the cost.
2566 if(constantCondition)

2568 bool trueCondition = constantCondition->getUnionArrayPointer()->getBConst();

2572 return cost(trueBlock, budget - 0);

2576 return cost(falseBlock, budget - 0);

// Dynamic condition: charge two units plus both branches.
2581 return cost(trueBlock, cost(falseBlock, budget - 2));

2589 const Function *OutputASM::findFunction(const TString &name)
\r
2591 for(unsigned int f = 0; f < functionArray.size(); f++)
\r
2593 if(functionArray[f].name == name)
\r
2595 return &functionArray[f];
\r
2602 int OutputASM::temporaryRegister(TIntermTyped *temporary)
\r
2604 return allocate(temporaries, temporary);
\r
2607 int OutputASM::varyingRegister(TIntermTyped *varying)
\r
2609 int var = lookup(varyings, varying);
\r
2613 var = allocate(varyings, varying);
\r
2614 int componentCount = varying->registerSize();
\r
2615 int registerCount = varying->totalRegisterCount();
\r
2619 if((var + registerCount) > sw::PixelShader::MAX_INPUT_VARYINGS)
\r
2621 mContext.error(varying->getLine(), "Varyings packing failed: Too many varyings", "fragment shader");
\r
2625 if(varying->getQualifier() == EvqPointCoord)
\r
2627 ASSERT(varying->isRegister());
\r
2628 if(componentCount >= 1) pixelShader->semantic[var][0] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);
\r
2629 if(componentCount >= 2) pixelShader->semantic[var][1] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);
\r
2630 if(componentCount >= 3) pixelShader->semantic[var][2] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);
\r
2631 if(componentCount >= 4) pixelShader->semantic[var][3] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);
\r
2635 for(int i = 0; i < varying->totalRegisterCount(); i++)
\r
2637 if(componentCount >= 1) pixelShader->semantic[var + i][0] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);
\r
2638 if(componentCount >= 2) pixelShader->semantic[var + i][1] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);
\r
2639 if(componentCount >= 3) pixelShader->semantic[var + i][2] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);
\r
2640 if(componentCount >= 4) pixelShader->semantic[var + i][3] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);
\r
2644 else if(vertexShader)
\r
2646 if((var + registerCount) > sw::VertexShader::MAX_OUTPUT_VARYINGS)
\r
2648 mContext.error(varying->getLine(), "Varyings packing failed: Too many varyings", "vertex shader");
\r
2652 if(varying->getQualifier() == EvqPosition)
\r
2654 ASSERT(varying->isRegister());
\r
2655 vertexShader->output[var][0] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);
\r
2656 vertexShader->output[var][1] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);
\r
2657 vertexShader->output[var][2] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);
\r
2658 vertexShader->output[var][3] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);
\r
2659 vertexShader->positionRegister = var;
\r
2661 else if(varying->getQualifier() == EvqPointSize)
\r
2663 ASSERT(varying->isRegister());
\r
2664 vertexShader->output[var][0] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);
\r
2665 vertexShader->output[var][1] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);
\r
2666 vertexShader->output[var][2] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);
\r
2667 vertexShader->output[var][3] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);
\r
2668 vertexShader->pointSizeRegister = var;
\r
2672 // Semantic indexes for user varyings will be assigned during program link to match the pixel shader
\r
2675 else UNREACHABLE(0);
\r
2677 declareVarying(varying, var);
\r
2683 void OutputASM::declareVarying(TIntermTyped *varying, int reg)
\r
2685 if(varying->getQualifier() != EvqPointCoord) // gl_PointCoord does not need linking
\r
2687 const TType &type = varying->getType();
\r
2688 const char *name = varying->getAsSymbolNode()->getSymbol().c_str();
\r
2689 VaryingList &activeVaryings = shaderObject->varyings;
\r
2691 // Check if this varying has been declared before without having a register assigned
\r
2692 for(VaryingList::iterator v = activeVaryings.begin(); v != activeVaryings.end(); v++)
\r
2694 if(v->name == name)
\r
2698 ASSERT(v->reg < 0 || v->reg == reg);
\r
2706 activeVaryings.push_back(glsl::Varying(glVariableType(type), name, varying->getArraySize(), reg, 0));
\r
2710 int OutputASM::uniformRegister(TIntermTyped *uniform)
\r
2712 const TType &type = uniform->getType();
\r
2713 ASSERT(!IsSampler(type.getBasicType()));
\r
2714 TInterfaceBlock *block = type.getAsInterfaceBlock();
\r
2715 TIntermSymbol *symbol = uniform->getAsSymbolNode();
\r
2716 ASSERT(symbol || block);
\r
2718 if(symbol || block)
\r
2720 int index = lookup(uniforms, uniform);
\r
2724 index = allocate(uniforms, uniform);
\r
2725 const TString &name = symbol ? symbol->getSymbol() : block->name();
\r
2727 declareUniform(type, name, index);
\r
2736 int OutputASM::attributeRegister(TIntermTyped *attribute)
\r
2738 ASSERT(!attribute->isArray());
\r
2740 int index = lookup(attributes, attribute);
\r
2744 TIntermSymbol *symbol = attribute->getAsSymbolNode();
\r
2749 index = allocate(attributes, attribute);
\r
2750 const TType &type = attribute->getType();
\r
2751 int registerCount = attribute->totalRegisterCount();
\r
2753 if(vertexShader && (index + registerCount) <= sw::VertexShader::MAX_INPUT_ATTRIBUTES)
\r
2755 for(int i = 0; i < registerCount; i++)
\r
2757 vertexShader->input[index + i] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, index + i);
\r
2761 ActiveAttributes &activeAttributes = shaderObject->activeAttributes;
\r
2763 const char *name = symbol->getSymbol().c_str();
\r
2764 activeAttributes.push_back(Attribute(glVariableType(type), name, type.getArraySize(), type.getLayoutQualifier().location, index));
\r
2771 int OutputASM::fragmentOutputRegister(TIntermTyped *fragmentOutput)
\r
2773 return allocate(fragmentOutputs, fragmentOutput);
\r
2776 int OutputASM::samplerRegister(TIntermTyped *sampler)
\r
2778 ASSERT(IsSampler(sampler->getType().getBasicType()));
\r
2779 TIntermSymbol *symbol = sampler->getAsSymbolNode();
\r
2780 TIntermBinary *binary = sampler->getAsBinaryNode();
\r
2784 return samplerRegister(symbol);
\r
2788 ASSERT(binary->getOp() == EOpIndexDirect || binary->getOp() == EOpIndexIndirect ||
\r
2789 binary->getOp() == EOpIndexDirectStruct || binary->getOp() == EOpIndexDirectInterfaceBlock);
\r
2791 return samplerRegister(binary->getLeft()); // Index added later
\r
2793 else UNREACHABLE(0);
\r
2798 int OutputASM::samplerRegister(TIntermSymbol *sampler)
\r
2800 const TType &type = sampler->getType();
\r
2801 ASSERT(IsSampler(type.getBasicType()) || type.getStruct()); // Structures can contain samplers
\r
2803 int index = lookup(samplers, sampler);
\r
2807 index = allocate(samplers, sampler);
\r
2809 if(sampler->getQualifier() == EvqUniform)
\r
2811 const char *name = sampler->getSymbol().c_str();
\r
2812 declareUniform(type, name, index);
\r
2819 int OutputASM::lookup(VariableArray &list, TIntermTyped *variable)
\r
2821 for(unsigned int i = 0; i < list.size(); i++)
\r
2823 if(list[i] == variable)
\r
2825 return i; // Pointer match
\r
2829 TIntermSymbol *varSymbol = variable->getAsSymbolNode();
\r
2830 TInterfaceBlock *varBlock = variable->getType().getAsInterfaceBlock();
\r
2834 for(unsigned int i = 0; i < list.size(); i++)
\r
2838 TInterfaceBlock *listBlock = list[i]->getType().getAsInterfaceBlock();
\r
2842 if(listBlock->name() == varBlock->name())
\r
2844 ASSERT(listBlock->arraySize() == varBlock->arraySize());
\r
2845 ASSERT(listBlock->fields() == varBlock->fields());
\r
2846 ASSERT(listBlock->blockStorage() == varBlock->blockStorage());
\r
2847 ASSERT(listBlock->matrixPacking() == varBlock->matrixPacking());
\r
2855 else if(varSymbol)
\r
2857 for(unsigned int i = 0; i < list.size(); i++)
\r
2861 TIntermSymbol *listSymbol = list[i]->getAsSymbolNode();
\r
2865 if(listSymbol->getId() == varSymbol->getId())
\r
2867 ASSERT(listSymbol->getSymbol() == varSymbol->getSymbol());
\r
2868 ASSERT(listSymbol->getType() == varSymbol->getType());
\r
2869 ASSERT(listSymbol->getQualifier() == varSymbol->getQualifier());
\r
2881 int OutputASM::allocate(VariableArray &list, TIntermTyped *variable)
\r
2883 int index = lookup(list, variable);
\r
2887 unsigned int registerCount = variable->totalRegisterCount();
\r
2889 for(unsigned int i = 0; i < list.size(); i++)
\r
2893 unsigned int j = 1;
\r
2894 for( ; j < registerCount && (i + j) < list.size(); j++)
\r
2896 if(list[i + j] != 0)
\r
2902 if(j == registerCount) // Found free slots
\r
2904 for(unsigned int j = 0; j < registerCount; j++)
\r
2906 list[i + j] = variable;
\r
2914 index = list.size();
\r
2916 for(unsigned int i = 0; i < registerCount; i++)
\r
2918 list.push_back(variable);
\r
2925 void OutputASM::free(VariableArray &list, TIntermTyped *variable)
\r
2927 int index = lookup(list, variable);
\r
// Recursively declares a uniform (plain value, struct, or interface block) on
// the shader object, assigning register indices and std140 block offsets.
// Returns the register index of the declared (sub-)uniform.
// NOTE(review): this dump is missing structural lines (braces, several
// if/else and case lines); the leading numbers on each line are extraction
// artifacts, not code.
2935 int OutputASM::declareUniform(const TType &type, const TString &name, int registerIndex, int blockId, BlockLayoutEncoder* encoder)

2937 const TStructure *structure = type.getStruct();

2938 const TInterfaceBlock *block = (type.isInterfaceBlock() || (blockId == -1)) ? type.getInterfaceBlock() : nullptr;

2939 ActiveUniforms &activeUniforms = shaderObject->activeUniforms;

// Base case: a leaf uniform (scalar/vector/matrix/sampler, not struct or block).
2941 if(!structure && !block)

2943 const BlockMemberInfo blockInfo = encoder ? encoder->encodeType(type) : BlockMemberInfo::getDefaultBlockInfo();

// Members of a uniform block also get registered in the block's definition map.
2946 blockDefinitions[blockId].indexMap[registerIndex] = TypedMemberInfo(blockInfo, type);

2947 shaderObject->activeUniformBlocks[blockId].fields.push_back(activeUniforms.size());

2949 int fieldRegisterIndex = encoder ? shaderObject->activeUniformBlocks[blockId].registerIndex + BlockLayoutEncoder::getBlockRegister(blockInfo) : registerIndex;

2950 activeUniforms.push_back(Uniform(glVariableType(type), glVariablePrecision(type), name.c_str(), type.getArraySize(),

2951 fieldRegisterIndex, blockId, blockInfo));

// Samplers additionally need to be declared on the assembly shader itself.
2952 if(isSamplerRegister(type))

2954 for(int i = 0; i < type.totalRegisterCount(); i++)

2956 shader->declareSampler(fieldRegisterIndex + i);

// Interface-block case: declare the block and recurse into its fields.
2962 ActiveUniformBlocks &activeUniformBlocks = shaderObject->activeUniformBlocks;

2963 const TFieldList& fields = block->fields();

2964 const TString &blockName = block->name();

2965 int fieldRegisterIndex = registerIndex;

2966 bool isUniformBlockMember = !type.isInterfaceBlock() && (blockId == -1);

2968 if(isUniformBlockMember)

2970 // This is a uniform that's part of a block, let's see if the block is already defined

2971 for(size_t i = 0; i < activeUniformBlocks.size(); ++i)

2973 if(activeUniformBlocks[i].name == blockName.c_str())

2975 // The block is already defined, find the register for the current uniform and return it

2976 for(size_t j = 0; j < fields.size(); j++)

2978 const TString &fieldName = fields[j]->name();

2979 if(fieldName == name)

2981 return fieldRegisterIndex;

2984 fieldRegisterIndex += fields[j]->type()->totalRegisterCount();

2988 return fieldRegisterIndex;

// Block not yet defined: register it and lay out its members (std140).
2993 blockId = activeUniformBlocks.size();

2994 bool isRowMajor = block->matrixPacking() == EmpRowMajor;

2995 activeUniformBlocks.push_back(UniformBlock(blockName.c_str(), 0, block->arraySize(),

2996 block->blockStorage(), isRowMajor, registerIndex, blockId));

2997 blockDefinitions.push_back(BlockDefinition());

2999 Std140BlockEncoder currentBlockEncoder(isRowMajor);

3000 currentBlockEncoder.enterAggregateType();

3001 for(size_t i = 0; i < fields.size(); i++)

3003 const TType &fieldType = *(fields[i]->type());

3004 const TString &fieldName = fields[i]->name();

// Remember the register of the member we were originally asked about.
3005 if(isUniformBlockMember && (fieldName == name))

3007 registerIndex = fieldRegisterIndex;

// Members of a named block are exposed as "Block.member"; anonymous blocks
// expose the bare member name.
3010 const TString uniformName = block->hasInstanceName() ? blockName + "." + fieldName : fieldName;

// NOTE(review): "¤tBlockEncoder" is a mojibake of "&currentBlockEncoder"
// ("&curren" was interpreted as the HTML entity for the currency sign).
3012 declareUniform(fieldType, uniformName, fieldRegisterIndex, blockId, ¤tBlockEncoder);

3013 fieldRegisterIndex += fieldType.totalRegisterCount();

3015 currentBlockEncoder.exitAggregateType();

3016 activeUniformBlocks[blockId].dataSize = currentBlockEncoder.getBlockSize();

// Struct case: recurse into the fields, expanding arrays element by element.
3020 int fieldRegisterIndex = registerIndex;

3022 const TFieldList& fields = structure->fields();

3023 if(type.isArray() && (structure || type.isInterfaceBlock()))

3025 for(int i = 0; i < type.getArraySize(); i++)

3029 encoder->enterAggregateType();

3031 for(size_t j = 0; j < fields.size(); j++)

3033 const TType &fieldType = *(fields[j]->type());

3034 const TString &fieldName = fields[j]->name();

// Array-of-struct members are named "name[i].field".
3035 const TString uniformName = name + "[" + str(i) + "]." + fieldName;

3037 declareUniform(fieldType, uniformName, fieldRegisterIndex, blockId, encoder);

3038 fieldRegisterIndex += fieldType.totalRegisterCount();

3042 encoder->exitAggregateType();

3050 encoder->enterAggregateType();

3052 for(size_t i = 0; i < fields.size(); i++)

3054 const TType &fieldType = *(fields[i]->type());

3055 const TString &fieldName = fields[i]->name();

// Non-array struct members are named "name.field".
3056 const TString uniformName = name + "." + fieldName;

3058 declareUniform(fieldType, uniformName, fieldRegisterIndex, blockId, encoder);

3059 fieldRegisterIndex += fieldType.totalRegisterCount();

3063 encoder->exitAggregateType();

3068 return registerIndex;

3071 GLenum OutputASM::glVariableType(const TType &type)
\r
3073 switch(type.getBasicType())
\r
3076 if(type.isScalar())
\r
3080 else if(type.isVector())
\r
3082 switch(type.getNominalSize())
\r
3084 case 2: return GL_FLOAT_VEC2;
\r
3085 case 3: return GL_FLOAT_VEC3;
\r
3086 case 4: return GL_FLOAT_VEC4;
\r
3087 default: UNREACHABLE(type.getNominalSize());
\r
3090 else if(type.isMatrix())
\r
3092 switch(type.getNominalSize())
\r
3095 switch(type.getSecondarySize())
\r
3097 case 2: return GL_FLOAT_MAT2;
\r
3098 case 3: return GL_FLOAT_MAT2x3;
\r
3099 case 4: return GL_FLOAT_MAT2x4;
\r
3100 default: UNREACHABLE(type.getSecondarySize());
\r
3103 switch(type.getSecondarySize())
\r
3105 case 2: return GL_FLOAT_MAT3x2;
\r
3106 case 3: return GL_FLOAT_MAT3;
\r
3107 case 4: return GL_FLOAT_MAT3x4;
\r
3108 default: UNREACHABLE(type.getSecondarySize());
\r
3111 switch(type.getSecondarySize())
\r
3113 case 2: return GL_FLOAT_MAT4x2;
\r
3114 case 3: return GL_FLOAT_MAT4x3;
\r
3115 case 4: return GL_FLOAT_MAT4;
\r
3116 default: UNREACHABLE(type.getSecondarySize());
\r
3118 default: UNREACHABLE(type.getNominalSize());
\r
3121 else UNREACHABLE(0);
\r
3124 if(type.isScalar())
\r
3128 else if(type.isVector())
\r
3130 switch(type.getNominalSize())
\r
3132 case 2: return GL_INT_VEC2;
\r
3133 case 3: return GL_INT_VEC3;
\r
3134 case 4: return GL_INT_VEC4;
\r
3135 default: UNREACHABLE(type.getNominalSize());
\r
3138 else UNREACHABLE(0);
\r
3141 if(type.isScalar())
\r
3143 return GL_UNSIGNED_INT;
\r
3145 else if(type.isVector())
\r
3147 switch(type.getNominalSize())
\r
3149 case 2: return GL_UNSIGNED_INT_VEC2;
\r
3150 case 3: return GL_UNSIGNED_INT_VEC3;
\r
3151 case 4: return GL_UNSIGNED_INT_VEC4;
\r
3152 default: UNREACHABLE(type.getNominalSize());
\r
3155 else UNREACHABLE(0);
\r
3158 if(type.isScalar())
\r
3162 else if(type.isVector())
\r
3164 switch(type.getNominalSize())
\r
3166 case 2: return GL_BOOL_VEC2;
\r
3167 case 3: return GL_BOOL_VEC3;
\r
3168 case 4: return GL_BOOL_VEC4;
\r
3169 default: UNREACHABLE(type.getNominalSize());
\r
3172 else UNREACHABLE(0);
\r
3174 case EbtSampler2D:
\r
3175 return GL_SAMPLER_2D;
\r
3176 case EbtISampler2D:
\r
3177 return GL_INT_SAMPLER_2D;
\r
3178 case EbtUSampler2D:
\r
3179 return GL_UNSIGNED_INT_SAMPLER_2D;
\r
3180 case EbtSamplerCube:
\r
3181 return GL_SAMPLER_CUBE;
\r
3182 case EbtISamplerCube:
\r
3183 return GL_INT_SAMPLER_CUBE;
\r
3184 case EbtUSamplerCube:
\r
3185 return GL_UNSIGNED_INT_SAMPLER_CUBE;
\r
3186 case EbtSamplerExternalOES:
\r
3187 return GL_SAMPLER_EXTERNAL_OES;
\r
3188 case EbtSampler3D:
\r
3189 return GL_SAMPLER_3D_OES;
\r
3190 case EbtISampler3D:
\r
3191 return GL_INT_SAMPLER_3D;
\r
3192 case EbtUSampler3D:
\r
3193 return GL_UNSIGNED_INT_SAMPLER_3D;
\r
3194 case EbtSampler2DArray:
\r
3195 return GL_SAMPLER_2D_ARRAY;
\r
3196 case EbtISampler2DArray:
\r
3197 return GL_INT_SAMPLER_2D_ARRAY;
\r
3198 case EbtUSampler2DArray:
\r
3199 return GL_UNSIGNED_INT_SAMPLER_2D_ARRAY;
\r
3200 case EbtSampler2DShadow:
\r
3201 return GL_SAMPLER_2D_SHADOW;
\r
3202 case EbtSamplerCubeShadow:
\r
3203 return GL_SAMPLER_CUBE_SHADOW;
\r
3204 case EbtSampler2DArrayShadow:
\r
3205 return GL_SAMPLER_2D_ARRAY_SHADOW;
\r
3207 UNREACHABLE(type.getBasicType());
\r
3214 GLenum OutputASM::glVariablePrecision(const TType &type)
\r
3216 if(type.getBasicType() == EbtFloat)
\r
3218 switch(type.getPrecision())
\r
3220 case EbpHigh: return GL_HIGH_FLOAT;
\r
3221 case EbpMedium: return GL_MEDIUM_FLOAT;
\r
3222 case EbpLow: return GL_LOW_FLOAT;
\r
3223 case EbpUndefined:
\r
3224 // Should be defined as the default precision by the parser
\r
3225 default: UNREACHABLE(type.getPrecision());
\r
3228 else if(type.getBasicType() == EbtInt)
\r
3230 switch(type.getPrecision())
\r
3232 case EbpHigh: return GL_HIGH_INT;
\r
3233 case EbpMedium: return GL_MEDIUM_INT;
\r
3234 case EbpLow: return GL_LOW_INT;
\r
3235 case EbpUndefined:
\r
3236 // Should be defined as the default precision by the parser
\r
3237 default: UNREACHABLE(type.getPrecision());
\r
3241 // Other types (boolean, sampler) don't have a precision
\r
3245 int OutputASM::dim(TIntermNode *v)
\r
3247 TIntermTyped *vector = v->getAsTyped();
\r
3248 ASSERT(vector && vector->isRegister());
\r
3249 return vector->getNominalSize();
\r
3252 int OutputASM::dim2(TIntermNode *m)
\r
3254 TIntermTyped *matrix = m->getAsTyped();
\r
3255 ASSERT(matrix && matrix->isMatrix() && !matrix->isArray());
\r
3256 return matrix->getSecondarySize();
\r
3259 // Returns ~0u if no loop count could be determined

// NOTE(review): this dump is missing structural lines (braces, null checks,
// the `index =` / `limit`/`initial` declarations, switch openers, and some
// returns); the leading numbers on each line are extraction artifacts.
3260 unsigned int OutputASM::loopCount(TIntermLoop *node)

3262 // Parse loops of the form:

3263 // for(int index = initial; index [comparator] limit; index += increment)

3264 TIntermSymbol *index = 0;

3265 TOperator comparator = EOpNull;

3268 int increment = 0;

3270 // Parse index name and initial value

3271 if(node->getInit())

3273 TIntermAggregate *init = node->getInit()->getAsAggregate();

3277 TIntermSequence &sequence = init->getSequence();

3278 TIntermTyped *variable = sequence[0]->getAsTyped();

3280 if(variable && variable->getQualifier() == EvqTemporary)

3282 TIntermBinary *assign = variable->getAsBinaryNode();

3284 if(assign->getOp() == EOpInitialize)

3286 TIntermSymbol *symbol = assign->getLeft()->getAsSymbolNode();

3287 TIntermConstantUnion *constant = assign->getRight()->getAsConstantUnion();

3289 if(symbol && constant)

// Only a scalar integer initializer makes the loop analyzable.
3291 if(constant->getBasicType() == EbtInt && constant->getNominalSize() == 1)

3294 initial = constant->getUnionArrayPointer()[0].getIConst();

3302 // Parse comparator and limit value

3303 if(index && node->getCondition())

3305 TIntermBinary *test = node->getCondition()->getAsBinaryNode();

// The condition's left side must be the same loop index parsed above.
3307 if(test && test->getLeft()->getAsSymbolNode()->getId() == index->getId())

3309 TIntermConstantUnion *constant = test->getRight()->getAsConstantUnion();

3313 if(constant->getBasicType() == EbtInt && constant->getNominalSize() == 1)

3315 comparator = test->getOp();

3316 limit = constant->getUnionArrayPointer()[0].getIConst();

3322 // Parse increment

3323 if(index && comparator != EOpNull && node->getExpression())

3325 TIntermBinary *binaryTerminal = node->getExpression()->getAsBinaryNode();

3326 TIntermUnary *unaryTerminal = node->getExpression()->getAsUnaryNode();

// Increment expressed as `index += c` / `index -= c`.
3328 if(binaryTerminal)

3330 TOperator op = binaryTerminal->getOp();

3331 TIntermConstantUnion *constant = binaryTerminal->getRight()->getAsConstantUnion();

3335 if(constant->getBasicType() == EbtInt && constant->getNominalSize() == 1)

3337 int value = constant->getUnionArrayPointer()[0].getIConst();

3341 case EOpAddAssign: increment = value; break;

3342 case EOpSubAssign: increment = -value; break;

3343 default: UNIMPLEMENTED();

// Increment expressed as `index++` / `index--` / `++index` / `--index`.
3348 else if(unaryTerminal)

3350 TOperator op = unaryTerminal->getOp();

3354 case EOpPostIncrement: increment = 1; break;

3355 case EOpPostDecrement: increment = -1; break;

3356 case EOpPreIncrement: increment = 1; break;

3357 case EOpPreDecrement: increment = -1; break;

3358 default: UNIMPLEMENTED();

// All three clauses understood: compute the trip count.
3363 if(index && comparator != EOpNull && increment != 0)

// Normalize `<=` to `<` (presumably by bumping the limit on a missing line).
3365 if(comparator == EOpLessThanEqual)

3367 comparator = EOpLessThan;

3371 if(comparator == EOpLessThan)

3373 int iterations = (limit - initial) / increment;

// A non-positive count means the loop body never executes.
3375 if(iterations <= 0)

3380 return iterations;

3382 else UNIMPLEMENTED(); // Falls through

// Entry point: resets the detection state, walks the subtree, and reports
// whether a loop discontinuity (a branch affecting loop control flow) was found.
// NOTE(review): the extraction is missing lines 3389-3392 (braces and likely
// additional state reset); the leading numbers are dump artifacts.
3388 bool DetectLoopDiscontinuity::traverse(TIntermNode *node)

3391 loopDiscontinuity = false;

3393 node->traverse(this);

3395 return loopDiscontinuity;

// Loop pre/post visit hook. The bodies of both branches are missing from this
// extraction (lines 3401-3410); presumably they track the current loop nesting
// depth on entry and exit — TODO confirm against the full source.
3398 bool DetectLoopDiscontinuity::visitLoop(Visit visit, TIntermLoop *loop)

3400 if(visit == PreVisit)

3404 else if(visit == PostVisit)

// Marks branch statements as loop discontinuities and stops traversal once one
// has been found. The case labels for the flow ops that set the flag are on
// missing lines 3425-3430 (presumably kill/break/continue/return — TODO confirm);
// the leading numbers are dump artifacts.
3412 bool DetectLoopDiscontinuity::visitBranch(Visit visit, TIntermBranch *node)

// Already found one; no need to inspect further branches.
3414 if(loopDiscontinuity)

3424 switch(node->getFlowOp())

3431 loopDiscontinuity = true;

3433 default: UNREACHABLE(node->getFlowOp());

// Returning false stops traversal as soon as a discontinuity is flagged.
3436 return !loopDiscontinuity;

3439 bool DetectLoopDiscontinuity::visitAggregate(Visit visit, TIntermAggregate *node)
\r
3441 return !loopDiscontinuity;
\r