1 // SwiftShader Software Renderer
\r
3 // Copyright(c) 2005-2013 TransGaming Inc.
\r
5 // All rights reserved. No part of this software may be copied, distributed, transmitted,
\r
6 // transcribed, stored in a retrieval system, translated into any human or computer
\r
7 // language by any means, or disclosed to third parties without the explicit written
\r
8 // agreement of TransGaming Inc. Without such an agreement, no rights or licenses, express
\r
9 // or implied, including but not limited to any patent rights, are granted to you.
\r
12 #include "OutputASM.h"
\r
13 #include "Common/Math.hpp"
\r
15 #include "common/debug.h"
\r
16 #include "InfoSink.h"
\r
18 #include "libGLESv2/Shader.h"
\r
20 #include <GLES2/gl2.h>
\r
21 #include <GLES2/gl2ext.h>
\r
22 #include <GLES3/gl3.h>
\r
26 // Integer to TString conversion
\r
30 sprintf(buffer, "%d", i);
\r
34 class Temporary : public TIntermSymbol
\r
37 Temporary(OutputASM *assembler) : TIntermSymbol(TSymbolTableLevel::nextUniqueId(), "tmp", TType(EbtFloat, EbpHigh, EvqTemporary, 4, 1, false)), assembler(assembler)
\r
43 assembler->freeTemporary(this);
\r
47 OutputASM *const assembler;
\r
50 class Constant : public TIntermConstantUnion
\r
53 Constant(float x, float y, float z, float w) : TIntermConstantUnion(constants, TType(EbtFloat, EbpHigh, EvqConstExpr, 4, 1, false))
\r
55 constants[0].setFConst(x);
\r
56 constants[1].setFConst(y);
\r
57 constants[2].setFConst(z);
\r
58 constants[3].setFConst(w);
\r
61 Constant(bool b) : TIntermConstantUnion(constants, TType(EbtBool, EbpHigh, EvqConstExpr, 1, 1, false))
\r
63 constants[0].setBConst(b);
\r
66 Constant(int i) : TIntermConstantUnion(constants, TType(EbtInt, EbpHigh, EvqConstExpr, 1, 1, false))
\r
68 constants[0].setIConst(i);
\r
76 ConstantUnion constants[4];
\r
79 Uniform::Uniform(GLenum type, GLenum precision, const std::string &name, int arraySize, int registerIndex, int blockId, const BlockMemberInfo& blockMemberInfo) :
\r
80 type(type), precision(precision), name(name), arraySize(arraySize), registerIndex(registerIndex), blockId(blockId), blockInfo(blockMemberInfo)
\r
84 UniformBlock::UniformBlock(const std::string& name, unsigned int dataSize, unsigned int arraySize,
\r
85 TLayoutBlockStorage layout, bool isRowMajorLayout, int registerIndex, int blockId) :
\r
86 name(name), dataSize(dataSize), arraySize(arraySize), layout(layout),
\r
87 isRowMajorLayout(isRowMajorLayout), registerIndex(registerIndex), blockId(blockId)
\r
91 BlockLayoutEncoder::BlockLayoutEncoder(bool rowMajor)
\r
92 : mCurrentOffset(0), isRowMajor(rowMajor)
\r
96 BlockMemberInfo BlockLayoutEncoder::encodeType(const TType &type)
\r
101 getBlockLayoutInfo(type, type.getArraySize(), isRowMajor, &arrayStride, &matrixStride);
\r
103 const BlockMemberInfo memberInfo(static_cast<int>(mCurrentOffset * BytesPerComponent),
\r
104 static_cast<int>(arrayStride * BytesPerComponent),
\r
105 static_cast<int>(matrixStride * BytesPerComponent),
\r
106 (matrixStride > 0) && isRowMajor);
\r
108 advanceOffset(type, type.getArraySize(), isRowMajor, arrayStride, matrixStride);
\r
114 size_t BlockLayoutEncoder::getBlockRegister(const BlockMemberInfo &info)
\r
116 return (info.offset / BytesPerComponent) / ComponentsPerRegister;
\r
120 size_t BlockLayoutEncoder::getBlockRegisterElement(const BlockMemberInfo &info)
\r
122 return (info.offset / BytesPerComponent) % ComponentsPerRegister;
\r
125 void BlockLayoutEncoder::nextRegister()
\r
127 mCurrentOffset = sw::align(mCurrentOffset, ComponentsPerRegister);
\r
130 Std140BlockEncoder::Std140BlockEncoder(bool rowMajor) : BlockLayoutEncoder(rowMajor)
\r
134 void Std140BlockEncoder::enterAggregateType()
\r
139 void Std140BlockEncoder::exitAggregateType()
\r
144 void Std140BlockEncoder::getBlockLayoutInfo(const TType &type, unsigned int arraySize, bool isRowMajorMatrix, int *arrayStrideOut, int *matrixStrideOut)
\r
146 size_t baseAlignment = 0;
\r
147 int matrixStride = 0;
\r
148 int arrayStride = 0;
\r
150 if(type.isMatrix())
\r
152 baseAlignment = ComponentsPerRegister;
\r
153 matrixStride = ComponentsPerRegister;
\r
157 const int numRegisters = isRowMajorMatrix ? type.getSecondarySize() : type.getNominalSize();
\r
158 arrayStride = ComponentsPerRegister * numRegisters;
\r
161 else if(arraySize > 0)
\r
163 baseAlignment = ComponentsPerRegister;
\r
164 arrayStride = ComponentsPerRegister;
\r
168 const int numComponents = type.getElementSize();
\r
169 baseAlignment = (numComponents == 3 ? 4u : static_cast<size_t>(numComponents));
\r
172 mCurrentOffset = sw::align(mCurrentOffset, baseAlignment);
\r
174 *matrixStrideOut = matrixStride;
\r
175 *arrayStrideOut = arrayStride;
\r
178 void Std140BlockEncoder::advanceOffset(const TType &type, unsigned int arraySize, bool isRowMajorMatrix, int arrayStride, int matrixStride)
\r
182 mCurrentOffset += arrayStride * arraySize;
\r
184 else if(type.isMatrix())
\r
186 ASSERT(matrixStride == ComponentsPerRegister);
\r
187 const int numRegisters = isRowMajorMatrix ? type.getSecondarySize() : type.getNominalSize();
\r
188 mCurrentOffset += ComponentsPerRegister * numRegisters;
\r
192 mCurrentOffset += type.getElementSize();
\r
196 Attribute::Attribute()
\r
203 Attribute::Attribute(GLenum type, const std::string &name, int arraySize, int location, int registerIndex)
\r
207 this->arraySize = arraySize;
\r
208 this->location = location;
\r
209 this->registerIndex = registerIndex;
\r
212 sw::PixelShader *Shader::getPixelShader() const
\r
217 sw::VertexShader *Shader::getVertexShader() const
\r
222 OutputASM::TextureFunction::TextureFunction(const TString& nodeName) : method(IMPLICIT), proj(false), offset(false)
\r
224 TString name = TFunction::unmangleName(nodeName);
\r
226 if(name == "texture2D" || name == "textureCube" || name == "texture" || name == "texture3D")
\r
230 else if(name == "texture2DProj" || name == "textureProj")
\r
235 else if(name == "texture2DLod" || name == "textureCubeLod" || name == "textureLod")
\r
239 else if(name == "texture2DProjLod" || name == "textureProjLod")
\r
244 else if(name == "textureSize")
\r
248 else if(name == "textureOffset")
\r
253 else if(name == "textureProjOffset")
\r
259 else if(name == "textureLodOffset")
\r
264 else if(name == "textureProjLodOffset")
\r
270 else if(name == "texelFetch")
\r
274 else if(name == "texelFetchOffset")
\r
279 else if(name == "textureGrad")
\r
283 else if(name == "textureGradOffset")
\r
288 else if(name == "textureProjGrad")
\r
293 else if(name == "textureProjGradOffset")
\r
299 else UNREACHABLE(0);
\r
302 OutputASM::OutputASM(TParseContext &context, Shader *shaderObject) : TIntermTraverser(true, true, true), shaderObject(shaderObject), mContext(context)
\r
310 shader = shaderObject->getShader();
\r
311 pixelShader = shaderObject->getPixelShader();
\r
312 vertexShader = shaderObject->getVertexShader();
\r
315 functionArray.push_back(Function(0, "main(", 0, 0));
\r
316 currentFunction = 0;
\r
317 outputQualifier = EvqOutput; // Set outputQualifier to any value other than EvqFragColor or EvqFragData
\r
320 OutputASM::~OutputASM()
\r
324 void OutputASM::output()
\r
328 emitShader(GLOBAL);
\r
330 if(functionArray.size() > 1) // Only call main() when there are other functions
\r
332 Instruction *callMain = emit(sw::Shader::OPCODE_CALL);
\r
333 callMain->dst.type = sw::Shader::PARAMETER_LABEL;
\r
334 callMain->dst.index = 0; // main()
\r
336 emit(sw::Shader::OPCODE_RET);
\r
339 emitShader(FUNCTION);
\r
343 void OutputASM::emitShader(Scope scope)
\r
346 currentScope = GLOBAL;
\r
347 mContext.getTreeRoot()->traverse(this);
\r
350 void OutputASM::freeTemporary(Temporary *temporary)
\r
352 free(temporaries, temporary);
\r
355 sw::Shader::Opcode OutputASM::getOpcode(sw::Shader::Opcode op, TIntermTyped *in) const
\r
357 TBasicType baseType = in->getType().getBasicType();
\r
361 case sw::Shader::OPCODE_NEG:
\r
366 return sw::Shader::OPCODE_INEG;
\r
371 case sw::Shader::OPCODE_ABS:
\r
375 return sw::Shader::OPCODE_IABS;
\r
380 case sw::Shader::OPCODE_SGN:
\r
384 return sw::Shader::OPCODE_ISGN;
\r
389 case sw::Shader::OPCODE_ADD:
\r
394 return sw::Shader::OPCODE_IADD;
\r
399 case sw::Shader::OPCODE_SUB:
\r
404 return sw::Shader::OPCODE_ISUB;
\r
409 case sw::Shader::OPCODE_MUL:
\r
414 return sw::Shader::OPCODE_IMUL;
\r
419 case sw::Shader::OPCODE_DIV:
\r
423 return sw::Shader::OPCODE_IDIV;
\r
425 return sw::Shader::OPCODE_UDIV;
\r
430 case sw::Shader::OPCODE_IMOD:
\r
431 return baseType == EbtUInt ? sw::Shader::OPCODE_UMOD : op;
\r
432 case sw::Shader::OPCODE_ISHR:
\r
433 return baseType == EbtUInt ? sw::Shader::OPCODE_USHR : op;
\r
434 case sw::Shader::OPCODE_MIN:
\r
438 return sw::Shader::OPCODE_IMIN;
\r
440 return sw::Shader::OPCODE_UMIN;
\r
445 case sw::Shader::OPCODE_MAX:
\r
449 return sw::Shader::OPCODE_IMAX;
\r
451 return sw::Shader::OPCODE_UMAX;
\r
461 void OutputASM::visitSymbol(TIntermSymbol *symbol)
\r
463 // Vertex varyings don't have to be actively used to successfully link
\r
464 // against pixel shaders that use them. So make sure they're declared.
\r
465 if(symbol->getQualifier() == EvqVaryingOut || symbol->getQualifier() == EvqInvariantVaryingOut || symbol->getQualifier() == EvqVertexOut)
\r
467 if(symbol->getBasicType() != EbtInvariant) // Typeless declarations are not new varyings
\r
469 declareVarying(symbol, -1);
\r
473 TInterfaceBlock* block = symbol->getType().getInterfaceBlock();
\r
474 // OpenGL ES 3.0.4 spec, section 2.12.6 Uniform Variables:
\r
475 // "All members of a named uniform block declared with a shared or std140 layout qualifier
\r
476 // are considered active, even if they are not referenced in any shader in the program.
\r
477 // The uniform block itself is also considered active, even if no member of the block is referenced."
\r
478 if(block && ((block->blockStorage() == EbsShared) || (block->blockStorage() == EbsStd140)))
\r
480 uniformRegister(symbol);
\r
484 bool OutputASM::visitBinary(Visit visit, TIntermBinary *node)
\r
486 if(currentScope != emitScope)
\r
491 TIntermTyped *result = node;
\r
492 TIntermTyped *left = node->getLeft();
\r
493 TIntermTyped *right = node->getRight();
\r
494 const TType &leftType = left->getType();
\r
495 const TType &rightType = right->getType();
\r
496 const TType &resultType = node->getType();
\r
498 switch(node->getOp())
\r
501 if(visit == PostVisit)
\r
503 assignLvalue(left, right);
\r
504 copy(result, right);
\r
507 case EOpInitialize:
\r
508 if(visit == PostVisit)
\r
513 case EOpMatrixTimesScalarAssign:
\r
514 if(visit == PostVisit)
\r
516 for(int i = 0; i < leftType.getNominalSize(); i++)
\r
518 emit(sw::Shader::OPCODE_MUL, result, i, left, i, right);
\r
521 assignLvalue(left, result);
\r
524 case EOpVectorTimesMatrixAssign:
\r
525 if(visit == PostVisit)
\r
527 int size = leftType.getNominalSize();
\r
529 for(int i = 0; i < size; i++)
\r
531 Instruction *dot = emit(sw::Shader::OPCODE_DP(size), result, 0, left, 0, right, i);
\r
532 dot->dst.mask = 1 << i;
\r
535 assignLvalue(left, result);
\r
538 case EOpMatrixTimesMatrixAssign:
\r
539 if(visit == PostVisit)
\r
541 int dim = leftType.getNominalSize();
\r
543 for(int i = 0; i < dim; i++)
\r
545 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, i, left, 0, right, i);
\r
546 mul->src[1].swizzle = 0x00;
\r
548 for(int j = 1; j < dim; j++)
\r
550 Instruction *mad = emit(sw::Shader::OPCODE_MAD, result, i, left, j, right, i, result, i);
\r
551 mad->src[1].swizzle = j * 0x55;
\r
555 assignLvalue(left, result);
\r
558 case EOpIndexDirect:
\r
559 if(visit == PostVisit)
\r
561 int index = right->getAsConstantUnion()->getIConst(0);
\r
563 if(result->isMatrix() || result->isStruct() || result->isInterfaceBlock())
\r
565 ASSERT(left->isArray());
\r
566 copy(result, left, index * left->elementRegisterCount());
\r
568 else if(result->isRegister())
\r
571 if(left->isRegister())
\r
575 else if(left->isArray())
\r
577 srcIndex = index * left->elementRegisterCount();
\r
579 else if(left->isMatrix())
\r
581 ASSERT(index < left->getNominalSize()); // FIXME: Report semantic error
\r
584 else UNREACHABLE(0);
\r
586 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, 0, left, srcIndex);
\r
588 if(left->isRegister())
\r
590 mov->src[0].swizzle = index;
\r
593 else UNREACHABLE(0);
\r
596 case EOpIndexIndirect:
\r
597 if(visit == PostVisit)
\r
599 if(left->isArray() || left->isMatrix())
\r
601 for(int index = 0; index < result->totalRegisterCount(); index++)
\r
603 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, index, left, index);
\r
604 mov->dst.mask = writeMask(result, index);
\r
606 if(left->totalRegisterCount() > 1)
\r
608 sw::Shader::SourceParameter relativeRegister;
\r
609 argument(relativeRegister, right);
\r
611 mov->src[0].rel.type = relativeRegister.type;
\r
612 mov->src[0].rel.index = relativeRegister.index;
\r
613 mov->src[0].rel.scale = result->totalRegisterCount();
\r
614 mov->src[0].rel.deterministic = !(vertexShader && left->getQualifier() == EvqUniform);
\r
618 else if(left->isRegister())
\r
620 emit(sw::Shader::OPCODE_EXTRACT, result, left, right);
\r
622 else UNREACHABLE(0);
\r
625 case EOpIndexDirectStruct:
\r
626 case EOpIndexDirectInterfaceBlock:
\r
627 if(visit == PostVisit)
\r
629 ASSERT(leftType.isStruct() || (leftType.isInterfaceBlock()));
\r
631 const TFieldList& fields = (node->getOp() == EOpIndexDirectStruct) ?
\r
632 leftType.getStruct()->fields() :
\r
633 leftType.getInterfaceBlock()->fields();
\r
634 int index = right->getAsConstantUnion()->getIConst(0);
\r
635 int fieldOffset = 0;
\r
637 for(int i = 0; i < index; i++)
\r
639 fieldOffset += fields[i]->type()->totalRegisterCount();
\r
642 copy(result, left, fieldOffset);
\r
645 case EOpVectorSwizzle:
\r
646 if(visit == PostVisit)
\r
649 TIntermAggregate *components = right->getAsAggregate();
\r
653 TIntermSequence &sequence = components->getSequence();
\r
656 for(TIntermSequence::iterator sit = sequence.begin(); sit != sequence.end(); sit++)
\r
658 TIntermConstantUnion *element = (*sit)->getAsConstantUnion();
\r
662 int i = element->getUnionArrayPointer()[0].getIConst();
\r
663 swizzle |= i << (component * 2);
\r
666 else UNREACHABLE(0);
\r
669 else UNREACHABLE(0);
\r
671 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, left);
\r
672 mov->src[0].swizzle = swizzle;
\r
675 case EOpAddAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_ADD, result), result, left, left, right); break;
\r
676 case EOpAdd: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_ADD, result), result, left, right); break;
\r
677 case EOpSubAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_SUB, result), result, left, left, right); break;
\r
678 case EOpSub: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_SUB, result), result, left, right); break;
\r
679 case EOpMulAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_MUL, result), result, left, left, right); break;
\r
680 case EOpMul: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_MUL, result), result, left, right); break;
\r
681 case EOpDivAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_DIV, result), result, left, left, right); break;
\r
682 case EOpDiv: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_DIV, result), result, left, right); break;
\r
683 case EOpIModAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_IMOD, result), result, left, left, right); break;
\r
684 case EOpIMod: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_IMOD, result), result, left, right); break;
\r
685 case EOpBitShiftLeftAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_SHL, result, left, left, right); break;
\r
686 case EOpBitShiftLeft: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_SHL, result, left, right); break;
\r
687 case EOpBitShiftRightAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_ISHR, result), result, left, left, right); break;
\r
688 case EOpBitShiftRight: if(visit == PostVisit) emitBinary(getOpcode(sw::Shader::OPCODE_ISHR, result), result, left, right); break;
\r
689 case EOpBitwiseAndAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_AND, result, left, left, right); break;
\r
690 case EOpBitwiseAnd: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_AND, result, left, right); break;
\r
691 case EOpBitwiseXorAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_XOR, result, left, left, right); break;
\r
692 case EOpBitwiseXor: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_XOR, result, left, right); break;
\r
693 case EOpBitwiseOrAssign: if(visit == PostVisit) emitAssign(sw::Shader::OPCODE_OR, result, left, left, right); break;
\r
694 case EOpBitwiseOr: if(visit == PostVisit) emitBinary(sw::Shader::OPCODE_OR, result, left, right); break;
\r
696 if(visit == PostVisit)
\r
698 emitBinary(sw::Shader::OPCODE_EQ, result, left, right);
\r
700 for(int index = 1; index < left->totalRegisterCount(); index++)
\r
702 Temporary equal(this);
\r
703 emit(sw::Shader::OPCODE_EQ, &equal, 0, left, index, right, index);
\r
704 emit(sw::Shader::OPCODE_AND, result, result, &equal);
\r
709 if(visit == PostVisit)
\r
711 emitBinary(sw::Shader::OPCODE_NE, result, left, right);
\r
713 for(int index = 1; index < left->totalRegisterCount(); index++)
\r
715 Temporary notEqual(this);
\r
716 emit(sw::Shader::OPCODE_NE, ¬Equal, 0, left, index, right, index);
\r
717 emit(sw::Shader::OPCODE_OR, result, result, ¬Equal);
\r
721 case EOpLessThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LT, result, left, right); break;
\r
722 case EOpGreaterThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GT, result, left, right); break;
\r
723 case EOpLessThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LE, result, left, right); break;
\r
724 case EOpGreaterThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GE, result, left, right); break;
\r
725 case EOpVectorTimesScalarAssign: if(visit == PostVisit) emitAssign(getOpcode(sw::Shader::OPCODE_MUL, left), result, left, left, right); break;
\r
726 case EOpVectorTimesScalar: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_MUL, left), result, left, right); break;
\r
727 case EOpMatrixTimesScalar:
\r
728 if(visit == PostVisit)
\r
730 for(int i = 0; i < leftType.getNominalSize(); i++)
\r
732 emit(sw::Shader::OPCODE_MUL, result, i, left, i, right);
\r
736 case EOpVectorTimesMatrix:
\r
737 if(visit == PostVisit)
\r
739 sw::Shader::Opcode dpOpcode = sw::Shader::OPCODE_DP(leftType.getNominalSize());
\r
741 int size = rightType.getNominalSize();
\r
742 for(int i = 0; i < size; i++)
\r
744 Instruction *dot = emit(dpOpcode, result, 0, left, 0, right, i);
\r
745 dot->dst.mask = 1 << i;
\r
749 case EOpMatrixTimesVector:
\r
750 if(visit == PostVisit)
\r
752 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, left, right);
\r
753 mul->src[1].swizzle = 0x00;
\r
755 int size = rightType.getNominalSize();
\r
756 for(int i = 1; i < size; i++)
\r
758 Instruction *mad = emit(sw::Shader::OPCODE_MAD, result, 0, left, i, right, 0, result);
\r
759 mad->src[1].swizzle = i * 0x55;
\r
763 case EOpMatrixTimesMatrix:
\r
764 if(visit == PostVisit)
\r
766 int dim = leftType.getNominalSize();
\r
768 int size = rightType.getNominalSize();
\r
769 for(int i = 0; i < size; i++)
\r
771 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, i, left, 0, right, i);
\r
772 mul->src[1].swizzle = 0x00;
\r
774 for(int j = 1; j < dim; j++)
\r
776 Instruction *mad = emit(sw::Shader::OPCODE_MAD, result, i, left, j, right, i, result, i);
\r
777 mad->src[1].swizzle = j * 0x55;
\r
783 if(trivial(right, 6))
\r
785 if(visit == PostVisit)
\r
787 emit(sw::Shader::OPCODE_OR, result, left, right);
\r
790 else // Short-circuit evaluation
\r
792 if(visit == InVisit)
\r
794 emit(sw::Shader::OPCODE_MOV, result, left);
\r
795 Instruction *ifnot = emit(sw::Shader::OPCODE_IF, 0, result);
\r
796 ifnot->src[0].modifier = sw::Shader::MODIFIER_NOT;
\r
798 else if(visit == PostVisit)
\r
800 emit(sw::Shader::OPCODE_MOV, result, right);
\r
801 emit(sw::Shader::OPCODE_ENDIF);
\r
805 case EOpLogicalXor: if(visit == PostVisit) emit(sw::Shader::OPCODE_XOR, result, left, right); break;
\r
806 case EOpLogicalAnd:
\r
807 if(trivial(right, 6))
\r
809 if(visit == PostVisit)
\r
811 emit(sw::Shader::OPCODE_AND, result, left, right);
\r
814 else // Short-circuit evaluation
\r
816 if(visit == InVisit)
\r
818 emit(sw::Shader::OPCODE_MOV, result, left);
\r
819 emit(sw::Shader::OPCODE_IF, 0, result);
\r
821 else if(visit == PostVisit)
\r
823 emit(sw::Shader::OPCODE_MOV, result, right);
\r
824 emit(sw::Shader::OPCODE_ENDIF);
\r
828 default: UNREACHABLE(node->getOp());
\r
834 void OutputASM::emitDeterminant(TIntermTyped *result, TIntermTyped *arg, int size, int col, int row, int outCol, int outRow)
\r
838 case 1: // Used for cofactor computation only
\r
840 // For a 2x2 matrix, the cofactor is simply a transposed move or negate
\r
841 bool isMov = (row == col);
\r
842 sw::Shader::Opcode op = isMov ? sw::Shader::OPCODE_MOV : sw::Shader::OPCODE_NEG;
\r
843 Instruction *mov = emit(op, result, outCol, arg, isMov ? 1 - row : row);
\r
844 mov->src[0].swizzle = 0x55 * (isMov ? 1 - col : col);
\r
845 mov->dst.mask = 1 << outRow;
\r
850 static const unsigned int swizzle[3] = { 0x99, 0x88, 0x44 }; // xy?? : yzyz, xzxz, xyxy
\r
852 bool isCofactor = (col >= 0) && (row >= 0);
\r
853 int col0 = (isCofactor && (col <= 0)) ? 1 : 0;
\r
854 int col1 = (isCofactor && (col <= 1)) ? 2 : 1;
\r
855 bool negate = isCofactor && ((col & 0x01) ^ (row & 0x01));
\r
857 Instruction *det = emit(sw::Shader::OPCODE_DET2, result, outCol, arg, negate ? col1 : col0, arg, negate ? col0 : col1);
\r
858 det->src[0].swizzle = det->src[1].swizzle = swizzle[isCofactor ? row : 2];
\r
859 det->dst.mask = 1 << outRow;
\r
864 static const unsigned int swizzle[4] = { 0xF9, 0xF8, 0xF4, 0xE4 }; // xyz? : yzww, xzww, xyww, xyzw
\r
866 bool isCofactor = (col >= 0) && (row >= 0);
\r
867 int col0 = (isCofactor && (col <= 0)) ? 1 : 0;
\r
868 int col1 = (isCofactor && (col <= 1)) ? 2 : 1;
\r
869 int col2 = (isCofactor && (col <= 2)) ? 3 : 2;
\r
870 bool negate = isCofactor && ((col & 0x01) ^ (row & 0x01));
\r
872 Instruction *det = emit(sw::Shader::OPCODE_DET3, result, outCol, arg, col0, arg, negate ? col2 : col1, arg, negate ? col1 : col2);
\r
873 det->src[0].swizzle = det->src[1].swizzle = det->src[2].swizzle = swizzle[isCofactor ? row : 3];
\r
874 det->dst.mask = 1 << outRow;
\r
879 Instruction *det = emit(sw::Shader::OPCODE_DET4, result, outCol, arg, 0, arg, 1, arg, 2, arg, 3);
\r
880 det->dst.mask = 1 << outRow;
\r
889 bool OutputASM::visitUnary(Visit visit, TIntermUnary *node)
\r
891 if(currentScope != emitScope)
\r
896 TIntermTyped *result = node;
\r
897 TIntermTyped *arg = node->getOperand();
\r
898 TBasicType basicType = arg->getType().getBasicType();
\r
906 if(basicType == EbtInt || basicType == EbtUInt)
\r
912 one_value.f = 1.0f;
\r
915 Constant one(one_value.f, one_value.f, one_value.f, one_value.f);
\r
916 Constant rad(1.74532925e-2f, 1.74532925e-2f, 1.74532925e-2f, 1.74532925e-2f);
\r
917 Constant deg(5.72957795e+1f, 5.72957795e+1f, 5.72957795e+1f, 5.72957795e+1f);
\r
919 switch(node->getOp())
\r
922 if(visit == PostVisit)
\r
924 sw::Shader::Opcode negOpcode = getOpcode(sw::Shader::OPCODE_NEG, arg);
\r
925 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
927 emit(negOpcode, result, index, arg, index);
\r
931 case EOpVectorLogicalNot: if(visit == PostVisit) emit(sw::Shader::OPCODE_NOT, result, arg); break;
\r
932 case EOpLogicalNot: if(visit == PostVisit) emit(sw::Shader::OPCODE_NOT, result, arg); break;
\r
933 case EOpPostIncrement:
\r
934 if(visit == PostVisit)
\r
938 sw::Shader::Opcode addOpcode = getOpcode(sw::Shader::OPCODE_ADD, arg);
\r
939 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
941 emit(addOpcode, arg, index, arg, index, &one);
\r
944 assignLvalue(arg, arg);
\r
947 case EOpPostDecrement:
\r
948 if(visit == PostVisit)
\r
952 sw::Shader::Opcode subOpcode = getOpcode(sw::Shader::OPCODE_SUB, arg);
\r
953 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
955 emit(subOpcode, arg, index, arg, index, &one);
\r
958 assignLvalue(arg, arg);
\r
961 case EOpPreIncrement:
\r
962 if(visit == PostVisit)
\r
964 sw::Shader::Opcode addOpcode = getOpcode(sw::Shader::OPCODE_ADD, arg);
\r
965 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
967 emit(addOpcode, result, index, arg, index, &one);
\r
970 assignLvalue(arg, result);
\r
973 case EOpPreDecrement:
\r
974 if(visit == PostVisit)
\r
976 sw::Shader::Opcode subOpcode = getOpcode(sw::Shader::OPCODE_SUB, arg);
\r
977 for(int index = 0; index < arg->totalRegisterCount(); index++)
\r
979 emit(subOpcode, result, index, arg, index, &one);
\r
982 assignLvalue(arg, result);
\r
985 case EOpRadians: if(visit == PostVisit) emit(sw::Shader::OPCODE_MUL, result, arg, &rad); break;
\r
986 case EOpDegrees: if(visit == PostVisit) emit(sw::Shader::OPCODE_MUL, result, arg, °); break;
\r
987 case EOpSin: if(visit == PostVisit) emit(sw::Shader::OPCODE_SIN, result, arg); break;
\r
988 case EOpCos: if(visit == PostVisit) emit(sw::Shader::OPCODE_COS, result, arg); break;
\r
989 case EOpTan: if(visit == PostVisit) emit(sw::Shader::OPCODE_TAN, result, arg); break;
\r
990 case EOpAsin: if(visit == PostVisit) emit(sw::Shader::OPCODE_ASIN, result, arg); break;
\r
991 case EOpAcos: if(visit == PostVisit) emit(sw::Shader::OPCODE_ACOS, result, arg); break;
\r
992 case EOpAtan: if(visit == PostVisit) emit(sw::Shader::OPCODE_ATAN, result, arg); break;
\r
993 case EOpSinh: if(visit == PostVisit) emit(sw::Shader::OPCODE_SINH, result, arg); break;
\r
994 case EOpCosh: if(visit == PostVisit) emit(sw::Shader::OPCODE_COSH, result, arg); break;
\r
995 case EOpTanh: if(visit == PostVisit) emit(sw::Shader::OPCODE_TANH, result, arg); break;
\r
996 case EOpAsinh: if(visit == PostVisit) emit(sw::Shader::OPCODE_ASINH, result, arg); break;
\r
997 case EOpAcosh: if(visit == PostVisit) emit(sw::Shader::OPCODE_ACOSH, result, arg); break;
\r
998 case EOpAtanh: if(visit == PostVisit) emit(sw::Shader::OPCODE_ATANH, result, arg); break;
\r
999 case EOpExp: if(visit == PostVisit) emit(sw::Shader::OPCODE_EXP, result, arg); break;
\r
1000 case EOpLog: if(visit == PostVisit) emit(sw::Shader::OPCODE_LOG, result, arg); break;
\r
1001 case EOpExp2: if(visit == PostVisit) emit(sw::Shader::OPCODE_EXP2, result, arg); break;
\r
1002 case EOpLog2: if(visit == PostVisit) emit(sw::Shader::OPCODE_LOG2, result, arg); break;
\r
1003 case EOpSqrt: if(visit == PostVisit) emit(sw::Shader::OPCODE_SQRT, result, arg); break;
\r
1004 case EOpInverseSqrt: if(visit == PostVisit) emit(sw::Shader::OPCODE_RSQ, result, arg); break;
\r
1005 case EOpAbs: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_ABS, result), result, arg); break;
\r
1006 case EOpSign: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_SGN, result), result, arg); break;
\r
1007 case EOpFloor: if(visit == PostVisit) emit(sw::Shader::OPCODE_FLOOR, result, arg); break;
\r
1008 case EOpTrunc: if(visit == PostVisit) emit(sw::Shader::OPCODE_TRUNC, result, arg); break;
\r
1009 case EOpRound: if(visit == PostVisit) emit(sw::Shader::OPCODE_ROUND, result, arg); break;
\r
1010 case EOpRoundEven: if(visit == PostVisit) emit(sw::Shader::OPCODE_ROUNDEVEN, result, arg); break;
\r
1011 case EOpCeil: if(visit == PostVisit) emit(sw::Shader::OPCODE_CEIL, result, arg, result); break;
\r
1012 case EOpFract: if(visit == PostVisit) emit(sw::Shader::OPCODE_FRC, result, arg); break;
\r
1013 case EOpIsNan: if(visit == PostVisit) emit(sw::Shader::OPCODE_ISNAN, result, arg); break;
\r
1014 case EOpIsInf: if(visit == PostVisit) emit(sw::Shader::OPCODE_ISINF, result, arg); break;
\r
1015 case EOpLength: if(visit == PostVisit) emit(sw::Shader::OPCODE_LEN(dim(arg)), result, arg); break;
\r
1016 case EOpNormalize: if(visit == PostVisit) emit(sw::Shader::OPCODE_NRM(dim(arg)), result, arg); break;
\r
1017 case EOpDFdx: if(visit == PostVisit) emit(sw::Shader::OPCODE_DFDX, result, arg); break;
\r
1018 case EOpDFdy: if(visit == PostVisit) emit(sw::Shader::OPCODE_DFDY, result, arg); break;
\r
1019 case EOpFwidth: if(visit == PostVisit) emit(sw::Shader::OPCODE_FWIDTH, result, arg); break;
\r
1020 case EOpAny: if(visit == PostVisit) emit(sw::Shader::OPCODE_ANY, result, arg); break;
\r
1021 case EOpAll: if(visit == PostVisit) emit(sw::Shader::OPCODE_ALL, result, arg); break;
\r
1022 case EOpFloatBitsToInt: if(visit == PostVisit) emit(sw::Shader::OPCODE_FLOATBITSTOINT, result, arg); break;
\r
1023 case EOpFloatBitsToUint: if(visit == PostVisit) emit(sw::Shader::OPCODE_FLOATBITSTOUINT, result, arg); break;
\r
1024 case EOpIntBitsToFloat: if(visit == PostVisit) emit(sw::Shader::OPCODE_INTBITSTOFLOAT, result, arg); break;
\r
1025 case EOpUintBitsToFloat: if(visit == PostVisit) emit(sw::Shader::OPCODE_UINTBITSTOFLOAT, result, arg); break;
\r
1026 case EOpPackSnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_PACKSNORM2x16, result, arg); break;
\r
1027 case EOpPackUnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_PACKUNORM2x16, result, arg); break;
\r
1028 case EOpPackHalf2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_PACKHALF2x16, result, arg); break;
\r
1029 case EOpUnpackSnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_UNPACKSNORM2x16, result, arg); break;
\r
1030 case EOpUnpackUnorm2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_UNPACKUNORM2x16, result, arg); break;
\r
1031 case EOpUnpackHalf2x16: if(visit == PostVisit) emit(sw::Shader::OPCODE_UNPACKHALF2x16, result, arg); break;
\r
1032 case EOpTranspose:
\r
1033 if(visit == PostVisit)
\r
1035 int numCols = arg->getNominalSize();
\r
1036 int numRows = arg->getSecondarySize();
\r
1037 for(int i = 0; i < numCols; ++i)
\r
1039 for(int j = 0; j < numRows; ++j)
\r
1041 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, j, arg, i);
\r
1042 mov->src[0].swizzle = 0x55 * j;
\r
1043 mov->dst.mask = 1 << i;
\r
1048 case EOpDeterminant:
\r
1049 if(visit == PostVisit)
\r
1051 int size = arg->getNominalSize();
\r
1052 ASSERT(size == arg->getSecondarySize());
\r
1054 emitDeterminant(result, arg, size);
\r
1058 if(visit == PostVisit)
\r
1060 int size = arg->getNominalSize();
\r
1061 ASSERT(size == arg->getSecondarySize());
\r
1063 // Compute transposed matrix of cofactors
\r
1064 for(int i = 0; i < size; ++i)
\r
1066 for(int j = 0; j < size; ++j)
\r
1068 // For a 2x2 matrix, the cofactor is simply a transposed move or negate
\r
1069 // For a 3x3 or 4x4 matrix, the cofactor is a transposed determinant
\r
1070 emitDeterminant(result, arg, size - 1, j, i, i, j);
\r
1074 // Compute 1 / determinant
\r
1075 Temporary invDet(this);
\r
1076 emitDeterminant(&invDet, arg, size);
\r
1077 Constant one(1.0f, 1.0f, 1.0f, 1.0f);
\r
1078 Instruction *div = emit(sw::Shader::OPCODE_DIV, &invDet, &one, &invDet);
\r
1079 div->src[1].swizzle = 0x00; // xxxx
\r
1081 // Divide transposed matrix of cofactors by determinant
\r
1082 for(int i = 0; i < size; ++i)
\r
1084 emit(sw::Shader::OPCODE_MUL, result, i, result, i, &invDet);
\r
1088 default: UNREACHABLE(node->getOp());
\r
// Emits code for aggregate AST nodes: statement sequences, function
// definitions and calls, texture built-in calls, vector/matrix/struct
// constructors, and component-wise built-in operators.
// NOTE(review): this extract is missing interleaved lines (braces, several
// case labels, returns); the comments below describe only the visible
// statements.
1094 bool OutputASM::visitAggregate(Visit visit, TIntermAggregate *node)
// Ignore nodes outside the scope currently being emitted, except scope
// delimiters themselves (function definitions and sequences).
1096 if(currentScope != emitScope && node->getOp() != EOpFunction && node->getOp() != EOpSequence)
1101 Constant zero(0.0f, 0.0f, 0.0f, 0.0f);
1103 TIntermTyped *result = node;
1104 const TType &resultType = node->getType();
1105 TIntermSequence &arg = node->getSequence();
1106 int argumentCount = arg.size();
1108 switch(node->getOp())
1110 case EOpSequence: break;
1111 case EOpDeclaration: break;
1112 case EOpInvariantDeclaration: break;
1113 case EOpPrototype: break;
1115 if(visit == PostVisit)
1117 copy(result, arg[1]);
// Function definition: on PreVisit emit a LABEL (when not main) and switch
// to FUNCTION scope; on PostVisit emit RET and return to GLOBAL scope.
1121 if(visit == PreVisit)
1123 const TString &name = node->getName();
1125 if(emitScope == FUNCTION)
1127 if(functionArray.size() > 1) // No need for a label when there's only main()
1129 Instruction *label = emit(sw::Shader::OPCODE_LABEL);
1130 label->dst.type = sw::Shader::PARAMETER_LABEL;
1132 const Function *function = findFunction(name);
1133 ASSERT(function); // Should have been added during global pass
1134 label->dst.index = function->label;
1135 currentFunction = function->label;
1138 else if(emitScope == GLOBAL)
// Global pass: register every user-defined function (not main) so calls
// can later resolve their label and argument list.
1140 if(name != "main(")
1142 TIntermSequence &arguments = node->getSequence()[0]->getAsAggregate()->getSequence();
1143 functionArray.push_back(Function(functionArray.size(), name, &arguments, node));
1146 else UNREACHABLE(emitScope);
1148 currentScope = FUNCTION;
1150 else if(visit == PostVisit)
1152 if(emitScope == FUNCTION)
1154 if(functionArray.size() > 1) // No need to return when there's only main()
1156 emit(sw::Shader::OPCODE_RET);
1160 currentScope = GLOBAL;
1163 case EOpFunctionCall:
1164 if(visit == PostVisit)
1166 if(node->isUserDefined())
// User-defined call: copy in/inout/const arguments into the callee's
// parameter registers, CALL the label, copy the return value, then copy
// out/inout parameters back.
1168 const TString &name = node->getName();
1169 const Function *function = findFunction(name);
1173 mContext.error(node->getLine(), "function definition not found", name.c_str());
1177 TIntermSequence &arguments = *function->arg;
1179 for(int i = 0; i < argumentCount; i++)
1181 TIntermTyped *in = arguments[i]->getAsTyped();
1183 if(in->getQualifier() == EvqIn ||
1184 in->getQualifier() == EvqInOut ||
1185 in->getQualifier() == EvqConstReadOnly)
1191 Instruction *call = emit(sw::Shader::OPCODE_CALL);
1192 call->dst.type = sw::Shader::PARAMETER_LABEL;
1193 call->dst.index = function->label;
1195 if(function->ret && function->ret->getType().getBasicType() != EbtVoid)
1197 copy(result, function->ret);
1200 for(int i = 0; i < argumentCount; i++)
1202 TIntermTyped *argument = arguments[i]->getAsTyped();
1203 TIntermTyped *out = arg[i]->getAsTyped();
1205 if(argument->getQualifier() == EvqOut ||
1206 argument->getQualifier() == EvqInOut)
1208 copy(out, argument);
// Built-in texture call: dispatch on the lookup method encoded in the name.
1214 const TextureFunction textureFunction(node->getName());
1215 switch(textureFunction.method)
1217 case TextureFunction::IMPLICIT:
1219 TIntermTyped *t = arg[1]->getAsTyped();
1221 TIntermNode* offset = textureFunction.offset ? arg[2] : 0;
// No bias argument: plain TEX/TEXOFFSET; projective lookups replicate the
// q coordinate into the unused components via a swizzle.
1223 if(argumentCount == 2 || (textureFunction.offset && argumentCount == 3))
1225 Instruction *tex = emit(textureFunction.offset ? sw::Shader::OPCODE_TEXOFFSET : sw::Shader::OPCODE_TEX,
1226 result, arg[1], arg[0], offset);
1227 if(textureFunction.proj)
1229 tex->project = true;
1231 switch(t->getNominalSize())
1233 case 2: tex->src[0].swizzle = 0x54; break; // xyyy
1234 case 3: tex->src[0].swizzle = 0xA4; break; // xyzz
1235 case 4: break; // xyzw
1237 UNREACHABLE(t->getNominalSize());
// With bias: pre-divide for projection, pack the bias into .w, then sample.
1242 else if(argumentCount == 3 || (textureFunction.offset && argumentCount == 4)) // bias
1244 Temporary proj(this);
1245 if(textureFunction.proj)
1247 Instruction *div = emit(sw::Shader::OPCODE_DIV, &proj, arg[1], arg[1]);
1248 div->dst.mask = 0x3;
1250 switch(t->getNominalSize())
// Replicate the last coordinate component (the q divisor) across src1.
1255 div->src[1].swizzle = 0x55 * (t->getNominalSize() - 1);
1258 UNREACHABLE(t->getNominalSize());
1264 emit(sw::Shader::OPCODE_MOV, &proj, arg[1]);
1267 Instruction *bias = emit(sw::Shader::OPCODE_MOV, &proj, arg[textureFunction.offset ? 3 : 2]);
1268 bias->dst.mask = 0x8;
1270 Instruction *tex = emit(textureFunction.offset ? sw::Shader::OPCODE_TEXOFFSET : sw::Shader::OPCODE_TEX,
1271 result, &proj, arg[0], offset); // FIXME: Implement an efficient TEXLDB instruction
1274 else UNREACHABLE(argumentCount);
// Explicit-LOD lookup: LOD value is packed into .w of the coordinate.
1277 case TextureFunction::LOD:
1279 TIntermTyped *t = arg[1]->getAsTyped();
1280 Temporary proj(this);
1282 if(textureFunction.proj)
1284 Instruction *div = emit(sw::Shader::OPCODE_DIV, &proj, arg[1], arg[1]);
1285 div->dst.mask = 0x3;
1287 switch(t->getNominalSize())
1292 div->src[1].swizzle = 0x55 * (t->getNominalSize() - 1);
1295 UNREACHABLE(t->getNominalSize());
1301 emit(sw::Shader::OPCODE_MOV, &proj, arg[1]);
1304 Instruction *lod = emit(sw::Shader::OPCODE_MOV, &proj, arg[2]);
1305 lod->dst.mask = 0x8;
1307 emit(textureFunction.offset ? sw::Shader::OPCODE_TEXLDLOFFSET : sw::Shader::OPCODE_TEXLDL,
1308 result, &proj, arg[0], textureFunction.offset ? arg[3] : 0);
// texelFetch: integer coordinates plus explicit level in arg[2].
1311 case TextureFunction::FETCH:
1313 TIntermTyped *t = arg[1]->getAsTyped();
1315 if(argumentCount == 3 || (textureFunction.offset && argumentCount == 4))
1317 TIntermNode* offset = textureFunction.offset ? arg[3] : 0;
1319 emit(textureFunction.offset ? sw::Shader::OPCODE_TEXELFETCHOFFSET : sw::Shader::OPCODE_TEXELFETCH,
1320 result, arg[1], arg[0], arg[2], offset);
1322 else UNREACHABLE(argumentCount);
// textureGrad: explicit derivatives in arg[2]/arg[3].
1325 case TextureFunction::GRAD:
1327 TIntermTyped *t = arg[1]->getAsTyped();
1329 if(argumentCount == 4 || (textureFunction.offset && argumentCount == 5))
1331 Temporary uvwb(this);
1333 if(textureFunction.proj)
1335 Instruction *div = emit(sw::Shader::OPCODE_DIV, &uvwb, arg[1], arg[1]);
1336 div->dst.mask = 0x3;
1338 switch(t->getNominalSize())
1343 div->src[1].swizzle = 0x55 * (t->getNominalSize() - 1);
1346 UNREACHABLE(t->getNominalSize());
1352 emit(sw::Shader::OPCODE_MOV, &uvwb, arg[1]);
1355 TIntermNode* offset = textureFunction.offset ? arg[4] : 0;
1357 emit(textureFunction.offset ? sw::Shader::OPCODE_TEXGRADOFFSET : sw::Shader::OPCODE_TEXGRAD,
1358 result, &uvwb, arg[0], arg[2], arg[3], offset);
1360 else UNREACHABLE(argumentCount);
1363 case TextureFunction::SIZE:
1364 emit(sw::Shader::OPCODE_TEXSIZE, result, arg[1], arg[0]);
1367 UNREACHABLE(textureFunction.method);
1372 case EOpParameters:
// Scalar/vector constructors: copy each argument's components into the
// result, tracking the running component offset.
1374 case EOpConstructFloat:
1375 case EOpConstructVec2:
1376 case EOpConstructVec3:
1377 case EOpConstructVec4:
1378 case EOpConstructBool:
1379 case EOpConstructBVec2:
1380 case EOpConstructBVec3:
1381 case EOpConstructBVec4:
1382 case EOpConstructInt:
1383 case EOpConstructIVec2:
1384 case EOpConstructIVec3:
1385 case EOpConstructIVec4:
1386 case EOpConstructUInt:
1387 case EOpConstructUVec2:
1388 case EOpConstructUVec3:
1389 case EOpConstructUVec4:
1390 if(visit == PostVisit)
1392 int component = 0;
1394 for(int i = 0; i < argumentCount; i++)
1396 TIntermTyped *argi = arg[i]->getAsTyped();
1397 int size = argi->getNominalSize();
1399 if(!argi->isMatrix())
1401 Instruction *mov = emitCast(result, argi);
1402 mov->dst.mask = (0xF << component) & 0xF;
1403 mov->src[0].swizzle = readSwizzle(argi, size) << (component * 2);
1405 component += size;
// Matrix argument: consume its columns until the result vector is full.
1411 while(component < resultType.getNominalSize())
1413 Instruction *mov = emitCast(result, 0, argi, column);
1414 mov->dst.mask = (0xF << component) & 0xF;
1415 mov->src[0].swizzle = readSwizzle(argi, size) << (component * 2);
1418 component += size;
// Matrix constructors: scale matrix from a scalar, resize from another
// matrix (identity-filled), or pack scalars/vectors column by column.
1424 case EOpConstructMat2:
1425 case EOpConstructMat2x3:
1426 case EOpConstructMat2x4:
1427 case EOpConstructMat3x2:
1428 case EOpConstructMat3:
1429 case EOpConstructMat3x4:
1430 case EOpConstructMat4x2:
1431 case EOpConstructMat4x3:
1432 case EOpConstructMat4:
1433 if(visit == PostVisit)
1435 TIntermTyped *arg0 = arg[0]->getAsTyped();
1436 const int outCols = result->getNominalSize();
1437 const int outRows = result->getSecondarySize();
1439 if(arg0->isScalar() && arg.size() == 1) // Construct scale matrix
1441 for(int i = 0; i < outCols; i++)
1443 Instruction *init = emit(sw::Shader::OPCODE_MOV, result, i, &zero);
1444 Instruction *mov = emitCast(result, i, arg0, 0);
1445 mov->dst.mask = 1 << i;
1446 ASSERT(mov->src[0].swizzle == 0x00);
1449 else if(arg0->isMatrix())
1451 const int inCols = arg0->getNominalSize();
1452 const int inRows = arg0->getSecondarySize();
1454 for(int i = 0; i < outCols; i++)
1456 if(i >= inCols || outRows > inRows)
1458 // Initialize to identity matrix
1459 Constant col((i == 0 ? 1.0f : 0.0f), (i == 1 ? 1.0f : 0.0f), (i == 2 ? 1.0f : 0.0f), (i == 3 ? 1.0f : 0.0f))<
1460 Instruction *mov = emitCast(result, i, &col, 0);
1465 Instruction *mov = emitCast(result, i, arg0, i);
1466 mov->dst.mask = 0xF >> (4 - inRows);
// Component arguments: fill columns top to bottom, spilling an argument
// across column boundaries when needed.
1475 for(int i = 0; i < argumentCount; i++)
1477 TIntermTyped *argi = arg[i]->getAsTyped();
1478 int size = argi->getNominalSize();
1481 while(element < size)
1483 Instruction *mov = emitCast(result, column, argi, 0);
1484 mov->dst.mask = (0xF << row) & 0xF;
1485 mov->src[0].swizzle = (readSwizzle(argi, size) << (row * 2)) + 0x55 * element;
1487 int end = row + size - element;
1488 column = end >= outRows ? column + 1 : column;
1489 element = element + outRows - row;
1490 row = end >= outRows ? 0 : end;
// Struct constructor: copy each member register by register.
1496 case EOpConstructStruct:
1497 if(visit == PostVisit)
1500 for(int i = 0; i < argumentCount; i++)
1502 TIntermTyped *argi = arg[i]->getAsTyped();
1503 int size = argi->totalRegisterCount();
1505 for(int index = 0; index < size; index++)
1507 Instruction *mov = emit(sw::Shader::OPCODE_MOV, result, index + offset, argi, index);
1508 mov->dst.mask = writeMask(result, offset + index);
// Component-wise relational and math built-ins map to single opcodes.
1515 case EOpLessThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LT, result, arg[0], arg[1]); break;
1516 case EOpGreaterThan: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GT, result, arg[0], arg[1]); break;
1517 case EOpLessThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_LE, result, arg[0], arg[1]); break;
1518 case EOpGreaterThanEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_GE, result, arg[0], arg[1]); break;
1519 case EOpVectorEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_EQ, result, arg[0], arg[1]); break;
1520 case EOpVectorNotEqual: if(visit == PostVisit) emitCmp(sw::Shader::CONTROL_NE, result, arg[0], arg[1]); break;
1521 case EOpMod: if(visit == PostVisit) emit(sw::Shader::OPCODE_MOD, result, arg[0], arg[1]); break;
1522 case EOpPow: if(visit == PostVisit) emit(sw::Shader::OPCODE_POW, result, arg[0], arg[1]); break;
1523 case EOpAtan: if(visit == PostVisit) emit(sw::Shader::OPCODE_ATAN2, result, arg[0], arg[1]); break;
1524 case EOpMin: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_MIN, result), result, arg[0], arg[1]); break;
1525 case EOpMax: if(visit == PostVisit) emit(getOpcode(sw::Shader::OPCODE_MAX, result), result, arg[0], arg[1]); break;
// clamp(x, lo, hi) == min(max(x, lo), hi)
1527 if(visit == PostVisit)
1529 emit(getOpcode(sw::Shader::OPCODE_MAX, result), result, arg[0], arg[1]);
1530 emit(getOpcode(sw::Shader::OPCODE_MIN, result), result, result, arg[2]);
1533 case EOpMix: if(visit == PostVisit) emit(sw::Shader::OPCODE_LRP, result, arg[2], arg[1], arg[0]); break;
1534 case EOpStep: if(visit == PostVisit) emit(sw::Shader::OPCODE_STEP, result, arg[0], arg[1]); break;
1535 case EOpSmoothStep: if(visit == PostVisit) emit(sw::Shader::OPCODE_SMOOTH, result, arg[0], arg[1], arg[2]); break;
1536 case EOpDistance: if(visit == PostVisit) emit(sw::Shader::OPCODE_DIST(dim(arg[0])), result, arg[0], arg[1]); break;
1537 case EOpDot: if(visit == PostVisit) emit(sw::Shader::OPCODE_DP(dim(arg[0])), result, arg[0], arg[1]); break;
1538 case EOpCross: if(visit == PostVisit) emit(sw::Shader::OPCODE_CRS, result, arg[0], arg[1]); break;
1539 case EOpFaceForward: if(visit == PostVisit) emit(sw::Shader::OPCODE_FORWARD(dim(arg[0])), result, arg[0], arg[1], arg[2]); break;
1540 case EOpReflect: if(visit == PostVisit) emit(sw::Shader::OPCODE_REFLECT(dim(arg[0])), result, arg[0], arg[1]); break;
1541 case EOpRefract: if(visit == PostVisit) emit(sw::Shader::OPCODE_REFRACT(dim(arg[0])), result, arg[0], arg[1], arg[2]); break;
// matrixCompMult: per-column component-wise multiply.
1543 if(visit == PostVisit)
1545 TIntermTyped *arg0 = arg[0]->getAsTyped();
1546 TIntermTyped *arg1 = arg[1]->getAsTyped();
1547 ASSERT((arg0->getNominalSize() == arg1->getNominalSize()) && (arg0->getSecondarySize() == arg1->getSecondarySize()));
1549 int size = arg0->getNominalSize();
1550 for(int i = 0; i < size; i++)
1552 emit(sw::Shader::OPCODE_MUL, result, i, arg[0], i, arg[1], i);
// outerProduct: column i = arg0 * arg1[i] (arg1 component replicated).
1556 case EOpOuterProduct:
1557 if(visit == PostVisit)
1559 for(int i = 0; i < dim(arg[1]); i++)
1561 Instruction *mul = emit(sw::Shader::OPCODE_MUL, result, i, arg[0], 0, arg[1]);
1562 mul->src[1].swizzle = 0x55 * i;
1566 default: UNREACHABLE(node->getOp());
\r
// Emits code for ternary selections (?:) and if/else statements.
// Constant conditions are folded at compile time; cheap, side-effect-free
// ternaries become a single SELECT; everything else becomes IF/ELSE/ENDIF.
// NOTE(review): brace lines and some branches are missing from this extract.
1572 bool OutputASM::visitSelection(Visit visit, TIntermSelection *node)
1574 if(currentScope != emitScope)
1579 TIntermTyped *condition = node->getCondition();
1580 TIntermNode *trueBlock = node->getTrueBlock();
1581 TIntermNode *falseBlock = node->getFalseBlock();
1582 TIntermConstantUnion *constantCondition = condition->getAsConstantUnion();
1584 condition->traverse(this);
1586 if(node->usesTernaryOperator())
// Constant condition: emit only the selected branch and copy its value.
1588 if(constantCondition)
1590 bool trueCondition = constantCondition->getUnionArrayPointer()->getBConst();
1594 trueBlock->traverse(this);
1595 copy(node, trueBlock);
1599 falseBlock->traverse(this);
1600 copy(node, falseBlock);
// Both results cheap and side-effect free: evaluate both, SELECT one.
1603 else if(trivial(node, 6)) // Fast to compute both potential results and no side effects
1605 trueBlock->traverse(this);
1606 falseBlock->traverse(this);
1607 emit(sw::Shader::OPCODE_SELECT, node, condition, trueBlock, falseBlock);
// General ternary: structured IF/ELSE/ENDIF copying the taken branch's value.
1611 emit(sw::Shader::OPCODE_IF, 0, condition);
1615 trueBlock->traverse(this);
1616 copy(node, trueBlock);
1621 emit(sw::Shader::OPCODE_ELSE);
1622 falseBlock->traverse(this);
1623 copy(node, falseBlock);
1626 emit(sw::Shader::OPCODE_ENDIF);
1629 else // if/else statement
// Constant condition: emit only the reachable block, no control flow.
1631 if(constantCondition)
1633 bool trueCondition = constantCondition->getUnionArrayPointer()->getBConst();
1639 trueBlock->traverse(this);
1646 falseBlock->traverse(this);
// Dynamic condition: structured control flow.
1652 emit(sw::Shader::OPCODE_IF, 0, condition);
1656 trueBlock->traverse(this);
1661 emit(sw::Shader::OPCODE_ELSE);
1662 falseBlock->traverse(this);
1665 emit(sw::Shader::OPCODE_ENDIF);
\r
// Emits code for loops. Loops with a small, statically known trip count and
// no discontinuities are unrolled; do-while is emulated with a WHILE on an
// iterate flag; other loops become WHILE/TEST/ENDWHILE.
// NOTE(review): brace lines and some statements are missing from this extract.
1672 bool OutputASM::visitLoop(Visit visit, TIntermLoop *node)
1674 if(currentScope != emitScope)
// loopCount() returns the statically determined iteration count (apparently
// 0 when it cannot be determined — TODO confirm against its definition).
1679 unsigned int iterations = loopCount(node);
1681 if(iterations == 0)
1686 bool unroll = (iterations <= 4);
// Do not unroll loops containing breaks/discontinuities.
1690 DetectLoopDiscontinuity detectLoopDiscontinuity;
1691 unroll = !detectLoopDiscontinuity.traverse(node);
1694 TIntermNode *init = node->getInit();
1695 TIntermTyped *condition = node->getCondition();
1696 TIntermTyped *expression = node->getExpression();
1697 TIntermNode *body = node->getBody();
1698 Constant True(true);
// do-while: run the body under WHILE(iterate), then recompute the condition
// into the iterate flag at the end of each pass.
1700 if(node->getType() == ELoopDoWhile)
1702 Temporary iterate(this);
1703 emit(sw::Shader::OPCODE_MOV, &iterate, &True);
1705 emit(sw::Shader::OPCODE_WHILE, 0, &iterate); // FIXME: Implement real do-while
1709 body->traverse(this);
1712 emit(sw::Shader::OPCODE_TEST);
1714 condition->traverse(this);
1715 emit(sw::Shader::OPCODE_MOV, &iterate, condition);
1717 emit(sw::Shader::OPCODE_ENDWHILE);
1723 init->traverse(this);
// Unrolled loop: repeat body (and increment expression) 'iterations' times.
1728 for(unsigned int i = 0; i < iterations; i++)
1730 // condition->traverse(this); // Condition could contain statements, but not in an unrollable loop
1734 body->traverse(this);
1739 expression->traverse(this);
// General loop: evaluate the condition (or use constant true when absent),
// then emit WHILE/TEST/ENDWHILE around the body and increment expression.
1747 condition->traverse(this);
1751 condition = &True;
1754 emit(sw::Shader::OPCODE_WHILE, 0, condition);
1758 body->traverse(this);
1761 emit(sw::Shader::OPCODE_TEST);
1765 expression->traverse(this);
1770 condition->traverse(this);
1773 emit(sw::Shader::OPCODE_ENDWHILE);
\r
// Emits code for branch statements: discard, break, continue and return.
// NOTE(review): brace lines and the EOpReturn case label are missing from
// this extract.
1780 bool OutputASM::visitBranch(Visit visit, TIntermBranch *node)
1782 if(currentScope != emitScope)
1787 switch(node->getFlowOp())
1789 case EOpKill: if(visit == PostVisit) emit(sw::Shader::OPCODE_DISCARD); break;
1790 case EOpBreak: if(visit == PostVisit) emit(sw::Shader::OPCODE_BREAK); break;
1791 case EOpContinue: if(visit == PostVisit) emit(sw::Shader::OPCODE_CONTINUE); break;
// Return: copy the (optional) return expression into the current function's
// return register, then LEAVE.
1793 if(visit == PostVisit)
1795 TIntermTyped *value = node->getExpression();
1799 copy(functionArray[currentFunction].ret, value);
1802 emit(sw::Shader::OPCODE_LEAVE);
1805 default: UNREACHABLE(node->getFlowOp());
\r
1811 bool OutputASM::isSamplerRegister(TIntermTyped *operand)
\r
1813 return operand && isSamplerRegister(operand->getType());
\r
1816 bool OutputASM::isSamplerRegister(const TType &type)
\r
1818 // A sampler register's qualifiers can be:
\r
1819 // - EvqUniform: The sampler uniform is used as is in the code (default case).
\r
1820 // - EvqTemporary: The sampler is indexed. It's still a sampler register.
\r
1821 // - EvqIn (and other similar types): The sampler has been passed as a function argument. At this point,
\r
1822 // the sampler has been copied and is no longer a sampler register.
\r
1823 return IsSampler(type.getBasicType()) && (type.getQualifier() == EvqUniform || type.getQualifier() == EvqTemporary);
\r
1826 Instruction *OutputASM::emit(sw::Shader::Opcode op, TIntermTyped *dst, TIntermNode *src0, TIntermNode *src1, TIntermNode *src2, TIntermNode *src3, TIntermNode *src4)
\r
1828 return emit(op, dst, 0, src0, 0, src1, 0, src2, 0, src3, 0, src4, 0);
\r
// Creates an Instruction for 'op', fills in the destination register
// (type/index/mask) and up to five source parameters, appends it to the
// shader's instruction stream and returns it for further tweaking by the
// caller (mask, swizzle, control bits).
// NOTE(review): brace lines and a presumed if(dst) guard are missing from
// this extract.
1831 Instruction *OutputASM::emit(sw::Shader::Opcode op, TIntermTyped *dst, int dstIndex, TIntermNode *src0, int index0, TIntermNode *src1, int index1,
1832 TIntermNode *src2, int index2, TIntermNode *src3, int index3, TIntermNode *src4, int index4)
1834 if(isSamplerRegister(dst))
1836 op = sw::Shader::OPCODE_NULL; // Can't assign to a sampler, but this is hit when indexing sampler arrays
1839 Instruction *instruction = new Instruction(op);
// Destination register: base register of 'dst' plus the requested offset.
1843 instruction->dst.type = registerType(dst);
1844 instruction->dst.index = registerIndex(dst) + dstIndex;
1845 instruction->dst.mask = writeMask(dst);
1846 instruction->dst.integer = (dst->getBasicType() == EbtInt);
// Source parameters; argument() tolerates null sources.
1849 argument(instruction->src[0], src0, index0);
1850 argument(instruction->src[1], src1, index1);
1851 argument(instruction->src[2], src2, index2);
1852 argument(instruction->src[3], src3, index3);
1853 argument(instruction->src[4], src4, index4);
1855 shader->append(instruction);
1857 return instruction;
\r
1860 Instruction *OutputASM::emitCast(TIntermTyped *dst, TIntermTyped *src)
\r
1862 return emitCast(dst, 0, src, 0);
\r
// Emits a type-conversion instruction from src's basic type to dst's basic
// type (bool/int/uint/float), falling through to a plain MOV when the types
// already match.
// NOTE(review): the outer case labels (case EbtBool/EbtInt/EbtUInt/EbtFloat)
// and default/break lines are missing from this extract; each inner switch
// below corresponds to one source basic type, in that order.
1865 Instruction *OutputASM::emitCast(TIntermTyped *dst, int dstIndex, TIntermTyped *src, int srcIndex)
1867 switch(src->getBasicType())
// Source is bool:
1870 switch(dst->getBasicType())
1872 case EbtInt: return emit(sw::Shader::OPCODE_B2I, dst, dstIndex, src, srcIndex);
1873 case EbtUInt: return emit(sw::Shader::OPCODE_B2I, dst, dstIndex, src, srcIndex);
1874 case EbtFloat: return emit(sw::Shader::OPCODE_B2F, dst, dstIndex, src, srcIndex);
// Source is int:
1879 switch(dst->getBasicType())
1881 case EbtBool: return emit(sw::Shader::OPCODE_I2B, dst, dstIndex, src, srcIndex);
1882 case EbtFloat: return emit(sw::Shader::OPCODE_I2F, dst, dstIndex, src, srcIndex);
// Source is uint (bool conversion shares I2B; float uses unsigned convert):
1887 switch(dst->getBasicType())
1889 case EbtBool: return emit(sw::Shader::OPCODE_I2B, dst, dstIndex, src, srcIndex);
1890 case EbtFloat: return emit(sw::Shader::OPCODE_U2F, dst, dstIndex, src, srcIndex);
// Source is float:
1895 switch(dst->getBasicType())
1897 case EbtBool: return emit(sw::Shader::OPCODE_F2B, dst, dstIndex, src, srcIndex);
1898 case EbtInt: return emit(sw::Shader::OPCODE_F2I, dst, dstIndex, src, srcIndex);
1899 case EbtUInt: return emit(sw::Shader::OPCODE_F2U, dst, dstIndex, src, srcIndex);
// No conversion needed: types must already agree; copy the register.
1907 ASSERT(src->getBasicType() == dst->getBasicType());
1909 return emit(sw::Shader::OPCODE_MOV, dst, dstIndex, src, srcIndex);
\r
1912 void OutputASM::emitBinary(sw::Shader::Opcode op, TIntermTyped *dst, TIntermNode *src0, TIntermNode *src1, TIntermNode *src2)
\r
1914 for(int index = 0; index < dst->elementRegisterCount(); index++)
\r
1916 emit(op, dst, index, src0, index, src1, index, src2, index);
\r
1920 void OutputASM::emitAssign(sw::Shader::Opcode op, TIntermTyped *result, TIntermTyped *lhs, TIntermTyped *src0, TIntermTyped *src1)
\r
1922 emitBinary(op, result, src0, src1);
\r
1923 assignLvalue(lhs, result);
\r
// Emits a comparison instruction with control condition 'cmpOp', selecting
// the signed-integer, unsigned-integer or floating-point compare opcode
// based on the left operand's basic type.
// NOTE(review): the case labels of this switch (presumably
// EbtBool/EbtInt, EbtUInt, EbtFloat and a default) are missing from this
// extract; only the assignments survive, in that order.
1926 void OutputASM::emitCmp(sw::Shader::Control cmpOp, TIntermTyped *dst, TIntermNode *left, TIntermNode *right, int index)
1928 sw::Shader::Opcode opcode;
1929 switch(left->getAsTyped()->getBasicType())
1933 opcode = sw::Shader::OPCODE_ICMP;
1936 opcode = sw::Shader::OPCODE_UCMP;
1939 opcode = sw::Shader::OPCODE_CMP;
// Both operands use the same register offset; the condition goes in the
// instruction's control field.
1943 Instruction *cmp = emit(opcode, dst, 0, left, index, right, index);
1944 cmp->control = cmpOp;
\r
// Returns the flattened component offset of the physical register at offset
// 'registers' within 'type', recursing through arrays, structs/interface
// blocks and matrices.
// NOTE(review): brace lines, an 'int elements' declaration and the
// base-case returns are missing from this extract.
1947 int componentCount(const TType &type, int registers)
// registers == 0: offset of the first register (base case).
1949 if(registers == 0)
// Array: skip whole elements, then recurse into the remainder.
1954 if(type.isArray() && registers >= type.elementRegisterCount())
1956 int index = registers / type.elementRegisterCount();
1957 registers -= index * type.elementRegisterCount();
1958 return index * type.getElementSize() + componentCount(type, registers);
// Struct / interface block: walk fields, accumulating object sizes of the
// fields that are fully skipped, then recurse into the field containing
// the requested register.
1961 if(type.isStruct() || type.isInterfaceBlock())
1963 const TFieldList& fields = type.getStruct() ? type.getStruct()->fields() : type.getInterfaceBlock()->fields();
1966 for(TFieldList::const_iterator field = fields.begin(); field != fields.end(); field++)
1968 const TType &fieldType = *((*field)->type());
1970 if(fieldType.totalRegisterCount() <= registers)
1972 registers -= fieldType.totalRegisterCount();
1973 elements += fieldType.getObjectSize();
1975 else // Register within this field
1977 return elements + componentCount(fieldType, registers);
// Matrix: each column register holds registerSize() components.
1981 else if(type.isMatrix())
1983 return registers * type.registerSize();
\r
// Returns the number of components held by the physical register at offset
// 'registers' within 'type', recursing through arrays, structs/interface
// blocks and matrices analogously to componentCount() above.
// NOTE(review): brace lines and some declarations/returns are missing from
// this extract.
1990 int registerSize(const TType &type, int registers)
// registers == 0: size of the first register. For structs/interface blocks
// that is the size of the first field's first register.
1992 if(registers == 0)
1994 if(type.isStruct())
1996 return registerSize(*((*(type.getStruct()->fields().begin()))->type()), 0);
1998 else if(type.isInterfaceBlock())
2000 return registerSize(*((*(type.getInterfaceBlock()->fields().begin()))->type()), 0);
2003 return type.registerSize();
// Array: reduce to an offset within a single element, then recurse.
2006 if(type.isArray() && registers >= type.elementRegisterCount())
2008 int index = registers / type.elementRegisterCount();
2009 registers -= index * type.elementRegisterCount();
2010 return registerSize(type, registers);
// Struct / interface block: find the field containing the register.
2013 if(type.isStruct() || type.isInterfaceBlock())
2015 const TFieldList& fields = type.getStruct() ? type.getStruct()->fields() : type.getInterfaceBlock()->fields();
2018 for(TFieldList::const_iterator field = fields.begin(); field != fields.end(); field++)
2020 const TType &fieldType = *((*field)->type());
2022 if(fieldType.totalRegisterCount() <= registers)
2024 registers -= fieldType.totalRegisterCount();
2025 elements += fieldType.getObjectSize();
2027 else // Register within this field
2029 return registerSize(fieldType, registers);
// Matrix: every column register has the same size as the first.
2033 else if(type.isMatrix())
2035 return registerSize(type, 0);
\r
// Fills in a source parameter from an AST node: for constant expressions the
// literal values are embedded directly; otherwise the register index is
// resolved, sampler-array indexing is applied, and a read swizzle is chosen.
// NOTE(review): brace lines, a presumed null-argument guard, break
// statements and else branches are missing from this extract; the
// '¶meter' token is pre-existing mojibake for '&parameter'.
2042 void OutputASM::argument(sw::Shader::SourceParameter ¶meter, TIntermNode *argument, int index)
2046 TIntermTyped *arg = argument->getAsTyped();
2047 const TType &type = arg->getType();
// Clamp the register offset to the last register of the argument.
2048 index = (index >= arg->totalRegisterCount()) ? arg->totalRegisterCount() - 1 : index;
2050 int size = registerSize(type, index);
2052 parameter.type = registerType(arg);
// Constant expression: copy the (up to 4) component values inline,
// replicating a scalar across all four components.
2054 if(arg->getQualifier() == EvqConstExpr)
2056 int component = componentCount(type, index);
2057 ConstantUnion *constants = arg->getAsConstantUnion()->getUnionArrayPointer();
2059 for(int i = 0; i < 4; i++)
2061 if(size == 1) // Replicate
2063 parameter.value[i] = constants[component + 0].getAsFloat();
2067 parameter.value[i] = constants[component + i].getAsFloat();
2071 parameter.value[i] = 0.0f;
2077 parameter.index = registerIndex(arg) + index;
// Sampler register accessed through an index expression: fold constant
// indices into the register index, or set up relative addressing.
2079 if(isSamplerRegister(arg))
2081 TIntermBinary *binary = argument->getAsBinaryNode();
2085 TIntermTyped *left = binary->getLeft();
2086 TIntermTyped *right = binary->getRight();
2088 switch(binary->getOp())
2090 case EOpIndexDirect:
2091 parameter.index += right->getAsConstantUnion()->getIConst(0);
2093 case EOpIndexIndirect:
2094 if(left->getArraySize() > 1)
2096 parameter.rel.type = registerType(binary->getRight());
2097 parameter.rel.index = registerIndex(binary->getRight());
2098 parameter.rel.scale = 1;
2099 parameter.rel.deterministic = true;
2102 case EOpIndexDirectStruct:
2103 case EOpIndexDirectInterfaceBlock:
2104 parameter.index += right->getAsConstantUnion()->getIConst(0);
2107 UNREACHABLE(binary->getOp());
// Samplers are read whole; everything else gets a component swizzle.
2113 if(!IsSampler(arg->getBasicType()))
2115 parameter.swizzle = readSwizzle(arg, size);
\r
2120 void OutputASM::copy(TIntermTyped *dst, TIntermNode *src, int offset)
\r
2122 for(int index = 0; index < dst->totalRegisterCount(); index++)
\r
2124 Instruction *mov = emit(sw::Shader::OPCODE_MOV, dst, index, src, offset + index);
\r
2125 mov->dst.mask = writeMask(dst, index);
\r
// Extracts the component selector (0-3) at position 'index' from a packed
// 2-bit-per-component swizzle code.
int swizzleElement(int swizzle, int index)
{
	const int shift = index * 2;
	return (swizzle >> shift) & 0x03;
}
\r
// Composes two packed swizzle codes: the result selects, for each output
// component, leftSwizzle applied after rightSwizzle (rightSwizzle picks the
// position within leftSwizzle).
int swizzleSwizzle(int leftSwizzle, int rightSwizzle)
{
	int composed = 0;

	for(int component = 0; component < 4; component++)
	{
		const int rightSelect = (rightSwizzle >> (component * 2)) & 0x03;
		const int leftSelect = (leftSwizzle >> (rightSelect * 2)) & 0x03;

		composed |= leftSelect << (component * 2);
	}

	return composed;
}
\r
2142 void OutputASM::assignLvalue(TIntermTyped *dst, TIntermTyped *src)
\r
2145 ((src->isVector() && (!dst->isVector() || (dst->getNominalSize() != dst->getNominalSize()))) ||
\r
2146 (src->isMatrix() && (!dst->isMatrix() || (src->getNominalSize() != dst->getNominalSize()) || (src->getSecondarySize() != dst->getSecondarySize())))))
\r
2148 return mContext.error(src->getLine(), "Result type should match the l-value type in compound assignment", src->isVector() ? "vector" : "matrix");
\r
2151 TIntermBinary *binary = dst->getAsBinaryNode();
\r
2153 if(binary && binary->getOp() == EOpIndexIndirect && dst->isScalar())
\r
2155 Instruction *insert = new Instruction(sw::Shader::OPCODE_INSERT);
\r
2157 Temporary address(this);
\r
2158 lvalue(insert->dst, address, dst);
\r
2160 insert->src[0].type = insert->dst.type;
\r
2161 insert->src[0].index = insert->dst.index;
\r
2162 insert->src[0].rel = insert->dst.rel;
\r
2163 argument(insert->src[1], src);
\r
2164 argument(insert->src[2], binary->getRight());
\r
2166 shader->append(insert);
\r
2170 for(int offset = 0; offset < dst->totalRegisterCount(); offset++)
\r
2172 Instruction *mov = new Instruction(sw::Shader::OPCODE_MOV);
\r
2174 Temporary address(this);
\r
2175 int swizzle = lvalue(mov->dst, address, dst);
\r
2176 mov->dst.index += offset;
\r
2180 mov->dst.mask = writeMask(dst, offset);
\r
2183 argument(mov->src[0], src, offset);
\r
2184 mov->src[0].swizzle = swizzleSwizzle(mov->src[0].swizzle, swizzle);
\r
2186 shader->append(mov);
\r
// Recursively resolves an l-value expression into a destination parameter
// (register type, index, write mask, and relative addressing computed into
// 'address' when indices are dynamic). Returns the accumulated component
// swizzle of the l-value, which the caller applies to the source operand.
// NOTE(review): brace lines, break statements, some branches (e.g. the
// symbol case header) and local declarations are missing from this extract.
2191 int OutputASM::lvalue(sw::Shader::DestinationParameter &dst, Temporary &address, TIntermTyped *node)
2193 TIntermTyped *result = node;
2194 TIntermBinary *binary = node->getAsBinaryNode();
2195 TIntermSymbol *symbol = node->getAsSymbolNode();
// Binary l-value: resolve the left side first, then refine by operator.
2199 TIntermTyped *left = binary->getLeft();
2200 TIntermTyped *right = binary->getRight();
2202 int leftSwizzle = lvalue(dst, address, left); // Resolve the l-value of the left side
2204 switch(binary->getOp())
// Constant index: either select one component of a register, or step the
// register index by whole elements for arrays/matrices.
2206 case EOpIndexDirect:
2208 int rightIndex = right->getAsConstantUnion()->getIConst(0);
2210 if(left->isRegister())
2212 int leftMask = dst.mask;
// Skip components masked out by the enclosing l-value.
2215 while((leftMask & dst.mask) == 0)
2217 dst.mask = dst.mask << 1;
2220 int element = swizzleElement(leftSwizzle, rightIndex);
2221 dst.mask = 1 << element;
2225 else if(left->isArray() || left->isMatrix())
2227 dst.index += rightIndex * result->totalRegisterCount();
2230 else UNREACHABLE(0);
// Dynamic index: accumulate relative addressing in 'address'.
2233 case EOpIndexIndirect:
2235 if(left->isRegister())
2237 // Requires INSERT instruction (handled by calling function)
2239 else if(left->isArray() || left->isMatrix())
2241 int scale = result->totalRegisterCount();
2243 if(dst.rel.type == sw::Shader::PARAMETER_VOID) // Use the index register as the relative address directly
2245 if(left->totalRegisterCount() > 1)
2247 sw::Shader::SourceParameter relativeRegister;
2248 argument(relativeRegister, right);
2250 dst.rel.index = relativeRegister.index;
2251 dst.rel.type = relativeRegister.type;
2252 dst.rel.scale = scale;
// Dynamic uniform indexing in a vertex shader may diverge between vertices.
2253 dst.rel.deterministic = !(vertexShader && left->getQualifier() == EvqUniform);
2256 else if(dst.rel.index != registerIndex(&address)) // Move the previous index register to the address register
// address = previousIndex * previousScale (+ right when scale == 1).
2260 Constant oldScale((int)dst.rel.scale);
2261 Instruction *mad = emit(sw::Shader::OPCODE_IMAD, &address, &address, &oldScale, right);
2262 mad->src[0].index = dst.rel.index;
2263 mad->src[0].type = dst.rel.type;
2267 Constant oldScale((int)dst.rel.scale);
2268 Instruction *mul = emit(sw::Shader::OPCODE_IMUL, &address, &address, &oldScale);
2269 mul->src[0].index = dst.rel.index;
2270 mul->src[0].type = dst.rel.type;
2272 Constant newScale(scale);
2273 emit(sw::Shader::OPCODE_IMAD, &address, right, &newScale, &address);
// From here on, relative addressing goes through the address temporary.
2276 dst.rel.type = sw::Shader::PARAMETER_TEMP;
2277 dst.rel.index = registerIndex(&address);
2278 dst.rel.scale = 1;
2280 else // Just add the new index to the address register
2284 emit(sw::Shader::OPCODE_IADD, &address, &address, right);
2288 Constant newScale(scale);
2289 emit(sw::Shader::OPCODE_IMAD, &address, right, &newScale, &address);
2293 else UNREACHABLE(0);
// Struct / interface block member: add the register offset of all fields
// preceding the selected member.
2296 case EOpIndexDirectStruct:
2297 case EOpIndexDirectInterfaceBlock:
2299 const TFieldList& fields = (binary->getOp() == EOpIndexDirectStruct) ?
2300 left->getType().getStruct()->fields() :
2301 left->getType().getInterfaceBlock()->fields();
2302 int index = right->getAsConstantUnion()->getIConst(0);
2303 int fieldOffset = 0;
2305 for(int i = 0; i < index; i++)
2307 fieldOffset += fields[i]->type()->totalRegisterCount();
2310 dst.type = registerType(left);
2311 dst.index += fieldOffset;
2312 dst.mask = writeMask(right);
// Swizzled l-value: intersect masks and compose the component mapping.
2317 case EOpVectorSwizzle:
2319 ASSERT(left->isRegister());
2321 int leftMask = dst.mask;
2324 int rightMask = 0;
2326 TIntermSequence &sequence = right->getAsAggregate()->getSequence();
2328 for(unsigned int i = 0; i < sequence.size(); i++)
2330 int index = sequence[i]->getAsConstantUnion()->getIConst(0);
2332 int element = swizzleElement(leftSwizzle, index);
2333 rightMask = rightMask | (1 << element);
2334 swizzle = swizzle | swizzleElement(leftSwizzle, i) << (element * 2);
2337 dst.mask = leftMask & rightMask;
2343 UNREACHABLE(binary->getOp()); // Not an l-value operator
// Plain symbol: base case — the symbol's own register and mask.
2349 dst.type = registerType(symbol);
2350 dst.index = registerIndex(symbol);
2351 dst.mask = writeMask(symbol);
\r
// Maps an operand to its backend register class (temp, input, output,
// constant, sampler, color output, etc.) based on its qualifier. Also
// detects statically writing both gl_FragColor and gl_FragData, which is
// an error.
// NOTE(review): the switch(qualifier) header lines are missing from this
// extract; the case list below is that switch's body.
2358 sw::Shader::ParameterType OutputASM::registerType(TIntermTyped *operand)
2360 if(isSamplerRegister(operand))
2362 return sw::Shader::PARAMETER_SAMPLER;
2365 const TQualifier qualifier = operand->getQualifier();
// Track which fragment output convention the shader uses; mixing both is
// rejected.
2366 if((EvqFragColor == qualifier) || (EvqFragData == qualifier))
2368 if(((EvqFragData == qualifier) && (EvqFragColor == outputQualifier)) ||
2369 ((EvqFragColor == qualifier) && (EvqFragData == outputQualifier)))
2371 mContext.error(operand->getLine(), "static assignment to both gl_FragData and gl_FragColor", "");
2373 outputQualifier = qualifier;
2378 case EvqTemporary: return sw::Shader::PARAMETER_TEMP;
2379 case EvqGlobal: return sw::Shader::PARAMETER_TEMP;
2380 case EvqConstExpr: return sw::Shader::PARAMETER_FLOAT4LITERAL; // All converted to float
2381 case EvqAttribute: return sw::Shader::PARAMETER_INPUT;
2382 case EvqVaryingIn: return sw::Shader::PARAMETER_INPUT;
2383 case EvqVaryingOut: return sw::Shader::PARAMETER_OUTPUT;
2384 case EvqVertexIn: return sw::Shader::PARAMETER_INPUT;
2385 case EvqFragmentOut: return sw::Shader::PARAMETER_COLOROUT;
2386 case EvqVertexOut: return sw::Shader::PARAMETER_OUTPUT;
2387 case EvqFragmentIn: return sw::Shader::PARAMETER_INPUT;
2388 case EvqInvariantVaryingIn: return sw::Shader::PARAMETER_INPUT; // FIXME: Guarantee invariance at the backend
2389 case EvqInvariantVaryingOut: return sw::Shader::PARAMETER_OUTPUT; // FIXME: Guarantee invariance at the backend
2390 case EvqSmooth: return sw::Shader::PARAMETER_OUTPUT;
2391 case EvqFlat: return sw::Shader::PARAMETER_OUTPUT;
2392 case EvqCentroidOut: return sw::Shader::PARAMETER_OUTPUT;
2393 case EvqSmoothIn: return sw::Shader::PARAMETER_INPUT;
2394 case EvqFlatIn: return sw::Shader::PARAMETER_INPUT;
2395 case EvqCentroidIn: return sw::Shader::PARAMETER_INPUT;
2396 case EvqUniform: return sw::Shader::PARAMETER_CONST;
// Function parameters live in temporaries (copied in/out around calls).
2397 case EvqIn: return sw::Shader::PARAMETER_TEMP;
2398 case EvqOut: return sw::Shader::PARAMETER_TEMP;
2399 case EvqInOut: return sw::Shader::PARAMETER_TEMP;
2400 case EvqConstReadOnly: return sw::Shader::PARAMETER_TEMP;
2401 case EvqPosition: return sw::Shader::PARAMETER_OUTPUT;
2402 case EvqPointSize: return sw::Shader::PARAMETER_OUTPUT;
2403 case EvqInstanceID: return sw::Shader::PARAMETER_MISCTYPE;
2404 case EvqFragCoord: return sw::Shader::PARAMETER_MISCTYPE;
2405 case EvqFrontFacing: return sw::Shader::PARAMETER_MISCTYPE;
2406 case EvqPointCoord: return sw::Shader::PARAMETER_INPUT;
2407 case EvqFragColor: return sw::Shader::PARAMETER_COLOROUT;
2408 case EvqFragData: return sw::Shader::PARAMETER_COLOROUT;
2409 case EvqFragDepth: return sw::Shader::PARAMETER_DEPTHOUT;
2410 default: UNREACHABLE(qualifier);
2413 return sw::Shader::PARAMETER_VOID;
\r
// Returns the index of the register assigned to 'operand' inside the register
// file chosen by registerType(). Regular variables are allocated from the
// matching allocator (temporaries, varyings, attributes, uniforms, ...);
// built-ins with fixed locations set their declaration flag and return a
// fixed index.
2416 unsigned int OutputASM::registerIndex(TIntermTyped *operand)

2418 if(isSamplerRegister(operand))

2420 return samplerRegister(operand);

2423 switch(operand->getQualifier())

2425 case EvqTemporary: return temporaryRegister(operand);

2426 case EvqGlobal: return temporaryRegister(operand);

// Constant expressions are inlined as literals, never materialized here.
2427 case EvqConstExpr: UNREACHABLE(EvqConstExpr);

2428 case EvqAttribute: return attributeRegister(operand);

2429 case EvqVaryingIn: return varyingRegister(operand);

2430 case EvqVaryingOut: return varyingRegister(operand);

2431 case EvqVertexIn: return attributeRegister(operand);

2432 case EvqFragmentOut: return fragmentOutputRegister(operand);

2433 case EvqVertexOut: return varyingRegister(operand);

2434 case EvqFragmentIn: return varyingRegister(operand);

2435 case EvqInvariantVaryingIn: return varyingRegister(operand);

2436 case EvqInvariantVaryingOut: return varyingRegister(operand);

2437 case EvqSmooth: return varyingRegister(operand);

2438 case EvqFlat: return varyingRegister(operand);

2439 case EvqCentroidOut: return varyingRegister(operand);

2440 case EvqSmoothIn: return varyingRegister(operand);

2441 case EvqFlatIn: return varyingRegister(operand);

2442 case EvqCentroidIn: return varyingRegister(operand);

2443 case EvqUniform: return uniformRegister(operand);

2444 case EvqIn: return temporaryRegister(operand);

2445 case EvqOut: return temporaryRegister(operand);

2446 case EvqInOut: return temporaryRegister(operand);

2447 case EvqConstReadOnly: return temporaryRegister(operand);

2448 case EvqPosition: return varyingRegister(operand);

2449 case EvqPointSize: return varyingRegister(operand);

// Built-ins in the miscellaneous file: record that the shader uses them so
// the backend sets them up, then return their fixed slot. gl_FrontFacing
// shares the file with gl_FragCoord, hence index 1.
2450 case EvqInstanceID: vertexShader->instanceIdDeclared = true; return 0;

2451 case EvqFragCoord: pixelShader->vPosDeclared = true; return 0;

2452 case EvqFrontFacing: pixelShader->vFaceDeclared = true; return 1;

2453 case EvqPointCoord: return varyingRegister(operand);

// Fragment color outputs always start at color register 0; any array index
// is applied separately by the caller.
2454 case EvqFragColor: return 0;

2455 case EvqFragData: return 0;

2456 case EvqFragDepth: return 0;

2457 default: UNREACHABLE(operand->getQualifier());
\r
// Computes the destination write mask for 'destination': one bit per
// component (x=1, y=2, z=4, w=8), covering the register's component count.
2463 int OutputASM::writeMask(TIntermTyped *destination, int index)

2465 if(destination->getQualifier() == EvqPointSize)

2467 return 0x2; // Point size stored in the y component

// Low N bits set, where N is the component count of register 'index'.
2470 return 0xF >> (4 - registerSize(destination->getType(), index));
\r
// Computes the source read swizzle for an argument of 'size' components.
// Encoding: two bits per destination component (00=x, 01=y, 10=z, 11=w),
// component 0 in the lowest bits. Missing components replicate the last one.
2473 int OutputASM::readSwizzle(TIntermTyped *argument, int size)

2475 if(argument->getQualifier() == EvqPointSize)

2477 return 0x55; // Point size stored in the y component

// Lookup table indexed by component count; index 0 is unused.
2480 static const unsigned char swizzleSize[5] = {0x00, 0x00, 0x54, 0xA4, 0xE4}; // (void), xxxx, xyyy, xyzz, xyzw

2482 return swizzleSize[size];
\r
2485 // Conservatively checks whether an expression is fast to compute and has no side effects
2486 bool OutputASM::trivial(TIntermTyped *expression, int budget)

// Multi-register values are never considered trivial.
2488 if(!expression->isRegister())

// A non-negative remaining budget means the expression fit within the cost limit.
2493 return cost(expression, budget) >= 0;
\r
2496 // Returns the remaining computing budget (if < 0 the expression is too expensive or has side effects)
2497 int OutputASM::cost(TIntermNode *expression, int budget)

// Leaf nodes (symbols and constants) are free.
2504 if(expression->getAsSymbolNode())

2508 else if(expression->getAsConstantUnion())

2512 else if(expression->getAsBinaryNode())

2514 TIntermBinary *binary = expression->getAsBinaryNode();

2516 switch(binary->getOp())

// Swizzles and direct indexing cost nothing beyond their operand:
// they only affect register/component selection.
2518 case EOpVectorSwizzle:

2519 case EOpIndexDirect:

2520 case EOpIndexDirectStruct:

2521 case EOpIndexDirectInterfaceBlock:

2522 return cost(binary->getLeft(), budget - 0);

// Other binary operators cost one unit plus both operands.
2526 return cost(binary->getLeft(), cost(binary->getRight(), budget - 1));

2531 else if(expression->getAsUnaryNode())

2533 TIntermUnary *unary = expression->getAsUnaryNode();

2535 switch(unary->getOp())

2539 return cost(unary->getOperand(), budget - 1);

2544 else if(expression->getAsSelectionNode())

2546 TIntermSelection *selection = expression->getAsSelectionNode();

2548 if(selection->usesTernaryOperator())

2550 TIntermTyped *condition = selection->getCondition();

2551 TIntermNode *trueBlock = selection->getTrueBlock();

2552 TIntermNode *falseBlock = selection->getFalseBlock();

2553 TIntermConstantUnion *constantCondition = condition->getAsConstantUnion();

// A constant condition selects one branch at compile time, so only that
// branch contributes to the cost.
2555 if(constantCondition)

2557 bool trueCondition = constantCondition->getUnionArrayPointer()->getBConst();

2561 return cost(trueBlock, budget - 0);

2565 return cost(falseBlock, budget - 0);

// Non-constant ternary: both branches are evaluated (select), plus overhead.
2570 return cost(trueBlock, cost(falseBlock, budget - 2));
\r
// Linear search of the compiled-function table by (mangled) name.
// Returns the matching entry, or null when the name is not found.
2578 const Function *OutputASM::findFunction(const TString &name)

2580 for(unsigned int f = 0; f < functionArray.size(); f++)

2582 if(functionArray[f].name == name)

2584 return &functionArray[f];
\r
// Allocates (or returns the existing) temporary register for 'temporary'.
2591 int OutputASM::temporaryRegister(TIntermTyped *temporary)

2593 return allocate(temporaries, temporary);
\r
// Allocates (or returns the existing) varying register range for 'varying'.
// On first allocation it also assigns semantics: pixel-shader inputs get
// TEXCOORD (gl_PointCoord) or COLOR semantics per register/component, while
// vertex-shader outputs record gl_Position / gl_PointSize specially. Reports
// an error when the packed varyings exceed the stage's register limit.
2596 int OutputASM::varyingRegister(TIntermTyped *varying)

2598 int var = lookup(varyings, varying);

// Not seen before: allocate and declare it.
2602 var = allocate(varyings, varying);

2603 int componentCount = varying->registerSize();

2604 int registerCount = varying->totalRegisterCount();

2608 if((var + registerCount) > sw::PixelShader::MAX_INPUT_VARYINGS)

2610 mContext.error(varying->getLine(), "Varyings packing failed: Too many varyings", "fragment shader");

2614 if(varying->getQualifier() == EvqPointCoord)

2616 ASSERT(varying->isRegister());

// Tag only the components the varying actually uses.
2617 if(componentCount >= 1) pixelShader->semantic[var][0] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);

2618 if(componentCount >= 2) pixelShader->semantic[var][1] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);

2619 if(componentCount >= 3) pixelShader->semantic[var][2] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);

2620 if(componentCount >= 4) pixelShader->semantic[var][3] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, var);

// User-defined varyings: one COLOR semantic per allocated register.
2624 for(int i = 0; i < varying->totalRegisterCount(); i++)

2626 if(componentCount >= 1) pixelShader->semantic[var + i][0] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);

2627 if(componentCount >= 2) pixelShader->semantic[var + i][1] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);

2628 if(componentCount >= 3) pixelShader->semantic[var + i][2] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);

2629 if(componentCount >= 4) pixelShader->semantic[var + i][3] = sw::Shader::Semantic(sw::Shader::USAGE_COLOR, var + i);

2633 else if(vertexShader)

2635 if((var + registerCount) > sw::VertexShader::MAX_OUTPUT_VARYINGS)

2637 mContext.error(varying->getLine(), "Varyings packing failed: Too many varyings", "vertex shader");

2641 if(varying->getQualifier() == EvqPosition)

2643 ASSERT(varying->isRegister());

2644 vertexShader->output[var][0] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);

2645 vertexShader->output[var][1] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);

2646 vertexShader->output[var][2] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);

2647 vertexShader->output[var][3] = sw::Shader::Semantic(sw::Shader::USAGE_POSITION, 0);

// Remember which output register carries gl_Position for the backend.
2648 vertexShader->positionRegister = var;

2650 else if(varying->getQualifier() == EvqPointSize)

2652 ASSERT(varying->isRegister());

2653 vertexShader->output[var][0] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);

2654 vertexShader->output[var][1] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);

2655 vertexShader->output[var][2] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);

2656 vertexShader->output[var][3] = sw::Shader::Semantic(sw::Shader::USAGE_PSIZE, 0);

2657 vertexShader->pointSizeRegister = var;

2661 // Semantic indexes for user varyings will be assigned during program link to match the pixel shader

// Neither pixel nor vertex shader: can't happen.
2664 else UNREACHABLE(0);

// Publish the varying to the shader object for program linking.
2666 declareVarying(varying, var);
\r
// Records 'varying' in the shader object's varying list with its assigned
// register, so the linker can match vertex outputs to fragment inputs.
// gl_PointCoord is skipped since it needs no linking.
2672 void OutputASM::declareVarying(TIntermTyped *varying, int reg)

2674 if(varying->getQualifier() != EvqPointCoord) // gl_PointCoord does not need linking

2676 const TType &type = varying->getType();

2677 const char *name = varying->getAsSymbolNode()->getSymbol().c_str();

2678 VaryingList &activeVaryings = shaderObject->varyings;

2680 // Check if this varying has been declared before without having a register assigned

2681 for(VaryingList::iterator v = activeVaryings.begin(); v != activeVaryings.end(); v++)

2683 if(v->name == name)

// A prior declaration must either be unassigned or agree on the register.
2687 ASSERT(v->reg < 0 || v->reg == reg);

// First declaration: append a new entry with the assigned register.
2695 activeVaryings.push_back(glsl::Varying(glVariableType(type), name, varying->getArraySize(), reg, 0));
\r
// Allocates (or returns the existing) constant-register index for a non-sampler
// uniform or uniform interface block, declaring it to the shader object on
// first use.
2699 int OutputASM::uniformRegister(TIntermTyped *uniform)

2701 const TType &type = uniform->getType();

// Samplers are handled by samplerRegister(), never here.
2702 ASSERT(!IsSampler(type.getBasicType()));

2703 TInterfaceBlock *block = type.getAsInterfaceBlock();

2704 TIntermSymbol *symbol = uniform->getAsSymbolNode();

2705 ASSERT(symbol || block);

2707 if(symbol || block)

2709 int index = lookup(uniforms, uniform);

// First use: allocate registers and publish the uniform's metadata.
2713 index = allocate(uniforms, uniform);

2714 const TString &name = symbol ? symbol->getSymbol() : block->name();

2716 declareUniform(type, name, index);
\r
// Allocates (or returns the existing) input register range for a vertex
// attribute, sets TEXCOORD input semantics for each register, and records the
// attribute (with any layout location) in the shader object's active list.
2725 int OutputASM::attributeRegister(TIntermTyped *attribute)

// GLSL ES attributes cannot be arrays.
2727 ASSERT(!attribute->isArray());

2729 int index = lookup(attributes, attribute);

2733 TIntermSymbol *symbol = attribute->getAsSymbolNode();

2738 index = allocate(attributes, attribute);

2739 const TType &type = attribute->getType();

2740 int registerCount = attribute->totalRegisterCount();

// Only declare input semantics when the range fits the hardware limit.
2742 if(vertexShader && (index + registerCount) <= sw::VertexShader::MAX_INPUT_ATTRIBUTES)

2744 for(int i = 0; i < registerCount; i++)

2746 vertexShader->input[index + i] = sw::Shader::Semantic(sw::Shader::USAGE_TEXCOORD, index + i);

2750 ActiveAttributes &activeAttributes = shaderObject->activeAttributes;

2752 const char *name = symbol->getSymbol().c_str();

2753 activeAttributes.push_back(Attribute(glVariableType(type), name, type.getArraySize(), type.getLayoutQualifier().location, index));
\r
// Allocates (or returns the existing) color-output register for a user-defined
// fragment shader output.
2760 int OutputASM::fragmentOutputRegister(TIntermTyped *fragmentOutput)

2762 return allocate(fragmentOutputs, fragmentOutput);
\r
// Resolves the sampler register for an arbitrary sampler expression.
// Plain symbols go straight to the symbol overload; indexed/struct accesses
// resolve the base symbol here, with the index applied later by the caller.
2765 int OutputASM::samplerRegister(TIntermTyped *sampler)

2767 ASSERT(IsSampler(sampler->getType().getBasicType()));

2768 TIntermSymbol *symbol = sampler->getAsSymbolNode();

2769 TIntermBinary *binary = sampler->getAsBinaryNode();

2773 return samplerRegister(symbol);

// Only indexing operators can yield a sampler from a compound expression.
2777 ASSERT(binary->getOp() == EOpIndexDirect || binary->getOp() == EOpIndexIndirect ||

2778 binary->getOp() == EOpIndexDirectStruct || binary->getOp() == EOpIndexDirectInterfaceBlock);

2780 return samplerRegister(binary->getLeft()); // Index added later

2782 else UNREACHABLE(0);
\r
// Allocates (or returns the existing) sampler register for a sampler symbol,
// declaring it as a uniform on first allocation.
2787 int OutputASM::samplerRegister(TIntermSymbol *sampler)

2789 const TType &type = sampler->getType();

2790 ASSERT(IsSampler(type.getBasicType()) || type.getStruct()); // Structures can contain samplers

2792 int index = lookup(samplers, sampler);

2796 index = allocate(samplers, sampler);

// Samplers inside function parameters etc. are not uniforms; only declare
// actual uniform samplers to the shader object.
2798 if(sampler->getQualifier() == EvqUniform)

2800 const char *name = sampler->getSymbol().c_str();

2801 declareUniform(type, name, index);
\r
// Finds the register index previously assigned to 'variable' in 'list'.
// Matches first by node pointer, then by interface-block name, then by symbol
// id — the same GLSL variable can appear as distinct AST nodes.
2808 int OutputASM::lookup(VariableArray &list, TIntermTyped *variable)

2810 for(unsigned int i = 0; i < list.size(); i++)

2812 if(list[i] == variable)

2814 return i; // Pointer match

2818 TIntermSymbol *varSymbol = variable->getAsSymbolNode();

2819 TInterfaceBlock *varBlock = variable->getType().getAsInterfaceBlock();

2823 for(unsigned int i = 0; i < list.size(); i++)

2827 TInterfaceBlock *listBlock = list[i]->getType().getAsInterfaceBlock();

// Interface blocks are identified by name; sanity-check that every other
// property of the two declarations agrees.
2831 if(listBlock->name() == varBlock->name())

2833 ASSERT(listBlock->arraySize() == varBlock->arraySize());

2834 ASSERT(listBlock->fields() == varBlock->fields());

2835 ASSERT(listBlock->blockStorage() == varBlock->blockStorage());

2836 ASSERT(listBlock->matrixPacking() == varBlock->matrixPacking());

2844 else if(varSymbol)

2846 for(unsigned int i = 0; i < list.size(); i++)

2850 TIntermSymbol *listSymbol = list[i]->getAsSymbolNode();

// Symbols are identified by unique id; name/type/qualifier must agree.
2854 if(listSymbol->getId() == varSymbol->getId())

2856 ASSERT(listSymbol->getSymbol() == varSymbol->getSymbol());

2857 ASSERT(listSymbol->getType() == varSymbol->getType());

2858 ASSERT(listSymbol->getQualifier() == varSymbol->getQualifier());
\r
// Returns the register index for 'variable', allocating a contiguous run of
// totalRegisterCount() slots in 'list' if it has none yet. Uses first-fit over
// freed slots, falling back to growing the list at the end.
2870 int OutputASM::allocate(VariableArray &list, TIntermTyped *variable)

2872 int index = lookup(list, variable);

2876 unsigned int registerCount = variable->totalRegisterCount();

// First-fit scan: look for 'registerCount' consecutive free (null) slots.
2878 for(unsigned int i = 0; i < list.size(); i++)

2882 unsigned int j = 1;

2883 for( ; j < registerCount && (i + j) < list.size(); j++)

2885 if(list[i + j] != 0)

2891 if(j == registerCount) // Found free slots

2893 for(unsigned int j = 0; j < registerCount; j++)

2895 list[i + j] = variable;

// No gap large enough: append new slots at the end of the list.
2903 index = list.size();

2905 for(unsigned int i = 0; i < registerCount; i++)

2907 list.push_back(variable);
\r
// Releases the register slots held by 'variable' in 'list' (if any), making
// them available for reuse by allocate().
2914 void OutputASM::free(VariableArray &list, TIntermTyped *variable)

2916 int index = lookup(list, variable);
\r
// Recursively registers a uniform (or uniform block / struct / array thereof)
// with the shader object, flattening aggregates into individual active-uniform
// entries with dot/bracket-qualified names. 'encoder' computes std140 block
// member offsets when the uniform lives in an interface block; 'blockId' is
// the owning block's index (-1 for default-block uniforms). Returns the
// register index assigned to the named uniform.
2924 int OutputASM::declareUniform(const TType &type, const TString &name, int registerIndex, int blockId, BlockLayoutEncoder* encoder)

2926 const TStructure *structure = type.getStruct();

2927 const TInterfaceBlock *block = (type.isInterfaceBlock() || (blockId == -1)) ? type.getInterfaceBlock() : nullptr;

2928 ActiveUniforms &activeUniforms = shaderObject->activeUniforms;

// Base case: a plain (non-aggregate) uniform.
2930 if(!structure && !block)

2932 const BlockMemberInfo blockInfo = encoder ? encoder->encodeType(type) : BlockMemberInfo::getDefaultBlockInfo();

// Block members additionally record their layout info and membership.
2935 blockDefinitions[blockId].indexMap[registerIndex] = TypedMemberInfo(blockInfo, type);

2936 shaderObject->activeUniformBlocks[blockId].fields.push_back(activeUniforms.size());

2938 int fieldRegisterIndex = encoder ? shaderObject->activeUniformBlocks[blockId].registerIndex + BlockLayoutEncoder::getBlockRegister(blockInfo) : registerIndex;

2939 activeUniforms.push_back(Uniform(glVariableType(type), glVariablePrecision(type), name.c_str(), type.getArraySize(),

2940 fieldRegisterIndex, blockId, blockInfo));

// Sampler uniforms additionally declare each sampler slot to the backend.
2941 if(isSamplerRegister(type))

2943 for(int i = 0; i < type.totalRegisterCount(); i++)

2945 shader->declareSampler(fieldRegisterIndex + i);

// Interface-block case: define the block (once) and recurse into its fields.
2951 ActiveUniformBlocks &activeUniformBlocks = shaderObject->activeUniformBlocks;

2952 const TFieldList& fields = block->fields();

2953 const TString &blockName = block->name();

2954 int fieldRegisterIndex = registerIndex;

2955 bool isUniformBlockMember = !type.isInterfaceBlock() && (blockId == -1);

2957 if(isUniformBlockMember)

2959 // This is a uniform that's part of a block, let's see if the block is already defined

2960 for(size_t i = 0; i < activeUniformBlocks.size(); ++i)

2962 if(activeUniformBlocks[i].name == blockName.c_str())

2964 // The block is already defined, find the register for the current uniform and return it

2965 for(size_t j = 0; j < fields.size(); j++)

2967 const TString &fieldName = fields[j]->name();

2968 if(fieldName == name)

2970 return fieldRegisterIndex;

// Skip over the registers used by preceding fields.
2973 fieldRegisterIndex += fields[j]->type()->totalRegisterCount();

2977 return fieldRegisterIndex;

// Block not defined yet: create its metadata and a std140 layout encoder.
2982 blockId = activeUniformBlocks.size();

2983 bool isRowMajor = block->matrixPacking() == EmpRowMajor;

2984 activeUniformBlocks.push_back(UniformBlock(blockName.c_str(), 0, block->arraySize(),

2985 block->blockStorage(), isRowMajor, registerIndex, blockId));

2986 blockDefinitions.push_back(BlockDefinition());

2988 Std140BlockEncoder currentBlockEncoder(isRowMajor);

2989 currentBlockEncoder.enterAggregateType();

2990 for(size_t i = 0; i < fields.size(); i++)

2992 const TType &fieldType = *(fields[i]->type());

2993 const TString &fieldName = fields[i]->name();

// Remember the register of the member we were originally asked about.
2994 if(isUniformBlockMember && (fieldName == name))

2996 registerIndex = fieldRegisterIndex;

// Instance-named blocks prefix their members with the block name.
2999 const TString uniformName = block->hasInstanceName() ? blockName + "." + fieldName : fieldName;

3001 declareUniform(fieldType, uniformName, fieldRegisterIndex, blockId, &currentBlockEncoder);

3002 fieldRegisterIndex += fieldType.totalRegisterCount();

3004 currentBlockEncoder.exitAggregateType();

3005 activeUniformBlocks[blockId].dataSize = currentBlockEncoder.getBlockSize();

// Struct (or struct-array) case: flatten each element/field recursively.
3009 int fieldRegisterIndex = registerIndex;

3011 const TFieldList& fields = structure->fields();

3012 if(type.isArray() && (structure || type.isInterfaceBlock()))

// Array of structs: emit "name[i].field" entries for every element.
3014 for(int i = 0; i < type.getArraySize(); i++)

3018 encoder->enterAggregateType();

3020 for(size_t j = 0; j < fields.size(); j++)

3022 const TType &fieldType = *(fields[j]->type());

3023 const TString &fieldName = fields[j]->name();

3024 const TString uniformName = name + "[" + str(i) + "]." + fieldName;

3026 declareUniform(fieldType, uniformName, fieldRegisterIndex, blockId, encoder);

3027 fieldRegisterIndex += fieldType.totalRegisterCount();

3031 encoder->exitAggregateType();

// Single struct: emit "name.field" entries.
3039 encoder->enterAggregateType();

3041 for(size_t i = 0; i < fields.size(); i++)

3043 const TType &fieldType = *(fields[i]->type());

3044 const TString &fieldName = fields[i]->name();

3045 const TString uniformName = name + "." + fieldName;

3047 declareUniform(fieldType, uniformName, fieldRegisterIndex, blockId, encoder);

3048 fieldRegisterIndex += fieldType.totalRegisterCount();

3052 encoder->exitAggregateType();

3057 return registerIndex;
\r
// Translates a compiler TType into the corresponding OpenGL (ES) type enum
// reported through the active-uniform/attribute APIs, e.g. vec3 ->
// GL_FLOAT_VEC3, mat2x4 -> GL_FLOAT_MAT2x4, sampler2D -> GL_SAMPLER_2D.
3060 GLenum OutputASM::glVariableType(const TType &type)

3062 switch(type.getBasicType())

// Floating-point scalars, vectors and (possibly non-square) matrices.
3065 if(type.isScalar())

3069 else if(type.isVector())

3071 switch(type.getNominalSize())

3073 case 2: return GL_FLOAT_VEC2;

3074 case 3: return GL_FLOAT_VEC3;

3075 case 4: return GL_FLOAT_VEC4;

3076 default: UNREACHABLE(type.getNominalSize());

3079 else if(type.isMatrix())

// Matrices dispatch on column count, then row (secondary) size.
3081 switch(type.getNominalSize())

3084 switch(type.getSecondarySize())

3086 case 2: return GL_FLOAT_MAT2;

3087 case 3: return GL_FLOAT_MAT2x3;

3088 case 4: return GL_FLOAT_MAT2x4;

3089 default: UNREACHABLE(type.getSecondarySize());

3092 switch(type.getSecondarySize())

3094 case 2: return GL_FLOAT_MAT3x2;

3095 case 3: return GL_FLOAT_MAT3;

3096 case 4: return GL_FLOAT_MAT3x4;

3097 default: UNREACHABLE(type.getSecondarySize());

3100 switch(type.getSecondarySize())

3102 case 2: return GL_FLOAT_MAT4x2;

3103 case 3: return GL_FLOAT_MAT4x3;

3104 case 4: return GL_FLOAT_MAT4;

3105 default: UNREACHABLE(type.getSecondarySize());

3107 default: UNREACHABLE(type.getNominalSize());

3110 else UNREACHABLE(0);

// Signed integer scalars and vectors.
3113 if(type.isScalar())

3117 else if(type.isVector())

3119 switch(type.getNominalSize())

3121 case 2: return GL_INT_VEC2;

3122 case 3: return GL_INT_VEC3;

3123 case 4: return GL_INT_VEC4;

3124 default: UNREACHABLE(type.getNominalSize());

3127 else UNREACHABLE(0);

// Unsigned integer scalars and vectors.
3130 if(type.isScalar())

3132 return GL_UNSIGNED_INT;

3134 else if(type.isVector())

3136 switch(type.getNominalSize())

3138 case 2: return GL_UNSIGNED_INT_VEC2;

3139 case 3: return GL_UNSIGNED_INT_VEC3;

3140 case 4: return GL_UNSIGNED_INT_VEC4;

3141 default: UNREACHABLE(type.getNominalSize());

3144 else UNREACHABLE(0);

// Boolean scalars and vectors.
3147 if(type.isScalar())

3151 else if(type.isVector())

3153 switch(type.getNominalSize())

3155 case 2: return GL_BOOL_VEC2;

3156 case 3: return GL_BOOL_VEC3;

3157 case 4: return GL_BOOL_VEC4;

3158 default: UNREACHABLE(type.getNominalSize());

3161 else UNREACHABLE(0);

// Sampler types map one-to-one to their GL enums.
3163 case EbtSampler2D:

3164 return GL_SAMPLER_2D;

3165 case EbtISampler2D:

3166 return GL_INT_SAMPLER_2D;

3167 case EbtUSampler2D:

3168 return GL_UNSIGNED_INT_SAMPLER_2D;

3169 case EbtSamplerCube:

3170 return GL_SAMPLER_CUBE;

3171 case EbtISamplerCube:

3172 return GL_INT_SAMPLER_CUBE;

3173 case EbtUSamplerCube:

3174 return GL_UNSIGNED_INT_SAMPLER_CUBE;

3175 case EbtSamplerExternalOES:

3176 return GL_SAMPLER_EXTERNAL_OES;

3177 case EbtSampler3D:

3178 return GL_SAMPLER_3D_OES;

3179 case EbtISampler3D:

3180 return GL_INT_SAMPLER_3D;

3181 case EbtUSampler3D:

3182 return GL_UNSIGNED_INT_SAMPLER_3D;

3183 case EbtSampler2DArray:

3184 return GL_SAMPLER_2D_ARRAY;

3185 case EbtISampler2DArray:

3186 return GL_INT_SAMPLER_2D_ARRAY;

3187 case EbtUSampler2DArray:

3188 return GL_UNSIGNED_INT_SAMPLER_2D_ARRAY;

3189 case EbtSampler2DShadow:

3190 return GL_SAMPLER_2D_SHADOW;

3191 case EbtSamplerCubeShadow:

3192 return GL_SAMPLER_CUBE_SHADOW;

3193 case EbtSampler2DArrayShadow:

3194 return GL_SAMPLER_2D_ARRAY_SHADOW;

3196 UNREACHABLE(type.getBasicType());
\r
// Translates a compiler precision qualifier into the GL precision enum used
// by glGetShaderPrecisionFormat-style reporting. Only float and int carry a
// precision; other types fall through to the default at the end (elsewhere).
3203 GLenum OutputASM::glVariablePrecision(const TType &type)

3205 if(type.getBasicType() == EbtFloat)

3207 switch(type.getPrecision())

3209 case EbpHigh: return GL_HIGH_FLOAT;

3210 case EbpMedium: return GL_MEDIUM_FLOAT;

3211 case EbpLow: return GL_LOW_FLOAT;

3212 case EbpUndefined:

3213 // Should be defined as the default precision by the parser

3214 default: UNREACHABLE(type.getPrecision());

3217 else if(type.getBasicType() == EbtInt)

3219 switch(type.getPrecision())

3221 case EbpHigh: return GL_HIGH_INT;

3222 case EbpMedium: return GL_MEDIUM_INT;

3223 case EbpLow: return GL_LOW_INT;

3224 case EbpUndefined:

3225 // Should be defined as the default precision by the parser

3226 default: UNREACHABLE(type.getPrecision());

3230 // Other types (boolean, sampler) don't have a precision
\r
// Returns the component count (vector dimension) of a single-register operand.
3234 int OutputASM::dim(TIntermNode *v)

3236 TIntermTyped *vector = v->getAsTyped();

3237 ASSERT(vector && vector->isRegister());

3238 return vector->getNominalSize();
\r
// Returns the secondary dimension (row count) of a non-array matrix operand.
3241 int OutputASM::dim2(TIntermNode *m)

3243 TIntermTyped *matrix = m->getAsTyped();

3244 ASSERT(matrix && matrix->isMatrix() && !matrix->isArray());

3245 return matrix->getSecondarySize();
\r
3248 // Returns ~0u if no loop count could be determined
3249 unsigned int OutputASM::loopCount(TIntermLoop *node)

3251 // Parse loops of the form:

3252 // for(int index = initial; index [comparator] limit; index += increment)

3253 TIntermSymbol *index = 0;

3254 TOperator comparator = EOpNull;

3257 int increment = 0;

3259 // Parse index name and initial value

3260 if(node->getInit())

3262 TIntermAggregate *init = node->getInit()->getAsAggregate();

3266 TIntermSequence &sequence = init->getSequence();

3267 TIntermTyped *variable = sequence[0]->getAsTyped();

3269 if(variable && variable->getQualifier() == EvqTemporary)

3271 TIntermBinary *assign = variable->getAsBinaryNode();

3273 if(assign->getOp() == EOpInitialize)

3275 TIntermSymbol *symbol = assign->getLeft()->getAsSymbolNode();

3276 TIntermConstantUnion *constant = assign->getRight()->getAsConstantUnion();

3278 if(symbol && constant)

// Only scalar integer loop counters are analyzed.
3280 if(constant->getBasicType() == EbtInt && constant->getNominalSize() == 1)

3283 initial = constant->getUnionArrayPointer()[0].getIConst();

3291 // Parse comparator and limit value

3292 if(index && node->getCondition())

3294 TIntermBinary *test = node->getCondition()->getAsBinaryNode();

// NOTE(review): test->getLeft()->getAsSymbolNode() is dereferenced without a
// null check here — presumably guaranteed by earlier validation; confirm.
3296 if(test && test->getLeft()->getAsSymbolNode()->getId() == index->getId())

3298 TIntermConstantUnion *constant = test->getRight()->getAsConstantUnion();

3302 if(constant->getBasicType() == EbtInt && constant->getNominalSize() == 1)

3304 comparator = test->getOp();

3305 limit = constant->getUnionArrayPointer()[0].getIConst();

3311 // Parse increment

3312 if(index && comparator != EOpNull && node->getExpression())

3314 TIntermBinary *binaryTerminal = node->getExpression()->getAsBinaryNode();

3315 TIntermUnary *unaryTerminal = node->getExpression()->getAsUnaryNode();

3317 if(binaryTerminal)

3319 TOperator op = binaryTerminal->getOp();

3320 TIntermConstantUnion *constant = binaryTerminal->getRight()->getAsConstantUnion();

3324 if(constant->getBasicType() == EbtInt && constant->getNominalSize() == 1)

3326 int value = constant->getUnionArrayPointer()[0].getIConst();

// Only += and -= by a constant are recognized.
3330 case EOpAddAssign: increment = value; break;

3331 case EOpSubAssign: increment = -value; break;

3332 default: UNIMPLEMENTED();

3337 else if(unaryTerminal)

3339 TOperator op = unaryTerminal->getOp();

// ++/-- in either prefix or postfix form count as +/-1.
3343 case EOpPostIncrement: increment = 1; break;

3344 case EOpPostDecrement: increment = -1; break;

3345 case EOpPreIncrement: increment = 1; break;

3346 case EOpPreDecrement: increment = -1; break;

3347 default: UNIMPLEMENTED();

// With all three pieces known, compute the trip count.
3352 if(index && comparator != EOpNull && increment != 0)

// Normalize 'i <= limit' to 'i < limit + 1' so one formula handles both.
3354 if(comparator == EOpLessThanEqual)

3356 comparator = EOpLessThan;

3360 if(comparator == EOpLessThan)

// NOTE(review): (limit - initial) / increment uses plain int arithmetic —
// extreme constants could overflow; presumably bounded by shader limits.
3362 int iterations = (limit - initial) / increment;

3364 if(iterations <= 0)

3369 return iterations;

3371 else UNIMPLEMENTED(); // Falls through
\r
// Walks the subtree rooted at 'node' and reports whether it contains a
// discontinuous loop exit (see visitBranch below).
3377 bool DetectLoopDiscontinuity::traverse(TIntermNode *node)

3380 loopDiscontinuity = false;

3382 node->traverse(this);

3384 return loopDiscontinuity;
\r
// Tracks loop nesting depth as the traverser enters (PreVisit) and leaves
// (PostVisit) each loop node.
3387 bool DetectLoopDiscontinuity::visitLoop(Visit visit, TIntermLoop *loop)

3389 if(visit == PreVisit)

3393 else if(visit == PostVisit)
\r
// Flags branch statements that exit a loop abruptly. Once a discontinuity is
// found, traversal of further children is cut short (returns false).
3401 bool DetectLoopDiscontinuity::visitBranch(Visit visit, TIntermBranch *node)

// Already found one; no need to keep looking.
3403 if(loopDiscontinuity)

3413 switch(node->getFlowOp())

3420 loopDiscontinuity = true;

3422 default: UNREACHABLE(node->getFlowOp());

3425 return !loopDiscontinuity;
\r
// Continue descending into aggregate nodes only while no discontinuity has
// been detected yet.
3428 bool DetectLoopDiscontinuity::visitAggregate(Visit visit, TIntermAggregate *node)

3430 return !loopDiscontinuity;
\r