// Access the label number and name for this node.
SizeT getIndex() const { return Number; }
IceString getName() const;
+ void setName(IceString &NewName) {
+ // Make sure that the name can only be set once.
+ assert(Name.empty());
+ Name = NewName;
+ }
IceString getAsmName() const {
return ".L" + Func->getFunctionName() + "$" + getName();
}
SizeT getIndex() const { return Number; }
IceString getName() const;
+ void setName(IceString &NewName) {
+ // Make sure that the name can only be set once.
+ assert(Name.empty());
+ Name = NewName;
+ }
Inst *getDefinition() const { return DefInst; }
void setDefinition(Inst *Inst, const CfgNode *Node);
// (bit)vector index for liveness analysis.
const SizeT Number;
// Name is optional.
- const IceString Name;
+ IceString Name;
// DefInst is the instruction that produces this variable as its
// dest.
Inst *DefInst;
installGlobalVar();
}
-// Parses a valuesymtab block in the bitcode file.
+/// Base class for parsing a valuesymtab block in the bitcode file.
class ValuesymtabParser : public BlockParserBaseClass {
- typedef SmallString<128> StringType;
+ ValuesymtabParser(const ValuesymtabParser &) LLVM_DELETED_FUNCTION;
+ void operator=(const ValuesymtabParser &) LLVM_DELETED_FUNCTION;
public:
- ValuesymtabParser(unsigned BlockID, BlockParserBaseClass *EnclosingParser,
- bool AllowBbEntries)
- : BlockParserBaseClass(BlockID, EnclosingParser),
- AllowBbEntries(AllowBbEntries) {}
+ ValuesymtabParser(unsigned BlockID, BlockParserBaseClass *EnclosingParser)
+ : BlockParserBaseClass(BlockID, EnclosingParser) {}
virtual ~ValuesymtabParser() LLVM_OVERRIDE {}
+protected:
+ typedef SmallString<128> StringType;
+
+ // Associates Name with the value defined by the given Index.
+ virtual void setValueName(uint64_t Index, StringType &Name) = 0;
+
+ // Associates Name with the basic block defined by the given Index.
+ virtual void setBbName(uint64_t Index, StringType &Name) = 0;
+
private:
- // True if entries to name basic blocks allowed.
- bool AllowBbEntries;
virtual void ProcessRecord() LLVM_OVERRIDE;
if (!isValidRecordSizeAtLeast(2, "Valuesymtab value entry"))
return;
ConvertToString(ConvertedName);
- Value *V = Context->getGlobalValueByID(Values[0]);
- if (V == NULL) {
- std::string Buffer;
- raw_string_ostream StrBuf(Buffer);
- StrBuf << "Invalid global address ID in valuesymtab: " << Values[0];
- Error(StrBuf.str());
- return;
- }
- V->setName(StringRef(ConvertedName.data(), ConvertedName.size()));
+ setValueName(Values[0], ConvertedName);
return;
}
case naclbitc::VST_CODE_BBENTRY: {
// VST_BBENTRY: [BbId, namechar x N]
- // For now, since we aren't processing function blocks, don't handle.
- if (AllowBbEntries) {
- Error("Valuesymtab bb entry not implemented");
+ if (!isValidRecordSizeAtLeast(2, "Valuesymtab basic block entry"))
return;
- }
- break;
+ ConvertToString(ConvertedName);
+ setBbName(Values[0], ConvertedName);
+ return;
}
default:
break;
return;
}
+class FunctionValuesymtabParser;
+
/// Parses function blocks in the bitcode file.
class FunctionParser : public BlockParserBaseClass {
FunctionParser(const FunctionParser &) LLVM_DELETED_FUNCTION;
FunctionParser &operator=(const FunctionParser &) LLVM_DELETED_FUNCTION;
+ friend class FunctionValuesymtabParser;
public:
FunctionParser(unsigned BlockID, BlockParserBaseClass *EnclosingParser)
Func->setReturnType(Context->convertToIceType(LLVMFunc->getReturnType()));
Func->setInternal(LLVMFunc->hasInternalLinkage());
CurrentNode = InstallNextBasicBlock();
+ Func->setEntryNode(CurrentNode);
for (Function::const_arg_iterator ArgI = LLVMFunc->arg_begin(),
ArgE = LLVMFunc->arg_end();
ArgI != ArgE; ++ArgI) {
Node->appendInst(Ice::InstUnreachable::create(Func));
}
}
+ Func->computePredecessors();
// Note: Once any errors have been found, we turn off all
// translation of all remaining functions. This allows use to see
// multiple errors, without adding extra checks to the translator
}
}
+// Parses valuesymtab blocks appearing in a function block.
+class FunctionValuesymtabParser : public ValuesymtabParser {
+ FunctionValuesymtabParser(const FunctionValuesymtabParser &)
+ LLVM_DELETED_FUNCTION;
+ void operator=(const FunctionValuesymtabParser &) LLVM_DELETED_FUNCTION;
+
+public:
+ FunctionValuesymtabParser(unsigned BlockID, FunctionParser *EnclosingParser)
+ : ValuesymtabParser(BlockID, EnclosingParser) {}
+
+private:
+ // Returns the enclosing function parser.
+ FunctionParser *getFunctionParser() const {
+ return reinterpret_cast<FunctionParser *>(GetEnclosingParser());
+ }
+
+ virtual void setValueName(uint64_t Index, StringType &Name) LLVM_OVERRIDE;
+ virtual void setBbName(uint64_t Index, StringType &Name) LLVM_OVERRIDE;
+
+ // Reports that the assignment of Name to the value associated with
+ // Index is not possible, for the given Context.
+ void reportUnableToAssign(const char *Context, uint64_t Index,
+ StringType &Name) {
+ std::string Buffer;
+ raw_string_ostream StrBuf(Buffer);
+ StrBuf << "Function-local " << Context << " name '" << Name
+ << "' can't be associated with index " << Index;
+ Error(StrBuf.str());
+ }
+};
+
+void FunctionValuesymtabParser::setValueName(uint64_t Index, StringType &Name) {
+ // Note: We check whether Index is too small, so that we can recover from
+ // the error (getFunctionParser()->getOperand would report a fatal error).
+ if (Index < getFunctionParser()->CachedNumGlobalValueIDs) {
+ reportUnableToAssign("instruction", Index, Name);
+ // TODO(kschimpf) Remove error recovery once implementation complete.
+ return;
+ }
+ Ice::Operand *Op = getFunctionParser()->getOperand(Index);
+ if (Ice::Variable *V = dyn_cast<Ice::Variable>(Op)) {
+ std::string Nm(Name.data(), Name.size());
+ V->setName(Nm);
+ } else {
+ reportUnableToAssign("variable", Index, Name);
+ }
+}
+
+void FunctionValuesymtabParser::setBbName(uint64_t Index, StringType &Name) {
+ if (Index >= getFunctionParser()->Func->getNumNodes()) {
+ reportUnableToAssign("block", Index, Name);
+ return;
+ }
+ std::string Nm(Name.data(), Name.size());
+ getFunctionParser()->Func->getNodes()[Index]->setName(Nm);
+}
+
bool FunctionParser::ParseBlock(unsigned BlockID) {
switch (BlockID) {
case naclbitc::CONSTANTS_BLOCK_ID: {
ConstantsParser Parser(BlockID, this);
return Parser.ParseThisBlock();
}
+ case naclbitc::VALUE_SYMTAB_BLOCK_ID: {
+ if (PNaClAllowLocalSymbolTables) {
+ FunctionValuesymtabParser Parser(BlockID, this);
+ return Parser.ParseThisBlock();
+ }
+ break;
+ }
default:
- return BlockParserBaseClass::ParseBlock(BlockID);
+ break;
}
+ return BlockParserBaseClass::ParseBlock(BlockID);
}
/// Parses the module block in the bitcode file.
virtual void ProcessRecord() LLVM_OVERRIDE;
};
+class ModuleValuesymtabParser : public ValuesymtabParser {
+ ModuleValuesymtabParser(const ModuleValuesymtabParser &)
+ LLVM_DELETED_FUNCTION;
+ void operator=(const ModuleValuesymtabParser &) LLVM_DELETED_FUNCTION;
+
+public:
+ ModuleValuesymtabParser(unsigned BlockID, ModuleParser *MP)
+ : ValuesymtabParser(BlockID, MP) {}
+
+ virtual ~ModuleValuesymtabParser() LLVM_OVERRIDE {}
+
+private:
+ virtual void setValueName(uint64_t Index, StringType &Name) LLVM_OVERRIDE;
+ virtual void setBbName(uint64_t Index, StringType &Name) LLVM_OVERRIDE;
+};
+
+void ModuleValuesymtabParser::setValueName(uint64_t Index, StringType &Name) {
+ Value *V = Context->getGlobalValueByID(Index);
+ if (V == NULL) {
+ std::string Buffer;
+ raw_string_ostream StrBuf(Buffer);
+ StrBuf << "Invalid global address ID in valuesymtab: " << Index;
+ Error(StrBuf.str());
+ return;
+ }
+ V->setName(StringRef(Name.data(), Name.size()));
+}
+
+void ModuleValuesymtabParser::setBbName(uint64_t Index, StringType &Name) {
+ std::string Buffer;
+ raw_string_ostream StrBuf(Buffer);
+ StrBuf << "Can't define basic block name at global level: '" << Name
+ << "' -> " << Index;
+ Error(StrBuf.str());
+}
+
bool ModuleParser::ParseBlock(unsigned BlockID) LLVM_OVERRIDE {
switch (BlockID) {
case naclbitc::BLOCKINFO_BLOCK_ID:
return Parser.ParseThisBlock();
}
case naclbitc::VALUE_SYMTAB_BLOCK_ID: {
- ValuesymtabParser Parser(BlockID, this, false);
+ ModuleValuesymtabParser Parser(BlockID, this);
return Parser.ParseThisBlock();
}
case naclbitc::FUNCTION_BLOCK_ID: {
; Test if we can read alloca instructions.
-; RUN: llvm-as < %s | pnacl-freeze \
+; RUN: llvm-as < %s | pnacl-freeze -allow-local-symbol-tables \
; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \
; RUN: -allow-pnacl-reader-error-recovery \
+; RUN: -allow-local-symbol-tables \
; RUN: | FileCheck %s
; Show examples where size is defined by a constant.
define i32 @AllocaA0Size1() {
+entry:
%array = alloca i8, i32 1
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__0 = alloca i8, i32 1
-; CHECK-NEXT: ret i32 %__0
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 1
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaA0Size2() {
+entry:
%array = alloca i8, i32 2
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__0 = alloca i8, i32 2
-; CHECK-NEXT: ret i32 %__0
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 2
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaA0Size3() {
+entry:
%array = alloca i8, i32 3
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__0 = alloca i8, i32 3
-; CHECK-NEXT: ret i32 %__0
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 3
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaA0Size4() {
+entry:
%array = alloca i8, i32 4
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__0 = alloca i8, i32 4
-; CHECK-NEXT: ret i32 %__0
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 4
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaA1Size4(i32 %n) {
+entry:
%array = alloca i8, i32 4, align 1
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__1 = alloca i8, i32 4, align 1
-; CHECK-NEXT: ret i32 %__1
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 4, align 1
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaA2Size4(i32 %n) {
+entry:
%array = alloca i8, i32 4, align 2
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__1 = alloca i8, i32 4, align 2
-; CHECK-NEXT: ret i32 %__1
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 4, align 2
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaA8Size4(i32 %n) {
+entry:
%array = alloca i8, i32 4, align 8
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__1 = alloca i8, i32 4, align 8
-; CHECK-NEXT: ret i32 %__1
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 4, align 8
+; CHECK-NEXT: ret i32 %array
}
define i32 @Alloca16Size4(i32 %n) {
+entry:
%array = alloca i8, i32 4, align 16
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__1 = alloca i8, i32 4, align 16
-; CHECK-NEXT: ret i32 %__1
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 4, align 16
+; CHECK-NEXT: ret i32 %array
}
; Show examples where size is not known at compile time.
define i32 @AllocaVarsizeA0(i32 %n) {
+entry:
%array = alloca i8, i32 %n
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__1 = alloca i8, i32 %__0
-; CHECK-NEXT: ret i32 %__1
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 %n
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaVarsizeA1(i32 %n) {
+entry:
%array = alloca i8, i32 %n, align 1
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__1 = alloca i8, i32 %__0, align 1
-; CHECK-NEXT: ret i32 %__1
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 %n, align 1
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaVarsizeA2(i32 %n) {
+entry:
%array = alloca i8, i32 %n, align 2
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__1 = alloca i8, i32 %__0, align 2
-; CHECK-NEXT: ret i32 %__1
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 %n, align 2
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaVarsizeA4(i32 %n) {
+entry:
%array = alloca i8, i32 %n, align 4
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__1 = alloca i8, i32 %__0, align 4
-; CHECK-NEXT: ret i32 %__1
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 %n, align 4
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaVarsizeA8(i32 %n) {
+entry:
%array = alloca i8, i32 %n, align 8
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__1 = alloca i8, i32 %__0, align 8
-; CHECK-NEXT: ret i32 %__1
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 %n, align 8
+; CHECK-NEXT: ret i32 %array
}
define i32 @AllocaVarsizeA16(i32 %n) {
+entry:
%array = alloca i8, i32 %n, align 16
%addr = ptrtoint i8* %array to i32
ret i32 %addr
-; CHECK: __0:
-; CHECK-NEXT: %__1 = alloca i8, i32 %__0, align 16
-; CHECK-NEXT: ret i32 %__1
+; CHECK: entry:
+; CHECK-NEXT: %array = alloca i8, i32 %n, align 16
+; CHECK-NEXT: ret i32 %array
}
; Tests if we can read binary operators.
-; RUN: llvm-as < %s | pnacl-freeze \
+; RUN: llvm-as < %s | pnacl-freeze -allow-local-symbol-tables \
; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \
; RUN: -allow-pnacl-reader-error-recovery \
+; RUN: -allow-local-symbol-tables \
; RUN: | FileCheck %s
; TODO(kschimpf): add i8/i16. Needs bitcasts.
define i32 @AddI32(i32 %a, i32 %b) {
+entry:
%add = add i32 %b, %a
ret i32 %add
}
-; CHECK: define i32 @AddI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = add i32 %__1, %__0
-; CHECK-NEXT: ret i32 %__2
+; CHECK: define i32 @AddI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %add = add i32 %b, %a
+; CHECK-NEXT: ret i32 %add
; CHECK-NEXT: }
define i64 @AddI64(i64 %a, i64 %b) {
+entry:
%add = add i64 %b, %a
ret i64 %add
}
-; CHECK-NEXT: define i64 @AddI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = add i64 %__1, %__0
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @AddI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %add = add i64 %b, %a
+; CHECK-NEXT: ret i64 %add
; CHECK-NEXT: }
define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%add = add <16 x i8> %b, %a
ret <16 x i8> %add
}
-; CHECK-NEXT: define <16 x i8> @AddV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = add <16 x i8> %__1, %__0
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @AddV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %add = add <16 x i8> %b, %a
+; CHECK-NEXT: ret <16 x i8> %add
; CHECK-NEXT: }
define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%add = add <8 x i16> %b, %a
ret <8 x i16> %add
}
-; CHECK-NEXT: define <8 x i16> @AddV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = add <8 x i16> %__1, %__0
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @AddV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %add = add <8 x i16> %b, %a
+; CHECK-NEXT: ret <8 x i16> %add
; CHECK-NEXT: }
define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%add = add <4 x i32> %b, %a
ret <4 x i32> %add
}
-; CHECK-NEXT: define <4 x i32> @AddV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = add <4 x i32> %__1, %__0
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @AddV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %add = add <4 x i32> %b, %a
+; CHECK-NEXT: ret <4 x i32> %add
; CHECK-NEXT: }
define float @AddFloat(float %a, float %b) {
+entry:
%add = fadd float %b, %a
ret float %add
}
-; CHECK-NEXT: define float @AddFloat(float %__0, float %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fadd float %__1, %__0
-; CHECK-NEXT: ret float %__2
+; CHECK-NEXT: define float @AddFloat(float %a, float %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %add = fadd float %b, %a
+; CHECK-NEXT: ret float %add
; CHECK-NEXT: }
define double @AddDouble(double %a, double %b) {
+entry:
%add = fadd double %b, %a
ret double %add
}
-; CHECK-NEXT: define double @AddDouble(double %__0, double %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fadd double %__1, %__0
-; CHECK-NEXT: ret double %__2
+; CHECK-NEXT: define double @AddDouble(double %a, double %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %add = fadd double %b, %a
+; CHECK-NEXT: ret double %add
; CHECK-NEXT: }
define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) {
+entry:
%add = fadd <4 x float> %b, %a
ret <4 x float> %add
}
-; CHECK-NEXT: define <4 x float> @AddV4Float(<4 x float> %__0, <4 x float> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fadd <4 x float> %__1, %__0
-; CHECK-NEXT: ret <4 x float> %__2
+; CHECK-NEXT: define <4 x float> @AddV4Float(<4 x float> %a, <4 x float> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %add = fadd <4 x float> %b, %a
+; CHECK-NEXT: ret <4 x float> %add
; CHECK-NEXT: }
; TODO(kschimpf): sub i8/i16. Needs bitcasts.
define i32 @SubI32(i32 %a, i32 %b) {
+entry:
%sub = sub i32 %a, %b
ret i32 %sub
}
-; CHECK-NEXT: define i32 @SubI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = sub i32 %__0, %__1
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @SubI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %sub = sub i32 %a, %b
+; CHECK-NEXT: ret i32 %sub
; CHECK-NEXT: }
define i64 @SubI64(i64 %a, i64 %b) {
+entry:
%sub = sub i64 %a, %b
ret i64 %sub
}
-; CHECK-NEXT: define i64 @SubI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = sub i64 %__0, %__1
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @SubI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %sub = sub i64 %a, %b
+; CHECK-NEXT: ret i64 %sub
; CHECK-NEXT: }
define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%sub = sub <16 x i8> %a, %b
ret <16 x i8> %sub
}
-; CHECK-NEXT: define <16 x i8> @SubV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = sub <16 x i8> %__0, %__1
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @SubV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %sub = sub <16 x i8> %a, %b
+; CHECK-NEXT: ret <16 x i8> %sub
; CHECK-NEXT: }
define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%sub = sub <8 x i16> %a, %b
ret <8 x i16> %sub
}
-; CHECK-NEXT: define <8 x i16> @SubV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = sub <8 x i16> %__0, %__1
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @SubV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %sub = sub <8 x i16> %a, %b
+; CHECK-NEXT: ret <8 x i16> %sub
; CHECK-NEXT: }
define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%sub = sub <4 x i32> %a, %b
ret <4 x i32> %sub
}
-; CHECK-NEXT: define <4 x i32> @SubV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = sub <4 x i32> %__0, %__1
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @SubV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %sub = sub <4 x i32> %a, %b
+; CHECK-NEXT: ret <4 x i32> %sub
; CHECK-NEXT: }
define float @SubFloat(float %a, float %b) {
+entry:
%sub = fsub float %a, %b
ret float %sub
}
-; CHECK-NEXT: define float @SubFloat(float %__0, float %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fsub float %__0, %__1
-; CHECK-NEXT: ret float %__2
+; CHECK-NEXT: define float @SubFloat(float %a, float %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %sub = fsub float %a, %b
+; CHECK-NEXT: ret float %sub
; CHECK-NEXT: }
define double @SubDouble(double %a, double %b) {
+entry:
%sub = fsub double %a, %b
ret double %sub
}
-; CHECK-NEXT: define double @SubDouble(double %__0, double %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fsub double %__0, %__1
-; CHECK-NEXT: ret double %__2
+; CHECK-NEXT: define double @SubDouble(double %a, double %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %sub = fsub double %a, %b
+; CHECK-NEXT: ret double %sub
; CHECK-NEXT: }
define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) {
+entry:
%sub = fsub <4 x float> %a, %b
ret <4 x float> %sub
}
-; CHECK-NEXT: define <4 x float> @SubV4Float(<4 x float> %__0, <4 x float> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fsub <4 x float> %__0, %__1
-; CHECK-NEXT: ret <4 x float> %__2
+; CHECK-NEXT: define <4 x float> @SubV4Float(<4 x float> %a, <4 x float> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %sub = fsub <4 x float> %a, %b
+; CHECK-NEXT: ret <4 x float> %sub
; CHECK-NEXT: }
; TODO(kschimpf): mul i8/i16. Needs bitcasts.
define i32 @MulI32(i32 %a, i32 %b) {
+entry:
%mul = mul i32 %b, %a
ret i32 %mul
}
-; CHECK-NEXT: define i32 @MulI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = mul i32 %__1, %__0
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @MulI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %mul = mul i32 %b, %a
+; CHECK-NEXT: ret i32 %mul
; CHECK-NEXT: }
define i64 @MulI64(i64 %a, i64 %b) {
+entry:
%mul = mul i64 %b, %a
ret i64 %mul
}
-; CHECK-NEXT: define i64 @MulI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = mul i64 %__1, %__0
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @MulI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %mul = mul i64 %b, %a
+; CHECK-NEXT: ret i64 %mul
; CHECK-NEXT: }
-
define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%mul = mul <16 x i8> %b, %a
ret <16 x i8> %mul
}
-; CHECK-NEXT: define <16 x i8> @MulV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = mul <16 x i8> %__1, %__0
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @MulV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %mul = mul <16 x i8> %b, %a
+; CHECK-NEXT: ret <16 x i8> %mul
; CHECK-NEXT: }
define float @MulFloat(float %a, float %b) {
+entry:
%mul = fmul float %b, %a
ret float %mul
}
-; CHECK-NEXT: define float @MulFloat(float %__0, float %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fmul float %__1, %__0
-; CHECK-NEXT: ret float %__2
+; CHECK-NEXT: define float @MulFloat(float %a, float %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %mul = fmul float %b, %a
+; CHECK-NEXT: ret float %mul
; CHECK-NEXT: }
define double @MulDouble(double %a, double %b) {
+entry:
%mul = fmul double %b, %a
ret double %mul
}
-; CHECK-NEXT: define double @MulDouble(double %__0, double %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fmul double %__1, %__0
-; CHECK-NEXT: ret double %__2
+; CHECK-NEXT: define double @MulDouble(double %a, double %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %mul = fmul double %b, %a
+; CHECK-NEXT: ret double %mul
; CHECK-NEXT: }
define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) {
+entry:
%mul = fmul <4 x float> %b, %a
ret <4 x float> %mul
}
-; CHECK-NEXT: define <4 x float> @MulV4Float(<4 x float> %__0, <4 x float> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fmul <4 x float> %__1, %__0
-; CHECK-NEXT: ret <4 x float> %__2
+; CHECK-NEXT: define <4 x float> @MulV4Float(<4 x float> %a, <4 x float> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %mul = fmul <4 x float> %b, %a
+; CHECK-NEXT: ret <4 x float> %mul
; CHECK-NEXT: }
; TODO(kschimpf): sdiv i8/i16. Needs bitcasts.
define i32 @SdivI32(i32 %a, i32 %b) {
+entry:
%div = sdiv i32 %a, %b
ret i32 %div
}
-; CHECK-NEXT: define i32 @SdivI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = sdiv i32 %__0, %__1
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @SdivI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = sdiv i32 %a, %b
+; CHECK-NEXT: ret i32 %div
; CHECK-NEXT: }
define i64 @SdivI64(i64 %a, i64 %b) {
+entry:
%div = sdiv i64 %a, %b
ret i64 %div
}
-; CHECK-NEXT: define i64 @SdivI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = sdiv i64 %__0, %__1
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @SdivI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = sdiv i64 %a, %b
+; CHECK-NEXT: ret i64 %div
; CHECK-NEXT: }
define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%div = sdiv <16 x i8> %a, %b
ret <16 x i8> %div
}
-; CHECK-NEXT: define <16 x i8> @SdivV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = sdiv <16 x i8> %__0, %__1
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @SdivV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = sdiv <16 x i8> %a, %b
+; CHECK-NEXT: ret <16 x i8> %div
; CHECK-NEXT: }
define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%div = sdiv <8 x i16> %a, %b
ret <8 x i16> %div
}
-; CHECK-NEXT: define <8 x i16> @SdivV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = sdiv <8 x i16> %__0, %__1
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @SdivV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = sdiv <8 x i16> %a, %b
+; CHECK-NEXT: ret <8 x i16> %div
; CHECK-NEXT: }
define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%div = sdiv <4 x i32> %a, %b
ret <4 x i32> %div
}
-; CHECK-NEXT: define <4 x i32> @SdivV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = sdiv <4 x i32> %__0, %__1
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @SdivV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = sdiv <4 x i32> %a, %b
+; CHECK-NEXT: ret <4 x i32> %div
; CHECK-NEXT: }
; TODO(kschimpf): srem i8/i16. Needs bitcasts.
define i32 @SremI32(i32 %a, i32 %b) {
+entry:
%rem = srem i32 %a, %b
ret i32 %rem
}
-; CHECK-NEXT: define i32 @SremI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = srem i32 %__0, %__1
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @SremI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = srem i32 %a, %b
+; CHECK-NEXT: ret i32 %rem
; CHECK-NEXT: }
define i64 @SremI64(i64 %a, i64 %b) {
+entry:
%rem = srem i64 %a, %b
ret i64 %rem
}
-; CHECK-NEXT: define i64 @SremI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = srem i64 %__0, %__1
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @SremI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = srem i64 %a, %b
+; CHECK-NEXT: ret i64 %rem
; CHECK-NEXT: }
define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%rem = srem <16 x i8> %a, %b
ret <16 x i8> %rem
}
-; CHECK-NEXT: define <16 x i8> @SremV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = srem <16 x i8> %__0, %__1
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @SremV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = srem <16 x i8> %a, %b
+; CHECK-NEXT: ret <16 x i8> %rem
; CHECK-NEXT: }
define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%rem = srem <8 x i16> %a, %b
ret <8 x i16> %rem
}
-; CHECK-NEXT: define <8 x i16> @SremV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = srem <8 x i16> %__0, %__1
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @SremV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = srem <8 x i16> %a, %b
+; CHECK-NEXT: ret <8 x i16> %rem
; CHECK-NEXT: }
define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%rem = srem <4 x i32> %a, %b
ret <4 x i32> %rem
}
-; CHECK-NEXT: define <4 x i32> @SremV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = srem <4 x i32> %__0, %__1
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @SremV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = srem <4 x i32> %a, %b
+; CHECK-NEXT: ret <4 x i32> %rem
; CHECK-NEXT: }
; TODO(kschimpf): udiv i8/i16. Needs bitcasts.
define i32 @UdivI32(i32 %a, i32 %b) {
+entry:
%div = udiv i32 %a, %b
ret i32 %div
}
-; CHECK-NEXT: define i32 @UdivI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = udiv i32 %__0, %__1
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @UdivI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = udiv i32 %a, %b
+; CHECK-NEXT: ret i32 %div
; CHECK-NEXT: }
define i64 @UdivI64(i64 %a, i64 %b) {
+entry:
%div = udiv i64 %a, %b
ret i64 %div
}
-; CHECK-NEXT: define i64 @UdivI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = udiv i64 %__0, %__1
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @UdivI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = udiv i64 %a, %b
+; CHECK-NEXT: ret i64 %div
; CHECK-NEXT: }
define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%div = udiv <16 x i8> %a, %b
ret <16 x i8> %div
}
-; CHECK-NEXT: define <16 x i8> @UdivV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = udiv <16 x i8> %__0, %__1
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @UdivV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = udiv <16 x i8> %a, %b
+; CHECK-NEXT: ret <16 x i8> %div
; CHECK-NEXT: }
define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%div = udiv <8 x i16> %a, %b
ret <8 x i16> %div
}
-; CHECK-NEXT: define <8 x i16> @UdivV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = udiv <8 x i16> %__0, %__1
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @UdivV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = udiv <8 x i16> %a, %b
+; CHECK-NEXT: ret <8 x i16> %div
; CHECK-NEXT: }
define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%div = udiv <4 x i32> %a, %b
ret <4 x i32> %div
}
-; CHECK-NEXT: define <4 x i32> @UdivV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = udiv <4 x i32> %__0, %__1
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @UdivV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = udiv <4 x i32> %a, %b
+; CHECK-NEXT: ret <4 x i32> %div
; CHECK-NEXT: }
; TODO(kschimpf): urem i8/i16. Needs bitcasts.
define i32 @UremI32(i32 %a, i32 %b) {
+entry:
%rem = urem i32 %a, %b
ret i32 %rem
}
-; CHECK-NEXT: define i32 @UremI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = urem i32 %__0, %__1
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @UremI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = urem i32 %a, %b
+; CHECK-NEXT: ret i32 %rem
; CHECK-NEXT: }
define i64 @UremI64(i64 %a, i64 %b) {
+entry:
%rem = urem i64 %a, %b
ret i64 %rem
}
-; CHECK-NEXT: define i64 @UremI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = urem i64 %__0, %__1
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @UremI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = urem i64 %a, %b
+; CHECK-NEXT: ret i64 %rem
; CHECK-NEXT: }
define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%rem = urem <16 x i8> %a, %b
ret <16 x i8> %rem
}
-; CHECK-NEXT: define <16 x i8> @UremV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = urem <16 x i8> %__0, %__1
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @UremV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = urem <16 x i8> %a, %b
+; CHECK-NEXT: ret <16 x i8> %rem
; CHECK-NEXT: }
define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%rem = urem <8 x i16> %a, %b
ret <8 x i16> %rem
}
-; CHECK-NEXT: define <8 x i16> @UremV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = urem <8 x i16> %__0, %__1
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @UremV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = urem <8 x i16> %a, %b
+; CHECK-NEXT: ret <8 x i16> %rem
; CHECK-NEXT: }
define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%rem = urem <4 x i32> %a, %b
ret <4 x i32> %rem
}
-; CHECK-NEXT: define <4 x i32> @UremV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = urem <4 x i32> %__0, %__1
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @UremV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = urem <4 x i32> %a, %b
+; CHECK-NEXT: ret <4 x i32> %rem
; CHECK-NEXT: }
define float @fdivFloat(float %a, float %b) {
+entry:
%div = fdiv float %a, %b
ret float %div
}
-; CHECK-NEXT: define float @fdivFloat(float %__0, float %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fdiv float %__0, %__1
-; CHECK-NEXT: ret float %__2
+; CHECK-NEXT: define float @fdivFloat(float %a, float %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = fdiv float %a, %b
+; CHECK-NEXT: ret float %div
; CHECK-NEXT: }
define double @fdivDouble(double %a, double %b) {
+entry:
%div = fdiv double %a, %b
ret double %div
}
-; CHECK-NEXT: define double @fdivDouble(double %__0, double %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fdiv double %__0, %__1
-; CHECK-NEXT: ret double %__2
+; CHECK-NEXT: define double @fdivDouble(double %a, double %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = fdiv double %a, %b
+; CHECK-NEXT: ret double %div
; CHECK-NEXT: }
define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) {
+entry:
%div = fdiv <4 x float> %a, %b
ret <4 x float> %div
}
-; CHECK-NEXT: define <4 x float> @fdivV4Float(<4 x float> %__0, <4 x float> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fdiv <4 x float> %__0, %__1
-; CHECK-NEXT: ret <4 x float> %__2
+; CHECK-NEXT: define <4 x float> @fdivV4Float(<4 x float> %a, <4 x float> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %div = fdiv <4 x float> %a, %b
+; CHECK-NEXT: ret <4 x float> %div
; CHECK-NEXT: }
define float @fremFloat(float %a, float %b) {
+entry:
%rem = frem float %a, %b
ret float %rem
}
-; CHECK-NEXT: define float @fremFloat(float %__0, float %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = frem float %__0, %__1
-; CHECK-NEXT: ret float %__2
+; CHECK-NEXT: define float @fremFloat(float %a, float %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = frem float %a, %b
+; CHECK-NEXT: ret float %rem
; CHECK-NEXT: }
-
define double @fremDouble(double %a, double %b) {
+entry:
%rem = frem double %a, %b
ret double %rem
}
-; CHECK-NEXT: define double @fremDouble(double %__0, double %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = frem double %__0, %__1
-; CHECK-NEXT: ret double %__2
+; CHECK-NEXT: define double @fremDouble(double %a, double %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = frem double %a, %b
+; CHECK-NEXT: ret double %rem
; CHECK-NEXT: }
define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) {
+entry:
%rem = frem <4 x float> %a, %b
ret <4 x float> %rem
}
-; CHECK-NEXT: define <4 x float> @fremV4Float(<4 x float> %__0, <4 x float> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = frem <4 x float> %__0, %__1
-; CHECK-NEXT: ret <4 x float> %__2
+; CHECK-NEXT: define <4 x float> @fremV4Float(<4 x float> %a, <4 x float> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %rem = frem <4 x float> %a, %b
+; CHECK-NEXT: ret <4 x float> %rem
; CHECK-NEXT: }
; TODO(kschimpf): and i1/i8/i16. Needs bitcasts.
define i32 @AndI32(i32 %a, i32 %b) {
+entry:
%and = and i32 %b, %a
ret i32 %and
}
-; CHECK-NEXT: define i32 @AndI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = and i32 %__1, %__0
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @AndI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %and = and i32 %b, %a
+; CHECK-NEXT: ret i32 %and
; CHECK-NEXT: }
define i64 @AndI64(i64 %a, i64 %b) {
+entry:
%and = and i64 %b, %a
ret i64 %and
}
-; CHECK-NEXT: define i64 @AndI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = and i64 %__1, %__0
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @AndI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %and = and i64 %b, %a
+; CHECK-NEXT: ret i64 %and
; CHECK-NEXT: }
define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%and = and <16 x i8> %b, %a
ret <16 x i8> %and
}
-; CHECK-NEXT: define <16 x i8> @AndV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = and <16 x i8> %__1, %__0
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @AndV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %and = and <16 x i8> %b, %a
+; CHECK-NEXT: ret <16 x i8> %and
; CHECK-NEXT: }
define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%and = and <8 x i16> %b, %a
ret <8 x i16> %and
}
-; CHECK-NEXT: define <8 x i16> @AndV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = and <8 x i16> %__1, %__0
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @AndV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %and = and <8 x i16> %b, %a
+; CHECK-NEXT: ret <8 x i16> %and
; CHECK-NEXT: }
define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%and = and <4 x i32> %b, %a
ret <4 x i32> %and
}
-; CHECK-NEXT: define <4 x i32> @AndV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = and <4 x i32> %__1, %__0
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @AndV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %and = and <4 x i32> %b, %a
+; CHECK-NEXT: ret <4 x i32> %and
; CHECK-NEXT: }
; TODO(kschimpf): or i1/i8/i16. Needs bitcasts.
define i32 @OrI32(i32 %a, i32 %b) {
+entry:
%or = or i32 %b, %a
ret i32 %or
}
-; CHECK-NEXT: define i32 @OrI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = or i32 %__1, %__0
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @OrI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %or = or i32 %b, %a
+; CHECK-NEXT: ret i32 %or
; CHECK-NEXT: }
define i64 @OrI64(i64 %a, i64 %b) {
+entry:
%or = or i64 %b, %a
ret i64 %or
}
-; CHECK-NEXT: define i64 @OrI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = or i64 %__1, %__0
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @OrI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %or = or i64 %b, %a
+; CHECK-NEXT: ret i64 %or
; CHECK-NEXT: }
define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%or = or <16 x i8> %b, %a
ret <16 x i8> %or
}
-; CHECK-NEXT: define <16 x i8> @OrV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = or <16 x i8> %__1, %__0
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @OrV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %or = or <16 x i8> %b, %a
+; CHECK-NEXT: ret <16 x i8> %or
; CHECK-NEXT: }
define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%or = or <8 x i16> %b, %a
ret <8 x i16> %or
}
-; CHECK-NEXT: define <8 x i16> @OrV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = or <8 x i16> %__1, %__0
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @OrV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %or = or <8 x i16> %b, %a
+; CHECK-NEXT: ret <8 x i16> %or
; CHECK-NEXT: }
define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%or = or <4 x i32> %b, %a
ret <4 x i32> %or
}
-; CHECK-NEXT: define <4 x i32> @OrV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = or <4 x i32> %__1, %__0
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @OrV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %or = or <4 x i32> %b, %a
+; CHECK-NEXT: ret <4 x i32> %or
; CHECK-NEXT: }
; TODO(kschimpf): xor i1/i8/i16. Needs bitcasts.
define i32 @XorI32(i32 %a, i32 %b) {
+entry:
%xor = xor i32 %b, %a
ret i32 %xor
}
-; CHECK-NEXT: define i32 @XorI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = xor i32 %__1, %__0
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @XorI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %xor = xor i32 %b, %a
+; CHECK-NEXT: ret i32 %xor
; CHECK-NEXT: }
define i64 @XorI64(i64 %a, i64 %b) {
+entry:
%xor = xor i64 %b, %a
ret i64 %xor
}
-; CHECK-NEXT: define i64 @XorI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = xor i64 %__1, %__0
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @XorI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %xor = xor i64 %b, %a
+; CHECK-NEXT: ret i64 %xor
; CHECK-NEXT: }
define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%xor = xor <16 x i8> %b, %a
ret <16 x i8> %xor
}
-; CHECK-NEXT: define <16 x i8> @XorV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = xor <16 x i8> %__1, %__0
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @XorV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %xor = xor <16 x i8> %b, %a
+; CHECK-NEXT: ret <16 x i8> %xor
; CHECK-NEXT: }
define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%xor = xor <8 x i16> %b, %a
ret <8 x i16> %xor
}
-; CHECK-NEXT: define <8 x i16> @XorV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = xor <8 x i16> %__1, %__0
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @XorV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %xor = xor <8 x i16> %b, %a
+; CHECK-NEXT: ret <8 x i16> %xor
; CHECK-NEXT: }
define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%xor = xor <4 x i32> %b, %a
ret <4 x i32> %xor
}
-; CHECK-NEXT: define <4 x i32> @XorV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = xor <4 x i32> %__1, %__0
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @XorV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %xor = xor <4 x i32> %b, %a
+; CHECK-NEXT: ret <4 x i32> %xor
; CHECK-NEXT: }
; TODO(kschimpf): shl i8/i16. Needs bitcasts.
define i32 @ShlI32(i32 %a, i32 %b) {
+entry:
%shl = shl i32 %b, %a
ret i32 %shl
}
-; CHECK-NEXT: define i32 @ShlI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = shl i32 %__1, %__0
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @ShlI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %shl = shl i32 %b, %a
+; CHECK-NEXT: ret i32 %shl
; CHECK-NEXT: }
define i64 @ShlI64(i64 %a, i64 %b) {
+entry:
%shl = shl i64 %b, %a
ret i64 %shl
}
-; CHECK-NEXT: define i64 @ShlI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = shl i64 %__1, %__0
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @ShlI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %shl = shl i64 %b, %a
+; CHECK-NEXT: ret i64 %shl
; CHECK-NEXT: }
define <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%shl = shl <16 x i8> %b, %a
ret <16 x i8> %shl
}
-; CHECK-NEXT: define <16 x i8> @ShlV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = shl <16 x i8> %__1, %__0
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @ShlV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %shl = shl <16 x i8> %b, %a
+; CHECK-NEXT: ret <16 x i8> %shl
; CHECK-NEXT: }
define <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%shl = shl <8 x i16> %b, %a
ret <8 x i16> %shl
}
-; CHECK-NEXT: define <8 x i16> @ShlV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = shl <8 x i16> %__1, %__0
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @ShlV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %shl = shl <8 x i16> %b, %a
+; CHECK-NEXT: ret <8 x i16> %shl
; CHECK-NEXT: }
define <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%shl = shl <4 x i32> %b, %a
ret <4 x i32> %shl
}
-; CHECK-NEXT: define <4 x i32> @ShlV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = shl <4 x i32> %__1, %__0
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @ShlV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %shl = shl <4 x i32> %b, %a
+; CHECK-NEXT: ret <4 x i32> %shl
; CHECK-NEXT: }
; TODO(kschimpf): ashr i8/i16. Needs bitcasts.
define i32 @ashrI32(i32 %a, i32 %b) {
+entry:
%ashr = ashr i32 %b, %a
ret i32 %ashr
}
-; CHECK-NEXT: define i32 @ashrI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = ashr i32 %__1, %__0
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @ashrI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %ashr = ashr i32 %b, %a
+; CHECK-NEXT: ret i32 %ashr
; CHECK-NEXT: }
define i64 @AshrI64(i64 %a, i64 %b) {
+entry:
%ashr = ashr i64 %b, %a
ret i64 %ashr
}
-; CHECK-NEXT: define i64 @AshrI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = ashr i64 %__1, %__0
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @AshrI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %ashr = ashr i64 %b, %a
+; CHECK-NEXT: ret i64 %ashr
; CHECK-NEXT: }
define <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%ashr = ashr <16 x i8> %b, %a
ret <16 x i8> %ashr
}
-; CHECK-NEXT: define <16 x i8> @AshrV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = ashr <16 x i8> %__1, %__0
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @AshrV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %ashr = ashr <16 x i8> %b, %a
+; CHECK-NEXT: ret <16 x i8> %ashr
; CHECK-NEXT: }
define <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%ashr = ashr <8 x i16> %b, %a
ret <8 x i16> %ashr
}
-; CHECK-NEXT: define <8 x i16> @AshrV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = ashr <8 x i16> %__1, %__0
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @AshrV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %ashr = ashr <8 x i16> %b, %a
+; CHECK-NEXT: ret <8 x i16> %ashr
; CHECK-NEXT: }
define <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%ashr = ashr <4 x i32> %b, %a
ret <4 x i32> %ashr
}
-; CHECK-NEXT: define <4 x i32> @AshrV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = ashr <4 x i32> %__1, %__0
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @AshrV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %ashr = ashr <4 x i32> %b, %a
+; CHECK-NEXT: ret <4 x i32> %ashr
; CHECK-NEXT: }
; TODO(kschimpf): lshr i8/i16. Needs bitcasts.
define i32 @lshrI32(i32 %a, i32 %b) {
+entry:
%lshr = lshr i32 %b, %a
ret i32 %lshr
}
-; CHECK-NEXT: define i32 @lshrI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = lshr i32 %__1, %__0
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @lshrI32(i32 %a, i32 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %lshr = lshr i32 %b, %a
+; CHECK-NEXT: ret i32 %lshr
; CHECK-NEXT: }
define i64 @LshrI64(i64 %a, i64 %b) {
+entry:
%lshr = lshr i64 %b, %a
ret i64 %lshr
}
-; CHECK-NEXT: define i64 @LshrI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = lshr i64 %__1, %__0
-; CHECK-NEXT: ret i64 %__2
+; CHECK-NEXT: define i64 @LshrI64(i64 %a, i64 %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %lshr = lshr i64 %b, %a
+; CHECK-NEXT: ret i64 %lshr
; CHECK-NEXT: }
define <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) {
+entry:
%lshr = lshr <16 x i8> %b, %a
ret <16 x i8> %lshr
}
-; CHECK-NEXT: define <16 x i8> @LshrV16I8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = lshr <16 x i8> %__1, %__0
-; CHECK-NEXT: ret <16 x i8> %__2
+; CHECK-NEXT: define <16 x i8> @LshrV16I8(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %lshr = lshr <16 x i8> %b, %a
+; CHECK-NEXT: ret <16 x i8> %lshr
; CHECK-NEXT: }
define <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) {
+entry:
%lshr = lshr <8 x i16> %b, %a
ret <8 x i16> %lshr
}
-; CHECK-NEXT: define <8 x i16> @LshrV8I16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = lshr <8 x i16> %__1, %__0
-; CHECK-NEXT: ret <8 x i16> %__2
+; CHECK-NEXT: define <8 x i16> @LshrV8I16(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %lshr = lshr <8 x i16> %b, %a
+; CHECK-NEXT: ret <8 x i16> %lshr
; CHECK-NEXT: }
define <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) {
+entry:
%lshr = lshr <4 x i32> %b, %a
ret <4 x i32> %lshr
}
-; CHECK-NEXT: define <4 x i32> @LshrV4I32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = lshr <4 x i32> %__1, %__0
-; CHECK-NEXT: ret <4 x i32> %__2
+; CHECK-NEXT: define <4 x i32> @LshrV4I32(<4 x i32> %a, <4 x i32> %b) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %lshr = lshr <4 x i32> %b, %a
+; CHECK-NEXT: ret <4 x i32> %lshr
; CHECK-NEXT: }
; Tests if we handle branch instructions.
-; RUN: llvm-as < %s | pnacl-freeze \
+; RUN: llvm-as < %s | pnacl-freeze -allow-local-symbol-tables \
; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \
; RUN: -allow-pnacl-reader-error-recovery \
+; RUN: -allow-local-symbol-tables \
; RUN: | FileCheck %s
define void @SimpleBranch() {
+entry:
br label %b3
b1:
br label %b2
}
; CHECK: define void @SimpleBranch() {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: br label %__3
-; CHECK-NEXT: __1:
-; CHECK-NEXT: br label %__2
-; CHECK-NEXT: __2:
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label %b3
+; CHECK-NEXT: b1:
+; CHECK-NEXT: br label %b2
+; CHECK-NEXT: b2:
; CHECK-NEXT: ret void
-; CHECK-NEXT: __3:
-; CHECK-NEXT: br label %__1
+; CHECK-NEXT: b3:
+; CHECK-NEXT: br label %b1
; CHECK-NEXT: }
define void @CondBranch(i32 %p) {
+entry:
%test = trunc i32 %p to i1
br i1 %test, label %b1, label %b2
b1:
br i1 %test, label %b2, label %b1
}
-; CHECK-NEXT: define void @CondBranch(i32 %__0) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__1 = trunc i32 %__0 to i1
-; CHECK-NEXT: br i1 %__1, label %__1, label %__2
-; CHECK-NEXT: __1:
+; CHECK-NEXT: define void @CondBranch(i32 %p) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %test = trunc i32 %p to i1
+; CHECK-NEXT: br i1 %test, label %b1, label %b2
+; CHECK-NEXT: b1:
; CHECK-NEXT: ret void
-; CHECK-NEXT: __2:
-; CHECK-NEXT: br i1 %__1, label %__2, label %__1
+; CHECK-NEXT: b2:
+; CHECK-NEXT: br i1 %test, label %b2, label %b1
; CHECK-NEXT: }
; Test if we can read compare instructions.
-; RUN: llvm-as < %s | pnacl-freeze \
+; RUN: llvm-as < %s | pnacl-freeze -allow-local-symbol-tables \
; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \
; RUN: -allow-pnacl-reader-error-recovery \
+; RUN: -allow-local-symbol-tables \
; RUN: | FileCheck %s
define i1 @IcmpI1(i32 %p1, i32 %p2) {
+entry:
%a1 = trunc i32 %p1 to i1
%a2 = trunc i32 %p2 to i1
%veq = icmp eq i1 %a1, %a2
ret i1 %veq
}
-; CHECK: define i1 @IcmpI1(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__3 = trunc i32 %__1 to i1
-; CHECK-NEXT: %__4 = icmp eq i1 %__2, %__3
-; CHECK-NEXT: %__5 = icmp ne i1 %__2, %__3
-; CHECK-NEXT: %__6 = icmp ugt i1 %__2, %__3
-; CHECK-NEXT: %__7 = icmp uge i1 %__2, %__3
-; CHECK-NEXT: %__8 = icmp ult i1 %__2, %__3
-; CHECK-NEXT: %__9 = icmp ule i1 %__2, %__3
-; CHECK-NEXT: %__10 = icmp sgt i1 %__2, %__3
-; CHECK-NEXT: %__11 = icmp sge i1 %__2, %__3
-; CHECK-NEXT: %__12 = icmp slt i1 %__2, %__3
-; CHECK-NEXT: %__13 = icmp sle i1 %__2, %__3
-; CHECK-NEXT: ret i1 %__4
+; CHECK: define i1 @IcmpI1(i32 %p1, i32 %p2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %a1 = trunc i32 %p1 to i1
+; CHECK-NEXT: %a2 = trunc i32 %p2 to i1
+; CHECK-NEXT: %veq = icmp eq i1 %a1, %a2
+; CHECK-NEXT: %vne = icmp ne i1 %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt i1 %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge i1 %a1, %a2
+; CHECK-NEXT: %vult = icmp ult i1 %a1, %a2
+; CHECK-NEXT: %vule = icmp ule i1 %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt i1 %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge i1 %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt i1 %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle i1 %a1, %a2
+; CHECK-NEXT: ret i1 %veq
; CHECK-NEXT: }
define i1 @IcmpI8(i32 %p1, i32 %p2) {
+entry:
%a1 = trunc i32 %p1 to i8
%a2 = trunc i32 %p2 to i8
%veq = icmp eq i8 %a1, %a2
ret i1 %veq
}
-; CHECK-NEXT: define i1 @IcmpI8(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = trunc i32 %__0 to i8
-; CHECK-NEXT: %__3 = trunc i32 %__1 to i8
-; CHECK-NEXT: %__4 = icmp eq i8 %__2, %__3
-; CHECK-NEXT: %__5 = icmp ne i8 %__2, %__3
-; CHECK-NEXT: %__6 = icmp ugt i8 %__2, %__3
-; CHECK-NEXT: %__7 = icmp uge i8 %__2, %__3
-; CHECK-NEXT: %__8 = icmp ult i8 %__2, %__3
-; CHECK-NEXT: %__9 = icmp ule i8 %__2, %__3
-; CHECK-NEXT: %__10 = icmp sgt i8 %__2, %__3
-; CHECK-NEXT: %__11 = icmp sge i8 %__2, %__3
-; CHECK-NEXT: %__12 = icmp slt i8 %__2, %__3
-; CHECK-NEXT: %__13 = icmp sle i8 %__2, %__3
-; CHECK-NEXT: ret i1 %__4
+; CHECK-NEXT: define i1 @IcmpI8(i32 %p1, i32 %p2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %a1 = trunc i32 %p1 to i8
+; CHECK-NEXT: %a2 = trunc i32 %p2 to i8
+; CHECK-NEXT: %veq = icmp eq i8 %a1, %a2
+; CHECK-NEXT: %vne = icmp ne i8 %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt i8 %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge i8 %a1, %a2
+; CHECK-NEXT: %vult = icmp ult i8 %a1, %a2
+; CHECK-NEXT: %vule = icmp ule i8 %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt i8 %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge i8 %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt i8 %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle i8 %a1, %a2
+; CHECK-NEXT: ret i1 %veq
; CHECK-NEXT: }
define i1 @IcmpI16(i32 %p1, i32 %p2) {
+entry:
%a1 = trunc i32 %p1 to i16
%a2 = trunc i32 %p2 to i16
%veq = icmp eq i16 %a1, %a2
ret i1 %veq
}
-; CHECK-NEXT: define i1 @IcmpI16(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = trunc i32 %__0 to i16
-; CHECK-NEXT: %__3 = trunc i32 %__1 to i16
-; CHECK-NEXT: %__4 = icmp eq i16 %__2, %__3
-; CHECK-NEXT: %__5 = icmp ne i16 %__2, %__3
-; CHECK-NEXT: %__6 = icmp ugt i16 %__2, %__3
-; CHECK-NEXT: %__7 = icmp uge i16 %__2, %__3
-; CHECK-NEXT: %__8 = icmp ult i16 %__2, %__3
-; CHECK-NEXT: %__9 = icmp ule i16 %__2, %__3
-; CHECK-NEXT: %__10 = icmp sgt i16 %__2, %__3
-; CHECK-NEXT: %__11 = icmp sge i16 %__2, %__3
-; CHECK-NEXT: %__12 = icmp slt i16 %__2, %__3
-; CHECK-NEXT: %__13 = icmp sle i16 %__2, %__3
-; CHECK-NEXT: ret i1 %__4
+; CHECK-NEXT: define i1 @IcmpI16(i32 %p1, i32 %p2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %a1 = trunc i32 %p1 to i16
+; CHECK-NEXT: %a2 = trunc i32 %p2 to i16
+; CHECK-NEXT: %veq = icmp eq i16 %a1, %a2
+; CHECK-NEXT: %vne = icmp ne i16 %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt i16 %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge i16 %a1, %a2
+; CHECK-NEXT: %vult = icmp ult i16 %a1, %a2
+; CHECK-NEXT: %vule = icmp ule i16 %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt i16 %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge i16 %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt i16 %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle i16 %a1, %a2
+; CHECK-NEXT: ret i1 %veq
; CHECK-NEXT: }
define i1 @IcmpI32(i32 %a1, i32 %a2) {
+entry:
%veq = icmp eq i32 %a1, %a2
%vne = icmp ne i32 %a1, %a2
%vugt = icmp ugt i32 %a1, %a2
ret i1 %veq
}
-; CHECK-NEXT: define i1 @IcmpI32(i32 %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = icmp eq i32 %__0, %__1
-; CHECK-NEXT: %__3 = icmp ne i32 %__0, %__1
-; CHECK-NEXT: %__4 = icmp ugt i32 %__0, %__1
-; CHECK-NEXT: %__5 = icmp uge i32 %__0, %__1
-; CHECK-NEXT: %__6 = icmp ult i32 %__0, %__1
-; CHECK-NEXT: %__7 = icmp ule i32 %__0, %__1
-; CHECK-NEXT: %__8 = icmp sgt i32 %__0, %__1
-; CHECK-NEXT: %__9 = icmp sge i32 %__0, %__1
-; CHECK-NEXT: %__10 = icmp slt i32 %__0, %__1
-; CHECK-NEXT: %__11 = icmp sle i32 %__0, %__1
-; CHECK-NEXT: ret i1 %__2
+; CHECK-NEXT: define i1 @IcmpI32(i32 %a1, i32 %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %veq = icmp eq i32 %a1, %a2
+; CHECK-NEXT: %vne = icmp ne i32 %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt i32 %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge i32 %a1, %a2
+; CHECK-NEXT: %vult = icmp ult i32 %a1, %a2
+; CHECK-NEXT: %vule = icmp ule i32 %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt i32 %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge i32 %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt i32 %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle i32 %a1, %a2
+; CHECK-NEXT: ret i1 %veq
; CHECK-NEXT: }
define i1 @IcmpI64(i64 %a1, i64 %a2) {
+entry:
%veq = icmp eq i64 %a1, %a2
%vne = icmp ne i64 %a1, %a2
%vugt = icmp ugt i64 %a1, %a2
ret i1 %veq
}
-; CHECK-NEXT: define i1 @IcmpI64(i64 %__0, i64 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = icmp eq i64 %__0, %__1
-; CHECK-NEXT: %__3 = icmp ne i64 %__0, %__1
-; CHECK-NEXT: %__4 = icmp ugt i64 %__0, %__1
-; CHECK-NEXT: %__5 = icmp uge i64 %__0, %__1
-; CHECK-NEXT: %__6 = icmp ult i64 %__0, %__1
-; CHECK-NEXT: %__7 = icmp ule i64 %__0, %__1
-; CHECK-NEXT: %__8 = icmp sgt i64 %__0, %__1
-; CHECK-NEXT: %__9 = icmp sge i64 %__0, %__1
-; CHECK-NEXT: %__10 = icmp slt i64 %__0, %__1
-; CHECK-NEXT: %__11 = icmp sle i64 %__0, %__1
-; CHECK-NEXT: ret i1 %__2
+; CHECK-NEXT: define i1 @IcmpI64(i64 %a1, i64 %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %veq = icmp eq i64 %a1, %a2
+; CHECK-NEXT: %vne = icmp ne i64 %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt i64 %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge i64 %a1, %a2
+; CHECK-NEXT: %vult = icmp ult i64 %a1, %a2
+; CHECK-NEXT: %vule = icmp ule i64 %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt i64 %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge i64 %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt i64 %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle i64 %a1, %a2
+; CHECK-NEXT: ret i1 %veq
; CHECK-NEXT: }
define <4 x i1> @IcmpV4xI1(<4 x i1> %a1, <4 x i1> %a2) {
+entry:
%veq = icmp eq <4 x i1> %a1, %a2
%vne = icmp ne <4 x i1> %a1, %a2
%vugt = icmp ugt <4 x i1> %a1, %a2
ret <4 x i1> %veq
}
-; CHECK-NEXT: define <4 x i1> @IcmpV4xI1(<4 x i1> %__0, <4 x i1> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = icmp eq <4 x i1> %__0, %__1
-; CHECK-NEXT: %__3 = icmp ne <4 x i1> %__0, %__1
-; CHECK-NEXT: %__4 = icmp ugt <4 x i1> %__0, %__1
-; CHECK-NEXT: %__5 = icmp uge <4 x i1> %__0, %__1
-; CHECK-NEXT: %__6 = icmp ult <4 x i1> %__0, %__1
-; CHECK-NEXT: %__7 = icmp ule <4 x i1> %__0, %__1
-; CHECK-NEXT: %__8 = icmp sgt <4 x i1> %__0, %__1
-; CHECK-NEXT: %__9 = icmp sge <4 x i1> %__0, %__1
-; CHECK-NEXT: %__10 = icmp slt <4 x i1> %__0, %__1
-; CHECK-NEXT: %__11 = icmp sle <4 x i1> %__0, %__1
-; CHECK-NEXT: ret <4 x i1> %__2
+; CHECK-NEXT: define <4 x i1> @IcmpV4xI1(<4 x i1> %a1, <4 x i1> %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %veq = icmp eq <4 x i1> %a1, %a2
+; CHECK-NEXT: %vne = icmp ne <4 x i1> %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt <4 x i1> %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge <4 x i1> %a1, %a2
+; CHECK-NEXT: %vult = icmp ult <4 x i1> %a1, %a2
+; CHECK-NEXT: %vule = icmp ule <4 x i1> %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt <4 x i1> %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge <4 x i1> %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt <4 x i1> %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle <4 x i1> %a1, %a2
+; CHECK-NEXT: ret <4 x i1> %veq
; CHECK-NEXT: }
define <8 x i1> @IcmpV8xI1(<8 x i1> %a1, <8 x i1> %a2) {
+entry:
%veq = icmp eq <8 x i1> %a1, %a2
%vne = icmp ne <8 x i1> %a1, %a2
%vugt = icmp ugt <8 x i1> %a1, %a2
ret <8 x i1> %veq
}
-; CHECK-NEXT: define <8 x i1> @IcmpV8xI1(<8 x i1> %__0, <8 x i1> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = icmp eq <8 x i1> %__0, %__1
-; CHECK-NEXT: %__3 = icmp ne <8 x i1> %__0, %__1
-; CHECK-NEXT: %__4 = icmp ugt <8 x i1> %__0, %__1
-; CHECK-NEXT: %__5 = icmp uge <8 x i1> %__0, %__1
-; CHECK-NEXT: %__6 = icmp ult <8 x i1> %__0, %__1
-; CHECK-NEXT: %__7 = icmp ule <8 x i1> %__0, %__1
-; CHECK-NEXT: %__8 = icmp sgt <8 x i1> %__0, %__1
-; CHECK-NEXT: %__9 = icmp sge <8 x i1> %__0, %__1
-; CHECK-NEXT: %__10 = icmp slt <8 x i1> %__0, %__1
-; CHECK-NEXT: %__11 = icmp sle <8 x i1> %__0, %__1
-; CHECK-NEXT: ret <8 x i1> %__2
+; CHECK-NEXT: define <8 x i1> @IcmpV8xI1(<8 x i1> %a1, <8 x i1> %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %veq = icmp eq <8 x i1> %a1, %a2
+; CHECK-NEXT: %vne = icmp ne <8 x i1> %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt <8 x i1> %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge <8 x i1> %a1, %a2
+; CHECK-NEXT: %vult = icmp ult <8 x i1> %a1, %a2
+; CHECK-NEXT: %vule = icmp ule <8 x i1> %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt <8 x i1> %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge <8 x i1> %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt <8 x i1> %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle <8 x i1> %a1, %a2
+; CHECK-NEXT: ret <8 x i1> %veq
; CHECK-NEXT: }
define <16 x i1> @IcmpV16xI1(<16 x i1> %a1, <16 x i1> %a2) {
+entry:
%veq = icmp eq <16 x i1> %a1, %a2
%vne = icmp ne <16 x i1> %a1, %a2
%vugt = icmp ugt <16 x i1> %a1, %a2
ret <16 x i1> %veq
}
-; CHECK-NEXT: define <16 x i1> @IcmpV16xI1(<16 x i1> %__0, <16 x i1> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = icmp eq <16 x i1> %__0, %__1
-; CHECK-NEXT: %__3 = icmp ne <16 x i1> %__0, %__1
-; CHECK-NEXT: %__4 = icmp ugt <16 x i1> %__0, %__1
-; CHECK-NEXT: %__5 = icmp uge <16 x i1> %__0, %__1
-; CHECK-NEXT: %__6 = icmp ult <16 x i1> %__0, %__1
-; CHECK-NEXT: %__7 = icmp ule <16 x i1> %__0, %__1
-; CHECK-NEXT: %__8 = icmp sgt <16 x i1> %__0, %__1
-; CHECK-NEXT: %__9 = icmp sge <16 x i1> %__0, %__1
-; CHECK-NEXT: %__10 = icmp slt <16 x i1> %__0, %__1
-; CHECK-NEXT: %__11 = icmp sle <16 x i1> %__0, %__1
-; CHECK-NEXT: ret <16 x i1> %__2
+; CHECK-NEXT: define <16 x i1> @IcmpV16xI1(<16 x i1> %a1, <16 x i1> %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %veq = icmp eq <16 x i1> %a1, %a2
+; CHECK-NEXT: %vne = icmp ne <16 x i1> %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt <16 x i1> %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge <16 x i1> %a1, %a2
+; CHECK-NEXT: %vult = icmp ult <16 x i1> %a1, %a2
+; CHECK-NEXT: %vule = icmp ule <16 x i1> %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt <16 x i1> %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge <16 x i1> %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt <16 x i1> %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle <16 x i1> %a1, %a2
+; CHECK-NEXT: ret <16 x i1> %veq
; CHECK-NEXT: }
define <16 x i1> @IcmpV16xI8(<16 x i8> %a1, <16 x i8> %a2) {
+entry:
%veq = icmp eq <16 x i8> %a1, %a2
%vne = icmp ne <16 x i8> %a1, %a2
%vugt = icmp ugt <16 x i8> %a1, %a2
ret <16 x i1> %veq
}
-; CHECK-NEXT: define <16 x i1> @IcmpV16xI8(<16 x i8> %__0, <16 x i8> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = icmp eq <16 x i8> %__0, %__1
-; CHECK-NEXT: %__3 = icmp ne <16 x i8> %__0, %__1
-; CHECK-NEXT: %__4 = icmp ugt <16 x i8> %__0, %__1
-; CHECK-NEXT: %__5 = icmp uge <16 x i8> %__0, %__1
-; CHECK-NEXT: %__6 = icmp ult <16 x i8> %__0, %__1
-; CHECK-NEXT: %__7 = icmp ule <16 x i8> %__0, %__1
-; CHECK-NEXT: %__8 = icmp sgt <16 x i8> %__0, %__1
-; CHECK-NEXT: %__9 = icmp sge <16 x i8> %__0, %__1
-; CHECK-NEXT: %__10 = icmp slt <16 x i8> %__0, %__1
-; CHECK-NEXT: %__11 = icmp sle <16 x i8> %__0, %__1
-; CHECK-NEXT: ret <16 x i1> %__2
+; CHECK-NEXT: define <16 x i1> @IcmpV16xI8(<16 x i8> %a1, <16 x i8> %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %veq = icmp eq <16 x i8> %a1, %a2
+; CHECK-NEXT: %vne = icmp ne <16 x i8> %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt <16 x i8> %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge <16 x i8> %a1, %a2
+; CHECK-NEXT: %vult = icmp ult <16 x i8> %a1, %a2
+; CHECK-NEXT: %vule = icmp ule <16 x i8> %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt <16 x i8> %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge <16 x i8> %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt <16 x i8> %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle <16 x i8> %a1, %a2
+; CHECK-NEXT: ret <16 x i1> %veq
; CHECK-NEXT: }
define <8 x i1> @IcmpV8xI16(<8 x i16> %a1, <8 x i16> %a2) {
+entry:
%veq = icmp eq <8 x i16> %a1, %a2
%vne = icmp ne <8 x i16> %a1, %a2
%vugt = icmp ugt <8 x i16> %a1, %a2
ret <8 x i1> %veq
}
-; CHECK-NEXT: define <8 x i1> @IcmpV8xI16(<8 x i16> %__0, <8 x i16> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = icmp eq <8 x i16> %__0, %__1
-; CHECK-NEXT: %__3 = icmp ne <8 x i16> %__0, %__1
-; CHECK-NEXT: %__4 = icmp ugt <8 x i16> %__0, %__1
-; CHECK-NEXT: %__5 = icmp uge <8 x i16> %__0, %__1
-; CHECK-NEXT: %__6 = icmp ult <8 x i16> %__0, %__1
-; CHECK-NEXT: %__7 = icmp ule <8 x i16> %__0, %__1
-; CHECK-NEXT: %__8 = icmp sgt <8 x i16> %__0, %__1
-; CHECK-NEXT: %__9 = icmp sge <8 x i16> %__0, %__1
-; CHECK-NEXT: %__10 = icmp slt <8 x i16> %__0, %__1
-; CHECK-NEXT: %__11 = icmp sle <8 x i16> %__0, %__1
-; CHECK-NEXT: ret <8 x i1> %__2
+; CHECK-NEXT: define <8 x i1> @IcmpV8xI16(<8 x i16> %a1, <8 x i16> %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %veq = icmp eq <8 x i16> %a1, %a2
+; CHECK-NEXT: %vne = icmp ne <8 x i16> %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt <8 x i16> %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge <8 x i16> %a1, %a2
+; CHECK-NEXT: %vult = icmp ult <8 x i16> %a1, %a2
+; CHECK-NEXT: %vule = icmp ule <8 x i16> %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt <8 x i16> %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge <8 x i16> %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt <8 x i16> %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle <8 x i16> %a1, %a2
+; CHECK-NEXT: ret <8 x i1> %veq
; CHECK-NEXT: }
define <4 x i1> @IcmpV4xI32(<4 x i32> %a1, <4 x i32> %a2) {
+entry:
%veq = icmp eq <4 x i32> %a1, %a2
%vne = icmp ne <4 x i32> %a1, %a2
%vugt = icmp ugt <4 x i32> %a1, %a2
ret <4 x i1> %veq
}
-; CHECK-NEXT: define <4 x i1> @IcmpV4xI32(<4 x i32> %__0, <4 x i32> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = icmp eq <4 x i32> %__0, %__1
-; CHECK-NEXT: %__3 = icmp ne <4 x i32> %__0, %__1
-; CHECK-NEXT: %__4 = icmp ugt <4 x i32> %__0, %__1
-; CHECK-NEXT: %__5 = icmp uge <4 x i32> %__0, %__1
-; CHECK-NEXT: %__6 = icmp ult <4 x i32> %__0, %__1
-; CHECK-NEXT: %__7 = icmp ule <4 x i32> %__0, %__1
-; CHECK-NEXT: %__8 = icmp sgt <4 x i32> %__0, %__1
-; CHECK-NEXT: %__9 = icmp sge <4 x i32> %__0, %__1
-; CHECK-NEXT: %__10 = icmp slt <4 x i32> %__0, %__1
-; CHECK-NEXT: %__11 = icmp sle <4 x i32> %__0, %__1
-; CHECK-NEXT: ret <4 x i1> %__2
+; CHECK-NEXT: define <4 x i1> @IcmpV4xI32(<4 x i32> %a1, <4 x i32> %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %veq = icmp eq <4 x i32> %a1, %a2
+; CHECK-NEXT: %vne = icmp ne <4 x i32> %a1, %a2
+; CHECK-NEXT: %vugt = icmp ugt <4 x i32> %a1, %a2
+; CHECK-NEXT: %vuge = icmp uge <4 x i32> %a1, %a2
+; CHECK-NEXT: %vult = icmp ult <4 x i32> %a1, %a2
+; CHECK-NEXT: %vule = icmp ule <4 x i32> %a1, %a2
+; CHECK-NEXT: %vsgt = icmp sgt <4 x i32> %a1, %a2
+; CHECK-NEXT: %vsge = icmp sge <4 x i32> %a1, %a2
+; CHECK-NEXT: %vslt = icmp slt <4 x i32> %a1, %a2
+; CHECK-NEXT: %vsle = icmp sle <4 x i32> %a1, %a2
+; CHECK-NEXT: ret <4 x i1> %veq
; CHECK-NEXT: }
define i1 @FcmpFloat(float %a1, float %a2) {
+entry:
%vfalse = fcmp false float %a1, %a2
%voeq = fcmp oeq float %a1, %a2
%vogt = fcmp ogt float %a1, %a2
ret i1 %voeq
}
-; CHECK-NEXT: define i1 @FcmpFloat(float %__0, float %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fcmp false float %__0, %__1
-; CHECK-NEXT: %__3 = fcmp oeq float %__0, %__1
-; CHECK-NEXT: %__4 = fcmp ogt float %__0, %__1
-; CHECK-NEXT: %__5 = fcmp oge float %__0, %__1
-; CHECK-NEXT: %__6 = fcmp olt float %__0, %__1
-; CHECK-NEXT: %__7 = fcmp ole float %__0, %__1
-; CHECK-NEXT: %__8 = fcmp one float %__0, %__1
-; CHECK-NEXT: %__9 = fcmp ord float %__0, %__1
-; CHECK-NEXT: %__10 = fcmp ueq float %__0, %__1
-; CHECK-NEXT: %__11 = fcmp ugt float %__0, %__1
-; CHECK-NEXT: %__12 = fcmp uge float %__0, %__1
-; CHECK-NEXT: %__13 = fcmp ult float %__0, %__1
-; CHECK-NEXT: %__14 = fcmp ule float %__0, %__1
-; CHECK-NEXT: %__15 = fcmp une float %__0, %__1
-; CHECK-NEXT: %__16 = fcmp uno float %__0, %__1
-; CHECK-NEXT: %__17 = fcmp true float %__0, %__1
-; CHECK-NEXT: ret i1 %__3
+; CHECK-NEXT: define i1 @FcmpFloat(float %a1, float %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vfalse = fcmp false float %a1, %a2
+; CHECK-NEXT: %voeq = fcmp oeq float %a1, %a2
+; CHECK-NEXT: %vogt = fcmp ogt float %a1, %a2
+; CHECK-NEXT: %voge = fcmp oge float %a1, %a2
+; CHECK-NEXT: %volt = fcmp olt float %a1, %a2
+; CHECK-NEXT: %vole = fcmp ole float %a1, %a2
+; CHECK-NEXT: %vone = fcmp one float %a1, %a2
+; CHECK-NEXT: %ord = fcmp ord float %a1, %a2
+; CHECK-NEXT: %vueq = fcmp ueq float %a1, %a2
+; CHECK-NEXT: %vugt = fcmp ugt float %a1, %a2
+; CHECK-NEXT: %vuge = fcmp uge float %a1, %a2
+; CHECK-NEXT: %vult = fcmp ult float %a1, %a2
+; CHECK-NEXT: %vule = fcmp ule float %a1, %a2
+; CHECK-NEXT: %vune = fcmp une float %a1, %a2
+; CHECK-NEXT: %vuno = fcmp uno float %a1, %a2
+; CHECK-NEXT: %vtrue = fcmp true float %a1, %a2
+; CHECK-NEXT: ret i1 %voeq
; CHECK-NEXT: }
define i1 @FcmpDouble(double %a1, double %a2) {
+entry:
%vfalse = fcmp false double %a1, %a2
%voeq = fcmp oeq double %a1, %a2
%vogt = fcmp ogt double %a1, %a2
ret i1 %voeq
}
-; CHECK-NEXT: define i1 @FcmpDouble(double %__0, double %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fcmp false double %__0, %__1
-; CHECK-NEXT: %__3 = fcmp oeq double %__0, %__1
-; CHECK-NEXT: %__4 = fcmp ogt double %__0, %__1
-; CHECK-NEXT: %__5 = fcmp oge double %__0, %__1
-; CHECK-NEXT: %__6 = fcmp olt double %__0, %__1
-; CHECK-NEXT: %__7 = fcmp ole double %__0, %__1
-; CHECK-NEXT: %__8 = fcmp one double %__0, %__1
-; CHECK-NEXT: %__9 = fcmp ord double %__0, %__1
-; CHECK-NEXT: %__10 = fcmp ueq double %__0, %__1
-; CHECK-NEXT: %__11 = fcmp ugt double %__0, %__1
-; CHECK-NEXT: %__12 = fcmp uge double %__0, %__1
-; CHECK-NEXT: %__13 = fcmp ult double %__0, %__1
-; CHECK-NEXT: %__14 = fcmp ule double %__0, %__1
-; CHECK-NEXT: %__15 = fcmp une double %__0, %__1
-; CHECK-NEXT: %__16 = fcmp uno double %__0, %__1
-; CHECK-NEXT: %__17 = fcmp true double %__0, %__1
-; CHECK-NEXT: ret i1 %__3
+; CHECK-NEXT: define i1 @FcmpDouble(double %a1, double %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vfalse = fcmp false double %a1, %a2
+; CHECK-NEXT: %voeq = fcmp oeq double %a1, %a2
+; CHECK-NEXT: %vogt = fcmp ogt double %a1, %a2
+; CHECK-NEXT: %voge = fcmp oge double %a1, %a2
+; CHECK-NEXT: %volt = fcmp olt double %a1, %a2
+; CHECK-NEXT: %vole = fcmp ole double %a1, %a2
+; CHECK-NEXT: %vone = fcmp one double %a1, %a2
+; CHECK-NEXT: %ord = fcmp ord double %a1, %a2
+; CHECK-NEXT: %vueq = fcmp ueq double %a1, %a2
+; CHECK-NEXT: %vugt = fcmp ugt double %a1, %a2
+; CHECK-NEXT: %vuge = fcmp uge double %a1, %a2
+; CHECK-NEXT: %vult = fcmp ult double %a1, %a2
+; CHECK-NEXT: %vule = fcmp ule double %a1, %a2
+; CHECK-NEXT: %vune = fcmp une double %a1, %a2
+; CHECK-NEXT: %vuno = fcmp uno double %a1, %a2
+; CHECK-NEXT: %vtrue = fcmp true double %a1, %a2
+; CHECK-NEXT: ret i1 %voeq
; CHECK-NEXT: }
define <4 x i1> @FcmpV4xFloat(<4 x float> %a1, <4 x float> %a2) {
+entry:
%vfalse = fcmp false <4 x float> %a1, %a2
%voeq = fcmp oeq <4 x float> %a1, %a2
%vogt = fcmp ogt <4 x float> %a1, %a2
ret <4 x i1> %voeq
}
-; CHECK-NEXT: define <4 x i1> @FcmpV4xFloat(<4 x float> %__0, <4 x float> %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = fcmp false <4 x float> %__0, %__1
-; CHECK-NEXT: %__3 = fcmp oeq <4 x float> %__0, %__1
-; CHECK-NEXT: %__4 = fcmp ogt <4 x float> %__0, %__1
-; CHECK-NEXT: %__5 = fcmp oge <4 x float> %__0, %__1
-; CHECK-NEXT: %__6 = fcmp olt <4 x float> %__0, %__1
-; CHECK-NEXT: %__7 = fcmp ole <4 x float> %__0, %__1
-; CHECK-NEXT: %__8 = fcmp one <4 x float> %__0, %__1
-; CHECK-NEXT: %__9 = fcmp ord <4 x float> %__0, %__1
-; CHECK-NEXT: %__10 = fcmp ueq <4 x float> %__0, %__1
-; CHECK-NEXT: %__11 = fcmp ugt <4 x float> %__0, %__1
-; CHECK-NEXT: %__12 = fcmp uge <4 x float> %__0, %__1
-; CHECK-NEXT: %__13 = fcmp ult <4 x float> %__0, %__1
-; CHECK-NEXT: %__14 = fcmp ule <4 x float> %__0, %__1
-; CHECK-NEXT: %__15 = fcmp une <4 x float> %__0, %__1
-; CHECK-NEXT: %__16 = fcmp uno <4 x float> %__0, %__1
-; CHECK-NEXT: %__17 = fcmp true <4 x float> %__0, %__1
-; CHECK-NEXT: ret <4 x i1> %__3
+; CHECK-NEXT: define <4 x i1> @FcmpV4xFloat(<4 x float> %a1, <4 x float> %a2) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vfalse = fcmp false <4 x float> %a1, %a2
+; CHECK-NEXT: %voeq = fcmp oeq <4 x float> %a1, %a2
+; CHECK-NEXT: %vogt = fcmp ogt <4 x float> %a1, %a2
+; CHECK-NEXT: %voge = fcmp oge <4 x float> %a1, %a2
+; CHECK-NEXT: %volt = fcmp olt <4 x float> %a1, %a2
+; CHECK-NEXT: %vole = fcmp ole <4 x float> %a1, %a2
+; CHECK-NEXT: %vone = fcmp one <4 x float> %a1, %a2
+; CHECK-NEXT: %ord = fcmp ord <4 x float> %a1, %a2
+; CHECK-NEXT: %vueq = fcmp ueq <4 x float> %a1, %a2
+; CHECK-NEXT: %vugt = fcmp ugt <4 x float> %a1, %a2
+; CHECK-NEXT: %vuge = fcmp uge <4 x float> %a1, %a2
+; CHECK-NEXT: %vult = fcmp ult <4 x float> %a1, %a2
+; CHECK-NEXT: %vule = fcmp ule <4 x float> %a1, %a2
+; CHECK-NEXT: %vune = fcmp une <4 x float> %a1, %a2
+; CHECK-NEXT: %vuno = fcmp uno <4 x float> %a1, %a2
+; CHECK-NEXT: %vtrue = fcmp true <4 x float> %a1, %a2
+; CHECK-NEXT: ret <4 x i1> %voeq
; CHECK-NEXT: }
; Test handling of constants in function blocks.
-; RUN: llvm-as < %s | pnacl-freeze \
+; RUN: llvm-as < %s | pnacl-freeze -allow-local-symbol-tables \
; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \
; RUN: -allow-pnacl-reader-error-recovery \
+; RUN: -allow-local-symbol-tables \
; RUN: | FileCheck %s
define void @TestIntegers() {
-; CHECK: __0:
+entry:
+; CHECK: entry:
; Test various sized integers
%v0 = or i1 true, false
-; CHECK-NEXT: %__0 = or i1 true, false
+; CHECK-NEXT: %v0 = or i1 true, false
%v1 = add i8 0, 0
-; CHECK-NEXT: %__1 = add i8 0, 0
+; CHECK-NEXT: %v1 = add i8 0, 0
%v2 = add i8 5, 0
-; CHECK-NEXT: %__2 = add i8 5, 0
+; CHECK-NEXT: %v2 = add i8 5, 0
%v3 = add i8 -5, 0
-; CHECK-NEXT: %__3 = add i8 -5, 0
+; CHECK-NEXT: %v3 = add i8 -5, 0
%v4 = and i16 10, 0
-; CHECK-NEXT: %__4 = and i16 10, 0
+; CHECK-NEXT: %v4 = and i16 10, 0
%v5 = add i16 -10, 0
-; CHECK-NEXT: %__5 = add i16 -10, 0
+; CHECK-NEXT: %v5 = add i16 -10, 0
%v6 = add i32 20, 0
-; CHECK-NEXT: %__6 = add i32 20, 0
+; CHECK-NEXT: %v6 = add i32 20, 0
%v7 = add i32 -20, 0
-; CHECK-NEXT: %__7 = add i32 -20, 0
+; CHECK-NEXT: %v7 = add i32 -20, 0
%v8 = add i64 30, 0
-; CHECK-NEXT: %__8 = add i64 30, 0
+; CHECK-NEXT: %v8 = add i64 30, 0
%v9 = add i64 -30, 0
-; CHECK-NEXT: %__9 = add i64 -30, 0
+; CHECK-NEXT: %v9 = add i64 -30, 0
; Test undefined integer values.
%v10 = xor i1 undef, false
-; CHECK-NEXT: %__10 = xor i1 undef, false
+; CHECK-NEXT: %v10 = xor i1 undef, false
%v11 = add i8 undef, 0
-; CHECK-NEXT: %__11 = add i8 undef, 0
+; CHECK-NEXT: %v11 = add i8 undef, 0
%v12 = add i16 undef, 0
-; CHECK-NEXT: %__12 = add i16 undef, 0
+; CHECK-NEXT: %v12 = add i16 undef, 0
%v13 = add i32 undef, 0
-; CHECK-NEXT: %__13 = add i32 undef, 0
+; CHECK-NEXT: %v13 = add i32 undef, 0
%v14 = add i64 undef, 0
-; CHECK-NEXT: %__14 = add i64 undef, 0
+; CHECK-NEXT: %v14 = add i64 undef, 0
ret void
; CHECK-NEXT: ret void
}
define void @TestFloats() {
-; CHECK: __0:
+entry:
+; CHECK: entry:
; Test float and double constants
%v0 = fadd float 1.0, 0.0
-; CHECK-NEXT: %__0 = fadd float 1.000000e+00, 0.000000e+00
+; CHECK-NEXT: %v0 = fadd float 1.000000e+00, 0.000000e+00
%v1 = fadd double 1.0, 0.0
-; CHECK-NEXT: %__1 = fadd double 1.000000e+00, 0.000000e+00
+; CHECK-NEXT: %v1 = fadd double 1.000000e+00, 0.000000e+00
%v2 = fsub float 7.000000e+00, 8.000000e+00
-; CHECK-NEXT: %__2 = fsub float 7.000000e+00, 8.000000e+00
+; CHECK-NEXT: %v2 = fsub float 7.000000e+00, 8.000000e+00
%v3 = fsub double 5.000000e+00, 6.000000e+00
-; CHECK-NEXT: %__3 = fsub double 5.000000e+00, 6.000000e+00
+; CHECK-NEXT: %v3 = fsub double 5.000000e+00, 6.000000e+00
; Test undefined float and double.
%v4 = fadd float undef, 0.0
-; CHECK-NEXT: %__4 = fadd float undef, 0.000000e+00
+; CHECK-NEXT: %v4 = fadd float undef, 0.000000e+00
%v5 = fsub double undef, 6.000000e+00
-; CHECK-NEXT: %__5 = fsub double undef, 6.000000e+00
+; CHECK-NEXT: %v5 = fsub double undef, 6.000000e+00
; Test special floating point constants. Note: LLVM assembly appears
; to use 64-bit integer constants for both float and double.
; Generated from NAN in <math.h>
%v6 = fadd float 0x7FF8000000000000, 0.0
-; CHECK-NEXT: %__6 = fadd float nan, 0.000000e+00
+; CHECK-NEXT: %v6 = fadd float nan, 0.000000e+00
; Generated from -NAN in <math.h>
%v7 = fadd float 0xFFF8000000000000, 0.0
-; CHECK-NEXT: %__7 = fadd float -nan, 0.000000e+00
+; CHECK-NEXT: %v7 = fadd float -nan, 0.000000e+00
; Generated from INFINITY in <math.h>
%v8 = fadd float 0x7FF0000000000000, 0.0
-; CHECK-NEXT: %__8 = fadd float inf, 0.000000e+00
+; CHECK-NEXT: %v8 = fadd float inf, 0.000000e+00
; Generated from -INFINITY in <math.h>
%v9 = fadd float 0xFFF0000000000000, 0.0
-; CHECK-NEXT: %__9 = fadd float -inf, 0.000000e+00
+; CHECK-NEXT: %v9 = fadd float -inf, 0.000000e+00
; Generated from FLT_MIN in <float.h>
%v10 = fadd float 0x381000000000000000, 0.0
-; CHECK-NEXT: %__10 = fadd float 0.000000e+00, 0.000000e+00
+; CHECK-NEXT: %v10 = fadd float 0.000000e+00, 0.000000e+00
; Generated from -FLT_MIN in <float.h>
%v11 = fadd float 0xb81000000000000000, 0.0
-; CHECK-NEXT: %__11 = fadd float 0.000000e+00, 0.000000e+00
+; CHECK-NEXT: %v11 = fadd float 0.000000e+00, 0.000000e+00
; Generated from FLT_MAX in <float.h>
%v12 = fadd float 340282346638528859811704183484516925440.000000, 0.0
-; CHECK-NEXT: %__12 = fadd float 3.402823e+38, 0.000000e+00
+; CHECK-NEXT: %v12 = fadd float 3.402823e+38, 0.000000e+00
; Generated from -FLT_MAX in <float.h>
%v13 = fadd float -340282346638528859811704183484516925440.000000, 0.0
-; CHECK-NEXT: %__13 = fadd float -3.402823e+38, 0.000000e+00
+; CHECK-NEXT: %v13 = fadd float -3.402823e+38, 0.000000e+00
; Generated from NAN in <math.h>
%v14 = fadd double 0x7FF8000000000000, 0.0
-; CHECK-NEXT: %__14 = fadd double nan, 0.000000e+00
+; CHECK-NEXT: %v14 = fadd double nan, 0.000000e+00
; Generated from -NAN in <math.h>
%v15 = fadd double 0xFFF8000000000000, 0.0
-; CHECK-NEXT: %__15 = fadd double -nan, 0.000000e+00
+; CHECK-NEXT: %v15 = fadd double -nan, 0.000000e+00
; Generated from INFINITY in <math.h>
%v16 = fadd double 0x7FF0000000000000, 0.0
-; CHECK-NEXT: %__16 = fadd double inf, 0.000000e+00
+; CHECK-NEXT: %v16 = fadd double inf, 0.000000e+00
; Generated from -INFINITY in <math.h>
%v17 = fadd double 0xFFF0000000000000, 0.0
-; CHECK-NEXT: %__17 = fadd double -inf, 0.000000e+00
+; CHECK-NEXT: %v17 = fadd double -inf, 0.000000e+00
; Generated from DBL_MIN in <float.h>
%v18 = fadd double 0x0010000000000000, 0.0
-; CHECK-NEXT: %__18 = fadd double 2.225074e-308, 0.000000e+00
+; CHECK-NEXT: %v18 = fadd double 2.225074e-308, 0.000000e+00
; Generated from -DBL_MIN in <float.h>
%v19 = fadd double 0x8010000000000000, 0.0
-; CHECK-NEXT: %__19 = fadd double -2.225074e-308, 0.000000e+00
+; CHECK-NEXT: %v19 = fadd double -2.225074e-308, 0.000000e+00
; Generated from DBL_MAX in <float.h>
%v20 = fadd double 179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.000000, 0.0
-; CHECK-NEXT: %__20 = fadd double 1.797693e+308, 0.000000e+00
+; CHECK-NEXT: %v20 = fadd double 1.797693e+308, 0.000000e+00
; Generated from -DBL_MAX in <float.h>
%v21 = fadd double -179769313486231570814527423731704356798070567525844996598917476803157260780028538760589558632766878171540458953514382464234321326889464182768467546703537516986049910576551282076245490090389328944075868508455133942304583236903222948165808559332123348274797826204144723168738177180919299881250404026184124858368.000000, 0.0
-; CHECK-NEXT: %__21 = fadd double -1.797693e+308, 0.000000e+00
+; CHECK-NEXT: %v21 = fadd double -1.797693e+308, 0.000000e+00
ret void
; CHECK-NEXT: ret void
; Tests insertelement and extractelement vector instructions.
-; RUN: llvm-as < %s | pnacl-freeze \
+; RUN: llvm-as < %s | pnacl-freeze -allow-local-symbol-tables \
; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \
; RUN: -allow-pnacl-reader-error-recovery \
+; RUN: -allow-local-symbol-tables \
; RUN: | FileCheck %s
; TODO(kschimpf): Change index arguments to valid constant indices once
; we can handle constants.
define void @ExtractV4xi1(<4 x i1> %v, i32 %i) {
+entry:
%e = extractelement <4 x i1> %v, i32 %i
ret void
}
-; CHECK: define void @ExtractV4xi1(<4 x i1> %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = extractelement <4 x i1> %__0, i32 %__1
+; CHECK: define void @ExtractV4xi1(<4 x i1> %v, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = extractelement <4 x i1> %v, i32 %i
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define void @ExtractV8xi1(<8 x i1> %v, i32 %i) {
+entry:
%e = extractelement <8 x i1> %v, i32 %i
ret void
}
-; CHECK-NEXT: define void @ExtractV8xi1(<8 x i1> %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = extractelement <8 x i1> %__0, i32 %__1
+; CHECK-NEXT: define void @ExtractV8xi1(<8 x i1> %v, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = extractelement <8 x i1> %v, i32 %i
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define void @ExtractV16xi1(<16 x i1> %v, i32 %i) {
+entry:
%e = extractelement <16 x i1> %v, i32 %i
ret void
}
-; CHECK-NEXT: define void @ExtractV16xi1(<16 x i1> %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = extractelement <16 x i1> %__0, i32 %__1
+; CHECK-NEXT: define void @ExtractV16xi1(<16 x i1> %v, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = extractelement <16 x i1> %v, i32 %i
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define void @ExtractV16xi8(<16 x i8> %v, i32 %i) {
+entry:
%e = extractelement <16 x i8> %v, i32 %i
ret void
}
-; CHECK-NEXT: define void @ExtractV16xi8(<16 x i8> %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = extractelement <16 x i8> %__0, i32 %__1
+; CHECK-NEXT: define void @ExtractV16xi8(<16 x i8> %v, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = extractelement <16 x i8> %v, i32 %i
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define void @ExtractV8xi16(<8 x i16> %v, i32 %i) {
+entry:
%e = extractelement <8 x i16> %v, i32 %i
ret void
}
-; CHECK-NEXT: define void @ExtractV8xi16(<8 x i16> %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = extractelement <8 x i16> %__0, i32 %__1
+; CHECK-NEXT: define void @ExtractV8xi16(<8 x i16> %v, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = extractelement <8 x i16> %v, i32 %i
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define i32 @ExtractV4xi32(<4 x i32> %v, i32 %i) {
+entry:
%e = extractelement <4 x i32> %v, i32 %i
ret i32 %e
}
-; CHECK-NEXT: define i32 @ExtractV4xi32(<4 x i32> %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = extractelement <4 x i32> %__0, i32 %__1
-; CHECK-NEXT: ret i32 %__2
+; CHECK-NEXT: define i32 @ExtractV4xi32(<4 x i32> %v, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = extractelement <4 x i32> %v, i32 %i
+; CHECK-NEXT: ret i32 %e
; CHECK-NEXT: }
define float @ExtractV4xfloat(<4 x float> %v, i32 %i) {
+entry:
%e = extractelement <4 x float> %v, i32 %i
ret float %e
}
-; CHECK-NEXT: define float @ExtractV4xfloat(<4 x float> %__0, i32 %__1) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__2 = extractelement <4 x float> %__0, i32 %__1
-; CHECK-NEXT: ret float %__2
+; CHECK-NEXT: define float @ExtractV4xfloat(<4 x float> %v, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = extractelement <4 x float> %v, i32 %i
+; CHECK-NEXT: ret float %e
; CHECK-NEXT: }
define <4 x i1> @InsertV4xi1(<4 x i1> %v, i32 %pe, i32 %i) {
+entry:
%e = trunc i32 %pe to i1
%r = insertelement <4 x i1> %v, i1 %e, i32 %i
ret <4 x i1> %r
}
-; CHECK-NEXT: define <4 x i1> @InsertV4xi1(<4 x i1> %__0, i32 %__1, i32 %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__1 to i1
-; CHECK-NEXT: %__4 = insertelement <4 x i1> %__0, i1 %__3, i32 %__2
-; CHECK-NEXT: ret i1 %__4
+; CHECK-NEXT: define <4 x i1> @InsertV4xi1(<4 x i1> %v, i32 %pe, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = trunc i32 %pe to i1
+; CHECK-NEXT: %r = insertelement <4 x i1> %v, i1 %e, i32 %i
+; CHECK-NEXT: ret i1 %r
; CHECK-NEXT: }
define <8 x i1> @InsertV8xi1(<8 x i1> %v, i32 %pe, i32 %i) {
+entry:
%e = trunc i32 %pe to i1
%r = insertelement <8 x i1> %v, i1 %e, i32 %i
ret <8 x i1> %r
}
-; CHECK-NEXT: define <8 x i1> @InsertV8xi1(<8 x i1> %__0, i32 %__1, i32 %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__1 to i1
-; CHECK-NEXT: %__4 = insertelement <8 x i1> %__0, i1 %__3, i32 %__2
-; CHECK-NEXT: ret i1 %__4
+; CHECK-NEXT: define <8 x i1> @InsertV8xi1(<8 x i1> %v, i32 %pe, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = trunc i32 %pe to i1
+; CHECK-NEXT: %r = insertelement <8 x i1> %v, i1 %e, i32 %i
+; CHECK-NEXT: ret i1 %r
; CHECK-NEXT: }
define <16 x i1> @InsertV16xi1(<16 x i1> %v, i32 %pe, i32 %i) {
+entry:
%e = trunc i32 %pe to i1
%r = insertelement <16 x i1> %v, i1 %e, i32 %i
ret <16 x i1> %r
}
-; CHECK-NEXT: define <16 x i1> @InsertV16xi1(<16 x i1> %__0, i32 %__1, i32 %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__1 to i1
-; CHECK-NEXT: %__4 = insertelement <16 x i1> %__0, i1 %__3, i32 %__2
-; CHECK-NEXT: ret i1 %__4
+; CHECK-NEXT: define <16 x i1> @InsertV16xi1(<16 x i1> %v, i32 %pe, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = trunc i32 %pe to i1
+; CHECK-NEXT: %r = insertelement <16 x i1> %v, i1 %e, i32 %i
+; CHECK-NEXT: ret i1 %r
; CHECK-NEXT: }
define <16 x i8> @InsertV16xi8(<16 x i8> %v, i32 %pe, i32 %i) {
+entry:
%e = trunc i32 %pe to i8
%r = insertelement <16 x i8> %v, i8 %e, i32 %i
ret <16 x i8> %r
}
-; CHECK-NEXT: define <16 x i8> @InsertV16xi8(<16 x i8> %__0, i32 %__1, i32 %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__1 to i8
-; CHECK-NEXT: %__4 = insertelement <16 x i8> %__0, i8 %__3, i32 %__2
-; CHECK-NEXT: ret i8 %__4
+; CHECK-NEXT: define <16 x i8> @InsertV16xi8(<16 x i8> %v, i32 %pe, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = trunc i32 %pe to i8
+; CHECK-NEXT: %r = insertelement <16 x i8> %v, i8 %e, i32 %i
+; CHECK-NEXT: ret i8 %r
; CHECK-NEXT: }
define <8 x i16> @InsertV8xi16(<8 x i16> %v, i32 %pe, i32 %i) {
+entry:
%e = trunc i32 %pe to i16
%r = insertelement <8 x i16> %v, i16 %e, i32 %i
ret <8 x i16> %r
}
-; CHECK-NEXT: define <8 x i16> @InsertV8xi16(<8 x i16> %__0, i32 %__1, i32 %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__1 to i16
-; CHECK-NEXT: %__4 = insertelement <8 x i16> %__0, i16 %__3, i32 %__2
-; CHECK-NEXT: ret i16 %__4
+; CHECK-NEXT: define <8 x i16> @InsertV8xi16(<8 x i16> %v, i32 %pe, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %e = trunc i32 %pe to i16
+; CHECK-NEXT: %r = insertelement <8 x i16> %v, i16 %e, i32 %i
+; CHECK-NEXT: ret i16 %r
; CHECK-NEXT: }
-define <4 x i32> @InsertV16xi32(<4 x i32> %v, i32 %e, i32 %i) {
+define <4 x i32> @InsertV4xi32(<4 x i32> %v, i32 %e, i32 %i) {
+entry:
%r = insertelement <4 x i32> %v, i32 %e, i32 %i
ret <4 x i32> %r
}
-; CHECK-NEXT: define <4 x i32> @InsertV16xi32(<4 x i32> %__0, i32 %__1, i32 %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = insertelement <4 x i32> %__0, i32 %__1, i32 %__2
-; CHECK-NEXT: ret i32 %__3
+; CHECK-NEXT: define <4 x i32> @InsertV4xi32(<4 x i32> %v, i32 %e, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %r = insertelement <4 x i32> %v, i32 %e, i32 %i
+; CHECK-NEXT: ret i32 %r
; CHECK-NEXT: }
-define <4 x float> @InsertV16xfloat(<4 x float> %v, float %e, i32 %i) {
+define <4 x float> @InsertV4xfloat(<4 x float> %v, float %e, i32 %i) {
+entry:
%r = insertelement <4 x float> %v, float %e, i32 %i
ret <4 x float> %r
}
-; CHECK-NEXT: define <4 x float> @InsertV16xfloat(<4 x float> %__0, float %__1, i32 %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = insertelement <4 x float> %__0, float %__1, i32 %__2
-; CHECK-NEXT: ret float %__3
+; CHECK-NEXT: define <4 x float> @InsertV4xfloat(<4 x float> %v, float %e, i32 %i) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %r = insertelement <4 x float> %v, float %e, i32 %i
+; CHECK-NEXT: ret float %r
; CHECK-NEXT: }
; Tests if we can read select instructions.
-; RUN: llvm-as < %s | pnacl-freeze \
+; RUN: llvm-as < %s | pnacl-freeze -allow-local-symbol-tables \
; RUN: | %llvm2ice -notranslate -verbose=inst -build-on-read \
; RUN: -allow-pnacl-reader-error-recovery \
+; RUN: -allow-local-symbol-tables \
; RUN: | FileCheck %s
define void @Seli1(i32 %p) {
+entry:
%vc = trunc i32 %p to i1
%vt = trunc i32 %p to i1
%ve = trunc i32 %p to i1
ret void
}
-; CHECK: define void @Seli1(i32 %__0) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__1 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__2 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__1, i1 %__2, i1 %__3
+; CHECK: define void @Seli1(i32 %p) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %p to i1
+; CHECK-NEXT: %vt = trunc i32 %p to i1
+; CHECK-NEXT: %ve = trunc i32 %p to i1
+; CHECK-NEXT: %r = select i1 %vc, i1 %vt, i1 %ve
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define void @Seli8(i32 %p) {
+entry:
%vc = trunc i32 %p to i1
%vt = trunc i32 %p to i8
%ve = trunc i32 %p to i8
ret void
}
-; CHECK-NEXT: define void @Seli8(i32 %__0) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__1 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__2 = trunc i32 %__0 to i8
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i8
-; CHECK-NEXT: %__4 = select i1 %__1, i8 %__2, i8 %__3
+; CHECK-NEXT: define void @Seli8(i32 %p) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %p to i1
+; CHECK-NEXT: %vt = trunc i32 %p to i8
+; CHECK-NEXT: %ve = trunc i32 %p to i8
+; CHECK-NEXT: %r = select i1 %vc, i8 %vt, i8 %ve
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define void @Seli16(i32 %p) {
+entry:
%vc = trunc i32 %p to i1
%vt = trunc i32 %p to i16
%ve = trunc i32 %p to i16
ret void
}
-; CHECK-NEXT: define void @Seli16(i32 %__0) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__1 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__2 = trunc i32 %__0 to i16
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i16
-; CHECK-NEXT: %__4 = select i1 %__1, i16 %__2, i16 %__3
+; CHECK-NEXT: define void @Seli16(i32 %p) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %p to i1
+; CHECK-NEXT: %vt = trunc i32 %p to i16
+; CHECK-NEXT: %ve = trunc i32 %p to i16
+; CHECK-NEXT: %r = select i1 %vc, i16 %vt, i16 %ve
; CHECK-NEXT: ret void
; CHECK-NEXT: }
define i32 @Seli32(i32 %pc, i32 %pt, i32 %pe) {
+entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, i32 %pt, i32 %pe
ret i32 %r
}
-; CHECK-NEXT: define i32 @Seli32(i32 %__0, i32 %__1, i32 %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, i32 %__1, i32 %__2
-; CHECK-NEXT: ret i32 %__4
+; CHECK-NEXT: define i32 @Seli32(i32 %pc, i32 %pt, i32 %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, i32 %pt, i32 %pe
+; CHECK-NEXT: ret i32 %r
; CHECK-NEXT: }
define i64 @Seli64(i64 %pc, i64 %pt, i64 %pe) {
+entry:
%vc = trunc i64 %pc to i1
%r = select i1 %vc, i64 %pt, i64 %pe
ret i64 %r
}
-; CHECK-NEXT: define i64 @Seli64(i64 %__0, i64 %__1, i64 %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i64 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, i64 %__1, i64 %__2
-; CHECK-NEXT: ret i64 %__4
+; CHECK-NEXT: define i64 @Seli64(i64 %pc, i64 %pt, i64 %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i64 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, i64 %pt, i64 %pe
+; CHECK-NEXT: ret i64 %r
; CHECK-NEXT: }
define float @SelFloat(i32 %pc, float %pt, float %pe) {
+entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, float %pt, float %pe
ret float %r
}
-; CHECK-NEXT: define float @SelFloat(i32 %__0, float %__1, float %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, float %__1, float %__2
-; CHECK-NEXT: ret float %__4
+; CHECK-NEXT: define float @SelFloat(i32 %pc, float %pt, float %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, float %pt, float %pe
+; CHECK-NEXT: ret float %r
; CHECK-NEXT: }
define double @SelDouble(i32 %pc, double %pt, double %pe) {
+entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, double %pt, double %pe
ret double %r
}
-; CHECK-NEXT: define double @SelDouble(i32 %__0, double %__1, double %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, double %__1, double %__2
-; CHECK-NEXT: ret double %__4
+; CHECK-NEXT: define double @SelDouble(i32 %pc, double %pt, double %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, double %pt, double %pe
+; CHECK-NEXT: ret double %r
; CHECK-NEXT: }
define <16 x i1> @SelV16x1(i32 %pc, <16 x i1> %pt, <16 x i1> %pe) {
+entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <16 x i1> %pt, <16 x i1> %pe
ret <16 x i1> %r
}
-; CHECK-NEXT: define <16 x i1> @SelV16x1(i32 %__0, <16 x i1> %__1, <16 x i1> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, <16 x i1> %__1, <16 x i1> %__2
-; CHECK-NEXT: ret <16 x i1> %__4
+; CHECK-NEXT: define <16 x i1> @SelV16x1(i32 %pc, <16 x i1> %pt, <16 x i1> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, <16 x i1> %pt, <16 x i1> %pe
+; CHECK-NEXT: ret <16 x i1> %r
; CHECK-NEXT: }
define <8 x i1> @SelV8x1(i32 %pc, <8 x i1> %pt, <8 x i1> %pe) {
+entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <8 x i1> %pt, <8 x i1> %pe
ret <8 x i1> %r
}
-; CHECK-NEXT: define <8 x i1> @SelV8x1(i32 %__0, <8 x i1> %__1, <8 x i1> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, <8 x i1> %__1, <8 x i1> %__2
-; CHECK-NEXT: ret <8 x i1> %__4
+; CHECK-NEXT: define <8 x i1> @SelV8x1(i32 %pc, <8 x i1> %pt, <8 x i1> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, <8 x i1> %pt, <8 x i1> %pe
+; CHECK-NEXT: ret <8 x i1> %r
; CHECK-NEXT: }
define <4 x i1> @SelV4x1(i32 %pc, <4 x i1> %pt, <4 x i1> %pe) {
+entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <4 x i1> %pt, <4 x i1> %pe
ret <4 x i1> %r
}
-; CHECK-NEXT: define <4 x i1> @SelV4x1(i32 %__0, <4 x i1> %__1, <4 x i1> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, <4 x i1> %__1, <4 x i1> %__2
-; CHECK-NEXT: ret <4 x i1> %__4
+; CHECK-NEXT: define <4 x i1> @SelV4x1(i32 %pc, <4 x i1> %pt, <4 x i1> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, <4 x i1> %pt, <4 x i1> %pe
+; CHECK-NEXT: ret <4 x i1> %r
; CHECK-NEXT: }
define <16 x i8> @SelV16x8(i32 %pc, <16 x i8> %pt, <16 x i8> %pe) {
+entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <16 x i8> %pt, <16 x i8> %pe
ret <16 x i8> %r
}
-; CHECK-NEXT: define <16 x i8> @SelV16x8(i32 %__0, <16 x i8> %__1, <16 x i8> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, <16 x i8> %__1, <16 x i8> %__2
-; CHECK-NEXT: ret <16 x i8> %__4
+; CHECK-NEXT: define <16 x i8> @SelV16x8(i32 %pc, <16 x i8> %pt, <16 x i8> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, <16 x i8> %pt, <16 x i8> %pe
+; CHECK-NEXT: ret <16 x i8> %r
; CHECK-NEXT: }
define <8 x i16> @SelV8x16(i32 %pc, <8 x i16> %pt, <8 x i16> %pe) {
+entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <8 x i16> %pt, <8 x i16> %pe
ret <8 x i16> %r
}
-; CHECK-NEXT: define <8 x i16> @SelV8x16(i32 %__0, <8 x i16> %__1, <8 x i16> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, <8 x i16> %__1, <8 x i16> %__2
-; CHECK-NEXT: ret <8 x i16> %__4
+; CHECK-NEXT: define <8 x i16> @SelV8x16(i32 %pc, <8 x i16> %pt, <8 x i16> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, <8 x i16> %pt, <8 x i16> %pe
+; CHECK-NEXT: ret <8 x i16> %r
; CHECK-NEXT: }
define <4 x i32> @SelV4x32(i32 %pc, <4 x i32> %pt, <4 x i32> %pe) {
+entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <4 x i32> %pt, <4 x i32> %pe
ret <4 x i32> %r
}
-; CHECK-NEXT: define <4 x i32> @SelV4x32(i32 %__0, <4 x i32> %__1, <4 x i32> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, <4 x i32> %__1, <4 x i32> %__2
-; CHECK-NEXT: ret <4 x i32> %__4
+; CHECK-NEXT: define <4 x i32> @SelV4x32(i32 %pc, <4 x i32> %pt, <4 x i32> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, <4 x i32> %pt, <4 x i32> %pe
+; CHECK-NEXT: ret <4 x i32> %r
; CHECK-NEXT: }
define <4 x float> @SelV4xfloat(i32 %pc, <4 x float> %pt, <4 x float> %pe) {
+entry:
%vc = trunc i32 %pc to i1
%r = select i1 %vc, <4 x float> %pt, <4 x float> %pe
ret <4 x float> %r
}
-; CHECK-NEXT: define <4 x float> @SelV4xfloat(i32 %__0, <4 x float> %__1, <4 x float> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = trunc i32 %__0 to i1
-; CHECK-NEXT: %__4 = select i1 %__3, <4 x float> %__1, <4 x float> %__2
-; CHECK-NEXT: ret <4 x float> %__4
+; CHECK-NEXT: define <4 x float> @SelV4xfloat(i32 %pc, <4 x float> %pt, <4 x float> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %vc = trunc i32 %pc to i1
+; CHECK-NEXT: %r = select i1 %vc, <4 x float> %pt, <4 x float> %pe
+; CHECK-NEXT: ret <4 x float> %r
; CHECK-NEXT: }
define <16 x i1> @SelV16x1Vcond(<16 x i1> %pc, <16 x i1> %pt, <16 x i1> %pe) {
+entry:
%r = select <16 x i1> %pc, <16 x i1> %pt, <16 x i1> %pe
ret <16 x i1> %r
}
-; CHECK-NEXT: define <16 x i1> @SelV16x1Vcond(<16 x i1> %__0, <16 x i1> %__1, <16 x i1> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = select <16 x i1> %__0, <16 x i1> %__1, <16 x i1> %__2
-; CHECK-NEXT: ret <16 x i1> %__3
+; CHECK-NEXT: define <16 x i1> @SelV16x1Vcond(<16 x i1> %pc, <16 x i1> %pt, <16 x i1> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %r = select <16 x i1> %pc, <16 x i1> %pt, <16 x i1> %pe
+; CHECK-NEXT: ret <16 x i1> %r
; CHECK-NEXT: }
define <8 x i1> @SelV8x1Vcond(<8 x i1> %pc, <8 x i1> %pt, <8 x i1> %pe) {
+entry:
%r = select <8 x i1> %pc, <8 x i1> %pt, <8 x i1> %pe
ret <8 x i1> %r
}
-; CHECK-NEXT: define <8 x i1> @SelV8x1Vcond(<8 x i1> %__0, <8 x i1> %__1, <8 x i1> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = select <8 x i1> %__0, <8 x i1> %__1, <8 x i1> %__2
-; CHECK-NEXT: ret <8 x i1> %__3
+; CHECK-NEXT: define <8 x i1> @SelV8x1Vcond(<8 x i1> %pc, <8 x i1> %pt, <8 x i1> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %r = select <8 x i1> %pc, <8 x i1> %pt, <8 x i1> %pe
+; CHECK-NEXT: ret <8 x i1> %r
; CHECK-NEXT: }
define <4 x i1> @SelV4x1Vcond(<4 x i1> %pc, <4 x i1> %pt, <4 x i1> %pe) {
+entry:
%r = select <4 x i1> %pc, <4 x i1> %pt, <4 x i1> %pe
ret <4 x i1> %r
}
-; CHECK-NEXT: define <4 x i1> @SelV4x1Vcond(<4 x i1> %__0, <4 x i1> %__1, <4 x i1> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = select <4 x i1> %__0, <4 x i1> %__1, <4 x i1> %__2
-; CHECK-NEXT: ret <4 x i1> %__3
+; CHECK-NEXT: define <4 x i1> @SelV4x1Vcond(<4 x i1> %pc, <4 x i1> %pt, <4 x i1> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %r = select <4 x i1> %pc, <4 x i1> %pt, <4 x i1> %pe
+; CHECK-NEXT: ret <4 x i1> %r
; CHECK-NEXT: }
define <16 x i8> @SelV16x8Vcond(<16 x i1> %pc, <16 x i8> %pt, <16 x i8> %pe) {
+entry:
%r = select <16 x i1> %pc, <16 x i8> %pt, <16 x i8> %pe
ret <16 x i8> %r
}
-; CHECK-NEXT: define <16 x i8> @SelV16x8Vcond(<16 x i1> %__0, <16 x i8> %__1, <16 x i8> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = select <16 x i1> %__0, <16 x i8> %__1, <16 x i8> %__2
-; CHECK-NEXT: ret <16 x i8> %__3
+; CHECK-NEXT: define <16 x i8> @SelV16x8Vcond(<16 x i1> %pc, <16 x i8> %pt, <16 x i8> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %r = select <16 x i1> %pc, <16 x i8> %pt, <16 x i8> %pe
+; CHECK-NEXT: ret <16 x i8> %r
; CHECK-NEXT: }
define <8 x i16> @SelV8x16Vcond(<8 x i1> %pc, <8 x i16> %pt, <8 x i16> %pe) {
+entry:
%r = select <8 x i1> %pc, <8 x i16> %pt, <8 x i16> %pe
ret <8 x i16> %r
}
-; CHECK-NEXT: define <8 x i16> @SelV8x16Vcond(<8 x i1> %__0, <8 x i16> %__1, <8 x i16> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = select <8 x i1> %__0, <8 x i16> %__1, <8 x i16> %__2
-; CHECK-NEXT: ret <8 x i16> %__3
+; CHECK-NEXT: define <8 x i16> @SelV8x16Vcond(<8 x i1> %pc, <8 x i16> %pt, <8 x i16> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %r = select <8 x i1> %pc, <8 x i16> %pt, <8 x i16> %pe
+; CHECK-NEXT: ret <8 x i16> %r
; CHECK-NEXT: }
define <4 x i32> @SelV4x32Vcond(<4 x i1> %pc, <4 x i32> %pt, <4 x i32> %pe) {
+entry:
%r = select <4 x i1> %pc, <4 x i32> %pt, <4 x i32> %pe
ret <4 x i32> %r
}
-; CHECK-NEXT: define <4 x i32> @SelV4x32Vcond(<4 x i1> %__0, <4 x i32> %__1, <4 x i32> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = select <4 x i1> %__0, <4 x i32> %__1, <4 x i32> %__2
-; CHECK-NEXT: ret <4 x i32> %__3
+; CHECK-NEXT: define <4 x i32> @SelV4x32Vcond(<4 x i1> %pc, <4 x i32> %pt, <4 x i32> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %r = select <4 x i1> %pc, <4 x i32> %pt, <4 x i32> %pe
+; CHECK-NEXT: ret <4 x i32> %r
; CHECK-NEXT: }
define <4 x float> @SelV4xfloatVcond(<4 x i1> %pc, <4 x float> %pt, <4 x float> %pe) {
+entry:
%r = select <4 x i1> %pc, <4 x float> %pt, <4 x float> %pe
ret <4 x float> %r
}
-; CHECK-NEXT: define <4 x float> @SelV4xfloatVcond(<4 x i1> %__0, <4 x float> %__1, <4 x float> %__2) {
-; CHECK-NEXT: __0:
-; CHECK-NEXT: %__3 = select <4 x i1> %__0, <4 x float> %__1, <4 x float> %__2
-; CHECK-NEXT: ret <4 x float> %__3
+; CHECK-NEXT: define <4 x float> @SelV4xfloatVcond(<4 x i1> %pc, <4 x float> %pt, <4 x float> %pe) {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: %r = select <4 x i1> %pc, <4 x float> %pt, <4 x float> %pe
+; CHECK-NEXT: ret <4 x float> %r
; CHECK-NEXT: }