From: Tim Northover Date: Fri, 29 Jul 2016 22:32:36 +0000 (+0000) Subject: GlobalISel: support translation of intrinsic calls. X-Git-Tag: android-x86-7.1-r4~29429 X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=0f15518daebd4f884dff9cf5f08b8296691b3d5c;p=android-x86%2Fexternal-llvm.git GlobalISel: support translation of intrinsic calls. These come in two variants for now: G_INTRINSIC and G_INTRINSIC_W_SIDE_EFFECTS. We may decide to split the latter up with finer-grained restrictions later, if necessary. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@277224 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/include/llvm/CodeGen/GlobalISel/IRTranslator.h b/include/llvm/CodeGen/GlobalISel/IRTranslator.h index 3606cdd807c..f267d6c98ee 100644 --- a/include/llvm/CodeGen/GlobalISel/IRTranslator.h +++ b/include/llvm/CodeGen/GlobalISel/IRTranslator.h @@ -103,6 +103,10 @@ private: /// Translate an LLVM store instruction into generic IR. bool translateStore(const StoreInst &SI); + /// Translate call instruction. + /// \pre \p Inst is a branch instruction. + bool translateCall(const CallInst &Inst); + /// Translate one of LLVM's cast instructions into MachineInstrs, with the /// given generic Opcode. bool translateCast(unsigned Opcode, const CastInst &CI); @@ -119,6 +123,7 @@ private: /// \pre \p Inst is a branch instruction. bool translateBr(const BranchInst &Inst); + /// Translate return (ret) instruction. /// The target needs to implement CallLowering::lowerReturn for /// this to succeed. diff --git a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h index 538e2a78d1d..e83ade6e886 100644 --- a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h +++ b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h @@ -204,6 +204,19 @@ public: /// \return a MachineInstrBuilder for the newly created instruction. 
MachineInstrBuilder buildSequence(LLT Ty, unsigned Res, ArrayRef<unsigned> Ops); + +  /// Build and insert either a G_INTRINSIC (if \p HasSideEffects is false) or +  /// G_INTRINSIC_W_SIDE_EFFECTS instruction. Its first operand will be the +  /// result register definition unless \p Res is NoReg (== 0). The second +  /// operand will be the intrinsic's ID. +  /// +  /// Callers are expected to add the required definitions and uses afterwards. +  /// +  /// \pre setBasicBlock or setMI must have been called. +  /// +  /// \return a MachineInstrBuilder for the newly created instruction. +  MachineInstrBuilder buildIntrinsic(ArrayRef<LLT> Tys, Intrinsic::ID ID, +                                     unsigned Res, bool HasSideEffects);
- virtual unsigned getIntrinsicID(Function *F) const; + virtual unsigned getIntrinsicID(const Function *F) const; /// Returns true if the intrinsic can be overloaded. virtual bool isOverloaded(unsigned IID) const = 0; diff --git a/include/llvm/Target/TargetOpcodes.def b/include/llvm/Target/TargetOpcodes.def index 7df74ad46af..7c8a1df7299 100644 --- a/include/llvm/Target/TargetOpcodes.def +++ b/include/llvm/Target/TargetOpcodes.def @@ -202,6 +202,12 @@ HANDLE_TARGET_OPCODE(G_STORE) /// Generic conditional branch instruction. HANDLE_TARGET_OPCODE(G_BRCOND) +/// Generic intrinsic use (without side effects). +HANDLE_TARGET_OPCODE(G_INTRINSIC) + +/// Generic intrinsic use (with side effects). +HANDLE_TARGET_OPCODE(G_INTRINSIC_W_SIDE_EFFECTS) + /// Generic BRANCH instruction. This is an unconditional branch. HANDLE_TARGET_OPCODE(G_BR) diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp index b0751c0bb5e..80ec1b529d3 100644 --- a/lib/CodeGen/GlobalISel/IRTranslator.cpp +++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp @@ -19,8 +19,10 @@ #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/IR/Constant.h" #include "llvm/IR/Function.h" +#include "llvm/IR/IntrinsicInst.h" #include "llvm/IR/Type.h" #include "llvm/IR/Value.h" +#include "llvm/Target/TargetIntrinsicInfo.h" #include "llvm/Target/TargetLowering.h" #define DEBUG_TYPE "irtranslator" @@ -175,6 +177,34 @@ bool IRTranslator::translateCast(unsigned Opcode, const CastInst &CI) { return true; } +bool IRTranslator::translateCall(const CallInst &CI) { + auto TII = MIRBuilder.getMF().getTarget().getIntrinsicInfo(); + const Function &F = *CI.getCalledFunction(); + Intrinsic::ID ID = F.getIntrinsicID(); + if (TII && ID == Intrinsic::not_intrinsic) + ID = static_cast(TII->getIntrinsicID(&F)); + + assert(ID != Intrinsic::not_intrinsic && "FIXME: support real calls"); + + // Need types (starting with return) & args. 
+  SmallVector<LLT, 4> Tys;
TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS + : TargetOpcode::G_INTRINSIC, + Tys); + if (Res) + MIB.addDef(Res); + MIB.addIntrinsicID(ID); + return MIB; +} diff --git a/lib/Target/TargetIntrinsicInfo.cpp b/lib/Target/TargetIntrinsicInfo.cpp index 64bd56f6e7d..e8b71924e0d 100644 --- a/lib/Target/TargetIntrinsicInfo.cpp +++ b/lib/Target/TargetIntrinsicInfo.cpp @@ -22,7 +22,7 @@ TargetIntrinsicInfo::TargetIntrinsicInfo() { TargetIntrinsicInfo::~TargetIntrinsicInfo() { } -unsigned TargetIntrinsicInfo::getIntrinsicID(Function *F) const { +unsigned TargetIntrinsicInfo::getIntrinsicID(const Function *F) const { const ValueName *ValName = F->getValueName(); if (!ValName) return 0; diff --git a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll index 8663d312e0c..28005fad460 100644 --- a/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll +++ b/test/CodeGen/AArch64/GlobalISel/arm64-irtranslator.ll @@ -252,3 +252,22 @@ define void @store(i64* %addr, i64 addrspace(42)* %addr42, i64 %val1, i64 %val2) %sum = add i64 %val1, %val2 ret void } + +; CHECK-LABEL: name: intrinsics +; CHECK: [[CUR:%[0-9]+]](32) = COPY %w0 +; CHECK: [[BITS:%[0-9]+]](32) = COPY %w1 +; CHECK: [[PTR:%[0-9]+]](64) = G_INTRINSIC { p0, s32 } intrinsic(@llvm.returnaddress), 0 +; CHECK: [[PTR_VEC:%[0-9]+]](64) = G_FRAME_INDEX p0 %stack.0.ptr.vec +; CHECK: [[VEC:%[0-9]+]](64) = G_LOAD { <8 x s8>, p0 } [[PTR_VEC]] +; CHECK: G_INTRINSIC_W_SIDE_EFFECTS { unsized, <8 x s8>, <8 x s8>, p0 } intrinsic(@llvm.aarch64.neon.st2), [[VEC]], [[VEC]], [[PTR]] +; CHECK: RET_ReallyLR +declare i8* @llvm.returnaddress(i32) +declare void @llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8>, <8 x i8>, i8*) +declare { <8 x i8>, <8 x i8> } @llvm.aarch64.neon.ld2.v8i8.p0v8i8(<8 x i8>*) +define void @intrinsics(i32 %cur, i32 %bits) { + %ptr = call i8* @llvm.returnaddress(i32 0) + %ptr.vec = alloca <8 x i8> + %vec = load <8 x i8>, <8 x i8>* %ptr.vec + call void 
@llvm.aarch64.neon.st2.v8i8.p0i8(<8 x i8> %vec, <8 x i8> %vec, i8* %ptr) + ret void +}