Nodes[I]->resetIndex(I);
}
+// Specialization of Cfg::makeVariable for plain Variables: when the target
+// says this type must be split on a 32-bit architecture, allocate a
+// Variable64On32 instead of a plain Variable.  The Lo/Hi halves are created
+// later via initHiLo() (see the translate() loop), not here.
+// NOTE(review): assumes Target is already set when this is called — confirm
+// no variables are created before target construction.
+template <>
+Variable *Cfg::makeVariable<Variable>(Type Ty) {
+ SizeT Index = Variables.size();
+ Variable *Var = Target->shouldSplitToVariable64On32(Ty)
+ ? Variable64On32::create(this, Ty, Index)
+ : Variable::create(this, Ty, Index);
+ Variables.push_back(Var);
+ return Var;
+}
+
// Records Arg as a formal argument of the function: marks it as an argument
// and appends it to the Args list (argument order is preserved).
void Cfg::addArg(Variable *Arg) {
Arg->setIsArg();
Args.push_back(Arg);
}
TimerMarker T(TimerStack::TT_translate, this);
+ // Create the Hi and Lo variables where a split was needed
+ for (Variable *Var : Variables)
+ if (auto Var64On32 = llvm::dyn_cast<Variable64On32>(Var))
+ Var64On32->initHiLo(this);
+
dump("Initial CFG");
if (getContext()->getFlags().getEnableBlockProfile()) {
static void TlsInit() { ICE_TLS_INIT_FIELD(CurrentCfg); }
};
+// Out-of-line specialization declared here so every translation unit that
+// calls makeVariable<Variable> sees it; the definition may return a
+// Variable64On32 when the target requires splitting 64-bit variables.
+template <> Variable *Cfg::makeVariable<Variable>(Type Ty);
+
} // end of namespace Ice
#endif // SUBZERO_SRC_ICECFG_H
Variable *NewSrc = Func->makeVariable(Dest->getType());
if (BuildDefs::dump())
NewSrc->setName(Func, Dest->getName(Func) + "_phi");
+ if (auto *NewSrc64On32 = llvm::dyn_cast<Variable64On32>(NewSrc))
+ NewSrc64On32->initHiLo(Func);
this->Dest = NewSrc;
return InstAssign::create(Func, Dest, NewSrc);
}
kConst_Target, // leave space for target-specific constant kinds
kConst_Max = kConst_Target + MaxTargetKinds,
kVariable,
+ kVariable64On32,
kVariable_Target, // leave space for target-specific variable kinds
kVariable_Max = kVariable_Target + MaxTargetKinds,
// Target-specific operand classes use kTarget as the starting point for
Variable(const Variable &) = delete;
Variable &operator=(const Variable &) = delete;
- enum RegRequirement {
+ enum RegRequirement : uint8_t {
RR_MayHaveRegister,
RR_MustHaveRegister,
RR_MustNotHaveRegister,
SizeT getIndex() const { return Number; }
IceString getName(const Cfg *Func) const;
- void setName(Cfg *Func, const IceString &NewName) {
+ virtual void setName(Cfg *Func, const IceString &NewName) {
// Make sure that the name can only be set once.
assert(NameIndex == Cfg::IdentifierIndexInvalid);
if (!NewName.empty())
}
bool getIsArg() const { return IsArgument; }
- void setIsArg(bool Val = true) { IsArgument = Val; }
+ // Virtual so Variable64On32 can forward the flag to its Lo/Hi halves.
+ // NOTE(review): default arguments on virtual functions bind to the static
+ // type at the call site — overrides must repeat "= true" verbatim (the
+ // Variable64On32 override does) to avoid surprising behavior.
+ virtual void setIsArg(bool Val = true) { IsArgument = Val; }
bool getIsImplicitArg() const { return IsImplicitArgument; }
void setIsImplicitArg(bool Val = true) { IsImplicitArgument = Val; }
return Live.overlapsInst(Other->Live.getStart(), UseTrimmed);
}
- Variable *getLo() const { return LoVar; }
- Variable *getHi() const { return HiVar; }
- void setLoHi(Variable *Lo, Variable *Hi) {
- assert(LoVar == nullptr);
- assert(HiVar == nullptr);
- LoVar = Lo;
- HiVar = Hi;
- }
/// Creates a temporary copy of the variable with a different type. Used
/// primarily for syntactic correctness of textual assembly emission. Note
/// that only basic information is copied, in particular not IsArgument,
/// and validating live ranges. This is usually reserved for the stack
/// pointer.
bool IgnoreLiveness = false;
- /// StackOffset is the canonical location on stack (only if RegNum==NoRegister
- /// || IsArgument).
- int32_t StackOffset = 0;
+ RegRequirement RegRequirement = RR_MayHaveRegister;
/// RegNum is the allocated register, or NoRegister if it isn't
/// register-allocated.
int32_t RegNum = NoRegister;
/// RegNumTmp is the tentative assignment during register allocation.
int32_t RegNumTmp = NoRegister;
- RegRequirement RegRequirement = RR_MayHaveRegister;
+ /// StackOffset is the canonical location on stack (only if
+ /// RegNum==NoRegister || IsArgument).
+ int32_t StackOffset = 0;
LiveRange Live;
- // LoVar and HiVar are needed for lowering from 64 to 32 bits. When lowering
- // from I64 to I32 on a 32-bit architecture, we split the variable into two
- // machine-size pieces. LoVar is the low-order machine-size portion, and
- // HiVar is the remaining high-order portion.
- // TODO: It's wasteful to penalize all variables on all targets this way; use
- // a sparser representation. It's also wasteful for a 64-bit target.
- Variable *LoVar = nullptr;
- Variable *HiVar = nullptr;
/// VarsReal (and Operand::Vars) are set up such that Vars[0] == this.
Variable *VarsReal[1];
};
+/// Variable64On32 represents a 64-bit variable on a 32-bit architecture. In
+/// this situation the variable must be split into a low and a high word.
+/// The halves are created lazily by initHiLo() (after the Cfg and its target
+/// are fully constructed); getLo()/getHi() assert that this has happened.
+class Variable64On32 : public Variable {
+  Variable64On32() = delete;
+  Variable64On32(const Variable64On32 &) = delete;
+  Variable64On32 &operator=(const Variable64On32 &) = delete;
+
+public:
+  static Variable64On32 *create(Cfg *Func, Type Ty, SizeT Index) {
+    return new (Func->allocate<Variable64On32>())
+        Variable64On32(kVariable64On32, Ty, Index);
+  }
+
+  /// Renames the variable and, when the halves already exist, keeps their
+  /// derived "__lo"/"__hi" names in sync with the new name.
+  void setName(Cfg *Func, const IceString &NewName) override {
+    Variable::setName(Func, NewName);
+    if (LoVar && HiVar) {
+      LoVar->setName(Func, getName(Func) + "__lo");
+      HiVar->setName(Func, getName(Func) + "__hi");
+    }
+  }
+
+  /// Propagates the argument flag to the halves when they already exist.
+  void setIsArg(bool Val = true) override {
+    Variable::setIsArg(Val);
+    if (LoVar && HiVar) {
+      LoVar->setIsArg(Val);
+      HiVar->setIsArg(Val);
+    }
+  }
+
+  Variable *getLo() const {
+    assert(LoVar != nullptr);
+    return LoVar;
+  }
+  Variable *getHi() const {
+    assert(HiVar != nullptr);
+    return HiVar;
+  }
+
+  /// Creates the two 32-bit halves.  Must be called exactly once per
+  /// Variable64On32 (the asserts enforce this), and only once the Cfg is
+  /// fully constructed.
+  void initHiLo(Cfg *Func) {
+    assert(LoVar == nullptr);
+    assert(HiVar == nullptr);
+    LoVar = Func->makeVariable(IceType_i32);
+    HiVar = Func->makeVariable(IceType_i32);
+    LoVar->setIsArg(getIsArg());
+    HiVar->setIsArg(getIsArg());
+    // Guard name computation like the rest of the code base does: names are
+    // only meaningful in dump-enabled builds, and the string concatenation
+    // is wasted work otherwise.
+    if (BuildDefs::dump()) {
+      LoVar->setName(Func, getName(Func) + "__lo");
+      HiVar->setName(Func, getName(Func) + "__hi");
+    }
+  }
+
+  static bool classof(const Operand *Operand) {
+    OperandKind Kind = Operand->getKind();
+    return Kind == kVariable64On32;
+  }
+
+protected:
+  Variable64On32(OperandKind K, Type Ty, SizeT Index)
+      : Variable(K, Ty, Index) {
+    // Only 64-bit types may be split; catches accidental use on other types.
+    assert(typeWidthInBytes(Ty) == 8);
+  }
+
+  Variable *LoVar = nullptr;
+  Variable *HiVar = nullptr;
+};
+
+
enum MetadataKind {
VMK_Uses, /// Track only uses, not defs
VMK_SingleDefs, /// Track uses+defs, but only record single def
virtual SizeT getFrameOrStackReg() const = 0;
virtual size_t typeWidthInBytesOnStack(Type Ty) const = 0;
+ /// Return whether a 64-bit Variable should be split into a Variable64On32.
+ virtual bool shouldSplitToVariable64On32(Type Ty) const = 0;
+
bool hasComputedFrame() const { return HasComputedFrame; }
/// Returns true if this function calls a function that has the "returns
/// twice" attribute.
if (!CC.I64InRegs(&RegPair))
continue;
Variable *RegisterArg = Func->makeVariable(Ty);
- Variable *RegisterLo = Func->makeVariable(IceType_i32);
- Variable *RegisterHi = Func->makeVariable(IceType_i32);
- if (BuildDefs::dump()) {
- RegisterArg->setName(Func, "home_reg:" + Arg->getName(Func));
- RegisterLo->setName(Func, "home_reg_lo:" + Arg->getName(Func));
- RegisterHi->setName(Func, "home_reg_hi:" + Arg->getName(Func));
- }
- RegisterLo->setRegNum(RegPair.first);
- RegisterLo->setIsArg();
- RegisterHi->setRegNum(RegPair.second);
- RegisterHi->setIsArg();
- RegisterArg->setLoHi(RegisterLo, RegisterHi);
- RegisterArg->setIsArg();
+ auto *RegisterArg64On32 = llvm::cast<Variable64On32>(RegisterArg);
+ if (BuildDefs::dump())
+ RegisterArg64On32->setName(Func, "home_reg:" + Arg->getName(Func));
+ RegisterArg64On32->initHiLo(Func);
+ RegisterArg64On32->setIsArg();
+ RegisterArg64On32->getLo()->setRegNum(RegPair.first);
+ RegisterArg64On32->getHi()->setRegNum(RegPair.second);
Arg->setIsArg(false);
- Args[I] = RegisterArg;
+ Args[I] = RegisterArg64On32;
Context.insert(InstAssign::create(Func, Arg, RegisterArg));
continue;
} else {
// Assigns the in-memory stack location for one argument, advancing
// InArgsSizeBytes past it.  A split 64-bit argument is handled by recursing
// on its two 32-bit halves.
void TargetARM32::finishArgumentLowering(Variable *Arg, Variable *FramePtr,
size_t BasicFrameOffset,
size_t &InArgsSizeBytes) {
- Variable *Lo = Arg->getLo();
- Variable *Hi = Arg->getHi();
- Type Ty = Arg->getType();
- if (Lo && Hi && Ty == IceType_i64) {
- assert(Lo->getType() != IceType_i64); // don't want infinite recursion
- assert(Hi->getType() != IceType_i64); // don't want infinite recursion
+ // NOTE(review): getLo()/getHi() assert the halves exist — initHiLo() must
+ // have run for every Variable64On32 argument before lowering reaches here.
+ // The halves are i32 (initHiLo creates them so), so recursion terminates.
+ if (auto *Arg64On32 = llvm::dyn_cast<Variable64On32>(Arg)) {
+ Variable *Lo = Arg64On32->getLo();
+ Variable *Hi = Arg64On32->getHi();
finishArgumentLowering(Lo, FramePtr, BasicFrameOffset, InArgsSizeBytes);
finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, InArgsSizeBytes);
return;
}
+ Type Ty = Arg->getType();
InArgsSizeBytes = applyStackAlignmentTy(InArgsSizeBytes, Ty);
Arg->setStackOffset(BasicFrameOffset + InArgsSizeBytes);
InArgsSizeBytes += typeWidthInBytesOnStack(Ty);
}
}
-void TargetARM32::split64(Variable *Var) {
- assert(Var->getType() == IceType_i64);
- Variable *Lo = Var->getLo();
- Variable *Hi = Var->getHi();
- if (Lo) {
- assert(Hi);
- return;
- }
- assert(Hi == nullptr);
- Lo = Func->makeVariable(IceType_i32);
- Hi = Func->makeVariable(IceType_i32);
- if (BuildDefs::dump()) {
- Lo->setName(Func, Var->getName(Func) + "__lo");
- Hi->setName(Func, Var->getName(Func) + "__hi");
- }
- Var->setLoHi(Lo, Hi);
- if (Var->getIsArg()) {
- Lo->setIsArg();
- Hi->setIsArg();
- }
-}
-
Operand *TargetARM32::loOperand(Operand *Operand) {
assert(Operand->getType() == IceType_i64);
if (Operand->getType() != IceType_i64)
return Operand;
- if (auto *Var = llvm::dyn_cast<Variable>(Operand)) {
- split64(Var);
- return Var->getLo();
- }
- if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
+ if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand))
+ return Var64On32->getLo();
+ if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand))
return Ctx->getConstantInt32(static_cast<uint32_t>(Const->getValue()));
- }
if (auto *Mem = llvm::dyn_cast<OperandARM32Mem>(Operand)) {
// Conservatively disallow memory operands with side-effects (pre/post
// increment) in case of duplication.
assert(Operand->getType() == IceType_i64);
if (Operand->getType() != IceType_i64)
return Operand;
- if (auto *Var = llvm::dyn_cast<Variable>(Operand)) {
- split64(Var);
- return Var->getHi();
- }
+ if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand))
+ return Var64On32->getHi();
if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
return Ctx->getConstantInt32(
static_cast<uint32_t>(Const->getValue() >> 32));
// Assign the result of the call to Dest.
if (ReturnReg) {
if (ReturnRegHi) {
- assert(Dest->getType() == IceType_i64);
- split64(Dest);
- Variable *DestLo = Dest->getLo();
- Variable *DestHi = Dest->getHi();
+ auto *Dest64On32 = llvm::cast<Variable64On32>(Dest);
+ Variable *DestLo = Dest64On32->getLo();
+ Variable *DestHi = Dest64On32->getHi();
_mov(DestLo, ReturnReg);
_mov(DestHi, ReturnRegHi);
} else {
if (isVectorType(Dest->getType())) {
UnimplementedError(Func->getContext()->getFlags());
break;
- } else if (Dest->getType() == IceType_i64) {
- split64(Dest);
- Context.insert(InstFakeDef::create(Func, Dest->getLo()));
- Context.insert(InstFakeDef::create(Func, Dest->getHi()));
+ }
+ if (auto *Dest64On32 = llvm::dyn_cast<Variable64On32>(Dest)) {
+ Context.insert(InstFakeDef::create(Func, Dest64On32->getLo()));
+ Context.insert(InstFakeDef::create(Func, Dest64On32->getHi()));
UnimplementedError(Func->getContext()->getFlags());
break;
}
Variable *T0 = makeReg(IceType_i32);
Variable *T1 = makeReg(IceType_i32);
Variable *Src0R = legalizeToReg(Src0);
- split64(Dest);
_vmov(InstARM32Vmov::RegisterPair(T0, T1), Src0R);
- lowerAssign(InstAssign::create(Func, Dest->getLo(), T0));
- lowerAssign(InstAssign::create(Func, Dest->getHi(), T1));
+ auto *Dest64On32 = llvm::cast<Variable64On32>(Dest);
+ lowerAssign(InstAssign::create(Func, Dest64On32->getLo(), T0));
+ lowerAssign(InstAssign::create(Func, Dest64On32->getHi(), T1));
break;
}
case IceType_f64: {
return (typeWidthInBytes(Ty) + 3) & ~3;
}
+ /// ARM32 is always 32-bit: exactly the i64 variables need splitting.
+ bool shouldSplitToVariable64On32(Type Ty) const override {
+ return Ty == IceType_i64;
+ }
+
// TODO(ascull): what size is best for ARM?
SizeT getMinJumpTableSize() const override { return 3; }
void emitJumpTable(const Cfg *Func,
void addProlog(CfgNode *Node) override;
void addEpilog(CfgNode *Node) override;
- /// Ensure that a 64-bit Variable has been split into 2 32-bit Variables,
- /// creating them if necessary. This is needed for all I64 operations.
- void split64(Variable *Var);
Operand *loOperand(Operand *Operand);
Operand *hiOperand(Operand *Operand);
void finishArgumentLowering(Variable *Arg, Variable *FramePtr,
return (typeWidthInBytes(Ty) + 3) & ~3;
}
+ /// MIPS32 is always 32-bit: exactly the i64 variables need splitting.
+ bool shouldSplitToVariable64On32(Type Ty) const override {
+ return Ty == IceType_i64;
+ }
+
// TODO(ascull): what is the best size of MIPS?
SizeT getMinJumpTableSize() const override { return 3; }
void emitJumpTable(const Cfg *Func,
// Assign the result of the call to Dest.
if (ReturnReg) {
if (ReturnRegHi) {
- assert(Dest->getType() == IceType_i64);
- split64(Dest);
- Variable *DestLo = Dest->getLo();
- Variable *DestHi = Dest->getHi();
+ auto *Dest64On32 = llvm::cast<Variable64On32>(Dest);
+ Variable *DestLo = Dest64On32->getLo();
+ Variable *DestHi = Dest64On32->getHi();
_mov(DestLo, ReturnReg);
_mov(DestHi, ReturnRegHi);
} else {
return Utils::applyAlignment(typeWidthInBytes(Ty), WordSizeInBytes);
}
+  /// x86: i64 variables are split only on the 32-bit flavor of the target;
+  /// x86-64 handles 64-bit values natively.
+  bool shouldSplitToVariable64On32(Type Ty) const override {
+    return !Traits::Is64Bit && Ty == IceType_i64;
+  }
+
SizeT getMinJumpTableSize() const override { return 4; }
void emitVariable(const Variable *Var) const override;
void emit(const ConstantDouble *C) const final;
void initNodeForLowering(CfgNode *Node) override;
- /// x86-32: Ensure that a 64-bit Variable has been split into 2 32-bit
- /// Variables, creating them if necessary. This is needed for all I64
- /// operations, and it is needed for pushing F64 arguments for function calls
- /// using the 32-bit push instruction (though the latter could be done by
- /// directly writing to the stack).
- ///
- /// x86-64: Complains loudly if invoked because the cpu can handle 64-bit
- /// types natively.
- template <typename T = Traits>
- typename std::enable_if<!T::Is64Bit, void>::type split64(Variable *Var);
- template <typename T = Traits>
- typename std::enable_if<T::Is64Bit, void>::type split64(Variable *) {
- llvm::report_fatal_error(
- "Hey, yo! This is x86-64. Watcha doin'? (split64)");
- }
template <typename T = Traits>
typename std::enable_if<!T::Is64Bit, Operand>::type *
Variable *FramePtr,
size_t BasicFrameOffset,
size_t &InArgsSizeBytes) {
- Variable *Lo = Arg->getLo();
- Variable *Hi = Arg->getHi();
- Type Ty = Arg->getType();
- if (!Traits::Is64Bit && Lo && Hi && Ty == IceType_i64) {
- assert(Lo->getType() != IceType_i64); // don't want infinite recursion
- assert(Hi->getType() != IceType_i64); // don't want infinite recursion
- finishArgumentLowering(Lo, FramePtr, BasicFrameOffset, InArgsSizeBytes);
- finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, InArgsSizeBytes);
- return;
+ if (!Traits::Is64Bit) {
+ if (auto *Arg64On32 = llvm::dyn_cast<Variable64On32>(Arg)) {
+ Variable *Lo = Arg64On32->getLo();
+ Variable *Hi = Arg64On32->getHi();
+ finishArgumentLowering(Lo, FramePtr, BasicFrameOffset, InArgsSizeBytes);
+ finishArgumentLowering(Hi, FramePtr, BasicFrameOffset, InArgsSizeBytes);
+ return;
+ }
}
+ Type Ty = Arg->getType();
if (isVectorType(Ty)) {
InArgsSizeBytes = Traits::applyStackAlignment(InArgsSizeBytes);
}
template <class Machine>
template <typename T>
-typename std::enable_if<!T::Is64Bit, void>::type
-TargetX86Base<Machine>::split64(Variable *Var) {
- switch (Var->getType()) {
- default:
- return;
- case IceType_i64:
- // TODO: Only consider F64 if we need to push each half when passing as an
- // argument to a function call. Note that each half is still typed as I32.
- case IceType_f64:
- break;
- }
- Variable *Lo = Var->getLo();
- Variable *Hi = Var->getHi();
- if (Lo) {
- assert(Hi);
- return;
- }
- assert(Hi == nullptr);
- Lo = Func->makeVariable(IceType_i32);
- Hi = Func->makeVariable(IceType_i32);
- if (BuildDefs::dump()) {
- Lo->setName(Func, Var->getName(Func) + "__lo");
- Hi->setName(Func, Var->getName(Func) + "__hi");
- }
- Var->setLoHi(Lo, Hi);
- if (Var->getIsArg()) {
- Lo->setIsArg();
- Hi->setIsArg();
- }
-}
-
-template <class Machine>
-template <typename T>
typename std::enable_if<!T::Is64Bit, Operand>::type *
TargetX86Base<Machine>::loOperand(Operand *Operand) {
assert(Operand->getType() == IceType_i64 ||
Operand->getType() == IceType_f64);
if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64)
return Operand;
- if (auto *Var = llvm::dyn_cast<Variable>(Operand)) {
- split64(Var);
- return Var->getLo();
- }
+ if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand))
+ return Var64On32->getLo();
if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
auto *ConstInt = llvm::dyn_cast<ConstantInteger32>(
Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue())));
Operand->getType() == IceType_f64);
if (Operand->getType() != IceType_i64 && Operand->getType() != IceType_f64)
return Operand;
- if (auto *Var = llvm::dyn_cast<Variable>(Operand)) {
- split64(Var);
- return Var->getHi();
- }
+ if (auto *Var64On32 = llvm::dyn_cast<Variable64On32>(Operand))
+ return Var64On32->getHi();
if (auto *Const = llvm::dyn_cast<ConstantInteger64>(Operand)) {
auto *ConstInt = llvm::dyn_cast<ConstantInteger32>(
Ctx->getConstantInt32(static_cast<int32_t>(Const->getValue() >> 32)));
_cvt(T, Src0RM, Traits::Insts::Cvt::Tps2dq);
_movp(Dest, T);
} else if (!Traits::Is64Bit && Dest->getType() == IceType_i64) {
- // Use a helper for converting floating-point values to 64-bit integers.
- // SSE2 appears to have no way to convert from xmm registers to something
- // like the edx:eax register pair, and gcc and clang both want to use x87
- // instructions complete with temporary manipulation of the status word.
- // This helper is not needed for x86-64.
- split64(Dest);
const SizeT MaxSrcs = 1;
Type SrcType = Inst->getSrc(0)->getType();
InstCall *Call =
} else if (Dest->getType() == IceType_i64 ||
(!Traits::Is64Bit && Dest->getType() == IceType_i32)) {
// Use a helper for both x86-32 and x86-64.
- if (!Traits::Is64Bit)
- split64(Dest);
const SizeT MaxSrcs = 1;
Type DestType = Dest->getType();
Type SrcType = Inst->getSrc(0)->getType();
return;
}
Variable *Dest = Instr->getDest();
- if (!Traits::Is64Bit && Dest->getType() == IceType_i64) {
- // Follow what GCC does and use a movq instead of what lowerLoad()
- // normally does (split the load into two). Thus, this skips
- // load/arithmetic op folding. Load/arithmetic folding can't happen
- // anyway, since this is x86-32 and integer arithmetic only happens on
- // 32-bit quantities.
- Variable *T = makeReg(IceType_f64);
- typename Traits::X86OperandMem *Addr =
- formMemoryOperand(Instr->getArg(0), IceType_f64);
- _movq(T, Addr);
- // Then cast the bits back out of the XMM register to the i64 Dest.
- InstCast *Cast = InstCast::create(Func, InstCast::Bitcast, Dest, T);
- lowerCast(Cast);
- // Make sure that the atomic load isn't elided when unused.
- Context.insert(InstFakeUse::create(Func, Dest->getLo()));
- Context.insert(InstFakeUse::create(Func, Dest->getHi()));
- return;
+ if (!Traits::Is64Bit) {
+ if (auto *Dest64On32 = llvm::dyn_cast<Variable64On32>(Dest)) {
+ // Follow what GCC does and use a movq instead of what lowerLoad()
+ // normally does (split the load into two). Thus, this skips
+ // load/arithmetic op folding. Load/arithmetic folding can't happen
+ // anyway, since this is x86-32 and integer arithmetic only happens on
+ // 32-bit quantities.
+ Variable *T = makeReg(IceType_f64);
+ typename Traits::X86OperandMem *Addr =
+ formMemoryOperand(Instr->getArg(0), IceType_f64);
+ _movq(T, Addr);
+ // Then cast the bits back out of the XMM register to the i64 Dest.
+ InstCast *Cast = InstCast::create(Func, InstCast::Bitcast, Dest, T);
+ lowerCast(Cast);
+ // Make sure that the atomic load isn't elided when unused.
+ Context.insert(InstFakeUse::create(Func, Dest64On32->getLo()));
+ Context.insert(InstFakeUse::create(Func, Dest64On32->getHi()));
+ return;
+ }
}
InstLoad *Load = InstLoad::create(Func, Dest, Instr->getArg(0));
lowerLoad(Load);