//
//===----------------------------------------------------------------------===//
+// TODO: Most floating-point instructions (except for simple moves and the
+// like) can raise exceptions -- should they have hasSideEffects=1 ?
+
//===----------------------------------------------------------------------===//
// Select instructions
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Load zero.
-let hasSideEffects = 0, isAsCheapAsAMove = 1, isMoveImm = 1 in {
+let isAsCheapAsAMove = 1, isMoveImm = 1 in {
def LZER : InherentRRE<"lzer", 0xB374, FP32, fpimm0>;
def LZDR : InherentRRE<"lzdr", 0xB375, FP64, fpimm0>;
def LZXR : InherentRRE<"lzxr", 0xB376, FP128, fpimm0>;
}
// Moves between two floating-point registers.
-let hasSideEffects = 0 in {
- def LER : UnaryRR <"ler", 0x38, null_frag, FP32, FP32>;
- def LDR : UnaryRR <"ldr", 0x28, null_frag, FP64, FP64>;
- def LXR : UnaryRRE<"lxr", 0xB365, null_frag, FP128, FP128>;
+def LER : UnaryRR <"ler", 0x38, null_frag, FP32, FP32>;
+def LDR : UnaryRR <"ldr", 0x28, null_frag, FP64, FP64>;
+def LXR : UnaryRRE<"lxr", 0xB365, null_frag, FP128, FP128>;
- // For z13 we prefer LDR over LER to avoid partial register dependencies.
- let isCodeGenOnly = 1 in
- def LDR32 : UnaryRR<"ldr", 0x28, null_frag, FP32, FP32>;
-}
+// For z13 we prefer LDR over LER to avoid partial register dependencies.
+let isCodeGenOnly = 1 in
+ def LDR32 : UnaryRR<"ldr", 0x28, null_frag, FP32, FP32>;
// Moves between two floating-point registers that also set the condition
// codes.
// Load instructions
//===----------------------------------------------------------------------===//
-let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
+let canFoldAsLoad = 1, SimpleBDXLoad = 1, mayLoad = 1 in {
defm LE : UnaryRXPair<"le", 0x78, 0xED64, load, FP32, 4>;
defm LD : UnaryRXPair<"ld", 0x68, 0xED65, load, FP64, 8>;
// Store instructions
//===----------------------------------------------------------------------===//
-let SimpleBDXStore = 1 in {
+let SimpleBDXStore = 1, mayStore = 1 in {
defm STE : StoreRXPair<"ste", 0x70, 0xED66, store, FP32, 4>;
defm STD : StoreRXPair<"std", 0x60, 0xED67, store, FP64, 8>;
//===----------------------------------------------------------------------===//
let hasSideEffects = 1 in {
- def EFPC : InherentRRE<"efpc", 0xB38C, GR32, int_s390_efpc>;
- def STFPC : StoreInherentS<"stfpc", 0xB29C, storei<int_s390_efpc>, 4>;
+ let mayLoad = 1, mayStore = 1 in {
+ // TODO: EFPC and SFPC do not touch memory at all
+ def EFPC : InherentRRE<"efpc", 0xB38C, GR32, int_s390_efpc>;
+ def STFPC : StoreInherentS<"stfpc", 0xB29C, storei<int_s390_efpc>, 4>;
- def SFPC : SideEffectUnaryRRE<"sfpc", 0xB384, GR32, int_s390_sfpc>;
- def LFPC : SideEffectUnaryS<"lfpc", 0xB29D, loadu<int_s390_sfpc>, 4>;
+ def SFPC : SideEffectUnaryRRE<"sfpc", 0xB384, GR32, int_s390_sfpc>;
+ def LFPC : SideEffectUnaryS<"lfpc", 0xB29D, loadu<int_s390_sfpc>, 4>;
+ }
def SFASR : SideEffectUnaryRRE<"sfasr", 0xB385, GR32, null_frag>;
def LFAS : SideEffectUnaryS<"lfas", 0xB2BD, null_frag, 4>;
// Stack allocation
//===----------------------------------------------------------------------===//
-let hasNoSchedulingInfo = 1 in {
+// The callseq_start node requires the hasSideEffects flag, even though these
+// instructions are noops on SystemZ.
+let hasNoSchedulingInfo = 1, hasSideEffects = 1 in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
[(callseq_start timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i64imm:$amt1, i64imm:$amt2),
[(callseq_end timm:$amt1, timm:$amt2)]>;
}
-let hasSideEffects = 0 in {
- // Takes as input the value of the stack pointer after a dynamic allocation
- // has been made. Sets the output to the address of the dynamically-
- // allocated area itself, skipping the outgoing arguments.
- //
- // This expands to an LA or LAY instruction. We restrict the offset
- // to the range of LA and keep the LAY range in reserve for when
- // the size of the outgoing arguments is added.
- def ADJDYNALLOC : Pseudo<(outs GR64:$dst), (ins dynalloc12only:$src),
- [(set GR64:$dst, dynalloc12only:$src)]>;
-}
+// Takes as input the value of the stack pointer after a dynamic allocation
+// has been made. Sets the output to the address of the dynamically-
+// allocated area itself, skipping the outgoing arguments.
+//
+// This expands to an LA or LAY instruction. We restrict the offset
+// to the range of LA and keep the LAY range in reserve for when
+// the size of the outgoing arguments is added.
+def ADJDYNALLOC : Pseudo<(outs GR64:$dst), (ins dynalloc12only:$src),
+ [(set GR64:$dst, dynalloc12only:$src)]>;
+
//===----------------------------------------------------------------------===//
// Branch instructions
//===----------------------------------------------------------------------===//
// Unconditional trap.
-let hasCtrlDep = 1 in
+let hasCtrlDep = 1, hasSideEffects = 1 in
def Trap : Alias<4, (outs), (ins), [(trap)]>;
// Conditional trap.
-let hasCtrlDep = 1, Uses = [CC] in
+let hasCtrlDep = 1, Uses = [CC], hasSideEffects = 1 in
def CondTrap : Alias<4, (outs), (ins cond4:$valid, cond4:$R1), []>;
// Fused compare-and-trap instructions.
-let hasCtrlDep = 1 in {
+let hasCtrlDep = 1, hasSideEffects = 1 in {
// These patterns work the same way as for compare-and-branch.
defm CRT : CmpBranchRRFcPair<"crt", 0xB972, GR32>;
defm CGRT : CmpBranchRRFcPair<"cgrt", 0xB960, GR64>;
//===----------------------------------------------------------------------===//
// Register moves.
-let hasSideEffects = 0 in {
- // Expands to LR, RISBHG or RISBLG, depending on the choice of registers.
- def LRMux : UnaryRRPseudo<"lr", null_frag, GRX32, GRX32>,
- Requires<[FeatureHighWord]>;
- def LR : UnaryRR <"lr", 0x18, null_frag, GR32, GR32>;
- def LGR : UnaryRRE<"lgr", 0xB904, null_frag, GR64, GR64>;
-}
+// Expands to LR, RISBHG or RISBLG, depending on the choice of registers.
+def LRMux : UnaryRRPseudo<"lr", null_frag, GRX32, GRX32>,
+ Requires<[FeatureHighWord]>;
+def LR : UnaryRR <"lr", 0x18, null_frag, GR32, GR32>;
+def LGR : UnaryRRE<"lgr", 0xB904, null_frag, GR64, GR64>;
+
let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
def LTR : UnaryRR <"ltr", 0x12, null_frag, GR32, GR32>;
def LTGR : UnaryRRE<"ltgr", 0xB902, null_frag, GR64, GR64>;
def PAIR128 : Pseudo<(outs GR128:$dst), (ins GR64:$hi, GR64:$lo), []>;
// Immediate moves.
-let hasSideEffects = 0, isAsCheapAsAMove = 1, isMoveImm = 1,
- isReMaterializable = 1 in {
+let isAsCheapAsAMove = 1, isMoveImm = 1, isReMaterializable = 1 in {
// 16-bit sign-extended immediates. LHIMux expands to LHI or IIHF,
// depending on the choice of register.
def LHIMux : UnaryRIPseudo<bitconvert, GRX32, imm32sx16>,
}
// Register loads.
-let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
+let canFoldAsLoad = 1, SimpleBDXLoad = 1, mayLoad = 1 in {
// Expands to L, LY or LFH, depending on the choice of register.
def LMux : UnaryRXYPseudo<"l", load, GRX32, 4>,
Requires<[FeatureHighWord]>;
}
// Load and trap.
-let Predicates = [FeatureLoadAndTrap] in {
+let Predicates = [FeatureLoadAndTrap], hasSideEffects = 1 in {
def LAT : UnaryRXY<"lat", 0xE39F, null_frag, GR32, 4>;
def LFHAT : UnaryRXY<"lfhat", 0xE3C8, null_frag, GRH32, 4>;
def LGAT : UnaryRXY<"lgat", 0xE385, null_frag, GR64, 8>;
}
// Register stores.
-let SimpleBDXStore = 1 in {
+let SimpleBDXStore = 1, mayStore = 1 in {
// Expands to ST, STY or STFH, depending on the choice of register.
def STMux : StoreRXYPseudo<store, GRX32, 4>,
Requires<[FeatureHighWord]>;
let Predicates = [FeatureLoadStoreOnCond2], Uses = [CC] in {
// Load immediate on condition. Matched via DAG pattern and created
// by the PeepholeOptimizer via FoldImmediate.
- let hasSideEffects = 0 in {
- // Expands to LOCHI or LOCHHI, depending on the choice of register.
- def LOCHIMux : CondBinaryRIEPseudo<GRX32, imm32sx16>;
- defm LOCHHI : CondBinaryRIEPair<"lochhi", 0xEC4E, GRH32, imm32sx16>;
- defm LOCHI : CondBinaryRIEPair<"lochi", 0xEC42, GR32, imm32sx16>;
- defm LOCGHI : CondBinaryRIEPair<"locghi", 0xEC46, GR64, imm64sx16>;
- }
+
+ // Expands to LOCHI or LOCHHI, depending on the choice of register.
+ def LOCHIMux : CondBinaryRIEPseudo<GRX32, imm32sx16>;
+ defm LOCHHI : CondBinaryRIEPair<"lochhi", 0xEC4E, GRH32, imm32sx16>;
+ defm LOCHI : CondBinaryRIEPair<"lochi", 0xEC42, GR32, imm32sx16>;
+ defm LOCGHI : CondBinaryRIEPair<"locghi", 0xEC46, GR64, imm64sx16>;
// Move register on condition. Expanded from Select* pseudos and
// created by early if-conversion.
- let hasSideEffects = 0, isCommutable = 1 in {
+ let isCommutable = 1 in {
// Expands to LOCR or LOCFHR or a branch-and-move sequence,
// depending on the choice of registers.
def LOCRMux : CondBinaryRRFPseudo<GRX32, GRX32>;
let Predicates = [FeatureLoadStoreOnCond], Uses = [CC] in {
// Move register on condition. Expanded from Select* pseudos and
// created by early if-conversion.
- let hasSideEffects = 0, isCommutable = 1 in {
+ let isCommutable = 1 in {
defm LOCR : CondBinaryRRFPair<"locr", 0xB9F2, GR32, GR32>;
defm LOCGR : CondBinaryRRFPair<"locgr", 0xB9E2, GR64, GR64>;
}
//===----------------------------------------------------------------------===//
// 32-bit extensions from registers.
-let hasSideEffects = 0 in {
- def LBR : UnaryRRE<"lbr", 0xB926, sext8, GR32, GR32>;
- def LHR : UnaryRRE<"lhr", 0xB927, sext16, GR32, GR32>;
-}
+def LBR : UnaryRRE<"lbr", 0xB926, sext8, GR32, GR32>;
+def LHR : UnaryRRE<"lhr", 0xB927, sext16, GR32, GR32>;
// 64-bit extensions from registers.
-let hasSideEffects = 0 in {
- def LGBR : UnaryRRE<"lgbr", 0xB906, sext8, GR64, GR64>;
- def LGHR : UnaryRRE<"lghr", 0xB907, sext16, GR64, GR64>;
- def LGFR : UnaryRRE<"lgfr", 0xB914, sext32, GR64, GR32>;
-}
+def LGBR : UnaryRRE<"lgbr", 0xB906, sext8, GR64, GR64>;
+def LGHR : UnaryRRE<"lghr", 0xB907, sext16, GR64, GR64>;
+def LGFR : UnaryRRE<"lgfr", 0xB914, sext32, GR64, GR32>;
+
let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in
def LTGFR : UnaryRRE<"ltgfr", 0xB912, null_frag, GR64, GR32>;
//===----------------------------------------------------------------------===//
// 32-bit extensions from registers.
-let hasSideEffects = 0 in {
- // Expands to LLCR or RISB[LH]G, depending on the choice of registers.
- def LLCRMux : UnaryRRPseudo<"llcr", zext8, GRX32, GRX32>,
- Requires<[FeatureHighWord]>;
- def LLCR : UnaryRRE<"llcr", 0xB994, zext8, GR32, GR32>;
- // Expands to LLHR or RISB[LH]G, depending on the choice of registers.
- def LLHRMux : UnaryRRPseudo<"llhr", zext16, GRX32, GRX32>,
- Requires<[FeatureHighWord]>;
- def LLHR : UnaryRRE<"llhr", 0xB995, zext16, GR32, GR32>;
-}
+
+// Expands to LLCR or RISB[LH]G, depending on the choice of registers.
+def LLCRMux : UnaryRRPseudo<"llcr", zext8, GRX32, GRX32>,
+ Requires<[FeatureHighWord]>;
+def LLCR : UnaryRRE<"llcr", 0xB994, zext8, GR32, GR32>;
+// Expands to LLHR or RISB[LH]G, depending on the choice of registers.
+def LLHRMux : UnaryRRPseudo<"llhr", zext16, GRX32, GRX32>,
+ Requires<[FeatureHighWord]>;
+def LLHR : UnaryRRE<"llhr", 0xB995, zext16, GR32, GR32>;
// 64-bit extensions from registers.
-let hasSideEffects = 0 in {
- def LLGCR : UnaryRRE<"llgcr", 0xB984, zext8, GR64, GR64>;
- def LLGHR : UnaryRRE<"llghr", 0xB985, zext16, GR64, GR64>;
- def LLGFR : UnaryRRE<"llgfr", 0xB916, zext32, GR64, GR32>;
-}
+def LLGCR : UnaryRRE<"llgcr", 0xB984, zext8, GR64, GR64>;
+def LLGHR : UnaryRRE<"llghr", 0xB985, zext16, GR64, GR64>;
+def LLGFR : UnaryRRE<"llgfr", 0xB916, zext32, GR64, GR32>;
// Match 32-to-64-bit zero extensions in which the source is already
// in a 64-bit register.
}
// Load and trap.
-let Predicates = [FeatureLoadAndTrap] in {
+let Predicates = [FeatureLoadAndTrap], hasSideEffects = 1 in {
def LLGFAT : UnaryRXY<"llgfat", 0xE39D, null_frag, GR64, 4>;
def LLGTAT : UnaryRXY<"llgtat", 0xE39C, null_frag, GR64, 4>;
}
//===----------------------------------------------------------------------===//
// Byte-swapping register moves.
-let hasSideEffects = 0 in {
- def LRVR : UnaryRRE<"lrvr", 0xB91F, bswap, GR32, GR32>;
- def LRVGR : UnaryRRE<"lrvgr", 0xB90F, bswap, GR64, GR64>;
-}
+def LRVR : UnaryRRE<"lrvr", 0xB91F, bswap, GR32, GR32>;
+def LRVGR : UnaryRRE<"lrvgr", 0xB90F, bswap, GR64, GR64>;
// Byte-swapping loads. Unlike normal loads, these instructions are
// allowed to access storage more than once.
//===----------------------------------------------------------------------===//
// Load BDX-style addresses.
-let hasSideEffects = 0, isAsCheapAsAMove = 1, isReMaterializable = 1 in
+let isAsCheapAsAMove = 1, isReMaterializable = 1 in
defm LA : LoadAddressRXPair<"la", 0x41, 0xE371, bitconvert>;
// Load a PC-relative address. There's no version of this instruction
// with a 16-bit offset, so there's no relaxation.
-let hasSideEffects = 0, isAsCheapAsAMove = 1, isMoveImm = 1,
- isReMaterializable = 1 in
+let isAsCheapAsAMove = 1, isMoveImm = 1, isReMaterializable = 1 in
def LARL : LoadAddressRIL<"larl", 0xC00, bitconvert>;
// Load the Global Offset Table address. This will be lowered into a
Requires<[FeatureMiscellaneousExtensions2]>;
def MLR : BinaryRRE<"mlr", 0xB996, null_frag, GR128, GR32>;
def MLGR : BinaryRRE<"mlgr", 0xB986, null_frag, GR128, GR64>;
+
def : Pat<(z_smul_lohi GR64:$src1, GR64:$src2),
(MGRK GR64:$src1, GR64:$src2)>;
def : Pat<(z_umul_lohi GR64:$src1, GR64:$src2),
Requires<[FeatureMiscellaneousExtensions2]>;
def ML : BinaryRXY<"ml", 0xE396, null_frag, GR128, load, 4>;
def MLG : BinaryRXY<"mlg", 0xE386, null_frag, GR128, load, 8>;
+
def : Pat<(z_smul_lohi GR64:$src1, (i64 (load bdxaddr20only:$src2))),
(MG (AEXT128 GR64:$src1), bdxaddr20only:$src2)>;
def : Pat<(z_umul_lohi GR64:$src1, (i64 (load bdxaddr20only:$src2))),
//===----------------------------------------------------------------------===//
// Logical shift left.
-let hasSideEffects = 0 in {
- defm SLL : BinaryRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
- def SLLG : BinaryRSY<"sllg", 0xEB0D, shl, GR64>;
- def SLDL : BinaryRS<"sldl", 0x8D, null_frag, GR128>;
-}
+defm SLL : BinaryRSAndK<"sll", 0x89, 0xEBDF, shl, GR32>;
+def SLLG : BinaryRSY<"sllg", 0xEB0D, shl, GR64>;
+def SLDL : BinaryRS<"sldl", 0x8D, null_frag, GR128>;
// Arithmetic shift left.
let Defs = [CC] in {
}
// Logical shift right.
-let hasSideEffects = 0 in {
- defm SRL : BinaryRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>;
- def SRLG : BinaryRSY<"srlg", 0xEB0C, srl, GR64>;
- def SRDL : BinaryRS<"srdl", 0x8C, null_frag, GR128>;
-}
+defm SRL : BinaryRSAndK<"srl", 0x88, 0xEBDE, srl, GR32>;
+def SRLG : BinaryRSY<"srlg", 0xEB0C, srl, GR64>;
+def SRDL : BinaryRS<"srdl", 0x8C, null_frag, GR128>;
// Arithmetic shift right.
let Defs = [CC], CCValues = 0xE, CompareZeroCCMask = 0xE in {
}
// Rotate left.
-let hasSideEffects = 0 in {
- def RLL : BinaryRSY<"rll", 0xEB1D, rotl, GR32>;
- def RLLG : BinaryRSY<"rllg", 0xEB1C, rotl, GR64>;
-}
+def RLL : BinaryRSY<"rll", 0xEB1D, rotl, GR32>;
+def RLLG : BinaryRSY<"rllg", 0xEB1C, rotl, GR64>;
// Rotate second operand left and inserted selected bits into first operand.
// These can act like 32-bit operands provided that the constant start and
// Prefetch and execution hint
//===----------------------------------------------------------------------===//
-def PFD : PrefetchRXY<"pfd", 0xE336, z_prefetch>;
-def PFDRL : PrefetchRILPC<"pfdrl", 0xC62, z_prefetch>;
+let mayLoad = 1, mayStore = 1 in {
+ def PFD : PrefetchRXY<"pfd", 0xE336, z_prefetch>;
+ def PFDRL : PrefetchRILPC<"pfdrl", 0xC62, z_prefetch>;
+}
-let Predicates = [FeatureExecutionHint] in {
+let Predicates = [FeatureExecutionHint], hasSideEffects = 1 in {
// Branch Prediction Preload
def BPP : BranchPreloadSMI<"bpp", 0xC7>;
def BPRP : BranchPreloadMII<"bprp", 0xC5>;
// Guarded storage
//===----------------------------------------------------------------------===//
-let Predicates = [FeatureGuardedStorage] in {
+// These instructions use and/or modify the guarded storage control
+// registers, which we do not otherwise model, so they should have
+// hasSideEffects.
+let Predicates = [FeatureGuardedStorage], hasSideEffects = 1 in {
def LGG : UnaryRXY<"lgg", 0xE34C, null_frag, GR64, 8>;
def LLGFSG : UnaryRXY<"llgfsg", 0xE348, null_frag, GR64, 4>;
// Load access multiple.
defm LAM : LoadMultipleRSPair<"lam", 0x9A, 0xEB9A, AR32>;
-// Load access multiple.
+// Store access multiple.
defm STAM : StoreMultipleRSPair<"stam", 0x9B, 0xEB9B, AR32>;
//===----------------------------------------------------------------------===//
let mayStore = 1, usesCustomInserter = 1, Defs = [CC] in {
def TBEGIN : SideEffectBinarySIL<"tbegin", 0xE560, z_tbegin, imm32zx16>;
def TBEGIN_nofloat : SideEffectBinarySILPseudo<z_tbegin_nofloat, imm32zx16>;
-
def TBEGINC : SideEffectBinarySIL<"tbeginc", 0xE561,
int_s390_tbeginc, imm32zx16>;
}
def TEND : SideEffectInherentS<"tend", 0xB2F8, z_tend>;
// Transaction Abort
- let isTerminator = 1, isBarrier = 1 in
+ // TODO: Shouldn't be mayLoad or mayStore.
+ let isTerminator = 1, isBarrier = 1, mayLoad = 1, mayStore = 1,
+ hasSideEffects = 1 in
def TABORT : SideEffectAddressS<"tabort", 0xB2FC, int_s390_tabort>;
// Nontransactional Store
// .insn directive instructions
//===----------------------------------------------------------------------===//
-let isCodeGenOnly = 1 in {
+let isCodeGenOnly = 1, hasSideEffects = 1 in {
def InsnE : DirectiveInsnE<(outs), (ins imm64zx16:$enc), ".insn e,$enc", []>;
def InsnRI : DirectiveInsnRI<(outs), (ins imm64zx32:$enc, AnyReg:$R1,
imm32sx16:$I2),