/// - InsnID - Instruction ID
/// - The predicate to test
GIM_CheckAPFloatImmPredicate,
- /// Check a memory operation is non-atomic.
+ /// Check a memory operation has the specified atomic ordering.
/// - InsnID - Instruction ID
- GIM_CheckNonAtomic,
+ /// - Ordering - The AtomicOrdering value
+ GIM_CheckAtomicOrdering,
/// Check the type for the specified operand
/// - InsnID - Instruction ID
return false;
break;
}
- case GIM_CheckNonAtomic: {
+ case GIM_CheckAtomicOrdering: {
int64_t InsnID = MatchTable[CurrentIdx++];
+ AtomicOrdering Ordering = (AtomicOrdering)MatchTable[CurrentIdx++];
DEBUG_WITH_TYPE(TgtInstructionSelector::getName(),
- dbgs() << CurrentIdx << ": GIM_CheckNonAtomic(MIs["
- << InsnID << "])\n");
+ dbgs() << CurrentIdx << ": GIM_CheckAtomicOrdering(MIs["
+ << InsnID << "], " << (uint64_t)Ordering << ")\n");
assert(State.MIs[InsnID] != nullptr && "Used insn before defined");
- assert((State.MIs[InsnID]->getOpcode() == TargetOpcode::G_LOAD ||
- State.MIs[InsnID]->getOpcode() == TargetOpcode::G_STORE) &&
- "Expected G_LOAD/G_STORE");
if (!State.MIs[InsnID]->hasOneMemOperand())
if (handleReject() == RejectAndGiveUp)
return false;
for (const auto &MMO : State.MIs[InsnID]->memoperands())
- if (MMO->getOrdering() != AtomicOrdering::NotAtomic)
+ if (MMO->getOrdering() != Ordering)
if (handleReject() == RejectAndGiveUp)
return false;
break;
}
-
case GIM_CheckType: {
int64_t InsnID = MatchTable[CurrentIdx++];
int64_t OpIdx = MatchTable[CurrentIdx++];
// G_STORE with a non-atomic MachineMemOperand.
def : GINodeEquiv<G_STORE, st> { let CheckMMOIsNonAtomic = 1; }
+def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap>;
+def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap>;
+def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add>;
+def : GINodeEquiv<G_ATOMICRMW_SUB, atomic_load_sub>;
+def : GINodeEquiv<G_ATOMICRMW_AND, atomic_load_and>;
+def : GINodeEquiv<G_ATOMICRMW_NAND, atomic_load_nand>;
+def : GINodeEquiv<G_ATOMICRMW_OR, atomic_load_or>;
+def : GINodeEquiv<G_ATOMICRMW_XOR, atomic_load_xor>;
+def : GINodeEquiv<G_ATOMICRMW_MIN, atomic_load_min>;
+def : GINodeEquiv<G_ATOMICRMW_MAX, atomic_load_max>;
+def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin>;
+def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax>;
+
// Specifies the GlobalISel equivalents for SelectionDAG's ComplexPattern.
// Should be used on defs that subclass GIComplexOperandMatcher<>.
class GIComplexPatternEquiv<ComplexPattern seldag> {
--- /dev/null
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -mattr=+lse -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @atomicrmw_xchg_i64(i64* %addr) { ret void }
+ define void @atomicrmw_add_i64(i64* %addr) { ret void }
+ define void @atomicrmw_add_i32(i64* %addr) { ret void }
+ define void @atomicrmw_sub_i32(i64* %addr) { ret void }
+ define void @atomicrmw_and_i32(i64* %addr) { ret void }
+ ; nand isn't legal
+ define void @atomicrmw_or_i32(i64* %addr) { ret void }
+ define void @atomicrmw_xor_i32(i64* %addr) { ret void }
+ define void @atomicrmw_min_i32(i64* %addr) { ret void }
+ define void @atomicrmw_max_i32(i64* %addr) { ret void }
+ define void @atomicrmw_umin_i32(i64* %addr) { ret void }
+ define void @atomicrmw_umax_i32(i64* %addr) { ret void }
+...
+
+---
+name: atomicrmw_xchg_i64
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_xchg_i64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr64 = SWPX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %x0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 1
+ %2:gpr(s64) = G_ATOMICRMW_XCHG %0, %1 :: (load store monotonic 8 on %ir.addr)
+ %x0 = COPY %2(s64)
+...
+---
+name: atomicrmw_add_i64
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_add_i64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr64 = LDADDX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %x0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 1
+ %2:gpr(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 8 on %ir.addr)
+ %x0 = COPY %2(s64)
+...
+---
+name: atomicrmw_add_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_add_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_sub_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_sub_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_and_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_and_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[CST2:%[0-9]+]]:gpr32 = ORNWrr %wzr, [[CST]]
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDCLRAW [[CST2]], [[COPY]] :: (load store acquire 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_AND %0, %1 :: (load store acquire 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_or_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_or_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSETLW [[CST]], [[COPY]] :: (load store release 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_OR %0, %1 :: (load store release 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_xor_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_xor_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDEORALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_min_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_min_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMINALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_max_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_max_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMAXALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_umin_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_umin_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMINALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
+
+---
+name: atomicrmw_umax_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: atomicrmw_umax_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMAXALW [[CST]], [[COPY]] :: (load store acq_rel 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 1
+ %2:gpr(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store acq_rel 8 on %ir.addr)
+ %w0 = COPY %2(s32)
+...
--- /dev/null
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=aarch64-- -mattr=+lse -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
+
+--- |
+ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+ define void @cmpxchg_i32(i64* %addr) { ret void }
+ define void @cmpxchg_i64(i64* %addr) { ret void }
+...
+
+---
+name: cmpxchg_i32
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: cmpxchg_i32
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CMP:%[0-9]+]]:gpr32 = MOVi32imm 0
+ ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr32 = CASW [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %w0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s32) = G_CONSTANT i32 0
+ %2:gpr(s32) = G_CONSTANT i32 1
+ %3:gpr(s32) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
+ %w0 = COPY %3(s32)
+...
+
+---
+name: cmpxchg_i64
+legalized: true
+regBankSelected: true
+
+body: |
+ bb.0:
+ liveins: %x0
+
+ ; CHECK-LABEL: name: cmpxchg_i64
+ ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY %x0
+ ; CHECK: [[CMP:%[0-9]+]]:gpr64 = MOVi64imm 0
+ ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1
+ ; CHECK: [[RES:%[0-9]+]]:gpr64 = CASX [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr)
+ ; CHECK: %x0 = COPY [[RES]]
+ %0:gpr(p0) = COPY %x0
+ %1:gpr(s64) = G_CONSTANT i64 0
+ %2:gpr(s64) = G_CONSTANT i64 1
+ %3:gpr(s64) = G_ATOMIC_CMPXCHG %0, %1, %2 :: (load store monotonic 8 on %ir.addr)
+ %x0 = COPY %3(s64)
+...
// CHECK-NEXT: GIM_Try, /*On fail goto*//*Label 22*/ [[LABEL:[0-9]+]],
// CHECK-NEXT: GIM_CheckNumOperands, /*MI*/0, /*Expected*/2,
// CHECK-NEXT: GIM_CheckOpcode, /*MI*/0, TargetOpcode::G_LOAD,
-// CHECK-NEXT: GIM_CheckNonAtomic, /*MI*/0,
+// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/0, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
// CHECK-NEXT: // MIs[0] dst
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/0, /*Type*/GILLT_s32,
// CHECK-NEXT: GIM_CheckRegBankForClass, /*MI*/0, /*Op*/0, /*RC*/MyTarget::GPR32RegClassID,
// CHECK-NEXT: // MIs[0] Operand 1
// CHECK-NEXT: GIM_CheckType, /*MI*/0, /*Op*/1, /*Type*/GILLT_s16,
// CHECK-NEXT: GIM_CheckOpcode, /*MI*/1, TargetOpcode::G_LOAD,
-// CHECK-NEXT: GIM_CheckNonAtomic, /*MI*/1,
+// CHECK-NEXT: GIM_CheckAtomicOrdering, /*MI*/1, /*Order*/(int64_t)AtomicOrdering::NotAtomic,
// CHECK-NEXT: // MIs[1] Operand 0
// CHECK-NEXT: GIM_CheckType, /*MI*/1, /*Op*/0, /*Type*/GILLT_s16,
// CHECK-NEXT: // MIs[1] src1
for (const auto &P : N->getPredicateFns()) {
Explanation +=
(Separator + P.getOrigPatFragRecord()->getRecord()->getName()).str();
+ Separator = ", ";
+
if (P.isAlwaysTrue())
Explanation += " always-true";
if (P.isImmediatePattern())
Explanation += (" MemVT=" + VT->getName()).str();
if (Record *VT = P.getScalarMemoryVT())
Explanation += (" ScalarVT(MemVT)=" + VT->getName()).str();
+
+ if (P.isAtomicOrderingMonotonic())
+ Explanation += " monotonic";
+ if (P.isAtomicOrderingAcquire())
+ Explanation += " acquire";
+ if (P.isAtomicOrderingRelease())
+ Explanation += " release";
+ if (P.isAtomicOrderingAcquireRelease())
+ Explanation += " acq_rel";
+ if (P.isAtomicOrderingSequentiallyConsistent())
+ Explanation += " seq_cst";
}
return Explanation;
}
if (Predicate.isImmediatePattern())
continue;
- if (Predicate.isLoad() && Predicate.isUnindexed())
+ if (Predicate.isNonExtLoad())
continue;
- if (Predicate.isNonExtLoad())
+ if (Predicate.isNonTruncStore())
continue;
- if (Predicate.isStore() && Predicate.isUnindexed())
+ if (Predicate.isLoad() || Predicate.isStore()) {
+ if (Predicate.isUnindexed())
+ continue;
+ }
+
+ if (Predicate.isAtomic() && Predicate.getMemoryVT())
continue;
- if (Predicate.isNonTruncStore())
+ if (Predicate.isAtomic() &&
+ (Predicate.isAtomicOrderingMonotonic() ||
+ Predicate.isAtomicOrderingAcquire() ||
+ Predicate.isAtomicOrderingRelease() ||
+ Predicate.isAtomicOrderingAcquireRelease() ||
+ Predicate.isAtomicOrderingSequentiallyConsistent()))
continue;
HasUnsupportedPredicate = true;
enum PredicateKind {
IPM_Opcode,
IPM_ImmPredicate,
- IPM_NonAtomicMMO,
+ IPM_AtomicOrderingMMO,
};
PredicateKind Kind;
}
};
-/// Generates code to check that a memory instruction has a non-atomic MachineMemoryOperand.
-class NonAtomicMMOPredicateMatcher : public InstructionPredicateMatcher {
+/// Generates code to check that a memory instruction has a atomic ordering
+/// MachineMemoryOperand.
+class AtomicOrderingMMOPredicateMatcher : public InstructionPredicateMatcher {
+ StringRef Order;
+
public:
- NonAtomicMMOPredicateMatcher()
- : InstructionPredicateMatcher(IPM_NonAtomicMMO) {}
+ AtomicOrderingMMOPredicateMatcher(StringRef Order)
+ : InstructionPredicateMatcher(IPM_AtomicOrderingMMO), Order(Order) {}
static bool classof(const InstructionPredicateMatcher *P) {
- return P->getKind() == IPM_NonAtomicMMO;
+ return P->getKind() == IPM_AtomicOrderingMMO;
}
void emitPredicateOpcodes(MatchTable &Table, RuleMatcher &Rule,
unsigned InsnVarID) const override {
- Table << MatchTable::Opcode("GIM_CheckNonAtomic")
+ Table << MatchTable::Opcode("GIM_CheckAtomicOrdering")
<< MatchTable::Comment("MI") << MatchTable::IntValue(InsnVarID)
+ << MatchTable::Comment("Order")
+ << MatchTable::NamedValue(("(int64_t)AtomicOrdering::" + Order).str())
<< MatchTable::LineBreak;
}
};
continue;
}
- // No check required. A G_LOAD is an unindexed load.
- if (Predicate.isLoad() && Predicate.isUnindexed())
- continue;
-
// No check required. G_LOAD by itself is a non-extending load.
if (Predicate.isNonExtLoad())
continue;
- if (Predicate.isLoad() && Predicate.getMemoryVT() != nullptr) {
- Optional<LLTCodeGen> MemTyOrNone =
- MVTToLLT(getValueType(Predicate.getMemoryVT()));
-
- if (!MemTyOrNone)
- return failedImport("MemVT could not be converted to LLT");
-
- InsnMatcher.getOperand(0).addPredicate<LLTOperandMatcher>(MemTyOrNone.getValue());
- continue;
- }
-
- // No check required. A G_STORE is an unindexed store.
- if (Predicate.isStore() && Predicate.isUnindexed())
- continue;
-
// No check required. G_STORE by itself is a non-extending store.
if (Predicate.isNonTruncStore())
continue;
- if (Predicate.isStore() && Predicate.getMemoryVT() != nullptr) {
- Optional<LLTCodeGen> MemTyOrNone =
- MVTToLLT(getValueType(Predicate.getMemoryVT()));
+ if (Predicate.isLoad() || Predicate.isStore() || Predicate.isAtomic()) {
+ if (Predicate.getMemoryVT() != nullptr) {
+ Optional<LLTCodeGen> MemTyOrNone =
+ MVTToLLT(getValueType(Predicate.getMemoryVT()));
- if (!MemTyOrNone)
- return failedImport("MemVT could not be converted to LLT");
+ if (!MemTyOrNone)
+ return failedImport("MemVT could not be converted to LLT");
- InsnMatcher.getOperand(0).addPredicate<LLTOperandMatcher>(MemTyOrNone.getValue());
- continue;
+ InsnMatcher.getOperand(0).addPredicate<LLTOperandMatcher>(
+ MemTyOrNone.getValue());
+ continue;
+ }
+ }
+
+ if (Predicate.isLoad() || Predicate.isStore()) {
+ // No check required. A G_LOAD/G_STORE is an unindexed load.
+ if (Predicate.isUnindexed())
+ continue;
+ }
+
+ if (Predicate.isAtomic()) {
+ if (Predicate.isAtomicOrderingMonotonic()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "Monotonic");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingAcquire()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Acquire");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingRelease()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("Release");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingAcquireRelease()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "AcquireRelease");
+ continue;
+ }
+ if (Predicate.isAtomicOrderingSequentiallyConsistent()) {
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>(
+ "SequentiallyConsistent");
+ continue;
+ }
}
return failedImport("Src pattern child has predicate (" +
explainPredicates(Src) + ")");
}
if (SrcGIEquivOrNull && SrcGIEquivOrNull->getValueAsBit("CheckMMOIsNonAtomic"))
- InsnMatcher.addPredicate<NonAtomicMMOPredicateMatcher>();
+ InsnMatcher.addPredicate<AtomicOrderingMMOPredicateMatcher>("NotAtomic");
if (Src->isLeaf()) {
Init *SrcInit = Src->getLeafValue();