bool isStructStore() const { return Flags & IsStructStore; }
bool isZExtReturn() const { return Flags & IsZExtReturn; }
bool isByteIndexed() const { return Flags & IsByteIndexed; }
+ bool isOverloadNone() const { return Flags & IsOverloadNone; }
+ bool isOverloadDefault() const { return !(Flags & OverloadKindMask); }
uint64_t getBits() const { return Flags; }
bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }
// o: 4x width elements, 1/4 element count
//
// i: constant uint64_t
+// k: int32_t
+// l: int64_t
+// m: uint32_t
+// n: uint64_t
//
// I: Predicate Pattern (sv_pattern)
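As a hedged illustration (not part of this patch; the declaration shape is inferred from the tests added below), the new scalar modifiers compose into prototype strings such as the "nI" used by svcntb_pat:

    // "nI": 'n' = uint64_t return value, 'I' = sv_pattern immediate argument,
    // which corresponds to a C declaration like:
    uint64_t svcntb_pat(enum svpattern pattern);
    // 'k', 'l' and 'm' likewise denote int32_t, int64_t and uint32_t scalars.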
def IsStructLoad : FlagType<0x00020000>;
def IsStructStore : FlagType<0x00040000>;
def IsZExtReturn : FlagType<0x00080000>; // Return value is sign-extended by default
+def IsOverloadNone : FlagType<0x00100000>; // Intrinsic does not take any overloaded types.
+def OverloadKindMask : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as the overload type.
// : :
// : :
def IsByteIndexed : FlagType<0x02000000>;
def SVQDECH_S : SInst<"svqdech_pat[_{d}]", "ddIi", "s", MergeNone, "aarch64_sve_sqdech", [], [ImmCheck<2, ImmCheck1_16>]>;
def SVQDECH_U : SInst<"svqdech_pat[_{d}]", "ddIi", "Us", MergeNone, "aarch64_sve_uqdech", [], [ImmCheck<2, ImmCheck1_16>]>;
+
+////////////////////////////////////////////////////////////////////////////////
+// Predicate creation
+
+def SVPFALSE : SInst<"svpfalse[_b]", "P", "", MergeNone, "", [IsOverloadNone]>;
+
+
+////////////////////////////////////////////////////////////////////////////////
+// Counting elements
+
+def SVCNTB_PAT : SInst<"svcntb_pat", "nI", "", MergeNone, "aarch64_sve_cntb", [IsOverloadNone]>;
+def SVCNTH_PAT : SInst<"svcnth_pat", "nI", "", MergeNone, "aarch64_sve_cnth", [IsOverloadNone]>;
+def SVCNTW_PAT : SInst<"svcntw_pat", "nI", "", MergeNone, "aarch64_sve_cntw", [IsOverloadNone]>;
+def SVCNTD_PAT : SInst<"svcntd_pat", "nI", "", MergeNone, "aarch64_sve_cntd", [IsOverloadNone]>;
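For reference, a sketch of the pattern-to-immediate mapping implied by the CHECK lines in the tests added later in this patch; the authoritative definition is the svpattern enum emitted into arm_sve.h, so treat these values as assumptions verified only by those tests:

    enum svpattern {
      SV_POW2 = 0,
      SV_VL1 = 1, SV_VL2 = 2, SV_VL3 = 3, SV_VL4 = 4,
      SV_VL5 = 5, SV_VL6 = 6, SV_VL7 = 7, SV_VL8 = 8,
      SV_VL16 = 9, SV_VL32 = 10, SV_VL64 = 11, SV_VL128 = 12, SV_VL256 = 13,
      SV_MUL4 = 29, SV_MUL3 = 30, SV_ALL = 31
    };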
////////////////////////////////////////////////////////////////////////////////
// Integer arithmetic
def SVDOT_LANE_S : SInst<"svdot_lane[_{d}]", "ddqqi", "il", MergeNone, "aarch64_sve_sdot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
Ops.insert(Ops.begin(), SplatUndef);
}
+SmallVector<llvm::Type *, 2>
+CodeGenFunction::getSVEOverloadTypes(SVETypeFlags TypeFlags,
+ ArrayRef<Value *> Ops) {
+ if (TypeFlags.isOverloadNone())
+ return {};
+
+ llvm::Type *DefaultType = getSVEType(TypeFlags);
+
+ assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
+ return {DefaultType};
+}
+
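The list returned by getSVEOverloadTypes feeds CGM.getIntrinsic below and therefore controls intrinsic name mangling. A hedged summary of the effect (the unmangled name is taken from the tests in this patch; the suffixed form is only an example of the general scheme):

    // IsOverloadNone      -> {}            -> unmangled name, e.g. @llvm.aarch64.sve.cntb
    // default (mask == 0) -> {DefaultType} -> type-suffixed name (an .nxv8i16-style
    //                                         suffix for an svint16_t data type)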
Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
// Find out if any arguments are required to be integer constant expressions.
else if (TypeFlags.isScatterStore())
return EmitSVEScatterStore(TypeFlags, Ops, Builtin->LLVMIntrinsic);
else if (Builtin->LLVMIntrinsic != 0) {
- llvm::VectorType *OverloadedTy = getSVEType(TypeFlags);
-
if (TypeFlags.getMergeType() == SVETypeFlags::MergeZeroExp)
InsertExplicitZeroOperand(Builder, Ty, Ops);
InsertExplicitUndefOperand(Builder, Ty, Ops);
// Predicates must match the main datatype.
- for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (auto PredTy = dyn_cast<llvm::VectorType>(Ops[i]->getType()))
- if (PredTy->getScalarType()->isIntegerTy(1)) {
- auto NewPredTy = cast<llvm::VectorType>(OverloadedTy);
- Ops[i] = EmitSVEPredicateCast(Ops[i], NewPredTy);
- }
- }
+ if (PredTy->getElementType()->isIntegerTy(1))
+ Ops[i] = EmitSVEPredicateCast(Ops[i], getSVEType(TypeFlags));
// Splat scalar operand to vector (intrinsics with _n infix)
if (TypeFlags.hasSplatOperand()) {
Ops[1] = Builder.CreateCall(Sel, {Ops[0], Ops[1], SplatZero});
}
- Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic, OverloadedTy);
+ Function *F = CGM.getIntrinsic(Builtin->LLVMIntrinsic,
+ getSVEOverloadTypes(TypeFlags, Ops));
Value *Call = Builder.CreateCall(F, Ops);
- return Call;
+
+ // Predicate results must be converted to svbool_t.
+ if (auto PredTy = dyn_cast<llvm::VectorType>(Call->getType()))
+ if (PredTy->getScalarType()->isIntegerTy(1))
+ Call = EmitSVEPredicateCast(Call, cast<llvm::VectorType>(Ty));
+
+ return Call;
+ }
+
+ switch (BuiltinID) {
+ default:
+ return nullptr;
+ case SVE::BI__builtin_sve_svpfalse_b:
+ return ConstantInt::getFalse(Ty);
}
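For svpfalse_b the SInst record above specifies no LLVM intrinsic (the empty string in the arm_sve.td hunk), so Builtin->LLVMIntrinsic is 0 and control falls through to this switch; ConstantInt::getFalse on the <vscale x 16 x i1> result type yields the all-false predicate that the pfalse test below checks as zeroinitializer.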
/// Should not happen
/// pointer operand.
llvm::Type *SVEBuiltinMemEltTy(SVETypeFlags TypeFlags);
+ SmallVector<llvm::Type *, 2> getSVEOverloadTypes(SVETypeFlags TypeFlags,
+ ArrayRef<llvm::Value *> Ops);
llvm::Type *getEltType(SVETypeFlags TypeFlags);
llvm::VectorType *getSVEType(const SVETypeFlags &TypeFlags);
llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+uint64_t test_svcntb_pat()
+{
+ // CHECK-LABEL: test_svcntb_pat
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 0)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_POW2);
+}
+
+uint64_t test_svcntb_pat_1()
+{
+ // CHECK-LABEL: test_svcntb_pat_1
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 1)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL1);
+}
+
+uint64_t test_svcntb_pat_2()
+{
+ // CHECK-LABEL: test_svcntb_pat_2
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 2)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL2);
+}
+
+uint64_t test_svcntb_pat_3()
+{
+ // CHECK-LABEL: test_svcntb_pat_3
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 3)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL3);
+}
+
+uint64_t test_svcntb_pat_4()
+{
+ // CHECK-LABEL: test_svcntb_pat_4
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 4)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL4);
+}
+
+uint64_t test_svcntb_pat_5()
+{
+ // CHECK-LABEL: test_svcntb_pat_5
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 5)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL5);
+}
+
+uint64_t test_svcntb_pat_6()
+{
+ // CHECK-LABEL: test_svcntb_pat_6
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 6)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL6);
+}
+
+uint64_t test_svcntb_pat_7()
+{
+ // CHECK-LABEL: test_svcntb_pat_7
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 7)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL7);
+}
+
+uint64_t test_svcntb_pat_8()
+{
+ // CHECK-LABEL: test_svcntb_pat_8
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 8)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL8);
+}
+
+uint64_t test_svcntb_pat_9()
+{
+ // CHECK-LABEL: test_svcntb_pat_9
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 9)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL16);
+}
+
+uint64_t test_svcntb_pat_10()
+{
+ // CHECK-LABEL: test_svcntb_pat_10
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 10)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL32);
+}
+
+uint64_t test_svcntb_pat_11()
+{
+ // CHECK-LABEL: test_svcntb_pat_11
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 11)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL64);
+}
+
+uint64_t test_svcntb_pat_12()
+{
+ // CHECK-LABEL: test_svcntb_pat_12
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 12)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL128);
+}
+
+uint64_t test_svcntb_pat_13()
+{
+ // CHECK-LABEL: test_svcntb_pat_13
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 13)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_VL256);
+}
+
+uint64_t test_svcntb_pat_14()
+{
+ // CHECK-LABEL: test_svcntb_pat_14
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 29)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_MUL4);
+}
+
+uint64_t test_svcntb_pat_15()
+{
+ // CHECK-LABEL: test_svcntb_pat_15
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 30)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_MUL3);
+}
+
+uint64_t test_svcntb_pat_16()
+{
+ // CHECK-LABEL: test_svcntb_pat_16
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntb(i32 31)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntb_pat(SV_ALL);
+}
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+uint64_t test_svcntd_pat()
+{
+ // CHECK-LABEL: test_svcntd_pat
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 0)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_POW2);
+}
+
+uint64_t test_svcntd_pat_1()
+{
+ // CHECK-LABEL: test_svcntd_pat_1
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 1)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL1);
+}
+
+uint64_t test_svcntd_pat_2()
+{
+ // CHECK-LABEL: test_svcntd_pat_2
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 2)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL2);
+}
+
+uint64_t test_svcntd_pat_3()
+{
+ // CHECK-LABEL: test_svcntd_pat_3
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 3)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL3);
+}
+
+uint64_t test_svcntd_pat_4()
+{
+ // CHECK-LABEL: test_svcntd_pat_4
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 4)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL4);
+}
+
+uint64_t test_svcntd_pat_5()
+{
+ // CHECK-LABEL: test_svcntd_pat_5
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 5)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL5);
+}
+
+uint64_t test_svcntd_pat_6()
+{
+ // CHECK-LABEL: test_svcntd_pat_6
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 6)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL6);
+}
+
+uint64_t test_svcntd_pat_7()
+{
+ // CHECK-LABEL: test_svcntd_pat_7
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 7)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL7);
+}
+
+uint64_t test_svcntd_pat_8()
+{
+ // CHECK-LABEL: test_svcntd_pat_8
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 8)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL8);
+}
+
+uint64_t test_svcntd_pat_9()
+{
+ // CHECK-LABEL: test_svcntd_pat_9
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 9)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL16);
+}
+
+uint64_t test_svcntd_pat_10()
+{
+ // CHECK-LABEL: test_svcntd_pat_10
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 10)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL32);
+}
+
+uint64_t test_svcntd_pat_11()
+{
+ // CHECK-LABEL: test_svcntd_pat_11
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 11)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL64);
+}
+
+uint64_t test_svcntd_pat_12()
+{
+ // CHECK-LABEL: test_svcntd_pat_12
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 12)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL128);
+}
+
+uint64_t test_svcntd_pat_13()
+{
+ // CHECK-LABEL: test_svcntd_pat_13
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 13)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_VL256);
+}
+
+uint64_t test_svcntd_pat_14()
+{
+ // CHECK-LABEL: test_svcntd_pat_14
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 29)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_MUL4);
+}
+
+uint64_t test_svcntd_pat_15()
+{
+ // CHECK-LABEL: test_svcntd_pat_15
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 30)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_MUL3);
+}
+
+uint64_t test_svcntd_pat_16()
+{
+ // CHECK-LABEL: test_svcntd_pat_16
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntd(i32 31)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntd_pat(SV_ALL);
+}
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+uint64_t test_svcnth_pat()
+{
+ // CHECK-LABEL: test_svcnth_pat
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 0)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_POW2);
+}
+
+uint64_t test_svcnth_pat_1()
+{
+ // CHECK-LABEL: test_svcnth_pat_1
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 1)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL1);
+}
+
+uint64_t test_svcnth_pat_2()
+{
+ // CHECK-LABEL: test_svcnth_pat_2
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 2)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL2);
+}
+
+uint64_t test_svcnth_pat_3()
+{
+ // CHECK-LABEL: test_svcnth_pat_3
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 3)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL3);
+}
+
+uint64_t test_svcnth_pat_4()
+{
+ // CHECK-LABEL: test_svcnth_pat_4
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 4)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL4);
+}
+
+uint64_t test_svcnth_pat_5()
+{
+ // CHECK-LABEL: test_svcnth_pat_5
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 5)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL5);
+}
+
+uint64_t test_svcnth_pat_6()
+{
+ // CHECK-LABEL: test_svcnth_pat_6
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 6)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL6);
+}
+
+uint64_t test_svcnth_pat_7()
+{
+ // CHECK-LABEL: test_svcnth_pat_7
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 7)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL7);
+}
+
+uint64_t test_svcnth_pat_8()
+{
+ // CHECK-LABEL: test_svcnth_pat_8
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 8)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL8);
+}
+
+uint64_t test_svcnth_pat_9()
+{
+ // CHECK-LABEL: test_svcnth_pat_9
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 9)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL16);
+}
+
+uint64_t test_svcnth_pat_10()
+{
+ // CHECK-LABEL: test_svcnth_pat_10
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 10)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL32);
+}
+
+uint64_t test_svcnth_pat_11()
+{
+ // CHECK-LABEL: test_svcnth_pat_11
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 11)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL64);
+}
+
+uint64_t test_svcnth_pat_12()
+{
+ // CHECK-LABEL: test_svcnth_pat_12
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 12)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL128);
+}
+
+uint64_t test_svcnth_pat_13()
+{
+ // CHECK-LABEL: test_svcnth_pat_13
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 13)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_VL256);
+}
+
+uint64_t test_svcnth_pat_14()
+{
+ // CHECK-LABEL: test_svcnth_pat_14
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 29)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_MUL4);
+}
+
+uint64_t test_svcnth_pat_15()
+{
+ // CHECK-LABEL: test_svcnth_pat_15
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 30)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_MUL3);
+}
+
+uint64_t test_svcnth_pat_16()
+{
+ // CHECK-LABEL: test_svcnth_pat_16
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cnth(i32 31)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcnth_pat(SV_ALL);
+}
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+uint64_t test_svcntw_pat()
+{
+ // CHECK-LABEL: test_svcntw_pat
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 0)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_POW2);
+}
+
+uint64_t test_svcntw_pat_1()
+{
+ // CHECK-LABEL: test_svcntw_pat_1
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 1)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL1);
+}
+
+uint64_t test_svcntw_pat_2()
+{
+ // CHECK-LABEL: test_svcntw_pat_2
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 2)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL2);
+}
+
+uint64_t test_svcntw_pat_3()
+{
+ // CHECK-LABEL: test_svcntw_pat_3
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 3)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL3);
+}
+
+uint64_t test_svcntw_pat_4()
+{
+ // CHECK-LABEL: test_svcntw_pat_4
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 4)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL4);
+}
+
+uint64_t test_svcntw_pat_5()
+{
+ // CHECK-LABEL: test_svcntw_pat_5
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 5)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL5);
+}
+
+uint64_t test_svcntw_pat_6()
+{
+ // CHECK-LABEL: test_svcntw_pat_6
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 6)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL6);
+}
+
+uint64_t test_svcntw_pat_7()
+{
+ // CHECK-LABEL: test_svcntw_pat_7
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 7)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL7);
+}
+
+uint64_t test_svcntw_pat_8()
+{
+ // CHECK-LABEL: test_svcntw_pat_8
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 8)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL8);
+}
+
+uint64_t test_svcntw_pat_9()
+{
+ // CHECK-LABEL: test_svcntw_pat_9
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 9)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL16);
+}
+
+uint64_t test_svcntw_pat_10()
+{
+ // CHECK-LABEL: test_svcntw_pat_10
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 10)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL32);
+}
+
+uint64_t test_svcntw_pat_11()
+{
+ // CHECK-LABEL: test_svcntw_pat_11
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 11)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL64);
+}
+
+uint64_t test_svcntw_pat_12()
+{
+ // CHECK-LABEL: test_svcntw_pat_12
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 12)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL128);
+}
+
+uint64_t test_svcntw_pat_13()
+{
+ // CHECK-LABEL: test_svcntw_pat_13
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 13)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_VL256);
+}
+
+uint64_t test_svcntw_pat_14()
+{
+ // CHECK-LABEL: test_svcntw_pat_14
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 29)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_MUL4);
+}
+
+uint64_t test_svcntw_pat_15()
+{
+ // CHECK-LABEL: test_svcntw_pat_15
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 30)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_MUL3);
+}
+
+uint64_t test_svcntw_pat_16()
+{
+ // CHECK-LABEL: test_svcntw_pat_16
+ // CHECK: %[[INTRINSIC:.*]] = call i64 @llvm.aarch64.sve.cntw(i32 31)
+ // CHECK: ret i64 %[[INTRINSIC]]
+ return svcntw_pat(SV_ALL);
+}
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svpfalse_b()
+{
+ // CHECK-LABEL: test_svpfalse_b
+ // CHECK: ret <vscale x 16 x i1> zeroinitializer
+ return SVE_ACLE_FUNC(svpfalse,_b,,)();
+}
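An illustrative expansion of the SVE_ACLE_FUNC macro for the call above, derived from the macro definitions rather than from compiler output:

    // Without SVE_OVERLOADED_FORMS: SVE_ACLE_FUNC(svpfalse,_b,,)() -> svpfalse_b()
    // With    SVE_OVERLOADED_FORMS: SVE_ACLE_FUNC(svpfalse,_b,,)() -> svpfalse()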
llvm_unreachable("Unsupported imm check");
}
+ /// Returns the enum value for the flag type
+ uint64_t getEnumValueForFlag(StringRef C) const {
+ auto Res = FlagTypes.find(C);
+ if (Res != FlagTypes.end())
+ return Res->getValue();
+ llvm_unreachable("Unsupported flag");
+ }
+
// Returns the SVETypeFlags for a given value and mask.
uint64_t encodeFlag(uint64_t V, StringRef MaskName) const {
auto It = FlagTypes.find(MaskName);
Immediate = true;
PredicatePattern = true;
break;
+ case 'k':
+ Predicate = false;
+ Signed = true;
+ Float = false;
+ ElementBitwidth = Bitwidth = 32;
+ NumVectors = 0;
+ break;
case 'l':
Predicate = false;
Signed = true;
ElementBitwidth = Bitwidth = 64;
NumVectors = 0;
break;
+ case 'm':
+ Predicate = false;
+ Signed = false;
+ Float = false;
+ ElementBitwidth = Bitwidth = 32;
+ NumVectors = 0;
+ break;
+ case 'n':
+ Predicate = false;
+ Signed = false;
+ Float = false;
+ ElementBitwidth = Bitwidth = 64;
+ NumVectors = 0;
+ break;
case 'S':
Constant = true;
Pointer = true;
for (auto FlagRec : FlagsList)
Flags |= FlagRec->getValueAsInt("Value");
+ // Create a dummy TypeSpec for non-overloaded builtins.
+ if (Types.empty()) {
+ assert((Flags & getEnumValueForFlag("IsOverloadNone")) &&
+ "Expect TypeSpec for overloaded builtin!");
+ Types = "i";
+ }
+
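A note on the dummy TypeSpec (my reading of the change, not stated in the patch): SVPFALSE carries an empty type string in its SInst record, so Types becomes "i" purely so that the type-spec loop below runs once and emits a single, non-overloaded builtin.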
// Extract type specs from string
SmallVector<TypeSpec, 8> TypeSpecs;
TypeSpec Acc;