// Bit-test accessors over the 64-bit 'Flags' word; each FlagType constant
// (declared in the companion .td file) selects one property bit.
bool isZExtReturn() const { return Flags & IsZExtReturn; }
bool isByteIndexed() const { return Flags & IsByteIndexed; }
bool isOverloadNone() const { return Flags & IsOverloadNone; }
// True when the intrinsic overloads on {default type, typeof(operand1)},
// as used by the SVE while-comparison builtins.
+ bool isOverloadWhile() const { return Flags & IsOverloadWhile; }
// Default overloading applies only when no OverloadKind bit is set.
bool isOverloadDefault() const { return !(Flags & OverloadKindMask); }
// Raw access to the full flag word.
uint64_t getBits() const { return Flags; }
def IsStructStore : FlagType<0x00040000>;
def IsZExtReturn : FlagType<0x00080000>; // Return value is sign-extended by default; this flag selects a zero-extended return.
def IsOverloadNone : FlagType<0x00100000>; // Intrinsic does not take any overloaded types.
+def IsOverloadWhile : FlagType<0x00200000>; // Use {default type, typeof(operand1)} as overloaded types.
def OverloadKindMask : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type.
// : :
// : :
}
////////////////////////////////////////////////////////////////////////////////
+// While comparisons
+// Predicate-generating 'while' builtins. The signed spellings map directly
+// to the aarch64_sve_whilele/whilelt intrinsics; the unsigned spellings of
+// svwhilele/svwhilelt map to the whilels/whilelo intrinsics instead.
+// IsOverloadWhile overloads each on {result predicate type, scalar type}.
+
+def SVWHILELE_S32 : SInst<"svwhilele_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilele", [IsOverloadWhile]>;
+def SVWHILELE_S64 : SInst<"svwhilele_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilele", [IsOverloadWhile]>;
+def SVWHILELO_U32 : SInst<"svwhilelt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [IsOverloadWhile]>;
+def SVWHILELO_U64 : SInst<"svwhilelt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilelo", [IsOverloadWhile]>;
+def SVWHILELS_U32 : SInst<"svwhilele_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [IsOverloadWhile]>;
+def SVWHILELS_U64 : SInst<"svwhilele_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilels", [IsOverloadWhile]>;
+def SVWHILELT_S32 : SInst<"svwhilelt_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt", [IsOverloadWhile]>;
+def SVWHILELT_S64 : SInst<"svwhilelt_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilelt", [IsOverloadWhile]>;
+
+////////////////////////////////////////////////////////////////////////////////
// Floating-point arithmetic
defm SVABS_F : SInstZPZ<"svabs", "hfd", "aarch64_sve_fabs">;
def SVDOT_LANE_U : SInst<"svdot_lane[_{d}]", "ddqqi", "UiUl", MergeNone, "aarch64_sve_udot_lane", [], [ImmCheck<3, ImmCheckLaneIndexDot, 2>]>;
////////////////////////////////////////////////////////////////////////////////
+// SVE2 WhileGE/GT
+// Available only under __ARM_FEATURE_SVE2. The signed spellings map to the
+// aarch64_sve_whilege/whilegt intrinsics; the unsigned spellings of
+// svwhilege/svwhilegt map to the whilehs/whilehi intrinsics instead.
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVWHILEGE_S32 : SInst<"svwhilege_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilege", [IsOverloadWhile]>;
+def SVWHILEGE_S64 : SInst<"svwhilege_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilege", [IsOverloadWhile]>;
+def SVWHILEGT_S32 : SInst<"svwhilegt_{d}[_{1}]", "Pkk", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt", [IsOverloadWhile]>;
+def SVWHILEGT_S64 : SInst<"svwhilegt_{d}[_{1}]", "Pll", "PcPsPiPl", MergeNone, "aarch64_sve_whilegt", [IsOverloadWhile]>;
+def SVWHILEHI_U32 : SInst<"svwhilegt_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [IsOverloadWhile]>;
+def SVWHILEHI_U64 : SInst<"svwhilegt_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehi", [IsOverloadWhile]>;
+def SVWHILEHS_U32 : SInst<"svwhilege_{d}[_{1}]", "Pmm", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [IsOverloadWhile]>;
+def SVWHILEHS_U64 : SInst<"svwhilege_{d}[_{1}]", "Pnn", "PUcPUsPUiPUl", MergeNone, "aarch64_sve_whilehs", [IsOverloadWhile]>;
+}
+
+////////////////////////////////////////////////////////////////////////////////
// SVE2 - Non-temporal gather/scatter
let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
// Non-temporal gather load one vector (vector base)
llvm::Type *DefaultType = getSVEType(TypeFlags);
+ if (TypeFlags.isOverloadWhile())
+ return {DefaultType, Ops[1]->getType()};
+
assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
return {DefaultType};
}
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// Alternating used/unused parameters: with SVE_OVERLOADED_FORMS the explicit
+// type suffixes (A2, A4) are dropped to form the overloaded builtin name.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilele_b8_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b8_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i32(i32 %op1, i32 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svwhilele_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b16_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b32_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b64_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b8_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b8_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i32(i32 %op1, i32 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svwhilele_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b16_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b32_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b64_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b8_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b8_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilele.nxv16i1.i64(i64 %op1, i64 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svwhilele_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b16_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilele.nxv8i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b32_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilele.nxv4i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b64_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b8_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b8_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilels.nxv16i1.i64(i64 %op1, i64 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svwhilele_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b16_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b16_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilels.nxv8i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b32_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b32_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilels.nxv4i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilele_b64_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilele_b64_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilele_b64,_u64,,)(op1, op2);
+}
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// Alternating used/unused parameters: with SVE_OVERLOADED_FORMS the explicit
+// type suffixes (A2, A4) are dropped to form the overloaded builtin name.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilelt_b8_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b8_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i32(i32 %op1, i32 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svwhilelt_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b16_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b32_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b64_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b8_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b8_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i32(i32 %op1, i32 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svwhilelt_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b16_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b32_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b64_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b8_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b8_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelt.nxv16i1.i64(i64 %op1, i64 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svwhilelt_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b16_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelt.nxv8i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b32_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelt.nxv4i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b64_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b8_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b8_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilelo.nxv16i1.i64(i64 %op1, i64 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ return SVE_ACLE_FUNC(svwhilelt_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b16_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b16_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilelo.nxv8i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b32_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b32_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilelo.nxv4i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilelt_b64_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilelt_b64_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ return SVE_ACLE_FUNC(svwhilelt_b64,_u64,,)(op1, op2);
+}
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// Alternating used/unused parameters: with SVE_OVERLOADED_FORMS the explicit
+// type suffixes (A2, A4) are dropped to form the overloaded builtin name.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilege_b8_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b8_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 %op1, i32 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_s32'}}
+ return SVE_ACLE_FUNC(svwhilege_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b16_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_s32'}}
+ return SVE_ACLE_FUNC(svwhilege_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b32_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_s32'}}
+ return SVE_ACLE_FUNC(svwhilege_b32,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b64_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_s32'}}
+ return SVE_ACLE_FUNC(svwhilege_b64,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b8_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b8_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 %op1, i32 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_u32'}}
+ return SVE_ACLE_FUNC(svwhilege_b8,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b16_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_u32'}}
+ return SVE_ACLE_FUNC(svwhilege_b16,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b32_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_u32'}}
+ return SVE_ACLE_FUNC(svwhilege_b32,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b64_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_u32'}}
+ return SVE_ACLE_FUNC(svwhilege_b64,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b8_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b8_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 %op1, i64 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_s64'}}
+ return SVE_ACLE_FUNC(svwhilege_b8,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b16_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_s64'}}
+ return SVE_ACLE_FUNC(svwhilege_b16,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b32_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_s64'}}
+ return SVE_ACLE_FUNC(svwhilege_b32,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b64_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_s64'}}
+ return SVE_ACLE_FUNC(svwhilege_b64,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b8_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b8_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 %op1, i64 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b8'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b8_u64'}}
+ return SVE_ACLE_FUNC(svwhilege_b8,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b16_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b16_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b16'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b16_u64'}}
+ return SVE_ACLE_FUNC(svwhilege_b16,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b32_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b32_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b32'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b32_u64'}}
+ return SVE_ACLE_FUNC(svwhilege_b32,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilege_b64_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilege_b64_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilege_b64'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilege_b64_u64'}}
+ return SVE_ACLE_FUNC(svwhilege_b64,_u64,,)(op1, op2);
+}
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// Alternating used/unused parameters: with SVE_OVERLOADED_FORMS the explicit
+// type suffixes (A2, A4) are dropped to form the overloaded builtin name.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+svbool_t test_svwhilegt_b8_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b8_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 %op1, i32 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_s32'}}
+ return SVE_ACLE_FUNC(svwhilegt_b8,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b16_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b16_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_s32'}}
+ return SVE_ACLE_FUNC(svwhilegt_b16,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilegt_b32_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b32_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_s32'}}
+ return SVE_ACLE_FUNC(svwhilegt_b32,_s32,,)(op1, op2);
+}
+
+// b64/s32: signed 'whilegt' at nxv2i1, widened to svbool_t via convert.to.svbool.
+svbool_t test_svwhilegt_b64_s32(int32_t op1, int32_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b64_s32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_s32'}}
+ return SVE_ACLE_FUNC(svwhilegt_b64,_s32,,)(op1, op2);
+}
+
+// b8/u32: unsigned compare maps svwhilegt to the 'whilehi' intrinsic.
+// nxv16i1 result is returned directly (no svbool cast needed for b8).
+svbool_t test_svwhilegt_b8_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b8_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 %op1, i32 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_u32'}}
+ return SVE_ACLE_FUNC(svwhilegt_b8,_u32,,)(op1, op2);
+}
+
+// b16/u32: unsigned 'whilehi' at nxv8i1, widened to svbool_t via convert.to.svbool.
+svbool_t test_svwhilegt_b16_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b16_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_u32'}}
+ return SVE_ACLE_FUNC(svwhilegt_b16,_u32,,)(op1, op2);
+}
+
+// b32/u32: unsigned 'whilehi' at nxv4i1, widened to svbool_t via convert.to.svbool.
+svbool_t test_svwhilegt_b32_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b32_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_u32'}}
+ return SVE_ACLE_FUNC(svwhilegt_b32,_u32,,)(op1, op2);
+}
+
+// b64/u32: unsigned 'whilehi' at nxv2i1, widened to svbool_t via convert.to.svbool.
+svbool_t test_svwhilegt_b64_u32(uint32_t op1, uint32_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b64_u32
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %op1, i32 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_u32'}}
+ return SVE_ACLE_FUNC(svwhilegt_b64,_u32,,)(op1, op2);
+}
+
+// b8/s64: signed 'whilegt' with i64 operands; nxv16i1 result returned directly.
+svbool_t test_svwhilegt_b8_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b8_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 %op1, i64 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_s64'}}
+ return SVE_ACLE_FUNC(svwhilegt_b8,_s64,,)(op1, op2);
+}
+
+// b16/s64: signed 'whilegt' (i64 operands) at nxv8i1, widened via convert.to.svbool.
+svbool_t test_svwhilegt_b16_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b16_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_s64'}}
+ return SVE_ACLE_FUNC(svwhilegt_b16,_s64,,)(op1, op2);
+}
+
+// b32/s64: signed 'whilegt' (i64 operands) at nxv4i1, widened via convert.to.svbool.
+svbool_t test_svwhilegt_b32_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b32_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_s64'}}
+ return SVE_ACLE_FUNC(svwhilegt_b32,_s64,,)(op1, op2);
+}
+
+// b64/s64: signed 'whilegt' (i64 operands) at nxv2i1, widened via convert.to.svbool.
+svbool_t test_svwhilegt_b64_s64(int64_t op1, int64_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b64_s64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_s64'}}
+ return SVE_ACLE_FUNC(svwhilegt_b64,_s64,,)(op1, op2);
+}
+
+// b8/u64: unsigned compare maps to 'whilehi' with i64 operands;
+// nxv16i1 result returned directly.
+svbool_t test_svwhilegt_b8_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b8_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 %op1, i64 %op2)
+ // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b8'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b8_u64'}}
+ return SVE_ACLE_FUNC(svwhilegt_b8,_u64,,)(op1, op2);
+}
+
+// b16/u64: unsigned 'whilehi' (i64 operands) at nxv8i1, widened via convert.to.svbool.
+svbool_t test_svwhilegt_b16_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b16_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b16'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b16_u64'}}
+ return SVE_ACLE_FUNC(svwhilegt_b16,_u64,,)(op1, op2);
+}
+
+// b32/u64: unsigned 'whilehi' (i64 operands) at nxv4i1, widened via convert.to.svbool.
+svbool_t test_svwhilegt_b32_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b32_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b32'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b32_u64'}}
+ return SVE_ACLE_FUNC(svwhilegt_b32,_u64,,)(op1, op2);
+}
+
+// b64/u64: unsigned 'whilehi' (i64 operands) at nxv2i1, widened via convert.to.svbool.
+svbool_t test_svwhilegt_b64_u64(uint64_t op1, uint64_t op2)
+{
+ // CHECK-LABEL: test_svwhilegt_b64_u64
+ // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 %op1, i64 %op2)
+ // CHECK: %[[CAST:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+ // CHECK: ret <vscale x 16 x i1> %[[CAST]]
+ // overload-warning@+2 {{implicit declaration of function 'svwhilegt_b64'}}
+ // expected-warning@+1 {{implicit declaration of function 'svwhilegt_b64_u64'}}
+ return SVE_ACLE_FUNC(svwhilegt_b64,_u64,,)(op1, op2);
+}