// True when the intrinsic takes no overloaded types (IsOverloadNone flag).
bool isOverloadNone() const { return Flags & IsOverloadNone; }
// True for WHILE-style overloading: {default type, typeof(operand1)}.
bool isOverloadWhile() const { return Flags & IsOverloadWhile; }
// True when no overload-kind bit in OverloadKindMask is set; the default type
// is used as the overload type.
bool isOverloadDefault() const { return !(Flags & OverloadKindMask); }
+ // True for WHILERW/WHILEWR overloading: {pred(default type), typeof(operand0)}.
+ bool isOverloadWhileRW() const { return Flags & IsOverloadWhileRW; }
// Raw flag bits.
uint64_t getBits() const { return Flags; }
// Generic query for a single flag bit.
bool isFlagSet(uint64_t Flag) const { return Flags & Flag; }
def IsZExtReturn : FlagType<0x00080000>; // When set, the return value is zero-extended (sign-extend is the default). NOTE(review): original comment read "sign-extend by default"; reworded to match the flag name — confirm against the emitter.
def IsOverloadNone : FlagType<0x00100000>; // Intrinsic does not take any overloaded types.
def IsOverloadWhile : FlagType<0x00200000>; // Use {default type, typeof(operand1)} as overloaded types.
+def IsOverloadWhileRW : FlagType<0x00400000>; // Use {pred(default type), typeof(operand0)} as overloaded types.
def OverloadKindMask : FlagType<0x00E00000>; // When the masked values are all '0', the default type is used as overload type. NOTE(review): 0x00E00000 covers bits 21-23 only, so IsOverloadNone (bit 20, 0x00100000) is NOT in the mask — confirm this is intended.
// : :
// : :
// Truncating non-temporal scatter stores, vector-base + scalar-index forms.
// NOTE(review): both "index" builtins map onto the *_scatter_scalar_offset
// intrinsic — presumably the index is scaled to a byte offset during CodeGen;
// confirm against the builtin emission code.
def SVSTNT1H_SCATTER_INDEX_S : MInst<"svstnt1h_scatter[_{2}base]_index[_{d}]", "vPuld", "ilUiUl", [IsScatterStore], MemEltTyInt16, "aarch64_sve_stnt1_scatter_scalar_offset">;
def SVSTNT1W_SCATTER_INDEX_S : MInst<"svstnt1w_scatter[_{2}base]_index[_{d}]", "vPuld", "lUl", [IsScatterStore], MemEltTyInt32, "aarch64_sve_stnt1_scatter_scalar_offset">;
}
+
+////////////////////////////////////////////////////////////////////////////////
+// SVE2 - Contiguous conflict detection
+//
+// WHILERW/WHILEWR take two pointers and produce a predicate. Prototype "Pcc":
+// predicate result with two 'c' operands (presumably const pointers to the
+// element type — see this file's prototype-modifier key). All use
+// IsOverloadWhileRW, i.e. overload on {pred(default type), typeof(operand0)}.
+let ArchGuard = "defined(__ARM_FEATURE_SVE2)" in {
+def SVWHILERW_B : SInst<"svwhilerw[_{1}]", "Pcc", "cUc", MergeNone, "aarch64_sve_whilerw_b", [IsOverloadWhileRW]>;
+def SVWHILERW_H : SInst<"svwhilerw[_{1}]", "Pcc", "sUsh", MergeNone, "aarch64_sve_whilerw_h", [IsOverloadWhileRW]>;
+def SVWHILERW_S : SInst<"svwhilerw[_{1}]", "Pcc", "iUif", MergeNone, "aarch64_sve_whilerw_s", [IsOverloadWhileRW]>;
+def SVWHILERW_D : SInst<"svwhilerw[_{1}]", "Pcc", "lUld", MergeNone, "aarch64_sve_whilerw_d", [IsOverloadWhileRW]>;
+
+def SVWHILEWR_B : SInst<"svwhilewr[_{1}]", "Pcc", "cUc", MergeNone, "aarch64_sve_whilewr_b", [IsOverloadWhileRW]>;
+def SVWHILEWR_H : SInst<"svwhilewr[_{1}]", "Pcc", "sUsh", MergeNone, "aarch64_sve_whilewr_h", [IsOverloadWhileRW]>;
+def SVWHILEWR_S : SInst<"svwhilewr[_{1}]", "Pcc", "iUif", MergeNone, "aarch64_sve_whilewr_s", [IsOverloadWhileRW]>;
+def SVWHILEWR_D : SInst<"svwhilewr[_{1}]", "Pcc", "lUld", MergeNone, "aarch64_sve_whilewr_d", [IsOverloadWhileRW]>;
+}
}
}
+// Return the llvm predicate vector type corresponding to the specified element
+// TypeFlags.
+llvm::VectorType* CodeGenFunction::getSVEPredType(SVETypeFlags TypeFlags) {
+  // Predicates are <vscale x N x i1>, where N matches the element count of
+  // the corresponding data vector: 16 for i8, 8 for i16/f16, 4 for i32/f32,
+  // and 2 for i64/f64.
+  unsigned NumEls;
+  switch (TypeFlags.getEltType()) {
+  default:
+    llvm_unreachable("Unhandled SVETypeFlag!");
+  case SVETypeFlags::EltTyInt8:
+    NumEls = 16;
+    break;
+  case SVETypeFlags::EltTyInt16:
+  case SVETypeFlags::EltTyFloat16:
+    NumEls = 8;
+    break;
+  case SVETypeFlags::EltTyInt32:
+  case SVETypeFlags::EltTyFloat32:
+    NumEls = 4;
+    break;
+  case SVETypeFlags::EltTyInt64:
+  case SVETypeFlags::EltTyFloat64:
+    NumEls = 2;
+    break;
+  }
+  return llvm::VectorType::get(Builder.getInt1Ty(), { NumEls, true });
+}
+
// Return the llvm vector type corresponding to the specified element TypeFlags.
llvm::VectorType *CodeGenFunction::getSVEType(const SVETypeFlags &TypeFlags) {
switch (TypeFlags.getEltType()) {
if (TypeFlags.isOverloadWhile())
return {DefaultType, Ops[1]->getType()};
+ if (TypeFlags.isOverloadWhileRW())
+ return {getSVEPredType(TypeFlags), Ops[0]->getType()};
+
assert(TypeFlags.isOverloadDefault() && "Unexpected value for overloads");
return {DefaultType};
}
ArrayRef<llvm::Value *> Ops);
llvm::Type *getEltType(SVETypeFlags TypeFlags);
llvm::VectorType *getSVEType(const SVETypeFlags &TypeFlags);
+ // Predicate (<vscale x N x i1>) counterpart of getSVEType for the same
+ // element-type flags.
+ llvm::VectorType *getSVEPredType(SVETypeFlags TypeFlags);
llvm::Value *EmitSVEDupX(llvm::Value *Scalar);
llvm::Value *EmitSVEPredicateCast(llvm::Value *Pred, llvm::VectorType *VTy);
llvm::Value *EmitSVEGatherLoad(SVETypeFlags TypeFlags,
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+// Each test checks (via the CHECK lines, which are load-bearing FileCheck
+// directives — do not edit them casually) that svwhilerw lowers to the
+// corresponding llvm.aarch64.sve.whilerw.* intrinsic, with a
+// convert.to.svbool reinterpret for element widths other than 8 bits.
+// The SVE-only (non-SVE2) runs expect implicit-declaration warnings instead.
+svbool_t test_svwhilerw_s8(const int8_t *op1, const int8_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilerw.b.nxv16i1.p0i8(i8* %op1, i8* %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_s8'}}
+  return SVE_ACLE_FUNC(svwhilerw,_s8,,)(op1, op2);
+}
+
+svbool_t test_svwhilerw_s16(const int16_t *op1, const int16_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nxv8i1.p0i16(i16* %op1, i16* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_s16'}}
+  return SVE_ACLE_FUNC(svwhilerw,_s16,,)(op1, op2);
+}
+
+svbool_t test_svwhilerw_s32(const int32_t *op1, const int32_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nxv4i1.p0i32(i32* %op1, i32* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_s32'}}
+  return SVE_ACLE_FUNC(svwhilerw,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilerw_s64(const int64_t *op1, const int64_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nxv2i1.p0i64(i64* %op1, i64* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_s64'}}
+  return SVE_ACLE_FUNC(svwhilerw,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilerw_u8(const uint8_t *op1, const uint8_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilerw.b.nxv16i1.p0i8(i8* %op1, i8* %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_u8'}}
+  return SVE_ACLE_FUNC(svwhilerw,_u8,,)(op1, op2);
+}
+
+svbool_t test_svwhilerw_u16(const uint16_t *op1, const uint16_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nxv8i1.p0i16(i16* %op1, i16* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_u16'}}
+  return SVE_ACLE_FUNC(svwhilerw,_u16,,)(op1, op2);
+}
+
+svbool_t test_svwhilerw_u32(const uint32_t *op1, const uint32_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nxv4i1.p0i32(i32* %op1, i32* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_u32'}}
+  return SVE_ACLE_FUNC(svwhilerw,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilerw_u64(const uint64_t *op1, const uint64_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nxv2i1.p0i64(i64* %op1, i64* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_u64'}}
+  return SVE_ACLE_FUNC(svwhilerw,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilerw_f16(const float16_t *op1, const float16_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilerw.h.nxv8i1.p0f16(half* %op1, half* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_f16'}}
+  return SVE_ACLE_FUNC(svwhilerw,_f16,,)(op1, op2);
+}
+
+svbool_t test_svwhilerw_f32(const float32_t *op1, const float32_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilerw.s.nxv4i1.p0f32(float* %op1, float* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_f32'}}
+  return SVE_ACLE_FUNC(svwhilerw,_f32,,)(op1, op2);
+}
+
+// Same pattern as the other element widths; the ret CHECK was missing here
+// (present in every sibling test), leaving the reinterpreted return unverified.
+svbool_t test_svwhilerw_f64(const float64_t *op1, const float64_t *op2)
+{
+  // CHECK-LABEL: test_svwhilerw_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilerw.d.nxv2i1.p0f64(double* %op1, double* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilerw'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilerw_f64'}}
+  return SVE_ACLE_FUNC(svwhilerw,_f64,,)(op1, op2);
+}
--- /dev/null
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -D__ARM_FEATURE_SVE2 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -fallow-half-arguments-and-returns -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify -verify-ignore-unexpected=error %s
+// RUN: %clang_cc1 -D__ARM_FEATURE_SVE -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -fallow-half-arguments-and-returns -fsyntax-only -verify=overload -verify-ignore-unexpected=error %s
+
+#include <arm_sve.h>
+
+#ifdef SVE_OVERLOADED_FORMS
+// A simple used,unused... macro, long enough to represent any SVE builtin.
+#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
+#else
+#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
+#endif
+
+// Each test checks (via the CHECK lines, which are load-bearing FileCheck
+// directives — do not edit them casually) that svwhilewr lowers to the
+// corresponding llvm.aarch64.sve.whilewr.* intrinsic, with a
+// convert.to.svbool reinterpret for element widths other than 8 bits.
+// The SVE-only (non-SVE2) runs expect implicit-declaration warnings instead.
+svbool_t test_svwhilewr_s8(const int8_t *op1, const int8_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_s8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilewr.b.nxv16i1.p0i8(i8* %op1, i8* %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_s8'}}
+  return SVE_ACLE_FUNC(svwhilewr,_s8,,)(op1, op2);
+}
+
+svbool_t test_svwhilewr_s16(const int16_t *op1, const int16_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_s16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nxv8i1.p0i16(i16* %op1, i16* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_s16'}}
+  return SVE_ACLE_FUNC(svwhilewr,_s16,,)(op1, op2);
+}
+
+svbool_t test_svwhilewr_s32(const int32_t *op1, const int32_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_s32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nxv4i1.p0i32(i32* %op1, i32* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_s32'}}
+  return SVE_ACLE_FUNC(svwhilewr,_s32,,)(op1, op2);
+}
+
+svbool_t test_svwhilewr_s64(const int64_t *op1, const int64_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_s64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nxv2i1.p0i64(i64* %op1, i64* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_s64'}}
+  return SVE_ACLE_FUNC(svwhilewr,_s64,,)(op1, op2);
+}
+
+svbool_t test_svwhilewr_u8(const uint8_t *op1, const uint8_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_u8
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.whilewr.b.nxv16i1.p0i8(i8* %op1, i8* %op2)
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_u8'}}
+  return SVE_ACLE_FUNC(svwhilewr,_u8,,)(op1, op2);
+}
+
+svbool_t test_svwhilewr_u16(const uint16_t *op1, const uint16_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_u16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nxv8i1.p0i16(i16* %op1, i16* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_u16'}}
+  return SVE_ACLE_FUNC(svwhilewr,_u16,,)(op1, op2);
+}
+
+svbool_t test_svwhilewr_u32(const uint32_t *op1, const uint32_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_u32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nxv4i1.p0i32(i32* %op1, i32* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_u32'}}
+  return SVE_ACLE_FUNC(svwhilewr,_u32,,)(op1, op2);
+}
+
+svbool_t test_svwhilewr_u64(const uint64_t *op1, const uint64_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_u64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nxv2i1.p0i64(i64* %op1, i64* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_u64'}}
+  return SVE_ACLE_FUNC(svwhilewr,_u64,,)(op1, op2);
+}
+
+svbool_t test_svwhilewr_f16(const float16_t *op1, const float16_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_f16
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 8 x i1> @llvm.aarch64.sve.whilewr.h.nxv8i1.p0f16(half* %op1, half* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_f16'}}
+  return SVE_ACLE_FUNC(svwhilewr,_f16,,)(op1, op2);
+}
+
+svbool_t test_svwhilewr_f32(const float32_t *op1, const float32_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_f32
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.whilewr.s.nxv4i1.p0f32(float* %op1, float* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_f32'}}
+  return SVE_ACLE_FUNC(svwhilewr,_f32,,)(op1, op2);
+}
+
+// Same pattern as the other element widths; the ret CHECK was missing here
+// (present in every sibling test), leaving the reinterpreted return unverified.
+svbool_t test_svwhilewr_f64(const float64_t *op1, const float64_t *op2)
+{
+  // CHECK-LABEL: test_svwhilewr_f64
+  // CHECK: %[[INTRINSIC:.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.whilewr.d.nxv2i1.p0f64(double* %op1, double* %op2)
+  // CHECK: %[[INTRINSIC_REINT:.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %[[INTRINSIC]])
+  // CHECK: ret <vscale x 16 x i1> %[[INTRINSIC_REINT]]
+  // overload-warning@+2 {{implicit declaration of function 'svwhilewr'}}
+  // expected-warning@+1 {{implicit declaration of function 'svwhilewr_f64'}}
+  return SVE_ACLE_FUNC(svwhilewr,_f64,,)(op1, op2);
+}