From: Jan Vesely Date: Fri, 2 Sep 2016 20:13:19 +0000 (+0000) Subject: AMDGPU/R600: EXTRACT_VECT_ELT should only bypass BUILD_VECTOR if the vectors have... X-Git-Tag: android-x86-7.1-r4~27652 X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=bf9a94abffb483e0aa77545a04ceb62ff5696e54;p=android-x86%2Fexternal-llvm.git AMDGPU/R600: EXTRACT_VECT_ELT should only bypass BUILD_VECTOR if the vectors have the same number of elements. Fixes R600 piglit regressions since r280298 Differential Revision: https://reviews.llvm.org/D24174 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@280535 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/lib/Target/AMDGPU/R600ISelLowering.cpp b/lib/Target/AMDGPU/R600ISelLowering.cpp index 8c252e8c725..da4cf387c8d 100644 --- a/lib/Target/AMDGPU/R600ISelLowering.cpp +++ b/lib/Target/AMDGPU/R600ISelLowering.cpp @@ -1820,7 +1820,9 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N, } } if (Arg.getOpcode() == ISD::BITCAST && - Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) { + Arg.getOperand(0).getOpcode() == ISD::BUILD_VECTOR && + (Arg.getOperand(0).getValueType().getVectorNumElements() == + Arg.getValueType().getVectorNumElements())) { if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N->getOperand(1))) { unsigned Element = Const->getZExtValue(); return DAG.getNode(ISD::BITCAST, DL, N->getVTList(), diff --git a/test/CodeGen/AMDGPU/bitcast.ll b/test/CodeGen/AMDGPU/amdgcn.bitcast.ll similarity index 100% rename from test/CodeGen/AMDGPU/bitcast.ll rename to test/CodeGen/AMDGPU/amdgcn.bitcast.ll diff --git a/test/CodeGen/AMDGPU/r600.bitcast.ll b/test/CodeGen/AMDGPU/r600.bitcast.ll new file mode 100644 index 00000000000..49441ee8d18 --- /dev/null +++ b/test/CodeGen/AMDGPU/r600.bitcast.ll @@ -0,0 +1,107 @@ +; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s + +; This test just checks that the compiler doesn't crash. 
+ + +; FUNC-LABEL: {{^}}i8ptr_v16i8ptr: +; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.XYZW]], [[ST_PTR:T[0-9]+\.[XYZW]]] +; EG: VTX_READ_128 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]] +; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z +; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal +define void @i8ptr_v16i8ptr(<16 x i8> addrspace(1)* %out, i8 addrspace(1)* %in) { +entry: + %0 = bitcast i8 addrspace(1)* %in to <16 x i8> addrspace(1)* + %1 = load <16 x i8>, <16 x i8> addrspace(1)* %0 + store <16 x i8> %1, <16 x i8> addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}f32_to_v2i16: +; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.[XYZW]]], [[ST_PTR:T[0-9]+\.[XYZW]]] +; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]] +; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z +; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal +define void @f32_to_v2i16(<2 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind { + %load = load float, float addrspace(1)* %in, align 4 + %bc = bitcast float %load to <2 x i16> + store <2 x i16> %bc, <2 x i16> addrspace(1)* %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}v2i16_to_f32: +; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.[XYZW]]], [[ST_PTR:T[0-9]+\.[XYZW]]] +; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]] +; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z +; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal +define void @v2i16_to_f32(float addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind { + %load = load <2 x i16>, <2 x i16> addrspace(1)* %in, align 4 + %bc = bitcast <2 x i16> %load to float + store float %bc, float addrspace(1)* %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}v4i8_to_i32: +; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.[XYZW]]], [[ST_PTR:T[0-9]+\.[XYZW]]] +; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]] +; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z +; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal +define void @v4i8_to_i32(i32 addrspace(1)* %out, <4 x i8> addrspace(1)* 
%in) nounwind { + %load = load <4 x i8>, <4 x i8> addrspace(1)* %in, align 4 + %bc = bitcast <4 x i8> %load to i32 + store i32 %bc, i32 addrspace(1)* %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}i32_to_v4i8: +; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.[XYZW]]], [[ST_PTR:T[0-9]+\.[XYZW]]] +; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]] +; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z +; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal +define void @i32_to_v4i8(<4 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind { + %load = load i32, i32 addrspace(1)* %in, align 4 + %bc = bitcast i32 %load to <4 x i8> + store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4 + ret void +} + +; FUNC-LABEL: {{^}}v2i16_to_v4i8: +; EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.[XYZW]]], [[ST_PTR:T[0-9]+\.[XYZW]]] +; EG: VTX_READ_32 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]] +; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z +; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal +define void @v2i16_to_v4i8(<4 x i8> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) nounwind { + %load = load <2 x i16>, <2 x i16> addrspace(1)* %in, align 4 + %bc = bitcast <2 x i16> %load to <4 x i8> + store <4 x i8> %bc, <4 x i8> addrspace(1)* %out, align 4 + ret void +} + +; This just checks for crash in BUILD_VECTOR/EXTRACT_ELEMENT combine +; the stack manipulation is tricky to follow +; TODO: This should only use one load +; FUNC-LABEL: {{^}}v4i16_extract_i8: +; EG: MEM_RAT MSKOR {{T[0-9]+\.XW}}, [[ST_PTR:T[0-9]+\.[XYZW]]] +; EG: VTX_READ_16 +; EG: VTX_READ_16 +; EG-DAG: BFE_UINT +; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal +define void @v4i16_extract_i8(i8 addrspace(1)* %out, <4 x i16> addrspace(1)* %in) nounwind { + %load = load <4 x i16>, <4 x i16> addrspace(1)* %in, align 2 + %bc = bitcast <4 x i16> %load to <8 x i8> + %element = extractelement <8 x i8> %bc, i32 5 + store i8 %element, i8 addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}bitcast_v2i32_to_f64: +; 
EG: MEM_RAT_CACHELESS STORE_RAW [[DATA:T[0-9]+\.XY]], [[ST_PTR:T[0-9]+\.[XYZW]]] +; EG: VTX_READ_64 [[DATA]], [[LD_PTR:T[0-9]+\.[XYZW]]] +; EG-DAG: MOV {{[\* ]*}}[[LD_PTR]], KC0[2].Z +; EG-DAG: LSHR {{[\* ]*}}[[ST_PTR]], KC0[2].Y, literal +define void @bitcast_v2i32_to_f64(double addrspace(1)* %out, <2 x i32> addrspace(1)* %in) { + %val = load <2 x i32>, <2 x i32> addrspace(1)* %in, align 8 + %bc = bitcast <2 x i32> %val to double + store double %bc, double addrspace(1)* %out, align 8 + ret void +} + diff --git a/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll b/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll new file mode 100644 index 00000000000..babae9ead27 --- /dev/null +++ b/test/CodeGen/AMDGPU/xfail.r600.bitcast.ll @@ -0,0 +1,46 @@ +; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s +; XFAIL: * + +; This is the failing part of the r600 bitcast tests + +; TODO: enable doubles +; FUNC-LABEL: {{^}}bitcast_f64_to_v2i32: +define void @bitcast_f64_to_v2i32(<2 x i32> addrspace(1)* %out, double addrspace(1)* %in) { + %val = load double, double addrspace(1)* %in, align 8 + %add = fadd double %val, 4.0 + %bc = bitcast double %add to <2 x i32> + store <2 x i32> %bc, <2 x i32> addrspace(1)* %out, align 8 + ret void +} + +; FUNC-LABEL: {{^}}bitcast_v2i64_to_v2f64: +define void @bitcast_v2i64_to_v2f64(i32 %cond, <2 x double> addrspace(1)* %out, <2 x i64> %value) { +entry: + %cmp0 = icmp eq i32 %cond, 0 + br i1 %cmp0, label %if, label %end + +if: + %cast = bitcast <2 x i64> %value to <2 x double> + br label %end + +end: + %phi = phi <2 x double> [zeroinitializer, %entry], [%cast, %if] + store <2 x double> %phi, <2 x double> addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}bitcast_v2f64_to_v2i64: +define void @bitcast_v2f64_to_v2i64(i32 %cond, <2 x i64> addrspace(1)* %out, <2 x double> %value) { +entry: + %cmp0 = icmp eq i32 %cond, 0 + br i1 %cmp0, label %if, label %end + +if: + %cast = bitcast <2 x double> %value to <2 x i64> + br label 
%end + +end: + %phi = phi <2 x i64> [zeroinitializer, %entry], [%cast, %if] + store <2 x i64> %phi, <2 x i64> addrspace(1)* %out + ret void +}