From 6c8569fba676cdef1495a63868c1d8c333b39bd4 Mon Sep 17 00:00:00 2001
From: Matt Arsenault
Date: Fri, 22 Apr 2016 22:48:38 +0000
Subject: [PATCH] AMDGPU: Re-visit nodes in performAndCombine

This fixes test regressions when i64 loads/stores are made to promote.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@267240 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/AMDGPU/AMDGPUISelLowering.cpp  |  5 +++++
 test/CodeGen/AMDGPU/and.ll                | 15 +++++++++------
 test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll |  8 ++++----
 3 files changed, 18 insertions(+), 10 deletions(-)

diff --git a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 92ed678ba8f..4878e5b7a22 100644
--- a/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -2229,6 +2229,11 @@ SDValue AMDGPUTargetLowering::performAndCombine(SDNode *N,
     SDValue LoAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Lo, LoRHS);
     SDValue HiAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Hi, HiRHS);
 
+    // Re-visit the ands. It's possible we eliminated one of them and it could
+    // simplify the vector.
+    DCI.AddToWorklist(Lo.getNode());
+    DCI.AddToWorklist(Hi.getNode());
+
     SDValue Vec = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, LoAnd, HiAnd);
     return DAG.getNode(ISD::BITCAST, SL, MVT::i64, Vec);
   }
diff --git a/test/CodeGen/AMDGPU/and.ll b/test/CodeGen/AMDGPU/and.ll
index 530b7f2d9d2..338747c413b 100644
--- a/test/CodeGen/AMDGPU/and.ll
+++ b/test/CodeGen/AMDGPU/and.ll
@@ -213,12 +213,14 @@ define void @s_and_32_bit_constant_i64(i64 addrspace(1)* %out, i64 %a) {
 
 ; FUNC-LABEL: {{^}}s_and_multi_use_inline_imm_i64:
 ; SI: s_load_dwordx2
-; SI: s_load_dwordx2
-; SI: s_load_dwordx2
+; SI: s_load_dword [[A:s[0-9]+]]
+; SI: s_load_dword [[B:s[0-9]+]]
 ; SI: s_load_dwordx2
 ; SI-NOT: and
-; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 62
-; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 62
+; SI: s_lshl_b32 [[A]], [[A]], 1
+; SI: s_lshl_b32 [[B]], [[B]], 1
+; SI: s_and_b32 s{{[0-9]+}}, [[A]], 62
+; SI: s_and_b32 s{{[0-9]+}}, [[B]], 62
 ; SI-NOT: and
 ; SI: buffer_store_dwordx2
 define void @s_and_multi_use_inline_imm_i64(i64 addrspace(1)* %out, i64 %a, i64 %b, i64 %c) {
@@ -336,9 +338,10 @@ define void @s_and_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %
 }
 
 ; FUNC-LABEL: {{^}}s_and_inline_imm_64_i64_noshrink:
-; SI: s_lshl_b64 s{{\[}}[[VALLO:[0-9]+]]:{{[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 1
+; SI: s_load_dword [[A:s[0-9]+]]
+; SI: s_lshl_b32 [[A]], [[A]], 1{{$}}
 ; SI-NOT: and
-; SI: s_and_b32 s{{[0-9]+}}, s[[VALLO]], 64
+; SI: s_and_b32 s{{[0-9]+}}, [[A]], 64
 ; SI-NOT: and
 ; SI: s_add_u32
 ; SI-NEXT: s_addc_u32
diff --git a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
index 29dae1b40d2..f5ab732710a 100644
--- a/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
+++ b/test/CodeGen/AMDGPU/shift-and-i64-ubfe.ll
@@ -299,9 +299,9 @@ define void @v_uextract_bit_31_32_i64_trunc_i32(i32 addrspace(1)* %out, i64 addr
 }
 
 ; GCN-LABEL: {{^}}and_not_mask_i64:
-; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_lshr_b64 v{{\[}}[[SHRLO:[0-9]+]]:[[SHRHI:[0-9]+]]{{\]}}, [[VAL]], 20
-; GCN-DAG: v_and_b32_e32 v[[SHRLO]], 4, v[[SHRLO]]
-; GCN-DAG: v_mov_b32_e32 v[[SHRHI]], 0{{$}}
+; GCN: buffer_load_dwordx2 v{{\[}}[[VALLO:[0-9]+]]:[[VALHI:[0-9]+]]{{\]}}
+; GCN: v_lshrrev_b32_e32 [[SHR:v[0-9]+]], 20, v[[VALLO]]
+; GCN-DAG: v_and_b32_e32 v[[SHRLO:[0-9]+]], 4, [[SHR]]
+; GCN-DAG: v_mov_b32_e32 v[[SHRHI:[0-9]+]], 0{{$}}
 ; GCN-NOT: v[[SHRLO]]
 ; GCN-NOT: v[[SHRHI]]
-- 
2.11.0
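
For context, the hunk above sits in a combine that splits (and i64:x, C) into two i32 ands over the low and high halves of x. The sketch below reconstructs that shape to show where the two AddToWorklist calls land. It is a minimal sketch, not the tree's actual code: the standalone splitAnd64 wrapper and its parameters are invented for illustration, and the 2016-era header paths are assumed.

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

// Sketch: split (and i64:X, C) into two i32 ands over the halves of X.
// splitAnd64 is a hypothetical wrapper; only the body mirrors the
// pattern in performAndCombine.
static SDValue splitAnd64(SelectionDAG &DAG,
                          TargetLowering::DAGCombinerInfo &DCI,
                          SDLoc SL, SDValue X, uint64_t C) {
  // View the 64-bit value as a pair of 32-bit elements.
  SDValue Vec = DAG.getNode(ISD::BITCAST, SL, MVT::v2i32, X);
  SDValue Lo = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec,
                           DAG.getConstant(0, SL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, SL, MVT::i32, Vec,
                           DAG.getConstant(1, SL, MVT::i32));

  // And each half with the matching half of the constant.
  SDValue LoAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Lo,
                              DAG.getConstant(Lo_32(C), SL, MVT::i32));
  SDValue HiAnd = DAG.getNode(ISD::AND, SL, MVT::i32, Hi,
                              DAG.getConstant(Hi_32(C), SL, MVT::i32));

  // The point of the patch: put the extracts back on the combiner's
  // worklist, so a half whose and folds away (mask of 0 or all ones)
  // is simplified before the build_vector below is visited.
  DCI.AddToWorklist(Lo.getNode());
  DCI.AddToWorklist(Hi.getNode());

  SDValue R = DAG.getNode(ISD::BUILD_VECTOR, SL, MVT::v2i32, LoAnd, HiAnd);
  return DAG.getNode(ISD::BITCAST, SL, MVT::i64, R);
}

The test updates follow from this: once one half folds, the surviving operations are 32-bit, which is what the new s_lshl_b32 and v_lshrrev_b32_e32 check lines expect in place of the old 64-bit s_lshl_b64 and v_lshr_b64.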