From 0991c314d7c1a2052963dc89af1d2f07134488b6 Mon Sep 17 00:00:00 2001
From: Tom Stellard
Date: Fri, 16 Aug 2013 23:51:24 +0000
Subject: [PATCH] R600: Expand vector float operations for both SI and R600
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Reviewed-by: Michel Dänzer

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@188596 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/R600/AMDGPUISelLowering.cpp | 22 +++++++++++++---
 lib/Target/R600/R600ISelLowering.cpp   |  9 -------
 test/CodeGen/R600/fadd.ll              | 48 ++++++++++++++++++----------------
 test/CodeGen/R600/fdiv.ll              | 46 ++++++++++++++++++++------------
 test/CodeGen/R600/fmul.ll              | 46 ++++++++++++++++++--------------
 test/CodeGen/R600/fsub.ll              | 45 ++++++++++++++++++-------------
 6 files changed, 128 insertions(+), 88 deletions(-)

diff --git a/lib/Target/R600/AMDGPUISelLowering.cpp b/lib/Target/R600/AMDGPUISelLowering.cpp
index 746c4794d12..25b1e54f119 100644
--- a/lib/Target/R600/AMDGPUISelLowering.cpp
+++ b/lib/Target/R600/AMDGPUISelLowering.cpp
@@ -115,14 +115,14 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
   setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
   setOperationAction(ISD::VSELECT, MVT::v4f32, Expand);
 
-  static const int types[] = {
+  static const int IntTypes[] = {
     (int)MVT::v2i32,
     (int)MVT::v4i32
   };
-  const size_t NumTypes = array_lengthof(types);
+  const size_t NumIntTypes = array_lengthof(IntTypes);
 
-  for (unsigned int x = 0; x < NumTypes; ++x) {
-    MVT::SimpleValueType VT = (MVT::SimpleValueType)types[x];
+  for (unsigned int x = 0; x < NumIntTypes; ++x) {
+    MVT::SimpleValueType VT = (MVT::SimpleValueType)IntTypes[x];
     //Expand the following operations for the current type by default
     setOperationAction(ISD::ADD, VT, Expand);
     setOperationAction(ISD::AND, VT, Expand);
@@ -141,6 +141,20 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(TargetMachine &TM) :
     setOperationAction(ISD::VSELECT, VT, Expand);
     setOperationAction(ISD::XOR, VT, Expand);
   }
+
+  static const int FloatTypes[] = {
+    (int)MVT::v2f32,
+    (int)MVT::v4f32
+  };
+  const size_t NumFloatTypes = array_lengthof(FloatTypes);
+
+  for (unsigned int x = 0; x < NumFloatTypes; ++x) {
+    MVT::SimpleValueType VT = (MVT::SimpleValueType)FloatTypes[x];
+    setOperationAction(ISD::FADD, VT, Expand);
+    setOperationAction(ISD::FDIV, VT, Expand);
+    setOperationAction(ISD::FMUL, VT, Expand);
+    setOperationAction(ISD::FSUB, VT, Expand);
+  }
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/R600/R600ISelLowering.cpp b/lib/Target/R600/R600ISelLowering.cpp
index e10af2b16ba..b822431fab3 100644
--- a/lib/Target/R600/R600ISelLowering.cpp
+++ b/lib/Target/R600/R600ISelLowering.cpp
@@ -38,15 +38,6 @@ R600TargetLowering::R600TargetLowering(TargetMachine &TM) :
 
   computeRegisterProperties();
 
-  setOperationAction(ISD::FADD, MVT::v4f32, Expand);
-  setOperationAction(ISD::FADD, MVT::v2f32, Expand);
-  setOperationAction(ISD::FMUL, MVT::v4f32, Expand);
-  setOperationAction(ISD::FMUL, MVT::v2f32, Expand);
-  setOperationAction(ISD::FDIV, MVT::v4f32, Expand);
-  setOperationAction(ISD::FDIV, MVT::v2f32, Expand);
-  setOperationAction(ISD::FSUB, MVT::v4f32, Expand);
-  setOperationAction(ISD::FSUB, MVT::v2f32, Expand);
-
   setOperationAction(ISD::FCOS, MVT::f32, Custom);
   setOperationAction(ISD::FSIN, MVT::f32, Custom);
 
diff --git a/test/CodeGen/R600/fadd.ll b/test/CodeGen/R600/fadd.ll
index 2716958e503..6d459679d43 100644
--- a/test/CodeGen/R600/fadd.ll
+++ b/test/CodeGen/R600/fadd.ll
@@ -1,23 +1,23 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
 
-; CHECK: @fadd_f32
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
-define void @fadd_f32() {
-  %r0 = call float @llvm.R600.load.input(i32 0)
-  %r1 = call float @llvm.R600.load.input(i32 1)
-  %r2 = fadd float %r0, %r1
-  call void @llvm.AMDGPU.store.output(float %r2, i32 0)
+; R600-CHECK: @fadd_f32
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; SI-CHECK: @fadd_f32
+; SI-CHECK: V_ADD_F32
+define void @fadd_f32(float addrspace(1)* %out, float %a, float %b) {
+entry:
+  %0 = fadd float %a, %b
+  store float %0, float addrspace(1)* %out
   ret void
 }
 
-declare float @llvm.R600.load.input(i32) readnone
-
-declare void @llvm.AMDGPU.store.output(float, i32)
-
-; CHECK: @fadd_v2f32
-; CHECK-DAG: ADD * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
-; CHECK-DAG: ADD * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
+; R600-CHECK: @fadd_v2f32
+; R600-CHECK-DAG: ADD * T{{[0-9]\.[XYZW]}}, KC0[3].X, KC0[3].Z
+; R600-CHECK-DAG: ADD * T{{[0-9]\.[XYZW]}}, KC0[2].W, KC0[3].Y
+; SI-CHECK: @fadd_v2f32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
 define void @fadd_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fadd <2 x float> %a, %b
@@ -25,12 +25,16 @@ entry:
   ret void
 }
 
-; CHECK: @fadd_v4f32
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
+; R600-CHECK: @fadd_v4f32
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; SI-CHECK: @fadd_v4f32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
+; SI-CHECK: V_ADD_F32
 define void @fadd_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
   %a = load <4 x float> addrspace(1) * %in
diff --git a/test/CodeGen/R600/fdiv.ll b/test/CodeGen/R600/fdiv.ll
index 6798eacfa3f..84e9f677368 100644
--- a/test/CodeGen/R600/fdiv.ll
+++ b/test/CodeGen/R600/fdiv.ll
@@ -1,14 +1,20 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
 
 ; These tests check that fdiv is expanded correctly and also test that the
 ; scheduler is scheduling the RECIP_IEEE and MUL_IEEE instructions in separate
 ; instruction groups.
 
-; CHECK: @fdiv_v2f32
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Z
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Y
-; CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW]}}, KC0[3].X, PS
-; CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[2].W, PS
+; R600-CHECK: @fdiv_v2f32
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Z
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[3].Y
+; R600-CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW]}}, KC0[3].X, PS
+; R600-CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW]}}, KC0[2].W, PS
+; SI-CHECK: @fdiv_v2f32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
 define void @fdiv_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fdiv <2 x float> %a, %b
@@ -16,16 +22,24 @@ entry:
   ret void
 }
 
-; CHECK: @fdiv_v4f32
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-; CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-; CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-; CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
-
+; R600-CHECK: @fdiv_v4f32
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK-DAG: RECIP_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; R600-CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; R600-CHECK-DAG: MUL_IEEE T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; R600-CHECK-DAG: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}, PS
+; SI-CHECK: @fdiv_v4f32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
+; SI-CHECK-DAG: V_RCP_F32
+; SI-CHECK-DAG: V_MUL_F32
 define void @fdiv_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
   %a = load <4 x float> addrspace(1) * %in
diff --git a/test/CodeGen/R600/fmul.ll b/test/CodeGen/R600/fmul.ll
index 471b04e6f37..f2b3e2c17db 100644
--- a/test/CodeGen/R600/fmul.ll
+++ b/test/CodeGen/R600/fmul.ll
@@ -1,23 +1,27 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
 
-; CHECK: @fmul_f32
-; CHECK: MUL_IEEE * {{T[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
-define void @fmul_f32() {
-  %r0 = call float @llvm.R600.load.input(i32 0)
-  %r1 = call float @llvm.R600.load.input(i32 1)
-  %r2 = fmul float %r0, %r1
-  call void @llvm.AMDGPU.store.output(float %r2, i32 0)
-  ret void
+; R600-CHECK: @fmul_f32
+; R600-CHECK: MUL_IEEE * {{T[0-9]+\.[XYZW]}}, KC0[2].Z, KC0[2].W
+; SI-CHECK: @fmul_f32
+; SI-CHECK: V_MUL_F32
+define void @fmul_f32(float addrspace(1)* %out, float %a, float %b) {
+entry:
+  %0 = fmul float %a, %b
+  store float %0, float addrspace(1)* %out
+  ret void
 }
 
 declare float @llvm.R600.load.input(i32) readnone
 
 declare void @llvm.AMDGPU.store.output(float, i32)
 
-; CHECK: @fmul_v2f32
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
+; R600-CHECK: @fmul_v2f32
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW]}}
+; SI-CHECK: @fmul_v2f32
+; SI-CHECK: V_MUL_F32
+; SI-CHECK: V_MUL_F32
 define void @fmul_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fmul <2 x float> %a, %b
@@ -25,12 +29,16 @@ entry:
   ret void
 }
 
-; CHECK: @fmul_v4f32
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-; CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
-
+; R600-CHECK: @fmul_v4f32
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; R600-CHECK: MUL_IEEE * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
+; SI-CHECK: @fmul_v4f32
+; SI-CHECK: V_MUL_F32
+; SI-CHECK: V_MUL_F32
+; SI-CHECK: V_MUL_F32
+; SI-CHECK: V_MUL_F32
 define void @fmul_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
   %a = load <4 x float> addrspace(1) * %in
diff --git a/test/CodeGen/R600/fsub.ll b/test/CodeGen/R600/fsub.ll
index b45aafff5a1..1608c3aced5 100644
--- a/test/CodeGen/R600/fsub.ll
+++ b/test/CodeGen/R600/fsub.ll
@@ -1,23 +1,27 @@
-; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s
+; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=R600-CHECK
+; RUN: llc < %s -march=r600 -mcpu=SI | FileCheck %s --check-prefix=SI-CHECK
 
-; CHECK: @fsub_f32
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-
-define void @fsub_f32() {
-  %r0 = call float @llvm.R600.load.input(i32 0)
-  %r1 = call float @llvm.R600.load.input(i32 1)
-  %r2 = fsub float %r0, %r1
-  call void @llvm.AMDGPU.store.output(float %r2, i32 0)
-  ret void
+; R600-CHECK: @fsub_f32
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW]}}, KC0[2].Z, -KC0[2].W
+; SI-CHECK: @fsub_f32
+; SI-CHECK: V_SUB_F32
+define void @fsub_f32(float addrspace(1)* %out, float %a, float %b) {
+entry:
+  %0 = fsub float %a, %b
+  store float %0, float addrspace(1)* %out
+  ret void
 }
 
 declare float @llvm.R600.load.input(i32) readnone
 
 declare void @llvm.AMDGPU.store.output(float, i32)
 
-; CHECK: @fsub_v2f32
-; CHECK-DAG: ADD * T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
-; CHECK-DAG: ADD * T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
+; R600-CHECK: @fsub_v2f32
+; R600-CHECK-DAG: ADD * T{{[0-9]+\.[XYZW]}}, KC0[3].X, -KC0[3].Z
+; R600-CHECK-DAG: ADD * T{{[0-9]+\.[XYZW]}}, KC0[2].W, -KC0[3].Y
+; SI-CHECK: @fsub_v2f32
+; SI-CHECK: V_SUB_F32
+; SI-CHECK: V_SUB_F32
 define void @fsub_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, <2 x float> %b) {
 entry:
   %0 = fsub <2 x float> %a, %b
@@ -25,11 +29,16 @@ entry:
   ret void
 }
 
-; CHECK: @fsub_v4f32
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
-; CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600-CHECK: @fsub_v4f32
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; R600-CHECK: ADD * T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], -T[0-9]+\.[XYZW]}}
+; SI-CHECK: @fsub_v4f32
+; SI-CHECK: V_SUB_F32
+; SI-CHECK: V_SUB_F32
+; SI-CHECK: V_SUB_F32
+; SI-CHECK: V_SUB_F32
 define void @fsub_v4f32(<4 x float> addrspace(1)* %out, <4 x float> addrspace(1)* %in) {
   %b_ptr = getelementptr <4 x float> addrspace(1)* %in, i32 1
   %a = load <4 x float> addrspace(1) * %in
-- 
2.11.0
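
A note on what the patch relies on: marking ISD::FADD, ISD::FSUB, ISD::FMUL and ISD::FDIV as Expand for v2f32 and v4f32 in the shared AMDGPUTargetLowering constructor asks the SelectionDAG legalizer to unroll each vector operation into one scalar operation per lane, which both R600 and SI already select. That is why the rewritten tests expect two ADD / V_ADD_F32 (and so on) for v2f32 and four for v4f32, and why moving the calls out of R600TargetLowering lets SI pick up the same behaviour without duplicating the eight setOperationAction lines. The self-contained C++ sketch below only models that per-lane unrolling to make the test expectations concrete; Vec2f and expandFAdd are illustrative names, not LLVM APIs.

#include <array>
#include <cstdio>

// Toy stand-in for a <2 x float> value; not an LLVM type.
using Vec2f = std::array<float, 2>;

// Models the legalizer's Expand action on a vector FADD: the single vector
// operation is unrolled into one scalar add per lane, mirroring why the
// v2f32 tests check for two ADD / V_ADD_F32 and the v4f32 tests for four.
static Vec2f expandFAdd(const Vec2f &A, const Vec2f &B) {
  Vec2f R;
  for (std::size_t Lane = 0; Lane < R.size(); ++Lane)
    R[Lane] = A[Lane] + B[Lane]; // one scalar fadd per lane
  return R;
}

int main() {
  const Vec2f R = expandFAdd({1.0f, 2.0f}, {3.0f, 4.0f});
  std::printf("%.1f %.1f\n", R[0], R[1]); // prints: 4.0 6.0
  return 0;
}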