From f5a6019248db0d18592a6541c549e61119fecb91 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Wed, 12 Aug 2015 00:29:10 +0000
Subject: [PATCH] [x86] enable machine combiner reassociations for 256-bit vector FP mul/add

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@244705 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrInfo.cpp      |  4 +++
 test/CodeGen/X86/machine-combiner.ll | 60 ++++++++++++++++++++++++++++++++++++
 test/CodeGen/X86/sqrt-fastmath.ll    |  4 +--
 3 files changed, 66 insertions(+), 2 deletions(-)

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index ce0aa8b7320..f6fb5bf0261 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -6401,10 +6401,14 @@ static bool isAssociativeAndCommutative(const MachineInstr &Inst) {
   case X86::MULSSrr:
   case X86::VADDPDrr:
   case X86::VADDPSrr:
+  case X86::VADDPDYrr:
+  case X86::VADDPSYrr:
   case X86::VADDSDrr:
   case X86::VADDSSrr:
   case X86::VMULPDrr:
   case X86::VMULPSrr:
+  case X86::VMULPDYrr:
+  case X86::VMULPSYrr:
   case X86::VMULSDrr:
   case X86::VMULSSrr:
     return Inst.getParent()->getParent()->getTarget().Options.UnsafeFPMath;
diff --git a/test/CodeGen/X86/machine-combiner.ll b/test/CodeGen/X86/machine-combiner.ll
index fb21c19744f..82da87fb63d 100644
--- a/test/CodeGen/X86/machine-combiner.ll
+++ b/test/CodeGen/X86/machine-combiner.ll
@@ -298,3 +298,63 @@ define <2 x double> @reassociate_muls_v2f64(<2 x double> %x0, <2 x double> %x1,
   ret <2 x double> %t2
 }
 
+; Verify that AVX 256-bit vector single-precision adds are reassociated.
+
+define <8 x float> @reassociate_adds_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; AVX-LABEL: reassociate_adds_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vaddps %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+  %t0 = fmul <8 x float> %x0, %x1
+  %t1 = fadd <8 x float> %x2, %t0
+  %t2 = fadd <8 x float> %x3, %t1
+  ret <8 x float> %t2
+}
+
+; Verify that AVX 256-bit vector double-precision adds are reassociated.
+
+define <4 x double> @reassociate_adds_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; AVX-LABEL: reassociate_adds_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vaddpd %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+  %t0 = fmul <4 x double> %x0, %x1
+  %t1 = fadd <4 x double> %x2, %t0
+  %t2 = fadd <4 x double> %x3, %t1
+  ret <4 x double> %t2
+}
+
+; Verify that AVX 256-bit vector single-precision multiplies are reassociated.
+
+define <8 x float> @reassociate_muls_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
+; AVX-LABEL: reassociate_muls_v8f32:
+; AVX: # BB#0:
+; AVX-NEXT: vaddps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmulps %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vmulps %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+  %t0 = fadd <8 x float> %x0, %x1
+  %t1 = fmul <8 x float> %x2, %t0
+  %t2 = fmul <8 x float> %x3, %t1
+  ret <8 x float> %t2
+}
+
+; Verify that AVX 256-bit vector double-precision multiplies are reassociated.
+
+define <4 x double> @reassociate_muls_v4f64(<4 x double> %x0, <4 x double> %x1, <4 x double> %x2, <4 x double> %x3) {
+; AVX-LABEL: reassociate_muls_v4f64:
+; AVX: # BB#0:
+; AVX-NEXT: vaddpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: vmulpd %ymm3, %ymm2, %ymm1
+; AVX-NEXT: vmulpd %ymm1, %ymm0, %ymm0
+; AVX-NEXT: retq
+  %t0 = fadd <4 x double> %x0, %x1
+  %t1 = fmul <4 x double> %x2, %t0
+  %t2 = fmul <4 x double> %x3, %t1
+  ret <4 x double> %t2
+}
+
diff --git a/test/CodeGen/X86/sqrt-fastmath.ll b/test/CodeGen/X86/sqrt-fastmath.ll
index daa6d7292e7..d743896062e 100644
--- a/test/CodeGen/X86/sqrt-fastmath.ll
+++ b/test/CodeGen/X86/sqrt-fastmath.ll
@@ -124,8 +124,8 @@ define <8 x float> @reciprocal_square_root_v8f32(<8 x float> %x) #0 {
 ; ESTIMATE-LABEL: reciprocal_square_root_v8f32:
 ; ESTIMATE: # BB#0:
 ; ESTIMATE-NEXT: vrsqrtps %ymm0, %ymm1
-; ESTIMATE-NEXT: vmulps %ymm1, %ymm1, %ymm2
-; ESTIMATE-NEXT: vmulps %ymm0, %ymm2, %ymm0
+; ESTIMATE-NEXT: vmulps %ymm0, %ymm1, %ymm0
+; ESTIMATE-NEXT: vmulps %ymm0, %ymm1, %ymm0
 ; ESTIMATE-NEXT: vaddps {{.*}}(%rip), %ymm0, %ymm0
 ; ESTIMATE-NEXT: vmulps {{.*}}(%rip), %ymm1, %ymm1
 ; ESTIMATE-NEXT: vmulps %ymm1, %ymm0, %ymm0
-- 
2.11.0
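Note that the new opcodes only make isAssociativeAndCommutative() return true when UnsafeFPMath is set, so the combine fires only under fast-math. A minimal standalone reproducer in LLVM IR is sketched below; the llc invocation shown in the comments is an assumption for illustration (machine-combiner.ll's actual RUN line is not part of this patch), not the checked-in test command.

; Minimal repro sketch. Assumed invocation (not from this patch):
;   llc < fadd-chain.ll -mtriple=x86_64-unknown-unknown -mattr=avx -enable-unsafe-fp-math
;
; The three fadds form a serial dependency chain of depth 3. With
; VADDPSYrr whitelisted as associative/commutative, the machine combiner
; can rewrite the chain as (x0 + x1) + (x2 + x3), so the two inner adds
; issue in parallel and the critical path drops to depth 2.
define <8 x float> @fadd_chain_v8f32(<8 x float> %x0, <8 x float> %x1, <8 x float> %x2, <8 x float> %x3) {
  %t0 = fadd <8 x float> %x0, %x1
  %t1 = fadd <8 x float> %x2, %t0
  %t2 = fadd <8 x float> %x3, %t1
  ret <8 x float> %t2
}

Without -enable-unsafe-fp-math (or the equivalent fast-math flags on the instructions), the same input should keep its original serial schedule, which is why every test above runs under unsafe FP math.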