From: Simon Pilgrim
Date: Mon, 21 Nov 2016 15:28:21 +0000 (+0000)
Subject: [X86][SSE] Add SSE reciprocal estimate tests
X-Git-Tag: android-x86-7.1-r4~24197
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=ab814ecdff8983c9dcfef7aa7de457288a6d7f2e;p=android-x86%2Fexternal-llvm.git

[X86][SSE] Add SSE reciprocal estimate tests

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@287543 91177308-0d34-0410-b5e6-96231b3b80d8
---
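
The estimate sequences checked below implement Newton-Raphson refinement of
the hardware reciprocal estimate: for an estimate r0 ~= 1/a, one step computes
r1 = r0 + r0*(1 - a*r0), roughly doubling the number of correct bits (rcpss
and rcpps guarantee only about 12 bits on their own). A minimal C sketch of
the scalar one-step sequence follows; the helper name is illustrative and is
not part of the tests:

    #include <xmmintrin.h>

    /* One Newton-Raphson step on the SSE reciprocal estimate:
       r1 = r0 + r0*(1 - a*r0). This mirrors the rcpss, mulss, subss,
       mulss, addss sequence checked in f32_one_step below. */
    static float recip_one_step(float a) {
        float r0 = _mm_cvtss_f32(_mm_rcp_ss(_mm_set_ss(a)));
        return r0 + r0 * (1.0f - a * r0);
    }
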
diff --git a/test/CodeGen/X86/recip-fastmath.ll b/test/CodeGen/X86/recip-fastmath.ll
index bd622d0442e..0a99254cd62 100644
--- a/test/CodeGen/X86/recip-fastmath.ll
+++ b/test/CodeGen/X86/recip-fastmath.ll
@@ -1,5 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
 
 ; If the target's divss/divps instructions are substantially
 ; slower than rcpss/rcpps with a Newton-Raphson refinement,
@@ -10,17 +11,34 @@
 ; differences of x86 reciprocal estimates.
 
 define float @f32_no_estimate(float %x) #0 {
+; SSE-LABEL: f32_no_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    divss %xmm0, %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: f32_no_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vdivss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
-;
   %div = fdiv fast float 1.0, %x
   ret float %div
 }
 
 define float @f32_one_step(float %x) #1 {
+; SSE-LABEL: f32_one_step:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpss %xmm0, %xmm2
+; SSE-NEXT:    mulss %xmm2, %xmm0
+; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    subss %xmm0, %xmm1
+; SSE-NEXT:    mulss %xmm2, %xmm1
+; SSE-NEXT:    addss %xmm2, %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: f32_one_step:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
@@ -30,12 +48,28 @@ define float @f32_one_step(float %x) #1 {
 ; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
-;
   %div = fdiv fast float 1.0, %x
   ret float %div
 }
 
 define float @f32_two_step(float %x) #2 {
+; SSE-LABEL: f32_two_step:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpss %xmm0, %xmm2
+; SSE-NEXT:    movaps %xmm0, %xmm3
+; SSE-NEXT:    mulss %xmm2, %xmm3
+; SSE-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; SSE-NEXT:    movaps %xmm1, %xmm4
+; SSE-NEXT:    subss %xmm3, %xmm4
+; SSE-NEXT:    mulss %xmm2, %xmm4
+; SSE-NEXT:    addss %xmm2, %xmm4
+; SSE-NEXT:    mulss %xmm4, %xmm0
+; SSE-NEXT:    subss %xmm0, %xmm1
+; SSE-NEXT:    mulss %xmm4, %xmm1
+; SSE-NEXT:    addss %xmm4, %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: f32_two_step:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vrcpss %xmm0, %xmm0, %xmm1
@@ -49,23 +83,39 @@ define float @f32_two_step(float %x) #2 {
 ; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vaddss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
-;
   %div = fdiv fast float 1.0, %x
   ret float %div
 }
 
 define <4 x float> @v4f32_no_estimate(<4 x float> %x) #0 {
+; SSE-LABEL: v4f32_no_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT:    divps %xmm0, %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: v4f32_no_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; AVX-NEXT:    vdivps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
-;
   %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <4 x float> %div
 }
 
 define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
+; SSE-LABEL: v4f32_one_step:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpps %xmm0, %xmm2
+; SSE-NEXT:    mulps %xmm2, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT:    subps %xmm0, %xmm1
+; SSE-NEXT:    mulps %xmm2, %xmm1
+; SSE-NEXT:    addps %xmm2, %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: v4f32_one_step:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vrcpps %xmm0, %xmm1
@@ -75,12 +125,28 @@ define <4 x float> @v4f32_one_step(<4 x float> %x) #1 {
 ; AVX-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vaddps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
-;
   %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <4 x float> %div
 }
 
 define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
+; SSE-LABEL: v4f32_two_step:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpps %xmm0, %xmm2
+; SSE-NEXT:    movaps %xmm0, %xmm3
+; SSE-NEXT:    mulps %xmm2, %xmm3
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT:    movaps %xmm1, %xmm4
+; SSE-NEXT:    subps %xmm3, %xmm4
+; SSE-NEXT:    mulps %xmm2, %xmm4
+; SSE-NEXT:    addps %xmm2, %xmm4
+; SSE-NEXT:    mulps %xmm4, %xmm0
+; SSE-NEXT:    subps %xmm0, %xmm1
+; SSE-NEXT:    mulps %xmm4, %xmm1
+; SSE-NEXT:    addps %xmm4, %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: v4f32_two_step:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vrcpps %xmm0, %xmm1
@@ -94,23 +160,49 @@ define <4 x float> @v4f32_two_step(<4 x float> %x) #2 {
 ; AVX-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    vaddps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
-;
   %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <4 x float> %div
 }
 
 define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
+; SSE-LABEL: v8f32_no_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    divps %xmm0, %xmm3
+; SSE-NEXT:    divps %xmm1, %xmm2
+; SSE-NEXT:    movaps %xmm3, %xmm0
+; SSE-NEXT:    movaps %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: v8f32_no_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; AVX-NEXT:    vdivps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
-;
   %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <8 x float> %div
 }
 
 define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
+; SSE-LABEL: v8f32_one_step:
+; SSE:       # BB#0:
+; SSE-NEXT:    rcpps %xmm0, %xmm4
+; SSE-NEXT:    mulps %xmm4, %xmm0
+; SSE-NEXT:    movaps {{.*#+}} xmm2 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT:    movaps %xmm2, %xmm3
+; SSE-NEXT:    subps %xmm0, %xmm3
+; SSE-NEXT:    mulps %xmm4, %xmm3
+; SSE-NEXT:    addps %xmm4, %xmm3
+; SSE-NEXT:    rcpps %xmm1, %xmm0
+; SSE-NEXT:    mulps %xmm0, %xmm1
+; SSE-NEXT:    subps %xmm1, %xmm2
+; SSE-NEXT:    mulps %xmm0, %xmm2
+; SSE-NEXT:    addps %xmm0, %xmm2
+; SSE-NEXT:    movaps %xmm3, %xmm0
+; SSE-NEXT:    movaps %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: v8f32_one_step:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vrcpps %ymm0, %ymm1
@@ -120,12 +212,41 @@ define <8 x float> @v8f32_one_step(<8 x float> %x) #1 {
 ; AVX-NEXT:    vmulps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    vaddps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
-;
   %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <8 x float> %div
 }
 
 define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
+; SSE-LABEL: v8f32_two_step:
+; SSE:       # BB#0:
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    rcpps %xmm0, %xmm3
+; SSE-NEXT:    movaps %xmm0, %xmm4
+; SSE-NEXT:    mulps %xmm3, %xmm4
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT:    movaps %xmm1, %xmm5
+; SSE-NEXT:    subps %xmm4, %xmm5
+; SSE-NEXT:    mulps %xmm3, %xmm5
+; SSE-NEXT:    addps %xmm3, %xmm5
+; SSE-NEXT:    mulps %xmm5, %xmm0
+; SSE-NEXT:    movaps %xmm1, %xmm3
+; SSE-NEXT:    subps %xmm0, %xmm3
+; SSE-NEXT:    mulps %xmm5, %xmm3
+; SSE-NEXT:    addps %xmm5, %xmm3
+; SSE-NEXT:    rcpps %xmm2, %xmm0
+; SSE-NEXT:    movaps %xmm2, %xmm4
+; SSE-NEXT:    mulps %xmm0, %xmm4
+; SSE-NEXT:    movaps %xmm1, %xmm5
+; SSE-NEXT:    subps %xmm4, %xmm5
+; SSE-NEXT:    mulps %xmm0, %xmm5
+; SSE-NEXT:    addps %xmm0, %xmm5
+; SSE-NEXT:    mulps %xmm5, %xmm2
+; SSE-NEXT:    subps %xmm2, %xmm1
+; SSE-NEXT:    mulps %xmm5, %xmm1
+; SSE-NEXT:    addps %xmm5, %xmm1
+; SSE-NEXT:    movaps %xmm3, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: v8f32_two_step:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vrcpps %ymm0, %ymm1
@@ -139,7 +260,6 @@ define <8 x float> @v8f32_two_step(<8 x float> %x) #2 {
 ; AVX-NEXT:    vmulps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    vaddps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
-;
   %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %x
   ret <8 x float> %div
 }
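
The two_step variants apply the same correction twice; each step roughly
doubles the accurate bits, so two steps exceed single precision. (The #1 and
#2 attribute sets are defined at the end of the test file, outside these
hunks, and presumably request one and two refinement steps respectively.)
Note also that without 256-bit registers the <8 x float> cases are split into
two 128-bit halves, which is why every v8f32 SSE block repeats the sequence
for %xmm0 and %xmm1. A sketch of the two-step scalar refinement, under the
same assumptions as the sketch above:

    #include <xmmintrin.h>

    /* Two Newton-Raphson steps on the SSE reciprocal estimate; after
       the second step the result is accurate to roughly full float
       precision. Mirrors the f32_two_step checks above. */
    static float recip_two_step(float a) {
        float r = _mm_cvtss_f32(_mm_rcp_ss(_mm_set_ss(a)));
        r = r + r * (1.0f - a * r);  /* first refinement step  */
        r = r + r * (1.0f - a * r);  /* second refinement step */
        return r;
    }
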
diff --git a/test/CodeGen/X86/sqrt-fastmath.ll b/test/CodeGen/X86/sqrt-fastmath.ll
index 68424c60aa6..af2dcc495f5 100644
--- a/test/CodeGen/X86/sqrt-fastmath.ll
+++ b/test/CodeGen/X86/sqrt-fastmath.ll
@@ -1,5 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=SSE
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX
 
 declare double @__sqrt_finite(double)
 declare float @__sqrtf_finite(float)
@@ -10,11 +11,15 @@
 declare <8 x float> @llvm.sqrt.v8f32(<8 x float>)
 
 define double @finite_f64_no_estimate(double %d) #0 {
+; SSE-LABEL: finite_f64_no_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    sqrtsd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: finite_f64_no_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
-;
   %call = tail call double @__sqrt_finite(double %d) #2
   ret double %call
 }
@@ -22,26 +27,49 @@ define double @finite_f64_no_estimate(double %d) #0 {
 ; No estimates for doubles.
 
 define double @finite_f64_estimate(double %d) #1 {
+; SSE-LABEL: finite_f64_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    sqrtsd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: finite_f64_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
-;
   %call = tail call double @__sqrt_finite(double %d) #2
   ret double %call
 }
 
 define float @finite_f32_no_estimate(float %f) #0 {
+; SSE-LABEL: finite_f32_no_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    sqrtss %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: finite_f32_no_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    retq
-;
   %call = tail call float @__sqrtf_finite(float %f) #2
   ret float %call
 }
 
 define float @finite_f32_estimate(float %f) #1 {
+; SSE-LABEL: finite_f32_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    rsqrtss %xmm0, %xmm1
+; SSE-NEXT:    movaps %xmm0, %xmm2
+; SSE-NEXT:    mulss %xmm1, %xmm2
+; SSE-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
+; SSE-NEXT:    mulss %xmm2, %xmm3
+; SSE-NEXT:    mulss %xmm1, %xmm2
+; SSE-NEXT:    addss {{.*}}(%rip), %xmm2
+; SSE-NEXT:    mulss %xmm3, %xmm2
+; SSE-NEXT:    xorps %xmm1, %xmm1
+; SSE-NEXT:    cmpeqss %xmm1, %xmm0
+; SSE-NEXT:    andnps %xmm2, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: finite_f32_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm1
@@ -54,18 +82,16 @@ define float @finite_f32_estimate(float %f) #1 {
 ; AVX-NEXT:    vcmpeqss %xmm2, %xmm0, %xmm0
 ; AVX-NEXT:    vandnps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT:    retq
-;
   %call = tail call float @__sqrtf_finite(float %f) #2
   ret float %call
 }
 
 define x86_fp80 @finite_f80_no_estimate(x86_fp80 %ld) #0 {
-; AVX-LABEL: finite_f80_no_estimate:
-; AVX:       # BB#0:
-; AVX-NEXT:    fldt {{[0-9]+}}(%rsp)
-; AVX-NEXT:    fsqrt
-; AVX-NEXT:    retq
-;
+; CHECK-LABEL: finite_f80_no_estimate:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fsqrt
+; CHECK-NEXT:    retq
   %call = tail call x86_fp80 @__sqrtl_finite(x86_fp80 %ld) #2
   ret x86_fp80 %call
 }
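
finite_f32_estimate computes sqrt(a) as a * rsqrt(a) followed by one
Newton-Raphson step, then masks the result to zero when the input is zero:
rsqrt(0) is +inf, so a * rsqrt(a) would be 0 * inf = NaN, and the
cmpeqss/andnps pair (vcmpeqss/vandnps for AVX) clears the lane where a == 0.
A C sketch of the scalar logic, with an illustrative helper name:

    #include <xmmintrin.h>

    /* sqrt(a) ~= s * -0.5 * (s*e - 3), where e = rsqrt(a) and s = a*e.
       The explicit zero check stands in for the cmpeqss/andnps mask in
       the checked asm. */
    static float sqrt_estimate(float a) {
        if (a == 0.0f)
            return 0.0f;                   /* masked lane: sqrt(0) = 0 */
        float e = _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(a)));
        float s = a * e;                   /* rough sqrt(a)            */
        return s * -0.5f * (s * e - 3.0f); /* one refinement step      */
    }
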
@@ -73,30 +99,47 @@ define x86_fp80 @finite_f80_no_estimate(x86_fp80 %ld) #0 {
 ; Don't die on the impossible.
 
 define x86_fp80 @finite_f80_estimate_but_no(x86_fp80 %ld) #1 {
-; AVX-LABEL: finite_f80_estimate_but_no:
-; AVX:       # BB#0:
-; AVX-NEXT:    fldt {{[0-9]+}}(%rsp)
-; AVX-NEXT:    fsqrt
-; AVX-NEXT:    retq
-;
+; CHECK-LABEL: finite_f80_estimate_but_no:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    fldt {{[0-9]+}}(%rsp)
+; CHECK-NEXT:    fsqrt
+; CHECK-NEXT:    retq
   %call = tail call x86_fp80 @__sqrtl_finite(x86_fp80 %ld) #2
   ret x86_fp80 %call
 }
 
 define float @f32_no_estimate(float %x) #0 {
+; SSE-LABEL: f32_no_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    sqrtss %xmm0, %xmm1
+; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT:    divss %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: f32_no_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
 ; AVX-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT:    vdivss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
-;
   %sqrt = tail call float @llvm.sqrt.f32(float %x)
   %div = fdiv fast float 1.0, %sqrt
   ret float %div
 }
 
 define float @f32_estimate(float %x) #1 {
+; SSE-LABEL: f32_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    rsqrtss %xmm0, %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    mulss %xmm2, %xmm2
+; SSE-NEXT:    mulss %xmm0, %xmm2
+; SSE-NEXT:    addss {{.*}}(%rip), %xmm2
+; SSE-NEXT:    mulss {{.*}}(%rip), %xmm1
+; SSE-NEXT:    mulss %xmm2, %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: f32_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vrsqrtss %xmm0, %xmm0, %xmm1
@@ -106,26 +149,43 @@ define float @f32_estimate(float %x) #1 {
 ; AVX-NEXT:    vmulss {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vmulss %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
-;
   %sqrt = tail call float @llvm.sqrt.f32(float %x)
   %div = fdiv fast float 1.0, %sqrt
   ret float %div
 }
 
 define <4 x float> @v4f32_no_estimate(<4 x float> %x) #0 {
+; SSE-LABEL: v4f32_no_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    sqrtps %xmm0, %xmm1
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT:    divps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: v4f32_no_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vsqrtps %xmm0, %xmm0
 ; AVX-NEXT:    vmovaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; AVX-NEXT:    vdivps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
-;
   %sqrt = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %x)
   %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
   ret <4 x float> %div
 }
 
 define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
+; SSE-LABEL: v4f32_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    rsqrtps %xmm0, %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm2
+; SSE-NEXT:    mulps %xmm2, %xmm2
+; SSE-NEXT:    mulps %xmm0, %xmm2
+; SSE-NEXT:    addps {{.*}}(%rip), %xmm2
+; SSE-NEXT:    mulps {{.*}}(%rip), %xmm1
+; SSE-NEXT:    mulps %xmm2, %xmm1
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: v4f32_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vrsqrtps %xmm0, %xmm1
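
The f32_estimate and v4f32_estimate bodies compute the reciprocal square root
directly: with e = rsqrt(a), one Newton-Raphson step is
e1 = e * -0.5 * (a*e*e - 3). The two {{.*}}(%rip) memory operands in the SSE
checks are the -0.5 and -3.0 splat constants; they appear spelled out in the
v8f32_estimate checks below. A sketch under the same assumptions as the
earlier snippets:

    #include <xmmintrin.h>

    /* One Newton-Raphson step on the SSE reciprocal square root
       estimate: e1 = e * -0.5 * (a*e*e - 3) ~= 1/sqrt(a). */
    static float rsqrt_one_step(float a) {
        float e = _mm_cvtss_f32(_mm_rsqrt_ss(_mm_set_ss(a)));
        return e * -0.5f * (a * e * e - 3.0f);
    }
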
@@ -135,26 +195,56 @@ define <4 x float> @v4f32_estimate(<4 x float> %x) #1 {
 ; AVX-NEXT:    vmulps {{.*}}(%rip), %xmm1, %xmm1
 ; AVX-NEXT:    vmulps %xmm0, %xmm1, %xmm0
 ; AVX-NEXT:    retq
-;
   %sqrt = tail call <4 x float> @llvm.sqrt.v4f32(<4 x float> %x)
   %div = fdiv fast <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
   ret <4 x float> %div
 }
 
 define <8 x float> @v8f32_no_estimate(<8 x float> %x) #0 {
+; SSE-LABEL: v8f32_no_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    sqrtps %xmm1, %xmm2
+; SSE-NEXT:    sqrtps %xmm0, %xmm3
+; SSE-NEXT:    movaps {{.*#+}} xmm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
+; SSE-NEXT:    movaps %xmm1, %xmm0
+; SSE-NEXT:    divps %xmm3, %xmm0
+; SSE-NEXT:    divps %xmm2, %xmm1
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: v8f32_no_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vsqrtps %ymm0, %ymm0
 ; AVX-NEXT:    vmovaps {{.*#+}} ymm1 = [1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00,1.000000e+00]
 ; AVX-NEXT:    vdivps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
-;
   %sqrt = tail call <8 x float> @llvm.sqrt.v8f32(<8 x float> %x)
   %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
   ret <8 x float> %div
 }
 
 define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
+; SSE-LABEL: v8f32_estimate:
+; SSE:       # BB#0:
+; SSE-NEXT:    rsqrtps %xmm0, %xmm3
+; SSE-NEXT:    movaps {{.*#+}} xmm4 = [-5.000000e-01,-5.000000e-01,-5.000000e-01,-5.000000e-01]
+; SSE-NEXT:    movaps %xmm3, %xmm2
+; SSE-NEXT:    mulps %xmm2, %xmm2
+; SSE-NEXT:    mulps %xmm0, %xmm2
+; SSE-NEXT:    movaps {{.*#+}} xmm0 = [-3.000000e+00,-3.000000e+00,-3.000000e+00,-3.000000e+00]
+; SSE-NEXT:    addps %xmm0, %xmm2
+; SSE-NEXT:    mulps %xmm4, %xmm2
+; SSE-NEXT:    mulps %xmm3, %xmm2
+; SSE-NEXT:    rsqrtps %xmm1, %xmm5
+; SSE-NEXT:    movaps %xmm5, %xmm3
+; SSE-NEXT:    mulps %xmm3, %xmm3
+; SSE-NEXT:    mulps %xmm1, %xmm3
+; SSE-NEXT:    addps %xmm0, %xmm3
+; SSE-NEXT:    mulps %xmm4, %xmm3
+; SSE-NEXT:    mulps %xmm5, %xmm3
+; SSE-NEXT:    movaps %xmm2, %xmm0
+; SSE-NEXT:    movaps %xmm3, %xmm1
+; SSE-NEXT:    retq
+;
 ; AVX-LABEL: v8f32_estimate:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vrsqrtps %ymm0, %ymm1
@@ -164,7 +254,6 @@ define <8 x float> @v8f32_estimate(<8 x float> %x) #1 {
 ; AVX-NEXT:    vmulps {{.*}}(%rip), %ymm1, %ymm1
 ; AVX-NEXT:    vmulps %ymm0, %ymm1, %ymm0
 ; AVX-NEXT:    retq
-;
   %sqrt = tail call <8 x float> @llvm.sqrt.v8f32(<8 x float> %x)
   %div = fdiv fast <8 x float> <float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0, float 1.0>, %sqrt
   ret <8 x float> %div
 }
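
Both files note that their assertions were autogenerated. If codegen changes,
the CHECK lines can be regenerated by rerunning the script over these tests
from the LLVM source root, along these lines (the invocation and llc path are
illustrative, not taken from this commit):

    python utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        test/CodeGen/X86/recip-fastmath.ll test/CodeGen/X86/sqrt-fastmath.ll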