From: Simon Pilgrim
Date: Fri, 21 Jun 2019 18:35:04 +0000 (+0000)
Subject: [X86][AVX] Combine INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)) as shuffle
X-Git-Tag: android-x86-9.0-r1~1512
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=59c7547c88a2017dddc7854028d9175a773a9df6;p=android-x86%2Fexternal-llvm.git

[X86][AVX] Combine INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)) as shuffle

Subvector shuffling often ends up lowered as an insert/extract-subvector pair; recognizing INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)) directly as a shuffle lets shuffle combining fold the pair with surrounding shuffles.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@364090 91177308-0d34-0410-b5e6-96231b3b80d8
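In getFauxShuffleMask terms, the new case reports two inputs and a mask in which elements [0, NumElts) select lanes of SRC0 and elements [NumElts, 2*NumElts) select lanes of SRC1. Below is a minimal standalone sketch of that mask construction, for illustration only; buildInsertExtractMask is a hypothetical name, not code from this patch:

    #include <cstdio>
    #include <vector>

    // Hypothetical standalone model of the mask construction in the
    // X86ISelLowering.cpp hunk below: indices [0, NumElts) pick lanes of
    // SRC0, indices [NumElts, 2*NumElts) pick lanes of SRC1.
    static std::vector<int> buildInsertExtractMask(int NumElts, int NumSubElts,
                                                   int InsertIdx, int ExtractIdx) {
      std::vector<int> Mask;
      for (int i = 0; i != NumElts; ++i)
        Mask.push_back(i);                              // identity over SRC0
      for (int i = 0; i != NumSubElts; ++i)
        Mask[InsertIdx + i] = NumElts + ExtractIdx + i; // inserted SRC1 lanes
      return Mask;
    }

    int main() {
      // v8i32 example: insert the upper v4i32 half of SRC1 (ExtractIdx = 4)
      // over the lower half of SRC0 (InsertIdx = 0).
      for (int M : buildInsertExtractMask(/*NumElts=*/8, /*NumSubElts=*/4,
                                          /*InsertIdx=*/0, /*ExtractIdx=*/4))
        std::printf("%d ", M); // prints: 12 13 14 15 4 5 6 7
      std::printf("\n");
      return 0;
    }

Reporting the pair as a faux shuffle with resolved inputs is what lets the later combines replace whole insert/extract sequences with single lane permutes, as the test diffs show.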
---

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 30c05a3efb0..caa8c1d47a1 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -6706,7 +6706,6 @@ static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
     return true;
   }
   case ISD::INSERT_SUBVECTOR: {
-    // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
    SDValue Src = N.getOperand(0);
    SDValue Sub = N.getOperand(1);
    EVT SubVT = Sub.getValueType();
@@ -6714,12 +6713,26 @@ static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
    if (!isa<ConstantSDNode>(N.getOperand(2)) ||
        !N->isOnlyUserOf(Sub.getNode()))
      return false;
+    int InsertIdx = N.getConstantOperandVal(2);
+    // Handle INSERT_SUBVECTOR(SRC0, EXTRACT_SUBVECTOR(SRC1)).
+    if (Sub.getOpcode() == ISD::EXTRACT_SUBVECTOR &&
+        Sub.getOperand(0).getValueType() == VT &&
+        isa<ConstantSDNode>(Sub.getOperand(1))) {
+      int ExtractIdx = Sub.getConstantOperandVal(1);
+      for (int i = 0; i != (int)NumElts; ++i)
+        Mask.push_back(i);
+      for (int i = 0; i != (int)NumSubElts; ++i)
+        Mask[InsertIdx + i] = NumElts + ExtractIdx + i;
+      Ops.push_back(Src);
+      Ops.push_back(Sub.getOperand(0));
+      return true;
+    }
+    // Handle INSERT_SUBVECTOR(SRC0, SHUFFLE(SRC1)).
    SmallVector<int, 64> SubMask;
    SmallVector<SDValue, 2> SubInputs;
    if (!resolveTargetShuffleInputs(peekThroughOneUseBitcasts(Sub), SubInputs,
                                    SubMask, DAG))
      return false;
-    int InsertIdx = N.getConstantOperandVal(2);
    if (SubMask.size() != NumSubElts) {
      assert(((SubMask.size() % NumSubElts) == 0 ||
              (NumSubElts % SubMask.size()) == 0) && "Illegal submask scale");
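The test updates that follow show the practical effect: vextracti128/vinserti128 (plus blend) sequences collapse into single lane permutes such as vpermq ymm0[2,3,2,3]. A scalar model of that v4i64 lane permute, with assumed lane values and for illustration only:

    #include <array>
    #include <cstdio>

    int main() {
      // Model of "vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]": extracting the upper
      // 128-bit lane (elements 2,3 of a v4i64) and re-inserting it is just a
      // four-element permute, so one instruction suffices.
      std::array<long long, 4> V = {10, 11, 12, 13}; // assumed input lanes
      const int PermMask[4] = {2, 3, 2, 3};          // the vpermq lane pattern
      std::array<long long, 4> R{};
      for (int i = 0; i != 4; ++i)
        R[i] = V[PermMask[i]];
      for (long long L : R)
        std::printf("%lld ", L); // prints: 12 13 12 13
      std::printf("\n");
      return 0;
    }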
diff --git a/test/CodeGen/X86/var-permute-128.ll b/test/CodeGen/X86/var-permute-128.ll
index 4d48527f677..50cedfd2a29 100644
--- a/test/CodeGen/X86/var-permute-128.ll
+++ b/test/CodeGen/X86/var-permute-128.ll
@@ -1027,11 +1027,11 @@ define <16 x i8> @var_shuffle_v16i8_from_v32i8_v16i8(<32 x i8> %v, <16 x i8> %in
 ; AVX2-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
+; AVX2-NEXT:    vpshufb %xmm1, %xmm0, %xmm2
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX2-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
 ; AVX2-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX2-NEXT:    vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
 ; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX2-NEXT:    vzeroupper
 ; AVX2-NEXT:    retq
@@ -1039,11 +1039,11 @@ define <16 x i8> @var_shuffle_v16i8_from_v32i8_v16i8(<32 x i8> %v, <16 x i8> %in
 ; AVX512-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
+; AVX512-NEXT:    vpshufb %xmm1, %xmm0, %xmm2
+; AVX512-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX512-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
 ; AVX512-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
 ; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
 ; AVX512-NEXT:    vzeroupper
 ; AVX512-NEXT:    retq
@@ -1051,12 +1051,12 @@ define <16 x i8> @var_shuffle_v16i8_from_v32i8_v16i8(<32 x i8> %v, <16 x i8> %in
 ; AVX512VLBW-LABEL: var_shuffle_v16i8_from_v32i8_v16i8:
 ; AVX512VLBW:       # %bb.0:
 ; AVX512VLBW-NEXT:    # kill: def $xmm1 killed $xmm1 def $ymm1
-; AVX512VLBW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512VLBW-NEXT:    vpshufb %xmm1, %xmm2, %xmm2
+; AVX512VLBW-NEXT:    vpshufb %xmm1, %xmm0, %xmm2
+; AVX512VLBW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX512VLBW-NEXT:    vpshufb %xmm1, %xmm0, %xmm0
 ; AVX512VLBW-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %k1
-; AVX512VLBW-NEXT:    vmovdqu8 %ymm2, %ymm0 {%k1}
-; AVX512VLBW-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX512VLBW-NEXT:    vmovdqu8 %ymm0, %ymm2 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %xmm2, %xmm0
 ; AVX512VLBW-NEXT:    vzeroupper
 ; AVX512VLBW-NEXT:    retq
 ;
diff --git a/test/CodeGen/X86/var-permute-256.ll b/test/CodeGen/X86/var-permute-256.ll
index 7e1780449fd..5d5ed467e85 100644
--- a/test/CodeGen/X86/var-permute-256.ll
+++ b/test/CodeGen/X86/var-permute-256.ll
@@ -170,39 +170,36 @@ define <16 x i16> @var_shuffle_v16i16(<16 x i16> %v, <16 x i16> %indices) nounwi
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
 ; AVX2-NEXT:    vpaddw {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
 ; AVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT:    vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: var_shuffle_v16i16:
 ; AVX512:       # %bb.0:
 ; AVX512-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
 ; AVX512-NEXT:    vpaddw {{.*}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX512-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
 ; AVX512-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT:    vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
 ;
 ; AVX512VLDQ-LABEL: var_shuffle_v16i16:
 ; AVX512VLDQ:       # %bb.0:
 ; AVX512VLDQ-NEXT:    vpmullw {{.*}}(%rip), %ymm1, %ymm1
 ; AVX512VLDQ-NEXT:    vpaddw {{.*}}(%rip), %ymm1, %ymm1
-; AVX512VLDQ-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX512VLDQ-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
 ; AVX512VLDQ-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
-; AVX512VLDQ-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX512VLDQ-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512VLDQ-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512VLDQ-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
 ; AVX512VLDQ-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
-; AVX512VLDQ-NEXT:    vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
+; AVX512VLDQ-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX512VLDQ-NEXT:    retq
 ;
 ; AVX512VLBW-LABEL: var_shuffle_v16i16:
@@ -293,45 +290,42 @@ define <32 x i8> @var_shuffle_v32i8(<32 x i8> %v, <32 x i8> %indices) nounwind {
 ;
 ; AVX2-LABEL: var_shuffle_v32i8:
 ; AVX2:       # %bb.0:
-; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
 ; AVX2-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX2-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
-; AVX2-NEXT:    vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
+; AVX2-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT:    retq
 ;
 ; AVX512-LABEL: var_shuffle_v32i8:
 ; AVX512:       # %bb.0:
-; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX512-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
 ; AVX512-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
-; AVX512-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX512-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
 ; AVX512-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
-; AVX512-NEXT:    vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
+; AVX512-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX512-NEXT:    retq
 ;
 ; AVX512VLDQ-LABEL: var_shuffle_v32i8:
 ; AVX512VLDQ:       # %bb.0:
-; AVX512VLDQ-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX512VLDQ-NEXT:    vpermq {{.*#+}} ymm2 = ymm0[2,3,2,3]
 ; AVX512VLDQ-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
-; AVX512VLDQ-NEXT:    vextracti128 $1, %ymm0, %xmm3
-; AVX512VLDQ-NEXT:    vpblendd {{.*#+}} ymm0 = ymm3[0,1,2,3],ymm0[4,5,6,7]
+; AVX512VLDQ-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
 ; AVX512VLDQ-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
 ; AVX512VLDQ-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %ymm1
-; AVX512VLDQ-NEXT:    vpblendvb %ymm1, %ymm0, %ymm2, %ymm0
+; AVX512VLDQ-NEXT:    vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
 ; AVX512VLDQ-NEXT:    retq
 ;
 ; AVX512VLBW-LABEL: var_shuffle_v32i8:
 ; AVX512VLBW:       # %bb.0:
-; AVX512VLBW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512VLBW-NEXT:    vpblendd {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm0[4,5,6,7]
-; AVX512VLBW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm0
-; AVX512VLBW-NEXT:    vpshufb %ymm1, %ymm0, %ymm0
+; AVX512VLBW-NEXT:    vinserti128 $1, %xmm0, %ymm0, %ymm2
+; AVX512VLBW-NEXT:    vpshufb %ymm1, %ymm2, %ymm2
+; AVX512VLBW-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[2,3,2,3]
 ; AVX512VLBW-NEXT:    vpcmpgtb {{.*}}(%rip), %ymm1, %k1
-; AVX512VLBW-NEXT:    vpshufb %ymm1, %ymm2, %ymm0 {%k1}
+; AVX512VLBW-NEXT:    vpshufb %ymm1, %ymm0, %ymm2 {%k1}
+; AVX512VLBW-NEXT:    vmovdqa %ymm2, %ymm0
 ; AVX512VLBW-NEXT:    retq
 ;
 ; VLVBMI-LABEL: var_shuffle_v32i8:
diff --git a/test/CodeGen/X86/vector-shuffle-256-v4.ll b/test/CodeGen/X86/vector-shuffle-256-v4.ll
index 099aad76ba7..33513fef481 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v4.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v4.ll
@@ -447,11 +447,23 @@ define <4 x double> @shuffle_v4f64_0167(<4 x double> %a, <4 x double> %b) {
 }
 
 define <4 x double> @shuffle_v4f64_1054(<4 x double> %a, <4 x double> %b) {
-; ALL-LABEL: shuffle_v4f64_1054:
-; ALL:       # %bb.0:
-; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; ALL-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
-; ALL-NEXT:    retq
+; AVX1OR2-LABEL: shuffle_v4f64_1054:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX1OR2-NEXT:    retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v4f64_1054:
+; AVX512VL-SLOW:       # %bb.0:
+; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-SLOW-NEXT:    vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
+; AVX512VL-SLOW-NEXT:    retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v4f64_1054:
+; AVX512VL-FAST:       # %bb.0:
+; AVX512VL-FAST-NEXT:    vmovapd {{.*#+}} ymm2 = [1,0,5,4]
+; AVX512VL-FAST-NEXT:    vpermt2pd %ymm1, %ymm2, %ymm0
+; AVX512VL-FAST-NEXT:    retq
   %shuffle = shufflevector <4 x double> %a, <4 x double> %b, <4 x i32> <i32 1, i32 0, i32 5, i32 4>
   ret <4 x double> %shuffle
 }
@@ -990,19 +1002,11 @@ define <4 x i64> @shuffle_v4i64_0142(<4 x i64> %a, <4 x i64> %b) {
 ; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
 ; AVX2-NEXT:    retq
 ;
-; AVX512VL-SLOW-LABEL: shuffle_v4i64_0142:
-; AVX512VL-SLOW:       # %bb.0:
-; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
-; AVX512VL-SLOW-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,2]
-; AVX512VL-SLOW-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5],ymm0[6,7]
-; AVX512VL-SLOW-NEXT:    retq
-;
-; AVX512VL-FAST-LABEL: shuffle_v4i64_0142:
-; AVX512VL-FAST:       # %bb.0:
-; AVX512VL-FAST-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm1
-; AVX512VL-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,6,2]
-; AVX512VL-FAST-NEXT:    vpermt2q %ymm1, %ymm2, %ymm0
-; AVX512VL-FAST-NEXT:    retq
+; AVX512VL-LABEL: shuffle_v4i64_0142:
+; AVX512VL:       # %bb.0:
+; AVX512VL-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,1,4,2]
+; AVX512VL-NEXT:    vpermt2q %ymm1, %ymm2, %ymm0
+; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 2>
   ret <4 x i64> %shuffle
 }
@@ -1198,11 +1202,17 @@ define <4 x i64> @shuffle_v4i64_1054(<4 x i64> %a, <4 x i64> %b) {
 ; AVX2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
 ; AVX2-NEXT:    retq
 ;
-; AVX512VL-LABEL: shuffle_v4i64_1054:
-; AVX512VL:       # %bb.0:
-; AVX512VL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX512VL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
-; AVX512VL-NEXT:    retq
+; AVX512VL-SLOW-LABEL: shuffle_v4i64_1054:
+; AVX512VL-SLOW:       # %bb.0:
+; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[2,3,0,1,6,7,4,5]
+; AVX512VL-SLOW-NEXT:    retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v4i64_1054:
+; AVX512VL-FAST:       # %bb.0:
+; AVX512VL-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [1,0,5,4]
+; AVX512VL-FAST-NEXT:    vpermt2q %ymm1, %ymm2, %ymm0
+; AVX512VL-FAST-NEXT:    retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> %b, <4 x i32> <i32 1, i32 0, i32 5, i32 4>
   ret <4 x i64> %shuffle
 }
diff --git a/test/CodeGen/X86/vector-shuffle-256-v8.ll b/test/CodeGen/X86/vector-shuffle-256-v8.ll
index 3f9d9ade272..9969167a10d 100644
--- a/test/CodeGen/X86/vector-shuffle-256-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-256-v8.ll
@@ -935,11 +935,23 @@ define <8 x float> @shuffle_v8f32_76543210(<8 x float> %a, <8 x float> %b) {
 }
 
 define <8 x float> @shuffle_v8f32_3210ba98(<8 x float> %a, <8 x float> %b) {
-; ALL-LABEL: shuffle_v8f32_3210ba98:
-; ALL:       # %bb.0:
-; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; ALL-NEXT:    retq
+; AVX1OR2-LABEL: shuffle_v8f32_3210ba98:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX1OR2-NEXT:    retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v8f32_3210ba98:
+; AVX512VL-SLOW:       # %bb.0:
+; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX512VL-SLOW-NEXT:    retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8f32_3210ba98:
+; AVX512VL-FAST:       # %bb.0:
+; AVX512VL-FAST-NEXT:    vmovaps {{.*#+}} ymm2 = [3,2,1,0,11,10,9,8]
+; AVX512VL-FAST-NEXT:    vpermt2ps %ymm1, %ymm2, %ymm0
+; AVX512VL-FAST-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 11, i32 10, i32 9, i32 8>
   ret <8 x float> %shuffle
 }
@@ -1064,11 +1076,24 @@ define <8 x float> @shuffle_v8f32_ba987654(<8 x float> %a, <8 x float> %b) {
 }
 
 define <8 x float> @shuffle_v8f32_ba983210(<8 x float> %a, <8 x float> %b) {
-; ALL-LABEL: shuffle_v8f32_ba983210:
-; ALL:       # %bb.0:
-; ALL-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
-; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; ALL-NEXT:    retq
+; AVX1OR2-LABEL: shuffle_v8f32_ba983210:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX1OR2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX1OR2-NEXT:    retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v8f32_ba983210:
+; AVX512VL-SLOW:       # %bb.0:
+; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX512VL-SLOW-NEXT:    retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8f32_ba983210:
+; AVX512VL-FAST:       # %bb.0:
+; AVX512VL-FAST-NEXT:    vmovaps {{.*#+}} ymm2 = [3,2,1,0,11,10,9,8]
+; AVX512VL-FAST-NEXT:    vpermi2ps %ymm0, %ymm1, %ymm2
+; AVX512VL-FAST-NEXT:    vmovaps %ymm2, %ymm0
+; AVX512VL-FAST-NEXT:    retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> %b, <8 x i32> <i32 11, i32 10, i32 9, i32 8, i32 3, i32 2, i32 1, i32 0>
   ret <8 x float> %shuffle
 }
@@ -2240,11 +2265,23 @@ define <8 x i32> @shuffle_v8i32_76543210(<8 x i32> %a, <8 x i32> %b) {
 }
 
 define <8 x i32> @shuffle_v8i32_3210ba98(<8 x i32> %a, <8 x i32> %b) {
-; ALL-LABEL: shuffle_v8i32_3210ba98:
-; ALL:       # %bb.0:
-; ALL-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; ALL-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
-; ALL-NEXT:    retq
+; AVX1OR2-LABEL: shuffle_v8i32_3210ba98:
+; AVX1OR2:       # %bb.0:
+; AVX1OR2-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX1OR2-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX1OR2-NEXT:    retq
+;
+; AVX512VL-SLOW-LABEL: shuffle_v8i32_3210ba98:
+; AVX512VL-SLOW:       # %bb.0:
+; AVX512VL-SLOW-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; AVX512VL-SLOW-NEXT:    vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
+; AVX512VL-SLOW-NEXT:    retq
+;
+; AVX512VL-FAST-LABEL: shuffle_v8i32_3210ba98:
+; AVX512VL-FAST:       # %bb.0:
+; AVX512VL-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [3,2,1,0,11,10,9,8]
+; AVX512VL-FAST-NEXT:    vpermt2d %ymm1, %ymm2, %ymm0
+; AVX512VL-FAST-NEXT:    retq
   %shuffle = shufflevector <8 x i32> %a, <8 x i32> %b, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 11, i32 10, i32 9, i32 8>
   ret <8 x i32> %shuffle
 }
diff --git a/test/CodeGen/X86/vector-shuffle-512-v8.ll b/test/CodeGen/X86/vector-shuffle-512-v8.ll
index dea5457baea..ff3a3106337 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -2252,13 +2252,21 @@ define <8 x double> @shuffle_v2f64_v8f64_01010101(<2 x double> %a) {
 
 ;FIXME: compressp
 define <4 x double> @test_v8f64_2346 (<8 x double> %v) {
-; ALL-LABEL: test_v8f64_2346:
-; ALL:       # %bb.0:
-; ALL-NEXT:    vextractf128 $1, %ymm0, %xmm1
-; ALL-NEXT:    vextractf64x4 $1, %zmm0, %ymm0
-; ALL-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,1,0,2]
-; ALL-NEXT:    vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
-; ALL-NEXT:    ret{{[l|q]}}
+; AVX512F-LABEL: test_v8f64_2346:
+; AVX512F:       # %bb.0:
+; AVX512F-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [2,3,4,6,2,3,4,6]
+; AVX512F-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; AVX512F-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT:    retq
+;
+; AVX512F-32-LABEL: test_v8f64_2346:
+; AVX512F-32:       # %bb.0:
+; AVX512F-32-NEXT:    vbroadcasti64x4 {{.*#+}} zmm1 = [2,0,3,0,4,0,6,0,2,0,3,0,4,0,6,0]
+; AVX512F-32-NEXT:    # zmm1 = mem[0,1,2,3,0,1,2,3]
+; AVX512F-32-NEXT:    vpermq %zmm0, %zmm1, %zmm0
+; AVX512F-32-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-32-NEXT:    retl
   %res = shufflevector <8 x double> %v, <8 x double> undef, <4 x i32> <i32 2, i32 3, i32 4, i32 6>
   ret <4 x double> %res
 }