From 5b11a64fa00cf480ada2d9e42e60abe23bd52ca0 Mon Sep 17 00:00:00 2001
From: Nico Weber
Date: Fri, 4 Aug 2017 20:24:13 +0000
Subject: [PATCH] Revert r310058, it caused PR34073.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@310118 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp   | 49 +---
 test/CodeGen/X86/oddshuffles.ll            | 10 ++--
 test/CodeGen/X86/shuffle-vs-trunc-512.ll   | 92 +++++++++++++++++++++++-------
 test/CodeGen/X86/vector-shuffle-512-v16.ll | 11 ++--
 test/CodeGen/X86/vector-shuffle-512-v8.ll  | 13 +++--
 test/CodeGen/X86/x86-interleaved-access.ll | 62 ++++++++++----------
 6 files changed, 123 insertions(+), 114 deletions(-)

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 9c02cc1ce9b..937522ea2db 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -14133,18 +14133,10 @@ SDValue DAGCombiner::createBuildVecShuffle(const SDLoc &DL, SDNode *N,
   EVT InVT1 = VecIn1.getValueType();
   EVT InVT2 = VecIn2.getNode() ? VecIn2.getValueType() : InVT1;
 
-  unsigned Vec2Offset = 0;
+  unsigned Vec2Offset = InVT1.getVectorNumElements();
   unsigned NumElems = VT.getVectorNumElements();
   unsigned ShuffleNumElems = NumElems;
 
-  // In case both the input vectors are extracted from same base
-  // vector we do not need extra added to the end (Vec2Offset) while
-  // computing shuffle mask.
-  if (!VecIn2 || !(VecIn1.getOpcode() == ISD::EXTRACT_SUBVECTOR) ||
-      !(VecIn2.getOpcode() == ISD::EXTRACT_SUBVECTOR) ||
-      !(VecIn1.getOperand(0) == VecIn2.getOperand(0)))
-    Vec2Offset = InVT1.getVectorNumElements();
-
   // We can't generate a shuffle node with mismatched input and output types.
   // Try to make the types match the type of the output.
   if (InVT1 != VT || InVT2 != VT) {
@@ -14291,6 +14283,7 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
     if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
         !isa<ConstantSDNode>(Op.getOperand(1)))
       return SDValue();
+
     SDValue ExtractedFromVec = Op.getOperand(0);
 
     // All inputs must have the same element type as the output.
@@ -14313,44 +14306,6 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
   if (VecIn.size() < 2)
     return SDValue();
 
-  // If all the Operands of BUILD_VECTOR extract from same
-  // vector, then split the vector efficiently based on the maximum
-  // vector access index and adjust the VectorMask and
-  // VecIn accordingly.
-  if (VecIn.size() == 2) {
-    unsigned MaxIndex = 0;
-    unsigned NearestPow2 = 0;
-    SDValue Vec = VecIn.back();
-    EVT InVT = Vec.getValueType();
-    MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
-    SmallVector<unsigned, 8> IndexVec(NumElems, 0);
-
-    for (unsigned i = 0; i < NumElems; i++) {
-      if (VectorMask[i] <= 0)
-        continue;
-      unsigned Index = N->getOperand(i).getConstantOperandVal(1);
-      IndexVec[i] = Index;
-      MaxIndex = std::max(MaxIndex, Index);
-    }
-
-    NearestPow2 = PowerOf2Ceil(MaxIndex);
-    if ((NearestPow2 > 2) && ((NumElems * 2) < NearestPow2)) {
-      unsigned SplitSize = NearestPow2 / 2;
-      EVT SplitVT = EVT::getVectorVT(*DAG.getContext(),
                                     InVT.getVectorElementType(), SplitSize);
-      SDValue VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
-                                   DAG.getConstant(SplitSize, DL, IdxTy));
-      SDValue VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
-                                   DAG.getConstant(0, DL, IdxTy));
-      VecIn.pop_back();
-      VecIn.push_back(VecIn1);
-      VecIn.push_back(VecIn2);
-
-      for (unsigned i = 0; i < NumElems; i++)
-        VectorMask[i] = (IndexVec[i] < SplitSize) ? 1 : 2;
-    }
-  }
-
   // TODO: We want to sort the vectors by descending length, so that adjacent
   // pairs have similar length, and the longer vector is always first in the
   // pair.
diff --git a/test/CodeGen/X86/oddshuffles.ll b/test/CodeGen/X86/oddshuffles.ll
index 782529213d1..0bda41a30c6 100644
--- a/test/CodeGen/X86/oddshuffles.ll
+++ b/test/CodeGen/X86/oddshuffles.ll
@@ -940,17 +940,17 @@ define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2
 ;
 ; AVX2-LABEL: interleave_24i16_out:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vmovdqu 32(%rdi), %xmm0
-; AVX2-NEXT:    vmovdqu (%rdi), %ymm1
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3,4],ymm0[5],ymm1[6,7,8,9],ymm0[10],ymm1[11,12],ymm0[13],ymm1[14,15]
+; AVX2-NEXT:    vmovdqu (%rdi), %ymm0
+; AVX2-NEXT:    vmovdqu 32(%rdi), %xmm1
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm2 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7,8,9],ymm1[10],ymm0[11,12],ymm1[13],ymm0[14,15]
 ; AVX2-NEXT:    vextracti128 $1, %ymm2, %xmm3
 ; AVX2-NEXT:    vpblendw {{.*#+}} xmm2 = xmm2[0],xmm3[1],xmm2[2,3],xmm3[4],xmm2[5,6],xmm3[7]
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[0,1,6,7,12,13,2,3,8,9,14,15,4,5,10,11]
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm3 = ymm0[0],ymm1[1,2],ymm0[3],ymm1[4,5],ymm0[6],ymm1[7],ymm0[8],ymm1[9,10],ymm0[11],ymm1[12,13],ymm0[14],ymm1[15]
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm3 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7],ymm1[8],ymm0[9,10],ymm1[11],ymm0[12,13],ymm1[14],ymm0[15]
 ; AVX2-NEXT:    vextracti128 $1, %ymm3, %xmm4
 ; AVX2-NEXT:    vpblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm3 = xmm3[2,3,8,9,14,15,4,5,10,11,0,1,6,7,12,13]
-; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3],ymm0[4],ymm1[5,6],ymm0[7],ymm1[8],ymm0[9],ymm1[10,11],ymm0[12],ymm1[13,14],ymm0[15]
+; AVX2-NEXT:    vpblendw {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3],ymm1[4],ymm0[5,6],ymm1[7],ymm0[8],ymm1[9],ymm0[10,11],ymm1[12],ymm0[13,14],ymm1[15]
 ; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
 ; AVX2-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3],xmm0[4,5],xmm1[6],xmm0[7]
 ; AVX2-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[4,5,10,11,0,1,6,7,12,13,2,3,8,9,14,15]
diff --git a/test/CodeGen/X86/shuffle-vs-trunc-512.ll b/test/CodeGen/X86/shuffle-vs-trunc-512.ll
index 05947829edb..86e3fc110d3 100644
--- a/test/CodeGen/X86/shuffle-vs-trunc-512.ll
+++ b/test/CodeGen/X86/shuffle-vs-trunc-512.ll
@@ -261,33 +261,81 @@ define <16 x i8> @trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_
 ;
 ; AVX512BW-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
 ; AVX512BW:       # BB#0:
-; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT:    vpshufb %xmm2, %xmm0, %xmm2
-; AVX512BW-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX512BW-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; AVX512BW-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,5,9,14,u,u,u,u,u,u,u,u]
-; AVX512BW-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u]
-; AVX512BW-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512BW-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512BW-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX512BW-NEXT:    vpextrb $1, %xmm0, %ecx
+; AVX512BW-NEXT:    vmovd %ecx, %xmm1
+; AVX512BW-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX512BW-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX512BW-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vextracti32x4 $1, %zmm0, %xmm2
+; AVX512BW-NEXT:    vpextrb $1, %xmm2, %eax
+; AVX512BW-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $5, %xmm2, %eax
+; AVX512BW-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $9, %xmm2, %eax
+; AVX512BW-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $13, %xmm2, %eax
+; AVX512BW-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
+; AVX512BW-NEXT:    vpextrb $1, %xmm2, %eax
+; AVX512BW-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $5, %xmm2, %eax
+; AVX512BW-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $9, %xmm2, %eax
+; AVX512BW-NEXT:    vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $13, %xmm2, %eax
+; AVX512BW-NEXT:    vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
+; AVX512BW-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX512BW-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX512BW-NEXT:    vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX512BW-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX512BW-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
 ; AVX512BW-NEXT:    vzeroupper
 ; AVX512BW-NEXT:    retq
 ;
 ; AVX512BWVL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
 ; AVX512BWVL:       # BB#0:
-; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT:    vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BWVL-NEXT:    vpshufb %xmm2, %xmm1, %xmm1
-; AVX512BWVL-NEXT:    vpshufb %xmm2, %xmm0, %xmm2
-; AVX512BWVL-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX512BWVL-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
-; AVX512BWVL-NEXT:    vextracti128 $1, %ymm0, %xmm2
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,5,9,14,u,u,u,u,u,u,u,u]
-; AVX512BWVL-NEXT:    vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u]
-; AVX512BWVL-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512BWVL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512BWVL-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX512BWVL-NEXT:    vpextrb $1, %xmm0, %ecx
+; AVX512BWVL-NEXT:    vmovd %ecx, %xmm1
+; AVX512BWVL-NEXT:    vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX512BWVL-NEXT:    vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $13, %xmm0, %eax
+; AVX512BWVL-NEXT:    vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vextracti32x4 $1, %zmm0, %xmm2
+; AVX512BWVL-NEXT:    vpextrb $1, %xmm2, %eax
+; AVX512BWVL-NEXT:    vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $5, %xmm2, %eax
+; AVX512BWVL-NEXT:    vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $9, %xmm2, %eax
+; AVX512BWVL-NEXT:    vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $13, %xmm2, %eax
+; AVX512BWVL-NEXT:    vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vextracti32x4 $2, %zmm0, %xmm2
+; AVX512BWVL-NEXT:    vpextrb $1, %xmm2, %eax
+; AVX512BWVL-NEXT:    vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $5, %xmm2, %eax
+; AVX512BWVL-NEXT:    vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $9, %xmm2, %eax
+; AVX512BWVL-NEXT:    vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $13, %xmm2, %eax
+; AVX512BWVL-NEXT:    vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
+; AVX512BWVL-NEXT:    vpextrb $1, %xmm0, %eax
+; AVX512BWVL-NEXT:    vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $5, %xmm0, %eax
+; AVX512BWVL-NEXT:    vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $9, %xmm0, %eax
+; AVX512BWVL-NEXT:    vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT:    vpextrb $14, %xmm0, %eax
+; AVX512BWVL-NEXT:    vpinsrb $15, %eax, %xmm1, %xmm0
 ; AVX512BWVL-NEXT:    vzeroupper
 ; AVX512BWVL-NEXT:    retq
   %res = shufflevector <64 x i8> %x, <64 x i8> %x, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 62>
diff --git a/test/CodeGen/X86/vector-shuffle-512-v16.ll b/test/CodeGen/X86/vector-shuffle-512-v16.ll
index b6ac0b9b8ac..84602f4ef21 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v16.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v16.ll
@@ -295,10 +295,13 @@ define <8 x i32> @test_v16i32_1_3_5_7_9_11_13_15(<16 x i32> %v) {
 define <4 x i32> @test_v16i32_0_1_2_12 (<16 x i32> %v) {
 ; ALL-LABEL: test_v16i32_0_1_2_12:
 ; ALL:       # BB#0:
-; ALL-NEXT:    vextracti32x8 $1, %zmm0, %ymm1
-; ALL-NEXT:    vextracti128 $1, %ymm1, %xmm1
-; ALL-NEXT:    vpbroadcastd %xmm1, %xmm1
-; ALL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; ALL-NEXT:    vpextrd $1, %xmm0, %eax
+; ALL-NEXT:    vpinsrd $1, %eax, %xmm0, %xmm1
+; ALL-NEXT:    vpextrd $2, %xmm0, %eax
+; ALL-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
+; ALL-NEXT:    vextracti32x4 $3, %zmm0, %xmm0
+; ALL-NEXT:    vmovd %xmm0, %eax
+; ALL-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
 ; ALL-NEXT:    vzeroupper
 ; ALL-NEXT:    retq
   %res = shufflevector <16 x i32> %v, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 12>
diff --git a/test/CodeGen/X86/vector-shuffle-512-v8.ll b/test/CodeGen/X86/vector-shuffle-512-v8.ll
index d41a3e83492..e06d2b97902 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v8.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v8.ll
@@ -2726,17 +2726,20 @@ define <4 x i64> @test_v8i64_1257 (<8 x i64> %v) {
 define <2 x i64> @test_v8i64_2_5 (<8 x i64> %v) {
 ; AVX512F-LABEL: test_v8i64_2_5:
 ; AVX512F:       # BB#0:
-; AVX512F-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; AVX512F-NEXT:    vextracti32x4 $2, %zmm0, %xmm1
+; AVX512F-NEXT:    vextracti32x4 $1, %zmm0, %xmm0
 ; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
 ; AVX512F-NEXT:    vzeroupper
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512F-32-LABEL: test_v8i64_2_5:
 ; AVX512F-32:       # BB#0:
-; AVX512F-32-NEXT:    vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-32-NEXT:    vextracti128 $1, %ymm0, %xmm0
-; AVX512F-32-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512F-32-NEXT:    vextracti32x4 $1, %zmm0, %xmm1
+; AVX512F-32-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
+; AVX512F-32-NEXT:    vpextrd $2, %xmm0, %eax
+; AVX512F-32-NEXT:    vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX512F-32-NEXT:    vpextrd $3, %xmm0, %eax
+; AVX512F-32-NEXT:    vpinsrd $3, %eax, %xmm1, %xmm0
 ; AVX512F-32-NEXT:    vzeroupper
 ; AVX512F-32-NEXT:    retl
   %res = shufflevector <8 x i64> %v, <8 x i64> undef, <2 x i32> <i32 2, i32 5>
diff --git a/test/CodeGen/X86/x86-interleaved-access.ll b/test/CodeGen/X86/x86-interleaved-access.ll
index 77f0f550e06..4bef4e2b58d 100644
--- a/test/CodeGen/X86/x86-interleaved-access.ll
+++ b/test/CodeGen/X86/x86-interleaved-access.ll
@@ -591,37 +591,37 @@ define <16 x i1> @interleaved_load_vf16_i8_stride4(<64 x i8>* %ptr) {
 ; AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
 ; AVX2-NEXT:    vpshufb %xmm4, %xmm2, %xmm2
 ; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm4
-; AVX2-NEXT:    vpshufb %xmm3, %xmm4, %xmm5
-; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm3
-; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm5 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm6
-; AVX2-NEXT:    vpshufb %xmm5, %xmm6, %xmm7
-; AVX2-NEXT:    vpshufb %xmm5, %xmm0, %xmm5
-; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
-; AVX2-NEXT:    vpcmpeqb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm3, %xmm4, %xmm5
-; AVX2-NEXT:    vpshufb %xmm3, %xmm1, %xmm3
-; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm5 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm5, %xmm6, %xmm7
-; AVX2-NEXT:    vpshufb %xmm5, %xmm0, %xmm5
-; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm5 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm5, %xmm4, %xmm4
-; AVX2-NEXT:    vpshufb %xmm5, %xmm1, %xmm1
-; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT:    vpshufb %xmm4, %xmm6, %xmm5
-; AVX2-NEXT:    vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
+; AVX2-NEXT:    vpshufb %xmm4, %xmm3, %xmm5
+; AVX2-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm6 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT:    vpshufb %xmm6, %xmm5, %xmm7
+; AVX2-NEXT:    vpshufb %xmm6, %xmm0, %xmm6
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
+; AVX2-NEXT:    vpcmpeqb %xmm4, %xmm2, %xmm2
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm4 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT:    vpshufb %xmm4, %xmm3, %xmm6
+; AVX2-NEXT:    vpshufb %xmm4, %xmm1, %xmm4
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm6 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT:    vpshufb %xmm6, %xmm5, %xmm7
+; AVX2-NEXT:    vpshufb %xmm6, %xmm0, %xmm6
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; AVX2-NEXT:    vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm6 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
+; AVX2-NEXT:    vpshufb %xmm6, %xmm3, %xmm3
+; AVX2-NEXT:    vpshufb %xmm6, %xmm1, %xmm1
+; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; AVX2-NEXT:    vmovdqa {{.*#+}} xmm3 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT:    vpshufb %xmm3, %xmm5, %xmm5
+; AVX2-NEXT:    vpshufb %xmm3, %xmm0, %xmm0
 ; AVX2-NEXT:    vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
 ; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-NEXT:    vpcmpeqb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT:    vpcmpeqb %xmm0, %xmm4, %xmm0
 ; AVX2-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; AVX2-NEXT:    vpand %xmm1, %xmm2, %xmm2
 ; AVX2-NEXT:    vpand %xmm1, %xmm0, %xmm0
@@ -860,15 +860,15 @@ define <32 x i1> @interleaved_load_vf32_i8_stride4(<128 x i8>* %ptr) {
 ; AVX512-NEXT:    vpmovdw %zmm1, %ymm3
 ; AVX512-NEXT:    vinserti64x4 $1, %ymm3, %zmm2, %zmm2
 ; AVX512-NEXT:    vpmovwb %zmm2, %ymm8
+; AVX512-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
 ; AVX512-NEXT:    vextracti64x4 $1, %zmm1, %ymm14
 ; AVX512-NEXT:    vextracti128 $1, %ymm14, %xmm9
-; AVX512-NEXT:    vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
 ; AVX512-NEXT:    vpshufb %xmm7, %xmm9, %xmm4
 ; AVX512-NEXT:    vpshufb %xmm7, %xmm14, %xmm5
 ; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
 ; AVX512-NEXT:    vinserti128 $1, %xmm4, %ymm0, %ymm5
-; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm10
 ; AVX512-NEXT:    vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX512-NEXT:    vextracti128 $1, %ymm1, %xmm10
 ; AVX512-NEXT:    vpshufb %xmm3, %xmm10, %xmm6
 ; AVX512-NEXT:    vpshufb %xmm3, %xmm1, %xmm4
 ; AVX512-NEXT:    vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
-- 
2.11.0
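
For context, a minimal IR sketch (not part of the patch) of the pattern the reverted combine targeted: a BUILD_VECTOR whose elements are all extracted from one wide source vector, i.e. a single-source shuffle that crosses 128-bit lanes. It mirrors the test_v16i32_0_1_2_12 case in the hunks above; the function name below is illustrative, not from the tree. With r310058 applied, reduceBuildVecToShuffle() split the 512-bit source in two with EXTRACT_SUBVECTOR and this lowered to a vpbroadcastd + vpblendd pair; after the revert, the updated CHECK lines expect scalar vpextrd/vpinsrd round trips instead.

; Illustrative sketch only: mirrors test_v16i32_0_1_2_12 above.
; A single-source <16 x i32> shuffle pulling element 12 across 128-bit lanes.
define <4 x i32> @sketch_v16i32_0_1_2_12(<16 x i32> %v) {
  %res = shufflevector <16 x i32> %v, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 12>
  ret <4 x i32> %res
}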