SelectionDAG &DAG) {
MVT VT = Amt.getSimpleValueType();
if (!(VT == MVT::v8i16 || VT == MVT::v4i32 ||
- (Subtarget.hasInt256() && VT == MVT::v16i16)))
+ (Subtarget.hasInt256() && VT == MVT::v16i16) ||
+ (!Subtarget.hasAVX512() && VT == MVT::v16i8)))
return SDValue();
if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
ret <8 x i16> %1
}
-define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounwind {
-; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X86: # %bb.0:
-; X86-NEXT: movdqa %xmm0, %xmm1
-; X86-NEXT: movdqa %xmm0, %xmm2
-; X86-NEXT: psllw $4, %xmm2
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm2
-; X86-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640]
-; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; X86-NEXT: movdqa %xmm1, %xmm2
-; X86-NEXT: psllw $2, %xmm2
-; X86-NEXT: pand {{\.LCPI.*}}, %xmm2
-; X86-NEXT: paddb %xmm0, %xmm0
-; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; X86-NEXT: movdqa %xmm1, %xmm2
-; X86-NEXT: paddb %xmm1, %xmm2
-; X86-NEXT: paddb %xmm0, %xmm0
-; X86-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; X86-NEXT: movdqa %xmm1, %xmm0
-; X86-NEXT: retl
-;
-; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X64: # %bb.0:
-; X64-NEXT: movdqa %xmm0, %xmm1
-; X64-NEXT: movdqa %xmm0, %xmm2
-; X64-NEXT: psllw $4, %xmm2
-; X64-NEXT: pand {{.*}}(%rip), %xmm2
-; X64-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,8192,24640,8192,24640,8192,24640]
-; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; X64-NEXT: movdqa %xmm1, %xmm2
-; X64-NEXT: psllw $2, %xmm2
-; X64-NEXT: pand {{.*}}(%rip), %xmm2
-; X64-NEXT: paddb %xmm0, %xmm0
-; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; X64-NEXT: movdqa %xmm1, %xmm2
-; X64-NEXT: paddb %xmm1, %xmm2
-; X64-NEXT: paddb %xmm0, %xmm0
-; X64-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; X64-NEXT: movdqa %xmm1, %xmm0
-; X64-NEXT: retq
-;
+define <16 x i8> @mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8(<16 x i8> %a0) nounwind {
+; X86-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X86: # %bb.0:
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8]
+; X86-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X86-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X86-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X86-NEXT: pmullw %xmm2, %xmm0
+; X86-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X86-NEXT: pand %xmm2, %xmm0
+; X86-NEXT: pmullw {{\.LCPI.*}}, %xmm1
+; X86-NEXT: pand %xmm2, %xmm1
+; X86-NEXT: packuswb %xmm0, %xmm1
+; X86-NEXT: movdqa %xmm1, %xmm0
+; X86-NEXT: retl
+;
+; X64-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X64: # %bb.0:
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,1,2,4,8,1,2,4,8,1,2,4,8]
+; X64-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X64-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X64-NEXT: pmullw %xmm2, %xmm0
+; X64-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X64-NEXT: pand %xmm2, %xmm0
+; X64-NEXT: pmullw {{.*}}(%rip), %xmm1
+; X64-NEXT: pand %xmm2, %xmm1
+; X64-NEXT: packuswb %xmm0, %xmm1
+; X64-NEXT: movdqa %xmm1, %xmm0
+; X64-NEXT: retq
+;
; X64-XOP-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
; X64-XOP: # %bb.0:
; X64-XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; X64-XOP-NEXT: retq
-;
-; X64-AVX2-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
-; X64-AVX2: # %bb.0:
-; X64-AVX2-NEXT: vpsllw $4, %xmm0, %xmm1
-; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,8192,24640,8192,24640,8192,24640]
-; X64-AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT: vpsllw $2, %xmm0, %xmm1
-; X64-AVX2-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; X64-AVX2-NEXT: vpaddb %xmm2, %xmm2, %xmm2
-; X64-AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT: vpaddb %xmm0, %xmm0, %xmm1
-; X64-AVX2-NEXT: vpaddb %xmm2, %xmm2, %xmm2
-; X64-AVX2-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; X64-AVX2-NEXT: retq
- %1 = mul <16 x i8> %a0, <i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8>
- ret <16 x i8> %1
+;
+; X64-AVX2-LABEL: mul_v16i8_1_2_4_8_1_2_4_8_1_2_4_8_1_2_4_8:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; X64-AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; X64-AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %1 = mul <16 x i8> %a0, <i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8, i8 1, i8 2, i8 4, i8 8>
+ ret <16 x i8> %1
}
;
ret <8 x i16> %shift
}
-define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
-; SSE2-LABEL: constant_shift_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psllw $4, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm4
-; SSE2-NEXT: pandn %xmm0, %xmm4
-; SSE2-NEXT: psllw $2, %xmm0
-; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: por %xmm4, %xmm0
-; SSE2-NEXT: paddb %xmm2, %xmm2
-; SSE2-NEXT: pcmpgtb %xmm2, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm2
-; SSE2-NEXT: pandn %xmm0, %xmm2
-; SSE2-NEXT: paddb %xmm0, %xmm0
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: por %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: constant_shift_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: psllw $4, %xmm2
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [8192,24640,41088,57536,49376,32928,16480,32]
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: psllw $2, %xmm2
-; SSE41-NEXT: pand {{.*}}(%rip), %xmm2
-; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm2
-; SSE41-NEXT: paddb %xmm1, %xmm2
-; SSE41-NEXT: paddb %xmm0, %xmm0
-; SSE41-NEXT: pblendvb %xmm0, %xmm2, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX-LABEL: constant_shift_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vpsllw $4, %xmm0, %xmm1
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
-; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpsllw $2, %xmm0, %xmm1
-; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
-; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX-NEXT: vpaddb %xmm0, %xmm0, %xmm1
-; AVX-NEXT: vpaddb %xmm2, %xmm2, %xmm2
-; AVX-NEXT: vpblendvb %xmm2, %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
-;
-; XOP-LABEL: constant_shift_v16i8:
-; XOP: # %bb.0:
+define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
+; SSE2-LABEL: constant_shift_v16i8:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE2-NEXT: pmullw %xmm2, %xmm3
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE2-NEXT: pmullw %xmm1, %xmm0
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: packuswb %xmm3, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: constant_shift_v16i8:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
+; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; SSE41-NEXT: pmullw %xmm2, %xmm0
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; SSE41-NEXT: pand %xmm2, %xmm0
+; SSE41-NEXT: pmullw {{.*}}(%rip), %xmm1
+; SSE41-NEXT: pand %xmm2, %xmm1
+; SSE41-NEXT: packuswb %xmm0, %xmm1
+; SSE41-NEXT: movdqa %xmm1, %xmm0
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: constant_shift_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpmullw %xmm1, %xmm2, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vpmovsxbw %xmm0, %ymm0
+; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpshufb %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; XOP-LABEL: constant_shift_v16i8:
+; XOP: # %bb.0:
; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX512BWVL-NEXT: vpmovwb %ymm0, %xmm0
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
-;
-; X32-SSE-LABEL: constant_shift_v16i8:
-; X32-SSE: # %bb.0:
-; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [8192,24640,41088,57536,49376,32928,16480,32]
-; X32-SSE-NEXT: pxor %xmm1, %xmm1
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psllw $4, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm2, %xmm2
-; X32-SSE-NEXT: pxor %xmm3, %xmm3
-; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm3
-; X32-SSE-NEXT: movdqa %xmm3, %xmm4
-; X32-SSE-NEXT: pandn %xmm0, %xmm4
-; X32-SSE-NEXT: psllw $2, %xmm0
-; X32-SSE-NEXT: pand {{\.LCPI.*}}, %xmm0
-; X32-SSE-NEXT: pand %xmm3, %xmm0
-; X32-SSE-NEXT: por %xmm4, %xmm0
-; X32-SSE-NEXT: paddb %xmm2, %xmm2
-; X32-SSE-NEXT: pcmpgtb %xmm2, %xmm1
-; X32-SSE-NEXT: movdqa %xmm1, %xmm2
-; X32-SSE-NEXT: pandn %xmm0, %xmm2
-; X32-SSE-NEXT: paddb %xmm0, %xmm0
-; X32-SSE-NEXT: pand %xmm1, %xmm0
-; X32-SSE-NEXT: por %xmm2, %xmm0
-; X32-SSE-NEXT: retl
- %shift = shl <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
- ret <16 x i8> %shift
+;
+; X32-SSE-LABEL: constant_shift_v16i8:
+; X32-SSE: # %bb.0:
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
+; X32-SSE-NEXT: movdqa %xmm1, %xmm2
+; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; X32-SSE-NEXT: movdqa %xmm0, %xmm3
+; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; X32-SSE-NEXT: pmullw %xmm2, %xmm3
+; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; X32-SSE-NEXT: pand %xmm2, %xmm3
+; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; X32-SSE-NEXT: pmullw %xmm1, %xmm0
+; X32-SSE-NEXT: pand %xmm2, %xmm0
+; X32-SSE-NEXT: packuswb %xmm3, %xmm0
+; X32-SSE-NEXT: retl
+ %shift = shl <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
+ ret <16 x i8> %shift
}
;
ret <16 x i16> %shift
}
-define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
-; AVX1-LABEL: constant_shift_v32i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,49376,32928,16480,32]
-; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $2, %xmm1, %xmm2
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
-; AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
-; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpsllw $4, %xmm0, %xmm2
-; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpsllw $2, %xmm0, %xmm2
-; AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_shift_v32i8:
+define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
+; AVX1-LABEL: constant_shift_v32i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
+; AVX1-NEXT: vpmullw %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: constant_shift_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512BWVL-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BWVL-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BWVL-NEXT: retq
-;
-; X32-AVX1-LABEL: constant_shift_v32i8:
-; X32-AVX1: # %bb.0:
-; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; X32-AVX1-NEXT: vpsllw $4, %xmm1, %xmm2
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
-; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8192,24640,41088,57536,49376,32928,16480,32]
-; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpsllw $2, %xmm1, %xmm2
-; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
-; X32-AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm6
-; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm2
-; X32-AVX1-NEXT: vpaddb %xmm6, %xmm6, %xmm7
-; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm1, %xmm1
-; X32-AVX1-NEXT: vpsllw $4, %xmm0, %xmm2
-; X32-AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpblendvb %xmm4, %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpsllw $2, %xmm0, %xmm2
-; X32-AVX1-NEXT: vpand %xmm5, %xmm2, %xmm2
-; X32-AVX1-NEXT: vpblendvb %xmm6, %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm2
-; X32-AVX1-NEXT: vpblendvb %xmm7, %xmm2, %xmm0, %xmm0
-; X32-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
-; X32-AVX1-NEXT: retl
-;
-; X32-AVX2-LABEL: constant_shift_v32i8:
+;
+; X32-AVX1-LABEL: constant_shift_v32i8:
+; X32-AVX1: # %bb.0:
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,128,64,32,16,8,4,2,1]
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X32-AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X32-AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm3
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; X32-AVX1-NEXT: vpand %xmm4, %xmm3, %xmm3
+; X32-AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
+; X32-AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,2,4,8,16,32,64,128]
+; X32-AVX1-NEXT: vpmullw %xmm5, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpand %xmm4, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpackuswb %xmm3, %xmm2, %xmm2
+; X32-AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; X32-AVX1-NEXT: vpmullw %xmm1, %xmm3, %xmm1
+; X32-AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; X32-AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
+; X32-AVX1-NEXT: vpmullw %xmm5, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; X32-AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; X32-AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; X32-AVX1-NEXT: retl
+;
+; X32-AVX2-LABEL: constant_shift_v32i8:
; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vpsllw $4, %ymm0, %ymm1
; X32-AVX2-NEXT: vpand {{\.LCPI.*}}, %ymm1, %ymm1