As discussed on D56387, if we're shifting to extract the upper/lower half of a vXi64 vector then we're actually better off performing this at the subvector level as it's very likely to fold into something.
combineConcatVectorOps can perform this in reverse if necessary.
// If we're extracting the lowest subvector and we're the only user,
// we may be able to perform this with a smaller vector width.
+ unsigned InOpcode = InVec.getOpcode();
if (IdxVal == 0 && InVec.hasOneUse()) {
- unsigned InOpcode = InVec.getOpcode();
if (VT == MVT::v2f64 && InVecVT == MVT::v4f64) {
// v2f64 CVTDQ2PD(v4i32).
if (InOpcode == ISD::SINT_TO_FP &&
}
}
+ // Always split vXi64 logical shifts where we're extracting the upper 32-bits
+ // as this is very likely to fold into a shuffle/truncation.
+ if ((InOpcode == X86ISD::VSHLI || InOpcode == X86ISD::VSRLI) &&
+ InVecVT.getScalarSizeInBits() == 64 &&
+ InVec.getConstantOperandAPInt(1) == 32) {
+ SDLoc DL(N);
+ SDValue Ext =
+ extractSubVector(InVec.getOperand(0), IdxVal, DAG, DL, SizeInBits);
+ return DAG.getNode(InOpcode, DL, VT, Ext, InVec.getOperand(1));
+ }
+
return SDValue();
}
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_lshr:
; AVX2-SLOW: # %bb.0:
-; AVX2-SLOW-NEXT: vpsrlq $32, %ymm0, %ymm0
-; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
+; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
+; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-SLOW-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
; AVX-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
-; AVX-NEXT: vpsllq $32, %ymm0, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
entry:
;
; AVX2-64-LABEL: uitofp_v4i64_v4f64:
; AVX2-64: # %bb.0:
-; AVX2-64-NEXT: vpsrlq $32, %ymm0, %ymm1
-; AVX2-64-NEXT: vextracti128 $1, %ymm1, %xmm2
+; AVX2-64-NEXT: vextracti128 $1, %ymm0, %xmm1
+; AVX2-64-NEXT: vpsrlq $32, %xmm1, %xmm1
+; AVX2-64-NEXT: vpextrq $1, %xmm1, %rax
+; AVX2-64-NEXT: vcvtsi2sd %rax, %xmm2, %xmm2
+; AVX2-64-NEXT: vmovq %xmm1, %rax
+; AVX2-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm1
+; AVX2-64-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; AVX2-64-NEXT: vpsrlq $32, %xmm0, %xmm2
; AVX2-64-NEXT: vpextrq $1, %xmm2, %rax
; AVX2-64-NEXT: vcvtsi2sd %rax, %xmm3, %xmm3
; AVX2-64-NEXT: vmovq %xmm2, %rax
; AVX2-64-NEXT: vcvtsi2sd %rax, %xmm4, %xmm2
; AVX2-64-NEXT: vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX2-64-NEXT: vpextrq $1, %xmm1, %rax
-; AVX2-64-NEXT: vcvtsi2sd %rax, %xmm4, %xmm3
-; AVX2-64-NEXT: vmovq %xmm1, %rax
-; AVX2-64-NEXT: vcvtsi2sd %rax, %xmm4, %xmm1
-; AVX2-64-NEXT: vunpcklpd {{.*#+}} xmm1 = xmm1[0],xmm3[0]
-; AVX2-64-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
+; AVX2-64-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX2-64-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4.294967296E+9,4.294967296E+9,4.294967296E+9,4.294967296E+9]
; AVX2-64-NEXT: vmulpd %ymm2, %ymm1, %ymm1
; AVX2-64-NEXT: vxorpd %xmm2, %xmm2, %xmm2