return SDValue();
}
+// Simplify PMULDQ and PMULUDQ operations.
+static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
+                             TargetLowering::DAGCombinerInfo &DCI) {
+  SDValue LHS = N->getOperand(0);
+  SDValue RHS = N->getOperand(1);
+
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
+                                        !DCI.isBeforeLegalizeOps());
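+  // TLO records the replacements SimplifyDemandedBits proposes; the two
+  // flags keep it from forming illegal types or operations once
+  // legalization has run.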
+  APInt DemandedMask(APInt::getLowBitsSet(64, 32));
+
+  // PMULDQ/PMULUDQ only use the lower 32 bits of each vector element.
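+  // Any computation that feeds only bits 32-63 of an operand is therefore
+  // dead here and can be stripped (e.g. a mask or shift of the high half).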
+  KnownBits LHSKnown;
+  if (TLI.SimplifyDemandedBits(LHS, DemandedMask, LHSKnown, TLO)) {
+    DCI.CommitTargetLoweringOpt(TLO);
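+    // Returning the node itself tells the combiner an operand changed in
+    // place.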
+    return SDValue(N, 0);
+  }
+
+  KnownBits RHSKnown;
+  if (TLI.SimplifyDemandedBits(RHS, DemandedMask, RHSKnown, TLO)) {
+    DCI.CommitTargetLoweringOpt(TLO);
+    return SDValue(N, 0);
+  }
+
+  return SDValue();
+}
+
SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  case ISD::MSCATTER: return combineGatherScatter(N, DAG, DCI, Subtarget);
  case X86ISD::PCMPEQ:
  case X86ISD::PCMPGT: return combineVectorCompare(N, DAG, Subtarget);
+  case X86ISD::PMULDQ:
+  case X86ISD::PMULUDQ: return combinePMULDQ(N, DAG, DCI);
  }
  return SDValue();
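For reference, a scalar model of what one lane of these instructions computes
(an illustrative C++ sketch, not part of the patch; the helper names are
invented): both instructions read only bits 0-31 of each 64-bit source
element, which is exactly what DemandedMask encodes above.

    #include <cstdint>

    // One PMULUDQ lane: multiply the zero-extended low 32 bits of each element.
    static uint64_t pmuludq_lane(uint64_t a, uint64_t b) {
      return uint64_t(uint32_t(a)) * uint64_t(uint32_t(b));
    }

    // One PMULDQ lane: multiply the sign-extended low 32 bits of each element.
    static int64_t pmuldq_lane(uint64_t a, uint64_t b) {
      return int64_t(int32_t(uint32_t(a))) * int64_t(int32_t(uint32_t(b)));
    }

In the updated tests below, the high half of the constant operand is folded at
compile time: the pre-shifted constant is loaded straight from the constant
pool as a pmuludq memory operand, eliminating one psrlq.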
define <2 x i64> @mul_v2i64_neg_15_63(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_neg_15_63:
; X86: # %bb.0:
-; X86-NEXT: movdqa %xmm0, %xmm1
-; X86-NEXT: psrlq $32, %xmm1
-; X86-NEXT: movdqa {{.*#+}} xmm2 = [4294967281,4294967295,4294967233,4294967295]
-; X86-NEXT: pmuludq %xmm2, %xmm1
-; X86-NEXT: movdqa %xmm2, %xmm3
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967281,4294967295,4294967233,4294967295]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: movdqa %xmm0, %xmm3
; X86-NEXT: psrlq $32, %xmm3
-; X86-NEXT: pmuludq %xmm0, %xmm3
-; X86-NEXT: paddq %xmm1, %xmm3
-; X86-NEXT: psllq $32, %xmm3
-; X86-NEXT: pmuludq %xmm2, %xmm0
+; X86-NEXT: pmuludq %xmm1, %xmm3
+; X86-NEXT: pmuludq {{\.LCPI.*}}, %xmm0
; X86-NEXT: paddq %xmm3, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_neg_15_63:
define <2 x i64> @mul_v2i64_neg_17_65(<2 x i64> %a0) nounwind {
; X86-LABEL: mul_v2i64_neg_17_65:
; X86: # %bb.0:
-; X86-NEXT: movdqa %xmm0, %xmm1
-; X86-NEXT: psrlq $32, %xmm1
-; X86-NEXT: movdqa {{.*#+}} xmm2 = [4294967279,4294967295,4294967231,4294967295]
-; X86-NEXT: pmuludq %xmm2, %xmm1
-; X86-NEXT: movdqa %xmm2, %xmm3
+; X86-NEXT: movdqa {{.*#+}} xmm1 = [4294967279,4294967295,4294967231,4294967295]
+; X86-NEXT: movdqa %xmm0, %xmm2
+; X86-NEXT: pmuludq %xmm1, %xmm2
+; X86-NEXT: movdqa %xmm0, %xmm3
; X86-NEXT: psrlq $32, %xmm3
-; X86-NEXT: pmuludq %xmm0, %xmm3
-; X86-NEXT: paddq %xmm1, %xmm3
-; X86-NEXT: psllq $32, %xmm3
-; X86-NEXT: pmuludq %xmm2, %xmm0
+; X86-NEXT: pmuludq %xmm1, %xmm3
+; X86-NEXT: pmuludq {{\.LCPI.*}}, %xmm0
; X86-NEXT: paddq %xmm3, %xmm0
+; X86-NEXT: psllq $32, %xmm0
+; X86-NEXT: paddq %xmm2, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: mul_v2i64_neg_17_65: