}
break;
}
+ case X86ISD::MOVQ2DQ: {
+ // Move from MMX to XMM; the upper half of the XMM result is zero.
+ // If every demanded element lies in that upper half, all demanded
+ // bits are known zero.
+ if (DemandedElts.countTrailingZeros() >= (NumElts / 2))
+ Known.setAllZero();
+ break;
+ }
}
// Handle target shuffles.
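The guard above keys off the index of the lowest demanded element: if `DemandedElts.countTrailingZeros() >= NumElts / 2`, every demanded lane sits in the upper half that MOVQ2DQ zeroes, so all demanded bits are known zero. Below is a minimal standalone sketch of that check, not part of the patch, assuming the `APInt`/`KnownBits` API of this patch's era and a hypothetical v4i32 result (NumElts = 4):

```cpp
// Sketch only: exercises the DemandedElts guard with concrete values.
#include "llvm/ADT/APInt.h"
#include "llvm/Support/KnownBits.h"
#include <cstdio>

using namespace llvm;

int main() {
  const unsigned NumElts = 4;
  // Demand only the upper two lanes: element mask 0b1100.
  APInt DemandedElts(NumElts, 0b1100);
  KnownBits Known(32);
  // Lowest demanded lane is countTrailingZeros() == 2, which is
  // >= NumElts / 2, so every demanded lane lies in the zeroed half.
  if (DemandedElts.countTrailingZeros() >= (NumElts / 2))
    Known.setAllZero();
  std::printf("all demanded bits known zero: %d\n",
              (int)Known.Zero.isAllOnesValue());
  return 0;
}
```

For `DemandedElts = 0b0110` the lowest demanded lane is index 1, the guard fails, and `Known` is left conservative (nothing known), which is the safe default.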
; X86-NEXT: movq (%eax), %mm0
; X86-NEXT: paddd %mm0, %mm0
; X86-NEXT: movq2dq %mm0, %xmm0
-; X86-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X86-NEXT: cvtdq2ps %xmm0, %xmm0
; X86-NEXT: retl
;
; X64-NEXT: movq (%rdi), %mm0
; X64-NEXT: paddd %mm0, %mm0
; X64-NEXT: movq2dq %mm0, %xmm0
-; X64-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
; X64-NEXT: cvtdq2ps %xmm0, %xmm0
; X64-NEXT: retq
%2 = bitcast <1 x i64>* %0 to x86_mmx*
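The deleted `movq {{.*#+}} xmm0 = xmm0[0],zero` assertions are the payoff of the known-bits change: that shuffle keeps the low qword and zeroes the high qword, which `movq2dq` already guarantees, so the shuffle is a no-op and folds away. A standalone sketch of the equivalence, not part of the patch, with hypothetical lane values:

```cpp
// Sketch only: models an XMM register as four 32-bit lanes.
#include <array>
#include <cassert>
#include <cstdint>

using XMM = std::array<uint32_t, 4>;

// movq2dq %mm0, %xmm0: MMX dwords in the low half, upper half zeroed.
XMM movq2dq(uint32_t a, uint32_t b) { return {a, b, 0, 0}; }

// movq xmm0 = xmm0[0],zero: keep the low qword, zero the high qword.
XMM movqZeroUpper(const XMM &v) { return {v[0], v[1], 0, 0}; }

int main() {
  XMM v = movq2dq(7, 9); // hypothetical MMX payload
  // Zeroing an already-zero upper half changes nothing, so the
  // compiler may delete the second instruction.
  assert(movqZeroUpper(v) == v);
  return 0;
}
```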
define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
; X32-LABEL: mmx_movzl:
; X32: ## %bb.0:
-; X32-NEXT: movq2dq %mm0, %xmm0
-; X32-NEXT: movl $32, %eax
-; X32-NEXT: pinsrd $0, %eax, %xmm0
-; X32-NEXT: pxor %xmm1, %xmm1
-; X32-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
-; X32-NEXT: movdq2q %xmm1, %mm0
+; X32-NEXT: movq LCPI0_0, %mm0
; X32-NEXT: retl
;
; X64-LABEL: mmx_movzl:
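For `mmx_movzl`, the new knowledge that `movq2dq` zeroes words 4-7 makes every lane of the old `pinsrd`/`pxor`/`pblendw` sequence a compile-time constant, so the X32 path collapses to one constant-pool load (`LCPI0_0`, i.e. `<32,0,0,0>`). A standalone sketch of that per-lane evaluation, not part of the patch, with hypothetical input words:

```cpp
// Sketch only: models an XMM register as eight 16-bit words.
#include <array>
#include <cstdint>
#include <cstdio>

using XMMW = std::array<uint16_t, 8>;

int main() {
  // movq2dq: MMX words in lanes 0-3, lanes 4-7 known zero.
  XMMW x = {1, 2, 3, 4, 0, 0, 0, 0}; // words 0-3 are hypothetical
  // pinsrd $0: write 32 into dword 0, i.e. words 0-1.
  x[0] = 32;
  x[1] = 0;
  // pxor %xmm1, %xmm1: all-zero register.
  XMMW zero = {};
  // pblendw: xmm0[0,1], xmm1[2,3], xmm0[4,5,6,7].
  XMMW r = {x[0], x[1], zero[2], zero[3], x[4], x[5], x[6], x[7]};
  // Words 4-7 came from the zeroed half of movq2dq, so every lane is
  // a constant and r is <32,0,0,0> as dwords: a constant-pool load.
  for (uint16_t w : r)
    std::printf("%u ", (unsigned)w); // prints: 32 0 0 0 0 0 0 0
  std::printf("\n");
  return 0;
}
```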