// NOTE(review): the enclosing function's signature and the declarations of
// N, N0 (dividend) and N1 (divisor) are above this hunk — confirm against the
// full file. From the visible body this simplifies integer div/rem nodes,
// returning the folded value or SDValue() when no trivial fold applies.
EVT VT = N->getValueType(0);
SDLoc DL(N);
+ unsigned Opc = N->getOpcode();
+ // IsDiv selects the "division" result below; false means a remainder opcode.
+ bool IsDiv = (ISD::SDIV == Opc) || (ISD::UDIV == Opc);
+
// X / undef -> undef
// X % undef -> undef
// X / 0 -> undef
// X % 0 -> undef
// NOTE: This includes vectors where any divisor element is zero/undef.
- if (DAG.isUndef(N->getOpcode(), {N0, N1}))
+ if (DAG.isUndef(Opc, {N0, N1}))
return DAG.getUNDEF(VT);
// undef / X -> 0
if (N0.isUndef())
return DAG.getConstant(0, DL, VT);
+ // TODO: 0 / X -> 0
+ // TODO: 0 % X -> 0
+
+ // X / X -> 1
+ // X % X -> 0
+ // (Safe even if X may be 0: per the comments above, div/rem by zero already
+ // produces undef, so any fixed result is an acceptable refinement.)
+ if (N0 == N1)
+ return DAG.getConstant(IsDiv ? 1 : 0, DL, VT);
+
+ // TODO: X / 1 -> X
+ // TODO: X % 1 -> 0
+ // If this is a boolean op (single-bit element type), we can't have
+ // division-by-zero or remainder-by-zero, so assume the divisor is 1.
+ // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1.
+
// No trivial simplification matched; let other combines run.
return SDValue();
}
ret <4 x i32> %1
}
-; TODO fold (sdiv x, x) -> 1
+; fold (sdiv x, x) -> 1
define i32 @combine_sdiv_dupe(i32 %x) {
; CHECK-LABEL: combine_sdiv_dupe:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %edi
+; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retq
; NOTE(review): the new combine folds sdiv %x, %x -> 1, so the cltd/idivl
; sequence is replaced by a single constant move.
%1 = sdiv i32 %x, %x
ret i32 %1
}
define <4 x i32> @combine_vec_sdiv_dupe(<4 x i32> %x) {
-; SSE2-LABEL: combine_vec_sdiv_dupe:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSE2-NEXT: movd %xmm1, %ecx
-; SSE2-NEXT: movl %ecx, %eax
-; SSE2-NEXT: cltd
-; SSE2-NEXT: idivl %ecx
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: movl %ecx, %eax
-; SSE2-NEXT: cltd
-; SSE2-NEXT: idivl %ecx
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: movd %xmm0, %ecx
-; SSE2-NEXT: movl %ecx, %eax
-; SSE2-NEXT: cltd
-; SSE2-NEXT: idivl %ecx
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: movd %xmm0, %ecx
-; SSE2-NEXT: movl %ecx, %eax
-; SSE2-NEXT: cltd
-; SSE2-NEXT: idivl %ecx
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: retq
+; SSE-LABEL: combine_vec_sdiv_dupe:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,1,1,1]
+; SSE-NEXT: retq
;
-; SSE41-LABEL: combine_vec_sdiv_dupe:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pextrd $1, %xmm0, %ecx
-; SSE41-NEXT: movl %ecx, %eax
-; SSE41-NEXT: cltd
-; SSE41-NEXT: idivl %ecx
-; SSE41-NEXT: movl %eax, %ecx
-; SSE41-NEXT: movd %xmm0, %esi
-; SSE41-NEXT: movl %esi, %eax
-; SSE41-NEXT: cltd
-; SSE41-NEXT: idivl %esi
-; SSE41-NEXT: movd %eax, %xmm1
-; SSE41-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE41-NEXT: pextrd $2, %xmm0, %ecx
-; SSE41-NEXT: movl %ecx, %eax
-; SSE41-NEXT: cltd
-; SSE41-NEXT: idivl %ecx
-; SSE41-NEXT: pinsrd $2, %eax, %xmm1
-; SSE41-NEXT: pextrd $3, %xmm0, %ecx
-; SSE41-NEXT: movl %ecx, %eax
-; SSE41-NEXT: cltd
-; SSE41-NEXT: idivl %ecx
-; SSE41-NEXT: pinsrd $3, %eax, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
+; AVX1-LABEL: combine_vec_sdiv_dupe:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [1,1,1,1]
+; AVX1-NEXT: retq
;
-; AVX-LABEL: combine_vec_sdiv_dupe:
-; AVX: # %bb.0:
-; AVX-NEXT: vpextrd $1, %xmm0, %ecx
-; AVX-NEXT: movl %ecx, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: movl %eax, %ecx
-; AVX-NEXT: vmovd %xmm0, %esi
-; AVX-NEXT: movl %esi, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %esi
-; AVX-NEXT: vmovd %eax, %xmm1
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %ecx
-; AVX-NEXT: movl %ecx, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $3, %xmm0, %ecx
-; AVX-NEXT: movl %ecx, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX2ORLATER-LABEL: combine_vec_sdiv_dupe:
+; AVX2ORLATER: # %bb.0:
+; AVX2ORLATER-NEXT: vbroadcastss {{.*#+}} xmm0 = [1,1,1,1]
+; AVX2ORLATER-NEXT: retq
+;
+; XOP-LABEL: combine_vec_sdiv_dupe:
+; XOP: # %bb.0:
+; XOP-NEXT: vmovaps {{.*#+}} xmm0 = [1,1,1,1]
+; XOP-NEXT: retq
; NOTE(review): with the sdiv x,x -> 1 fold, every target materializes the
; splat constant <1,1,1,1> instead of four scalar idivl operations, so the
; per-subtarget check prefixes collapse accordingly.
%1 = sdiv <4 x i32> %x, %x
ret <4 x i32> %1
}
ret <4 x i32> %1
}
-; TODO fold (srem x, x) -> 0
+; fold (srem x, x) -> 0
define i32 @combine_srem_dupe(i32 %x) {
; CHECK-LABEL: combine_srem_dupe:
; CHECK: # %bb.0:
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %edi
-; CHECK-NEXT: movl %edx, %eax
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: retq
; NOTE(review): srem %x, %x now folds to the constant 0 (a single xor).
%1 = srem i32 %x, %x
ret i32 %1
}
define <4 x i32> @combine_vec_srem_dupe(<4 x i32> %x) {
; SSE-LABEL: combine_vec_srem_dupe:
; SSE: # %bb.0:
-; SSE-NEXT: pextrd $1, %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: movl %edx, %ecx
-; SSE-NEXT: movd %xmm0, %esi
-; SSE-NEXT: movl %esi, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: movd %edx, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: pinsrd $2, %edx, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %ecx
-; SSE-NEXT: movl %ecx, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: pinsrd $3, %edx, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_srem_dupe:
; AVX: # %bb.0:
-; AVX-NEXT: vpextrd $1, %xmm0, %ecx
-; AVX-NEXT: movl %ecx, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: movl %edx, %ecx
-; AVX-NEXT: vmovd %xmm0, %esi
-; AVX-NEXT: movl %esi, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %esi
-; AVX-NEXT: vmovd %edx, %xmm1
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %ecx
-; AVX-NEXT: movl %ecx, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: vpinsrd $2, %edx, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $3, %xmm0, %ecx
-; AVX-NEXT: movl %ecx, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: vpinsrd $3, %edx, %xmm1, %xmm0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
; NOTE(review): srem x,x folds to the zero vector on all subtargets.
%1 = srem <4 x i32> %x, %x
ret <4 x i32> %1
}
ret <4 x i32> %1
}
-; TODO fold (udiv x, x) -> 1
+; fold (udiv x, x) -> 1
define i32 @combine_udiv_dupe(i32 %x) {
; CHECK-LABEL: combine_udiv_dupe:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: divl %edi
+; CHECK-NEXT: movl $1, %eax
; CHECK-NEXT: retq
; NOTE(review): udiv %x, %x now folds to the constant 1; no divl is emitted.
%1 = udiv i32 %x, %x
ret i32 %1
}
define <4 x i32> @combine_vec_udiv_dupe(<4 x i32> %x) {
-; SSE2-LABEL: combine_vec_udiv_dupe:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: xorl %edx, %edx
-; SSE2-NEXT: divl %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: xorl %edx, %edx
-; SSE2-NEXT: divl %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: xorl %edx, %edx
-; SSE2-NEXT: divl %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: xorl %edx, %edx
-; SSE2-NEXT: divl %eax
-; SSE2-NEXT: movd %eax, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: retq
+; SSE-LABEL: combine_vec_udiv_dupe:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps {{.*#+}} xmm0 = [1,1,1,1]
+; SSE-NEXT: retq
;
-; SSE41-LABEL: combine_vec_udiv_dupe:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pextrd $1, %xmm0, %eax
-; SSE41-NEXT: xorl %edx, %edx
-; SSE41-NEXT: divl %eax
-; SSE41-NEXT: movl %eax, %ecx
-; SSE41-NEXT: movd %xmm0, %eax
-; SSE41-NEXT: xorl %edx, %edx
-; SSE41-NEXT: divl %eax
-; SSE41-NEXT: movd %eax, %xmm1
-; SSE41-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE41-NEXT: pextrd $2, %xmm0, %eax
-; SSE41-NEXT: xorl %edx, %edx
-; SSE41-NEXT: divl %eax
-; SSE41-NEXT: pinsrd $2, %eax, %xmm1
-; SSE41-NEXT: pextrd $3, %xmm0, %eax
-; SSE41-NEXT: xorl %edx, %edx
-; SSE41-NEXT: divl %eax
-; SSE41-NEXT: pinsrd $3, %eax, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
+; AVX1-LABEL: combine_vec_udiv_dupe:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovaps {{.*#+}} xmm0 = [1,1,1,1]
+; AVX1-NEXT: retq
;
-; AVX-LABEL: combine_vec_udiv_dupe:
-; AVX: # %bb.0:
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %eax
-; AVX-NEXT: movl %eax, %ecx
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %eax
-; AVX-NEXT: vmovd %eax, %xmm1
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %eax
-; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %eax
-; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX2-LABEL: combine_vec_udiv_dupe:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vbroadcastss {{.*#+}} xmm0 = [1,1,1,1]
+; AVX2-NEXT: retq
;
; XOP-LABEL: combine_vec_udiv_dupe:
; XOP: # %bb.0:
-; XOP-NEXT: vpextrd $1, %xmm0, %eax
-; XOP-NEXT: xorl %edx, %edx
-; XOP-NEXT: divl %eax
-; XOP-NEXT: movl %eax, %ecx
-; XOP-NEXT: vmovd %xmm0, %eax
-; XOP-NEXT: xorl %edx, %edx
-; XOP-NEXT: divl %eax
-; XOP-NEXT: vmovd %eax, %xmm1
-; XOP-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; XOP-NEXT: vpextrd $2, %xmm0, %eax
-; XOP-NEXT: xorl %edx, %edx
-; XOP-NEXT: divl %eax
-; XOP-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; XOP-NEXT: vpextrd $3, %xmm0, %eax
-; XOP-NEXT: xorl %edx, %edx
-; XOP-NEXT: divl %eax
-; XOP-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; XOP-NEXT: vmovaps {{.*#+}} xmm0 = [1,1,1,1]
; XOP-NEXT: retq
; NOTE(review): udiv x,x folds to the <1,1,1,1> splat; no per-lane divl needed.
%1 = udiv <4 x i32> %x, %x
ret <4 x i32> %1
}
ret <4 x i32> %1
}
-; TODO fold (urem x, x) -> 0
+; fold (urem x, x) -> 0
define i32 @combine_urem_dupe(i32 %x) {
; CHECK-LABEL: combine_urem_dupe:
; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: divl %edi
-; CHECK-NEXT: movl %edx, %eax
+; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: retq
; NOTE(review): urem %x, %x now folds to the constant 0 (a single xor).
%1 = urem i32 %x, %x
ret i32 %1
}
define <4 x i32> @combine_vec_urem_dupe(<4 x i32> %x) {
; SSE-LABEL: combine_vec_urem_dupe:
; SSE: # %bb.0:
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %eax
-; SSE-NEXT: movl %edx, %ecx
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %eax
-; SSE-NEXT: movd %edx, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %eax
-; SSE-NEXT: pinsrd $2, %edx, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: xorl %edx, %edx
-; SSE-NEXT: divl %eax
-; SSE-NEXT: pinsrd $3, %edx, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_urem_dupe:
; AVX: # %bb.0:
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %eax
-; AVX-NEXT: movl %edx, %ecx
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %eax
-; AVX-NEXT: vmovd %edx, %xmm1
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %eax
-; AVX-NEXT: vpinsrd $2, %edx, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: xorl %edx, %edx
-; AVX-NEXT: divl %eax
-; AVX-NEXT: vpinsrd $3, %edx, %xmm1, %xmm0
+; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
; NOTE(review): urem x,x folds to the zero vector; the closing brace of this
; define lies past the end of this hunk.
%1 = urem <4 x i32> %x, %x
ret <4 x i32> %1