; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
-; FIXME: Div/rem by zero is undef.
+; Div/rem by zero is undef.
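+;
+; Illustrative sketch (comment only, not part of the autogenerated assertions):
+; with a zero divisor the whole computation is assumed to fold away, e.g.
+;   %rem = srem i32 %x, 0   ; divisor is zero -> result is undef
+; so each scalar case below should lower to a bare "retq" with no idiv/div.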
define i32 @srem0(i32 %x) {
; CHECK-LABEL: srem0:
; CHECK: # BB#0:
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %ecx
-; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: retq
%rem = srem i32 %x, 0
ret i32 %rem
define i32 @urem0(i32 %x) {
; CHECK-LABEL: urem0:
; CHECK: # BB#0:
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: divl %ecx
-; CHECK-NEXT: movl %edx, %eax
; CHECK-NEXT: retq
%rem = urem i32 %x, 0
ret i32 %rem
define i32 @sdiv0(i32 %x) {
; CHECK-LABEL: sdiv0:
; CHECK: # BB#0:
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %ecx
; CHECK-NEXT: retq
%div = sdiv i32 %x, 0
ret i32 %div
define i32 @udiv0(i32 %x) {
; CHECK-LABEL: udiv0:
; CHECK: # BB#0:
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: movl %edi, %eax
-; CHECK-NEXT: divl %ecx
; CHECK-NEXT: retq
%div = udiv i32 %x, 0
ret i32 %div
}
-; FIXME: Div/rem by zero vectors is undef.
+; Div/rem by a zero vector is undef.
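+;
+; Illustrative sketch of the vector case (comment only, not autogenerated):
+; with a zeroinitializer divisor every lane divides by zero, e.g.
+;   %rem = srem <4 x i32> %x, zeroinitializer   ; all lanes undef
+; so the per-element idiv/div expansion is assumed to fold away and each
+; function below should also lower to a bare "retq".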
define <4 x i32> @srem_vec0(<4 x i32> %x) {
; CHECK-LABEL: srem_vec0:
; CHECK: # BB#0:
-; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; CHECK-NEXT: movd %xmm1, %eax
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %ecx
-; CHECK-NEXT: movd %edx, %xmm1
-; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; CHECK-NEXT: movd %xmm2, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %ecx
-; CHECK-NEXT: movd %edx, %xmm2
-; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %ecx
-; CHECK-NEXT: movd %edx, %xmm1
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %ecx
-; CHECK-NEXT: movd %edx, %xmm0
-; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%rem = srem <4 x i32> %x, zeroinitializer
ret <4 x i32> %rem
define <4 x i32> @urem_vec0(<4 x i32> %x) {
; CHECK-LABEL: urem_vec0:
; CHECK: # BB#0:
-; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; CHECK-NEXT: movd %xmm1, %eax
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %ecx
-; CHECK-NEXT: movd %edx, %xmm1
-; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; CHECK-NEXT: movd %xmm2, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %ecx
-; CHECK-NEXT: movd %edx, %xmm2
-; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %ecx
-; CHECK-NEXT: movd %edx, %xmm1
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %ecx
-; CHECK-NEXT: movd %edx, %xmm0
-; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%rem = urem <4 x i32> %x, zeroinitializer
ret <4 x i32> %rem
define <4 x i32> @sdiv_vec0(<4 x i32> %x) {
; CHECK-LABEL: sdiv_vec0:
; CHECK: # BB#0:
-; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; CHECK-NEXT: movd %xmm1, %eax
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %ecx
-; CHECK-NEXT: movd %eax, %xmm1
-; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; CHECK-NEXT: movd %xmm2, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %ecx
-; CHECK-NEXT: movd %eax, %xmm2
-; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %ecx
-; CHECK-NEXT: movd %eax, %xmm1
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: cltd
-; CHECK-NEXT: idivl %ecx
-; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%div = sdiv <4 x i32> %x, zeroinitializer
ret <4 x i32> %div
define <4 x i32> @udiv_vec0(<4 x i32> %x) {
; CHECK-LABEL: udiv_vec0:
; CHECK: # BB#0:
-; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
-; CHECK-NEXT: movd %xmm1, %eax
-; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %ecx
-; CHECK-NEXT: movd %eax, %xmm1
-; CHECK-NEXT: pshufd {{.*#+}} xmm2 = xmm0[1,1,2,3]
-; CHECK-NEXT: movd %xmm2, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %ecx
-; CHECK-NEXT: movd %eax, %xmm2
-; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %ecx
-; CHECK-NEXT: movd %eax, %xmm1
-; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; CHECK-NEXT: movd %xmm0, %eax
-; CHECK-NEXT: xorl %edx, %edx
-; CHECK-NEXT: divl %ecx
-; CHECK-NEXT: movd %eax, %xmm0
-; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: retq
%div = udiv <4 x i32> %x, zeroinitializer
ret <4 x i32> %div
ret <8 x i16> %0
}
-define <4 x i32> @sdiv_zero(<4 x i32> %var) {
-; SSE-LABEL: sdiv_zero:
-; SSE: # BB#0: # %entry
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: xorl %esi, %esi
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: movl %eax, %ecx
-; SSE-NEXT: movd %xmm0, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: movd %eax, %xmm1
-; SSE-NEXT: pinsrd $1, %ecx, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: pinsrd $2, %eax, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %esi
-; SSE-NEXT: pinsrd $3, %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
-; SSE-NEXT: retq
-;
-; AVX-LABEL: sdiv_zero:
-; AVX: # BB#0: # %entry
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: xorl %esi, %esi
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %esi
-; AVX-NEXT: movl %eax, %ecx
-; AVX-NEXT: vmovd %xmm0, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %esi
-; AVX-NEXT: vmovd %eax, %xmm1
-; AVX-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %esi
-; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %esi
-; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
-; AVX-NEXT: retq
-entry:
- %0 = sdiv <4 x i32> %var, <i32 0, i32 0, i32 0, i32 0>
- ret <4 x i32> %0
-}
-
define <4 x i32> @sdiv_vec4x32(<4 x i32> %var) {
; SSE-LABEL: sdiv_vec4x32:
; SSE: # BB#0: # %entry
ret <16 x i16> %a0
}
+; TODO: The div-by-0 lanes are folded away, so we use scalar ops. Would it be better to keep this in the vector unit?
+
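+; Illustrative sketch of the expected lowering (comment only, not autogenerated):
+; in
+;   %y = sdiv <4 x i32> %x, <i32 2, i32 0, i32 0, i32 0>
+; lanes 1-3 divide by zero and are assumed to fold to undef, leaving only the
+; lane-0 divide by 2, which is lowered with the usual scalar sign-fixup
+; sequence (shrl $31, addl, sarl) seen in the checks below.
+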
define <4 x i32> @sdiv_non_splat(<4 x i32> %x) {
; SSE-LABEL: sdiv_non_splat:
; SSE: # BB#0:
-; SSE-NEXT: pextrd $1, %xmm0, %eax
-; SSE-NEXT: xorl %ecx, %ecx
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: movd %xmm0, %edx
-; SSE-NEXT: movl %edx, %esi
-; SSE-NEXT: shrl $31, %esi
-; SSE-NEXT: addl %edx, %esi
-; SSE-NEXT: sarl %esi
-; SSE-NEXT: movd %esi, %xmm1
-; SSE-NEXT: pinsrd $1, %eax, %xmm1
-; SSE-NEXT: pextrd $2, %xmm0, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: pinsrd $2, %eax, %xmm1
-; SSE-NEXT: pextrd $3, %xmm0, %eax
-; SSE-NEXT: cltd
-; SSE-NEXT: idivl %ecx
-; SSE-NEXT: pinsrd $3, %eax, %xmm1
-; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: movd %xmm0, %eax
+; SSE-NEXT: movl %eax, %ecx
+; SSE-NEXT: shrl $31, %ecx
+; SSE-NEXT: addl %eax, %ecx
+; SSE-NEXT: sarl %ecx
+; SSE-NEXT: movd %ecx, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: sdiv_non_splat:
; AVX: # BB#0:
-; AVX-NEXT: vpextrd $1, %xmm0, %eax
-; AVX-NEXT: xorl %ecx, %ecx
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: vmovd %xmm0, %edx
-; AVX-NEXT: movl %edx, %esi
-; AVX-NEXT: shrl $31, %esi
-; AVX-NEXT: addl %edx, %esi
-; AVX-NEXT: sarl %esi
-; AVX-NEXT: vmovd %esi, %xmm1
-; AVX-NEXT: vpinsrd $1, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $2, %xmm0, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
-; AVX-NEXT: vpextrd $3, %xmm0, %eax
-; AVX-NEXT: cltd
-; AVX-NEXT: idivl %ecx
-; AVX-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
+; AVX-NEXT: vmovd %xmm0, %eax
+; AVX-NEXT: movl %eax, %ecx
+; AVX-NEXT: shrl $31, %ecx
+; AVX-NEXT: addl %eax, %ecx
+; AVX-NEXT: sarl %ecx
+; AVX-NEXT: vmovd %ecx, %xmm0
; AVX-NEXT: retq
%y = sdiv <4 x i32> %x, <i32 2, i32 0, i32 0, i32 0>
ret <4 x i32> %y