; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpnltsd {sae}, %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: shlb $7, %al
-; CHECK-NEXT: sarb $7, %al
+; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: retq
%res4 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 5, i8 %x3, i32 8)
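; Annotation (not from the original test): the removed shlb $7 / sarb $7 pair shifted the
; low mask bit into the sign position and back, sign-extending it to 0 or -1 in %al; the
; replacement andl $1, %eax isolates the same bit as 0 or 1. In %res4 above, predicate
; immediate 5 (not-less-than) and rounding operand 8 ({sae}) match the vcmpnltsd {sae} check.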
; CHECK-NEXT: kandw %k2, %k1, %k1
; CHECK-NEXT: korw %k1, %k0, %k0
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: shlb $7, %al
-; CHECK-NEXT: sarb $7, %al
+; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: retq
%res1 = call i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double> %x0, <2 x double> %x1, i32 2, i8 -1, i32 4)
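; Annotation: here two compare results are combined in mask registers (kandw, korw) before
; the surviving bit is moved to %eax, and the same shlb/sarb sign-extension is again replaced
; by a plain andl $1. In %res1 above, predicate 2 is LE, mask -1 leaves the compare unmasked,
; and rounding operand 4 selects the current rounding mode, so no {sae} appears in the checks.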
; CHECK-NEXT: kmovw %edi, %k1
; CHECK-NEXT: vcmpunordss %xmm1, %xmm0, %k0 {%k1}
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: shlb $7, %al
-; CHECK-NEXT: sarb $7, %al
+; CHECK-NEXT: andl $1, %eax
; CHECK-NEXT: retq
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 %x3, i32 4)
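; Annotation: the scalar-single variant follows the same pattern: the %x3 mask arrives in
; %edi, is moved into %k1 as the write mask of vcmpunordss (predicate 3 = unordered), and the
; low bit of the result is now extracted with andl $1 instead of the shlb/sarb sequence.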
; CHECK: ## BB#0:
; CHECK-NEXT: vcmpless %xmm1, %xmm0, %k1
; CHECK-NEXT: vcmpunordss {sae}, %xmm1, %xmm0, %k0 {%k1}
-; CHECK-NEXT: vcmpneqss %xmm1, %xmm0, %k1
-; CHECK-NEXT: vcmpnltss {sae}, %xmm1, %xmm0, %k1 {%k1}
; CHECK-NEXT: andl $1, %edi
-; CHECK-NEXT: kmovw %edi, %k2
-; CHECK-NEXT: kandw %k2, %k1, %k1
-; CHECK-NEXT: kandw %k1, %k0, %k0
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vcmpneqss %xmm1, %xmm0, %k2 {%k1}
+; CHECK-NEXT: kmovw %k2, %ecx
+; CHECK-NEXT: vcmpnltss {sae}, %xmm1, %xmm0, %k1 {%k1}
+; CHECK-NEXT: kmovw %k1, %edx
+; CHECK-NEXT: andl $1, %edx
; CHECK-NEXT: kmovw %k0, %eax
-; CHECK-NEXT: shlb $7, %al
-; CHECK-NEXT: sarb $7, %al
+; CHECK-NEXT: andb %cl, %al
+; CHECK-NEXT: andb %dl, %al
; CHECK-NEXT: retq
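; Annotation: in this combined test the old code accumulated the compare results in mask
; registers with kandw and then sign-extended the final bit with shlb/sarb. In the new checks
; the vcmpless result (%k1) serves as the write mask of the vcmpunordss {sae} compare into
; %k0, the two %edi-masked compares (vcmpneqss, vcmpnltss {sae}) each move their result bit
; to a GPR (%ecx, %edx), and everything is combined with andb in %al, giving a 0/1 value.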
%res1 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 2, i8 -1, i32 4)
%res2 = call i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float> %x0, <4 x float> %x1, i32 3, i8 -1, i32 8)
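; Annotation: %res1 above (predicate 2, mask -1, rounding 4) corresponds to the unmasked
; vcmpless check, and %res2 (predicate 3, mask -1, rounding 8) to vcmpunordss {sae}. The
; intrinsic signatures implied by the calls in this excerpt are:
;   declare i8 @llvm.x86.avx512.mask.cmp.sd(<2 x double>, <2 x double>, i32, i8, i32)
;   declare i8 @llvm.x86.avx512.mask.cmp.ss(<4 x float>, <4 x float>, i32, i8, i32)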