From 94a3208c98e145ff06c97057546ec006bd6f2bcf Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Tue, 30 Oct 2018 20:44:54 +0000
Subject: [PATCH] [x86] try to make test immune to better div optimization; NFCI

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@345640 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/copy-eflags.ll | 51 ++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 26 deletions(-)

diff --git a/test/CodeGen/X86/copy-eflags.ll b/test/CodeGen/X86/copy-eflags.ll
index 10fccacf193..836027f47bf 100644
--- a/test/CodeGen/X86/copy-eflags.ll
+++ b/test/CodeGen/X86/copy-eflags.ll
@@ -308,47 +308,46 @@ bb1:
 ; Use a particular instruction pattern in order to lower to the post-RA pseudo
 ; used to lower SETB into an SBB pattern in order to make sure that kind of
 ; usage of a copied EFLAGS continues to work.
-define void @PR37431(i32* %arg1, i8* %arg2, i8* %arg3) {
+define void @PR37431(i32* %arg1, i8* %arg2, i8* %arg3, i32 %x) nounwind {
 ; X32-LABEL: PR37431:
 ; X32:       # %bb.0: # %entry
+; X32-NEXT:    pushl %edi
 ; X32-NEXT:    pushl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 8
-; X32-NEXT:    .cfi_offset %esi, -8
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
 ; X32-NEXT:    movl (%eax), %eax
 ; X32-NEXT:    movl %eax, %ecx
 ; X32-NEXT:    sarl $31, %ecx
 ; X32-NEXT:    cmpl %eax, %eax
 ; X32-NEXT:    sbbl %ecx, %eax
-; X32-NEXT:    setb %al
-; X32-NEXT:    sbbb %cl, %cl
+; X32-NEXT:    setb %cl
+; X32-NEXT:    sbbb %dl, %dl
 ; X32-NEXT:    movl {{[0-9]+}}(%esp), %esi
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movb %cl, (%edx)
-; X32-NEXT:    movzbl %al, %eax
-; X32-NEXT:    xorl %ecx, %ecx
-; X32-NEXT:    subl %eax, %ecx
-; X32-NEXT:    xorl %eax, %eax
-; X32-NEXT:    xorl %edx, %edx
-; X32-NEXT:    idivl %ecx
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT:    movl {{[0-9]+}}(%esp), %edi
+; X32-NEXT:    movb %dl, (%edi)
+; X32-NEXT:    movzbl %cl, %ecx
+; X32-NEXT:    xorl %edi, %edi
+; X32-NEXT:    subl %ecx, %edi
+; X32-NEXT:    cltd
+; X32-NEXT:    idivl %edi
 ; X32-NEXT:    movb %dl, (%esi)
 ; X32-NEXT:    popl %esi
-; X32-NEXT:    .cfi_def_cfa_offset 4
+; X32-NEXT:    popl %edi
 ; X32-NEXT:    retl
 ;
 ; X64-LABEL: PR37431:
 ; X64:       # %bb.0: # %entry
-; X64-NEXT:    movq %rdx, %rcx
-; X64-NEXT:    movslq (%rdi), %rax
-; X64-NEXT:    cmpq %rax, %rax
-; X64-NEXT:    sbbb %dl, %dl
-; X64-NEXT:    cmpq %rax, %rax
-; X64-NEXT:    movb %dl, (%rsi)
-; X64-NEXT:    sbbl %esi, %esi
-; X64-NEXT:    xorl %eax, %eax
-; X64-NEXT:    xorl %edx, %edx
-; X64-NEXT:    idivl %esi
-; X64-NEXT:    movb %dl, (%rcx)
+; X64-NEXT:    movl %ecx, %eax
+; X64-NEXT:    movq %rdx, %r8
+; X64-NEXT:    movslq (%rdi), %rdx
+; X64-NEXT:    cmpq %rdx, %rax
+; X64-NEXT:    sbbb %cl, %cl
+; X64-NEXT:    cmpq %rdx, %rax
+; X64-NEXT:    movb %cl, (%rsi)
+; X64-NEXT:    sbbl %ecx, %ecx
+; X64-NEXT:    cltd
+; X64-NEXT:    idivl %ecx
+; X64-NEXT:    movb %dl, (%r8)
 ; X64-NEXT:    retq
 entry:
   %tmp = load i32, i32* %arg1
@@ -358,7 +357,7 @@ entry:
   %tmp4 = sub i8 0, %tmp3
   store i8 %tmp4, i8* %arg2
   %tmp5 = sext i8 %tmp4 to i32
-  %tmp6 = srem i32 0, %tmp5
+  %tmp6 = srem i32 %x, %tmp5
   %tmp7 = trunc i32 %tmp6 to i8
   store i8 %tmp7, i8* %arg3
   ret void
-- 
2.11.0