From db343e334dad64a21ae96bc17254be96921fb7d4 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Mon, 20 Feb 2017 00:37:20 +0000
Subject: [PATCH] [X86] Add test cases showing missed opportunities to use
 rotate right by 1 instructions when operation reads/writes memory.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@295625 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/rotate.ll | 83 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 83 insertions(+)

diff --git a/test/CodeGen/X86/rotate.ll b/test/CodeGen/X86/rotate.ll
index 657b312b06c..307f2f72f0c 100644
--- a/test/CodeGen/X86/rotate.ll
+++ b/test/CodeGen/X86/rotate.ll
@@ -541,3 +541,86 @@ define i8 @rotr1_8(i8 %A) nounwind {
   %D = or i8 %B, %C
   ret i8 %D
 }
+
+define void @rotr1_64_mem(i64* %Aptr) nounwind {
+; 32-LABEL: rotr1_64_mem:
+; 32:       # BB#0:
+; 32-NEXT:    pushl %esi
+; 32-NEXT:    movl 8(%esp), %eax
+; 32-NEXT:    movl (%eax), %ecx
+; 32-NEXT:    movl 4(%eax), %edx
+; 32-NEXT:    movl %edx, %esi
+; 32-NEXT:    shldl $31, %ecx, %esi
+; 32-NEXT:    shldl $31, %edx, %ecx
+; 32-NEXT:    movl %ecx, 4(%eax)
+; 32-NEXT:    movl %esi, (%eax)
+; 32-NEXT:    popl %esi
+
+; 64-LABEL: rotr1_64_mem:
+; 64:       # BB#0:
+; 64-NEXT:    rolq $63, (%rdi)
+; 64-NEXT:    retq
+  %A = load i64, i64 *%Aptr
+  %B = shl i64 %A, 63
+  %C = lshr i64 %A, 1
+  %D = or i64 %B, %C
+  store i64 %D, i64* %Aptr
+  ret void
+}
+
+define void @rotr1_32_mem(i32* %Aptr) nounwind {
+; 32-LABEL: rotr1_32_mem:
+; 32:       # BB#0:
+; 32-NEXT:    movl 4(%esp), %eax
+; 32-NEXT:    roll $31, (%eax)
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr1_32_mem:
+; 64:       # BB#0:
+; 64-NEXT:    roll $31, (%rdi)
+; 64-NEXT:    retq
+  %A = load i32, i32 *%Aptr
+  %B = shl i32 %A, 31
+  %C = lshr i32 %A, 1
+  %D = or i32 %B, %C
+  store i32 %D, i32* %Aptr
+  ret void
+}
+
+define void @rotr1_16_mem(i16* %Aptr) nounwind {
+; 32-LABEL: rotr1_16_mem:
+; 32:       # BB#0:
+; 32-NEXT:    movl 4(%esp), %eax
+; 32-NEXT:    rolw $15, (%eax)
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr1_16_mem:
+; 64:       # BB#0:
+; 64-NEXT:    rolw $15, (%rdi)
+; 64-NEXT:    retq
+  %A = load i16, i16 *%Aptr
+  %B = shl i16 %A, 15
+  %C = lshr i16 %A, 1
+  %D = or i16 %B, %C
+  store i16 %D, i16* %Aptr
+  ret void
+}
+
+define void @rotr1_8_mem(i8* %Aptr) nounwind {
+; 32-LABEL: rotr1_8_mem:
+; 32:       # BB#0:
+; 32-NEXT:    movl 4(%esp), %eax
+; 32-NEXT:    rolb $7, (%eax)
+; 32-NEXT:    retl
+;
+; 64-LABEL: rotr1_8_mem:
+; 64:       # BB#0:
+; 64-NEXT:    rolb $7, (%rdi)
+; 64-NEXT:    retq
+  %A = load i8, i8 *%Aptr
+  %B = shl i8 %A, 7
+  %C = lshr i8 %A, 1
+  %D = or i8 %B, %C
+  store i8 %D, i8* %Aptr
+  ret void
+}
-- 
2.11.0
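
Note: each new test loads a value, rotates it right by one bit using the usual shl/lshr/or idiom, and stores the result back through the same pointer. As the CHECK lines above record, the compiler at this revision emits a rotate-left-by-(width-1) with an immediate count (e.g. rolq $63, (%rdi)), or an shldl pair for i64 on 32-bit targets, rather than the rotate-right-by-1 instruction forms the commit subject refers to. A minimal C-level sketch of the source pattern these tests correspond to follows; the function name and the use of stdint.h here are illustrative, not part of the patch:

    #include <stdint.h>

    /* Rotate the 64-bit value at *p right by one bit, in place.
       The shift-and-or idiom below is the source-level form of the
       shl/lshr/or LLVM IR exercised by the tests above. */
    void rotr1_64_mem(uint64_t *p) {
        uint64_t a = *p;
        *p = (a >> 1) | (a << 63);
    }

Compiled for x86-64, the output one would presumably want is a single memory-operand rotate-right-by-one (e.g. rorq (%rdi)), which uses the implicit-count encoding and is one byte shorter than the rolq-with-immediate form currently produced; that is the missed opportunity these tests are meant to document.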