From e18f8d63bd7f8cb0baa12f142a2542aeb40847d6 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Tue, 8 Dec 2020 14:48:24 +0000
Subject: [PATCH] [X86] Regenerate store-narrow.ll tests

Replace X32 check prefixes with X86 - X32 is generally used for gnux32 triple tests
---
 llvm/test/CodeGen/X86/store-narrow.ll | 160 +++++++++++++++++-----------------
 1 file changed, 81 insertions(+), 79 deletions(-)

diff --git a/llvm/test/CodeGen/X86/store-narrow.ll b/llvm/test/CodeGen/X86/store-narrow.ll
index f71f2eefa0f..122e384ee3c 100644
--- a/llvm/test/CodeGen/X86/store-narrow.ll
+++ b/llvm/test/CodeGen/X86/store-narrow.ll
@@ -1,22 +1,24 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; rdar://7860110
 ; RUN: llc -mtriple=x86_64-apple-darwin10.2 < %s | FileCheck %s -check-prefix=X64
-; RUN: llc -mtriple=i686-apple-darwin10.2 -fixup-byte-word-insts=1 < %s | FileCheck %s -check-prefix=X32 -check-prefix=X32-BWON
-; RUN: llc -mtriple=i686-apple-darwin10.2 -fixup-byte-word-insts=0 < %s | FileCheck %s -check-prefix=X32 -check-prefix=X32-BWOFF
+; RUN: llc -mtriple=i686-apple-darwin10.2 -fixup-byte-word-insts=1 < %s | FileCheck %s -check-prefixes=X86,X86-BWON
+; RUN: llc -mtriple=i686-apple-darwin10.2 -fixup-byte-word-insts=0 < %s | FileCheck %s -check-prefixes=X86,X86-BWOFF
+
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 
+; rdar://7860110
+
 define void @test1(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
 ; X64-LABEL: test1:
 ; X64:       ## %bb.0: ## %entry
 ; X64-NEXT:    movb %sil, (%rdi)
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test1:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movb %al, (%ecx)
-; X32-NEXT:    retl
+; X86-LABEL: test1:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movb %al, (%ecx)
+; X86-NEXT:    retl
 entry:
   %A = load i32, i32* %a0, align 4
   %B = and i32 %A, -256 ; 0xFFFFFF00
@@ -32,12 +34,12 @@ define void @test2(i32* nocapture %a0, i8 zeroext %a1) nounwind ssp {
 ; X64-NEXT:    movb %sil, 1(%rdi)
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test2:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movb %al, 1(%ecx)
-; X32-NEXT:    retl
+; X86-LABEL: test2:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movb %al, 1(%ecx)
+; X86-NEXT:    retl
 entry:
   %A = load i32, i32* %a0, align 4
   %B = and i32 %A, -65281 ; 0xFFFF00FF
@@ -54,19 +56,19 @@ define void @test3(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
 ; X64-NEXT:    movw %si, (%rdi)
 ; X64-NEXT:    retq
 ;
-; X32-BWON-LABEL: test3:
-; X32-BWON:       ## %bb.0: ## %entry
-; X32-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-BWON-NEXT:    movw %ax, (%ecx)
-; X32-BWON-NEXT:    retl
+; X86-BWON-LABEL: test3:
+; X86-BWON:       ## %bb.0: ## %entry
+; X86-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BWON-NEXT:    movw %ax, (%ecx)
+; X86-BWON-NEXT:    retl
 ;
-; X32-BWOFF-LABEL: test3:
-; X32-BWOFF:       ## %bb.0: ## %entry
-; X32-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
-; X32-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-BWOFF-NEXT:    movw %ax, (%ecx)
-; X32-BWOFF-NEXT:    retl
+; X86-BWOFF-LABEL: test3:
+; X86-BWOFF:       ## %bb.0: ## %entry
+; X86-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BWOFF-NEXT:    movw %ax, (%ecx)
+; X86-BWOFF-NEXT:    retl
 entry:
   %A = load i32, i32* %a0, align 4
   %B = and i32 %A, -65536 ; 0xFFFF0000
@@ -82,19 +84,19 @@ define void @test4(i32* nocapture %a0, i16 zeroext %a1) nounwind ssp {
 ; X64-NEXT:    movw %si, 2(%rdi)
 ; X64-NEXT:    retq
 ;
-; X32-BWON-LABEL: test4:
-; X32-BWON:       ## %bb.0: ## %entry
-; X32-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-BWON-NEXT:    movw %ax, 2(%ecx)
-; X32-BWON-NEXT:    retl
+; X86-BWON-LABEL: test4:
+; X86-BWON:       ## %bb.0: ## %entry
+; X86-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BWON-NEXT:    movw %ax, 2(%ecx)
+; X86-BWON-NEXT:    retl
 ;
-; X32-BWOFF-LABEL: test4:
-; X32-BWOFF:       ## %bb.0: ## %entry
-; X32-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
-; X32-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-BWOFF-NEXT:    movw %ax, 2(%ecx)
-; X32-BWOFF-NEXT:    retl
+; X86-BWOFF-LABEL: test4:
+; X86-BWOFF:       ## %bb.0: ## %entry
+; X86-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BWOFF-NEXT:    movw %ax, 2(%ecx)
+; X86-BWOFF-NEXT:    retl
 entry:
   %A = load i32, i32* %a0, align 4
   %B = and i32 %A, 65535 ; 0x0000FFFF
@@ -111,19 +113,19 @@ define void @test5(i64* nocapture %a0, i16 zeroext %a1) nounwind ssp {
 ; X64-NEXT:    movw %si, 2(%rdi)
 ; X64-NEXT:    retq
 ;
-; X32-BWON-LABEL: test5:
-; X32-BWON:       ## %bb.0: ## %entry
-; X32-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
-; X32-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-BWON-NEXT:    movw %ax, 2(%ecx)
-; X32-BWON-NEXT:    retl
+; X86-BWON-LABEL: test5:
+; X86-BWON:       ## %bb.0: ## %entry
+; X86-BWON-NEXT:    movzwl {{[0-9]+}}(%esp), %eax
+; X86-BWON-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BWON-NEXT:    movw %ax, 2(%ecx)
+; X86-BWON-NEXT:    retl
 ;
-; X32-BWOFF-LABEL: test5:
-; X32-BWOFF:       ## %bb.0: ## %entry
-; X32-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
-; X32-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-BWOFF-NEXT:    movw %ax, 2(%ecx)
-; X32-BWOFF-NEXT:    retl
+; X86-BWOFF-LABEL: test5:
+; X86-BWOFF:       ## %bb.0: ## %entry
+; X86-BWOFF-NEXT:    movw {{[0-9]+}}(%esp), %ax
+; X86-BWOFF-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-BWOFF-NEXT:    movw %ax, 2(%ecx)
+; X86-BWOFF-NEXT:    retl
 entry:
   %A = load i64, i64* %a0, align 4
   %B = and i64 %A, -4294901761 ; 0xFFFFFFFF0000FFFF
@@ -140,12 +142,12 @@ define void @test6(i64* nocapture %a0, i8 zeroext %a1) nounwind ssp {
 ; X64-NEXT:    movb %sil, 5(%rdi)
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test6:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %al
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X32-NEXT:    movb %al, 5(%ecx)
-; X32-NEXT:    retl
+; X86-LABEL: test6:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %al
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movb %al, 5(%ecx)
+; X86-NEXT:    retl
 entry:
   %A = load i64, i64* %a0, align 4
   %B = and i64 %A, -280375465082881 ; 0xFFFF00FFFFFFFFFF
@@ -163,14 +165,14 @@ define i32 @test7(i64* nocapture %a0, i8 zeroext %a1, i32* %P2) nounwind {
 ; X64-NEXT:    movb %sil, 5(%rdi)
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test7:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movb {{[0-9]+}}(%esp), %cl
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movl (%eax), %eax
-; X32-NEXT:    movb %cl, 5(%edx)
-; X32-NEXT:    retl
+; X86-LABEL: test7:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movl (%eax), %eax
+; X86-NEXT:    movb %cl, 5(%edx)
+; X86-NEXT:    retl
 entry:
   %OtherLoad = load i32 , i32 *%P2
   %A = load i64, i64* %a0, align 4
@@ -192,10 +194,10 @@ define void @test8() nounwind {
 ; X64-NEXT:    orb $1, {{.*}}(%rip)
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test8:
-; X32:       ## %bb.0:
-; X32-NEXT:    orb $1, _g_16
-; X32-NEXT:    retl
+; X86-LABEL: test8:
+; X86:       ## %bb.0:
+; X86-NEXT:    orb $1, _g_16
+; X86-NEXT:    retl
   %tmp = load i32, i32* @g_16
   store i32 0, i32* @g_16
   %or = or i32 %tmp, 1
@@ -209,10 +211,10 @@ define void @test9() nounwind {
 ; X64-NEXT:    orb $1, {{.*}}(%rip)
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test9:
-; X32:       ## %bb.0:
-; X32-NEXT:    orb $1, _g_16
-; X32-NEXT:    retl
+; X86-LABEL: test9:
+; X86:       ## %bb.0:
+; X86-NEXT:    orb $1, _g_16
+; X86-NEXT:    retl
   %tmp = load i32, i32* @g_16
   %or = or i32 %tmp, 1
   store i32 %or, i32* @g_16
@@ -228,12 +230,12 @@ define i8 @test10(i8* %P) nounwind ssp {
 ; X64-NEXT:    ## kill: def $al killed $al killed $eax
 ; X64-NEXT:    retq
 ;
-; X32-LABEL: test10:
-; X32:       ## %bb.0: ## %entry
-; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT:    movsbl (%eax), %eax
-; X32-NEXT:    movb %ah, %al
-; X32-NEXT:    retl
+; X86-LABEL: test10:
+; X86:       ## %bb.0: ## %entry
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT:    movsbl (%eax), %eax
+; X86-NEXT:    movb %ah, %al
+; X86-NEXT:    retl
 entry:
   %tmp = load i8, i8* %P, align 1
   %conv = sext i8 %tmp to i32
-- 
2.11.0
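
For reference: the CHECK lines in this test are produced by the script named in the
test's NOTE header, so a prefix rename like this is normally done by editing the RUN
lines and re-running the script rather than by hand-editing every CHECK line. A typical
invocation from an LLVM checkout looks like the sketch below (the build directory path
is an assumption; llc on PATH also works if --llc-binary is omitted):

  $ python llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
      llvm/test/CodeGen/X86/store-narrow.ll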