From 865ab505ecf7ae47287a87188d21898d3d865824 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Wed, 19 Jun 2019 18:06:59 +0000
Subject: [PATCH] [x86] add test for unaligned 32-byte load/store splitting;
 NFC

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@363852 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/sandybridge-loads.ll | 21 +++++++++++++++++++--
 1 file changed, 19 insertions(+), 2 deletions(-)

diff --git a/test/CodeGen/X86/sandybridge-loads.ll b/test/CodeGen/X86/sandybridge-loads.ll
index 239ab1d8a3c..50766a2d009 100644
--- a/test/CodeGen/X86/sandybridge-loads.ll
+++ b/test/CodeGen/X86/sandybridge-loads.ll
@@ -1,7 +1,7 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=corei7-avx | FileCheck %s
 
-define void @wideloads(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
+define void @wideloads(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind {
 ; CHECK-LABEL: wideloads:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %xmm0
@@ -26,7 +26,7 @@ define void @wideloads(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwi
   ret void
 }
 
-define void @widestores(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
+define void @widestores(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind {
 ; CHECK-LABEL: widestores:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmovaps (%rdi), %ymm0
@@ -44,3 +44,20 @@ define void @widestores(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounw
   ret void
 }
 
+define void @widestores_unaligned_load(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind {
+; CHECK-LABEL: widestores_unaligned_load:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vmovaps (%rdi), %ymm0
+; CHECK-NEXT:    vmovaps (%rsi), %xmm1
+; CHECK-NEXT:    vmovaps 16(%rsi), %xmm2
+; CHECK-NEXT:    vmovaps %ymm0, (%rsi)
+; CHECK-NEXT:    vmovaps %xmm2, 16(%rdi)
+; CHECK-NEXT:    vmovaps %xmm1, (%rdi)
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+  %v0 = load <8 x float>, <8 x float>* %a, align 32 ; <--- aligned
+  %v1 = load <8 x float>, <8 x float>* %b, align 16 ; <--- unaligned
+  store <8 x float> %v0, <8 x float>* %b, align 32 ; <--- aligned
+  store <8 x float> %v1, <8 x float>* %a, align 16 ; <--- unaligned
+  ret void
+}
-- 
2.11.0