From 1094358cae5bda7109ce72298dc1c3881df6343c Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sat, 3 Sep 2016 04:37:50 +0000
Subject: [PATCH] [AVX-512] Add EVEX encoded VPCMPEQ and VPCMPGT to the load folding tables.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@280581 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrInfo.cpp                | 24 +++++++++++++
 test/CodeGen/X86/stack-folding-int-avx512vl.ll | 48 ++++++++++++++++++++++++++
 2 files changed, 72 insertions(+)
 create mode 100644 test/CodeGen/X86/stack-folding-int-avx512vl.ll

diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp
index 0f4736bb760..ef2dda319d7 100644
--- a/lib/Target/X86/X86InstrInfo.cpp
+++ b/lib/Target/X86/X86InstrInfo.cpp
@@ -1769,6 +1769,14 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VPMULUDQZrr, X86::VPMULUDQZrm, 0 },
     { X86::VBROADCASTSSZrkz, X86::VBROADCASTSSZmkz, TB_NO_REVERSE },
     { X86::VBROADCASTSDZrkz, X86::VBROADCASTSDZmkz, TB_NO_REVERSE },
+    { X86::VPCMPEQBZrr, X86::VPCMPEQBZrm, 0 },
+    { X86::VPCMPEQDZrr, X86::VPCMPEQDZrm, 0 },
+    { X86::VPCMPEQQZrr, X86::VPCMPEQQZrm, 0 },
+    { X86::VPCMPEQWZrr, X86::VPCMPEQWZrm, 0 },
+    { X86::VPCMPGTBZrr, X86::VPCMPGTBZrm, 0 },
+    { X86::VPCMPGTDZrr, X86::VPCMPGTDZrm, 0 },
+    { X86::VPCMPGTQZrr, X86::VPCMPGTQZrm, 0 },
+    { X86::VPCMPGTWZrr, X86::VPCMPGTWZrm, 0 },
 
     // AVX-512{F,VL} foldable instructions
     { X86::VBROADCASTSSZ256rkz, X86::VBROADCASTSSZ256mkz, TB_NO_REVERSE },
@@ -1836,6 +1844,22 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
     { X86::VCMPPDZ256rri, X86::VCMPPDZ256rmi, 0 },
     { X86::VCMPPSZ128rri, X86::VCMPPSZ128rmi, 0 },
     { X86::VCMPPSZ256rri, X86::VCMPPSZ256rmi, 0 },
+    { X86::VPCMPEQBZ128rr, X86::VPCMPEQBZ128rm, 0 },
+    { X86::VPCMPEQBZ256rr, X86::VPCMPEQBZ256rm, 0 },
+    { X86::VPCMPEQDZ128rr, X86::VPCMPEQDZ128rm, 0 },
+    { X86::VPCMPEQDZ256rr, X86::VPCMPEQDZ256rm, 0 },
+    { X86::VPCMPEQQZ128rr, X86::VPCMPEQQZ128rm, 0 },
+    { X86::VPCMPEQQZ256rr, X86::VPCMPEQQZ256rm, 0 },
+    { X86::VPCMPEQWZ128rr, X86::VPCMPEQWZ128rm, 0 },
+    { X86::VPCMPEQWZ256rr, X86::VPCMPEQWZ256rm, 0 },
+    { X86::VPCMPGTBZ128rr, X86::VPCMPGTBZ128rm, 0 },
+    { X86::VPCMPGTBZ256rr, X86::VPCMPGTBZ256rm, 0 },
+    { X86::VPCMPGTDZ128rr, X86::VPCMPGTDZ128rm, 0 },
+    { X86::VPCMPGTDZ256rr, X86::VPCMPGTDZ256rm, 0 },
+    { X86::VPCMPGTQZ128rr, X86::VPCMPGTQZ128rm, 0 },
+    { X86::VPCMPGTQZ256rr, X86::VPCMPGTQZ256rm, 0 },
+    { X86::VPCMPGTWZ128rr, X86::VPCMPGTWZ128rm, 0 },
+    { X86::VPCMPGTWZ256rr, X86::VPCMPGTWZ256rm, 0 },
 
     // AES foldable instructions
     { X86::AESDECLASTrr, X86::AESDECLASTrm, TB_ALIGN_16 },
diff --git a/test/CodeGen/X86/stack-folding-int-avx512vl.ll b/test/CodeGen/X86/stack-folding-int-avx512vl.ll
new file mode 100644
index 00000000000..f92f9593f13
--- /dev/null
+++ b/test/CodeGen/X86/stack-folding-int-avx512vl.ll
@@ -0,0 +1,48 @@
+; RUN: llc -O3 -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512bw < %s | FileCheck %s
+
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-unknown"
+
+; Stack reload folding tests.
+;
+; By including a nop call with sideeffects we can force a partial register spill of the
+; relevant registers and check that the reload is correctly folded into the instruction.
+
+define i16 @stack_fold_pcmpeqb(<16 x i8> %a0, <16 x i8> %a1) {
+  ;CHECK-LABEL: stack_fold_pcmpeqb
+  ;CHECK: vpcmpeqb {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = icmp eq <16 x i8> %a0, %a1
+  %3 = bitcast <16 x i1> %2 to i16
+  ret i16 %3
+}
+
+define i8 @stack_fold_pcmpeqd(<4 x i32> %a0, <4 x i32> %a1) {
+  ;CHECK-LABEL: stack_fold_pcmpeqd
+  ;CHECK: vpcmpeqd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = icmp eq <4 x i32> %a0, %a1
+  %3 = shufflevector <4 x i1> %2, <4 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  %4 = bitcast <8 x i1> %3 to i8
+  ret i8 %4
+}
+
+define i8 @stack_fold_pcmpeqq(<2 x i64> %a0, <2 x i64> %a1) {
+  ;CHECK-LABEL: stack_fold_pcmpeqq
+  ;CHECK: vpcmpeqq {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = icmp eq <2 x i64> %a0, %a1
+  %3 = shufflevector <2 x i1> %2, <2 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 2, i32 3>
+  %4 = bitcast <8 x i1> %3 to i8
+  ret i8 %4
+}
+
+define i8 @stack_fold_pcmpeqw(<8 x i16> %a0, <8 x i16> %a1) {
+  ;CHECK-LABEL: stack_fold_pcmpeqw
+  ;CHECK: vpcmpeqw {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
+  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
+  %2 = icmp eq <8 x i16> %a0, %a1
+  %3 = bitcast <8 x i1> %2 to i8
+  ret i8 %3
+}
+
-- 
2.11.0
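
Note: the test file added above only exercises the VPCMPEQ folds; the new VPCMPGT table entries are not covered by it. A test for them would follow the same nop-call recipe, swapping icmp eq for a signed greater-than compare. The sketch below is illustrative only and not part of this commit; the function name stack_fold_pcmpgtd and its CHECK line are assumptions modelled on the VPCMPEQ tests.

; Illustrative sketch (not in the committed test file): a VPCMPGT stack
; folding check in the same style as the VPCMPEQ tests above.
define i8 @stack_fold_pcmpgtd(<4 x i32> %a0, <4 x i32> %a1) {
  ;CHECK-LABEL: stack_fold_pcmpgtd
  ;CHECK: vpcmpgtd {{-?[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}}, {{%k[0-7]}} {{.*#+}} 16-byte Folded Reload
  ; Clobbering xmm2-xmm31 leaves almost no free vector registers, so one of
  ; the inputs is spilled across the call and must be reloaded afterwards.
  %1 = tail call <2 x i64> asm sideeffect "nop", "=x,~{xmm2},~{xmm3},~{xmm4},~{xmm5},~{xmm6},~{xmm7},~{xmm8},~{xmm9},~{xmm10},~{xmm11},~{xmm12},~{xmm13},~{xmm14},~{xmm15},~{xmm16},~{xmm17},~{xmm18},~{xmm19},~{xmm20},~{xmm21},~{xmm22},~{xmm23},~{xmm24},~{xmm25},~{xmm26},~{xmm27},~{xmm28},~{xmm29},~{xmm30},~{xmm31},~{flags}"()
  ; Signed greater-than compare selects the VPCMPGTD form covered by the new
  ; folding table entries; the reload should be folded into that instruction.
  %2 = icmp sgt <4 x i32> %a0, %a1
  ; Widen the <4 x i1> mask to 8 elements so it can be returned as an i8.
  %3 = shufflevector <4 x i1> %2, <4 x i1> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %4 = bitcast <8 x i1> %3 to i8
  ret i8 %4
}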