From b4220f48edbf671fe06b518ab9f50a359656329c Mon Sep 17 00:00:00 2001
From: Tom Stellard
Date: Thu, 14 Jun 2018 19:26:37 +0000
Subject: [PATCH] AMDGPU/GlobalISel: Implement select() for @llvm.amdgcn.cvt.pkrtz

Reviewers: arsenm, nhaehnle

Reviewed By: arsenm

Subscribers: kzhuravl, wdng, yaxunl, rovka, kristof.beyls, dstuttard, tpr, t-tye, llvm-commits

Differential Revision: https://reviews.llvm.org/D45907

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@334757 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/AMDGPU/AMDGPUGISel.td                     | 33 ++++++++++++++++
 lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp      | 15 ++++++++
 lib/Target/AMDGPU/AMDGPUInstructionSelector.h        |  1 +
 .../GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir      | 44 ++++++++++++++++++++++
 4 files changed, 93 insertions(+)
 create mode 100644 test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir

diff --git a/lib/Target/AMDGPU/AMDGPUGISel.td b/lib/Target/AMDGPU/AMDGPUGISel.td
index f27f52eb9be..768fba3f2fa 100644
--- a/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -46,5 +46,38 @@ class GISelVop2Pat <
   (inst src0_vt:$src0, src1_vt:$src1)
 >;
 
+class GISelVop2CommutePat <
+  SDPatternOperator node,
+  Instruction inst,
+  ValueType dst_vt,
+  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <
+
+  (dst_vt (node (src1_vt VGPR_32:$src1), (src0_vt (sd_vsrc0 src0_vt:$src0)))),
+  (inst src0_vt:$src0, src1_vt:$src1)
+>;
+
+multiclass GISelVop2IntrPat <
+  SDPatternOperator node, Instruction inst,
+  ValueType dst_vt, ValueType src_vt = dst_vt> {
+
+  def : GISelVop2Pat <node, inst, dst_vt, src_vt>;
+
+  // FIXME: Intrinsics aren't marked as commutable, so we need to add an explicit
+  // pattern to handle commuting. This is another reason why legalizing to a
+  // generic machine instruction may be better than matching the intrinsic
+  // directly.
+  def : GISelVop2CommutePat <node, inst, dst_vt, src_vt>;
+}
+
 def : GISelSop2Pat <or, S_OR_B32, i32>;
 def : GISelVop2Pat <or, V_OR_B32_e32, i32>;
+
+// FIXME: Select directly to _e32 so we don't need to deal with modifiers.
+// FIXME: We can't re-use SelectionDAG patterns here because they match
+// against a custom SDNode and we would need to create a generic machine
+// instruction that is equivalent to the custom SDNode. This would also require
+// us to custom legalize the intrinsic to the new generic machine instruction,
+// but I can't get custom legalizing of intrinsics to work and I'm not sure if
+// this is even supported yet.
+defm : GISelVop2IntrPat <
+  int_amdgcn_cvt_pkrtz, V_CVT_PKRTZ_F16_F32_e32, v2f16, f32>;
diff --git a/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp b/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
index c3dee4e55dc..0b605fd92a7 100644
--- a/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
+++ b/lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp
@@ -159,6 +159,19 @@ bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
   return selectG_ADD(I);
 }
 
+bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
+                                          CodeGenCoverage &CoverageInfo) const {
+  unsigned IntrinsicID = I.getOperand(1).getIntrinsicID();
+
+  switch (IntrinsicID) {
+  default:
+    break;
+  case Intrinsic::amdgcn_cvt_pkrtz:
+    return selectImpl(I, CoverageInfo);
+  }
+  return false;
+}
+
 bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
   MachineBasicBlock *BB = I.getParent();
   MachineFunction *MF = BB->getParent();
@@ -522,6 +535,8 @@ bool AMDGPUInstructionSelector::select(MachineInstr &I,
     return selectG_CONSTANT(I);
   case TargetOpcode::G_GEP:
     return selectG_GEP(I);
+  case TargetOpcode::G_INTRINSIC:
+    return selectG_INTRINSIC(I, CoverageInfo);
   case TargetOpcode::G_LOAD:
     return selectG_LOAD(I);
   case TargetOpcode::G_STORE:
diff --git a/lib/Target/AMDGPU/AMDGPUInstructionSelector.h b/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
index b304ec2f783..6c05f3cc0c1 100644
--- a/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
+++ b/lib/Target/AMDGPU/AMDGPUInstructionSelector.h
@@ -63,6 +63,7 @@ private:
   bool selectG_CONSTANT(MachineInstr &I) const;
   bool selectG_ADD(MachineInstr &I) const;
   bool selectG_GEP(MachineInstr &I) const;
+  bool selectG_INTRINSIC(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;
   bool hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const;
   void getAddrModeInfo(const MachineInstr &Load, const MachineRegisterInfo &MRI,
                        SmallVectorImpl<GEPInfo> &AddrInfo) const;
diff --git a/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir b/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
new file mode 100644
index 00000000000..852c20834e7
--- /dev/null
+++ b/test/CodeGen/AMDGPU/GlobalISel/inst-select-amdgcn.cvt.pkrtz.mir
@@ -0,0 +1,44 @@
+# RUN: llc -march=amdgcn -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s -check-prefixes=GCN
+
+--- |
+  define void @cvt_pkrtz(i32 addrspace(1)* %global0) { ret void }
+...
+---
+
+name:            cvt_pkrtz
+legalized:       true
+regBankSelected: true
+
+# GCN-LABEL: name: cvt_pkrtz
+body: |
+  bb.0:
+    liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr3_vgpr4
+
+    ; GCN: [[SGPR0:%[0-9]+]]:sreg_32_xm0 = COPY $sgpr0
+    %0:sgpr(s32) = COPY $sgpr0
+    ; GCN: [[VGPR0:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+    %1:vgpr(s32) = COPY $vgpr0
+    ; GCN: [[VGPR1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+    %2:vgpr(s32) = COPY $vgpr1
+    %3:vgpr(s64) = COPY $vgpr3_vgpr4
+
+    ; cvt_pkrtz vs
+    ; GCN: V_CVT_PKRTZ_F16_F32_e32 [[SGPR0]], [[VGPR0]]
+    %4:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %1, %0
+
+    ; cvt_pkrtz sv
+    ; GCN: V_CVT_PKRTZ_F16_F32_e32 [[SGPR0]], [[VGPR0]]
+    %5:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %0, %1
+
+    ; cvt_pkrtz vv
+    ; GCN: V_CVT_PKRTZ_F16_F32_e32 [[VGPR0]], [[VGPR1]]
+    %6:vgpr(<2 x s16>) = G_INTRINSIC intrinsic(@llvm.amdgcn.cvt.pkrtz), %1, %2
+
+    %7:vgpr(s32) = G_BITCAST %4
+    %8:vgpr(s32) = G_BITCAST %5
+    %9:vgpr(s32) = G_BITCAST %6
+    G_STORE %7, %3 :: (store 4 into %ir.global0)
+    G_STORE %8, %3 :: (store 4 into %ir.global0)
+    G_STORE %9, %3 :: (store 4 into %ir.global0)
+...
+---
-- 
2.11.0
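
The intrinsic selected here, @llvm.amdgcn.cvt.pkrtz, converts two f32 operands to f16 with round-toward-zero and packs them into a <2 x half> result. For reference, a minimal IR-level sketch that would exercise the new selection path might look like the following; the function name, signature, and llc invocation are illustrative assumptions, not part of this commit:

  ; Hypothetical example: pack two floats and store the packed result.
  declare <2 x half> @llvm.amdgcn.cvt.pkrtz(float, float)

  define void @cvt_pkrtz_example(float %a, float %b, <2 x half> addrspace(1)* %out) {
    %packed = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %a, float %b)
    store <2 x half> %packed, <2 x half> addrspace(1)* %out
    ret void
  }

Compiled along the lines of 'llc -march=amdgcn -global-isel -stop-after=instruction-select', the call becomes a G_INTRINSIC that selectG_INTRINSIC() forwards to selectImpl(), which matches the GISelVop2IntrPat patterns above and emits V_CVT_PKRTZ_F16_F32_e32, assuming the rest of the function makes it through legalization and register bank selection at this stage of GlobalISel.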