From: Matt Arsenault
Date: Mon, 17 Jul 2017 22:35:50 +0000 (+0000)
Subject: AMDGPU: Annotate features from x work item/group IDs.
X-Git-Tag: android-x86-7.1-r4~13411
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=bcb8abee35d0ea58ee3cae055612f7d10df8f026;p=android-x86%2Fexternal-llvm.git

AMDGPU: Annotate features from x work item/group IDs.

This wasn't necessary before, since the x IDs are always enabled for
kernels, but it is necessary if they need to be forwarded to a callable
function.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@308226 91177308-0d34-0410-b5e6-96231b3b80d8
---
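A minimal illustration of the new behavior, assuming the pass is run as in
the RUN line of the test below; the function @use_ids_x is made up for this
sketch, but the intrinsics and attribute names are the ones the patch uses:

declare i32 @llvm.amdgcn.workitem.id.x()
declare i32 @llvm.amdgcn.workgroup.id.x()

define void @use_ids_x() {
  ; Reads of the x ids in a callable (non-entry) function now get
  ; annotated, so callers know the values must be forwarded to it.
  %wi.x = call i32 @llvm.amdgcn.workitem.id.x()
  %wg.x = call i32 @llvm.amdgcn.workgroup.id.x()
  store volatile i32 %wi.x, i32 addrspace(1)* undef
  store volatile i32 %wg.x, i32 addrspace(1)* undef
  ret void
}

; After opt -amdgpu-annotate-kernel-features, @use_ids_x carries
; "amdgpu-work-item-id-x" and "amdgpu-work-group-id-x". An entry function
; using the same intrinsics is left unannotated (the NonKernelOnly case),
; since the x ids are always enabled for kernels.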
diff --git a/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp b/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
index 3bc73bf6e50..66249276cd8 100644
--- a/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
+++ b/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
@@ -129,8 +129,16 @@ bool AMDGPUAnnotateKernelFeatures::visitConstantExprsRecursively(
 //
 // TODO: We should not add the attributes if the known compile time workgroup
 // size is 1 for y/z.
-static StringRef intrinsicToAttrName(Intrinsic::ID ID, bool &IsQueuePtr) {
+static StringRef intrinsicToAttrName(Intrinsic::ID ID,
+                                     bool &NonKernelOnly,
+                                     bool &IsQueuePtr) {
   switch (ID) {
+  case Intrinsic::amdgcn_workitem_id_x:
+    NonKernelOnly = true;
+    return "amdgpu-work-item-id-x";
+  case Intrinsic::amdgcn_workgroup_id_x:
+    NonKernelOnly = true;
+    return "amdgpu-work-group-id-x";
   case Intrinsic::amdgcn_workitem_id_y:
   case Intrinsic::r600_read_tidig_y:
     return "amdgpu-work-item-id-y";
@@ -172,12 +180,12 @@ static bool handleAttr(Function &Parent, const Function &Callee,
 
 static void copyFeaturesToFunction(Function &Parent, const Function &Callee,
                                    bool &NeedQueuePtr) {
-
+  // X ids unnecessarily propagated to kernels.
   static const StringRef AttrNames[] = {
-    // .x omitted
+    { "amdgpu-work-item-id-x" },
     { "amdgpu-work-item-id-y" },
     { "amdgpu-work-item-id-z" },
-    // .x omitted
+    { "amdgpu-work-group-id-x" },
     { "amdgpu-work-group-id-y" },
     { "amdgpu-work-group-id-z" },
     { "amdgpu-dispatch-ptr" },
@@ -198,6 +206,7 @@ bool AMDGPUAnnotateKernelFeatures::addFeatureAttributes(Function &F) {
 
   bool Changed = false;
   bool NeedQueuePtr = false;
+  bool IsFunc = !AMDGPU::isEntryFunctionCC(F.getCallingConv());
 
   for (BasicBlock &BB : F) {
     for (Instruction &I : BB) {
@@ -214,8 +223,10 @@ bool AMDGPUAnnotateKernelFeatures::addFeatureAttributes(Function &F) {
           copyFeaturesToFunction(F, *Callee, NeedQueuePtr);
           Changed = true;
         } else {
-          StringRef AttrName = intrinsicToAttrName(IID, NeedQueuePtr);
-          if (!AttrName.empty()) {
+          bool NonKernelOnly = false;
+          StringRef AttrName = intrinsicToAttrName(IID,
+                                                   NonKernelOnly, NeedQueuePtr);
+          if (!AttrName.empty() && (IsFunc || !NonKernelOnly)) {
             F.addFnAttr(AttrName);
             Changed = true;
           }
diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 7fe671a533a..49c8adc81fe 100644
--- a/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -42,6 +42,9 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
     WorkGroupIDZSystemSGPR(AMDGPU::NoRegister),
     WorkGroupInfoSystemSGPR(AMDGPU::NoRegister),
     PrivateSegmentWaveByteOffsetSystemSGPR(AMDGPU::NoRegister),
+    WorkItemIDXVGPR(AMDGPU::NoRegister),
+    WorkItemIDYVGPR(AMDGPU::NoRegister),
+    WorkItemIDZVGPR(AMDGPU::NoRegister),
     PSInputAddr(0),
     PSInputEnable(0),
     ReturnsVoid(true),
@@ -87,7 +90,6 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
     ScratchWaveOffsetReg = AMDGPU::SGPR4;
     FrameOffsetReg = AMDGPU::SGPR5;
     StackPtrOffsetReg = AMDGPU::SGPR32;
-    return;
   }
 
   CallingConv::ID CC = F->getCallingConv();
@@ -101,17 +103,25 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
 
   if (ST.debuggerEmitPrologue()) {
     // Enable everything.
+    WorkGroupIDX = true;
     WorkGroupIDY = true;
     WorkGroupIDZ = true;
+    WorkItemIDX = true;
     WorkItemIDY = true;
     WorkItemIDZ = true;
   } else {
+    if (F->hasFnAttribute("amdgpu-work-group-id-x"))
+      WorkGroupIDX = true;
+
     if (F->hasFnAttribute("amdgpu-work-group-id-y"))
       WorkGroupIDY = true;
 
     if (F->hasFnAttribute("amdgpu-work-group-id-z"))
       WorkGroupIDZ = true;
 
+    if (F->hasFnAttribute("amdgpu-work-item-id-x"))
+      WorkItemIDX = true;
+
     if (F->hasFnAttribute("amdgpu-work-item-id-y"))
       WorkItemIDY = true;
 
@@ -119,22 +129,24 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
       WorkItemIDZ = true;
   }
 
-  // X, XY, and XYZ are the only supported combinations, so make sure Y is
-  // enabled if Z is.
-  if (WorkItemIDZ)
-    WorkItemIDY = true;
-
   const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
   bool MaySpill = ST.isVGPRSpillingEnabled(*F);
   bool HasStackObjects = FrameInfo.hasStackObjects() || FrameInfo.hasCalls();
 
-  if (HasStackObjects || MaySpill) {
-    PrivateSegmentWaveByteOffset = true;
+  if (isEntryFunction()) {
+    // X, XY, and XYZ are the only supported combinations, so make sure Y is
+    // enabled if Z is.
+    if (WorkItemIDZ)
+      WorkItemIDY = true;
+
+    if (HasStackObjects || MaySpill) {
+      PrivateSegmentWaveByteOffset = true;
 
-    // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
-    if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
-        (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
-      PrivateSegmentWaveByteOffsetSystemSGPR = AMDGPU::SGPR5;
+      // HS and GS always have the scratch wave offset in SGPR5 on GFX9.
+      if (ST.getGeneration() >= AMDGPUSubtarget::GFX9 &&
+          (CC == CallingConv::AMDGPU_HS || CC == CallingConv::AMDGPU_GS))
+        PrivateSegmentWaveByteOffsetSystemSGPR = AMDGPU::SGPR5;
+    }
   }
 
   if (ST.isAmdCodeObjectV2(MF)) {
@@ -160,7 +172,8 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
 
   // We don't need to worry about accessing spills with flat instructions.
   // TODO: On VI where we must use flat for global, we should be able to omit
   // this if it is never used for generic access.
-  if (HasStackObjects && ST.hasFlatAddressSpace() && ST.isAmdHsaOS())
+  if (HasStackObjects && ST.hasFlatAddressSpace() && ST.isAmdHsaOS() &&
+      isEntryFunction())
     FlatScratchInit = true;
 }
diff --git a/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index 05aa249584b..a9a229dfb35 100644
--- a/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -119,6 +119,11 @@ class SIMachineFunctionInfo final : public AMDGPUMachineFunction {
   unsigned WorkGroupInfoSystemSGPR;
   unsigned PrivateSegmentWaveByteOffsetSystemSGPR;
 
+  // VGPR inputs. These are always v0, v1 and v2 for entry functions.
+  unsigned WorkItemIDXVGPR;
+  unsigned WorkItemIDYVGPR;
+  unsigned WorkItemIDZVGPR;
+
   // Graphics info.
   unsigned PSInputAddr;
   unsigned PSInputEnable;
diff --git a/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll b/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
index 39760d9e578..32bcb21279c 100644
--- a/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
+++ b/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
@@ -1,8 +1,10 @@
 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -amdgpu-annotate-kernel-features %s | FileCheck -check-prefix=HSA %s
 
+declare i32 @llvm.amdgcn.workgroup.id.x() #0
 declare i32 @llvm.amdgcn.workgroup.id.y() #0
 declare i32 @llvm.amdgcn.workgroup.id.z() #0
 
+declare i32 @llvm.amdgcn.workitem.id.x() #0
 declare i32 @llvm.amdgcn.workitem.id.y() #0
 declare i32 @llvm.amdgcn.workitem.id.z() #0
 
@@ -12,56 +14,70 @@ declare i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr() #0
 declare i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr() #0
 declare i64 @llvm.amdgcn.dispatch.id() #0
 
-; HSA: define void @use_workitem_id_y() #1 {
+; HSA: define void @use_workitem_id_x() #1 {
+define void @use_workitem_id_x() #1 {
+  %val = call i32 @llvm.amdgcn.workitem.id.x()
+  store volatile i32 %val, i32 addrspace(1)* undef
+  ret void
+}
+
+; HSA: define void @use_workitem_id_y() #2 {
 define void @use_workitem_id_y() #1 {
   %val = call i32 @llvm.amdgcn.workitem.id.y()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workitem_id_z() #2 {
+; HSA: define void @use_workitem_id_z() #3 {
 define void @use_workitem_id_z() #1 {
   %val = call i32 @llvm.amdgcn.workitem.id.z()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workgroup_id_y() #3 {
+; HSA: define void @use_workgroup_id_x() #4 {
+define void @use_workgroup_id_x() #1 {
+  %val = call i32 @llvm.amdgcn.workgroup.id.x()
+  store volatile i32 %val, i32 addrspace(1)* undef
+  ret void
+}
+
+; HSA: define void @use_workgroup_id_y() #5 {
 define void @use_workgroup_id_y() #1 {
   %val = call i32 @llvm.amdgcn.workgroup.id.y()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workgroup_id_z() #4 {
+; HSA: define void @use_workgroup_id_z() #6 {
 define void @use_workgroup_id_z() #1 {
   %val = call i32 @llvm.amdgcn.workgroup.id.z()
   store volatile i32 %val, i32 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_dispatch_ptr() #5 {
+; HSA: define void @use_dispatch_ptr() #7 {
 define void @use_dispatch_ptr() #1 {
   %dispatch.ptr = call i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
   store volatile i8 addrspace(2)* %dispatch.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_queue_ptr() #6 {
+; HSA: define void @use_queue_ptr() #8 {
 define void @use_queue_ptr() #1 {
   %queue.ptr = call i8 addrspace(2)* @llvm.amdgcn.queue.ptr()
   store volatile i8 addrspace(2)* %queue.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_dispatch_id() #7 {
+; HSA: define void @use_dispatch_id() #9 {
 define void @use_dispatch_id() #1 {
   %val = call i64 @llvm.amdgcn.dispatch.id()
   store volatile i64 %val, i64 addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @use_workgroup_id_y_workgroup_id_z() #8 {
+; HSA: define void @use_workgroup_id_y_workgroup_id_z() #10 {
 define void @use_workgroup_id_y_workgroup_id_z() #1 {
   %val0 = call i32 @llvm.amdgcn.workgroup.id.y()
   %val1 = call i32 @llvm.amdgcn.workgroup.id.z()
@@ -70,67 +86,91 @@ define void @use_workgroup_id_y_workgroup_id_z() #1 {
   ret void
 }
 
-; HSA: define void @func_indirect_use_workitem_id_y() #1 {
+; HSA: define void @func_indirect_use_workitem_id_x() #1 {
+define void @func_indirect_use_workitem_id_x() #1 {
+  call void @use_workitem_id_x()
+  ret void
+}
+
+; HSA: define void @kernel_indirect_use_workitem_id_x() #1 {
+define void @kernel_indirect_use_workitem_id_x() #1 {
+  call void @use_workitem_id_x()
+  ret void
+}
+
+; HSA: define void @func_indirect_use_workitem_id_y() #2 {
 define void @func_indirect_use_workitem_id_y() #1 {
   call void @use_workitem_id_y()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workitem_id_z() #2 {
+; HSA: define void @func_indirect_use_workitem_id_z() #3 {
 define void @func_indirect_use_workitem_id_z() #1 {
   call void @use_workitem_id_z()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workgroup_id_y() #3 {
+; HSA: define void @func_indirect_use_workgroup_id_x() #4 {
+define void @func_indirect_use_workgroup_id_x() #1 {
+  call void @use_workgroup_id_x()
+  ret void
+}
+
+; HSA: define void @kernel_indirect_use_workgroup_id_x() #4 {
+define void @kernel_indirect_use_workgroup_id_x() #1 {
+  call void @use_workgroup_id_x()
+  ret void
+}
+
+; HSA: define void @func_indirect_use_workgroup_id_y() #5 {
 define void @func_indirect_use_workgroup_id_y() #1 {
   call void @use_workgroup_id_y()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workgroup_id_z() #4 {
+; HSA: define void @func_indirect_use_workgroup_id_z() #6 {
 define void @func_indirect_use_workgroup_id_z() #1 {
   call void @use_workgroup_id_z()
   ret void
 }
 
-; HSA: define void @func_indirect_indirect_use_workgroup_id_y() #3 {
+; HSA: define void @func_indirect_indirect_use_workgroup_id_y() #5 {
 define void @func_indirect_indirect_use_workgroup_id_y() #1 {
   call void @func_indirect_use_workgroup_id_y()
   ret void
 }
 
-; HSA: define void @indirect_x2_use_workgroup_id_y() #3 {
+; HSA: define void @indirect_x2_use_workgroup_id_y() #5 {
 define void @indirect_x2_use_workgroup_id_y() #1 {
   call void @func_indirect_indirect_use_workgroup_id_y()
   ret void
 }
 
-; HSA: define void @func_indirect_use_dispatch_ptr() #5 {
+; HSA: define void @func_indirect_use_dispatch_ptr() #7 {
 define void @func_indirect_use_dispatch_ptr() #1 {
   call void @use_dispatch_ptr()
   ret void
 }
 
-; HSA: define void @func_indirect_use_queue_ptr() #6 {
+; HSA: define void @func_indirect_use_queue_ptr() #8 {
 define void @func_indirect_use_queue_ptr() #1 {
   call void @use_queue_ptr()
   ret void
 }
 
-; HSA: define void @func_indirect_use_dispatch_id() #7 {
+; HSA: define void @func_indirect_use_dispatch_id() #9 {
 define void @func_indirect_use_dispatch_id() #1 {
   call void @use_dispatch_id()
   ret void
 }
 
-; HSA: define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #9 {
+; HSA: define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #11 {
 define void @func_indirect_use_workgroup_id_y_workgroup_id_z() #1 {
   call void @func_indirect_use_workgroup_id_y_workgroup_id_z()
   ret void
 }
 
-; HSA: define void @recursive_use_workitem_id_y() #1 {
+; HSA: define void @recursive_use_workitem_id_y() #2 {
 define void @recursive_use_workitem_id_y() #1 {
   %val = call i32 @llvm.amdgcn.workitem.id.y()
   store volatile i32 %val, i32 addrspace(1)* undef
@@ -138,27 +178,27 @@ define void @recursive_use_workitem_id_y() #1 {
   ret void
 }
 
-; HSA: define void @call_recursive_use_workitem_id_y() #1 {
+; HSA: define void @call_recursive_use_workitem_id_y() #2 {
 define void @call_recursive_use_workitem_id_y() #1 {
   call void @recursive_use_workitem_id_y()
   ret void
 }
 
-; HSA: define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #6 {
+; HSA: define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #8 {
 define void @use_group_to_flat_addrspacecast(i32 addrspace(3)* %ptr) #1 {
   %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
   store volatile i32 0, i32 addrspace(4)* %stof
   ret void
 }
 
-; HSA: define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #10 {
+; HSA: define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #12 {
 define void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* %ptr) #2 {
   %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
   store volatile i32 0, i32 addrspace(4)* %stof
   ret void
 }
 
-; HSA: define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #11 {
+; HSA: define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #13 {
 define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %ptr) #2 {
   %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(4)*
   store volatile i32 0, i32 addrspace(4)* %stof
@@ -166,45 +206,45 @@ define void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* %p
   ret void
 }
 
-; HSA: define void @indirect_use_group_to_flat_addrspacecast() #6 {
+; HSA: define void @indirect_use_group_to_flat_addrspacecast() #8 {
 define void @indirect_use_group_to_flat_addrspacecast() #1 {
   call void @use_group_to_flat_addrspacecast(i32 addrspace(3)* null)
   ret void
 }
 
-; HSA: define void @indirect_use_group_to_flat_addrspacecast_gfx9() #9 {
+; HSA: define void @indirect_use_group_to_flat_addrspacecast_gfx9() #11 {
 define void @indirect_use_group_to_flat_addrspacecast_gfx9() #1 {
   call void @use_group_to_flat_addrspacecast_gfx9(i32 addrspace(3)* null)
   ret void
 }
 
-; HSA: define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #6 {
+; HSA: define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #8 {
 define void @indirect_use_group_to_flat_addrspacecast_queue_ptr_gfx9() #1 {
   call void @use_group_to_flat_addrspacecast_queue_ptr_gfx9(i32 addrspace(3)* null)
   ret void
 }
 
-; HSA: define void @use_kernarg_segment_ptr() #12 {
+; HSA: define void @use_kernarg_segment_ptr() #14 {
 define void @use_kernarg_segment_ptr() #1 {
   %kernarg.segment.ptr = call i8 addrspace(2)* @llvm.amdgcn.kernarg.segment.ptr()
   store volatile i8 addrspace(2)* %kernarg.segment.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @func_indirect_use_kernarg_segment_ptr() #12 {
+; HSA: define void @func_indirect_use_kernarg_segment_ptr() #14 {
 define void @func_indirect_use_kernarg_segment_ptr() #1 {
   call void @use_kernarg_segment_ptr()
   ret void
 }
 
-; HSA: define void @use_implicitarg_ptr() #12 {
+; HSA: define void @use_implicitarg_ptr() #14 {
 define void @use_implicitarg_ptr() #1 {
   %implicitarg.ptr = call i8 addrspace(2)* @llvm.amdgcn.implicitarg.ptr()
   store volatile i8 addrspace(2)* %implicitarg.ptr, i8 addrspace(2)* addrspace(1)* undef
   ret void
 }
 
-; HSA: define void @func_indirect_use_implicitarg_ptr() #12 {
+; HSA: define void @func_indirect_use_implicitarg_ptr() #14 {
 define void @func_indirect_use_implicitarg_ptr() #1 {
   call void @use_implicitarg_ptr()
   ret void
@@ -215,15 +255,17 @@ attributes #1 = { nounwind "target-cpu"="fiji" }
 attributes #2 = { nounwind "target-cpu"="gfx900" }
 
 ; HSA: attributes #0 = { nounwind readnone speculatable }
-; HSA: attributes #1 = { nounwind "amdgpu-work-item-id-y" "target-cpu"="fiji" }
-; HSA: attributes #2 = { nounwind "amdgpu-work-item-id-z" "target-cpu"="fiji" }
-; HSA: attributes #3 = { nounwind "amdgpu-work-group-id-y" "target-cpu"="fiji" }
-; HSA: attributes #4 = { nounwind "amdgpu-work-group-id-z" "target-cpu"="fiji" }
-; HSA: attributes #5 = { nounwind "amdgpu-dispatch-ptr" "target-cpu"="fiji" }
-; HSA: attributes #6 = { nounwind "amdgpu-queue-ptr" "target-cpu"="fiji" }
-; HSA: attributes #7 = { nounwind "amdgpu-dispatch-id" "target-cpu"="fiji" }
-; HSA: attributes #8 = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "target-cpu"="fiji" }
-; HSA: attributes #9 = { nounwind "target-cpu"="fiji" }
-; HSA: attributes #10 = { nounwind "target-cpu"="gfx900" }
-; HSA: attributes #11 = { nounwind "amdgpu-queue-ptr" "target-cpu"="gfx900" }
-; HSA: attributes #12 = { nounwind "amdgpu-kernarg-segment-ptr" "target-cpu"="fiji" }
+; HSA: attributes #1 = { nounwind "amdgpu-work-item-id-x" "target-cpu"="fiji" }
+; HSA: attributes #2 = { nounwind "amdgpu-work-item-id-y" "target-cpu"="fiji" }
+; HSA: attributes #3 = { nounwind "amdgpu-work-item-id-z" "target-cpu"="fiji" }
+; HSA: attributes #4 = { nounwind "amdgpu-work-group-id-x" "target-cpu"="fiji" }
+; HSA: attributes #5 = { nounwind "amdgpu-work-group-id-y" "target-cpu"="fiji" }
+; HSA: attributes #6 = { nounwind "amdgpu-work-group-id-z" "target-cpu"="fiji" }
+; HSA: attributes #7 = { nounwind "amdgpu-dispatch-ptr" "target-cpu"="fiji" }
+; HSA: attributes #8 = { nounwind "amdgpu-queue-ptr" "target-cpu"="fiji" }
+; HSA: attributes #9 = { nounwind "amdgpu-dispatch-id" "target-cpu"="fiji" }
+; HSA: attributes #10 = { nounwind "amdgpu-work-group-id-y" "amdgpu-work-group-id-z" "target-cpu"="fiji" }
+; HSA: attributes #11 = { nounwind "target-cpu"="fiji" }
+; HSA: attributes #12 = { nounwind "target-cpu"="gfx900" }
+; HSA: attributes #13 = { nounwind "amdgpu-queue-ptr" "target-cpu"="gfx900" }
+; HSA: attributes #14 = { nounwind "amdgpu-kernarg-segment-ptr" "target-cpu"="fiji" }