GCCBuiltin<"__builtin_amdgcn_s_getreg">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrReadMem]>;
+// Returns the size in bytes of the statically allocated LDS (group/local
+// memory) for the current kernel, as an i32. No operands, no memory access
+// (IntrNoMem): the value is a compile-time constant materialized during
+// custom insertion from SIMachineFunctionInfo::LDSSize.
+def int_amdgcn_groupstaticsize :
+  GCCBuiltin<"__builtin_amdgcn_groupstaticsize">,
+  Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+
def int_amdgcn_dispatch_ptr :
GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
MachineInstr * MI, MachineBasicBlock * BB) const {
switch (MI->getOpcode()) {
- default:
- return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
case AMDGPU::BRANCH:
return BB;
+ // Expand the GET_GROUPSTATICSIZE pseudo: move the function's statically
+ // known LDS size (MFI->LDSSize) into the pseudo's destination operand
+ // with S_MOVK_I32, then delete the pseudo. NOTE(review): S_MOVK_I32
+ // takes a 16-bit immediate — presumably LDSSize always fits (LDS is
+ // small); confirm against the target's LDS limit.
+ case AMDGPU::GET_GROUPSTATICSIZE: {
+ const SIInstrInfo *TII =
+ static_cast<const SIInstrInfo *>(Subtarget->getInstrInfo());
+ MachineFunction *MF = BB->getParent();
+ SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
+ DebugLoc DL = MI->getDebugLoc();
+ BuildMI (*BB, MI, DL, TII->get(AMDGPU::S_MOVK_I32))
+ .addOperand(MI->getOperand(0))
+ .addImm(MFI->LDSSize);
+ MI->eraseFromParent();
+ return BB;
+ }
+ // Default moved below the explicit cases so new pseudos are handled here
+ // first; anything unrecognized still falls back to the superclass.
+ default:
+ return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
}
+ // NOTE(review): unreachable — every switch path above returns.
return BB;
}
def SGPR_USE : InstSI <(outs),(ins), "", []>;
}
+// Pseudo instruction selected from int_amdgcn_groupstaticsize. The LDS size
+// is not known at selection time, so usesCustomInserter routes it through
+// EmitInstrWithCustomInserter, which rewrites it to an S_MOVK_I32 of the
+// final static LDS size into $sdst.
+let usesCustomInserter = 1, SALU = 1 in {
+def GET_GROUPSTATICSIZE : InstSI <(outs SReg_32:$sdst), (ins), "",
+  [(set SReg_32:$sdst, (int_amdgcn_groupstaticsize))]>;
+} // End let usesCustomInserter = 1, SALU = 1
+
// SI pseudo instructions. These are used by the CFG structurizer pass
// and should be lowered to ISA instructions prior to codegen.
--- /dev/null
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck %s
+
+
+@lds0 = addrspace(3) global [512 x float] undef, align 4
+@lds1 = addrspace(3) global [256 x float] undef, align 4
+
+; CHECK-LABEL: {{^}}get_groupstaticsize_test0:
+; CHECK: s_movk_i32 s{{[0-9]+}}, 0x800
+; Stores the intrinsic result to %lds_size and also loads from @lds0 so the
+; kernel really allocates 512 x f32 = 0x800 bytes of static LDS, matching
+; the immediate checked above.
+define void @get_groupstaticsize_test0(float addrspace(1)* %out, i32 addrspace(1)* %lds_size) #0 {
+  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+  %idx.0 = add nsw i32 %tid.x, 64
+  %static_lds_size = call i32 @llvm.amdgcn.groupstaticsize() #1
+  store i32 %static_lds_size, i32 addrspace(1)* %lds_size, align 4
+  %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
+  store float %val0, float addrspace(1)* %out, align 4
+
+  ret void
+}
+
+
+; CHECK-LABEL: {{^}}groupstaticsize_test1:
+; CHECK: s_movk_i32 s{{[0-9]+}}, 0xc00
+; References BOTH @lds0 and @lds1 (one per branch), so the static LDS size
+; is their sum: (512 + 256) x f32 = 0xc00 bytes, regardless of which branch
+; executes at runtime — the checked immediate is the combined allocation.
+define void @groupstaticsize_test1(float addrspace(1)* %out, i32 %cond, i32 addrspace(1)* %lds_size) {
+entry:
+  %static_lds_size = call i32 @llvm.amdgcn.groupstaticsize() #1
+  store i32 %static_lds_size, i32 addrspace(1)* %lds_size, align 4
+  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
+  %idx.0 = add nsw i32 %tid.x, 64
+  %tmp = icmp eq i32 %cond, 0
+  br i1 %tmp, label %if, label %else
+
+if: ; preds = %entry
+  %arrayidx0 = getelementptr inbounds [512 x float], [512 x float] addrspace(3)* @lds0, i32 0, i32 %idx.0
+  %val0 = load float, float addrspace(3)* %arrayidx0, align 4
+  store float %val0, float addrspace(1)* %out, align 4
+  br label %endif
+
+else: ; preds = %entry
+  %arrayidx1 = getelementptr inbounds [256 x float], [256 x float] addrspace(3)* @lds1, i32 0, i32 %idx.0
+  %val1 = load float, float addrspace(3)* %arrayidx1, align 4
+  store float %val1, float addrspace(1)* %out, align 4
+  br label %endif
+
+endif: ; preds = %else, %if
+  ret void
+}
+
+
+declare i32 @llvm.amdgcn.groupstaticsize() #1
+declare i32 @llvm.amdgcn.workitem.id.x() #1
+
+attributes #0 = { nounwind }
+attributes #1 = { nounwind readnone }