1 //===- PPCCallingConv.td - Calling Conventions for PowerPC -*- tablegen -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This describes the calling conventions for the PowerPC 32- and 64-bit
13 //===----------------------------------------------------------------------===//
15 /// CCIfSubtarget - Match if the current subtarget has a feature F.
// F names a boolean PPCSubtarget predicate member (e.g. "isPPC64()" or
// "hasAltivec()"); !strconcat splices it into a C++ expression that the
// generated calling-convention code evaluates against the current function's
// subtarget.
16 class CCIfSubtarget<string F, CCAction A>
17 : CCIf<!strconcat("static_cast<const PPCSubtarget&>"
18 "(State.getMachineFunction().getSubtarget()).",
// CCIfNotSubtarget - Inverse of CCIfSubtarget: apply action A only when the
// current subtarget does NOT have feature F (note the leading '!' on the
// generated predicate).
21 class CCIfNotSubtarget<string F, CCAction A>
22 : CCIf<!strconcat("!static_cast<const PPCSubtarget&>"
23 "(State.getMachineFunction().getSubtarget()).",
// Apply action A only when the original (pre-legalization) IR argument at
// ValNo was NOT of type ppc_fp128, as recorded in the PPCCCState.
26 class CCIfOrigArgWasNotPPCF128<CCAction A>
27 : CCIf<"!static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
// Apply action A only when the original IR argument at ValNo WAS of type
// ppc_fp128, as recorded in the PPCCCState.
29 class CCIfOrigArgWasPPCF128<CCAction A>
30 : CCIf<"static_cast<PPCCCState *>(&State)->WasOriginalArgPPCF128(ValNo)",
33 //===----------------------------------------------------------------------===//
34 // Return Value Calling Convention
35 //===----------------------------------------------------------------------===//
37 // PPC64 AnyReg return-value convention. No explicit register is specified for
38 // the return-value. The register allocator is allowed and expected to choose
41 // This calling convention is currently only supported by the stackmap and
42 // patchpoint intrinsics. All other uses will result in an assert on Debug
43 // builds. On Release builds we fall back to the PPC C calling convention.
44 def RetCC_PPC64_AnyReg : CallingConv<[
// CC_PPC_AnyReg_Error is a custom C++ handler that diagnoses any unsupported
// use of this convention.
45 CCCustom<"CC_PPC_AnyReg_Error">
48 // Return-value convention for PowerPC
49 def RetCC_PPC : CallingConv<[
// anyregcc returns are routed to the error convention above.
50 CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,
52 // On PPC64, integer return values are always promoted to i64
53 CCIfType<[i32, i1], CCIfSubtarget<"isPPC64()", CCPromoteToType<i64>>>,
54 CCIfType<[i1], CCIfNotSubtarget<"isPPC64()", CCPromoteToType<i32>>>,
// 32-bit integers in R3-R10; 64-bit (and 128-bit) integers in X3-X6.
// NOTE(review): i128 listed here presumably occupies a pair of X registers —
// confirm against the lowering code.
56 CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
57 CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
58 CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
60 // Floating point types returned as "direct" go into F1 .. F8; note that
61 // only the ELFv2 ABI fully utilizes all these registers.
62 CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
63 CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
65 // QPX vectors are returned in QF1 and QF2.
66 CCIfType<[v4f64, v4f32, v4i1],
67 CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,
69 // Vector types returned as "direct" go into V2 .. V9; note that only the
70 // ELFv2 ABI fully utilizes all these registers.
71 CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32],
72 CCIfSubtarget<"hasAltivec()",
73 CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
// VSX-only vector types use the VSH (high VSX) register names.
74 CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
75 CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9]>>>
78 // No explicit register is specified for the AnyReg calling convention. The
79 // register allocator may assign the arguments to any free register.
81 // This calling convention is currently only supported by the stackmap and
82 // patchpoint intrinsics. All other uses will result in an assert on Debug
83 // builds. On Release builds we fall back to the PPC C calling convention.
84 def CC_PPC64_AnyReg : CallingConv<[
// Same custom error handler as RetCC_PPC64_AnyReg.
85 CCCustom<"CC_PPC_AnyReg_Error">
88 // Note that we don't currently have calling conventions for 64-bit
89 // PowerPC, but handle all the complexities of the ABI in the lowering
90 // logic. FIXME: See if the logic can be simplified with use of CCs.
91 // This may require some extensions to current table generation.
93 // Simple calling convention for 64-bit ELF PowerPC fast isel.
94 // Only handle ints and floats. All ints are promoted to i64.
95 // Vector types and quadword ints are not handled.
96 def CC_PPC64_ELF_FIS : CallingConv<[
97 CCIfCC<"CallingConv::AnyReg", CCDelegateTo<CC_PPC64_AnyReg>>,
// Every sub-64-bit integer type is widened to i64 before assignment.
99 CCIfType<[i1], CCPromoteToType<i64>>,
100 CCIfType<[i8], CCPromoteToType<i64>>,
101 CCIfType<[i16], CCPromoteToType<i64>>,
102 CCIfType<[i32], CCPromoteToType<i64>>,
// Integer arguments in X3-X10, FP arguments in F1-F8.
103 CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6, X7, X8, X9, X10]>>,
104 CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>
107 // Simple return-value convention for 64-bit ELF PowerPC fast isel.
108 // All small ints are promoted to i64. Vector types, quadword ints,
109 // and multiple register returns are "supported" to avoid compile
110 // errors, but none are handled by the fast selector.
111 def RetCC_PPC64_ELF_FIS : CallingConv<[
112 CCIfCC<"CallingConv::AnyReg", CCDelegateTo<RetCC_PPC64_AnyReg>>,
// Widen all small integer returns to i64, mirroring CC_PPC64_ELF_FIS.
114 CCIfType<[i1], CCPromoteToType<i64>>,
115 CCIfType<[i8], CCPromoteToType<i64>>,
116 CCIfType<[i16], CCPromoteToType<i64>>,
117 CCIfType<[i32], CCPromoteToType<i64>>,
118 CCIfType<[i64], CCAssignToReg<[X3, X4, X5, X6]>>,
119 CCIfType<[i128], CCAssignToReg<[X3, X4, X5, X6]>>,
120 CCIfType<[f32], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
121 CCIfType<[f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
// Vector returns match RetCC_PPC: QPX in QF1-QF2, Altivec in V2-V9,
// VSX-only types in VSH2-VSH9.
122 CCIfType<[v4f64, v4f32, v4i1],
123 CCIfSubtarget<"hasQPX()", CCAssignToReg<[QF1, QF2]>>>,
124 CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32],
125 CCIfSubtarget<"hasAltivec()",
126 CCAssignToReg<[V2, V3, V4, V5, V6, V7, V8, V9]>>>,
127 CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
128 CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9]>>>
131 //===----------------------------------------------------------------------===//
132 // PowerPC System V Release 4 32-bit ABI
133 //===----------------------------------------------------------------------===//
// Rules shared by all 32-bit SVR4 argument conventions; the VarArg and
// register-preferring variants below delegate here.
135 def CC_PPC32_SVR4_Common : CallingConv<[
136 CCIfType<[i1], CCPromoteToType<i32>>,
138 // The ABI requires i64 to be passed in two adjacent registers with the first
139 // register having an odd register number.
// The three CCIfSplit entries below invoke custom C++ handlers to enforce
// that pairing; soft-float targets additionally special-case arguments that
// originated as ppc_fp128 (see CCIfOrigArgWas*PPCF128 above).
141 CCIfSplit<CCIfSubtarget<"useSoftFloat()",
142 CCIfOrigArgWasNotPPCF128<
143 CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>>,
146 CCIfSplit<CCIfNotSubtarget<"useSoftFloat()",
147 CCCustom<"CC_PPC32_SVR4_Custom_AlignArgRegs">>>>,
148 CCIfSplit<CCIfSubtarget<"useSoftFloat()",
149 CCIfOrigArgWasPPCF128<CCCustom<
150 "CC_PPC32_SVR4_Custom_SkipLastArgRegsPPCF128">>>>,
152 // The 'nest' parameter, if any, is passed in R11.
153 CCIfNest<CCAssignToReg<[R11]>>,
155 // The first 8 integer arguments are passed in integer registers.
156 CCIfType<[i32], CCAssignToReg<[R3, R4, R5, R6, R7, R8, R9, R10]>>,
158 // Make sure the i64 words from a long double are either both passed in
159 // registers or both passed on the stack.
160 CCIfType<[f64], CCIfSplit<CCCustom<"CC_PPC32_SVR4_Custom_AlignFPArgRegs">>>,
162 // FP values are passed in F1 - F8.
163 CCIfType<[f32, f64], CCAssignToReg<[F1, F2, F3, F4, F5, F6, F7, F8]>>,
165 // Split arguments have an alignment of 8 bytes on the stack.
166 CCIfType<[i32], CCIfSplit<CCAssignToStack<4, 8>>>,
// Remaining i32 arguments go to the stack, 4-byte size and alignment.
168 CCIfType<[i32], CCAssignToStack<4, 4>>,
170 // Floats are stored in double precision format, thus they have the same
171 // alignment and size as doubles.
172 CCIfType<[f32,f64], CCAssignToStack<8, 8>>,
174 // QPX vectors that are stored in double precision need 32-byte alignment.
175 CCIfType<[v4f64, v4i1], CCAssignToStack<32, 32>>,
177 // Vectors get 16-byte stack slots that are 16-byte aligned.
178 CCIfType<[v16i8, v8i16, v4i32, v4f32, v2f64, v2i64], CCAssignToStack<16, 16>>
181 // This calling convention puts vector arguments always on the stack. It is used
182 // to assign vector arguments which belong to the variable portion of the
183 // parameter list of a variable argument function.
// It has no rules of its own: by delegating straight to the common
// convention, vectors skip the register-assignment entries that only
// CC_PPC32_SVR4 adds, and therefore land on the stack.
184 def CC_PPC32_SVR4_VarArg : CallingConv<[
185 CCDelegateTo<CC_PPC32_SVR4_Common>
188 // In contrast to CC_PPC32_SVR4_VarArg, this calling convention first tries to
189 // put vector arguments in vector registers before putting them on the stack.
190 def CC_PPC32_SVR4 : CallingConv<[
191 // QPX vectors mirror the scalar FP convention.
192 CCIfType<[v4f64, v4f32, v4i1], CCIfSubtarget<"hasQPX()",
193 CCAssignToReg<[QF1, QF2, QF3, QF4, QF5, QF6, QF7, QF8]>>>,
195 // The first 12 Vector arguments are passed in AltiVec registers.
196 CCIfType<[v16i8, v8i16, v4i32, v2i64, v1i128, v4f32],
197 CCIfSubtarget<"hasAltivec()", CCAssignToReg<[V2, V3, V4, V5, V6, V7,
198 V8, V9, V10, V11, V12, V13]>>>,
// VSX-only vector types get the matching 12 high-VSX registers.
199 CCIfType<[v2f64, v2i64], CCIfSubtarget<"hasVSX()",
200 CCAssignToReg<[VSH2, VSH3, VSH4, VSH5, VSH6, VSH7, VSH8, VSH9,
201 VSH10, VSH11, VSH12, VSH13]>>>,
// Everything else (ints, floats, overflow vectors) uses the shared rules.
203 CCDelegateTo<CC_PPC32_SVR4_Common>
206 // Helper "calling convention" to handle aggregate by value arguments.
207 // Aggregate by value arguments are always placed in the local variable space
208 // of the caller. This calling convention is only used to assign those stack
209 // offsets in the caller's stack frame.
211 // Still, the address of the aggregate copy in the caller's stack frame is passed
212 // in a GPR (or in the parameter list area if all GPRs are allocated) from the
213 // caller to the callee. The location for the address argument is assigned by
214 // the CC_PPC32_SVR4 calling convention.
216 // The only purpose of CC_PPC32_SVR4_Custom_Dummy is to skip arguments which are
217 // not passed by value.
219 def CC_PPC32_SVR4_ByVal : CallingConv<[
// By-value aggregates: 4-byte slot size, 4-byte alignment.
220 CCIfByVal<CCPassByVal<4, 4>>,
// Non-byval arguments fall through to the no-op custom handler.
222 CCCustom<"CC_PPC32_SVR4_Custom_Dummy">
// Callee-saved Altivec vector registers: V20-V31. Added onto the base CSR
// sets below when the target has Altivec.
225 def CSR_Altivec : CalleeSavedRegs<(add V20, V21, V22, V23, V24, V25, V26, V27,
226 V28, V29, V30, V31)>;
// Darwin 32-bit callee-saved set: R13-R31, F14-F31, and CR2-CR4.
228 def CSR_Darwin32 : CalleeSavedRegs<(add R13, R14, R15, R16, R17, R18, R19, R20,
229 R21, R22, R23, R24, R25, R26, R27, R28,
230 R29, R30, R31, F14, F15, F16, F17, F18,
231 F19, F20, F21, F22, F23, F24, F25, F26,
232 F27, F28, F29, F30, F31, CR2, CR3, CR4
235 def CSR_Darwin32_Altivec : CalleeSavedRegs<(add CSR_Darwin32, CSR_Altivec)>;
// SVR4 32-bit callee-saved set: R14-R31 (R13 is NOT saved, unlike Darwin),
// F14-F31, and CR2-CR4.
237 def CSR_SVR432 : CalleeSavedRegs<(add R14, R15, R16, R17, R18, R19, R20,
238 R21, R22, R23, R24, R25, R26, R27, R28,
239 R29, R30, R31, F14, F15, F16, F17, F18,
240 F19, F20, F21, F22, F23, F24, F25, F26,
241 F27, F28, F29, F30, F31, CR2, CR3, CR4
244 def CSR_SVR432_Altivec : CalleeSavedRegs<(add CSR_SVR432, CSR_Altivec)>;
// Darwin 64-bit callee-saved set: X13-X31, F14-F31, and CR2-CR4.
246 def CSR_Darwin64 : CalleeSavedRegs<(add X13, X14, X15, X16, X17, X18, X19, X20,
247 X21, X22, X23, X24, X25, X26, X27, X28,
248 X29, X30, X31, F14, F15, F16, F17, F18,
249 F19, F20, F21, F22, F23, F24, F25, F26,
250 F27, F28, F29, F30, F31, CR2, CR3, CR4
253 def CSR_Darwin64_Altivec : CalleeSavedRegs<(add CSR_Darwin64, CSR_Altivec)>;
// SVR4/ELF 64-bit callee-saved set: X14-X31 (X13 is NOT saved, unlike
// Darwin), F14-F31, and CR2-CR4.
255 def CSR_SVR464 : CalleeSavedRegs<(add X14, X15, X16, X17, X18, X19, X20,
256 X21, X22, X23, X24, X25, X26, X27, X28,
257 X29, X30, X31, F14, F15, F16, F17, F18,
258 F19, F20, F21, F22, F23, F24, F25, F26,
259 F27, F28, F29, F30, F31, CR2, CR3, CR4
262 // CSRs that are handled by prologue, epilogue.
// Empty set: the TLS prologue/epilogue code saves what it needs itself.
// NOTE(review): "SRV464" looks like a transposition of "SVR464"; this name is
// presumably referenced with exactly this spelling elsewhere, so confirm all
// users before renaming.
263 def CSR_SRV464_TLS_PE : CalleeSavedRegs<(add)>;
// The *_ViaCopy variants contain the same registers as their base sets;
// presumably they select a different save/restore strategy in frame
// lowering — confirm against PPCFrameLowering before relying on this.
265 def CSR_SVR464_ViaCopy : CalleeSavedRegs<(add CSR_SVR464)>;
267 def CSR_SVR464_Altivec : CalleeSavedRegs<(add CSR_SVR464, CSR_Altivec)>;
269 def CSR_SVR464_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_Altivec)>;
// The *_R2 variants additionally preserve X2 (the TOC pointer).
271 def CSR_SVR464_R2 : CalleeSavedRegs<(add CSR_SVR464, X2)>;
273 def CSR_SVR464_R2_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2)>;
275 def CSR_SVR464_R2_Altivec : CalleeSavedRegs<(add CSR_SVR464_Altivec, X2)>;
277 def CSR_SVR464_R2_Altivec_ViaCopy : CalleeSavedRegs<(add CSR_SVR464_R2_Altivec)>;
// Empty callee-saved set: nothing is preserved across the call.
279 def CSR_NoRegs : CalleeSavedRegs<(add)>;
// "All registers" save set: X0, X3-X10, X14-X31, all FP registers, and all
// CR fields. Deliberately excludes X1/X2 and X11-X13.
// NOTE(review): presumably used for the AnyReg/patchpoint conventions above —
// confirm in the register-info code that selects this set.
281 def CSR_64_AllRegs: CalleeSavedRegs<(add X0, (sequence "X%u", 3, 10),
282 (sequence "X%u", 14, 31),
283 (sequence "F%u", 0, 31),
284 (sequence "CR%u", 0, 7))>;
// All-registers set extended with the full Altivec file (V0-V31).
286 def CSR_64_AllRegs_Altivec : CalleeSavedRegs<(add CSR_64_AllRegs,
287 (sequence "V%u", 0, 31))>;
// All-registers set further extended with both halves of the VSX file
// (VSL0-VSL31 and VSH0-VSH31).
289 def CSR_64_AllRegs_VSX : CalleeSavedRegs<(add CSR_64_AllRegs_Altivec,
290 (sequence "VSL%u", 0, 31),
291 (sequence "VSH%u", 0, 31))>;