2 * Copyright (C) 2011 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef DFGJITCodeGenerator_h
27 #define DFGJITCodeGenerator_h
31 #include "CodeBlock.h"
32 #include <dfg/DFGGenerationInfo.h>
33 #include <dfg/DFGGraph.h>
34 #include <dfg/DFGJITCompiler.h>
35 #include <dfg/DFGOperations.h>
36 #include <dfg/DFGRegisterBank.h>
38 namespace JSC { namespace DFG {
40 class SpeculateIntegerOperand;
41 class SpeculateStrictInt32Operand;
42 class SpeculateCellOperand;
45 // === JITCodeGenerator ===
47 // This class provides common infrastructure used by the speculative &
48 // non-speculative JITs. Provides common mechanisms for virtual and
49 // physical register management, calls out from JIT code to helper
51 class JITCodeGenerator {
53 typedef MacroAssembler::TrustedImm32 TrustedImm32;
54 typedef MacroAssembler::Imm32 Imm32;
56 // These constants are used to set priorities for spill order for
57 // the register allocator.
// Lower values are preferred victims: constants and already-spilled values
// can be dropped without a store; JS/cell values need a store; integers and
// doubles additionally need boxing/conversion before they can be stored.
// NOTE(review): the enum header/footer lines are elided in this chunk.
60 SpillOrderConstant = 1, // no spill, and cheap fill
61 SpillOrderSpilled = 2, // no spill
62 SpillOrderJS = 4, // needs spill
63 SpillOrderCell = 4, // needs spill
64 SpillOrderInteger = 5, // needs spill and box
65 SpillOrderDouble = 6, // needs spill and convert
// Load a node's value into a machine register (defined out of line).
// fillInteger reports via returnFormat whether the value is a raw int32
// or a boxed JS int32.
71 GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
72 FPRReg fillDouble(NodeIndex);
73 GPRReg fillJSValue(NodeIndex);
75 // lock and unlock GPR & FPR registers.
// NOTE(review): the one-line bodies (delegating to m_gprs/m_fprs) are
// elided in this chunk.
84 void unlock(GPRReg reg)
88 void unlock(FPRReg reg)
93 // Used to check whether a child node is on its last use,
94 // and its machine registers may be reused.
95 bool canReuse(NodeIndex nodeIndex)
97 VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
98 GenerationInfo& info = m_generationInfo[virtualRegister];
99 return info.canReuse();
// Take ownership of a register previously held by a (dying) operand and
// hand it back for reuse as a result register.
// NOTE(review): bodies are elided in this chunk.
101 GPRReg reuse(GPRReg reg)
106 FPRReg reuse(FPRReg reg)
112 // Allocate a gpr/fpr.
// Asks the register bank for a free register; if none is free the bank
// nominates a victim VirtualRegister, which is spilled before the register
// is returned. NOTE(review): the `GPRReg allocate()` / `FPRReg fprAllocate()`
// signature lines and the spill/return statements are elided in this chunk.
115 VirtualRegister spillMe;
116 GPRReg gpr = m_gprs.allocate(spillMe);
117 if (spillMe != InvalidVirtualRegister)
123 VirtualRegister spillMe;
124 FPRReg fpr = m_fprs.allocate(spillMe);
125 if (spillMe != InvalidVirtualRegister)
130 // Check whether a VirtualRegsiter is currently in a machine register.
131 // We use this when filling operands to fill those that are already in
132 // machine registers first (by locking VirtualRegsiters that are already
133 // in machine register before filling those that are not we attempt to
134 // avoid spilling values we will need immediately).
135 bool isFilled(NodeIndex nodeIndex)
137 VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
138 GenerationInfo& info = m_generationInfo[virtualRegister];
139 return info.registerFormat() != DataFormatNone;
141 bool isFilledDouble(NodeIndex nodeIndex)
143 VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
144 GenerationInfo& info = m_generationInfo[virtualRegister];
145 return info.registerFormat() == DataFormatDouble;
// Constructor: sizes the per-VirtualRegister GenerationInfo table from the
// code block's callee register count and the block-head label table from
// the graph's basic-block count. NOTE(review): some initializer-list lines
// (e.g. m_jit, m_compileIndex) are elided in this chunk.
149 JITCodeGenerator(JITCompiler& jit, bool isSpeculative)
151 , m_isSpeculative(isSpeculative)
153 , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
154 , m_blockHeads(jit.graph().m_blocks.size())
158 // These methods convert between doubles, and doubles boxed and JSValues.
// Boxing on JSVALUE64: move the double's bits into a GPR, then subtract the
// tag-type-number constant so the result is a boxed JSValue double.
// NOTE(review): the `return gpr;` / `return fpr;` lines and braces are
// elided in this chunk.
159 GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
161 JITCompiler::FPRegisterID fpReg = JITCompiler::fprToRegisterID(fpr);
162 JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr);
163 m_jit.moveDoubleToPtr(fpReg, reg);
164 m_jit.subPtr(JITCompiler::tagTypeNumberRegister, reg);
// Unboxing is the inverse: re-add the tag constant, then move the bits
// back into an FPR. Note this clobbers the source GPR.
167 FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
169 JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(gpr);
170 JITCompiler::FPRegisterID fpReg = JITCompiler::fprToRegisterID(fpr);
171 m_jit.addPtr(JITCompiler::tagTypeNumberRegister, reg);
172 m_jit.movePtrToDouble(reg, fpReg);
// Convenience overloads that allocate the destination register themselves.
175 GPRReg boxDouble(FPRReg fpr)
177 return boxDouble(fpr, allocate());
179 FPRReg unboxDouble(GPRReg gpr)
181 return unboxDouble(gpr, fprAllocate());
184 // Called on an operand once it has been consumed by a parent node.
185 void use(NodeIndex nodeIndex)
187 VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister;
188 GenerationInfo& info = m_generationInfo[virtualRegister];
190 // use() returns true when the value becomes dead, and any
191 // associated resources may be freed.
// NOTE(review): the early-return guard on info.use() is elided in this
// chunk; only the release path below is visible.
195 // Release the associated machine registers.
196 DataFormat registerFormat = info.registerFormat();
197 if (registerFormat == DataFormatDouble)
198 m_fprs.release(info.fpr());
199 else if (registerFormat != DataFormatNone)
200 m_gprs.release(info.gpr());
203 // Spill a VirtualRegister to the RegisterFile.
204 void spill(VirtualRegister spillMe)
206 GenerationInfo& info = m_generationInfo[spillMe];
208 // Check the GenerationInfo to see if this value need writing
209 // to the RegisterFile - if not, mark it as spilled & return.
210 if (!info.needsSpill()) {
// (Body of the no-spill branch — info.spill(...) and return — elided.)
215 DataFormat spillFormat = info.registerFormat();
216 if (spillFormat == DataFormatDouble) {
217 // All values are spilled as JSValues, so box the double via a temporary gpr.
218 GPRReg gpr = boxDouble(info.fpr());
219 m_jit.storePtr(JITCompiler::gprToRegisterID(gpr), JITCompiler::addressFor(spillMe));
221 info.spill(DataFormatJSDouble);
// (unlock of the temporary gpr and the branch's return are elided.)
225 // The following code handles JSValues, int32s, and cells.
226 ASSERT(spillFormat == DataFormatInteger || spillFormat == DataFormatCell || spillFormat & DataFormatJS);
228 JITCompiler::RegisterID reg = JITCompiler::gprToRegisterID(info.gpr());
229 // We need to box int32 and cell values ...
230 // but on JSVALUE64 boxing a cell is a no-op!
231 if (spillFormat == DataFormatInteger)
232 m_jit.orPtr(JITCompiler::tagTypeNumberRegister, reg);
234 // Spill the value, and record it as spilled in its boxed form.
235 m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
236 info.spill((DataFormat)(spillFormat | DataFormatJS));
239 // Checks/accessors for constant values.
// Thin forwarders to the JITCompiler's constant-pool queries; the valueOf*
// accessors assume the corresponding is*Constant predicate is true.
240 bool isConstant(NodeIndex nodeIndex) { return m_jit.isConstant(nodeIndex); }
241 bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.isInt32Constant(nodeIndex); }
242 bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.isDoubleConstant(nodeIndex); }
243 bool isJSConstant(NodeIndex nodeIndex) { return m_jit.isJSConstant(nodeIndex); }
244 int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.valueOfInt32Constant(nodeIndex); }
245 double valueOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.valueOfDoubleConstant(nodeIndex); }
246 JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.valueOfJSConstant(nodeIndex); }
// Look up an identifier in the code block's identifier table.
248 Identifier* identifier(unsigned index)
250 return &m_jit.codeBlock()->identifier(index);
253 // Spill all VirtualRegisters back to the RegisterFile.
// Walks both register banks; every named (live) register is spilled and
// released. NOTE(review): the loop bodies (spill/release calls) are elided
// in this chunk.
254 void flushRegisters()
256 for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) {
257 VirtualRegister name = m_gprs.name(gpr);
258 if (name != InvalidVirtualRegister) {
263 for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) {
264 VirtualRegister name = m_fprs.name(fpr);
265 if (name != InvalidVirtualRegister) {
273 // Used to ASSERT flushRegisters() has been called prior to
274 // calling out from JIT code to a C helper function.
// Debug-only check (`bool isFlushed()` signature elided): returns false if
// any register still names a VirtualRegister.
277 for (GPRReg gpr = gpr0; gpr < numberOfGPRs; next(gpr)) {
278 VirtualRegister name = m_gprs.name(gpr);
279 if (name != InvalidVirtualRegister)
282 for (FPRReg fpr = fpr0; fpr < numberOfFPRs; next(fpr)) {
283 VirtualRegister name = m_fprs.name(fpr);
284 if (name != InvalidVirtualRegister)
291 // Get the JSValue representation of a constant.
292 JSValue constantAsJSValue(NodeIndex nodeIndex)
294 Node& node = m_jit.graph()[nodeIndex];
295 if (isInt32Constant(nodeIndex))
296 return jsNumber(node.int32Constant());
297 if (isDoubleConstant(nodeIndex))
298 return JSValue(JSValue::EncodeAsDouble, node.numericConstant());
299 ASSERT(isJSConstant(nodeIndex));
300 return valueOfJSConstant(nodeIndex);
302 MacroAssembler::ImmPtr constantAsJSValueAsImmPtr(NodeIndex nodeIndex)
304 return MacroAssembler::ImmPtr(JSValue::encode(constantAsJSValue(nodeIndex)));
307 // Helper functions to enable code sharing in implementations of bit/shift ops.
// Each overload dispatches on the node's opcode to the matching assembler
// instruction. NOTE(review): the `switch (op)`, `case ...:` and `break;`
// lines are elided in this chunk; the visible lines are the case bodies
// (BitAnd / BitOr / BitXor, and ShiftRight / ShiftLeft / UShiftRight),
// with ASSERT_NOT_REACHED() as the default.
308 void bitOp(NodeType op, int32_t imm, MacroAssembler::RegisterID op1, MacroAssembler::RegisterID result)
312 m_jit.and32(Imm32(imm), op1, result);
315 m_jit.or32(Imm32(imm), op1, result);
318 m_jit.xor32(Imm32(imm), op1, result);
321 ASSERT_NOT_REACHED();
324 void bitOp(NodeType op, MacroAssembler::RegisterID op1, MacroAssembler::RegisterID op2, MacroAssembler::RegisterID result)
328 m_jit.and32(op1, op2, result);
331 m_jit.or32(op1, op2, result);
334 m_jit.xor32(op1, op2, result);
337 ASSERT_NOT_REACHED();
340 void shiftOp(NodeType op, MacroAssembler::RegisterID op1, int32_t shiftAmount, MacroAssembler::RegisterID result)
344 m_jit.rshift32(op1, Imm32(shiftAmount), result);
347 m_jit.lshift32(op1, Imm32(shiftAmount), result);
350 m_jit.urshift32(op1, Imm32(shiftAmount), result);
353 ASSERT_NOT_REACHED();
356 void shiftOp(NodeType op, MacroAssembler::RegisterID op1, MacroAssembler::RegisterID shiftAmount, MacroAssembler::RegisterID result)
360 m_jit.rshift32(op1, shiftAmount, result);
363 m_jit.lshift32(op1, shiftAmount, result);
366 m_jit.urshift32(op1, shiftAmount, result);
369 ASSERT_NOT_REACHED();
373 // Called once a node has completed code generation but prior to setting
374 // its result, to free up its children. (This must happen prior to setting
375 // the nodes result, since the node may have the same VirtualRegister as
376 // a child, and as such will use the same GenerationInfo).
377 void useChildren(Node&);
379 // These methods are called to initialize the GenerationInfo
380 // to describe the result of an operation.
// integerResult: record that `reg` now holds the node's result, either as a
// raw int32 (retained at SpillOrderInteger — needs boxing before spill) or
// as a boxed JS int32 (SpillOrderJS). Debug asserts verify the register's
// contents match the claimed format. NOTE(review): braces and the `} else {`
// between the two arms are elided in this chunk.
381 void integerResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatInteger)
383 Node& node = m_jit.graph()[nodeIndex];
386 VirtualRegister virtualRegister = node.virtualRegister;
387 GenerationInfo& info = m_generationInfo[virtualRegister];
389 if (format == DataFormatInteger) {
390 m_jit.jitAssertIsInt32(reg);
391 m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
392 info.initInteger(nodeIndex, node.refCount, reg);
394 ASSERT(format == DataFormatJSInteger);
395 m_jit.jitAssertIsJSInt32(reg);
396 m_gprs.retain(reg, virtualRegister, SpillOrderJS);
397 info.initJSValue(nodeIndex, node.refCount, reg, format);
// noResult: the node produces no value; initialize its GenerationInfo empty.
400 void noResult(NodeIndex nodeIndex)
402 Node& node = m_jit.graph()[nodeIndex];
405 VirtualRegister virtualRegister = node.virtualRegister;
406 GenerationInfo& info = m_generationInfo[virtualRegister];
407 info.initNone(nodeIndex, node.refCount);
// cellResult: `reg` holds a cell pointer (unboxed cell == boxed cell on
// JSVALUE64), retained at SpillOrderCell.
409 void cellResult(GPRReg reg, NodeIndex nodeIndex)
411 Node& node = m_jit.graph()[nodeIndex];
414 VirtualRegister virtualRegister = node.virtualRegister;
415 m_gprs.retain(reg, virtualRegister, SpillOrderCell);
416 GenerationInfo& info = m_generationInfo[virtualRegister];
417 info.initCell(nodeIndex, node.refCount, reg);
// jsValueResult: `reg` holds a boxed JSValue; a debug assert checks the
// contents when the caller claims the JSInteger format.
419 void jsValueResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatJS)
421 if (format == DataFormatJSInteger)
422 m_jit.jitAssertIsJSInt32(reg);
424 Node& node = m_jit.graph()[nodeIndex];
427 VirtualRegister virtualRegister = node.virtualRegister;
428 m_gprs.retain(reg, virtualRegister, SpillOrderJS);
429 GenerationInfo& info = m_generationInfo[virtualRegister];
430 info.initJSValue(nodeIndex, node.refCount, reg, format);
// doubleResult: `reg` is an FPR holding an unboxed double; retained at
// SpillOrderDouble since spilling requires boxing.
432 void doubleResult(FPRReg reg, NodeIndex nodeIndex)
434 Node& node = m_jit.graph()[nodeIndex];
437 VirtualRegister virtualRegister = node.virtualRegister;
438 m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
439 GenerationInfo& info = m_generationInfo[virtualRegister];
440 info.initDouble(nodeIndex, node.refCount, reg);
// initConstantInfo: constants occupy no machine register; mark the info
// as constant so fills can rematerialize cheaply.
442 void initConstantInfo(NodeIndex nodeIndex)
444 ASSERT(isInt32Constant(nodeIndex) || isDoubleConstant(nodeIndex) || isJSConstant(nodeIndex));
445 Node& node = m_jit.graph()[nodeIndex];
446 m_generationInfo[node.virtualRegister].initConstant(nodeIndex, node.refCount);
449 // These methods used to sort arguments into the correct registers.
450 template<GPRReg destA, GPRReg destB>
451 void setupTwoStubArgs(GPRReg srcA, GPRReg srcB)
453 // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
454 // (1) both are already in arg regs, the right way around.
455 // (2) both are already in arg regs, the wrong way around.
456 // (3) neither are currently in arg registers.
457 // (4) srcA in in its correct reg.
458 // (5) srcA in in the incorrect reg.
459 // (6) srcB in in its correct reg.
460 // (7) srcB in in the incorrect reg.
462 // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
463 // place (the MacroAssembler will omit redundant moves). This apporach will be safe in
464 // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
465 // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
468 // Handle the easy cases - two simple moves.
469 m_jit.move(JITCompiler::gprToRegisterID(srcA), JITCompiler::gprToRegisterID(destA));
470 m_jit.move(JITCompiler::gprToRegisterID(srcB), JITCompiler::gprToRegisterID(destB));
471 } else if (srcA != destB) {
472 // Handle the non-swap case - just put srcB in place first.
473 m_jit.move(JITCompiler::gprToRegisterID(srcB), JITCompiler::gprToRegisterID(destB));
474 m_jit.move(JITCompiler::gprToRegisterID(srcA), JITCompiler::gprToRegisterID(destA));
476 m_jit.swap(JITCompiler::gprToRegisterID(destB), JITCompiler::gprToRegisterID(destB));
478 template<FPRReg destA, FPRReg destB>
479 void setupTwoStubArgs(FPRReg srcA, FPRReg srcB)
481 // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
482 // (1) both are already in arg regs, the right way around.
483 // (2) both are already in arg regs, the wrong way around.
484 // (3) neither are currently in arg registers.
485 // (4) srcA in in its correct reg.
486 // (5) srcA in in the incorrect reg.
487 // (6) srcB in in its correct reg.
488 // (7) srcB in in the incorrect reg.
490 // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
491 // place (the MacroAssembler will omit redundant moves). This apporach will be safe in
492 // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
493 // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
496 // Handle the easy cases - two simple moves.
497 m_jit.moveDouble(JITCompiler::fprToRegisterID(srcA), JITCompiler::fprToRegisterID(destA));
498 m_jit.moveDouble(JITCompiler::fprToRegisterID(srcB), JITCompiler::fprToRegisterID(destB));
503 // Handle the non-swap case - just put srcB in place first.
504 m_jit.moveDouble(JITCompiler::fprToRegisterID(srcB), JITCompiler::fprToRegisterID(destB));
505 m_jit.moveDouble(JITCompiler::fprToRegisterID(srcA), JITCompiler::fprToRegisterID(destA));
509 ASSERT(srcB == destA && srcA == destB);
510 // Need to swap; pick a temporary register.
512 if (destA != JITCompiler::argumentFPR3 && destA != JITCompiler::argumentFPR3)
513 temp = JITCompiler::argumentFPR3;
514 else if (destA != JITCompiler::argumentFPR2 && destA != JITCompiler::argumentFPR2)
515 temp = JITCompiler::argumentFPR2;
517 ASSERT(destA != JITCompiler::argumentFPR1 && destA != JITCompiler::argumentFPR1);
518 temp = JITCompiler::argumentFPR1;
520 m_jit.moveDouble(JITCompiler::fprToRegisterID(destA), JITCompiler::fprToRegisterID(temp));
521 m_jit.moveDouble(JITCompiler::fprToRegisterID(destB), JITCompiler::fprToRegisterID(destA));
522 m_jit.moveDouble(JITCompiler::fprToRegisterID(temp), JITCompiler::fprToRegisterID(destB));
524 void setupStubArguments(GPRReg arg1, GPRReg arg2)
526 setupTwoStubArgs<JITCompiler::argumentGPR1, JITCompiler::argumentGPR2>(arg1, arg2);
// Marshal three GPR arguments into argumentGPR1..3. Tries, in turn, to move
// one argument directly into place and delegate the other two to
// setupTwoStubArgs; if all three block each other they must already occupy
// the three argument registers, and at most one swap fixes the order.
// NOTE(review): each branch's `return;` and several statements (including
// the post-swap arg2/arg3 bookkeeping) are elided in this chunk.
528 void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
530 // If neither of arg2/arg3 are in our way, then we can move arg1 into place.
531 // Then we can use setupTwoStubArgs to fix arg2/arg3.
532 if (arg2 != JITCompiler::argumentGPR1 && arg3 != JITCompiler::argumentGPR1) {
533 m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
534 setupTwoStubArgs<JITCompiler::argumentGPR2, JITCompiler::argumentGPR3>(arg2, arg3);
538 // If neither of arg1/arg3 are in our way, then we can move arg2 into place.
539 // Then we can use setupTwoStubArgs to fix arg1/arg3.
540 if (arg1 != JITCompiler::argumentGPR2 && arg3 != JITCompiler::argumentGPR2) {
541 m_jit.move(JITCompiler::gprToRegisterID(arg2), JITCompiler::argumentRegister2);
542 setupTwoStubArgs<JITCompiler::argumentGPR1, JITCompiler::argumentGPR3>(arg1, arg3);
546 // If neither of arg1/arg2 are in our way, then we can move arg3 into place.
547 // Then we can use setupTwoStubArgs to fix arg1/arg2.
548 if (arg1 != JITCompiler::argumentGPR3 && arg2 != JITCompiler::argumentGPR3) {
549 m_jit.move(JITCompiler::gprToRegisterID(arg3), JITCompiler::argumentRegister3);
550 setupTwoStubArgs<JITCompiler::argumentGPR1, JITCompiler::argumentGPR2>(arg1, arg2);
554 // If we get here, we haven't been able to move any of arg1/arg2/arg3.
555 // Since all three are blocked, then all three must already be in the argument register.
556 // But are they in the right ones?
558 // First, ensure arg1 is in place.
559 if (arg1 != JITCompiler::argumentGPR1) {
560 m_jit.swap(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
562 // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be.
563 ASSERT(arg2 == JITCompiler::argumentGPR1 || arg3 == JITCompiler::argumentGPR1);
564 // If arg2 was in argumentGPR1 it no longer is (due to the swap).
565 // Otherwise arg3 must have been. Mark him as moved.
566 if (arg2 == JITCompiler::argumentGPR1)
572 // Either arg2 & arg3 need swapping, or we're all done.
// NOTE(review): at this point arg2/arg3 occupy argumentGPR2/GPR3 in some
// order, so each parenthesized clause below likely intends `&&` rather
// than `||` (as written, the assertion is nearly vacuous) — confirm against
// upstream before changing.
573 ASSERT((arg2 == JITCompiler::argumentGPR2 || arg3 == JITCompiler::argumentGPR3)
574 || (arg2 == JITCompiler::argumentGPR3 || arg3 == JITCompiler::argumentGPR2));
576 if (arg2 != JITCompiler::argumentGPR2)
577 m_jit.swap(JITCompiler::argumentRegister2, JITCompiler::argumentRegister3);
580 // These methods add calls to C++ helper functions.
// Common shape: marshal arguments into the argument registers (the call
// frame always goes in argumentRegister0), call with an exception check,
// and move the return value into the requested result register. Identifier
// variants reuse the pointer variants via a function-pointer cast.
// NOTE(review): opening/closing braces of each overload are elided in this
// chunk; code lines are left untouched.
581 void callOperation(J_DFGOperation_EJP operation, GPRReg result, GPRReg arg1, void* pointer)
585 m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
586 m_jit.move(JITCompiler::TrustedImmPtr(pointer), JITCompiler::argumentRegister2);
587 m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
589 appendCallWithExceptionCheck(operation);
590 m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result));
592 void callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
594 callOperation((J_DFGOperation_EJP)operation, result, arg1, identifier);
596 void callOperation(J_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
600 m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
601 m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
603 appendCallWithExceptionCheck(operation);
604 m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result));
606 void callOperation(Z_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
610 m_jit.move(JITCompiler::gprToRegisterID(arg1), JITCompiler::argumentRegister1);
611 m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
613 appendCallWithExceptionCheck(operation);
614 m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result));
616 void callOperation(Z_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
620 setupStubArguments(arg1, arg2);
621 m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
623 appendCallWithExceptionCheck(operation);
624 m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result));
626 void callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
630 setupStubArguments(arg1, arg2);
631 m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
633 appendCallWithExceptionCheck(operation);
634 m_jit.move(JITCompiler::returnValueRegister, JITCompiler::gprToRegisterID(result));
// Void-returning variants: no result move after the call.
636 void callOperation(V_DFGOperation_EJJP operation, GPRReg arg1, GPRReg arg2, void* pointer)
640 setupStubArguments(arg1, arg2);
641 m_jit.move(JITCompiler::TrustedImmPtr(pointer), JITCompiler::argumentRegister3);
642 m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
644 appendCallWithExceptionCheck(operation);
646 void callOperation(V_DFGOperation_EJJI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier)
648 callOperation((V_DFGOperation_EJJP)operation, arg1, arg2, identifier);
650 void callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
654 setupStubArguments(arg1, arg2, arg3);
655 m_jit.move(JITCompiler::callFrameRegister, JITCompiler::argumentRegister0);
657 appendCallWithExceptionCheck(operation);
// Pure double->double operation: no call frame, no exception check.
659 void callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
663 setupTwoStubArgs<JITCompiler::argumentFPR0, JITCompiler::argumentFPR1>(arg1, arg2);
665 m_jit.appendCall(operation);
666 m_jit.moveDouble(JITCompiler::fpReturnValueRegister, JITCompiler::fprToRegisterID(result));
669 void appendCallWithExceptionCheck(const FunctionPtr& function)
671 m_jit.appendCallWithExceptionCheck(function, m_jit.graph()[m_compileIndex].exceptionInfo);
// Record a forward branch to a basic block whose label is not yet known.
674 void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination)
676 m_branches.append(BranchRecord(jump, destination));
// linkBranches(): patch every recorded jump to its block-head label once
// all blocks have been emitted. NOTE(review): the `void linkBranches()`
// signature line is elided in this chunk.
681 for (size_t i = 0; i < m_branches.size(); ++i) {
682 BranchRecord& branch = m_branches[i];
683 branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit);
// Debug dump of generation state; checkConsistency() is a no-op unless
// DFG_CONSISTENCY_CHECK is enabled.
688 void dump(const char* label = 0);
691 #if DFG_CONSISTENCY_CHECK
692 void checkConsistency();
694 void checkConsistency() {}
697 // The JIT, while also provides MacroAssembler functionality.
// NOTE(review): the `JITCompiler& m_jit;` member declaration itself is
// elided in this chunk.
699 // This flag is used to distinguish speculative and non-speculative
700 // code generation. This is significant when filling spilled values
701 // from the RegisterFile. When spilling we attempt to store information
702 // as to the type of boxed value being stored (int32, double, cell), and
703 // when filling on the speculative path we will retrieve this type info
704 // where available. On the non-speculative path, however, we cannot rely
705 // on the spill format info, since a value being loaded might have
706 // been spilled by either the speculative or non-speculative paths (where
707 // we entered the non-speculative path on an intervening bail-out), and
708 // the value may have been boxed differently on the two paths.
709 bool m_isSpeculative;
710 // The current node being generated.
712 NodeIndex m_compileIndex;
713 // Virtual and physical register maps.
714 Vector<GenerationInfo, 32> m_generationInfo;
715 RegisterBank<GPRReg, numberOfGPRs, SpillOrder, SpillOrderNone, SpillOrderMax> m_gprs;
716 RegisterBank<FPRReg, numberOfFPRs, SpillOrder, SpillOrderNone, SpillOrderMax> m_fprs;
// Per-basic-block entry labels, indexed by BlockIndex.
718 Vector<MacroAssembler::Label> m_blockHeads;
// A pending forward jump together with the block it targets; resolved by
// linkBranches(). NOTE(review): the `jump(jump)` initializer line and
// closing braces are elided in this chunk.
719 struct BranchRecord {
720 BranchRecord(MacroAssembler::Jump jump, BlockIndex destination)
722 , destination(destination)
726 MacroAssembler::Jump jump;
727 BlockIndex destination;
729 Vector<BranchRecord, 8> m_branches;
732 // === Operand types ===
734 // IntegerOperand, DoubleOperand and JSValueOperand.
736 // These classes are used to lock the operands to a node into machine
737 // registers. These classes implement a pattern of locking a value
738 // into register at the point of construction only if it is already in
739 // registers, and otherwise loading it lazily at the point it is first
740 // used. We do so in order to attempt to avoid spilling one operand
741 // in order to make space available for another.
// RAII wrapper locking a node's value into a GPR as an integer for the
// lifetime of the operand. The fill is lazy: done at construction only if
// the value is already in a register, otherwise deferred to the first gpr()
// call. NOTE(review): ctor/dtor braces, the destructor declaration line,
// and some member lines are elided in this chunk.
743 class IntegerOperand {
745 explicit IntegerOperand(JITCodeGenerator* jit, NodeIndex index)
748 , m_gprOrInvalid(InvalidGPRReg)
750 , m_format(DataFormatNone)
754 if (jit->isFilled(index))
// Destructor body: unlock the GPR so it may be reallocated.
760 ASSERT(m_gprOrInvalid != InvalidGPRReg);
761 m_jit->unlock(m_gprOrInvalid);
764 NodeIndex index() const
// gpr(): fill on first use; also records the fill format in m_format.
771 if (m_gprOrInvalid == InvalidGPRReg)
772 m_gprOrInvalid = m_jit->fillInteger(index(), m_format);
773 return m_gprOrInvalid;
// format accessor: forces a fill so m_format is valid before reading it.
778 gpr(); // m_format is set when m_gpr is locked.
779 ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
783 MacroAssembler::RegisterID registerID()
785 return JITCompiler::gprToRegisterID(gpr());
789 JITCodeGenerator* m_jit;
791 GPRReg m_gprOrInvalid;
// RAII wrapper locking a node's value into an FPR as a double; same lazy
// fill pattern as IntegerOperand. NOTE(review): braces, the destructor
// declaration line, and some member lines are elided in this chunk.
795 class DoubleOperand {
797 explicit DoubleOperand(JITCodeGenerator* jit, NodeIndex index)
800 , m_fprOrInvalid(InvalidFPRReg)
// Eagerly fill only when the value is already held as a double.
803 if (jit->isFilledDouble(index))
// Destructor body: unlock the FPR.
809 ASSERT(m_fprOrInvalid != InvalidFPRReg);
810 m_jit->unlock(m_fprOrInvalid);
813 NodeIndex index() const
// fpr(): fill on first use.
820 if (m_fprOrInvalid == InvalidFPRReg)
821 m_fprOrInvalid = m_jit->fillDouble(index());
822 return m_fprOrInvalid;
825 MacroAssembler::FPRegisterID registerID()
827 return JITCompiler::fprToRegisterID(fpr());
831 JITCodeGenerator* m_jit;
833 FPRReg m_fprOrInvalid;
// RAII wrapper locking a node's value into a GPR as a boxed JSValue; same
// lazy fill pattern as IntegerOperand. NOTE(review): braces, the destructor
// declaration line, and some member lines are elided in this chunk.
836 class JSValueOperand {
838 explicit JSValueOperand(JITCodeGenerator* jit, NodeIndex index)
841 , m_gprOrInvalid(InvalidGPRReg)
844 if (jit->isFilled(index))
// Destructor body: unlock the GPR.
850 ASSERT(m_gprOrInvalid != InvalidGPRReg);
851 m_jit->unlock(m_gprOrInvalid);
854 NodeIndex index() const
// gpr(): fill on first use.
861 if (m_gprOrInvalid == InvalidGPRReg)
862 m_gprOrInvalid = m_jit->fillJSValue(index());
863 return m_gprOrInvalid;
866 MacroAssembler::RegisterID registerID()
868 return JITCompiler::gprToRegisterID(gpr());
872 JITCodeGenerator* m_jit;
874 GPRReg m_gprOrInvalid;
878 // === Temporaries ===
880 // These classes are used to allocate temporary registers.
881 // A mechanism is provided to attempt to reuse the registers
882 // currently allocated to child nodes whose value is consumed
883 // by, and not live after, this operation.
// RAII temporary GPR. The operand-taking constructors (defined out of line)
// try to reuse a dying operand's register instead of allocating a new one.
// NOTE(review): class braces, access specifiers, the destructor declaration,
// and the m_gpr member line are elided in this chunk.
887 GPRTemporary(JITCodeGenerator*);
888 GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&);
889 GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&, SpeculateIntegerOperand&);
890 GPRTemporary(JITCodeGenerator*, IntegerOperand&);
891 GPRTemporary(JITCodeGenerator*, IntegerOperand&, IntegerOperand&);
892 GPRTemporary(JITCodeGenerator*, SpeculateCellOperand&);
893 GPRTemporary(JITCodeGenerator*, JSValueOperand&);
// Destructor body: unlock the temporary.
897 m_jit->unlock(gpr());
// gpr() accessor body.
902 ASSERT(m_gpr != InvalidGPRReg);
906 MacroAssembler::RegisterID registerID()
908 ASSERT(m_gpr != InvalidGPRReg);
909 return JITCompiler::gprToRegisterID(m_gpr);
// Protected constructor used by GPRResult: adopts an already-locked GPR.
913 GPRTemporary(JITCodeGenerator* jit, GPRReg lockedGPR)
920 JITCodeGenerator* m_jit;
// RAII temporary FPR, mirroring GPRTemporary. NOTE(review): the class
// header, braces, destructor declaration, and m_fpr member line are elided
// in this chunk.
926 FPRTemporary(JITCodeGenerator*);
927 FPRTemporary(JITCodeGenerator*, DoubleOperand&);
928 FPRTemporary(JITCodeGenerator*, DoubleOperand&, DoubleOperand&);
// Destructor body: unlock the temporary.
932 m_jit->unlock(fpr());
// fpr() accessor body.
937 ASSERT(m_fpr != InvalidFPRReg);
941 MacroAssembler::FPRegisterID registerID()
943 ASSERT(m_fpr != InvalidFPRReg);
944 return JITCompiler::fprToRegisterID(m_fpr);
// Protected constructor used by FPRResult: adopts an already-locked FPR.
948 FPRTemporary(JITCodeGenerator* jit, FPRReg lockedFPR)
955 JITCodeGenerator* m_jit;
962 // These classes lock the result of a call to a C++ helper function.
// GPRResult/FPRResult lock the fixed return-value register before adopting
// it as a temporary, so it cannot be reallocated while the result is live.
964 class GPRResult : public GPRTemporary {
966 GPRResult(JITCodeGenerator* jit)
967 : GPRTemporary(jit, lockedResult(jit))
// Helper: lock the return-value GPR and hand it to the base constructor.
972 static GPRReg lockedResult(JITCodeGenerator* jit)
974 jit->lock(JITCompiler::returnValueGPR);
975 return JITCompiler::returnValueGPR;
979 class FPRResult : public FPRTemporary {
981 FPRResult(JITCodeGenerator* jit)
982 : FPRTemporary(jit, lockedResult(jit))
// Helper: lock the FP return-value register.
987 static FPRReg lockedResult(JITCodeGenerator* jit)
989 jit->lock(JITCompiler::returnValueFPR);
990 return JITCompiler::returnValueFPR;
994 } } // namespace JSC::DFG