2 * Copyright (C) 2011 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef DFGJITCodeGenerator_h
27 #define DFGJITCodeGenerator_h
31 #include "CodeBlock.h"
32 #include <dfg/DFGGenerationInfo.h>
33 #include <dfg/DFGGraph.h>
34 #include <dfg/DFGJITCompiler.h>
35 #include <dfg/DFGNode.h>
36 #include <dfg/DFGOperations.h>
37 #include <dfg/DFGRegisterBank.h>
39 namespace JSC { namespace DFG {
41 class SpeculateIntegerOperand;
42 class SpeculateStrictInt32Operand;
43 class SpeculateDoubleOperand;
44 class SpeculateCellOperand;
45 class SpeculateBooleanOperand;
48 // === JITCodeGenerator ===
50 // This class provides common infrastructure used by the speculative &
51 // non-speculative JITs. Provides common mechanisms for virtual and
52 // physical register management, calls out from JIT code to helper
// NOTE(review): this chunk is a lossy extraction of a WebKit DFG JIT header.
// Blank/brace-only lines (and some statements) are missing throughout, and
// each surviving line still carries its original file line number as a
// leading token. Comments below annotate only what is visible; verify any
// detail against the upstream file.
//
// Common infrastructure shared by the speculative and non-speculative DFG
// JITs: virtual/physical register management, plus helpers for calling out
// from JIT code to C++ routines.
54 class JITCodeGenerator {
56 typedef MacroAssembler::TrustedImm32 TrustedImm32;
57 typedef MacroAssembler::Imm32 Imm32;
59 // These constants are used to set priorities for spill order for
60 // the register allocator.
// Lower value = preferred spill victim (cheaper to spill and/or refill).
62 SpillOrderConstant = 1, // no spill, and cheap fill
63 SpillOrderSpilled = 2, // no spill
64 SpillOrderJS = 4, // needs spill
65 SpillOrderCell = 4, // needs spill
66 SpillOrderInteger = 5, // needs spill and box
67 SpillOrderCell = 4, // needs spill
70 enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly };
// Load a node's value into a machine register; defined out of line.
74 GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
75 FPRReg fillDouble(NodeIndex);
76 GPRReg fillJSValue(NodeIndex);
78 // lock and unlock GPR & FPR registers.
// NOTE(review): the lock() overloads and these unlock() bodies are missing
// from this extraction (presumably m_gprs.unlock(reg) / m_fprs.unlock(reg)).
87 void unlock(GPRReg reg)
91 void unlock(FPRReg reg)
98 bool canReuse(NodeIndex nodeIndex)
100 VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister();
101 GenerationInfo& info = m_generationInfo[virtualRegister];
102 return info.canReuse();
// Take over an already-locked register as this node's result register.
// NOTE(review): both reuse() bodies are missing from this extraction.
104 GPRReg reuse(GPRReg reg)
109 FPRReg reuse(FPRReg reg)
115 // Allocate a gpr/fpr.
// allocate(): obtain a free GPR; if the register bank had to evict a value,
// 'spillMe' names the victim, which must be written to the RegisterFile.
// NOTE(review): the 'GPRReg allocate()' signature line and the spill(spillMe)
// call under the if are missing from this extraction.
118 VirtualRegister spillMe;
119 GPRReg gpr = m_gprs.allocate(spillMe);
120 if (spillMe != InvalidVirtualRegister)
// Allocate one specific GPR, spilling its current occupant if necessary.
124 GPRReg allocate(GPRReg specific)
126 VirtualRegister spillMe = m_gprs.allocateSpecific(specific);
127 if (spillMe != InvalidVirtualRegister)
// tryAllocate(): like allocate(), but fails rather than spilling.
133 return m_gprs.tryAllocate();
// fprAllocate(): FPR analogue of allocate().
137 VirtualRegister spillMe;
138 FPRReg fpr = m_fprs.allocate(spillMe);
139 if (spillMe != InvalidVirtualRegister)
144 // Check whether a VirtualRegister is currently in a machine register.
145 // We use this when filling operands to fill those that are already in
146 // machine registers first (by locking VirtualRegisters that are already
147 // in machine register before filling those that are not we attempt to
148 // avoid spilling values we will need immediately).
149 bool isFilled(NodeIndex nodeIndex)
151 VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister();
152 GenerationInfo& info = m_generationInfo[virtualRegister];
153 return info.registerFormat() != DataFormatNone;
155 bool isFilledDouble(NodeIndex nodeIndex)
157 VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister();
158 GenerationInfo& info = m_generationInfo[virtualRegister];
159 return info.registerFormat() == DataFormatDouble;
162 // Called on an operand once it has been consumed by a parent node.
163 void use(NodeIndex nodeIndex)
165 VirtualRegister virtualRegister = m_jit.graph()[nodeIndex].virtualRegister();
166 GenerationInfo& info = m_generationInfo[virtualRegister];
168 // use() returns true when the value becomes dead, and any
169 // associated resources may be freed.
// NOTE(review): the guard the comment above refers to (presumably
// 'if (!info.use()) return;') is missing from this extraction.
173 // Release the associated machine registers.
// Doubles live in FPRs; every other live format occupies a GPR.
174 DataFormat registerFormat = info.registerFormat();
175 if (registerFormat == DataFormatDouble)
176 m_fprs.release(info.fpr());
177 else if (registerFormat != DataFormatNone)
178 m_gprs.release(info.gpr());
// Emit a generational write barrier for a store into ownerGPR's object;
// defined out of line.
181 static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR, WriteBarrierUseKind);
183 static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg)
185 if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0)
186 return GPRInfo::regT0;
188 if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1)
189 return GPRInfo::regT1;
191 if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2)
192 return GPRInfo::regT2;
194 return GPRInfo::regT3;
// Construct the code generator over an existing JITCompiler.
// NOTE(review): the leading initializer (presumably ': m_jit(jit)') and the
// m_compileIndex initializer are missing from this extraction.
198 JITCodeGenerator(JITCompiler& jit, bool isSpeculative)
200 , m_isSpeculative(isSpeculative)
// One GenerationInfo slot per callee register in the code block.
202 , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
// One entry label per basic block, filled in as blocks are generated.
203 , m_blockHeads(jit.graph().m_blocks.size())
// Reset all GenerationInfo entries; defined out of line.
207 void clearGenerationInfo();
209 // These methods are used when generating 'unexpected'
210 // calls out from JIT code to C++ helper routines -
211 // they spill all live values to the appropriate
212 // slots in the RegisterFile without changing any state
213 // in the GenerationInfo.
// Write one GPR-resident value to its RegisterFile slot without updating
// GenerationInfo. 'exclude' is a register the caller will keep live.
214 void silentSpillGPR(VirtualRegister spillMe, GPRReg exclude = InvalidGPRReg)
216 GenerationInfo& info = m_generationInfo[spillMe];
217 ASSERT(info.registerFormat() != DataFormatNone);
218 ASSERT(info.registerFormat() != DataFormatDouble);
// Nothing to do for constants / already-spilled values, or for the excluded
// register. NOTE(review): the 'return;' under this guard is missing from
// this extraction.
220 if (!info.needsSpill() || (info.gpr() == exclude))
223 DataFormat registerFormat = info.registerFormat();
// Integers are stored as raw 32-bit values; everything else as a full
// pointer-width JSValue/cell. NOTE(review): the '} else {' between the two
// stores is missing from this extraction.
225 if (registerFormat == DataFormatInteger) {
226 m_jit.store32(info.gpr(), JITCompiler::addressFor(spillMe));
228 ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell);
229 m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
// Write one FPR-resident double to its RegisterFile slot without updating
// GenerationInfo. NOTE(review): the 'return;' lines under the two guards
// are missing from this extraction.
232 void silentSpillFPR(VirtualRegister spillMe, FPRReg exclude = InvalidFPRReg)
234 GenerationInfo& info = m_generationInfo[spillMe];
235 ASSERT(info.registerFormat() == DataFormatDouble);
237 if (info.fpr() == exclude)
239 if (!info.needsSpill()) {
240 // it's either a constant or it's already been spilled
241 ASSERT(m_jit.graph()[info.nodeIndex()].isConstant() || info.spillFormat() != DataFormatNone);
245 // it's neither a constant nor has it been spilled.
246 ASSERT(!m_jit.graph()[info.nodeIndex()].isConstant());
247 ASSERT(info.spillFormat() == DataFormatNone);
// Store the raw double; silentFillFPR reloads it with loadDouble.
249 m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
// Reload one GPR value previously saved by silentSpillGPR (or rematerialize
// a constant). NOTE(review): the 'return;' after the exclude guard and the
// else-branch lines between the cases are missing from this extraction.
252 void silentFillGPR(VirtualRegister spillMe, GPRReg exclude = InvalidGPRReg)
254 GenerationInfo& info = m_generationInfo[spillMe];
255 if (info.gpr() == exclude)
258 NodeIndex nodeIndex = info.nodeIndex();
259 Node& node = m_jit.graph()[nodeIndex];
260 ASSERT(info.registerFormat() != DataFormatNone);
261 ASSERT(info.registerFormat() != DataFormatDouble);
262 DataFormat registerFormat = info.registerFormat();
// Integers: rematerialize int32 constants as immediates, otherwise reload
// the raw 32-bit slot.
264 if (registerFormat == DataFormatInteger) {
265 if (node.isConstant()) {
266 ASSERT(isInt32Constant(nodeIndex));
267 m_jit.move(Imm32(valueOfInt32Constant(nodeIndex)), info.gpr());
269 m_jit.load32(JITCompiler::addressFor(spillMe), info.gpr());
// JSValues/cells: rematerialize as encoded immediates, otherwise reload the
// full pointer-width slot.
273 if (node.isConstant())
274 m_jit.move(valueOfJSConstantAsImmPtr(nodeIndex), info.gpr());
276 ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell);
277 m_jit.loadPtr(JITCompiler::addressFor(spillMe), info.gpr());
// Reload one FPR double saved by silentSpillFPR (or rematerialize a number
// constant). 'canTrample' is a GPR the caller permits us to clobber while
// moving bits into the FPR. NOTE(review): 'return;' lines after the guard
// and after each handled case are missing from this extraction.
280 void silentFillFPR(VirtualRegister spillMe, GPRReg canTrample, FPRReg exclude = InvalidFPRReg)
282 GenerationInfo& info = m_generationInfo[spillMe];
283 if (info.fpr() == exclude)
286 NodeIndex nodeIndex = info.nodeIndex();
287 Node& node = m_jit.graph()[nodeIndex];
288 ASSERT(info.registerFormat() == DataFormatDouble);
// Constants: materialize the double's bit pattern via the trample GPR.
290 if (node.isConstant()) {
291 ASSERT(isNumberConstant(nodeIndex));
292 m_jit.move(JITCompiler::ImmPtr(bitwise_cast<void*>(valueOfNumberConstant(nodeIndex))), canTrample);
293 m_jit.movePtrToDouble(canTrample, info.fpr());
// Previously spilled as a boxed JSValue: reload and unbox via the GPR.
297 if (info.spillFormat() != DataFormatNone) {
298 // it was already spilled previously, which means we need unboxing.
299 ASSERT(info.spillFormat() & DataFormatJS);
300 m_jit.loadPtr(JITCompiler::addressFor(spillMe), canTrample);
301 unboxDouble(canTrample, info.fpr());
// Otherwise silentSpillFPR stored a raw double; reload it directly.
305 m_jit.loadDouble(JITCompiler::addressFor(spillMe), info.fpr());
308 void silentSpillAllRegisters(GPRReg exclude)
310 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
311 if (iter.name() != InvalidVirtualRegister)
312 silentSpillGPR(iter.name(), exclude);
314 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
315 if (iter.name() != InvalidVirtualRegister)
316 silentSpillFPR(iter.name());
319 void silentSpillAllRegisters(FPRReg exclude)
321 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
322 if (iter.name() != InvalidVirtualRegister)
323 silentSpillGPR(iter.name());
325 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
326 if (iter.name() != InvalidVirtualRegister)
327 silentSpillFPR(iter.name(), exclude);
330 void silentFillAllRegisters(GPRReg exclude)
332 GPRReg canTrample = GPRInfo::regT0;
333 if (exclude == GPRInfo::regT0)
334 canTrample = GPRInfo::regT1;
336 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
337 if (iter.name() != InvalidVirtualRegister)
338 silentFillFPR(iter.name(), canTrample);
340 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
341 if (iter.name() != InvalidVirtualRegister)
342 silentFillGPR(iter.name(), exclude);
345 void silentFillAllRegisters(FPRReg exclude)
347 GPRReg canTrample = GPRInfo::regT0;
349 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
350 if (iter.name() != InvalidVirtualRegister) {
351 ASSERT_UNUSED(exclude, iter.regID() != exclude);
352 silentFillFPR(iter.name(), canTrample, exclude);
355 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
356 if (iter.name() != InvalidVirtualRegister)
357 silentFillGPR(iter.name());
361 // These methods convert between doubles, and doubles boxed and JSValues.
// Box the double in 'fpr' into 'gpr' as an encoded JSValue; returns gpr.
362 GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
364 return m_jit.boxDouble(fpr, gpr);
// Decode the boxed double in 'gpr' into 'fpr'; returns fpr.
366 FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
368 return m_jit.unboxDouble(gpr, fpr);
// Convenience overload: box into a freshly allocated GPR (may spill).
370 GPRReg boxDouble(FPRReg fpr)
372 return boxDouble(fpr, allocate());
375 // Spill a VirtualRegister to the RegisterFile.
376 void spill(VirtualRegister spillMe)
378 GenerationInfo& info = m_generationInfo[spillMe];
380 // Check the GenerationInfo to see if this value needs writing
381 // to the RegisterFile - if not, mark it as spilled & return.
// NOTE(review): the body of this guard (mark-spilled + 'return;') is
// missing from this extraction.
382 if (!info.needsSpill()) {
387 DataFormat spillFormat = info.registerFormat();
388 if (spillFormat == DataFormatDouble) {
389 // All values are spilled as JSValues, so box the double via a temporary gpr.
390 GPRReg gpr = boxDouble(info.fpr());
391 m_jit.storePtr(gpr, JITCompiler::addressFor(spillMe));
// Record the spilled form so refills know to unbox.
// NOTE(review): the unlock(gpr) and 'return;' lines appear to be missing
// from this extraction.
393 info.spill(DataFormatJSDouble);
397 // The following code handles JSValues, int32s, and cells.
398 ASSERT(spillFormat == DataFormatInteger || spillFormat == DataFormatCell || spillFormat & DataFormatJS);
400 GPRReg reg = info.gpr();
401 // We need to box int32 and cell values ...
402 // but on JSVALUE64 boxing a cell is a no-op!
// NaN-box the int32 by OR-ing in the tag-type-number bits.
403 if (spillFormat == DataFormatInteger)
404 m_jit.orPtr(GPRInfo::tagTypeNumberRegister, reg);
406 // Spill the value, and record it as spilled in its boxed form.
407 m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
408 info.spill((DataFormat)(spillFormat | DataFormatJS));
// Static type queries about a node's value; defined out of line.
411 bool isStrictInt32(NodeIndex);
413 bool isKnownInteger(NodeIndex);
414 bool isKnownNumeric(NodeIndex);
415 bool isKnownCell(NodeIndex);
417 bool isKnownNotInteger(NodeIndex);
418 bool isKnownNotNumber(NodeIndex);
420 bool isKnownBoolean(NodeIndex);
422 // Checks/accessors for constant values.
// Thin forwarders onto the JITCompiler's constant tables.
423 bool isConstant(NodeIndex nodeIndex) { return m_jit.isConstant(nodeIndex); }
424 bool isJSConstant(NodeIndex nodeIndex) { return m_jit.isJSConstant(nodeIndex); }
425 bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.isInt32Constant(nodeIndex); }
426 bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.isDoubleConstant(nodeIndex); }
427 bool isNumberConstant(NodeIndex nodeIndex) { return m_jit.isNumberConstant(nodeIndex); }
428 bool isBooleanConstant(NodeIndex nodeIndex) { return m_jit.isBooleanConstant(nodeIndex); }
429 int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.valueOfInt32Constant(nodeIndex); }
430 double valueOfNumberConstant(NodeIndex nodeIndex) { return m_jit.valueOfNumberConstant(nodeIndex); }
431 JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.valueOfJSConstant(nodeIndex); }
432 bool valueOfBooleanConstant(NodeIndex nodeIndex) { return m_jit.valueOfBooleanConstant(nodeIndex); }
// True only for a constant node whose value is the JS null.
// NOTE(review): the 'return false;' under the guard is missing from this
// extraction.
433 bool isNullConstant(NodeIndex nodeIndex)
435 if (!isConstant(nodeIndex))
437 return valueOfJSConstant(nodeIndex).isNull();
// Look up an identifier in the code block's identifier table.
440 Identifier* identifier(unsigned index)
442 return &m_jit.codeBlock()->identifier(index);
445 // Spill all VirtualRegisters back to the RegisterFile.
// NOTE(review): the loop bodies (spill + release for each occupied
// register) are missing from this extraction.
446 void flushRegisters()
448 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
449 if (iter.name() != InvalidVirtualRegister) {
454 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
455 if (iter.name() != InvalidVirtualRegister) {
463 // Used to ASSERT flushRegisters() has been called prior to
464 // calling out from JIT code to a C helper function.
// NOTE(review): the signature (presumably a debug-only 'bool isFlushed()')
// and the 'return false;' / 'return true;' lines are missing from this
// extraction.
467 for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
468 if (iter.name() != InvalidVirtualRegister)
471 for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
472 if (iter.name() != InvalidVirtualRegister)
479 MacroAssembler::ImmPtr valueOfJSConstantAsImmPtr(NodeIndex nodeIndex)
481 return MacroAssembler::ImmPtr(JSValue::encode(valueOfJSConstant(nodeIndex)));
484 // Helper functions to enable code sharing in implementations of bit/shift ops.
// NOTE(review): throughout the four helpers below, the 'switch (op)' lines,
// 'case'/'break' labels, and closing braces are missing from this
// extraction; only one emitter line per case survives.
// Immediate-operand bitwise op: result = op1 <op> imm.
485 void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
489 m_jit.and32(Imm32(imm), op1, result);
492 m_jit.or32(Imm32(imm), op1, result);
495 m_jit.xor32(Imm32(imm), op1, result);
498 ASSERT_NOT_REACHED();
// Register-operand bitwise op: result = op1 <op> op2.
501 void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
505 m_jit.and32(op1, op2, result);
508 m_jit.or32(op1, op2, result);
511 m_jit.xor32(op1, op2, result);
514 ASSERT_NOT_REACHED();
// Immediate-count shift: arithmetic right, left, logical right.
517 void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
521 m_jit.rshift32(op1, Imm32(shiftAmount), result);
524 m_jit.lshift32(op1, Imm32(shiftAmount), result);
527 m_jit.urshift32(op1, Imm32(shiftAmount), result);
530 ASSERT_NOT_REACHED();
// Register-count shift variant.
533 void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
537 m_jit.rshift32(op1, shiftAmount, result);
540 m_jit.lshift32(op1, shiftAmount, result);
543 m_jit.urshift32(op1, shiftAmount, result);
546 ASSERT_NOT_REACHED();
550 // Returns the node index of the branch node if peephole is okay, NoNode otherwise.
551 NodeIndex detectPeepHoleBranch()
// The candidate branch is the last node of the current basic block.
553 NodeIndex lastNodeIndex = m_jit.graph().m_blocks[m_block]->end - 1;
555 // Check that no intervening nodes will be generated.
// NOTE(review): the 'return NoNode;' under this if is missing from this
// extraction.
556 for (NodeIndex index = m_compileIndex + 1; index < lastNodeIndex; ++index) {
557 if (m_jit.graph()[index].shouldGenerate())
561 // Check if the lastNode is a branch on this node.
562 Node& lastNode = m_jit.graph()[lastNodeIndex];
563 return lastNode.op == Branch && lastNode.child1() == m_compileIndex ? lastNodeIndex : NoNode;
// Inline-cached property access; all defined out of line. 'slowPathTarget'
// lets a caller chain an extra jump into the slow path.
566 JITCompiler::Call cachedGetById(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), NodeType = GetById);
567 void cachedPutById(GPRReg baseGPR, GPRReg valueGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
568 void cachedGetMethod(GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
// Null/undefined comparisons, in peephole-branch and value-producing forms.
570 void nonSpeculativeNonPeepholeCompareNull(NodeIndex operand, bool invert = false);
571 void nonSpeculativePeepholeBranchNull(NodeIndex operand, NodeIndex branchNodeIndex, bool invert = false);
572 bool nonSpeculativeCompareNull(Node&, NodeIndex operand, bool invert = false);
// Generic relational comparisons backed by a C++ helper for the slow case.
574 void nonSpeculativePeepholeBranch(Node&, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition, Z_DFGOperation_EJJ helperFunction);
575 void nonSpeculativeNonPeepholeCompare(Node&, MacroAssembler::RelationalCondition, Z_DFGOperation_EJJ helperFunction);
576 bool nonSpeculativeCompare(Node&, MacroAssembler::RelationalCondition, Z_DFGOperation_EJJ helperFunction);
// Strict equality (===), same three forms.
578 void nonSpeculativePeepholeStrictEq(Node&, NodeIndex branchNodeIndex, bool invert = false);
579 void nonSpeculativeNonPeepholeStrictEq(Node&, bool invert = false);
580 bool nonSpeculativeStrictEq(Node&, bool invert = false);
582 void emitBranch(Node&);
584 void nonSpeculativeLogicalNot(Node&);
586 MacroAssembler::Address addressOfCallData(int idx)
588 return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + idx) * static_cast<int>(sizeof(Register)));
// Emit a JS function call for a Call/Construct node; defined out of line.
591 void emitCall(Node&);
// Record a jump taken when a speculation fails (exits to the
// non-speculative path); defined out of line.
593 void speculationCheck(MacroAssembler::Jump jumpToFail);
595 // Called once a node has completed code generation but prior to setting
596 // its result, to free up its children. (This must happen prior to setting
597 // the nodes result, since the node may have the same VirtualRegister as
598 // a child, and as such will use the same GenerationInfo).
599 void useChildren(Node&);
601 // These methods are called to initialize the GenerationInfo
602 // to describe the result of an operation.
// Record an int32 result in 'reg', either raw (DataFormatInteger) or as a
// boxed JSValue (DataFormatJSInteger).
// NOTE(review): the useChildren(node) call under the mode check and the
// '} else {' between the two format cases are missing from this extraction.
603 void integerResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatInteger, UseChildrenMode mode = CallUseChildren)
605 Node& node = m_jit.graph()[nodeIndex];
606 if (mode == CallUseChildren)
609 VirtualRegister virtualRegister = node.virtualRegister();
610 GenerationInfo& info = m_generationInfo[virtualRegister];
612 if (format == DataFormatInteger) {
613 m_jit.jitAssertIsInt32(reg);
614 m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
615 info.initInteger(nodeIndex, node.refCount(), reg);
617 ASSERT(format == DataFormatJSInteger);
618 m_jit.jitAssertIsJSInt32(reg);
619 m_gprs.retain(reg, virtualRegister, SpillOrderJS);
620 info.initJSValue(nodeIndex, node.refCount(), reg, format);
// Convenience overload with the default (raw int32) format.
623 void integerResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode)
625 integerResult(reg, nodeIndex, DataFormatInteger, mode);
// Record that a node produces no value; still releases its children unless
// the caller already did. NOTE(review): the 'return;' under the
// explicit-mode check and the useChildren(node) call are missing from this
// extraction.
627 void noResult(NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
629 if (mode == UseChildrenCalledExplicitly)
631 Node& node = m_jit.graph()[nodeIndex];
// Record a cell (object pointer) result held in 'reg'.
// NOTE(review): throughout these result-setters the useChildren(node) call
// under the mode check is missing from this extraction.
634 void cellResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
636 Node& node = m_jit.graph()[nodeIndex];
637 if (mode == CallUseChildren)
640 VirtualRegister virtualRegister = node.virtualRegister();
641 m_gprs.retain(reg, virtualRegister, SpillOrderCell);
642 GenerationInfo& info = m_generationInfo[virtualRegister];
643 info.initCell(nodeIndex, node.refCount(), reg);
// Record a boxed JSValue result held in 'reg'.
645 void jsValueResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
647 if (format == DataFormatJSInteger)
648 m_jit.jitAssertIsJSInt32(reg);
650 Node& node = m_jit.graph()[nodeIndex];
651 if (mode == CallUseChildren)
654 VirtualRegister virtualRegister = node.virtualRegister();
655 m_gprs.retain(reg, virtualRegister, SpillOrderJS);
656 GenerationInfo& info = m_generationInfo[virtualRegister];
657 info.initJSValue(nodeIndex, node.refCount(), reg, format);
// Convenience overload with the default (generic JS) format.
659 void jsValueResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode)
661 jsValueResult(reg, nodeIndex, DataFormatJS, mode);
// Record a raw double result held in FPR 'reg'.
663 void doubleResult(FPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
665 Node& node = m_jit.graph()[nodeIndex];
666 if (mode == CallUseChildren)
669 VirtualRegister virtualRegister = node.virtualRegister();
670 m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
671 GenerationInfo& info = m_generationInfo[virtualRegister];
672 info.initDouble(nodeIndex, node.refCount(), reg);
674 void initConstantInfo(NodeIndex nodeIndex)
676 ASSERT(isInt32Constant(nodeIndex) || isNumberConstant(nodeIndex) || isJSConstant(nodeIndex));
677 Node& node = m_jit.graph()[nodeIndex];
678 m_generationInfo[node.virtualRegister()].initConstant(nodeIndex, node.refCount());
681 // These methods used to sort arguments into the correct registers.
// Move srcA into destA and srcB into destB, coping with the case where the
// sources already occupy the destinations in the wrong order.
682 template<GPRReg destA, GPRReg destB>
683 void setupTwoStubArgs(GPRReg srcA, GPRReg srcB)
685 // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
686 // (1) both are already in arg regs, the right way around.
687 // (2) both are already in arg regs, the wrong way around.
688 // (3) neither are currently in arg registers.
689 // (4) srcA is in its correct reg.
690 // (5) srcA is in the incorrect reg.
691 // (6) srcB is in its correct reg.
692 // (7) srcB is in the incorrect reg.
694 // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
695 // place (the MacroAssembler will omit redundant moves). This approach will be safe in
696 // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
697 // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
// NOTE(review): the 'if (srcB != destA) {' line opening this cascade is
// missing from this extraction.
700 // Handle the easy cases - two simple moves.
701 m_jit.move(srcA, destA);
702 m_jit.move(srcB, destB);
703 } else if (srcA != destB) {
704 // Handle the non-swap case - just put srcB in place first.
705 m_jit.move(srcB, destB);
706 m_jit.move(srcA, destA);
// Remaining case: full swap (srcA == destB and srcB == destA).
708 m_jit.swap(destA, destB);
710 template<FPRReg destA, FPRReg destB>
711 void setupTwoStubArgs(FPRReg srcA, FPRReg srcB)
713 // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
714 // (1) both are already in arg regs, the right way around.
715 // (2) both are already in arg regs, the wrong way around.
716 // (3) neither are currently in arg registers.
717 // (4) srcA in in its correct reg.
718 // (5) srcA in in the incorrect reg.
719 // (6) srcB in in its correct reg.
720 // (7) srcB in in the incorrect reg.
722 // The trivial approach is to simply emit two moves, to put srcA in place then srcB in
723 // place (the MacroAssembler will omit redundant moves). This apporach will be safe in
724 // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2
725 // (requires a swap) and 7 (must move srcB first, to avoid trampling.)
728 // Handle the easy cases - two simple moves.
729 m_jit.moveDouble(srcA, destA);
730 m_jit.moveDouble(srcB, destB);
735 // Handle the non-swap case - just put srcB in place first.
736 m_jit.moveDouble(srcB, destB);
737 m_jit.moveDouble(srcA, destA);
741 ASSERT(srcB == destA && srcA == destB);
742 // Need to swap; pick a temporary register.
744 if (destA != FPRInfo::argumentFPR3 && destA != FPRInfo::argumentFPR3)
745 temp = FPRInfo::argumentFPR3;
746 else if (destA != FPRInfo::argumentFPR2 && destA != FPRInfo::argumentFPR2)
747 temp = FPRInfo::argumentFPR2;
749 ASSERT(destA != FPRInfo::argumentFPR1 && destA != FPRInfo::argumentFPR1);
750 temp = FPRInfo::argumentFPR1;
752 m_jit.moveDouble(destA, temp);
753 m_jit.moveDouble(destB, destA);
754 m_jit.moveDouble(temp, destB);
// Route two stub arguments into argumentGPR1/argumentGPR2.
756 void setupStubArguments(GPRReg arg1, GPRReg arg2)
758 setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
// Route three stub arguments into argumentGPR1..3, trying each "move one,
// fix the other two" ordering before falling back to swaps.
// NOTE(review): the 'return;' lines after each successful branch, and the
// bookkeeping lines in the swap fallback (e.g. reassigning arg2), are
// missing from this extraction.
760 void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
762 // If neither of arg2/arg3 are in our way, then we can move arg1 into place.
763 // Then we can use setupTwoStubArgs to fix arg2/arg3.
764 if (arg2 != GPRInfo::argumentGPR1 && arg3 != GPRInfo::argumentGPR1) {
765 m_jit.move(arg1, GPRInfo::argumentGPR1);
766 setupTwoStubArgs<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
770 // If neither of arg1/arg3 are in our way, then we can move arg2 into place.
771 // Then we can use setupTwoStubArgs to fix arg1/arg3.
772 if (arg1 != GPRInfo::argumentGPR2 && arg3 != GPRInfo::argumentGPR2) {
773 m_jit.move(arg2, GPRInfo::argumentGPR2);
774 setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3>(arg1, arg3);
778 // If neither of arg1/arg2 are in our way, then we can move arg3 into place.
779 // Then we can use setupTwoStubArgs to fix arg1/arg2.
780 if (arg1 != GPRInfo::argumentGPR3 && arg2 != GPRInfo::argumentGPR3) {
781 m_jit.move(arg3, GPRInfo::argumentGPR3);
782 setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
786 // If we get here, we haven't been able to move any of arg1/arg2/arg3.
787 // Since all three are blocked, then all three must already be in the argument register.
788 // But are they in the right ones?
790 // First, ensure arg1 is in place.
791 if (arg1 != GPRInfo::argumentGPR1) {
792 m_jit.swap(arg1, GPRInfo::argumentGPR1);
794 // If arg1 wasn't in argumentGPR1, one of arg2/arg3 must be.
795 ASSERT(arg2 == GPRInfo::argumentGPR1 || arg3 == GPRInfo::argumentGPR1);
796 // If arg2 was in argumentGPR1 it no longer is (due to the swap).
797 // Otherwise arg3 must have been. Mark him as moved.
798 if (arg2 == GPRInfo::argumentGPR1)
804 // Either arg2 & arg3 need swapping, or we're all done.
805 ASSERT((arg2 == GPRInfo::argumentGPR2 || arg3 == GPRInfo::argumentGPR3)
806 || (arg2 == GPRInfo::argumentGPR3 || arg3 == GPRInfo::argumentGPR2));
808 if (arg2 != GPRInfo::argumentGPR2)
809 m_jit.swap(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3);
812 // These methods add calls to C++ helper functions.
// Each overload marshals its operands into the platform argument registers
// (argumentGPR0 always receives the call frame), emits the call with an
// exception check, and moves any return value into 'result'.
// NOTE(review): opening/closing braces are missing from these bodies in
// this extraction.
813 void callOperation(J_DFGOperation_EP operation, GPRReg result, void* pointer)
817 m_jit.move(JITCompiler::TrustedImmPtr(pointer), GPRInfo::argumentGPR1);
818 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
820 appendCallWithExceptionCheck(operation);
821 m_jit.move(GPRInfo::returnValueGPR, result);
// Identifier* is passed through the generic pointer overload.
823 void callOperation(J_DFGOperation_EI operation, GPRReg result, Identifier* identifier)
825 callOperation((J_DFGOperation_EP)operation, result, identifier);
// (JSValue, pointer) -> JSValue helper.
827 void callOperation(J_DFGOperation_EJP operation, GPRReg result, GPRReg arg1, void* pointer)
831 m_jit.move(arg1, GPRInfo::argumentGPR1);
832 m_jit.move(JITCompiler::TrustedImmPtr(pointer), GPRInfo::argumentGPR2);
833 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
835 appendCallWithExceptionCheck(operation);
836 m_jit.move(GPRInfo::returnValueGPR, result);
838 void callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
840 callOperation((J_DFGOperation_EJP)operation, result, arg1, identifier);
// (JSValue) -> JSValue helper.
842 void callOperation(J_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
846 m_jit.move(arg1, GPRInfo::argumentGPR1);
847 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
849 appendCallWithExceptionCheck(operation);
850 m_jit.move(GPRInfo::returnValueGPR, result);
// (JSValue) -> int helper (e.g. boolean predicates).
852 void callOperation(Z_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
856 m_jit.move(arg1, GPRInfo::argumentGPR1);
857 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
859 appendCallWithExceptionCheck(operation);
860 m_jit.move(GPRInfo::returnValueGPR, result);
// (JSValue, JSValue) -> int helper; setupStubArguments handles collisions.
862 void callOperation(Z_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
866 setupStubArguments(arg1, arg2);
867 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
869 appendCallWithExceptionCheck(operation);
870 m_jit.move(GPRInfo::returnValueGPR, result);
// (JSValue, JSValue) -> JSValue helper.
872 void callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
876 setupStubArguments(arg1, arg2);
877 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
879 appendCallWithExceptionCheck(operation);
880 m_jit.move(GPRInfo::returnValueGPR, result);
// Void helpers: no result move after the call.
882 void callOperation(V_DFGOperation_EJJP operation, GPRReg arg1, GPRReg arg2, void* pointer)
886 setupStubArguments(arg1, arg2);
887 m_jit.move(JITCompiler::TrustedImmPtr(pointer), GPRInfo::argumentGPR3);
888 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
890 appendCallWithExceptionCheck(operation);
892 void callOperation(V_DFGOperation_EJJI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier)
894 callOperation((V_DFGOperation_EJJP)operation, arg1, arg2, identifier);
896 void callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
900 setupStubArguments(arg1, arg2, arg3);
901 m_jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
903 appendCallWithExceptionCheck(operation);
// Pure double -> double helper: no call frame and no exception check.
905 void callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
909 setupTwoStubArgs<FPRInfo::argumentFPR0, FPRInfo::argumentFPR1>(arg1, arg2);
911 m_jit.appendCall(operation);
912 m_jit.moveDouble(FPRInfo::returnValueFPR, result);
915 JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
917 return m_jit.appendCallWithExceptionCheck(function, m_jit.graph()[m_compileIndex].codeOrigin);
920 void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination)
922 m_branches.append(BranchRecord(jump, destination));
// Resolve all recorded forward branches against the block entry labels.
// NOTE(review): the enclosing function's signature (presumably
// 'void linkBranches()') is missing from this extraction.
927 for (size_t i = 0; i < m_branches.size(); ++i) {
928 BranchRecord& branch = m_branches[i];
929 branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit);
// Debug dump of generator state; defined out of line.
934 void dump(const char* label = 0);
// Consistency checking is compiled in only when the feature flag is set;
// otherwise it is a no-op. NOTE(review): the '#else'/'#endif' lines are
// missing from this extraction.
937 #if ENABLE(DFG_CONSISTENCY_CHECK)
938 void checkConsistency();
940 void checkConsistency() {}
943 // The JIT, which also provides MacroAssembler functionality.
// NOTE(review): the 'JITCompiler& m_jit;' declaration this comment
// describes is missing from this extraction.
945 // This flag is used to distinguish speculative and non-speculative
946 // code generation. This is significant when filling spilled values
947 // from the RegisterFile. When spilling we attempt to store information
948 // as to the type of boxed value being stored (int32, double, cell), and
949 // when filling on the speculative path we will retrieve this type info
950 // where available. On the non-speculative path, however, we cannot rely
951 // on the spill format info, since a value being loaded might have
952 // been spilled by either the speculative or non-speculative paths (where
953 // we entered the non-speculative path on an intervening bail-out), and
954 // the value may have been boxed differently on the two paths.
955 bool m_isSpeculative;
956 // The current node being generated.
958 NodeIndex m_compileIndex;
959 // Virtual and physical register maps.
960 Vector<GenerationInfo, 32> m_generationInfo;
961 RegisterBank<GPRInfo> m_gprs;
962 RegisterBank<FPRInfo> m_fprs;
// Entry label for each basic block, indexed by BlockIndex.
964 Vector<MacroAssembler::Label> m_blockHeads;
// A pending forward branch awaiting linkBranches().
// NOTE(review): the ': jump(jump)' initializer line is missing from this
// extraction.
965 struct BranchRecord {
966 BranchRecord(MacroAssembler::Jump jump, BlockIndex destination)
968 , destination(destination)
972 MacroAssembler::Jump jump;
973 BlockIndex destination;
975 Vector<BranchRecord, 8> m_branches;
978 // === Operand types ===
980 // IntegerOperand, DoubleOperand and JSValueOperand.
982 // These classes are used to lock the operands to a node into machine
983 // registers. These classes implement of pattern of locking a value
984 // into register at the point of construction only if it is already in
985 // registers, and otherwise loading it lazily at the point it is first
986 // used. We do so in order to attempt to avoid spilling one operand
987 // in order to make space available for another.
// RAII wrapper holding an int32-typed operand in a GPR for the lifetime of
// the object; the register is unlocked in the destructor.
989 class IntegerOperand {
// Eagerly fill only if the value is already register-resident.
// NOTE(review): the 'm_jit(jit)' / 'm_index(index)' initializers and the
// gpr() call under the if are missing from this extraction.
991 explicit IntegerOperand(JITCodeGenerator* jit, NodeIndex index)
994 , m_gprOrInvalid(InvalidGPRReg)
996 , m_format(DataFormatNone)
1000 if (jit->isFilled(index))
// Destructor: release the lock on the GPR.
1006 ASSERT(m_gprOrInvalid != InvalidGPRReg);
1007 m_jit->unlock(m_gprOrInvalid);
1010 NodeIndex index() const
// format(): forces the fill so m_format is known.
1017 gpr(); // m_format is set when m_gpr is locked.
1018 ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
// gpr(): lazily fill on first use.
1024 if (m_gprOrInvalid == InvalidGPRReg)
1025 m_gprOrInvalid = m_jit->fillInteger(index(), m_format);
1026 return m_gprOrInvalid;
// use(): mark the operand as consumed by its parent.
1031 m_jit->use(m_index);
1035 JITCodeGenerator* m_jit;
1037 GPRReg m_gprOrInvalid;
1038 DataFormat m_format;
// RAII wrapper holding a double-typed operand in an FPR; same lazy-fill
// pattern as IntegerOperand.
1041 class DoubleOperand {
1043 explicit DoubleOperand(JITCodeGenerator* jit, NodeIndex index)
1046 , m_fprOrInvalid(InvalidFPRReg)
// Eagerly fill only if already resident as a double.
1049 if (jit->isFilledDouble(index))
// Destructor: release the lock on the FPR.
1055 ASSERT(m_fprOrInvalid != InvalidFPRReg);
1056 m_jit->unlock(m_fprOrInvalid);
1059 NodeIndex index() const
// fpr(): lazily fill on first use.
1066 if (m_fprOrInvalid == InvalidFPRReg)
1067 m_fprOrInvalid = m_jit->fillDouble(index());
1068 return m_fprOrInvalid;
// use(): mark the operand as consumed by its parent.
1073 m_jit->use(m_index);
1077 JITCodeGenerator* m_jit;
1079 FPRReg m_fprOrInvalid;
// RAII wrapper holding a boxed JSValue operand in a GPR; same lazy-fill
// pattern as IntegerOperand.
1082 class JSValueOperand {
1084 explicit JSValueOperand(JITCodeGenerator* jit, NodeIndex index)
1087 , m_gprOrInvalid(InvalidGPRReg)
1090 if (jit->isFilled(index))
// Destructor: release the lock on the GPR.
1096 ASSERT(m_gprOrInvalid != InvalidGPRReg);
1097 m_jit->unlock(m_gprOrInvalid);
1100 NodeIndex index() const
// gpr(): lazily fill on first use.
1107 if (m_gprOrInvalid == InvalidGPRReg)
1108 m_gprOrInvalid = m_jit->fillJSValue(index());
1109 return m_gprOrInvalid;
// use(): mark the operand as consumed by its parent.
1114 m_jit->use(m_index);
1118 JITCodeGenerator* m_jit;
1120 GPRReg m_gprOrInvalid;
1124 // === Temporaries ===
1126 // These classes are used to allocate temporary registers.
1127 // A mechanism is provided to attempt to reuse the registers
1128 // currently allocated to child nodes whose value is consumed
1129 // by, and not live after, this operation.
// RAII temporary GPR. The operand-taking constructors try to reuse that
// operand's register when it is on its last use (see canReuse()).
1131 class GPRTemporary {
1133 GPRTemporary(JITCodeGenerator*);
1134 GPRTemporary(JITCodeGenerator*, GPRReg specific);
1135 GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&);
1136 GPRTemporary(JITCodeGenerator*, SpeculateIntegerOperand&, SpeculateIntegerOperand&);
1137 GPRTemporary(JITCodeGenerator*, IntegerOperand&);
1138 GPRTemporary(JITCodeGenerator*, IntegerOperand&, IntegerOperand&);
1139 GPRTemporary(JITCodeGenerator*, SpeculateCellOperand&);
1140 GPRTemporary(JITCodeGenerator*, SpeculateBooleanOperand&);
1141 GPRTemporary(JITCodeGenerator*, JSValueOperand&);
// Destructor: release the lock on the temporary.
1145 m_jit->unlock(gpr());
// gpr(): accessor; the register is allocated in the constructors.
1150 ASSERT(m_gpr != InvalidGPRReg);
1155 JITCodeGenerator* m_jit;
// RAII temporary FPR, with the same operand-reuse constructors.
1159 class FPRTemporary {
1161 FPRTemporary(JITCodeGenerator*);
1162 FPRTemporary(JITCodeGenerator*, DoubleOperand&);
1163 FPRTemporary(JITCodeGenerator*, DoubleOperand&, DoubleOperand&);
1164 FPRTemporary(JITCodeGenerator*, SpeculateDoubleOperand&);
1165 FPRTemporary(JITCodeGenerator*, SpeculateDoubleOperand&, SpeculateDoubleOperand&);
// Destructor: release the lock on the temporary.
1169 m_jit->unlock(fpr());
1174 ASSERT(m_fpr != InvalidFPRReg);
// Protected: adopt an FPR the caller has already locked (used by FPRResult).
1179 FPRTemporary(JITCodeGenerator* jit, FPRReg lockedFPR)
1186 JITCodeGenerator* m_jit;
1193 // These classes lock the result of a call to a C++ helper function.
// Binds the GPR return-value register as an RAII temporary.
1195 class GPRResult : public GPRTemporary {
1197 GPRResult(JITCodeGenerator* jit)
1198 : GPRTemporary(jit, GPRInfo::returnValueGPR)
// Binds the FPR return-value register; lockedResult() locks it first so the
// protected FPRTemporary constructor can adopt it.
1203 class FPRResult : public FPRTemporary {
1205 FPRResult(JITCodeGenerator* jit)
1206 : FPRTemporary(jit, lockedResult(jit))
1211 static FPRReg lockedResult(JITCodeGenerator* jit)
1213 jit->lock(FPRInfo::returnValueFPR);
1214 return FPRInfo::returnValueFPR;
1218 } } // namespace JSC::DFG