/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGJITCompiler.h"

#if ENABLE(DFG_JIT)

#include "CodeBlock.h"
#include "DFGJITCodeGenerator.h"
#include "DFGNonSpeculativeJIT.h"
#include "DFGOperations.h"
#include "DFGRegisterBank.h"
#include "DFGSpeculativeJIT.h"
#include "JSGlobalData.h"
#include "LinkBuffer.h"

namespace JSC { namespace DFG {

// This method is used to fill a numeric value into an FPR when linking speculative -> non-speculative.
void JITCompiler::fillNumericToDouble(NodeIndex nodeIndex, FPRReg fpr, GPRReg temporary)
{
    Node& node = graph()[nodeIndex];

    if (node.isConstant()) {
        ASSERT(isNumberConstant(nodeIndex));
        // Materialize the raw bits of the double constant via the temporary GPR.
        move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(nodeIndex)))), temporary);
        movePtrToDouble(temporary, fpr);
    } else {
        loadPtr(addressFor(node.virtualRegister()), temporary);
        Jump isInteger = branchPtr(MacroAssembler::AboveOrEqual, temporary, GPRInfo::tagTypeNumberRegister);
        unboxDouble(temporary, fpr);
        Jump hasUnboxedDouble = jump();
        isInteger.link(this);
        convertInt32ToDouble(temporary, fpr);
        hasUnboxedDouble.link(this);
    }
}
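
// A note on the tag test above (an editor's sketch of the JSVALUE64 encoding this
// code assumes; see the encoding description in JSValue.h for the authoritative
// version): int32s are stored as TagTypeNumber | value, and doubles are stored as
// their bit pattern plus a 2^48 offset. TagTypeNumber is 0xFFFF000000000000, i.e.
// -2^48 modulo 2^64, so any unsigned value at or above tagTypeNumberRegister is an
// int32, anything below it that still has number bits set is an offset double, and
// boxing/unboxing a double is a single subPtr/addPtr of tagTypeNumberRegister.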

// This method is used to fill an integer value into a GPR when linking speculative -> non-speculative.
void JITCompiler::fillInt32ToInteger(NodeIndex nodeIndex, GPRReg gpr)
{
    Node& node = graph()[nodeIndex];

    if (node.isConstant()) {
        ASSERT(isInt32Constant(nodeIndex));
        move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
    } else {
#if ENABLE(DFG_JIT_ASSERT)
        // Redundant load, just so we can check the tag!
        loadPtr(addressFor(node.virtualRegister()), gpr);
        jitAssertIsJSInt32(gpr);
#endif
        load32(addressFor(node.virtualRegister()), gpr);
    }
}

// This method is used to fill a JSValue into a GPR when linking speculative -> non-speculative.
void JITCompiler::fillToJS(NodeIndex nodeIndex, GPRReg gpr)
{
    Node& node = graph()[nodeIndex];

    if (node.isConstant()) {
        if (isInt32Constant(nodeIndex)) {
            JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex));
            move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
        } else if (isNumberConstant(nodeIndex)) {
            JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(nodeIndex));
            move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
        } else {
            ASSERT(isJSConstant(nodeIndex));
            JSValue jsValue = valueOfJSConstant(nodeIndex);
            move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr);
        }
        return;
    }

    loadPtr(addressFor(node.virtualRegister()), gpr);
}

#if ENABLE(DFG_OSR_EXIT)
void JITCompiler::exitSpeculativeWithOSR(const OSRExit& exit, SpeculationRecovery* recovery, Vector<BytecodeAndMachineOffset>& decodedCodeMap)
{
    // 1) Pro-forma stuff.
    exit.m_check.link(this);

#if ENABLE(DFG_DEBUG_VERBOSE)
    fprintf(stderr, "OSR exit for Node @%d (bc#%u) at JIT offset 0x%x ", (int)exit.m_nodeIndex, exit.m_bytecodeIndex, debugOffset());
#endif

#if ENABLE(DFG_JIT_BREAK_ON_SPECULATION_FAILURE)
    breakpoint();
#endif

#if ENABLE(DFG_VERBOSE_SPECULATION_FAILURE)
    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
    debugInfo->codeBlock = m_codeBlock;
    debugInfo->debugOffset = debugOffset();

    debugCall(debugOperationPrintSpeculationFailure, debugInfo);
#endif

#if ENABLE(DFG_SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    emitCount(counter);
#endif

    // 2) Perform speculation recovery. This only comes into play when an operation
    //    starts mutating state before verifying the speculation it has already made.

    GPRReg alreadyBoxed = InvalidGPRReg;

    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd:
            // Undo the speculative addition, then rebox the restored operand.
            sub32(recovery->src(), recovery->dest());
            orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest());
            alreadyBoxed = recovery->dest();
            break;

        case BooleanSpeculationCheck:
            xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;

        default:
            break;
        }
    }
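
    // Worked example for the SpeculativeAdd recovery above (an illustrative sketch,
    // not from the original source): if the speculative path compiled "a + b" as
    // add32(srcGPR, destGPR) and the overflow check branched here, destGPR holds the
    // unboxed, already-mutated sum. sub32(src, dest) restores the original value of
    // the destination operand, and the orPtr re-tags it as a boxed int32 so the
    // non-speculative code sees a well-formed JSValue.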

    // 3) Figure out how many scratch slots we'll need. We need one for every GPR/FPR
    //    whose destination is now occupied by a DFG virtual register, and we need
    //    one for every displaced virtual register if there are more than
    //    GPRInfo::numberOfRegisters of them. Also see if there are any constants,
    //    any undefined slots, any FPR slots, and any unboxed ints.

    Vector<bool> poisonedVirtualRegisters(exit.m_variables.size());
    for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i)
        poisonedVirtualRegisters[i] = false;

    unsigned numberOfPoisonedVirtualRegisters = 0;
    unsigned numberOfDisplacedVirtualRegisters = 0;

    // Booleans for fast checks. We expect that most OSR exits do not have to rebox
    // Int32s, have no FPRs, and have no constants. If there are constants, we
    // expect most of them to be jsUndefined(); if that's true then we handle that
    // specially to minimize code size and execution time.
    bool haveUnboxedInt32s = false;
    bool haveFPRs = false;
    bool haveConstants = false;
    bool haveUndefined = false;

    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        switch (recovery.technique()) {
        case DisplacedInRegisterFile:
            numberOfDisplacedVirtualRegisters++;
            ASSERT((int)recovery.virtualRegister() >= 0);

            // See if we might like to store to this virtual register before doing
            // virtual register shuffling. If so, we say that the virtual register
            // is poisoned: it cannot be stored to until after displaced virtual
            // registers are handled. We track poisoned virtual registers carefully
            // to ensure this happens efficiently. Note that we expect this case
            // to be rare, so the handling of it is optimized for the cases in
            // which it does not happen.
            if (recovery.virtualRegister() < (int)exit.m_variables.size()) {
                switch (exit.m_variables[recovery.virtualRegister()].technique()) {
                case InGPR:
                case UnboxedInt32InGPR:
                case InFPR:
                    if (!poisonedVirtualRegisters[recovery.virtualRegister()]) {
                        poisonedVirtualRegisters[recovery.virtualRegister()] = true;
                        numberOfPoisonedVirtualRegisters++;
                    }
                    break;
                default:
                    break;
                }
            }
            break;

        case UnboxedInt32InGPR:
            haveUnboxedInt32s = true;
            break;

        case InFPR:
            haveFPRs = true;
            break;

        case Constant:
            haveConstants = true;
            if (recovery.constant().isUndefined())
                haveUndefined = true;
            break;

        default:
            break;
        }
    }

    EncodedJSValue* scratchBuffer = static_cast<EncodedJSValue*>(globalData()->osrScratchBufferForSize(sizeof(EncodedJSValue) * (numberOfPoisonedVirtualRegisters + (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters ? 0 : numberOfDisplacedVirtualRegisters))));
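    // Sizing rationale: every poisoned virtual register always needs a scratch slot,
    // while displaced virtual registers need slots only in the slow case where they
    // cannot all be held in GPRs at once (see step 8 below).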

    // From here on, the code assumes that it is profitable to maximize the distance
    // between when something is computed and when it is stored.

    // 4) Perform all reboxing of integers.

    if (haveUnboxedInt32s) {
        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() == UnboxedInt32InGPR && recovery.gpr() != alreadyBoxed)
                orPtr(GPRInfo::tagTypeNumberRegister, recovery.gpr());
        }
    }

    // 5) Dump all non-poisoned GPRs. For poisoned GPRs, save them into the scratch storage.
    //    Note that GPRs do not have a fast check (like haveFPRs) because we expect that
    //    most OSR failure points will have at least one GPR that needs to be dumped.

    unsigned scratchIndex = 0;
    for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
        const ValueRecovery& recovery = exit.valueRecovery(index);
        int operand = exit.operandForIndex(index);
        switch (recovery.technique()) {
        case InGPR:
        case UnboxedInt32InGPR:
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)])
                storePtr(recovery.gpr(), scratchBuffer + scratchIndex++);
            else
                storePtr(recovery.gpr(), addressFor((VirtualRegister)operand));
            break;
        default:
            break;
        }
    }

    // At this point all GPRs are available for scratch use.

    if (haveFPRs) {
        // 6) Box all doubles (relies on there being more GPRs than FPRs)

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
            FPRReg fpr = recovery.fpr();
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(fpr));
            boxDouble(fpr, gpr);
        }

        // 7) Dump all doubles into the register file, or to the scratch storage if
        //    the destination virtual register is poisoned.

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != InFPR)
                continue;
            GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr()));
            if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)])
                storePtr(gpr, scratchBuffer + scratchIndex++);
            else
                storePtr(gpr, addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }

    ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters);

    // 8) Reshuffle displaced virtual registers. Optimize for the case that
    //    the number of displaced virtual registers is not more than the number
    //    of available physical registers.

    if (numberOfDisplacedVirtualRegisters) {
        if (numberOfDisplacedVirtualRegisters <= GPRInfo::numberOfRegisters) {
            // So far this appears to be the case that triggers all the time, but
            // that is far from guaranteed.

            unsigned displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                if (recovery.technique() != DisplacedInRegisterFile)
                    continue;
                loadPtr(addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++));
            }

            displacementIndex = 0;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                if (recovery.technique() != DisplacedInRegisterFile)
                    continue;
                storePtr(GPRInfo::toRegister(displacementIndex++), addressFor((VirtualRegister)exit.operandForIndex(index)));
            }
        } else {
            // FIXME: This should use the shuffling algorithm that we use
            // for speculative->non-speculative jumps, if we ever discover that
            // some hot code with lots of live values that get displaced and
            // spilled really enjoys frequently failing speculation.

            // For now this code is engineered to be correct but probably not
            // super. In particular, it correctly handles cases where for example
            // the displacements are a permutation of the destination values, like
            //
            //     1 -> 2
            //     2 -> 1
            //
            // It accomplishes this by simply lifting all of the virtual registers
            // from their old (DFG JIT) locations and dropping them in a scratch
            // location in memory, and then transferring from that scratch location
            // to their new (old JIT) locations.

            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                if (recovery.technique() != DisplacedInRegisterFile)
                    continue;
                loadPtr(addressFor(recovery.virtualRegister()), GPRInfo::regT0);
                storePtr(GPRInfo::regT0, scratchBuffer + scratchIndex++);
            }

            scratchIndex = numberOfPoisonedVirtualRegisters;
            for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
                const ValueRecovery& recovery = exit.valueRecovery(index);
                if (recovery.technique() != DisplacedInRegisterFile)
                    continue;
                loadPtr(scratchBuffer + scratchIndex++, GPRInfo::regT0);
                storePtr(GPRInfo::regT0, addressFor((VirtualRegister)exit.operandForIndex(index)));
            }

            ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters + numberOfDisplacedVirtualRegisters);
        }
    }
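
    // (The shuffling algorithm mentioned in the FIXME above is the one implemented
    // by GeneralizedRegister/ShuffledRegister for speculative -> non-speculative
    // jumps; see the !ENABLE(DFG_OSR_EXIT) half of this file.)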

    // 9) Dump all poisoned virtual registers.

    scratchIndex = 0;
    if (numberOfPoisonedVirtualRegisters) {
        for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) {
            if (!poisonedVirtualRegisters[virtualRegister])
                continue;

            const ValueRecovery& recovery = exit.m_variables[virtualRegister];
            switch (recovery.technique()) {
            case InGPR:
            case UnboxedInt32InGPR:
            case InFPR:
                loadPtr(scratchBuffer + scratchIndex++, GPRInfo::regT0);
                storePtr(GPRInfo::regT0, addressFor((VirtualRegister)virtualRegister));
                break;

            default:
                break;
            }
        }
    }
    ASSERT(scratchIndex == numberOfPoisonedVirtualRegisters);

    // 10) Dump all constants. Optimize for Undefined, since that's a constant we see
    //     frequently.

    if (haveConstants) {
        if (haveUndefined)
            move(TrustedImmPtr(JSValue::encode(jsUndefined())), GPRInfo::regT0);

        for (int index = 0; index < exit.numberOfRecoveries(); ++index) {
            const ValueRecovery& recovery = exit.valueRecovery(index);
            if (recovery.technique() != Constant)
                continue;
            if (recovery.constant().isUndefined())
                storePtr(GPRInfo::regT0, addressFor((VirtualRegister)exit.operandForIndex(index)));
            else
                storePtr(TrustedImmPtr(JSValue::encode(recovery.constant())), addressFor((VirtualRegister)exit.operandForIndex(index)));
        }
    }

    // 11) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
    //     that all new calls into this code will go to the new JIT, so the execute
    //     counter only affects call frames that performed OSR exit and call frames
    //     that were still executing the old JIT at the time of another call frame's
    //     OSR exit. We want to ensure that the following is true:
    //
    //     (a) Code that performs an OSR exit gets a chance to reenter optimized
    //         code eventually, since optimized code is faster. But we don't
    //         want to do such reentry too aggressively (see (c) below).
    //
    //     (b) If there is code on the call stack that is still running the old
    //         JIT's code and has never OSR'd, then it should get a chance to
    //         perform OSR entry despite the fact that we've exited.
    //
    //     (c) Code that performs an OSR exit should not immediately retry OSR
    //         entry, since both forms of OSR are expensive. OSR entry is
    //         particularly expensive.
    //
    //     To ensure (c), we'd like to set the execute counter to
    //     counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
    //     (a) and (b), since then every OSR exit would delay the opportunity for
    //     every call frame to perform OSR entry. Essentially, if OSR exit happens
    //     frequently and the function has few loops, then the counter will never
    //     become non-negative and OSR entry will never be triggered. OSR entry
    //     will only happen if a loop gets hot in the old JIT, which does a pretty
    //     good job of ensuring (a) and (b). This heuristic may need to be
    //     rethought in the future, particularly if we support reoptimizing code
    //     with new value profiles gathered from code that did OSR exit.

    store32(Imm32(codeBlock()->alternative()->counterValueForOptimizeAfterWarmUp()), codeBlock()->alternative()->addressOfExecuteCounter());
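
    // As the comment above implies, counterValueForOptimizeAfterWarmUp() is a
    // negative value: the baseline JIT counts executions upward and attempts
    // optimization (and hence OSR entry) only once the counter becomes
    // non-negative, so this store forces a full warm-up period before the next
    // attempt.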

    // 12) Load the result of the last bytecode operation into regT0.

    if (exit.m_lastSetOperand != std::numeric_limits<int>::max())
        loadPtr(addressFor((VirtualRegister)exit.m_lastSetOperand), GPRInfo::cachedResultRegister);

    // 13) Fix call frame.

    ASSERT(codeBlock()->alternative()->getJITType() == JITCode::BaselineJIT);
    storePtr(TrustedImmPtr(codeBlock()->alternative()), addressFor((VirtualRegister)RegisterFile::CodeBlock));

    // 14) Jump into the corresponding baseline JIT code.

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned, BytecodeAndMachineOffset::getBytecodeIndex>(decodedCodeMap.begin(), decodedCodeMap.size(), exit.m_bytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_bytecodeIndex);

    void* jumpTarget = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(codeBlock()->alternative()->getJITCode().start()) + mapping->m_machineCodeOffset);

    ASSERT(GPRInfo::regT1 != GPRInfo::cachedResultRegister);

    move(TrustedImmPtr(jumpTarget), GPRInfo::regT1);
    jump(GPRInfo::regT1);

#if ENABLE(DFG_DEBUG_VERBOSE)
    fprintf(stderr, " -> %p\n", jumpTarget);
#endif
}

void JITCompiler::linkOSRExits(SpeculativeJIT& speculative)
{
    Vector<BytecodeAndMachineOffset> decodedCodeMap;
    ASSERT(codeBlock()->alternative());
    ASSERT(codeBlock()->alternative()->getJITType() == JITCode::BaselineJIT);
    ASSERT(codeBlock()->alternative()->jitCodeMap());
    codeBlock()->alternative()->jitCodeMap()->decode(decodedCodeMap);

    OSRExitVector::Iterator exitsIter = speculative.osrExits().begin();
    OSRExitVector::Iterator exitsEnd = speculative.osrExits().end();

    while (exitsIter != exitsEnd) {
        const OSRExit& exit = *exitsIter;
        exitSpeculativeWithOSR(exit, speculative.speculationRecovery(exit.m_recoveryIndex), decodedCodeMap);
        ++exitsIter;
    }
}
#else // ENABLE(DFG_OSR_EXIT)

class GeneralizedRegister {
public:
    GeneralizedRegister() { }

    static GeneralizedRegister createGPR(GPRReg gpr)
    {
        GeneralizedRegister result;
        result.m_isFPR = false;
        result.m_register.gpr = gpr;
        return result;
    }

    static GeneralizedRegister createFPR(FPRReg fpr)
    {
        GeneralizedRegister result;
        result.m_isFPR = true;
        result.m_register.fpr = fpr;
        return result;
    }

    bool isFPR()
    {
        return m_isFPR;
    }

    GPRReg gpr()
    {
        ASSERT(!m_isFPR);
        return m_register.gpr;
    }

    FPRReg fpr()
    {
        ASSERT(m_isFPR);
        return m_register.fpr;
    }

    const SpeculationCheck::RegisterInfo& findInSpeculationCheck(const SpeculationCheck& check)
    {
        if (isFPR())
            return check.m_fprInfo[FPRInfo::toIndex(fpr())];
        return check.m_gprInfo[GPRInfo::toIndex(gpr())];
    }

    const EntryLocation::RegisterInfo& findInEntryLocation(const EntryLocation& entry)
    {
        if (isFPR())
            return entry.m_fprInfo[FPRInfo::toIndex(fpr())];
        return entry.m_gprInfo[GPRInfo::toIndex(gpr())];
    }

    DataFormat previousDataFormat(const SpeculationCheck& check)
    {
        return findInSpeculationCheck(check).format;
    }

    DataFormat nextDataFormat(const EntryLocation& entry)
    {
        return findInEntryLocation(entry).format;
    }

    void convert(DataFormat oldDataFormat, DataFormat newDataFormat, JITCompiler& jit)
    {
        if (LIKELY(!needDataFormatConversion(oldDataFormat, newDataFormat)))
            return;

        if (oldDataFormat == DataFormatInteger) {
            // Box the integer by or'ing in the number tag.
            jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr());
            return;
        }

        ASSERT(newDataFormat == DataFormatInteger);
        jit.zeroExtend32ToPtr(gpr(), gpr());
    }

    void moveTo(GeneralizedRegister& other, DataFormat myDataFormat, DataFormat otherDataFormat, JITCompiler& jit, FPRReg scratchFPR)
    {
        if (UNLIKELY(isFPR())) {
            if (UNLIKELY(other.isFPR())) {
                jit.moveDouble(fpr(), other.fpr());
                return;
            }

            JITCompiler::Jump done;

            if (scratchFPR != InvalidFPRReg) {
                // We have a scratch FPR, so attempt a conversion to int.
                JITCompiler::JumpList notInt;
                jit.branchConvertDoubleToInt32(fpr(), other.gpr(), notInt, scratchFPR);
                jit.orPtr(GPRInfo::tagTypeNumberRegister, other.gpr());
                done = jit.jump();
                notInt.link(&jit);
            }

            jit.boxDouble(fpr(), other.gpr());

            if (done.isSet())
                done.link(&jit);
            return;
        }

        if (UNLIKELY(other.isFPR())) {
            jit.unboxDouble(gpr(), other.fpr());
            return;
        }

        if (LIKELY(!needDataFormatConversion(myDataFormat, otherDataFormat))) {
            jit.move(gpr(), other.gpr());
            return;
        }

        if (myDataFormat == DataFormatInteger) {
            jit.orPtr(gpr(), GPRInfo::tagTypeNumberRegister, other.gpr());
            return;
        }

        ASSERT(otherDataFormat == DataFormatInteger);
        jit.zeroExtend32ToPtr(gpr(), other.gpr());
    }

    void swapWith(GeneralizedRegister& other, DataFormat myDataFormat, DataFormat myNewDataFormat, DataFormat otherDataFormat, DataFormat otherNewDataFormat, JITCompiler& jit, GPRReg scratchGPR, FPRReg scratchFPR)
    {
        if (UNLIKELY(isFPR())) {
            if (UNLIKELY(other.isFPR())) {
                if (scratchFPR == InvalidFPRReg)
                    jit.moveDoubleToPtr(fpr(), scratchGPR);
                else
                    jit.moveDouble(fpr(), scratchFPR);
                jit.moveDouble(other.fpr(), fpr());
                if (scratchFPR == InvalidFPRReg)
                    jit.movePtrToDouble(scratchGPR, other.fpr());
                else
                    jit.moveDouble(scratchFPR, other.fpr());
                return;
            }

            jit.move(other.gpr(), scratchGPR);

            JITCompiler::Jump done;

            if (scratchFPR != InvalidFPRReg) {
                JITCompiler::JumpList notInt;
                jit.branchConvertDoubleToInt32(fpr(), other.gpr(), notInt, scratchFPR);
                jit.orPtr(GPRInfo::tagTypeNumberRegister, other.gpr());
                done = jit.jump();
                notInt.link(&jit);
            }

            jit.boxDouble(fpr(), other.gpr());

            if (done.isSet())
                done.link(&jit);

            jit.unboxDouble(scratchGPR, fpr());
            return;
        }

        if (UNLIKELY(other.isFPR())) {
            other.swapWith(*this, otherDataFormat, otherNewDataFormat, myDataFormat, myNewDataFormat, jit, scratchGPR, scratchFPR);
            return;
        }

        jit.swap(gpr(), other.gpr());

        if (UNLIKELY(needDataFormatConversion(otherDataFormat, myNewDataFormat))) {
            if (otherDataFormat == DataFormatInteger)
                jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr());
            else if (myNewDataFormat == DataFormatInteger)
                jit.zeroExtend32ToPtr(gpr(), gpr());
        }

        if (UNLIKELY(needDataFormatConversion(myDataFormat, otherNewDataFormat))) {
            if (myDataFormat == DataFormatInteger)
                jit.orPtr(GPRInfo::tagTypeNumberRegister, other.gpr());
            else if (otherNewDataFormat == DataFormatInteger)
                jit.zeroExtend32ToPtr(other.gpr(), other.gpr());
        }
    }

private:
    bool m_isFPR;
    union {
        GPRReg gpr;
        FPRReg fpr;
    } m_register;
};
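
// A note on the data format conversions above (an editor's summary of the scheme
// this code assumes): DataFormatInteger -> DataFormatJS* boxing is a single orPtr
// of the number tag, the reverse is a zeroExtend32ToPtr, and double <-> JSDouble
// boxing is the offset adjustment performed by boxDouble/unboxDouble.
// needDataFormatConversion() reports when two formats already share a bit-level
// representation, which is why matching formats take the plain move/swap fast path.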

struct ShuffledRegister {
    GeneralizedRegister reg;
    ShuffledRegister* previous;
    bool hasFrom;
    bool hasTo;
    bool handled;

    ShuffledRegister() { }

    ShuffledRegister(GeneralizedRegister reg)
        : reg(reg)
        , previous(0)
        , hasFrom(false)
        , hasTo(false)
        , handled(false)
    {
    }

    bool isEndOfNonCyclingPermutation()
    {
        return hasTo && !hasFrom;
    }

    void handleNonCyclingPermutation(const SpeculationCheck& check, const EntryLocation& entry, JITCompiler& jit, FPRReg& scratchFPR1, FPRReg& scratchFPR2)
    {
        ShuffledRegister* cur = this;
        while (cur->previous) {
            cur->previous->reg.moveTo(cur->reg, cur->previous->reg.previousDataFormat(check), cur->reg.nextDataFormat(entry), jit, scratchFPR1);
            cur->handled = true;
            if (cur->reg.isFPR()) {
                if (scratchFPR1 == InvalidFPRReg)
                    scratchFPR1 = cur->reg.fpr();
                else {
                    ASSERT(scratchFPR1 != cur->reg.fpr());
                    scratchFPR2 = cur->reg.fpr();
                }
            }
            cur = cur->previous;
        }
        cur->handled = true;
        if (cur->reg.isFPR()) {
            if (scratchFPR1 == InvalidFPRReg)
                scratchFPR1 = cur->reg.fpr();
            else {
                ASSERT(scratchFPR1 != cur->reg.fpr());
                scratchFPR2 = cur->reg.fpr();
            }
        }
    }

    void handleCyclingPermutation(const SpeculationCheck& check, const EntryLocation& entry, JITCompiler& jit, GPRReg scratchGPR, FPRReg scratchFPR1, FPRReg scratchFPR2)
    {
        // First determine the cycle length.

        unsigned cycleLength = 0;

        ShuffledRegister* cur = this;
        ShuffledRegister* next = 0;
        do {
            ASSERT(cur);
            cycleLength++;
            cur->handled = true;
            next = cur;
            cur = cur->previous;
        } while (cur != this);

        ASSERT(cycleLength);
        ASSERT(next->previous == cur);

        // Now determine the best way to handle the permutation, depending on the
        // length of the cycle.

        switch (cycleLength) {
        case 1:
            // A cycle of length 1 is a register whose value stays put but may
            // need a data format conversion.
            reg.convert(reg.previousDataFormat(check), reg.nextDataFormat(entry), jit);
            break;

        case 2:
            reg.swapWith(previous->reg, reg.previousDataFormat(check), reg.nextDataFormat(entry), previous->reg.previousDataFormat(check), previous->reg.nextDataFormat(entry), jit, scratchGPR, scratchFPR1);
            break;

        default: {
            // Break the cycle by stashing one end in a scratch register, shifting
            // the rest of the chain, then landing the stashed value at the far end.
            GeneralizedRegister scratch;
            if (UNLIKELY(reg.isFPR() && next->reg.isFPR())) {
                if (scratchFPR2 == InvalidFPRReg) {
                    scratch = GeneralizedRegister::createGPR(scratchGPR);
                    reg.moveTo(scratch, DataFormatDouble, DataFormatJSDouble, jit, scratchFPR1);
                } else {
                    scratch = GeneralizedRegister::createFPR(scratchFPR2);
                    reg.moveTo(scratch, DataFormatDouble, DataFormatDouble, jit, scratchFPR1);
                }
            } else {
                scratch = GeneralizedRegister::createGPR(scratchGPR);
                reg.moveTo(scratch, reg.previousDataFormat(check), next->reg.nextDataFormat(entry), jit, scratchFPR1);
            }

            cur = this;
            while (cur->previous != this) {
                ASSERT(cur->previous);
                cur->previous->reg.moveTo(cur->reg, cur->previous->reg.previousDataFormat(check), cur->reg.nextDataFormat(entry), jit, scratchFPR1);
                cur = cur->previous;
            }

            if (UNLIKELY(reg.isFPR() && next->reg.isFPR())) {
                if (scratchFPR2 == InvalidFPRReg)
                    scratch.moveTo(next->reg, DataFormatJSDouble, DataFormatDouble, jit, scratchFPR1);
                else
                    scratch.moveTo(next->reg, DataFormatDouble, DataFormatDouble, jit, scratchFPR1);
            } else
                scratch.moveTo(next->reg, next->reg.nextDataFormat(entry), next->reg.nextDataFormat(entry), jit, scratchFPR1);
            break;
        } }
    }

    static ShuffledRegister* lookup(ShuffledRegister* gprs, ShuffledRegister* fprs, GeneralizedRegister& reg)
    {
        if (reg.isFPR())
            return fprs + FPRInfo::toIndex(reg.fpr());
        return gprs + GPRInfo::toIndex(reg.gpr());
    }
};

template<typename T>
T& lookupForRegister(T* gprs, T* fprs, unsigned index)
{
    ASSERT(index < GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
    if (index < GPRInfo::numberOfRegisters)
        return gprs[index];
    return fprs[index - GPRInfo::numberOfRegisters];
}

// This is written in a way that allows for a HashMap<NodeIndex, GeneralizedRegister> to be
// easily substituted, if it is found to be wise to do so. So far performance measurements
// indicate that this is faster, likely because the HashMap would have never grown very big
// and we would thus be wasting time performing complex hashing logic that, though O(1) on
// average, would cost more than the ~7 loop iterations that the find() method below
// performs (since it's uncommon that we'd have register allocated more than 7 registers
// in the current scheme).
class NodeToRegisterMap {
public:
    struct Tuple {
        NodeIndex first;
        GeneralizedRegister second;
    };

    typedef Tuple* iterator;

    NodeToRegisterMap()
        : m_occupancy(0)
    {
    }

    void set(NodeIndex first, GeneralizedRegister second)
    {
        m_payload[m_occupancy].first = first;
        m_payload[m_occupancy].second = second;
        m_occupancy++;
    }

    Tuple* end()
    {
        return 0;
    }

    Tuple* find(NodeIndex first)
    {
        for (unsigned i = m_occupancy; i-- > 0;) {
            if (m_payload[i].first == first)
                return m_payload + i;
        }
        return end();
    }

    void clear()
    {
        m_occupancy = 0;
    }

private:
    Tuple m_payload[GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters];
    unsigned m_occupancy;
};
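
// Usage sketch (based on the call sites below): jumpFromSpeculativeToNonSpeculative()
// clear()s both maps, populates them from the SpeculationCheck/EntryLocation register
// info via set(), and then queries them with find(). Occupancy is therefore bounded
// by GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters, which keeps the linear
// find() cheap.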

void JITCompiler::jumpFromSpeculativeToNonSpeculative(const SpeculationCheck& check, const EntryLocation& entry, SpeculationRecovery* recovery, NodeToRegisterMap& checkNodeToRegisterMap, NodeToRegisterMap& entryNodeToRegisterMap)
{
    ASSERT(check.m_nodeIndex == entry.m_nodeIndex);

    // Link the jump from the Speculative path to here.
    check.m_check.link(this);

#if ENABLE(DFG_DEBUG_VERBOSE)
    fprintf(stderr, "Speculation failure for Node @%d at JIT offset 0x%x\n", (int)check.m_nodeIndex, debugOffset());
#endif

#if ENABLE(DFG_JIT_BREAK_ON_SPECULATION_FAILURE)
    breakpoint();
#endif

#if ENABLE(DFG_VERBOSE_SPECULATION_FAILURE)
    SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
    debugInfo->codeBlock = m_codeBlock;
    debugInfo->debugOffset = debugOffset();

    debugCall(debugOperationPrintSpeculationFailure, debugInfo);
#endif

#if ENABLE(DFG_SUCCESS_STATS)
    static SamplingCounter counter("SpeculationFailure");
    emitCount(counter);
#endif

    // Does this speculation check require any additional recovery to be performed,
    // to restore any state that has been overwritten before we enter back in to the
    // non-speculative path?
    if (recovery) {
        switch (recovery->type()) {
        case SpeculativeAdd: {
            ASSERT(check.m_gprInfo[GPRInfo::toIndex(recovery->dest())].nodeIndex != NoNode);

            // Undo the speculative addition.
            sub32(recovery->src(), recovery->dest());

            // If recovery->dest() should have been boxed prior to the addition, then rebox
            // it.
            DataFormat format = check.m_gprInfo[GPRInfo::toIndex(recovery->dest())].format;
            ASSERT(format == DataFormatInteger || format == DataFormatJSInteger || format == DataFormatJS);
            if (format != DataFormatInteger)
                orPtr(GPRInfo::tagTypeNumberRegister, recovery->dest());
            break;
        }

        case BooleanSpeculationCheck: {
            ASSERT(check.m_gprInfo[GPRInfo::toIndex(recovery->dest())].nodeIndex != NoNode);
            // Rebox the (non-)boolean.
            xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), recovery->dest());
            break;
        }

        default:
            ASSERT_NOT_REACHED();
            break;
        }
    }

    // First, we need a reverse mapping that tells us, for a NodeIndex, which register
    // backs it on each of the two paths.

    checkNodeToRegisterMap.clear();
    entryNodeToRegisterMap.clear();

    GPRReg scratchGPR = InvalidGPRReg;
    FPRReg scratchFPR1 = InvalidFPRReg;
    FPRReg scratchFPR2 = InvalidFPRReg;
    bool needToRestoreTagMaskRegister = false;

    for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index) {
        NodeIndex nodeIndexInCheck = check.m_gprInfo[index].nodeIndex;
        if (nodeIndexInCheck != NoNode)
            checkNodeToRegisterMap.set(nodeIndexInCheck, GeneralizedRegister::createGPR(GPRInfo::toRegister(index)));
        NodeIndex nodeIndexInEntry = entry.m_gprInfo[index].nodeIndex;
        if (nodeIndexInEntry != NoNode)
            entryNodeToRegisterMap.set(nodeIndexInEntry, GeneralizedRegister::createGPR(GPRInfo::toRegister(index)));
        else if (nodeIndexInCheck == NoNode)
            scratchGPR = GPRInfo::toRegister(index);
    }

    for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index) {
        NodeIndex nodeIndexInCheck = check.m_fprInfo[index].nodeIndex;
        if (nodeIndexInCheck != NoNode)
            checkNodeToRegisterMap.set(nodeIndexInCheck, GeneralizedRegister::createFPR(FPRInfo::toRegister(index)));
        NodeIndex nodeIndexInEntry = entry.m_fprInfo[index].nodeIndex;
        if (nodeIndexInEntry != NoNode)
            entryNodeToRegisterMap.set(nodeIndexInEntry, GeneralizedRegister::createFPR(FPRInfo::toRegister(index)));
        else if (nodeIndexInCheck == NoNode) {
            if (scratchFPR1 == InvalidFPRReg)
                scratchFPR1 = FPRInfo::toRegister(index);
            else
                scratchFPR2 = FPRInfo::toRegister(index);
        }
    }

    ASSERT((scratchFPR1 == InvalidFPRReg && scratchFPR2 == InvalidFPRReg) || (scratchFPR1 != scratchFPR2));

    // We need to:
    // 1) Spill any values that are not spilled on speculative, but are spilled
    //    on non-speculative.
    // 2) For the set of nodes that are in registers on both paths, perform a
    //    shuffling.
    // 3) Fill any values that were spilled on speculative, but are not spilled
    //    on non-speculative.
    //
    // If we find registers that can be used as scratch registers along the way,
    // grab them.

    // Part 1: spill any values that are not spilled on speculative, but are
    //         spilled on non-speculative.

    // This also sets up some data structures that Part 2 will need.

    ShuffledRegister gprs[GPRInfo::numberOfRegisters];
    ShuffledRegister fprs[FPRInfo::numberOfRegisters];

    for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index)
        gprs[index] = ShuffledRegister(GeneralizedRegister::createGPR(GPRInfo::toRegister(index)));
    for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index)
        fprs[index] = ShuffledRegister(GeneralizedRegister::createFPR(FPRInfo::toRegister(index)));

    for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index) {
        NodeIndex nodeIndex = check.m_gprInfo[index].nodeIndex;

        // Bail out if this register isn't assigned to anything.
        if (nodeIndex == NoNode)
            continue;

        // If the non-speculative path also has a register for the nodeIndex that this
        // register stores, link them together.
        NodeToRegisterMap::iterator mapIterator = entryNodeToRegisterMap.find(nodeIndex);
        if (mapIterator != entryNodeToRegisterMap.end()) {
            gprs[index].hasFrom = true;

            ShuffledRegister* next = ShuffledRegister::lookup(gprs, fprs, mapIterator->second);
            next->previous = gprs + index;
            next->hasTo = true;

            // If the non-speculative path has not spilled this register, then skip the spilling
            // part below regardless of whether or not the speculative path has spilled it.
            if (!mapIterator->second.findInEntryLocation(entry).isSpilled)
                continue;
        }

        // If the non-speculative entry isn't using this register and it does not need
        // the value in this register to be placed into any other register, then this
        // register can be used for scratch.
        if (entry.m_gprInfo[index].nodeIndex == NoNode)
            scratchGPR = GPRInfo::toRegister(index);

        // If the speculative path has already spilled the register then there is no need to
        // spill it again.
        if (check.m_gprInfo[index].isSpilled)
            continue;

        DataFormat dataFormat = check.m_gprInfo[index].format;
        VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister();

        ASSERT(dataFormat == DataFormatInteger || dataFormat == DataFormatCell || (dataFormat & DataFormatJS));
        if (dataFormat == DataFormatInteger)
            orPtr(GPRInfo::tagTypeNumberRegister, GPRInfo::toRegister(index));
        storePtr(GPRInfo::toRegister(index), addressFor(virtualRegister));
    }

    if (scratchGPR == InvalidGPRReg) {
        scratchGPR = GPRInfo::tagMaskRegister;
        needToRestoreTagMaskRegister = true;
    }

    for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index) {
        NodeIndex nodeIndex = check.m_fprInfo[index].nodeIndex;
        if (nodeIndex == NoNode)
            continue;

        NodeToRegisterMap::iterator mapIterator = entryNodeToRegisterMap.find(nodeIndex);
        if (mapIterator != entryNodeToRegisterMap.end()) {
            fprs[index].hasFrom = true;

            ShuffledRegister* next = ShuffledRegister::lookup(gprs, fprs, mapIterator->second);
            next->previous = fprs + index;
            next->hasTo = true;

            if (!mapIterator->second.findInEntryLocation(entry).isSpilled)
                continue;
        }

        // If the non-speculative entry isn't using this register and it does not need
        // the value in this register to be placed into any other register, then this
        // register can be used for scratch.
        if (entry.m_fprInfo[index].nodeIndex == NoNode) {
            if (scratchFPR1 == InvalidFPRReg)
                scratchFPR1 = FPRInfo::toRegister(index);
            else if (scratchFPR2 == InvalidFPRReg)
                scratchFPR2 = FPRInfo::toRegister(index);
            ASSERT((scratchFPR1 == InvalidFPRReg && scratchFPR2 == InvalidFPRReg) || (scratchFPR1 != scratchFPR2));
        }

        if (check.m_fprInfo[index].isSpilled)
            continue;

        VirtualRegister virtualRegister = graph()[nodeIndex].virtualRegister();

        // Box the double (subtracting the tag is equivalent to adding the
        // double-encode offset) and store it to its spill slot.
        moveDoubleToPtr(FPRInfo::toRegister(index), scratchGPR);
        subPtr(GPRInfo::tagTypeNumberRegister, scratchGPR);
        storePtr(scratchGPR, addressFor(virtualRegister));
    }

#if !ASSERT_DISABLED
    // Assert that we've not assigned a scratch register to something that we're going to shuffle.
    ASSERT(scratchGPR != InvalidGPRReg);
    if (scratchGPR != GPRInfo::tagMaskRegister) {
        ASSERT(!gprs[GPRInfo::toIndex(scratchGPR)].hasTo);
        ASSERT(!gprs[GPRInfo::toIndex(scratchGPR)].hasFrom);
    }
    if (scratchFPR1 != InvalidFPRReg) {
        ASSERT(scratchFPR1 != scratchFPR2);
        ASSERT(!fprs[FPRInfo::toIndex(scratchFPR1)].hasTo);
        ASSERT(!fprs[FPRInfo::toIndex(scratchFPR1)].hasFrom);
        if (scratchFPR2 != InvalidFPRReg) {
            ASSERT(!fprs[FPRInfo::toIndex(scratchFPR2)].hasTo);
            ASSERT(!fprs[FPRInfo::toIndex(scratchFPR2)].hasFrom);
        }
    } else
        ASSERT(scratchFPR2 == InvalidFPRReg);
#endif

    // Part 2: For the set of nodes that are in registers on both paths,
    //         perform a shuffling.

    // Handle the open-ended (non-cycling) chains first; doing so may free up
    // additional FPR scratch registers for the cycles below.
    for (unsigned index = 0; index < GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters; ++index) {
        ShuffledRegister& reg = lookupForRegister(gprs, fprs, index);
        if (!reg.isEndOfNonCyclingPermutation() || reg.handled || (!reg.hasFrom && !reg.hasTo))
            continue;

        reg.handleNonCyclingPermutation(check, entry, *this, scratchFPR1, scratchFPR2);
        ASSERT((scratchFPR1 == InvalidFPRReg && scratchFPR2 == InvalidFPRReg) || (scratchFPR1 != scratchFPR2));
    }

    // Whatever remains unhandled must be part of a cycle.
    for (unsigned index = 0; index < GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters; ++index) {
        ShuffledRegister& reg = lookupForRegister(gprs, fprs, index);
        if (reg.handled || (!reg.hasFrom && !reg.hasTo))
            continue;

        reg.handleCyclingPermutation(check, entry, *this, scratchGPR, scratchFPR1, scratchFPR2);
        ASSERT((scratchFPR1 == InvalidFPRReg && scratchFPR2 == InvalidFPRReg) || (scratchFPR1 != scratchFPR2));
    }

#if !ASSERT_DISABLED
    for (unsigned index = 0; index < GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters; ++index) {
        ShuffledRegister& reg = lookupForRegister(gprs, fprs, index);
        ASSERT(reg.handled || (!reg.hasFrom && !reg.hasTo));
    }
#endif

    // Part 3: Fill any values that were spilled on speculative, but are not spilled
    //         on non-speculative.

    for (unsigned index = 0; index < FPRInfo::numberOfRegisters; ++index) {
        NodeIndex nodeIndex = entry.m_fprInfo[index].nodeIndex;
        if (nodeIndex == NoNode || entry.m_fprInfo[index].isSpilled)
            continue;

        NodeToRegisterMap::iterator mapIterator = checkNodeToRegisterMap.find(nodeIndex);
        if (mapIterator != checkNodeToRegisterMap.end()
            && !mapIterator->second.findInSpeculationCheck(check).isSpilled)
            continue;

        fillNumericToDouble(nodeIndex, FPRInfo::toRegister(index), GPRInfo::regT0);
    }

    for (unsigned index = 0; index < GPRInfo::numberOfRegisters; ++index) {
        NodeIndex nodeIndex = entry.m_gprInfo[index].nodeIndex;
        if (nodeIndex == NoNode || entry.m_gprInfo[index].isSpilled)
            continue;

        NodeToRegisterMap::iterator mapIterator = checkNodeToRegisterMap.find(nodeIndex);
        if (mapIterator != checkNodeToRegisterMap.end()
            && !mapIterator->second.findInSpeculationCheck(check).isSpilled)
            continue;

        DataFormat dataFormat = entry.m_gprInfo[index].format;
        if (dataFormat == DataFormatInteger)
            fillInt32ToInteger(nodeIndex, GPRInfo::toRegister(index));
        else {
            ASSERT((dataFormat & DataFormatJS) || dataFormat == DataFormatCell); // Treat cell as JSValue for now!
            fillToJS(nodeIndex, GPRInfo::toRegister(index));
            // FIXME: For subtypes of DataFormatJS, should jitAssert the subtype?
        }
    }

    if (needToRestoreTagMaskRegister)
        move(TrustedImmPtr(reinterpret_cast<void*>(TagMask)), GPRInfo::tagMaskRegister);

    // Jump into the non-speculative path.
    jump(entry.m_entry);
}

void JITCompiler::linkSpeculationChecks(SpeculativeJIT& speculative, NonSpeculativeJIT& nonSpeculative)
{
    // Iterators to walk over the set of bail outs & corresponding entry points.
    SpeculationCheckVector::Iterator checksIter = speculative.speculationChecks().begin();
    SpeculationCheckVector::Iterator checksEnd = speculative.speculationChecks().end();
    NonSpeculativeJIT::EntryLocationVector::Iterator entriesIter = nonSpeculative.entryLocations().begin();
    NonSpeculativeJIT::EntryLocationVector::Iterator entriesEnd = nonSpeculative.entryLocations().end();

    NodeToRegisterMap checkNodeToRegisterMap;
    NodeToRegisterMap entryNodeToRegisterMap;

    // Iterate over the speculation checks.
    while (checksIter != checksEnd) {
        // For every bail out from the speculative path, we must have provided an entry point
        // into the non-speculative one.
        ASSERT(checksIter->m_nodeIndex == entriesIter->m_nodeIndex);

        // There may be multiple bail outs that map to the same entry point!
        do {
            ASSERT(checksIter != checksEnd);
            ASSERT(entriesIter != entriesEnd);

            // Plant code to link this speculation failure.
            const SpeculationCheck& check = *checksIter;
            const EntryLocation& entry = *entriesIter;
            jumpFromSpeculativeToNonSpeculative(check, entry, speculative.speculationRecovery(check.m_recoveryIndex), checkNodeToRegisterMap, entryNodeToRegisterMap);
            ++checksIter;
        } while (checksIter != checksEnd && checksIter->m_nodeIndex == entriesIter->m_nodeIndex);
        ++entriesIter;
    }

    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56289
    ASSERT(!(checksIter != checksEnd));
    ASSERT(!(entriesIter != entriesEnd));
}
#endif // ENABLE(DFG_OSR_EXIT)

void JITCompiler::compileEntry()
{
    m_startOfCode = label();

    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast register file check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the register file
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler.)
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
}

void JITCompiler::compileBody()
{
    // We generate the speculative code path, followed by the non-speculative
    // code for the function. Next we need to link the two together, making
    // bail-outs from the speculative path jump to the corresponding point on
    // the non-speculative one (and generating any code necessary to juggle
    // register values around, rebox values, and ensure values are spilled, to
    // match the non-speculative path's requirements).

#if ENABLE(DFG_JIT_BREAK_ON_EVERY_FUNCTION)
    // Handy debug tool!
    breakpoint();
#endif

    // First generate the speculative path.
    Label speculativePathBegin = label();
    SpeculativeJIT speculative(*this);
#if !ENABLE(DFG_DEBUG_LOCAL_DISBALE_SPECULATIVE)
    bool compiledSpeculative = speculative.compile();
#else
    bool compiledSpeculative = false;
#endif

    // Next, generate the non-speculative path. We pass this a SpeculationCheckIndexIterator
    // to allow it to check which nodes in the graph may bail out, and may need to reenter the
    // non-speculative path.
    if (compiledSpeculative) {
#if ENABLE(DFG_OSR_ENTRY)
        m_codeBlock->setJITCodeMap(m_jitCodeMapEncoder.finish());
#endif

#if ENABLE(DFG_OSR_EXIT)
        linkOSRExits(speculative);
#else
        SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks());
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);

        // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one.
        linkSpeculationChecks(speculative, nonSpeculative);
#endif
    } else {
        // If compilation through the SpeculativeJIT failed, throw away the code we generated.
        m_calls.clear();
        m_propertyAccesses.clear();
        m_jsCalls.clear();
        m_methodGets.clear();
        rewindToLabel(speculativePathBegin);

#if ENABLE(DFG_OSR_EXIT)
        SpeculationCheckIndexIterator checkIterator;
#else
        SpeculationCheckVector noChecks;
        SpeculationCheckIndexIterator checkIterator(noChecks);
#endif
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);
    }

    // Iterate over the m_calls vector, checking for exception checks,
    // and linking them to here.
    for (unsigned i = 0; i < m_calls.size(); ++i) {
        Jump& exceptionCheck = m_calls[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            ++m_exceptionCheckCount;
        }
    }

    // If any exception checks were linked, generate code to lookup a handler.
    if (m_exceptionCheckCount) {
        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
        // an identifier for the operation that threw the exception, which we can use
        // to look up handler information. The identifier we use is the return address
        // of the call out from JIT code that threw the exception; this is still
        // available on the stack, just below the stack pointer!
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        peek(GPRInfo::argumentGPR1, -1);
        m_calls.append(CallRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
        // and the address of the handler in returnValueGPR2.
        jump(GPRInfo::returnValueGPR2);
    }
}

void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
#if ENABLE(DFG_DEBUG_VERBOSE)
    fprintf(stderr, "JIT code for %p start at [%p, %p)\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i) {
        if (m_calls[i].m_function.value())
            linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
    }

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionCheckCount);
        for (unsigned i = 0; i < m_calls.size(); ++i) {
            if (m_calls[i].m_handlesExceptions) {
                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call);
                unsigned exceptionInfo = m_calls[i].m_codeOrigin.bytecodeIndex();
                m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
            }
        }
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        info.callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_functionCall);
        info.u.unset.deltaCheckImmToCall = m_propertyAccesses[i].m_deltaCheckImmToCall;
        info.deltaCallToStructCheck = m_propertyAccesses[i].m_deltaCallToStructCheck;
        info.u.unset.deltaCallToLoadOrStore = m_propertyAccesses[i].m_deltaCallToLoadOrStore;
        info.deltaCallToSlowCase = m_propertyAccesses[i].m_deltaCallToSlowCase;
        info.deltaCallToDone = m_propertyAccesses[i].m_deltaCallToDone;
        info.baseGPR = m_propertyAccesses[i].m_baseGPR;
        info.valueGPR = m_propertyAccesses[i].m_valueGPR;
        info.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
    }

    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.isCall = m_jsCalls[i].m_isCall;
        info.callReturnLocation = CodeLocationLabel(linkBuffer.locationOf(m_jsCalls[i].m_slowCall));
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
    }

    m_codeBlock->addMethodCallLinkInfos(m_methodGets.size());
    for (unsigned i = 0; i < m_methodGets.size(); ++i) {
        MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
        info.cachedStructure.setLocation(linkBuffer.locationOf(m_methodGets[i].m_structToCompare));
        info.cachedPrototypeStructure.setLocation(linkBuffer.locationOf(m_methodGets[i].m_protoStructToCompare));
        info.cachedFunction.setLocation(linkBuffer.locationOf(m_methodGets[i].m_putFunction));
        info.cachedPrototype.setLocation(linkBuffer.locationOf(m_methodGets[i].m_protoObj));
        info.callReturnLocation = linkBuffer.locationOf(m_methodGets[i].m_slowCall);
    }
}

void JITCompiler::compile(JITCode& entry)
{
    // Preserve the return address to the callframe.
    compileEntry();
    // Generate the body of the program.
    compileBody();
    // Link the code.
    LinkBuffer linkBuffer(*m_globalData, this);
    link(linkBuffer);
    entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
}

void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Set up a pointer to the codeblock in the CallFrameHeader.
    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();

    // === Function body code generation ===
    compileBody();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow register file check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the register file check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);

    // === Link ===
    LinkBuffer linkBuffer(*m_globalData, this);
    link(linkBuffer);

    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
}

#if ENABLE(DFG_JIT_ASSERT)
void JITCompiler::jitAssertIsInt32(GPRReg gpr)
{
#if CPU(X86_64)
    Jump checkInt32 = branchPtr(BelowOrEqual, gpr, TrustedImmPtr(reinterpret_cast<void*>(static_cast<uintptr_t>(0xFFFFFFFFu))));
    breakpoint();
    checkInt32.link(this);
#else
    UNUSED_PARAM(gpr);
#endif
}

void JITCompiler::jitAssertIsJSInt32(GPRReg gpr)
{
    Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
    breakpoint();
    checkJSInt32.link(this);
}

void JITCompiler::jitAssertIsJSNumber(GPRReg gpr)
{
    Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
    breakpoint();
    checkJSNumber.link(this);
}

void JITCompiler::jitAssertIsJSDouble(GPRReg gpr)
{
    // An int32 falls through to the breakpoint; any other number skips past it.
    Jump checkJSInt32 = branchPtr(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
    Jump checkJSNumber = branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
    checkJSInt32.link(this);
    breakpoint();
    checkJSNumber.link(this);
}

void JITCompiler::jitAssertIsCell(GPRReg gpr)
{
    Jump checkCell = branchTestPtr(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
    breakpoint();
    checkCell.link(this);
}
#endif

#if ENABLE(SAMPLING_COUNTERS) && CPU(X86_64) // Or any other 64-bit platform!
void JITCompiler::emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, uint32_t increment)
{
    jit.addPtr(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
}
#endif

#if ENABLE(SAMPLING_COUNTERS) && CPU(X86) // Or any other little-endian 32-bit platform!
void JITCompiler::emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, uint32_t increment)
{
    // The counter is 64-bit, so add the low word first and then propagate the
    // carry into the high word.
    intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
    jit.add32(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
    jit.addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
}
#endif

#if ENABLE(SAMPLING_FLAGS)
void JITCompiler::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

void JITCompiler::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif

} } // namespace JSC::DFG

#endif // ENABLE(DFG_JIT)