/*
 * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CodeBlock_h
#define CodeBlock_h

#include "CompactJITCodeMap.h"
#include "EvalCodeCache.h"
#include "Instruction.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "PredictionTracker.h"
#include "RegExpObject.h"
#include "WeakReferenceHarvester.h"
#include "ValueProfile.h"
#include <wtf/FastAllocBase.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/SentinelLinkedList.h>
#include <wtf/Vector.h>
#include <limits>

#include "StructureStubInfo.h"

namespace JSC {

    class ExecState;

    // Register numbers used in bytecode operations have different meaning according to their ranges:
    //      0x80000000-0xFFFFFFFF  Negative indices from the CallFrame pointer are entries in the call frame, see RegisterFile.h.
    //      0x00000000-0x3FFFFFFF  Forwards indices from the CallFrame pointer are local vars and temporaries within the function's callframe.
    //      0x40000000-0x7FFFFFFF  Positive indices from 0x40000000 specify entries in the constant pool on the CodeBlock.
    static const int FirstConstantRegisterIndex = 0x40000000;
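
    // For illustration only (a sketch, not part of the original header): decoding
    // a bytecode register operand according to the ranges above. The names
    // codeBlock and callFrame stand in for whatever is in scope at the use site.
    //
    //     if (operand >= FirstConstantRegisterIndex) {
    //         // Constant pool entry; getConstant() below subtracts
    //         // FirstConstantRegisterIndex to index m_constantRegisters.
    //         JSValue value = codeBlock->getConstant(operand);
    //     } else if (operand >= 0) {
    //         // Local variable or temporary within the function's call frame.
    //         Register& reg = callFrame->r(operand);
    //     } else {
    //         // Negative: an entry in the call frame header (see RegisterFile.h).
    //     }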

    enum HasSeenShouldRepatch {
        hasSeenShouldRepatchFalse = 0,
        hasSeenShouldRepatchTrue = 1
    };

    enum CodeType { GlobalCode, EvalCode, FunctionCode };

    inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }

    static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }

    struct HandlerInfo {
        uint32_t start;
        uint32_t end;
        uint32_t target;
        uint32_t scopeDepth;
        CodeLocationLabel nativeCode;
    };

    struct ExpressionRangeInfo {
        enum {
            MaxOffset = (1 << 7) - 1,
            MaxDivot = (1 << 25) - 1
        };
        uint32_t instructionOffset : 25;
        uint32_t divotPoint : 25;
        uint32_t startOffset : 7;
        uint32_t endOffset : 7;
    };

    struct LineInfo {
        uint32_t instructionOffset;
        int lineNumber;
    };

    struct CallLinkInfo : public BasicRawSentinelNode<CallLinkInfo> {
        CallLinkInfo()
            : hasSeenShouldRepatch(false)
        {
        }

        ~CallLinkInfo()
        {
            if (isOnList())
                remove();
        }

        CodeLocationLabel callReturnLocation; // It's a near call in the old JIT, or a normal call in the DFG.
        CodeLocationDataLabelPtr hotPathBegin;
        CodeLocationNearCall hotPathOther;
        JITWriteBarrier<JSFunction> callee;
        bool hasSeenShouldRepatch : 1;

        bool isLinked() { return callee; }
        void unlink(JSGlobalData&, RepatchBuffer&);

        bool seenOnce()
        {
            return hasSeenShouldRepatch;
        }

        void setSeen()
        {
            hasSeenShouldRepatch = true;
        }
    };

    struct MethodCallLinkInfo {
        CodeLocationCall callReturnLocation;
        JITWriteBarrier<Structure> cachedStructure;
        JITWriteBarrier<Structure> cachedPrototypeStructure;
        // We'd like this to actually be JSFunction, but InternalFunction and JSFunction
        // don't have a common parent class, and we allow specialisation on both.
        JITWriteBarrier<JSObject> cachedFunction;
        JITWriteBarrier<JSObject> cachedPrototype;
    };

    struct GlobalResolveInfo {
        GlobalResolveInfo(unsigned bytecodeOffset)
            : offset(0)
            , bytecodeOffset(bytecodeOffset)
        {
        }

        WriteBarrier<Structure> structure;
        unsigned offset;
        unsigned bytecodeOffset;
    };

    // This structure is used to map from a call return location
    // (given as an offset in bytes into the JIT code) back to
    // the bytecode offset of the corresponding bytecode operation.
    // This is then used to look up the corresponding handler.
    struct CallReturnOffsetToBytecodeOffset {
        CallReturnOffsetToBytecodeOffset(unsigned callReturnOffset, unsigned bytecodeOffset)
            : callReturnOffset(callReturnOffset)
            , bytecodeOffset(bytecodeOffset)
        {
        }

        unsigned callReturnOffset;
        unsigned bytecodeOffset;
    };
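
    // For illustration only (a sketch, not part of the original header): when an
    // exception unwinds through JIT code, handler lookup is a two-step walk using
    // members defined on CodeBlock below:
    //
    //     unsigned bytecodeOffset = codeBlock->bytecodeOffset(returnAddress);
    //     HandlerInfo* handler = codeBlock->handlerForBytecodeOffset(bytecodeOffset);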

    // valueAtPosition helpers for the binarySearch algorithm.
    inline void* getStructureStubInfoReturnLocation(StructureStubInfo* structureStubInfo)
    {
        return structureStubInfo->callReturnLocation.executableAddress();
    }

    inline void* getCallLinkInfoReturnLocation(CallLinkInfo* callLinkInfo)
    {
        return callLinkInfo->callReturnLocation.executableAddress();
    }

    inline void* getMethodCallLinkInfoReturnLocation(MethodCallLinkInfo* methodCallLinkInfo)
    {
        return methodCallLinkInfo->callReturnLocation.executableAddress();
    }

    inline unsigned getCallReturnOffset(CallReturnOffsetToBytecodeOffset* pc)
    {
        return pc->callReturnOffset;
    }
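
    // Note (added for clarity, not in the original header): each helper above
    // projects an element to the key its vector is sorted by, so that
    // binarySearch<ElementType, KeyType, valueAtPosition>(begin, size, key) can
    // locate an element by key. The vectors must therefore be kept sorted by
    // call return location; see getStubInfo() and friends below for usage.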

    class CodeBlock : public WeakReferenceHarvester {
        WTF_MAKE_FAST_ALLOCATED;
        friend class JIT;
    protected:
        CodeBlock(ScriptExecutable* ownerExecutable, CodeType, JSGlobalObject*, PassRefPtr<SourceProvider>, unsigned sourceOffset, SymbolTable*, bool isConstructor, PassOwnPtr<CodeBlock> alternative);

        WriteBarrier<JSGlobalObject> m_globalObject;

    public:
        virtual ~CodeBlock();

        CodeBlock* alternative() { return m_alternative.get(); }
        PassOwnPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }

        void setPredictions(PassOwnPtr<PredictionTracker> predictions) { m_predictions = predictions; }
        PredictionTracker* predictions() const { return m_predictions.get(); }

        void visitAggregate(SlotVisitor&);
        void visitWeakReferences(SlotVisitor&);

        static void dumpStatistics();

#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING)
        void dump(ExecState*) const;
        void printStructures(const Instruction*) const;
        void printStructure(const char* name, const Instruction*, int operand) const;
#endif

        bool isStrictMode() const { return m_isStrictMode; }

        inline bool isKnownNotImmediate(int index)
        {
            if (index == m_thisRegister && !m_isStrictMode)
                return true;

            if (isConstantRegisterIndex(index))
                return getConstant(index).isCell();

            return false;
        }

        ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
        {
            return index >= m_numVars;
        }

        HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
        int lineNumberForBytecodeOffset(unsigned bytecodeOffset);
        void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset);

        StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
        {
            return *(binarySearch<StructureStubInfo, void*, getStructureStubInfoReturnLocation>(m_structureStubInfos.begin(), m_structureStubInfos.size(), returnAddress.value()));
        }

        CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
        {
            return *(binarySearch<CallLinkInfo, void*, getCallLinkInfoReturnLocation>(m_callLinkInfos.begin(), m_callLinkInfos.size(), returnAddress.value()));
        }

        MethodCallLinkInfo& getMethodCallLinkInfo(ReturnAddressPtr returnAddress)
        {
            return *(binarySearch<MethodCallLinkInfo, void*, getMethodCallLinkInfoReturnLocation>(m_methodCallLinkInfos.begin(), m_methodCallLinkInfos.size(), returnAddress.value()));
        }

        unsigned bytecodeOffset(ReturnAddressPtr returnAddress)
        {
            if (!m_rareData)
                return 1;
            Vector<CallReturnOffsetToBytecodeOffset>& callIndices = m_rareData->m_callReturnIndexVector;
            if (!callIndices.size())
                return 1;
            return binarySearch<CallReturnOffsetToBytecodeOffset, unsigned, getCallReturnOffset>(callIndices.begin(), callIndices.size(), getJITCode().offsetOf(returnAddress.value()))->bytecodeOffset;
        }

        bool hasIncomingCalls() { return m_incomingCalls.begin() != m_incomingCalls.end(); }

        void linkIncomingCall(CallLinkInfo* incoming)
        {
            m_incomingCalls.push(incoming);
        }

        void unlinkIncomingCalls();

#if ENABLE(TIERED_COMPILATION)
        void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
        {
            m_jitCodeMap = jitCodeMap;
        }
        CompactJITCodeMap* jitCodeMap()
        {
            return m_jitCodeMap.get();
        }
#endif

#if ENABLE(INTERPRETER)
        unsigned bytecodeOffset(Instruction* returnAddress)
        {
            return returnAddress - instructions().begin();
        }
#endif

        void setIsNumericCompareFunction(bool isNumericCompareFunction) { m_isNumericCompareFunction = isNumericCompareFunction; }
        bool isNumericCompareFunction() { return m_isNumericCompareFunction; }

        Vector<Instruction>& instructions() { return m_instructions; }
        void discardBytecode() { m_instructions.clear(); }

        unsigned instructionCount() { return m_instructionCount; }
        void setInstructionCount(unsigned instructionCount) { m_instructionCount = instructionCount; }

#if ENABLE(JIT)
        void setJITCode(const JITCode& code, MacroAssemblerCodePtr codeWithArityCheck)
        {
            m_jitCode = code;
            m_jitCodeWithArityCheck = codeWithArityCheck;
        }
        JITCode& getJITCode() { return m_jitCode; }
        JITCode::JITType getJITType() { return m_jitCode.jitType(); }
        ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
        virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
        virtual CodeBlock* replacement() = 0;
        virtual bool canCompileWithDFG() = 0;
        bool hasOptimizedReplacement()
        {
            ASSERT(getJITType() == JITCode::BaselineJIT);
            bool result = replacement()->getJITType() > getJITType();
            if (result)
                ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
            else {
                ASSERT(replacement()->getJITType() == JITCode::BaselineJIT);
                ASSERT(replacement() == this);
            }
            return result;
        }
#else
        JITCode::JITType getJITType() { return JITCode::BaselineJIT; }
#endif

        ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

        void setGlobalData(JSGlobalData* globalData) { m_globalData = globalData; }
        JSGlobalData* globalData() { return m_globalData; }

        void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
        int thisRegister() const { return m_thisRegister; }

        void setNeedsFullScopeChain(bool needsFullScopeChain) { m_needsFullScopeChain = needsFullScopeChain; }
        bool needsFullScopeChain() const { return m_needsFullScopeChain; }
        void setUsesEval(bool usesEval) { m_usesEval = usesEval; }
        bool usesEval() const { return m_usesEval; }

        void setArgumentsRegister(int argumentsRegister)
        {
            ASSERT(argumentsRegister != -1);
            m_argumentsRegister = argumentsRegister;
            ASSERT(usesArguments());
        }

        int argumentsRegister()
        {
            ASSERT(usesArguments());
            return m_argumentsRegister;
        }

        void setActivationRegister(int activationRegister)
        {
            m_activationRegister = activationRegister;
        }

        int activationRegister()
        {
            ASSERT(needsFullScopeChain());
            return m_activationRegister;
        }

        bool usesArguments() const { return m_argumentsRegister != -1; }

        CodeType codeType() const { return m_codeType; }

        SourceProvider* source() const { return m_source.get(); }
        unsigned sourceOffset() const { return m_sourceOffset; }

        size_t numberOfJumpTargets() const { return m_jumpTargets.size(); }
        void addJumpTarget(unsigned jumpTarget) { m_jumpTargets.append(jumpTarget); }
        unsigned jumpTarget(int index) const { return m_jumpTargets[index]; }
        unsigned lastJumpTarget() const { return m_jumpTargets.last(); }

        void createActivation(CallFrame*);

        void clearEvalCache();

#if ENABLE(INTERPRETER)
        void addPropertyAccessInstruction(unsigned propertyAccessInstruction)
        {
            if (!m_globalData->canUseJIT())
                m_propertyAccessInstructions.append(propertyAccessInstruction);
        }
        void addGlobalResolveInstruction(unsigned globalResolveInstruction)
        {
            if (!m_globalData->canUseJIT())
                m_globalResolveInstructions.append(globalResolveInstruction);
        }
        bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
#endif

#if ENABLE(JIT)
        void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
        size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
        StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }

        void addGlobalResolveInfo(unsigned globalResolveInstruction)
        {
            if (m_globalData->canUseJIT())
                m_globalResolveInfos.append(GlobalResolveInfo(globalResolveInstruction));
        }
        GlobalResolveInfo& globalResolveInfo(int index) { return m_globalResolveInfos[index]; }
        bool hasGlobalResolveInfoAtBytecodeOffset(unsigned bytecodeOffset);

        void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
        size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
        CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }

        void addMethodCallLinkInfos(unsigned n) { ASSERT(m_globalData->canUseJIT()); m_methodCallLinkInfos.grow(n); }
        MethodCallLinkInfo& methodCallLinkInfo(int index) { return m_methodCallLinkInfos[index]; }
#endif

#if ENABLE(VALUE_PROFILER)
        ValueProfile* addValueProfile(int bytecodeOffset)
        {
            m_valueProfiles.append(ValueProfile(bytecodeOffset));
            return &m_valueProfiles.last();
        }
        unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
        ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
        ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
        {
            return WTF::genericBinarySearch<ValueProfile, int, getValueProfileBytecodeOffset>(m_valueProfiles, m_valueProfiles.size(), bytecodeOffset);
        }

        ValueProfile* valueProfileForArgument(int argumentIndex)
        {
            int index = argumentIndex - 1;
            if (static_cast<unsigned>(index) >= m_valueProfiles.size())
                return 0;
            ValueProfile* result = valueProfile(index);
            if (result->m_bytecodeOffset != -1)
                return 0;
            return result;
        }
#endif
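
        // Note (added for clarity, not in the original header): argument profiles
        // occupy the first slots of m_valueProfiles and are tagged with
        // m_bytecodeOffset == -1, which is why valueProfileForArgument() above
        // rejects any profile that carries a real bytecode offset.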

        unsigned globalResolveInfoCount() const
        {
#if ENABLE(JIT)
            if (m_globalData->canUseJIT())
                return m_globalResolveInfos.size();
#endif
            return 0;
        }

        // Exception handling support

        size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
        void addExceptionHandler(const HandlerInfo& handler) { createRareDataIfNecessary(); m_rareData->m_exceptionHandlers.append(handler); }
        HandlerInfo& exceptionHandler(int index) { ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

        void addExpressionInfo(const ExpressionRangeInfo& expressionInfo)
        {
            createRareDataIfNecessary();
            m_rareData->m_expressionInfo.append(expressionInfo);
        }

        void addLineInfo(unsigned bytecodeOffset, int lineNo)
        {
            createRareDataIfNecessary();
            Vector<LineInfo>& lineInfo = m_rareData->m_lineInfo;
            if (!lineInfo.size() || lineInfo.last().lineNumber != lineNo) {
                LineInfo info = { bytecodeOffset, lineNo };
                lineInfo.append(info);
            }
        }

        bool hasExpressionInfo() { return m_rareData && m_rareData->m_expressionInfo.size(); }
        bool hasLineInfo() { return m_rareData && m_rareData->m_lineInfo.size(); }
        // We only generate exception handling info if the user is debugging
        // (and may want line number info), or if the function contains an exception handler.
        bool needsCallReturnIndices()
        {
            return m_rareData
                && (m_rareData->m_expressionInfo.size() || m_rareData->m_lineInfo.size() || m_rareData->m_exceptionHandlers.size());
        }

#if ENABLE(JIT)
        Vector<CallReturnOffsetToBytecodeOffset>& callReturnIndexVector()
        {
            createRareDataIfNecessary();
            return m_rareData->m_callReturnIndexVector;
        }
#endif

        size_t numberOfIdentifiers() const { return m_identifiers.size(); }
        void addIdentifier(const Identifier& i) { m_identifiers.append(i); }
        Identifier& identifier(int index) { return m_identifiers[index]; }

        size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
        void addConstant(JSValue v)
        {
            m_constantRegisters.append(WriteBarrier<Unknown>());
            m_constantRegisters.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), v);
        }
        WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
        ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
        ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }

        unsigned addFunctionDecl(FunctionExecutable* n)
        {
            unsigned size = m_functionDecls.size();
            m_functionDecls.append(WriteBarrier<FunctionExecutable>());
            m_functionDecls.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
            return size;
        }
        FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
        int numberOfFunctionDecls() { return m_functionDecls.size(); }
        unsigned addFunctionExpr(FunctionExecutable* n)
        {
            unsigned size = m_functionExprs.size();
            m_functionExprs.append(WriteBarrier<FunctionExecutable>());
            m_functionExprs.last().set(m_globalObject->globalData(), m_ownerExecutable.get(), n);
            return size;
        }
        FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

        unsigned addRegExp(RegExp* r)
        {
            createRareDataIfNecessary();
            unsigned size = m_rareData->m_regexps.size();
            m_rareData->m_regexps.append(WriteBarrier<RegExp>(*m_globalData, ownerExecutable(), r));
            return size;
        }
        unsigned numberOfRegExps() const
        {
            if (!m_rareData)
                return 0;
            return m_rareData->m_regexps.size();
        }
        RegExp* regexp(int index) const { ASSERT(m_rareData); return m_rareData->m_regexps[index].get(); }

        unsigned addConstantBuffer(unsigned length)
        {
            createRareDataIfNecessary();
            unsigned size = m_rareData->m_constantBuffers.size();
            m_rareData->m_constantBuffers.append(Vector<JSValue>(length));
            return size;
        }

        JSValue* constantBuffer(unsigned index)
        {
            ASSERT(m_rareData);
            return m_rareData->m_constantBuffers[index].data();
        }

        JSGlobalObject* globalObject() { return m_globalObject.get(); }

        size_t numberOfImmediateSwitchJumpTables() const { return m_rareData ? m_rareData->m_immediateSwitchJumpTables.size() : 0; }
        SimpleJumpTable& addImmediateSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_immediateSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_immediateSwitchJumpTables.last(); }
        SimpleJumpTable& immediateSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_immediateSwitchJumpTables[tableIndex]; }

        size_t numberOfCharacterSwitchJumpTables() const { return m_rareData ? m_rareData->m_characterSwitchJumpTables.size() : 0; }
        SimpleJumpTable& addCharacterSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_characterSwitchJumpTables.append(SimpleJumpTable()); return m_rareData->m_characterSwitchJumpTables.last(); }
        SimpleJumpTable& characterSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_characterSwitchJumpTables[tableIndex]; }

        size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
        StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
        StringJumpTable& stringSwitchJumpTable(int tableIndex) { ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }

        SymbolTable* symbolTable() { return m_symbolTable; }
        SharedSymbolTable* sharedSymbolTable() { ASSERT(m_codeType == FunctionCode); return static_cast<SharedSymbolTable*>(m_symbolTable); }

        EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }

        void copyDataFromAlternative();

        // Functions for controlling when tiered compilation kicks in. This
        // controls both when the optimizing compiler is invoked and when OSR
        // entry happens. Two triggers exist: the loop trigger and the return
        // trigger. In either case, when an addition to m_executeCounter
        // causes it to become non-negative, the optimizing compiler is
        // invoked. This includes a fast check to see if this CodeBlock has
        // already been optimized (i.e. replacement() returns a CodeBlock
        // that was optimized with a higher tier JIT than this one). In the
        // case of the loop trigger, if the optimized compilation succeeds
        // (or has already succeeded in the past) then OSR is attempted to
        // redirect program flow into the optimized code.

        // These functions are called from within the optimization triggers,
        // and are used as a single point at which we define the heuristics
        // for how much warm-up is mandated before the next optimization
        // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
        // as this is called from the CodeBlock constructor.

        // These functions are provided to support calling
        // optimizeAfterWarmUp() from JIT-generated code.
        int32_t counterValueForOptimizeAfterWarmUp()
        {
            return -1000;
        }

        int32_t* addressOfExecuteCounter()
        {
            return &m_executeCounter;
        }
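
        // For illustration only (a sketch, not part of the original header): the
        // baseline JIT bumps the counter at loop back edges and returns, and calls
        // into the runtime when it crosses zero; morally:
        //
        //     m_executeCounter += executeCounterIncrementForLoop;   // or ...ForReturn
        //     if (m_executeCounter >= 0)
        //         considerOptimizing();  // hypothetical runtime hook: may DFG-compile
        //                                // this CodeBlock and OSR-enter at a loop head
        //
        // addressOfExecuteCounter() exists so that generated code can perform this
        // add-and-branch directly against the counter in memory.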

        // Call this to force the next optimization trigger to fire. This is
        // rarely wise, since optimization triggers are typically more
        // expensive than executing baseline code.
        void optimizeNextInvocation()
        {
            m_executeCounter = 0;
        }

        // Call this to prevent optimization from happening again. Note that
        // optimization will still happen after roughly 2^29 invocations,
        // so this is really meant to delay that as much as possible. This
        // is called if optimization failed, and we expect it to fail in
        // the future as well.
        void dontOptimizeAnytimeSoon()
        {
            m_executeCounter = std::numeric_limits<int32_t>::min();
        }

        // Call this to reinitialize the counter to its starting state,
        // forcing a warm-up to happen before the next optimization trigger
        // fires. This is called in the CodeBlock constructor. It also
        // makes sense to call this if an OSR exit occurred. Note that
        // OSR exit code is generated code, so the value of the execute
        // counter that this corresponds to is also available to it directly.
        void optimizeAfterWarmUp()
        {
            m_executeCounter = counterValueForOptimizeAfterWarmUp();
        }

        // Call this to cause an optimization trigger to fire soon, but
        // not necessarily the next one. This makes sense if optimization
        // succeeds. Successful optimization means that all calls are
        // relinked to the optimized code, so this only affects call
        // frames that are still executing this CodeBlock. The value here
        // is tuned to strike a balance between the cost of OSR entry
        // (which is too high to warrant making every loop back edge
        // trigger OSR immediately) and the cost of executing baseline
        // code (which is high enough that we don't necessarily want to
        // have a full warm-up). The intuition for calling this instead of
        // optimizeNextInvocation() is for the case of recursive functions
        // with loops. Consider that there may be N call frames of some
        // recursive function, for a reasonably large value of N. The top
        // one triggers optimization, and then returns, and then all of
        // the others return. We don't want optimization to be triggered on
        // each return, as that would be superfluous. It only makes sense
        // to trigger optimization if one of those functions becomes hot
        // in the baseline code.
        void optimizeSoon()
        {
            m_executeCounter = -100;
        }

        // The amounts by which the JIT will increment m_executeCounter.
        static const unsigned executeCounterIncrementForLoop = 1;
        static const unsigned executeCounterIncrementForReturn = 15;
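
        // Worked example (added for clarity; it assumes the warm-up value of
        // -1000 above): starting from optimizeAfterWarmUp(), the counter reaches
        // zero after about 1000 loop iterations, about 67 returns, or any mix
        // weighting a return the same as 15 back edges; after optimizeSoon()
        // (-100), about 100 iterations or 7 returns suffice.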

#if ENABLE(VALUE_PROFILER)
        bool shouldOptimizeNow();
#else
        bool shouldOptimizeNow() { return false; }
#endif

#if ENABLE(VERBOSE_VALUE_PROFILE)
        void dumpValueProfiles();
#endif

        // FIXME: Make these remaining members private.

        int m_numCalleeRegisters;
        int m_numVars;
        int m_numCapturedVars;
        bool m_isConstructor;

    private:
#if !defined(NDEBUG) || ENABLE(OPCODE_SAMPLING)
        void dump(ExecState*, const Vector<Instruction>::const_iterator& begin, Vector<Instruction>::const_iterator&) const;

        CString registerName(ExecState*, int r) const;
        void printUnaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
        void printBinaryOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
        void printConditionalJump(ExecState*, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator&, int location, const char* op) const;
        void printGetByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
        void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
#endif
        void visitStructures(SlotVisitor&, Instruction* vPC) const;

        void createRareDataIfNecessary()
        {
            if (!m_rareData)
                m_rareData = adoptPtr(new RareData);
        }

        WriteBarrier<ScriptExecutable> m_ownerExecutable;
        JSGlobalData* m_globalData;

        Vector<Instruction> m_instructions;
        unsigned m_instructionCount;

        int m_thisRegister;
        int m_argumentsRegister;
        int m_activationRegister;

        bool m_needsFullScopeChain;
        bool m_usesEval;
        bool m_isNumericCompareFunction;
        bool m_isStrictMode;

        CodeType m_codeType;

        RefPtr<SourceProvider> m_source;
        unsigned m_sourceOffset;

#if ENABLE(INTERPRETER)
        Vector<unsigned> m_propertyAccessInstructions;
        Vector<unsigned> m_globalResolveInstructions;
#endif
#if ENABLE(JIT)
        Vector<StructureStubInfo> m_structureStubInfos;
        Vector<GlobalResolveInfo> m_globalResolveInfos;
        Vector<CallLinkInfo> m_callLinkInfos;
        Vector<MethodCallLinkInfo> m_methodCallLinkInfos;
        JITCode m_jitCode;
        MacroAssemblerCodePtr m_jitCodeWithArityCheck;
        SentinelLinkedList<CallLinkInfo, BasicRawSentinelNode<CallLinkInfo> > m_incomingCalls;
#endif
#if ENABLE(TIERED_COMPILATION)
        OwnPtr<CompactJITCodeMap> m_jitCodeMap;
#endif
#if ENABLE(VALUE_PROFILER)
        SegmentedVector<ValueProfile, 8> m_valueProfiles;
#endif

        Vector<unsigned> m_jumpTargets;
        Vector<unsigned> m_loopTargets;

        Vector<Identifier> m_identifiers;
        COMPILE_ASSERT(sizeof(Register) == sizeof(WriteBarrier<Unknown>), Register_must_be_same_size_as_WriteBarrier_Unknown);
        Vector<WriteBarrier<Unknown> > m_constantRegisters;
        Vector<WriteBarrier<FunctionExecutable> > m_functionDecls;
        Vector<WriteBarrier<FunctionExecutable> > m_functionExprs;

        SymbolTable* m_symbolTable;

        OwnPtr<CodeBlock> m_alternative;

        OwnPtr<PredictionTracker> m_predictions;

        int32_t m_executeCounter;
        uint8_t m_optimizationDelayCounter;

        struct RareData {
            WTF_MAKE_FAST_ALLOCATED;
        public:
            Vector<HandlerInfo> m_exceptionHandlers;

            Vector<WriteBarrier<RegExp> > m_regexps;

            // Buffers used for large array literals
            Vector<Vector<JSValue> > m_constantBuffers;

            Vector<SimpleJumpTable> m_immediateSwitchJumpTables;
            Vector<SimpleJumpTable> m_characterSwitchJumpTables;
            Vector<StringJumpTable> m_stringSwitchJumpTables;

            EvalCodeCache m_evalCodeCache;

            // Expression info - present if debugging.
            Vector<ExpressionRangeInfo> m_expressionInfo;
            // Line info - present if profiling or debugging.
            Vector<LineInfo> m_lineInfo;

#if ENABLE(JIT)
            Vector<CallReturnOffsetToBytecodeOffset> m_callReturnIndexVector;
#endif
        };

        friend void WTF::deleteOwnedPtr<RareData>(RareData*);

        OwnPtr<RareData> m_rareData;
    };

    // Program code is not marked by any function, so we make the global object
    // responsible for marking it.

    class GlobalCodeBlock : public CodeBlock {
    protected:
        GlobalCodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, PassOwnPtr<CodeBlock> alternative)
            : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, &m_unsharedSymbolTable, false, alternative)
        {
        }

    private:
        SymbolTable m_unsharedSymbolTable;
    };

    class ProgramCodeBlock : public GlobalCodeBlock {
    public:
        ProgramCodeBlock(ProgramExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, PassOwnPtr<CodeBlock> alternative)
            : GlobalCodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, 0, alternative)
        {
        }

    protected:
        virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
        virtual CodeBlock* replacement();
        virtual bool canCompileWithDFG();
    };

    class EvalCodeBlock : public GlobalCodeBlock {
    public:
        EvalCodeBlock(EvalExecutable* ownerExecutable, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, int baseScopeDepth, PassOwnPtr<CodeBlock> alternative)
            : GlobalCodeBlock(ownerExecutable, EvalCode, globalObject, sourceProvider, 0, alternative)
            , m_baseScopeDepth(baseScopeDepth)
        {
        }

        int baseScopeDepth() const { return m_baseScopeDepth; }

        const Identifier& variable(unsigned index) { return m_variables[index]; }
        unsigned numVariables() { return m_variables.size(); }
        void adoptVariables(Vector<Identifier>& variables)
        {
            ASSERT(m_variables.isEmpty());
            m_variables.swap(variables);
        }

    protected:
        virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
        virtual CodeBlock* replacement();
        virtual bool canCompileWithDFG();

    private:
        int m_baseScopeDepth;
        Vector<Identifier> m_variables;
    };

    class FunctionCodeBlock : public CodeBlock {
    public:
        // Rather than using the usual RefCounted::create idiom for SharedSymbolTable we just use new:
        // we need to initialise the CodeBlock before we can initialise any RefPtr that would hold the
        // shared symbol table, so we pass it in as a raw pointer with a ref count of 1, and manually
        // deref in the destructor.
        FunctionCodeBlock(FunctionExecutable* ownerExecutable, CodeType codeType, JSGlobalObject* globalObject, PassRefPtr<SourceProvider> sourceProvider, unsigned sourceOffset, bool isConstructor, PassOwnPtr<CodeBlock> alternative)
            : CodeBlock(ownerExecutable, codeType, globalObject, sourceProvider, sourceOffset, SharedSymbolTable::create().leakRef(), isConstructor, alternative)
        {
        }

        ~FunctionCodeBlock()
        {
            sharedSymbolTable()->deref();
        }

    protected:
        virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
        virtual CodeBlock* replacement();
        virtual bool canCompileWithDFG();
    };

    inline Register& ExecState::r(int index)
    {
        CodeBlock* codeBlock = this->codeBlock();
        if (codeBlock->isConstantRegisterIndex(index))
            return *reinterpret_cast<Register*>(&codeBlock->constantRegister(index));
        return this[index];
    }

    inline Register& ExecState::uncheckedR(int index)
    {
        ASSERT(index < FirstConstantRegisterIndex);
        return this[index];
    }

} // namespace JSC

#endif // CodeBlock_h