/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef JITInlineMethods_h
#define JITInlineMethods_h

#if ENABLE(JIT)

namespace JSC {
/* Deprecated: Please use JITStubCall instead. */

ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
    unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
    peek(dst, argumentStackOffset);
}
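// A minimal sketch of the preferred JITStubCall pattern the deprecation note
// refers to (illustrative only; the stub function and operand below are
// placeholders, not taken from this file -- see JITStubCall.h for the real
// interface):
//
//     JITStubCall stubCall(this, cti_op_new_object);
//     stubCall.call(currentInstruction[1].u.operand);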
ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}

ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
    ASSERT(m_codeBlock->isConstantRegisterIndex(src));
    return m_codeBlock->getConstant(src);
}

ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(from, payloadFor(entry, callFrameRegister));
}
ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
#endif
    storePtr(from, payloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
    store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
    store32(from, intPayloadFor(entry, callFrameRegister));
}

ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
    storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}

ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    loadPtr(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
    failures.append(branchPtr(NotEqual, Address(src), TrustedImmPtr(m_globalData->jsStringVPtr)));
    failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
    failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));

    loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
    loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
    load16(MacroAssembler::Address(dst, 0), dst);
}
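// The three branches above bail out to `failures` unless src holds a JSString
// (vptr check) that is not a rope (zero fiber count) and has length 1; only
// then is the single 16-bit character loaded into dst.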
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
    load32(Address(from, entry * sizeof(Register)), to);
#if USE(JSVALUE64)
    killLastResultRegister();
#endif
}
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    Call nakedCall = nearCall();
    m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
    return nakedCall;
}
ALWAYS_INLINE bool JIT::atJumpTarget()
{
    while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
        if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
            return true;
        ++m_jumpTargetsPosition;
    }
    return false;
}
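// The begin/endUninterruptedSequence pair below reserves assembler and
// constant-pool space up front, so that backends with a constant pool cannot
// flush the pool (or otherwise emit unexpected instructions) in the middle of
// a sequence whose exact layout is later relied upon for patching.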
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL

ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
    JSInterfaceJIT::beginUninterruptedSequence();
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
    // Ensure the label after the sequence can also fit
    insnSpace += sizeof(ARMWord);
    constSpace += sizeof(uint64_t);
#endif

    ensureSpace(insnSpace, constSpace);

#elif CPU(SH4)
#ifndef NDEBUG
    insnSpace += sizeof(SH4Word);
    constSpace += sizeof(uint64_t);
#endif

    m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
#endif

#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
#ifndef NDEBUG
    m_uninterruptedInstructionSequenceBegin = label();
    m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
#endif
#endif
}
ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
{
    UNUSED_PARAM(dst);
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
    /* There are several cases when the uninterrupted sequence is larger than
     * the maximum required offset for patching the same sequence. E.g.: if in
     * an uninterrupted sequence the last macroassembler's instruction is a
     * stub call, it emits store instruction(s) which should not be included
     * in the calculation of the length of the uninterrupted sequence. So, the
     * insnSpace and constSpace should be an upper limit instead of a hard
     * limit.
     */
#if CPU(SH4)
    if ((dst > 15) || (dst < -16)) {
        insnSpace += 8;
        constSpace += 2;
    }

    if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
        insnSpace += 8;
#endif
    ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
    ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#endif
    JSInterfaceJIT::endUninterruptedSequence();
}

#endif
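// (The SH4 adjustments above illustrate the point of that comment: operands
// outside the short-offset ranges need extra fixup instructions, so the
// declared limits are padded before the ASSERTs rather than treated as exact.)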
#if CPU(ARM)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(linkRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, linkRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, linkRegister);
}

#elif CPU(SH4)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    m_assembler.stspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    m_assembler.ldspr(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtrLinkReg(address);
}

#elif CPU(MIPS)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    move(returnAddressRegister, reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    move(reg, returnAddressRegister);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    loadPtr(address, returnAddressRegister);
}
#else // CPU(X86) || CPU(X86_64)

ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
    pop(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
    push(reg);
}

ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
    push(address);
}

#endif
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
    move(stackPointerRegister, firstArgumentRegister);
    poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}

ALWAYS_INLINE void JIT::updateTopCallFrame()
{
    storePtr(callFrameRegister, &m_globalData->topCallFrame);
}
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
    // Within a trampoline the return address will be on the stack at this point.
    addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
    move(stackPointerRegister, firstArgumentRegister);
#elif CPU(SH4)
    move(stackPointerRegister, firstArgumentRegister);
#endif
    // In the trampoline on x86-64, the first argument register is not overwritten.
}
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
    return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}
ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        linkSlowCase(iter);
}
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    const JumpList::JumpVector& jumpVector = jumpList.jumps();
    size_t size = jumpVector.size();
    for (size_t i = 0; i < size; ++i)
        m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}

ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}
ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
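// addJump defers linking until the end of compilation, when all bytecode
// targets are known; emitJumpSlowToHot can link immediately because slow-path
// code is generated only after every hot-path label in m_labels exists.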
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
{
    return branch8(NotEqual, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}
#if ENABLE(SAMPLING_FLAGS)
ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}

ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
    ASSERT(flag >= 1);
    ASSERT(flag <= 32);
    and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
}
#endif
#if ENABLE(SAMPLING_COUNTERS)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if CPU(X86_64) // Or any other 64-bit platform.
    addPtr(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
#elif CPU(X86) // Or any other little-endian 32-bit platform.
    intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
    add32(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
    addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_COUNTERS not implemented on this platform."
#endif
}
#endif
#if ENABLE(OPCODE_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
    storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif

#if ENABLE(CODEBLOCK_SAMPLING)
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
    storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
    storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif
ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
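// Inline allocation fast path: the template below pops a cell off the size
// class's free list (taking the slow case if the list is empty) and
// initializes the cell header in line, leaving the property storage pointer
// just past the object header.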
template <typename ClassType, typename StructureType> inline void JIT::emitAllocateBasicJSObject(StructureType structure, void* vtable, RegisterID result, RegisterID storagePtr)
{
    NewSpace::SizeClass* sizeClass = &m_globalData->heap.sizeClassFor(sizeof(ClassType));
    loadPtr(&sizeClass->firstFreeCell, result);
    addSlowCase(branchTestPtr(Zero, result));

    // remove the object from the free list
    loadPtr(Address(result), storagePtr);
    storePtr(storagePtr, &sizeClass->firstFreeCell);

    // initialize the object's vtable
    storePtr(TrustedImmPtr(vtable), Address(result));

    // initialize the object's structure
    storePtr(structure, Address(result, JSCell::structureOffset()));

    // initialize the inheritor ID
    storePtr(TrustedImmPtr(0), Address(result, JSObject::offsetOfInheritorID()));

    // initialize the object's property storage pointer
    addPtr(TrustedImm32(sizeof(JSObject)), result, storagePtr);
    storePtr(storagePtr, Address(result, ClassType::offsetOfPropertyStorage()));
}
template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch)
{
    emitAllocateBasicJSObject<JSFinalObject>(structure, m_globalData->jsFinalObjectVPtr, result, scratch);
}
inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, RegisterID scopeChain, RegisterID result, RegisterID storagePtr)
{
    emitAllocateBasicJSObject<JSFunction>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), m_globalData->jsFunctionVPtr, result, storagePtr);

    // store the function's scope chain
    storePtr(scopeChain, Address(result, JSFunction::offsetOfScopeChain()));

    // store the function's executable member
    storePtr(TrustedImmPtr(executable), Address(result, JSFunction::offsetOfExecutable()));

    // store the function's name
    ASSERT(executable->nameValue());
    int functionNameOffset = sizeof(JSValue) * m_codeBlock->globalObject()->functionNameOffset();
    storePtr(TrustedImmPtr(executable->nameValue()), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#if USE(JSVALUE32_64)
    store32(TrustedImm32(JSValue::CellTag), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
#endif
}
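// Note on emitAllocateJSFunction above: the name stores index off regT1
// directly, which appears to assume the caller passed regT1 as storagePtr;
// after allocation, storagePtr holds the function's inline property storage,
// and functionNameOffset is an offset into that storage.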
#if ENABLE(VALUE_PROFILER)
inline void JIT::emitValueProfilingSite(ValueProfilingSiteKind siteKind)
{
    if (!shouldEmitProfiling())
        return;

    const RegisterID value = regT0;
    const RegisterID scratch = regT3;

    ValueProfile* valueProfile;
    if (siteKind == FirstProfilingSite)
        valueProfile = m_codeBlock->addValueProfile(m_bytecodeOffset);
    else {
        ASSERT(siteKind == SubsequentProfilingSite);
        valueProfile = m_codeBlock->valueProfileForBytecodeOffset(m_bytecodeOffset);
    }

    ASSERT(valueProfile);

    if (m_randomGenerator.getUint32() & 1)
        add32(Imm32(1), bucketCounterRegister);
    else
        add32(Imm32(3), bucketCounterRegister);
    and32(Imm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
    move(ImmPtr(valueProfile->m_buckets), scratch);
    storePtr(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
}
#endif
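// Note on emitValueProfilingSite above: the pseudo-randomly chosen +1/+3
// stride is odd and the bucket count is a power of two, so successive
// profiling events walk the bucket cursor through every bucket instead of
// repeatedly overwriting the same one.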
#if USE(JSVALUE32_64)

inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
    RegisterID mappedTag;
    if (getMappedTag(index, mappedTag)) {
        move(mappedTag, tag);
        unmap(tag);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).tag()), tag);
        unmap(tag);
        return;
    }

    load32(tagFor(index), tag);
    unmap(tag);
}
inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
{
    RegisterID mappedPayload;
    if (getMappedPayload(index, mappedPayload)) {
        move(mappedPayload, payload);
        unmap(payload);
        return;
    }

    if (m_codeBlock->isConstantRegisterIndex(index)) {
        move(Imm32(getConstantOperand(index).payload()), payload);
        unmap(payload);
        return;
    }

    load32(payloadFor(index), payload);
    unmap(payload);
}
inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
{
    move(Imm32(v.payload()), payload);
    move(Imm32(v.tag()), tag);
}

inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    ASSERT(tag != payload);

    if (base == callFrameRegister) {
        ASSERT(payload != base);
        emitLoadPayload(index, payload);
        emitLoadTag(index, tag);
        return;
    }

    if (payload == base) { // avoid stomping base
        load32(tagFor(index, base), tag);
        load32(payloadFor(index, base), payload);
        return;
    }

    load32(payloadFor(index, base), payload);
    load32(tagFor(index, base), tag);
}
inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
{
    if (isMapped(index1)) {
        emitLoad(index1, tag1, payload1);
        emitLoad(index2, tag2, payload2);
        return;
    }
    emitLoad(index2, tag2, payload2);
    emitLoad(index1, tag1, payload1);
}
inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
        convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
    } else
        convertInt32ToDouble(payloadFor(index), value);
}
inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
{
    store32(payload, payloadFor(index, base));
    store32(tag, tagFor(index, base));
}

inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreInt32(unsigned index, TrustedImm32 payload, bool indexIsInt32)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsInt32)
        store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsCell)
        store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreBool(unsigned index, RegisterID payload, bool indexIsBool)
{
    store32(payload, payloadFor(index, callFrameRegister));
    if (!indexIsBool)
        store32(TrustedImm32(JSValue::BooleanTag), tagFor(index, callFrameRegister));
}

inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
{
    storeDouble(value, addressFor(index));
}

inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
{
    store32(Imm32(constant.payload()), payloadFor(index, base));
    store32(Imm32(constant.tag()), tagFor(index, base));
}
ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    emitStore(dst, jsUndefined());
}

inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
    for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
        unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
        if (jumpTarget == bytecodeOffset)
            return true;
        if (jumpTarget > bytecodeOffset)
            return false;
    }
    return false;
}
inline void JIT::map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
    if (isLabeled(bytecodeOffset))
        return;

    m_mappedBytecodeOffset = bytecodeOffset;
    m_mappedVirtualRegisterIndex = virtualRegisterIndex;
    m_mappedTag = tag;
    m_mappedPayload = payload;
}
inline void JIT::unmap(RegisterID registerID)
{
    if (m_mappedTag == registerID)
        m_mappedTag = (RegisterID)-1;
    else if (m_mappedPayload == registerID)
        m_mappedPayload = (RegisterID)-1;
}

inline void JIT::unmap()
{
    m_mappedBytecodeOffset = (unsigned)-1;
    m_mappedVirtualRegisterIndex = (unsigned)-1;
    m_mappedTag = (RegisterID)-1;
    m_mappedPayload = (RegisterID)-1;
}
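// The mapping above is a one-entry cache: it records that, for the current
// bytecode offset, the tag and payload of one virtual register are live in a
// pair of hardware registers. emitLoadTag/emitLoadPayload consult it to elide
// redundant loads, and it must be invalidated whenever those registers are
// clobbered or a jump target is reached (see the isLabeled() check in map()).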
inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    return true;
}

inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedPayload == (RegisterID)-1)
        return false;
    payload = m_mappedPayload;
    return true;
}

inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
    if (m_mappedBytecodeOffset != m_bytecodeOffset)
        return false;
    if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
        return false;
    if (m_mappedTag == (RegisterID)-1)
        return false;
    tag = m_mappedTag;
    return true;
}
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
    }
}

inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
        if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
            addSlowCase(jump());
        else
            addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
    }
}
inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
{
    if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
        linkSlowCase(iter);
}
ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}
ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
{
    if (isOperandConstantImmediateInt(op1)) {
        constant = getConstantOperand(op1).asInt32();
        op = op2;
        return true;
    }

    if (isOperandConstantImmediateInt(op2)) {
        constant = getConstantOperand(op2).asInt32();
        op = op1;
        return true;
    }

    return false;
}
#else // USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::killLastResultRegister()
{
    m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
}
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
    ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.

    // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
    if (m_codeBlock->isConstantRegisterIndex(src)) {
        JSValue value = m_codeBlock->getConstant(src);
        move(ImmPtr(JSValue::encode(value)), dst);
        killLastResultRegister();
        return;
    }

    if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
        // The argument we want is already stored in eax
        if (dst != cachedResultRegister)
            move(cachedResultRegister, dst);
        killLastResultRegister();
        return;
    }

    loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
    killLastResultRegister();
}
ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
    if (src2 == m_lastResultBytecodeRegister) {
        emitGetVirtualRegister(src2, dst2);
        emitGetVirtualRegister(src1, dst1);
    } else {
        emitGetVirtualRegister(src1, dst1);
        emitGetVirtualRegister(src2, dst2);
    }
}
ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
{
    return getConstantOperand(src).asInt32();
}

ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
{
    return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}

ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
    storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
    m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}

ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
    storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(Zero, reg, tagMaskRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagMask));
#endif
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    orPtr(reg2, scratch);
    return emitJumpIfJSCell(scratch);
}
ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfJSCell(reg));
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagMask));
#endif
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
{
    addSlowCase(emitJumpIfNotJSCell(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
{
    if (!m_codeBlock->isKnownNotImmediate(vReg))
        emitJumpSlowCaseIfNotJSCell(reg);
}
#if USE(JSVALUE64)

inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
        loadDouble(&inConstantPool, value);
    } else
        loadDouble(addressFor(index), value);
}

inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
    if (m_codeBlock->isConstantRegisterIndex(index)) {
        ASSERT(isOperandConstantImmediateInt(index));
        convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
    } else
        convertInt32ToDouble(addressFor(index), value);
}

#endif
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
    return branchTest32(NonZero, reg, TrustedImm32(TagTypeNumber));
#endif
}

ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
#if USE(JSVALUE64)
    return branchPtr(Below, reg, tagTypeNumberRegister);
#else
    return branchTest32(Zero, reg, TrustedImm32(TagTypeNumber));
#endif
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    move(reg1, scratch);
    andPtr(reg2, scratch);
    return emitJumpIfNotImmediateInteger(scratch);
}
ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateInteger(reg));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
    addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}

ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
    addSlowCase(emitJumpIfNotImmediateNumber(reg));
}
#if USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
    subPtr(TrustedImm32(TagTypeNumber), reg);
}

ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
    return branchSubPtr(Zero, TrustedImm32(TagTypeNumber), reg);
}
#endif
ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(src, dest);
#else
    if (src != dest)
        move(src, dest);
    addPtr(TrustedImm32(TagTypeNumber), dest);
#endif
}
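// On JSVALUE64, TagTypeNumber (0xFFFF000000000000) is kept in
// tagTypeNumberRegister, so the function below boxes a zero-extended int32
// with a single OR and no shifting; the JSVALUE32 path instead shifts left by
// one (via addPtr(dest, dest)) and re-tags.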
// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
#if USE(JSVALUE64)
    if (src != dest)
        move(src, dest);
    orPtr(tagTypeNumberRegister, dest);
#else
    signExtend32ToPtr(src, dest);
    addPtr(dest, dest);
    emitFastArithReTagImmediate(dest, dest);
#endif
}
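// In the JSVALUE64 encoding ValueTrue is ValueFalse + 1, so OR'ing ValueFalse
// into a register known to hold 0 or 1 (as the function below does) yields
// the boxed false/true value directly.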
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
    or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}
#endif // USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)

#endif // JITInlineMethods_h