/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"
// This probably does not belong here; adding here for now as a quick Windows build fix.
#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
#include "MacroAssembler.h"
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif
#include "CodeBlock.h"
#include "CryptographicallyRandomNumber.h"
#include "DFGNode.h" // for DFG_SUCCESS_STATS
#include "Interpreter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

using namespace std;

namespace JSC {
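
// Helpers used by the JIT stubs to repatch already-generated code: each takes
// the return address of an active call site and relinks the corresponding
// (near) call instruction to point at a new target.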
void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
}

void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
    RepatchBuffer repatchBuffer(codeblock);
    repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
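
// The (unsigned)-1 and (RegisterID)-1 initializers below are "unset" sentinels;
// ASSERTs elsewhere in the JIT rely on them to catch use of stale state.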
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
    : m_interpreter(globalData->interpreter)
    , m_globalData(globalData)
    , m_codeBlock(codeBlock)
    , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
    , m_bytecodeOffset((unsigned)-1)
#if USE(JSVALUE32_64)
    , m_jumpTargetIndex(0)
    , m_mappedBytecodeOffset((unsigned)-1)
    , m_mappedVirtualRegisterIndex((unsigned)-1)
    , m_mappedTag((RegisterID)-1)
    , m_mappedPayload((RegisterID)-1)
#else
    , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
    , m_jumpTargetsPosition(0)
#endif
#if USE(OS_RANDOMNESS)
    , m_randomGenerator(cryptographicallyRandomNumber())
#else
    , m_randomGenerator(static_cast<unsigned>(randomNumber() * 0xFFFFFFF))
#endif
{
}
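
// Bump the CodeBlock's execute counter and, once it crosses zero, call into
// the optimizing-compilation stubs (cti_optimize_from_loop at loop back-edges,
// cti_optimize_from_ret at returns) to consider tiering up; while the counter
// is still negative, the signed branch skips the stub call.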
#if ENABLE(TIERED_COMPILATION)
void JIT::emitOptimizationCheck(OptimizationCheckKind kind)
{
    if (!shouldEmitProfiling())
        return;

    Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? CodeBlock::executeCounterIncrementForLoop : CodeBlock::executeCounterIncrementForReturn), AbsoluteAddress(m_codeBlock->addressOfExecuteCounter()));
    JITStubCall stubCall(this, kind == LoopOptimizationCheck ? cti_optimize_from_loop : cti_optimize_from_ret);
    if (kind == LoopOptimizationCheck)
        stubCall.addArgument(Imm32(m_bytecodeOffset));
    stubCall.call();
    skipOptimize.link(this);
}
#endif
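
// Decrement the timeout-check register; when it reaches zero, call the
// cti_timeout_check stub, which handles script-timeout termination and
// returns a fresh countdown value into the register.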
#if USE(JSVALUE32_64)
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall stubCall(this, cti_timeout_check);
    stubCall.addArgument(regT1, regT0); // save last result registers.
    stubCall.call(timeoutCheckRegister);
    stubCall.getArgument(0, regT1, regT0); // reload last result registers.
    skipTimeout.link(this);
}
#else
void JIT::emitTimeoutCheck()
{
    Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
    JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
    skipTimeout.link(this);

    killLastResultRegister();
}
#endif
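
// The DEFINE_* macros generate the case labels for the opcode dispatch
// switches in privateCompileMainPass and privateCompileSlowCases. Binary and
// unary ops compile to plain calls to the corresponding cti_* stub, while
// DEFINE_OP and DEFINE_SLOWCASE_OP dispatch to the per-opcode emit_* and
// emitSlow_* methods.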
#define NEXT_OPCODE(name) \
    m_bytecodeOffset += OPCODE_LENGTH(name); \
    break;

#if USE(JSVALUE32_64)
#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.addArgument(currentInstruction[3].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#else // USE(JSVALUE32_64)

#define DEFINE_BINARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_UNARY_OP(name) \
    case name: { \
        JITStubCall stubCall(this, cti_##name); \
        stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
        stubCall.call(currentInstruction[1].u.operand); \
        NEXT_OPCODE(name); \
    }
#endif // USE(JSVALUE32_64)

#define DEFINE_OP(name) \
    case name: { \
        emit_##name(currentInstruction); \
        NEXT_OPCODE(name); \
    }

#define DEFINE_SLOWCASE_OP(name) \
    case name: { \
        emitSlow_##name(currentInstruction, iter); \
        NEXT_OPCODE(name); \
    }
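
// First of the three codegen passes: walk the bytecode in order, record a
// label for every instruction (so jumps can be bound later), and emit the
// fast-path code for each opcode.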
void JIT::privateCompileMainPass()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();
    unsigned instructionCount = m_codeBlock->instructions().size();

    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

    for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
        ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);

#if ENABLE(OPCODE_SAMPLING)
        if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
            sampleInstruction(currentInstruction);
#endif

#if USE(JSVALUE64)
        if (m_labels[m_bytecodeOffset].isUsed())
            killLastResultRegister();
#endif

        m_labels[m_bytecodeOffset] = label();

#if ENABLE(TIERED_COMPILATION)
        if (m_canBeOptimized)
            m_jitCodeMapEncoder.append(m_bytecodeOffset, differenceBetween(m_startOfCode, label()));
#endif

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_BINARY_OP(op_del_by_val)
        DEFINE_BINARY_OP(op_in)
        DEFINE_BINARY_OP(op_less)
        DEFINE_BINARY_OP(op_lesseq)
        DEFINE_BINARY_OP(op_greater)
        DEFINE_BINARY_OP(op_greatereq)
        DEFINE_UNARY_OP(op_is_boolean)
        DEFINE_UNARY_OP(op_is_function)
        DEFINE_UNARY_OP(op_is_number)
        DEFINE_UNARY_OP(op_is_object)
        DEFINE_UNARY_OP(op_is_string)
        DEFINE_UNARY_OP(op_is_undefined)
#if USE(JSVALUE64)
        DEFINE_UNARY_OP(op_negate)
#endif
        DEFINE_UNARY_OP(op_typeof)

        DEFINE_OP(op_add)
        DEFINE_OP(op_bitand)
        DEFINE_OP(op_bitnot)
        DEFINE_OP(op_bitor)
        DEFINE_OP(op_bitxor)
        DEFINE_OP(op_call)
        DEFINE_OP(op_call_eval)
        DEFINE_OP(op_call_varargs)
        DEFINE_OP(op_catch)
        DEFINE_OP(op_construct)
        DEFINE_OP(op_get_callee)
        DEFINE_OP(op_create_this)
        DEFINE_OP(op_convert_this)
        DEFINE_OP(op_init_lazy_reg)
        DEFINE_OP(op_create_arguments)
        DEFINE_OP(op_debug)
        DEFINE_OP(op_del_by_id)
        DEFINE_OP(op_div)
        DEFINE_OP(op_end)
        DEFINE_OP(op_enter)
        DEFINE_OP(op_create_activation)
        DEFINE_OP(op_eq)
        DEFINE_OP(op_eq_null)
        DEFINE_OP(op_get_by_id)
        DEFINE_OP(op_get_arguments_length)
        DEFINE_OP(op_get_by_val)
        DEFINE_OP(op_get_argument_by_val)
        DEFINE_OP(op_get_by_pname)
        DEFINE_OP(op_get_global_var)
        DEFINE_OP(op_get_pnames)
        DEFINE_OP(op_get_scoped_var)
        DEFINE_OP(op_check_has_instance)
        DEFINE_OP(op_instanceof)
        DEFINE_OP(op_jeq_null)
        DEFINE_OP(op_jfalse)
        DEFINE_OP(op_jmp)
        DEFINE_OP(op_jmp_scopes)
        DEFINE_OP(op_jneq_null)
        DEFINE_OP(op_jneq_ptr)
        DEFINE_OP(op_jless)
        DEFINE_OP(op_jlesseq)
        DEFINE_OP(op_jgreater)
        DEFINE_OP(op_jgreatereq)
        DEFINE_OP(op_jnless)
        DEFINE_OP(op_jnlesseq)
        DEFINE_OP(op_jngreater)
        DEFINE_OP(op_jngreatereq)
        DEFINE_OP(op_jtrue)
        DEFINE_OP(op_jsr)
        DEFINE_OP(op_load_varargs)
        DEFINE_OP(op_loop)
        DEFINE_OP(op_loop_hint)
        DEFINE_OP(op_loop_if_less)
        DEFINE_OP(op_loop_if_lesseq)
        DEFINE_OP(op_loop_if_greater)
        DEFINE_OP(op_loop_if_greatereq)
        DEFINE_OP(op_loop_if_true)
        DEFINE_OP(op_loop_if_false)
        DEFINE_OP(op_lshift)
        DEFINE_OP(op_method_check)
        DEFINE_OP(op_mod)
        DEFINE_OP(op_mov)
        DEFINE_OP(op_mul)
#if USE(JSVALUE32_64)
        DEFINE_OP(op_negate)
#endif
        DEFINE_OP(op_neq)
        DEFINE_OP(op_neq_null)
        DEFINE_OP(op_new_array)
        DEFINE_OP(op_new_array_buffer)
        DEFINE_OP(op_new_func)
        DEFINE_OP(op_new_func_exp)
        DEFINE_OP(op_new_object)
        DEFINE_OP(op_new_regexp)
        DEFINE_OP(op_next_pname)
        DEFINE_OP(op_not)
        DEFINE_OP(op_nstricteq)
        DEFINE_OP(op_pop_scope)
        DEFINE_OP(op_post_dec)
        DEFINE_OP(op_post_inc)
        DEFINE_OP(op_pre_dec)
        DEFINE_OP(op_pre_inc)
        DEFINE_OP(op_profile_did_call)
        DEFINE_OP(op_profile_will_call)
        DEFINE_OP(op_push_new_scope)
        DEFINE_OP(op_push_scope)
        DEFINE_OP(op_put_by_id)
        DEFINE_OP(op_put_by_index)
        DEFINE_OP(op_put_by_val)
        DEFINE_OP(op_put_getter)
        DEFINE_OP(op_put_global_var)
        DEFINE_OP(op_put_scoped_var)
        DEFINE_OP(op_put_setter)
        DEFINE_OP(op_resolve)
        DEFINE_OP(op_resolve_base)
        DEFINE_OP(op_ensure_property_exists)
        DEFINE_OP(op_resolve_global)
        DEFINE_OP(op_resolve_global_dynamic)
        DEFINE_OP(op_resolve_skip)
        DEFINE_OP(op_resolve_with_base)
        DEFINE_OP(op_resolve_with_this)
        DEFINE_OP(op_ret)
        DEFINE_OP(op_call_put_result)
        DEFINE_OP(op_ret_object_or_this)
        DEFINE_OP(op_rshift)
        DEFINE_OP(op_urshift)
        DEFINE_OP(op_sret)
        DEFINE_OP(op_strcat)
        DEFINE_OP(op_stricteq)
        DEFINE_OP(op_sub)
        DEFINE_OP(op_switch_char)
        DEFINE_OP(op_switch_imm)
        DEFINE_OP(op_switch_string)
        DEFINE_OP(op_tear_off_activation)
        DEFINE_OP(op_tear_off_arguments)
        DEFINE_OP(op_throw)
        DEFINE_OP(op_throw_reference_error)
        DEFINE_OP(op_to_jsnumber)
        DEFINE_OP(op_to_primitive)

        case op_get_array_length:
        case op_get_by_id_chain:
        case op_get_by_id_generic:
        case op_get_by_id_proto:
        case op_get_by_id_self:
        case op_get_by_id_getter_chain:
        case op_get_by_id_getter_proto:
        case op_get_by_id_getter_self:
        case op_get_by_id_custom_chain:
        case op_get_by_id_custom_proto:
        case op_get_by_id_custom_self:
        case op_get_string_length:
        case op_put_by_id_generic:
        case op_put_by_id_replace:
        case op_put_by_id_transition:
            // The JIT never generates these specialized get/put variants;
            // they should not appear in bytecode handed to the baseline JIT.
            ASSERT_NOT_REACHED();
        }
    }

    ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}
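
// Second pass: every bytecode offset now has a label, so bind the jumps
// recorded during the main pass to their target labels.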
void JIT::privateCompileLinkPass()
{
    unsigned jmpTableCount = m_jmpTable.size();
    for (unsigned i = 0; i < jmpTableCount; ++i)
        m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
    m_jmpTable.clear();
}
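
// Third pass: emit the out-of-line slow paths. Each fast path that can bail
// appended entries to m_slowCases; bind those jumps here, emit the slow path
// via emitSlow_*, and jump back to the fast path of the next instruction.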
void JIT::privateCompileSlowCases()
{
    Instruction* instructionsBegin = m_codeBlock->instructions().begin();

    m_propertyAccessInstructionIndex = 0;
    m_globalResolveInfoIndex = 0;
    m_callLinkInfoIndex = 0;

#if !ASSERT_DISABLED && ENABLE(VALUE_PROFILER)
    // Use this to assert that slow-path code associates new profiling sites with existing
    // ValueProfiles rather than creating new ones. This ensures that for a given instruction
    // (say, get_by_id) we get combined statistics for both the fast-path and the slow-path
    // executions of that instruction. Furthermore, if the slow-path code created new
    // ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
    // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
    unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
#endif

    for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
#if USE(JSVALUE64)
        killLastResultRegister();
#endif

        m_bytecodeOffset = iter->to;
#ifndef NDEBUG
        unsigned firstTo = m_bytecodeOffset;
#endif
        Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;

        switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
        DEFINE_SLOWCASE_OP(op_add)
        DEFINE_SLOWCASE_OP(op_bitand)
        DEFINE_SLOWCASE_OP(op_bitnot)
        DEFINE_SLOWCASE_OP(op_bitor)
        DEFINE_SLOWCASE_OP(op_bitxor)
        DEFINE_SLOWCASE_OP(op_call)
        DEFINE_SLOWCASE_OP(op_call_eval)
        DEFINE_SLOWCASE_OP(op_call_varargs)
        DEFINE_SLOWCASE_OP(op_construct)
        DEFINE_SLOWCASE_OP(op_convert_this)
        DEFINE_SLOWCASE_OP(op_create_this)
        DEFINE_SLOWCASE_OP(op_div)
        DEFINE_SLOWCASE_OP(op_eq)
        DEFINE_SLOWCASE_OP(op_get_by_id)
        DEFINE_SLOWCASE_OP(op_get_arguments_length)
        DEFINE_SLOWCASE_OP(op_get_by_val)
        DEFINE_SLOWCASE_OP(op_get_argument_by_val)
        DEFINE_SLOWCASE_OP(op_get_by_pname)
        DEFINE_SLOWCASE_OP(op_check_has_instance)
        DEFINE_SLOWCASE_OP(op_instanceof)
        DEFINE_SLOWCASE_OP(op_jfalse)
        DEFINE_SLOWCASE_OP(op_jless)
        DEFINE_SLOWCASE_OP(op_jlesseq)
        DEFINE_SLOWCASE_OP(op_jgreater)
        DEFINE_SLOWCASE_OP(op_jgreatereq)
        DEFINE_SLOWCASE_OP(op_jnless)
        DEFINE_SLOWCASE_OP(op_jnlesseq)
        DEFINE_SLOWCASE_OP(op_jngreater)
        DEFINE_SLOWCASE_OP(op_jngreatereq)
        DEFINE_SLOWCASE_OP(op_jtrue)
        DEFINE_SLOWCASE_OP(op_load_varargs)
        DEFINE_SLOWCASE_OP(op_loop_if_less)
        DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
        DEFINE_SLOWCASE_OP(op_loop_if_greater)
        DEFINE_SLOWCASE_OP(op_loop_if_greatereq)
        DEFINE_SLOWCASE_OP(op_loop_if_true)
        DEFINE_SLOWCASE_OP(op_loop_if_false)
        DEFINE_SLOWCASE_OP(op_lshift)
        DEFINE_SLOWCASE_OP(op_method_check)
        DEFINE_SLOWCASE_OP(op_mod)
        DEFINE_SLOWCASE_OP(op_mul)
#if USE(JSVALUE32_64)
        DEFINE_SLOWCASE_OP(op_negate)
#endif
        DEFINE_SLOWCASE_OP(op_neq)
        DEFINE_SLOWCASE_OP(op_new_object)
        DEFINE_SLOWCASE_OP(op_new_func)
        DEFINE_SLOWCASE_OP(op_new_func_exp)
        DEFINE_SLOWCASE_OP(op_not)
        DEFINE_SLOWCASE_OP(op_nstricteq)
        DEFINE_SLOWCASE_OP(op_post_dec)
        DEFINE_SLOWCASE_OP(op_post_inc)
        DEFINE_SLOWCASE_OP(op_pre_dec)
        DEFINE_SLOWCASE_OP(op_pre_inc)
        DEFINE_SLOWCASE_OP(op_put_by_id)
        DEFINE_SLOWCASE_OP(op_put_by_val)
        DEFINE_SLOWCASE_OP(op_resolve_global)
        DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
        DEFINE_SLOWCASE_OP(op_rshift)
        DEFINE_SLOWCASE_OP(op_urshift)
        DEFINE_SLOWCASE_OP(op_stricteq)
        DEFINE_SLOWCASE_OP(op_sub)
        DEFINE_SLOWCASE_OP(op_to_jsnumber)
        DEFINE_SLOWCASE_OP(op_to_primitive)
        default:
            ASSERT_NOT_REACHED();
        }

        ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
        ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");

        emitJumpSlowToHot(jump(), 0);
    }

    ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
    ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
    ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif

#ifndef NDEBUG
    // Reset this, in order to guard its use with ASSERTs.
    m_bytecodeOffset = (unsigned)-1;
#endif
}
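
// Top-level driver: emit the prologue (return-PC spill, register file check,
// arity check), run the three codegen passes, then link the finished code and
// populate the CodeBlock's patch tables.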
JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
{
#if ENABLE(TIERED_COMPILATION)
    m_canBeOptimized = m_codeBlock->canCompileWithDFG();
    if (m_canBeOptimized)
        m_startOfCode = label();
#endif

    // Just add a little bit of randomness to the codegen.
    if (m_randomGenerator.getUint32() & 1)
        nop();

    // Could use a pop_m, but would need to offset the following instruction if so.
    preserveReturnAddressAfterCall(regT2);
    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);

    Label beginLabel(this);

    sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
    sampleInstruction(m_codeBlock->instructions().begin());
#endif

    Jump registerFileCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
#if ENABLE(DFG_SUCCESS_STATS)
        static SamplingCounter counter("originalJIT");
        emitCount(counter);
#endif

#if ENABLE(VALUE_PROFILER)
        ASSERT(m_bytecodeOffset == (unsigned)-1);
        for (int argumentRegister = -RegisterFile::CallFrameHeaderSize - m_codeBlock->m_numParameters + 1; argumentRegister < -RegisterFile::CallFrameHeaderSize; ++argumentRegister) {
            loadPtr(Address(callFrameRegister, argumentRegister * sizeof(Register)), regT0);
            emitValueProfilingSite(FirstProfilingSite);
        }
#endif

        // In the case of a fast linked call, we do not set this up in the caller.
        emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

        addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
        registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
    }

    Label functionBody = label();

    privateCompileMainPass();
    privateCompileLinkPass();
    privateCompileSlowCases();

    Label arityCheck;
    if (m_codeBlock->codeType() == FunctionCode) {
        registerFileCheck.link(this);
        m_bytecodeOffset = 0;
        JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
        m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
        jump(functionBody);

        arityCheck = label();
        preserveReturnAddressAfterCall(regT2);
        emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
        branch32(Equal, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
        restoreArgumentReference();

        JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);

        jump(beginLabel);
    }

    ASSERT(m_jmpTable.isEmpty());
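
    // Code generation is complete; link the code and translate the offsets
    // recorded during codegen into absolute addresses in the finished code.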
    LinkBuffer patchBuffer(*m_globalData, this);

    // Translate vPC offsets into addresses in JIT generated code, for switch tables.
    for (unsigned i = 0; i < m_switches.size(); ++i) {
        SwitchRecord record = m_switches[i];
        unsigned bytecodeOffset = record.bytecodeOffset;

        if (record.type != SwitchRecord::String) {
            ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
            ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());

            record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
                unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
                record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
            }
        } else {
            ASSERT(record.type == SwitchRecord::String);

            record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);

            StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
            for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
                unsigned offset = it->second.branchOffset;
                it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
            }
        }
    }

    for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
        HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
        handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
    }

    for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
        if (iter->to)
            patchBuffer.link(iter->from, FunctionPtr(iter->to));
    }

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
            m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
    }

    // Link absolute addresses for jsr.
    for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
        patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
    for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
        info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
    }
    m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
    for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.isCall = m_callStructureStubCompilationInfo[i].isCall;
        info.callReturnLocation = CodeLocationLabel(patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation));
        info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
        info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
    }
    unsigned methodCallCount = m_methodCallCompilationInfo.size();
    m_codeBlock->addMethodCallLinkInfos(methodCallCount);
    for (unsigned i = 0; i < methodCallCount; ++i) {
        MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
        info.cachedStructure.setLocation(patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare));
        info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
    }

#if ENABLE(TIERED_COMPILATION)
    if (m_canBeOptimized)
        m_codeBlock->setJITCodeMap(m_jitCodeMapEncoder.finish());
#endif

    if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
        *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);

    return JITCode(patchBuffer.finalizeCode(), JITCode::BaselineJIT);
}
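
// Called (via the lazy-link stub) the first time an unlinked call site runs:
// if the argument count matches (or the callee is native), bind the call site
// directly to the callee's entry point, then repatch the slow path to the
// generic virtual call/construct stub so we stop trying to link.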
void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData, CodeSpecializationKind kind)
{
    RepatchBuffer repatchBuffer(callerCodeBlock);

    // Currently we only link calls with the exact number of arguments.
    // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
    if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
        ASSERT(!callLinkInfo->isLinked());
        callLinkInfo->callee.set(*globalData, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
        repatchBuffer.relink(callLinkInfo->hotPathOther, code);

        if (calleeCodeBlock)
            calleeCodeBlock->linkIncomingCall(callLinkInfo);
    }

    // Patch the call so we do not continue to try to link.
    if (kind == CodeForCall) {
        repatchBuffer.relink(CodeLocationNearCall(callLinkInfo->callReturnLocation), globalData->jitStubs->ctiVirtualCall());
        return;
    }

    ASSERT(kind == CodeForConstruct);
    repatchBuffer.relink(CodeLocationNearCall(callLinkInfo->callReturnLocation), globalData->jitStubs->ctiVirtualConstruct());
}

} // namespace JSC

#endif // ENABLE(JIT)