45594c57e8efc54275c09b0d3a529a71f66dafde
[vuplus_webkit] / Source / JavaScriptCore / jit / JITPropertyAccess32_64.cpp
1 /*
2  * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
24  */
25
26 #include "config.h"
27
28 #if ENABLE(JIT)
29 #if USE(JSVALUE32_64)
30 #include "JIT.h"
31
32 #include "CodeBlock.h"
33 #include "JITInlineMethods.h"
34 #include "JITStubCall.h"
35 #include "JSArray.h"
36 #include "JSFunction.h"
37 #include "JSPropertyNameIterator.h"
38 #include "Interpreter.h"
39 #include "LinkBuffer.h"
40 #include "RepatchBuffer.h"
41 #include "ResultType.h"
42 #include "SamplingTool.h"
43
44 #ifndef NDEBUG
45 #include <stdio.h>
46 #endif
47
48 using namespace std;
49
50 namespace JSC {
51     
52 void JIT::emit_op_put_by_index(Instruction* currentInstruction)
53 {
54     unsigned base = currentInstruction[1].u.operand;
55     unsigned property = currentInstruction[2].u.operand;
56     unsigned value = currentInstruction[3].u.operand;
57     
58     JITStubCall stubCall(this, cti_op_put_by_index);
59     stubCall.addArgument(base);
60     stubCall.addArgument(Imm32(property));
61     stubCall.addArgument(value);
62     stubCall.call();
63 }
64
65 void JIT::emit_op_put_getter(Instruction* currentInstruction)
66 {
67     unsigned base = currentInstruction[1].u.operand;
68     unsigned property = currentInstruction[2].u.operand;
69     unsigned function = currentInstruction[3].u.operand;
70     
71     JITStubCall stubCall(this, cti_op_put_getter);
72     stubCall.addArgument(base);
73     stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
74     stubCall.addArgument(function);
75     stubCall.call();
76 }
77
78 void JIT::emit_op_put_setter(Instruction* currentInstruction)
79 {
80     unsigned base = currentInstruction[1].u.operand;
81     unsigned property = currentInstruction[2].u.operand;
82     unsigned function = currentInstruction[3].u.operand;
83     
84     JITStubCall stubCall(this, cti_op_put_setter);
85     stubCall.addArgument(base);
86     stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
87     stubCall.addArgument(function);
88     stubCall.call();
89 }
90
91 void JIT::emit_op_del_by_id(Instruction* currentInstruction)
92 {
93     unsigned dst = currentInstruction[1].u.operand;
94     unsigned base = currentInstruction[2].u.operand;
95     unsigned property = currentInstruction[3].u.operand;
96     
97     JITStubCall stubCall(this, cti_op_del_by_id);
98     stubCall.addArgument(base);
99     stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
100     stubCall.call(dst);
101 }
102
103 void JIT::emit_op_method_check(Instruction* currentInstruction)
104 {
105     // Assert that the following instruction is a get_by_id.
106     ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
107     
108     currentInstruction += OPCODE_LENGTH(op_method_check);
109     
110     // Do the method check - check the object & its prototype's structure inline (this is the common case).
111     m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_bytecodeOffset, m_propertyAccessCompilationInfo.size()));
112     MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
113     
114     int dst = currentInstruction[1].u.operand;
115     int base = currentInstruction[2].u.operand;
116     
117     emitLoad(base, regT1, regT0);
118     emitJumpSlowCaseIfNotJSCell(base, regT1);
119     
120     BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
121     
122     Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
123     DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT2);
124     Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
125     
126     // This will be relinked to load the function without doing a load.
127     DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
128     
129     END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
130     
131     move(TrustedImm32(JSValue::CellTag), regT1);
132     Jump match = jump();
133     
134     ASSERT_JIT_OFFSET_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
135     ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
136     ASSERT_JIT_OFFSET_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
137     
138     // Link the failure cases here.
139     structureCheck.link(this);
140     protoStructureCheck.link(this);
141     
142     // Do a regular(ish) get_by_id (the slow case will be link to
143     // cti_op_get_by_id_method_check instead of cti_op_get_by_id.
144     compileGetByIdHotPath();
145     
146     match.link(this);
147     emitStore(dst, regT1, regT0);
148     map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
149     
150     // We've already generated the following get_by_id, so make sure it's skipped over.
151     m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
152 }
153
154 void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
155 {
156     currentInstruction += OPCODE_LENGTH(op_method_check);
157     
158     int dst = currentInstruction[1].u.operand;
159     int base = currentInstruction[2].u.operand;
160     int ident = currentInstruction[3].u.operand;
161     
162     compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
163     
164     // We've already generated the following get_by_id, so make sure it's skipped over.
165     m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
166 }
167
168 JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
169 {
170     JSInterfaceJIT jit;
171     JumpList failures;
172     failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
173     failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
174     
175     // Load string length to regT1, and start the process of loading the data pointer into regT0
176     jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
177     jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
178     jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
179     
180     // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
181     failures.append(jit.branch32(AboveOrEqual, regT2, regT1));
182     
183     // Load the character
184     jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
185     
186     failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
187     jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
188     jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
189     jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
190     jit.ret();
191
192     failures.link(&jit);
193     jit.move(TrustedImm32(0), regT0);
194     jit.ret();
195     
196     LinkBuffer patchBuffer(*globalData, &jit);
197     return patchBuffer.finalizeCode();
198 }
199
200 void JIT::emit_op_get_by_val(Instruction* currentInstruction)
201 {
202     unsigned dst = currentInstruction[1].u.operand;
203     unsigned base = currentInstruction[2].u.operand;
204     unsigned property = currentInstruction[3].u.operand;
205     
206     emitLoad2(base, regT1, regT0, property, regT3, regT2);
207     
208     addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
209     emitJumpSlowCaseIfNotJSCell(base, regT1);
210     addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
211     
212     loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
213     addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
214     
215     load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
216     load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
217     addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
218     
219     emitStore(dst, regT1, regT0);
220     map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
221 }
222
223 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
224 {
225     unsigned dst = currentInstruction[1].u.operand;
226     unsigned base = currentInstruction[2].u.operand;
227     unsigned property = currentInstruction[3].u.operand;
228     
229     linkSlowCase(iter); // property int32 check
230     linkSlowCaseIfNotJSCell(iter, base); // base cell check
231
232     Jump nonCell = jump();
233     linkSlowCase(iter); // base array check
234     Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
235     emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator).code());
236     Jump failed = branchTestPtr(Zero, regT0);
237     emitStore(dst, regT1, regT0);
238     emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
239     failed.link(this);
240     notString.link(this);
241     nonCell.link(this);
242
243     linkSlowCase(iter); // vector length check
244     linkSlowCase(iter); // empty value
245     
246     JITStubCall stubCall(this, cti_op_get_by_val);
247     stubCall.addArgument(base);
248     stubCall.addArgument(property);
249     stubCall.call(dst);
250 }
251
252 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
253 {
254     unsigned base = currentInstruction[1].u.operand;
255     unsigned property = currentInstruction[2].u.operand;
256     unsigned value = currentInstruction[3].u.operand;
257     
258     emitLoad2(base, regT1, regT0, property, regT3, regT2);
259     
260     addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
261     emitJumpSlowCaseIfNotJSCell(base, regT1);
262     emitWriteBarrier(regT0, regT1, WriteBarrierForPropertyAccess);
263     addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
264     addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
265     
266     loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
267     
268     Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
269     
270     Label storeResult(this);
271     emitLoad(value, regT1, regT0);
272     store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
273     store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
274     Jump end = jump();
275     
276     empty.link(this);
277     add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
278     branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
279     
280     add32(TrustedImm32(1), regT2, regT0);
281     store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
282     jump().linkTo(storeResult, this);
283     
284     end.link(this);
285 }
286
287 void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
288 {
289     unsigned base = currentInstruction[1].u.operand;
290     unsigned property = currentInstruction[2].u.operand;
291     unsigned value = currentInstruction[3].u.operand;
292     
293     linkSlowCase(iter); // property int32 check
294     linkSlowCaseIfNotJSCell(iter, base); // base cell check
295     linkSlowCase(iter); // base not array check
296     linkSlowCase(iter); // in vector check
297     
298     JITStubCall stubPutByValCall(this, cti_op_put_by_val);
299     stubPutByValCall.addArgument(base);
300     stubPutByValCall.addArgument(property);
301     stubPutByValCall.addArgument(value);
302     stubPutByValCall.call();
303 }
304
305 void JIT::emit_op_get_by_id(Instruction* currentInstruction)
306 {
307     int dst = currentInstruction[1].u.operand;
308     int base = currentInstruction[2].u.operand;
309     
310     emitLoad(base, regT1, regT0);
311     emitJumpSlowCaseIfNotJSCell(base, regT1);
312     compileGetByIdHotPath();
313     emitStore(dst, regT1, regT0);
314     map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
315 }
316
317 void JIT::compileGetByIdHotPath()
318 {
319     // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
320     // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
321     // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
322     // to jump back to if one of these trampolies finds a match.
323     
324     BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
325     
326     Label hotPathBegin(this);
327     m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
328     m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset;
329     m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
330     
331     DataLabelPtr structureToCompare;
332     Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
333     addSlowCase(structureCheck);
334     ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
335     ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
336     
337     loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
338     DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
339     ASSERT_JIT_OFFSET_UNUSED(displacementLabel1, differenceBetween(hotPathBegin, displacementLabel1), patchOffsetGetByIdPropertyMapOffset1);
340     DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
341     ASSERT_JIT_OFFSET_UNUSED(displacementLabel2, differenceBetween(hotPathBegin, displacementLabel2), patchOffsetGetByIdPropertyMapOffset2);
342     
343     Label putResult(this);
344     ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
345     
346     END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
347 }
348
349 void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
350 {
351     int dst = currentInstruction[1].u.operand;
352     int base = currentInstruction[2].u.operand;
353     int ident = currentInstruction[3].u.operand;
354     
355     compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
356 }
357
358 void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
359 {
360     // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
361     // so that we only need track one pointer into the slow case code - we track a pointer to the location
362     // of the call (which we can use to look up the patch information), but should a array-length or
363     // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
364     // the distance from the call to the head of the slow case.
365     linkSlowCaseIfNotJSCell(iter, base);
366     linkSlowCase(iter);
367     
368     BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
369     
370 #ifndef NDEBUG
371     Label coldPathBegin(this);
372 #endif
373     JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
374     stubCall.addArgument(regT1, regT0);
375     stubCall.addArgument(TrustedImmPtr(ident));
376     Call call = stubCall.call(dst);
377     
378     END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst);
379     
380     ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
381     
382     // Track the location of the call; this will be used to recover patch information.
383     m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
384 }
385
386 void JIT::emit_op_put_by_id(Instruction* currentInstruction)
387 {
388     // In order to be able to patch both the Structure, and the object offset, we store one pointer,
389     // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
390     // such that the Structure & offset are always at the same distance from this.
391     
392     int base = currentInstruction[1].u.operand;
393     int value = currentInstruction[3].u.operand;
394     
395     emitLoad2(base, regT1, regT0, value, regT3, regT2);
396     
397     emitJumpSlowCaseIfNotJSCell(base, regT1);
398     
399     emitWriteBarrier(regT0, regT1, WriteBarrierForPropertyAccess);
400     
401     BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
402     
403     Label hotPathBegin(this);
404     m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
405     m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset;
406     m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
407     
408     // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
409     DataLabelPtr structureToCompare;
410     addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
411     ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
412     
413     loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0);
414     DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchPutByIdDefaultOffset)); // payload
415     DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchPutByIdDefaultOffset)); // tag
416     
417     END_UNINTERRUPTED_SEQUENCE(sequencePutById);
418     
419     ASSERT_JIT_OFFSET_UNUSED(displacementLabel1, differenceBetween(hotPathBegin, displacementLabel1), patchOffsetPutByIdPropertyMapOffset1);
420     ASSERT_JIT_OFFSET_UNUSED(displacementLabel2, differenceBetween(hotPathBegin, displacementLabel2), patchOffsetPutByIdPropertyMapOffset2);
421 }
422
423 void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
424 {
425     int base = currentInstruction[1].u.operand;
426     int ident = currentInstruction[2].u.operand;
427     int direct = currentInstruction[8].u.operand;
428
429     linkSlowCaseIfNotJSCell(iter, base);
430     linkSlowCase(iter);
431     
432     JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
433     stubCall.addArgument(regT1, regT0);
434     stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
435     stubCall.addArgument(regT3, regT2); 
436     Call call = stubCall.call();
437     
438     // Track the location of the call; this will be used to recover patch information.
439     m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
440 }
441
442 // Compile a store into an object's property storage.  May overwrite base.
443 void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, size_t cachedOffset)
444 {
445     int offset = cachedOffset;
446     loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
447     emitStore(offset, valueTag, valuePayload, base);
448 }
449
450 // Compile a load from an object's property storage.  May overwrite base.
451 void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
452 {
453     int offset = cachedOffset;
454     RegisterID temp = resultPayload;
455     loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), temp);
456     emitLoad(offset, resultTag, resultPayload, temp);
457 }
458
459 void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
460 {
461     loadPtr(base->addressOfPropertyStorage(), resultTag);
462     load32(Address(resultTag, cachedOffset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
463     load32(Address(resultTag, cachedOffset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
464 }
465
466 void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
467 {
468     // The code below assumes that regT0 contains the basePayload and regT1 contains the baseTag. Restore them from the stack.
469 #if CPU(MIPS) || CPU(SH4) || CPU(ARM)
470     // For MIPS, we don't add sizeof(void*) to the stack offset.
471     load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
472     // For MIPS, we don't add sizeof(void*) to the stack offset.
473     load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
474 #else
475     load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
476     load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
477 #endif
478
479     JumpList failureCases;
480     failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
481     failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
482     testPrototype(oldStructure->storedPrototype(), failureCases);
483     
484     if (!direct) {
485         // Verify that nothing in the prototype chain has a setter for this property. 
486         for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
487             testPrototype((*it)->storedPrototype(), failureCases);
488     }
489
490     // Reallocate property storage if needed.
491     Call callTarget;
492     bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
493     if (willNeedStorageRealloc) {
494         // This trampoline was called to like a JIT stub; before we can can call again we need to
495         // remove the return address from the stack, to prevent the stack from becoming misaligned.
496         preserveReturnAddressAfterCall(regT3);
497         
498         JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
499         stubCall.skipArgument(); // base
500         stubCall.skipArgument(); // ident
501         stubCall.skipArgument(); // value
502         stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
503         stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
504         stubCall.call(regT0);
505
506         restoreReturnAddressBeforeReturn(regT3);
507
508 #if CPU(MIPS) || CPU(SH4) || CPU(ARM)
509         // For MIPS, we don't add sizeof(void*) to the stack offset.
510         load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
511         // For MIPS, we don't add sizeof(void*) to the stack offset.
512         load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
513 #else
514         load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
515         load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
516 #endif
517     }
518
519     emitWriteBarrier(regT0, regT1, WriteBarrierForPropertyAccess);
520
521     storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
522 #if CPU(MIPS) || CPU(SH4) || CPU(ARM)
523     // For MIPS, we don't add sizeof(void*) to the stack offset.
524     load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
525     load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
526 #else
527     load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
528     load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
529 #endif
530     compilePutDirectOffset(regT0, regT2, regT3, cachedOffset);
531     
532     ret();
533     
534     ASSERT(!failureCases.empty());
535     failureCases.link(this);
536     restoreArgumentReferenceForTrampoline();
537     Call failureCall = tailRecursiveCall();
538     
539     LinkBuffer patchBuffer(*m_globalData, this);
540     
541     patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
542     
543     if (willNeedStorageRealloc) {
544         ASSERT(m_calls.size() == 1);
545         patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
546     }
547     
548     stubInfo->stubRoutine = patchBuffer.finalizeCode();
549     RepatchBuffer repatchBuffer(m_codeBlock);
550     repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
551 }
552
553 void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
554 {
555     RepatchBuffer repatchBuffer(codeBlock);
556     
557     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
558     // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
559     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
560     
561     int offset = sizeof(JSValue) * cachedOffset;
562
563     // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
564     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
565     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
566     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
567 }
568
569 void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
570 {
571     RepatchBuffer repatchBuffer(codeBlock);
572     
573     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
574     // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
575     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
576     
577     int offset = sizeof(JSValue) * cachedOffset;
578
579     // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
580     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
581     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
582     repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
583 }
584
585 void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
586 {
587     StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
588     
589     // regT0 holds a JSCell*
590     
591     // Check for array
592     Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));
593     
594     // Checks out okay! - get the length from the storage
595     loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
596     load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
597     
598     Jump failureCases2 = branch32(Above, regT2, TrustedImm32(INT_MAX));
599     move(regT2, regT0);
600     move(TrustedImm32(JSValue::Int32Tag), regT1);
601     Jump success = jump();
602     
603     LinkBuffer patchBuffer(*m_globalData, this);
604     
605     // Use the patch information to link the failure cases back to the original slow case routine.
606     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
607     patchBuffer.link(failureCases1, slowCaseBegin);
608     patchBuffer.link(failureCases2, slowCaseBegin);
609     
610     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
611     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
612     
613     // Track the stub we have created so that it will be deleted later.
614     stubInfo->stubRoutine = patchBuffer.finalizeCode();
615     
616     // Finally patch the jump to slow case back in the hot path to jump here instead.
617     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
618     RepatchBuffer repatchBuffer(m_codeBlock);
619     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
620     
621     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
622     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
623 }
624
625 void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
626 {
627     // regT0 holds a JSCell*
628     
629     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
630     // referencing the prototype object - let's speculatively load it's table nice and early!)
631     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
632     
633     Jump failureCases1 = checkStructure(regT0, structure);
634     
635     // Check the prototype object's Structure had not changed.
636     move(TrustedImmPtr(protoObject), regT3);
637     Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
638
639     bool needsStubLink = false;
640     // Checks out okay!
641     if (slot.cachedPropertyType() == PropertySlot::Getter) {
642         needsStubLink = true;
643         compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
644         JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
645         stubCall.addArgument(regT1);
646         stubCall.addArgument(regT0);
647         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
648         stubCall.call();
649     } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
650         needsStubLink = true;
651         JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
652         stubCall.addArgument(TrustedImmPtr(protoObject));
653         stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
654         stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
655         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
656         stubCall.call();
657     } else
658         compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
659     
660     Jump success = jump();
661     
662     LinkBuffer patchBuffer(*m_globalData, this);
663     
664     // Use the patch information to link the failure cases back to the original slow case routine.
665     CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
666     patchBuffer.link(failureCases1, slowCaseBegin);
667     patchBuffer.link(failureCases2, slowCaseBegin);
668     
669     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
670     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
671
672     if (needsStubLink) {
673         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
674             if (iter->to)
675                 patchBuffer.link(iter->from, FunctionPtr(iter->to));
676         }
677     }
678
679     // Track the stub we have created so that it will be deleted later.
680     stubInfo->stubRoutine = patchBuffer.finalizeCode();
681     
682     // Finally patch the jump to slow case back in the hot path to jump here instead.
683     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
684     RepatchBuffer repatchBuffer(m_codeBlock);
685     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
686     
687     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
688     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
689 }
690
691
692 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
693 {
694     // regT0 holds a JSCell*
695     Jump failureCase = checkStructure(regT0, structure);
696     bool needsStubLink = false;
697     if (slot.cachedPropertyType() == PropertySlot::Getter) {
698         needsStubLink = true;
699         compileGetDirectOffset(regT0, regT2, regT1, cachedOffset);
700         JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
701         stubCall.addArgument(regT1);
702         stubCall.addArgument(regT0);
703         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
704         stubCall.call();
705     } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
706         needsStubLink = true;
707         JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
708         stubCall.addArgument(regT0);
709         stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
710         stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
711         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
712         stubCall.call();
713     } else
714         compileGetDirectOffset(regT0, regT1, regT0, cachedOffset);
715
716     Jump success = jump();
717     
718     LinkBuffer patchBuffer(*m_globalData, this);
719     if (needsStubLink) {
720         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
721             if (iter->to)
722                 patchBuffer.link(iter->from, FunctionPtr(iter->to));
723         }
724     }    
725     // Use the patch information to link the failure cases back to the original slow case routine.
726     CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
727     if (!lastProtoBegin)
728         lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
729     
730     patchBuffer.link(failureCase, lastProtoBegin);
731     
732     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
733     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
734
735     CodeRef stubRoutine = patchBuffer.finalizeCode();
736
737     polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubRoutine, structure);
738     
739     // Finally patch the jump to slow case back in the hot path to jump here instead.
740     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
741     RepatchBuffer repatchBuffer(m_codeBlock);
742     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
743 }
744
745 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
746 {
747     // regT0 holds a JSCell*
748     
749     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
750     // referencing the prototype object - let's speculatively load it's table nice and early!)
751     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
752     
753     // Check eax is an object of the right Structure.
754     Jump failureCases1 = checkStructure(regT0, structure);
755     
756     // Check the prototype object's Structure had not changed.
757     move(TrustedImmPtr(protoObject), regT3);
758     Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
759     
760     bool needsStubLink = false;
761     if (slot.cachedPropertyType() == PropertySlot::Getter) {
762         needsStubLink = true;
763         compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
764         JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
765         stubCall.addArgument(regT1);
766         stubCall.addArgument(regT0);
767         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
768         stubCall.call();
769     } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
770         needsStubLink = true;
771         JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
772         stubCall.addArgument(TrustedImmPtr(protoObject));
773         stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
774         stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
775         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
776         stubCall.call();
777     } else
778         compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
779     
780     Jump success = jump();
781     
782     LinkBuffer patchBuffer(*m_globalData, this);
783     if (needsStubLink) {
784         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
785             if (iter->to)
786                 patchBuffer.link(iter->from, FunctionPtr(iter->to));
787         }
788     }
789     // Use the patch information to link the failure cases back to the original slow case routine.
790     CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
791     patchBuffer.link(failureCases1, lastProtoBegin);
792     patchBuffer.link(failureCases2, lastProtoBegin);
793     
794     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
795     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
796     
797     CodeRef stubRoutine = patchBuffer.finalizeCode();
798
799     prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure);
800     
801     // Finally patch the jump to slow case back in the hot path to jump here instead.
802     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
803     RepatchBuffer repatchBuffer(m_codeBlock);
804     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
805 }
806
807 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
808 {
809     // regT0 holds a JSCell*
810     ASSERT(count);
811     
812     JumpList bucketsOfFail;
813     
814     // Check eax is an object of the right Structure.
815     bucketsOfFail.append(checkStructure(regT0, structure));
816     
817     Structure* currStructure = structure;
818     WriteBarrier<Structure>* it = chain->head();
819     JSObject* protoObject = 0;
820     for (unsigned i = 0; i < count; ++i, ++it) {
821         protoObject = asObject(currStructure->prototypeForLookup(callFrame));
822         currStructure = it->get();
823         testPrototype(protoObject, bucketsOfFail);
824     }
825     ASSERT(protoObject);
826     
827     bool needsStubLink = false;
828     if (slot.cachedPropertyType() == PropertySlot::Getter) {
829         needsStubLink = true;
830         compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
831         JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
832         stubCall.addArgument(regT1);
833         stubCall.addArgument(regT0);
834         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
835         stubCall.call();
836     } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
837         needsStubLink = true;
838         JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
839         stubCall.addArgument(TrustedImmPtr(protoObject));
840         stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
841         stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
842         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
843         stubCall.call();
844     } else
845         compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
846
847     Jump success = jump();
848     
849     LinkBuffer patchBuffer(*m_globalData, this);
850     if (needsStubLink) {
851         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
852             if (iter->to)
853                 patchBuffer.link(iter->from, FunctionPtr(iter->to));
854         }
855     }
856     // Use the patch information to link the failure cases back to the original slow case routine.
857     CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
858     
859     patchBuffer.link(bucketsOfFail, lastProtoBegin);
860     
861     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
862     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
863     
864     CodeRef stubRoutine = patchBuffer.finalizeCode();
865     
866     // Track the stub we have created so that it will be deleted later.
867     prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain);
868     
869     // Finally patch the jump to slow case back in the hot path to jump here instead.
870     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
871     RepatchBuffer repatchBuffer(m_codeBlock);
872     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
873 }
874
875 void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
876 {
877     // regT0 holds a JSCell*
878     ASSERT(count);
879     
880     JumpList bucketsOfFail;
881     
882     // Check eax is an object of the right Structure.
883     bucketsOfFail.append(checkStructure(regT0, structure));
884     
885     Structure* currStructure = structure;
886     WriteBarrier<Structure>* it = chain->head();
887     JSObject* protoObject = 0;
888     for (unsigned i = 0; i < count; ++i, ++it) {
889         protoObject = asObject(currStructure->prototypeForLookup(callFrame));
890         currStructure = it->get();
891         testPrototype(protoObject, bucketsOfFail);
892     }
893     ASSERT(protoObject);
894     
895     bool needsStubLink = false;
896     if (slot.cachedPropertyType() == PropertySlot::Getter) {
897         needsStubLink = true;
898         compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
899         JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
900         stubCall.addArgument(regT1);
901         stubCall.addArgument(regT0);
902         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
903         stubCall.call();
904     } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
905         needsStubLink = true;
906         JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
907         stubCall.addArgument(TrustedImmPtr(protoObject));
908         stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
909         stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
910         stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
911         stubCall.call();
912     } else
913         compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
914     Jump success = jump();
915     
916     LinkBuffer patchBuffer(*m_globalData, this);
917     if (needsStubLink) {
918         for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
919             if (iter->to)
920                 patchBuffer.link(iter->from, FunctionPtr(iter->to));
921         }
922     }
923     // Use the patch information to link the failure cases back to the original slow case routine.
924     patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
925     
926     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
927     patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
928     
929     // Track the stub we have created so that it will be deleted later.
930     CodeRef stubRoutine = patchBuffer.finalizeCode();
931     stubInfo->stubRoutine = stubRoutine;
932     
933     // Finally patch the jump to slow case back in the hot path to jump here instead.
934     CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
935     RepatchBuffer repatchBuffer(m_codeBlock);
936     repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
937     
938     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
939     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
940 }
941
942 void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset)
943 {
944     ASSERT(sizeof(JSValue) == 8);
945     
946     loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
947     loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
948     loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
949 }
950
951 void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
952 {
953     unsigned dst = currentInstruction[1].u.operand;
954     unsigned base = currentInstruction[2].u.operand;
955     unsigned property = currentInstruction[3].u.operand;
956     unsigned expected = currentInstruction[4].u.operand;
957     unsigned iter = currentInstruction[5].u.operand;
958     unsigned i = currentInstruction[6].u.operand;
959     
960     emitLoad2(property, regT1, regT0, base, regT3, regT2);
961     emitJumpSlowCaseIfNotJSCell(property, regT1);
962     addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
963     // Property registers are now available as the property is known
964     emitJumpSlowCaseIfNotJSCell(base, regT3);
965     emitLoadPayload(iter, regT1);
966     
967     // Test base's structure
968     loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
969     addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
970     load32(addressFor(i), regT3);
971     sub32(TrustedImm32(1), regT3);
972     addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
973     compileGetDirectOffset(regT2, regT1, regT0, regT3);    
974     
975     emitStore(dst, regT1, regT0);
976     map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
977 }
978
979 void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
980 {
981     unsigned dst = currentInstruction[1].u.operand;
982     unsigned base = currentInstruction[2].u.operand;
983     unsigned property = currentInstruction[3].u.operand;
984     
985     linkSlowCaseIfNotJSCell(iter, property);
986     linkSlowCase(iter);
987     linkSlowCaseIfNotJSCell(iter, base);
988     linkSlowCase(iter);
989     linkSlowCase(iter);
990     
991     JITStubCall stubCall(this, cti_op_get_by_val);
992     stubCall.addArgument(base);
993     stubCall.addArgument(property);
994     stubCall.call(dst);
995 }
996
997 void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
998 {
999     int dst = currentInstruction[1].u.operand;
1000     int index = currentInstruction[2].u.operand;
1001     int skip = currentInstruction[3].u.operand;
1002
1003     emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
1004     bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
1005     ASSERT(skip || !checkTopLevel);
1006     if (checkTopLevel && skip--) {
1007         Jump activationNotCreated;
1008         if (checkTopLevel)
1009             activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
1010         loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
1011         activationNotCreated.link(this);
1012     }
1013     while (skip--)
1014         loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
1015
1016     loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
1017     loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
1018
1019     emitLoad(index, regT1, regT0, regT2);
1020     emitStore(dst, regT1, regT0);
1021     map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
1022 }
1023
1024 void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
1025 {
1026     int index = currentInstruction[1].u.operand;
1027     int skip = currentInstruction[2].u.operand;
1028     int value = currentInstruction[3].u.operand;
1029
1030     emitLoad(value, regT1, regT0);
1031
1032     emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
1033     bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
1034     ASSERT(skip || !checkTopLevel);
1035     if (checkTopLevel && skip--) {
1036         Jump activationNotCreated;
1037         if (checkTopLevel)
1038             activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
1039         loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
1040         activationNotCreated.link(this);
1041     }
1042     while (skip--)
1043         loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
1044     loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
1045
1046     emitWriteBarrier(regT2, regT3, WriteBarrierForVariableAccess);
1047
1048     loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
1049     emitStore(index, regT1, regT0, regT2);
1050     map(m_bytecodeOffset + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
1051 }
1052
1053 void JIT::emit_op_get_global_var(Instruction* currentInstruction)
1054 {
1055     int dst = currentInstruction[1].u.operand;
1056     JSGlobalObject* globalObject = m_codeBlock->globalObject();
1057     ASSERT(globalObject->isGlobalObject());
1058     int index = currentInstruction[2].u.operand;
1059
1060     loadPtr(&globalObject->m_registers, regT2);
1061
1062     emitLoad(index, regT1, regT0, regT2);
1063     emitStore(dst, regT1, regT0);
1064     map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
1065 }
1066
1067 void JIT::emit_op_put_global_var(Instruction* currentInstruction)
1068 {
1069     int index = currentInstruction[1].u.operand;
1070     int value = currentInstruction[2].u.operand;
1071
1072     JSGlobalObject* globalObject = m_codeBlock->globalObject();
1073
1074     emitLoad(value, regT1, regT0);
1075     move(TrustedImmPtr(globalObject), regT2);
1076
1077     emitWriteBarrier(regT2, regT3, WriteBarrierForVariableAccess);
1078
1079     loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
1080     emitStore(index, regT1, regT0, regT2);
1081     map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
1082 }
1083
1084 void JIT::emitWriteBarrier(RegisterID owner, RegisterID scratch, WriteBarrierUseKind useKind)
1085 {
1086     UNUSED_PARAM(owner);
1087     UNUSED_PARAM(scratch);
1088     UNUSED_PARAM(useKind);
1089     ASSERT(owner != scratch);
1090     
1091 #if ENABLE(WRITE_BARRIER_PROFILING)
1092     emitCount(WriteBarrierCounters::jitCounterFor(useKind));
1093 #endif
1094 }
1095
1096 } // namespace JSC
1097
1098 #endif // USE(JSVALUE32_64)
1099 #endif // ENABLE(JIT)