Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

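// The following property-access ops have no inline fast path; each one simply
// marshals its operands (virtual register indices, identifiers, or immediates)
// into a JITStubCall and calls out to the corresponding C++ stub.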
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;
    
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(base);
    stubCall.addArgument(Imm32(property));
    stubCall.addArgument(value);
    stubCall.call();
}

void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;
    
    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}

void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned function = currentInstruction[3].u.operand;
    
    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.addArgument(function);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(base);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
    stubCall.call(dst);
}

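// op_method_check is emitted immediately before a get_by_id for a method call.
// The fast path planted below checks the base cell's Structure and its
// prototype's Structure against patchable immediates, then materializes the
// cached function into regT0/regT1 via a patchable move (relinked once the
// structures are known), so the hit case performs no property load at all.
// If either check fails we fall through into an ordinary get_by_id hot path
// whose slow case calls cti_op_get_by_id_method_check instead of
// cti_op_get_by_id.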
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
    
    currentInstruction += OPCODE_LENGTH(op_method_check);
    
    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessCompilationInfo.size()));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
    
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    
    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
    
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT2);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    
    // This will be relinked to materialize the function directly, avoiding a property load.
    DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
    
    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
    
    move(TrustedImm32(JSValue::CellTag), regT1);
    Jump match = jump();
    
    ASSERT_JIT_OFFSET_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
    
    // Link the failure cases here.
    structureCheck.link(this);
    protoStructureCheck.link(this);
    
    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath();
    
    match.link(this);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
    
    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}

void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;
    
    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
    
    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}

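// Shared stub for get_by_val on string bases. Expects the base JSString in
// regT0 and the int32 index in regT2. It bails (returning a zero payload for
// the caller to null check) unless the base is a non-rope string, the index is
// in bounds, and the character is < 0x100; on success it returns the cached
// single-character string from the small strings table.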
JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
    failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
    
    // Load string length to regT1, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
    
    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT2, regT1));
    
    // Load the character
    jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
    
    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();
    
    LinkBuffer patchBuffer(*globalData, &jit);
    return patchBuffer.finalizeCode();
}

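// Under JSVALUE32_64 every JSValue is eight bytes - a 32-bit payload and a
// 32-bit tag - so array elements are addressed with a TimesEight BaseIndex,
// and the tag and payload are loaded and stored as separate 32-bit accesses.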
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    
    emitLoad2(base, regT1, regT0, property, regT3, regT2);
    
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
    
    loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
    
    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
    addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
    
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
}

void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    
    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check

    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
    emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator).code());
    Jump failed = branchTestPtr(Zero, regT0);
    emitStore(dst, regT1, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value
    
    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}

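// The put_by_val fast path bails to the stub unless the index is an int32, the
// base is a JSArray, and the index is within the storage vector. Writing into
// a hole additionally bumps m_numValuesInVector, and extends m_length when the
// index is past the current length.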
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;
    
    emitLoad2(base, regT1, regT0, property, regT3, regT2);
    
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    emitWriteBarrier(regT0, regT1, WriteBarrierForPropertyAccess);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
    
    loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
    
    Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
    
    Label storeResult(this);
    emitLoad(value, regT1, regT0);
    store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
    store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
    Jump end = jump();
    
    empty.link(this);
    add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
    
    add32(TrustedImm32(1), regT2, regT0);
    store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    jump().linkTo(storeResult, this);
    
    end.link(this);
}

void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;
    
    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check
    linkSlowCase(iter); // in vector check
    
    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(base);
    stubPutByValCall.addArgument(property);
    stubPutByValCall.addArgument(value);
    stubPutByValCall.call();
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    
    emitLoad(base, regT1, regT0);
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    compileGetByIdHotPath();
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}

void JIT::compileGetByIdHotPath()
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to the array-length / prototype access trampolines), and finally we also record the property-map access offset
    // as a label to jump back to if one of these trampolines finds a match.
    
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
    
    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
    m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
    
    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
    
    loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
    DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
    ASSERT_JIT_OFFSET_UNUSED(displacementLabel1, differenceBetween(hotPathBegin, displacementLabel1), patchOffsetGetByIdPropertyMapOffset1);
    DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
    ASSERT_JIT_OFFSET_UNUSED(displacementLabel2, differenceBetween(hotPathBegin, displacementLabel2), patchOffsetGetByIdPropertyMapOffset2);
    
    Label putResult(this);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
    
    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
}


void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int base = currentInstruction[2].u.operand;
    int ident = currentInstruction[3].u.operand;
    
    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
}

void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
    // the distance from the call to the head of the slow case.
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
    
#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    Call call = stubCall.call(dst);
    
    END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst);
    
    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
    
    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    // In order to be able to patch both the Structure and the object offset, we store a single pointer,
    // 'hotPathBegin', to just after the point where the arguments have been loaded into registers, and we
    // generate code such that the Structure & offset are always at the same distance from it.
    
    int base = currentInstruction[1].u.operand;
    int value = currentInstruction[3].u.operand;
    
    emitLoad2(base, regT1, regT0, value, regT3, regT2);
    
    emitJumpSlowCaseIfNotJSCell(base, regT1);
    
    emitWriteBarrier(regT0, regT1, WriteBarrierForPropertyAccess);
    
    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
    
    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
    m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
    
    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
    
    loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0);
    DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchPutByIdDefaultOffset)); // payload
    DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchPutByIdDefaultOffset)); // tag
    
    END_UNINTERRUPTED_SEQUENCE(sequencePutById);
    
    ASSERT_JIT_OFFSET_UNUSED(displacementLabel1, differenceBetween(hotPathBegin, displacementLabel1), patchOffsetPutByIdPropertyMapOffset1);
    ASSERT_JIT_OFFSET_UNUSED(displacementLabel2, differenceBetween(hotPathBegin, displacementLabel2), patchOffsetPutByIdPropertyMapOffset2);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int base = currentInstruction[1].u.operand;
    int ident = currentInstruction[2].u.operand;
    int direct = currentInstruction[8].u.operand;

    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    
    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
    stubCall.addArgument(regT3, regT2);
    Call call = stubCall.call();
    
    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
}

// Compile a store into an object's property storage.  May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset;
    if (structure->isUsingInlineStorage())
        offset += JSObject::offsetOfInlineStorage() / sizeof(Register);
    else
        loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
    emitStore(offset, valueTag, valuePayload, base);
}

// Compile a load from an object's property storage.  May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
{
    int offset = cachedOffset;
    if (structure->isUsingInlineStorage()) {
        offset += JSObject::offsetOfInlineStorage() / sizeof(Register);
        emitLoad(offset, resultTag, resultPayload, base);
    } else {
        RegisterID temp = resultPayload;
        loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), temp);
        emitLoad(offset, resultTag, resultPayload, temp);
    }
}

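// As above, but for a base object known at compile time: the tag and payload
// are loaded through absolute addresses into the object's property storage,
// baked directly into the generated code.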
void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
{
    load32(reinterpret_cast<char*>(&base->m_propertyStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), resultPayload);
    load32(reinterpret_cast<char*>(&base->m_propertyStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), resultTag);
}

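// Generates the stub for a put_by_id that transitions the base from
// oldStructure to newStructure: re-fetch the base and value from the JIT stack
// frame, verify the old Structure and that the prototype chain is unchanged
// (and, for non-direct puts, has no setter), reallocate the property storage
// through a stub call if the capacity changed, install the new Structure, and
// store the value. All failure cases tail-call the appropriate _fail stub.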
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    // The code below assumes that regT0 contains the basePayload and regT1 contains the baseTag. Restore them from the stack.
#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
    // For MIPS, SH4 and ARM, we don't add sizeof(void*) to the stack offset.
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#else
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif

    JumpList failureCases;
    failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
    testPrototype(oldStructure->storedPrototype(), failureCases);
    
    if (!direct) {
        // Verify that nothing in the prototype chain has a setter for this property.
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
            testPrototype((*it)->storedPrototype(), failureCases);
    }

    // Reallocate property storage if needed.
    Call callTarget;
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline is called like a JIT stub; before we can make another call we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);
        
        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);

        restoreReturnAddressBeforeReturn(regT3);

#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
        // For MIPS, SH4 and ARM, we don't add sizeof(void*) to the stack offset.
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#else
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
        load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
    }

    emitWriteBarrier(regT0, regT1, WriteBarrierForPropertyAccess);

    storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
    // For MIPS, SH4 and ARM, we don't add sizeof(void*) to the stack offset.
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
#else
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
#endif
    compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
    
    ret();
    
    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();
    
    LinkBuffer patchBuffer(*m_globalData, this);
    
    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
    
    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }
    
    stubInfo->stubRoutine = patchBuffer.finalizeCode();
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
}

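// The two patch routines below rewrite a hot path in place once a property
// access has been cached: the Structure immediate is repatched, and the two
// displacement labels (payload, then tag) are repatched to
// sizeof(JSValue) * cachedOffset plus the offset of each half of the value.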
void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);
    
    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
    
    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);
    
    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
    
    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}

void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
    
    // regT0 holds a JSCell*
    
    // Check for array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));
    
    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
    
    Jump failureCases2 = branch32(Above, regT2, TrustedImm32(INT_MAX));
    move(regT2, regT0);
    move(TrustedImm32(JSValue::Int32Tag), regT1);
    Jump success = jump();
    
    LinkBuffer patchBuffer(*m_globalData, this);
    
    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);
    
    // On success return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
    
    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = patchBuffer.finalizeCode();
    
    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
    
    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}

void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*
    
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    
    Jump failureCases1 = checkStructure(regT0, structure);
    
    // Check that the prototype object's Structure has not changed.
    move(TrustedImmPtr(protoObject), regT3);
    Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));

    bool needsStubLink = false;
    // Checks out okay!
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    
    Jump success = jump();
    
    LinkBuffer patchBuffer(*m_globalData, this);
    
    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);
    
    // On success return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = patchBuffer.finalizeCode();
    
    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
    
    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}


void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
    // regT0 holds a JSCell*
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(regT0, regT2, regT1, structure, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);

    Jump success = jump();
    
    LinkBuffer patchBuffer(*m_globalData, this);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    
    patchBuffer.link(failureCase, lastProtoBegin);
    
    // On success return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeRef stubRoutine = patchBuffer.finalizeCode();

    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubRoutine, structure);
    
    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
}

void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*
    
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
    
    // Check that regT0 is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);
    
    // Check that the prototype object's Structure has not changed.
    move(TrustedImmPtr(protoObject), regT3);
    Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
    
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    
    Jump success = jump();
    
    LinkBuffer patchBuffer(*m_globalData, this);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);
    
    // On success return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
    
    CodeRef stubRoutine = patchBuffer.finalizeCode();

    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure);
    
    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
}

void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // regT0 holds a JSCell*
    ASSERT(count);
    
    JumpList bucketsOfFail;
    
    // Check that regT0 is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));
    
    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);
    
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);

    Jump success = jump();
    
    LinkBuffer patchBuffer(*m_globalData, this);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
    
    patchBuffer.link(bucketsOfFail, lastProtoBegin);
    
    // On success return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
    
    CodeRef stubRoutine = patchBuffer.finalizeCode();
    
    // Track the stub we have created so that it will be deleted later.
    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain);
    
    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
}

void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // regT0 holds a JSCell*
    ASSERT(count);
    
    JumpList bucketsOfFail;
    
    // Check that regT0 is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));
    
    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);
    
    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
    Jump success = jump();
    
    LinkBuffer patchBuffer(*m_globalData, this);
    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
    
    // On success return back to the hot path code, at the point where it will perform the store to dst for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
    
    // Track the stub we have created so that it will be deleted later.
    CodeRef stubRoutine = patchBuffer.finalizeCode();
    stubInfo->stubRoutine = stubRoutine;
    
    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
    
    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

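// Variant of compileGetDirectOffset taking the property offset in a register.
// The offset is a count of JSValue-sized (eight byte) slots, hence the
// TimesEight scaling below.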
void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset)
{
    ASSERT(sizeof(JSValue) == 8);
    
    loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
}

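// get_by_pname is the fast path for property reads inside for-in: it verifies
// that the looked-up name really came from the iterator's cached Structure and
// that its slot index is cacheable, then loads the value directly by offset;
// otherwise it falls back to cti_op_get_by_val in the slow case.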
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;
    
    emitLoad2(property, regT1, regT0, base, regT3, regT2);
    emitJumpSlowCaseIfNotJSCell(property, regT1);
    addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
    // Property registers are now available as the property is known
    emitJumpSlowCaseIfNotJSCell(base, regT3);
    emitLoadPayload(iter, regT1);
    
    // Test base's structure
    loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
    addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT2, regT1, regT0, regT3);
    
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
}

void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    
    linkSlowCaseIfNotJSCell(iter, property);
    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);
    
    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base);
    stubCall.addArgument(property);
    stubCall.call(dst);
}

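// The scoped-variable ops walk 'skip' links down the scope chain before
// indexing into the variable object's register array. When the function might
// lazily create an activation, the first hop is made conditional on the
// activation register actually holding a created activation.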
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int index = currentInstruction[2].u.operand;
    int skip = currentInstruction[3].u.operand;

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
    ASSERT(skip || !checkTopLevel);
    if (checkTopLevel && skip--) {
        Jump activationNotCreated;
        if (checkTopLevel)
            activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
        activationNotCreated.link(this);
    }
    while (skip--)
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);

    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
    loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
}

void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int index = currentInstruction[1].u.operand;
    int skip = currentInstruction[2].u.operand;
    int value = currentInstruction[3].u.operand;

    emitLoad(value, regT1, regT0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
    ASSERT(skip || !checkTopLevel);
    if (checkTopLevel && skip--) {
        Jump activationNotCreated;
        if (checkTopLevel)
            activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
        activationNotCreated.link(this);
    }
    while (skip--)
        loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
    loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);

    emitWriteBarrier(regT2, regT3, WriteBarrierForVariableAccess);

    loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
    emitStore(index, regT1, regT0, regT2);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
}

void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    JSGlobalObject* globalObject = m_codeBlock->globalObject();
    ASSERT(globalObject->isGlobalObject());
    int index = currentInstruction[2].u.operand;

    loadPtr(&globalObject->m_registers, regT2);

    emitLoad(index, regT1, regT0, regT2);
    emitStore(dst, regT1, regT0);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
}

void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    int index = currentInstruction[1].u.operand;
    int value = currentInstruction[2].u.operand;

    JSGlobalObject* globalObject = m_codeBlock->globalObject();

    emitLoad(value, regT1, regT0);
    move(TrustedImmPtr(globalObject), regT2);

    emitWriteBarrier(regT2, regT3, WriteBarrierForVariableAccess);

    loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
    emitStore(index, regT1, regT0, regT2);
    map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
}

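// In this configuration the write barrier is a no-op unless write-barrier
// profiling is enabled, in which case it just bumps the counter for this
// use kind.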
void JIT::emitWriteBarrier(RegisterID owner, RegisterID scratch, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(useKind);
    ASSERT(owner != scratch);
    
#if ENABLE(WRITE_BARRIER_PROFILING)
    emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif
}

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)