Source/JavaScriptCore/jit/JITPropertyAccess.cpp
/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "CodeBlock.h"
#include "GetterSetter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {
#if USE(JSVALUE64)

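// Shared helper stub for get_by_val on a JSString base: expects the string
// cell in regT0 and an int32 index in regT1, and returns the cached
// single-character string in regT0. It returns 0 in regT0 when the access
// can't be handled here (wrong vptr, unresolved rope, out-of-bounds index,
// or a character outside the single-character string cache).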
JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    failures.append(jit.branchTest32(Zero, regT0));

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));

    // Load the character
    JumpList is16Bit;
    JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplFlagsOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
    is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
    jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
    cont8Bit.link(&jit);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();

    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();

    LinkBuffer patchBuffer(*globalData, &jit);
    return patchBuffer.finalizeCode();
}

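// Fast path for get_by_val, e.g. the read of `a[i]` in:
//
//     var a = [1, 2, 3];
//     var x = a[i];
//
// Only an int32 index into a JSArray's vector is handled inline; holes,
// out-of-bounds indices, strings and other bases all bail to the slow case.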
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);

    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
    // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
    // number was negative, since m_vectorLength is always less than intmax (the total allocation size is
    // always less than 4Gb). As such, zero-extending will have been correct (and extending the value to
    // 64 bits is necessary since it's used in the address calculation). We zero-extend rather than
    // sign-extend since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));

    loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
    addSlowCase(branchTestPtr(Zero, regT0));

    emitValueProfilingSite(FirstProfilingSite);
    emitPutVirtualRegister(dst);
}

void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
    Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
    emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
    Jump failed = branchTestPtr(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);

    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);

    emitValueProfilingSite(SubsequentProfilingSite);
}

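// Load a property at a dynamically computed index: fetch the property
// storage pointer from the base object, then index it by 'offset' (scaled
// by pointer size). Used by op_get_by_pname below.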
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch)
{
    loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), scratch);
    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
}

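// get_by_pname is the cached fast path for property reads inside for-in,
// e.g. the `o[p]` in:
//
//     for (var p in o)
//         use(o[p]);
//
// The direct load is only valid if the property name is the one the
// iterator expects, the base still has the iterator's cached Structure,
// and the index falls within the cacheable slots; anything else falls back
// to a generic get_by_val via the slow case.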
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
    compileGetDirectOffset(regT0, regT0, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}

void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}

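// Fast path for put_by_val, e.g. the store `a[i] = v`. Writes within the
// vector length are handled inline, including filling a hole (which bumps
// m_numValuesInVector and, for indices at or beyond the current length,
// m_length); everything else goes to the slow case.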
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
    addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));

    loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
    Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT3);
    storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    Jump end = jump();

    empty.link(this);
    add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
    branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT1);
    store32(regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
    sub32(TrustedImm32(1), regT1);
    jump().linkTo(storeResult, this);

    end.link(this);

    emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
}

void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check
    linkSlowCase(iter); // in vector check

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(regT0);
    stubPutByValCall.addArgument(property, regT2);
    stubPutByValCall.addArgument(value, regT2);
    stubPutByValCall.call();
}

void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

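// op_method_check speculatively optimizes a get_by_id that feeds a call,
// e.g. the load of `o.f` in `o.f()`: we inline checks on the base's
// Structure and its prototype's Structure, and once the cache is primed
// (see patchMethodCallProto) the function is materialized as a constant
// with no load at all. On a miss this degrades to a regular get_by_id.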
void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_bytecodeOffset, m_propertyAccessCompilationInfo.size()));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    ASSERT_JIT_OFFSET_UNUSED(protoObj, differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
    ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
    ASSERT_JIT_OFFSET_UNUSED(putFunction, differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(baseVReg, ident);

    match.link(this);
    emitValueProfilingSite(FirstProfilingSite);
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}

void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
    emitValueProfilingSite(SubsequentProfilingSite);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(baseVReg, ident);
    emitValueProfilingSite(FirstProfilingSite);
    emitPutVirtualRegister(resultVReg);
}

void JIT::compileGetByIdHotPath(int baseVReg, Identifier*)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as
    // a label to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
    m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset;
    m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;

    DataLabelPtr structureToCompare;
    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);

    loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0);
    DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
    ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
}

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
    emitValueProfilingSite(SubsequentProfilingSite);
}

void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

#ifndef NDEBUG
    Label coldPathBegin(this);
#endif
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    // In order to be able to patch both the Structure and the object offset, we store one pointer
    // ('hotPathBegin') to just after the arguments have been loaded into registers, and we generate code
    // such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);
    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
    m_propertyAccessCompilationInfo.last().bytecodeIndex = m_bytecodeOffset;
    m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
    ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);

    loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);

    ASSERT_JIT_OFFSET_UNUSED(displacementLabel, differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned direct = currentInstruction[8].u.operand;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    stubCall.addArgument(regT1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
}

// Compile a store into an object's property storage.  May overwrite the
// value in base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
    storePtr(value, Address(base, offset));
}

// Compile a load from an object's property storage.  May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset)
{
    int offset = cachedOffset * sizeof(JSValue);
    loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), result);
    loadPtr(Address(result, offset), result);
}

void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
{
    loadPtr(base->addressOfPropertyStorage(), result);
    loadPtr(Address(result, cachedOffset * sizeof(WriteBarrier<Unknown>)), result);
}

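// Compile a one-off stub for a put_by_id that transitions the base from
// oldStructure to newStructure - e.g. the first `o.x = 1` that adds a new
// property. The stub revalidates the old Structure and the prototype
// chain, reallocates the property storage if its capacity changed, plants
// the new Structure, and stores the value at the cached offset.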
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));

    testPrototype(oldStructure->storedPrototype(), failureCases);

    ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get());

    // ecx = baseObject->m_structure
    if (!direct) {
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get());
            testPrototype((*it)->storedPrototype(), failureCases);
        }
    }

    Call callTarget;

    // emit a call only if storage realloc is needed
    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);

        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
        stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    // Planting the new structure triggers the write barrier so we need
    // an unconditional barrier here.
    emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);

    storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
    compilePutDirectOffset(regT0, regT1, cachedOffset);

    ret();

    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(*m_globalData, this);

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }

    stubInfo->stubRoutine = patchBuffer.finalizeCode();
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
}

void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    int offset = sizeof(JSValue) * cachedOffset;

    // Patch the offset into the property map to store to, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
}

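// Compile the trampoline for a get_by_id of `length` on a JSArray, e.g.
// `a.length`. The length is re-boxed as an immediate int; values that
// don't fit in a signed int32 (caught by the compare against 0) take the
// slow case.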
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check eax is an array
    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));

    // Checks out okay! - get the length from the storage
    loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
    load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
    Jump failureCases2 = branch32(LessThan, regT2, TrustedImm32(0));

    emitFastArithIntToImmNoCheck(regT2, regT0);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = patchBuffer.finalizeCode();

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}

void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    move(TrustedImmPtr(protoObject), regT3);
    Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));

    bool needsStubLink = false;

    // Checks out okay!
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();
    LinkBuffer patchBuffer(*m_globalData, this);

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
    patchBuffer.link(failureCases1, slowCaseBegin);
    patchBuffer.link(failureCases2, slowCaseBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }
    // Track the stub we have created so that it will be deleted later.
    stubInfo->stubRoutine = patchBuffer.finalizeCode();

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
    Jump failureCase = checkStructure(regT0, structure);
    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(regT0, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(regT0, regT0, cachedOffset);
    }
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
    if (!lastProtoBegin)
        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

    patchBuffer.link(failureCase, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();

    polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
}

void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
    // referencing the prototype object - let's speculatively load its table nice and early!)
    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));

    // Check eax is an object of the right Structure.
    Jump failureCases1 = checkStructure(regT0, structure);

    // Check the prototype object's Structure had not changed.
    move(TrustedImmPtr(protoObject), regT3);
    Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));

    // Checks out okay!
    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    }

    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
    patchBuffer.link(failureCases1, lastProtoBegin);
    patchBuffer.link(failureCases2, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();
    prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
}

void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
    ASSERT(count);
    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    Jump baseObjectCheck = checkStructure(regT0, structure);
    bucketsOfFail.append(baseObjectCheck);

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    bool isDirect = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else {
        isDirect = true;
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    }
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());

    patchBuffer.link(bucketsOfFail, lastProtoBegin);

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    CodeRef stubRoutine = patchBuffer.finalizeCode();

    // Track the stub we have created so that it will be deleted later.
    prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
}

void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
    ASSERT(count);

    JumpList bucketsOfFail;

    // Check eax is an object of the right Structure.
    bucketsOfFail.append(checkStructure(regT0, structure));

    Structure* currStructure = structure;
    WriteBarrier<Structure>* it = chain->head();
    JSObject* protoObject = 0;
    for (unsigned i = 0; i < count; ++i, ++it) {
        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
        currStructure = it->get();
        testPrototype(protoObject, bucketsOfFail);
    }
    ASSERT(protoObject);

    bool needsStubLink = false;
    if (slot.cachedPropertyType() == PropertySlot::Getter) {
        needsStubLink = true;
        compileGetDirectOffset(protoObject, regT1, cachedOffset);
        JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
        stubCall.addArgument(regT1);
        stubCall.addArgument(regT0);
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
        needsStubLink = true;
        JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
        stubCall.addArgument(TrustedImmPtr(protoObject));
        stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
        stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
        stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
        stubCall.call();
    } else
        compileGetDirectOffset(protoObject, regT0, cachedOffset);
    Jump success = jump();

    LinkBuffer patchBuffer(*m_globalData, this);

    if (needsStubLink) {
        for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
            if (iter->to)
                patchBuffer.link(iter->from, FunctionPtr(iter->to));
        }
    }

    // Use the patch information to link the failure cases back to the original slow case routine.
    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

    // Track the stub we have created so that it will be deleted later.
    CodeRef stubRoutine = patchBuffer.finalizeCode();
    stubInfo->stubRoutine = stubRoutine;

    // Finally patch the jump to slow case back in the hot path to jump here instead.
    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}

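// Read a variable from an enclosing scope: walk 'skip' links up the scope
// chain, then load from the scope object's registers. For example, the
// read of `x` in the inner function of:
//
//     function outer() { var x; return function () { return x; }; }
//
// walks one link. If the code block's activation may not have been created
// yet, the first hop is guarded on the activation register being set.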
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[3].u.operand;

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
    ASSERT(skip || !checkTopLevel);
    if (checkTopLevel && skip--) {
        Jump activationNotCreated;
        if (checkTopLevel)
            activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
        loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
        activationNotCreated.link(this);
    }
    while (skip--)
        loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);

    loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
    loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
    emitValueProfilingSite(FirstProfilingSite);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
    int skip = currentInstruction[2].u.operand;

    emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);

    emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
    bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
    ASSERT(skip || !checkTopLevel);
    if (checkTopLevel && skip--) {
        Jump activationNotCreated;
        if (checkTopLevel)
            activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
        loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
        activationNotCreated.link(this);
    }
    while (skip--)
        loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
    loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);

    emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);

    loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
    storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
}

void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
    JSVariableObject* globalObject = m_codeBlock->globalObject();
    loadPtr(&globalObject->m_registers, regT0);
    loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
    emitValueProfilingSite(FirstProfilingSite);
    emitPutVirtualRegister(currentInstruction[1].u.operand);
}

void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
    JSGlobalObject* globalObject = m_codeBlock->globalObject();

    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);

    move(TrustedImmPtr(globalObject), regT1);
    loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
    storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
    emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}

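// Restore the get_by_id / put_by_id inline caches to their unpatched
// state: relink the slow-path call to the generic cti function, poison the
// inline Structure check so it always fails, and (for get) point the
// slow-case branch back at the original slow case.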
void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
    repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), reinterpret_cast<void*>(-1));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetGetByIdPropertyMapOffset), 0);
    repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase), stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
}

void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
    if (isDirectPutById(stubInfo))
        repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
    else
        repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), reinterpret_cast<void*>(-1));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(patchOffsetPutByIdPropertyMapOffset), 0);
}

#endif // USE(JSVALUE64)

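// Register-based write barrier. Under ENABLE(GGC) this implements card
// marking: if the stored value may be a cell, locate the owner's
// MarkedBlock, test the owner's (approximate) mark byte, and if it is set
// dirty the card covering the owner so the collector rescans that region.
// Without GGC the only effect is the optional write-barrier counters.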
void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratch, RegisterID scratch2, WriteBarrierMode mode, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(scratch2);
    UNUSED_PARAM(useKind);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);
    ASSERT(owner != scratch);
    ASSERT(owner != scratch2);

#if ENABLE(WRITE_BARRIER_PROFILING)
    emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif

#if ENABLE(GGC)
    Jump filterCells;
    if (mode == ShouldFilterImmediates)
        filterCells = emitJumpIfNotJSCell(value);
    move(owner, scratch);
    andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch);
    move(owner, scratch2);
    // consume additional 8 bits as we're using an approximate filter
    rshift32(TrustedImm32(MarkedBlock::atomShift + 8), scratch2);
    andPtr(TrustedImm32(MarkedBlock::atomMask >> 8), scratch2);
    Jump filter = branchTest8(Zero, BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfMarks()));
    move(owner, scratch2);
    rshift32(TrustedImm32(MarkedBlock::cardShift), scratch2);
    andPtr(TrustedImm32(MarkedBlock::cardMask), scratch2);
    store8(TrustedImm32(1), BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfCards()));
    filter.link(this);
    if (mode == ShouldFilterImmediates)
        filterCells.link(this);
#endif
}

void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode mode, WriteBarrierUseKind useKind)
{
    UNUSED_PARAM(owner);
    UNUSED_PARAM(scratch);
    UNUSED_PARAM(useKind);
    UNUSED_PARAM(value);
    UNUSED_PARAM(mode);

#if ENABLE(WRITE_BARRIER_PROFILING)
    emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif

#if ENABLE(GGC)
    Jump filterCells;
    if (mode == ShouldFilterImmediates)
        filterCells = emitJumpIfNotJSCell(value);
    uint8_t* cardAddress = Heap::addressOfCardFor(owner);
    move(TrustedImmPtr(cardAddress), scratch);
    store8(TrustedImm32(1), Address(scratch));
    if (mode == ShouldFilterImmediates)
        filterCells.link(this);
#endif
}

void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
{
    if (prototype.isNull())
        return;

    ASSERT(prototype.isCell());
    move(TrustedImmPtr(prototype.asCell()), regT3);
    failureCases.append(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototype.asCell()->structure())));
}

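// Slow-path helper that primes a method-check cache (see
// emit_op_method_check): patches the cached Structure, prototype
// Structure, prototype object and function constant into the inline
// sequence, then relinks the caller to the update stub.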
void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    CodeLocationDataLabelPtr structureLocation = methodCallLinkInfo.cachedStructure.location();
    methodCallLinkInfo.cachedStructure.set(globalData, structureLocation, codeBlock->ownerExecutable(), structure);

    Structure* prototypeStructure = proto->structure();
    methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), codeBlock->ownerExecutable(), prototypeStructure);
    methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), codeBlock->ownerExecutable(), proto);
    methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), codeBlock->ownerExecutable(), callee);
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_method_check_update));
}

bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
{
    switch (stubInfo->accessType) {
    case access_put_by_id_transition_normal:
        return false;
    case access_put_by_id_transition_direct:
        return true;
    case access_put_by_id_replace:
    case access_put_by_id_generic: {
        void* oldCall = MacroAssembler::readCallTarget(stubInfo->callReturnLocation).executableAddress();
        if (oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_generic)
            || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_fail))
            return true;
        ASSERT(oldCall == bitwise_cast<void*>(cti_op_put_by_id)
               || oldCall == bitwise_cast<void*>(cti_op_put_by_id_generic)
               || oldCall == bitwise_cast<void*>(cti_op_put_by_id_fail));
        return false;
    }
    default:
        ASSERT_NOT_REACHED();
        return false;
    }
}

} // namespace JSC

#endif // ENABLE(JIT)