// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, global_object);
}
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name clobbering
//           one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
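  // A "normal" property has a zero PropertyDetails type field. The details
  // word is stored as a smi, so the mask is shifted up by the smi tag size.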
  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ b(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY))
      << kSmiTagSize;
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
  __ b(ne, miss);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
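  // kValueOffset is a tagged field offset; subtracting kHeapObjectTag turns
  // scratch2 into a raw slot address that the write barrier below can use.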
  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}
// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ tst(scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ b(ne, slow);
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
  __ b(lt, slow);
}
// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold elements map and elements length.
  //            Holds the elements map if not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
    __ cmp(scratch1, ip);
    __ b(ne, not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }
  // Check that the key (index) is within bounds.
  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch1));
  __ b(hs, out_of_range);
  // Fast case: Do the load.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
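  // A smi key already encodes index * 2; PointerAddressFromSmiKey scales it
  // by another factor of two, giving index * kPointerSize on 32-bit ARM.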
  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, ip);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ b(eq, out_of_range);
  __ mov(result, scratch2);
}
// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
  __ b(hi, not_unique);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ b(eq, &unique);

  // Is the string an array index, with cached numeric value?
  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ b(eq, index_string);

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ tst(hash, Operand(kIsNotInternalizedMask));
  __ b(ne, not_unique);

  __ bind(&unique);
}
void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // The return address is in lr.
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, r3,
                                               r4, r5, r6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}
void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = r0;
  DCHECK(!dictionary.is(ReceiverRegister()));
  DCHECK(!dictionary.is(NameRegister()));

  Label slow;

  __ ldr(dictionary,
         FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), r0, r3, r4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}
// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r3; }
void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);

  __ mov(LoadIC_TempRegister(), ReceiverRegister());
  __ Push(LoadIC_TempRegister(), NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}
void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ mov(LoadIC_TempRegister(), ReceiverRegister());
  __ Push(LoadIC_TempRegister(), NameRegister());

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}
static MemOperand GenerateMappedArgumentsLookup(
    MacroAssembler* masm, Register object, Register key, Register scratch1,
    Register scratch2, Register scratch3, Label* unmapped_case,
    Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
  __ b(lt, slow_case);

  // Check that the key is a positive smi.
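  // (0x80000001 combines the smi tag bit and the sign bit, so a single test
  // rejects both non-smis and negative smis.)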
  __ tst(key, Operand(0x80000001));
  __ b(ne, slow_case);

  // Load the elements into scratch1 and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
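  // The first two slots of the parameter map hold the context and the
  // backing store, so the number of mapped arguments is length - 2.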
  __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ cmp(key, Operand(scratch2));
  __ b(cs, unmapped_case);

  // Load element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
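  // key is a smi (index << 1) and kPointerSize >> 1 == 2, so the multiply
  // below yields index * kPointerSize.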
  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, key, scratch3);
  __ add(scratch3, scratch3, Operand(kOffset));

  __ ldr(scratch2, MemOperand(scratch1, scratch3));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ cmp(scratch2, scratch3);
  __ b(eq, unmapped_case);

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch2).
  __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ mov(scratch3, Operand(kPointerSize >> 1));
  __ mul(scratch3, scratch2, scratch3);
  __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  return MemOperand(scratch1, scratch3);
}
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
              DONT_DO_SMI_CHECK);
  __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch));
  __ b(cs, slow_case);
  __ mov(scratch, Operand(kPointerSize >> 1));
  __ mul(scratch, key, scratch);
  __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  return MemOperand(backing_store, scratch);
}
void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // The return address is in lr.
  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  DCHECK(receiver.is(r1));
  DCHECK(key.is(r2));

  Label slow, notin;
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, r0, r3, r4, &notin, &slow);
  __ ldr(r0, mapped_location);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in r0.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow);
  __ ldr(r0, unmapped_location);
  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
  __ cmp(r0, r3);
  __ b(eq, &slow);
  __ Ret();
  __ bind(&slow);
  GenerateMiss(masm);
}
void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  Register value = ValueRegister();
  DCHECK(receiver.is(r1));
  DCHECK(key.is(r2));
  DCHECK(value.is(r0));

  Label slow, notin;
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, r3, r4, r5, &notin, &slow);
  __ str(value, mapped_location);
  __ add(r6, r3, r5);
  __ mov(r9, value);
  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in r3.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow);
  __ str(value, unmapped_location);
  __ add(r6, r3, r4);
  __ mov(r9, value);
  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ bind(&slow);
  GenerateMiss(masm);
}
void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);

  __ Push(ReceiverRegister(), NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}
// IC register specifications
const Register LoadIC::ReceiverRegister() { return r1; }
const Register LoadIC::NameRegister() { return r2; }


const Register LoadIC::SlotRegister() {
  DCHECK(FLAG_vector_ics);
  return r0;
}


const Register LoadIC::VectorRegister() {
  DCHECK(FLAG_vector_ics);
  return r3;
}


const Register StoreIC::ReceiverRegister() { return r1; }
const Register StoreIC::NameRegister() { return r2; }
const Register StoreIC::ValueRegister() { return r0; }


const Register KeyedStoreIC::MapRegister() { return r3; }
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ Push(ReceiverRegister(), NameRegister());

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}
void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in lr.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(key.is(r2));
  DCHECK(receiver.is(r1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(r0, r3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // r3: elements map
  // r4: elements
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &slow);
  __ SmiUntag(r0, key);
  __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
  __ Ret();

  // Slow case, key and receiver still in r2 and r1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4,
                      r3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r4, ip);
  __ b(eq, &probe_dictionary);

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift));
  __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset));
  __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(r3, r3, Operand(mask));

  // Load the key (consisting of map and unique name) from the cache and
  // check its value.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
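  // Each bucket entry is a (map, name) pair, i.e. two pointers, hence the
  // index is scaled by 2 * kPointerSize below.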
  __ mov(r4, Operand(cache_keys));
  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load map and move r4 to next entry.
    __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
    __ cmp(r0, r5);
    __ b(ne, &try_next_entry);
    __ ldr(r5, MemOperand(r4, -kPointerSize));  // Load name
    __ cmp(key, r5);
    __ b(eq, &hit_on_nth_entry[i]);
    __ bind(&try_next_entry);
  }

  // Last entry: Load map and move r4 to name.
  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
  __ cmp(r0, r5);
  __ b(ne, &slow);
  __ ldr(r5, MemOperand(r4));
  __ cmp(key, r5);
  __ b(ne, &slow);

  // Get field offset.
  // r0     : receiver's map
  // r3     : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ mov(r4, Operand(cache_field_offsets));
    if (i != 0) {
      __ add(r3, r3, Operand(i));
    }
    __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
    __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset));
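    // After the subtraction, a result >= 0 means the property lives in the
    // property backing-store array; a negative result is an in-object
    // property, counted back from the end of the object.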
    __ sub(r5, r5, r6, SetCC);
    __ b(ge, &property_array_property);
    if (i != 0) {
      __ jmp(&load_in_object_property);
    }
  }
  // Load in-object property.
  __ bind(&load_in_object_property);
  __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset));
  __ add(r6, r6, r5);  // Index from start of object.
  __ sub(receiver, receiver, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      r4, r3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      r4, r3);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // r3: elements
  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
  // Load the property to r0.
  GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4,
                      r3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(r3, key);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // Return address is in lr.
  Label miss;

  Register receiver = ReceiverRegister();
  Register index = NameRegister();
  Register scratch = r3;
  Register result = r0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}
void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // Return address is in lr.
  Label slow;

  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  Register scratch1 = r3;
  Register scratch2 = r4;
  DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
  DCHECK(!scratch2.is(receiver) && !scratch2.is(key));

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &slow);

  // Check that the key is an array index, that is Uint32.
  __ NonNegativeSmiTst(key);
  __ b(ne, &slow);

  // Get the map of the receiver.
  __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that it has indexed interceptor and access checks
  // are not enabled for this object.
  __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
  __ and_(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
  __ cmp(scratch2, Operand(1 << Map::kHasIndexedInterceptor));
  __ b(ne, &slow);

  // Everything is fine, call runtime.
  __ Push(receiver, key);  // Receiver, key.

  // Perform tail call to the entry.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kLoadElementWithInterceptor),
                        masm->isolate()),
      2, 1);

  __ bind(&slow);
  GenerateMiss(masm);
}
void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictMode strict_mode) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  __ mov(r0, Operand(Smi::FromInt(strict_mode)));  // Strict mode.
  __ Push(r0);

  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}
static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = r4;
  Register address = r5;
  if (check_map == kCheckMap) {
    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ b(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element
  Label holecheck_passed1;
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(scratch_value,
         MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
  __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
  __ b(ne, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
  __ str(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ b(ne, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
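  // The base address below is offset past the lower word of each double, so
  // the load reads the upper 32 bits, where the hole NaN pattern can be
  // distinguished from ordinary doubles.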
  __ add(address, elements,
         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
                 kHeapObjectTag));
  __ ldr(scratch_value,
         MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
  __ cmp(scratch_value, Operand(kHoleNanUpper32));
  __ b(ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
  __ b(ne, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, r4, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, r4, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : receiver
  //  -- r2     : key
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = ValueRegister();
  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(receiver.is(r1));
  DCHECK(key.is(r2));
  DCHECK(value.is(r0));
  Register receiver_map = r3;
  Register elements_map = r6;
  Register elements = r9;  // Elements array of the receiver.
  // r4 and r5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ b(ne, &slow);
  // Check if the object is a JS array or not.
  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(JS_ARRAY_TYPE));
  __ b(eq, &array);
  // Check that the object is some kind of JSObject.
  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
  __ b(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(lo, &fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r0: value.
  // r1: receiver.
  // r2: key.
  GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ b(ne, &slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &slow);
  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ b(ne, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ b(ne, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &extra);

  KeyedStoreGenerateGenericHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength, value,
                                  key, receiver, receiver_map, elements_map,
                                  elements);
}
void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));
  DCHECK(ValueRegister().is(r0));

  // Get the receiver from the stack and probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));

  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, r3,
                                               r4, r5, r6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}
void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}
void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  Register value = ValueRegister();
  Register dictionary = r3;
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));
  DCHECK(value.is(r0));

  __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
  GenerateMiss(masm);
}
void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictMode strict_mode) {
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  __ mov(r0, Operand(Smi::FromInt(strict_mode)));
  __ Push(r0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}


#undef __
Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}
bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  return Assembler::IsCmpImmediate(instr);
}
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (!Assembler::IsCmpImmediate(instr)) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
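  // The delta is spread across the 12-bit immediate and the register field of
  // the cmp instruction, so deltas larger than 12 bits can be encoded.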
  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n", address,
           cmp_instruction_address, delta);
  }

  Address patch_address =
      cmp_instruction_address - delta * Instruction::kInstrSize;
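  // Step back 'delta' instructions from the cmp to reach the cmp/tst emitted
  // at the inlined map-check site, followed by its conditional branch.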
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   cmp rx, rx
  //   b eq/ne, <target>
  // to
  //   tst rx, #kSmiTagMask
  //   b ne/eq, <target>
  // and vice-versa to be disabled again.
  CodePatcher patcher(patch_address, 2);
  Register reg = Assembler::GetRn(instr_at_patch);
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsCmpRegister(instr_at_patch));
    DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
              Assembler::GetRm(instr_at_patch).code());
    patcher.masm()->tst(reg, Operand(kSmiTagMask));
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsTstImmediate(instr_at_patch));
    patcher.masm()->cmp(reg, reg);
  }
  DCHECK(Assembler::IsBranch(branch_instr));
  if (Assembler::GetCondition(branch_instr) == eq) {
    patcher.EmitCondition(ne);
  } else {
    DCHECK(Assembler::GetCondition(branch_instr) == ne);
    patcher.EmitCondition(eq);
  }
}
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM