// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"

#if V8_TARGET_ARCH_MIPS

#include "codegen.h"
#include "code-stubs.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)
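
// Note: `__` is the usual V8 shorthand for ACCESS_MASM(masm); it lets the
// stub generators below read like assembly listings while every instruction
// still goes through the MacroAssembler.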


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}


// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register elements,
                                                Register scratch0,
                                                Register scratch1,
                                                Label* miss) {
  // Register usage:
  //   receiver: holds the receiver on entry and is unchanged.
  //   elements: holds the property dictionary on fall through.
  // Scratch registers:
  //   scratch0: used to hold the receiver map.
  //   scratch1: used to hold the receiver instance type, receiver bit mask
  //     and elements map.

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the receiver is a valid JS object.
  __ GetObjectType(receiver, scratch0, scratch1);
  __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));

  // If this assert fails, we have to check upper bound too.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);

  GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);

  // Check that the global object does not require access checks.
  __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
  __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
                                     (1 << Map::kHasNamedInterceptor)));
  __ Branch(miss, ne, scratch1, Operand(zero_reg));

  __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
  __ Branch(miss, ne, scratch1, Operand(scratch0));
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name, clobbering
//           one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned from NameDictionaryLookupStub::GeneratePositiveLookup()
// in scratch2 is used.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index.
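  // Layout note: a NameDictionary entry is a (key, value, details) triple of
  // tagged words. GeneratePositiveLookup leaves scratch2 pointing at the
  // entry's key slot, so the value lives one word and the details smi two
  // words past it, which is what the offsets below encode.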
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at,
         scratch1,
         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ lw(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address returned from NameDictionaryLookupStub::GeneratePositiveLookup()
// in scratch2 is used.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
  __ Branch(miss, ne, at, Operand(zero_reg));
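  // The single test above rejects entries whose details smi encodes either a
  // non-normal property type or the READ_ONLY attribute; both field masks are
  // shifted by kSmiTagSize so they line up with the smi-encoded details word.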

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sw(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(
      elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field. The caller chooses which interceptor bit to test, so the
  // same code serves both the indexed and the named lookup path.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register scratch1,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold elements map and elements length.
  //            Holds the elements map if not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode (not dictionary).
    __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(not_fast_array, ne, scratch1, Operand(at));
  } else {
    __ AssertFastElements(elements);
  }

  // Check that the key (index) is within bounds.
  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(out_of_range, hs, key, Operand(scratch1));
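
  // The addressing below exploits smi tagging: with kSmiTag == 0 a smi key
  // is the index shifted left by kSmiTagSize, so shifting it left by another
  // (kPointerSizeLog2 - kSmiTagSize) bits yields the element's byte offset
  // directly.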

  // Fast case: Do the load.
  __ Addu(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
  __ addu(at, at, scratch1);
  __ lw(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ Branch(out_of_range, eq, scratch2, Operand(at));
  __ mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map,
                                 Register hash,
                                 Label* index_string,
                                 Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));

  // Is the string an array index, with cached numeric value?
  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm,
                                 ExtraICState extra_state) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(
      Code::HANDLER, MONOMORPHIC, extra_state,
      Code::NORMAL, Code::LOAD_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a0, a2, a3, t0, t1, t2);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);

  // a1: elements
  GenerateDictionaryLoad(masm, &miss, a1, a2, v0, a3, t0);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);

  __ mov(a3, a0);
  __ Push(a3, a2);

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a2    : name
  //  -- ra    : return address
  //  -- a0    : receiver
  // -----------------------------------

  __ mov(a3, a0);
  __ Push(a3, a2);

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register scratch1,
                                                Register scratch2,
                                                Register scratch3,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ GetObjectType(object, scratch1, scratch2);
  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));

  // Check that the key is a positive smi.
  __ And(scratch1, key, Operand(0x80000001));
  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
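
  // The mask 0x80000001 tests the smi tag bit and the sign bit at once: the
  // key passes only if it is a smi (low bit clear) and non-negative (high
  // bit clear).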

  // Load the elements into scratch1 and check its map.
  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1,
              scratch2,
              Heap::kNonStrictArgumentsElementsMapRootIndex,
              slow_case,
              DONT_DO_SMI_CHECK);
  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
  __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
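
  // The first two slots of the parameter map hold the context and the
  // arguments backing store, so the mapped-parameter count is length - 2 and
  // the per-key entries start two words into the FixedArray (see kOffset
  // below).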

  // Load element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ li(scratch3, Operand(kPointerSize >> 1));
  __ Mul(scratch3, key, scratch3);
  __ Addu(scratch3, scratch3, Operand(kOffset));

  __ Addu(scratch2, scratch1, scratch3);
  __ lw(scratch2, MemOperand(scratch2));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch1).
  __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ li(scratch3, Operand(kPointerSize >> 1));
  __ Mul(scratch3, scratch2, scratch3);
  __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  __ Addu(scratch2, scratch1, scratch3);
  return MemOperand(scratch2);
}


static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  __ CheckMap(backing_store,
              scratch,
              Heap::kFixedArrayMapRootIndex,
              slow_case,
              DONT_DO_SMI_CHECK);
  __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
  __ li(scratch, Operand(kPointerSize >> 1));
  __ Mul(scratch, key, scratch);
  __ Addu(scratch,
          scratch,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(scratch, backing_store, scratch);
  return MemOperand(scratch);
}


void KeyedLoadIC::GenerateNonStrictArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label slow, notin;
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
  __ Ret(USE_DELAY_SLOT);
  __ lw(v0, mapped_location);
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a2.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
  __ lw(a2, unmapped_location);
  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow, eq, a2, Operand(a3));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a2);
  __ bind(&slow);
  GenerateMiss(masm);
}
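
// A note on the Ret(USE_DELAY_SLOT) idiom used above and below: on MIPS the
// instruction issued right after a jump executes in the branch delay slot,
// so the move or load that materializes the return value in v0 is
// deliberately emitted after the Ret.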


void KeyedStoreIC::GenerateNonStrictArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, notin;
  // Store address is returned in register (of MemOperand) mapped_location.
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
  __ sw(a0, mapped_location);
  __ mov(t5, a0);
  ASSERT_EQ(mapped_location.offset(), 0);
  __ RecordWrite(a3, mapped_location.rm(), t5,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a3.
  // Store address is returned in register (of MemOperand) unmapped_location.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
  __ sw(a0, unmapped_location);
  __ mov(t5, a0);
  ASSERT_EQ(unmapped_location.offset(), 0);
  __ RecordWrite(a3, unmapped_location.rm(), t5,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);

  __ Push(a1, a0);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------

  __ Push(a1, a0);

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = a0;
  Register receiver = a1;

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a2, a3, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, t0, a3, a2, v0, NULL, &slow);

  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // a0: key
  // a3: elements map
  // t0: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
  __ sra(a2, a0, kSmiTagSize);
  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
  __ Ret();

  // Slow case, key and receiver still in a0 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
                      1,
                      a2,
                      a3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, t0, Operand(at));

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
  __ lw(t0, FieldMemOperand(a0, Name::kHashFieldOffset));
  __ sra(at, t0, Name::kHashShift);
  __ xor_(a3, a3, at);
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(a3, a3, Operand(mask));
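
  // Each cache entry is a (map, name) pair of adjacent words, so the entry
  // index is scaled by 2 * kPointerSize below; a bucket is simply
  // kEntriesPerBucket consecutive entries that are probed in turn.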

  // Load the key (consisting of map and unique name) from the cache and
  // check match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ li(t0, Operand(cache_keys));
  __ sll(at, a3, kPointerSizeLog2 + 1);
  __ addu(t0, t0, at);

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
    __ Branch(&try_next_entry, ne, a2, Operand(t1));
    __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
    __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
    __ bind(&try_next_entry);
  }

  // Last entry: fall through to the slow case if the name does not match.
  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
  __ Branch(&slow, ne, a2, Operand(t1));
  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
  __ Branch(&slow, ne, a0, Operand(t1));

  // Get field offset.
  // a0     : key
  // a1     : receiver
  // a2     : receiver's map
  // a3     : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ li(t0, Operand(cache_field_offsets));
    __ sll(at, a3, kPointerSizeLog2);
    __ addu(at, t0, at);
    __ lw(t1, MemOperand(at, kPointerSize * i));
    __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
    __ Subu(t1, t1, t2);
    __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
    if (i != 0) {
      __ Branch(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
  __ addu(t2, t2, t1);  // Index from start of object.
  __ Subu(a1, a1, Operand(kHeapObjectTag));  // Remove the heap tag.
  __ sll(at, t2, kPointerSizeLog2);
  __ addu(at, a1, at);
  __ lw(v0, MemOperand(at));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1,
                      a2,
                      a3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
  __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
  __ sll(t0, t1, kPointerSizeLog2);
  __ Addu(t0, t0, a1);
  __ lw(v0, MemOperand(t0));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1,
                      a2,
                      a3);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a1: receiver
  // a0: key
  // a3: elements
  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
  __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1,
                      a2,
                      a3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key (index)
  //  -- a1     : receiver
  // -----------------------------------
  Label miss;

  Register receiver = a1;
  Register index = a0;
  Register scratch = a3;
  Register result = v0;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictModeFlag strict_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(a2, a1, a0);
  __ li(a1, Operand(Smi::FromInt(NONE)));         // PropertyAttributes.
  __ li(a0, Operand(Smi::FromInt(strict_mode)));  // Strict mode.
  __ Push(a1, a0);

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}
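
// Runtime::kSetProperty expects five arguments: receiver, key, value, the
// PropertyAttributes smi and the strict-mode smi, which is why five values
// are pushed before the tail call above.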


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = t0;
  Register address = t1;
  if (check_map == kCheckMap) {
    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
  __ addu(address, address, at);
  __ lw(scratch_value, MemOperand(address));
  __ Branch(&holecheck_passed1, ne, scratch_value,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(address, address, scratch_value);
  __ sw(value, MemOperand(address));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(address, address, scratch_value);
  __ sw(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements,
                 address,
                 scratch_value,
                 kRAHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Ret();
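
  // Note that the smi path above needs no write barrier: a smi is not a heap
  // pointer, so storing it can never create a reference the garbage collector
  // has to track. Only the object store here records its slot.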

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  __ Addu(address, elements,
          Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
                  - kHeapObjectTag));
  __ sll(at, key, kPointerSizeLog2);
  __ addu(address, address, at);
  __ lw(scratch_value, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch_value,
            Operand(kHoleNanUpper32));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value,
                                 key,
                                 elements,  // Overwritten.
                                 a3,        // Scratch regs...
                                 t0,
                                 t1,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, t0, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3.
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3.
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
                                                                   slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         t0,
                                         slow);
  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3.
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictModeFlag strict_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = a0;
  Register key = a1;
  Register receiver = a2;
  Register receiver_map = a3;
  Register elements_map = t2;
  Register elements = t3;  // Elements array of the receiver.
  // t0 and t1 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle
  // observed objects.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded |
                         1 << Map::kIsObserved));
  __ Branch(&slow, ne, t0, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(t0));
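
  // Both key and length are smis, so the unsigned comparisons here order the
  // tagged values exactly like the untagged indices would; a negative key
  // reads as a huge unsigned number and simply falls into the slow case.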

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // The key register and the array length (in t0) are intact from the
  // comparison above. Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(t0));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(t0));
  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(
      &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(t0));

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}
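
// The helper above is instantiated twice: once for in-bounds stores (with
// the elements map check, no length update) and once for the grow-by-one
// case, which skips the map check already performed on the way in but bumps
// the JSArray length.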


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- ra     : return address
  //  -- a0     : key
  //  -- a1     : receiver
  // -----------------------------------
  Label slow;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(a1, &slow);

  // Check that the key is an array index, that is Uint32.
  __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
  __ Branch(&slow, ne, t0, Operand(zero_reg));

  // Get the map of the receiver.
  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));

  // Check that it has indexed interceptor and access checks
  // are not enabled for this object.
  __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
  __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
  __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
  // Everything is fine, call runtime.
  __ Push(a1, a0);  // Receiver, key.

  // Perform tail call to the entry.
  __ TailCallExternalReference(ExternalReference(
      IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);

  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(a2, a1, a0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a2     : key
  //  -- a1     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(a1, a2, a0);

  // The slow case calls into the runtime to complete the store without
  // causing an IC miss that would otherwise cause a transition to the
  // generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  // We can't use MultiPush as the order of the registers is important.
  __ Push(a2, a1, a0);

  // The slow case calls into the runtime to complete the store without
  // causing an IC miss that would otherwise cause a transition to the
  // generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                  ExtraICState extra_ic_state) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeFlags(
      Code::HANDLER, MONOMORPHIC, extra_ic_state,
      Code::NORMAL, Code::STORE_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, a1, a2, a3, t0, t1, t2);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  __ Push(a1, a2, a0);
  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
                                            masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);

  GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictModeFlag strict_mode) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : receiver
  //  -- a2    : name
  //  -- ra    : return address
  // -----------------------------------

  __ Push(a1, a2, a0);

  __ li(a1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
  __ li(a0, Operand(Smi::FromInt(strict_mode)));
  __ Push(a1, a0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
      Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
           address, andi_instruction_address, delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
    ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  ASSERT(Assembler::IsBranch(branch_instr));
  if (Assembler::IsBeq(branch_instr)) {
    patcher.ChangeBranchCondition(ne);
  } else {
    ASSERT(Assembler::IsBne(branch_instr));
    patcher.ChangeBranchCondition(eq);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS