// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

20 // "type" holds an instance type on entry and is not clobbered.
21 // Generated code branch on "global_object" if type is any kind of global
23 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
24 Label* global_object) {
25 __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
26 __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
27 __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
28 __ B(eq, global_object);
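// The Cmp/Ccmp chain above is a branch-free OR of three equality tests
// (descriptive sketch, not new behavior): each Ccmp performs its comparison
// only while the previous test failed (ne); once a test has matched, the
// Ccmp instead forces the Z flag (ZFlag), so the final B(eq, ...) is taken
// iff
//   type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE ||
//   type == JS_GLOBAL_PROXY_TYPE
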
// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is done.
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
  DCHECK(!AreAliased(result, scratch1, scratch2));

  Label done;
  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal property.
  __ Bind(&done);

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}

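// Layout note (illustrative, not a behavior change): a NameDictionary entry
// is a (key, value, details) triple stored inline in the elements area, so
// relative to the entry that GeneratePositiveLookup leaves in scratch2:
//   key     at kElementsStartOffset + 0 * kPointerSize
//   value   at kElementsStartOffset + 1 * kPointerSize
//   details at kElementsStartOffset + 2 * kPointerSize
// which is why kDetailsOffset adds 2 * kPointerSize and the value load adds
// 1 * kPointerSize.
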
// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store (never clobbered).
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ Bind(&done);

  static const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, kTypeAndReadOnlyMask);
  __ B(ne, miss);

  // Store the value at the masked, scaled index and return.
  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
  __ Str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ Mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}

// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects and returns the map of the
// receiver in 'map_scratch' if the receiver is not a SMI.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map_scratch,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  DCHECK(!AreAliased(map_scratch, scratch));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));

  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
  __ Tbnz(scratch, interceptor_bit, slow);

  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object, we enter the
  // runtime system to make sure that indexing into string objects works
  // as intended.
  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  __ Cmp(scratch, JS_OBJECT_TYPE);
  __ B(lt, slow);
}

// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
//
// receiver     - holds the receiver on entry.
//                Unchanged unless 'result' is the same register.
//
// key          - holds the smi key on entry.
//                Unchanged unless 'result' is the same register.
//
// elements     - holds the elements of the receiver on exit.
//
// elements_map - holds the elements map on exit if the not_fast_array branch
//                is taken. Otherwise, this is used as a scratch register.
//
// result       - holds the result on exit if the load succeeded.
//                Allowed to be the same as 'receiver' or 'key'.
//                Unchanged on bailout so 'receiver' and 'key' can be safely
//                used by further computation.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register elements_map, Register scratch2,
                                  Register result, Label* not_fast_array,
                                  Label* slow) {
  DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));

  // Check for fast array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
                     not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }

  // The elements_map register is only used for the not_fast_array path, which
  // was handled above. From this point onward it is a scratch register.
  Register scratch1 = elements_map;

  // Check that the key (index) is within bounds.
  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(key, scratch1);
  __ B(hs, slow);

  // Fast case: Do the load.
  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));

  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);

  // Move the value to the result register.
  // 'result' can alias with 'receiver' or 'key' but these two must be
  // preserved if we jump to 'slow'.
  __ Mov(result, scratch2);
}

// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
// The map of the key is returned in 'map_scratch'.
// If the jump to 'index_string' is done the hash of the key is left
// in 'hash_scratch'.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map_scratch, Register hash_scratch,
                                 Label* index_string, Label* not_unique) {
  DCHECK(!AreAliased(key, map_scratch, hash_scratch));

  // Is the key a name?
  Label unique;
  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
                      not_unique, hi);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ B(eq, &unique);

  // Is the string an array index with cached numeric value?
  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
                             index_string);

  // Is the string internalized? We know it's a string, so a single bit test
  // is enough.
  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);

  __ Bind(&unique);
  // Fall through if the key is a unique name.
}

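// Encoding note (descriptive sketch): with kInternalizedTag == 0,
// internalized strings have the kIsNotInternalizedMask bit clear in their
// instance type, so a set bit means "not internalized" and a single
// TestAndBranchIfAnySet suffices.
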
// Neither 'object' nor 'key' are modified by this function.
//
// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
// left with the object's elements map. Otherwise, it is used as a scratch
// register.
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object, Register key,
                                                Register map, Register scratch1,
                                                Register scratch2,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  DCHECK(!AreAliased(object, key, map, scratch1, scratch2));

  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the elements
  // map check later, we do not need to check for interceptors or
  // whether it requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, slow_case,
                      lt);

  // Check that the key is a positive smi.
  __ JumpIfNotSmi(key, slow_case);
  __ Tbnz(key, kXSignBit, slow_case);

  // Load the elements object and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup.
  __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
  __ Sub(scratch1, scratch1, Smi::FromInt(2));
  __ Cmp(key, scratch1);
  __ B(hs, unmapped_case);

  // Load element index and check whether it is the hole.
  static const int offset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ Add(scratch1, map, offset);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);

  // Load value from context and return it.
  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
  __ SmiUntag(scratch1);
  __ Lsl(scratch1, scratch1, kPointerSizeLog2);
  __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
  // The base of the result (scratch2) is passed to RecordWrite in
  // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
  return MemOperand(scratch2, scratch1);
}

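// Parameter map layout (illustrative sketch): the sloppy-arguments elements
// FixedArray is
//   slot 0:      context
//   slot 1:      the arguments backing store
//   slot 2..n+1: either the_hole or a context slot index, one per mapped
//                parameter
// which explains both the Smi::FromInt(2) length adjustment and the element
// load starting at FixedArray::kHeaderSize + 2 * kPointerSize above.
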
// The 'parameter_map' register must be loaded with the parameter map of the
// arguments object and is overwritten.
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  DCHECK(!AreAliased(key, parameter_map, scratch));

  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
              DONT_DO_SMI_CHECK);
  __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Cmp(key, scratch);
  __ B(hs, slow_case);

  __ Add(backing_store, backing_store,
         FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch, key);
  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
}

void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // The return address is in lr.
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  DCHECK(receiver.is(x1));
  DCHECK(name.is(x2));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, x3,
                                               x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = x0;
  DCHECK(!dictionary.is(ReceiverRegister()));
  DCHECK(!dictionary.is(NameRegister()));

  Label slow;

  __ Ldr(dictionary,
         FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), x0, x3, x4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ Bind(&slow);
  GenerateRuntimeGetProperty(masm);
}

void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();
  ASM_LOCATION("LoadIC::GenerateMiss");

  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);

  // Perform tail call to the entry.
  __ Push(ReceiverRegister(), NameRegister());
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}

void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(ReceiverRegister(), NameRegister());
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}

void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // The return address is in lr.
  Register result = x0;
  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));

  Label miss, unmapped;

  Register map_scratch = x0;
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
  __ Ldr(result, mapped_location);
  __ Ret();

  __ Bind(&unmapped);
  // Parameter map is left in map_scratch when a jump on unmapped is done.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
  __ Ldr(result, unmapped_location);
  __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
  __ Ret();

  __ Bind(&miss);
  GenerateMiss(masm);
}

void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
  Label slow, notin;
  Register value = ValueRegister();
  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register map = x3;

  // These registers are used by GenerateMappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register mapped1 = x4;
  Register mapped2 = x5;

  MemOperand mapped = GenerateMappedArgumentsLookup(
      masm, receiver, key, map, mapped1, mapped2, &notin, &slow);
  Operand mapped_offset = mapped.OffsetAsOperand();
  __ Str(value, mapped);
  __ Add(x10, mapped.base(), mapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();

  __ Bind(&notin);

  // These registers are used by GenerateUnmappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register unmapped1 = map;  // This is assumed to alias 'map'.
  Register unmapped2 = x4;
  MemOperand unmapped =
      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
  Operand unmapped_offset = unmapped.OffsetAsOperand();
  __ Str(value, unmapped);
  __ Add(x10, unmapped.base(), unmapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
  __ Ret();

  __ Bind(&slow);
  GenerateMiss(masm);
}

void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);

  __ Push(ReceiverRegister(), NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}

// IC register specifications
const Register LoadIC::ReceiverRegister() { return x1; }
const Register LoadIC::NameRegister() { return x2; }

const Register LoadIC::SlotRegister() {
  DCHECK(FLAG_vector_ics);
  return x0;
}

const Register LoadIC::VectorRegister() {
  DCHECK(FLAG_vector_ics);
  return x3;
}

const Register StoreIC::ReceiverRegister() { return x1; }
const Register StoreIC::NameRegister() { return x2; }
const Register StoreIC::ValueRegister() { return x0; }

const Register KeyedStoreIC::MapRegister() { return x3; }

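// Register convention implied by the accessors above (descriptive note):
//   x0: value for stores, result for loads
//   x1: receiver
//   x2: name or key
//   x3: map register for keyed stores
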
void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.
  __ Push(ReceiverRegister(), NameRegister());
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}

static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
                                        Register receiver, Register scratch1,
                                        Register scratch2, Register scratch3,
                                        Register scratch4, Register scratch5,
                                        Label* slow) {
  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
                     scratch5));

  Isolate* isolate = masm->isolate();
  Label check_number_dictionary;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
                                 Map::kHasIndexedInterceptor, slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
                        result, NULL, slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
                      scratch1, scratch2);
  __ Ret();

  __ Bind(&check_number_dictionary);
  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));

  // Check whether we have a number dictionary.
  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);

  __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
                              scratch4, scratch5);
  __ Ret();
}

static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
                                         Register receiver, Register scratch1,
                                         Register scratch2, Register scratch3,
                                         Register scratch4, Register scratch5,
                                         Label* slow) {
  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
                     scratch5));

  Isolate* isolate = masm->isolate();
  Label probe_dictionary, property_array_property;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
                                 Map::kHasNamedInterceptor, slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the dictionary.
  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);

  // We keep the map of the receiver in scratch1.
  Register receiver_map = scratch1;

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
  __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(scratch2, scratch2, mask);
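  // Hash sketch (descriptive only): the bucket index computed above is
  //   ((map >> kMapHashShift) ^ (hash_field >> Name::kHashShift))
  //       & kCapacityMask & kHashMask
  // so both the receiver map and the name's cached hash select the bucket.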
  // Load the key (consisting of map and unique name) from the cache and
  // check the map.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ Mov(scratch3, cache_keys);
  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load map and make scratch3 point to the next entry.
    __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
    __ Cmp(receiver_map, scratch4);
    __ B(ne, &try_next_entry);
    __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize));  // Load name
    __ Cmp(key, scratch4);
    __ B(eq, &hit_on_nth_entry[i]);
    __ Bind(&try_next_entry);
  }

  // Last entry.
  __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
  __ Cmp(receiver_map, scratch4);
  __ B(ne, slow);
  __ Ldr(scratch4, MemOperand(scratch3));
  __ Cmp(key, scratch4);
  __ B(ne, slow);

  // Get the field offset.
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ Bind(&hit_on_nth_entry[i]);
    __ Mov(scratch3, cache_field_offsets);
    if (i != 0) {
      __ Add(scratch2, scratch2, i);
    }
    __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
    __ Ldrb(scratch5,
            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
    __ Subs(scratch4, scratch4, scratch5);
    __ B(ge, &property_array_property);
    if (i != 0) {
      __ B(&load_in_object_property);
    }
  }
  // Load in-object property.
  __ Bind(&load_in_object_property);
  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
  __ Add(scratch5, scratch5, scratch4);  // Index from start of object.
  __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      scratch1, scratch2);
  __ Ret();

  // Load property array property.
  __ Bind(&property_array_property);
  __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
                      scratch1, scratch2);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ Bind(&probe_dictionary);
  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
  // Load the property.
  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
                      scratch1, scratch2);
  __ Ret();
}

void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in lr.
  Label slow, check_name, index_smi, index_name;

  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(key.is(x2));
  DCHECK(receiver.is(x1));

  __ JumpIfNotSmi(key, &check_name);
  __ Bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.
  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);

  // Slow case.
  __ Bind(&slow);
  __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
                      x4, x3);
  GenerateRuntimeGetProperty(masm);

  __ Bind(&check_name);
  GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);

  GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);

  __ Bind(&index_name);
  __ IndexFromHash(x3, key);
  // Now jump to the place where smi keys are handled.
  __ B(&index_smi);
}

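// Dispatch sketch for the generic keyed load above (descriptive only):
//   smi key            -> GenerateKeyedLoadWithSmiKey
//   unique name        -> GenerateKeyedLoadWithNameKey
//   array-index string -> IndexFromHash, then retried as a smi key
//   anything else      -> runtime via the slow path
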
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // Return address is in lr.
  Label miss;

  Register receiver = ReceiverRegister();
  Register index = NameRegister();
  Register result = x0;
  Register scratch = x3;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ Bind(&miss);
  GenerateMiss(masm);
}

void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // Return address is in lr.
  Label slow;

  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  Register scratch1 = x3;
  Register scratch2 = x4;
  DCHECK(!AreAliased(scratch1, scratch2, receiver, key));

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &slow);

  // Check that the key is an array index, that is Uint32.
  __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);

  // Get the map of the receiver.
  Register map = scratch1;
  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that it has indexed interceptor and access checks
  // are not enabled for this object.
  __ Ldrb(scratch2, FieldMemOperand(map, Map::kBitFieldOffset));
  DCHECK(kSlowCaseBitFieldMask == ((1 << Map::kIsAccessCheckNeeded) |
                                   (1 << Map::kHasIndexedInterceptor)));
  __ Tbnz(scratch2, Map::kIsAccessCheckNeeded, &slow);
  __ Tbz(scratch2, Map::kHasIndexedInterceptor, &slow);

  // Everything is fine, call runtime.
  __ Push(receiver, key);
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kLoadElementWithInterceptor),
                        masm->isolate()),
      2, 1);

  __ Bind(&slow);
  GenerateMiss(masm);
}

void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateMiss");

  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}

void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSlow");

  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}

void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictMode strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");

  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // Push strict_mode for runtime call.
  __ Mov(x10, Smi::FromInt(strict_mode));
  __ Push(x10);

  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}

static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed;
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ Bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));
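  // Address math sketch (descriptive; ARM64 smis keep a 32-bit payload in
  // the upper word, kSmiShift == 32):
  //   address = elements + FixedArray::kHeaderSize - kHeapObjectTag
  //             + ((key >> kSmiShift) << kPointerSizeLog2)
  // UntagSmiAndScale folds the untag and the index scaling into one operand.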
  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update write barrier for the elements array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  __ Bind(&dont_record_write);
  __ Ret();

  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so go to
  // the runtime.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);

  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, x10, x11, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}

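// Elements-kind transitions handled by the helper above (descriptive sketch):
//   FAST_SMI_ELEMENTS    --(store heap number)-------> FAST_DOUBLE_ELEMENTS
//   FAST_SMI_ELEMENTS    --(store other heap object)-> FAST_ELEMENTS
//   FAST_DOUBLE_ELEMENTS --(store non-number object)-> FAST_ELEMENTS
// 'elements' is re-loaded after each transition because the transition
// generators may allocate a new backing store.
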
void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
  Label slow;
  Label array;
  Label fast_object;
  Label extra;
  Label fast_object_grow;
  Label fast_double_grow;
  Label fast_double;

  Register value = ValueRegister();
  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(receiver.is(x1));
  DCHECK(key.is(x2));
  DCHECK(value.is(x0));

  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &slow);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle observed
  // objects.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(
      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);

  // Check if the object is a JS array or not.
  Register instance_type = x10;
  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
  __ B(eq, &array);
  // Check that the object is some kind of JSObject.
  __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
  __ B(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(hi, &fast_object);

  __ Bind(&slow);
  // Slow case, handle jump to runtime.
  // Live values:
  //  x0: value
  //  x1: receiver
  //  x2: key
  GenerateRuntimeSetProperty(masm, strict_mode);

  __ Bind(&extra);
  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].

  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(ls, &slow);

  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);

  __ Bind(&array);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
  __ B(lo, &slow);

  KeyedStoreGenerateGenericHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength, value,
                                  key, receiver, receiver_map, elements_map,
                                  elements);
}

void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  DCHECK(!AreAliased(receiver, name, ValueRegister(), x3, x4, x5, x6));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, x3,
                                               x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // Tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}

void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register value = ValueRegister();
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  Register dictionary = x3;
  DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));

  __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
  GenerateMiss(masm);
}

void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictMode strict_mode) {
  ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");

  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  __ Mov(x10, Smi::FromInt(strict_mode));
  __ Push(x10);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}

void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Push receiver, name and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}

Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}

bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address info_address = Assembler::return_address_from_call_start(address);

  InstructionSequence* patch_info = InstructionSequence::At(info_address);
  return patch_info->IsInlineData();
}

// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The patch information is encoded in the instruction stream using
  // instructions which have no side effects, so we can safely execute them.
  // The patch information is encoded directly after the call to the helper
  // function which is requesting this patch operation.
  Address info_address = Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n", address,
           info_address, reinterpret_cast<void*>(info.SmiCheck()));
  }
  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi().
  // Changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(to_patch, 1);
  DCHECK(to_patch->IsTestBranch());
  DCHECK(to_patch->ImmTestBranchBit5() == 0);
  DCHECK(to_patch->ImmTestBranchBit40() == 0);

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);

  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }

  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}

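// Worked example of the patch (descriptive sketch): with kSmiTag == 0 and
// kSmiTagMask == 1, JumpPatchSite::EmitJumpIfNotSmi() initially emits
//   tbz xzr, #0, <target>        // always taken: bit 0 of xzr is 0
// and enabling the inlined check rewrites this to
//   tbnz smi_reg, #0, <target>   // taken only when the tag bit is set,
//                                // i.e. when smi_reg holds a non-smi
// Disabling a check performs the inverse rewrite back onto xzr.
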
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM64