1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
9 #include "src/base/bits.h"
10 #include "src/bootstrapper.h"
11 #include "src/code-stubs.h"
12 #include "src/codegen.h"
13 #include "src/ic/handler-compiler.h"
14 #include "src/ic/ic.h"
15 #include "src/isolate.h"
16 #include "src/jsregexp.h"
17 #include "src/regexp-macro-assembler.h"
18 #include "src/runtime/runtime.h"
24 static void InitializeArrayConstructorDescriptor(
25 Isolate* isolate, CodeStubDescriptor* descriptor,
26 int constant_stack_parameter_count) {
27 Address deopt_handler =
28 Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
30 if (constant_stack_parameter_count == 0) {
31 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
32 JS_FUNCTION_STUB_MODE);
34 descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
35 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
40 static void InitializeInternalArrayConstructorDescriptor(
41 Isolate* isolate, CodeStubDescriptor* descriptor,
42 int constant_stack_parameter_count) {
43 Address deopt_handler =
44 Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
46 if (constant_stack_parameter_count == 0) {
47 descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
48 JS_FUNCTION_STUB_MODE);
50 descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
51 JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
56 void ArrayNoArgumentConstructorStub::InitializeDescriptor(
57 CodeStubDescriptor* descriptor) {
58 InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
62 void ArraySingleArgumentConstructorStub::InitializeDescriptor(
63 CodeStubDescriptor* descriptor) {
64 InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
68 void ArrayNArgumentsConstructorStub::InitializeDescriptor(
69 CodeStubDescriptor* descriptor) {
70 InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
74 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
75 CodeStubDescriptor* descriptor) {
76 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
80 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
81 CodeStubDescriptor* descriptor) {
82 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
86 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
87 CodeStubDescriptor* descriptor) {
88 InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
92 #define __ ACCESS_MASM(masm)
95 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
97 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
98 Register rhs, Label* lhs_not_nan,
99 Label* slow, bool strict);
100 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
104 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
105 ExternalReference miss) {
106 // Update the static counter each time a new code stub is generated.
107 isolate()->counters()->code_stubs()->Increment();
109 CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
110 int param_count = descriptor.GetEnvironmentParameterCount();
112 // Call the runtime system in a fresh internal frame.
113 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
114 DCHECK(param_count == 0 ||
115 r3.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
117 for (int i = 0; i < param_count; ++i) {
118 __ push(descriptor.GetEnvironmentParameterRegister(i));
120 __ CallExternalReference(miss, param_count);
127 void DoubleToIStub::Generate(MacroAssembler* masm) {
128 Label out_of_range, only_low, negate, done, fastpath_done;
129 Register input_reg = source();
130 Register result_reg = destination();
131 DCHECK(is_truncating());
133 int double_offset = offset();
135 // Immediate values for this stub fit in instructions, so it's safe to use ip.
136 Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
137 Register scratch_low =
138 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
139 Register scratch_high =
140 GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
141 DoubleRegister double_scratch = kScratchDoubleReg;
144 // Account for saved regs if input is sp.
145 if (input_reg.is(sp)) double_offset += kPointerSize;
147 if (!skip_fastpath()) {
148 // Load double input.
149 __ lfd(double_scratch, MemOperand(input_reg, double_offset));
151 // Do fast-path convert from double to int.
152 __ ConvertDoubleToInt64(double_scratch,
153 #if !V8_TARGET_ARCH_PPC64
159 #if V8_TARGET_ARCH_PPC64
160 __ TestIfInt32(result_reg, r0);
162 __ TestIfInt32(scratch, result_reg, r0);
164 __ beq(&fastpath_done);
167 __ Push(scratch_high, scratch_low);
168 // Account for saved regs if input is sp.
169 if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
172 MemOperand(input_reg, double_offset + Register::kExponentOffset));
174 MemOperand(input_reg, double_offset + Register::kMantissaOffset));
176 __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
177 // Load scratch with exponent - 1. This is faster than loading
178 // with exponent because Bias + 1 = 1024 which is a *PPC* immediate value.
179 STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
180 __ subi(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
181 // If exponent is greater than or equal to 84, the 32 less significant
182 // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
184 // Compare exponent with 84 (compare exponent - 1 with 83).
185 __ cmpi(scratch, Operand(83));
186 __ bge(&out_of_range);
188 // If we reach this code, 31 <= exponent <= 83.
189 // So, we don't have to handle cases where 0 <= exponent <= 20 for
190 // which we would need to shift right the high part of the mantissa.
191 // Scratch contains exponent - 1.
192 // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
193 __ subfic(scratch, scratch, Operand(51));
194 __ cmpi(scratch, Operand::Zero());
196 // 21 <= exponent <= 51, shift scratch_low and scratch_high
197 // to generate the result.
198 __ srw(scratch_low, scratch_low, scratch);
199 // Scratch contains: 52 - exponent.
200 // We needs: exponent - 20.
201 // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
202 __ subfic(scratch, scratch, Operand(32));
203 __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
204 // Set the implicit 1 before the mantissa part in scratch_high.
205 STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
206 __ oris(result_reg, result_reg,
207 Operand(1 << ((HeapNumber::kMantissaBitsInTopWord) - 16)));
208 __ slw(r0, result_reg, scratch);
209 __ orx(result_reg, scratch_low, r0);
212 __ bind(&out_of_range);
213 __ mov(result_reg, Operand::Zero());
217 // 52 <= exponent <= 83, shift only scratch_low.
218 // On entry, scratch contains: 52 - exponent.
219 __ neg(scratch, scratch);
220 __ slw(result_reg, scratch_low, scratch);
223 // If input was positive, scratch_high ASR 31 equals 0 and
224 // scratch_high LSR 31 equals zero.
225 // New result = (result eor 0) + 0 = result.
226 // If the input was negative, we have to negate the result.
227 // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
228 // New result = (result eor 0xffffffff) + 1 = 0 - result.
229 __ srawi(r0, scratch_high, 31);
230 #if V8_TARGET_ARCH_PPC64
231 __ srdi(r0, r0, Operand(32));
233 __ xor_(result_reg, result_reg, r0);
234 __ srwi(r0, scratch_high, Operand(31));
235 __ add(result_reg, result_reg, r0);
238 __ Pop(scratch_high, scratch_low);
240 __ bind(&fastpath_done);
247 // Handle the case where the lhs and rhs are the same object.
248 // Equality is almost reflexive (everything but NaN), so this is a test
249 // for "identity and not NaN".
250 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
253 Label heap_number, return_equal;
255 __ bne(¬_identical);
257 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
258 // so we do the second best thing - test it ourselves.
259 // They are both equal and they are not both Smis so both of them are not
260 // Smis. If it's not a heap number, then return equal.
261 if (cond == lt || cond == gt) {
262 __ CompareObjectType(r3, r7, r7, FIRST_SPEC_OBJECT_TYPE);
265 __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
266 __ beq(&heap_number);
267 // Comparing JS objects with <=, >= is complicated.
269 __ cmpi(r7, Operand(FIRST_SPEC_OBJECT_TYPE));
271 // Normally here we fall through to return_equal, but undefined is
272 // special: (undefined == undefined) == true, but
273 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
274 if (cond == le || cond == ge) {
275 __ cmpi(r7, Operand(ODDBALL_TYPE));
276 __ bne(&return_equal);
277 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
279 __ bne(&return_equal);
281 // undefined <= undefined should fail.
282 __ li(r3, Operand(GREATER));
284 // undefined >= undefined should fail.
285 __ li(r3, Operand(LESS));
292 __ bind(&return_equal);
294 __ li(r3, Operand(GREATER)); // Things aren't less than themselves.
295 } else if (cond == gt) {
296 __ li(r3, Operand(LESS)); // Things aren't greater than themselves.
298 __ li(r3, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
302 // For less and greater we don't have to check for NaN since the result of
303 // x < x is false regardless. For the others here is some code to check
305 if (cond != lt && cond != gt) {
306 __ bind(&heap_number);
307 // It is a heap number, so return non-equal if it's NaN and equal if it's
310 // The representation of NaN values has all exponent bits (52..62) set,
311 // and not all mantissa bits (0..51) clear.
312 // Read top bits of double representation (second word of value).
313 __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
314 // Test that exponent bits are all set.
315 STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
316 __ ExtractBitMask(r6, r5, HeapNumber::kExponentMask);
317 __ cmpli(r6, Operand(0x7ff));
318 __ bne(&return_equal);
320 // Shift out flag and all exponent bits, retaining only mantissa.
321 __ slwi(r5, r5, Operand(HeapNumber::kNonMantissaBitsInTopWord));
322 // Or with all low-bits of mantissa.
323 __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
325 __ cmpi(r3, Operand::Zero());
326 // For equal we already have the right value in r3: Return zero (equal)
327 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
328 // not (it's a NaN). For <= and >= we need to load r0 with the failing
329 // value if it's a NaN.
331 if (CpuFeatures::IsSupported(ISELECT)) {
332 __ li(r4, Operand((cond == le) ? GREATER : LESS));
333 __ isel(eq, r3, r3, r4);
337 // All-zero means Infinity means equal.
341 __ li(r3, Operand(GREATER)); // NaN <= NaN should fail.
343 __ li(r3, Operand(LESS)); // NaN >= NaN should fail.
349 // No fall through here.
351 __ bind(¬_identical);
355 // See comment at call site.
356 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
357 Register rhs, Label* lhs_not_nan,
358 Label* slow, bool strict) {
359 DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
362 __ JumpIfSmi(rhs, &rhs_is_smi);
364 // Lhs is a Smi. Check whether the rhs is a heap number.
365 __ CompareObjectType(rhs, r6, r7, HEAP_NUMBER_TYPE);
367 // If rhs is not a number and lhs is a Smi then strict equality cannot
368 // succeed. Return non-equal
369 // If rhs is r3 then there is already a non zero value in it.
373 __ mov(r3, Operand(NOT_EQUAL));
378 // Smi compared non-strictly with a non-Smi non-heap-number. Call
383 // Lhs is a smi, rhs is a number.
384 // Convert lhs to a double in d7.
385 __ SmiToDouble(d7, lhs);
386 // Load the double from rhs, tagged HeapNumber r3, to d6.
387 __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
389 // We now have both loaded as doubles but we can skip the lhs nan check
393 __ bind(&rhs_is_smi);
394 // Rhs is a smi. Check whether the non-smi lhs is a heap number.
395 __ CompareObjectType(lhs, r7, r7, HEAP_NUMBER_TYPE);
397 // If lhs is not a number and rhs is a smi then strict equality cannot
398 // succeed. Return non-equal.
399 // If lhs is r3 then there is already a non zero value in it.
403 __ mov(r3, Operand(NOT_EQUAL));
408 // Smi compared non-strictly with a non-smi non-heap-number. Call
413 // Rhs is a smi, lhs is a heap number.
414 // Load the double from lhs, tagged HeapNumber r4, to d7.
415 __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
416 // Convert rhs to a double in d6.
417 __ SmiToDouble(d6, rhs);
418 // Fall through to both_loaded_as_doubles.
422 // See comment at call site.
423 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
425 DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
427 // If either operand is a JS object or an oddball value, then they are
428 // not equal since their pointers are different.
429 // There is no test for undetectability in strict equality.
430 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
431 Label first_non_object;
432 // Get the type of the first operand into r5 and compare it with
433 // FIRST_SPEC_OBJECT_TYPE.
434 __ CompareObjectType(rhs, r5, r5, FIRST_SPEC_OBJECT_TYPE);
435 __ blt(&first_non_object);
437 // Return non-zero (r3 is not zero)
438 Label return_not_equal;
439 __ bind(&return_not_equal);
442 __ bind(&first_non_object);
443 // Check for oddballs: true, false, null, undefined.
444 __ cmpi(r5, Operand(ODDBALL_TYPE));
445 __ beq(&return_not_equal);
447 __ CompareObjectType(lhs, r6, r6, FIRST_SPEC_OBJECT_TYPE);
448 __ bge(&return_not_equal);
450 // Check for oddballs: true, false, null, undefined.
451 __ cmpi(r6, Operand(ODDBALL_TYPE));
452 __ beq(&return_not_equal);
454 // Now that we have the types we might as well check for
455 // internalized-internalized.
456 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
458 __ andi(r0, r5, Operand(kIsNotStringMask | kIsNotInternalizedMask));
459 __ beq(&return_not_equal, cr0);
463 // See comment at call site.
464 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
466 Label* both_loaded_as_doubles,
467 Label* not_heap_numbers, Label* slow) {
468 DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
470 __ CompareObjectType(rhs, r6, r5, HEAP_NUMBER_TYPE);
471 __ bne(not_heap_numbers);
472 __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
474 __ bne(slow); // First was a heap number, second wasn't. Go slow case.
476 // Both are heap numbers. Load them up then jump to the code we have
478 __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
479 __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
481 __ b(both_loaded_as_doubles);
485 // Fast negative check for internalized-to-internalized equality.
486 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
487 Register lhs, Register rhs,
488 Label* possible_strings,
489 Label* not_both_strings) {
490 DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
492 // r5 is object type of rhs.
494 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
495 __ andi(r0, r5, Operand(kIsNotStringMask));
496 __ bne(&object_test, cr0);
497 __ andi(r0, r5, Operand(kIsNotInternalizedMask));
498 __ bne(possible_strings, cr0);
499 __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
500 __ bge(not_both_strings);
501 __ andi(r0, r6, Operand(kIsNotInternalizedMask));
502 __ bne(possible_strings, cr0);
504 // Both are internalized. We already checked they weren't the same pointer
505 // so they are not equal.
506 __ li(r3, Operand(NOT_EQUAL));
509 __ bind(&object_test);
510 __ cmpi(r5, Operand(FIRST_SPEC_OBJECT_TYPE));
511 __ blt(not_both_strings);
512 __ CompareObjectType(lhs, r5, r6, FIRST_SPEC_OBJECT_TYPE);
513 __ blt(not_both_strings);
514 // If both objects are undetectable, they are equal. Otherwise, they
515 // are not equal, since they are different objects and an object is not
516 // equal to undefined.
517 __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
518 __ lbz(r5, FieldMemOperand(r5, Map::kBitFieldOffset));
519 __ lbz(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
521 __ andi(r3, r3, Operand(1 << Map::kIsUndetectable));
522 __ xori(r3, r3, Operand(1 << Map::kIsUndetectable));
527 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
529 CompareICState::State expected,
532 if (expected == CompareICState::SMI) {
533 __ JumpIfNotSmi(input, fail);
534 } else if (expected == CompareICState::NUMBER) {
535 __ JumpIfSmi(input, &ok);
536 __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
539 // We could be strict about internalized/non-internalized here, but as long as
540 // hydrogen doesn't care, the stub doesn't have to care either.
545 // On entry r4 and r5 are the values to be compared.
546 // On exit r3 is 0, positive or negative to indicate the result of
548 void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
551 Condition cc = GetCondition();
554 CompareICStub_CheckInputType(masm, lhs, r5, left(), &miss);
555 CompareICStub_CheckInputType(masm, rhs, r6, right(), &miss);
557 Label slow; // Call builtin.
558 Label not_smis, both_loaded_as_doubles, lhs_not_nan;
560 Label not_two_smis, smi_done;
562 __ JumpIfNotSmi(r5, ¬_two_smis);
567 __ bind(¬_two_smis);
569 // NOTICE! This code is only reached after a smi-fast-case check, so
570 // it is certain that at least one operand isn't a smi.
572 // Handle the case where the objects are identical. Either returns the answer
573 // or goes to slow. Only falls through if the objects were not identical.
574 EmitIdenticalObjectComparison(masm, &slow, cc);
576 // If either is a Smi (we know that not both are), then they can only
577 // be strictly equal if the other is a HeapNumber.
578 STATIC_ASSERT(kSmiTag == 0);
579 DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0));
580 __ and_(r5, lhs, rhs);
581 __ JumpIfNotSmi(r5, ¬_smis);
582 // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
583 // 1) Return the answer.
585 // 3) Fall through to both_loaded_as_doubles.
586 // 4) Jump to lhs_not_nan.
587 // In cases 3 and 4 we have found out we were dealing with a number-number
588 // comparison. The double values of the numbers have been loaded
590 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
592 __ bind(&both_loaded_as_doubles);
593 // The arguments have been converted to doubles and stored in d6 and d7
594 __ bind(&lhs_not_nan);
598 Label nan, equal, less_than;
600 if (CpuFeatures::IsSupported(ISELECT)) {
602 __ li(r4, Operand(GREATER));
603 __ li(r5, Operand(LESS));
604 __ isel(eq, r3, r0, r4);
605 __ isel(lt, r3, r5, r3);
610 __ li(r3, Operand(GREATER));
613 __ li(r3, Operand(EQUAL));
616 __ li(r3, Operand(LESS));
621 // If one of the sides was a NaN then the v flag is set. Load r3 with
622 // whatever it takes to make the comparison fail, since comparisons with NaN
624 if (cc == lt || cc == le) {
625 __ li(r3, Operand(GREATER));
627 __ li(r3, Operand(LESS));
632 // At this point we know we are dealing with two different objects,
633 // and neither of them is a Smi. The objects are in rhs_ and lhs_.
635 // This returns non-equal for some object types, or falls through if it
637 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
640 Label check_for_internalized_strings;
641 Label flat_string_check;
642 // Check for heap-number-heap-number comparison. Can jump to slow case,
643 // or load both doubles into r3, r4, r5, r6 and jump to the code that handles
644 // that case. If the inputs are not doubles then jumps to
645 // check_for_internalized_strings.
646 // In this case r5 will contain the type of rhs_. Never falls through.
647 EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
648 &check_for_internalized_strings,
651 __ bind(&check_for_internalized_strings);
652 // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
653 // internalized strings.
654 if (cc == eq && !strict()) {
655 // Returns an answer for two internalized strings or two detectable objects.
656 // Otherwise jumps to string case or not both strings case.
657 // Assumes that r5 is the type of rhs_ on entry.
658 EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
662 // Check for both being sequential one-byte strings,
663 // and inline if that is the case.
664 __ bind(&flat_string_check);
666 __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r5, r6, &slow);
668 __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
671 StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r5, r6);
673 StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r5, r6, r7);
675 // Never falls through to here.
680 // Figure out which native to call and setup the arguments.
681 Builtins::JavaScript native;
683 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
685 native = Builtins::COMPARE;
686 int ncr; // NaN compare result
687 if (cc == lt || cc == le) {
690 DCHECK(cc == gt || cc == ge); // remaining cases
693 __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
697 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
698 // tagged as a small integer.
699 __ InvokeBuiltin(native, JUMP_FUNCTION);
706 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
707 // We don't allow a GC during a store buffer overflow so there is no need to
708 // store the registers in any particular way, but we do have to store and
711 __ MultiPush(kJSCallerSaved | r0.bit());
712 if (save_doubles()) {
713 __ SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
715 const int argument_count = 1;
716 const int fp_argument_count = 0;
717 const Register scratch = r4;
719 AllowExternalCallThatCantCauseGC scope(masm);
720 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
721 __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
722 __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
724 if (save_doubles()) {
725 __ RestoreFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
727 __ MultiPop(kJSCallerSaved | r0.bit());
733 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
734 __ PushSafepointRegisters();
739 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
740 __ PopSafepointRegisters();
745 void MathPowStub::Generate(MacroAssembler* masm) {
746 const Register base = r4;
747 const Register exponent = MathPowTaggedDescriptor::exponent();
748 DCHECK(exponent.is(r5));
749 const Register heapnumbermap = r8;
750 const Register heapnumber = r3;
751 const DoubleRegister double_base = d1;
752 const DoubleRegister double_exponent = d2;
753 const DoubleRegister double_result = d3;
754 const DoubleRegister double_scratch = d0;
755 const Register scratch = r11;
756 const Register scratch2 = r10;
758 Label call_runtime, done, int_exponent;
759 if (exponent_type() == ON_STACK) {
760 Label base_is_smi, unpack_exponent;
761 // The exponent and base are supplied as arguments on the stack.
762 // This can only happen if the stub is called from non-optimized code.
763 // Load input parameters from stack to double registers.
764 __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
765 __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
767 __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
769 __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
770 __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
771 __ cmp(scratch, heapnumbermap);
772 __ bne(&call_runtime);
774 __ lfd(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
775 __ b(&unpack_exponent);
777 __ bind(&base_is_smi);
778 __ ConvertIntToDouble(scratch, double_base);
779 __ bind(&unpack_exponent);
781 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
782 __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
783 __ cmp(scratch, heapnumbermap);
784 __ bne(&call_runtime);
786 __ lfd(double_exponent,
787 FieldMemOperand(exponent, HeapNumber::kValueOffset));
788 } else if (exponent_type() == TAGGED) {
789 // Base is already in double_base.
790 __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
792 __ lfd(double_exponent,
793 FieldMemOperand(exponent, HeapNumber::kValueOffset));
796 if (exponent_type() != INTEGER) {
797 // Detect integer exponents stored as double.
798 __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
800 __ beq(&int_exponent);
802 if (exponent_type() == ON_STACK) {
803 // Detect square root case. Crankshaft detects constant +/-0.5 at
804 // compile time and uses DoMathPowHalf instead. We then skip this check
805 // for non-constant cases of +/-0.5 as these hardly occur.
806 Label not_plus_half, not_minus_inf1, not_minus_inf2;
809 __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
810 __ fcmpu(double_exponent, double_scratch);
811 __ bne(¬_plus_half);
813 // Calculates square root of base. Check for the special case of
814 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
815 __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
816 __ fcmpu(double_base, double_scratch);
817 __ bne(¬_minus_inf1);
818 __ fneg(double_result, double_scratch);
820 __ bind(¬_minus_inf1);
822 // Add +0 to convert -0 to +0.
823 __ fadd(double_scratch, double_base, kDoubleRegZero);
824 __ fsqrt(double_result, double_scratch);
827 __ bind(¬_plus_half);
828 __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
829 __ fcmpu(double_exponent, double_scratch);
830 __ bne(&call_runtime);
832 // Calculates square root of base. Check for the special case of
833 // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
834 __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
835 __ fcmpu(double_base, double_scratch);
836 __ bne(¬_minus_inf2);
837 __ fmr(double_result, kDoubleRegZero);
839 __ bind(¬_minus_inf2);
841 // Add +0 to convert -0 to +0.
842 __ fadd(double_scratch, double_base, kDoubleRegZero);
843 __ LoadDoubleLiteral(double_result, 1.0, scratch);
844 __ fsqrt(double_scratch, double_scratch);
845 __ fdiv(double_result, double_result, double_scratch);
852 AllowExternalCallThatCantCauseGC scope(masm);
853 __ PrepareCallCFunction(0, 2, scratch);
854 __ MovToFloatParameters(double_base, double_exponent);
856 ExternalReference::power_double_double_function(isolate()), 0, 2);
860 __ MovFromFloatResult(double_result);
864 // Calculate power with integer exponent.
865 __ bind(&int_exponent);
867 // Get two copies of exponent in the registers scratch and exponent.
868 if (exponent_type() == INTEGER) {
869 __ mr(scratch, exponent);
871 // Exponent has previously been stored into scratch as untagged integer.
872 __ mr(exponent, scratch);
874 __ fmr(double_scratch, double_base); // Back up base.
875 __ li(scratch2, Operand(1));
876 __ ConvertIntToDouble(scratch2, double_result);
878 // Get absolute value of exponent.
879 __ cmpi(scratch, Operand::Zero());
880 if (CpuFeatures::IsSupported(ISELECT)) {
881 __ neg(scratch2, scratch);
882 __ isel(lt, scratch, scratch2, scratch);
884 Label positive_exponent;
885 __ bge(&positive_exponent);
886 __ neg(scratch, scratch);
887 __ bind(&positive_exponent);
890 Label while_true, no_carry, loop_end;
891 __ bind(&while_true);
892 __ andi(scratch2, scratch, Operand(1));
893 __ beq(&no_carry, cr0);
894 __ fmul(double_result, double_result, double_scratch);
896 __ ShiftRightArithImm(scratch, scratch, 1, SetRC);
897 __ beq(&loop_end, cr0);
898 __ fmul(double_scratch, double_scratch, double_scratch);
902 __ cmpi(exponent, Operand::Zero());
905 __ li(scratch2, Operand(1));
906 __ ConvertIntToDouble(scratch2, double_scratch);
907 __ fdiv(double_result, double_scratch, double_result);
908 // Test whether result is zero. Bail out to check for subnormal result.
909 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
910 __ fcmpu(double_result, kDoubleRegZero);
912 // double_exponent may not containe the exponent value if the input was a
913 // smi. We set it with exponent value before bailing out.
914 __ ConvertIntToDouble(exponent, double_exponent);
916 // Returning or bailing out.
917 Counters* counters = isolate()->counters();
918 if (exponent_type() == ON_STACK) {
919 // The arguments are still on the stack.
920 __ bind(&call_runtime);
921 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
923 // The stub is called from non-optimized code, which expects the result
924 // as heap number in exponent.
926 __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
928 __ stfd(double_result,
929 FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
930 DCHECK(heapnumber.is(r3));
931 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
937 AllowExternalCallThatCantCauseGC scope(masm);
938 __ PrepareCallCFunction(0, 2, scratch);
939 __ MovToFloatParameters(double_base, double_exponent);
941 ExternalReference::power_double_double_function(isolate()), 0, 2);
945 __ MovFromFloatResult(double_result);
948 __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
954 bool CEntryStub::NeedsImmovableCode() { return true; }
957 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
958 CEntryStub::GenerateAheadOfTime(isolate);
959 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
960 StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
961 ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
962 CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
963 CreateWeakCellStub::GenerateAheadOfTime(isolate);
964 BinaryOpICStub::GenerateAheadOfTime(isolate);
965 StoreRegistersStateStub::GenerateAheadOfTime(isolate);
966 RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
967 BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
971 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
972 StoreRegistersStateStub stub(isolate);
977 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
978 RestoreRegistersStateStub stub(isolate);
983 void CodeStub::GenerateFPStubs(Isolate* isolate) {
984 // Generate if not already in cache.
985 SaveFPRegsMode mode = kSaveFPRegs;
986 CEntryStub(isolate, 1, mode).GetCode();
987 StoreBufferOverflowStub(isolate, mode).GetCode();
988 isolate->set_fp_stubs_generated(true);
992 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
993 CEntryStub stub(isolate, 1, kDontSaveFPRegs);
998 void CEntryStub::Generate(MacroAssembler* masm) {
999 // Called from JavaScript; parameters are on stack as if calling JS function.
1000 // r3: number of arguments including receiver
1001 // r4: pointer to builtin function
1002 // fp: frame pointer (restored after C call)
1003 // sp: stack pointer (restored as callee's sp after C call)
1004 // cp: current context (C callee-saved)
1006 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1010 // Compute the argv pointer.
1011 __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
1013 __ subi(r4, r4, Operand(kPointerSize));
1015 // Enter the exit frame that transitions from JavaScript to C++.
1016 FrameScope scope(masm, StackFrame::MANUAL);
1018 // Need at least one extra slot for return address location.
1019 int arg_stack_space = 1;
1022 #if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
1023 // Pass buffer for return value on stack if necessary
1024 if (result_size() > 1) {
1025 DCHECK_EQ(2, result_size());
1026 arg_stack_space += 2;
1030 __ EnterExitFrame(save_doubles(), arg_stack_space);
1032 // Store a copy of argc in callee-saved registers for later.
1035 // r3, r14: number of arguments including receiver (C callee-saved)
1036 // r4: pointer to the first argument
1037 // r15: pointer to builtin function (C callee-saved)
1039 // Result returned in registers or stack, depending on result size and ABI.
1041 Register isolate_reg = r5;
1042 #if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
1043 if (result_size() > 1) {
1044 // The return value is 16-byte non-scalar value.
1045 // Use frame storage reserved by calling function to pass return
1046 // buffer as implicit first argument.
1049 __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
1055 __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
1057 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
1058 // Native AIX/PPC64 Linux use a function descriptor.
1059 __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
1060 __ LoadP(ip, MemOperand(r15, 0)); // Instruction address
1061 Register target = ip;
1062 #elif ABI_TOC_ADDRESSABILITY_VIA_IP
1064 Register target = ip;
1066 Register target = r15;
1069 // To let the GC traverse the return address of the exit frames, we need to
1070 // know where the return address is. The CEntryStub is unmovable, so
1071 // we can store the address on the stack to be able to find it again and
1072 // we never have to restore it, because it will not change.
1073 // Compute the return address in lr to return to after the jump below. Pc is
1074 // already at '+ 8' from the current instruction but return is after three
1075 // instructions so add another 4 to pc to get the return address.
1077 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
1083 // Constant used below is dependent on size of Call() macro instructions
1084 __ addi(r0, r8, Operand(20));
1086 __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
1090 #if !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
1091 // If return value is on the stack, pop it to registers.
1092 if (result_size() > 1) {
1093 __ LoadP(r4, MemOperand(r3, kPointerSize));
1094 __ LoadP(r3, MemOperand(r3));
1098 // Runtime functions should not return 'the hole'. Allowing it to escape may
1099 // lead to crashes in the IC code later.
1100 if (FLAG_debug_code) {
1102 __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
1104 __ stop("The hole escaped");
1108 // Check result for exception sentinel.
1109 Label exception_returned;
1110 __ CompareRoot(r3, Heap::kExceptionRootIndex);
1111 __ beq(&exception_returned);
1113 ExternalReference pending_exception_address(Isolate::kPendingExceptionAddress,
1116 // Check that there is no pending exception, otherwise we
1117 // should have returned the exception sentinel.
1118 if (FLAG_debug_code) {
1120 __ mov(r5, Operand(pending_exception_address));
1121 __ LoadP(r5, MemOperand(r5));
1122 __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
1123 // Cannot use check here as it attempts to generate call into runtime.
1125 __ stop("Unexpected pending exception");
1129 // Exit C frame and return.
1131 // sp: stack pointer
1132 // fp: frame pointer
1133 // r14: still holds argc (callee-saved).
1134 __ LeaveExitFrame(save_doubles(), r14, true);
1137 // Handling of exception.
1138 __ bind(&exception_returned);
1140 // Retrieve the pending exception.
1141 __ mov(r5, Operand(pending_exception_address));
1142 __ LoadP(r3, MemOperand(r5));
1144 // Clear the pending exception.
1145 __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
1146 __ StoreP(r6, MemOperand(r5));
1148 // Special handling of termination exceptions which are uncatchable
1149 // by javascript code.
1150 Label throw_termination_exception;
1151 __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
1152 __ beq(&throw_termination_exception);
1154 // Handle normal exception.
1157 __ bind(&throw_termination_exception);
1158 __ ThrowUncatchable(r3);
1162 void JSEntryStub::Generate(MacroAssembler* masm) {
1169 Label invoke, handler_entry, exit;
1172 __ function_descriptor();
1174 ProfileEntryHookStub::MaybeCallEntryHook(masm);
1177 // preserve LR in pre-reserved slot in caller's frame
1179 __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
1181 // Save callee saved registers on the stack.
1182 __ MultiPush(kCalleeSaved);
1184 // Floating point regs FPR0 - FRP13 are volatile
1185 // FPR14-FPR31 are non-volatile, but sub-calls will save them for us
1187 // int offset_to_argv = kPointerSize * 22; // matches (22*4) above
1188 // __ lwz(r7, MemOperand(sp, offset_to_argv));
1190 // Push a frame with special values setup to mark it as an entry frame.
1196 __ li(r0, Operand(-1)); // Push a bad frame pointer to fail if it is used.
1198 #if V8_OOL_CONSTANT_POOL
1199 __ mov(kConstantPoolRegister,
1200 Operand(isolate()->factory()->empty_constant_pool_array()));
1201 __ push(kConstantPoolRegister);
1203 int marker = type();
1204 __ LoadSmiLiteral(r0, Smi::FromInt(marker));
1207 // Save copies of the top frame descriptor on the stack.
1208 __ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1209 __ LoadP(r0, MemOperand(r8));
1212 // Set up frame pointer for the frame to be pushed.
1213 __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1215 // If this is the outermost JS call, set js_entry_sp value.
1216 Label non_outermost_js;
1217 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
1218 __ mov(r8, Operand(ExternalReference(js_entry_sp)));
1219 __ LoadP(r9, MemOperand(r8));
1220 __ cmpi(r9, Operand::Zero());
1221 __ bne(&non_outermost_js);
1222 __ StoreP(fp, MemOperand(r8));
1223 __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
1226 __ bind(&non_outermost_js);
1227 __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
1229 __ push(ip); // frame-type
1231 // Jump to a faked try block that does the invoke, with a faked catch
1232 // block that sets the pending exception.
1235 __ bind(&handler_entry);
1236 handler_offset_ = handler_entry.pos();
1237 // Caught exception: Store result (exception) in the pending exception
1238 // field in the JSEnv and return a failure sentinel. Coming in here the
1239 // fp will be invalid because the PushTryHandler below sets it to 0 to
1240 // signal the existence of the JSEntry frame.
1241 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1244 __ StoreP(r3, MemOperand(ip));
1245 __ LoadRoot(r3, Heap::kExceptionRootIndex);
1248 // Invoke: Link this frame into the handler chain. There's only one
1249 // handler block in this code object, so its index is 0.
1251 // Must preserve r0-r4, r5-r7 are available. (needs update for PPC)
1252 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
1253 // If an exception not caught by another handler occurs, this handler
1254 // returns control to the code after the b(&invoke) above, which
1255 // restores all kCalleeSaved registers (including cp and fp) to their
1256 // saved values before returning a failure to C.
1258 // Clear any pending exceptions.
1259 __ mov(r8, Operand(isolate()->factory()->the_hole_value()));
1260 __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
1262 __ StoreP(r8, MemOperand(ip));
1264 // Invoke the function by calling through JS entry trampoline builtin.
1265 // Notice that we cannot store a reference to the trampoline code directly in
1266 // this stub, because runtime stubs are not traversed when doing GC.
1268 // Expected registers by Builtins::JSEntryTrampoline
1274 if (type() == StackFrame::ENTRY_CONSTRUCT) {
1275 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
1277 __ mov(ip, Operand(construct_entry));
1279 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
1280 __ mov(ip, Operand(entry));
1282 __ LoadP(ip, MemOperand(ip)); // deref address
1284 // Branch and link to JSEntryTrampoline.
1285 // the address points to the start of the code object, skip the header
1286 __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
1288 __ bctrl(); // make the call
1290 // Unlink this frame from the handler chain.
1293 __ bind(&exit); // r3 holds result
1294 // Check if the current stack frame is marked as the outermost JS frame.
1295 Label non_outermost_js_2;
1297 __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
1298 __ bne(&non_outermost_js_2);
1299 __ mov(r9, Operand::Zero());
1300 __ mov(r8, Operand(ExternalReference(js_entry_sp)));
1301 __ StoreP(r9, MemOperand(r8));
1302 __ bind(&non_outermost_js_2);
1304 // Restore the top frame descriptors from the stack.
1306 __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
1307 __ StoreP(r6, MemOperand(ip));
1309 // Reset the stack to the callee saved registers.
1310 __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
1312 // Restore callee-saved registers and return.
1314 if (FLAG_debug_code) {
1321 __ MultiPop(kCalleeSaved);
1323 __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
1329 // Uses registers r3 to r7.
1330 // Expected input (depending on whether args are in registers or on the stack):
1331 // * object: r3 or at sp + 1 * kPointerSize.
1332 // * function: r4 or at sp.
1334 // An inlined call site may have been generated before calling this stub.
1335 // In this case the offset to the inline site to patch is passed in r8.
1336 // (See LCodeGen::DoInstanceOfKnownGlobal)
1337 void InstanceofStub::Generate(MacroAssembler* masm) {
1338 // Call site inlining and patching implies arguments in registers.
1339 DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
1341 // Fixed register usage throughout the stub:
1342 const Register object = r3; // Object (lhs).
1343 Register map = r6; // Map of the object.
1344 const Register function = r4; // Function (rhs).
1345 const Register prototype = r7; // Prototype of the function.
1346 const Register inline_site = r9;
1347 const Register scratch = r5;
1348 Register scratch3 = no_reg;
1350 // delta = mov + unaligned LoadP + cmp + bne
1351 #if V8_TARGET_ARCH_PPC64
1352 const int32_t kDeltaToLoadBoolResult =
1353 (Assembler::kMovInstructions + 4) * Assembler::kInstrSize;
1355 const int32_t kDeltaToLoadBoolResult =
1356 (Assembler::kMovInstructions + 3) * Assembler::kInstrSize;
1359 Label slow, loop, is_instance, is_not_instance, not_js_object;
1361 if (!HasArgsInRegisters()) {
1362 __ LoadP(object, MemOperand(sp, 1 * kPointerSize));
1363 __ LoadP(function, MemOperand(sp, 0));
1366 // Check that the left hand is a JS object and load map.
1367 __ JumpIfSmi(object, ¬_js_object);
1368 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
1370 // If there is a call site cache don't look in the global cache, but do the
1371 // real lookup and update the call site cache.
1372 if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
1374 __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1376 __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
1378 __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
1379 __ Ret(HasArgsInRegisters() ? 0 : 2);
1384 // Get the prototype of the function.
1385 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
1387 // Check that the function prototype is a JS object.
1388 __ JumpIfSmi(prototype, &slow);
1389 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
1391 // Update the global instanceof or call site inlined cache with the current
1392 // map and function. The cached answer will be set when it is known below.
1393 if (!HasCallSiteInlineCheck()) {
1394 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
1395 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
1397 DCHECK(HasArgsInRegisters());
1398 // Patch the (relocated) inlined map check.
1400 // The offset was stored in r8
1401 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
1402 const Register offset = r8;
1403 __ mflr(inline_site);
1404 __ sub(inline_site, inline_site, offset);
1405 // Get the map location in r8 and patch it.
1406 __ GetRelocatedValue(inline_site, offset, scratch);
1407 __ StoreP(map, FieldMemOperand(offset, Cell::kValueOffset), r0);
1410 // Register mapping: r6 is object map and r7 is function prototype.
1411 // Get prototype of object into r5.
1412 __ LoadP(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
1414 // We don't need map any more. Use it as a scratch register.
1418 // Loop through the prototype chain looking for the function prototype.
1419 __ LoadRoot(scratch3, Heap::kNullValueRootIndex);
1421 __ cmp(scratch, prototype);
1422 __ beq(&is_instance);
1423 __ cmp(scratch, scratch3);
1424 __ beq(&is_not_instance);
1425 __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
1426 __ LoadP(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
1428 Factory* factory = isolate()->factory();
1430 __ bind(&is_instance);
1431 if (!HasCallSiteInlineCheck()) {
1432 __ LoadSmiLiteral(r3, Smi::FromInt(0));
1433 __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
1434 if (ReturnTrueFalseObject()) {
1435 __ Move(r3, factory->true_value());
1438 // Patch the call site to return true.
1439 __ LoadRoot(r3, Heap::kTrueValueRootIndex);
1440 __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1441 // Get the boolean result location in scratch and patch it.
1442 __ SetRelocatedValue(inline_site, scratch, r3);
1444 if (!ReturnTrueFalseObject()) {
1445 __ LoadSmiLiteral(r3, Smi::FromInt(0));
1448 __ Ret(HasArgsInRegisters() ? 0 : 2);
1450 __ bind(&is_not_instance);
1451 if (!HasCallSiteInlineCheck()) {
1452 __ LoadSmiLiteral(r3, Smi::FromInt(1));
1453 __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
1454 if (ReturnTrueFalseObject()) {
1455 __ Move(r3, factory->false_value());
1458 // Patch the call site to return false.
1459 __ LoadRoot(r3, Heap::kFalseValueRootIndex);
1460 __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
1461 // Get the boolean result location in scratch and patch it.
1462 __ SetRelocatedValue(inline_site, scratch, r3);
1464 if (!ReturnTrueFalseObject()) {
1465 __ LoadSmiLiteral(r3, Smi::FromInt(1));
1468 __ Ret(HasArgsInRegisters() ? 0 : 2);
1470 Label object_not_null, object_not_null_or_smi;
1471 __ bind(¬_js_object);
1472 // Before null, smi and string value checks, check that the rhs is a function
1473 // as for a non-function rhs an exception needs to be thrown.
1474 __ JumpIfSmi(function, &slow);
1475 __ CompareObjectType(function, scratch3, scratch, JS_FUNCTION_TYPE);
1478 // Null is not instance of anything.
1479 __ Cmpi(object, Operand(isolate()->factory()->null_value()), r0);
1480 __ bne(&object_not_null);
1481 if (ReturnTrueFalseObject()) {
1482 __ Move(r3, factory->false_value());
1484 __ LoadSmiLiteral(r3, Smi::FromInt(1));
1486 __ Ret(HasArgsInRegisters() ? 0 : 2);
1488 __ bind(&object_not_null);
1489 // Smi values are not instances of anything.
1490 __ JumpIfNotSmi(object, &object_not_null_or_smi);
1491 if (ReturnTrueFalseObject()) {
1492 __ Move(r3, factory->false_value());
1494 __ LoadSmiLiteral(r3, Smi::FromInt(1));
1496 __ Ret(HasArgsInRegisters() ? 0 : 2);
1498 __ bind(&object_not_null_or_smi);
1499 // String values are not instances of anything.
1500 __ IsObjectJSStringType(object, scratch, &slow);
1501 if (ReturnTrueFalseObject()) {
1502 __ Move(r3, factory->false_value());
1504 __ LoadSmiLiteral(r3, Smi::FromInt(1));
1506 __ Ret(HasArgsInRegisters() ? 0 : 2);
1508 // Slow-case. Tail call builtin.
1510 if (!ReturnTrueFalseObject()) {
1511 if (HasArgsInRegisters()) {
1514 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
1517 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
1519 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
1521 if (CpuFeatures::IsSupported(ISELECT)) {
1522 __ cmpi(r3, Operand::Zero());
1523 __ LoadRoot(r3, Heap::kTrueValueRootIndex);
1524 __ LoadRoot(r4, Heap::kFalseValueRootIndex);
1525 __ isel(eq, r3, r3, r4);
1527 Label true_value, done;
1528 __ cmpi(r3, Operand::Zero());
1529 __ beq(&true_value);
1531 __ LoadRoot(r3, Heap::kFalseValueRootIndex);
1534 __ bind(&true_value);
1535 __ LoadRoot(r3, Heap::kTrueValueRootIndex);
1539 __ Ret(HasArgsInRegisters() ? 0 : 2);
1544 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
1546 Register receiver = LoadDescriptor::ReceiverRegister();
1547 // Ensure that the vector and slot registers won't be clobbered before
1548 // calling the miss handler.
1549 DCHECK(!FLAG_vector_ics ||
1550 !AreAliased(r7, r8, VectorLoadICDescriptor::VectorRegister(),
1551 VectorLoadICDescriptor::SlotRegister()));
1553 NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r7,
1556 PropertyAccessCompiler::TailCallBuiltin(
1557 masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
1561 void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
1562 // Return address is in lr.
1565 Register receiver = LoadDescriptor::ReceiverRegister();
1566 Register index = LoadDescriptor::NameRegister();
1567 Register scratch = r8;
1568 Register result = r3;
1569 DCHECK(!scratch.is(receiver) && !scratch.is(index));
1570 DCHECK(!FLAG_vector_ics ||
1571 (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
1572 result.is(VectorLoadICDescriptor::SlotRegister())));
1574 // StringCharAtGenerator doesn't use the result register until it's passed
1575 // the different miss possibilities. If it did, we would have a conflict
1576 // when FLAG_vector_ics is true.
1577 StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
1578 &miss, // When not a string.
1579 &miss, // When not a number.
1580 &miss, // When index out of range.
1581 STRING_INDEX_IS_ARRAY_INDEX,
1582 RECEIVER_IS_STRING);
1583 char_at_generator.GenerateFast(masm);
1586 StubRuntimeCallHelper call_helper;
1587 char_at_generator.GenerateSlow(masm, call_helper);
1590 PropertyAccessCompiler::TailCallBuiltin(
1591 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1595 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
1596 // The displacement is the offset of the last parameter (if any)
1597 // relative to the frame pointer.
1598 const int kDisplacement =
1599 StandardFrameConstants::kCallerSPOffset - kPointerSize;
1600 DCHECK(r4.is(ArgumentsAccessReadDescriptor::index()));
1601 DCHECK(r3.is(ArgumentsAccessReadDescriptor::parameter_count()));
1603 // Check that the key is a smi.
1605 __ JumpIfNotSmi(r4, &slow);
1607 // Check if the calling frame is an arguments adaptor frame.
1609 __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1610 __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
1611 STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
1612 __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
1615 // Check index against formal parameters count limit passed in
1616 // through register r3. Use unsigned comparison to get negative
1621 // Read the argument from the stack and return it.
1623 __ SmiToPtrArrayOffset(r6, r6);
1625 __ LoadP(r3, MemOperand(r6, kDisplacement));
1628 // Arguments adaptor case: Check index against actual arguments
1629 // limit found in the arguments adaptor frame. Use unsigned
1630 // comparison to get negative check for free.
1632 __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
1636 // Read the argument from the adaptor frame and return it.
1638 __ SmiToPtrArrayOffset(r6, r6);
1640 __ LoadP(r3, MemOperand(r6, kDisplacement));
1643 // Slow-case: Handle non-smi or out-of-bounds access to arguments
1644 // by calling the runtime system.
1647 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
1651 void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
1652 // sp[0] : number of parameters
1653 // sp[1] : receiver displacement
1656 // Check if the calling frame is an arguments adaptor frame.
1658 __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1659 __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset));
1660 STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
1661 __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
1664 // Patch the arguments.length and the parameters pointer in the current frame.
1665 __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
1666 __ StoreP(r5, MemOperand(sp, 0 * kPointerSize));
1667 __ SmiToPtrArrayOffset(r5, r5);
1669 __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
1670 __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
1673 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1677 void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
1679 // sp[0] : number of parameters (tagged)
1680 // sp[1] : address of receiver argument
1682 // Registers used over whole function:
1683 // r9 : allocated object (tagged)
1684 // r11 : mapped parameter count (tagged)
1686 __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
1687 // r4 = parameter count (tagged)
1689 // Check if the calling frame is an arguments adaptor frame.
1691 Label adaptor_frame, try_allocate;
1692 __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1693 __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset));
1694 STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
1695 __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
1696 __ beq(&adaptor_frame);
1698 // No adaptor, parameter count = argument count.
1700 __ b(&try_allocate);
1702 // We have an adaptor frame. Patch the parameters pointer.
1703 __ bind(&adaptor_frame);
1704 __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
1705 __ SmiToPtrArrayOffset(r7, r5);
1707 __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
1708 __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
1710 // r4 = parameter count (tagged)
1711 // r5 = argument count (tagged)
1712 // Compute the mapped parameter count = min(r4, r5) in r4.
1714 if (CpuFeatures::IsSupported(ISELECT)) {
1715 __ isel(lt, r4, r4, r5);
1723 __ bind(&try_allocate);
1725 // Compute the sizes of backing store, parameter map, and arguments object.
1726 // 1. Parameter map, has 2 extra words containing context and backing store.
1727 const int kParameterMapHeaderSize =
1728 FixedArray::kHeaderSize + 2 * kPointerSize;
1729 // If there are no mapped parameters, we do not need the parameter_map.
1730 __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
1731 if (CpuFeatures::IsSupported(ISELECT)) {
1732 __ SmiToPtrArrayOffset(r11, r4);
1733 __ addi(r11, r11, Operand(kParameterMapHeaderSize));
1734 __ isel(eq, r11, r0, r11);
1738 __ li(r11, Operand::Zero());
1741 __ SmiToPtrArrayOffset(r11, r4);
1742 __ addi(r11, r11, Operand(kParameterMapHeaderSize));
1746 // 2. Backing store.
1747 __ SmiToPtrArrayOffset(r7, r5);
1748 __ add(r11, r11, r7);
1749 __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
1751 // 3. Arguments object.
1752 __ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
1754 // Do the allocation of all three objects in one go.
1755 __ Allocate(r11, r3, r6, r7, &runtime, TAG_OBJECT);
1757 // r3 = address of new object(s) (tagged)
1758 // r5 = argument count (smi-tagged)
1759 // Get the arguments boilerplate from the current native context into r4.
1760 const int kNormalOffset =
1761 Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
1762 const int kAliasedOffset =
1763 Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
1766 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1767 __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
1768 __ cmpi(r4, Operand::Zero());
1769 if (CpuFeatures::IsSupported(ISELECT)) {
1770 __ LoadP(r11, MemOperand(r7, kNormalOffset));
1771 __ LoadP(r7, MemOperand(r7, kAliasedOffset));
1772 __ isel(eq, r7, r11, r7);
1776 __ LoadP(r7, MemOperand(r7, kNormalOffset));
1779 __ LoadP(r7, MemOperand(r7, kAliasedOffset));
1783 // r3 = address of new object (tagged)
1784 // r4 = mapped parameter count (tagged)
1785 // r5 = argument count (smi-tagged)
1786 // r7 = address of arguments map (tagged)
1787 __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
1788 __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
1789 __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
1790 __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
1792 // Set up the callee in-object property.
1793 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
1794 __ LoadP(r6, MemOperand(sp, 2 * kPointerSize));
1795 __ AssertNotSmi(r6);
1796 const int kCalleeOffset =
1797 JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
1798 __ StoreP(r6, FieldMemOperand(r3, kCalleeOffset), r0);
1800 // Use the length (smi tagged) and set that as an in-object property too.
1802 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
1803 const int kLengthOffset =
1804 JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
1805 __ StoreP(r5, FieldMemOperand(r3, kLengthOffset), r0);
1807 // Set up the elements pointer in the allocated arguments object.
1808 // If we allocated a parameter map, r7 will point there, otherwise
1809 // it will point to the backing store.
1810 __ addi(r7, r3, Operand(Heap::kSloppyArgumentsObjectSize));
1811 __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
1813 // r3 = address of new object (tagged)
1814 // r4 = mapped parameter count (tagged)
1815 // r5 = argument count (tagged)
1816 // r7 = address of parameter map or backing store (tagged)
1817 // Initialize parameter map. If there are no mapped arguments, we're done.
1818 Label skip_parameter_map;
1819 __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
1820 if (CpuFeatures::IsSupported(ISELECT)) {
1821 __ isel(eq, r6, r7, r6);
1822 __ beq(&skip_parameter_map);
1826 // Move backing store address to r6, because it is
1827 // expected there when filling in the unmapped arguments.
1829 __ b(&skip_parameter_map);
1833 __ LoadRoot(r9, Heap::kSloppyArgumentsElementsMapRootIndex);
1834 __ StoreP(r9, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
1835 __ AddSmiLiteral(r9, r4, Smi::FromInt(2), r0);
1836 __ StoreP(r9, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
1837 __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
1839 __ SmiToPtrArrayOffset(r9, r4);
1841 __ addi(r9, r9, Operand(kParameterMapHeaderSize));
1842 __ StoreP(r9, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
1845 // Copy the parameter slots and the holes in the arguments.
1846 // We need to fill in mapped_parameter_count slots. They index the context,
1847 // where parameters are stored in reverse order, at
1848 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
1849 // The mapped parameter thus need to get indices
1850 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
1851 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
1852 // We loop from right to left.
1853 Label parameters_loop, parameters_test;
1855 __ LoadP(r11, MemOperand(sp, 0 * kPointerSize));
1856 __ AddSmiLiteral(r11, r11, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
1857 __ sub(r11, r11, r4);
1858 __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
1859 __ SmiToPtrArrayOffset(r6, r9);
1861 __ addi(r6, r6, Operand(kParameterMapHeaderSize));
1863 // r9 = loop variable (tagged)
1864 // r4 = mapping index (tagged)
1865 // r6 = address of backing store (tagged)
1866 // r7 = address of parameter map (tagged)
1867 // r8 = temporary scratch (a.o., for address calculation)
1868 // r10 = the hole value
1869 __ b(¶meters_test);
1871 __ bind(¶meters_loop);
1872 __ SubSmiLiteral(r9, r9, Smi::FromInt(1), r0);
1873 __ SmiToPtrArrayOffset(r8, r9);
1874 __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
1875 __ StorePX(r11, MemOperand(r8, r7));
1876 __ subi(r8, r8, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
1877 __ StorePX(r10, MemOperand(r8, r6));
1878 __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
1879 __ bind(¶meters_test);
1880 __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
1881 __ bne(¶meters_loop);
1883 __ bind(&skip_parameter_map);
1884 // r5 = argument count (tagged)
1885 // r6 = address of backing store (tagged)
1887 // Copy arguments header and remaining slots (if there are any).
1888 __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
1889 __ StoreP(r8, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
1890 __ StoreP(r5, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
1892 Label arguments_loop, arguments_test;
1894 __ LoadP(r7, MemOperand(sp, 1 * kPointerSize));
1895 __ SmiToPtrArrayOffset(r8, r11);
1897 __ b(&arguments_test);
1899 __ bind(&arguments_loop);
1900 __ subi(r7, r7, Operand(kPointerSize));
1901 __ LoadP(r9, MemOperand(r7, 0));
1902 __ SmiToPtrArrayOffset(r8, r11);
1904 __ StoreP(r9, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
1905 __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
1907 __ bind(&arguments_test);
1909 __ blt(&arguments_loop);
1911 // Return and remove the on-stack parameters.
1912 __ addi(sp, sp, Operand(3 * kPointerSize));
1915 // Do the runtime call to allocate the arguments object.
1916 // r5 = argument count (tagged)
1918 __ StoreP(r5, MemOperand(sp, 0 * kPointerSize)); // Patch argument count.
1919 __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
1923 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
1924 // Return address is in lr.
1927 Register receiver = LoadDescriptor::ReceiverRegister();
1928 Register key = LoadDescriptor::NameRegister();
1930 // Check that the key is an array index, that is Uint32.
1931 __ TestIfPositiveSmi(key, r0);
1934 // Everything is fine, call runtime.
1935 __ Push(receiver, key); // Receiver, key.
1937 // Perform tail call to the entry.
1938 __ TailCallExternalReference(
1939 ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
1944 PropertyAccessCompiler::TailCallBuiltin(
1945 masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
1949 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
1950 // sp[0] : number of parameters
1951 // sp[4] : receiver displacement
1953 // Check if the calling frame is an arguments adaptor frame.
1954 Label adaptor_frame, try_allocate, runtime;
1955 __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1956 __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
1957 STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
1958 __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
1959 __ beq(&adaptor_frame);
1961 // Get the length from the frame.
1962 __ LoadP(r4, MemOperand(sp, 0));
1963 __ b(&try_allocate);
1965 // Patch the arguments.length and the parameters pointer.
1966 __ bind(&adaptor_frame);
1967 __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
1968 __ StoreP(r4, MemOperand(sp, 0));
1969 __ SmiToPtrArrayOffset(r6, r4);
1971 __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
1972 __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
1974 // Try the new space allocation. Start out with computing the size
1975 // of the arguments object and the elements array in words.
1976 Label add_arguments_object;
1977 __ bind(&try_allocate);
1978 __ cmpi(r4, Operand::Zero());
1979 __ beq(&add_arguments_object);
1981 __ addi(r4, r4, Operand(FixedArray::kHeaderSize / kPointerSize));
1982 __ bind(&add_arguments_object);
1983 __ addi(r4, r4, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
1985 // Do the allocation of both objects in one go.
1986 __ Allocate(r4, r3, r5, r6, &runtime,
1987 static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
1989 // Get the arguments boilerplate from the current native context.
1991 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
1992 __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
1995 MemOperand(r7, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
1997 __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
1998 __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
1999 __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
2000 __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
2002 // Get the length (smi tagged) and set that as an in-object property too.
2003 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
2004 __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
2007 FieldMemOperand(r3, JSObject::kHeaderSize +
2008 Heap::kArgumentsLengthIndex * kPointerSize),
2011 // If there are no actual arguments, we're done.
2013 __ cmpi(r4, Operand::Zero());
2016 // Get the parameters pointer from the stack.
2017 __ LoadP(r5, MemOperand(sp, 1 * kPointerSize));
2019 // Set up the elements pointer in the allocated arguments object and
2020 // initialize the header in the elements fixed array.
2021 __ addi(r7, r3, Operand(Heap::kStrictArgumentsObjectSize));
2022 __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
2023 __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
2024 __ StoreP(r6, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
2025 __ StoreP(r4, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
2026 // Untag the length for the loop.
2029 // Copy the fixed array slots.
2031 // Set up r7 to point just prior to the first array slot.
2033 Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
2036 // Pre-decrement r5 with kPointerSize on each iteration.
2037 // Pre-decrement in order to skip receiver.
2038 __ LoadPU(r6, MemOperand(r5, -kPointerSize));
2039 // Pre-increment r7 with kPointerSize on each iteration.
2040 __ StorePU(r6, MemOperand(r7, kPointerSize));
2043 // Return and remove the on-stack parameters.
2045 __ addi(sp, sp, Operand(3 * kPointerSize));
2048 // Do the runtime call to allocate the arguments object.
2050 __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
2054 void RegExpExecStub::Generate(MacroAssembler* masm) {
2055 // Just jump directly to runtime if native RegExp is not selected at compile
2056 // time or if regexp entry in generated code is turned off runtime switch or
2058 #ifdef V8_INTERPRETED_REGEXP
2059 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2060 #else // V8_INTERPRETED_REGEXP
2062 // Stack frame on entry.
2063 // sp[0]: last_match_info (expected JSArray)
2064 // sp[4]: previous index
2065 // sp[8]: subject string
2066 // sp[12]: JSRegExp object
2068 const int kLastMatchInfoOffset = 0 * kPointerSize;
2069 const int kPreviousIndexOffset = 1 * kPointerSize;
2070 const int kSubjectOffset = 2 * kPointerSize;
2071 const int kJSRegExpOffset = 3 * kPointerSize;
2073 Label runtime, br_over, encoding_type_UC16;
2075 // Allocation of registers for this function. These are in callee save
2076 // registers and will be preserved by the call to the native RegExp code, as
2077 // this code is called using the normal C calling convention. When calling
2078 // directly from generated code the native RegExp code will not do a GC and
2079 // therefore the content of these registers are safe to use after the call.
2080 Register subject = r14;
2081 Register regexp_data = r15;
2082 Register last_match_info_elements = r16;
2083 Register code = r17;
2085 // Ensure register assigments are consistent with callee save masks
2086 DCHECK(subject.bit() & kCalleeSaved);
2087 DCHECK(regexp_data.bit() & kCalleeSaved);
2088 DCHECK(last_match_info_elements.bit() & kCalleeSaved);
2089 DCHECK(code.bit() & kCalleeSaved);
2091 // Ensure that a RegExp stack is allocated.
2092 ExternalReference address_of_regexp_stack_memory_address =
2093 ExternalReference::address_of_regexp_stack_memory_address(isolate());
2094 ExternalReference address_of_regexp_stack_memory_size =
2095 ExternalReference::address_of_regexp_stack_memory_size(isolate());
2096 __ mov(r3, Operand(address_of_regexp_stack_memory_size));
2097 __ LoadP(r3, MemOperand(r3, 0));
2098 __ cmpi(r3, Operand::Zero());
2101 // Check that the first argument is a JSRegExp object.
2102 __ LoadP(r3, MemOperand(sp, kJSRegExpOffset));
2103 __ JumpIfSmi(r3, &runtime);
2104 __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
2107 // Check that the RegExp has been compiled (data contains a fixed array).
2108 __ LoadP(regexp_data, FieldMemOperand(r3, JSRegExp::kDataOffset));
2109 if (FLAG_debug_code) {
2110 __ TestIfSmi(regexp_data, r0);
2111 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
2112 __ CompareObjectType(regexp_data, r3, r3, FIXED_ARRAY_TYPE);
2113 __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
2116 // regexp_data: RegExp data (FixedArray)
2117 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
2118 __ LoadP(r3, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
2119 // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
2120 __ CmpSmiLiteral(r3, Smi::FromInt(JSRegExp::IRREGEXP), r0);
2123 // regexp_data: RegExp data (FixedArray)
2124 // Check that the number of captures fit in the static offsets vector buffer.
2126 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2127 // Check (number_of_captures + 1) * 2 <= offsets vector size
2128 // Or number_of_captures * 2 <= offsets vector size - 2
2129 // SmiToShortArrayOffset accomplishes the multiplication by 2 and
2130 // SmiUntag (which is a nop for 32-bit).
2131 __ SmiToShortArrayOffset(r5, r5);
2132 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
2133 __ cmpli(r5, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
2136 // Reset offset for possibly sliced string.
2137 __ li(r11, Operand::Zero());
2138 __ LoadP(subject, MemOperand(sp, kSubjectOffset));
2139 __ JumpIfSmi(subject, &runtime);
2140 __ mr(r6, subject); // Make a copy of the original subject string.
2141 __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
2142 __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
2143 // subject: subject string
2144 // r6: subject string
2145 // r3: subject string instance type
2146 // regexp_data: RegExp data (FixedArray)
2147 // Handle subject string according to its encoding and representation:
2148 // (1) Sequential string? If yes, go to (5).
2149 // (2) Anything but sequential or cons? If yes, go to (6).
2150 // (3) Cons string. If the string is flat, replace subject with first string.
2151 // Otherwise bailout.
2152 // (4) Is subject external? If yes, go to (7).
2153 // (5) Sequential string. Load regexp code according to encoding.
2157 // Deferred code at the end of the stub:
2158 // (6) Not a long external string? If yes, go to (8).
2159 // (7) External string. Make it, offset-wise, look like a sequential string.
2161 // (8) Short external string or not a string? If yes, bail out to runtime.
2162 // (9) Sliced string. Replace subject with parent. Go to (4).
2164 Label seq_string /* 5 */, external_string /* 7 */, check_underlying /* 4 */,
2165 not_seq_nor_cons /* 6 */, not_long_external /* 8 */;
2167 // (1) Sequential string? If yes, go to (5).
2168 STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
2169 kShortExternalStringMask) == 0x93);
2170 __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
2171 kShortExternalStringMask));
2172 STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
2173 __ beq(&seq_string, cr0); // Go to (5).
2175 // (2) Anything but sequential or cons? If yes, go to (6).
2176 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
2177 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
2178 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
2179 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
2180 STATIC_ASSERT(kExternalStringTag < 0xffffu);
2181 __ cmpi(r4, Operand(kExternalStringTag));
2182 __ bge(¬_seq_nor_cons); // Go to (6).
2184 // (3) Cons string. Check that it's flat.
2185 // Replace subject with first string and reload instance type.
2186 __ LoadP(r3, FieldMemOperand(subject, ConsString::kSecondOffset));
2187 __ CompareRoot(r3, Heap::kempty_stringRootIndex);
2189 __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
2191 // (4) Is subject external? If yes, go to (7).
2192 __ bind(&check_underlying);
2193 __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
2194 __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
2195 STATIC_ASSERT(kSeqStringTag == 0);
2196 STATIC_ASSERT(kStringRepresentationMask == 3);
2197 __ andi(r0, r3, Operand(kStringRepresentationMask));
2198 // The underlying external string is never a short external string.
2199 STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
2200 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
2201 __ bne(&external_string, cr0); // Go to (7).
2203 // (5) Sequential string. Load regexp code according to encoding.
2204 __ bind(&seq_string);
2205 // subject: sequential subject string (or look-alike, external string)
2206 // r6: original subject string
2207 // Load previous index and check range before r6 is overwritten. We have to
2208 // use r6 instead of subject here because subject might have been only made
2209 // to look like a sequential string when it actually is an external string.
2210 __ LoadP(r4, MemOperand(sp, kPreviousIndexOffset));
2211 __ JumpIfNotSmi(r4, &runtime);
2212 __ LoadP(r6, FieldMemOperand(r6, String::kLengthOffset));
2217 STATIC_ASSERT(4 == kOneByteStringTag);
2218 STATIC_ASSERT(kTwoByteStringTag == 0);
2219 STATIC_ASSERT(kStringEncodingMask == 4);
2220 __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC);
2221 __ beq(&encoding_type_UC16, cr0);
2223 FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
2225 __ bind(&encoding_type_UC16);
2226 __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
2229 // (E) Carry on. String handling is done.
2230 // code: irregexp code
2231 // Check that the irregexp code has been generated for the actual string
2232 // encoding. If it has, the field contains a code object otherwise it contains
2233 // a smi (code flushing support).
2234 __ JumpIfSmi(code, &runtime);
2236 // r4: previous index
2237 // r6: encoding of subject string (1 if one_byte, 0 if two_byte);
2238 // code: Address of generated regexp code
2239 // subject: Subject string
2240 // regexp_data: RegExp data (FixedArray)
2241 // All checks done. Now push arguments for native regexp code.
2242 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r3, r5);
2244 // Isolates: note we add an additional parameter here (isolate pointer).
2245 const int kRegExpExecuteArguments = 10;
2246 const int kParameterRegisters = 8;
2247 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
2249 // Stack pointer now points to cell where return address is to be written.
2250 // Arguments are before that on the stack or in registers.
2252 // Argument 10 (in stack parameter area): Pass current isolate address.
2253 __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2254 __ StoreP(r3, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
2256 // Argument 9 is a dummy that reserves the space used for
2257 // the return address added by the ExitFrame in native calls.
2259 // Argument 8 (r10): Indicate that this is a direct call from JavaScript.
2260 __ li(r10, Operand(1));
2262 // Argument 7 (r9): Start (high end) of backtracking stack memory area.
2263 __ mov(r3, Operand(address_of_regexp_stack_memory_address));
2264 __ LoadP(r3, MemOperand(r3, 0));
2265 __ mov(r5, Operand(address_of_regexp_stack_memory_size));
2266 __ LoadP(r5, MemOperand(r5, 0));
2269 // Argument 6 (r8): Set the number of capture registers to zero to force
2270 // global egexps to behave as non-global. This does not affect non-global
2272 __ li(r8, Operand::Zero());
2274 // Argument 5 (r7): static offsets vector buffer.
2277 Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
2279 // For arguments 4 (r6) and 3 (r5) get string length, calculate start of data
2280 // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
2281 __ addi(r18, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
2282 __ xori(r6, r6, Operand(1));
2283 // Load the length from the original subject string from the previous stack
2284 // frame. Therefore we have to use fp, which points exactly to two pointer
2285 // sizes below the previous sp. (Because creating a new stack frame pushes
2286 // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
2287 __ LoadP(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
2288 // If slice offset is not 0, load the length from the original sliced string.
2289 // Argument 4, r6: End of string data
2290 // Argument 3, r5: Start of string data
2291 // Prepare start and end index of the input.
2292 __ ShiftLeft_(r11, r11, r6);
2293 __ add(r11, r18, r11);
2294 __ ShiftLeft_(r5, r4, r6);
2295 __ add(r5, r11, r5);
2297 __ LoadP(r18, FieldMemOperand(subject, String::kLengthOffset));
2299 __ ShiftLeft_(r6, r18, r6);
2300 __ add(r6, r11, r6);
2302 // Argument 2 (r4): Previous index.
2305 // Argument 1 (r3): Subject string.
2308 // Locate the code entry and call it.
2309 __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
2312 #if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR)
2313 // Even Simulated AIX/PPC64 Linux uses a function descriptor for the
2314 // RegExp routine. Extract the instruction address here since
2315 // DirectCEntryStub::GenerateCall will not do it for calls out to
2316 // what it thinks is C code compiled for the simulator/host
2318 __ LoadP(code, MemOperand(code, 0)); // Instruction address
2321 DirectCEntryStub stub(isolate());
2322 stub.GenerateCall(masm, code);
2324 __ LeaveExitFrame(false, no_reg, true);
2327 // subject: subject string (callee saved)
2328 // regexp_data: RegExp data (callee saved)
2329 // last_match_info_elements: Last match info elements (callee saved)
2330 // Check the result.
2332 __ cmpi(r3, Operand(1));
2333 // We expect exactly one result since we force the called regexp to behave
2337 __ cmpi(r3, Operand(NativeRegExpMacroAssembler::FAILURE));
2339 __ cmpi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION));
2340 // If not exception it can only be retry. Handle that in the runtime system.
2342 // Result must now be exception. If there is no pending exception already a
2343 // stack overflow (on the backtrack stack) was detected in RegExp code but
2344 // haven't created the exception yet. Handle that in the runtime system.
2345 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
2346 __ mov(r4, Operand(isolate()->factory()->the_hole_value()));
2347 __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
2349 __ LoadP(r3, MemOperand(r5, 0));
2353 __ StoreP(r4, MemOperand(r5, 0)); // Clear pending exception.
2355 // Check if the exception is a termination. If so, throw as uncatchable.
2356 __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
2358 Label termination_exception;
2359 __ beq(&termination_exception);
2363 __ bind(&termination_exception);
2364 __ ThrowUncatchable(r3);
2367 // For failure and exception return null.
2368 __ mov(r3, Operand(isolate()->factory()->null_value()));
2369 __ addi(sp, sp, Operand(4 * kPointerSize));
2372 // Process the result from the native regexp code.
2375 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
2376 // Calculate number of capture registers (number_of_captures + 1) * 2.
2377 // SmiToShortArrayOffset accomplishes the multiplication by 2 and
2378 // SmiUntag (which is a nop for 32-bit).
2379 __ SmiToShortArrayOffset(r4, r4);
2380 __ addi(r4, r4, Operand(2));
2382 __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
2383 __ JumpIfSmi(r3, &runtime);
2384 __ CompareObjectType(r3, r5, r5, JS_ARRAY_TYPE);
2386 // Check that the JSArray is in fast case.
2387 __ LoadP(last_match_info_elements,
2388 FieldMemOperand(r3, JSArray::kElementsOffset));
2390 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
2391 __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex);
2393 // Check that the last match info has space for the capture registers and the
2394 // additional information.
2396 r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
2397 __ addi(r5, r4, Operand(RegExpImpl::kLastMatchOverhead));
2398 __ SmiUntag(r0, r3);
2402 // r4: number of capture registers
2403 // subject: subject string
2404 // Store the capture count.
2406 __ StoreP(r5, FieldMemOperand(last_match_info_elements,
2407 RegExpImpl::kLastCaptureCountOffset),
2409 // Store last subject and last input.
2410 __ StoreP(subject, FieldMemOperand(last_match_info_elements,
2411 RegExpImpl::kLastSubjectOffset),
2414 __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
2415 subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
2417 __ StoreP(subject, FieldMemOperand(last_match_info_elements,
2418 RegExpImpl::kLastInputOffset),
2420 __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
2421 subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
2423 // Get the static offsets vector filled by the native regexp code.
2424 ExternalReference address_of_static_offsets_vector =
2425 ExternalReference::address_of_static_offsets_vector(isolate());
2426 __ mov(r5, Operand(address_of_static_offsets_vector));
2428 // r4: number of capture registers
2429 // r5: offsets vector
2431 // Capture register counter starts from number of capture registers and
2432 // counts down until wraping after zero.
2434 r3, last_match_info_elements,
2435 Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
2436 __ addi(r5, r5, Operand(-kIntSize)); // bias down for lwzu
2438 __ bind(&next_capture);
2439 // Read the value from the static offsets vector buffer.
2440 __ lwzu(r6, MemOperand(r5, kIntSize));
2441 // Store the smi value in the last match info.
2443 __ StorePU(r6, MemOperand(r3, kPointerSize));
2444 __ bdnz(&next_capture);
2446 // Return last match info.
2447 __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
2448 __ addi(sp, sp, Operand(4 * kPointerSize));
2451 // Do the runtime call to execute the regexp.
2453 __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
2455 // Deferred code for string handling.
2456 // (6) Not a long external string? If yes, go to (8).
2457 __ bind(¬_seq_nor_cons);
2458 // Compare flags are still set.
2459 __ bgt(¬_long_external); // Go to (8).
2461 // (7) External string. Make it, offset-wise, look like a sequential string.
2462 __ bind(&external_string);
2463 __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
2464 __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
2465 if (FLAG_debug_code) {
2466 // Assert that we do not have a cons or slice (indirect strings) here.
2467 // Sequential strings have already been ruled out.
2468 STATIC_ASSERT(kIsIndirectStringMask == 1);
2469 __ andi(r0, r3, Operand(kIsIndirectStringMask));
2470 __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
2473 FieldMemOperand(subject, ExternalString::kResourceDataOffset));
2474 // Move the pointer so that offset-wise, it looks like a sequential string.
2475 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
2476 __ subi(subject, subject,
2477 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
2478 __ b(&seq_string); // Go to (5).
2480 // (8) Short external string or not a string? If yes, bail out to runtime.
2481 __ bind(¬_long_external);
2482 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
2483 __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
2484 __ bne(&runtime, cr0);
2486 // (9) Sliced string. Replace subject with parent. Go to (4).
2487 // Load offset into r11 and replace subject string with parent.
2488 __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
2490 __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
2491 __ b(&check_underlying); // Go to (4).
2492 #endif // V8_INTERPRETED_REGEXP
2496 static void GenerateRecordCallTarget(MacroAssembler* masm) {
2497 // Cache the called function in a feedback vector slot. Cache states
2498 // are uninitialized, monomorphic (indicated by a JSFunction), and
2500 // r3 : number of arguments to the construct function
2501 // r4 : the function to call
2502 // r5 : Feedback vector
2503 // r6 : slot in feedback vector (Smi)
2504 Label initialize, done, miss, megamorphic, not_array_function;
2506 DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
2507 masm->isolate()->heap()->megamorphic_symbol());
2508 DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
2509 masm->isolate()->heap()->uninitialized_symbol());
2511 // Load the cache state into r7.
2512 __ SmiToPtrArrayOffset(r7, r6);
2514 __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
2516 // A monomorphic cache hit or an already megamorphic state: invoke the
2517 // function without changing the state.
2521 if (!FLAG_pretenuring_call_new) {
2522 // If we came here, we need to see if we are the array function.
2523 // If we didn't have a matching function, and we didn't find the megamorph
2524 // sentinel, then we have in the slot either some other function or an
2525 // AllocationSite. Do a map check on the object in ecx.
2526 __ LoadP(r8, FieldMemOperand(r7, 0));
2527 __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
2530 // Make sure the function is the Array() function
2531 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
2533 __ bne(&megamorphic);
2539 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
2541 __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
2542 __ beq(&initialize);
2543 // MegamorphicSentinel is an immortal immovable object (undefined) so no
2544 // write-barrier is needed.
2545 __ bind(&megamorphic);
2546 __ SmiToPtrArrayOffset(r7, r6);
2548 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
2549 __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
2552 // An uninitialized cache is patched with the function
2553 __ bind(&initialize);
2555 if (!FLAG_pretenuring_call_new) {
2556 // Make sure the function is the Array() function.
2557 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
2559 __ bne(¬_array_function);
2561 // The target function is the Array constructor,
2562 // Create an AllocationSite if we don't already have it, store it in the
2565 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2567 // Arguments register must be smi-tagged to call out.
2569 __ Push(r6, r5, r4, r3);
2571 CreateAllocationSiteStub create_stub(masm->isolate());
2572 __ CallStub(&create_stub);
2574 __ Pop(r6, r5, r4, r3);
2579 __ bind(¬_array_function);
2582 __ SmiToPtrArrayOffset(r7, r6);
2584 __ addi(r7, r7, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2585 __ StoreP(r4, MemOperand(r7, 0));
2587 __ Push(r7, r5, r4);
2588 __ RecordWrite(r5, r7, r4, kLRHasNotBeenSaved, kDontSaveFPRegs,
2589 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
2596 static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
2597 // Do not transform the receiver for strict mode functions and natives.
2598 __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
2599 __ lwz(r7, FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
2601 #if V8_TARGET_ARCH_PPC64
2602 SharedFunctionInfo::kStrictModeFunction,
2604 SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
2609 // Do not transform the receiver for native.
2611 #if V8_TARGET_ARCH_PPC64
2612 SharedFunctionInfo::kNative,
2614 SharedFunctionInfo::kNative + kSmiTagSize,
2621 static void EmitSlowCase(MacroAssembler* masm, int argc, Label* non_function) {
2622 // Check for function proxy.
2623 STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
2624 __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE));
2625 __ bne(non_function);
2626 __ push(r4); // put proxy as additional argument
2627 __ li(r3, Operand(argc + 1));
2628 __ li(r5, Operand::Zero());
2629 __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
2631 Handle<Code> adaptor =
2632 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
2633 __ Jump(adaptor, RelocInfo::CODE_TARGET);
2636 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
2637 // of the original receiver from the call site).
2638 __ bind(non_function);
2639 __ StoreP(r4, MemOperand(sp, argc * kPointerSize), r0);
2640 __ li(r3, Operand(argc)); // Set up the number of arguments.
2641 __ li(r5, Operand::Zero());
2642 __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION);
2643 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2644 RelocInfo::CODE_TARGET);
2648 static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
2649 // Wrap the receiver and patch it back onto the stack.
2651 FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
2653 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
2656 __ StoreP(r3, MemOperand(sp, argc * kPointerSize), r0);
2661 static void CallFunctionNoFeedback(MacroAssembler* masm, int argc,
2662 bool needs_checks, bool call_as_method) {
2663 // r4 : the function to call
2664 Label slow, non_function, wrap, cont;
2667 // Check that the function is really a JavaScript function.
2668 // r4: pushed function (to be verified)
2669 __ JumpIfSmi(r4, &non_function);
2671 // Goto slow case if we do not have a function.
2672 __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
2676 // Fast-case: Invoke the function now.
2677 // r4: pushed function
2678 ParameterCount actual(argc);
2680 if (call_as_method) {
2682 EmitContinueIfStrictOrNative(masm, &cont);
2685 // Compute the receiver in sloppy mode.
2686 __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0);
2689 __ JumpIfSmi(r6, &wrap);
2690 __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE);
2699 __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
2702 // Slow-case: Non-function called.
2704 EmitSlowCase(masm, argc, &non_function);
2707 if (call_as_method) {
2709 EmitWrapCase(masm, argc, &cont);
2714 void CallFunctionStub::Generate(MacroAssembler* masm) {
2715 CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
2719 void CallConstructStub::Generate(MacroAssembler* masm) {
2720 // r3 : number of arguments
2721 // r4 : the function to call
2722 // r5 : feedback vector
2723 // r6 : (only if r5 is not the megamorphic symbol) slot in feedback
2725 Label slow, non_function_call;
2727 // Check that the function is not a smi.
2728 __ JumpIfSmi(r4, &non_function_call);
2729 // Check that the function is a JSFunction.
2730 __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
2733 if (RecordCallTarget()) {
2734 GenerateRecordCallTarget(masm);
2736 __ SmiToPtrArrayOffset(r8, r6);
2738 if (FLAG_pretenuring_call_new) {
2739 // Put the AllocationSite from the feedback vector into r5.
2740 // By adding kPointerSize we encode that we know the AllocationSite
2741 // entry is at the feedback vector slot given by r6 + 1.
2742 __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize + kPointerSize));
2744 // Put the AllocationSite from the feedback vector into r5, or undefined.
2745 __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
2746 __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
2747 __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
2748 if (CpuFeatures::IsSupported(ISELECT)) {
2749 __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
2750 __ isel(eq, r5, r5, r8);
2752 Label feedback_register_initialized;
2753 __ beq(&feedback_register_initialized);
2754 __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
2755 __ bind(&feedback_register_initialized);
2759 __ AssertUndefinedOrAllocationSite(r5, r8);
2762 // Pass function as original constructor.
2765 // Jump to the function-specific construct stub.
2766 Register jmp_reg = r7;
2767 __ LoadP(jmp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
2769 FieldMemOperand(jmp_reg, SharedFunctionInfo::kConstructStubOffset));
2770 __ addi(ip, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
2771 __ JumpToJSEntry(ip);
2773 // r3: number of arguments
2774 // r4: called object
2778 STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
2779 __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE));
2780 __ bne(&non_function_call);
2781 __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
2784 __ bind(&non_function_call);
2785 __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
2787 // Set expected number of arguments to zero (not changing r3).
2788 __ li(r5, Operand::Zero());
2789 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
2790 RelocInfo::CODE_TARGET);
2794 static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
2795 __ LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2797 FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
2799 FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
2803 void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
2808 int argc = arg_count();
2809 ParameterCount actual(argc);
2811 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
2815 __ mov(r3, Operand(arg_count()));
2816 __ SmiToPtrArrayOffset(r7, r6);
2818 __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
2820 // Verify that r7 contains an AllocationSite
2821 __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
2822 __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
2826 ArrayConstructorStub stub(masm->isolate(), arg_count());
2827 __ TailCallStub(&stub);
2832 // The slow case, we need this no matter what to complete a call after a miss.
2833 CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
2836 __ stop("Unexpected code address");
2840 void CallICStub::Generate(MacroAssembler* masm) {
2842 // r6 - slot id (Smi)
2844 const int with_types_offset =
2845 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
2846 const int generic_offset =
2847 FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
2848 Label extra_checks_or_miss, slow_start;
2849 Label slow, non_function, wrap, cont;
2850 Label have_js_function;
2851 int argc = arg_count();
2852 ParameterCount actual(argc);
2854 // The checks. First, does r4 match the recorded monomorphic target?
2855 __ SmiToPtrArrayOffset(r7, r6);
2857 __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
2859 // We don't know that we have a weak cell. We might have a private symbol
2860 // or an AllocationSite, but the memory is safe to examine.
2861 // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to
2863 // WeakCell::kValueOffset - contains a JSFunction or Smi(0)
2864 // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not
2865 // computed, meaning that it can't appear to be a pointer. If the low bit is
2866 // 0, then hash is computed, but the 0 bit prevents the field from appearing
2868 STATIC_ASSERT(WeakCell::kSize >= kPointerSize);
2869 STATIC_ASSERT(AllocationSite::kTransitionInfoOffset ==
2870 WeakCell::kValueOffset &&
2871 WeakCell::kValueOffset == Symbol::kHashFieldSlot);
2873 __ LoadP(r8, FieldMemOperand(r7, WeakCell::kValueOffset));
2875 __ bne(&extra_checks_or_miss);
2877 // The compare above could have been a SMI/SMI comparison. Guard against this
2878 // convincing us that we have a monomorphic JSFunction.
2879 __ JumpIfSmi(r4, &extra_checks_or_miss);
2881 __ bind(&have_js_function);
2882 if (CallAsMethod()) {
2883 EmitContinueIfStrictOrNative(masm, &cont);
2884 // Compute the receiver in sloppy mode.
2885 __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0);
2887 __ JumpIfSmi(r6, &wrap);
2888 __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE);
2894 __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
2897 EmitSlowCase(masm, argc, &non_function);
2899 if (CallAsMethod()) {
2901 EmitWrapCase(masm, argc, &cont);
2904 __ bind(&extra_checks_or_miss);
2905 Label uninitialized, miss;
2907 __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
2908 __ beq(&slow_start);
2910 // The following cases attempt to handle MISS cases without going to the
2912 if (FLAG_trace_ic) {
2916 __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
2917 __ beq(&uninitialized);
2919 // We are going megamorphic. If the feedback is a JSFunction, it is fine
2920 // to handle it here. More complex cases are dealt with in the runtime.
2921 __ AssertNotSmi(r7);
2922 __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
2924 __ SmiToPtrArrayOffset(r7, r6);
2926 __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
2927 __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
2928 // We have to update statistics for runtime profiling.
2929 __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
2930 __ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
2931 __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
2932 __ LoadP(r7, FieldMemOperand(r5, generic_offset));
2933 __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
2934 __ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
2937 __ bind(&uninitialized);
2939 // We are going monomorphic, provided we actually have a JSFunction.
2940 __ JumpIfSmi(r4, &miss);
2942 // Goto miss case if we do not have a function.
2943 __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
2946 // Make sure the function is not the Array() function, which requires special
2947 // behavior on MISS.
2948 __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
2953 __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
2954 __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
2955 __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
2957 // Store the function. Use a stub since we need a frame for allocation.
2962 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2963 CreateWeakCellStub create_stub(masm->isolate());
2965 __ CallStub(&create_stub);
2969 __ b(&have_js_function);
2971 // We are here because tracing is on or we encountered a MISS case we can't
2977 __ bind(&slow_start);
2978 // Check that the function is really a JavaScript function.
2979 // r4: pushed function (to be verified)
2980 __ JumpIfSmi(r4, &non_function);
2982 // Goto slow case if we do not have a function.
2983 __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
2985 __ b(&have_js_function);
2989 void CallICStub::GenerateMiss(MacroAssembler* masm) {
2990 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
2992 // Push the function and feedback info.
2993 __ Push(r4, r5, r6);
2996 IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
2997 : IC::kCallIC_Customization_Miss;
2999 ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
3000 __ CallExternalReference(miss, 3);
3002 // Move result to r4 and exit the internal frame.
3007 // StringCharCodeAtGenerator
3008 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3009 // If the receiver is a smi trigger the non-string case.
3010 if (check_mode_ == RECEIVER_IS_UNKNOWN) {
3011 __ JumpIfSmi(object_, receiver_not_string_);
3013 // Fetch the instance type of the receiver into result register.
3014 __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3015 __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3016 // If the receiver is not a string trigger the non-string case.
3017 __ andi(r0, result_, Operand(kIsNotStringMask));
3018 __ bne(receiver_not_string_, cr0);
3021 // If the index is non-smi trigger the non-smi case.
3022 __ JumpIfNotSmi(index_, &index_not_smi_);
3023 __ bind(&got_smi_index_);
3025 // Check for index out of range.
3026 __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
3027 __ cmpl(ip, index_);
3028 __ ble(index_out_of_range_);
3030 __ SmiUntag(index_);
3032 StringCharLoadGenerator::Generate(masm, object_, index_, result_,
3040 void StringCharCodeAtGenerator::GenerateSlow(
3041 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
3042 __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
3044 // Index is not a smi.
3045 __ bind(&index_not_smi_);
3046 // If index is a heap number, try converting it to an integer.
3047 __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
3049 call_helper.BeforeCall(masm);
3051 __ push(index_); // Consumed by runtime conversion function.
3052 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
3053 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
3055 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
3056 // NumberToSmi discards numbers that are not exact integers.
3057 __ CallRuntime(Runtime::kNumberToSmi, 1);
3059 // Save the conversion result before the pop instructions below
3060 // have a chance to overwrite it.
3061 __ Move(index_, r3);
3063 // Reload the instance type.
3064 __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3065 __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3066 call_helper.AfterCall(masm);
3067 // If index is still not a smi, it must be out of range.
3068 __ JumpIfNotSmi(index_, index_out_of_range_);
3069 // Otherwise, return to the fast path.
3070 __ b(&got_smi_index_);
3072 // Call runtime. We get here when the receiver is a string and the
3073 // index is a number, but the code of getting the actual character
3074 // is too complex (e.g., when the string needs to be flattened).
3075 __ bind(&call_runtime_);
3076 call_helper.BeforeCall(masm);
3078 __ Push(object_, index_);
3079 __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
3080 __ Move(result_, r3);
3081 call_helper.AfterCall(masm);
3084 __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
3088 // -------------------------------------------------------------------------
3089 // StringCharFromCodeGenerator
3091 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3092 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3093 DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
3094 __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCode));
3095 __ ori(r0, r0, Operand(kSmiTagMask));
3096 __ and_(r0, code_, r0);
3097 __ cmpi(r0, Operand::Zero());
3098 __ bne(&slow_case_);
3100 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3101 // At this point code register contains smi tagged one-byte char code.
3103 __ SmiToPtrArrayOffset(code_, code_);
3104 __ add(result_, result_, code_);
3106 __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3107 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
3108 __ beq(&slow_case_);
3113 void StringCharFromCodeGenerator::GenerateSlow(
3114 MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
3115 __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
3117 __ bind(&slow_case_);
3118 call_helper.BeforeCall(masm);
3120 __ CallRuntime(Runtime::kCharFromCode, 1);
3121 __ Move(result_, r3);
3122 call_helper.AfterCall(masm);
3125 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
3129 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
3132 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
3133 Register src, Register count,
3135 String::Encoding encoding) {
3136 if (FLAG_debug_code) {
3137 // Check that destination is word aligned.
3138 __ andi(r0, dest, Operand(kPointerAlignmentMask));
3139 __ Check(eq, kDestinationOfCopyNotAligned, cr0);
3142 // Nothing to do for zero characters.
3144 if (encoding == String::TWO_BYTE_ENCODING) {
3145 // double the length
3146 __ add(count, count, count, LeaveOE, SetRC);
3149 __ cmpi(count, Operand::Zero());
3153 // Copy count bytes from src to dst.
3156 __ bind(&byte_loop);
3157 __ lbz(scratch, MemOperand(src));
3158 __ addi(src, src, Operand(1));
3159 __ stb(scratch, MemOperand(dest));
3160 __ addi(dest, dest, Operand(1));
3161 __ bdnz(&byte_loop);
3167 void SubStringStub::Generate(MacroAssembler* masm) {
3170 // Stack frame on entry.
3171 // lr: return address
3176 // This stub is called from the native-call %_SubString(...), so
3177 // nothing can be assumed about the arguments. It is tested that:
3178 // "string" is a sequential string,
3179 // both "from" and "to" are smis, and
3180 // 0 <= from <= to <= string.length.
3181 // If any of these assumptions fail, we call the runtime system.
3183 const int kToOffset = 0 * kPointerSize;
3184 const int kFromOffset = 1 * kPointerSize;
3185 const int kStringOffset = 2 * kPointerSize;
3187 __ LoadP(r5, MemOperand(sp, kToOffset));
3188 __ LoadP(r6, MemOperand(sp, kFromOffset));
3190 // If either to or from had the smi tag bit set, then fail to generic runtime
3191 __ JumpIfNotSmi(r5, &runtime);
3192 __ JumpIfNotSmi(r6, &runtime);
3194 __ SmiUntag(r6, SetRC);
3195 // Both r5 and r6 are untagged integers.
3197 // We want to bailout to runtime here if From is negative.
3198 __ blt(&runtime, cr0); // From < 0.
3201 __ bgt(&runtime); // Fail if from > to.
3204 // Make sure first argument is a string.
3205 __ LoadP(r3, MemOperand(sp, kStringOffset));
3206 __ JumpIfSmi(r3, &runtime);
3207 Condition is_string = masm->IsObjectStringType(r3, r4);
3208 __ b(NegateCondition(is_string), &runtime, cr0);
3211 __ cmpi(r5, Operand(1));
3212 __ b(eq, &single_char);
3214 // Short-cut for the case of trivial substring.
3216 // r3: original string
3217 // r5: result string length
3218 __ LoadP(r7, FieldMemOperand(r3, String::kLengthOffset));
3219 __ SmiUntag(r0, r7);
3221 // Return original string.
3223 // Longer than original string's length or negative: unsafe arguments.
3225 // Shorter than original string's length: an actual substring.
3227 // Deal with different string types: update the index if necessary
3228 // and put the underlying string into r8.
3229 // r3: original string
3230 // r4: instance type
3232 // r6: from index (untagged)
3233 Label underlying_unpacked, sliced_string, seq_or_external_string;
3234 // If the string is not indirect, it can only be sequential or external.
3235 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
3236 STATIC_ASSERT(kIsIndirectStringMask != 0);
3237 __ andi(r0, r4, Operand(kIsIndirectStringMask));
3238 __ beq(&seq_or_external_string, cr0);
3240 __ andi(r0, r4, Operand(kSlicedNotConsMask));
3241 __ bne(&sliced_string, cr0);
3242 // Cons string. Check whether it is flat, then fetch first part.
3243 __ LoadP(r8, FieldMemOperand(r3, ConsString::kSecondOffset));
3244 __ CompareRoot(r8, Heap::kempty_stringRootIndex);
3246 __ LoadP(r8, FieldMemOperand(r3, ConsString::kFirstOffset));
3247 // Update instance type.
3248 __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
3249 __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
3250 __ b(&underlying_unpacked);
3252 __ bind(&sliced_string);
3253 // Sliced string. Fetch parent and correct start index by offset.
3254 __ LoadP(r8, FieldMemOperand(r3, SlicedString::kParentOffset));
3255 __ LoadP(r7, FieldMemOperand(r3, SlicedString::kOffsetOffset));
3256 __ SmiUntag(r4, r7);
3257 __ add(r6, r6, r4); // Add offset to index.
3258 // Update instance type.
3259 __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
3260 __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
3261 __ b(&underlying_unpacked);
3263 __ bind(&seq_or_external_string);
3264 // Sequential or external string. Just move string to the expected register.
3267 __ bind(&underlying_unpacked);
3269 if (FLAG_string_slices) {
3271 // r8: underlying subject string
3272 // r4: instance type of underlying subject string
3274 // r6: adjusted start index (untagged)
3275 __ cmpi(r5, Operand(SlicedString::kMinLength));
3276 // Short slice. Copy instead of slicing.
3277 __ blt(©_routine);
3278 // Allocate new sliced string. At this point we do not reload the instance
3279 // type including the string encoding because we simply rely on the info
3280 // provided by the original string. It does not matter if the original
3281 // string's encoding is wrong because we always have to recheck encoding of
3282 // the newly created string's parent anyways due to externalized strings.
3283 Label two_byte_slice, set_slice_header;
3284 STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
3285 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
3286 __ andi(r0, r4, Operand(kStringEncodingMask));
3287 __ beq(&two_byte_slice, cr0);
3288 __ AllocateOneByteSlicedString(r3, r5, r9, r10, &runtime);
3289 __ b(&set_slice_header);
3290 __ bind(&two_byte_slice);
3291 __ AllocateTwoByteSlicedString(r3, r5, r9, r10, &runtime);
3292 __ bind(&set_slice_header);
3294 __ StoreP(r8, FieldMemOperand(r3, SlicedString::kParentOffset), r0);
3295 __ StoreP(r6, FieldMemOperand(r3, SlicedString::kOffsetOffset), r0);
3298 __ bind(©_routine);
3301 // r8: underlying subject string
3302 // r4: instance type of underlying subject string
3304 // r6: adjusted start index (untagged)
3305 Label two_byte_sequential, sequential_string, allocate_result;
3306 STATIC_ASSERT(kExternalStringTag != 0);
3307 STATIC_ASSERT(kSeqStringTag == 0);
3308 __ andi(r0, r4, Operand(kExternalStringTag));
3309 __ beq(&sequential_string, cr0);
3311 // Handle external string.
3312 // Rule out short external strings.
3313 STATIC_ASSERT(kShortExternalStringTag != 0);
3314 __ andi(r0, r4, Operand(kShortExternalStringTag));
3315 __ bne(&runtime, cr0);
3316 __ LoadP(r8, FieldMemOperand(r8, ExternalString::kResourceDataOffset));
3317 // r8 already points to the first character of underlying string.
3318 __ b(&allocate_result);
3320 __ bind(&sequential_string);
3321 // Locate first character of underlying subject string.
3322 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
3323 __ addi(r8, r8, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3325 __ bind(&allocate_result);
3326 // Sequential acii string. Allocate the result.
3327 STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
3328 __ andi(r0, r4, Operand(kStringEncodingMask));
3329 __ beq(&two_byte_sequential, cr0);
3331 // Allocate and copy the resulting one-byte string.
3332 __ AllocateOneByteString(r3, r5, r7, r9, r10, &runtime);
3334 // Locate first character of substring to copy.
3336 // Locate first character of result.
3337 __ addi(r4, r3, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3339 // r3: result string
3340 // r4: first character of result string
3341 // r5: result string length
3342 // r8: first character of substring to copy
3343 STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3344 StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
3345 String::ONE_BYTE_ENCODING);
3348 // Allocate and copy the resulting two-byte string.
3349 __ bind(&two_byte_sequential);
3350 __ AllocateTwoByteString(r3, r5, r7, r9, r10, &runtime);
3352 // Locate first character of substring to copy.
3353 __ ShiftLeftImm(r4, r6, Operand(1));
3355 // Locate first character of result.
3356 __ addi(r4, r3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
3358 // r3: result string.
3359 // r4: first character of result.
3360 // r5: result length.
3361 // r8: first character of substring to copy.
3362 STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3363 StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
3364 String::TWO_BYTE_ENCODING);
3366 __ bind(&return_r3);
3367 Counters* counters = isolate()->counters();
3368 __ IncrementCounter(counters->sub_string_native(), 1, r6, r7);
3372 // Just jump to runtime to create the sub string.
3374 __ TailCallRuntime(Runtime::kSubString, 3, 1);
3376 __ bind(&single_char);
3377 // r3: original string
3378 // r4: instance type
3380 // r6: from index (untagged)
3382 StringCharAtGenerator generator(r3, r6, r5, r3, &runtime, &runtime, &runtime,
3383 STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
3384 generator.GenerateFast(masm);
3387 generator.SkipSlow(masm, &runtime);
3391 void ToNumberStub::Generate(MacroAssembler* masm) {
3392 // The ToNumber stub takes one argument in r3.
3394 __ JumpIfNotSmi(r3, ¬_smi);
3398 Label not_heap_number;
3399 __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
3400 __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
3402 // r4: instance type.
3403 __ cmpi(r4, Operand(HEAP_NUMBER_TYPE));
3404 __ bne(¬_heap_number);
3406 __ bind(¬_heap_number);
3408 Label not_string, slow_string;
3409 __ cmpli(r4, Operand(FIRST_NONSTRING_TYPE));
3410 __ bge(¬_string);
3411 // Check if string has a cached array index.
3412 __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset));
3413 __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC);
3414 __ bne(&slow_string, cr0);
3415 __ IndexFromHash(r5, r3);
3417 __ bind(&slow_string);
3418 __ push(r3); // Push argument.
3419 __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
3420 __ bind(¬_string);
3423 __ cmpi(r4, Operand(ODDBALL_TYPE));
3424 __ bne(¬_oddball);
3425 __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset));
3427 __ bind(¬_oddball);
3429 __ push(r3); // Push argument.
3430 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
3434 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
3438 Register scratch2) {
3439 Register length = scratch1;
3442 Label strings_not_equal, check_zero_length;
3443 __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
3444 __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
3445 __ cmp(length, scratch2);
3446 __ beq(&check_zero_length);
3447 __ bind(&strings_not_equal);
3448 __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL));
3451 // Check if the length is zero.
3452 Label compare_chars;
3453 __ bind(&check_zero_length);
3454 STATIC_ASSERT(kSmiTag == 0);
3455 __ cmpi(length, Operand::Zero());
3456 __ bne(&compare_chars);
3457 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
3460 // Compare characters.
3461 __ bind(&compare_chars);
3462 GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
3463 &strings_not_equal);
3465 // Characters are equal.
3466 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
3471 void StringHelper::GenerateCompareFlatOneByteStrings(
3472 MacroAssembler* masm, Register left, Register right, Register scratch1,
3473 Register scratch2, Register scratch3) {
3474 Label result_not_equal, compare_lengths;
3475 // Find minimum length and length difference.
3476 __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
3477 __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
3478 __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC);
3479 Register length_delta = scratch3;
3480 if (CpuFeatures::IsSupported(ISELECT)) {
3481 __ isel(gt, scratch1, scratch2, scratch1, cr0);
3485 __ mr(scratch1, scratch2);
3488 Register min_length = scratch1;
3489 STATIC_ASSERT(kSmiTag == 0);
3490 __ cmpi(min_length, Operand::Zero());
3491 __ beq(&compare_lengths);
3494 GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
3497 // Compare lengths - strings up to min-length are equal.
3498 __ bind(&compare_lengths);
3499 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
3500 // Use length_delta as result if it's zero.
3501 __ mr(r3, length_delta);
3502 __ cmpi(r3, Operand::Zero());
3503 __ bind(&result_not_equal);
3504 // Conditionally update the result based either on length_delta or
3505 // the last comparion performed in the loop above.
3506 if (CpuFeatures::IsSupported(ISELECT)) {
3507 __ li(r4, Operand(GREATER));
3508 __ li(r5, Operand(LESS));
3509 __ isel(eq, r3, r0, r4);
3510 __ isel(lt, r3, r5, r3);
3513 Label less_equal, equal;
3514 __ ble(&less_equal);
3515 __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
3517 __ bind(&less_equal);
3519 __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
3526 void StringHelper::GenerateOneByteCharsCompareLoop(
3527 MacroAssembler* masm, Register left, Register right, Register length,
3528 Register scratch1, Label* chars_not_equal) {
3529 // Change index to run from -length to -1 by adding length to string
3530 // start. This means that loop ends when index reaches zero, which
3531 // doesn't need an additional compare.
3532 __ SmiUntag(length);
3533 __ addi(scratch1, length,
3534 Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
3535 __ add(left, left, scratch1);
3536 __ add(right, right, scratch1);
3537 __ subfic(length, length, Operand::Zero());
3538 Register index = length; // index = -length;
3543 __ lbzx(scratch1, MemOperand(left, index));
3544 __ lbzx(r0, MemOperand(right, index));
3545 __ cmp(scratch1, r0);
3546 __ bne(chars_not_equal);
3547 __ addi(index, index, Operand(1));
3548 __ cmpi(index, Operand::Zero());
3553 void StringCompareStub::Generate(MacroAssembler* masm) {
3556 Counters* counters = isolate()->counters();
3558 // Stack frame on entry.
3559 // sp[0]: right string
3560 // sp[4]: left string
3561 __ LoadP(r3, MemOperand(sp)); // Load right in r3, left in r4.
3562 __ LoadP(r4, MemOperand(sp, kPointerSize));
3567 STATIC_ASSERT(EQUAL == 0);
3568 STATIC_ASSERT(kSmiTag == 0);
3569 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
3570 __ IncrementCounter(counters->string_compare_native(), 1, r4, r5);
3571 __ addi(sp, sp, Operand(2 * kPointerSize));
3576 // Check that both objects are sequential one-byte strings.
3577 __ JumpIfNotBothSequentialOneByteStrings(r4, r3, r5, r6, &runtime);
3579 // Compare flat one-byte strings natively. Remove arguments from stack first.
3580 __ IncrementCounter(counters->string_compare_native(), 1, r5, r6);
3581 __ addi(sp, sp, Operand(2 * kPointerSize));
3582 StringHelper::GenerateCompareFlatOneByteStrings(masm, r4, r3, r5, r6, r7);
3584 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
3585 // tagged as a small integer.
3587 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3591 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
3592 // ----------- S t a t e -------------
3595 // -- lr : return address
3596 // -----------------------------------
3598 // Load r5 with the allocation site. We stick an undefined dummy value here
3599 // and replace it with the real allocation site later when we instantiate this
3600 // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
3601 __ Move(r5, handle(isolate()->heap()->undefined_value()));
3603 // Make sure that we actually patched the allocation site.
3604 if (FLAG_debug_code) {
3605 __ TestIfSmi(r5, r0);
3606 __ Assert(ne, kExpectedAllocationSite, cr0);
3608 __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
3609 __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
3612 __ Assert(eq, kExpectedAllocationSite);
3615 // Tail call into the stub that handles binary operations with allocation
3617 BinaryOpWithAllocationSiteStub stub(isolate(), state());
3618 __ TailCallStub(&stub);
3622 void CompareICStub::GenerateSmis(MacroAssembler* masm) {
3623 DCHECK(state() == CompareICState::SMI);
3626 __ JumpIfNotSmi(r5, &miss);
3628 if (GetCondition() == eq) {
3629 // For equality we do not care about the sign of the result.
3630 // __ sub(r3, r3, r4, SetCC);
3633 // Untag before subtracting to avoid handling overflow.
3645 void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
3646 DCHECK(state() == CompareICState::NUMBER);
3649 Label unordered, maybe_undefined1, maybe_undefined2;
3651 Label equal, less_than;
3653 if (left() == CompareICState::SMI) {
3654 __ JumpIfNotSmi(r4, &miss);
3656 if (right() == CompareICState::SMI) {
3657 __ JumpIfNotSmi(r3, &miss);
3660 // Inlining the double comparison and falling back to the general compare
3661 // stub if NaN is involved.
3662 // Load left and right operand.
3663 Label done, left, left_smi, right_smi;
3664 __ JumpIfSmi(r3, &right_smi);
3665 __ CheckMap(r3, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
3667 __ lfd(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
3669 __ bind(&right_smi);
3670 __ SmiToDouble(d1, r3);
3673 __ JumpIfSmi(r4, &left_smi);
3674 __ CheckMap(r4, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
3676 __ lfd(d0, FieldMemOperand(r4, HeapNumber::kValueOffset));
3679 __ SmiToDouble(d0, r4);
3686 // Don't base result on status bits when a NaN is involved.
3687 __ bunordered(&unordered);
3689 // Return a result of -1, 0, or 1, based on status bits.
3690 if (CpuFeatures::IsSupported(ISELECT)) {
3692 __ li(r4, Operand(GREATER));
3693 __ li(r5, Operand(LESS));
3694 __ isel(eq, r3, r0, r4);
3695 __ isel(lt, r3, r5, r3);
3700 // assume greater than
3701 __ li(r3, Operand(GREATER));
3704 __ li(r3, Operand(EQUAL));
3706 __ bind(&less_than);
3707 __ li(r3, Operand(LESS));
3711 __ bind(&unordered);
3712 __ bind(&generic_stub);
3713 CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
3714 CompareICState::GENERIC, CompareICState::GENERIC);
3715 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
3717 __ bind(&maybe_undefined1);
3718 if (Token::IsOrderedRelationalCompareOp(op())) {
3719 __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
3721 __ JumpIfSmi(r4, &unordered);
3722 __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE);
3723 __ bne(&maybe_undefined2);
3727 __ bind(&maybe_undefined2);
3728 if (Token::IsOrderedRelationalCompareOp(op())) {
3729 __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
3738 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
3739 DCHECK(state() == CompareICState::INTERNALIZED_STRING);
3740 Label miss, not_equal;
3742 // Registers containing left and right operands respectively.
3744 Register right = r3;
3748 // Check that both operands are heap objects.
3749 __ JumpIfEitherSmi(left, right, &miss);
3751 // Check that both operands are symbols.
3752 __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3753 __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3754 __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3755 __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3756 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3757 __ orx(tmp1, tmp1, tmp2);
3758 __ andi(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3761 // Internalized strings are compared by identity.
3762 __ cmp(left, right);
3764 // Make sure r3 is non-zero. At this point input operands are
3765 // guaranteed to be non-zero.
3766 DCHECK(right.is(r3));
3767 STATIC_ASSERT(EQUAL == 0);
3768 STATIC_ASSERT(kSmiTag == 0);
3769 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
3770 __ bind(¬_equal);
3778 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
3779 DCHECK(state() == CompareICState::UNIQUE_NAME);
3780 DCHECK(GetCondition() == eq);
3783 // Registers containing left and right operands respectively.
3785 Register right = r3;
3789 // Check that both operands are heap objects.
3790 __ JumpIfEitherSmi(left, right, &miss);
3792 // Check that both operands are unique names. This leaves the instance
3793 // types loaded in tmp1 and tmp2.
3794 __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3795 __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3796 __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3797 __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3799 __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
3800 __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
3802 // Unique names are compared by identity.
3803 __ cmp(left, right);
3805 // Make sure r3 is non-zero. At this point input operands are
3806 // guaranteed to be non-zero.
3807 DCHECK(right.is(r3));
3808 STATIC_ASSERT(EQUAL == 0);
3809 STATIC_ASSERT(kSmiTag == 0);
3810 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
3818 void CompareICStub::GenerateStrings(MacroAssembler* masm) {
3819 DCHECK(state() == CompareICState::STRING);
3820 Label miss, not_identical, is_symbol;
3822 bool equality = Token::IsEqualityOp(op());
3824 // Registers containing left and right operands respectively.
3826 Register right = r3;
3832 // Check that both operands are heap objects.
3833 __ JumpIfEitherSmi(left, right, &miss);
3835 // Check that both operands are strings. This leaves the instance
3836 // types loaded in tmp1 and tmp2.
3837 __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
3838 __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
3839 __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
3840 __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
3841 STATIC_ASSERT(kNotStringTag != 0);
3842 __ orx(tmp3, tmp1, tmp2);
3843 __ andi(r0, tmp3, Operand(kIsNotStringMask));
3846 // Fast check for identical strings.
3847 __ cmp(left, right);
3848 STATIC_ASSERT(EQUAL == 0);
3849 STATIC_ASSERT(kSmiTag == 0);
3850 __ bne(¬_identical);
3851 __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
3853 __ bind(¬_identical);
3855 // Handle not identical strings.
3857 // Check that both strings are internalized strings. If they are, we're done
3858 // because we already know they are not identical. We know they are both
3861 DCHECK(GetCondition() == eq);
3862 STATIC_ASSERT(kInternalizedTag == 0);
3863 __ orx(tmp3, tmp1, tmp2);
3864 __ andi(r0, tmp3, Operand(kIsNotInternalizedMask));
3865 __ bne(&is_symbol, cr0);
3866 // Make sure r3 is non-zero. At this point input operands are
3867 // guaranteed to be non-zero.
3868 DCHECK(right.is(r3));
3870 __ bind(&is_symbol);
3873 // Check that both strings are sequential one-byte.
3875 __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
3878 // Compare flat one-byte strings. Returns when done.
3880 StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
3883 StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
3887 // Handle more complex cases in runtime.
3889 __ Push(left, right);
3891 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
3893 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
3901 void CompareICStub::GenerateObjects(MacroAssembler* masm) {
3902 DCHECK(state() == CompareICState::OBJECT);
3904 __ and_(r5, r4, r3);
3905 __ JumpIfSmi(r5, &miss);
3907 __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE);
3909 __ CompareObjectType(r4, r5, r5, JS_OBJECT_TYPE);
3912 DCHECK(GetCondition() == eq);
3921 void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
3923 Handle<WeakCell> cell = Map::WeakCellForMap(known_map_);
3924 __ and_(r5, r4, r3);
3925 __ JumpIfSmi(r5, &miss);
3926 __ GetWeakValue(r7, cell);
3927 __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
3928 __ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset));
3942 void CompareICStub::GenerateMiss(MacroAssembler* masm) {
3944 // Call the runtime system in a fresh internal frame.
3945 ExternalReference miss =
3946 ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
3948 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
3951 __ LoadSmiLiteral(r0, Smi::FromInt(op()));
3953 __ CallExternalReference(miss, 3);
3954 // Compute the entry point of the rewritten stub.
3955 __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
3956 // Restore registers.
3960 __ JumpToJSEntry(r5);
3964 // This stub is paired with DirectCEntryStub::GenerateCall
3965 void DirectCEntryStub::Generate(MacroAssembler* masm) {
3966 // Place the return address on the stack, making the call
3967 // GC safe. The RegExp backend also relies on this.
3969 __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
3970 __ Call(ip); // Call the C++ function.
3971 __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
3977 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
3978 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
3979 // Native AIX/PPC64 Linux use a function descriptor.
3980 __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
3981 __ LoadP(ip, MemOperand(target, 0)); // Instruction address
3983 // ip needs to be set for DirectCEentryStub::Generate, and also
3984 // for ABI_TOC_ADDRESSABILITY_VIA_IP.
3985 __ Move(ip, target);
3988 intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
3989 __ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
3990 __ Call(r0); // Call the stub.
3994 void NameDictionaryLookupStub::GenerateNegativeLookup(
3995 MacroAssembler* masm, Label* miss, Label* done, Register receiver,
3996 Register properties, Handle<Name> name, Register scratch0) {
3997 DCHECK(name->IsUniqueName());
3998 // If names of slots in range from 1 to kProbes - 1 for the hash value are
3999 // not equal to the name and kProbes-th slot is not used (its name is the
4000 // undefined value), it guarantees the hash table doesn't contain the
4001 // property. It's true even if some slots represent deleted properties
4002 // (their names are the hole value).
4003 for (int i = 0; i < kInlinedProbes; i++) {
4004 // scratch0 points to properties hash.
4005 // Compute the masked index: (hash + i + i * i) & mask.
4006 Register index = scratch0;
4007 // Capacity is smi 2^n.
4008 __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
4009 __ subi(index, index, Operand(1));
4011 ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
4012 __ and_(index, index, ip);
4014 // Scale the index by multiplying by the entry size.
4015 STATIC_ASSERT(NameDictionary::kEntrySize == 3);
4016 __ ShiftLeftImm(ip, index, Operand(1));
4017 __ add(index, index, ip); // index *= 3.
4019 Register entity_name = scratch0;
4020 // Having undefined at this place means the name is not contained.
4021 Register tmp = properties;
4022 __ SmiToPtrArrayOffset(ip, index);
4023 __ add(tmp, properties, ip);
4024 __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
4026 DCHECK(!tmp.is(entity_name));
4027 __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
4028 __ cmp(entity_name, tmp);
4031 // Load the hole ready for use below:
4032 __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
4034 // Stop if found the property.
4035 __ Cmpi(entity_name, Operand(Handle<Name>(name)), r0);
4039 __ cmp(entity_name, tmp);
4042 // Check if the entry name is not a unique name.
4043 __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
4044 __ lbz(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
4045 __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
4048 // Restore the properties.
4049 __ LoadP(properties,
4050 FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4053 const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
4054 r5.bit() | r4.bit() | r3.bit());
4057 __ MultiPush(spill_mask);
4059 __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
4060 __ mov(r4, Operand(Handle<Name>(name)));
4061 NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
4063 __ cmpi(r3, Operand::Zero());
4065 __ MultiPop(spill_mask); // MultiPop does not touch condition flags
4073 // Probe the name dictionary in the |elements| register. Jump to the
4074 // |done| label if a property with the given name is found. Jump to
4075 // the |miss| label otherwise.
4076 // If lookup was successful |scratch2| will be equal to elements + 4 * index.
4077 void NameDictionaryLookupStub::GeneratePositiveLookup(
4078 MacroAssembler* masm, Label* miss, Label* done, Register elements,
4079 Register name, Register scratch1, Register scratch2) {
4080 DCHECK(!elements.is(scratch1));
4081 DCHECK(!elements.is(scratch2));
4082 DCHECK(!name.is(scratch1));
4083 DCHECK(!name.is(scratch2));
4085 __ AssertName(name);
4087 // Compute the capacity mask.
4088 __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
4089 __ SmiUntag(scratch1); // convert smi to int
4090 __ subi(scratch1, scratch1, Operand(1));
4092 // Generate an unrolled loop that performs a few probes before
4093 // giving up. Measurements done on Gmail indicate that 2 probes
4094 // cover ~93% of loads from dictionaries.
4095 for (int i = 0; i < kInlinedProbes; i++) {
4096 // Compute the masked index: (hash + i + i * i) & mask.
4097 __ lwz(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
4099 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4100 // the hash in a separate instruction. The value hash + i + i * i is right
4101 // shifted in the following and instruction.
4102 DCHECK(NameDictionary::GetProbeOffset(i) <
4103 1 << (32 - Name::kHashFieldOffset));
4104 __ addi(scratch2, scratch2,
4105 Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4107 __ srwi(scratch2, scratch2, Operand(Name::kHashShift));
4108 __ and_(scratch2, scratch1, scratch2);
4110 // Scale the index by multiplying by the element size.
4111 DCHECK(NameDictionary::kEntrySize == 3);
4112 // scratch2 = scratch2 * 3.
4113 __ ShiftLeftImm(ip, scratch2, Operand(1));
4114 __ add(scratch2, scratch2, ip);
4116 // Check if the key is identical to the name.
4117 __ ShiftLeftImm(ip, scratch2, Operand(kPointerSizeLog2));
4118 __ add(scratch2, elements, ip);
4119 __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
4124 const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
4125 r5.bit() | r4.bit() | r3.bit()) &
4126 ~(scratch1.bit() | scratch2.bit());
4129 __ MultiPush(spill_mask);
4131 DCHECK(!elements.is(r4));
4133 __ mr(r3, elements);
4135 __ mr(r3, elements);
4138 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
4140 __ cmpi(r3, Operand::Zero());
4141 __ mr(scratch2, r5);
4142 __ MultiPop(spill_mask);
4150 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
4151 // This stub overrides SometimesSetsUpAFrame() to return false. That means
4152 // we cannot call anything that could cause a GC from this stub.
4154 // result: NameDictionary to probe
4156 // dictionary: NameDictionary to probe.
4157 // index: will hold an index of entry if lookup is successful.
4158 // might alias with result_.
4160 // result_ is zero if lookup failed, non zero otherwise.
4162 Register result = r3;
4163 Register dictionary = r3;
4165 Register index = r5;
4168 Register undefined = r8;
4169 Register entry_key = r9;
4170 Register scratch = r9;
4172 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
4174 __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
4176 __ subi(mask, mask, Operand(1));
4178 __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
4180 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
4182 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
4183 // Compute the masked index: (hash + i + i * i) & mask.
4184 // Capacity is smi 2^n.
4186 // Add the probe offset (i + i * i) left shifted to avoid right shifting
4187 // the hash in a separate instruction. The value hash + i + i * i is right
4188 // shifted in the following and instruction.
4189 DCHECK(NameDictionary::GetProbeOffset(i) <
4190 1 << (32 - Name::kHashFieldOffset));
4191 __ addi(index, hash,
4192 Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
4196 __ srwi(r0, index, Operand(Name::kHashShift));
4197 __ and_(index, mask, r0);
4199 // Scale the index by multiplying by the entry size.
4200 DCHECK(NameDictionary::kEntrySize == 3);
4201 __ ShiftLeftImm(scratch, index, Operand(1));
4202 __ add(index, index, scratch); // index *= 3.
4204 __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));
4205 __ add(index, dictionary, scratch);
4206 __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
4208 // Having undefined at this place means the name is not contained.
4209 __ cmp(entry_key, undefined);
4210 __ beq(¬_in_dictionary);
4212 // Stop if found the property.
4213 __ cmp(entry_key, key);
4214 __ beq(&in_dictionary);
4216 if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
4217 // Check if the entry name is not a unique name.
4218 __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
4219 __ lbz(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
4220 __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
4224 __ bind(&maybe_in_dictionary);
4225 // If we are doing negative lookup then probing failure should be
4226 // treated as a lookup success. For positive lookup probing failure
4227 // should be treated as lookup failure.
4228 if (mode() == POSITIVE_LOOKUP) {
4229 __ li(result, Operand::Zero());
4233 __ bind(&in_dictionary);
4234 __ li(result, Operand(1));
4237 __ bind(¬_in_dictionary);
4238 __ li(result, Operand::Zero());
4243 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
4245 StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
4247 // Hydrogen code stubs need stub2 at snapshot time.
4248 StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
4253 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
4254 // the value has just been written into the object, now this stub makes sure
4255 // we keep the GC informed. The word in the object where the value has been
4256 // written is in the address register.
4257 void RecordWriteStub::Generate(MacroAssembler* masm) {
4258 Label skip_to_incremental_noncompacting;
4259 Label skip_to_incremental_compacting;
4261 // The first two branch instructions are generated with labels so as to
4262 // get the offset fixed up correctly by the bind(Label*) call. We patch
4263 // it back and forth between branch condition True and False
4264 // when we start and stop incremental heap marking.
4265 // See RecordWriteStub::Patch for details.
4267 // Clear the bit, branch on True for NOP action initially
4268 __ crclr(Assembler::encode_crbit(cr2, CR_LT));
4269 __ blt(&skip_to_incremental_noncompacting, cr2);
4270 __ blt(&skip_to_incremental_compacting, cr2);
4272 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4273 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4274 MacroAssembler::kReturnAtEnd);
4278 __ bind(&skip_to_incremental_noncompacting);
4279 GenerateIncremental(masm, INCREMENTAL);
4281 __ bind(&skip_to_incremental_compacting);
4282 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
4284 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
4285 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
4286 // patching not required on PPC as the initial path is effectively NOP
4290 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
4293 if (remembered_set_action() == EMIT_REMEMBERED_SET) {
4294 Label dont_need_remembered_set;
4296 __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
4297 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
4298 regs_.scratch0(), &dont_need_remembered_set);
4300 __ CheckPageFlag(regs_.object(), regs_.scratch0(),
4301 1 << MemoryChunk::SCAN_ON_SCAVENGE, ne,
4302 &dont_need_remembered_set);
4304 // First notify the incremental marker if necessary, then update the
4306 CheckNeedsToInformIncrementalMarker(
4307 masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
4308 InformIncrementalMarker(masm);
4309 regs_.Restore(masm);
4310 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4311 MacroAssembler::kReturnAtEnd);
4313 __ bind(&dont_need_remembered_set);
4316 CheckNeedsToInformIncrementalMarker(
4317 masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
4318 InformIncrementalMarker(masm);
4319 regs_.Restore(masm);
4324 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
4325 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
4326 int argument_count = 3;
4327 __ PrepareCallCFunction(argument_count, regs_.scratch0());
4329 r3.is(regs_.address()) ? regs_.scratch0() : regs_.address();
4330 DCHECK(!address.is(regs_.object()));
4331 DCHECK(!address.is(r3));
4332 __ mr(address, regs_.address());
4333 __ mr(r3, regs_.object());
4335 __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
4337 AllowExternalCallThatCantCauseGC scope(masm);
4339 ExternalReference::incremental_marking_record_write_function(isolate()),
4341 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
4345 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
4346 MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
4349 Label need_incremental;
4350 Label need_incremental_pop_scratch;
4352 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
4353 __ lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
4354 __ and_(regs_.scratch0(), regs_.object(), r0);
4357 MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
4358 __ subi(regs_.scratch1(), regs_.scratch1(), Operand(1));
4361 MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
4362 __ cmpi(regs_.scratch1(), Operand::Zero()); // PPC, we could do better here
4363 __ blt(&need_incremental);
4365 // Let's look at the color of the object: If it is not black we don't have
4366 // to inform the incremental marker.
4367 __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
4369 regs_.Restore(masm);
4370 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4371 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4372 MacroAssembler::kReturnAtEnd);
4379 // Get the value from the slot.
4380 __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
4382 if (mode == INCREMENTAL_COMPACTION) {
4383 Label ensure_not_white;
4385 __ CheckPageFlag(regs_.scratch0(), // Contains value.
4386 regs_.scratch1(), // Scratch.
4387 MemoryChunk::kEvacuationCandidateMask, eq,
4390 __ CheckPageFlag(regs_.object(),
4391 regs_.scratch1(), // Scratch.
4392 MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
4395 __ bind(&ensure_not_white);
4398 // We need extra registers for this, so we push the object and the address
4399 // register temporarily.
4400 __ Push(regs_.object(), regs_.address());
4401 __ EnsureNotWhite(regs_.scratch0(), // The value.
4402 regs_.scratch1(), // Scratch.
4403 regs_.object(), // Scratch.
4404 regs_.address(), // Scratch.
4405 &need_incremental_pop_scratch);
4406 __ Pop(regs_.object(), regs_.address());
4408 regs_.Restore(masm);
4409 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
4410 __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
4411 MacroAssembler::kReturnAtEnd);
4416 __ bind(&need_incremental_pop_scratch);
4417 __ Pop(regs_.object(), regs_.address());
4419 __ bind(&need_incremental);
4421 // Fall through when we need to inform the incremental marker.
4425 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
4426 // ----------- S t a t e -------------
4427 // -- r3 : element value to store
4428 // -- r6 : element index as smi
4429 // -- sp[0] : array literal index in function as smi
4430 // -- sp[4] : array literal
4431 // clobbers r3, r5, r7
4432 // -----------------------------------
4435 Label double_elements;
4437 Label slow_elements;
4438 Label fast_elements;
4440 // Get array literal index, array literal and its map.
4441 __ LoadP(r7, MemOperand(sp, 0 * kPointerSize));
4442 __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
4443 __ LoadP(r5, FieldMemOperand(r4, JSObject::kMapOffset));
4445 __ CheckFastElements(r5, r8, &double_elements);
4446 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
4447 __ JumpIfSmi(r3, &smi_element);
4448 __ CheckFastSmiElements(r5, r8, &fast_elements);
4450 // Store into the array literal requires a elements transition. Call into
4452 __ bind(&slow_elements);
4454 __ Push(r4, r6, r3);
4455 __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
4456 __ LoadP(r8, FieldMemOperand(r8, JSFunction::kLiteralsOffset));
4458 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
4460 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
4461 __ bind(&fast_elements);
4462 __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
4463 __ SmiToPtrArrayOffset(r9, r6);
4465 #if V8_TARGET_ARCH_PPC64
4466 // add due to offset alignment requirements of StorePU
4467 __ addi(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
4468 __ StoreP(r3, MemOperand(r9));
4470 __ StorePU(r3, MemOperand(r9, FixedArray::kHeaderSize - kHeapObjectTag));
4472 // Update the write barrier for the array store.
4473 __ RecordWrite(r8, r9, r3, kLRHasNotBeenSaved, kDontSaveFPRegs,
4474 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
4477 // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
4478 // and value is Smi.
4479 __ bind(&smi_element);
4480 __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
4481 __ SmiToPtrArrayOffset(r9, r6);
4483 __ StoreP(r3, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
4486 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
4487 __ bind(&double_elements);
4488 __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
4489 __ StoreNumberToDoubleElements(r3, r6, r8, r9, d0, &slow_elements);
4494 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
4495 CEntryStub ces(isolate(), 1, kSaveFPRegs);
4496 __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
4497 int parameter_count_offset =
4498 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
4499 __ LoadP(r4, MemOperand(fp, parameter_count_offset));
4500 if (function_mode() == JS_FUNCTION_STUB_MODE) {
4501 __ addi(r4, r4, Operand(1));
4503 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
4504 __ slwi(r4, r4, Operand(kPointerSizeLog2));
4510 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
4511 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4512 VectorLoadStub stub(isolate(), state());
4513 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4517 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
4518 EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
4519 VectorKeyedLoadStub stub(isolate());
4520 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4524 void CallICTrampolineStub::Generate(MacroAssembler* masm) {
4525 EmitLoadTypeFeedbackVector(masm, r5);
4526 CallICStub stub(isolate(), state());
4527 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4531 void CallIC_ArrayTrampolineStub::Generate(MacroAssembler* masm) {
4532 EmitLoadTypeFeedbackVector(masm, r5);
4533 CallIC_ArrayStub stub(isolate(), state());
4534 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
4538 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
4539 if (masm->isolate()->function_entry_hook() != NULL) {
4540 PredictableCodeSizeScope predictable(masm,
4541 #if V8_TARGET_ARCH_PPC64
4542 14 * Assembler::kInstrSize);
4544 11 * Assembler::kInstrSize);
4546 ProfileEntryHookStub stub(masm->isolate());
4556 void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
4557 // The entry hook is a "push lr, ip" instruction, followed by a call.
4558 const int32_t kReturnAddressDistanceFromFunctionStart =
4559 Assembler::kCallTargetAddressOffset + 3 * Assembler::kInstrSize;
4561 // This should contain all kJSCallerSaved registers.
4562 const RegList kSavedRegs = kJSCallerSaved | // Caller saved registers.
4563 r15.bit(); // Saved stack pointer.
4565 // We also save lr, so the count here is one higher than the mask indicates.
4566 const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
4568 // Save all caller-save registers as this may be called from anywhere.
4570 __ MultiPush(kSavedRegs | ip.bit());
4572 // Compute the function's address for the first argument.
4573 __ subi(r3, ip, Operand(kReturnAddressDistanceFromFunctionStart));
4575 // The caller's return address is two slots above the saved temporaries.
4576 // Grab that for the second argument to the hook.
4577 __ addi(r4, sp, Operand((kNumSavedRegs + 1) * kPointerSize));
4579 // Align the stack if necessary.
4580 int frame_alignment = masm->ActivationFrameAlignment();
4581 if (frame_alignment > kPointerSize) {
4583 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
4584 __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
4587 #if !defined(USE_SIMULATOR)
4588 uintptr_t entry_hook =
4589 reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
4590 __ mov(ip, Operand(entry_hook));
4592 #if ABI_USES_FUNCTION_DESCRIPTORS
4593 // Function descriptor
4594 __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
4595 __ LoadP(ip, MemOperand(ip, 0));
4596 #elif ABI_TOC_ADDRESSABILITY_VIA_IP
4597 // ip set above, so nothing to do.
4601 __ li(r0, Operand::Zero());
4602 __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
4604 // Under the simulator we need to indirect the entry hook through a
4605 // trampoline function at a known address.
4606 // It additionally takes an isolate as a third parameter
4607 __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
4609 ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
4610 __ mov(ip, Operand(ExternalReference(
4611 &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
4615 #if !defined(USE_SIMULATOR)
4616 __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));
4619 // Restore the stack pointer if needed.
4620 if (frame_alignment > kPointerSize) {
4624 // Also pop lr to get Ret(0).
4625 __ MultiPop(kSavedRegs | ip.bit());
4632 static void CreateArrayDispatch(MacroAssembler* masm,
4633 AllocationSiteOverrideMode mode) {
4634 if (mode == DISABLE_ALLOCATION_SITES) {
4635 T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
4636 __ TailCallStub(&stub);
4637 } else if (mode == DONT_OVERRIDE) {
4639 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4640 for (int i = 0; i <= last_index; ++i) {
4641 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4642 __ Cmpi(r6, Operand(kind), r0);
4643 T stub(masm->isolate(), kind);
4644 __ TailCallStub(&stub, eq);
4647 // If we reached this point there is a problem.
4648 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4655 static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
4656 AllocationSiteOverrideMode mode) {
4657 // r5 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
4658 // r6 - kind (if mode != DISABLE_ALLOCATION_SITES)
4659 // r3 - number of arguments
4660 // r4 - constructor?
4661 // sp[0] - last argument
4662 Label normal_sequence;
4663 if (mode == DONT_OVERRIDE) {
4664 DCHECK(FAST_SMI_ELEMENTS == 0);
4665 DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
4666 DCHECK(FAST_ELEMENTS == 2);
4667 DCHECK(FAST_HOLEY_ELEMENTS == 3);
4668 DCHECK(FAST_DOUBLE_ELEMENTS == 4);
4669 DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
4671 // is the low bit set? If so, we are holey and that is good.
4672 __ andi(r0, r6, Operand(1));
4673 __ bne(&normal_sequence, cr0);
4676 // look at the first argument
4677 __ LoadP(r8, MemOperand(sp, 0));
4678 __ cmpi(r8, Operand::Zero());
4679 __ beq(&normal_sequence);
4681 if (mode == DISABLE_ALLOCATION_SITES) {
4682 ElementsKind initial = GetInitialFastElementsKind();
4683 ElementsKind holey_initial = GetHoleyElementsKind(initial);
4685 ArraySingleArgumentConstructorStub stub_holey(
4686 masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
4687 __ TailCallStub(&stub_holey);
4689 __ bind(&normal_sequence);
4690 ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
4691 DISABLE_ALLOCATION_SITES);
4692 __ TailCallStub(&stub);
4693 } else if (mode == DONT_OVERRIDE) {
4694 // We are going to create a holey array, but our kind is non-holey.
4695 // Fix kind and retry (only if we have an allocation site in the slot).
4696 __ addi(r6, r6, Operand(1));
4698 if (FLAG_debug_code) {
4699 __ LoadP(r8, FieldMemOperand(r5, 0));
4700 __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
4701 __ Assert(eq, kExpectedAllocationSite);
4704 // Save the resulting elements kind in type info. We can't just store r6
4705 // in the AllocationSite::transition_info field because elements kind is
4706 // restricted to a portion of the field...upper bits need to be left alone.
4707 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4708 __ LoadP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
4709 __ AddSmiLiteral(r7, r7, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
4710 __ StoreP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset),
4713 __ bind(&normal_sequence);
4715 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4716 for (int i = 0; i <= last_index; ++i) {
4717 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4718 __ mov(r0, Operand(kind));
4720 ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
4721 __ TailCallStub(&stub, eq);
4724 // If we reached this point there is a problem.
4725 __ Abort(kUnexpectedElementsKindInArrayConstructor);
4733 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
4735 GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
4736 for (int i = 0; i <= to_index; ++i) {
4737 ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
4738 T stub(isolate, kind);
4740 if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
4741 T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
4748 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
4749 ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
4751 ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
4753 ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
4758 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
4760 ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
4761 for (int i = 0; i < 2; i++) {
4762 // For internal arrays we only need a few things
4763 InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
4765 InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
4767 InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
4773 void ArrayConstructorStub::GenerateDispatchToArrayStub(
4774 MacroAssembler* masm, AllocationSiteOverrideMode mode) {
4775 if (argument_count() == ANY) {
4776 Label not_zero_case, not_one_case;
4777 __ cmpi(r3, Operand::Zero());
4778 __ bne(¬_zero_case);
4779 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4781 __ bind(¬_zero_case);
4782 __ cmpi(r3, Operand(1));
4783 __ bgt(¬_one_case);
4784 CreateArrayDispatchOneArgument(masm, mode);
4786 __ bind(¬_one_case);
4787 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4788 } else if (argument_count() == NONE) {
4789 CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
4790 } else if (argument_count() == ONE) {
4791 CreateArrayDispatchOneArgument(masm, mode);
4792 } else if (argument_count() == MORE_THAN_ONE) {
4793 CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
4800 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
4801 // ----------- S t a t e -------------
4802 // -- r3 : argc (only if argument_count() == ANY)
4803 // -- r4 : constructor
4804 // -- r5 : AllocationSite or undefined
4805 // -- sp[0] : return address
4806 // -- sp[4] : last argument
4807 // -----------------------------------
4809 if (FLAG_debug_code) {
4810 // The array construct code is only set for the global and natives
4811 // builtin Array functions which always have maps.
4813 // Initial map for the builtin Array function should be a map.
4814 __ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
4815 // Will both indicate a NULL and a Smi.
4816 __ TestIfSmi(r7, r0);
4817 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
4818 __ CompareObjectType(r7, r7, r8, MAP_TYPE);
4819 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4821 // We should either have undefined in r5 or a valid AllocationSite
4822 __ AssertUndefinedOrAllocationSite(r5, r7);
4826 // Get the elements kind and case on that.
4827 __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
4830 __ LoadP(r6, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
4832 STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
4833 __ And(r6, r6, Operand(AllocationSite::ElementsKindBits::kMask));
4834 GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
4837 GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
4841 void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
4842 ElementsKind kind) {
4843 __ cmpli(r3, Operand(1));
4845 InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
4846 __ TailCallStub(&stub0, lt);
4848 InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
4849 __ TailCallStub(&stubN, gt);
4851 if (IsFastPackedElementsKind(kind)) {
4852 // We might need to create a holey array
4853 // look at the first argument
4854 __ LoadP(r6, MemOperand(sp, 0));
4855 __ cmpi(r6, Operand::Zero());
4857 InternalArraySingleArgumentConstructorStub stub1_holey(
4858 isolate(), GetHoleyElementsKind(kind));
4859 __ TailCallStub(&stub1_holey, ne);
4862 InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
4863 __ TailCallStub(&stub1);
4867 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
4868 // ----------- S t a t e -------------
4870 // -- r4 : constructor
4871 // -- sp[0] : return address
4872 // -- sp[4] : last argument
4873 // -----------------------------------
4875 if (FLAG_debug_code) {
4876 // The array construct code is only set for the global and natives
4877 // builtin Array functions which always have maps.
4879 // Initial map for the builtin Array function should be a map.
4880 __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
4881 // Will both indicate a NULL and a Smi.
4882 __ TestIfSmi(r6, r0);
4883 __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
4884 __ CompareObjectType(r6, r6, r7, MAP_TYPE);
4885 __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
4888 // Figure out the right elements kind
4889 __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
4890 // Load the map's "bit field 2" into |result|.
4891 __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset));
4892 // Retrieve elements_kind from bit field 2.
4893 __ DecodeField<Map::ElementsKindBits>(r6);
4895 if (FLAG_debug_code) {
4897 __ cmpi(r6, Operand(FAST_ELEMENTS));
4899 __ cmpi(r6, Operand(FAST_HOLEY_ELEMENTS));
4900 __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
4904 Label fast_elements_case;
4905 __ cmpi(r6, Operand(FAST_ELEMENTS));
4906 __ beq(&fast_elements_case);
4907 GenerateCase(masm, FAST_HOLEY_ELEMENTS);
4909 __ bind(&fast_elements_case);
4910 GenerateCase(masm, FAST_ELEMENTS);
4914 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
4915 return ref0.address() - ref1.address();
4919 // Calls an API function. Allocates HandleScope, extracts returned value
4920 // from handle and propagates exceptions. Restores context. stack_space
4921 // - space to be unwound on exit (includes the call JS arguments space and
4922 // the additional space allocated for the fast call).
4923 static void CallApiFunctionAndReturn(MacroAssembler* masm,
4924 Register function_address,
4925 ExternalReference thunk_ref,
4927 MemOperand* stack_space_operand,
4928 MemOperand return_value_operand,
4929 MemOperand* context_restore_operand) {
4930 Isolate* isolate = masm->isolate();
4931 ExternalReference next_address =
4932 ExternalReference::handle_scope_next_address(isolate);
4933 const int kNextOffset = 0;
4934 const int kLimitOffset = AddressOffset(
4935 ExternalReference::handle_scope_limit_address(isolate), next_address);
4936 const int kLevelOffset = AddressOffset(
4937 ExternalReference::handle_scope_level_address(isolate), next_address);
4939 // Additional parameter is the address of the actual callback.
4940 DCHECK(function_address.is(r4) || function_address.is(r5));
4941 Register scratch = r6;
4943 __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate)));
4944 __ lbz(scratch, MemOperand(scratch, 0));
4945 __ cmpi(scratch, Operand::Zero());
4947 if (CpuFeatures::IsSupported(ISELECT)) {
4948 __ mov(scratch, Operand(thunk_ref));
4949 __ isel(eq, scratch, function_address, scratch);
4951 Label profiler_disabled;
4952 Label end_profiler_check;
4953 __ beq(&profiler_disabled);
4954 __ mov(scratch, Operand(thunk_ref));
4955 __ b(&end_profiler_check);
4956 __ bind(&profiler_disabled);
4957 __ mr(scratch, function_address);
4958 __ bind(&end_profiler_check);
4961 // Allocate HandleScope in callee-save registers.
4962 // r17 - next_address
4963 // r14 - next_address->kNextOffset
4964 // r15 - next_address->kLimitOffset
4965 // r16 - next_address->kLevelOffset
4966 __ mov(r17, Operand(next_address));
4967 __ LoadP(r14, MemOperand(r17, kNextOffset));
4968 __ LoadP(r15, MemOperand(r17, kLimitOffset));
4969 __ lwz(r16, MemOperand(r17, kLevelOffset));
4970 __ addi(r16, r16, Operand(1));
4971 __ stw(r16, MemOperand(r17, kLevelOffset));
4973 if (FLAG_log_timer_events) {
4974 FrameScope frame(masm, StackFrame::MANUAL);
4975 __ PushSafepointRegisters();
4976 __ PrepareCallCFunction(1, r3);
4977 __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
4978 __ CallCFunction(ExternalReference::log_enter_external_function(isolate),
4980 __ PopSafepointRegisters();
4983 // Native call returns to the DirectCEntry stub which redirects to the
4984 // return address pushed on stack (could have moved after GC).
4985 // DirectCEntry stub itself is generated early and never moves.
4986 DirectCEntryStub stub(isolate);
4987 stub.GenerateCall(masm, scratch);
4989 if (FLAG_log_timer_events) {
4990 FrameScope frame(masm, StackFrame::MANUAL);
4991 __ PushSafepointRegisters();
4992 __ PrepareCallCFunction(1, r3);
4993 __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
4994 __ CallCFunction(ExternalReference::log_leave_external_function(isolate),
4996 __ PopSafepointRegisters();
4999 Label promote_scheduled_exception;
5000 Label exception_handled;
5001 Label delete_allocated_handles;
5002 Label leave_exit_frame;
5003 Label return_value_loaded;
5005 // load value from ReturnValue
5006 __ LoadP(r3, return_value_operand);
5007 __ bind(&return_value_loaded);
5008 // No more valid handles (the result handle was the last one). Restore
5009 // previous handle scope.
5010 __ StoreP(r14, MemOperand(r17, kNextOffset));
5011 if (__ emit_debug_code()) {
5012 __ lwz(r4, MemOperand(r17, kLevelOffset));
5014 __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
5016 __ subi(r16, r16, Operand(1));
5017 __ stw(r16, MemOperand(r17, kLevelOffset));
5018 __ LoadP(r0, MemOperand(r17, kLimitOffset));
5020 __ bne(&delete_allocated_handles);
5022 // Check if the function scheduled an exception.
5023 __ bind(&leave_exit_frame);
5024 __ LoadRoot(r14, Heap::kTheHoleValueRootIndex);
5025 __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate)));
5026 __ LoadP(r15, MemOperand(r15));
5028 __ bne(&promote_scheduled_exception);
5029 __ bind(&exception_handled);
5031 bool restore_context = context_restore_operand != NULL;
5032 if (restore_context) {
5033 __ LoadP(cp, *context_restore_operand);
5035 // LeaveExitFrame expects unwind space to be in a register.
5036 if (stack_space_operand != NULL) {
5037 __ lwz(r14, *stack_space_operand);
5039 __ mov(r14, Operand(stack_space));
5041 __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL);
5044 __ bind(&promote_scheduled_exception);
5046 FrameScope frame(masm, StackFrame::INTERNAL);
5047 __ CallExternalReference(
5048 ExternalReference(Runtime::kPromoteScheduledException, isolate), 0);
5050 __ jmp(&exception_handled);
5052 // HandleScope limit has changed. Delete allocated extensions.
5053 __ bind(&delete_allocated_handles);
5054 __ StoreP(r15, MemOperand(r17, kLimitOffset));
5056 __ PrepareCallCFunction(1, r15);
5057 __ mov(r3, Operand(ExternalReference::isolate_address(isolate)));
5058 __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate),
5061 __ b(&leave_exit_frame);
5065 static void CallApiFunctionStubHelper(MacroAssembler* masm,
5066 const ParameterCount& argc,
5067 bool return_first_arg,
5068 bool call_data_undefined) {
5069 // ----------- S t a t e -------------
5071 // -- r7 : call_data
5073 // -- r4 : api_function_address
5074 // -- r6 : number of arguments if argc is a register
5077 // -- sp[0] : last argument
5079 // -- sp[(argc - 1)* 4] : first argument
5080 // -- sp[argc * 4] : receiver
5081 // -----------------------------------
5083 Register callee = r3;
5084 Register call_data = r7;
5085 Register holder = r5;
5086 Register api_function_address = r4;
5087 Register context = cp;
5089 typedef FunctionCallbackArguments FCA;
5091 STATIC_ASSERT(FCA::kContextSaveIndex == 6);
5092 STATIC_ASSERT(FCA::kCalleeIndex == 5);
5093 STATIC_ASSERT(FCA::kDataIndex == 4);
5094 STATIC_ASSERT(FCA::kReturnValueOffset == 3);
5095 STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
5096 STATIC_ASSERT(FCA::kIsolateIndex == 1);
5097 STATIC_ASSERT(FCA::kHolderIndex == 0);
5098 STATIC_ASSERT(FCA::kArgsLength == 7);
5100 DCHECK(argc.is_immediate() || r3.is(argc.reg()));
5104 // load context from callee
5105 __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
5113 Register scratch = call_data;
5114 if (!call_data_undefined) {
5115 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
5119 // return value default
5122 __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
5127 // Prepare arguments.
5130 // Allocate the v8::Arguments structure in the arguments' space since
5131 // it's not controlled by GC.
5134 // Create 5 extra slots on stack:
5135 // [0] space for DirectCEntryStub's LR save
5136 // [1-4] FunctionCallbackInfo
5137 const int kApiStackSpace = 5;
5138 const int kFunctionCallbackInfoOffset =
5139 (kStackFrameExtraParamSlot + 1) * kPointerSize;
5141 FrameScope frame_scope(masm, StackFrame::MANUAL);
5142 __ EnterExitFrame(false, kApiStackSpace);
5144 DCHECK(!api_function_address.is(r3) && !scratch.is(r3));
5145 // r3 = FunctionCallbackInfo&
5146 // Arguments is after the return address.
5147 __ addi(r3, sp, Operand(kFunctionCallbackInfoOffset));
5148 // FunctionCallbackInfo::implicit_args_
5149 __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
5150 if (argc.is_immediate()) {
5151 // FunctionCallbackInfo::values_
5152 __ addi(ip, scratch,
5153 Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize));
5154 __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
5155 // FunctionCallbackInfo::length_ = argc
5156 __ li(ip, Operand(argc.immediate()));
5157 __ stw(ip, MemOperand(r3, 2 * kPointerSize));
5158 // FunctionCallbackInfo::is_construct_call_ = 0
5159 __ li(ip, Operand::Zero());
5160 __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
5162 __ ShiftLeftImm(ip, argc.reg(), Operand(kPointerSizeLog2));
5163 __ addi(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize));
5164 // FunctionCallbackInfo::values_
5165 __ add(r0, scratch, ip);
5166 __ StoreP(r0, MemOperand(r3, 1 * kPointerSize));
5167 // FunctionCallbackInfo::length_ = argc
5168 __ stw(argc.reg(), MemOperand(r3, 2 * kPointerSize));
5169 // FunctionCallbackInfo::is_construct_call_
5170 __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
5173 ExternalReference thunk_ref =
5174 ExternalReference::invoke_function_callback(masm->isolate());
5176 AllowExternalCallThatCantCauseGC scope(masm);
5177 MemOperand context_restore_operand(
5178 fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
5179 // Stores return the first js argument
5180 int return_value_offset = 0;
5181 if (return_first_arg) {
5182 return_value_offset = 2 + FCA::kArgsLength;
5184 return_value_offset = 2 + FCA::kReturnValueOffset;
5186 MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
5187 int stack_space = 0;
5188 MemOperand is_construct_call_operand =
5189 MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize);
5190 MemOperand* stack_space_operand = &is_construct_call_operand;
5191 if (argc.is_immediate()) {
5192 stack_space = argc.immediate() + FCA::kArgsLength + 1;
5193 stack_space_operand = NULL;
5195 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space,
5196 stack_space_operand, return_value_operand,
5197 &context_restore_operand);
5201 void CallApiFunctionStub::Generate(MacroAssembler* masm) {
5202 bool call_data_undefined = this->call_data_undefined();
5203 CallApiFunctionStubHelper(masm, ParameterCount(r6), false,
5204 call_data_undefined);
5208 void CallApiAccessorStub::Generate(MacroAssembler* masm) {
5209 bool is_store = this->is_store();
5210 int argc = this->argc();
5211 bool call_data_undefined = this->call_data_undefined();
5212 CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store,
5213 call_data_undefined);
5217 void CallApiGetterStub::Generate(MacroAssembler* masm) {
5218 // ----------- S t a t e -------------
5220 // -- sp[4 - kArgsLength*4] : PropertyCallbackArguments object
5222 // -- r5 : api_function_address
5223 // -----------------------------------
5225 Register api_function_address = ApiGetterDescriptor::function_address();
5226 DCHECK(api_function_address.is(r5));
5228 __ mr(r3, sp); // r0 = Handle<Name>
5229 __ addi(r4, r3, Operand(1 * kPointerSize)); // r4 = PCA
5231 // If ABI passes Handles (pointer-sized struct) in a register:
5233 // Create 2 extra slots on stack:
5234 // [0] space for DirectCEntryStub's LR save
5235 // [1] AccessorInfo&
5239 // Create 3 extra slots on stack:
5240 // [0] space for DirectCEntryStub's LR save
5241 // [1] copy of Handle (first arg)
5242 // [2] AccessorInfo&
5243 #if ABI_PASSES_HANDLES_IN_REGS
5244 const int kAccessorInfoSlot = kStackFrameExtraParamSlot + 1;
5245 const int kApiStackSpace = 2;
5247 const int kArg0Slot = kStackFrameExtraParamSlot + 1;
5248 const int kAccessorInfoSlot = kArg0Slot + 1;
5249 const int kApiStackSpace = 3;
5252 FrameScope frame_scope(masm, StackFrame::MANUAL);
5253 __ EnterExitFrame(false, kApiStackSpace);
5255 #if !ABI_PASSES_HANDLES_IN_REGS
5256 // pass 1st arg by reference
5257 __ StoreP(r3, MemOperand(sp, kArg0Slot * kPointerSize));
5258 __ addi(r3, sp, Operand(kArg0Slot * kPointerSize));
5261 // Create PropertyAccessorInfo instance on the stack above the exit frame with
5262 // r4 (internal::Object** args_) as the data.
5263 __ StoreP(r4, MemOperand(sp, kAccessorInfoSlot * kPointerSize));
5264 // r4 = AccessorInfo&
5265 __ addi(r4, sp, Operand(kAccessorInfoSlot * kPointerSize));
5267 const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
5269 ExternalReference thunk_ref =
5270 ExternalReference::invoke_accessor_getter_callback(isolate());
5271 CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
5272 kStackUnwindSpace, NULL,
5273 MemOperand(fp, 6 * kPointerSize), NULL);
5279 } // namespace v8::internal
5281 #endif // V8_TARGET_ARCH_PPC