// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if defined(V8_TARGET_ARCH_X64)

#include "bootstrapper.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)
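
// Throughout this file the __ shorthand expands to ACCESS_MASM(masm), so a
// line like "__ movq(rax, rbx)" emits machine code into the MacroAssembler
// that each Generate method receives.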

void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in rax.
  Label check_heap_number, call_builtin;
  __ SmiTest(rax);
  __ j(not_zero, &check_heap_number, Label::kNear);
  __ Ret();

  __ bind(&check_heap_number);
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, &call_builtin, Label::kNear);
  __ Ret();

  __ bind(&call_builtin);
  __ pop(rcx);  // Pop return address.
  __ push(rax);
  __ push(rcx);  // Push return address.
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
}
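
// Only values that already are numbers take the fast paths above: a smi such
// as 42 and a heap number such as 4.2 are returned unchanged, while anything
// else (e.g. "42" or true) falls through to the TO_NUMBER builtin.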

void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in rsi.
  Label gc;
  __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);

  // Get the function info from the stack.
  __ movq(rdx, Operand(rsp, 1 * kPointerSize));

  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
  __ movq(rcx, Operand(rcx, Context::SlotOffset(map_index)));
  __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  __ LoadRoot(rbx, Heap::kEmptyFixedArrayRootIndex);
  __ LoadRoot(rcx, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(rdi, Heap::kUndefinedValueRootIndex);
  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), rbx);
  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rbx);
  __ movq(FieldOperand(rax, JSFunction::kPrototypeOrInitialMapOffset), rcx);
  __ movq(FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset), rdx);
  __ movq(FieldOperand(rax, JSFunction::kContextOffset), rsi);
  __ movq(FieldOperand(rax, JSFunction::kLiteralsOffset), rbx);
  __ movq(FieldOperand(rax, JSFunction::kNextFunctionLinkOffset), rdi);

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
  __ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
  __ movq(FieldOperand(rax, JSFunction::kCodeEntryOffset), rdx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  // Create a new closure through the slower runtime call.
  __ bind(&gc);
  __ pop(rcx);  // Temporarily remove return address.
  __ pop(rdx);
  __ push(rsi);
  __ push(rdx);
  __ PushRoot(Heap::kFalseValueRootIndex);
  __ push(rcx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
}
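
// The slow path above reconstructs the argument list for Runtime::kNewClosure
// as (context, shared function info, pretenure flag) beneath the return
// address; pushing kFalseValueRootIndex requests no pretenuring, so even
// runtime-allocated closures start out in new space.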

void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                        rax, rbx, rcx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ movq(rcx, Operand(rsp, 1 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex);
  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));

  // Set up the fixed slots.
  __ Set(rbx, 0);  // Set to NULL.
  __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
  __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi);
  __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);

  // Copy the global object from the previous context.
  __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);

  // Copy the qmlglobal object from the previous context.
  __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
  __ movq(Operand(rax, Context::SlotOffset(Context::QML_GLOBAL_INDEX)), rbx);

  // Initialize the rest of the slots to undefined.
  __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
  }

  // Return and remove the on-stack parameter.
  __ movq(rsi, rax);
  __ ret(1 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
}
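
// A sketch of the slot addressing used above, assuming the usual definition
// of Context::SlotOffset in contexts.h: contexts are laid out like fixed
// arrays, so
//
//   int Context::SlotOffset(int index) {
//     return FixedArray::kHeaderSize + index * kPointerSize - kHeapObjectTag;
//   }
//
// where the -kHeapObjectTag compensates for the tagged pointer in rax.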

void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [rsp + (1 * kPointerSize)]: function
  // [rsp + (2 * kPointerSize)]: serialized scope info

  // Try to allocate the context in new space.
  Label gc;
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        rax, rbx, rcx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ movq(rcx, Operand(rsp, 1 * kPointerSize));

  // Get the serialized scope info from the stack.
  __ movq(rbx, Operand(rsp, 2 * kPointerSize));

  // Set up the object header.
  __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex);
  __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
  __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(rcx, &after_sentinel, Label::kNear);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ cmpq(rcx, Immediate(0));
    __ Assert(equal, message);
  }
  __ movq(rcx, GlobalObjectOperand());
  __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
  __ movq(rcx, ContextOperand(rcx, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots.
  __ movq(ContextOperand(rax, Context::CLOSURE_INDEX), rcx);
  __ movq(ContextOperand(rax, Context::PREVIOUS_INDEX), rsi);
  __ movq(ContextOperand(rax, Context::EXTENSION_INDEX), rbx);

  // Copy the global object from the previous context.
  __ movq(rbx, ContextOperand(rsi, Context::GLOBAL_INDEX));
  __ movq(ContextOperand(rax, Context::GLOBAL_INDEX), rbx);

  // Copy the qmlglobal object from the previous context.
  __ movq(rbx, ContextOperand(rsi, Context::QML_GLOBAL_INDEX));
  __ movq(ContextOperand(rax, Context::QML_GLOBAL_INDEX), rbx);

  // Initialize the rest of the slots to the hole value.
  __ LoadRoot(rbx, Heap::kTheHoleValueRootIndex);
  for (int i = 0; i < slots_; i++) {
    __ movq(ContextOperand(rax, i + Context::MIN_CONTEXT_SLOTS), rbx);
  }

  // Return and remove the on-stack parameter.
  __ movq(rsi, rax);
  __ ret(2 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ bind(&gc);
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
}

static void GenerateFastCloneShallowArrayCommon(
    MacroAssembler* masm,
    int length,
    FastCloneShallowArrayStub::Mode mode,
    Label* fail) {
  // Registers on entry:
  //
  // rcx: boilerplate literal array.
  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;
  if (length > 0) {
    elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
        ? FixedDoubleArray::SizeFor(length)
        : FixedArray::SizeFor(length);
  }
  int size = JSArray::kSize + elements_size;

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size, rax, rbx, rdx, fail, TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length == 0)) {
      __ movq(rbx, FieldOperand(rcx, i));
      __ movq(FieldOperand(rax, i), rbx);
    }
  }

  if (length > 0) {
    // Get hold of the elements array of the boilerplate and setup the
    // elements pointer in the resulting object.
    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
    __ lea(rdx, Operand(rax, JSArray::kSize));
    __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);

    // Copy the elements array.
    if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
      for (int i = 0; i < elements_size; i += kPointerSize) {
        __ movq(rbx, FieldOperand(rcx, i));
        __ movq(FieldOperand(rdx, i), rbx);
      }
    } else {
      ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
      int i;
      for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
        __ movq(rbx, FieldOperand(rcx, i));
        __ movq(FieldOperand(rdx, i), rbx);
      }
      while (i < elements_size) {
        __ movsd(xmm0, FieldOperand(rcx, i));
        __ movsd(FieldOperand(rdx, i), xmm0);
        i += kDoubleSize;
      }
      ASSERT(i == elements_size);
    }
  }
}
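
// A worked size example for the helper above: cloning a boilerplate of
// length 3 with CLONE_ELEMENTS allocates JSArray::kSize plus
// FixedArray::SizeFor(3) bytes in a single new-space allocation; with
// length 0 no elements array is allocated and the boilerplate's elements
// pointer (the empty fixed array) is copied verbatim.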

void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [rsp + kPointerSize]: constant elements.
  // [rsp + (2 * kPointerSize)]: literal index.
  // [rsp + (3 * kPointerSize)]: literals array.

  // Load boilerplate object into rcx and check if we need to create a
  // boilerplate.
  __ movq(rcx, Operand(rsp, 3 * kPointerSize));
  __ movq(rax, Operand(rsp, 2 * kPointerSize));
  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
  __ movq(rcx,
          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
  Label slow_case;
  __ j(equal, &slow_case);

  FastCloneShallowArrayStub::Mode mode = mode_;
  // rcx is boilerplate object.
  Factory* factory = masm->isolate()->factory();
  if (mode == CLONE_ANY_ELEMENTS) {
    Label double_elements, check_fast_elements;
    __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
           factory->fixed_cow_array_map());
    __ j(not_equal, &check_fast_elements);
    GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
    __ ret(3 * kPointerSize);

    __ bind(&check_fast_elements);
    __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
           factory->fixed_array_map());
    __ j(not_equal, &double_elements);
    GenerateFastCloneShallowArrayCommon(masm, length_,
                                        CLONE_ELEMENTS, &slow_case);
    __ ret(3 * kPointerSize);

    __ bind(&double_elements);
    mode = CLONE_DOUBLE_ELEMENTS;
    // Fall through to generate the code to handle double elements.
  }

  if (FLAG_debug_code) {
    const char* message;
    Heap::RootListIndex expected_map_index;
    if (mode == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map_index = Heap::kFixedArrayMapRootIndex;
    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
    } else {
      ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
      message = "Expected copy-on-write fixed array";
      expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
    }
    __ push(rcx);
    __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
    __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
                   expected_map_index);
    __ Assert(equal, message);
    __ pop(rcx);
  }

  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
  __ ret(3 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}

void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  //
  // [rsp + kPointerSize]: object literal flags.
  // [rsp + (2 * kPointerSize)]: constant properties.
  // [rsp + (3 * kPointerSize)]: literal index.
  // [rsp + (4 * kPointerSize)]: literals array.

  // Load boilerplate object into rcx and check if we need to create a
  // boilerplate.
  Label slow_case;
  __ movq(rcx, Operand(rsp, 4 * kPointerSize));
  __ movq(rax, Operand(rsp, 3 * kPointerSize));
  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
  __ movq(rcx,
          FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
  __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
  __ j(equal, &slow_case);

  // Check that the boilerplate contains only fast properties and we can
  // statically determine the instance size.
  int size = JSObject::kHeaderSize + length_ * kPointerSize;
  __ movq(rax, FieldOperand(rcx, HeapObject::kMapOffset));
  __ movzxbq(rax, FieldOperand(rax, Map::kInstanceSizeOffset));
  __ cmpq(rax, Immediate(size >> kPointerSizeLog2));
  __ j(not_equal, &slow_case);

  // Allocate the JS object and copy header together with all in-object
  // properties from the boilerplate.
  __ AllocateInNewSpace(size, rax, rbx, rdx, &slow_case, TAG_OBJECT);
  for (int i = 0; i < size; i += kPointerSize) {
    __ movq(rbx, FieldOperand(rcx, i));
    __ movq(FieldOperand(rax, i), rbx);
  }

  // Return and remove the on-stack parameters.
  __ ret(4 * kPointerSize);

  __ bind(&slow_case);
  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}

// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
  Label patch;
  const Register argument = rax;
  const Register map = rdx;

  if (!types_.IsEmpty()) {
    __ movq(argument, Operand(rsp, 1 * kPointerSize));
  }

  // undefined -> false
  CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);

  // Boolean -> its value
  CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
  CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);

  // 'null' -> false.
  CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);

  if (types_.Contains(SMI)) {
    // Smis: 0 -> false, all other -> true
    Label not_smi;
    __ JumpIfNotSmi(argument, &not_smi, Label::kNear);
    // argument contains the correct return value already
    if (!tos_.is(argument)) {
      __ movq(tos_, argument);
    }
    __ ret(1 * kPointerSize);
    __ bind(&not_smi);
  } else if (types_.NeedsMap()) {
    // If we need a map later and have a Smi -> patch.
    __ JumpIfSmi(argument, &patch, Label::kNear);
  }

  if (types_.NeedsMap()) {
    __ movq(map, FieldOperand(argument, HeapObject::kMapOffset));

    if (types_.CanBeUndetectable()) {
      __ testb(FieldOperand(map, Map::kBitFieldOffset),
               Immediate(1 << Map::kIsUndetectable));
      // Undetectable -> false.
      Label not_undetectable;
      __ j(zero, &not_undetectable, Label::kNear);
      __ Set(tos_, 0);
      __ ret(1 * kPointerSize);
      __ bind(&not_undetectable);
    }
  }

  if (types_.Contains(SPEC_OBJECT)) {
    // spec object -> true.
    Label not_js_object;
    __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
    __ j(below, &not_js_object, Label::kNear);
    // argument contains the correct return value already.
    if (!tos_.is(argument)) {
      __ Set(tos_, 1);
    }
    __ ret(1 * kPointerSize);
    __ bind(&not_js_object);
  }

  if (types_.Contains(STRING)) {
    // String value -> false iff empty.
    Label not_string;
    __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
    __ j(above_equal, &not_string, Label::kNear);
    __ movq(tos_, FieldOperand(argument, String::kLengthOffset));
    __ ret(1 * kPointerSize);  // the string length is OK as the return value
    __ bind(&not_string);
  }

  if (types_.Contains(HEAP_NUMBER)) {
    // heap number -> false iff +0, -0, or NaN.
    Label not_heap_number, false_result;
    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &not_heap_number, Label::kNear);
    __ xorps(xmm0, xmm0);
    __ ucomisd(xmm0, FieldOperand(argument, HeapNumber::kValueOffset));
    __ j(zero, &false_result, Label::kNear);
    // argument contains the correct return value already.
    if (!tos_.is(argument)) {
      __ Set(tos_, 1);
    }
    __ ret(1 * kPointerSize);
    __ bind(&false_result);
    __ Set(tos_, 0);
    __ ret(1 * kPointerSize);
    __ bind(&not_heap_number);
  }

  __ bind(&patch);
  GenerateTypeTransition(masm);
}
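
// The checks above implement ECMAScript ToBoolean: undefined, null, false,
// the smi 0, the empty string, +0, -0 and NaN produce a zero in tos_;
// true, other smis, non-empty strings, other heap numbers and spec objects
// produce a non-zero value, with undetectable objects forced to false.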

void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  __ PushCallerSaved(save_doubles_);
  const int argument_count = 1;
  __ PrepareCallCFunction(argument_count);
#ifdef _WIN64
  __ LoadAddress(rcx, ExternalReference::isolate_address());
#else
  __ LoadAddress(rdi, ExternalReference::isolate_address());
#endif

  AllowExternalCallThatCantCauseGC scope(masm);
  __ CallCFunction(
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
      argument_count);
  __ PopCallerSaved(save_doubles_);
  __ ret(0);
}

void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                 Type type,
                                 Heap::RootListIndex value,
                                 bool result) {
  const Register argument = rax;
  if (types_.Contains(type)) {
    // If we see an expected oddball, return its ToBoolean value tos_.
    Label different_value;
    __ CompareRoot(argument, value);
    __ j(not_equal, &different_value, Label::kNear);
    if (!result) {
      // If we have to return zero, there is no way around clearing tos_.
      __ Set(tos_, 0);
    } else if (!tos_.is(argument)) {
      // If we have to return non-zero, we can re-use the argument if it is the
      // same register as the result, because we never see Smi-zero here.
      __ Set(tos_, 1);
    }
    __ ret(1 * kPointerSize);
    __ bind(&different_value);
  }
}

void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(rcx);  // Get return address, operand is now on top of stack.
  __ Push(Smi::FromInt(tos_.code()));
  __ Push(Smi::FromInt(types_.ToByte()));
  __ push(rcx);  // Push return address.
  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
      3,
      1);
}

class FloatingPointHelper : public AllStatic {
 public:
  // Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
  // If the operands are not both numbers, jump to not_numbers.
  // Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
  // NumberOperands assumes both are smis or heap numbers.
  static void LoadSSE2SmiOperands(MacroAssembler* masm);
  static void LoadSSE2NumberOperands(MacroAssembler* masm);
  static void LoadSSE2UnknownOperands(MacroAssembler* masm,
                                      Label* not_numbers);

  // Takes the operands in rdx and rax and loads them as integers in rax
  // and rcx.
  static void LoadAsIntegers(MacroAssembler* masm,
                             Label* operand_conversion_failure,
                             Register heap_number_map);
  // As above, but we know the operands to be numbers. In that case,
  // conversion can't fail.
  static void LoadNumbersAsIntegers(MacroAssembler* masm);

  // Tries to convert two values to smis losslessly.
  // This fails if either argument is not a Smi nor a HeapNumber,
  // or if it's a HeapNumber with a value that can't be converted
  // losslessly to a Smi. In that case, control transitions to the
  // on_not_smis label.
  // On success, either control goes to the on_success label (if one is
  // provided), or it falls through at the end of the code (if on_success
  // is NULL).
  // On success, both first and second holds Smi tagged values.
  // One of first or second must be non-Smi when entering.
  static void NumbersToSmis(MacroAssembler* masm,
                            Register first,
                            Register second,
                            Register scratch1,
                            Register scratch2,
                            Register scratch3,
                            Label* on_success,
                            Label* on_not_smis);
};

// Get the integer part of a heap number.
// Overwrites the contents of rdi, rbx and rcx. Result cannot be rdi or rbx.
void IntegerConvert(MacroAssembler* masm,
                    Register result,
                    Register source) {
  // Result may be rcx. If result and source are the same register, source will
  // be overwritten.
  ASSERT(!result.is(rdi) && !result.is(rbx));
  // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use
  // cvttsd2si (32-bit version) directly.
  Register double_exponent = rbx;
  Register double_value = rdi;
  Label done, exponent_63_plus;
  // Get double and extract exponent.
  __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset));
  // Clear result preemptively, in case we need to return zero.
  __ xorl(result, result);
  __ movq(xmm0, double_value);  // Save copy in xmm0 in case we need it there.
  // Double to remove sign bit, shift exponent down to least significant bits.
  // and subtract bias to get the unshifted, unbiased exponent.
  __ lea(double_exponent, Operand(double_value, double_value, times_1, 0));
  __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits));
  __ subl(double_exponent, Immediate(HeapNumber::kExponentBias));
  // Check whether the exponent is too big for a 63 bit unsigned integer.
  __ cmpl(double_exponent, Immediate(63));
  __ j(above_equal, &exponent_63_plus, Label::kNear);
  // Handle exponent range 0..62.
  __ cvttsd2siq(result, xmm0);
  __ jmp(&done, Label::kNear);

  __ bind(&exponent_63_plus);
  // Exponent negative or 63+.
  __ cmpl(double_exponent, Immediate(83));
  // If exponent negative or above 83, number contains no significant bits in
  // the range 0..2^31, so result is zero, and rcx already holds zero.
  __ j(above, &done, Label::kNear);

  // Exponent in range 63..83.
  // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely
  // the least significant exponent-52 bits.

  // Negate low bits of mantissa if value is negative.
  __ addq(double_value, double_value);  // Move sign bit to carry.
  __ sbbl(result, result);  // And convert carry to -1 in result register.
  // If double_value is negative, do (double_value-1)^-1, otherwise
  // (double_value-0)^0.
  __ addl(double_value, result);
  // Do xor in opposite directions depending on where we want the result
  // (depending on whether result is rcx or not).

  if (result.is(rcx)) {
    __ xorl(double_value, result);
    // Left shift mantissa by (exponent - mantissabits - 1) to save the
    // bits that have positional values below 2^32 (the extra -1 comes from the
    // doubling done above to move the sign bit into the carry flag).
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(double_value);
    __ movl(result, double_value);
  } else {
    // As the then-branch, but move double-value to result before shifting.
    __ xorl(result, double_value);
    __ leal(rcx, Operand(double_exponent, -HeapNumber::kMantissaBits - 1));
    __ shll_cl(result);
  }

  __ bind(&done);
}
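
// Worked example for the exponent_63_plus path above: for a double with
// unbiased exponent in 63..83, only the low end of the mantissa still
// carries bits below 2^32, so shifting the (sign-adjusted, doubled) low
// mantissa word left by exponent - kMantissaBits - 1 yields the int32
// result; exponents above 83 have no bits below 2^32 at all and convert
// to zero, matching ECMA-262 ToInt32 semantics.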

void UnaryOpStub::Generate(MacroAssembler* masm) {
  switch (operand_type_) {
    case UnaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case UnaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case UnaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case UnaryOpIC::GENERIC:
      GenerateGenericStub(masm);
      break;
  }
}

void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(rcx);  // Save return address.

  __ push(rax);  // the operand
  __ Push(Smi::FromInt(op_));
  __ Push(Smi::FromInt(mode_));
  __ Push(Smi::FromInt(operand_type_));

  __ push(rcx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
}

// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateSmiStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateSmiStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}

void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  Label slow;
  GenerateSmiCodeSub(masm, &slow, &slow, Label::kNear, Label::kNear);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}

void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  Label non_smi;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateTypeTransition(masm);
}

void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                     Label* non_smi,
                                     Label* slow,
                                     Label::Distance non_smi_near,
                                     Label::Distance slow_near) {
  Label done;
  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
  __ SmiNeg(rax, rax, &done, Label::kNear);
  __ jmp(slow, slow_near);
  __ bind(&done);
  __ ret(0);
}

void UnaryOpStub::GenerateSmiCodeBitNot(MacroAssembler* masm,
                                        Label* non_smi,
                                        Label::Distance non_smi_near) {
  __ JumpIfNotSmi(rax, non_smi, non_smi_near);
  __ SmiNot(rax, rax);
  __ ret(0);
}

// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateHeapNumberStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateHeapNumberStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}

void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  Label non_smi, slow, call_builtin;
  GenerateSmiCodeSub(masm, &non_smi, &call_builtin, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
  __ bind(&call_builtin);
  GenerateGenericCodeFallback(masm);
}

void UnaryOpStub::GenerateHeapNumberStubBitNot(
    MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateTypeTransition(masm);
}

void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
                                            Label* slow) {
  // Check if the operand is a heap number.
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, slow);

  // Operand is a float, negate its value by flipping the sign bit.
  if (mode_ == UNARY_OVERWRITE) {
    __ Set(kScratchRegister, 0x01);
    __ shl(kScratchRegister, Immediate(63));
    __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister);
  } else {
    // Allocate a heap number before calculating the answer,
    // so we don't have an untagged double around during GC.
    Label slow_allocate_heapnumber, heapnumber_allocated;
    __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber);
    __ jmp(&heapnumber_allocated);

    __ bind(&slow_allocate_heapnumber);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(rax);
      __ CallRuntime(Runtime::kNumberAlloc, 0);
      __ movq(rcx, rax);
      __ pop(rax);
    }
    __ bind(&heapnumber_allocated);
    // rcx: allocated 'empty' number

    // Copy the double value to the new heap number, flipping the sign.
    __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Set(kScratchRegister, 0x01);
    __ shl(kScratchRegister, Immediate(63));
    __ xor_(rdx, kScratchRegister);  // Flip sign.
    __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx);
    __ movq(rax, rcx);
  }
  __ ret(0);
}

void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
                                               Label* slow) {
  // Check if the operand is a heap number.
  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                 Heap::kHeapNumberMapRootIndex);
  __ j(not_equal, slow);

  // Convert the heap number in rax to an untagged integer in rcx.
  IntegerConvert(masm, rax, rax);

  // Do the bitwise operation and smi tag the result.
  __ notl(rax);
  __ Integer32ToSmi(rax, rax);
  __ ret(0);
}

// TODO(svenpanne): Use virtual functions instead of switch.
void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
  switch (op_) {
    case Token::SUB:
      GenerateGenericStubSub(masm);
      break;
    case Token::BIT_NOT:
      GenerateGenericStubBitNot(masm);
      break;
    default:
      UNREACHABLE();
  }
}

void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeSub(masm, &non_smi, &slow, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeSub(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}

void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
  Label non_smi, slow;
  GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
  __ bind(&non_smi);
  GenerateHeapNumberCodeBitNot(masm, &slow);
  __ bind(&slow);
  GenerateGenericCodeFallback(masm);
}

void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
  // Handle the slow case by jumping to the JavaScript builtin.
  __ pop(rcx);  // pop return address
  __ push(rax);
  __ push(rcx);  // push return address
  switch (op_) {
    case Token::SUB:
      __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
      break;
    case Token::BIT_NOT:
      __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}

void UnaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name = NULL;  // Make g++ happy.
  switch (mode_) {
    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
  }
  stream->Add("UnaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              UnaryOpIC::GetName(operand_type_));
}

void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(rcx);  // Save return address.
  __ push(rdx);
  __ push(rax);
  // Left and right arguments are now on top.
  // Push this stub's key. Although the operation and the type info are
  // encoded into the key, the encoding is opaque, so push them too.
  __ Push(Smi::FromInt(MinorKey()));
  __ Push(Smi::FromInt(op_));
  __ Push(Smi::FromInt(operands_type_));

  __ push(rcx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
                        masm->isolate()),
      5,
      1);
}

void BinaryOpStub::Generate(MacroAssembler* masm) {
  // Explicitly allow generation of nested stubs. It is safe here because
  // generation code does not use any raw pointers.
  AllowStubCallsScope allow_stub_calls(masm, true);

  switch (operands_type_) {
    case BinaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      break;
    case BinaryOpIC::SMI:
      GenerateSmiStub(masm);
      break;
    case BinaryOpIC::INT32:
      UNREACHABLE();
      // The int32 case is identical to the Smi case. We avoid creating this
      // ic state on x64.
      break;
    case BinaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
      break;
    case BinaryOpIC::ODDBALL:
      GenerateOddballStub(masm);
      break;
    case BinaryOpIC::BOTH_STRING:
      GenerateBothStringStub(masm);
      break;
    case BinaryOpIC::STRING:
      GenerateStringStub(masm);
      break;
    case BinaryOpIC::GENERIC:
      GenerateGeneric(masm);
      break;
    default:
      UNREACHABLE();
  }
}

void BinaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name;
  switch (mode_) {
    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
    default: overwrite_name = "UnknownOverwrite"; break;
  }
  stream->Add("BinaryOpStub_%s_%s_%s",
              op_name,
              overwrite_name,
              BinaryOpIC::GetName(operands_type_));
}

void BinaryOpStub::GenerateSmiCode(
    MacroAssembler* masm,
    Label* slow,
    SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {

  // Arguments to BinaryOpStub are in rdx and rax.
  Register left = rdx;
  Register right = rax;

  // We only generate heapnumber answers for overflowing calculations
  // for the four basic arithmetic operations and logical right shift by 0.
  bool generate_inline_heapnumber_results =
      (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
      (op_ == Token::ADD || op_ == Token::SUB ||
       op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);

  // Smi check of both operands. If op is BIT_OR, the check is delayed
  // until after the OR operation.
  Label not_smis;
  Label use_fp_on_smis;
  Label fail;

  if (op_ != Token::BIT_OR) {
    Comment smi_check_comment(masm, "-- Smi check arguments");
    __ JumpIfNotBothSmi(left, right, &not_smis);
  }

  Label smi_values;
  __ bind(&smi_values);
  // Perform the operation.
  Comment perform_smi(masm, "-- Perform smi operation");
  switch (op_) {
    case Token::ADD:
      ASSERT(right.is(rax));
      __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative.
      break;

    case Token::SUB:
      __ SmiSub(left, left, right, &use_fp_on_smis);
      __ movq(rax, left);
      break;

    case Token::MUL:
      ASSERT(right.is(rax));
      __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative.
      break;

    case Token::DIV:
      // SmiDiv will not accept left in rdx or right in rax.
      left = rcx;
      right = rbx;
      __ movq(rbx, rax);
      __ movq(rcx, rdx);
      __ SmiDiv(rax, left, right, &use_fp_on_smis);
      break;

    case Token::MOD:
      // SmiMod will not accept left in rdx or right in rax.
      left = rcx;
      right = rbx;
      __ movq(rbx, rax);
      __ movq(rcx, rdx);
      __ SmiMod(rax, left, right, &use_fp_on_smis);
      break;

    case Token::BIT_OR: {
      ASSERT(right.is(rax));
      __ SmiOrIfSmis(right, right, left, &not_smis);  // BIT_OR is commutative.
      break;
      }
    case Token::BIT_XOR:
      ASSERT(right.is(rax));
      __ SmiXor(right, right, left);  // BIT_XOR is commutative.
      break;

    case Token::BIT_AND:
      ASSERT(right.is(rax));
      __ SmiAnd(right, right, left);  // BIT_AND is commutative.
      break;

    case Token::SHL:
      __ SmiShiftLeft(left, left, right);
      __ movq(rax, left);
      break;

    case Token::SAR:
      __ SmiShiftArithmeticRight(left, left, right);
      __ movq(rax, left);
      break;

    case Token::SHR:
      __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
      __ movq(rax, left);
      break;

    default:
      UNREACHABLE();
  }

  // 5. Emit return of result in rax. Some operations have registers pushed.
  __ ret(0);

  if (use_fp_on_smis.is_linked()) {
    // 6. For some operations emit inline code to perform floating point
    //    operations on known smis (e.g., if the result of the operation
    //    overflowed the smi range).
    __ bind(&use_fp_on_smis);
    if (op_ == Token::DIV || op_ == Token::MOD) {
      // Restore left and right to rdx and rax.
      __ movq(rdx, rcx);
      __ movq(rax, rbx);
    }

    if (generate_inline_heapnumber_results) {
      __ AllocateHeapNumber(rcx, rbx, slow);
      Comment perform_float(masm, "-- Perform float operation on smis");
      if (op_ == Token::SHR) {
        __ SmiToInteger32(left, left);
        __ cvtqsi2sd(xmm0, left);
      } else {
        FloatingPointHelper::LoadSSE2SmiOperands(masm);
        switch (op_) {
          case Token::ADD: __ addsd(xmm0, xmm1); break;
          case Token::SUB: __ subsd(xmm0, xmm1); break;
          case Token::MUL: __ mulsd(xmm0, xmm1); break;
          case Token::DIV: __ divsd(xmm0, xmm1); break;
          default: UNREACHABLE();
        }
      }
      __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
      __ movq(rax, rcx);
      __ ret(0);
    } else {
      __ jmp(&fail);
    }
  }

  // 7. Non-smi operands reach the end of the code generated by
  //    GenerateSmiCode, and fall through to subsequent code,
  //    with the operands in rdx and rax.
  //    But first we check if non-smi values are HeapNumbers holding
  //    values that could be smi.
  __ bind(&not_smis);
  Comment done_comment(masm, "-- Enter non-smi code");
  FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
                                     &smi_values, &fail);
  __ jmp(&smi_values);
  __ bind(&fail);
}
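
// Example of the use_fp_on_smis path above: x64 smis carry 32-bit payloads,
// so kMaxInt + 1 overflows SmiAdd and is recomputed in xmm0/xmm1 and boxed
// as a heap number; similarly 1 / 2 cannot produce a smi, so SmiDiv bails
// out and the quotient is computed as the double 0.5.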

void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
                                             Label* allocation_failure,
                                             Label* non_numeric_failure) {
  switch (op_) {
    case Token::ADD:
    case Token::SUB:
    case Token::MUL:
    case Token::DIV: {
      FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);

      switch (op_) {
        case Token::ADD: __ addsd(xmm0, xmm1); break;
        case Token::SUB: __ subsd(xmm0, xmm1); break;
        case Token::MUL: __ mulsd(xmm0, xmm1); break;
        case Token::DIV: __ divsd(xmm0, xmm1); break;
        default: UNREACHABLE();
      }
      GenerateHeapResultAllocation(masm, allocation_failure);
      __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
      __ ret(0);
      break;
    }
    case Token::MOD: {
      // For MOD we jump to the allocation_failure label, to call runtime.
      __ jmp(allocation_failure);
      break;
    }
    case Token::BIT_OR:
    case Token::BIT_AND:
    case Token::BIT_XOR:
    case Token::SAR:
    case Token::SHL:
    case Token::SHR: {
      Label non_smi_shr_result;
      Register heap_number_map = r9;
      __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
      FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
                                          heap_number_map);
      switch (op_) {
        case Token::BIT_OR:  __ orl(rax, rcx); break;
        case Token::BIT_AND: __ andl(rax, rcx); break;
        case Token::BIT_XOR: __ xorl(rax, rcx); break;
        case Token::SAR: __ sarl_cl(rax); break;
        case Token::SHL: __ shll_cl(rax); break;
        case Token::SHR: {
          __ shrl_cl(rax);
          // Check if result is negative. This can only happen for a shift
          // by zero.
          __ testl(rax, rax);
          __ j(negative, &non_smi_shr_result);
          break;
        }
        default: UNREACHABLE();
      }
      STATIC_ASSERT(kSmiValueSize == 32);
      // Tag smi result and return.
      __ Integer32ToSmi(rax, rax);
      __ Ret();

      // Logical shift right can produce an unsigned int32 that is not
      // an int32, and so is not in the smi range. Allocate a heap number
      // in that case.
      if (op_ == Token::SHR) {
        __ bind(&non_smi_shr_result);
        Label allocation_failed;
        __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64).
        // Allocate heap number in new space.
        // Not using AllocateHeapNumber macro in order to reuse
        // already loaded heap_number_map.
        __ AllocateInNewSpace(HeapNumber::kSize,
                              rax,
                              rdx,
                              no_reg,
                              &allocation_failed,
                              TAG_OBJECT);
        // Set the map.
        if (FLAG_debug_code) {
          __ AbortIfNotRootValue(heap_number_map,
                                 Heap::kHeapNumberMapRootIndex,
                                 "HeapNumberMap register clobbered.");
        }
        __ movq(FieldOperand(rax, HeapObject::kMapOffset),
                heap_number_map);
        __ cvtqsi2sd(xmm0, rbx);
        __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
        __ Ret();

        __ bind(&allocation_failed);
        // We need tagged values in rdx and rax for the following code,
        // not int32 in rax and rcx.
        __ Integer32ToSmi(rax, rcx);
        __ Integer32ToSmi(rdx, rbx);
        __ jmp(allocation_failure);
      }
      break;
    }
    default: UNREACHABLE(); break;
  }
  // No fall-through from this generated code.
  if (FLAG_debug_code) {
    __ Abort("Unexpected fall-through in "
             "BinaryStub::GenerateFloatingPointCode.");
  }
}

void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
  ASSERT(op_ == Token::ADD);
  Label left_not_string, call_runtime;

  // Registers containing left and right operands respectively.
  Register left = rdx;
  Register right = rax;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &left_not_string, Label::kNear);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &left_not_string, Label::kNear);
  StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_left_stub);

  // Left operand is not a string, test right.
  __ bind(&left_not_string);
  __ JumpIfSmi(right, &call_runtime, Label::kNear);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &call_runtime, Label::kNear);

  StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_right_stub);

  // Neither argument is a string.
  __ bind(&call_runtime);
}
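
// Example of the dispatch above: for "a" + x the left operand is a string,
// so string_add_left_stub is entered knowing it need not re-check the left
// argument; for 1 + 2 neither test fires and control reaches call_runtime.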

void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
  GenerateRegisterArgsPush(masm);
  switch (op_) {
    case Token::ADD:
      __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
      break;
    case Token::SUB:
      __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
      break;
    case Token::MUL:
      __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
      break;
    case Token::DIV:
      __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
      break;
    case Token::MOD:
      __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
      break;
    case Token::BIT_OR:
      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
      break;
    case Token::BIT_AND:
      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
      break;
    case Token::BIT_XOR:
      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
      break;
    case Token::SAR:
      __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
      break;
    case Token::SHL:
      __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
      break;
    case Token::SHR:
      __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
      break;
    default:
      UNREACHABLE();
  }
}

void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
  Label call_runtime;
  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
      result_type_ == BinaryOpIC::SMI) {
    // Only allow smi results.
    GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
  } else {
    // Allow heap number result and don't make a transition if a heap number
    // cannot be allocated.
    GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
  }

  // Code falls through if the result is not returned as either a smi or heap
  // number.
  GenerateTypeTransition(masm);

  if (call_runtime.is_linked()) {
    __ bind(&call_runtime);
    GenerateCallRuntimeCode(masm);
  }
}

void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
  ASSERT(operands_type_ == BinaryOpIC::STRING);
  ASSERT(op_ == Token::ADD);
  GenerateStringAddCode(masm);
  // Try to add arguments as strings, otherwise, transition to the generic
  // BinaryOpIC type.
  GenerateTypeTransition(masm);
}

void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
  Label call_runtime;
  ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
  ASSERT(op_ == Token::ADD);
  // If both arguments are strings, call the string add stub.
  // Otherwise, do a transition.

  // Registers containing left and right operands respectively.
  Register left = rdx;
  Register right = rax;

  // Test if left operand is a string.
  __ JumpIfSmi(left, &call_runtime);
  __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &call_runtime);

  // Test if right operand is a string.
  __ JumpIfSmi(right, &call_runtime);
  __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx);
  __ j(above_equal, &call_runtime);

  StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
  GenerateRegisterArgsPush(masm);
  __ TailCallStub(&string_add_stub);

  __ bind(&call_runtime);
  GenerateTypeTransition(masm);
}

void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
  if (op_ == Token::ADD) {
    // Handle string addition here, because it is the only operation
    // that does not do a ToNumber conversion on the operands.
    GenerateStringAddCode(masm);
  }

  // Convert oddball arguments to numbers.
  Label check, done;
  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &check, Label::kNear);
  if (Token::IsBitOp(op_)) {
    __ xor_(rdx, rdx);
  } else {
    __ LoadRoot(rdx, Heap::kNanValueRootIndex);
  }
  __ jmp(&done, Label::kNear);
  __ bind(&check);
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, &done, Label::kNear);
  if (Token::IsBitOp(op_)) {
    __ xor_(rax, rax);
  } else {
    __ LoadRoot(rax, Heap::kNanValueRootIndex);
  }
  __ bind(&done);

  GenerateHeapNumberStub(masm);
}

void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
  Label gc_required, not_number;
  GenerateFloatingPointCode(masm, &gc_required, &not_number);

  __ bind(&not_number);
  GenerateTypeTransition(masm);

  __ bind(&gc_required);
  GenerateCallRuntimeCode(masm);
}

void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
  Label call_runtime, call_string_add_or_runtime;

  GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);

  GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);

  __ bind(&call_string_add_or_runtime);
  if (op_ == Token::ADD) {
    GenerateStringAddCode(masm);
  }

  __ bind(&call_runtime);
  GenerateCallRuntimeCode(masm);
}

void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
                                                Label* alloc_failure) {
  Label skip_allocation;
  OverwriteMode mode = mode_;
  switch (mode) {
    case OVERWRITE_LEFT: {
      // If the argument in rdx is already an object, we skip the
      // allocation of a heap number.
      __ JumpIfNotSmi(rdx, &skip_allocation);
      // Allocate a heap number for the result. Keep rax and rdx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
      // Now rdx can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ movq(rdx, rbx);
      __ bind(&skip_allocation);
      // Use object in rdx as a result holder
      __ movq(rax, rdx);
      break;
    }
    case OVERWRITE_RIGHT:
      // If the argument in rax is already an object, we skip the
      // allocation of a heap number.
      __ JumpIfNotSmi(rax, &skip_allocation);
      // Fall through!
    case NO_OVERWRITE:
      // Allocate a heap number for the result. Keep rax and rdx intact
      // for the possible runtime call.
      __ AllocateHeapNumber(rbx, rcx, alloc_failure);
      // Now rax can be overwritten losing one of the arguments as we are
      // now done and will not need it any more.
      __ movq(rax, rbx);
      __ bind(&skip_allocation);
      break;
    default: UNREACHABLE();
  }
}

void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
  __ pop(rcx);
  __ push(rdx);
  __ push(rax);
  __ push(rcx);
}

void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
  // TAGGED case:
  //   Input:
  //     rsp[8]: argument (should be number).
  //     rsp[0]: return address.
  //   Output:
  //     rax: tagged double result.
  // UNTAGGED case:
  //   Input:
  //     rsp[0]: return address.
  //     xmm1: untagged double input argument
  //   Output:
  //     xmm1: untagged double result.

  Label runtime_call;
  Label runtime_call_clear_stack;
  Label skip_cache;
  const bool tagged = (argument_type_ == TAGGED);
  if (tagged) {
    Label input_not_smi, loaded;
    // Test that rax is a number.
    __ movq(rax, Operand(rsp, kPointerSize));
    __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear);
    // Input is a smi. Untag and load it onto the FPU stack.
    // Then load the bits of the double into rbx.
    __ SmiToInteger32(rax, rax);
    __ subq(rsp, Immediate(kDoubleSize));
    __ cvtlsi2sd(xmm1, rax);
    __ movsd(Operand(rsp, 0), xmm1);
    __ movq(rbx, xmm1);
    __ movq(rdx, xmm1);
    __ fld_d(Operand(rsp, 0));
    __ addq(rsp, Immediate(kDoubleSize));
    __ jmp(&loaded, Label::kNear);

    __ bind(&input_not_smi);
    // Check if input is a HeapNumber.
    __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex);
    __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
    __ j(not_equal, &runtime_call);
    // Input is a HeapNumber. Push it on the FPU stack and load its
    // bits into rbx.
    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
    __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset));
    __ movq(rdx, rbx);

    __ bind(&loaded);
  } else {  // UNTAGGED.
    __ movq(rbx, xmm1);
    __ movq(rdx, xmm1);
  }

  // ST[0] == double value, if TAGGED.
  // rbx = bits of double value.
  // rdx = also bits of double value.
  // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic):
  //   h = h0 = bits ^ (bits >> 32);
  //   h ^= h >> 16;
  //   h ^= h >> 8;
  //   h = h & (cacheSize - 1);
  // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1)
  __ sar(rdx, Immediate(32));
  __ xorl(rdx, rbx);
  __ movl(rcx, rdx);
  __ movl(rax, rdx);
  __ movl(rdi, rdx);
  __ sarl(rdx, Immediate(8));
  __ sarl(rcx, Immediate(16));
  __ sarl(rax, Immediate(24));
  __ xorl(rcx, rdx);
  __ xorl(rax, rdi);
  __ xorl(rcx, rax);
  ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
  __ andl(rcx, Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
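  // A C++ sketch of the hash the instruction sequence above computes (for
  // reference only; it is assumed to mirror the hash used by the runtime
  // side of the transcendental cache):
  //
  //   uint32_t hash(uint64_t bits) {
  //     int32_t h = static_cast<int32_t>(bits ^ (bits >> 32));
  //     h = h ^ (h >> 8) ^ (h >> 16) ^ (h >> 24);  // arithmetic shifts
  //     return h & (TranscendentalCache::SubCache::kCacheSize - 1);
  //   }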

  // ST[0] == double value.
  // rbx = bits of double value.
  // rcx = TranscendentalCache::hash(double value).
  ExternalReference cache_array =
      ExternalReference::transcendental_cache_array_address(masm->isolate());
  __ movq(rax, cache_array);
  int cache_array_index =
      type_ * sizeof(Isolate::Current()->transcendental_cache()->caches_[0]);
  __ movq(rax, Operand(rax, cache_array_index));
  // rax points to the cache for the type type_.
  // If NULL, the cache hasn't been initialized yet, so go through runtime.
  __ testq(rax, rax);
  __ j(zero, &runtime_call_clear_stack);  // Only clears stack if TAGGED.
#ifdef DEBUG
  // Check that the layout of cache elements match expectations.
  {  // NOLINT - doesn't like a single brace on a line.
    TranscendentalCache::SubCache::Element test_elem[2];
    char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
    char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
    char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
    char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
    char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
    // Two uint_32's and a pointer per element.
    CHECK_EQ(16, static_cast<int>(elem2_start - elem_start));
    CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start));
    CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start));
    CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start));
  }
#endif
  // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16].
  __ addl(rcx, rcx);
  __ lea(rcx, Operand(rax, rcx, times_8, 0));
  // Check if cache matches: Double value is stored in uint32_t[2] array.
  Label cache_miss;
  __ cmpq(rbx, Operand(rcx, 0));
  __ j(not_equal, &cache_miss, Label::kNear);
  // Cache hit!
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->transcendental_cache_hit(), 1);
  __ movq(rax, Operand(rcx, 2 * kIntSize));
  if (tagged) {
    __ fstp(0);  // Clear FPU stack.
    __ ret(kPointerSize);
  } else {  // UNTAGGED.
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();
  }

  __ bind(&cache_miss);
  __ IncrementCounter(counters->transcendental_cache_miss(), 1);
  // Update cache with new value.
  if (tagged) {
    __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack);
  } else {  // UNTAGGED.
    __ AllocateHeapNumber(rax, rdi, &skip_cache);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
    __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset));
  }
  GenerateOperation(masm, type_);
  __ movq(Operand(rcx, 0), rbx);
  __ movq(Operand(rcx, 2 * kIntSize), rax);
  __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset));
  if (tagged) {
    __ ret(kPointerSize);
  } else {  // UNTAGGED.
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();

    // Skip cache and return answer directly, only in untagged case.
    __ bind(&skip_cache);
    __ subq(rsp, Immediate(kDoubleSize));
    __ movsd(Operand(rsp, 0), xmm1);
    __ fld_d(Operand(rsp, 0));
    GenerateOperation(masm, type_);
    __ fstp_d(Operand(rsp, 0));
    __ movsd(xmm1, Operand(rsp, 0));
    __ addq(rsp, Immediate(kDoubleSize));
    // We return the value in xmm1 without adding it to the cache, but
    // we cause a scavenging GC so that future allocations will succeed.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      // Allocate an unused object bigger than a HeapNumber.
      __ Push(Smi::FromInt(2 * kDoubleSize));
      __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
    }
    __ Ret();
  }

  // Call runtime, doing whatever allocation and cleanup is necessary.
  if (tagged) {
    __ bind(&runtime_call_clear_stack);
    __ fstp(0);
    __ bind(&runtime_call);
    __ TailCallExternalReference(
        ExternalReference(RuntimeFunction(), masm->isolate()), 1, 1);
  } else {  // UNTAGGED.
    __ bind(&runtime_call_clear_stack);
    __ bind(&runtime_call);
    __ AllocateHeapNumber(rax, rdi, &skip_cache);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1);
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
      __ push(rax);
      __ CallRuntime(RuntimeFunction(), 1);
    }
    __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
    __ Ret();
  }
}

Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
  switch (type_) {
    // Add more cases when necessary.
    case TranscendentalCache::SIN: return Runtime::kMath_sin;
    case TranscendentalCache::COS: return Runtime::kMath_cos;
    case TranscendentalCache::TAN: return Runtime::kMath_tan;
    case TranscendentalCache::LOG: return Runtime::kMath_log;
    default:
      UNIMPLEMENTED();
      return Runtime::kAbort;
  }
}

void TranscendentalCacheStub::GenerateOperation(
    MacroAssembler* masm, TranscendentalCache::Type type) {
  // Registers:
  // rax: Newly allocated HeapNumber, which must be preserved.
  // rbx: Bits of input double. Must be preserved.
  // rcx: Pointer to cache entry. Must be preserved.
  // st(0): Input double
  Label done;
  if (type == TranscendentalCache::SIN ||
      type == TranscendentalCache::COS ||
      type == TranscendentalCache::TAN) {
    // Both fsin and fcos require arguments in the range +/-2^63 and
    // return NaN for infinities and NaN. They can share all code except
    // the actual fsin/fcos operation.
    Label in_range;
    // If argument is outside the range -2^63..2^63, fsin/cos doesn't
    // work. We must reduce it to the appropriate range.
    __ movq(rdi, rbx);
    // Move exponent and sign bits to low bits.
    __ shr(rdi, Immediate(HeapNumber::kMantissaBits));
    // Remove sign bit.
    __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1));
    int supported_exponent_limit = (63 + HeapNumber::kExponentBias);
    __ cmpl(rdi, Immediate(supported_exponent_limit));
    __ j(below, &in_range);
    // Check for infinity and NaN. Both return NaN for sin.
    __ cmpl(rdi, Immediate(0x7ff));
    Label non_nan_result;
    __ j(not_equal, &non_nan_result, Label::kNear);
    // Input is +/-Infinity or NaN. Result is NaN.
    __ fstp(0);
    // NaN is represented by 0x7ff8000000000000.
    __ subq(rsp, Immediate(kPointerSize));
    __ movl(Operand(rsp, 4), Immediate(0x7ff80000));
    __ movl(Operand(rsp, 0), Immediate(0x00000000));
    __ fld_d(Operand(rsp, 0));
    __ addq(rsp, Immediate(kPointerSize));
    __ jmp(&done);

    __ bind(&non_nan_result);

    // Use fpmod to restrict argument to the range +/-2*PI.
    __ movq(rdi, rax);  // Save rax before using fnstsw_ax.
    __ fldpi();
    __ fadd(0);
    __ fld(1);
    // FPU Stack: input, 2*pi, input.
    {
      Label no_exceptions;
      __ fwait();
      __ fnstsw_ax();
      // Clear if Illegal Operand or Zero Division exceptions are set.
      __ testl(rax, Immediate(5));  // #IO and #ZD flags of FPU status word.
      __ j(zero, &no_exceptions);
      __ fnclex();
      __ bind(&no_exceptions);
    }

    // Compute st(0) % st(1)
    {
      Label partial_remainder_loop;
      __ bind(&partial_remainder_loop);
      __ fprem1();
      __ fwait();
      __ fnstsw_ax();
      __ testl(rax, Immediate(0x400));  // Check C2 bit of FPU status word.
      // If C2 is set, computation only has partial result. Loop to
      // continue computation.
      __ j(not_zero, &partial_remainder_loop);
    }
    // FPU Stack: input, 2*pi, input % 2*pi
    __ fstp(2);
    // FPU Stack: input % 2*pi, 2*pi,
    __ fstp(0);
    // FPU Stack: input % 2*pi
    __ movq(rax, rdi);  // Restore rax, pointer to the new HeapNumber.
    __ bind(&in_range);
    switch (type) {
      case TranscendentalCache::SIN:
        __ fsin();
        break;
      case TranscendentalCache::COS:
        __ fcos();
        break;
      case TranscendentalCache::TAN:
        // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
        // FP register stack.
        __ fptan();
        __ fstp(0);  // Pop FP register stack.
        break;
      default:
        UNREACHABLE();
    }
    __ bind(&done);
  } else {
    ASSERT(type == TranscendentalCache::LOG);
    __ fldln2();
    __ fxch();
    __ fyl2x();
  }
}
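
// Note on the partial_remainder_loop above: fprem1 only computes a partial
// remainder when the operands' exponents are far apart, signalling "not done
// yet" through the C2 bit (0x400) of the FPU status word; re-issuing fprem1
// until C2 reads clear is the standard x87 argument-reduction idiom.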

// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
void FloatingPointHelper::LoadNumbersAsIntegers(MacroAssembler* masm) {
  // Check float operands.
  Label done;
  Label rax_is_smi;
  Label rax_is_object;
  Label rdx_is_object;

  __ JumpIfNotSmi(rdx, &rdx_is_object);
  __ SmiToInteger32(rdx, rdx);
  __ JumpIfSmi(rax, &rax_is_smi);

  __ bind(&rax_is_object);
  IntegerConvert(masm, rcx, rax);  // Uses rdi, rcx and rbx.
  __ jmp(&done);

  __ bind(&rdx_is_object);
  IntegerConvert(masm, rdx, rdx);  // Uses rdi, rcx and rbx.
  __ JumpIfNotSmi(rax, &rax_is_object);
  __ bind(&rax_is_smi);
  __ SmiToInteger32(rcx, rax);

  __ bind(&done);
  __ movl(rax, rdx);
}

// Input: rdx, rax are the left and right objects of a bit op.
// Output: rax, rcx are left and right integers for a bit op.
// Jump to conversion_failure: rdx and rax are unchanged.
void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
                                         Label* conversion_failure,
                                         Register heap_number_map) {
  // Check float operands.
  Label arg1_is_object, check_undefined_arg1;
  Label arg2_is_object, check_undefined_arg2;
  Label load_arg2, done;

  __ JumpIfNotSmi(rdx, &arg1_is_object);
  __ SmiToInteger32(r8, rdx);
  __ jmp(&load_arg2);

  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
  __ bind(&check_undefined_arg1);
  __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, conversion_failure);
  __ Set(r8, 0);
  __ jmp(&load_arg2);

  __ bind(&arg1_is_object);
  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, &check_undefined_arg1);
  // Get the untagged integer version of the rdx heap number in rcx.
  IntegerConvert(masm, r8, rdx);

  // Here r8 has the untagged integer, rax has a Smi or a heap number.
  __ bind(&load_arg2);
  // Test if arg2 is a Smi.
  __ JumpIfNotSmi(rax, &arg2_is_object);
  __ SmiToInteger32(rcx, rax);
  __ jmp(&done);

  // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
  __ bind(&check_undefined_arg2);
  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
  __ j(not_equal, conversion_failure);
  __ Set(rcx, 0);
  __ jmp(&done);

  __ bind(&arg2_is_object);
  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, &check_undefined_arg2);
  // Get the untagged integer version of the rax heap number in rcx.
  IntegerConvert(masm, rcx, rax);
  __ bind(&done);
  __ movl(rax, r8);
}

void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) {
  __ SmiToInteger32(kScratchRegister, rdx);
  __ cvtlsi2sd(xmm0, kScratchRegister);
  __ SmiToInteger32(kScratchRegister, rax);
  __ cvtlsi2sd(xmm1, kScratchRegister);
}

void FloatingPointHelper::LoadSSE2NumberOperands(MacroAssembler* masm) {
  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, done;
  // Load operand in rdx into xmm0.
  __ JumpIfSmi(rdx, &load_smi_rdx);
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  // Load operand in rax into xmm1.
  __ JumpIfSmi(rax, &load_smi_rax);
  __ bind(&load_nonsmi_rax);
  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_rdx);
  __ SmiToInteger32(kScratchRegister, rdx);
  __ cvtlsi2sd(xmm0, kScratchRegister);
  __ JumpIfNotSmi(rax, &load_nonsmi_rax);

  __ bind(&load_smi_rax);
  __ SmiToInteger32(kScratchRegister, rax);
  __ cvtlsi2sd(xmm1, kScratchRegister);

  __ bind(&done);
}

void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm,
                                                  Label* not_numbers) {
  Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done;
  // Load operand in rdx into xmm0, or branch to not_numbers.
  __ LoadRoot(rcx, Heap::kHeapNumberMapRootIndex);
  __ JumpIfSmi(rdx, &load_smi_rdx);
  __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);  // Argument in rdx is not a number.
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  // Load operand in rax into xmm1, or branch to not_numbers.
  __ JumpIfSmi(rax, &load_smi_rax);

  __ bind(&load_nonsmi_rax);
  __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), rcx);
  __ j(not_equal, not_numbers);
  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
  __ jmp(&done);

  __ bind(&load_smi_rdx);
  __ SmiToInteger32(kScratchRegister, rdx);
  __ cvtlsi2sd(xmm0, kScratchRegister);
  __ JumpIfNotSmi(rax, &load_nonsmi_rax);

  __ bind(&load_smi_rax);
  __ SmiToInteger32(kScratchRegister, rax);
  __ cvtlsi2sd(xmm1, kScratchRegister);

  __ bind(&done);
}
void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
                                        Register first,
                                        Register second,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Label* on_success,
                                        Label* on_not_smis) {
  Register heap_number_map = scratch3;
  Register smi_result = scratch1;
  Label done, first_smi;

  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);

  __ JumpIfSmi(first, &first_smi, Label::kNear);
  __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, on_not_smis);
  // Convert HeapNumber to smi if possible.
  __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
  __ movq(scratch2, xmm0);
  __ cvttsd2siq(smi_result, xmm0);
  // Check if conversion was successful by converting back and
  // comparing to the original double's bits.
  __ cvtlsi2sd(xmm1, smi_result);
  __ movq(kScratchRegister, xmm1);
  __ cmpq(scratch2, kScratchRegister);
  __ j(not_equal, on_not_smis);
  __ Integer32ToSmi(first, smi_result);

  __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
  __ bind(&first_smi);
  if (FLAG_debug_code) {
    // Second should be non-smi if we get here.
    __ AbortIfSmi(second);
  }
  __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
  __ j(not_equal, on_not_smis);
  // Convert second to smi, if possible.
  __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
  __ movq(scratch2, xmm0);
  __ cvttsd2siq(smi_result, xmm0);
  __ cvtlsi2sd(xmm1, smi_result);
  __ movq(kScratchRegister, xmm1);
  __ cmpq(scratch2, kScratchRegister);
  __ j(not_equal, on_not_smis);
  __ Integer32ToSmi(second, smi_result);
  if (on_success != NULL) {
    __ jmp(on_success);
  } else {
    __ bind(&done);
  }
}
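
// The cvttsd2siq/cvtlsi2sd round trip above is a compact test for whether a
// double is exactly representable as a 32-bit integer. A C++ sketch of the
// same idea (hypothetical helper, not part of this file):
//
//   bool DoubleToSmiPayload(double d, int64_t* out) {
//     int64_t truncated = static_cast<int64_t>(d);   // cvttsd2siq
//     // cvtlsi2sd converts only the low 32 bits back to double.
//     double back = static_cast<double>(static_cast<int32_t>(truncated));
//     uint64_t original_bits, back_bits;
//     memcpy(&original_bits, &d, sizeof(d));
//     memcpy(&back_bits, &back, sizeof(back));
//     // Comparing bit patterns (not values) also rejects -0.0, which must
//     // stay a heap number because a smi cannot represent it.
//     if (original_bits != back_bits) return false;
//     *out = truncated;
//     return true;
//   }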
void MathPowStub::Generate(MacroAssembler* masm) {
  // Choose register conforming to calling convention (when bailing out).
#ifdef _WIN64
  const Register exponent = rdx;
#else
  const Register exponent = rdi;
#endif
  const Register base = rax;
  const Register scratch = rcx;
  const XMMRegister double_result = xmm3;
  const XMMRegister double_base = xmm2;
  const XMMRegister double_exponent = xmm1;
  const XMMRegister double_scratch = xmm4;

  Label call_runtime, done, exponent_not_smi, int_exponent;

  // Save 1 in double_result - we need this several times later on.
  __ movq(scratch, Immediate(1));
  __ cvtlsi2sd(double_result, scratch);

  if (exponent_type_ == ON_STACK) {
    Label base_is_smi, unpack_exponent;
    // The exponent and base are supplied as arguments on the stack.
    // This can only happen if the stub is called from non-optimized code.
    // Load input parameters from stack.
    __ movq(base, Operand(rsp, 2 * kPointerSize));
    __ movq(exponent, Operand(rsp, 1 * kPointerSize));
    __ JumpIfSmi(base, &base_is_smi, Label::kNear);
    __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);

    __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
    __ jmp(&unpack_exponent, Label::kNear);

    __ bind(&base_is_smi);
    __ SmiToInteger32(base, base);
    __ cvtlsi2sd(double_base, base);
    __ bind(&unpack_exponent);

    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiToInteger32(exponent, exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
                   Heap::kHeapNumberMapRootIndex);
    __ j(not_equal, &call_runtime);
    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
  } else if (exponent_type_ == TAGGED) {
    __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
    __ SmiToInteger32(exponent, exponent);
    __ jmp(&int_exponent);

    __ bind(&exponent_not_smi);
    __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
  }

  if (exponent_type_ != INTEGER) {
    Label fast_power;
    // Detect integer exponents stored as double.
    __ cvttsd2si(exponent, double_exponent);
    // Skip to runtime if possibly NaN (indicated by the indefinite integer).
    __ cmpl(exponent, Immediate(0x80000000u));
    __ j(equal, &call_runtime);
    __ cvtlsi2sd(double_scratch, exponent);
    // Already ruled out NaNs for exponent.
    __ ucomisd(double_exponent, double_scratch);
    __ j(equal, &int_exponent);
    if (exponent_type_ == ON_STACK) {
      // Detect square root case. Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead. We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
      Label continue_sqrt, continue_rsqrt, not_plus_half;
      // Test for 0.5.
      // Load double_scratch with 0.5.
      __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
      __ movq(double_scratch, scratch);
      // Already ruled out NaNs for exponent.
      __ ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &not_plus_half, Label::kNear);

      // Calculates square root of base. Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
      // According to IEEE-754, double-precision -Infinity has the highest
      // 12 bits set and the lowest 52 bits cleared.
      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
      __ movq(double_scratch, scratch);
      __ ucomisd(double_scratch, double_base);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal. However, it also sets the carry flag.
      __ j(not_equal, &continue_sqrt, Label::kNear);
      __ j(carry, &continue_sqrt, Label::kNear);

      // Set result to Infinity in the special case.
      __ xorps(double_result, double_result);
      __ subsd(double_result, double_scratch);
      __ jmp(&done);

      __ bind(&continue_sqrt);
      // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
      __ xorps(double_scratch, double_scratch);
      __ addsd(double_scratch, double_base);  // Convert -0 to 0.
      __ sqrtsd(double_result, double_scratch);
      __ jmp(&done);
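
      // The +0 adjustment and the -Infinity check above mirror this C++
      // sketch of the exponent == 0.5 case (hypothetical, illustration only):
      //
      //   double PowHalf(double base) {
      //     if (base == -std::numeric_limits<double>::infinity())
      //       return std::numeric_limits<double>::infinity();
      //     return std::sqrt(base + 0.0);  // (-0.0) + (+0.0) == +0.0
      //   }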
      // Test for -0.5.
      __ bind(&not_plus_half);
      // Load double_scratch with -0.5 by subtracting 1.
      __ subsd(double_scratch, double_result);
      // Already ruled out NaNs for exponent.
      __ ucomisd(double_scratch, double_exponent);
      __ j(not_equal, &fast_power, Label::kNear);

      // Calculates reciprocal of square root of base. Check for the special
      // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
      // According to IEEE-754, double-precision -Infinity has the highest
      // 12 bits set and the lowest 52 bits cleared.
      __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
      __ movq(double_scratch, scratch);
      __ ucomisd(double_scratch, double_base);
      // Comparing -Infinity with NaN results in "unordered", which sets the
      // zero flag as if both were equal. However, it also sets the carry flag.
      __ j(not_equal, &continue_rsqrt, Label::kNear);
      __ j(carry, &continue_rsqrt, Label::kNear);

      // Set result to 0 in the special case.
      __ xorps(double_result, double_result);
      __ jmp(&done);

      __ bind(&continue_rsqrt);
      // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
      __ xorps(double_exponent, double_exponent);
      __ addsd(double_exponent, double_base);  // Convert -0 to +0.
      __ sqrtsd(double_exponent, double_exponent);
      __ divsd(double_result, double_exponent);
      __ jmp(&done);
    }
    // Using FPU instructions to calculate power.
    Label fast_power_failed;
    __ bind(&fast_power);
    __ fnclex();  // Clear flags to catch exceptions later.
    // Transfer (B)ase and (E)xponent onto the FPU register stack.
    __ subq(rsp, Immediate(kDoubleSize));
    __ movsd(Operand(rsp, 0), double_exponent);
    __ fld_d(Operand(rsp, 0));  // E
    __ movsd(Operand(rsp, 0), double_base);
    __ fld_d(Operand(rsp, 0));  // B, E

    // Exponent is in st(1) and base is in st(0)
    // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
    // FYL2X calculates st(1) * log2(st(0))
    __ fyl2x();    // X
    __ fld(0);     // X, X
    __ frndint();  // rnd(X), X
    __ fsub(1);    // rnd(X), X-rnd(X)
    __ fxch(1);    // X - rnd(X), rnd(X)
    // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
    __ f2xm1();    // 2^(X-rnd(X)) - 1, rnd(X)
    __ fld1();     // 1, 2^(X-rnd(X)) - 1, rnd(X)
    __ faddp(1);   // 1, 2^(X-rnd(X)), rnd(X)
    // FSCALE calculates st(0) * 2^st(1)
    __ fscale();   // 2^X, rnd(X)
    __ fstp(1);    // 2^X
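
    // The FPU sequence above evaluates B^E through the identity
    // B^E = 2^(E * log2(B)). A C++ sketch of the same decomposition
    // (hypothetical, illustration only, not part of the build):
    //
    //   double FastPow(double b, double e) {
    //     double x = e * std::log2(b);        // fyl2x
    //     double r = std::nearbyint(x);       // frndint
    //     double f = x - r;                   // fsub; |f| < 1, as f2xm1 needs
    //     double two_f = (std::exp2(f) - 1.0) + 1.0;       // f2xm1, fld1/faddp
    //     return std::ldexp(two_f, static_cast<int>(r));   // fscale
    //   }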
    // Bail out to runtime in case of exceptions in the status word.
    __ fnstsw_ax();
    __ testb(rax, Immediate(0x5F));  // Check for all but precision exception.
    __ j(not_zero, &fast_power_failed, Label::kNear);
    __ fstp_d(Operand(rsp, 0));
    __ movsd(double_result, Operand(rsp, 0));
    __ addq(rsp, Immediate(kDoubleSize));
    __ jmp(&done);

    __ bind(&fast_power_failed);
    __ fninit();
    __ addq(rsp, Immediate(kDoubleSize));
    __ jmp(&call_runtime);
  }
  // Calculate power with integer exponent.
  __ bind(&int_exponent);
  const XMMRegister double_scratch2 = double_exponent;
  // Back up exponent as we need to check if exponent is negative later.
  __ movq(scratch, exponent);  // Back up exponent.
  __ movsd(double_scratch, double_base);  // Back up base.
  __ movsd(double_scratch2, double_result);  // Load double_exponent with 1.

  // Get absolute value of exponent.
  Label no_neg, while_true, no_multiply;
  __ testl(scratch, scratch);
  __ j(positive, &no_neg, Label::kNear);
  __ negl(scratch);
  __ bind(&no_neg);

  __ bind(&while_true);
  __ shrl(scratch, Immediate(1));
  __ j(not_carry, &no_multiply, Label::kNear);
  __ mulsd(double_result, double_scratch);
  __ bind(&no_multiply);

  __ mulsd(double_scratch, double_scratch);
  __ j(not_zero, &while_true);
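
  // The loop above is binary exponentiation (square-and-multiply) on the
  // absolute value of the exponent. Equivalent C++ sketch (hypothetical,
  // illustration only; the negative-exponent division follows below):
  //
  //   double PowIntExponent(double base, uint32_t abs_exponent) {
  //     double result = 1.0;
  //     while (abs_exponent != 0) {
  //       if (abs_exponent & 1) result *= base;  // low bit set -> multiply
  //       base *= base;                          // square each iteration
  //       abs_exponent >>= 1;
  //     }
  //     return result;
  //   }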
  // If the exponent is negative, return 1/result.
  __ testl(exponent, exponent);
  __ j(greater, &done);
  __ divsd(double_scratch2, double_result);
  __ movsd(double_result, double_scratch2);
  // Test whether result is zero. Bail out to check for subnormal result.
  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
  __ xorps(double_scratch2, double_scratch2);
  __ ucomisd(double_scratch2, double_result);
  // double_exponent aliased as double_scratch2 has already been overwritten
  // and may not have contained the exponent value in the first place when the
  // input was a smi. We reset it with exponent value before bailing out.
  __ j(not_equal, &done);
  __ cvtlsi2sd(double_exponent, exponent);

  // Returning or bailing out.
  Counters* counters = masm->isolate()->counters();
  if (exponent_type_ == ON_STACK) {
    // The arguments are still on the stack.
    __ bind(&call_runtime);
    __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);

    // The stub is called from non-optimized code, which expects the result
    // as heap number in rax.
    __ bind(&done);
    __ AllocateHeapNumber(rax, rcx, &call_runtime);
    __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(2 * kPointerSize);
  } else {
    __ bind(&call_runtime);
    // Move base to the correct argument register. Exponent is already in xmm1.
    __ movsd(xmm0, double_base);
    ASSERT(double_exponent.is(xmm1));
    {
      AllowExternalCallThatCantCauseGC scope(masm);
      __ PrepareCallCFunction(2);
      __ CallCFunction(
          ExternalReference::power_double_double_function(masm->isolate()), 2);
    }
    // Return value is in xmm0.
    __ movsd(double_result, xmm0);
    // Restore context register.
    __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

    __ bind(&done);
    __ IncrementCounter(counters->math_pow(), 1);
    __ ret(0);
  }
}
void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
  // The key is in rdx and the parameter count is in rax.

  // The displacement is used for skipping the frame pointer on the
  // stack. It is the offset of the last parameter (if any) relative
  // to the frame pointer.
  static const int kDisplacement = 1 * kPointerSize;

  // Check that the key is a smi.
  Label slow;
  __ JumpIfNotSmi(rdx, &slow);

  // Check if the calling frame is an arguments adaptor frame. We look at the
  // context offset, and if the frame is not a regular one, then we find a
  // Smi instead of the context. We can't use SmiCompare here, because that
  // only works for comparing two smis.
  Label adaptor;
  __ movq(rbx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ Cmp(Operand(rbx, StandardFrameConstants::kContextOffset),
         Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor);

  // Check index against formal parameters count limit passed in
  // through register rax. Use unsigned comparison to get negative
  // check for free.
  __ cmpq(rdx, rax);
  __ j(above_equal, &slow);

  // Read the argument from the stack and return it.
  SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
  __ lea(rbx, Operand(rbp, index.reg, index.scale, 0));
  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
  __ Ret();

  // Arguments adaptor case: Check index against actual arguments
  // limit found in the arguments adaptor frame. Use unsigned
  // comparison to get negative check for free.
  __ bind(&adaptor);
  __ movq(rcx, Operand(rbx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ cmpq(rdx, rcx);
  __ j(above_equal, &slow);

  // Read the argument from the stack and return it.
  index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2);
  __ lea(rbx, Operand(rbx, index.reg, index.scale, 0));
  index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2);
  __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement));
  __ Ret();

  // Slow-case: Handle non-smi or out-of-bounds access to arguments
  // by calling the runtime system.
  __ bind(&slow);
  __ pop(rbx);  // Return address.
  __ push(rdx);
  __ push(rbx);
  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
}
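
// The address arithmetic above reduces to a simple formula: with arguments
// pushed above the return address and saved frame pointer, argument |key| of
// a frame holding |count| arguments lives at
//
//   fp + kDisplacement + (count - key) * kPointerSize.
//
// A C++ sketch of the fast path (hypothetical, illustration only):
//
//   intptr_t ArgumentSlot(intptr_t fp, intptr_t count, intptr_t key) {
//     return fp + kPointerSize /* kDisplacement */ +
//            (count - key) * kPointerSize;
//   }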
void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
  // Stack layout:
  //  rsp[0] : return address
  //  rsp[8] : number of parameters (tagged)
  //  rsp[16] : receiver displacement
  //  rsp[24] : function
  // Registers used over the whole function:
  //  rbx: the mapped parameter count (untagged)
  //  rax: the allocated object (tagged).

  Factory* factory = masm->isolate()->factory();

  __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize));
  // rbx = parameter count (untagged)

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  Label adaptor_frame, try_allocate;
  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor_frame);

  // No adaptor, parameter count = argument count.
  __ movq(rcx, rbx);
  __ jmp(&try_allocate, Label::kNear);

  // We have an adaptor frame. Patch the parameters pointer.
  __ bind(&adaptor_frame);
  __ SmiToInteger64(rcx,
                    Operand(rdx,
                            ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
                      StandardFrameConstants::kCallerSPOffset));
  __ movq(Operand(rsp, 2 * kPointerSize), rdx);

  // rbx = parameter count (untagged)
  // rcx = argument count (untagged)
  // Compute the mapped parameter count = min(rbx, rcx) in rbx.
  __ cmpq(rbx, rcx);
  __ j(less_equal, &try_allocate, Label::kNear);
  __ movq(rbx, rcx);

  __ bind(&try_allocate);

  // Compute the sizes of backing store, parameter map, and arguments object.
  // 1. Parameter map, has 2 extra words containing context and backing store.
  const int kParameterMapHeaderSize =
      FixedArray::kHeaderSize + 2 * kPointerSize;
  Label no_parameter_map;
  __ xor_(r8, r8);
  __ testq(rbx, rbx);
  __ j(zero, &no_parameter_map, Label::kNear);
  __ lea(r8, Operand(rbx, times_pointer_size, kParameterMapHeaderSize));
  __ bind(&no_parameter_map);

  // 2. Backing store.
  __ lea(r8, Operand(r8, rcx, times_pointer_size, FixedArray::kHeaderSize));

  // 3. Arguments object.
  __ addq(r8, Immediate(Heap::kArgumentsObjectSize));

  // Do the allocation of all three objects in one go.
  __ AllocateInNewSpace(r8, rax, rdx, rdi, &runtime, TAG_OBJECT);

  // rax = address of new object(s) (tagged)
  // rcx = argument count (untagged)
  // Get the arguments boilerplate from the current (global) context into rdi.
  Label has_mapped_parameters, copy;
  __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
  __ testq(rbx, rbx);
  __ j(not_zero, &has_mapped_parameters, Label::kNear);

  const int kIndex = Context::ARGUMENTS_BOILERPLATE_INDEX;
  __ movq(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
  __ jmp(&copy, Label::kNear);

  const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
  __ bind(&has_mapped_parameters);
  __ movq(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
  __ bind(&copy);

  // rax = address of new object (tagged)
  // rbx = mapped parameter count (untagged)
  // rcx = argument count (untagged)
  // rdi = address of boilerplate object (tagged)
  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ movq(rdx, FieldOperand(rdi, i));
    __ movq(FieldOperand(rax, i), rdx);
  }

  // Set up the callee in-object property.
  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
  __ movq(rdx, Operand(rsp, 3 * kPointerSize));
  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
                       Heap::kArgumentsCalleeIndex * kPointerSize),
          rdx);

  // Use the length (smi tagged) and set that as an in-object property too.
  // Note: rcx is tagged from here on.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ Integer32ToSmi(rcx, rcx);
  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
          rcx);

  // Set up the elements pointer in the allocated arguments object.
  // If we allocated a parameter map, rdi will point there, otherwise to the
  // backing store.
  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSize));
  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);

  // rax = address of new object (tagged)
  // rbx = mapped parameter count (untagged)
  // rcx = argument count (tagged)
  // rdi = address of parameter map or backing store (tagged)

  // Initialize parameter map. If there are no mapped arguments, we're done.
  Label skip_parameter_map;
  __ testq(rbx, rbx);
  __ j(zero, &skip_parameter_map);

  __ LoadRoot(kScratchRegister, Heap::kNonStrictArgumentsElementsMapRootIndex);
  // rbx contains the untagged argument count. Add 2 and tag to write.
  __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
  __ Integer64PlusConstantToSmi(r9, rbx, 2);
  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), r9);
  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 0 * kPointerSize), rsi);
  __ lea(r9, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize + 1 * kPointerSize), r9);

  // Copy the parameter slots and the holes in the arguments.
  // We need to fill in mapped_parameter_count slots. They index the context,
  // where parameters are stored in reverse order, at
  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
  // The mapped parameter thus need to get indices
  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
  // We loop from right to left. (See the index summary after the loop.)
  Label parameters_loop, parameters_test;

  // Load tagged parameter count into r9.
  __ Integer32ToSmi(r9, rbx);
  __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS));
  __ addq(r8, Operand(rsp, 1 * kPointerSize));
  __ subq(r8, r9);
  __ Move(r11, factory->the_hole_value());
  __ movq(rdx, rdi);
  __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize));
  // r9 = loop variable (tagged)
  // r8 = mapping index (tagged)
  // r11 = the hole value
  // rdx = address of parameter map (tagged)
  // rdi = address of backing store (tagged)
  __ jmp(&parameters_test, Label::kNear);

  __ bind(&parameters_loop);
  __ SmiSubConstant(r9, r9, Smi::FromInt(1));
  __ SmiToInteger64(kScratchRegister, r9);
  __ movq(FieldOperand(rdx, kScratchRegister,
                       times_pointer_size,
                       kParameterMapHeaderSize),
          r8);
  __ movq(FieldOperand(rdi, kScratchRegister,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          r11);
  __ SmiAddConstant(r8, r8, Smi::FromInt(1));
  __ bind(&parameters_test);
  __ SmiTest(r9);
  __ j(not_zero, &parameters_loop, Label::kNear);
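
  // In effect, the loop above fills, for each mapped parameter i
  // (0-based, i < mapped_count):
  //
  //   parameter_map[2 + i] = Smi(MIN_CONTEXT_SLOTS + parameter_count - 1 - i)
  //   backing_store[i]     = the_hole
  //
  // so a lookup that finds a context index in the map reads the argument
  // from the context, while the hole in the backing store marks the slot as
  // aliased. (Summary of the code above, not additional behavior.)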
  __ bind(&skip_parameter_map);

  // rcx = argument count (tagged)
  // rdi = address of backing store (tagged)
  // Copy arguments header and remaining slots (if there are any).
  __ Move(FieldOperand(rdi, FixedArray::kMapOffset),
          factory->fixed_array_map());
  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);

  Label arguments_loop, arguments_test;
  __ movq(r8, rbx);
  __ movq(rdx, Operand(rsp, 2 * kPointerSize));
  // Untag rcx for the loop below.
  __ SmiToInteger64(rcx, rcx);
  __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0));
  __ subq(rdx, kScratchRegister);
  __ jmp(&arguments_test, Label::kNear);

  __ bind(&arguments_loop);
  __ subq(rdx, Immediate(kPointerSize));
  __ movq(r9, Operand(rdx, 0));
  __ movq(FieldOperand(rdi, r8,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          r9);
  __ addq(r8, Immediate(1));

  __ bind(&arguments_test);
  __ cmpq(r8, rcx);
  __ j(less, &arguments_loop, Label::kNear);

  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  // rcx = argument count (untagged)
  __ bind(&runtime);
  __ Integer32ToSmi(rcx, rcx);
  __ movq(Operand(rsp, 1 * kPointerSize), rcx);  // Patch argument count.
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
  // rsp[0] : return address
  // rsp[8] : number of parameters
  // rsp[16] : receiver displacement
  // rsp[24] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label runtime;
  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(not_equal, &runtime);

  // Patch the arguments.length and the parameters pointer.
  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
  __ SmiToInteger64(rcx, rcx);
  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
                      StandardFrameConstants::kCallerSPOffset));
  __ movq(Operand(rsp, 2 * kPointerSize), rdx);

  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
}
void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
  // rsp[0] : return address
  // rsp[8] : number of parameters
  // rsp[16] : receiver displacement
  // rsp[24] : function

  // Check if the calling frame is an arguments adaptor frame.
  Label adaptor_frame, try_allocate, runtime;
  __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
  __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset));
  __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  __ j(equal, &adaptor_frame);

  // Get the length from the frame.
  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
  __ SmiToInteger64(rcx, rcx);
  __ jmp(&try_allocate);

  // Patch the arguments.length and the parameters pointer.
  __ bind(&adaptor_frame);
  __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ movq(Operand(rsp, 1 * kPointerSize), rcx);
  __ SmiToInteger64(rcx, rcx);
  __ lea(rdx, Operand(rdx, rcx, times_pointer_size,
                      StandardFrameConstants::kCallerSPOffset));
  __ movq(Operand(rsp, 2 * kPointerSize), rdx);

  // Try the new space allocation. Start out with computing the size of
  // the arguments object and the elements array.
  Label add_arguments_object;
  __ bind(&try_allocate);
  __ testq(rcx, rcx);
  __ j(zero, &add_arguments_object, Label::kNear);
  __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize));
  __ bind(&add_arguments_object);
  __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict));

  // Do the allocation of both objects in one go.
  __ AllocateInNewSpace(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);

  // Get the arguments boilerplate from the current (global) context.
  __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ movq(rdi, FieldOperand(rdi, GlobalObject::kGlobalContextOffset));
  const int offset =
      Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
  __ movq(rdi, Operand(rdi, offset));

  // Copy the JS object part.
  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
    __ movq(rbx, FieldOperand(rdi, i));
    __ movq(FieldOperand(rax, i), rbx);
  }

  // Get the length (smi tagged) and set that as an in-object property too.
  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
  __ movq(rcx, Operand(rsp, 1 * kPointerSize));
  __ movq(FieldOperand(rax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
          rcx);

  // If there are no actual arguments, we're done.
  Label done;
  __ testq(rcx, rcx);
  __ j(zero, &done);

  // Get the parameters pointer from the stack.
  __ movq(rdx, Operand(rsp, 2 * kPointerSize));

  // Set up the elements pointer in the allocated arguments object and
  // initialize the header in the elements fixed array.
  __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict));
  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
  __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);

  __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
  // Untag the length for the loop below.
  __ SmiToInteger64(rcx, rcx);

  // Copy the fixed array slots.
  Label loop;
  __ bind(&loop);
  __ movq(rbx, Operand(rdx, -1 * kPointerSize));  // Skip receiver.
  __ movq(FieldOperand(rdi, FixedArray::kHeaderSize), rbx);
  __ addq(rdi, Immediate(kPointerSize));
  __ subq(rdx, Immediate(kPointerSize));
  __ decq(rcx);
  __ j(not_zero, &loop);

  // Return and remove the on-stack parameters.
  __ bind(&done);
  __ ret(3 * kPointerSize);

  // Do the runtime call to allocate the arguments object.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
void RegExpExecStub::Generate(MacroAssembler* masm) {
  // Just jump directly to runtime if native RegExp is not selected at compile
  // time or if regexp entry in generated code is turned off runtime switch or
  // at compilation.
#ifdef V8_INTERPRETED_REGEXP
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#else  // V8_INTERPRETED_REGEXP

  // Stack frame on entry.
  //  rsp[0]: return address
  //  rsp[8]: last_match_info (expected JSArray)
  //  rsp[16]: previous index
  //  rsp[24]: subject string
  //  rsp[32]: JSRegExp object

  static const int kLastMatchInfoOffset = 1 * kPointerSize;
  static const int kPreviousIndexOffset = 2 * kPointerSize;
  static const int kSubjectOffset = 3 * kPointerSize;
  static const int kJSRegExpOffset = 4 * kPointerSize;

  Label runtime;
  // Ensure that a RegExp stack is allocated.
  Isolate* isolate = masm->isolate();
  ExternalReference address_of_regexp_stack_memory_address =
      ExternalReference::address_of_regexp_stack_memory_address(isolate);
  ExternalReference address_of_regexp_stack_memory_size =
      ExternalReference::address_of_regexp_stack_memory_size(isolate);
  __ Load(kScratchRegister, address_of_regexp_stack_memory_size);
  __ testq(kScratchRegister, kScratchRegister);
  __ j(zero, &runtime);

  // Check that the first argument is a JSRegExp object.
  __ movq(rax, Operand(rsp, kJSRegExpOffset));
  __ JumpIfSmi(rax, &runtime);
  __ CmpObjectType(rax, JS_REGEXP_TYPE, kScratchRegister);
  __ j(not_equal, &runtime);
  // Check that the RegExp has been compiled (data contains a fixed array).
  __ movq(rax, FieldOperand(rax, JSRegExp::kDataOffset));
  if (FLAG_debug_code) {
    Condition is_smi = masm->CheckSmi(rax);
    __ Check(NegateCondition(is_smi),
             "Unexpected type for RegExp data, FixedArray expected");
    __ CmpObjectType(rax, FIXED_ARRAY_TYPE, kScratchRegister);
    __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
  }

  // rax: RegExp data (FixedArray)
  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
  __ SmiToInteger32(rbx, FieldOperand(rax, JSRegExp::kDataTagOffset));
  __ cmpl(rbx, Immediate(JSRegExp::IRREGEXP));
  __ j(not_equal, &runtime);

  // rax: RegExp data (FixedArray)
  // Check that the number of captures fit in the static offsets vector buffer.
  __ SmiToInteger32(rdx,
                    FieldOperand(rax, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  __ leal(rdx, Operand(rdx, rdx, times_1, 2));
  // Check that the static offsets vector buffer is large enough.
  __ cmpl(rdx, Immediate(OffsetsVector::kStaticOffsetsVectorSize));
  __ j(above, &runtime);

  // rax: RegExp data (FixedArray)
  // rdx: Number of capture registers
  // Check that the second argument is a string.
  __ movq(rdi, Operand(rsp, kSubjectOffset));
  __ JumpIfSmi(rdi, &runtime);
  Condition is_string = masm->IsObjectStringType(rdi, rbx, rbx);
  __ j(NegateCondition(is_string), &runtime);

  // rdi: Subject string.
  // rax: RegExp data (FixedArray).
  // rdx: Number of capture registers.
  // Check that the third argument is a positive smi less than the string
  // length. A negative value will be greater (unsigned comparison).
  __ movq(rbx, Operand(rsp, kPreviousIndexOffset));
  __ JumpIfNotSmi(rbx, &runtime);
  __ SmiCompare(rbx, FieldOperand(rdi, String::kLengthOffset));
  __ j(above_equal, &runtime);

  // rax: RegExp data (FixedArray)
  // rdx: Number of capture registers
  // Check that the fourth object is a JSArray object.
  __ movq(rdi, Operand(rsp, kLastMatchInfoOffset));
  __ JumpIfSmi(rdi, &runtime);
  __ CmpObjectType(rdi, JS_ARRAY_TYPE, kScratchRegister);
  __ j(not_equal, &runtime);
  // Check that the JSArray is in fast case.
  __ movq(rbx, FieldOperand(rdi, JSArray::kElementsOffset));
  __ movq(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
                 Heap::kFixedArrayMapRootIndex);
  __ j(not_equal, &runtime);
  // Check that the last match info has space for the capture registers and the
  // additional information. Ensure no overflow in add.
  STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
  __ SmiToInteger32(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
  __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
  __ cmpl(rdx, rdi);
  __ j(greater, &runtime);

  // Reset offset for possibly sliced string.
  __ Set(r14, 0);
  // rax: RegExp data (FixedArray)
  // Check the representation and encoding of the subject string.
  Label seq_ascii_string, seq_two_byte_string, check_code;
  __ movq(rdi, Operand(rsp, kSubjectOffset));
  // Make a copy of the original subject string.
  __ movq(r15, rdi);
  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
  // First check for flat two byte string.
  __ andb(rbx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kStringEncodingMask |
                         kShortExternalStringMask));
  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string, Label::kNear);
  // Any other flat string must be a flat ASCII string. None of the following
  // string type tests will succeed if subject is not a string or a short
  // external string.
  __ andb(rbx, Immediate(kIsNotStringMask |
                         kStringRepresentationMask |
                         kShortExternalStringMask));
  __ j(zero, &seq_ascii_string, Label::kNear);

  // rbx: whether subject is a string and if yes, its string representation
  // Check for flat cons string or sliced string.
  // A flat cons string is a cons string where the second part is the empty
  // string. In that case the subject string is just the first part of the cons
  // string. Also in this case the first part of the cons string is known to be
  // a sequential string or an external string.
  // In the case of a sliced string its offset has to be taken into account.
  Label cons_string, external_string, check_encoding;
  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
  __ cmpq(rbx, Immediate(kExternalStringTag));
  __ j(less, &cons_string, Label::kNear);
  __ j(equal, &external_string);

  // Catch non-string subject or short external string.
  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
  __ testb(rbx, Immediate(kIsNotStringMask | kShortExternalStringMask));
  __ j(not_zero, &runtime);

  // String is sliced.
  __ SmiToInteger32(r14, FieldOperand(rdi, SlicedString::kOffsetOffset));
  __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset));
  // r14: slice offset
  // r15: original subject string
  // rdi: parent string
  __ jmp(&check_encoding, Label::kNear);
  // String is a cons string, check whether it is flat.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(rdi, ConsString::kSecondOffset),
                 Heap::kEmptyStringRootIndex);
  __ j(not_equal, &runtime);
  __ movq(rdi, FieldOperand(rdi, ConsString::kFirstOffset));
  // rdi: first part of cons string or parent of sliced string.
  // rbx: map of first part of cons string or map of parent of sliced string.
  // Is first part of cons or parent of slice a flat two byte string?
  __ bind(&check_encoding);
  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
  __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
           Immediate(kStringRepresentationMask | kStringEncodingMask));
  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
  __ j(zero, &seq_two_byte_string, Label::kNear);
  // Any other flat string must be sequential ASCII or external.
  __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
           Immediate(kStringRepresentationMask));
  __ j(not_zero, &external_string);
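
  // The dispatch above performs a single unwrapping step of the usual
  // string-flattening logic. Sketched over a hypothetical tagged string type
  // (illustration only, not part of this file):
  //
  //   if (is_sliced(subject)) {
  //     offset = slice_offset(subject);      // remembered in r14
  //     subject = parent(subject);
  //   } else if (is_cons(subject)) {
  //     if (second(subject) != empty_string) call_runtime();
  //     subject = first(subject);            // known flat after this
  //   }
  //   // subject is now sequential or external; external strings get their
  //   // data pointer adjusted below so they can be indexed like sequential
  //   // ones.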
  __ bind(&seq_ascii_string);
  // rdi: subject string (sequential ASCII)
  // rax: RegExp data (FixedArray)
  __ movq(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
  __ Set(rcx, 1);  // Type is ASCII.
  __ jmp(&check_code, Label::kNear);

  __ bind(&seq_two_byte_string);
  // rdi: subject string (flat two-byte)
  // rax: RegExp data (FixedArray)
  __ movq(r11, FieldOperand(rax, JSRegExp::kDataUC16CodeOffset));
  __ Set(rcx, 0);  // Type is two byte.

  __ bind(&check_code);
  // Check that the irregexp code has been generated for the actual string
  // encoding. If it has, the field contains a code object; otherwise it
  // contains a smi (code flushing support).
  __ JumpIfSmi(r11, &runtime);

  // rdi: subject string
  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
  // r11: code
  // Load used arguments before starting to push arguments for call to native
  // RegExp code to avoid handling changing stack height.
  __ SmiToInteger64(rbx, Operand(rsp, kPreviousIndexOffset));

  // rdi: subject string
  // rbx: previous index
  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
  // r11: code
  // All checks done. Now push arguments for native regexp code.
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->regexp_entry_native(), 1);

  // Isolates: note we add an additional parameter here (isolate pointer).
  static const int kRegExpExecuteArguments = 8;
  int argument_slots_on_stack =
      masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments);
  __ EnterApiExitFrame(argument_slots_on_stack);

  // Argument 8: Pass current isolate address.
  // __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
  //     Immediate(ExternalReference::isolate_address()));
  __ LoadAddress(kScratchRegister, ExternalReference::isolate_address());
  __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize),
          kScratchRegister);

  // Argument 7: Indicate that this is a direct call from JavaScript.
  __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize),
          Immediate(1));

  // Argument 6: Start (high end) of backtracking stack memory area.
  __ movq(kScratchRegister, address_of_regexp_stack_memory_address);
  __ movq(r9, Operand(kScratchRegister, 0));
  __ movq(kScratchRegister, address_of_regexp_stack_memory_size);
  __ addq(r9, Operand(kScratchRegister, 0));
  // Argument 6 passed in r9 on Linux and on the stack on Windows.
#ifdef _WIN64
  __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9);
#endif

  // Argument 5: static offsets vector buffer.
  __ LoadAddress(r8,
                 ExternalReference::address_of_static_offsets_vector(isolate));
  // Argument 5 passed in r8 on Linux and on the stack on Windows.
#ifdef _WIN64
  __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), r8);
#endif

  // First four arguments are passed in registers on both Linux and Windows.
#ifdef _WIN64
  Register arg4 = r9;
  Register arg3 = r8;
  Register arg2 = rdx;
  Register arg1 = rcx;
#else
  Register arg4 = rcx;
  Register arg3 = rdx;
  Register arg2 = rsi;
  Register arg1 = rdi;
#endif

  // Keep track on aliasing between argX defined above and the registers used.
  // rdi: subject string
  // rbx: previous index
  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
  // r11: code
  // r14: slice offset
  // r15: original subject string

  // Argument 2: Previous index.
  __ movq(arg2, rbx);

  // Argument 4: End of string data
  // Argument 3: Start of string data
  Label setup_two_byte, setup_rest, got_length, length_not_from_slice;
  // Prepare start and end index of the input.
  // Load the length from the original sliced string if that is the case.
  __ addq(rbx, r14);
  __ SmiToInteger32(arg3, FieldOperand(r15, String::kLengthOffset));
  __ addq(r14, arg3);  // Using arg3 as scratch.

  // rbx: start index of the input
  // r14: end index of the input
  // r15: original subject string
  __ testb(rcx, rcx);  // Last use of rcx as encoding of subject string.
  __ j(zero, &setup_two_byte, Label::kNear);
  __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqAsciiString::kHeaderSize));
  __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
  __ jmp(&setup_rest, Label::kNear);
  __ bind(&setup_two_byte);
  __ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
  __ lea(arg3, FieldOperand(rdi, rbx, times_2, SeqTwoByteString::kHeaderSize));
  __ bind(&setup_rest);

  // Argument 1: Original subject string.
  // The original subject is in the previous stack frame. Therefore we have to
  // use rbp, which points exactly to one pointer size below the previous rsp.
  // (Because creating a new stack frame pushes the previous rbp onto the stack
  // and thereby moves up rsp by one kPointerSize.)
  __ movq(arg1, r15);

  // Locate the code entry and call it.
  __ addq(r11, Immediate(Code::kHeaderSize - kHeapObjectTag));
  __ call(r11);

  __ LeaveApiExitFrame();

  // Check the result.
  Label success;
  Label exception;
  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
  __ j(equal, &success, Label::kNear);
  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
  __ j(equal, &exception);
  __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
  // If none of the above, it can only be retry.
  // Handle that in the runtime system.
  __ j(not_equal, &runtime);

  // For failure return null.
  __ LoadRoot(rax, Heap::kNullValueRootIndex);
  __ ret(4 * kPointerSize);

  // Load RegExp data.
  __ bind(&success);
  __ movq(rax, Operand(rsp, kJSRegExpOffset));
  __ movq(rcx, FieldOperand(rax, JSRegExp::kDataOffset));
  __ SmiToInteger32(rax,
                    FieldOperand(rcx, JSRegExp::kIrregexpCaptureCountOffset));
  // Calculate number of capture registers (number_of_captures + 1) * 2.
  __ leal(rdx, Operand(rax, rax, times_1, 2));

  // rdx: Number of capture registers
  // Load last_match_info which is still known to be a fast case JSArray.
  __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
  __ movq(rbx, FieldOperand(rax, JSArray::kElementsOffset));

  // rbx: last_match_info backing store (FixedArray)
  // rdx: number of capture registers
  // Store the capture count.
  __ Integer32ToSmi(kScratchRegister, rdx);
  __ movq(FieldOperand(rbx, RegExpImpl::kLastCaptureCountOffset),
          kScratchRegister);
  // Store last subject and last input.
  __ movq(rax, Operand(rsp, kSubjectOffset));
  __ movq(FieldOperand(rbx, RegExpImpl::kLastSubjectOffset), rax);
  __ RecordWriteField(rbx,
                      RegExpImpl::kLastSubjectOffset,
                      rax,
                      rdi,
                      kDontSaveFPRegs);
  __ movq(rax, Operand(rsp, kSubjectOffset));
  __ movq(FieldOperand(rbx, RegExpImpl::kLastInputOffset), rax);
  __ RecordWriteField(rbx,
                      RegExpImpl::kLastInputOffset,
                      rax,
                      rdi,
                      kDontSaveFPRegs);

  // Get the static offsets vector filled by the native regexp code.
  __ LoadAddress(rcx,
                 ExternalReference::address_of_static_offsets_vector(isolate));

  // rbx: last_match_info backing store (FixedArray)
  // rcx: offsets vector
  // rdx: number of capture registers
  Label next_capture, done;
  // Capture register counter starts from number of capture registers and
  // counts down until wrapping after zero.
  __ bind(&next_capture);
  __ subq(rdx, Immediate(1));
  __ j(negative, &done, Label::kNear);
  // Read the value from the static offsets vector buffer and make it a smi.
  __ movl(rdi, Operand(rcx, rdx, times_int_size, 0));
  __ Integer32ToSmi(rdi, rdi);
  // Store the smi value in the last match info.
  __ movq(FieldOperand(rbx,
                       rdx,
                       times_pointer_size,
                       RegExpImpl::kFirstCaptureOffset),
          rdi);
  __ jmp(&next_capture);
  __ bind(&done);

  // Return last match info.
  __ movq(rax, Operand(rsp, kLastMatchInfoOffset));
  __ ret(4 * kPointerSize);

  __ bind(&exception);
  // Result must now be exception. If there is no pending exception already, a
  // stack overflow (on the backtrack stack) was detected in RegExp code but
  // the exception has not been created yet. Handle that in the runtime system.
  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
  ExternalReference pending_exception_address(
      Isolate::kPendingExceptionAddress, isolate);
  Operand pending_exception_operand =
      masm->ExternalOperand(pending_exception_address, rbx);
  __ movq(rax, pending_exception_operand);
  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
  __ cmpq(rax, rdx);
  __ j(equal, &runtime);
  __ movq(pending_exception_operand, rdx);

  __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
  Label termination_exception;
  __ j(equal, &termination_exception, Label::kNear);
  __ Throw(rax);

  __ bind(&termination_exception);
  __ ThrowUncatchable(rax);

  // External string. Short external strings have already been ruled out.
  // rdi: subject string (expected to be external)
  __ bind(&external_string);
  __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
  __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(rbx, Immediate(kIsIndirectStringMask));
    __ Assert(zero, "external string expected, but not found");
  }
  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(rbx, Immediate(kStringEncodingMask));
  __ j(not_zero, &seq_ascii_string);
  __ jmp(&seq_two_byte_string);

  // Do the runtime call to execute the regexp.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
#endif  // V8_INTERPRETED_REGEXP
}
void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
  const int kMaxInlineLength = 100;
  Label slowcase;
  Label done;
  __ movq(r8, Operand(rsp, kPointerSize * 3));
  __ JumpIfNotSmi(r8, &slowcase);
  __ SmiToInteger32(rbx, r8);
  __ cmpl(rbx, Immediate(kMaxInlineLength));
  __ j(above, &slowcase);
  // Smi-tagging is equivalent to multiplying by 2.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize == 1);
  // Allocate RegExpResult followed by FixedArray with size in rbx.
  // JSArray:   [Map][empty properties][Elements][Length-smi][index][input]
  // Elements:  [Map][Length][..elements..]
  __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
                        times_pointer_size,
                        rbx,  // In: Number of elements.
                        rax,  // Out: Start of allocation (tagged).
                        rcx,  // Out: End of allocation.
                        rdx,  // Scratch register
                        &slowcase,
                        TAG_OBJECT);
  // rax: Start of allocated area, object-tagged.
  // rbx: Number of array elements as int32.
  // r8: Number of array elements as smi.

  // Set JSArray map to global.regexp_result_map().
  __ movq(rdx, ContextOperand(rsi, Context::GLOBAL_INDEX));
  __ movq(rdx, FieldOperand(rdx, GlobalObject::kGlobalContextOffset));
  __ movq(rdx, ContextOperand(rdx, Context::REGEXP_RESULT_MAP_INDEX));
  __ movq(FieldOperand(rax, HeapObject::kMapOffset), rdx);

  // Set empty properties FixedArray.
  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
  __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);

  // Set elements to point to FixedArray allocated right after the JSArray.
  __ lea(rcx, Operand(rax, JSRegExpResult::kSize));
  __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx);

  // Set input, index and length fields from arguments.
  __ movq(r8, Operand(rsp, kPointerSize * 1));
  __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8);
  __ movq(r8, Operand(rsp, kPointerSize * 2));
  __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8);
  __ movq(r8, Operand(rsp, kPointerSize * 3));
  __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8);

  // Fill out the elements FixedArray.
  // rax: JSArray.
  // rcx: FixedArray.
  // rbx: Number of elements in array as int32.

  // Set map.
  __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
  __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister);
  // Set length.
  __ Integer32ToSmi(rdx, rbx);
  __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
  // Fill contents of fixed-array with the-hole.
  __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
  __ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
  // Fill fixed array elements with hole.
  // rax: JSArray.
  // rbx: Number of elements in array that remains to be filled, as int32.
  // rcx: Start of elements in FixedArray.
  // rdx: the hole.
  Label loop;
  __ testl(rbx, rbx);
  __ bind(&loop);
  __ j(less_equal, &done);  // Jump if rbx is negative or zero.
  __ subl(rbx, Immediate(1));
  __ movq(Operand(rcx, rbx, times_pointer_size, 0), rdx);
  __ jmp(&loop);

  __ bind(&done);
  __ ret(3 * kPointerSize);

  __ bind(&slowcase);
  __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
}
void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
                                                         Register object,
                                                         Register result,
                                                         Register scratch1,
                                                         Register scratch2,
                                                         bool object_is_smi,
                                                         Label* not_found) {
  // Use of registers. Register result is used as a temporary.
  Register number_string_cache = result;
  Register mask = scratch1;
  Register scratch = scratch2;

  // Load the number string cache.
  __ LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);

  // Make the hash mask from the length of the number string cache. It
  // contains two elements (number and string) for each cache entry.
  __ SmiToInteger32(
      mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
  __ shrl(mask, Immediate(1));
  __ subq(mask, Immediate(1));  // Make mask.

  // Calculate the entry in the number string cache. The hash value in the
  // number string cache for smis is just the smi value, and the hash for
  // doubles is the xor of the upper and lower words. See
  // Heap::GetNumberStringCache.
  Label is_smi;
  Label load_result_from_cache;
  Factory* factory = masm->isolate()->factory();
  if (!object_is_smi) {
    __ JumpIfSmi(object, &is_smi);
    __ CheckMap(object,
                factory->heap_number_map(),
                not_found,
                DONT_DO_SMI_CHECK);

    STATIC_ASSERT(8 == kDoubleSize);
    __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
    __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
    GenerateConvertHashCodeToIndex(masm, scratch, mask);

    Register index = scratch;
    Register probe = mask;
    __ movq(probe,
            FieldOperand(number_string_cache,
                         index,
                         times_1,
                         FixedArray::kHeaderSize));
    __ JumpIfSmi(probe, not_found);
    __ movsd(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
    __ movsd(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
    __ ucomisd(xmm0, xmm1);
    __ j(parity_even, not_found);  // Bail out if NaN is involved.
    __ j(not_equal, not_found);  // The cache did not contain this value.
    __ jmp(&load_result_from_cache);
  }

  __ bind(&is_smi);
  __ SmiToInteger32(scratch, object);
  GenerateConvertHashCodeToIndex(masm, scratch, mask);

  Register index = scratch;
  // Check if the entry is the smi we are looking for.
  __ cmpq(object,
          FieldOperand(number_string_cache,
                       index,
                       times_1,
                       FixedArray::kHeaderSize));
  __ j(not_equal, not_found);

  // Get the result from the cache.
  __ bind(&load_result_from_cache);
  __ movq(result,
          FieldOperand(number_string_cache,
                       index,
                       times_1,
                       FixedArray::kHeaderSize + kPointerSize));
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->number_to_string_native(), 1);
}
void NumberToStringStub::GenerateConvertHashCodeToIndex(MacroAssembler* masm,
                                                        Register hash,
                                                        Register mask) {
  __ and_(hash, mask);
  // Each entry in string cache consists of two pointer sized fields,
  // but times_twice_pointer_size (multiplication by 16) scale factor
  // is not supported by addrmode on x64 platform.
  // So we have to premultiply entry index before lookup.
  __ shl(hash, Immediate(kPointerSizeLog2 + 1));
}
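
// Taken together, the two helpers above compute the cache slot as in this
// C++ sketch (hypothetical, illustration only):
//
//   uint32_t NumberStringCacheOffset(double value, uint32_t mask) {
//     uint64_t bits;
//     memcpy(&bits, &value, sizeof(bits));
//     uint32_t hash = static_cast<uint32_t>(bits >> 32) ^
//                     static_cast<uint32_t>(bits);       // xor of the halves
//     // Each entry is a (number, string) pair, i.e. two pointers, so the
//     // masked hash is premultiplied by 2 * kPointerSize for addressing.
//     return (hash & mask) * 2 * kPointerSize;
//   }
//
// For smis the hash is simply the untagged smi value instead of the xor.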
void NumberToStringStub::Generate(MacroAssembler* masm) {
  Label runtime;

  __ movq(rbx, Operand(rsp, kPointerSize));

  // Generate code to lookup number in the number string cache.
  GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime);
  __ ret(1 * kPointerSize);

  __ bind(&runtime);
  // Handle number to string in the runtime system if not found in the cache.
  __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
}
static int NegativeComparisonResult(Condition cc) {
  ASSERT(cc != equal);
  ASSERT((cc == less) || (cc == less_equal)
      || (cc == greater) || (cc == greater_equal));
  return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}
void CompareStub::Generate(MacroAssembler* masm) {
  ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));

  Label check_unequal_objects, done;
  Factory* factory = masm->isolate()->factory();

  // Compare two smis if required.
  if (include_smi_compare_) {
    Label non_smi, smi_done;
    __ JumpIfNotBothSmi(rax, rdx, &non_smi);
    __ subq(rdx, rax);
    __ j(no_overflow, &smi_done);
    __ not_(rdx);  // Correct sign in case of overflow. rdx cannot be 0 here.
    __ bind(&smi_done);
    __ movq(rax, rdx);
    __ ret(0);
    __ bind(&non_smi);
  } else if (FLAG_debug_code) {
    Label ok;
    __ JumpIfNotSmi(rdx, &ok);
    __ JumpIfNotSmi(rax, &ok);
    __ Abort("CompareStub: smi operands");
    __ bind(&ok);
  }
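
  // The overflow fix-up in the smi fast case works because when rdx - rax
  // overflows, the sign of the result is exactly wrong, and ~x flips the
  // sign bit while never producing zero. C++ sketch (hypothetical,
  // illustration only):
  //
  //   int64_t SmiCompare(int64_t left, int64_t right) {  // <0, 0, >0
  //     int64_t diff;
  //     if (!__builtin_sub_overflow(left, right, &diff)) return diff;
  //     return ~diff;  // overflowed: ~diff has the correct sign, nonzero
  //   }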
  // The compare stub returns a positive, negative, or zero 64-bit integer
  // value in rax, corresponding to result of comparing the two inputs.
  // NOTICE! This code is only reached after a smi-fast-case check, so
  // it is certain that at least one operand isn't a smi.

  // Two identical objects are equal unless they are both NaN or undefined.
  {
    Label not_identical;
    __ cmpq(rax, rdx);
    __ j(not_equal, &not_identical, Label::kNear);

    if (cc_ != equal) {
      // Check for undefined. undefined OP undefined is false even though
      // undefined == undefined.
      Label check_for_nan;
      __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
      __ j(not_equal, &check_for_nan, Label::kNear);
      __ Set(rax, NegativeComparisonResult(cc_));
      __ ret(0);
      __ bind(&check_for_nan);
    }

    // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
    // so we do the second best thing - test it ourselves.
    // Note: if cc_ != equal, never_nan_nan_ is not used.
    // We cannot set rax to EQUAL until just before return because
    // rax must be unchanged on jump to not_identical.
    if (never_nan_nan_ && (cc_ == equal)) {
      __ Set(rax, EQUAL);
      __ ret(0);
    } else {
      Label heap_number;
      // If it's not a heap number, then return equal for (in)equality operator.
      __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
             factory->heap_number_map());
      __ j(equal, &heap_number, Label::kNear);
      if (cc_ != equal) {
        // Call runtime on identical objects. Otherwise return equal.
        __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
        __ j(above_equal, &not_identical, Label::kNear);
      }
      __ Set(rax, EQUAL);
      __ ret(0);

      __ bind(&heap_number);
      // It is a heap number, so return equal if it's not NaN.
      // For NaN, return 1 for every condition except greater and
      // greater-equal. Return -1 for them, so the comparison yields
      // false for all conditions except not-equal.
      __ Set(rax, EQUAL);
      __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
      __ ucomisd(xmm0, xmm0);
      __ setcc(parity_even, rax);
      // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
      if (cc_ == greater_equal || cc_ == greater) {
        __ neg(rax);
      }
      __ ret(0);
    }

    __ bind(&not_identical);
  }

  if (cc_ == equal) {  // Both strict and non-strict.
    Label slow;  // Fallthrough label.

    // If we're doing a strict equality comparison, we don't have to do
    // type conversion, so we generate code to do fast comparison for objects
    // and oddballs. Non-smi numbers and strings still go through the usual
    // slow-case code.
    if (strict_) {
      // If either is a Smi (we know that not both are), then they can only
      // be equal if the other is a HeapNumber. If so, use the slow case.
      {
        Label not_smis;
        __ SelectNonSmi(rbx, rax, rdx, &not_smis);

        // Check if the non-smi operand is a heap number.
        __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
               factory->heap_number_map());
        // If heap number, handle it in the slow case.
        __ j(equal, &slow);
        // Return non-equal. ebx (the lower half of rbx) is not zero.
        __ movq(rax, rbx);
        __ ret(0);

        __ bind(&not_smis);
      }

      // If either operand is a JSObject or an oddball value, then they are not
      // equal since their pointers are different.
      // There is no test for undetectability in strict equality.

      // If the first object is a JS object, we have done pointer comparison.
      STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
      Label first_non_object;
      __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
      __ j(below, &first_non_object, Label::kNear);
      // Return non-zero (eax (not rax) is not zero)
      Label return_not_equal;
      STATIC_ASSERT(kHeapObjectTag != 0);
      __ bind(&return_not_equal);
      __ ret(0);

      __ bind(&first_non_object);
      // Check for oddballs: true, false, null, undefined.
      __ CmpInstanceType(rcx, ODDBALL_TYPE);
      __ j(equal, &return_not_equal);

      __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
      __ j(above_equal, &return_not_equal);

      // Check for oddballs: true, false, null, undefined.
      __ CmpInstanceType(rcx, ODDBALL_TYPE);
      __ j(equal, &return_not_equal);

      // Fall through to the general case.
    }
    __ bind(&slow);
  }
3448 if (include_number_compare_) {
3449 Label non_number_comparison;
3451 FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
3454 __ ucomisd(xmm0, xmm1);
3456 // Don't base result on EFLAGS when a NaN is involved.
3457 __ j(parity_even, &unordered, Label::kNear);
3458 // Return a result of -1, 0, or 1, based on EFLAGS.
3459 __ setcc(above, rax);
3460 __ setcc(below, rcx);
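
    // The two setcc instructions implement the classic branchless three-way
    // compare. C++ sketch (hypothetical, illustration only):
    //
    //   int CompareDoubles(double a, double b) {
    //     // NaN is excluded by the parity check above.
    //     return (a > b) - (a < b);  // setcc(above) - setcc(below)
    //   }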
3464 // If one of the numbers was NaN, then the result is always false.
3465 // The cc is never not-equal.
3466 __ bind(&unordered);
3467 ASSERT(cc_ != not_equal);
3468 if (cc_ == less || cc_ == less_equal) {
3475 // The number comparison code did not provide a valid result.
3476 __ bind(&non_number_comparison);
3479 // Fast negative check for symbol-to-symbol equality.
3480 Label check_for_strings;
3482 BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
3483 BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
3485 // We've already checked for object identity, so if both operands
3486 // are symbols they aren't equal. Register eax (not rax) already holds a
3487 // non-zero value, which indicates not equal, so just return.
3491 __ bind(&check_for_strings);
3493 __ JumpIfNotBothSequentialAsciiStrings(
3494 rdx, rax, rcx, rbx, &check_unequal_objects);
3496 // Inline comparison of ASCII strings.
3498 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
3504 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
3514 __ Abort("Unexpected fall-through from string comparison");
3517 __ bind(&check_unequal_objects);
3518 if (cc_ == equal && !strict_) {
3519 // Not strict equality. Objects are unequal if
3520 // they are both JSObjects and not undetectable,
3521 // and their pointers are different.
3522 Label not_both_objects, return_unequal;
3523 // At most one is a smi, so we can test for smi by adding the two.
3524 // A smi plus a heap object has the low bit set, a heap object plus
3525 // a heap object has the low bit clear.
3526 STATIC_ASSERT(kSmiTag == 0);
3527 STATIC_ASSERT(kSmiTagMask == 1);
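// Worked example of the trick above: a smi ends in bit 0 and a heap object
// in bit 1, so for the sum computed into rcx (addresses illustrative):
//   smi (....0) + heap object (....1)  -> low bit 1: not both objects
//   heap object + heap object          -> low bit 0: both are heap objects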
3528 __ lea(rcx, Operand(rax, rdx, times_1, 0));
3529 __ testb(rcx, Immediate(kSmiTagMask));
3530 __ j(not_zero, &not_both_objects, Label::kNear);
3531 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rbx);
3532 __ j(below, &not_both_objects, Label::kNear);
3533 __ CmpObjectType(rdx, FIRST_SPEC_OBJECT_TYPE, rcx);
3534 __ j(below, &not_both_objects, Label::kNear);
3535 __ testb(FieldOperand(rbx, Map::kBitFieldOffset),
3536 Immediate(1 << Map::kIsUndetectable));
3537 __ j(zero, &return_unequal, Label::kNear);
3538 __ testb(FieldOperand(rcx, Map::kBitFieldOffset),
3539 Immediate(1 << Map::kIsUndetectable));
3540 __ j(zero, &return_unequal, Label::kNear);
3541 // The objects are both undetectable, so they both compare as the value
3542 // undefined, and are equal.
3544 __ bind(&return_unequal);
3545 // Return non-equal by returning the non-zero object pointer in rax,
3546 // or return equal if we fell through to here.
3548 __ bind(&not_both_objects);
3551 // Push arguments below the return address to prepare jump to builtin.
3556 // Figure out which native to call and set up the arguments.
3557 Builtins::JavaScript builtin;
3559 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
3561 builtin = Builtins::COMPARE;
3562 __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
3565 // Restore return address on the stack.
3568 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
3569 // tagged as a small integer.
3570 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
3574 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
3578 __ JumpIfSmi(object, label);
3579 __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
3581 FieldOperand(scratch, Map::kInstanceTypeOffset));
3582 // Ensure that no non-strings have the symbol bit set.
3583 STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
3584 STATIC_ASSERT(kSymbolTag != 0);
3585 __ testb(scratch, Immediate(kIsSymbolMask));
3590 void StackCheckStub::Generate(MacroAssembler* masm) {
3591 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
3595 void InterruptStub::Generate(MacroAssembler* masm) {
3596 __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
3600 static void GenerateRecordCallTarget(MacroAssembler* masm) {
3601 // Cache the called function in a global property cell. Cache states
3602 // are uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
3604 // rbx : cache cell for call target
3605 // rdi : the function to call
3606 Isolate* isolate = masm->isolate();
3607 Label initialize, done;
3609 // Load the cache state into rcx.
3610 __ movq(rcx, FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset));
3612 // A monomorphic cache hit or an already megamorphic state: invoke the
3613 // function without changing the state.
3615 __ j(equal, &done, Label::kNear);
3616 __ Cmp(rcx, TypeFeedbackCells::MegamorphicSentinel(isolate));
3617 __ j(equal, &done, Label::kNear);
3619 // A monomorphic miss (i.e., here the cache is not uninitialized) goes megamorphic.
3621 __ Cmp(rcx, TypeFeedbackCells::UninitializedSentinel(isolate));
3622 __ j(equal, &initialize, Label::kNear);
3623 // MegamorphicSentinel is an immortal immovable object (undefined) so no
3624 // write-barrier is needed.
3625 __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
3626 TypeFeedbackCells::MegamorphicSentinel(isolate));
3627 __ jmp(&done, Label::kNear);
3629 // An uninitialized cache is patched with the function.
3630 __ bind(&initialize);
3631 __ movq(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset), rdi);
3632 // No need for a write barrier here - cells are rescanned.
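// C-style sketch of the cache-state machine implemented above (illustrative
// only; sentinel names as used in this function):
//   if (cell == function || cell == megamorphic_sentinel) return;    // hit
//   if (cell == uninitialized_sentinel) { cell = function; return; } // init
//   cell = megamorphic_sentinel;                          // monomorphic miss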
3638 void CallFunctionStub::Generate(MacroAssembler* masm) {
3639 // rbx : cache cell for call target
3640 // rdi : the function to call
3641 Isolate* isolate = masm->isolate();
3642 Label slow, non_function;
3644 // The receiver might implicitly be the global object. This is
3645 // indicated by passing the hole as the receiver to the call
3647 if (ReceiverMightBeImplicit()) {
3649 // Get the receiver from the stack.
3650 // +1 ~ return address
3651 __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize));
3652 // Call as function is indicated with the hole.
3653 __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
3654 __ j(not_equal, &call, Label::kNear);
3655 // Patch the receiver on the stack with the global receiver object.
3656 __ movq(rcx, GlobalObjectOperand());
3657 __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
3658 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx);
3662 // Check that the function really is a JavaScript function.
3663 __ JumpIfSmi(rdi, &non_function);
3664 // Goto slow case if we do not have a function.
3665 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
3666 __ j(not_equal, &slow);
3668 if (RecordCallTarget()) {
3669 GenerateRecordCallTarget(masm);
3672 // Fast-case: Just invoke the function.
3673 ParameterCount actual(argc_);
3675 if (ReceiverMightBeImplicit()) {
3676 Label call_as_function;
3677 __ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
3678 __ j(equal, &call_as_function);
3679 __ InvokeFunction(rdi,
3684 __ bind(&call_as_function);
3686 __ InvokeFunction(rdi,
3692 // Slow-case: Non-function called.
3694 if (RecordCallTarget()) {
3695 // If there is a call target cache, mark it megamorphic in the
3696 // non-function case. MegamorphicSentinel is an immortal immovable
3697 // object (undefined) so no write barrier is needed.
3698 __ Move(FieldOperand(rbx, JSGlobalPropertyCell::kValueOffset),
3699 TypeFeedbackCells::MegamorphicSentinel(isolate));
3701 // Check for function proxy.
3702 __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
3703 __ j(not_equal, &non_function);
3705 __ push(rdi); // put proxy as additional argument under return address
3707 __ Set(rax, argc_ + 1);
3709 __ SetCallKind(rcx, CALL_AS_METHOD);
3710 __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY);
3712 Handle<Code> adaptor =
3713 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
3714 __ jmp(adaptor, RelocInfo::CODE_TARGET);
3717 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
3718 // of the original receiver from the call site).
3719 __ bind(&non_function);
3720 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi);
3723 __ SetCallKind(rcx, CALL_AS_METHOD);
3724 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION);
3725 Handle<Code> adaptor =
3726 Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline();
3727 __ Jump(adaptor, RelocInfo::CODE_TARGET);
3731 void CallConstructStub::Generate(MacroAssembler* masm) {
3732 // rax : number of arguments
3733 // rbx : cache cell for call target
3734 // rdi : constructor function
3735 Label slow, non_function_call;
3737 // Check that function is not a smi.
3738 __ JumpIfSmi(rdi, &non_function_call);
3739 // Check that function is a JSFunction.
3740 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
3741 __ j(not_equal, &slow);
3743 if (RecordCallTarget()) {
3744 GenerateRecordCallTarget(masm);
3747 // Jump to the function-specific construct stub.
3748 __ movq(rbx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
3749 __ movq(rbx, FieldOperand(rbx, SharedFunctionInfo::kConstructStubOffset));
3750 __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
3753 // rdi: called object
3754 // rax: number of arguments
3758 __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE);
3759 __ j(not_equal, &non_function_call);
3760 __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
3763 __ bind(&non_function_call);
3764 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
3766 // Set expected number of arguments to zero (not changing rax).
3768 __ SetCallKind(rcx, CALL_AS_METHOD);
3769 __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
3770 RelocInfo::CODE_TARGET);
3774 bool CEntryStub::NeedsImmovableCode() {
3779 bool CEntryStub::IsPregenerated() {
3781 return result_size_ == 1;
3788 void CodeStub::GenerateStubsAheadOfTime() {
3789 CEntryStub::GenerateAheadOfTime();
3790 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
3791 // It is important that the store buffer overflow stubs are generated first.
3792 RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
3796 void CodeStub::GenerateFPStubs() {
3800 void CEntryStub::GenerateAheadOfTime() {
3801 CEntryStub stub(1, kDontSaveFPRegs);
3802 stub.GetCode()->set_is_pregenerated(true);
3803 CEntryStub save_doubles(1, kSaveFPRegs);
3804 save_doubles.GetCode()->set_is_pregenerated(true);
3808 void CEntryStub::GenerateCore(MacroAssembler* masm,
3809 Label* throw_normal_exception,
3810 Label* throw_termination_exception,
3811 Label* throw_out_of_memory_exception,
3813 bool always_allocate_scope) {
3814 // rax: result parameter for PerformGC, if any.
3815 // rbx: pointer to C function (C callee-saved).
3816 // rbp: frame pointer (restored after C call).
3817 // rsp: stack pointer (restored after C call).
3818 // r14: number of arguments including receiver (C callee-saved).
3819 // r15: pointer to the first argument (C callee-saved).
3820 // This pointer is reused in LeaveExitFrame(), so it is stored in a
3821 // callee-saved register.
3823 // Simple results returned in rax (both AMD64 and Win64 calling conventions).
3824 // Complex results must be written to address passed as first argument.
3825 // AMD64 calling convention: a struct of two pointers in rax+rdx
3827 // Check stack alignment.
3828 if (FLAG_debug_code) {
3829 __ CheckStackAlignment();
3833 // Pass failure code returned from last attempt as first argument to
3834 // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
3835 // stack is known to be aligned. This function takes one argument which is
3836 // passed in a register.
3842 __ movq(kScratchRegister,
3843 FUNCTION_ADDR(Runtime::PerformGC),
3844 RelocInfo::RUNTIME_ENTRY);
3845 __ call(kScratchRegister);
3848 ExternalReference scope_depth =
3849 ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
3850 if (always_allocate_scope) {
3851 Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
3852 __ incl(scope_depth_operand);
3857 // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9
3858 // Store Arguments object on stack, below the 4 WIN64 ABI parameter slots.
3859 __ movq(StackSpaceOperand(0), r14); // argc.
3860 __ movq(StackSpaceOperand(1), r15); // argv.
3861 if (result_size_ < 2) {
3862 // Pass a pointer to the Arguments object as the first argument.
3863 // Return result in single register (rax).
3864 __ lea(rcx, StackSpaceOperand(0));
3865 __ LoadAddress(rdx, ExternalReference::isolate_address());
3867 ASSERT_EQ(2, result_size_);
3868 // Pass a pointer to the result location as the first argument.
3869 __ lea(rcx, StackSpaceOperand(2));
3870 // Pass a pointer to the Arguments object as the second argument.
3871 __ lea(rdx, StackSpaceOperand(0));
3872 __ LoadAddress(r8, ExternalReference::isolate_address());
3876 // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9.
3877 __ movq(rdi, r14); // argc.
3878 __ movq(rsi, r15); // argv.
3879 __ movq(rdx, ExternalReference::isolate_address());
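// Shapes of the C call being set up above (illustrative):
//   AMD64:                     rax(:rdx) = builtin(argc, argv, isolate);
//   Win64, result_size_ == 1:  rax = builtin(&args, isolate);
//   Win64, result_size_ == 2:  builtin(&result, &args, isolate);
// where "result" is the two-pointer struct read back from the stack below.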
3882 // Result is in rax - do not destroy this register!
3884 if (always_allocate_scope) {
3885 Operand scope_depth_operand = masm->ExternalOperand(scope_depth);
3886 __ decl(scope_depth_operand);
3889 // Check for failure result.
3890 Label failure_returned;
3891 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
3893 // If the return value is on the stack, read it into registers.
3894 if (result_size_ > 1) {
3895 ASSERT_EQ(2, result_size_);
3896 // Read result values stored on stack. Result is stored
3897 // above the four argument mirror slots and the two
3898 // Arguments object slots.
3899 __ movq(rax, Operand(rsp, 6 * kPointerSize));
3900 __ movq(rdx, Operand(rsp, 7 * kPointerSize));
3903 __ lea(rcx, Operand(rax, 1));
3904 // Lower 2 bits of rcx are 0 iff rax has failure tag.
3905 __ testl(rcx, Immediate(kFailureTagMask));
3906 __ j(zero, &failure_returned);
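// Worked example of the tag test above: the STATIC_ASSERT before the call
// guarantees ((kFailureTag + 1) & kFailureTagMask) == 0, so
//   rcx = rax + 1 has all tested tag bits zero  <=>  rax is failure-tagged.
// Smis and ordinary heap object pointers leave at least one tested bit set.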
3908 // Exit the JavaScript to C++ exit frame.
3909 __ LeaveExitFrame(save_doubles_);
3912 // Handling of failure.
3913 __ bind(&failure_returned);
3916 // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
3917 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
3918 __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
3919 __ j(zero, &retry, Label::kNear);
3921 // Special handling of out of memory exceptions.
3922 __ movq(kScratchRegister, Failure::OutOfMemoryException(), RelocInfo::NONE);
3923 __ cmpq(rax, kScratchRegister);
3924 __ j(equal, throw_out_of_memory_exception);
3926 // Retrieve the pending exception and clear the variable.
3927 ExternalReference pending_exception_address(
3928 Isolate::kPendingExceptionAddress, masm->isolate());
3929 Operand pending_exception_operand =
3930 masm->ExternalOperand(pending_exception_address);
3931 __ movq(rax, pending_exception_operand);
3932 __ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
3933 __ movq(pending_exception_operand, rdx);
3935 // Special handling of termination exceptions which are uncatchable
3936 // by JavaScript code.
3937 __ CompareRoot(rax, Heap::kTerminationExceptionRootIndex);
3938 __ j(equal, throw_termination_exception);
3940 // Handle normal exception.
3941 __ jmp(throw_normal_exception);
3948 void CEntryStub::Generate(MacroAssembler* masm) {
3949 // rax: number of arguments including receiver
3950 // rbx: pointer to C function (C callee-saved)
3951 // rbp: frame pointer of calling JS frame (restored after C call)
3952 // rsp: stack pointer (restored after C call)
3953 // rsi: current context (restored)
3955 // NOTE: Invocations of builtins may return failure objects
3956 // instead of a proper result. The builtin entry handles
3957 // this by performing a garbage collection and retrying the builtin once.
3960 // Enter the exit frame that transitions from JavaScript to C++.
3962 int arg_stack_space = (result_size_ < 2 ? 2 : 4);
3964 int arg_stack_space = 0;
3966 __ EnterExitFrame(arg_stack_space, save_doubles_);
3968 // rax: Holds the context at this point, but should not be used.
3969 // On entry to code generated by GenerateCore, it must hold
3970 // a failure result if the collect_garbage argument to GenerateCore
3971 // is true. This failure result can be the result of code
3972 // generated by a previous call to GenerateCore. The value
3973 // of rax is then passed to Runtime::PerformGC.
3974 // rbx: pointer to builtin function (C callee-saved).
3975 // rbp: frame pointer of exit frame (restored after C call).
3976 // rsp: stack pointer (restored after C call).
3977 // r14: number of arguments including receiver (C callee-saved).
3978 // r15: argv pointer (C callee-saved).
3980 Label throw_normal_exception;
3981 Label throw_termination_exception;
3982 Label throw_out_of_memory_exception;
3984 // Call into the runtime system.
3986 &throw_normal_exception,
3987 &throw_termination_exception,
3988 &throw_out_of_memory_exception,
3992 // Do space-specific GC and retry runtime call.
3994 &throw_normal_exception,
3995 &throw_termination_exception,
3996 &throw_out_of_memory_exception,
4000 // Do full GC and retry runtime call one final time.
4001 Failure* failure = Failure::InternalError();
4002 __ movq(rax, failure, RelocInfo::NONE);
4004 &throw_normal_exception,
4005 &throw_termination_exception,
4006 &throw_out_of_memory_exception,
4010 __ bind(&throw_out_of_memory_exception);
4011 // Set external caught exception to false.
4012 Isolate* isolate = masm->isolate();
4013 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4015 __ Set(rax, static_cast<int64_t>(false));
4016 __ Store(external_caught, rax);
4018 // Set pending exception and rax to out of memory exception.
4019 ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4021 __ movq(rax, Failure::OutOfMemoryException(), RelocInfo::NONE);
4022 __ Store(pending_exception, rax);
4023 // Fall through to the next label.
4025 __ bind(&throw_termination_exception);
4026 __ ThrowUncatchable(rax);
4028 __ bind(&throw_normal_exception);
4033 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
4034 Label invoke, handler_entry, exit;
4035 Label not_outermost_js, not_outermost_js_2;
4036 { // NOLINT. Scope block confuses linter.
4037 MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
4042 // Push the stack frame type marker twice.
4043 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
4044 // Scratch register is neither callee-save nor an argument register on any
4045 // platform. It's free to use at this point.
4046 // Cannot use smi-register for loading yet.
4047 __ movq(kScratchRegister,
4048 reinterpret_cast<uint64_t>(Smi::FromInt(marker)),
4050 __ push(kScratchRegister); // context slot
4051 __ push(kScratchRegister); // function slot
4052 // Save callee-saved registers (X64/Win64 calling conventions).
4058 __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
4059 __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
4062 // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low 64 bits are
4063 // callee save as well.
4065 // Set up the roots and smi constant registers.
4066 // Needs to be done before any further smi loads.
4067 __ InitializeSmiConstantRegister();
4068 __ InitializeRootRegister();
4071 Isolate* isolate = masm->isolate();
4073 // Save copies of the top frame descriptor on the stack.
4074 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
4076 Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
4077 __ push(c_entry_fp_operand);
4080 // If this is the outermost JS call, set js_entry_sp value.
4081 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
4082 __ Load(rax, js_entry_sp);
4084 __ j(not_zero, &not_outermost_js);
4085 __ Push(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
4087 __ Store(js_entry_sp, rax);
4090 __ bind(&not_outermost_js);
4091 __ Push(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
4094 // Jump to a faked try block that does the invoke, with a faked catch
4095 // block that sets the pending exception.
4097 __ bind(&handler_entry);
4098 handler_offset_ = handler_entry.pos();
4099 // Caught exception: Store result (exception) in the pending exception
4100 // field in the JSEnv and return a failure sentinel.
4101 ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4103 __ Store(pending_exception, rax);
4104 __ movq(rax, Failure::Exception(), RelocInfo::NONE);
4107 // Invoke: Link this frame into the handler chain. There's only one
4108 // handler block in this code object, so its index is 0.
4110 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
4112 // Clear any pending exceptions.
4113 __ LoadRoot(rax, Heap::kTheHoleValueRootIndex);
4114 __ Store(pending_exception, rax);
4116 // Fake a receiver (NULL).
4117 __ push(Immediate(0)); // receiver
4119 // Invoke the function by calling through JS entry trampoline builtin and
4120 // pop the faked function when we return. We load the address from an
4121 // external reference instead of inlining the call target address directly
4122 // in the code, because the builtin stubs may not have been generated yet
4123 // at the time this code is generated.
4125 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
4127 __ Load(rax, construct_entry);
4129 ExternalReference entry(Builtins::kJSEntryTrampoline, isolate);
4130 __ Load(rax, entry);
4132 __ lea(kScratchRegister, FieldOperand(rax, Code::kHeaderSize));
4133 __ call(kScratchRegister);
4135 // Unlink this frame from the handler chain.
4139 // Check if the current stack frame is marked as the outermost JS frame.
4141 __ Cmp(rbx, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
4142 __ j(not_equal, &not_outermost_js_2);
4143 __ movq(kScratchRegister, js_entry_sp);
4144 __ movq(Operand(kScratchRegister, 0), Immediate(0));
4145 __ bind(&not_outermost_js_2);
4147 // Restore the top frame descriptor from the stack.
4148 { Operand c_entry_fp_operand = masm->ExternalOperand(c_entry_fp);
4149 __ pop(c_entry_fp_operand);
4152 // Restore callee-saved registers (X64 conventions).
4155 // Callee saved in Win64 ABI, arguments/volatile in AMD64 ABI.
4163 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers
4165 // Restore frame pointer and return.
4171 void InstanceofStub::Generate(MacroAssembler* masm) {
4172 // Implements "value instanceof function" operator.
4173 // Expected input state with no inline cache:
4174 // rsp[0] : return address
4175 // rsp[1] : function pointer
4177 // Expected input state with an inline one-element cache:
4178 // rsp[0] : return address
4179 // rsp[1] : offset from return address to location of inline cache
4180 // rsp[2] : function pointer
4182 // Returns a bitwise zero to indicate that the value
4183 // is an instance of the function, and anything else to
4184 // indicate that the value is not an instance.
4186 static const int kOffsetToMapCheckValue = 2;
4187 static const int kOffsetToResultValue = 18;
4188 // The last 4 bytes of the instruction sequence
4189 // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset))
4190 // Move(kScratchRegister, FACTORY->the_hole_value())
4191 // in front of the hole value address.
4192 static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78;
4193 // The last 4 bytes of the instruction sequence
4194 // __ j(not_equal, &cache_miss);
4195 // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex);
4196 // before the offset of the hole value in the root array.
4197 static const unsigned int kWordBeforeResultValue = 0x458B4909;
4198 // Only the inline check flag is supported on X64.
4199 ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
4200 int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0;
4202 // Get the object - go slow case if it's a smi.
4205 __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space));
4206 __ JumpIfSmi(rax, &slow);
4208 // Check that the left-hand side is a JS object. Leave its map in rax.
4209 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
4211 __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
4214 // Get the prototype of the function.
4215 __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space));
4216 // rdx is function, rax is map.
4218 // If there is a call site cache, don't look in the global cache, but do the
4219 // real lookup and update the call site cache.
4220 if (!HasCallSiteInlineCheck()) {
4221 // Look up the function and the map in the instanceof cache.
4223 __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
4224 __ j(not_equal, &miss, Label::kNear);
4225 __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
4226 __ j(not_equal, &miss, Label::kNear);
4227 __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
4228 __ ret(2 * kPointerSize);
4232 __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
4234 // Check that the function prototype is a JS object.
4235 __ JumpIfSmi(rbx, &slow);
4236 __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
4238 __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
4241 // Register mapping:
4242 // rax is object map.
4244 // rbx is function prototype.
4245 if (!HasCallSiteInlineCheck()) {
4246 __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
4247 __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
4249 // Get return address and delta to inlined map check.
4250 __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4251 __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4252 if (FLAG_debug_code) {
4253 __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
4254 __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
4255 __ Assert(equal, "InstanceofStub unexpected call site cache (check).");
4257 __ movq(kScratchRegister,
4258 Operand(kScratchRegister, kOffsetToMapCheckValue));
4259 __ movq(Operand(kScratchRegister, 0), rax);
4262 __ movq(rcx, FieldOperand(rax, Map::kPrototypeOffset));
4264 // Loop through the prototype chain looking for the function prototype.
4265 Label loop, is_instance, is_not_instance;
4266 __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
4269 __ j(equal, &is_instance, Label::kNear);
4270 __ cmpq(rcx, kScratchRegister);
4271 // The code at is_not_instance assumes that kScratchRegister contains a
4272 // non-zero GCable value (the null object in this case).
4273 __ j(equal, &is_not_instance, Label::kNear);
4274 __ movq(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
4275 __ movq(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
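// The loop above is the usual instanceof walk; a C-style sketch with
// hypothetical accessors (illustrative only):
//   Object* p = object_map->prototype();            // rcx
//   for (;;) {
//     if (p == function_prototype) return IS_INSTANCE;      // rbx
//     if (p == null_value) return IS_NOT_INSTANCE;  // kScratchRegister
//     p = p->map()->prototype();
//   }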
4278 __ bind(&is_instance);
4279 if (!HasCallSiteInlineCheck()) {
4281 // Store bitwise zero in the cache. This is a Smi in GC terms.
4282 STATIC_ASSERT(kSmiTag == 0);
4283 __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
4285 // Store offset of true in the root array at the inline check site.
4286 int true_offset = 0x100 +
4287 (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
4288 // Assert it is a 1-byte signed value.
4289 ASSERT(true_offset >= 0 && true_offset < 0x100);
4290 __ movl(rax, Immediate(true_offset));
4291 __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4292 __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4293 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
4294 if (FLAG_debug_code) {
4295 __ movl(rax, Immediate(kWordBeforeResultValue));
4296 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
4297 __ Assert(equal, "InstanceofStub unexpected call site cache (mov).");
4301 __ ret(2 * kPointerSize + extra_stack_space);
4303 __ bind(&is_not_instance);
4304 if (!HasCallSiteInlineCheck()) {
4305 // We have to store a non-zero value in the cache.
4306 __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
4308 // Store offset of false in the root array at the inline check site.
4309 int false_offset = 0x100 +
4310 (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
4311 // Assert it is a 1-byte signed value.
4312 ASSERT(false_offset >= 0 && false_offset < 0x100);
4313 __ movl(rax, Immediate(false_offset));
4314 __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize));
4315 __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize));
4316 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax);
4317 if (FLAG_debug_code) {
4318 __ movl(rax, Immediate(kWordBeforeResultValue));
4319 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
4320 __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
4323 __ ret(2 * kPointerSize + extra_stack_space);
4325 // Slow-case: Go through the JavaScript implementation.
4327 if (HasCallSiteInlineCheck()) {
4328 // Remove extra value from the stack.
4333 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
4337 // Passing arguments in registers is not supported.
4338 Register InstanceofStub::left() { return no_reg; }
4341 Register InstanceofStub::right() { return no_reg; }
4344 int CompareStub::MinorKey() {
4345 // Encode the stub parameters in a unique 16-bit value. To avoid duplicate
4346 // stubs the never NaN NaN condition is only taken into account if the
4347 // condition is equals.
4348 ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
4349 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4350 return ConditionField::encode(static_cast<unsigned>(cc_))
4351 | RegisterField::encode(false) // lhs_ and rhs_ are not used
4352 | StrictField::encode(strict_)
4353 | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
4354 | IncludeNumberCompareField::encode(include_number_compare_)
4355 | IncludeSmiCompareField::encode(include_smi_compare_);
4359 // Unfortunately you have to run without snapshots to see most of these
4360 // names in the profile since most compare stubs end up in the snapshot.
4361 void CompareStub::PrintName(StringStream* stream) {
4362 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4363 const char* cc_name;
4365 case less: cc_name = "LT"; break;
4366 case greater: cc_name = "GT"; break;
4367 case less_equal: cc_name = "LE"; break;
4368 case greater_equal: cc_name = "GE"; break;
4369 case equal: cc_name = "EQ"; break;
4370 case not_equal: cc_name = "NE"; break;
4371 default: cc_name = "UnknownCondition"; break;
4373 bool is_equality = cc_ == equal || cc_ == not_equal;
4374 stream->Add("CompareStub_%s", cc_name);
4375 if (strict_ && is_equality) stream->Add("_STRICT");
4376 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
4377 if (!include_number_compare_) stream->Add("_NO_NUMBER");
4378 if (!include_smi_compare_) stream->Add("_NO_SMI");
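// Example of a name produced above (illustrative): a strict equality stub
// built without number and smi compare support prints as
//   CompareStub_EQ_STRICT_NO_NUMBER_NO_SMI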
4382 // -------------------------------------------------------------------------
4383 // StringCharCodeAtGenerator
4385 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
4388 Label got_char_code;
4389 Label sliced_string;
4391 // If the receiver is a smi trigger the non-string case.
4392 __ JumpIfSmi(object_, receiver_not_string_);
4394 // Fetch the instance type of the receiver into result register.
4395 __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
4396 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
4397 // If the receiver is not a string trigger the non-string case.
4398 __ testb(result_, Immediate(kIsNotStringMask));
4399 __ j(not_zero, receiver_not_string_);
4401 // If the index is non-smi trigger the non-smi case.
4402 __ JumpIfNotSmi(index_, &index_not_smi_);
4403 __ bind(&got_smi_index_);
4405 // Check for index out of range.
4406 __ SmiCompare(index_, FieldOperand(object_, String::kLengthOffset));
4407 __ j(above_equal, index_out_of_range_);
4409 __ SmiToInteger32(index_, index_);
4411 StringCharLoadGenerator::Generate(
4412 masm, object_, index_, result_, &call_runtime_);
4414 __ Integer32ToSmi(result_, result_);
4419 void StringCharCodeAtGenerator::GenerateSlow(
4420 MacroAssembler* masm,
4421 const RuntimeCallHelper& call_helper) {
4422 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
4424 Factory* factory = masm->isolate()->factory();
4425 // Index is not a smi.
4426 __ bind(&index_not_smi_);
4427 // If index is a heap number, try converting it to an integer.
4429 factory->heap_number_map(),
4432 call_helper.BeforeCall(masm);
4434 __ push(index_); // Consumed by runtime conversion function.
4435 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
4436 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
4438 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
4439 // NumberToSmi discards numbers that are not exact integers.
4440 __ CallRuntime(Runtime::kNumberToSmi, 1);
4442 if (!index_.is(rax)) {
4443 // Save the conversion result before the pop instructions below
4444 // have a chance to overwrite it.
4445 __ movq(index_, rax);
4448 // Reload the instance type.
4449 __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
4450 __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
4451 call_helper.AfterCall(masm);
4452 // If index is still not a smi, it must be out of range.
4453 __ JumpIfNotSmi(index_, index_out_of_range_);
4454 // Otherwise, return to the fast path.
4455 __ jmp(&got_smi_index_);
4457 // Call runtime. We get here when the receiver is a string and the
4458 // index is a number, but the code for getting the actual character
4459 // is too complex (e.g., when the string needs to be flattened).
4460 __ bind(&call_runtime_);
4461 call_helper.BeforeCall(masm);
4463 __ Integer32ToSmi(index_, index_);
4465 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
4466 if (!result_.is(rax)) {
4467 __ movq(result_, rax);
4469 call_helper.AfterCall(masm);
4472 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
4476 // -------------------------------------------------------------------------
4477 // StringCharFromCodeGenerator
4479 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
4480 // Fast case of Heap::LookupSingleCharacterStringFromCode.
4481 __ JumpIfNotSmi(code_, &slow_case_);
4482 __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
4483 __ j(above, &slow_case_);
4485 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
4486 SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
4487 __ movq(result_, FieldOperand(result_, index.reg, index.scale,
4488 FixedArray::kHeaderSize));
4489 __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
4490 __ j(equal, &slow_case_);
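// C-style sketch of the fast path above (illustrative only):
//   if (!is_smi(code) || smi_value(code) > String::kMaxAsciiCharCode)
//     goto slow_case;
//   result = single_character_string_cache[smi_value(code)];
//   if (result == undefined_value) goto slow_case;  // not cached yet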
4495 void StringCharFromCodeGenerator::GenerateSlow(
4496 MacroAssembler* masm,
4497 const RuntimeCallHelper& call_helper) {
4498 __ Abort("Unexpected fallthrough to CharFromCode slow case");
4500 __ bind(&slow_case_);
4501 call_helper.BeforeCall(masm);
4503 __ CallRuntime(Runtime::kCharFromCode, 1);
4504 if (!result_.is(rax)) {
4505 __ movq(result_, rax);
4507 call_helper.AfterCall(masm);
4510 __ Abort("Unexpected fallthrough from CharFromCode slow case");
4514 // -------------------------------------------------------------------------
4515 // StringCharAtGenerator
4517 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
4518 char_code_at_generator_.GenerateFast(masm);
4519 char_from_code_generator_.GenerateFast(masm);
4523 void StringCharAtGenerator::GenerateSlow(
4524 MacroAssembler* masm,
4525 const RuntimeCallHelper& call_helper) {
4526 char_code_at_generator_.GenerateSlow(masm, call_helper);
4527 char_from_code_generator_.GenerateSlow(masm, call_helper);
4531 void StringAddStub::Generate(MacroAssembler* masm) {
4532 Label call_runtime, call_builtin;
4533 Builtins::JavaScript builtin_id = Builtins::ADD;
4535 // Load the two arguments.
4536 __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left).
4537 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right).
4539 // Make sure that both arguments are strings if not known in advance.
4540 if (flags_ == NO_STRING_ADD_FLAGS) {
4541 __ JumpIfSmi(rax, &call_runtime);
4542 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8);
4543 __ j(above_equal, &call_runtime);
4545 // First argument is a string; test the second.
4546 __ JumpIfSmi(rdx, &call_runtime);
4547 __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9);
4548 __ j(above_equal, &call_runtime);
4550 // Here at least one of the arguments is definitely a string.
4551 // We convert the one that is not known to be a string.
4552 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
4553 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
4554 GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi,
4556 builtin_id = Builtins::STRING_ADD_RIGHT;
4557 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
4558 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
4559 GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi,
4561 builtin_id = Builtins::STRING_ADD_LEFT;
4565 // Both arguments are strings.
4566 // rax: first string
4567 // rdx: second string
4568 // Check if either of the strings is empty. In that case return the other.
4569 Label second_not_zero_length, both_not_zero_length;
4570 __ movq(rcx, FieldOperand(rdx, String::kLengthOffset));
4572 __ j(not_zero, &second_not_zero_length, Label::kNear);
4573 // Second string is empty, result is first string which is already in rax.
4574 Counters* counters = masm->isolate()->counters();
4575 __ IncrementCounter(counters->string_add_native(), 1);
4576 __ ret(2 * kPointerSize);
4577 __ bind(&second_not_zero_length);
4578 __ movq(rbx, FieldOperand(rax, String::kLengthOffset));
4580 __ j(not_zero, &both_not_zero_length, Label::kNear);
4581 // First string is empty, result is second string which is in rdx.
4583 __ IncrementCounter(counters->string_add_native(), 1);
4584 __ ret(2 * kPointerSize);
4586 // Both strings are non-empty.
4587 // rax: first string
4588 // rbx: length of first string
4589 // rcx: length of second string
4590 // rdx: second string
4591 // r8: map of first string (if flags_ == NO_STRING_ADD_FLAGS)
4592 // r9: map of second string (if flags_ == NO_STRING_ADD_FLAGS)
4593 Label string_add_flat_result, longer_than_two;
4594 __ bind(&both_not_zero_length);
4596 // If arguments were known to be strings, maps are not loaded to r8 and r9
4597 // by the code above.
4598 if (flags_ != NO_STRING_ADD_FLAGS) {
4599 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset));
4600 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset));
4602 // Get the instance types of the two strings as they will be needed soon.
4603 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset));
4604 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
4606 // Look at the length of the result of adding the two strings.
4607 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
4608 __ SmiAdd(rbx, rbx, rcx);
4609 // Use the symbol table when adding two one-character strings, as it
4610 // helps later optimizations to return a symbol here.
4611 __ SmiCompare(rbx, Smi::FromInt(2));
4612 __ j(not_equal, &longer_than_two);
4614 // Check that both strings are non-external ASCII strings.
4615 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx,
4618 // Get the two characters forming the sub string.
4619 __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
4620 __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
4622 // Try to look up the two-character string in the symbol table. If it is not found
4623 // just allocate a new one.
4624 Label make_two_character_string, make_flat_ascii_string;
4625 StringHelper::GenerateTwoCharacterSymbolTableProbe(
4626 masm, rbx, rcx, r14, r11, rdi, r15, &make_two_character_string);
4627 __ IncrementCounter(counters->string_add_native(), 1);
4628 __ ret(2 * kPointerSize);
4630 __ bind(&make_two_character_string);
4632 __ AllocateAsciiString(rax, rdi, r8, r9, r11, &call_runtime);
4633 // rbx - first byte: first character
4634 // rbx - second byte: *maybe* second character
4635 // Make sure that the second byte of rbx contains the second character.
4636 __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
4637 __ shll(rcx, Immediate(kBitsPerByte));
4639 // Write both characters to the new string.
4640 __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
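// Worked example of the packing above: for "a" + "b", rbx holds 0x61 in its
// low byte and rcx holds 0x62 << 8, so the combined value is 0x6261; the
// 16-bit store writes it little-endian, i.e. bytes 0x61 0x62 ("ab") in memory.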
4641 __ IncrementCounter(counters->string_add_native(), 1);
4642 __ ret(2 * kPointerSize);
4644 __ bind(&longer_than_two);
4645 // Check if resulting string will be flat.
4646 __ SmiCompare(rbx, Smi::FromInt(ConsString::kMinLength));
4647 __ j(below, &string_add_flat_result);
4648 // Handle exceptionally long strings in the runtime system.
4649 STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
4650 __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
4651 __ j(above, &call_runtime);
4653 // If result is not supposed to be flat, allocate a cons string object. If
4654 // both strings are ASCII the result is an ASCII cons string.
4655 // rax: first string
4656 // rbx: length of resulting flat string
4657 // rdx: second string
4658 // r8: instance type of first string
4659 // r9: instance type of second string
4660 Label non_ascii, allocated, ascii_data;
4663 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
4664 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
4665 __ testl(rcx, Immediate(kStringEncodingMask));
4666 __ j(zero, &non_ascii);
4667 __ bind(&ascii_data);
4668 // Allocate an ASCII cons string.
4669 __ AllocateAsciiConsString(rcx, rdi, no_reg, &call_runtime);
4670 __ bind(&allocated);
4671 // Fill the fields of the cons string.
4672 __ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
4673 __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
4674 Immediate(String::kEmptyHashField));
4675 __ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
4676 __ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
4678 __ IncrementCounter(counters->string_add_native(), 1);
4679 __ ret(2 * kPointerSize);
4680 __ bind(&non_ascii);
4681 // At least one of the strings is two-byte. Check whether it happens
4682 // to contain only ASCII characters.
4683 // rcx: first instance type AND second instance type.
4684 // r8: first instance type.
4685 // r9: second instance type.
4686 __ testb(rcx, Immediate(kAsciiDataHintMask));
4687 __ j(not_zero, &ascii_data);
4689 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
4690 __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
4691 __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
4692 __ j(equal, &ascii_data);
4693 // Allocate a two byte cons string.
4694 __ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
4697 // We cannot encounter sliced strings or cons strings here since:
4698 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
4699 // Handle creating a flat result from either external or sequential strings.
4700 // Locate the first characters' locations.
4701 // rax: first string
4702 // rbx: length of resulting flat string as smi
4703 // rdx: second string
4704 // r8: instance type of first string
4705 // r9: instance type of second string
4706 Label first_prepared, second_prepared;
4707 Label first_is_sequential, second_is_sequential;
4708 __ bind(&string_add_flat_result);
4710 __ SmiToInteger32(r14, FieldOperand(rax, SeqString::kLengthOffset));
4711 // r14: length of first string
4712 STATIC_ASSERT(kSeqStringTag == 0);
4713 __ testb(r8, Immediate(kStringRepresentationMask));
4714 __ j(zero, &first_is_sequential, Label::kNear);
4715 // Rule out short external string and load string resource.
4716 STATIC_ASSERT(kShortExternalStringTag != 0);
4717 __ testb(r8, Immediate(kShortExternalStringMask));
4718 __ j(not_zero, &call_runtime);
4719 __ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
4720 __ jmp(&first_prepared, Label::kNear);
4721 __ bind(&first_is_sequential);
4722 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4723 __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
4724 __ bind(&first_prepared);
4726 // Check whether both strings have the same encoding.
4728 __ testb(r8, Immediate(kStringEncodingMask));
4729 __ j(not_zero, &call_runtime);
4731 __ SmiToInteger32(r15, FieldOperand(rdx, SeqString::kLengthOffset));
4732 // r15: length of second string
4733 STATIC_ASSERT(kSeqStringTag == 0);
4734 __ testb(r9, Immediate(kStringRepresentationMask));
4735 __ j(zero, &second_is_sequential, Label::kNear);
4736 // Rule out short external string and load string resource.
4737 STATIC_ASSERT(kShortExternalStringTag != 0);
4738 __ testb(r9, Immediate(kShortExternalStringMask));
4739 __ j(not_zero, &call_runtime);
4740 __ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
4741 __ jmp(&second_prepared, Label::kNear);
4742 __ bind(&second_is_sequential);
4743 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
4744 __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
4745 __ bind(&second_prepared);
4747 Label non_ascii_string_add_flat_result;
4748 // r9: instance type of second string
4749 // First string and second string have the same encoding.
4750 STATIC_ASSERT(kTwoByteStringTag == 0);
4751 __ SmiToInteger32(rbx, rbx);
4752 __ testb(r9, Immediate(kStringEncodingMask));
4753 __ j(zero, &non_ascii_string_add_flat_result);
4755 __ bind(&make_flat_ascii_string);
4756 // Both strings are ASCII strings. As they are short they are both flat.
4757 __ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
4758 // rax: result string
4759 // Locate first character of result.
4760 __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
4761 // rcx: first char of first string
4762 // rbx: first character of result
4763 // r14: length of first string
4764 StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, true);
4765 // rbx: next character of result
4766 // rdx: first char of second string
4767 // r15: length of second string
4768 StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, true);
4769 __ IncrementCounter(counters->string_add_native(), 1);
4770 __ ret(2 * kPointerSize);
4772 __ bind(&non_ascii_string_add_flat_result);
4773 // Both strings are two-byte strings. As they are short they are both flat.
4774 __ AllocateTwoByteString(rax, rbx, rdi, r8, r9, &call_runtime);
4775 // rax: result string
4776 // Locate first character of result.
4777 __ lea(rbx, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
4778 // rcx: first char of first string
4779 // rbx: first character of result
4780 // r14: length of first string
4781 StringHelper::GenerateCopyCharacters(masm, rbx, rcx, r14, false);
4782 // rbx: next character of result
4783 // rdx: first char of second string
4784 // r15: length of second string
4785 StringHelper::GenerateCopyCharacters(masm, rbx, rdx, r15, false);
4786 __ IncrementCounter(counters->string_add_native(), 1);
4787 __ ret(2 * kPointerSize);
4789 // Just jump to runtime to add the two strings.
4790 __ bind(&call_runtime);
4791 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
4793 if (call_builtin.is_linked()) {
4794 __ bind(&call_builtin);
4795 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
4800 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
4807 // First check if the argument is already a string.
4808 Label not_string, done;
4809 __ JumpIfSmi(arg, &not_string);
4810 __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
4813 // Check the number to string cache.
4815 __ bind(&not_string);
4816 // Puts the cached result into scratch1.
4817 NumberToStringStub::GenerateLookupNumberStringCache(masm,
4824 __ movq(arg, scratch1);
4825 __ movq(Operand(rsp, stack_offset), arg);
4828 // Check if the argument is a safe string wrapper.
4829 __ bind(&not_cached);
4830 __ JumpIfSmi(arg, slow);
4831 __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
4832 __ j(not_equal, slow);
4833 __ testb(FieldOperand(scratch1, Map::kBitField2Offset),
4834 Immediate(1 << Map::kStringWrapperSafeForDefaultValueOf));
4836 __ movq(arg, FieldOperand(arg, JSValue::kValueOffset));
4837 __ movq(Operand(rsp, stack_offset), arg);
4843 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
4850 // This loop just copies one character at a time, as it is only used for very short strings.
4853 __ movb(kScratchRegister, Operand(src, 0));
4854 __ movb(Operand(dest, 0), kScratchRegister);
4858 __ movzxwl(kScratchRegister, Operand(src, 0));
4859 __ movw(Operand(dest, 0), kScratchRegister);
4860 __ addq(src, Immediate(2));
4861 __ addq(dest, Immediate(2));
4864 __ j(not_zero, &loop);
4868 void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
4873 // Copy characters using rep movs of quadwords. Copy remaining bytes
4874 // individually after running rep movs.
4876 // Count is positive int32, dest and src are character pointers.
4877 ASSERT(dest.is(rdi)); // rep movs destination
4878 ASSERT(src.is(rsi)); // rep movs source
4879 ASSERT(count.is(rcx)); // rep movs count
4881 // Nothing to do for zero characters.
4883 __ testl(count, count);
4884 __ j(zero, &done, Label::kNear);
4886 // Make count the number of bytes to copy.
4888 STATIC_ASSERT(2 == sizeof(uc16));
4889 __ addl(count, count);
4892 // Don't enter the rep movs if there are fewer than 8 bytes to copy.
4894 __ testl(count, Immediate(~7));
4895 __ j(zero, &last_bytes, Label::kNear);
4897 // Copy from rsi to rdi using the rep movs instruction.
4898 __ movl(kScratchRegister, count);
4899 __ shr(count, Immediate(3)); // Number of quadwords to copy.
4902 // Find number of bytes left.
4903 __ movl(count, kScratchRegister);
4904 __ and_(count, Immediate(7));
4906 // Check if there are more bytes to copy.
4907 __ bind(&last_bytes);
4908 __ testl(count, count);
4909 __ j(zero, &done, Label::kNear);
4911 // Copy remaining characters.
4914 __ movb(kScratchRegister, Operand(src, 0));
4915 __ movb(Operand(dest, 0), kScratchRegister);
4919 __ j(not_zero, &loop);
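// Worked example of the count arithmetic above: for 13 two-byte characters,
// count becomes 13 * 2 = 26 bytes; 26 & ~7 is non-zero, so rep movs copies
// 26 >> 3 = 3 quadwords (24 bytes) and the tail loop copies the remaining
// 26 & 7 = 2 bytes one at a time.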
4924 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
4932 // Register scratch3 is the general scratch register in this function.
4933 Register scratch = scratch3;
4935 // Make sure that both characters are not digits, as such strings have a
4936 // different hash algorithm. Don't try to look for these in the symbol table.
4937 Label not_array_index;
4938 __ leal(scratch, Operand(c1, -'0'));
4939 __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
4940 __ j(above, &not_array_index, Label::kNear);
4941 __ leal(scratch, Operand(c2, -'0'));
4942 __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
4943 __ j(below_equal, not_found);
4945 __ bind(&not_array_index);
4946 // Calculate the two character string hash.
4947 Register hash = scratch1;
4948 GenerateHashInit(masm, hash, c1, scratch);
4949 GenerateHashAddCharacter(masm, hash, c2, scratch);
4950 GenerateHashGetHash(masm, hash, scratch);
4952 // Collect the two characters in a register.
4953 Register chars = c1;
4954 __ shl(c2, Immediate(kBitsPerByte));
4957 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
4958 // hash: hash of two character string.
4960 // Load the symbol table.
4961 Register symbol_table = c2;
4962 __ LoadRoot(symbol_table, Heap::kSymbolTableRootIndex);
4964 // Calculate capacity mask from the symbol table capacity.
4965 Register mask = scratch2;
4966 __ SmiToInteger32(mask,
4967 FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
4970 Register map = scratch4;
4973 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
4974 // hash: hash of two character string (32-bit int)
4975 // symbol_table: symbol table
4976 // mask: capacity mask (32-bit int)
4980 // Perform a number of probes in the symbol table.
4981 static const int kProbes = 4;
4982 Label found_in_symbol_table;
4983 Label next_probe[kProbes];
4984 Register candidate = scratch; // Scratch register contains candidate.
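// C-style sketch of the probing scheme below (illustrative only; entry_at is
// a hypothetical accessor for the elements starting at kElementsStartOffset):
//   for (int i = 0; i < kProbes; i++) {
//     uint32_t entry = (hash + SymbolTable::GetProbeOffset(i)) & mask;
//     Object* candidate = symbol_table->entry_at(entry);
//     if (candidate == undefined_value) return NOT_FOUND;
//   }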
4985 for (int i = 0; i < kProbes; i++) {
4986 // Calculate entry in symbol table.
4987 __ movl(scratch, hash);
4989 __ addl(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
4991 __ andl(scratch, mask);
4993 // Load the entry from the symbol table.
4994 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
4996 FieldOperand(symbol_table,
4999 SymbolTable::kElementsStartOffset));
5001 // If entry is undefined no string with this hash can be found.
5003 __ CmpObjectType(candidate, ODDBALL_TYPE, map);
5004 __ j(not_equal, &is_string, Label::kNear);
5006 __ CompareRoot(candidate, Heap::kUndefinedValueRootIndex);
5007 __ j(equal, not_found);
5008 // Must be the hole (deleted entry).
5009 if (FLAG_debug_code) {
5010 __ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
5011 __ cmpq(kScratchRegister, candidate);
5012 __ Assert(equal, "oddball in symbol table is not undefined or the hole");
5014 __ jmp(&next_probe[i]);
5016 __ bind(&is_string);
5018 // If length is not 2 the string is not a candidate.
5019 __ SmiCompare(FieldOperand(candidate, String::kLengthOffset),
5021 __ j(not_equal, &next_probe[i]);
5023 // We use kScratchRegister as a temporary register on the assumption that
5024 // JumpIfInstanceTypeIsNotSequentialAscii does not use it implicitly.
5025 Register temp = kScratchRegister;
5027 // Check that the candidate is a non-external ASCII string.
5028 __ movzxbl(temp, FieldOperand(map, Map::kInstanceTypeOffset));
5029 __ JumpIfInstanceTypeIsNotSequentialAscii(
5030 temp, temp, &next_probe[i]);
5032 // Check if the two characters match.
5033 __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
5034 __ andl(temp, Immediate(0x0000ffff));
5035 __ cmpl(chars, temp);
5036 __ j(equal, &found_in_symbol_table);
5037 __ bind(&next_probe[i]);
5040 // No matching 2 character string found by probing.
5043 // Scratch register contains result when we fall through to here.
5044 Register result = candidate;
5045 __ bind(&found_in_symbol_table);
5046 if (!result.is(rax)) {
5047 __ movq(rax, result);
5052 void StringHelper::GenerateHashInit(MacroAssembler* masm,
5056 // hash = (seed + character) + ((seed + character) << 10);
5057 __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
5058 __ SmiToInteger32(scratch, scratch);
5059 __ addl(scratch, character);
5060 __ movl(hash, scratch);
5061 __ shll(scratch, Immediate(10));
5062 __ addl(hash, scratch);
5063 // hash ^= hash >> 6;
5064 __ movl(scratch, hash);
5065 __ shrl(scratch, Immediate(6));
5066 __ xorl(hash, scratch);
5070 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
5074 // hash += character;
5075 __ addl(hash, character);
5076 // hash += hash << 10;
5077 __ movl(scratch, hash);
5078 __ shll(scratch, Immediate(10));
5079 __ addl(hash, scratch);
5080 // hash ^= hash >> 6;
5081 __ movl(scratch, hash);
5082 __ shrl(scratch, Immediate(6));
5083 __ xorl(hash, scratch);
5087 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
5090 // hash += hash << 3;
5091 __ leal(hash, Operand(hash, hash, times_8, 0));
5092 // hash ^= hash >> 11;
5093 __ movl(scratch, hash);
5094 __ shrl(scratch, Immediate(11));
5095 __ xorl(hash, scratch);
5096 // hash += hash << 15;
5097 __ movl(scratch, hash);
5098 __ shll(scratch, Immediate(15));
5099 __ addl(hash, scratch);
5101 __ andl(hash, Immediate(String::kHashBitMask));
5103 // if (hash == 0) hash = 27;
5104 Label hash_not_zero;
5105 __ j(not_zero, &hash_not_zero);
5106 __ Set(hash, StringHasher::kZeroHash);
5107 __ bind(&hash_not_zero);
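// Taken together, GenerateHashInit/GenerateHashAddCharacter/GenerateHashGetHash
// compute the following hash (C sketch, illustrative; the seed comes from
// Heap::kHashSeedRootIndex):
//   uint32_t hash = seed + c[0];
//   hash += hash << 10; hash ^= hash >> 6;
//   for (each further character c) { hash += c; hash += hash << 10;
//                                    hash ^= hash >> 6; }
//   hash += hash << 3; hash ^= hash >> 11; hash += hash << 15;
//   hash &= String::kHashBitMask;
//   if (hash == 0) hash = StringHasher::kZeroHash;  // 27, per the comment above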
5110 void SubStringStub::Generate(MacroAssembler* masm) {
5113 // Stack frame on entry.
5114 // rsp[0]: return address
5119 const int kToOffset = 1 * kPointerSize;
5120 const int kFromOffset = kToOffset + kPointerSize;
5121 const int kStringOffset = kFromOffset + kPointerSize;
5122 const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset;
5124 // Make sure first argument is a string.
5125 __ movq(rax, Operand(rsp, kStringOffset));
5126 STATIC_ASSERT(kSmiTag == 0);
5127 __ testl(rax, Immediate(kSmiTagMask));
5128 __ j(zero, &runtime);
5129 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
5130 __ j(NegateCondition(is_string), &runtime);
5133 // rbx: instance type
5134 // Calculate length of sub string using the smi values.
5135 __ movq(rcx, Operand(rsp, kToOffset));
5136 __ movq(rdx, Operand(rsp, kFromOffset));
5137 __ JumpUnlessBothNonNegativeSmi(rcx, rdx, &runtime);
5139 __ SmiSub(rcx, rcx, rdx); // Overflow doesn't happen.
5140 __ cmpq(rcx, FieldOperand(rax, String::kLengthOffset));
5141 Label not_original_string;
5142 // Shorter than original string's length: an actual substring.
5143 __ j(below, &not_original_string, Label::kNear);
5144 // Longer than original string's length or negative: unsafe arguments.
5145 __ j(above, &runtime);
5146 // Return original string.
5147 Counters* counters = masm->isolate()->counters();
5148 __ IncrementCounter(counters->sub_string_native(), 1);
5149 __ ret(kArgumentsSize);
5150 __ bind(&not_original_string);
5151 __ SmiToInteger32(rcx, rcx);
5154 // rbx: instance type
5155 // rcx: sub string length
5156 // rdx: from index (smi)
5157 // Deal with different string types: update the index if necessary
5158 // and put the underlying string into rdi.
5159 Label underlying_unpacked, sliced_string, seq_or_external_string;
5160 // If the string is not indirect, it can only be sequential or external.
5161 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
5162 STATIC_ASSERT(kIsIndirectStringMask != 0);
5163 __ testb(rbx, Immediate(kIsIndirectStringMask));
5164 __ j(zero, &seq_or_external_string, Label::kNear);
5166 __ testb(rbx, Immediate(kSlicedNotConsMask));
5167 __ j(not_zero, &sliced_string, Label::kNear);
5168 // Cons string. Check whether it is flat, then fetch first part.
5169 // Flat cons strings have an empty second part.
5170 __ CompareRoot(FieldOperand(rax, ConsString::kSecondOffset),
5171 Heap::kEmptyStringRootIndex);
5172 __ j(not_equal, &runtime);
5173 __ movq(rdi, FieldOperand(rax, ConsString::kFirstOffset));
5174 // Update instance type.
5175 __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
5176 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
5177 __ jmp(&underlying_unpacked, Label::kNear);
5179 __ bind(&sliced_string);
5180 // Sliced string. Fetch parent and correct start index by offset.
5181 __ addq(rdx, FieldOperand(rax, SlicedString::kOffsetOffset));
5182 __ movq(rdi, FieldOperand(rax, SlicedString::kParentOffset));
5183 // Update instance type.
5184 __ movq(rbx, FieldOperand(rdi, HeapObject::kMapOffset));
5185 __ movzxbl(rbx, FieldOperand(rbx, Map::kInstanceTypeOffset));
5186 __ jmp(&underlying_unpacked, Label::kNear);
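
  // Rough C-style sketch of the unpacking above (assumed; accessors are
  // named after the field offsets used in this file):
  //   if (is_cons(s)) {              // only flat cons strings get here
  //     s = s->first();
  //   } else if (is_sliced(s)) {
  //     from += s->offset();
  //     s = s->parent();
  //   }
  //   // s is now sequential or external; rbx caches its instance type.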

  __ bind(&seq_or_external_string);
  // Sequential or external string.  Just move string to the correct register.
  __ movq(rdi, rax);

  __ bind(&underlying_unpacked);

  if (FLAG_string_slices) {
    Label copy_routine;
    // rdi: underlying subject string
    // rbx: instance type of underlying subject string
    // rdx: adjusted start index (smi)
    // rcx: length
    // If coming from the make_two_character_string path, the string
    // is too short to be sliced anyways.
    __ cmpq(rcx, Immediate(SlicedString::kMinLength));
    // Short slice.  Copy instead of slicing.
    __ j(less, &copy_routine);
    // Allocate new sliced string.  At this point we do not reload the instance
    // type including the string encoding because we simply rely on the info
    // provided by the original string.  It does not matter if the original
    // string's encoding is wrong because we always have to recheck encoding of
    // the newly created string's parent anyways due to externalized strings.
    Label two_byte_slice, set_slice_header;
    STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
    __ testb(rbx, Immediate(kStringEncodingMask));
    __ j(zero, &two_byte_slice, Label::kNear);
    __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
    __ jmp(&set_slice_header, Label::kNear);
    __ bind(&two_byte_slice);
    __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
    __ bind(&set_slice_header);
    __ Integer32ToSmi(rcx, rcx);
    __ movq(FieldOperand(rax, SlicedString::kLengthOffset), rcx);
    __ movq(FieldOperand(rax, SlicedString::kHashFieldOffset),
            Immediate(String::kEmptyHashField));
    __ movq(FieldOperand(rax, SlicedString::kParentOffset), rdi);
    __ movq(FieldOperand(rax, SlicedString::kOffsetOffset), rdx);
    __ IncrementCounter(counters->sub_string_native(), 1);
    __ ret(kArgumentsSize);

    __ bind(&copy_routine);
  }

  // rdi: underlying subject string
  // rbx: instance type of underlying subject string
  // rdx: adjusted start index (smi)
  // rcx: length
  // The subject string can only be external or sequential string of either
  // encoding at this point.
  Label two_byte_sequential, sequential_string;
  STATIC_ASSERT(kExternalStringTag != 0);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(rbx, Immediate(kExternalStringTag));
  __ j(zero, &sequential_string);

  // Handle external string.
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ testb(rbx, Immediate(kShortExternalStringMask));
  __ j(not_zero, &runtime);
  __ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
  // Move the pointer so that offset-wise, it looks like a sequential string.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
  __ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
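
  // After this subtraction rdi no longer points at a heap object: it is
  // biased so that the SeqAsciiString/SeqTwoByteString header-relative
  // addressing below reads directly from the external resource data.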

  __ bind(&sequential_string);
  STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
  __ testb(rbx, Immediate(kStringEncodingMask));
  __ j(zero, &two_byte_sequential);

  // Allocate the result.
  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);

  // rax: result string
  // rcx: result string length
  __ movq(r14, rsi);  // rsi used by following code.
  {  // Locate character of sub string start.
    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
    __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
  }
  // Locate first character of result.
  __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));

  // rax: result string
  // rcx: result length
  // rdi: first character of result
  // rsi: character of sub string start
  // r14: original value of rsi
  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, true);
  __ movq(rsi, r14);  // Restore rsi.
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(kArgumentsSize);

  __ bind(&two_byte_sequential);
  // Allocate the result.
  __ AllocateTwoByteString(rax, rcx, r11, r14, r15, &runtime);

  // rax: result string
  // rcx: result string length
  __ movq(r14, rsi);  // rsi used by following code.
  {  // Locate character of sub string start.
    SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
    __ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
                        SeqAsciiString::kHeaderSize - kHeapObjectTag));
  }
  // Locate first character of result.
  __ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));

  // rax: result string
  // rcx: result length
  // rdi: first character of result
  // rsi: character of sub string start
  // r14: original value of rsi
  StringHelper::GenerateCopyCharactersREP(masm, rdi, rsi, rcx, false);
  __ movq(rsi, r14);  // Restore rsi.
  __ IncrementCounter(counters->sub_string_native(), 1);
  __ ret(kArgumentsSize);

  // Just jump to runtime to create the sub string.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kSubString, 3, 1);
}


void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
                                                      Register left,
                                                      Register right,
                                                      Register scratch1,
                                                      Register scratch2) {
  Register length = scratch1;

  // Compare lengths.
  Label check_zero_length;
  __ movq(length, FieldOperand(left, String::kLengthOffset));
  __ SmiCompare(length, FieldOperand(right, String::kLengthOffset));
  __ j(equal, &check_zero_length, Label::kNear);
  __ Move(rax, Smi::FromInt(NOT_EQUAL));
  __ ret(0);

  // Check if the length is zero.
  Label compare_chars;
  __ bind(&check_zero_length);
  STATIC_ASSERT(kSmiTag == 0);
  __ SmiTest(length);
  __ j(not_zero, &compare_chars, Label::kNear);
  __ Move(rax, Smi::FromInt(EQUAL));
  __ ret(0);

  // Compare characters.
  __ bind(&compare_chars);
  Label strings_not_equal;
  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
                                &strings_not_equal, Label::kNear);

  // Characters are equal.
  __ Move(rax, Smi::FromInt(EQUAL));
  __ ret(0);

  // Characters are not equal.
  __ bind(&strings_not_equal);
  __ Move(rax, Smi::FromInt(NOT_EQUAL));
  __ ret(0);
}


void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                                        Register left,
                                                        Register right,
                                                        Register scratch1,
                                                        Register scratch2,
                                                        Register scratch3,
                                                        Register scratch4) {
  // Ensure that you can always subtract a string length from a non-negative
  // number (e.g. another length).
  STATIC_ASSERT(String::kMaxLength < 0x7fffffff);

  // Find minimum length and length difference.
  __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
  __ movq(scratch4, scratch1);
  __ SmiSub(scratch4,
            scratch4,
            FieldOperand(right, String::kLengthOffset));
  // Register scratch4 now holds left.length - right.length.
  const Register length_difference = scratch4;
  Label left_shorter;
  __ j(less, &left_shorter, Label::kNear);
  // The right string isn't longer than the left one.
  // Get the right string's length by subtracting the (non-negative) difference
  // from the left string's length.
  __ SmiSub(scratch1, scratch1, length_difference);
  __ bind(&left_shorter);
  // Register scratch1 now holds Min(left.length, right.length).
  const Register min_length = scratch1;

  Label compare_lengths;
  // If min-length is zero, go directly to comparing lengths.
  __ SmiTest(min_length);
  __ j(zero, &compare_lengths, Label::kNear);

  // Compare loop.
  Label result_not_equal;
  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
                                &result_not_equal, Label::kNear);

  // Completed loop without finding different characters.
  // Compare lengths (precomputed).
  __ bind(&compare_lengths);
  __ SmiTest(length_difference);
  __ j(not_zero, &result_not_equal, Label::kNear);

  // Result is EQUAL.
  __ Move(rax, Smi::FromInt(EQUAL));
  __ ret(0);

  Label result_greater;
  __ bind(&result_not_equal);
  // Unequal comparison of left to right, either character or length.
  __ j(greater, &result_greater, Label::kNear);

  // Result is LESS.
  __ Move(rax, Smi::FromInt(LESS));
  __ ret(0);

  // Result is GREATER.
  __ bind(&result_greater);
  __ Move(rax, Smi::FromInt(GREATER));
  __ ret(0);
}


void StringCompareStub::GenerateAsciiCharsCompareLoop(
    MacroAssembler* masm,
    Register left,
    Register right,
    Register length,
    Register scratch,
    Label* chars_not_equal,
    Label::Distance near_jump) {
  // Change index to run from -length to -1 by adding length to string
  // start. This means that loop ends when index reaches zero, which
  // doesn't need an additional compare.
  __ SmiToInteger32(length, length);
  __ lea(left,
         FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
  __ lea(right,
         FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
  __ neg(length);
  Register index = length;  // index = -length;

  // Compare loop.
  Label loop;
  __ bind(&loop);
  __ movb(scratch, Operand(left, index, times_1, 0));
  __ cmpb(scratch, Operand(right, index, times_1, 0));
  __ j(not_equal, chars_not_equal, near_jump);
  __ incq(index);
  __ j(not_zero, &loop);
}


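// A plain C sketch of the loop generated above (not the emitted code):
//
//   left += length;  right += length;   // now point one past the end
//   int index = -length;
//   do {
//     if (left[index] != right[index]) goto chars_not_equal;
//   } while (++index != 0);

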
void StringCompareStub::Generate(MacroAssembler* masm) {
  Label runtime;

  // Stack frame on entry.
  //  rsp[0]: return address
  //  rsp[8]: right string
  //  rsp[16]: left string

  __ movq(rdx, Operand(rsp, 2 * kPointerSize));  // left
  __ movq(rax, Operand(rsp, 1 * kPointerSize));  // right

  // Check for identity.
  Label not_same;
  __ cmpq(rdx, rax);
  __ j(not_equal, &not_same, Label::kNear);
  __ Move(rax, Smi::FromInt(EQUAL));
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->string_compare_native(), 1);
  __ ret(2 * kPointerSize);

  __ bind(&not_same);

  // Check that both are sequential ASCII strings.
  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);

  // Inline comparison of ASCII strings.
  __ IncrementCounter(counters->string_compare_native(), 1);
  // Drop arguments from the stack, keeping the return address on top.
  __ pop(rcx);
  __ addq(rsp, Immediate(2 * kPointerSize));
  __ push(rcx);

  GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);

  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
  // tagged as a small integer.
  __ bind(&runtime);
  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
}


void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SMIS);
  Label miss;
  __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);

  if (GetCondition() == equal) {
    // For equality we do not care about the sign of the result.
    __ subq(rax, rdx);
  } else {
    Label done;
    __ subq(rdx, rax);
    __ j(no_overflow, &done, Label::kNear);
    // Correct sign of result in case of overflow.
    __ SmiNot(rdx, rdx);
    __ bind(&done);
    __ movq(rax, rdx);
  }
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::HEAP_NUMBERS);

  Label generic_stub;
  Label unordered, maybe_undefined1, maybe_undefined2;
  Label miss;
  Condition either_smi = masm->CheckEitherSmi(rax, rdx);
  __ j(either_smi, &generic_stub, Label::kNear);

  __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
  __ j(not_equal, &maybe_undefined1, Label::kNear);
  __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
  __ j(not_equal, &maybe_undefined2, Label::kNear);

  // Load the left and right operands.
  __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
  __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));

  // Compare operands.
  __ ucomisd(xmm0, xmm1);

  // Don't base result on EFLAGS when a NaN is involved.
  __ j(parity_even, &unordered, Label::kNear);

  // Return a result of -1, 0, or 1, based on EFLAGS.
  // Performing mov, because xor would destroy the flag register.
  __ movl(rax, Immediate(0));
  __ movl(rcx, Immediate(0));
  __ setcc(above, rax);  // Add one to zero if carry clear and not equal.
  __ sbbq(rax, rcx);  // Subtract one if below (aka. carry set).
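  // The flag arithmetic above, spelled out:
  //   above (CF=0, ZF=0): setcc -> rax = 1; sbb subtracts CF=0 -> rax =  1
  //   equal (CF=0, ZF=1): setcc -> rax = 0; sbb subtracts CF=0 -> rax =  0
  //   below (CF=1):       setcc -> rax = 0; sbb subtracts CF=1 -> rax = -1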
  __ ret(0);

  __ bind(&unordered);
  CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
  __ bind(&generic_stub);
  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);

  __ bind(&maybe_undefined1);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
    __ Cmp(rax, masm->isolate()->factory()->undefined_value());
    __ j(not_equal, &miss);
    __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
    __ j(not_equal, &maybe_undefined2, Label::kNear);
    __ jmp(&unordered);
  }

  __ bind(&maybe_undefined2);
  if (Token::IsOrderedRelationalCompareOp(op_)) {
    __ Cmp(rdx, masm->isolate()->factory()->undefined_value());
    __ j(equal, &unordered);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::SYMBOLS);
  ASSERT(GetCondition() == equal);

  // Registers containing left and right operands respectively.
  Register left = rdx;
  Register right = rax;
  Register tmp1 = rcx;
  Register tmp2 = rbx;

  // Check that both operands are heap objects.
  Label miss;
  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
  __ j(cond, &miss, Label::kNear);

  // Check that both operands are symbols.
  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kSymbolTag != 0);
  __ and_(tmp1, tmp2);
  __ testb(tmp1, Immediate(kIsSymbolMask));
  __ j(zero, &miss, Label::kNear);

  // Symbols are compared by identity.
  Label done;
  __ cmpq(left, right);
  // Make sure rax is non-zero. At this point input operands are
  // guaranteed to be non-zero.
  ASSERT(right.is(rax));
  __ j(not_equal, &done, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Move(rax, Smi::FromInt(EQUAL));
  __ bind(&done);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::STRINGS);
  Label miss;

  bool equality = Token::IsEqualityOp(op_);

  // Registers containing left and right operands respectively.
  Register left = rdx;
  Register right = rax;
  Register tmp1 = rcx;
  Register tmp2 = rbx;
  Register tmp3 = rdi;

  // Check that both operands are heap objects.
  Condition cond = masm->CheckEitherSmi(left, right, tmp1);
  __ j(cond, &miss);

  // Check that both operands are strings. This leaves the instance
  // types loaded in tmp1 and tmp2.
  __ movq(tmp1, FieldOperand(left, HeapObject::kMapOffset));
  __ movq(tmp2, FieldOperand(right, HeapObject::kMapOffset));
  __ movzxbq(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
  __ movzxbq(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
  __ movq(tmp3, tmp1);
  STATIC_ASSERT(kNotStringTag != 0);
  __ or_(tmp3, tmp2);
  __ testb(tmp3, Immediate(kIsNotStringMask));
  __ j(not_zero, &miss);

  // Fast check for identical strings.
  Label not_same;
  __ cmpq(left, right);
  __ j(not_equal, &not_same, Label::kNear);
  STATIC_ASSERT(EQUAL == 0);
  STATIC_ASSERT(kSmiTag == 0);
  __ Move(rax, Smi::FromInt(EQUAL));
  __ ret(0);

  // Handle not identical strings.
  __ bind(&not_same);

  // Check that both strings are symbols. If they are, we're done
  // because we already know they are not identical.
  if (equality) {
    Label do_compare;
    STATIC_ASSERT(kSymbolTag != 0);
    __ and_(tmp1, tmp2);
    __ testb(tmp1, Immediate(kIsSymbolMask));
    __ j(zero, &do_compare, Label::kNear);
    // Make sure rax is non-zero. At this point input operands are
    // guaranteed to be non-zero.
    ASSERT(right.is(rax));
    __ ret(0);
    __ bind(&do_compare);
  }

  // Check that both strings are sequential ASCII.
  Label runtime;
  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);

  // Compare flat ASCII strings. Returns when done.
  if (equality) {
    StringCompareStub::GenerateFlatAsciiStringEquals(
        masm, left, right, tmp1, tmp2);
  } else {
    StringCompareStub::GenerateCompareFlatAsciiStrings(
        masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
  }

  // Handle more complex cases in runtime.
  __ bind(&runtime);
  __ pop(tmp1);  // Return address.
  __ push(left);
  __ push(right);
  __ push(tmp1);
  if (equality) {
    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
  } else {
    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
  }

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
  ASSERT(state_ == CompareIC::OBJECTS);
  Label miss;
  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
  __ j(either_smi, &miss, Label::kNear);

  __ CmpObjectType(rax, JS_OBJECT_TYPE, rcx);
  __ j(not_equal, &miss, Label::kNear);
  __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
  __ j(not_equal, &miss, Label::kNear);

  ASSERT(GetCondition() == equal);
  __ subq(rax, rdx);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
  Label miss;
  Condition either_smi = masm->CheckEitherSmi(rdx, rax);
  __ j(either_smi, &miss, Label::kNear);

  __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
  __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
  __ Cmp(rcx, known_map_);
  __ j(not_equal, &miss, Label::kNear);
  __ Cmp(rbx, known_map_);
  __ j(not_equal, &miss, Label::kNear);

  __ subq(rax, rdx);
  __ ret(0);

  __ bind(&miss);
  GenerateMiss(masm);
}


void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
  {
    // Call the runtime system in a fresh internal frame.
    ExternalReference miss =
        ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());

    FrameScope scope(masm, StackFrame::INTERNAL);
    __ push(rdx);
    __ push(rax);
    __ push(rdx);
    __ push(rax);
    __ Push(Smi::FromInt(op_));
    __ CallExternalReference(miss, 3);

    // Compute the entry point of the rewritten stub.
    __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
    __ pop(rax);
    __ pop(rdx);
  }

  // Do a tail call to the rewritten stub.
  __ jmp(rdi);
}


void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register properties,
                                                        Handle<String> name,
                                                        Register r0) {
  // If names of slots in range from 1 to kProbes - 1 for the hash value are
  // not equal to the name and kProbes-th slot is not used (its name is the
  // undefined value), it guarantees the hash table doesn't contain the
  // property. It's true even if some slots represent deleted properties
  // (their names are the hole value).
  for (int i = 0; i < kInlinedProbes; i++) {
    // r0 points to properties hash.
    // Compute the masked index: (hash + i + i * i) & mask.
    Register index = r0;
    // Capacity is smi 2^n.
    __ SmiToInteger32(index, FieldOperand(properties, kCapacityOffset));
    __ decl(index);
    __ and_(index,
            Immediate(name->Hash() + StringDictionary::GetProbeOffset(i)));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
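
    // kEntrySize == 3 because each StringDictionary entry is a
    // (key, value, details) triple; the lea computes index * 3 in a
    // single instruction instead of a multiply.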

    Register entity_name = r0;
    // Having undefined at this place means the name is not contained.
    ASSERT_EQ(kSmiTagSize, 1);
    __ movq(entity_name, Operand(properties,
                                 index,
                                 times_pointer_size,
                                 kElementsStartOffset - kHeapObjectTag));
    __ Cmp(entity_name, masm->isolate()->factory()->undefined_value());
    __ j(equal, done);

    // Stop if found the property.
    __ Cmp(entity_name, Handle<String>(name));
    __ j(equal, miss);

    Label the_hole;
    // Check for the hole and skip.
    __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex);
    __ j(equal, &the_hole, Label::kNear);

    // Check if the entry name is not a symbol.
    __ movq(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
    __ testb(FieldOperand(entity_name, Map::kInstanceTypeOffset),
             Immediate(kIsSymbolMask));
    __ j(zero, miss);

    __ bind(&the_hole);
  }

  StringDictionaryLookupStub stub(properties,
                                  r0,
                                  r0,
                                  StringDictionaryLookupStub::NEGATIVE_LOOKUP);
  __ Push(Handle<Object>(name));
  __ push(Immediate(name->Hash()));
  __ CallStub(&stub);
  __ testq(r0, r0);
  __ j(not_zero, miss);
  __ jmp(done);
}


// Probe the string dictionary in the |elements| register. Jump to the
// |done| label if a property with the given name is found leaving the
// index into the dictionary in |r1|. Jump to the |miss| label
// otherwise.
void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
                                                        Label* miss,
                                                        Label* done,
                                                        Register elements,
                                                        Register name,
                                                        Register r0,
                                                        Register r1) {
  ASSERT(!elements.is(r0));
  ASSERT(!elements.is(r1));
  ASSERT(!name.is(r0));
  ASSERT(!name.is(r1));

  // Assert that name contains a string.
  if (FLAG_debug_code) __ AbortIfNotString(name);

  __ SmiToInteger32(r0, FieldOperand(elements, kCapacityOffset));
  __ decl(r0);

  for (int i = 0; i < kInlinedProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ movl(r1, FieldOperand(name, String::kHashFieldOffset));
    __ shrl(r1, Immediate(String::kHashShift));
    if (i > 0) {
      __ addl(r1, Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(r1, r0);

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3

    // Check if the key is identical to the name.
    __ cmpq(name, Operand(elements, r1, times_pointer_size,
                          kElementsStartOffset - kHeapObjectTag));
    __ j(equal, done);
  }

  StringDictionaryLookupStub stub(elements,
                                  r0,
                                  r1,
                                  POSITIVE_LOOKUP);
  __ push(name);
  __ movl(r0, FieldOperand(name, String::kHashFieldOffset));
  __ shrl(r0, Immediate(String::kHashShift));
  __ push(r0);
  __ CallStub(&stub);

  __ testq(r0, r0);
  __ j(zero, miss);
  __ jmp(done);
}


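// A sketch of the probe sequence the inlined code above implements
// (open addressing; GetProbeOffset(i) grows quadratically in i):
//
//   mask = capacity - 1;
//   for (i = 0; i < kInlinedProbes; i++) {
//     index = (hash + GetProbeOffset(i)) & mask;
//     if (elements[index * kEntrySize] == name) goto done;
//   }
//   // remaining probes happen in the out-of-line stub called above

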
void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
  // This stub overrides SometimesSetsUpAFrame() to return false. That means
  // we cannot call anything that could cause a GC from this stub.
  // Stack frame on entry:
  //  rsp[0 * kPointerSize]: return address.
  //  rsp[1 * kPointerSize]: key's hash.
  //  rsp[2 * kPointerSize]: key.
  // Registers:
  //  dictionary_: StringDictionary to probe.
  //  result_: used as scratch.
  //  index_: will hold an index of entry if lookup is successful.
  //          might alias with result_.
  // Returns:
  //  result_ is zero if lookup failed, non zero otherwise.

  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;

  Register scratch = result_;

  __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
  __ decl(scratch);
  __ push(scratch);  // Push the mask (capacity - 1).

  // If names of slots in range from 1 to kProbes - 1 for the hash value are
  // not equal to the name and kProbes-th slot is not used (its name is the
  // undefined value), it guarantees the hash table doesn't contain the
  // property. It's true even if some slots represent deleted properties
  // (their names are the null value).
  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
    // Compute the masked index: (hash + i + i * i) & mask.
    __ movq(scratch, Operand(rsp, 2 * kPointerSize));
    if (i > 0) {
      __ addl(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
    }
    __ and_(scratch, Operand(rsp, 0));

    // Scale the index by multiplying by the entry size.
    ASSERT(StringDictionary::kEntrySize == 3);
    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.

    // Having undefined at this place means the name is not contained.
    __ movq(scratch, Operand(dictionary_,
                             index_,
                             times_pointer_size,
                             kElementsStartOffset - kHeapObjectTag));

    __ Cmp(scratch, masm->isolate()->factory()->undefined_value());
    __ j(equal, &not_in_dictionary);

    // Stop if found the property.
    __ cmpq(scratch, Operand(rsp, 3 * kPointerSize));
    __ j(equal, &in_dictionary);

    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
      // If we hit a non symbol key during negative lookup
      // we have to bailout as this key might be equal to the
      // key we are looking for.

      // Check if the entry name is not a symbol.
      __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
      __ testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
               Immediate(kIsSymbolMask));
      __ j(zero, &maybe_in_dictionary);
    }
  }

  __ bind(&maybe_in_dictionary);
  // If we are doing negative lookup then probing failure should be
  // treated as a lookup success. For positive lookup probing failure
  // should be treated as lookup failure.
  if (mode_ == POSITIVE_LOOKUP) {
    __ movq(scratch, Immediate(0));
    __ Drop(1);  // Drop the mask.
    __ ret(2 * kPointerSize);
  }

  __ bind(&in_dictionary);
  __ movq(scratch, Immediate(1));
  __ Drop(1);  // Drop the mask.
  __ ret(2 * kPointerSize);

  __ bind(&not_in_dictionary);
  __ movq(scratch, Immediate(0));
  __ Drop(1);  // Drop the mask.
  __ ret(2 * kPointerSize);
}


struct AheadOfTimeWriteBarrierStubList {
  Register object, value, address;
  RememberedSetAction action;
};


#define REG(Name) { kRegister_ ## Name ## _Code }

struct AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
  // Used in RegExpExecStub.
  { REG(rbx), REG(rax), REG(rdi), EMIT_REMEMBERED_SET },
  // Used in CompileArrayPushCall.
  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
  // Used in CompileStoreGlobal.
  { REG(rbx), REG(rcx), REG(rdx), OMIT_REMEMBERED_SET },
  // Used in StoreStubCompiler::CompileStoreField and
  // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
  { REG(rdx), REG(rcx), REG(rbx), EMIT_REMEMBERED_SET },
  // GenerateStoreField calls the stub with two different permutations of
  // registers. This is the second.
  { REG(rbx), REG(rcx), REG(rdx), EMIT_REMEMBERED_SET },
  // StoreIC::GenerateNormal via GenerateDictionaryStore.
  { REG(rbx), REG(r8), REG(r9), EMIT_REMEMBERED_SET },
  // KeyedStoreIC::GenerateGeneric.
  { REG(rbx), REG(rdx), REG(rcx), EMIT_REMEMBERED_SET },
  // KeyedStoreStubCompiler::GenerateStoreFastElement.
  { REG(rdi), REG(rbx), REG(rcx), EMIT_REMEMBERED_SET },
  { REG(rdx), REG(rdi), REG(rbx), EMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateSmiOnlyToObject
  // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
  // and ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(rdx), REG(rbx), REG(rdi), EMIT_REMEMBERED_SET },
  { REG(rdx), REG(rbx), REG(rdi), OMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateSmiOnlyToDouble
  // and ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(rdx), REG(r11), REG(r15), EMIT_REMEMBERED_SET },
  // ElementsTransitionGenerator::GenerateDoubleToObject
  { REG(r11), REG(rax), REG(r15), EMIT_REMEMBERED_SET },
  // StoreArrayLiteralElementStub::Generate
  { REG(rbx), REG(rax), REG(rcx), EMIT_REMEMBERED_SET },
  // Null termination.
  { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET }
};

#undef REG


bool RecordWriteStub::IsPregenerated() {
  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    if (object_.is(entry->object) &&
        value_.is(entry->value) &&
        address_.is(entry->address) &&
        remembered_set_action_ == entry->action &&
        save_fp_regs_mode_ == kDontSaveFPRegs) {
      return true;
    }
  }
  return false;
}


void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
  StoreBufferOverflowStub stub1(kDontSaveFPRegs);
  stub1.GetCode()->set_is_pregenerated(true);
  StoreBufferOverflowStub stub2(kSaveFPRegs);
  stub2.GetCode()->set_is_pregenerated(true);
}


void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
  for (AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
       !entry->object.is(no_reg);
       entry++) {
    RecordWriteStub stub(entry->object,
                         entry->value,
                         entry->address,
                         entry->action,
                         kDontSaveFPRegs);
    stub.GetCode()->set_is_pregenerated(true);
  }
}


// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
// the value has just been written into the object, now this stub makes sure
// we keep the GC informed.  The word in the object where the value has been
// written is in the address register.
void RecordWriteStub::Generate(MacroAssembler* masm) {
  Label skip_to_incremental_noncompacting;
  Label skip_to_incremental_compacting;

  // The first two instructions are generated with labels so as to get the
  // offset fixed up correctly by the bind(Label*) call.  We patch it back and
  // forth between a compare instruction (a nop in this position) and the
  // real branch when we start and stop incremental heap marking.
  // See RecordWriteStub::Patch for details.
  __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
  __ jmp(&skip_to_incremental_compacting, Label::kFar);
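
  // Assumed byte layout at the start of this stub, based on the patching
  // scheme described above (RecordWriteStub::Patch toggles between the
  // jumps and same-sized nops):
  //   offset 0: 2-byte jmp or kTwoByteNopInstruction
  //   offset 2: 5-byte jmp or kFiveByteNopInstruction
  //   offset 7: store-buffer-only code (the remembered-set path below)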

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&skip_to_incremental_noncompacting);
  GenerateIncremental(masm, INCREMENTAL);

  __ bind(&skip_to_incremental_compacting);
  GenerateIncremental(masm, INCREMENTAL_COMPACTION);

  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
  masm->set_byte_at(0, kTwoByteNopInstruction);
  masm->set_byte_at(2, kFiveByteNopInstruction);
}


void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
  regs_.Save(masm);

  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
    Label dont_need_remembered_set;

    __ movq(regs_.scratch0(), Operand(regs_.address(), 0));
    __ JumpIfNotInNewSpace(regs_.scratch0(),
                           regs_.scratch0(),
                           &dont_need_remembered_set);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch0(),
                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
                     not_zero,
                     &dont_need_remembered_set);

    // First notify the incremental marker if necessary, then update the
    // remembered set.
    CheckNeedsToInformIncrementalMarker(
        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
    InformIncrementalMarker(masm, mode);
    regs_.Restore(masm);
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);

    __ bind(&dont_need_remembered_set);
  }

  CheckNeedsToInformIncrementalMarker(
      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
  InformIncrementalMarker(masm, mode);
  regs_.Restore(masm);
  __ ret(0);
}


void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
#ifdef _WIN64
  Register arg3 = r8;
  Register arg2 = rdx;
  Register arg1 = rcx;
#else
  Register arg3 = rdx;
  Register arg2 = rsi;
  Register arg1 = rdi;
#endif
  Register address =
      arg1.is(regs_.address()) ? kScratchRegister : regs_.address();
  ASSERT(!address.is(regs_.object()));
  ASSERT(!address.is(arg1));
  __ Move(address, regs_.address());
  __ Move(arg1, regs_.object());
  if (mode == INCREMENTAL_COMPACTION) {
    // TODO(gc) Can we just set address arg2 in the beginning?
    __ Move(arg2, address);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ movq(arg2, Operand(address, 0));
  }
  __ LoadAddress(arg3, ExternalReference::isolate_address());
  int argument_count = 3;
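
  // Assumed C-level signatures of the record-write functions called below
  // (matching the argument registers set up above):
  //   INCREMENTAL:            f(HeapObject* object, Object* value, Isolate*)
  //   INCREMENTAL_COMPACTION: f(HeapObject* object, Object** slot, Isolate*)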

  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count);
  if (mode == INCREMENTAL_COMPACTION) {
    __ CallCFunction(
        ExternalReference::incremental_evacuation_record_write_function(
            masm->isolate()),
        argument_count);
  } else {
    ASSERT(mode == INCREMENTAL);
    __ CallCFunction(
        ExternalReference::incremental_marking_record_write_function(
            masm->isolate()),
        argument_count);
  }
  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
}


void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    MacroAssembler* masm,
    OnNoNeedToInformIncrementalMarker on_no_need,
    Mode mode) {
  Label on_black;
  Label need_incremental;
  Label need_incremental_pop_object;

  // Let's look at the color of the object: if it is not black we don't have
  // to inform the incremental marker.
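  // (Tri-color terms: black objects are fully scanned, grey objects are
  // queued for scanning, white objects are not yet reached. Only a store
  // into a black object can hide a white value from the marker.)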
  __ JumpIfBlack(regs_.object(),
                 regs_.scratch0(),
                 regs_.scratch1(),
                 &on_black,
                 Label::kNear);

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&on_black);

  // Get the value from the slot.
  __ movq(regs_.scratch0(), Operand(regs_.address(), 0));

  if (mode == INCREMENTAL_COMPACTION) {
    Label ensure_not_white;

    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kEvacuationCandidateMask,
                     zero,
                     &ensure_not_white,
                     Label::kNear);

    __ CheckPageFlag(regs_.object(),
                     regs_.scratch1(),  // Scratch.
                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
                     zero,
                     &need_incremental);

    __ bind(&ensure_not_white);
  }

  // We need an extra register for this, so we push the object register
  // temporarily.
  __ push(regs_.object());
  __ EnsureNotWhite(regs_.scratch0(),  // The value.
                    regs_.scratch1(),  // Scratch.
                    regs_.object(),  // Scratch.
                    &need_incremental_pop_object,
                    Label::kNear);
  __ pop(regs_.object());

  regs_.Restore(masm);
  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
    __ RememberedSetHelper(object_,
                           address_,
                           value_,
                           save_fp_regs_mode_,
                           MacroAssembler::kReturnAtEnd);
  } else {
    __ ret(0);
  }

  __ bind(&need_incremental_pop_object);
  __ pop(regs_.object());

  __ bind(&need_incremental);

  // Fall through when we need to inform the incremental marker.
}


void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- rax    : element value to store
  //  -- rbx    : array literal
  //  -- rdi    : map of array literal
  //  -- rcx    : element index as smi
  //  -- rdx    : array literal index in function
  //  -- rsp[0] : return address
  // -----------------------------------

  Label element_done;
  Label double_elements;
  Label smi_element;
  Label slow_elements;
  Label fast_elements;

  __ CheckFastElements(rdi, &double_elements);

  // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
  __ JumpIfSmi(rax, &smi_element);
  __ CheckFastSmiOnlyElements(rdi, &fast_elements);

  // Store into the array literal requires an elements transition. Call into
  // the runtime.

  __ bind(&slow_elements);
  __ pop(rdi);  // Pop return address and remember to put back later for tail
                // call.
  __ push(rbx);
  __ push(rcx);
  __ push(rax);
  __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
  __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
  __ push(rdx);
  __ push(rdi);  // Return return address so that tail call returns to right
                 // place.
  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);

  // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
  __ bind(&fast_elements);
  __ SmiToInteger32(kScratchRegister, rcx);
  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
  __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size,
                           FixedArrayBase::kHeaderSize));
  __ movq(Operand(rcx, 0), rax);
  // Update the write barrier for the array store.
  __ RecordWrite(rbx, rcx, rax,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ ret(0);

  // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
  // FAST_ELEMENTS, and value is Smi.
  __ bind(&smi_element);
  __ SmiToInteger32(kScratchRegister, rcx);
  __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset));
  __ movq(FieldOperand(rbx, kScratchRegister, times_pointer_size,
                       FixedArrayBase::kHeaderSize), rax);
  __ ret(0);

  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
  __ bind(&double_elements);

  __ movq(r9, FieldOperand(rbx, JSObject::kElementsOffset));
  __ SmiToInteger32(r11, rcx);
  __ StoreNumberToDoubleElements(rax,
                                 r9,
                                 r11,
                                 xmm0,
                                 &slow_elements);
  __ ret(0);
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64