1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution.
12 // * Neither the name of Google Inc. nor the names of its
13 // contributors may be used to endorse or promote products derived
14 // from this software without specific prior written permission.
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30 #if defined(V8_TARGET_ARCH_IA32)
32 #include "bootstrapper.h"
33 #include "code-stubs.h"
36 #include "regexp-macro-assembler.h"
37 #include "stub-cache.h"
43 #define __ ACCESS_MASM(masm)
// ToNumber conversion stub.  The argument arrives in eax.  Smis are
// already numbers; values whose map equals the canonical heap-number map
// are accepted via the map check below; everything else tail-calls the
// TO_NUMBER builtin.
void ToNumberStub::Generate(MacroAssembler* masm) {
  // The ToNumber stub takes one argument in eax.
  Label check_heap_number, call_builtin;
  __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);

  // Non-Smi: check whether eax already holds a HeapNumber by comparing
  // its map against the isolate's heap-number map.
  __ bind(&check_heap_number);
  __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
  Factory* factory = masm->isolate()->factory();
  __ cmp(ebx, Immediate(factory->heap_number_map()));
  __ j(not_equal, &call_builtin, Label::kNear);

  // Slow case: hand the value to the TO_NUMBER builtin.  The return
  // address is popped and re-pushed around the argument setup.
  __ bind(&call_builtin);
  __ pop(ecx);  // Pop return address.
  __ push(ecx);  // Push return address.
  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
// Allocates a JSFunction in new space for the SharedFunctionInfo passed
// on the stack (returned in eax), choosing the classic- or strict-mode
// function map from the current global context.  Falls back to
// Runtime::kNewClosure when allocation fails.
void FastNewClosureStub::Generate(MacroAssembler* masm) {
  // Create a new closure from the given function info in new
  // space. Set the context to the current context in esi.
  // NOTE(review): the declaration/bind of the 'gc' label is not visible in
  // this excerpt — confirm against the full source.
  __ AllocateInNewSpace(JSFunction::kSize, eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function info from the stack.
  __ mov(edx, Operand(esp, 1 * kPointerSize));

  // Strict-mode closures use a different map than classic-mode ones.
  int map_index = (language_mode_ == CLASSIC_MODE)
      ? Context::FUNCTION_MAP_INDEX
      : Context::STRICT_MODE_FUNCTION_MAP_INDEX;

  // Compute the function map in the current global context and set that
  // as the map of the allocated object.
  __ mov(ecx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
  __ mov(ecx, Operand(ecx, Context::SlotOffset(map_index)));
  __ mov(FieldOperand(eax, JSObject::kMapOffset), ecx);

  // Initialize the rest of the function. We don't have to update the
  // write barrier because the allocated object is in new space.
  Factory* factory = masm->isolate()->factory();
  __ mov(ebx, Immediate(factory->empty_fixed_array()));
  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ebx);
  __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kPrototypeOrInitialMapOffset),
         Immediate(factory->the_hole_value()));
  __ mov(FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset), edx);
  __ mov(FieldOperand(eax, JSFunction::kContextOffset), esi);
  __ mov(FieldOperand(eax, JSFunction::kLiteralsOffset), ebx);
  __ mov(FieldOperand(eax, JSFunction::kNextFunctionLinkOffset),
         Immediate(factory->undefined_value()));

  // Initialize the code pointer in the function to be the one
  // found in the shared function info object.
  __ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
  __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
  __ mov(FieldOperand(eax, JSFunction::kCodeEntryOffset), edx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  // Create a new closure through the slower runtime call.
  __ pop(ecx);  // Temporarily remove return address.
  __ push(Immediate(factory->false_value()));
  __ push(ecx);  // Restore return address.
  __ TailCallRuntime(Runtime::kNewClosure, 3, 1);
// Allocates a function context with slots_ extra slots in new space,
// copying the closure, previous context, global and QML-global slots,
// and filling the remaining slots with undefined.  Falls back to
// Runtime::kNewFunctionContext on allocation failure.
void FastNewContextStub::Generate(MacroAssembler* masm) {
  // Try to allocate the context in new space.
  // NOTE(review): the 'gc' label declaration/bind is not visible in this
  // excerpt — confirm against the full source.
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
                        eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function from the stack.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));

  // Set up the object header.
  Factory* factory = masm->isolate()->factory();
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         factory->function_context_map());
  __ mov(FieldOperand(eax, Context::kLengthOffset),
         Immediate(Smi::FromInt(length)));

  // Set up the fixed slots.
  __ Set(ebx, Immediate(0));  // Set to NULL.
  __ mov(Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)), ecx);
  __ mov(Operand(eax, Context::SlotOffset(Context::PREVIOUS_INDEX)), esi);
  __ mov(Operand(eax, Context::SlotOffset(Context::EXTENSION_INDEX)), ebx);

  // Copy the global object from the previous context.
  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
  __ mov(Operand(eax, Context::SlotOffset(Context::GLOBAL_INDEX)), ebx);

  // Copy the qml global object from the previous context.
  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::QML_GLOBAL_INDEX)));
  __ mov(Operand(eax, Context::SlotOffset(Context::QML_GLOBAL_INDEX)), ebx);

  // Initialize the rest of the slots to undefined.
  __ mov(ebx, factory->undefined_value());
  for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
    __ mov(Operand(eax, Context::SlotOffset(i)), ebx);

  // Return and remove the on-stack parameter.
  __ ret(1 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ TailCallRuntime(Runtime::kNewFunctionContext, 1, 1);
// Allocates a block context for a scope with slots_ extra slots.  Takes
// the function (or a Smi-zero sentinel) and the serialized scope info
// from the stack; unhandled cases go to Runtime::kPushBlockContext.
void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  // [esp + (1 * kPointerSize)]: function
  // [esp + (2 * kPointerSize)]: serialized scope info

  // Try to allocate the context in new space.
  // NOTE(review): the 'gc' label declaration/bind is not visible in this
  // excerpt — confirm against the full source.
  int length = slots_ + Context::MIN_CONTEXT_SLOTS;
  __ AllocateInNewSpace(FixedArray::SizeFor(length),
                        eax, ebx, ecx, &gc, TAG_OBJECT);

  // Get the function or sentinel from the stack.
  __ mov(ecx, Operand(esp, 1 * kPointerSize));

  // Get the serialized scope info from the stack.
  __ mov(ebx, Operand(esp, 2 * kPointerSize));

  // Set up the object header.
  Factory* factory = masm->isolate()->factory();
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         factory->block_context_map());
  __ mov(FieldOperand(eax, Context::kLengthOffset),
         Immediate(Smi::FromInt(length)));

  // If this block context is nested in the global context we get a smi
  // sentinel instead of a function. The block context should get the
  // canonical empty function of the global context as its closure which
  // we still have to look up.
  Label after_sentinel;
  __ JumpIfNotSmi(ecx, &after_sentinel, Label::kNear);
  if (FLAG_debug_code) {
    const char* message = "Expected 0 as a Smi sentinel";
    __ Assert(equal, message);
  // Look up the canonical empty closure in the global context.
  __ mov(ecx, GlobalObjectOperand());
  __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalContextOffset));
  __ mov(ecx, ContextOperand(ecx, Context::CLOSURE_INDEX));
  __ bind(&after_sentinel);

  // Set up the fixed slots.
  __ mov(ContextOperand(eax, Context::CLOSURE_INDEX), ecx);
  __ mov(ContextOperand(eax, Context::PREVIOUS_INDEX), esi);
  __ mov(ContextOperand(eax, Context::EXTENSION_INDEX), ebx);

  // Copy the global object from the previous context.
  __ mov(ebx, ContextOperand(esi, Context::GLOBAL_INDEX));
  __ mov(ContextOperand(eax, Context::GLOBAL_INDEX), ebx);

  // Copy the qml global object from the previous context.
  __ mov(ebx, ContextOperand(esi, Context::QML_GLOBAL_INDEX));
  __ mov(ContextOperand(eax, Context::QML_GLOBAL_INDEX), ebx);

  // Initialize the rest of the slots to the hole value.
  __ mov(ContextOperand(eax, Context::MIN_CONTEXT_SLOTS),
         factory->the_hole_value());
  __ mov(ebx, factory->the_hole_value());
  for (int i = 0; i < slots_; i++) {
    __ mov(ContextOperand(eax, i + Context::MIN_CONTEXT_SLOTS), ebx);

  // Return and remove the on-stack parameters.
  __ ret(2 * kPointerSize);

  // Need to collect. Call into runtime system.
  __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1);
// Shared helper for FastCloneShallowArrayStub: clones the boilerplate
// array in ecx (JSArray header plus its elements backing store) in one
// new-space allocation.  Double elements are copied via the x87 stack;
// pointer elements are copied word by word.
// NOTE(review): the 'length' and 'fail' parameters used below are not
// visible in this excerpt's parameter list — confirm against the full
// source.
static void GenerateFastCloneShallowArrayCommon(
    MacroAssembler* masm,
    FastCloneShallowArrayStub::Mode mode,
  // Registers on entry:
  //   ecx: boilerplate literal array.
  ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);

  // All sizes here are multiples of kPointerSize.
  int elements_size = 0;
  elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
      ? FixedDoubleArray::SizeFor(length)
      : FixedArray::SizeFor(length);
  int size = JSArray::kSize + elements_size;

  // Allocate both the JS array and the elements array in one big
  // allocation. This avoids multiple limit checks.
  __ AllocateInNewSpace(size, eax, ebx, edx, fail, TAG_OBJECT);

  // Copy the JS array part.
  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
    if ((i != JSArray::kElementsOffset) || (length == 0)) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(eax, i), ebx);

  // Get hold of the elements array of the boilerplate and setup the
  // elements pointer in the resulting object.
  __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
  __ lea(edx, Operand(eax, JSArray::kSize));
  __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);

  // Copy the elements array.
  if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
    for (int i = 0; i < elements_size; i += kPointerSize) {
      __ mov(ebx, FieldOperand(ecx, i));
      __ mov(FieldOperand(edx, i), ebx);
  ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
  // Copy the fixed-double-array header word by word...
  for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
    __ mov(ebx, FieldOperand(ecx, i));
    __ mov(FieldOperand(edx, i), ebx);
  // ...then the payload as 64-bit doubles through the x87 stack.
  while (i < elements_size) {
    __ fld_d(FieldOperand(ecx, i));
    __ fstp_d(FieldOperand(edx, i));
  ASSERT(i == elements_size);
// Clones a shallow array literal.  Loads the boilerplate from the
// literals array at the given literal index; dispatches on the elements
// kind (COW / fast / double) and delegates the copy to
// GenerateFastCloneShallowArrayCommon.  Missing boilerplates and
// allocation failures go to Runtime::kCreateArrayLiteralShallow.
// NOTE(review): the 'slow_case' and 'message' declarations referenced
// below are not visible in this excerpt — confirm against the full
// source.
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  // [esp + kPointerSize]: constant elements.
  // [esp + (2 * kPointerSize)]: literal index.
  // [esp + (3 * kPointerSize)]: literals array.

  // Load boilerplate object into ecx and check if we need to create a
  __ mov(ecx, Operand(esp, 3 * kPointerSize));
  __ mov(eax, Operand(esp, 2 * kPointerSize));
  // The Smi-tagged literal index in eax is used directly as a
  // half-pointer-scaled index into the literals FixedArray.
  STATIC_ASSERT(kPointerSize == 4);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
                           FixedArray::kHeaderSize));
  Factory* factory = masm->isolate()->factory();
  __ cmp(ecx, factory->undefined_value());
  __ j(equal, &slow_case);

  FastCloneShallowArrayStub::Mode mode = mode_;
  // ecx is boilerplate object.
  if (mode == CLONE_ANY_ELEMENTS) {
    // Dynamic dispatch on the boilerplate's elements map.
    Label double_elements, check_fast_elements;
    __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
    __ CheckMap(ebx, factory->fixed_cow_array_map(),
                &check_fast_elements, DONT_DO_SMI_CHECK);
    // COW elements: length 0 — the elements pointer is shared, not copied.
    GenerateFastCloneShallowArrayCommon(masm, 0,
                                        COPY_ON_WRITE_ELEMENTS, &slow_case);
    __ ret(3 * kPointerSize);
    __ bind(&check_fast_elements);
    __ CheckMap(ebx, factory->fixed_array_map(),
                &double_elements, DONT_DO_SMI_CHECK);
    GenerateFastCloneShallowArrayCommon(masm, length_,
                                        CLONE_ELEMENTS, &slow_case);
    __ ret(3 * kPointerSize);
    __ bind(&double_elements);
    mode = CLONE_DOUBLE_ELEMENTS;
    // Fall through to generate the code to handle double elements.

  // Debug-only check that the boilerplate's elements map matches the
  // statically-expected elements kind.
  if (FLAG_debug_code) {
    Handle<Map> expected_map;
    if (mode == CLONE_ELEMENTS) {
      message = "Expected (writable) fixed array";
      expected_map = factory->fixed_array_map();
    } else if (mode == CLONE_DOUBLE_ELEMENTS) {
      message = "Expected (writable) fixed double array";
      expected_map = factory->fixed_double_array_map();
    ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
    message = "Expected copy-on-write fixed array";
    expected_map = factory->fixed_cow_array_map();
  __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
  __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
  __ Assert(equal, message);

  GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
  // Return and remove the on-stack parameters.
  __ ret(3 * kPointerSize);

  __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
// Clones a shallow object literal: loads the boilerplate from the
// literals array, verifies its instance size matches the statically
// expected size (length_ in-object properties), then allocates and copies
// it word by word.  All other cases fall back to
// Runtime::kCreateObjectLiteralShallow.
// NOTE(review): the 'slow_case' label declaration/bind is not visible in
// this excerpt — confirm against the full source.
void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
  // Stack layout on entry:
  // [esp + kPointerSize]: object literal flags.
  // [esp + (2 * kPointerSize)]: constant properties.
  // [esp + (3 * kPointerSize)]: literal index.
  // [esp + (4 * kPointerSize)]: literals array.

  // Load boilerplate object into ecx and check if we need to create a
  __ mov(ecx, Operand(esp, 4 * kPointerSize));
  __ mov(eax, Operand(esp, 3 * kPointerSize));
  // Smi-tagged index doubles as a half-pointer-scaled array index.
  STATIC_ASSERT(kPointerSize == 4);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);
  __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
                           FixedArray::kHeaderSize));
  Factory* factory = masm->isolate()->factory();
  __ cmp(ecx, factory->undefined_value());
  __ j(equal, &slow_case);

  // Check that the boilerplate contains only fast properties and we can
  // statically determine the instance size.
  int size = JSObject::kHeaderSize + length_ * kPointerSize;
  __ mov(eax, FieldOperand(ecx, HeapObject::kMapOffset));
  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceSizeOffset));
  __ cmp(eax, Immediate(size >> kPointerSizeLog2));
  __ j(not_equal, &slow_case);

  // Allocate the JS object and copy header together with all in-object
  // properties from the boilerplate.
  __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
  for (int i = 0; i < size; i += kPointerSize) {
    __ mov(ebx, FieldOperand(ecx, i));
    __ mov(FieldOperand(eax, i), ebx);

  // Return and remove the on-stack parameters.
  __ ret(4 * kPointerSize);

  __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
426 // The stub expects its argument on the stack and returns its result in tos_:
427 // zero for false, and a non-zero value for true.
428 void ToBooleanStub::Generate(MacroAssembler* masm) {
429 // This stub overrides SometimesSetsUpAFrame() to return false. That means
430 // we cannot call anything that could cause a GC from this stub.
432 Factory* factory = masm->isolate()->factory();
433 const Register argument = eax;
434 const Register map = edx;
436 if (!types_.IsEmpty()) {
437 __ mov(argument, Operand(esp, 1 * kPointerSize));
440 // undefined -> false
441 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
443 // Boolean -> its value
444 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false);
445 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true);
448 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false);
450 if (types_.Contains(SMI)) {
451 // Smis: 0 -> false, all other -> true
453 __ JumpIfNotSmi(argument, ¬_smi, Label::kNear);
454 // argument contains the correct return value already.
455 if (!tos_.is(argument)) {
456 __ mov(tos_, argument);
458 __ ret(1 * kPointerSize);
460 } else if (types_.NeedsMap()) {
461 // If we need a map later and have a Smi -> patch.
462 __ JumpIfSmi(argument, &patch, Label::kNear);
465 if (types_.NeedsMap()) {
466 __ mov(map, FieldOperand(argument, HeapObject::kMapOffset));
468 if (types_.CanBeUndetectable()) {
469 __ test_b(FieldOperand(map, Map::kBitFieldOffset),
470 1 << Map::kIsUndetectable);
471 // Undetectable -> false.
472 Label not_undetectable;
473 __ j(zero, ¬_undetectable, Label::kNear);
474 __ Set(tos_, Immediate(0));
475 __ ret(1 * kPointerSize);
476 __ bind(¬_undetectable);
480 if (types_.Contains(SPEC_OBJECT)) {
481 // spec object -> true.
483 __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
484 __ j(below, ¬_js_object, Label::kNear);
485 // argument contains the correct return value already.
486 if (!tos_.is(argument)) {
487 __ Set(tos_, Immediate(1));
489 __ ret(1 * kPointerSize);
490 __ bind(¬_js_object);
493 if (types_.Contains(STRING)) {
494 // String value -> false iff empty.
496 __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
497 __ j(above_equal, ¬_string, Label::kNear);
498 __ mov(tos_, FieldOperand(argument, String::kLengthOffset));
499 __ ret(1 * kPointerSize); // the string length is OK as the return value
500 __ bind(¬_string);
503 if (types_.Contains(HEAP_NUMBER)) {
504 // heap number -> false iff +0, -0, or NaN.
505 Label not_heap_number, false_result;
506 __ cmp(map, factory->heap_number_map());
507 __ j(not_equal, ¬_heap_number, Label::kNear);
509 __ fld_d(FieldOperand(argument, HeapNumber::kValueOffset));
511 __ j(zero, &false_result, Label::kNear);
512 // argument contains the correct return value already.
513 if (!tos_.is(argument)) {
514 __ Set(tos_, Immediate(1));
516 __ ret(1 * kPointerSize);
517 __ bind(&false_result);
518 __ Set(tos_, Immediate(0));
519 __ ret(1 * kPointerSize);
520 __ bind(¬_heap_number);
524 GenerateTypeTransition(masm);
// Called when the store buffer overflows: saves all XMM registers on the
// stack (when save_doubles_ == kSaveFPRegs), calls the C function that
// drains the store buffer with the isolate address as its only argument,
// then restores the XMM registers.
void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
  // We don't allow a GC during a store buffer overflow so there is no need to
  // store the registers in any particular way, but we do have to store and
  if (save_doubles_ == kSaveFPRegs) {
    CpuFeatures::Scope scope(SSE2);
    __ sub(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
    // Spill each XMM register to its slot in the reserved stack area.
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      __ movdbl(Operand(esp, i * kDoubleSize), reg);
  const int argument_count = 1;

  // The callee cannot trigger a GC, so no safepoint bookkeeping is needed.
  AllowExternalCallThatCantCauseGC scope(masm);
  __ PrepareCallCFunction(argument_count, ecx);
  __ mov(Operand(esp, 0 * kPointerSize),
         Immediate(ExternalReference::isolate_address()));
      ExternalReference::store_buffer_overflow_function(masm->isolate()),
  if (save_doubles_ == kSaveFPRegs) {
    CpuFeatures::Scope scope(SSE2);
    // Reload each XMM register from its spill slot and free the area.
    for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
      XMMRegister reg = XMMRegister::from_code(i);
      __ movdbl(reg, Operand(esp, i * kDoubleSize));
  __ add(esp, Immediate(kDoubleSize * XMMRegister::kNumRegisters));
// Emits a check for one oddball root value (undefined, true, false, null):
// if the argument in eax equals the given root, set tos_ to that oddball's
// boolean value and return; otherwise fall through.  Only emitted when
// 'type' is in the stub's recorded type set.
// NOTE(review): the 'type' and expected-result parameters are not visible
// in this excerpt's parameter list — confirm against the full source.
void ToBooleanStub::CheckOddball(MacroAssembler* masm,
                                 Heap::RootListIndex value,
  const Register argument = eax;
  if (types_.Contains(type)) {
    // If we see an expected oddball, return its ToBoolean value tos_.
    Label different_value;
    __ CompareRoot(argument, value);
    __ j(not_equal, &different_value, Label::kNear);
    // If we have to return zero, there is no way around clearing tos_.
    __ Set(tos_, Immediate(0));
  } else if (!tos_.is(argument)) {
    // If we have to return non-zero, we can re-use the argument if it is the
    // same register as the result, because we never see Smi-zero here.
    __ Set(tos_, Immediate(1));
  __ ret(1 * kPointerSize);
  __ bind(&different_value);
// Pushes the result-register code and the recorded type bits (as Smis)
// under the return address, then tail-calls the kToBoolean_Patch IC
// utility to install a specialized stub.
void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Get return address, operand is now on top of stack.
  __ push(Immediate(Smi::FromInt(tos_.code())));
  __ push(Immediate(Smi::FromInt(types_.ToByte())));
  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kToBoolean_Patch), masm->isolate()),
// Static-only helper class collecting the code patterns used by the
// binary/unary op stubs to load, check and convert floating point
// operands, both for the x87 FPU stack and for SSE2 (xmm0/xmm1).
class FloatingPointHelper : public AllStatic {
  // Code pattern for loading a floating point value. Input value must
  // be either a smi or a heap number object (fp value). Requirements:
  // operand in register number. Returns operand as floating point number
  static void LoadFloatOperand(MacroAssembler* masm, Register number);

  // Code pattern for loading floating point values. Input values must
  // be either smi or heap number objects (fp values). Requirements:
  // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax.
  // Returns operands as floating point numbers on FPU stack.
  static void LoadFloatOperands(MacroAssembler* masm,
                                ArgLocation arg_location = ARGS_ON_STACK);

  // Similar to LoadFloatOperand but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadFloatSmis(MacroAssembler* masm, Register scratch);

  // Test if operands are smi or number objects (fp). Requirements:
  // operand_1 in eax, operand_2 in edx; falls through on float
  // operands, jumps to the non_float label otherwise.
  static void CheckFloatOperands(MacroAssembler* masm,

  // Checks that the two floating point numbers on top of the FPU stack
  // have int32 values.
  static void CheckFloatOperandsAreInt32(MacroAssembler* masm,

  // Takes the operands in edx and eax and loads them as integers in eax
  static void LoadUnknownsAsIntegers(MacroAssembler* masm,
                                     Label* operand_conversion_failure);

  // Must only be called after LoadUnknownsAsIntegers. Assumes that the
  // operands are pushed on the stack, and that their conversions to int32
  // are in eax and ecx. Checks that the original numbers were in the int32
  static void CheckLoadedIntegersWereInt32(MacroAssembler* masm,

  // Assumes that operands are smis or heap numbers and loads them
  // into xmm0 and xmm1. Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm);

  // Test if operands are numbers (smi or HeapNumber objects), and load
  // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
  // either operand is not a number. Operands are in edx and eax.
  // Leaves operands unchanged.
  static void LoadSSE2Operands(MacroAssembler* masm, Label* not_numbers);

  // Similar to LoadSSE2Operands but assumes that both operands are smis.
  // Expects operands in edx, eax.
  static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);

  // Checks that the two floating point numbers loaded into xmm0 and xmm1
  // have int32 values.
  static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
// Get the integer part of a heap number. Surprisingly, all this bit twiddling
// is faster than using the built-in instructions on floating point registers.
// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
// trashed registers.
//
// Two strategies: with SSE3, FISTTP does a truncating 64-bit conversion
// directly (after an exponent range check); without it, the mantissa is
// extracted and shifted manually, with special handling for the
// exponent-31 case produced by the >>> operator.
// NOTE(review): the 'source', 'use_sse3' parameters and several labels
// ('negative', loop shifts) referenced below are not visible in this
// excerpt — confirm against the full source.
static void IntegerConvert(MacroAssembler* masm,
                           Label* conversion_failure) {
  ASSERT(!source.is(ecx) && !source.is(edi) && !source.is(ebx));
  Label done, right_exponent, normal_exponent;
  Register scratch = ebx;
  Register scratch2 = edi;
  // Get exponent word.
  __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
  // Get exponent alone in scratch2.
  __ mov(scratch2, scratch);
  __ and_(scratch2, HeapNumber::kExponentMask);
  CpuFeatures::Scope scope(SSE3);
  // Check whether the exponent is too big for a 64 bit signed integer.
  static const uint32_t kTooBigExponent =
      (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
  __ cmp(scratch2, Immediate(kTooBigExponent));
  __ j(greater_equal, conversion_failure);
  // Load x87 register with heap number.
  __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
  // Reserve space for 64 bit answer.
  __ sub(esp, Immediate(sizeof(uint64_t)));  // Nolint.
  // Do conversion, which cannot fail because we checked the exponent.
  __ fisttp_d(Operand(esp, 0));
  __ mov(ecx, Operand(esp, 0));  // Load low word of answer into ecx.
  __ add(esp, Immediate(sizeof(uint64_t)));  // Nolint.
  // Load ecx with zero. We use this either for the final shift or
  // Check whether the exponent matches a 32 bit signed int that cannot be
  // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
  // exponent is 30 (biased). This is the exponent that we are fastest at and
  // also the highest exponent we can handle here.
  const uint32_t non_smi_exponent =
      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
  __ cmp(scratch2, Immediate(non_smi_exponent));
  // If we have a match of the int32-but-not-Smi exponent then skip some
  __ j(equal, &right_exponent, Label::kNear);
  // If the exponent is higher than that then go to slow case. This catches
  // numbers that don't fit in a signed int32, infinities and NaNs.
  __ j(less, &normal_exponent, Label::kNear);
  // Handle a big exponent. The only reason we have this code is that the
  // >>> operator has a tendency to generate numbers with an exponent of 31.
  const uint32_t big_non_smi_exponent =
      (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
  __ cmp(scratch2, Immediate(big_non_smi_exponent));
  __ j(not_equal, conversion_failure);
  // We have the big exponent, typically from >>>. This means the number is
  // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
  __ mov(scratch2, scratch);
  __ and_(scratch2, HeapNumber::kMantissaMask);
  // Put back the implicit 1.
  __ or_(scratch2, 1 << HeapNumber::kExponentShift);
  // Shift up the mantissa bits to take up the space the exponent used to
  // take. We just orred in the implicit bit so that took care of one and
  // we want to use the full unsigned range so we subtract 1 bit from the
  const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
  __ shl(scratch2, big_shift_distance);
  // Get the second half of the double.
  __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
  // Shift down 21 bits to get the most significant 11 bits or the low
  __ shr(ecx, 32 - big_shift_distance);
  __ or_(ecx, scratch2);
  // We have the answer in ecx, but we may need to negate it.
  __ test(scratch, scratch);
  __ j(positive, &done, Label::kNear);
  __ jmp(&done, Label::kNear);
  __ bind(&normal_exponent);
  // Exponent word in scratch, exponent part of exponent word in scratch2.
  // We know the exponent is smaller than 30 (biased). If it is less than
  // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
  // it rounds to zero.
  const uint32_t zero_exponent =
      (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
  __ sub(scratch2, Immediate(zero_exponent));
  // ecx already has a Smi zero.
  __ j(less, &done, Label::kNear);
  // We have a shifted exponent between 0 and 30 in scratch2.
  __ shr(scratch2, HeapNumber::kExponentShift);
  __ mov(ecx, Immediate(30));
  __ sub(ecx, scratch2);
  __ bind(&right_exponent);
  // Here ecx is the shift, scratch is the exponent word.
  // Get the top bits of the mantissa.
  __ and_(scratch, HeapNumber::kMantissaMask);
  // Put back the implicit 1.
  __ or_(scratch, 1 << HeapNumber::kExponentShift);
  // Shift up the mantissa bits to take up the space the exponent used to
  // take. We have kExponentShift + 1 significant bits in the low end of the
  // word. Shift them to the top bits.
  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
  __ shl(scratch, shift_distance);
  // Get the second half of the double. For some exponents we don't
  // actually need this because the bits get shifted out again, but
  // it's probably slower to test than just to do it.
  __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
  // Shift down 22 bits to get the most significant 10 bits or the low
  __ shr(scratch2, 32 - shift_distance);
  __ or_(scratch2, scratch);
  // Move down according to the exponent.
  // Now the unsigned answer is in scratch2. We need to move it to ecx and
  // we may need to fix the sign.
  __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
  __ j(greater, &negative, Label::kNear);
  __ mov(ecx, scratch2);
  __ jmp(&done, Label::kNear);
  __ sub(ecx, scratch2);
// Appends this stub's diagnostic name ("UnaryOpStub_<op>_<mode>_<type>")
// to the given stream, based on the operator, overwrite mode and recorded
// operand type.
void UnaryOpStub::PrintName(StringStream* stream) {
  const char* op_name = Token::Name(op_);
  const char* overwrite_name = NULL;  // Make g++ happy.
    case UNARY_NO_OVERWRITE: overwrite_name = "Alloc"; break;
    case UNARY_OVERWRITE: overwrite_name = "Overwrite"; break;
  stream->Add("UnaryOpStub_%s_%s_%s",
              UnaryOpIC::GetName(operand_type_));
// TODO(svenpanne): Use virtual functions instead of switch.
// Top-level dispatch: selects the specialized generator for the operand
// type recorded by the UnaryOp IC.
void UnaryOpStub::Generate(MacroAssembler* masm) {
  switch (operand_type_) {
    case UnaryOpIC::UNINITIALIZED:
      GenerateTypeTransition(masm);
      GenerateSmiStub(masm);
    case UnaryOpIC::HEAP_NUMBER:
      GenerateHeapNumberStub(masm);
    case UnaryOpIC::GENERIC:
      GenerateGenericStub(masm);
// Pushes the operand plus the stub parameters (op, overwrite mode and
// operand type, each as a Smi) under the return address, then tail-calls
// the kUnaryOp_Patch IC utility to install a specialized stub.
void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
  __ pop(ecx);  // Save return address.

  __ push(eax);  // the operand
  __ push(Immediate(Smi::FromInt(op_)));
  __ push(Immediate(Smi::FromInt(mode_)));
  __ push(Immediate(Smi::FromInt(operand_type_)));

  __ push(ecx);  // Push return address.

  // Patch the caller to an appropriate specialized stub and return the
  // operation result to the caller of the stub.
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1);
// TODO(svenpanne): Use virtual functions instead of switch.
// Dispatches the Smi-specialized stub by operator (SUB vs BIT_NOT).
void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
      GenerateSmiStubSub(masm);
      GenerateSmiStubBitNot(masm);
// Smi-specialized negation: emits the fast Smi path, the undo path for
// overflow, and a type transition for values the Smi path cannot handle.
void UnaryOpStub::GenerateSmiStubSub(MacroAssembler* masm) {
  Label non_smi, undo, slow;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &slow,
                     Label::kNear, Label::kNear, Label::kNear);
  GenerateSmiCodeUndo(masm);
  GenerateTypeTransition(masm);
// Smi-specialized bitwise NOT: fast path plus a type transition for
// non-Smi operands.
// NOTE(review): the 'non_smi' label declaration is not visible in this
// excerpt — confirm against the full source.
void UnaryOpStub::GenerateSmiStubBitNot(MacroAssembler* masm) {
  GenerateSmiCodeBitNot(masm, &non_smi);
  GenerateTypeTransition(masm);
// Emits the Smi fast path for unary minus on eax.  Jumps to 'non_smi' for
// non-Smi input, to 'slow' for zero (negating would produce -0, which is
// not a Smi), and to 'undo' when the optimistic '0 - value' overflows.
// NOTE(review): the non_smi/undo/slow Label* parameters are not visible in
// this excerpt's parameter list — confirm against the full source.
void UnaryOpStub::GenerateSmiCodeSub(MacroAssembler* masm,
                                     Label::Distance non_smi_near,
                                     Label::Distance undo_near,
                                     Label::Distance slow_near) {
  // Check whether the value is a smi.
  __ JumpIfNotSmi(eax, non_smi, non_smi_near);

  // We can't handle -0 with smis, so use a type transition for that case.
  __ j(zero, slow, slow_near);

  // Try optimistic subtraction '0 - value', saving operand in eax for undo.
  __ Set(eax, Immediate(0));
  __ j(overflow, undo, undo_near);
// Emits the Smi fast path for bitwise NOT on eax: flipping all bits also
// inverts the (zero) Smi tag, so the tag bits are masked back afterwards.
// Jumps to 'non_smi' for non-Smi input.
void UnaryOpStub::GenerateSmiCodeBitNot(
    MacroAssembler* masm,
    Label::Distance non_smi_near) {
  // Check whether the value is a smi.
  __ JumpIfNotSmi(eax, non_smi, non_smi_near);

  // Flip bits and revert inverted smi-tag.
  __ and_(eax, ~kSmiTagMask);
// Restores the saved operand after an overflowing optimistic Smi
// operation (body not visible in this excerpt).
void UnaryOpStub::GenerateSmiCodeUndo(MacroAssembler* masm) {
// TODO(svenpanne): Use virtual functions instead of switch.
// Dispatches the heap-number-specialized stub by operator (SUB vs BIT_NOT).
void UnaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
      GenerateHeapNumberStubSub(masm);
      GenerateHeapNumberStubBitNot(masm);
// Heap-number-specialized negation: Smi fast path first (builtin fallback
// instead of a slow label), then the heap-number path, the Smi undo path,
// a type transition, and finally the generic fallback.
void UnaryOpStub::GenerateHeapNumberStubSub(MacroAssembler* masm) {
  Label non_smi, undo, slow, call_builtin;
  GenerateSmiCodeSub(masm, &non_smi, &undo, &call_builtin, Label::kNear);
  GenerateHeapNumberCodeSub(masm, &slow);
  GenerateSmiCodeUndo(masm);
  GenerateTypeTransition(masm);
  __ bind(&call_builtin);
  GenerateGenericCodeFallback(masm);
// Bitwise NOT for smi or heap-number input; unknown types transition the IC.
966 void UnaryOpStub::GenerateHeapNumberStubBitNot(
967 MacroAssembler* masm) {
969 GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
971 GenerateHeapNumberCodeBitNot(masm, &slow);
973 GenerateTypeTransition(masm);
// Negates the heap number in eax; jumps to |slow| if eax is not a heap
// number.  In UNARY_OVERWRITE mode the sign bit is flipped in place;
// otherwise a fresh heap number is allocated (via the runtime under an
// internal frame if inline allocation fails) and the negated value copied
// into it.
977 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm,
979 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
980 __ cmp(edx, masm->isolate()->factory()->heap_number_map());
981 __ j(not_equal, slow);
983 if (mode_ == UNARY_OVERWRITE) {
984 __ xor_(FieldOperand(eax, HeapNumber::kExponentOffset),
985 Immediate(HeapNumber::kSignMask)); // Flip sign.
990 Label slow_allocate_heapnumber, heapnumber_allocated;
991 __ AllocateHeapNumber(eax, ebx, ecx, &slow_allocate_heapnumber);
992 __ jmp(&heapnumber_allocated, Label::kNear);
994 __ bind(&slow_allocate_heapnumber);
// Inline allocation failed: enter a frame and let the runtime allocate.
996 FrameScope scope(masm, StackFrame::INTERNAL);
998 __ CallRuntime(Runtime::kNumberAlloc, 0);
1002 __ bind(&heapnumber_allocated);
1003 // eax: allocated 'empty' number
// NOTE(review): edx is expected to hold the original heap number here —
// set up on lines elided from this excerpt; confirm against full source.
1004 __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
1005 __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
1006 __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
1007 __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
1008 __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
// Bitwise NOT of the heap number in eax: convert to an untagged int32 in
// ecx, try to return a smi, otherwise store the result in a heap number
// (allocating one unless the input may be overwritten).  Jumps to |slow|
// for non-heap-number input or a failed int conversion.
1014 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
1016 __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
1017 __ cmp(edx, masm->isolate()->factory()->heap_number_map());
1018 __ j(not_equal, slow);
1020 // Convert the heap number in eax to an untagged integer in ecx.
1021 IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), slow);
1023 // Do the bitwise operation and check if the result fits in a smi.
1026 __ cmp(ecx, 0xc0000000);
1027 __ j(sign, &try_float, Label::kNear);
1029 // Tag the result as a smi and we're done.
1030 STATIC_ASSERT(kSmiTagSize == 1);
1031 __ lea(eax, Operand(ecx, times_2, kSmiTag));
1034 // Try to store the result in a heap number.
1035 __ bind(&try_float);
1036 if (mode_ == UNARY_NO_OVERWRITE) {
1037 Label slow_allocate_heapnumber, heapnumber_allocated;
1039 __ AllocateHeapNumber(eax, edx, edi, &slow_allocate_heapnumber);
1040 __ jmp(&heapnumber_allocated);
1042 __ bind(&slow_allocate_heapnumber);
// Inline allocation failed: allocate via the runtime under a frame.
1044 FrameScope scope(masm, StackFrame::INTERNAL);
1045 // Push the original HeapNumber on the stack. The integer value can't
1046 // be stored since it's untagged and not in the smi range (so we can't
1047 // smi-tag it). We'll recalculate the value after the GC instead.
1049 __ CallRuntime(Runtime::kNumberAlloc, 0);
1050 // New HeapNumber is in eax.
1053 // IntegerConvert uses ebx and edi as scratch registers.
1054 // This conversion won't go slow-case.
1055 IntegerConvert(masm, edx, CpuFeatures::IsSupported(SSE3), slow);
1058 __ bind(&heapnumber_allocated);
// Store the int32 result (ecx) as a double into the result heap number.
1060 if (CpuFeatures::IsSupported(SSE2)) {
1061 CpuFeatures::Scope use_sse2(SSE2);
1062 __ cvtsi2sd(xmm0, ecx);
1063 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1066 __ fild_s(Operand(esp, 0));
1068 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
// Generic unary-op stub: dispatches on op_ to the SUB or BIT_NOT
// specialization below.
1074 // TODO(svenpanne): Use virtual functions instead of switch.
1075 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) {
1078 GenerateGenericStubSub(masm);
1080 case Token::BIT_NOT:
1081 GenerateGenericStubBitNot(masm);
// Generic unary minus: smi path, then heap-number path, then the JS
// builtin fallback for everything else.
1089 void UnaryOpStub::GenerateGenericStubSub(MacroAssembler* masm) {
1090 Label non_smi, undo, slow;
1091 GenerateSmiCodeSub(masm, &non_smi, &undo, &slow, Label::kNear);
1093 GenerateHeapNumberCodeSub(masm, &slow);
1095 GenerateSmiCodeUndo(masm);
1097 GenerateGenericCodeFallback(masm);
// Generic bitwise NOT: smi path, then heap-number path, then the JS
// builtin fallback for everything else.
1101 void UnaryOpStub::GenerateGenericStubBitNot(MacroAssembler* masm) {
1102 Label non_smi, slow;
1103 GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear);
1105 GenerateHeapNumberCodeBitNot(masm, &slow);
1107 GenerateGenericCodeFallback(masm);
// Last-resort path: re-push the argument under the return address and
// tail-call the matching JavaScript builtin for op_.
1111 void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
1112 // Handle the slow case by jumping to the corresponding JavaScript builtin.
1113 __ pop(ecx); // pop return address.
1115 __ push(ecx); // push return address
1118 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
1120 case Token::BIT_NOT:
1121 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
// Pushes the two operands plus this stub's key, op and type info, then
// tail-calls the BinaryOp IC patch routine, which installs a more general
// stub and computes the result.
1129 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1130 __ pop(ecx); // Save return address.
1133 // Left and right arguments are now on top.
1134 // Push this stub's key. Although the operation and the type info are
1135 // encoded into the key, the encoding is opaque, so push them too.
1136 __ push(Immediate(Smi::FromInt(MinorKey())));
1137 __ push(Immediate(Smi::FromInt(op_)));
1138 __ push(Immediate(Smi::FromInt(operands_type_)));
1140 __ push(ecx); // Push return address.
1142 // Patch the caller to an appropriate specialized stub and return the
1143 // operation result to the caller of the stub.
1144 __ TailCallExternalReference(
1145 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
1152 // Prepare for a type transition runtime call when the args are already on
1153 // the stack, under the return address.
1154 void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
1155 __ pop(ecx); // Save return address.
1156 // Left and right arguments are already on top of the stack.
1157 // Push this stub's key. Although the operation and the type info are
1158 // encoded into the key, the encoding is opaque, so push them too.
1159 __ push(Immediate(Smi::FromInt(MinorKey())));
1160 __ push(Immediate(Smi::FromInt(op_)));
1161 __ push(Immediate(Smi::FromInt(operands_type_)));
1163 __ push(ecx); // Push return address.
1165 // Patch the caller to an appropriate specialized stub and return the
1166 // operation result to the caller of the stub.
1167 __ TailCallExternalReference(
1168 ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
// Top-level entry point: dispatches on the recorded operand type to the
// matching specialized generator.
1175 void BinaryOpStub::Generate(MacroAssembler* masm) {
1176 // Explicitly allow generation of nested stubs. It is safe here because
1177 // generation code does not use any raw pointers.
1178 AllowStubCallsScope allow_stub_calls(masm, true);
1180 switch (operands_type_) {
1181 case BinaryOpIC::UNINITIALIZED:
1182 GenerateTypeTransition(masm);
1184 case BinaryOpIC::SMI:
1185 GenerateSmiStub(masm);
1187 case BinaryOpIC::INT32:
1188 GenerateInt32Stub(masm);
1190 case BinaryOpIC::HEAP_NUMBER:
1191 GenerateHeapNumberStub(masm);
1193 case BinaryOpIC::ODDBALL:
1194 GenerateOddballStub(masm);
1196 case BinaryOpIC::BOTH_STRING:
1197 GenerateBothStringStub(masm);
1199 case BinaryOpIC::STRING:
1200 GenerateStringStub(masm);
1202 case BinaryOpIC::GENERIC:
1203 GenerateGeneric(masm);
// Debug name: "BinaryOpStub_<op>_<overwrite-mode>_<operand-type>".
1211 void BinaryOpStub::PrintName(StringStream* stream) {
1212 const char* op_name = Token::Name(op_);
1213 const char* overwrite_name;
1215 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
1216 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
1217 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
1218 default: overwrite_name = "UnknownOverwrite"; break;
1220 stream->Add("BinaryOpStub_%s_%s_%s",
1223 BinaryOpIC::GetName(operands_type_));
// Emits the all-smi fast path for a binary operation.  Loads the operands,
// smi-checks them (falling out to |not_smis|/|slow| otherwise), performs
// the op with overflow/negative-zero checks, and either returns a smi or —
// when |allow_heapnumber_results| permits — boxes an overflowed result in a
// heap number.  Fix in this revision: the label references &not_smis had
// been corrupted by HTML-entity decoding into "¬_smis"; restored.
1227 void BinaryOpStub::GenerateSmiCode(
1228 MacroAssembler* masm,
1230 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
1231 // 1. Move arguments into edx, eax except for DIV and MOD, which need the
1232 // dividend in eax and edx free for the division. Use eax, ebx for those.
1233 Comment load_comment(masm, "-- Load arguments");
1234 Register left = edx;
1235 Register right = eax;
1236 if (op_ == Token::DIV || op_ == Token::MOD) {
1244 // 2. Prepare the smi check of both operands by oring them together.
1245 Comment smi_check_comment(masm, "-- Smi check arguments");
1247 Register combined = ecx;
1248 ASSERT(!left.is(combined) && !right.is(combined));
1251 // Perform the operation into eax and smi check the result. Preserve
1252 // eax in case the result is not a smi.
1253 ASSERT(!left.is(ecx) && !right.is(ecx));
1255 __ or_(right, left); // Bitwise or is commutative.
1259 case Token::BIT_XOR:
1260 case Token::BIT_AND:
1266 __ mov(combined, right);
1267 __ or_(combined, left);
1273 // Move the right operand into ecx for the shift operation, use eax
1274 // for the smi check register.
1275 ASSERT(!left.is(ecx) && !right.is(ecx));
1277 __ or_(right, left);
1285 // 3. Perform the smi check of the operands.
1286 STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
1287 __ JumpIfNotSmi(combined, &not_smis);
1289 // 4. Operands are both smis, perform the operation leaving the result in
1290 // eax and check the result if necessary.
1291 Comment perform_smi(masm, "-- Perform smi operation");
1292 Label use_fp_on_smis;
1298 case Token::BIT_XOR:
1299 ASSERT(right.is(eax));
1300 __ xor_(right, left); // Bitwise xor is commutative.
1303 case Token::BIT_AND:
1304 ASSERT(right.is(eax));
1305 __ and_(right, left); // Bitwise and is commutative.
1309 // Remove tags from operands (but keep sign).
1312 // Perform the operation.
1314 // Check that the *signed* result fits in a smi.
1315 __ cmp(left, 0xc0000000);
1316 __ j(sign, &use_fp_on_smis);
1317 // Tag the result and store it in register eax.
1323 // Remove tags from operands (but keep sign).
1326 // Perform the operation.
1328 // Tag the result and store it in register eax.
1334 // Remove tags from operands (but keep sign).
1337 // Perform the operation.
1339 // Check that the *unsigned* result fits in a smi.
1340 // Neither of the two high-order bits can be set:
1341 // - 0x80000000: high bit would be lost when smi tagging.
1342 // - 0x40000000: this number would convert to negative when
1343 // Smi tagging these two cases can only happen with shifts
1344 // by 0 or 1 when handed a valid smi.
1345 __ test(left, Immediate(0xc0000000));
1346 __ j(not_zero, &use_fp_on_smis);
1347 // Tag the result and store it in register eax.
1353 ASSERT(right.is(eax));
1354 __ add(right, left); // Addition is commutative.
1355 __ j(overflow, &use_fp_on_smis);
1359 __ sub(left, right);
1360 __ j(overflow, &use_fp_on_smis);
1365 // If the smi tag is 0 we can just leave the tag on one operand.
1366 STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
1367 // We can't revert the multiplication if the result is not a smi
1368 // so save the right operand.
1370 // Remove tag from one of the operands (but keep sign).
1372 // Do multiplication.
1373 __ imul(right, left); // Multiplication is commutative.
1374 __ j(overflow, &use_fp_on_smis);
1375 // Check for negative zero result. Use combined = left | right.
1376 __ NegativeZeroTest(right, combined, &use_fp_on_smis);
1380 // We can't revert the division if the result is not a smi so
1381 // save the left operand.
1383 // Check for 0 divisor.
1384 __ test(right, right);
1385 __ j(zero, &use_fp_on_smis);
1386 // Sign extend left into edx:eax.
1387 ASSERT(left.is(eax));
1389 // Divide edx:eax by right.
1391 // Check for the corner case of dividing the most negative smi by
1392 // -1. We cannot use the overflow flag, since it is not set by idiv
1394 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
1395 __ cmp(eax, 0x40000000);
1396 __ j(equal, &use_fp_on_smis);
1397 // Check for negative zero result. Use combined = left | right.
1398 __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
1399 // Check that the remainder is zero.
1401 __ j(not_zero, &use_fp_on_smis);
1402 // Tag the result and store it in register eax.
1407 // Check for 0 divisor.
1408 __ test(right, right);
1409 __ j(zero, &not_smis);
1411 // Sign extend left into edx:eax.
1412 ASSERT(left.is(eax));
1414 // Divide edx:eax by right.
1416 // Check for negative zero result. Use combined = left | right.
1417 __ NegativeZeroTest(edx, combined, slow);
1418 // Move remainder to register eax.
1426 // 5. Emit return of result in eax. Some operations have registers pushed.
1436 case Token::BIT_AND:
1437 case Token::BIT_XOR:
1441 __ ret(2 * kPointerSize);
1447 // 6. For some operations emit inline code to perform floating point
1448 // operations on known smis (e.g., if the result of the operation
1449 // overflowed the smi range).
1450 if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
1451 __ bind(&use_fp_on_smis);
1453 // Undo the effects of some operations, and some register moves.
1455 // The arguments are saved on the stack, and only used from there.
1458 // Revert right = right + left.
1459 __ sub(right, left);
1462 // Revert left = left - right.
1463 __ add(left, right);
1466 // Right was clobbered but a copy is in ebx.
1470 // Left was clobbered but a copy is in edi. Right is in ebx for
1471 // division. They should be in eax, ebx for jump to not_smi.
1475 // No other operators jump to use_fp_on_smis.
1480 ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
1484 Comment perform_float(masm, "-- Perform float operation on smis");
1485 __ bind(&use_fp_on_smis);
1486 // Result we want is in left == edx, so we can put the allocated heap
1488 __ AllocateHeapNumber(eax, ecx, ebx, slow);
1489 // Store the result in the HeapNumber and return.
1490 // It's OK to overwrite the arguments on the stack because we
1491 // are about to return.
1492 if (op_ == Token::SHR) {
1493 __ mov(Operand(esp, 1 * kPointerSize), left);
1494 __ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
1495 __ fild_d(Operand(esp, 1 * kPointerSize));
1496 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1498 ASSERT_EQ(Token::SHL, op_);
1499 if (CpuFeatures::IsSupported(SSE2)) {
1500 CpuFeatures::Scope use_sse2(SSE2);
1501 __ cvtsi2sd(xmm0, left);
1502 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1504 __ mov(Operand(esp, 1 * kPointerSize), left);
1505 __ fild_s(Operand(esp, 1 * kPointerSize));
1506 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1509 __ ret(2 * kPointerSize);
1517 Comment perform_float(masm, "-- Perform float operation on smis");
1518 __ bind(&use_fp_on_smis);
1519 // Restore arguments to edx, eax.
1522 // Revert right = right + left.
1523 __ sub(right, left);
1526 // Revert left = left - right.
1527 __ add(left, right);
1530 // Right was clobbered but a copy is in ebx.
1534 // Left was clobbered but a copy is in edi. Right is in ebx for
1539 default: UNREACHABLE();
1542 __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
1543 if (CpuFeatures::IsSupported(SSE2)) {
1544 CpuFeatures::Scope use_sse2(SSE2);
1545 FloatingPointHelper::LoadSSE2Smis(masm, ebx);
1547 case Token::ADD: __ addsd(xmm0, xmm1); break;
1548 case Token::SUB: __ subsd(xmm0, xmm1); break;
1549 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1550 case Token::DIV: __ divsd(xmm0, xmm1); break;
1551 default: UNREACHABLE();
1553 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
1554 } else { // SSE2 not available, use FPU.
1555 FloatingPointHelper::LoadFloatSmis(masm, ebx);
1557 case Token::ADD: __ faddp(1); break;
1558 case Token::SUB: __ fsubp(1); break;
1559 case Token::MUL: __ fmulp(1); break;
1560 case Token::DIV: __ fdivp(1); break;
1561 default: UNREACHABLE();
1563 __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
1575 // 7. Non-smi operands, fall out to the non-smi code with the operands in
1577 Comment done_comment(masm, "-- Enter non-smi code");
1584 // Right operand is saved in ecx and eax was destroyed by the smi
1591 // Operands are in eax, ebx at this point.
// Smi-typed binary-op stub: emits the smi fast path (allowing heap-number
// results only when the recorded result type is wider than SMI) and
// transitions the IC on failure.
1602 void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
1613 case Token::BIT_AND:
1614 case Token::BIT_XOR:
1618 GenerateRegisterArgsPush(masm);
1624 if (result_type_ == BinaryOpIC::UNINITIALIZED ||
1625 result_type_ == BinaryOpIC::SMI) {
// Keep results smi-only so the IC can learn a heap-number result type.
1626 GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
1628 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
1630 __ bind(&call_runtime);
1636 GenerateTypeTransition(masm);
1640 case Token::BIT_AND:
1641 case Token::BIT_XOR:
1645 GenerateTypeTransitionWithSavedArgs(masm);
// ADD with at least one string operand: try string concatenation, else
// transition the IC.
1653 void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
1654 ASSERT(operands_type_ == BinaryOpIC::STRING);
1655 ASSERT(op_ == Token::ADD);
1656 // Try to add arguments as strings, otherwise, transition to the generic
1658 GenerateAddStrings(masm);
1659 GenerateTypeTransition(masm);
// ADD where both operands were strings: verify both are still strings and
// tail-call the string-add stub; otherwise transition the IC.
1663 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
1665 ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
1666 ASSERT(op_ == Token::ADD);
1667 // If both arguments are strings, call the string add stub.
1668 // Otherwise, do a transition.
1670 // Registers containing left and right operands respectively.
1671 Register left = edx;
1672 Register right = eax;
1674 // Test if left operand is a string.
1675 __ JumpIfSmi(left, &call_runtime, Label::kNear);
1676 __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
1677 __ j(above_equal, &call_runtime, Label::kNear);
1679 // Test if right operand is a string.
1680 __ JumpIfSmi(right, &call_runtime, Label::kNear);
1681 __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
1682 __ j(above_equal, &call_runtime, Label::kNear);
1684 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
1685 GenerateRegisterArgsPush(masm);
1686 __ TailCallStub(&string_add_stub);
1688 __ bind(&call_runtime);
1689 GenerateTypeTransition(masm);
// Int32-typed binary-op stub: handles arithmetic on operands known to be
// int32-valued (SSE2 or FPU), bitwise/shift ops on untagged int32s, and
// falls back to type transitions or JS builtins otherwise.  Fix in this
// revision: the label references &not_floats / &not_int32 had been
// corrupted by HTML-entity decoding into "¬_floats" / "¬_int32"; restored.
1693 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
1695 ASSERT(operands_type_ == BinaryOpIC::INT32);
1697 // Floating point case.
1705 if (CpuFeatures::IsSupported(SSE2)) {
1706 CpuFeatures::Scope use_sse2(SSE2);
1707 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1708 FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1710 case Token::ADD: __ addsd(xmm0, xmm1); break;
1711 case Token::SUB: __ subsd(xmm0, xmm1); break;
1712 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1713 case Token::DIV: __ divsd(xmm0, xmm1); break;
1714 default: UNREACHABLE();
1716 // Check result type if it is currently Int32.
1717 if (result_type_ <= BinaryOpIC::INT32) {
// Round-trip double->int32->double; any mismatch means not an int32.
1718 __ cvttsd2si(ecx, Operand(xmm0));
1719 __ cvtsi2sd(xmm2, ecx);
1720 __ ucomisd(xmm0, xmm2);
1721 __ j(not_zero, &not_int32);
1722 __ j(carry, &not_int32);
1724 GenerateHeapResultAllocation(masm, &call_runtime);
1725 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1727 } else { // SSE2 not available, use FPU.
1728 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1729 FloatingPointHelper::LoadFloatOperands(
1732 FloatingPointHelper::ARGS_IN_REGISTERS);
1733 FloatingPointHelper::CheckFloatOperandsAreInt32(masm, &not_int32);
1735 case Token::ADD: __ faddp(1); break;
1736 case Token::SUB: __ fsubp(1); break;
1737 case Token::MUL: __ fmulp(1); break;
1738 case Token::DIV: __ fdivp(1); break;
1739 default: UNREACHABLE();
1741 Label after_alloc_failure;
1742 GenerateHeapResultAllocation(masm, &after_alloc_failure);
1743 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1745 __ bind(&after_alloc_failure);
1747 __ jmp(&call_runtime);
1750 __ bind(&not_floats);
1751 __ bind(&not_int32);
1752 GenerateTypeTransition(masm);
1757 // For MOD we go directly to runtime in the non-smi case.
1761 case Token::BIT_AND:
1762 case Token::BIT_XOR:
1766 GenerateRegisterArgsPush(masm);
1769 Label non_smi_result;
1771 CpuFeatures::Scope use_sse2(SSE2);
1772 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1773 FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
1775 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1778 FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
1781 case Token::BIT_OR: __ or_(eax, ecx); break;
1782 case Token::BIT_AND: __ and_(eax, ecx); break;
1783 case Token::BIT_XOR: __ xor_(eax, ecx); break;
1784 case Token::SAR: __ sar_cl(eax); break;
1785 case Token::SHL: __ shl_cl(eax); break;
1786 case Token::SHR: __ shr_cl(eax); break;
1787 default: UNREACHABLE();
1789 if (op_ == Token::SHR) {
1790 // Check if result is non-negative and fits in a smi.
1791 __ test(eax, Immediate(0xc0000000));
1792 __ j(not_zero, &call_runtime);
1794 // Check if result fits in a smi.
1795 __ cmp(eax, 0xc0000000);
1796 __ j(negative, &non_smi_result, Label::kNear);
1798 // Tag smi result and return.
1800 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1802 // All ops except SHR return a signed int32 that we load in
1804 if (op_ != Token::SHR) {
1805 __ bind(&non_smi_result);
1806 // Allocate a heap number if needed.
1807 __ mov(ebx, eax); // ebx: result
1808 Label skip_allocation;
1810 case OVERWRITE_LEFT:
1811 case OVERWRITE_RIGHT:
1812 // If the operand was an object, we skip the
1813 // allocation of a heap number.
1814 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1815 1 * kPointerSize : 2 * kPointerSize));
1816 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
1819 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1820 __ bind(&skip_allocation);
1822 default: UNREACHABLE();
1824 // Store the result in the HeapNumber and return.
1825 if (CpuFeatures::IsSupported(SSE2)) {
1826 CpuFeatures::Scope use_sse2(SSE2);
1827 __ cvtsi2sd(xmm0, ebx);
1828 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1830 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1831 __ fild_s(Operand(esp, 1 * kPointerSize));
1832 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1834 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
1837 __ bind(&not_floats);
1838 __ bind(&not_int32);
1839 GenerateTypeTransitionWithSavedArgs(masm);
1842 default: UNREACHABLE(); break;
1845 // If an allocation fails, or SHR or MOD hit a hard case,
1846 // use the runtime system to get the correct result.
1847 __ bind(&call_runtime);
1851 GenerateRegisterArgsPush(masm);
1852 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1855 GenerateRegisterArgsPush(masm);
1856 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1859 GenerateRegisterArgsPush(masm);
1860 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1863 GenerateRegisterArgsPush(masm);
1864 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1867 GenerateRegisterArgsPush(masm);
1868 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1871 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1873 case Token::BIT_AND:
1874 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1876 case Token::BIT_XOR:
1877 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1880 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1883 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1886 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
// Oddball operands (undefined): for ADD, try string concatenation first;
// then replace undefined with NaN (or a bit-op-friendly value on the
// elided branch) and fall through to the heap-number stub.
1894 void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
1895 if (op_ == Token::ADD) {
1896 // Handle string addition here, because it is the only operation
1897 // that does not do a ToNumber conversion on the operands.
1898 GenerateAddStrings(masm);
1901 Factory* factory = masm->isolate()->factory();
1903 // Convert odd ball arguments to numbers.
1905 __ cmp(edx, factory->undefined_value());
1906 __ j(not_equal, &check, Label::kNear);
1907 if (Token::IsBitOp(op_)) {
1910 __ mov(edx, Immediate(factory->nan_value()));
1912 __ jmp(&done, Label::kNear);
// Same conversion for the right operand in eax.
1914 __ cmp(eax, factory->undefined_value());
1915 __ j(not_equal, &done, Label::kNear);
1916 if (Token::IsBitOp(op_)) {
1919 __ mov(eax, Immediate(factory->nan_value()));
1923 GenerateHeapNumberStub(masm);
// Heap-number-typed binary-op stub: double arithmetic via SSE2 or the FPU,
// bitwise/shift ops via integer conversion, with fallbacks to type
// transitions and JS builtins.  Fix in this revision: the label references
// &not_floats had been corrupted by HTML-entity decoding into
// "¬_floats"; restored.
1927 void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
1930 // Floating point case.
1937 if (CpuFeatures::IsSupported(SSE2)) {
1938 CpuFeatures::Scope use_sse2(SSE2);
1939 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
1942 case Token::ADD: __ addsd(xmm0, xmm1); break;
1943 case Token::SUB: __ subsd(xmm0, xmm1); break;
1944 case Token::MUL: __ mulsd(xmm0, xmm1); break;
1945 case Token::DIV: __ divsd(xmm0, xmm1); break;
1946 default: UNREACHABLE();
1948 GenerateHeapResultAllocation(masm, &call_runtime);
1949 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1951 } else { // SSE2 not available, use FPU.
1952 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
1953 FloatingPointHelper::LoadFloatOperands(
1956 FloatingPointHelper::ARGS_IN_REGISTERS);
1958 case Token::ADD: __ faddp(1); break;
1959 case Token::SUB: __ fsubp(1); break;
1960 case Token::MUL: __ fmulp(1); break;
1961 case Token::DIV: __ fdivp(1); break;
1962 default: UNREACHABLE();
1964 Label after_alloc_failure;
1965 GenerateHeapResultAllocation(masm, &after_alloc_failure);
1966 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1968 __ bind(&after_alloc_failure);
1970 __ jmp(&call_runtime);
1973 __ bind(&not_floats);
1974 GenerateTypeTransition(masm);
1979 // For MOD we go directly to runtime in the non-smi case.
1983 case Token::BIT_AND:
1984 case Token::BIT_XOR:
1988 GenerateRegisterArgsPush(masm);
1990 Label non_smi_result;
1991 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
1995 case Token::BIT_OR: __ or_(eax, ecx); break;
1996 case Token::BIT_AND: __ and_(eax, ecx); break;
1997 case Token::BIT_XOR: __ xor_(eax, ecx); break;
1998 case Token::SAR: __ sar_cl(eax); break;
1999 case Token::SHL: __ shl_cl(eax); break;
2000 case Token::SHR: __ shr_cl(eax); break;
2001 default: UNREACHABLE();
2003 if (op_ == Token::SHR) {
2004 // Check if result is non-negative and fits in a smi.
2005 __ test(eax, Immediate(0xc0000000));
2006 __ j(not_zero, &call_runtime);
2008 // Check if result fits in a smi.
2009 __ cmp(eax, 0xc0000000);
2010 __ j(negative, &non_smi_result, Label::kNear);
2012 // Tag smi result and return.
2014 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
2016 // All ops except SHR return a signed int32 that we load in
2018 if (op_ != Token::SHR) {
2019 __ bind(&non_smi_result);
2020 // Allocate a heap number if needed.
2021 __ mov(ebx, eax); // ebx: result
2022 Label skip_allocation;
2024 case OVERWRITE_LEFT:
2025 case OVERWRITE_RIGHT:
2026 // If the operand was an object, we skip the
2027 // allocation of a heap number.
2028 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2029 1 * kPointerSize : 2 * kPointerSize));
2030 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2033 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2034 __ bind(&skip_allocation);
2036 default: UNREACHABLE();
2038 // Store the result in the HeapNumber and return.
2039 if (CpuFeatures::IsSupported(SSE2)) {
2040 CpuFeatures::Scope use_sse2(SSE2);
2041 __ cvtsi2sd(xmm0, ebx);
2042 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2044 __ mov(Operand(esp, 1 * kPointerSize), ebx);
2045 __ fild_s(Operand(esp, 1 * kPointerSize));
2046 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2048 __ ret(2 * kPointerSize); // Drop two pushed arguments from the stack.
2051 __ bind(&not_floats);
2052 GenerateTypeTransitionWithSavedArgs(masm);
2055 default: UNREACHABLE(); break;
2058 // If an allocation fails, or SHR or MOD hit a hard case,
2059 // use the runtime system to get the correct result.
2060 __ bind(&call_runtime);
2064 GenerateRegisterArgsPush(masm);
2065 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2068 GenerateRegisterArgsPush(masm);
2069 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2072 GenerateRegisterArgsPush(masm);
2073 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2076 GenerateRegisterArgsPush(masm);
2077 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2080 GenerateRegisterArgsPush(masm);
2081 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2084 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2086 case Token::BIT_AND:
2087 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2089 case Token::BIT_XOR:
2090 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2093 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2096 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2099 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
// Fully generic binary-op stub: smi fast path, then double arithmetic
// (SSE2/FPU), then integer bitwise/shift ops, and finally the JS builtins
// (with a string-concat attempt for ADD).  Fix in this revision: the label
// references &not_floats had been corrupted by HTML-entity decoding into
// "¬_floats"; restored.
2107 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2110 Counters* counters = masm->isolate()->counters();
2111 __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
2121 case Token::BIT_AND:
2122 case Token::BIT_XOR:
2126 GenerateRegisterArgsPush(masm);
2132 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2134 // Floating point case.
2141 if (CpuFeatures::IsSupported(SSE2)) {
2142 CpuFeatures::Scope use_sse2(SSE2);
2143 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
2146 case Token::ADD: __ addsd(xmm0, xmm1); break;
2147 case Token::SUB: __ subsd(xmm0, xmm1); break;
2148 case Token::MUL: __ mulsd(xmm0, xmm1); break;
2149 case Token::DIV: __ divsd(xmm0, xmm1); break;
2150 default: UNREACHABLE();
2152 GenerateHeapResultAllocation(masm, &call_runtime);
2153 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2155 } else { // SSE2 not available, use FPU.
2156 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
2157 FloatingPointHelper::LoadFloatOperands(
2160 FloatingPointHelper::ARGS_IN_REGISTERS);
2162 case Token::ADD: __ faddp(1); break;
2163 case Token::SUB: __ fsubp(1); break;
2164 case Token::MUL: __ fmulp(1); break;
2165 case Token::DIV: __ fdivp(1); break;
2166 default: UNREACHABLE();
2168 Label after_alloc_failure;
2169 GenerateHeapResultAllocation(masm, &after_alloc_failure);
2170 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2172 __ bind(&after_alloc_failure);
2174 __ jmp(&call_runtime);
2176 __ bind(&not_floats);
2180 // For MOD we go directly to runtime in the non-smi case.
2184 case Token::BIT_AND:
2185 case Token::BIT_XOR:
2189 Label non_smi_result;
2190 FloatingPointHelper::LoadUnknownsAsIntegers(masm,
2194 case Token::BIT_OR: __ or_(eax, ecx); break;
2195 case Token::BIT_AND: __ and_(eax, ecx); break;
2196 case Token::BIT_XOR: __ xor_(eax, ecx); break;
2197 case Token::SAR: __ sar_cl(eax); break;
2198 case Token::SHL: __ shl_cl(eax); break;
2199 case Token::SHR: __ shr_cl(eax); break;
2200 default: UNREACHABLE();
2202 if (op_ == Token::SHR) {
2203 // Check if result is non-negative and fits in a smi.
2204 __ test(eax, Immediate(0xc0000000));
2205 __ j(not_zero, &call_runtime);
2207 // Check if result fits in a smi.
2208 __ cmp(eax, 0xc0000000);
2209 __ j(negative, &non_smi_result, Label::kNear);
2211 // Tag smi result and return.
2213 __ ret(2 * kPointerSize); // Drop the arguments from the stack.
2215 // All ops except SHR return a signed int32 that we load in
2217 if (op_ != Token::SHR) {
2218 __ bind(&non_smi_result);
2219 // Allocate a heap number if needed.
2220 __ mov(ebx, eax); // ebx: result
2221 Label skip_allocation;
2223 case OVERWRITE_LEFT:
2224 case OVERWRITE_RIGHT:
2225 // If the operand was an object, we skip the
2226 // allocation of a heap number.
2227 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
2228 1 * kPointerSize : 2 * kPointerSize));
2229 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2232 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
2233 __ bind(&skip_allocation);
2235 default: UNREACHABLE();
2237 // Store the result in the HeapNumber and return.
2238 if (CpuFeatures::IsSupported(SSE2)) {
2239 CpuFeatures::Scope use_sse2(SSE2);
2240 __ cvtsi2sd(xmm0, ebx);
2241 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
2243 __ mov(Operand(esp, 1 * kPointerSize), ebx);
2244 __ fild_s(Operand(esp, 1 * kPointerSize));
2245 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2247 __ ret(2 * kPointerSize);
2251 default: UNREACHABLE(); break;
2254 // If all else fails, use the runtime system to get the correct
2256 __ bind(&call_runtime);
2259 GenerateAddStrings(masm);
2260 GenerateRegisterArgsPush(masm);
2261 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
2265 GenerateRegisterArgsPush(masm);
2266 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
2269 GenerateRegisterArgsPush(masm);
2270 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
2273 GenerateRegisterArgsPush(masm);
2274 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
2277 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
2280 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
2282 case Token::BIT_AND:
2283 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
2285 case Token::BIT_XOR:
2286 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
2289 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
2292 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
2295 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
// Fast path for Token::ADD when at least one operand is a string.
// If the left operand (edx) is a string, tail-calls a StringAddStub that
// skips re-checking the left operand; otherwise, if the right operand (eax)
// is a string, tail-calls the variant that skips re-checking the right one.
// Falls through to call_runtime when neither operand is a string.
// NOTE(review): blank lines and the closing brace are elided from this view.
2303 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
2304 ASSERT(op_ == Token::ADD);
2305 Label left_not_string, call_runtime;
2307 // Registers containing left and right operands respectively.
2308 Register left = edx;
2309 Register right = eax;
2311 // Test if left operand is a string.
2312 __ JumpIfSmi(left, &left_not_string, Label::kNear);
2313 __ CmpObjectType(left, FIRST_NONSTRING_TYPE, ecx);
2314 __ j(above_equal, &left_not_string, Label::kNear);
// Left is known to be a string; the stub only needs to check the right side.
2316 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
2317 GenerateRegisterArgsPush(masm);
2318 __ TailCallStub(&string_add_left_stub);
2320 // Left operand is not a string, test right.
2321 __ bind(&left_not_string);
2322 __ JumpIfSmi(right, &call_runtime, Label::kNear);
2323 __ CmpObjectType(right, FIRST_NONSTRING_TYPE, ecx);
2324 __ j(above_equal, &call_runtime, Label::kNear);
// Right is known to be a string; the stub only needs to check the left side.
2326 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
2327 GenerateRegisterArgsPush(masm);
2328 __ TailCallStub(&string_add_right_stub);
2330 // Neither argument is a string.
2331 __ bind(&call_runtime);
// Ensures a HeapNumber is available in ebx to hold the (heap) result of a
// binary operation, reusing one of the input heap numbers when the
// OverwriteMode allows it. Jumps to alloc_failure if allocation fails.
// NOTE(review): the switch (on the overwrite mode) header, case bodies'
// tails, and closing braces are elided from this view; only the case labels
// and their visible contents are shown below.
2335 void BinaryOpStub::GenerateHeapResultAllocation(
2336 MacroAssembler* masm,
2337 Label* alloc_failure) {
2338 Label skip_allocation;
2339 OverwriteMode mode = mode_;
2341 case OVERWRITE_LEFT: {
2342 // If the argument in edx is already an object, we skip the
2343 // allocation of a heap number.
2344 __ JumpIfNotSmi(edx, &skip_allocation, Label::kNear);
2345 // Allocate a heap number for the result. Keep eax and edx intact
2346 // for the possible runtime call.
2347 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2348 // Now edx can be overwritten losing one of the arguments as we are
2349 // now done and will not need it any more.
2351 __ bind(&skip_allocation);
2352 // Use object in edx as a result holder
2356 case OVERWRITE_RIGHT:
2357 // If the argument in eax is already an object, we skip the
2358 // allocation of a heap number.
2359 __ JumpIfNotSmi(eax, &skip_allocation, Label::kNear);
2362 // Allocate a heap number for the result. Keep eax and edx intact
2363 // for the possible runtime call.
2364 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
2365 // Now eax can be overwritten losing one of the arguments as we are
2366 // now done and will not need it any more.
2368 __ bind(&skip_allocation);
2370 default: UNREACHABLE();
// NOTE(review): the body of this helper is elided from this view.
// Based on its call sites above (always immediately before a tail call or
// InvokeBuiltin), it presumably pushes the register-passed operands
// (edx, eax) back onto the stack under the return address -- confirm
// against the full source.
2375 void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
// Computes a transcendental function (sin/cos/tan/log, per type_) of a
// double, consulting the isolate's TranscendentalCache first.
// Two calling conventions, selected by argument_type_:
//  TAGGED:   input is a tagged number on the stack, result is a tagged
//            HeapNumber returned in eax.
//  UNTAGGED: input/result are an untagged double in xmm1.
// NOTE(review): many original lines are elided from this view (label
// declarations such as 'loaded', 'skip_cache' and 'cache_miss', the hash
// computation instructions, several else/closing braces, and blank lines);
// comments below only describe what is visible.
2383 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2386 // esp[4]: tagged number input argument (should be number).
2387 // esp[0]: return address.
2389 // eax: tagged double result.
2392 // esp[0]: return address.
2393 // xmm1: untagged double input argument
2395 // xmm1: untagged double result.
2398 Label runtime_call_clear_stack;
2400 const bool tagged = (argument_type_ == TAGGED);
2402 // Test that eax is a number.
2403 Label input_not_smi;
2405 __ mov(eax, Operand(esp, kPointerSize));
2406 __ JumpIfNotSmi(eax, &input_not_smi, Label::kNear);
2407 // Input is a smi. Untag and load it onto the FPU stack.
2408 // Then load the low and high words of the double into ebx, edx.
2409 STATIC_ASSERT(kSmiTagSize == 1);
2411 __ sub(esp, Immediate(2 * kPointerSize));
2412 __ mov(Operand(esp, 0), eax);
2413 __ fild_s(Operand(esp, 0));
2414 __ fst_d(Operand(esp, 0));
2417 __ jmp(&loaded, Label::kNear);
2418 __ bind(&input_not_smi);
2419 // Check if input is a HeapNumber.
2420 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2421 Factory* factory = masm->isolate()->factory();
2422 __ cmp(ebx, Immediate(factory->heap_number_map()));
2423 __ j(not_equal, &runtime_call);
2424 // Input is a HeapNumber. Push it on the FPU stack and load its
2425 // low and high words into ebx, edx.
2426 __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
2427 __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
2428 __ mov(ebx, FieldOperand(eax, HeapNumber::kMantissaOffset));
2431 } else { // UNTAGGED.
// Extract the high 32 bits of the double in xmm1 into edx (SSE4.1 pextrd,
// or pshufd+movd on older CPUs; the movd instructions are elided here).
2432 if (CpuFeatures::IsSupported(SSE4_1)) {
2433 CpuFeatures::Scope sse4_scope(SSE4_1);
2434 __ pextrd(edx, xmm1, 0x1); // copy xmm1[63..32] to edx.
2436 __ pshufd(xmm0, xmm1, 0x1);
2442 // ST[0] or xmm1 == double value
2443 // ebx = low 32 bits of double value
2444 // edx = high 32 bits of double value
2445 // Compute hash (the shifts are arithmetic):
2446 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
2455 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
2457 Immediate(TranscendentalCache::SubCache::kCacheSize - 1));
2459 // ST[0] or xmm1 == double value.
2460 // ebx = low 32 bits of double value.
2461 // edx = high 32 bits of double value.
2462 // ecx = TranscendentalCache::hash(double value).
2463 ExternalReference cache_array =
2464 ExternalReference::transcendental_cache_array_address(masm->isolate());
2465 __ mov(eax, Immediate(cache_array));
2466 int cache_array_index =
2467 type_ * sizeof(masm->isolate()->transcendental_cache()->caches_[0]);
2468 __ mov(eax, Operand(eax, cache_array_index));
2469 // Eax points to the cache for the type type_.
2470 // If NULL, the cache hasn't been initialized yet, so go through runtime.
2472 __ j(zero, &runtime_call_clear_stack);
2474 // Check that the layout of cache elements match expectations.
2475 { TranscendentalCache::SubCache::Element test_elem[2];
2476 char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
2477 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
2478 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
2479 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
2480 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
2481 CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
2482 CHECK_EQ(0, elem_in0 - elem_start);
2483 CHECK_EQ(kIntSize, elem_in1 - elem_start);
2484 CHECK_EQ(2 * kIntSize, elem_out - elem_start);
2487 // Find the address of the ecx'th entry in the cache, i.e., &eax[ecx*12].
2488 __ lea(ecx, Operand(ecx, ecx, times_2, 0));
2489 __ lea(ecx, Operand(eax, ecx, times_4, 0));
2490 // Check if cache matches: Double value is stored in uint32_t[2] array.
2492 __ cmp(ebx, Operand(ecx, 0));
2493 __ j(not_equal, &cache_miss, Label::kNear);
2494 __ cmp(edx, Operand(ecx, kIntSize));
2495 __ j(not_equal, &cache_miss, Label::kNear);
// Cache hit: the cached tagged result sits at offset 2*kIntSize.
2497 Counters* counters = masm->isolate()->counters();
2498 __ IncrementCounter(counters->transcendental_cache_hit(), 1);
2499 __ mov(eax, Operand(ecx, 2 * kIntSize));
2502 __ ret(kPointerSize);
2503 } else { // UNTAGGED.
2504 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2508 __ bind(&cache_miss);
2509 __ IncrementCounter(counters->transcendental_cache_miss(), 1);
2510 // Update cache with new value.
2511 // We are short on registers, so use no_reg as scratch.
2512 // This gives slightly larger code.
2514 __ AllocateHeapNumber(eax, edi, no_reg, &runtime_call_clear_stack);
2515 } else { // UNTAGGED.
2516 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2517 __ sub(esp, Immediate(kDoubleSize));
2518 __ movdbl(Operand(esp, 0), xmm1);
2519 __ fld_d(Operand(esp, 0));
2520 __ add(esp, Immediate(kDoubleSize));
// Compute the function on the x87 stack, then store input words and the
// result HeapNumber pointer into the cache entry at ecx.
2522 GenerateOperation(masm, type_);
2523 __ mov(Operand(ecx, 0), ebx);
2524 __ mov(Operand(ecx, kIntSize), edx);
2525 __ mov(Operand(ecx, 2 * kIntSize), eax);
2526 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
2528 __ ret(kPointerSize);
2529 } else { // UNTAGGED.
2530 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2533 // Skip cache and return answer directly, only in untagged case.
2534 __ bind(&skip_cache);
2535 __ sub(esp, Immediate(kDoubleSize));
2536 __ movdbl(Operand(esp, 0), xmm1);
2537 __ fld_d(Operand(esp, 0));
2538 GenerateOperation(masm, type_);
2539 __ fstp_d(Operand(esp, 0));
2540 __ movdbl(xmm1, Operand(esp, 0));
2541 __ add(esp, Immediate(kDoubleSize));
2542 // We return the value in xmm1 without adding it to the cache, but
2543 // we cause a scavenging GC so that future allocations will succeed.
2545 FrameScope scope(masm, StackFrame::INTERNAL);
2546 // Allocate an unused object bigger than a HeapNumber.
2547 __ push(Immediate(Smi::FromInt(2 * kDoubleSize)));
2548 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace);
2553 // Call runtime, doing whatever allocation and cleanup is necessary.
2555 __ bind(&runtime_call_clear_stack);
2557 __ bind(&runtime_call);
2558 ExternalReference runtime =
2559 ExternalReference(RuntimeFunction(), masm->isolate());
2560 __ TailCallExternalReference(runtime, 1, 1);
2561 } else { // UNTAGGED.
// Untagged slow path: box xmm1 into a fresh HeapNumber and call the
// runtime function on it, then unbox the result back into xmm1.
2562 __ bind(&runtime_call_clear_stack);
2563 __ bind(&runtime_call);
2564 __ AllocateHeapNumber(eax, edi, no_reg, &skip_cache);
2565 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm1);
2567 FrameScope scope(masm, StackFrame::INTERNAL);
2569 __ CallRuntime(RuntimeFunction(), 1);
2571 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
// Maps this stub's transcendental type to the runtime function that
// computes it when the stub's fast path bails out. Returns kAbort for an
// unexpected type (the switch header and UNREACHABLE default are elided
// from this view).
2577 Runtime::FunctionId TranscendentalCacheStub::RuntimeFunction() {
2579 case TranscendentalCache::SIN: return Runtime::kMath_sin;
2580 case TranscendentalCache::COS: return Runtime::kMath_cos;
2581 case TranscendentalCache::TAN: return Runtime::kMath_tan;
2582 case TranscendentalCache::LOG: return Runtime::kMath_log;
2585 return Runtime::kAbort;
// Emits the x87 code that computes sin/cos/tan/log of the value on top of
// the FPU stack (also available in ebx/edx as raw words). For the
// trigonometric functions the argument is range-reduced modulo 2*pi with
// FPREM before applying fsin/fcos/fptan (those opcode lines and the
// switch dispatch are elided from this view).
2590 void TranscendentalCacheStub::GenerateOperation(
2591 MacroAssembler* masm, TranscendentalCache::Type type) {
2592 // Only free register is edi.
2593 // Input value is on FP stack, and also in ebx/edx.
2594 // Input value is possibly in xmm1.
2595 // Address of result (a newly allocated HeapNumber) may be in eax.
2596 if (type == TranscendentalCache::SIN ||
2597 type == TranscendentalCache::COS ||
2598 type == TranscendentalCache::TAN) {
2599 // Both fsin and fcos require arguments in the range +/-2^63 and
2600 // return NaN for infinities and NaN. They can share all code except
2601 // the actual fsin/fcos operation.
2602 Label in_range, done;
2603 // If argument is outside the range -2^63..2^63, fsin/cos doesn't
2604 // work. We must reduce it to the appropriate range.
2606 __ and_(edi, Immediate(0x7ff00000)); // Exponent only.
2607 int supported_exponent_limit =
2608 (63 + HeapNumber::kExponentBias) << HeapNumber::kExponentShift;
2609 __ cmp(edi, Immediate(supported_exponent_limit));
2610 __ j(below, &in_range, Label::kNear);
2611 // Check for infinity and NaN. Both return NaN for sin.
2612 __ cmp(edi, Immediate(0x7ff00000));
2613 Label non_nan_result;
2614 __ j(not_equal, &non_nan_result, Label::kNear);
2615 // Input is +/-Infinity or NaN. Result is NaN.
2617 // NaN is represented by 0x7ff8000000000000.
2618 __ push(Immediate(0x7ff80000));
2619 __ push(Immediate(0));
2620 __ fld_d(Operand(esp, 0));
2621 __ add(esp, Immediate(2 * kPointerSize));
2622 __ jmp(&done, Label::kNear);
2624 __ bind(&non_nan_result);
2626 // Use fpmod to restrict argument to the range +/-2*PI.
2627 __ mov(edi, eax); // Save eax before using fnstsw_ax.
2631 // FPU Stack: input, 2*pi, input.
2633 Label no_exceptions;
2636 // Clear if Illegal Operand or Zero Division exceptions are set.
2637 __ test(eax, Immediate(5));
2638 __ j(zero, &no_exceptions, Label::kNear);
2640 __ bind(&no_exceptions);
2643 // Compute st(0) % st(1)
2645 Label partial_remainder_loop;
2646 __ bind(&partial_remainder_loop);
// FPREM may yield only a partial remainder; C2 in the status word (bit
// 0x400) signals that another iteration is required.
2650 __ test(eax, Immediate(0x400 /* C2 */));
2651 // If C2 is set, computation only has partial result. Loop to
2652 // continue computation.
2653 __ j(not_zero, &partial_remainder_loop);
2655 // FPU Stack: input, 2*pi, input % 2*pi
2658 __ mov(eax, edi); // Restore eax (allocated HeapNumber pointer).
2660 // FPU Stack: input % 2*pi
2663 case TranscendentalCache::SIN:
2666 case TranscendentalCache::COS:
2669 case TranscendentalCache::TAN:
2670 // FPTAN calculates tangent onto st(0) and pushes 1.0 onto the
2671 // FP register stack.
2673 __ fstp(0); // Pop FP register stack.
2680 ASSERT(type == TranscendentalCache::LOG);
2688 // Input: edx, eax are the left and right objects of a bit op.
2689 // Output: eax, ecx are left and right integers for a bit op.
// Converts two tagged values (smi, HeapNumber, or undefined) into untagged
// int32s for a bitwise operation. Undefined converts to 0 per ES 9.5;
// anything else that is not a number jumps to conversion_failure.
// NOTE(review): some lines are elided from this view (the use_sse3
// parameter line, the smi fast paths, and closing braces).
2690 void FloatingPointHelper::LoadUnknownsAsIntegers(MacroAssembler* masm,
2692 Label* conversion_failure) {
2693 // Check float operands.
2694 Label arg1_is_object, check_undefined_arg1;
2695 Label arg2_is_object, check_undefined_arg2;
2696 Label load_arg2, done;
2698 // Test if arg1 is a Smi.
2699 __ JumpIfNotSmi(edx, &arg1_is_object, Label::kNear);
2704 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2705 __ bind(&check_undefined_arg1);
2706 Factory* factory = masm->isolate()->factory();
2707 __ cmp(edx, factory->undefined_value());
2708 __ j(not_equal, conversion_failure);
2709 __ mov(edx, Immediate(0));
2712 __ bind(&arg1_is_object);
2713 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
2714 __ cmp(ebx, factory->heap_number_map());
2715 __ j(not_equal, &check_undefined_arg1);
2717 // Get the untagged integer version of the edx heap number in ecx.
2718 IntegerConvert(masm, edx, use_sse3, conversion_failure);
2721 // Here edx has the untagged integer, eax has a Smi or a heap number.
2722 __ bind(&load_arg2);
2724 // Test if arg2 is a Smi.
2725 __ JumpIfNotSmi(eax, &arg2_is_object, Label::kNear);
2731 // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
2732 __ bind(&check_undefined_arg2);
2733 __ cmp(eax, factory->undefined_value());
2734 __ j(not_equal, conversion_failure);
2735 __ mov(ecx, Immediate(0));
2738 __ bind(&arg2_is_object);
2739 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
2740 __ cmp(ebx, factory->heap_number_map());
2741 __ j(not_equal, &check_undefined_arg2);
2743 // Get the untagged integer version of the eax heap number in ecx.
2744 IntegerConvert(masm, eax, use_sse3, conversion_failure);
// NOTE(review): the remaining parameters and the body of this helper are
// elided from this view; by its name it presumably verifies that values
// loaded by LoadUnknownsAsIntegers were exact int32s -- confirm against
// the full source.
2750 void FloatingPointHelper::CheckLoadedIntegersWereInt32(MacroAssembler* masm,
// Loads a tagged number (smi or HeapNumber) in 'number' onto the x87 FPU
// stack. Smis are untagged and converted via fild_s using a stack slot.
// NOTE(review): the 'Register number' parameter line, the load_smi bind,
// and the trailing esp adjustment/done label are elided from this view.
2757 void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
2759 Label load_smi, done;
2761 __ JumpIfSmi(number, &load_smi, Label::kNear);
2762 __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
2763 __ jmp(&done, Label::kNear);
2766 __ SmiUntag(number);
2768 __ fild_s(Operand(esp, 0));
// Loads the two operands edx and eax (each a smi or HeapNumber; no type
// check performed) into xmm0 and xmm1 respectively. Smis are untagged,
// converted with cvtsi2sd, then retagged so the overwrite-mode logic can
// still distinguish smi inputs.
2775 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm) {
2776 Label load_smi_edx, load_eax, load_smi_eax, done;
2777 // Load operand in edx into xmm0.
2778 __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
2779 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2782 // Load operand in eax into xmm1.
2783 __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2784 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
2785 __ jmp(&done, Label::kNear);
2787 __ bind(&load_smi_edx);
2788 __ SmiUntag(edx); // Untag smi before converting to float.
2789 __ cvtsi2sd(xmm0, edx);
2790 __ SmiTag(edx); // Retag smi for heap number overwriting test.
2793 __ bind(&load_smi_eax);
2794 __ SmiUntag(eax); // Untag smi before converting to float.
2795 __ cvtsi2sd(xmm1, eax);
2796 __ SmiTag(eax); // Retag smi for heap number overwriting test.
// Type-checked variant: loads edx into xmm0 and eax into xmm1, jumping to
// not_numbers if either operand is neither a smi nor a HeapNumber.
2802 void FloatingPointHelper::LoadSSE2Operands(MacroAssembler* masm,
2803 Label* not_numbers) {
2804 Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
2805 // Load operand in edx into xmm0, or branch to not_numbers.
2806 __ JumpIfSmi(edx, &load_smi_edx, Label::kNear);
2807 Factory* factory = masm->isolate()->factory();
2808 __ cmp(FieldOperand(edx, HeapObject::kMapOffset), factory->heap_number_map());
2809 __ j(not_equal, not_numbers); // Argument in edx is not a number.
2810 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
2812 // Load operand in eax into xmm1, or branch to not_numbers.
2813 __ JumpIfSmi(eax, &load_smi_eax, Label::kNear);
2814 __ cmp(FieldOperand(eax, HeapObject::kMapOffset), factory->heap_number_map());
2815 __ j(equal, &load_float_eax, Label::kNear);
2816 __ jmp(not_numbers); // Argument in eax is not a number.
2817 __ bind(&load_smi_edx);
2818 __ SmiUntag(edx); // Untag smi before converting to float.
2819 __ cvtsi2sd(xmm0, edx);
2820 __ SmiTag(edx); // Retag smi for heap number overwriting test.
2822 __ bind(&load_smi_eax);
2823 __ SmiUntag(eax); // Untag smi before converting to float.
2824 __ cvtsi2sd(xmm1, eax);
2825 __ SmiTag(eax); // Retag smi for heap number overwriting test.
2826 __ jmp(&done, Label::kNear);
2827 __ bind(&load_float_eax);
2828 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
// Loads two known-smi operands (left in edx, right in eax) into xmm0 and
// xmm1 via a scratch register, leaving the original tagged values intact.
// NOTE(review): the 'Register scratch' parameter line is elided from view.
2833 void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
2835 const Register left = edx;
2836 const Register right = eax;
2837 __ mov(scratch, left);
2838 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2839 __ SmiUntag(scratch);
2840 __ cvtsi2sd(xmm0, scratch);
2842 __ mov(scratch, right);
2843 __ SmiUntag(scratch);
2844 __ cvtsi2sd(xmm1, scratch);
// Verifies that the doubles in xmm0 and xmm1 are exact int32 values:
// round-trips each through cvttsd2si/cvtsi2sd and compares. Jumps to
// non_int32 on mismatch (not_zero) or on unordered compare (carry, i.e.
// NaN). NOTE(review): the scratch/non_int32 parameter lines are elided.
2848 void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
2851 __ cvttsd2si(scratch, Operand(xmm0));
2852 __ cvtsi2sd(xmm2, scratch);
2853 __ ucomisd(xmm0, xmm2);
2854 __ j(not_zero, non_int32);
2855 __ j(carry, non_int32);
2856 __ cvttsd2si(scratch, Operand(xmm1));
2857 __ cvtsi2sd(xmm2, scratch);
2858 __ ucomisd(xmm1, xmm2);
2859 __ j(not_zero, non_int32);
2860 __ j(carry, non_int32);
// Loads two tagged number operands onto the x87 FPU stack. Operands come
// either from registers (edx, eax) or from the stack, selected by
// arg_location. Smis are untagged and converted via fild_s.
// NOTE(review): 'Register scratch' parameter line, several else branches,
// stack-slot stores around fild_s, and the done label are elided from view.
2864 void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
2866 ArgLocation arg_location) {
2867 Label load_smi_1, load_smi_2, done_load_1, done;
2868 if (arg_location == ARGS_IN_REGISTERS) {
2869 __ mov(scratch, edx);
2871 __ mov(scratch, Operand(esp, 2 * kPointerSize));
2873 __ JumpIfSmi(scratch, &load_smi_1, Label::kNear);
2874 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2875 __ bind(&done_load_1);
2877 if (arg_location == ARGS_IN_REGISTERS) {
2878 __ mov(scratch, eax);
2880 __ mov(scratch, Operand(esp, 1 * kPointerSize));
2882 __ JumpIfSmi(scratch, &load_smi_2, Label::kNear);
2883 __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
2884 __ jmp(&done, Label::kNear);
2886 __ bind(&load_smi_1);
2887 __ SmiUntag(scratch);
2889 __ fild_s(Operand(esp, 0));
2891 __ jmp(&done_load_1);
2893 __ bind(&load_smi_2);
2894 __ SmiUntag(scratch);
2896 __ fild_s(Operand(esp, 0));
// Loads two known-smi operands (left in edx, right in eax) onto the x87
// FPU stack by untagging each into scratch and converting with fild_s
// through a stack slot. Tagged originals stay intact.
// NOTE(review): the 'Register scratch' parameter line and the esp
// push/pop bookkeeping lines are elided from this view.
2903 void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm,
2905 const Register left = edx;
2906 const Register right = eax;
2907 __ mov(scratch, left);
2908 ASSERT(!scratch.is(right)); // We're about to clobber scratch.
2909 __ SmiUntag(scratch);
2911 __ fild_s(Operand(esp, 0));
2913 __ mov(scratch, right);
2914 __ SmiUntag(scratch);
2915 __ mov(Operand(esp, 0), scratch);
2916 __ fild_s(Operand(esp, 0));
// Verifies that both operands (edx and eax) are numbers (smi or
// HeapNumber), jumping to non_float otherwise. Falls through when both
// are numbers. NOTE(review): the scratch/non_float parameter lines and
// the done label are elided from this view.
2921 void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
2924 Label test_other, done;
2925 // Test if both operands are floats or smi -> scratch=k_is_float;
2926 // Otherwise scratch = k_not_float.
2927 __ JumpIfSmi(edx, &test_other, Label::kNear);
2928 __ mov(scratch, FieldOperand(edx, HeapObject::kMapOffset));
2929 Factory* factory = masm->isolate()->factory();
2930 __ cmp(scratch, factory->heap_number_map());
2931 __ j(not_equal, non_float); // argument in edx is not a number -> NaN
2933 __ bind(&test_other);
2934 __ JumpIfSmi(eax, &done, Label::kNear);
2935 __ mov(scratch, FieldOperand(eax, HeapObject::kMapOffset));
2936 __ cmp(scratch, factory->heap_number_map());
2937 __ j(not_equal, non_float); // argument in eax is not a number -> NaN
2939 // Fall-through: Both operands are numbers.
// NOTE(review): the remaining parameters and body of this helper are
// elided from this view; by its name it presumably checks that x87-loaded
// operands are exact int32s -- confirm against the full source.
2944 void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
// Computes base^exponent. Inputs arrive per exponent_type_:
//  ON_STACK: tagged base/exponent on the stack (call from full codegen);
//            result is a new HeapNumber in eax.
//  TAGGED/INTEGER: values in registers/xmm; result in double_result (xmm3).
// Fast paths: sqrt/rsqrt for +/-0.5 exponents, x87 FYL2X/F2XM1/FSCALE for
// general double exponents, square-and-multiply for integer exponents.
// Bails out to the runtime (or a C function) on any exceptional case.
// FIX(review): the label references to not_plus_half at original lines
// 3032 and 3060 had been corrupted by HTML-entity decoding ("&not" had
// become U+00AC, yielding the invalid token `¬_plus_half`); restored to
// `&not_plus_half`, matching the label declared at original line 3024.
// NOTE(review): several original lines are elided from this view (e.g. the
// `Label fast_power` declaration, some else branches and closing braces).
2950 void MathPowStub::Generate(MacroAssembler* masm) {
2951 CpuFeatures::Scope use_sse2(SSE2);
2952 Factory* factory = masm->isolate()->factory();
2953 const Register exponent = eax;
2954 const Register base = edx;
2955 const Register scratch = ecx;
2956 const XMMRegister double_result = xmm3;
2957 const XMMRegister double_base = xmm2;
2958 const XMMRegister double_exponent = xmm1;
2959 const XMMRegister double_scratch = xmm4;
2961 Label call_runtime, done, exponent_not_smi, int_exponent;
2963 // Save 1 in double_result - we need this several times later on.
2964 __ mov(scratch, Immediate(1));
2965 __ cvtsi2sd(double_result, scratch);
2967 if (exponent_type_ == ON_STACK) {
2968 Label base_is_smi, unpack_exponent;
2969 // The exponent and base are supplied as arguments on the stack.
2970 // This can only happen if the stub is called from non-optimized code.
2971 // Load input parameters from stack.
2972 __ mov(base, Operand(esp, 2 * kPointerSize));
2973 __ mov(exponent, Operand(esp, 1 * kPointerSize));
2975 __ JumpIfSmi(base, &base_is_smi, Label::kNear);
2976 __ cmp(FieldOperand(base, HeapObject::kMapOffset),
2977 factory->heap_number_map());
2978 __ j(not_equal, &call_runtime);
2980 __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
2981 __ jmp(&unpack_exponent, Label::kNear);
2983 __ bind(&base_is_smi);
2985 __ cvtsi2sd(double_base, base);
2987 __ bind(&unpack_exponent);
2988 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
2989 __ SmiUntag(exponent);
2990 __ jmp(&int_exponent);
2992 __ bind(&exponent_not_smi);
2993 __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
2994 factory->heap_number_map());
2995 __ j(not_equal, &call_runtime);
2996 __ movdbl(double_exponent,
2997 FieldOperand(exponent, HeapNumber::kValueOffset));
2998 } else if (exponent_type_ == TAGGED) {
2999 __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
3000 __ SmiUntag(exponent);
3001 __ jmp(&int_exponent);
3003 __ bind(&exponent_not_smi);
3004 __ movdbl(double_exponent,
3005 FieldOperand(exponent, HeapNumber::kValueOffset));
3008 if (exponent_type_ != INTEGER) {
3010 // Detect integer exponents stored as double.
3011 __ cvttsd2si(exponent, Operand(double_exponent));
3012 // Skip to runtime if possibly NaN (indicated by the indefinite integer).
3013 __ cmp(exponent, Immediate(0x80000000u));
3014 __ j(equal, &call_runtime);
3015 __ cvtsi2sd(double_scratch, exponent);
3016 // Already ruled out NaNs for exponent.
3017 __ ucomisd(double_exponent, double_scratch);
3018 __ j(equal, &int_exponent);
3020 if (exponent_type_ == ON_STACK) {
3021 // Detect square root case. Crankshaft detects constant +/-0.5 at
3022 // compile time and uses DoMathPowHalf instead. We then skip this check
3023 // for non-constant cases of +/-0.5 as these hardly occur.
3024 Label continue_sqrt, continue_rsqrt, not_plus_half;
3026 // Load double_scratch with 0.5.
3027 __ mov(scratch, Immediate(0x3F000000u));
3028 __ movd(double_scratch, scratch);
3029 __ cvtss2sd(double_scratch, double_scratch);
3030 // Already ruled out NaNs for exponent.
3031 __ ucomisd(double_scratch, double_exponent);
3032 __ j(not_equal, &not_plus_half, Label::kNear);
3034 // Calculates square root of base. Check for the special case of
3035 // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
3036 // According to IEEE-754, single-precision -Infinity has the highest
3037 // 9 bits set and the lowest 23 bits cleared.
3038 __ mov(scratch, 0xFF800000u);
3039 __ movd(double_scratch, scratch);
3040 __ cvtss2sd(double_scratch, double_scratch);
3041 __ ucomisd(double_base, double_scratch);
3042 // Comparing -Infinity with NaN results in "unordered", which sets the
3043 // zero flag as if both were equal. However, it also sets the carry flag.
3044 __ j(not_equal, &continue_sqrt, Label::kNear);
3045 __ j(carry, &continue_sqrt, Label::kNear);
3047 // Set result to Infinity in the special case.
3048 __ xorps(double_result, double_result);
3049 __ subsd(double_result, double_scratch);
3052 __ bind(&continue_sqrt);
3053 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
3054 __ xorps(double_scratch, double_scratch);
3055 __ addsd(double_scratch, double_base); // Convert -0 to +0.
3056 __ sqrtsd(double_result, double_scratch);
3060 __ bind(&not_plus_half);
3061 // Load double_exponent with -0.5 by subtracting 1.
3062 __ subsd(double_scratch, double_result);
3063 // Already ruled out NaNs for exponent.
3064 __ ucomisd(double_scratch, double_exponent);
3065 __ j(not_equal, &fast_power, Label::kNear);
3067 // Calculates reciprocal of square root of base. Check for the special
3068 // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
3069 // According to IEEE-754, single-precision -Infinity has the highest
3070 // 9 bits set and the lowest 23 bits cleared.
3071 __ mov(scratch, 0xFF800000u);
3072 __ movd(double_scratch, scratch);
3073 __ cvtss2sd(double_scratch, double_scratch);
3074 __ ucomisd(double_base, double_scratch);
3075 // Comparing -Infinity with NaN results in "unordered", which sets the
3076 // zero flag as if both were equal. However, it also sets the carry flag.
3077 __ j(not_equal, &continue_rsqrt, Label::kNear);
3078 __ j(carry, &continue_rsqrt, Label::kNear);
3080 // Set result to 0 in the special case.
3081 __ xorps(double_result, double_result);
3084 __ bind(&continue_rsqrt);
3085 // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
3086 __ xorps(double_exponent, double_exponent);
3087 __ addsd(double_exponent, double_base); // Convert -0 to +0.
3088 __ sqrtsd(double_exponent, double_exponent);
3089 __ divsd(double_result, double_exponent);
3093 // Using FPU instructions to calculate power.
3094 Label fast_power_failed;
3095 __ bind(&fast_power);
3096 __ fnclex(); // Clear flags to catch exceptions later.
3097 // Transfer (B)ase and (E)xponent onto the FPU register stack.
3098 __ sub(esp, Immediate(kDoubleSize));
3099 __ movdbl(Operand(esp, 0), double_exponent);
3100 __ fld_d(Operand(esp, 0)); // E
3101 __ movdbl(Operand(esp, 0), double_base);
3102 __ fld_d(Operand(esp, 0)); // B, E
3104 // Exponent is in st(1) and base is in st(0)
3105 // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
3106 // FYL2X calculates st(1) * log2(st(0))
3109 __ frndint(); // rnd(X), X
3110 __ fsub(1); // rnd(X), X-rnd(X)
3111 __ fxch(1); // X - rnd(X), rnd(X)
3112 // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
3113 __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
3114 __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
3115 __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
3116 // FSCALE calculates st(0) * 2^st(1)
3117 __ fscale(); // 2^X, rnd(X)
3119 // Bail out to runtime in case of exceptions in the status word.
3121 __ test_b(eax, 0x5F); // We check for all but precision exception.
3122 __ j(not_zero, &fast_power_failed, Label::kNear);
3123 __ fstp_d(Operand(esp, 0));
3124 __ movdbl(double_result, Operand(esp, 0));
3125 __ add(esp, Immediate(kDoubleSize));
3128 __ bind(&fast_power_failed);
3130 __ add(esp, Immediate(kDoubleSize));
3131 __ jmp(&call_runtime);
3134 // Calculate power with integer exponent.
3135 __ bind(&int_exponent);
3136 const XMMRegister double_scratch2 = double_exponent;
3137 __ mov(scratch, exponent); // Back up exponent.
3138 __ movsd(double_scratch, double_base); // Back up base.
3139 __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
3141 // Get absolute value of exponent.
3142 Label no_neg, while_true, no_multiply;
3143 __ test(scratch, scratch);
3144 __ j(positive, &no_neg, Label::kNear);
// Square-and-multiply loop: one multiply per set bit of the exponent.
3148 __ bind(&while_true);
3150 __ j(not_carry, &no_multiply, Label::kNear);
3151 __ mulsd(double_result, double_scratch);
3152 __ bind(&no_multiply);
3154 __ mulsd(double_scratch, double_scratch);
3155 __ j(not_zero, &while_true);
3157 // scratch has the original value of the exponent - if the exponent is
3158 // negative, return 1/result.
3159 __ test(exponent, exponent);
3160 __ j(positive, &done);
3161 __ divsd(double_scratch2, double_result);
3162 __ movsd(double_result, double_scratch2);
3163 // Test whether result is zero. Bail out to check for subnormal result.
3164 // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
3165 __ xorps(double_scratch2, double_scratch2);
3166 __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
3167 // double_exponent aliased as double_scratch2 has already been overwritten
3168 // and may not have contained the exponent value in the first place when the
3169 // exponent is a smi. We reset it with exponent value before bailing out.
3170 __ j(not_equal, &done);
3171 __ cvtsi2sd(double_exponent, exponent);
3173 // Returning or bailing out.
3174 Counters* counters = masm->isolate()->counters();
3175 if (exponent_type_ == ON_STACK) {
3176 // The arguments are still on the stack.
3177 __ bind(&call_runtime);
3178 __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
3180 // The stub is called from non-optimized code, which expects the result
3181 // as heap number in exponent.
3183 __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
3184 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
3185 __ IncrementCounter(counters->math_pow(), 1);
3186 __ ret(2 * kPointerSize);
3188 __ bind(&call_runtime);
3190 AllowExternalCallThatCantCauseGC scope(masm);
3191 __ PrepareCallCFunction(4, scratch);
3192 __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
3193 __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
3195 ExternalReference::power_double_double_function(masm->isolate()), 4);
3197 // Return value is in st(0) on ia32.
3198 // Store it into the (fixed) result register.
3199 __ sub(esp, Immediate(kDoubleSize));
3200 __ fstp_d(Operand(esp, 0));
3201 __ movdbl(double_result, Operand(esp, 0));
3202 __ add(esp, Immediate(kDoubleSize));
3205 __ IncrementCounter(counters->math_pow(), 1);
// Implements arguments[key]: reads element 'key' (edx, must be a smi) of
// the current function's arguments, with count in eax. Handles both normal
// frames and arguments-adaptor frames; falls back to the
// GetArgumentsProperty runtime for non-smi or out-of-bounds keys.
// NOTE(review): the adaptor/slow label declarations, the cmp before each
// bounds check, the ret instructions, and closing braces are elided from
// this view.
3211 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
3212 // The key is in edx and the parameter count is in eax.
3214 // The displacement is used for skipping the frame pointer on the
3215 // stack. It is the offset of the last parameter (if any) relative
3216 // to the frame pointer.
3217 static const int kDisplacement = 1 * kPointerSize;
3219 // Check that the key is a smi.
3221 __ JumpIfNotSmi(edx, &slow, Label::kNear);
3223 // Check if the calling frame is an arguments adaptor frame.
3225 __ mov(ebx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3226 __ mov(ecx, Operand(ebx, StandardFrameConstants::kContextOffset));
3227 __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3228 __ j(equal, &adaptor, Label::kNear);
3230 // Check index against formal parameters count limit passed in
3231 // through register eax. Use unsigned comparison to get negative
3234 __ j(above_equal, &slow, Label::kNear);
3236 // Read the argument from the stack and return it.
3237 STATIC_ASSERT(kSmiTagSize == 1);
3238 STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
3239 __ lea(ebx, Operand(ebp, eax, times_2, 0));
3241 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
3244 // Arguments adaptor case: Check index against actual arguments
3245 // limit found in the arguments adaptor frame. Use unsigned
3246 // comparison to get negative check for free.
3248 __ mov(ecx, Operand(ebx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3250 __ j(above_equal, &slow, Label::kNear);
3252 // Read the argument from the stack and return it.
3253 STATIC_ASSERT(kSmiTagSize == 1);
3254 STATIC_ASSERT(kSmiTag == 0); // Shifting code depends on these.
3255 __ lea(ebx, Operand(ebx, ecx, times_2, 0));
3257 __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
3260 // Slow-case: Handle non-smi or out-of-bounds access to arguments
3261 // by calling the runtime system.
3263 __ pop(ebx); // Return address.
3266 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
// Slow path for materializing a non-strict arguments object: if the caller
// is an arguments-adaptor frame, patches the argument count and receiver
// pointer on the stack from the adaptor frame before tail-calling the
// NewArgumentsFast runtime function.
// NOTE(review): the 'Label runtime' declaration, the runtime bind, and the
// closing brace are elided from this view.
3270 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) {
3271 // esp[0] : return address
3272 // esp[4] : number of parameters
3273 // esp[8] : receiver displacement
3274 // esp[12] : function
3276 // Check if the calling frame is an arguments adaptor frame.
3278 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3279 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3280 __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3281 __ j(not_equal, &runtime, Label::kNear);
3283 // Patch the arguments.length and the parameters pointer.
3284 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3285 __ mov(Operand(esp, 1 * kPointerSize), ecx);
3286 __ lea(edx, Operand(edx, ecx, times_2,
3287 StandardFrameConstants::kCallerSPOffset));
3288 __ mov(Operand(esp, 2 * kPointerSize), edx);
3291 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
// Fast path for building a non-strict (mapped/aliased) arguments object in
// generated code: allocates the JSObject, an optional parameter map, and the
// backing store in a single new-space allocation, fills in the mapped
// parameter slots (aliased to the context) and copies the remaining
// arguments. Falls back to Runtime::kNewStrictArgumentsFast on failure.
//
// FIX(review): several label references in this block had been corrupted by
// HTML-entity mangling ("&copy" -> "©", "&para..." -> "¶..."); they are
// restored to `&copy`, `&parameters_loop`, and `&parameters_test`, matching
// the `Label copy;` and `Label parameters_loop, parameters_test;`
// declarations below.
// NOTE(review): this excerpt elides some lines (e.g. the `runtime` label
// declaration/bind and several pushes implied by the esp[] annotations) —
// confirm against the complete source.
3295 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
3296 // esp[0] : return address
3297 // esp[4] : number of parameters (tagged)
3298 // esp[8] : receiver displacement
3299 // esp[12] : function
3301 // ebx = parameter count (tagged)
3302 __ mov(ebx, Operand(esp, 1 * kPointerSize));
3304 // Check if the calling frame is an arguments adaptor frame.
3305 // TODO(rossberg): Factor out some of the bits that are shared with the other
3306 // Generate* functions.
3308 Label adaptor_frame, try_allocate;
3309 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3310 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3311 __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3312 __ j(equal, &adaptor_frame, Label::kNear);
3314 // No adaptor, parameter count = argument count.
3316 __ jmp(&try_allocate, Label::kNear);
3318 // We have an adaptor frame. Patch the parameters pointer.
3319 __ bind(&adaptor_frame);
3320 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3321 __ lea(edx, Operand(edx, ecx, times_2,
3322 StandardFrameConstants::kCallerSPOffset));
3323 __ mov(Operand(esp, 2 * kPointerSize), edx);
3325 // ebx = parameter count (tagged)
3326 // ecx = argument count (tagged)
3327 // esp[4] = parameter count (tagged)
3328 // esp[8] = address of receiver argument
3329 // Compute the mapped parameter count = min(ebx, ecx) in ebx.
3331 __ j(less_equal, &try_allocate, Label::kNear);
3334 __ bind(&try_allocate);
3336 // Save mapped parameter count.
3339 // Compute the sizes of backing store, parameter map, and arguments object.
3340 // 1. Parameter map, has 2 extra words containing context and backing store.
3341 const int kParameterMapHeaderSize =
3342 FixedArray::kHeaderSize + 2 * kPointerSize;
3343 Label no_parameter_map;
3345 __ j(zero, &no_parameter_map, Label::kNear);
3346 __ lea(ebx, Operand(ebx, times_2, kParameterMapHeaderSize));
3347 __ bind(&no_parameter_map);
3349 // 2. Backing store.
3350 __ lea(ebx, Operand(ebx, ecx, times_2, FixedArray::kHeaderSize));
3352 // 3. Arguments object.
3353 __ add(ebx, Immediate(Heap::kArgumentsObjectSize));
3355 // Do the allocation of all three objects in one go.
3356 __ AllocateInNewSpace(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
3358 // eax = address of new object(s) (tagged)
3359 // ecx = argument count (tagged)
3360 // esp[0] = mapped parameter count (tagged)
3361 // esp[8] = parameter count (tagged)
3362 // esp[12] = address of receiver argument
3363 // Get the arguments boilerplate from the current (global) context into edi.
3364 Label has_mapped_parameters, copy;
3365 __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
3366 __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
3367 __ mov(ebx, Operand(esp, 0 * kPointerSize));
// Pick the plain or aliased boilerplate depending on whether any
// parameters are mapped.
3369 __ j(not_zero, &has_mapped_parameters, Label::kNear);
3370 __ mov(edi, Operand(edi,
3371 Context::SlotOffset(Context::ARGUMENTS_BOILERPLATE_INDEX)));
3372 __ jmp(&copy, Label::kNear);
3374 __ bind(&has_mapped_parameters);
3375 __ mov(edi, Operand(edi,
3376 Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
3379 // eax = address of new object (tagged)
3380 // ebx = mapped parameter count (tagged)
3381 // ecx = argument count (tagged)
3382 // edi = address of boilerplate object (tagged)
3383 // esp[0] = mapped parameter count (tagged)
3384 // esp[8] = parameter count (tagged)
3385 // esp[12] = address of receiver argument
3386 // Copy the JS object part.
3387 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3388 __ mov(edx, FieldOperand(edi, i));
3389 __ mov(FieldOperand(eax, i), edx);
3392 // Set up the callee in-object property.
3393 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
3394 __ mov(edx, Operand(esp, 4 * kPointerSize));
3395 __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3396 Heap::kArgumentsCalleeIndex * kPointerSize),
3399 // Use the length (smi tagged) and set that as an in-object property too.
3400 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
3401 __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3402 Heap::kArgumentsLengthIndex * kPointerSize),
3405 // Set up the elements pointer in the allocated arguments object.
3406 // If we allocated a parameter map, edi will point there, otherwise to the
3408 __ lea(edi, Operand(eax, Heap::kArgumentsObjectSize));
3409 __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3411 // eax = address of new object (tagged)
3412 // ebx = mapped parameter count (tagged)
3413 // ecx = argument count (tagged)
3414 // edi = address of parameter map or backing store (tagged)
3415 // esp[0] = mapped parameter count (tagged)
3416 // esp[8] = parameter count (tagged)
3417 // esp[12] = address of receiver argument
3421 // Initialize parameter map. If there are no mapped arguments, we're done.
3422 Label skip_parameter_map;
3424 __ j(zero, &skip_parameter_map);
3426 __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3427 Immediate(FACTORY->non_strict_arguments_elements_map()));
// Parameter map length = mapped parameter count + 2 (context and backing
// store slots); the lea adds the tagged constant without clobbering flags.
3428 __ lea(eax, Operand(ebx, reinterpret_cast<intptr_t>(Smi::FromInt(2))));
3429 __ mov(FieldOperand(edi, FixedArray::kLengthOffset), eax);
3430 __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 0 * kPointerSize), esi);
3431 __ lea(eax, Operand(edi, ebx, times_2, kParameterMapHeaderSize));
3432 __ mov(FieldOperand(edi, FixedArray::kHeaderSize + 1 * kPointerSize), eax);
3434 // Copy the parameter slots and the holes in the arguments.
3435 // We need to fill in mapped_parameter_count slots. They index the context,
3436 // where parameters are stored in reverse order, at
3437 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
3438 // The mapped parameter thus need to get indices
3439 // MIN_CONTEXT_SLOTS+parameter_count-1 ..
3440 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
3441 // We loop from right to left.
3442 Label parameters_loop, parameters_test;
3444 __ mov(eax, Operand(esp, 2 * kPointerSize));
3445 __ mov(ebx, Immediate(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
3446 __ add(ebx, Operand(esp, 4 * kPointerSize));
3448 __ mov(ecx, FACTORY->the_hole_value());
3450 __ lea(edi, Operand(edi, eax, times_2, kParameterMapHeaderSize));
3451 // eax = loop variable (tagged)
3452 // ebx = mapping index (tagged)
3453 // ecx = the hole value
3454 // edx = address of parameter map (tagged)
3455 // edi = address of backing store (tagged)
3456 // esp[0] = argument count (tagged)
3457 // esp[4] = address of new object (tagged)
3458 // esp[8] = mapped parameter count (tagged)
3459 // esp[16] = parameter count (tagged)
3460 // esp[20] = address of receiver argument
3461 __ jmp(&parameters_test, Label::kNear);
3463 __ bind(&parameters_loop);
3464 __ sub(eax, Immediate(Smi::FromInt(1)));
// Store the context index in the parameter map and a hole in the
// corresponding backing-store slot (mapped values live in the context).
3465 __ mov(FieldOperand(edx, eax, times_2, kParameterMapHeaderSize), ebx);
3466 __ mov(FieldOperand(edi, eax, times_2, FixedArray::kHeaderSize), ecx);
3467 __ add(ebx, Immediate(Smi::FromInt(1)));
3468 __ bind(&parameters_test);
3470 __ j(not_zero, &parameters_loop, Label::kNear);
3473 __ bind(&skip_parameter_map);
3475 // ecx = argument count (tagged)
3476 // edi = address of backing store (tagged)
3477 // esp[0] = address of new object (tagged)
3478 // esp[4] = mapped parameter count (tagged)
3479 // esp[12] = parameter count (tagged)
3480 // esp[16] = address of receiver argument
3481 // Copy arguments header and remaining slots (if there are any).
3482 __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3483 Immediate(FACTORY->fixed_array_map()));
3484 __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3486 Label arguments_loop, arguments_test;
3487 __ mov(ebx, Operand(esp, 1 * kPointerSize));
3488 __ mov(edx, Operand(esp, 4 * kPointerSize));
3489 __ sub(edx, ebx); // Is there a smarter way to do negative scaling?
3491 __ jmp(&arguments_test, Label::kNear);
3493 __ bind(&arguments_loop);
3494 __ sub(edx, Immediate(kPointerSize));
3495 __ mov(eax, Operand(edx, 0));
3496 __ mov(FieldOperand(edi, ebx, times_2, FixedArray::kHeaderSize), eax);
3497 __ add(ebx, Immediate(Smi::FromInt(1)));
3499 __ bind(&arguments_test);
3501 __ j(less, &arguments_loop, Label::kNear);
3504 __ pop(eax); // Address of arguments object.
3505 __ pop(ebx); // Parameter count.
3507 // Return and remove the on-stack parameters.
3508 __ ret(3 * kPointerSize);
3510 // Do the runtime call to allocate the arguments object.
3512 __ pop(eax); // Remove saved parameter count.
3513 __ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
3514 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
// Builds a strict-mode arguments object (no parameter mapping, no callee
// property): allocates the JSObject plus its elements FixedArray in one
// new-space allocation and copies the actual arguments into it. Falls back
// to Runtime::kNewStrictArgumentsFast when allocation fails.
// NOTE(review): this excerpt elides some lines (e.g. `int offset =`, the
// `done`/`loop` label declarations and their binds, the runtime bind) —
// confirm against the complete source.
3518 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
3519 // esp[0] : return address
3520 // esp[4] : number of parameters
3521 // esp[8] : receiver displacement
3522 // esp[12] : function
3524 // Check if the calling frame is an arguments adaptor frame.
3525 Label adaptor_frame, try_allocate, runtime;
3526 __ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
3527 __ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
3528 __ cmp(ecx, Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3529 __ j(equal, &adaptor_frame, Label::kNear);
3531 // Get the length from the frame.
3532 __ mov(ecx, Operand(esp, 1 * kPointerSize));
3533 __ jmp(&try_allocate, Label::kNear);
3535 // Patch the arguments.length and the parameters pointer.
// Use the actual argument count from the adaptor frame and recompute the
// parameters pointer accordingly.
3536 __ bind(&adaptor_frame);
3537 __ mov(ecx, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
3538 __ mov(Operand(esp, 1 * kPointerSize), ecx);
3539 __ lea(edx, Operand(edx, ecx, times_2,
3540 StandardFrameConstants::kCallerSPOffset));
3541 __ mov(Operand(esp, 2 * kPointerSize), edx);
3543 // Try the new space allocation. Start out with computing the size of
3544 // the arguments object and the elements array.
3545 Label add_arguments_object;
3546 __ bind(&try_allocate);
// When there are no arguments, only the fixed-size object is allocated (no
// elements array).
3548 __ j(zero, &add_arguments_object, Label::kNear);
3549 __ lea(ecx, Operand(ecx, times_2, FixedArray::kHeaderSize));
3550 __ bind(&add_arguments_object);
3551 __ add(ecx, Immediate(Heap::kArgumentsObjectSizeStrict));
3553 // Do the allocation of both objects in one go.
3554 __ AllocateInNewSpace(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
3556 // Get the arguments boilerplate from the current (global) context.
3557 __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_INDEX)));
3558 __ mov(edi, FieldOperand(edi, GlobalObject::kGlobalContextOffset));
3560 Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX);
3561 __ mov(edi, Operand(edi, offset));
3563 // Copy the JS object part.
3564 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
3565 __ mov(ebx, FieldOperand(edi, i));
3566 __ mov(FieldOperand(eax, i), ebx);
3569 // Get the length (smi tagged) and set that as an in-object property too.
3570 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
3571 __ mov(ecx, Operand(esp, 1 * kPointerSize));
3572 __ mov(FieldOperand(eax, JSObject::kHeaderSize +
3573 Heap::kArgumentsLengthIndex * kPointerSize),
3576 // If there are no actual arguments, we're done.
3579 __ j(zero, &done, Label::kNear);
3581 // Get the parameters pointer from the stack.
3582 __ mov(edx, Operand(esp, 2 * kPointerSize));
3584 // Set up the elements pointer in the allocated arguments object and
3585 // initialize the header in the elements fixed array.
3586 __ lea(edi, Operand(eax, Heap::kArgumentsObjectSizeStrict));
3587 __ mov(FieldOperand(eax, JSObject::kElementsOffset), edi);
3588 __ mov(FieldOperand(edi, FixedArray::kMapOffset),
3589 Immediate(FACTORY->fixed_array_map()));
3591 __ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
3592 // Untag the length for the loop below.
3595 // Copy the fixed array slots.
// Arguments on the stack grow downward while the FixedArray is filled
// upward, hence the paired add(edi)/sub(edx).
3598 __ mov(ebx, Operand(edx, -1 * kPointerSize)); // Skip receiver.
3599 __ mov(FieldOperand(edi, FixedArray::kHeaderSize), ebx);
3600 __ add(edi, Immediate(kPointerSize));
3601 __ sub(edx, Immediate(kPointerSize));
3603 __ j(not_zero, &loop);
3605 // Return and remove the on-stack parameters.
3607 __ ret(3 * kPointerSize);
3609 // Do the runtime call to allocate the arguments object.
3611 __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
// Executes a compiled Irregexp regular expression against a subject string
// entirely in generated code: validates the four stacked JS arguments,
// flattens/classifies the subject (sequential, cons, sliced, external;
// ASCII or two-byte), calls the native regexp code, and on success fills in
// the last-match-info array from the static offsets vector. Any check that
// fails bails out to Runtime::kRegExpExec.
// NOTE(review): this excerpt elides many lines (label declarations such as
// `success`/`failure`, binds, pushes) — confirm against the complete source.
3615 void RegExpExecStub::Generate(MacroAssembler* masm) {
3616 // Just jump directly to runtime if native RegExp is not selected at compile
3617 // time or if regexp entry in generated code is turned off runtime switch or
3619 #ifdef V8_INTERPRETED_REGEXP
3620 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
3621 #else // V8_INTERPRETED_REGEXP
3623 // Stack frame on entry.
3624 // esp[0]: return address
3625 // esp[4]: last_match_info (expected JSArray)
3626 // esp[8]: previous index
3627 // esp[12]: subject string
3628 // esp[16]: JSRegExp object
3630 static const int kLastMatchInfoOffset = 1 * kPointerSize;
3631 static const int kPreviousIndexOffset = 2 * kPointerSize;
3632 static const int kSubjectOffset = 3 * kPointerSize;
3633 static const int kJSRegExpOffset = 4 * kPointerSize;
3635 Label runtime, invoke_regexp;
3637 // Ensure that a RegExp stack is allocated.
3638 ExternalReference address_of_regexp_stack_memory_address =
3639 ExternalReference::address_of_regexp_stack_memory_address(
3641 ExternalReference address_of_regexp_stack_memory_size =
3642 ExternalReference::address_of_regexp_stack_memory_size(masm->isolate());
3643 __ mov(ebx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3645 __ j(zero, &runtime);
3647 // Check that the first argument is a JSRegExp object.
3648 __ mov(eax, Operand(esp, kJSRegExpOffset));
3649 STATIC_ASSERT(kSmiTag == 0);
3650 __ JumpIfSmi(eax, &runtime);
3651 __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
3652 __ j(not_equal, &runtime);
3653 // Check that the RegExp has been compiled (data contains a fixed array).
3654 __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3655 if (FLAG_debug_code) {
3656 __ test(ecx, Immediate(kSmiTagMask));
3657 __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
3658 __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
3659 __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
3662 // ecx: RegExp data (FixedArray)
3663 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
3664 __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
3665 __ cmp(ebx, Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
3666 __ j(not_equal, &runtime);
3668 // ecx: RegExp data (FixedArray)
3669 // Check that the number of captures fit in the static offsets vector buffer.
3670 __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3671 // Calculate number of capture registers (number_of_captures + 1) * 2. This
3672 // uses the assumption that smis are 2 * their untagged value.
3673 STATIC_ASSERT(kSmiTag == 0);
3674 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3675 __ add(edx, Immediate(2)); // edx was a smi.
3676 // Check that the static offsets vector buffer is large enough.
3677 __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
3678 __ j(above, &runtime);
3680 // ecx: RegExp data (FixedArray)
3681 // edx: Number of capture registers
3682 // Check that the second argument is a string.
3683 __ mov(eax, Operand(esp, kSubjectOffset));
3684 __ JumpIfSmi(eax, &runtime);
3685 Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
3686 __ j(NegateCondition(is_string), &runtime);
3687 // Get the length of the string to ebx.
3688 __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
3690 // ebx: Length of subject string as a smi
3691 // ecx: RegExp data (FixedArray)
3692 // edx: Number of capture registers
3693 // Check that the third argument is a positive smi less than the subject
3694 // string length. A negative value will be greater (unsigned comparison).
3695 __ mov(eax, Operand(esp, kPreviousIndexOffset));
3696 __ JumpIfNotSmi(eax, &runtime);
3698 __ j(above_equal, &runtime);
3700 // ecx: RegExp data (FixedArray)
3701 // edx: Number of capture registers
3702 // Check that the fourth object is a JSArray object.
3703 __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3704 __ JumpIfSmi(eax, &runtime);
3705 __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
3706 __ j(not_equal, &runtime);
3707 // Check that the JSArray is in fast case.
3708 __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3709 __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
3710 Factory* factory = masm->isolate()->factory();
3711 __ cmp(eax, factory->fixed_array_map());
3712 __ j(not_equal, &runtime);
3713 // Check that the last match info has space for the capture registers and the
3714 // additional information.
3715 __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
3717 __ add(edx, Immediate(RegExpImpl::kLastMatchOverhead));
3719 __ j(greater, &runtime);
3721 // Reset offset for possibly sliced string.
3722 __ Set(edi, Immediate(0));
3723 // ecx: RegExp data (FixedArray)
3724 // Check the representation and encoding of the subject string.
3725 Label seq_ascii_string, seq_two_byte_string, check_code;
3726 __ mov(eax, Operand(esp, kSubjectOffset));
3727 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3728 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
3729 // First check for flat two byte string.
3730 __ and_(ebx, kIsNotStringMask |
3731 kStringRepresentationMask |
3732 kStringEncodingMask |
3733 kShortExternalStringMask);
3734 STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
3735 __ j(zero, &seq_two_byte_string, Label::kNear);
3736 // Any other flat string must be a flat ASCII string. None of the following
3737 // string type tests will succeed if subject is not a string or a short
3739 __ and_(ebx, Immediate(kIsNotStringMask |
3740 kStringRepresentationMask |
3741 kShortExternalStringMask));
3742 __ j(zero, &seq_ascii_string, Label::kNear);
3744 // ebx: whether subject is a string and if yes, its string representation
3745 // Check for flat cons string or sliced string.
3746 // A flat cons string is a cons string where the second part is the empty
3747 // string. In that case the subject string is just the first part of the cons
3748 // string. Also in this case the first part of the cons string is known to be
3749 // a sequential string or an external string.
3750 // In the case of a sliced string its offset has to be taken into account.
3751 Label cons_string, external_string, check_encoding;
3752 STATIC_ASSERT(kConsStringTag < kExternalStringTag);
3753 STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
3754 STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
3755 STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
3756 __ cmp(ebx, Immediate(kExternalStringTag));
3757 __ j(less, &cons_string);
3758 __ j(equal, &external_string);
3760 // Catch non-string subject or short external string.
3761 STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag !=0);
3762 __ test(ebx, Immediate(kIsNotStringMask | kShortExternalStringTag));
3763 __ j(not_zero, &runtime);
3765 // String is sliced.
3766 __ mov(edi, FieldOperand(eax, SlicedString::kOffsetOffset));
3767 __ mov(eax, FieldOperand(eax, SlicedString::kParentOffset));
3768 // edi: offset of sliced string, smi-tagged.
3769 // eax: parent string.
3770 __ jmp(&check_encoding, Label::kNear);
3771 // String is a cons string, check whether it is flat.
3772 __ bind(&cons_string);
3773 __ cmp(FieldOperand(eax, ConsString::kSecondOffset), factory->empty_string());
3774 __ j(not_equal, &runtime);
3775 __ mov(eax, FieldOperand(eax, ConsString::kFirstOffset));
3776 __ bind(&check_encoding);
3777 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
3778 // eax: first part of cons string or parent of sliced string.
3779 // ebx: map of first part of cons string or map of parent of sliced string.
3780 // Is first part of cons or parent of slice a flat two byte string?
3781 __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3782 kStringRepresentationMask | kStringEncodingMask);
3783 STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
3784 __ j(zero, &seq_two_byte_string, Label::kNear);
3785 // Any other flat string must be sequential ASCII or external.
3786 __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
3787 kStringRepresentationMask);
3788 __ j(not_zero, &external_string);
3790 __ bind(&seq_ascii_string);
3791 // eax: subject string (flat ASCII)
3792 // ecx: RegExp data (FixedArray)
3793 __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
3794 __ Set(ecx, Immediate(1)); // Type is ASCII.
3795 __ jmp(&check_code, Label::kNear);
3797 __ bind(&seq_two_byte_string);
3798 // eax: subject string (flat two byte)
3799 // ecx: RegExp data (FixedArray)
3800 __ mov(edx, FieldOperand(ecx, JSRegExp::kDataUC16CodeOffset));
3801 __ Set(ecx, Immediate(0)); // Type is two byte.
3803 __ bind(&check_code);
3804 // Check that the irregexp code has been generated for the actual string
3805 // encoding. If it has, the field contains a code object otherwise it contains
3806 // a smi (code flushing support).
3807 __ JumpIfSmi(edx, &runtime);
3809 // eax: subject string
3811 // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
3812 // Load used arguments before starting to push arguments for call to native
3813 // RegExp code to avoid handling changing stack height.
3814 __ mov(ebx, Operand(esp, kPreviousIndexOffset));
3815 __ SmiUntag(ebx); // Previous index from smi.
3817 // eax: subject string
3818 // ebx: previous index
3820 // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
3821 // All checks done. Now push arguments for native regexp code.
3822 Counters* counters = masm->isolate()->counters();
3823 __ IncrementCounter(counters->regexp_entry_native(), 1);
3825 // Isolates: note we add an additional parameter here (isolate pointer).
3826 static const int kRegExpExecuteArguments = 8;
3827 __ EnterApiExitFrame(kRegExpExecuteArguments);
3829 // Argument 8: Pass current isolate address.
3830 __ mov(Operand(esp, 7 * kPointerSize),
3831 Immediate(ExternalReference::isolate_address()));
3833 // Argument 7: Indicate that this is a direct call from JavaScript.
3834 __ mov(Operand(esp, 6 * kPointerSize), Immediate(1));
3836 // Argument 6: Start (high end) of backtracking stack memory area.
3837 __ mov(esi, Operand::StaticVariable(address_of_regexp_stack_memory_address));
3838 __ add(esi, Operand::StaticVariable(address_of_regexp_stack_memory_size));
3839 __ mov(Operand(esp, 5 * kPointerSize), esi);
3841 // Argument 5: static offsets vector buffer.
3842 __ mov(Operand(esp, 4 * kPointerSize),
3843 Immediate(ExternalReference::address_of_static_offsets_vector(
3846 // Argument 2: Previous index.
3847 __ mov(Operand(esp, 1 * kPointerSize), ebx);
3849 // Argument 1: Original subject string.
3850 // The original subject is in the previous stack frame. Therefore we have to
3851 // use ebp, which points exactly to one pointer size below the previous esp.
3852 // (Because creating a new stack frame pushes the previous ebp onto the stack
3853 // and thereby moves up esp by one kPointerSize.)
3854 __ mov(esi, Operand(ebp, kSubjectOffset + kPointerSize));
3855 __ mov(Operand(esp, 0 * kPointerSize), esi);
3857 // esi: original subject string
3858 // eax: underlying subject string
3859 // ebx: previous index
3860 // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
3862 // Argument 4: End of string data
3863 // Argument 3: Start of string data
3864 // Prepare start and end index of the input.
3865 // Load the length from the original sliced string if that is the case.
3866 __ mov(esi, FieldOperand(esi, String::kLengthOffset));
3867 __ add(esi, edi); // Calculate input end wrt offset.
3869 __ add(ebx, edi); // Calculate input start wrt offset.
3871 // ebx: start index of the input string
3872 // esi: end index of the input string
3873 Label setup_two_byte, setup_rest;
3875 __ j(zero, &setup_two_byte, Label::kNear);
// ASCII: indices are smis (value * 2) but chars are 1 byte, so times_1
// scaling on the smi value addresses the right byte.
3877 __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
3878 __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
3879 __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
3880 __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
3881 __ jmp(&setup_rest, Label::kNear);
3883 __ bind(&setup_two_byte);
3884 STATIC_ASSERT(kSmiTag == 0);
3885 STATIC_ASSERT(kSmiTagSize == 1); // esi is smi (powered by 2).
3886 __ lea(ecx, FieldOperand(eax, esi, times_1, SeqTwoByteString::kHeaderSize));
3887 __ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
3888 __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
3889 __ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
3891 __ bind(&setup_rest);
3893 // Locate the code entry and call it.
3894 __ add(edx, Immediate(Code::kHeaderSize - kHeapObjectTag));
3897 // Drop arguments and come back to JS mode.
3898 __ LeaveApiExitFrame();
3900 // Check the result.
3902 __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
3903 __ j(equal, &success);
3905 __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
3906 __ j(equal, &failure);
3907 __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
3908 // If not exception it can only be retry. Handle that in the runtime system.
3909 __ j(not_equal, &runtime);
3910 // Result must now be exception. If there is no pending exception already a
3911 // stack overflow (on the backtrack stack) was detected in RegExp code but
3912 // haven't created the exception yet. Handle that in the runtime system.
3913 // TODO(592): Rerunning the RegExp to get the stack overflow exception.
3914 ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
3916 __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
3917 __ mov(eax, Operand::StaticVariable(pending_exception));
3919 __ j(equal, &runtime);
3920 // For exception, throw the exception again.
3922 // Clear the pending exception variable.
3923 __ mov(Operand::StaticVariable(pending_exception), edx);
3925 // Special handling of termination exceptions which are uncatchable
3926 // by JavaScript code.
3927 __ cmp(eax, factory->termination_exception());
3928 Label throw_termination_exception;
3929 __ j(equal, &throw_termination_exception, Label::kNear);
3931 // Handle normal exception by following handler chain.
3934 __ bind(&throw_termination_exception);
3935 __ ThrowUncatchable(eax);
3938 // For failure to match, return null.
3939 __ mov(eax, factory->null_value());
3940 __ ret(4 * kPointerSize);
3942 // Load RegExp data.
3944 __ mov(eax, Operand(esp, kJSRegExpOffset));
3945 __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
3946 __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
3947 // Calculate number of capture registers (number_of_captures + 1) * 2.
3948 STATIC_ASSERT(kSmiTag == 0);
3949 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
3950 __ add(edx, Immediate(2)); // edx was a smi.
3952 // edx: Number of capture registers
3953 // Load last_match_info which is still known to be a fast case JSArray.
3954 __ mov(eax, Operand(esp, kLastMatchInfoOffset));
3955 __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
3957 // ebx: last_match_info backing store (FixedArray)
3958 // edx: number of capture registers
3959 // Store the capture count.
3960 __ SmiTag(edx); // Number of capture registers to smi.
3961 __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
3962 __ SmiUntag(edx); // Number of capture registers back from smi.
3963 // Store last subject and last input.
3964 __ mov(eax, Operand(esp, kSubjectOffset));
3965 __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
3966 __ RecordWriteField(ebx,
3967 RegExpImpl::kLastSubjectOffset,
3971 __ mov(eax, Operand(esp, kSubjectOffset));
3972 __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
3973 __ RecordWriteField(ebx,
3974 RegExpImpl::kLastInputOffset,
3979 // Get the static offsets vector filled by the native regexp code.
3980 ExternalReference address_of_static_offsets_vector =
3981 ExternalReference::address_of_static_offsets_vector(masm->isolate());
3982 __ mov(ecx, Immediate(address_of_static_offsets_vector));
3984 // ebx: last_match_info backing store (FixedArray)
3985 // ecx: offsets vector
3986 // edx: number of capture registers
3987 Label next_capture, done;
3988 // Capture register counter starts from number of capture registers and
3989 // counts down until wrapping after zero.
3990 __ bind(&next_capture);
3991 __ sub(edx, Immediate(1));
3992 __ j(negative, &done, Label::kNear);
3993 // Read the value from the static offsets vector buffer.
3994 __ mov(edi, Operand(ecx, edx, times_int_size, 0));
3996 // Store the smi value in the last match info.
3997 __ mov(FieldOperand(ebx,
4000 RegExpImpl::kFirstCaptureOffset),
4002 __ jmp(&next_capture);
4005 // Return last match info.
4006 __ mov(eax, Operand(esp, kLastMatchInfoOffset));
4007 __ ret(4 * kPointerSize);
4009 // External string. Short external strings have already been ruled out.
4010 // eax: subject string (expected to be external)
4012 __ bind(&external_string);
4013 __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
4014 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
4015 if (FLAG_debug_code) {
4016 // Assert that we do not have a cons or slice (indirect strings) here.
4017 // Sequential strings have already been ruled out.
4018 __ test_b(ebx, kIsIndirectStringMask);
4019 __ Assert(zero, "external string expected, but not found");
4021 __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
4022 // Move the pointer so that offset-wise, it looks like a sequential string.
4023 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
4024 __ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
4025 STATIC_ASSERT(kTwoByteStringTag == 0);
4026 __ test_b(ebx, kStringEncodingMask);
4027 __ j(not_zero, &seq_ascii_string);
4028 __ jmp(&seq_two_byte_string);
4030 // Do the runtime call to execute the regexp.
4032 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
4033 #endif // V8_INTERPRETED_REGEXP
// Builds a JSRegExpResult (a JSArray with extra `index` and `input`
// in-object properties) plus its elements FixedArray in one new-space
// allocation, filling the elements with the-hole. Lengths above
// kMaxInlineLength, non-smi lengths, and allocation failure fall back to
// Runtime::kRegExpConstructResult.
// NOTE(review): this excerpt elides some lines (e.g. the `slowcase`/`done`
// label declarations and their binds, the loop back-edge) — confirm against
// the complete source.
4037 void RegExpConstructResultStub::Generate(MacroAssembler* masm) {
4038 const int kMaxInlineLength = 100;
4041 __ mov(ebx, Operand(esp, kPointerSize * 3));
4042 __ JumpIfNotSmi(ebx, &slowcase);
4043 __ cmp(ebx, Immediate(Smi::FromInt(kMaxInlineLength)));
4044 __ j(above, &slowcase);
4045 // Smi-tagging is equivalent to multiplying by 2.
4046 STATIC_ASSERT(kSmiTag == 0);
4047 STATIC_ASSERT(kSmiTagSize == 1);
4048 // Allocate RegExpResult followed by FixedArray with size in ebx.
4049 // JSArray: [Map][empty properties][Elements][Length-smi][index][input]
4050 // Elements: [Map][Length][..elements..]
// times_half_pointer_size compensates for ebx being a smi (already
// multiplied by 2), yielding length * kPointerSize bytes of elements.
4051 __ AllocateInNewSpace(JSRegExpResult::kSize + FixedArray::kHeaderSize,
4052 times_half_pointer_size,
4053 ebx, // In: Number of elements (times 2, being a smi)
4054 eax, // Out: Start of allocation (tagged).
4055 ecx, // Out: End of allocation.
4056 edx, // Scratch register
4059 // eax: Start of allocated area, object-tagged.
4061 // Set JSArray map to global.regexp_result_map().
4062 // Set empty properties FixedArray.
4063 // Set elements to point to FixedArray allocated right after the JSArray.
4064 // Interleave operations for better latency.
4065 __ mov(edx, ContextOperand(esi, Context::GLOBAL_INDEX));
4066 Factory* factory = masm->isolate()->factory();
4067 __ mov(ecx, Immediate(factory->empty_fixed_array()));
4068 __ lea(ebx, Operand(eax, JSRegExpResult::kSize));
4069 __ mov(edx, FieldOperand(edx, GlobalObject::kGlobalContextOffset));
4070 __ mov(FieldOperand(eax, JSObject::kElementsOffset), ebx);
4071 __ mov(FieldOperand(eax, JSObject::kPropertiesOffset), ecx);
4072 __ mov(edx, ContextOperand(edx, Context::REGEXP_RESULT_MAP_INDEX));
4073 __ mov(FieldOperand(eax, HeapObject::kMapOffset), edx);
4075 // Set input, index and length fields from arguments.
4076 __ mov(ecx, Operand(esp, kPointerSize * 1));
4077 __ mov(FieldOperand(eax, JSRegExpResult::kInputOffset), ecx);
4078 __ mov(ecx, Operand(esp, kPointerSize * 2));
4079 __ mov(FieldOperand(eax, JSRegExpResult::kIndexOffset), ecx);
4080 __ mov(ecx, Operand(esp, kPointerSize * 3));
4081 __ mov(FieldOperand(eax, JSArray::kLengthOffset), ecx);
4083 // Fill out the elements FixedArray.
4086 // ecx: Number of elements in array, as smi.
4089 __ mov(FieldOperand(ebx, HeapObject::kMapOffset),
4090 Immediate(factory->fixed_array_map()));
4092 __ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
4093 // Fill contents of fixed-array with the-hole.
4095 __ mov(edx, Immediate(factory->the_hole_value()));
4096 __ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
4097 // Fill fixed array elements with hole.
4099 // ecx: Number of elements to fill.
4100 // ebx: Start of elements in FixedArray.
// Count-down fill loop: store the-hole at decreasing indices.
4105 __ j(less_equal, &done, Label::kNear); // Jump if ecx is negative or zero.
4106 __ sub(ecx, Immediate(1));
4107 __ mov(Operand(ebx, ecx, times_pointer_size, 0), edx);
4111 __ ret(3 * kPointerSize);
4114 __ TailCallRuntime(Runtime::kRegExpConstructResult, 3, 1);
4118 void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Looks up |object| (a smi or heap number) in the isolate's number-string
// cache and loads the cached string into |result| on a hit; jumps to
// |not_found| on a miss.  |scratch1|/|scratch2| are clobbered.
// FIX(review): the label reference below was garbled by HTML-entity
// mangling ("&not" had become the character U+00AC); restored to &not_smi.
// NOTE(review): sampled listing -- the remaining parameter lines and some
// label binds (e.g. not_smi) are not visible here.
4125 // Use of registers. Register result is used as a temporary.
4126 Register number_string_cache = result;
4127 Register mask = scratch1;
4128 Register scratch = scratch2;
4130 // Load the number string cache.
4131 ExternalReference roots_array_start =
4132 ExternalReference::roots_array_start(masm->isolate());
4133 __ mov(scratch, Immediate(Heap::kNumberStringCacheRootIndex));
4134 __ mov(number_string_cache,
4135 Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
4136 // Make the hash mask from the length of the number string cache. It
4137 // contains two elements (number and string) for each cache entry.
4138 __ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
4139 __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
4140 __ sub(mask, Immediate(1)); // Make mask.
4142 // Calculate the entry in the number string cache. The hash value in the
4143 // number string cache for smis is just the smi value, and the hash for
4144 // doubles is the xor of the upper and lower words. See
4145 // Heap::GetNumberStringCache.
4146 Label smi_hash_calculated;
4147 Label load_result_from_cache;
4148 if (object_is_smi) {
4149 __ mov(scratch, object);
4150 __ SmiUntag(scratch);
4153 STATIC_ASSERT(kSmiTag == 0);
4154 __ JumpIfNotSmi(object, &not_smi, Label::kNear);
4155 __ mov(scratch, object);
4156 __ SmiUntag(scratch);
4157 __ jmp(&smi_hash_calculated, Label::kNear);
4159 __ cmp(FieldOperand(object, HeapObject::kMapOffset),
4160 masm->isolate()->factory()->heap_number_map());
4161 __ j(not_equal, not_found);
4162 STATIC_ASSERT(8 == kDoubleSize);
4163 __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
4164 __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
4165 // Object is heap number and hash is now in scratch. Calculate cache index.
4166 __ and_(scratch, mask);
4167 Register index = scratch;
4168 Register probe = mask;
4170 FieldOperand(number_string_cache,
4172 times_twice_pointer_size,
4173 FixedArray::kHeaderSize));
4174 __ JumpIfSmi(probe, not_found);
4175 if (CpuFeatures::IsSupported(SSE2)) {
4176 CpuFeatures::Scope fscope(SSE2);
4177 __ movdbl(xmm0, FieldOperand(object, HeapNumber::kValueOffset));
4178 __ movdbl(xmm1, FieldOperand(probe, HeapNumber::kValueOffset));
4179 __ ucomisd(xmm0, xmm1);
4181 __ fld_d(FieldOperand(object, HeapNumber::kValueOffset));
4182 __ fld_d(FieldOperand(probe, HeapNumber::kValueOffset));
4185 __ j(parity_even, not_found); // Bail out if NaN is involved.
4186 __ j(not_equal, not_found); // The cache did not contain this value.
4187 __ jmp(&load_result_from_cache, Label::kNear);
4190 __ bind(&smi_hash_calculated);
4191 // Object is smi and hash is now in scratch. Calculate cache index.
4192 __ and_(scratch, mask);
4193 Register index = scratch;
4194 // Check if the entry is the smi we are looking for.
4196 FieldOperand(number_string_cache,
4198 times_twice_pointer_size,
4199 FixedArray::kHeaderSize));
4200 __ j(not_equal, not_found);
4202 // Get the result from the cache.
4203 __ bind(&load_result_from_cache);
4205 FieldOperand(number_string_cache,
4207 times_twice_pointer_size,
4208 FixedArray::kHeaderSize + kPointerSize));
4209 Counters* counters = masm->isolate()->counters();
4210 __ IncrementCounter(counters->number_to_string_native(), 1);
4214 void NumberToStringStub::Generate(MacroAssembler* masm) {
// Converts the number on top of the stack to a string, consulting the
// number-string cache first and falling back to the runtime on a miss.
// NOTE(review): sampled listing -- the Label runtime declaration and the
// __ bind(&runtime) line are not visible here.
4217 __ mov(ebx, Operand(esp, kPointerSize));
4219 // Generate code to lookup number in the number string cache.
4220 GenerateLookupNumberStringCache(masm, ebx, eax, ecx, edx, false, &runtime);
4221 __ ret(1 * kPointerSize);
4224 // Handle number to string in the runtime system if not found in the cache.
4225 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1);
4229 static int NegativeComparisonResult(Condition cc) {
// Returns the comparison outcome to report when the condition fails:
// greater/greater_equal map to LESS, less/less_equal map to GREATER.
// Must never be called with the equal condition.
4230 ASSERT(cc != equal);
4231 ASSERT((cc == less) || (cc == less_equal)
4232 || (cc == greater) || (cc == greater_equal));
4233 return (cc == greater || cc == greater_equal) ? LESS : GREATER;
4236 void CompareStub::Generate(MacroAssembler* masm) {
// Full comparison stub for eax (rhs) vs edx (lhs): smis, identical objects,
// NaN/undefined special cases, strict-equality fast paths, number
// comparison (SSE2 or x87), symbols, flat ASCII strings, and finally a
// builtin call.  Result is a smi: <0 (less), 0 (equal), >0 (greater).
// FIX(review): several label references below were garbled by HTML-entity
// mangling ("&not" had become the character U+00AC); restored to
// &not_identical / &not_smis / &not_both_objects.
// NOTE(review): sampled listing -- some original lines (several label
// binds, pushes, and closing braces) are not visible here.
4237 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
4239 Label check_unequal_objects;
4241 // Compare two smis if required.
4242 if (include_smi_compare_) {
4243 Label non_smi, smi_done;
4246 __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
4247 __ sub(edx, eax); // Return on the result of the subtraction.
4248 __ j(no_overflow, &smi_done, Label::kNear);
4249 __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
4254 } else if (FLAG_debug_code) {
4257 __ test(ecx, Immediate(kSmiTagMask));
4258 __ Assert(not_zero, "Unexpected smi operands.");
4261 // NOTICE! This code is only reached after a smi-fast-case check, so
4262 // it is certain that at least one operand isn't a smi.
4264 // Identical objects can be compared fast, but there are some tricky cases
4265 // for NaN and undefined.
4267 Label not_identical;
4269 __ j(not_equal, &not_identical);
4272 // Check for undefined. undefined OP undefined is false even though
4273 // undefined == undefined.
4274 Label check_for_nan;
4275 __ cmp(edx, masm->isolate()->factory()->undefined_value());
4276 __ j(not_equal, &check_for_nan, Label::kNear);
4277 __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4279 __ bind(&check_for_nan);
4282 // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
4283 // so we do the second best thing - test it ourselves.
4284 // Note: if cc_ != equal, never_nan_nan_ is not used.
4285 if (never_nan_nan_ && (cc_ == equal)) {
4286 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4290 __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
4291 Immediate(masm->isolate()->factory()->heap_number_map()));
4292 __ j(equal, &heap_number, Label::kNear);
4294 // Call runtime on identical JSObjects. Otherwise return equal.
4295 __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4296 __ j(above_equal, &not_identical);
4298 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4301 __ bind(&heap_number);
4302 // It is a heap number, so return non-equal if it's NaN and equal if
4304 // The representation of NaN values has all exponent bits (52..62) set,
4305 // and not all mantissa bits (0..51) clear.
4306 // We only accept QNaNs, which have bit 51 set.
4307 // Read top bits of double representation (second word of value).
4309 // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
4310 // all bits in the mask are set. We only need to check the word
4311 // that contains the exponent and high bit of the mantissa.
4312 STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
4313 __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
4314 __ Set(eax, Immediate(0));
4315 // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
4318 __ cmp(edx, kQuietNaNHighBitsMask << 1);
4320 STATIC_ASSERT(EQUAL != 1);
4321 __ setcc(above_equal, eax);
4325 __ j(above_equal, &nan, Label::kNear);
4326 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
4329 __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4334 __ bind(&not_identical);
4337 // Strict equality can quickly decide whether objects are equal.
4338 // Non-strict object equality is slower, so it is handled later in the stub.
4339 if (cc_ == equal && strict_) {
4340 Label slow; // Fallthrough label.
4342 // If we're doing a strict equality comparison, we don't have to do
4343 // type conversion, so we generate code to do fast comparison for objects
4344 // and oddballs. Non-smi numbers and strings still go through the usual
4346 // If either is a Smi (we know that not both are), then they can only
4347 // be equal if the other is a HeapNumber. If so, use the slow case.
4348 STATIC_ASSERT(kSmiTag == 0);
4349 ASSERT_EQ(0, Smi::FromInt(0));
4350 __ mov(ecx, Immediate(kSmiTagMask));
4353 __ j(not_zero, &not_smis, Label::kNear);
4354 // One operand is a smi.
4356 // Check whether the non-smi is a heap number.
4357 STATIC_ASSERT(kSmiTagMask == 1);
4358 // ecx still holds eax & kSmiTag, which is either zero or one.
4359 __ sub(ecx, Immediate(0x01));
4362 __ and_(ebx, ecx); // ebx holds either 0 or eax ^ edx.
4364 // if eax was smi, ebx is now edx, else eax.
4366 // Check if the non-smi operand is a heap number.
4367 __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
4368 Immediate(masm->isolate()->factory()->heap_number_map()));
4369 // If heap number, handle it in the slow case.
4370 __ j(equal, &slow, Label::kNear);
4371 // Return non-equal (ebx is not zero)
4376 // If either operand is a JSObject or an oddball value, then they are not
4377 // equal since their pointers are different
4378 // There is no test for undetectability in strict equality.
4380 // Get the type of the first operand.
4381 // If the first object is a JS object, we have done pointer comparison.
4382 Label first_non_object;
4383 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
4384 __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4385 __ j(below, &first_non_object, Label::kNear);
4387 // Return non-zero (eax is not zero)
4388 Label return_not_equal;
4389 STATIC_ASSERT(kHeapObjectTag != 0);
4390 __ bind(&return_not_equal);
4393 __ bind(&first_non_object);
4394 // Check for oddballs: true, false, null, undefined.
4395 __ CmpInstanceType(ecx, ODDBALL_TYPE);
4396 __ j(equal, &return_not_equal);
4398 __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ecx);
4399 __ j(above_equal, &return_not_equal);
4401 // Check for oddballs: true, false, null, undefined.
4402 __ CmpInstanceType(ecx, ODDBALL_TYPE);
4403 __ j(equal, &return_not_equal);
4405 // Fall through to the general case.
4409 // Generate the number comparison code.
4410 if (include_number_compare_) {
4411 Label non_number_comparison;
4413 if (CpuFeatures::IsSupported(SSE2)) {
4414 CpuFeatures::Scope use_sse2(SSE2);
4415 CpuFeatures::Scope use_cmov(CMOV);
4417 FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
4418 __ ucomisd(xmm0, xmm1);
4420 // Don't base result on EFLAGS when a NaN is involved.
4421 __ j(parity_even, &unordered, Label::kNear);
4422 // Return a result of -1, 0, or 1, based on EFLAGS.
4423 __ mov(eax, 0); // equal
4424 __ mov(ecx, Immediate(Smi::FromInt(1)));
4425 __ cmov(above, eax, ecx);
4426 __ mov(ecx, Immediate(Smi::FromInt(-1)));
4427 __ cmov(below, eax, ecx);
4430 FloatingPointHelper::CheckFloatOperands(
4431 masm, &non_number_comparison, ebx);
4432 FloatingPointHelper::LoadFloatOperand(masm, eax);
4433 FloatingPointHelper::LoadFloatOperand(masm, edx);
4436 // Don't base result on EFLAGS when a NaN is involved.
4437 __ j(parity_even, &unordered, Label::kNear);
4439 Label below_label, above_label;
4440 // Return a result of -1, 0, or 1, based on EFLAGS.
4441 __ j(below, &below_label, Label::kNear);
4442 __ j(above, &above_label, Label::kNear);
4444 __ Set(eax, Immediate(0));
4447 __ bind(&below_label);
4448 __ mov(eax, Immediate(Smi::FromInt(-1)));
4451 __ bind(&above_label);
4452 __ mov(eax, Immediate(Smi::FromInt(1)));
4456 // If one of the numbers was NaN, then the result is always false.
4457 // The cc is never not-equal.
4458 __ bind(&unordered);
4459 ASSERT(cc_ != not_equal);
4460 if (cc_ == less || cc_ == less_equal) {
4461 __ mov(eax, Immediate(Smi::FromInt(1)));
4463 __ mov(eax, Immediate(Smi::FromInt(-1)));
4467 // The number comparison code did not provide a valid result.
4468 __ bind(&non_number_comparison);
4471 // Fast negative check for symbol-to-symbol equality.
4472 Label check_for_strings;
4474 BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
4475 BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
4477 // We've already checked for object identity, so if both operands
4478 // are symbols they aren't equal. Register eax already holds a
4479 // non-zero value, which indicates not equal, so just return.
4483 __ bind(&check_for_strings);
4485 __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
4486 &check_unequal_objects);
4488 // Inline comparison of ASCII strings.
4490 StringCompareStub::GenerateFlatAsciiStringEquals(masm,
4496 StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
4504 __ Abort("Unexpected fall-through from string comparison");
4507 __ bind(&check_unequal_objects);
4508 if (cc_ == equal && !strict_) {
4509 // Non-strict equality. Objects are unequal if
4510 // they are both JSObjects and not undetectable,
4511 // and their pointers are different.
4512 Label not_both_objects;
4513 Label return_unequal;
4514 // At most one is a smi, so we can test for smi by adding the two.
4515 // A smi plus a heap object has the low bit set, a heap object plus
4516 // a heap object has the low bit clear.
4517 STATIC_ASSERT(kSmiTag == 0);
4518 STATIC_ASSERT(kSmiTagMask == 1);
4519 __ lea(ecx, Operand(eax, edx, times_1, 0));
4520 __ test(ecx, Immediate(kSmiTagMask));
4521 __ j(not_zero, &not_both_objects, Label::kNear);
4522 __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
4523 __ j(below, &not_both_objects, Label::kNear);
4524 __ CmpObjectType(edx, FIRST_SPEC_OBJECT_TYPE, ebx);
4525 __ j(below, &not_both_objects, Label::kNear);
4526 // We do not bail out after this point. Both are JSObjects, and
4527 // they are equal if and only if both are undetectable.
4528 // The and of the undetectable flags is 1 if and only if they are equal.
4529 __ test_b(FieldOperand(ecx, Map::kBitFieldOffset),
4530 1 << Map::kIsUndetectable);
4531 __ j(zero, &return_unequal, Label::kNear);
4532 __ test_b(FieldOperand(ebx, Map::kBitFieldOffset),
4533 1 << Map::kIsUndetectable);
4534 __ j(zero, &return_unequal, Label::kNear);
4535 // The objects are both undetectable, so they both compare as the value
4536 // undefined, and are equal.
4537 __ Set(eax, Immediate(EQUAL));
4538 __ bind(&return_unequal);
4539 // Return non-equal by returning the non-zero object pointer in eax,
4540 // or return equal if we fell through to here.
4541 __ ret(0); // eax, edx were pushed
4542 __ bind(&not_both_objects);
4545 // Push arguments below the return address.
4550 // Figure out which native to call and setup the arguments.
4551 Builtins::JavaScript builtin;
4553 builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
4555 builtin = Builtins::COMPARE;
4556 __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
4559 // Restore return address on the stack.
4562 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
4563 // tagged as a small integer.
4564 __ InvokeBuiltin(builtin, JUMP_FUNCTION);
4568 void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
// Jumps to |label| unless |object| is a non-smi whose instance type carries
// both the string tag and the symbol bit (i.e. a symbol).  |scratch| is
// clobbered.  NOTE(review): sampled listing -- the remaining parameter
// lines (label, object, scratch) are not visible here.
4572 __ JumpIfSmi(object, label);
4573 __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
4574 __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
// Isolate the string/symbol bits and require exactly "string and symbol".
4575 __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
4576 __ cmp(scratch, kSymbolTag | kStringTag);
4577 __ j(not_equal, label);
4581 void StackCheckStub::Generate(MacroAssembler* masm) {
// Delegates the stack guard check (overflow / interrupt request) to the
// runtime; takes no arguments.
4582 __ TailCallRuntime(Runtime::kStackGuard, 0, 1);
4586 void InterruptStub::Generate(MacroAssembler* masm) {
// Delegates interrupt handling to the runtime; takes no arguments.
4587 __ TailCallRuntime(Runtime::kInterrupt, 0, 1);
4591 static void GenerateRecordCallTarget(MacroAssembler* masm) {
4592 // Cache the called function in a global property cell. Cache states
4593 // are uninitialized, monomorphic (indicated by a JSFunction), and
4595 // ebx : cache cell for call target
4596 // edi : the function to call
4597 Isolate* isolate = masm->isolate();
4598 Label initialize, done;
4600 // Load the cache state into ecx.
4601 __ mov(ecx, FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset));
4603 // A monomorphic cache hit or an already megamorphic state: invoke the
4604 // function without changing the state.
// NOTE(review): sampled listing -- the compare that this j(equal) depends
// on (presumably ecx vs edi) is not visible here; confirm against the
// full source.
4606 __ j(equal, &done, Label::kNear);
4607 __ cmp(ecx, Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4608 __ j(equal, &done, Label::kNear);
4610 // A monomorphic miss (i.e, here the cache is not uninitialized) goes
4612 __ cmp(ecx, Immediate(TypeFeedbackCells::UninitializedSentinel(isolate)));
4613 __ j(equal, &initialize, Label::kNear);
4614 // MegamorphicSentinel is an immortal immovable object (undefined) so no
4615 // write-barrier is needed.
4616 __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
4617 Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)))
4618 __ jmp(&done, Label::kNear);
4620 // An uninitialized cache is patched with the function.
4621 __ bind(&initialize);
4622 __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
4623 // No need for a write barrier here - cells are rescanned.
4629 void CallFunctionStub::Generate(MacroAssembler* masm) {
// Calls edi as a function: patches an implicit (hole) receiver with the
// global receiver, records call-target feedback if requested, invokes
// JSFunctions directly, and routes proxies / non-functions to builtins.
// NOTE(review): sampled listing -- several lines (InvokeFunction argument
// lists, the __ bind(&slow), some closing braces) are not visible here.
4630 // ebx : cache cell for call target
4631 // edi : the function to call
4632 Isolate* isolate = masm->isolate();
4633 Label slow, non_function;
4635 // The receiver might implicitly be the global object. This is
4636 // indicated by passing the hole as the receiver to the call
4638 if (ReceiverMightBeImplicit()) {
4640 // Get the receiver from the stack.
4641 // +1 ~ return address
4642 __ mov(eax, Operand(esp, (argc_ + 1) * kPointerSize));
4643 // Call as function is indicated with the hole.
4644 __ cmp(eax, isolate->factory()->the_hole_value());
4645 __ j(not_equal, &receiver_ok, Label::kNear);
4646 // Patch the receiver on the stack with the global receiver object.
4647 __ mov(ecx, GlobalObjectOperand());
4648 __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
4649 __ mov(Operand(esp, (argc_ + 1) * kPointerSize), ecx);
4650 __ bind(&receiver_ok);
4653 // Check that the function really is a JavaScript function.
4654 __ JumpIfSmi(edi, &non_function);
4655 // Goto slow case if we do not have a function.
4656 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4657 __ j(not_equal, &slow);
4659 if (RecordCallTarget()) {
4660 GenerateRecordCallTarget(masm);
4663 // Fast-case: Just invoke the function.
4664 ParameterCount actual(argc_);
4666 if (ReceiverMightBeImplicit()) {
4667 Label call_as_function;
4668 __ cmp(eax, isolate->factory()->the_hole_value());
4669 __ j(equal, &call_as_function);
4670 __ InvokeFunction(edi,
4675 __ bind(&call_as_function);
4677 __ InvokeFunction(edi,
4683 // Slow-case: Non-function called.
4685 if (RecordCallTarget()) {
4686 // If there is a call target cache, mark it megamorphic in the
4687 // non-function case. MegamorphicSentinel is an immortal immovable
4688 // object (undefined) so no write barrier is needed.
4689 __ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
4690 Immediate(TypeFeedbackCells::MegamorphicSentinel(isolate)));
4692 // Check for function proxy.
4693 __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
4694 __ j(not_equal, &non_function);
4696 __ push(edi); // put proxy as additional argument under return address
4698 __ Set(eax, Immediate(argc_ + 1));
4699 __ Set(ebx, Immediate(0));
4700 __ SetCallKind(ecx, CALL_AS_FUNCTION);
4701 __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY);
4703 Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
4704 __ jmp(adaptor, RelocInfo::CODE_TARGET);
4707 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
4708 // of the original receiver from the call site).
4709 __ bind(&non_function);
4710 __ mov(Operand(esp, (argc_ + 1) * kPointerSize), edi);
4711 __ Set(eax, Immediate(argc_));
4712 __ Set(ebx, Immediate(0));
4713 __ SetCallKind(ecx, CALL_AS_METHOD);
4714 __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
4715 Handle<Code> adaptor = isolate->builtins()->ArgumentsAdaptorTrampoline();
4716 __ jmp(adaptor, RelocInfo::CODE_TARGET);
4720 void CallConstructStub::Generate(MacroAssembler* masm) {
// Calls edi as a constructor: JSFunctions jump to their construct stub;
// proxies and non-functions are routed to the corresponding builtins via
// the arguments adaptor.  NOTE(review): sampled listing -- the jmp through
// ebx and the __ bind(&slow) are not visible here.
4721 // eax : number of arguments
4722 // ebx : cache cell for call target
4723 // edi : constructor function
4724 Label slow, non_function_call;
4726 // Check that function is not a smi.
4727 __ JumpIfSmi(edi, &non_function_call);
4728 // Check that function is a JSFunction.
4729 __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
4730 __ j(not_equal, &slow);
4732 if (RecordCallTarget()) {
4733 GenerateRecordCallTarget(masm);
4736 // Jump to the function-specific construct stub.
4737 __ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
4738 __ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
4739 __ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
4742 // edi: called object
4743 // eax: number of arguments
4747 __ CmpInstanceType(ecx, JS_FUNCTION_PROXY_TYPE);
4748 __ j(not_equal, &non_function_call);
4749 __ GetBuiltinEntry(edx, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
4752 __ bind(&non_function_call);
4753 __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
4755 // Set expected number of arguments to zero (not changing eax).
4756 __ Set(ebx, Immediate(0));
4757 Handle<Code> arguments_adaptor =
4758 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
4759 __ SetCallKind(ecx, CALL_AS_METHOD);
4760 __ jmp(arguments_adaptor, RelocInfo::CODE_TARGET);
4764 bool CEntryStub::NeedsImmovableCode() {
// NOTE(review): sampled listing -- the return statement of this function
// is not visible here.
4769 bool CEntryStub::IsPregenerated() {
// True only for the non-FP-saving variant (or once FP stubs exist).
4770 return (!save_doubles_ || ISOLATE->fp_stubs_generated()) &&
// NOTE(review): sampled listing -- the remainder of this conjunction is
// not visible here.
4775 void CodeStub::GenerateStubsAheadOfTime() {
// Pre-generates the stubs that must exist before other code can be
// compiled or collected.
4776 CEntryStub::GenerateAheadOfTime();
4777 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime();
4778 // It is important that the store buffer overflow stubs are generated first.
4779 RecordWriteStub::GenerateFixedRegStubsAheadOfTime();
4783 void CodeStub::GenerateFPStubs() {
// Generates the FP-register-saving CEntry stub, marks it pregenerated, and
// records on the isolate that FP stubs now exist.
4784 CEntryStub save_doubles(1, kSaveFPRegs);
4785 Handle<Code> code = save_doubles.GetCode();
4786 code->set_is_pregenerated(true);
4787 code->GetIsolate()->set_fp_stubs_generated(true);
4791 void CEntryStub::GenerateAheadOfTime() {
// Generates the non-FP-saving CEntry stub ahead of time and marks it
// pregenerated.
4792 CEntryStub stub(1, kDontSaveFPRegs);
4793 Handle<Code> code = stub.GetCode();
4794 code->set_is_pregenerated(true);
4798 void CEntryStub::GenerateCore(MacroAssembler* masm,
4799 Label* throw_normal_exception,
4800 Label* throw_termination_exception,
4801 Label* throw_out_of_memory_exception,
4803 bool always_allocate_scope) {
// Performs one attempt at calling the C function in ebx from the exit
// frame, optionally running a GC first and/or forcing allocation; on a
// failure result it dispatches to the retry label or one of the throw
// labels.  NOTE(review): sampled listing -- some lines (the do_gc guard,
// the C call itself, okay/retry label binds, Abort calls) are not visible
// here.
4804 // eax: result parameter for PerformGC, if any
4805 // ebx: pointer to C function (C callee-saved)
4806 // ebp: frame pointer (restored after C call)
4807 // esp: stack pointer (restored after C call)
4808 // edi: number of arguments including receiver (C callee-saved)
4809 // esi: pointer to the first argument (C callee-saved)
4811 // Result returned in eax, or eax+edx if result_size_ is 2.
4813 // Check stack alignment.
4814 if (FLAG_debug_code) {
4815 __ CheckStackAlignment();
4819 // Pass failure code returned from last attempt as first argument to
4820 // PerformGC. No need to use PrepareCallCFunction/CallCFunction here as the
4821 // stack alignment is known to be correct. This function takes one argument
4822 // which is passed on the stack, and we know that the stack has been
4823 // prepared to pass at least one argument.
4824 __ mov(Operand(esp, 0 * kPointerSize), eax); // Result.
4825 __ call(FUNCTION_ADDR(Runtime::PerformGC), RelocInfo::RUNTIME_ENTRY);
4828 ExternalReference scope_depth =
4829 ExternalReference::heap_always_allocate_scope_depth(masm->isolate());
4830 if (always_allocate_scope) {
4831 __ inc(Operand::StaticVariable(scope_depth));
4835 __ mov(Operand(esp, 0 * kPointerSize), edi); // argc.
4836 __ mov(Operand(esp, 1 * kPointerSize), esi); // argv.
4837 __ mov(Operand(esp, 2 * kPointerSize),
4838 Immediate(ExternalReference::isolate_address()));
4840 // Result is in eax or edx:eax - do not destroy these registers!
4842 if (always_allocate_scope) {
4843 __ dec(Operand::StaticVariable(scope_depth));
4846 // Make sure we're not trying to return 'the hole' from the runtime
4847 // call as this may lead to crashes in the IC code later.
4848 if (FLAG_debug_code) {
4850 __ cmp(eax, masm->isolate()->factory()->the_hole_value());
4851 __ j(not_equal, &okay, Label::kNear);
4856 // Check for failure result.
4857 Label failure_returned;
4858 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
4859 __ lea(ecx, Operand(eax, 1));
4860 // Lower 2 bits of ecx are 0 iff eax has failure tag.
4861 __ test(ecx, Immediate(kFailureTagMask));
4862 __ j(zero, &failure_returned);
4864 ExternalReference pending_exception_address(
4865 Isolate::kPendingExceptionAddress, masm->isolate());
4867 // Check that there is no pending exception, otherwise we
4868 // should have returned some failure value.
4869 if (FLAG_debug_code) {
4871 __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
4873 __ cmp(edx, Operand::StaticVariable(pending_exception_address));
4874 // Cannot use check here as it attempts to generate call into runtime.
4875 __ j(equal, &okay, Label::kNear);
4881 // Exit the JavaScript to C++ exit frame.
4882 __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
4885 // Handling of failure.
4886 __ bind(&failure_returned);
4889 // If the returned exception is RETRY_AFTER_GC continue at retry label
4890 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
4891 __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
4892 __ j(zero, &retry, Label::kNear);
4894 // Special handling of out of memory exceptions.
4895 __ cmp(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4896 __ j(equal, throw_out_of_memory_exception);
4898 // Retrieve the pending exception and clear the variable.
4899 __ mov(eax, Operand::StaticVariable(pending_exception_address));
4900 __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
4901 __ mov(Operand::StaticVariable(pending_exception_address), edx);
4903 // Special handling of termination exceptions which are uncatchable
4904 // by javascript code.
4905 __ cmp(eax, masm->isolate()->factory()->termination_exception());
4906 __ j(equal, throw_termination_exception);
4908 // Handle normal exception.
4909 __ jmp(throw_normal_exception);
4916 void CEntryStub::Generate(MacroAssembler* masm) {
// Entry point for runtime calls from JS code: enters the exit frame, then
// calls GenerateCore up to three times (plain, space-specific GC, full GC)
// before throwing.  NOTE(review): sampled listing -- the GenerateCore(...)
// call headers and some trailing lines (e.g. the final throw) are not
// visible here.
4917 // eax: number of arguments including receiver
4918 // ebx: pointer to C function (C callee-saved)
4919 // ebp: frame pointer (restored after C call)
4920 // esp: stack pointer (restored after C call)
4921 // esi: current context (C callee-saved)
4922 // edi: JS function of the caller (C callee-saved)
4924 // NOTE: Invocations of builtins may return failure objects instead
4925 // of a proper result. The builtin entry handles this by performing
4926 // a garbage collection and retrying the builtin (twice).
4928 // Enter the exit frame that transitions from JavaScript to C++.
4929 __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
4931 // eax: result parameter for PerformGC, if any (setup below)
4932 // ebx: pointer to builtin function (C callee-saved)
4933 // ebp: frame pointer (restored after C call)
4934 // esp: stack pointer (restored after C call)
4935 // edi: number of arguments including receiver (C callee-saved)
4936 // esi: argv pointer (C callee-saved)
4938 Label throw_normal_exception;
4939 Label throw_termination_exception;
4940 Label throw_out_of_memory_exception;
4942 // Call into the runtime system.
4944 &throw_normal_exception,
4945 &throw_termination_exception,
4946 &throw_out_of_memory_exception,
4950 // Do space-specific GC and retry runtime call.
4952 &throw_normal_exception,
4953 &throw_termination_exception,
4954 &throw_out_of_memory_exception,
4958 // Do full GC and retry runtime call one final time.
4959 Failure* failure = Failure::InternalError();
4960 __ mov(eax, Immediate(reinterpret_cast<int32_t>(failure)));
4962 &throw_normal_exception,
4963 &throw_termination_exception,
4964 &throw_out_of_memory_exception,
4968 __ bind(&throw_out_of_memory_exception);
4969 // Set external caught exception to false.
4970 Isolate* isolate = masm->isolate();
4971 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress,
4973 __ mov(Operand::StaticVariable(external_caught), Immediate(false));
4975 // Set pending exception and eax to out of memory exception.
4976 ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
4978 __ mov(eax, reinterpret_cast<int32_t>(Failure::OutOfMemoryException()));
4979 __ mov(Operand::StaticVariable(pending_exception), eax);
4980 // Fall through to the next label.
4982 __ bind(&throw_termination_exception);
4983 __ ThrowUncatchable(eax);
4985 __ bind(&throw_normal_exception);
4990 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Builds the C++-to-JavaScript entry frame: saves callee-saved registers
// and the previous C entry frame pointer, marks the outermost JS entry,
// installs a JS_ENTRY try handler, and calls through the (construct) entry
// trampoline; on return it unwinds everything in reverse order.
// FIX(review): the label references below were garbled by HTML-entity
// mangling ("&not" had become the character U+00AC); restored to
// &not_outermost_js / &not_outermost_js_2.
// NOTE(review): sampled listing -- some lines (prologue push/mov, register
// saves/restores, the call through edx, PopTryHandler, the final ret) are
// not visible here.
4991 Label invoke, handler_entry, exit;
4992 Label not_outermost_js, not_outermost_js_2;
4998 // Push marker in two places.
4999 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
5000 __ push(Immediate(Smi::FromInt(marker))); // context slot
5001 __ push(Immediate(Smi::FromInt(marker))); // function slot
5002 // Save callee-saved registers (C calling conventions).
5007 // Save copies of the top frame descriptor on the stack.
5008 ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, masm->isolate());
5009 __ push(Operand::StaticVariable(c_entry_fp));
5011 // If this is the outermost JS call, set js_entry_sp value.
5012 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress,
5014 __ cmp(Operand::StaticVariable(js_entry_sp), Immediate(0));
5015 __ j(not_equal, &not_outermost_js, Label::kNear);
5016 __ mov(Operand::StaticVariable(js_entry_sp), ebp);
5017 __ push(Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
5019 __ jmp(&cont, Label::kNear);
5020 __ bind(&not_outermost_js);
5021 __ push(Immediate(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
5024 // Jump to a faked try block that does the invoke, with a faked catch
5025 // block that sets the pending exception.
5027 __ bind(&handler_entry);
5028 handler_offset_ = handler_entry.pos();
5029 // Caught exception: Store result (exception) in the pending exception
5030 // field in the JSEnv and return a failure sentinel.
5031 ExternalReference pending_exception(Isolate::kPendingExceptionAddress,
5033 __ mov(Operand::StaticVariable(pending_exception), eax);
5034 __ mov(eax, reinterpret_cast<int32_t>(Failure::Exception()));
5037 // Invoke: Link this frame into the handler chain. There's only one
5038 // handler block in this code object, so its index is 0.
5040 __ PushTryHandler(StackHandler::JS_ENTRY, 0);
5042 // Clear any pending exceptions.
5043 __ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
5044 __ mov(Operand::StaticVariable(pending_exception), edx);
5046 // Fake a receiver (NULL).
5047 __ push(Immediate(0)); // receiver
5049 // Invoke the function by calling through JS entry trampoline builtin and
5050 // pop the faked function when we return. Notice that we cannot store a
5051 // reference to the trampoline code directly in this stub, because the
5052 // builtin stubs may not have been generated yet.
5054 ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
5056 __ mov(edx, Immediate(construct_entry));
5058 ExternalReference entry(Builtins::kJSEntryTrampoline,
5060 __ mov(edx, Immediate(entry));
5062 __ mov(edx, Operand(edx, 0)); // deref address
5063 __ lea(edx, FieldOperand(edx, Code::kHeaderSize));
5066 // Unlink this frame from the handler chain.
5070 // Check if the current stack frame is marked as the outermost JS frame.
5072 __ cmp(ebx, Immediate(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
5073 __ j(not_equal, &not_outermost_js_2);
5074 __ mov(Operand::StaticVariable(js_entry_sp), Immediate(0));
5075 __ bind(&not_outermost_js_2);
5077 // Restore the top frame descriptor from the stack.
5078 __ pop(Operand::StaticVariable(ExternalReference(
5079 Isolate::kCEntryFPAddress,
5082 // Restore callee-saved registers (C calling conventions).
5086 __ add(esp, Immediate(2 * kPointerSize)); // remove markers
5088 // Restore frame pointer and return.
5094 // Generate stub code for instanceof.
5095 // This code can patch a call site inlined cache of the instance of check,
5096 // which looks like this.
5098 // 81 ff XX XX XX XX cmp edi, <the hole, patched to a map>
5099 // 75 0a jne <some near label>
5100 // b8 XX XX XX XX mov eax, <the hole, patched to either true or false>
5102 // If call site patching is requested the stack will have the delta from the
5103 // return address to the cmp instruction just below the return address. This
5104 // also means that call site patching can only take place with arguments in
5105 // registers. TOS looks like this when call site patching is requested
5107 // esp[0] : return address
5108 // esp[4] : delta from return address to cmp instruction
5110 void InstanceofStub::Generate(MacroAssembler* masm) {
// Emits the `instanceof` stub: walks the prototype chain of the object
// (lhs, eax) looking for the prototype of the function (rhs, edx), caching
// the (map, function, answer) triple in the roots array, and optionally
// patching an inlined call-site cache (cmp/mov immediates) instead.
// NOTE(review): extraction artifact — each line carries a stray embedded
// line number, gaps in that numbering (e.g. 5112 -> 5114) mark dropped
// lines (closing braces, label declarations, trailing call arguments), and
// "&not_..." label references were mangled to "¬_...". Restore this
// block from the upstream file before compiling; comments below flag the
// most important drops.
5111 // Call site inlining and patching implies arguments in registers.
5112 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
5114 // Fixed register usage throughout the stub.
5115 Register object = eax; // Object (lhs).
5116 Register map = ebx; // Map of the object.
5117 Register function = edx; // Function (rhs).
5118 Register prototype = edi; // Prototype of the function.
5119 Register scratch = ecx;
5121 // Constants describing the call site code to patch.
5122 static const int kDeltaToCmpImmediate = 2;
5123 static const int kDeltaToMov = 8;
5124 static const int kDeltaToMovImmediate = 9;
// 0x3b 0x3d is the encoding of "cmp edi, <imm32 operand>"; 0xb8 is
// "mov eax, <imm32>" — these bytes are asserted below before patching.
5125 static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
5126 static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
5127 static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
5129 ExternalReference roots_array_start =
5130 ExternalReference::roots_array_start(masm->isolate());
5132 ASSERT_EQ(object.code(), InstanceofStub::left().code());
5133 ASSERT_EQ(function.code(), InstanceofStub::right().code());
5135 // Get the object and function - they are always both needed.
5136 Label slow, not_js_object;
5137 if (!HasArgsInRegisters()) {
5138 __ mov(object, Operand(esp, 2 * kPointerSize));
5139 __ mov(function, Operand(esp, 1 * kPointerSize));
// NOTE(review): the closing '}' of the if-block above was dropped here.
5142 // Check that the left hand is a JS object.
5143 __ JumpIfSmi(object, ¬_js_object);
5144 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object);
5146 // If there is a call site cache don't look in the global cache, but do the
5147 // real lookup and update the call site cache.
5148 if (!HasCallSiteInlineCheck()) {
5149 // Look up the function and the map in the instanceof cache.
// NOTE(review): a `Label miss;` declaration was dropped here (it is
// referenced by the three near-jumps below).
5151 __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5152 __ cmp(function, Operand::StaticArray(scratch,
5154 roots_array_start));
5155 __ j(not_equal, &miss, Label::kNear);
5156 __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
5157 __ cmp(map, Operand::StaticArray(
5158 scratch, times_pointer_size, roots_array_start));
5159 __ j(not_equal, &miss, Label::kNear);
5160 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
// Cache hit: the cached answer is returned directly.
5161 __ mov(eax, Operand::StaticArray(
5162 scratch, times_pointer_size, roots_array_start));
5163 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
// NOTE(review): the `__ bind(&miss);` and closing '}' were dropped here.
5167 // Get the prototype of the function.
5168 __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
5170 // Check that the function prototype is a JS object.
5171 __ JumpIfSmi(prototype, &slow);
5172 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
5174 // Update the global instanceof or call site inlined cache with the current
5175 // map and function. The cached answer will be set when it is known below.
5176 if (!HasCallSiteInlineCheck()) {
5177 __ mov(scratch, Immediate(Heap::kInstanceofCacheMapRootIndex));
// NOTE(review): the second argument (the value stored, `map`) of this mov
// and of the one after the next were dropped by extraction.
5178 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
5180 __ mov(scratch, Immediate(Heap::kInstanceofCacheFunctionRootIndex));
5181 __ mov(Operand::StaticArray(scratch, times_pointer_size, roots_array_start),
// NOTE(review): a `} else {` was dropped here — the patching code below
// runs only when HasCallSiteInlineCheck().
5184 // The constants for the code patching are based on no push instructions
5185 // at the call site.
5186 ASSERT(HasArgsInRegisters());
5187 // Get return address and delta to inlined map check.
5188 __ mov(scratch, Operand(esp, 0 * kPointerSize));
5189 __ sub(scratch, Operand(esp, 1 * kPointerSize));
5190 if (FLAG_debug_code) {
5191 __ cmpb(Operand(scratch, 0), kCmpEdiOperandByte1);
5192 __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 1)");
5193 __ cmpb(Operand(scratch, 1), kCmpEdiOperandByte2);
5194 __ Assert(equal, "InstanceofStub unexpected call site cache (cmp 2)");
// Patch the cmp immediate at the call site with the object's map.
5196 __ mov(scratch, Operand(scratch, kDeltaToCmpImmediate));
5197 __ mov(Operand(scratch, 0), map);
5200 // Loop through the prototype chain of the object looking for the function
5202 __ mov(scratch, FieldOperand(map, Map::kPrototypeOffset));
5203 Label loop, is_instance, is_not_instance;
// NOTE(review): the `__ bind(&loop);` line was dropped here.
5205 __ cmp(scratch, prototype);
5206 __ j(equal, &is_instance, Label::kNear);
5207 Factory* factory = masm->isolate()->factory();
// Reaching null terminates the chain: not an instance.
5208 __ cmp(scratch, Immediate(factory->null_value()));
5209 __ j(equal, &is_not_instance, Label::kNear);
5210 __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
5211 __ mov(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
5214 __ bind(&is_instance);
5215 if (!HasCallSiteInlineCheck()) {
// 0 encodes "true" for the non-inlined caller protocol; also cached.
5216 __ Set(eax, Immediate(0));
5217 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5218 __ mov(Operand::StaticArray(scratch,
5219 times_pointer_size, roots_array_start), eax);
5221 // Get return address and delta to inlined map check.
5222 __ mov(eax, factory->true_value());
5223 __ mov(scratch, Operand(esp, 0 * kPointerSize));
5224 __ sub(scratch, Operand(esp, 1 * kPointerSize));
5225 if (FLAG_debug_code) {
5226 __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
5227 __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
// Patch the call-site mov immediate with the true object.
5229 __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
5230 if (!ReturnTrueFalseObject()) {
5231 __ Set(eax, Immediate(0));
5234 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5236 __ bind(&is_not_instance);
5237 if (!HasCallSiteInlineCheck()) {
// Smi 1 encodes "false"; also stored in the global answer cache.
5238 __ Set(eax, Immediate(Smi::FromInt(1)));
5239 __ mov(scratch, Immediate(Heap::kInstanceofCacheAnswerRootIndex));
5240 __ mov(Operand::StaticArray(
5241 scratch, times_pointer_size, roots_array_start), eax);
5243 // Get return address and delta to inlined map check.
5244 __ mov(eax, factory->false_value());
5245 __ mov(scratch, Operand(esp, 0 * kPointerSize));
5246 __ sub(scratch, Operand(esp, 1 * kPointerSize));
5247 if (FLAG_debug_code) {
5248 __ cmpb(Operand(scratch, kDeltaToMov), kMovEaxImmediateByte);
5249 __ Assert(equal, "InstanceofStub unexpected call site cache (mov)");
5251 __ mov(Operand(scratch, kDeltaToMovImmediate), eax);
5252 if (!ReturnTrueFalseObject()) {
5253 __ Set(eax, Immediate(Smi::FromInt(1)));
5256 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5258 Label object_not_null, object_not_null_or_smi;
5259 __ bind(¬_js_object);
5260 // Before null, smi and string value checks, check that the rhs is a function
5261 // as for a non-function rhs an exception needs to be thrown.
5262 __ JumpIfSmi(function, &slow, Label::kNear);
5263 __ CmpObjectType(function, JS_FUNCTION_TYPE, scratch);
5264 __ j(not_equal, &slow, Label::kNear);
5266 // Null is not instance of anything.
5267 __ cmp(object, factory->null_value());
5268 __ j(not_equal, &object_not_null, Label::kNear);
5269 __ Set(eax, Immediate(Smi::FromInt(1)));
5270 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5272 __ bind(&object_not_null);
5273 // Smi values is not instance of anything.
5274 __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
5275 __ Set(eax, Immediate(Smi::FromInt(1)));
5276 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5278 __ bind(&object_not_null_or_smi);
5279 // String values is not instance of anything.
5280 Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
5281 __ j(NegateCondition(is_string), &slow, Label::kNear);
5282 __ Set(eax, Immediate(Smi::FromInt(1)));
5283 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5285 // Slow-case: Go through the JavaScript implementation.
// NOTE(review): the `__ bind(&slow);` line was dropped here.
5287 if (!ReturnTrueFalseObject()) {
5288 // Tail call the builtin which returns 0 or 1.
5289 if (HasArgsInRegisters()) {
5290 // Push arguments below return address.
// NOTE(review): the pop/push reshuffle of edx/eax around the return
// address (lines 5291-5295) was dropped here.
5296 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
5298 // Call the builtin and convert 0/1 to true/false.
5300 FrameScope scope(masm, StackFrame::INTERNAL);
// NOTE(review): the pushes of object/function (lines 5301-5302) before
// the builtin call were dropped here.
5303 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
5305 Label true_value, done;
// The builtin returns 0 for "is an instance"; test dropped at line 5306.
5307 __ j(zero, &true_value, Label::kNear);
5308 __ mov(eax, factory->false_value());
5309 __ jmp(&done, Label::kNear);
5310 __ bind(&true_value);
5311 __ mov(eax, factory->true_value());
// NOTE(review): `__ bind(&done);` and closing braces were dropped here.
5313 __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
5318 Register InstanceofStub::left() { return eax; }
5321 Register InstanceofStub::right() { return edx; }
5324 int CompareStub::MinorKey() {
5325 // Encode the three parameters in a unique 16 bit value. To avoid duplicate
5326 // stubs the never NaN NaN condition is only taken into account if the
5327 // condition is equals.
5328 ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
5329 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5330 return ConditionField::encode(static_cast<unsigned>(cc_))
5331 | RegisterField::encode(false) // lhs_ and rhs_ are not used
5332 | StrictField::encode(strict_)
5333 | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
5334 | IncludeNumberCompareField::encode(include_number_compare_)
5335 | IncludeSmiCompareField::encode(include_smi_compare_);
5339 // Unfortunately you have to run without snapshots to see most of these
5340 // names in the profile since most compare stubs end up in the snapshot.
5341 void CompareStub::PrintName(StringStream* stream) {
5342 ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
5343 const char* cc_name;
5345 case less: cc_name = "LT"; break;
5346 case greater: cc_name = "GT"; break;
5347 case less_equal: cc_name = "LE"; break;
5348 case greater_equal: cc_name = "GE"; break;
5349 case equal: cc_name = "EQ"; break;
5350 case not_equal: cc_name = "NE"; break;
5351 default: cc_name = "UnknownCondition"; break;
5353 bool is_equality = cc_ == equal || cc_ == not_equal;
5354 stream->Add("CompareStub_%s", cc_name);
5355 if (strict_ && is_equality) stream->Add("_STRICT");
5356 if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
5357 if (!include_number_compare_) stream->Add("_NO_NUMBER");
5358 if (!include_smi_compare_) stream->Add("_NO_SMI");
5362 // -------------------------------------------------------------------------
5363 // StringCharCodeAtGenerator
5365 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
5366 // If the receiver is a smi trigger the non-string case.
5367 STATIC_ASSERT(kSmiTag == 0);
5368 __ JumpIfSmi(object_, receiver_not_string_);
5370 // Fetch the instance type of the receiver into result register.
5371 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5372 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5373 // If the receiver is not a string trigger the non-string case.
5374 __ test(result_, Immediate(kIsNotStringMask));
5375 __ j(not_zero, receiver_not_string_);
5377 // If the index is non-smi trigger the non-smi case.
5378 STATIC_ASSERT(kSmiTag == 0);
5379 __ JumpIfNotSmi(index_, &index_not_smi_);
5380 __ bind(&got_smi_index_);
5382 // Check for index out of range.
5383 __ cmp(index_, FieldOperand(object_, String::kLengthOffset));
5384 __ j(above_equal, index_out_of_range_);
5386 __ SmiUntag(index_);
5388 Factory* factory = masm->isolate()->factory();
5389 StringCharLoadGenerator::Generate(
5390 masm, factory, object_, index_, result_, &call_runtime_);
// Slow cases for String.charCodeAt reached from GenerateFast: a non-smi
// (heap number) index is converted via the runtime, and complex strings are
// handled by Runtime::kStringCharCodeAt.
// NOTE(review): extraction artifact — stray embedded line numbers, and gaps
// in them (e.g. 5404 -> 5406) mark dropped lines: the CheckMap call head and
// its trailing arguments, an `} else {`, several closing braces, the
// object_/index_ pushes before the runtime call, and the final
// `__ jmp(&exit_);`. Restore from upstream before compiling.
5397 void StringCharCodeAtGenerator::GenerateSlow(
5398 MacroAssembler* masm,
5399 const RuntimeCallHelper& call_helper) {
5400 __ Abort("Unexpected fallthrough to CharCodeAt slow case");
5402 // Index is not a smi.
5403 __ bind(&index_not_smi_);
5404 // If index is a heap number, try converting it to an integer.
// NOTE(review): this is the middle argument of a dropped `__ CheckMap(...)`
// call (its head and tail lines were lost).
5406 masm->isolate()->factory()->heap_number_map(),
5409 call_helper.BeforeCall(masm);
5411 __ push(index_); // Consumed by runtime conversion function.
5412 if (index_flags_ == STRING_INDEX_IS_NUMBER) {
5413 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
5415 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
5416 // NumberToSmi discards numbers that are not exact integers.
5417 __ CallRuntime(Runtime::kNumberToSmi, 1);
5419 if (!index_.is(eax)) {
5420 // Save the conversion result before the pop instructions below
5421 // have a chance to overwrite it.
5422 __ mov(index_, eax);
5425 // Reload the instance type.
5426 __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
5427 __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
5428 call_helper.AfterCall(masm);
5429 // If index is still not a smi, it must be out of range.
5430 STATIC_ASSERT(kSmiTag == 0);
5431 __ JumpIfNotSmi(index_, index_out_of_range_);
5432 // Otherwise, return to the fast path.
5433 __ jmp(&got_smi_index_);
5435 // Call runtime. We get here when the receiver is a string and the
5436 // index is a number, but the code of getting the actual character
5437 // is too complex (e.g., when the string needs to be flattened).
5438 __ bind(&call_runtime_);
5439 call_helper.BeforeCall(masm);
// NOTE(review): the pushes of object_ and the re-tagged index_ (the two
// runtime arguments, lines 5440-5442) were dropped here.
5443 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
5444 if (!result_.is(eax)) {
5445 __ mov(result_, eax);
5447 call_helper.AfterCall(masm);
// NOTE(review): the `__ jmp(&exit_);` returning to the fast path's exit
// was dropped here.
5450 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
5454 // -------------------------------------------------------------------------
5455 // StringCharFromCodeGenerator
5457 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
5458 // Fast case of Heap::LookupSingleCharacterStringFromCode.
5459 STATIC_ASSERT(kSmiTag == 0);
5460 STATIC_ASSERT(kSmiShiftSize == 0);
5461 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
5463 Immediate(kSmiTagMask |
5464 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
5465 __ j(not_zero, &slow_case_);
5467 Factory* factory = masm->isolate()->factory();
5468 __ Set(result_, Immediate(factory->single_character_string_cache()));
5469 STATIC_ASSERT(kSmiTag == 0);
5470 STATIC_ASSERT(kSmiTagSize == 1);
5471 STATIC_ASSERT(kSmiShiftSize == 0);
5472 // At this point code register contains smi tagged ASCII char code.
5473 __ mov(result_, FieldOperand(result_,
5474 code_, times_half_pointer_size,
5475 FixedArray::kHeaderSize));
5476 __ cmp(result_, factory->undefined_value());
5477 __ j(equal, &slow_case_);
5482 void StringCharFromCodeGenerator::GenerateSlow(
5483 MacroAssembler* masm,
5484 const RuntimeCallHelper& call_helper) {
5485 __ Abort("Unexpected fallthrough to CharFromCode slow case");
5487 __ bind(&slow_case_);
5488 call_helper.BeforeCall(masm);
5490 __ CallRuntime(Runtime::kCharFromCode, 1);
5491 if (!result_.is(eax)) {
5492 __ mov(result_, eax);
5494 call_helper.AfterCall(masm);
5497 __ Abort("Unexpected fallthrough from CharFromCode slow case");
5501 // -------------------------------------------------------------------------
5502 // StringCharAtGenerator
5504 void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
5505 char_code_at_generator_.GenerateFast(masm);
5506 char_from_code_generator_.GenerateFast(masm);
5510 void StringCharAtGenerator::GenerateSlow(
5511 MacroAssembler* masm,
5512 const RuntimeCallHelper& call_helper) {
5513 char_code_at_generator_.GenerateSlow(masm, call_helper);
5514 char_from_code_generator_.GenerateSlow(masm, call_helper);
5518 void StringAddStub::Generate(MacroAssembler* masm) {
// Emits string concatenation: arguments on the stack (eax = first,
// edx = second), with fast paths for an empty operand, two one-character
// strings (symbol table probe), cons-string allocation, and flat
// sequential/external copies; falls back to Runtime::kStringAdd or an
// Invoke of the ADD / STRING_ADD_* builtin.
// NOTE(review): extraction artifact — every line carries a stray embedded
// line number; gaps in that numbering mark dropped lines (closing braces,
// or-instructions, label binds, pushes/pops). Restore this block from the
// upstream file before compiling; key drops are flagged below.
5519 Label call_runtime, call_builtin;
5520 Builtins::JavaScript builtin_id = Builtins::ADD;
5522 // Load the two arguments.
5523 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
5524 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
5526 // Make sure that both arguments are strings if not known in advance.
5527 if (flags_ == NO_STRING_ADD_FLAGS) {
5528 __ JumpIfSmi(eax, &call_runtime);
5529 __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ebx);
5530 __ j(above_equal, &call_runtime);
5532 // First argument is a a string, test second.
5533 __ JumpIfSmi(edx, &call_runtime);
5534 __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ebx);
5535 __ j(above_equal, &call_runtime);
// NOTE(review): a `} else {` was dropped here.
5537 // Here at least one of the arguments is definitely a string.
5538 // We convert the one that is not known to be a string.
5539 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) {
5540 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0);
// NOTE(review): the trailing `&call_builtin);` argument of this call and
// the one below (lines 5542 / 5547) were dropped.
5541 GenerateConvertArgument(masm, 2 * kPointerSize, eax, ebx, ecx, edi,
5543 builtin_id = Builtins::STRING_ADD_RIGHT;
5544 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) {
5545 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0);
5546 GenerateConvertArgument(masm, 1 * kPointerSize, edx, ebx, ecx, edi,
5548 builtin_id = Builtins::STRING_ADD_LEFT;
5552 // Both arguments are strings.
5553 // eax: first string
5554 // edx: second string
5555 // Check if either of the strings are empty. In that case return the other.
5556 Label second_not_zero_length, both_not_zero_length;
5557 __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
5558 STATIC_ASSERT(kSmiTag == 0);
// NOTE(review): the `__ test(ecx, ecx);` (line 5559) was dropped.
5560 __ j(not_zero, &second_not_zero_length, Label::kNear);
5561 // Second string is empty, result is first string which is already in eax.
5562 Counters* counters = masm->isolate()->counters();
5563 __ IncrementCounter(counters->string_add_native(), 1);
5564 __ ret(2 * kPointerSize);
5565 __ bind(&second_not_zero_length);
5566 __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
5567 STATIC_ASSERT(kSmiTag == 0);
// NOTE(review): the `__ test(ebx, ebx);` (line 5568) was dropped.
5569 __ j(not_zero, &both_not_zero_length, Label::kNear);
5570 // First string is empty, result is second string which is in edx.
// NOTE(review): the `__ mov(eax, edx);` (line 5571) was dropped.
5572 __ IncrementCounter(counters->string_add_native(), 1);
5573 __ ret(2 * kPointerSize);
5575 // Both strings are non-empty.
5576 // eax: first string
5577 // ebx: length of first string as a smi
5578 // ecx: length of second string as a smi
5579 // edx: second string
5580 // Look at the length of the result of adding the two strings.
5581 Label string_add_flat_result, longer_than_two;
5582 __ bind(&both_not_zero_length);
// NOTE(review): the overflow-checked `__ add(ebx, ecx);` (line 5583) was
// dropped — it produces the combined length tested just below.
5584 STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
5585 // Handle exceptionally long strings in the runtime system.
5586 __ j(overflow, &call_runtime);
5587 // Use the symbol table when adding two one character strings, as it
5588 // helps later optimizations to return a symbol here.
5589 __ cmp(ebx, Immediate(Smi::FromInt(2)));
5590 __ j(not_equal, &longer_than_two);
5592 // Check that both strings are non-external ASCII strings.
5593 __ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
5595 // Get the two characters forming the new string.
5596 __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5597 __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5599 // Try to lookup two character string in symbol table. If it is not found
5600 // just allocate a new one.
5601 Label make_two_character_string, make_two_character_string_no_reload;
5602 StringHelper::GenerateTwoCharacterSymbolTableProbe(
5603 masm, ebx, ecx, eax, edx, edi,
5604 &make_two_character_string_no_reload, &make_two_character_string);
5605 __ IncrementCounter(counters->string_add_native(), 1);
5606 __ ret(2 * kPointerSize);
5608 // Allocate a two character string.
5609 __ bind(&make_two_character_string);
5610 // Reload the arguments.
5611 __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
5612 __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
5613 // Get the two characters forming the new string.
5614 __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
5615 __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
5616 __ bind(&make_two_character_string_no_reload);
5617 __ IncrementCounter(counters->string_add_make_two_char(), 1);
5618 __ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
5619 // Pack both characters in ebx.
5620 __ shl(ecx, kBitsPerByte);
// NOTE(review): the `__ or_(ebx, ecx);` (line 5621) combining the two
// characters was dropped.
5622 // Set the characters in the new string.
5623 __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
5624 __ IncrementCounter(counters->string_add_native(), 1);
5625 __ ret(2 * kPointerSize);
5627 __ bind(&longer_than_two);
5628 // Check if resulting string will be flat.
5629 __ cmp(ebx, Immediate(Smi::FromInt(ConsString::kMinLength)));
5630 __ j(below, &string_add_flat_result);
5632 // If result is not supposed to be flat allocate a cons string object. If both
5633 // strings are ASCII the result is an ASCII cons string.
5634 Label non_ascii, allocated, ascii_data;
5635 __ mov(edi, FieldOperand(eax, HeapObject::kMapOffset));
5636 __ movzx_b(ecx, FieldOperand(edi, Map::kInstanceTypeOffset));
5637 __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
5638 __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
// NOTE(review): the `__ and_(ecx, edi);` (line 5639) intersecting the two
// instance types was dropped.
5640 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
5641 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
5642 __ test(ecx, Immediate(kStringEncodingMask));
5643 __ j(zero, &non_ascii);
5644 __ bind(&ascii_data);
5645 // Allocate an ASCII cons string.
5646 __ AllocateAsciiConsString(ecx, edi, no_reg, &call_runtime);
5647 __ bind(&allocated);
5648 // Fill the fields of the cons string.
5649 if (FLAG_debug_code) __ AbortIfNotSmi(ebx);
5650 __ mov(FieldOperand(ecx, ConsString::kLengthOffset), ebx);
5651 __ mov(FieldOperand(ecx, ConsString::kHashFieldOffset),
5652 Immediate(String::kEmptyHashField));
5653 __ mov(FieldOperand(ecx, ConsString::kFirstOffset), eax);
5654 __ mov(FieldOperand(ecx, ConsString::kSecondOffset), edx);
// NOTE(review): the `__ mov(eax, ecx);` (line 5655) moving the result into
// the return register was dropped.
5656 __ IncrementCounter(counters->string_add_native(), 1);
5657 __ ret(2 * kPointerSize);
5658 __ bind(&non_ascii);
5659 // At least one of the strings is two-byte. Check whether it happens
5660 // to contain only ASCII characters.
5661 // ecx: first instance type AND second instance type.
5662 // edi: second instance type.
5663 __ test(ecx, Immediate(kAsciiDataHintMask));
5664 __ j(not_zero, &ascii_data);
5665 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5666 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
// NOTE(review): the `__ xor_(edi, ecx);` (line 5667) was dropped.
5668 STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
5669 __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
5670 __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
5671 __ j(equal, &ascii_data);
5672 // Allocate a two byte cons string.
5673 __ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
// NOTE(review): the `__ jmp(&allocated);` (line 5674) was dropped.
5676 // We cannot encounter sliced strings or cons strings here since:
5677 STATIC_ASSERT(SlicedString::kMinLength >= ConsString::kMinLength);
5678 // Handle creating a flat result from either external or sequential strings.
5679 // Locate the first characters' locations.
5680 // eax: first string
5681 // ebx: length of resulting flat string as a smi
5682 // edx: second string
5683 Label first_prepared, second_prepared;
5684 Label first_is_sequential, second_is_sequential;
5685 __ bind(&string_add_flat_result);
5686 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
5687 __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
5688 // ecx: instance type of first string
5689 STATIC_ASSERT(kSeqStringTag == 0);
5690 __ test_b(ecx, kStringRepresentationMask);
5691 __ j(zero, &first_is_sequential, Label::kNear);
5692 // Rule out short external string and load string resource.
5693 STATIC_ASSERT(kShortExternalStringTag != 0);
5694 __ test_b(ecx, kShortExternalStringMask);
5695 __ j(not_zero, &call_runtime);
5696 __ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
5697 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5698 __ jmp(&first_prepared, Label::kNear);
5699 __ bind(&first_is_sequential);
5700 __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5701 __ bind(&first_prepared);
5703 __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
5704 __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
5705 // Check whether both strings have same encoding.
5706 // edi: instance type of second string
// NOTE(review): the `__ xor_(ecx, edi);` (line 5707) was dropped — the
// test below checks the XOR of the two encodings.
5708 __ test_b(ecx, kStringEncodingMask);
5709 __ j(not_zero, &call_runtime);
5710 STATIC_ASSERT(kSeqStringTag == 0);
5711 __ test_b(edi, kStringRepresentationMask);
5712 __ j(zero, &second_is_sequential, Label::kNear);
5713 // Rule out short external string and load string resource.
5714 STATIC_ASSERT(kShortExternalStringTag != 0);
5715 __ test_b(edi, kShortExternalStringMask);
5716 __ j(not_zero, &call_runtime);
5717 __ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
5718 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
5719 __ jmp(&second_prepared, Label::kNear);
5720 __ bind(&second_is_sequential);
5721 __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5722 __ bind(&second_prepared);
5724 // Push the addresses of both strings' first characters onto the stack.
// NOTE(review): the `__ push(edx); __ push(eax);` (lines 5725-5726) were
// dropped; the 4*/2* kPointerSize stack reloads below depend on them.
5728 Label non_ascii_string_add_flat_result, call_runtime_drop_two;
5729 // edi: instance type of second string
5730 // First string and second string have the same encoding.
5731 STATIC_ASSERT(kTwoByteStringTag == 0);
5732 __ test_b(edi, kStringEncodingMask);
5733 __ j(zero, &non_ascii_string_add_flat_result);
5735 // Both strings are ASCII strings.
5736 // ebx: length of resulting flat string as a smi
// NOTE(review): the `__ SmiUntag(ebx);` (line 5737) was dropped.
5738 __ AllocateAsciiString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
5739 // eax: result string
5741 // Locate first character of result.
// NOTE(review): the `__ mov(ecx, eax);` (line 5740) was dropped.
5742 __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
5743 // Load first argument's length and first character location. Account for
5744 // values currently on the stack when fetching arguments from it.
5745 __ mov(edx, Operand(esp, 4 * kPointerSize));
5746 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
// NOTE(review): the `__ SmiUntag(edi); __ pop(edx);` (lines 5747-5748)
// were dropped (same pattern repeats at 5758-5759, 5784-5785, 5795-5796).
5749 // eax: result string
5750 // ecx: first character of result
5751 // edx: first char of first argument
5752 // edi: length of first argument
5753 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5754 // Load second argument's length and first character location. Account for
5755 // values currently on the stack when fetching arguments from it.
5756 __ mov(edx, Operand(esp, 2 * kPointerSize));
5757 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5760 // eax: result string
5761 // ecx: next character of result
5762 // edx: first char of second argument
5763 // edi: length of second argument
5764 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, true);
5765 __ IncrementCounter(counters->string_add_native(), 1);
5766 __ ret(2 * kPointerSize);
5768 // Handle creating a flat two byte result.
5769 // eax: first string - known to be two byte
5770 // ebx: length of resulting flat string as a smi
5771 // edx: second string
5772 __ bind(&non_ascii_string_add_flat_result);
5773 // Both strings are two byte strings.
5775 __ AllocateTwoByteString(eax, ebx, ecx, edx, edi, &call_runtime_drop_two);
5776 // eax: result string
5778 // Locate first character of result.
5779 __ add(ecx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
5780 // Load second argument's length and first character location. Account for
5781 // values currently on the stack when fetching arguments from it.
5782 __ mov(edx, Operand(esp, 4 * kPointerSize));
5783 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5786 // eax: result string
5787 // ecx: first character of result
5788 // edx: first char of first argument
5789 // edi: length of first argument
5790 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5791 // Load second argument's length and first character location. Account for
5792 // values currently on the stack when fetching arguments from it.
5793 __ mov(edx, Operand(esp, 2 * kPointerSize));
5794 __ mov(edi, FieldOperand(edx, String::kLengthOffset));
5797 // eax: result string
5798 // ecx: next character of result
5799 // edx: first char of second argument
5800 // edi: length of second argument
5801 StringHelper::GenerateCopyCharacters(masm, ecx, edx, edi, ebx, false);
5802 __ IncrementCounter(counters->string_add_native(), 1);
5803 __ ret(2 * kPointerSize);
5805 // Recover stack pointer before jumping to runtime.
5806 __ bind(&call_runtime_drop_two);
// NOTE(review): the `__ Drop(2);` (line 5807) was dropped.
5808 // Just jump to runtime to add the two strings.
5809 __ bind(&call_runtime);
5810 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
5812 if (call_builtin.is_linked()) {
5813 __ bind(&call_builtin);
5814 __ InvokeBuiltin(builtin_id, JUMP_FUNCTION);
// Converts a non-string stack argument at `stack_offset` in place: numbers
// go through the number-to-string cache, safe String wrappers are unwrapped
// to their value; anything else jumps to `slow`.
// NOTE(review): extraction artifact — the remaining parameter lines of the
// signature (stack_offset, arg, scratch1..scratch3, slow; original lines
// 5820-5825) were dropped, as were several interior lines flagged below,
// and "&not_..." was mangled to "¬_...". Restore from upstream before
// compiling.
5819 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
5826 // First check if the argument is already a string.
5827 Label not_string, done;
5828 __ JumpIfSmi(arg, ¬_string);
5829 __ CmpObjectType(arg, FIRST_NONSTRING_TYPE, scratch1);
// NOTE(review): the `__ j(below, &done);` for the already-a-string case
// (line 5830) and a `not_cached` label declaration were dropped here.
5832 // Check the number to string cache.
5834 __ bind(¬_string);
5835 // Puts the cached result into scratch1.
// NOTE(review): the remaining arguments of this call (arg, scratches,
// ¬_cached; lines 5837-5842) were dropped.
5836 NumberToStringStub::GenerateLookupNumberStringCache(masm,
5843 __ mov(arg, scratch1);
5844 __ mov(Operand(esp, stack_offset), arg);
// NOTE(review): a `__ jmp(&done);` (line 5845-5846) was dropped here.
5847 // Check if the argument is a safe string wrapper.
5848 __ bind(¬_cached);
5849 __ JumpIfSmi(arg, slow);
5850 __ CmpObjectType(arg, JS_VALUE_TYPE, scratch1); // map -> scratch1.
5851 __ j(not_equal, slow);
5852 __ test_b(FieldOperand(scratch1, Map::kBitField2Offset),
5853 1 << Map::kStringWrapperSafeForDefaultValueOf);
// NOTE(review): the `__ j(zero, slow);` guard (line 5854) was dropped.
5855 __ mov(arg, FieldOperand(arg, JSValue::kValueOffset));
5856 __ mov(Operand(esp, stack_offset), arg);
// Byte-at-a-time (or word-at-a-time for two-byte strings) character copy
// from `src` to `dest`, `count` characters; used only for short strings.
// NOTE(review): extraction artifact — the remaining parameter lines of the
// signature (dest, src, count, scratch, ascii; original lines 5863-5869)
// were dropped, as were the `Label loop; __ bind(&loop);` lines, the
// `if (ascii) { ... } else { ... }` wrappers around the two copy variants,
// and the closing braces. Restore from upstream before compiling.
5862 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
5870 // This loop just copies one character at a time, as it is only used for very
// ASCII variant: copy one byte per character.
5873 __ mov_b(scratch, Operand(src, 0));
5874 __ mov_b(Operand(dest, 0), scratch);
5875 __ add(src, Immediate(1));
5876 __ add(dest, Immediate(1));
// Two-byte variant: copy one 16-bit word per character.
5878 __ mov_w(scratch, Operand(src, 0));
5879 __ mov_w(Operand(dest, 0), scratch);
5880 __ add(src, Immediate(2));
5881 __ add(dest, Immediate(2));
// Loop until all `count` characters have been copied.
5883 __ sub(count, Immediate(1));
5884 __ j(not_zero, &loop);
// Bulk character copy using `rep movs` on doublewords, with a byte-wise
// tail loop for the remaining 1-3 bytes; requires edi/esi/ecx as
// dest/src/count per the rep movs register convention.
// NOTE(review): extraction artifact — the remaining parameter lines of the
// signature (dest, src, count, scratch, ascii; original lines 5889-5893)
// were dropped, as were: the `Label done;` declaration and the early
// `__ j(zero, &done)` exits, the ascii-dependent count scaling (shl for
// two-byte strings), the `rep_movs` emission, the `and_(count, 3)` mask,
// the inner `Label loop; __ bind(&loop);`, `__ bind(&done);`, and closing
// braces. Restore from upstream before compiling.
5888 void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
5894 // Copy characters using rep movs of doublewords.
5895 // The destination is aligned on a 4 byte boundary because we are
5896 // copying to the beginning of a newly allocated string.
5897 ASSERT(dest.is(edi)); // rep movs destination
5898 ASSERT(src.is(esi)); // rep movs source
5899 ASSERT(count.is(ecx)); // rep movs count
5900 ASSERT(!scratch.is(dest));
5901 ASSERT(!scratch.is(src));
5902 ASSERT(!scratch.is(count));
5904 // Nothing to do for zero characters.
5906 __ test(count, count);
5909 // Make count the number of bytes to copy.
5914 // Don't enter the rep movs if there are less than 4 bytes to copy.
5916 __ test(count, Immediate(~3));
5917 __ j(zero, &last_bytes, Label::kNear);
5919 // Copy from edi to esi using rep movs instruction.
5920 __ mov(scratch, count);
5921 __ sar(count, 2); // Number of doublewords to copy.
5925 // Find number of bytes left.
5926 __ mov(count, scratch);
5929 // Check if there are more bytes to copy.
5930 __ bind(&last_bytes);
5931 __ test(count, count);
5934 // Copy remaining characters.
// Byte-wise tail loop for the final count & 3 bytes.
5937 __ mov_b(scratch, Operand(src, 0));
5938 __ mov_b(Operand(dest, 0), scratch);
5939 __ add(src, Immediate(1));
5940 __ add(dest, Immediate(1));
5941 __ sub(count, Immediate(1));
5942 __ j(not_zero, &loop);
// Probes the symbol table for an existing two-character symbol made of the
// characters in |c1| and |c2|. On success falls through with the symbol in
// eax; jumps to |not_found| when no such symbol exists, and to |not_probed|
// when the pair looks like an array index (digit-digit), which hashes
// differently and is never in the symbol table under this hash.
// NOTE(review): interior lines are elided -- the push of |mask|, the final
// or-combining of the characters, the scaled element load into |candidate|,
// and the jump to |not_found| after the probe loop are not all visible.
5948 void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
5956 // Register scratch3 is the general scratch register in this function.
5957 Register scratch = scratch3;
5959 // Make sure that both characters are not digits as such strings has a
5960 // different hash algorithm. Don't try to look for these in the symbol table.
5961 Label not_array_index;
// c1 in ['0'..'9']? (unsigned compare after subtracting '0')
5962 __ mov(scratch, c1);
5963 __ sub(scratch, Immediate(static_cast<int>('0')));
5964 __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
5965 __ j(above, &not_array_index, Label::kNear);
// c1 is a digit; if c2 is also a digit, bail out to |not_probed|.
5966 __ mov(scratch, c2);
5967 __ sub(scratch, Immediate(static_cast<int>('0')));
5968 __ cmp(scratch, Immediate(static_cast<int>('9' - '0')));
5969 __ j(below_equal, not_probed);
5971 __ bind(&not_array_index);
5972 // Calculate the two character string hash.
5973 Register hash = scratch1;
5974 GenerateHashInit(masm, hash, c1, scratch);
5975 GenerateHashAddCharacter(masm, hash, c2, scratch);
5976 GenerateHashGetHash(masm, hash, scratch);
5978 // Collect the two characters in a register.
// Pack c2 into byte 1 above c1 (byte 0); the combining `or` is elided here.
5979 Register chars = c1;
5980 __ shl(c2, kBitsPerByte);
5983 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
5984 // hash: hash of two character string.
5986 // Load the symbol table.
// c2 is no longer needed as a character register and is reused for the table.
5987 Register symbol_table = c2;
5988 ExternalReference roots_array_start =
5989 ExternalReference::roots_array_start(masm->isolate());
5990 __ mov(scratch, Immediate(Heap::kSymbolTableRootIndex));
5991 __ mov(symbol_table,
5992 Operand::StaticArray(scratch, times_pointer_size, roots_array_start));
5994 // Calculate capacity mask from the symbol table capacity.
// Capacity is a power of two, so capacity - 1 is a bit mask for the index.
5995 Register mask = scratch2;
5996 __ mov(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
5998 __ sub(mask, Immediate(1));
6001 // chars: two character string, char 1 in byte 0 and char 2 in byte 1.
6002 // hash: hash of two character string
6003 // symbol_table: symbol table
6004 // mask: capacity mask
6007 // Perform a number of probes in the symbol table.
// Open-addressed probing, unrolled kProbes times with per-probe labels.
6008 static const int kProbes = 4;
6009 Label found_in_symbol_table;
6010 Label next_probe[kProbes], next_probe_pop_mask[kProbes];
6011 Register candidate = scratch; // Scratch register contains candidate.
6012 for (int i = 0; i < kProbes; i++) {
6013 // Calculate entry in symbol table.
6014 __ mov(scratch, hash);
6016 __ add(scratch, Immediate(SymbolTable::GetProbeOffset(i)));
6018 __ and_(scratch, mask);
6020 // Load the entry from the symbol table.
6021 STATIC_ASSERT(SymbolTable::kEntrySize == 1);
6023 FieldOperand(symbol_table,
6026 SymbolTable::kElementsStartOffset));
6028 // If entry is undefined no string with this hash can be found.
6029 Factory* factory = masm->isolate()->factory();
6030 __ cmp(candidate, factory->undefined_value());
6031 __ j(equal, not_found);
// A hole marks a deleted entry: keep probing.
6032 __ cmp(candidate, factory->the_hole_value());
6033 __ j(equal, &next_probe[i]);
6035 // If length is not 2 the string is not a candidate.
6036 __ cmp(FieldOperand(candidate, String::kLengthOffset),
6037 Immediate(Smi::FromInt(2)));
6038 __ j(not_equal, &next_probe[i]);
6040 // As we are out of registers save the mask on the stack and use that
6041 // register as a temporary.
// NOTE(review): the push of |mask| is elided here; the pop happens either at
// next_probe_pop_mask[i] (elided) or after found_in_symbol_table below.
6043 Register temp = mask;
6045 // Check that the candidate is a non-external ASCII string.
6046 __ mov(temp, FieldOperand(candidate, HeapObject::kMapOffset));
6047 __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
6048 __ JumpIfInstanceTypeIsNotSequentialAscii(
6049 temp, temp, &next_probe_pop_mask[i]);
6051 // Check if the two characters match.
// Load the first 4 bytes of the candidate and keep only its two characters.
6052 __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
6053 __ and_(temp, 0x0000ffff);
6054 __ cmp(chars, temp);
6055 __ j(equal, &found_in_symbol_table);
6056 __ bind(&next_probe_pop_mask[i]);
6058 __ bind(&next_probe[i]);
6061 // No matching 2 character string found by probing.
6064 // Scratch register contains result when we fall through to here.
6065 Register result = candidate;
6066 __ bind(&found_in_symbol_table);
6067 __ pop(mask); // Pop saved mask from the stack.
// Callers expect the symbol in eax.
6068 if (!result.is(eax)) {
6069 __ mov(eax, result);
// First step of the string hash (Jenkins one-at-a-time style, as used by
// StringHasher): hash = (seed + character) + ((seed + character) << 10),
// then hash ^= hash >> 6. When the serializer is enabled the seed must be
// loaded from the roots array at runtime (it is not a compile-time constant);
// otherwise the heap's hash seed is folded in as an immediate via lea.
6074 void StringHelper::GenerateHashInit(MacroAssembler* masm,
6078 // hash = (seed + character) + ((seed + character) << 10);
6079 if (Serializer::enabled()) {
6080 ExternalReference roots_array_start =
6081 ExternalReference::roots_array_start(masm->isolate());
6082 __ mov(scratch, Immediate(Heap::kHashSeedRootIndex));
6083 __ mov(scratch, Operand::StaticArray(scratch,
6085 roots_array_start));
// The seed is stored as a smi in the roots array.
6086 __ SmiUntag(scratch);
6087 __ add(scratch, character);
6088 __ mov(hash, scratch);
6089 __ shl(scratch, 10);
6090 __ add(hash, scratch);
// Non-serializer path: seed is a known constant, folded into lea displacements.
6092 int32_t seed = masm->isolate()->heap()->HashSeed();
6093 __ lea(scratch, Operand(character, seed));
6094 __ shl(scratch, 10);
6095 __ lea(hash, Operand(scratch, character, times_1, seed));
6097 // hash ^= hash >> 6;
// NOTE(review): the `shr(scratch, 6)` between these two lines is elided.
6098 __ mov(scratch, hash);
6100 __ xor_(hash, scratch);
// Middle step of the string hash: folds one more character into |hash| with
// hash += character; hash += hash << 10; hash ^= hash >> 6.
// |scratch| is clobbered.
6104 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
6108 // hash += character;
6109 __ add(hash, character);
6110 // hash += hash << 10;
6111 __ mov(scratch, hash);
6112 __ shl(scratch, 10);
6113 __ add(hash, scratch);
6114 // hash ^= hash >> 6;
// NOTE(review): the `shr(scratch, 6)` between these two lines is elided.
6115 __ mov(scratch, hash);
6117 __ xor_(hash, scratch);
// Final step of the string hash: hash += hash << 3; hash ^= hash >> 11;
// hash += hash << 15; then mask to String::kHashBitMask and replace a zero
// result with StringHasher::kZeroHash (zero is reserved to mean "no hash").
// |scratch| is clobbered.
6121 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
6124 // hash += hash << 3;
// NOTE(review): the `shl(scratch, 3)` between mov and add is elided here.
6125 __ mov(scratch, hash);
6127 __ add(hash, scratch);
6128 // hash ^= hash >> 11;
6129 __ mov(scratch, hash);
6130 __ shr(scratch, 11);
6131 __ xor_(hash, scratch);
6132 // hash += hash << 15;
6133 __ mov(scratch, hash);
6134 __ shl(scratch, 15);
6135 __ add(hash, scratch);
6137 __ and_(hash, String::kHashBitMask);
6139 // if (hash == 0) hash = 27;
// The and_ above set ZF if the masked hash is zero.
6140 Label hash_not_zero;
6141 __ j(not_zero, &hash_not_zero, Label::kNear);
6142 __ mov(hash, Immediate(StringHasher::kZeroHash));
6143 __ bind(&hash_not_zero);
// Generates the SubString stub. Takes (string, from, to) from the stack,
// returns the substring in eax. Fast paths: (1) whole-string request returns
// the input unchanged; (2) long enough substrings become SlicedStrings over
// the (unpacked) underlying string; (3) otherwise characters are copied into
// a freshly allocated sequential string. Falls back to Runtime::kSubString
// on any non-flat/short-external/invalid input.
// NOTE(review): several interior lines are elided (the &runtime label
// declaration, the from<=to/from>=0 range checks, the push of edx/edi before
// allocation, and the add(esp,...) before the tail call among them).
6147 void SubStringStub::Generate(MacroAssembler* masm) {
6150 // Stack frame on entry.
6151 // esp[0]: return address
6156 // Make sure first argument is a string.
6157 __ mov(eax, Operand(esp, 3 * kPointerSize));
6158 STATIC_ASSERT(kSmiTag == 0);
6159 __ JumpIfSmi(eax, &runtime);
6160 Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
6161 __ j(NegateCondition(is_string), &runtime);
6164 // ebx: instance type
6166 // Calculate length of sub string using the smi values.
6167 __ mov(ecx, Operand(esp, 1 * kPointerSize)); // To index.
6168 __ JumpIfNotSmi(ecx, &runtime);
6169 __ mov(edx, Operand(esp, 2 * kPointerSize)); // From index.
6170 __ JumpIfNotSmi(edx, &runtime);
// Fast case: substring covers the whole string -- return input unchanged.
6172 __ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
6173 Label not_original_string;
6174 __ j(not_equal, &not_original_string, Label::kNear);
6175 Counters* counters = masm->isolate()->counters();
6176 __ IncrementCounter(counters->sub_string_native(), 1);
6177 __ ret(3 * kPointerSize);
6178 __ bind(&not_original_string);
6181 // ebx: instance type
6182 // ecx: sub string length (smi)
6183 // edx: from index (smi)
6184 // Deal with different string types: update the index if necessary
6185 // and put the underlying string into edi.
6186 Label underlying_unpacked, sliced_string, seq_or_external_string;
6187 // If the string is not indirect, it can only be sequential or external.
6188 STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
6189 STATIC_ASSERT(kIsIndirectStringMask != 0);
6190 __ test(ebx, Immediate(kIsIndirectStringMask));
6191 __ j(zero, &seq_or_external_string, Label::kNear);
6193 Factory* factory = masm->isolate()->factory();
6194 __ test(ebx, Immediate(kSlicedNotConsMask));
6195 __ j(not_zero, &sliced_string, Label::kNear);
6196 // Cons string. Check whether it is flat, then fetch first part.
6197 // Flat cons strings have an empty second part.
6198 __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
6199 factory->empty_string());
6200 __ j(not_equal, &runtime);
6201 __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
6202 // Update instance type.
6203 __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
6204 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
6205 __ jmp(&underlying_unpacked, Label::kNear);
6207 __ bind(&sliced_string);
6208 // Sliced string. Fetch parent and adjust start index by offset.
6209 __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
6210 __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
6211 // Update instance type.
6212 __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
6213 __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
6214 __ jmp(&underlying_unpacked, Label::kNear);
6216 __ bind(&seq_or_external_string);
6217 // Sequential or external string. Just move string to the expected register.
6220 __ bind(&underlying_unpacked);
// Slicing path: only taken when string slices are enabled and the result is
// at least SlicedString::kMinLength characters.
6222 if (FLAG_string_slices) {
6224 // edi: underlying subject string
6225 // ebx: instance type of underlying subject string
6226 // edx: adjusted start index (smi)
6227 // ecx: length (smi)
6228 __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
6229 // Short slice. Copy instead of slicing.
6230 __ j(less, &copy_routine);
6231 // Allocate new sliced string. At this point we do not reload the instance
6232 // type including the string encoding because we simply rely on the info
6233 // provided by the original string. It does not matter if the original
6234 // string's encoding is wrong because we always have to recheck encoding of
6235 // the newly created string's parent anyways due to externalized strings.
6236 Label two_byte_slice, set_slice_header;
6237 STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
6238 STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
6239 __ test(ebx, Immediate(kStringEncodingMask));
6240 __ j(zero, &two_byte_slice, Label::kNear);
6241 __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
6242 __ jmp(&set_slice_header, Label::kNear);
6243 __ bind(&two_byte_slice);
6244 __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
6245 __ bind(&set_slice_header);
// Fill in the slice: length, empty hash, parent string, start offset.
6246 __ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
6247 __ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
6248 Immediate(String::kEmptyHashField));
6249 __ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
6250 __ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
6251 __ IncrementCounter(counters->sub_string_native(), 1);
6252 __ ret(3 * kPointerSize);
6254 __ bind(&copy_routine);
6257 // edi: underlying subject string
6258 // ebx: instance type of underlying subject string
6259 // edx: adjusted start index (smi)
6260 // ecx: length (smi)
6261 // The subject string can only be external or sequential string of either
6262 // encoding at this point.
6263 Label two_byte_sequential, runtime_drop_two, sequential_string;
6264 STATIC_ASSERT(kExternalStringTag != 0);
6265 STATIC_ASSERT(kSeqStringTag == 0);
6266 __ test_b(ebx, kExternalStringTag);
6267 __ j(zero, &sequential_string);
6269 // Handle external string.
6270 // Rule out short external strings.
6271 STATIC_CHECK(kShortExternalStringTag != 0);
6272 __ test_b(ebx, kShortExternalStringMask);
6273 __ j(not_zero, &runtime);
6274 __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
6275 // Move the pointer so that offset-wise, it looks like a sequential string.
6276 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
6277 __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6279 __ bind(&sequential_string);
6280 // Stash away (adjusted) index and (underlying) string.
// NOTE(review): the pushes of edx and edi are elided here; runtime_drop_two
// below exists precisely to pop these two values before the runtime call.
6284 STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
6285 __ test_b(ebx, kStringEncodingMask);
6286 __ j(zero, &two_byte_sequential);
6288 // Sequential ASCII string. Allocate the result.
6289 __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
6291 // eax: result string
6292 // ecx: result string length
// esi is clobbered below as the copy source; preserve it in edx.
6293 __ mov(edx, esi); // esi used by following code.
6294 // Locate first character of result.
6296 __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
6297 // Load string argument and locate character of sub string start.
6301 __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
6303 // eax: result string
6304 // ecx: result length
6305 // edx: original value of esi
6306 // edi: first character of result
6307 // esi: character of sub string start
6308 StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
6309 __ mov(esi, edx); // Restore esi.
6310 __ IncrementCounter(counters->sub_string_native(), 1);
6311 __ ret(3 * kPointerSize);
6313 __ bind(&two_byte_sequential);
6314 // Sequential two-byte string. Allocate the result.
6315 __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
6317 // eax: result string
6318 // ecx: result string length
6319 __ mov(edx, esi); // esi used by following code.
6320 // Locate first character of result.
6323 Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
6324 // Load string argument and locate character of sub string start.
6327 // As from is a smi it is 2 times the value which matches the size of a two
6329 STATIC_ASSERT(kSmiTag == 0);
6330 STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
// ebx holds the smi start index; times_1 works because a smi is already 2x.
6331 __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
6333 // eax: result string
6334 // ecx: result length
6335 // edx: original value of esi
6336 // edi: first character of result
6337 // esi: character of sub string start
6338 StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
6339 __ mov(esi, edx); // Restore esi.
6340 __ IncrementCounter(counters->sub_string_native(), 1);
6341 __ ret(3 * kPointerSize);
6343 // Drop pushed values on the stack before tail call.
6344 __ bind(&runtime_drop_two);
6347 // Just jump to runtime to create the sub string.
6349 __ TailCallRuntime(Runtime::kSubString, 3, 1);
// Equality-only comparison of two flat ASCII strings |left| and |right|.
// Returns Smi(EQUAL) or Smi(NOT_EQUAL) in eax. Strings of different lengths
// are unequal without looking at characters; zero-length strings are equal.
// NOTE(review): the ret() after each Set(eax, ...) is elided in this view.
6353 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
6357 Register scratch2) {
6358 Register length = scratch1;
6361 Label strings_not_equal, check_zero_length;
// Different lengths => not equal, no character comparison needed.
6362 __ mov(length, FieldOperand(left, String::kLengthOffset));
6363 __ cmp(length, FieldOperand(right, String::kLengthOffset));
6364 __ j(equal, &check_zero_length, Label::kNear);
6365 __ bind(&strings_not_equal);
6366 __ Set(eax, Immediate(Smi::FromInt(NOT_EQUAL)));
6369 // Check if the length is zero.
6370 Label compare_chars;
6371 __ bind(&check_zero_length);
6372 STATIC_ASSERT(kSmiTag == 0);
// A smi length of zero tests as zero directly (kSmiTag == 0).
6373 __ test(length, length);
6374 __ j(not_zero, &compare_chars, Label::kNear);
6375 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6378 // Compare characters.
6379 __ bind(&compare_chars);
6380 GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
6381 &strings_not_equal, Label::kNear);
6383 // Characters are equal.
6384 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
// Three-way comparison of two flat ASCII strings. Compares characters up to
// the shorter length; on a tie, the length difference decides the order.
// Returns Smi(LESS), Smi(EQUAL) or Smi(GREATER) in eax.
// NOTE(review): the ret() instructions after the Set(eax, ...) results and
// the &left_shorter label declaration are elided in this view.
6389 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
6394 Register scratch3) {
6395 Counters* counters = masm->isolate()->counters();
6396 __ IncrementCounter(counters->string_compare_native(), 1);
6398 // Find minimum length.
// scratch3 = left.length - right.length (smi arithmetic, tag cancels out).
6400 __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
6401 __ mov(scratch3, scratch1);
6402 __ sub(scratch3, FieldOperand(right, String::kLengthOffset));
6404 Register length_delta = scratch3;
6406 __ j(less_equal, &left_shorter, Label::kNear);
6407 // Right string is shorter. Change scratch1 to be length of right string.
6408 __ sub(scratch1, length_delta);
6409 __ bind(&left_shorter);
6411 Register min_length = scratch1;
6413 // If either length is zero, just compare lengths.
6414 Label compare_lengths;
6415 __ test(min_length, min_length);
6416 __ j(zero, &compare_lengths, Label::kNear);
6418 // Compare characters.
6419 Label result_not_equal;
6420 GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
6421 &result_not_equal, Label::kNear);
6423 // Compare lengths - strings up to min-length are equal.
// The sign of length_delta now decides the result.
6424 __ bind(&compare_lengths);
6425 __ test(length_delta, length_delta);
6426 __ j(not_zero, &result_not_equal, Label::kNear);
6429 STATIC_ASSERT(EQUAL == 0);
6430 STATIC_ASSERT(kSmiTag == 0);
6431 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
// result_not_equal is reached with flags from either the char compare or the
// length_delta test; `greater` therefore picks GREATER in both cases.
6434 Label result_greater;
6435 __ bind(&result_not_equal);
6436 __ j(greater, &result_greater, Label::kNear);
6439 __ Set(eax, Immediate(Smi::FromInt(LESS)));
6442 // Result is GREATER.
6443 __ bind(&result_greater);
6444 __ Set(eax, Immediate(Smi::FromInt(GREATER)));
// Byte-wise comparison loop over |length| ASCII characters of |left| and
// |right|. Jumps to |chars_not_equal| on the first mismatch (with flags set
// for a signed three-way result); falls through when all characters match.
// Uses the negative-index trick: pointers are advanced past the end and the
// index runs from -length up to 0, so the loop needs no separate bound check.
// NOTE(review): the lea() targets, index negation, loop label bind, and the
// inc(index) before the back-branch are elided in this view.
6449 void StringCompareStub::GenerateAsciiCharsCompareLoop(
6450 MacroAssembler* masm,
6455 Label* chars_not_equal,
6456 Label::Distance chars_not_equal_near) {
6457 // Change index to run from -length to -1 by adding length to string
6458 // start. This means that loop ends when index reaches zero, which
6459 // doesn't need an additional compare.
6460 __ SmiUntag(length);
6462 FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
6464 FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
6466 Register index = length; // index = -length;
6471 __ mov_b(scratch, Operand(left, index, times_1, 0));
6472 __ cmpb(scratch, Operand(right, index, times_1, 0));
6473 __ j(not_equal, chars_not_equal, chars_not_equal_near);
// Loop until the (negative) index wraps to zero.
6475 __ j(not_zero, &loop);
// Entry point of the string compare stub: takes two strings on the stack,
// returns Smi(-1/0/1) in eax. Identical objects short-circuit to EQUAL;
// flat ASCII pairs are compared inline; everything else goes to the runtime.
6479 void StringCompareStub::Generate(MacroAssembler* masm) {
6482 // Stack frame on entry.
6483 // esp[0]: return address
6484 // esp[4]: right string
6485 // esp[8]: left string
6487 __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
6488 __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
// Same object => trivially equal (the cmp(edx, eax) is elided in this view).
6492 __ j(not_equal, &not_same, Label::kNear);
6493 STATIC_ASSERT(EQUAL == 0);
6494 STATIC_ASSERT(kSmiTag == 0);
6495 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6496 __ IncrementCounter(masm->isolate()->counters()->string_compare_native(), 1);
6497 __ ret(2 * kPointerSize);
6501 // Check that both objects are sequential ASCII strings.
6502 __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
6504 // Compare flat ASCII strings.
6505 // Drop arguments from the stack.
// Arguments are popped here because GenerateCompareFlatAsciiStrings returns
// via ret(0) internally; only the saved return address must remain.
6507 __ add(esp, Immediate(2 * kPointerSize));
6509 GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
6511 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
6512 // tagged as a small integer.
6514 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
// CompareIC stub specialized for two smi operands. For equality only the
// difference matters; for ordering, overflow of the subtraction must be
// corrected so the sign of eax is right.
// NOTE(review): heavily elided -- the smi check setup (combining edx/eax into
// ecx), the sub itself, the ordering fixup, ret, and the miss handler tail
// are not visible in this view.
6518 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
6519 ASSERT(state_ == CompareIC::SMIS);
// ecx holds edx|eax (or'ed) so one smi test covers both operands.
6523 __ JumpIfNotSmi(ecx, &miss, Label::kNear);
6525 if (GetCondition() == equal) {
6526 // For equality we do not care about the sign of the result.
6531 __ j(no_overflow, &done, Label::kNear);
6532 // Correct sign of result in case of overflow.
// CompareIC stub specialized for two heap-number operands. Inlines the
// double comparison with SSE2 ucomisd + cmov when available; NaN operands
// (parity flag set) fall through to the generic CompareStub. The
// maybe_undefined paths keep `undefined <op> number` ordered comparisons in
// this state instead of missing to the generic IC.
6544 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
6545 ASSERT(state_ == CompareIC::HEAP_NUMBERS);
6548 Label unordered, maybe_undefined1, maybe_undefined2;
// ecx holds edx|eax so a single smi test covers both operands.
6552 __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
6554 __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
6555 __ j(not_equal, &maybe_undefined1, Label::kNear);
6556 __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
6557 __ j(not_equal, &maybe_undefined2, Label::kNear);
6559 // Inlining the double comparison and falling back to the general compare
6560 // stub if NaN is involved or SS2 or CMOV is unsupported.
6561 if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
6562 CpuFeatures::Scope scope1(SSE2);
6563 CpuFeatures::Scope scope2(CMOV);
6565 // Load left and right operand
6566 __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
6567 __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
6570 __ ucomisd(xmm0, xmm1);
6572 // Don't base result on EFLAGS when a NaN is involved.
// ucomisd sets PF when either operand is NaN.
6573 __ j(parity_even, &unordered, Label::kNear);
6575 // Return a result of -1, 0, or 1, based on EFLAGS.
6576 // Performing mov, because xor would destroy the flag register.
6577 __ mov(eax, 0); // equal
6578 __ mov(ecx, Immediate(Smi::FromInt(1)));
6579 __ cmov(above, eax, ecx);
6580 __ mov(ecx, Immediate(Smi::FromInt(-1)));
6581 __ cmov(below, eax, ecx);
// NOTE(review): the ret() closing the SSE2 path is elided in this view.
6585 __ bind(&unordered);
6586 CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
6587 __ bind(&generic_stub);
6588 __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
6590 __ bind(&maybe_undefined1);
6591 if (Token::IsOrderedRelationalCompareOp(op_)) {
// eax is undefined: if edx is a number, treat as unordered (NaN semantics).
6592 __ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
6593 __ j(not_equal, &miss);
6594 __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
6595 __ j(not_equal, &maybe_undefined2, Label::kNear);
6599 __ bind(&maybe_undefined2);
6600 if (Token::IsOrderedRelationalCompareOp(op_)) {
6601 __ cmp(edx, Immediate(masm->isolate()->factory()->undefined_value()));
6602 __ j(equal, &unordered);
// CompareIC stub specialized for two symbol operands (equality only).
// Symbols are interned, so identity comparison suffices: equal objects yield
// Smi(EQUAL) (zero) in eax, unequal objects leave the non-zero right operand
// in eax as the "not equal" result.
// NOTE(review): the mov(tmp1, left), ret() and miss-handler tail are elided.
6610 void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
6611 ASSERT(state_ == CompareIC::SYMBOLS);
6612 ASSERT(GetCondition() == equal);
6614 // Registers containing left and right operands respectively.
6615 Register left = edx;
6616 Register right = eax;
6617 Register tmp1 = ecx;
6618 Register tmp2 = ebx;
6620 // Check that both operands are heap objects.
// and_-combining the operands lets one JumpIfSmi reject either being a smi.
6623 STATIC_ASSERT(kSmiTag == 0);
6624 __ and_(tmp1, right);
6625 __ JumpIfSmi(tmp1, &miss, Label::kNear);
6627 // Check that both operands are symbols.
6628 __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
6629 __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
6630 __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
6631 __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
// Both must have the symbol bit set; and_ then test checks both at once.
6632 STATIC_ASSERT(kSymbolTag != 0);
6633 __ and_(tmp1, tmp2);
6634 __ test(tmp1, Immediate(kIsSymbolMask));
6635 __ j(zero, &miss, Label::kNear);
6637 // Symbols are compared by identity.
6639 __ cmp(left, right);
6640 // Make sure eax is non-zero. At this point input operands are
6641 // guaranteed to be non-zero.
6642 ASSERT(right.is(eax));
6643 __ j(not_equal, &done, Label::kNear);
6644 STATIC_ASSERT(EQUAL == 0);
6645 STATIC_ASSERT(kSmiTag == 0);
6646 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
// CompareIC stub specialized for two string operands. Handles identity,
// symbol-pair fast path (equality only), and inline flat-ASCII comparison;
// anything else is pushed back onto the stack and handled by the runtime
// (kStringEquals for equality, kStringCompare for ordering).
// NOTE(review): several lines are elided -- the mov into tmp1/tmp3 before
// the string-type test, ret()s, the runtime-label argument pushes, and the
// miss-handler tail are not visible in this view.
6655 void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
6656 ASSERT(state_ == CompareIC::STRINGS);
6659 bool equality = Token::IsEqualityOp(op_);
6661 // Registers containing left and right operands respectively.
6662 Register left = edx;
6663 Register right = eax;
6664 Register tmp1 = ecx;
6665 Register tmp2 = ebx;
6666 Register tmp3 = edi;
6668 // Check that both operands are heap objects.
// and_-combining the operands lets one JumpIfSmi reject either being a smi.
6670 STATIC_ASSERT(kSmiTag == 0);
6671 __ and_(tmp1, right);
6672 __ JumpIfSmi(tmp1, &miss);
6674 // Check that both operands are strings. This leaves the instance
6675 // types loaded in tmp1 and tmp2.
6676 __ mov(tmp1, FieldOperand(left, HeapObject::kMapOffset));
6677 __ mov(tmp2, FieldOperand(right, HeapObject::kMapOffset));
6678 __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
6679 __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
6681 STATIC_ASSERT(kNotStringTag != 0);
// tmp3 holds the or of both instance types; if either has the not-string
// bit set, miss.
6683 __ test(tmp3, Immediate(kIsNotStringMask));
6684 __ j(not_zero, &miss);
6686 // Fast check for identical strings.
6688 __ cmp(left, right);
6689 __ j(not_equal, &not_same, Label::kNear);
6690 STATIC_ASSERT(EQUAL == 0);
6691 STATIC_ASSERT(kSmiTag == 0);
6692 __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
6695 // Handle not identical strings.
6698 // Check that both strings are symbols. If they are, we're done
6699 // because we already know they are not identical. But in the case of
6700 // non-equality compare, we still need to determine the order.
6703 STATIC_ASSERT(kSymbolTag != 0);
6704 __ and_(tmp1, tmp2);
6705 __ test(tmp1, Immediate(kIsSymbolMask));
6706 __ j(zero, &do_compare, Label::kNear);
6707 // Make sure eax is non-zero. At this point input operands are
6708 // guaranteed to be non-zero.
// Two distinct symbols: eax (right operand, non-zero) is the NOT_EQUAL
// result; the ret() is elided in this view.
6709 ASSERT(right.is(eax));
6711 __ bind(&do_compare);
6714 // Check that both strings are sequential ASCII.
6716 __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
6718 // Compare flat ASCII strings. Returns when done.
6720 StringCompareStub::GenerateFlatAsciiStringEquals(
6721 masm, left, right, tmp1, tmp2);
6723 StringCompareStub::GenerateCompareFlatAsciiStrings(
6724 masm, left, right, tmp1, tmp2, tmp3);
6727 // Handle more complex cases in runtime.
// Re-push the operands under the return address before the tail call.
6729 __ pop(tmp1); // Return address.
6734 __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
6736 __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
// CompareIC stub specialized for two JS_OBJECT_TYPE operands (equality
// only). Objects compare by identity; non-objects and smis miss.
// NOTE(review): the smi-check setup, the identity sub/ret, and the miss
// handler tail are elided in this view.
6744 void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
6745 ASSERT(state_ == CompareIC::OBJECTS);
// ecx holds edx|eax so one smi test covers both operands.
6749 __ JumpIfSmi(ecx, &miss, Label::kNear);
6751 __ CmpObjectType(eax, JS_OBJECT_TYPE, ecx);
6752 __ j(not_equal, &miss, Label::kNear);
6753 __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
6754 __ j(not_equal, &miss, Label::kNear);
6756 ASSERT(GetCondition() == equal);
// CompareIC stub specialized for two objects with a known, fixed map
// (|known_map_|). Both operands' maps must match it exactly; then the
// objects compare by identity.
// NOTE(review): the smi-check setup, the identity sub/ret, and the miss
// handler tail are elided in this view.
6765 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
// ecx holds edx|eax so one smi test covers both operands.
6769 __ JumpIfSmi(ecx, &miss, Label::kNear);
6771 __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
6772 __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
6773 __ cmp(ecx, known_map_);
6774 __ j(not_equal, &miss, Label::kNear);
6775 __ cmp(ebx, known_map_);
6776 __ j(not_equal, &miss, Label::kNear);
// Miss handler shared by all CompareIC states: calls the CompareIC_Miss
// runtime entry (which selects and returns a better specialized stub code
// object in eax), then tail-calls the rewritten stub with the original
// operands restored in edx/eax.
// NOTE(review): the pushes of eax, the pops restoring edx/eax after the
// frame scope, and the final jmp(edi) are elided in this view.
6786 void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
6788 // Call the runtime system in a fresh internal frame.
6789 ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
6791 FrameScope scope(masm, StackFrame::INTERNAL);
6792 __ push(edx); // Preserve edx and eax.
6794 __ push(edx); // And also use them as the arguments.
// Third argument: the token of the comparison op, as a smi.
6796 __ push(Immediate(Smi::FromInt(op_)));
6797 __ CallExternalReference(miss, 3);
6798 // Compute the entry point of the rewritten stub.
// Skip the Code object header to get the first instruction's address.
6799 __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
6804 // Do a tail call to the rewritten stub.
6809 // Helper function used to check that the dictionary doesn't contain
6810 // the property. This function may return false negatives, so miss_label
6811 // must always call a backup property check that is complete.
6812 // This function is safe to call if the receiver has fast properties.
6813 // Name must be a symbol and receiver must be a heap object.
// See the comment block above: proves |name| is ABSENT from |properties|
// (a StringDictionary) using a few inlined probes, falling back to the full
// StringDictionaryLookupStub for the remaining probes. |name| must be a
// symbol whose hash is known at compile time.
// NOTE(review): interior lines are elided -- the shr untagging the capacity,
// the and_ masking the index, the done/the_hole label handling, the stub
// CallStub, and the final flag test setup are not all visible.
6814 void StringDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
6817 Register properties,
6818 Handle<String> name,
6820 ASSERT(name->IsSymbol());
6822 // If names of slots in range from 1 to kProbes - 1 for the hash value are
6823 // not equal to the name and kProbes-th slot is not used (its name is the
6824 // undefined value), it guarantees the hash table doesn't contain the
6825 // property. It's true even if some slots represent deleted properties
6826 // (their names are the hole value).
6827 for (int i = 0; i < kInlinedProbes; i++) {
6828 // Compute the masked index: (hash + i + i * i) & mask.
6829 Register index = r0;
6830 // Capacity is smi 2^n.
6831 __ mov(index, FieldOperand(properties, kCapacityOffset));
// name->Hash() is a compile-time constant here, so the probe offset can be
// folded into a single immediate.
6834 Immediate(Smi::FromInt(name->Hash() +
6835 StringDictionary::GetProbeOffset(i))));
6837 // Scale the index by multiplying by the entry size.
6838 ASSERT(StringDictionary::kEntrySize == 3);
6839 __ lea(index, Operand(index, index, times_2, 0)); // index *= 3.
6840 Register entity_name = r0;
6841 // Having undefined at this place means the name is not contained.
// times_half_pointer_size compensates for the index still being a smi (2x).
6842 ASSERT_EQ(kSmiTagSize, 1);
6843 __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
6844 kElementsStartOffset - kHeapObjectTag));
6845 __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
6848 // Stop if found the property.
6849 __ cmp(entity_name, Handle<String>(name));
6853 // Check for the hole and skip.
6854 __ cmp(entity_name, masm->isolate()->factory()->the_hole_value());
6855 __ j(equal, &the_hole, Label::kNear);
6857 // Check if the entry name is not a symbol.
// A non-symbol key could still equal |name|, so inline probing cannot
// decide; bail to the full lookup stub (jump target elided in this view).
6858 __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
6859 __ test_b(FieldOperand(entity_name, Map::kInstanceTypeOffset),
// Fall-back: run the out-of-line stub for the remaining probes.
6865 StringDictionaryLookupStub stub(properties,
6868 StringDictionaryLookupStub::NEGATIVE_LOOKUP);
6869 __ push(Immediate(Handle<Object>(name)));
6870 __ push(Immediate(name->Hash()));
// Stub sets result_ non-zero iff the name was found => found means miss.
6873 __ j(not_zero, miss);
6878 // Probe the string dictionary in the |elements| register. Jump to the
6879 // |done| label if a property with the given name is found leaving the
6880 // index into the dictionary in |r0|. Jump to the |miss| label
// See the comment block above: finds |name| in the |elements| dictionary,
// leaving the entry index in |r0| on success. Performs kInlinedProbes quick
// probes using the name's cached hash field, then falls back to the
// out-of-line lookup stub for the remaining probes.
// NOTE(review): interior lines are elided -- the sub(r1, 1) capacity mask,
// the and_ masking r0, the per-probe j(equal, done), the POSITIVE_LOOKUP
// stub construction tail, the pushes/CallStub, and the final test/jumps.
6882 void StringDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
6889 ASSERT(!elements.is(r0));
6890 ASSERT(!elements.is(r1));
6891 ASSERT(!name.is(r0));
6892 ASSERT(!name.is(r1));
6894 // Assert that name contains a string.
6895 if (FLAG_debug_code) __ AbortIfNotString(name);
// r1 = capacity (untagged); capacity - 1 is the index mask (power of two).
6897 __ mov(r1, FieldOperand(elements, kCapacityOffset));
6898 __ shr(r1, kSmiTagSize); // convert smi to int
6901 // Generate an unrolled loop that performs a few probes before
6902 // giving up. Measurements done on Gmail indicate that 2 probes
6903 // cover ~93% of loads from dictionaries.
6904 for (int i = 0; i < kInlinedProbes; i++) {
6905 // Compute the masked index: (hash + i + i * i) & mask.
// Use the hash cached in the string's hash field (already computed).
6906 __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
6907 __ shr(r0, String::kHashShift);
6909 __ add(r0, Immediate(StringDictionary::GetProbeOffset(i)));
6913 // Scale the index by multiplying by the entry size.
6914 ASSERT(StringDictionary::kEntrySize == 3);
6915 __ lea(r0, Operand(r0, r0, times_2, 0)); // r0 = r0 * 3
6917 // Check if the key is identical to the name.
6918 __ cmp(name, Operand(elements,
6921 kElementsStartOffset - kHeapObjectTag));
// Fall-back: out-of-line stub handles the remaining probes.
6925 StringDictionaryLookupStub stub(elements,
6930 __ mov(r0, FieldOperand(name, String::kHashFieldOffset));
6931 __ shr(r0, String::kHashShift);
// Out-of-line dictionary probe loop, used as the fall-back after the inlined
// probes in GenerateNegativeLookup/GeneratePositiveLookup. Takes the key and
// its hash on the stack, probes slots kInlinedProbes..kTotalProbes-1, and
// returns with result_ = 1 (found) or 0 (not found / negative-lookup proven
// absent). Must not GC: overrides SometimesSetsUpAFrame() to return false.
// NOTE(review): interior lines are elided -- the push of the untagged
// capacity mask, the hash load and per-probe and_ mask, the index_ scaling
// compensation, the probe-loop close, and the stack cleanup before the
// three ret()s are not all visible in this view.
6941 void StringDictionaryLookupStub::Generate(MacroAssembler* masm) {
6942 // This stub overrides SometimesSetsUpAFrame() to return false. That means
6943 // we cannot call anything that could cause a GC from this stub.
6944 // Stack frame on entry:
6945 // esp[0 * kPointerSize]: return address.
6946 // esp[1 * kPointerSize]: key's hash.
6947 // esp[2 * kPointerSize]: key.
6949 // dictionary_: StringDictionary to probe.
6950 // result_: used as scratch.
6951 // index_: will hold an index of entry if lookup is successful.
6952 // might alias with result_.
6954 // result_ is zero if lookup failed, non zero otherwise.
6956 Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
6958 Register scratch = result_;
// capacity - 1 (after untag) is the probe mask; it is kept on the stack
// (esp[0]) because registers are scarce here.
6960 __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
6962 __ SmiUntag(scratch);
6965 // If names of slots in range from 1 to kProbes - 1 for the hash value are
6966 // not equal to the name and kProbes-th slot is not used (its name is the
6967 // undefined value), it guarantees the hash table doesn't contain the
6968 // property. It's true even if some slots represent deleted properties
6969 // (their names are the null value).
6970 for (int i = kInlinedProbes; i < kTotalProbes; i++) {
6971 // Compute the masked index: (hash + i + i * i) & mask.
// esp offsets account for the pushed mask: hash is at 2*, key at 3*.
6972 __ mov(scratch, Operand(esp, 2 * kPointerSize));
6974 __ add(scratch, Immediate(StringDictionary::GetProbeOffset(i)));
6976 __ and_(scratch, Operand(esp, 0));
6978 // Scale the index by multiplying by the entry size.
6979 ASSERT(StringDictionary::kEntrySize == 3);
6980 __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3.
6982 // Having undefined at this place means the name is not contained.
6983 ASSERT_EQ(kSmiTagSize, 1);
6984 __ mov(scratch, Operand(dictionary_,
6987 kElementsStartOffset - kHeapObjectTag));
6988 __ cmp(scratch, masm->isolate()->factory()->undefined_value());
6989 __ j(equal, &not_in_dictionary);
6991 // Stop if found the property.
6992 __ cmp(scratch, Operand(esp, 3 * kPointerSize));
6993 __ j(equal, &in_dictionary);
6995 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
6996 // If we hit a non symbol key during negative lookup
6997 // we have to bailout as this key might be equal to the
6998 // key we are looking for.
7000 // Check if the entry name is not a symbol.
7001 __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
7002 __ test_b(FieldOperand(scratch, Map::kInstanceTypeOffset),
7004 __ j(zero, &maybe_in_dictionary);
7008 __ bind(&maybe_in_dictionary);
7009 // If we are doing negative lookup then probing failure should be
7010 // treated as a lookup success. For positive lookup probing failure
7011 // should be treated as lookup failure.
7012 if (mode_ == POSITIVE_LOOKUP) {
7013 __ mov(result_, Immediate(0));
// ret(2 * kPointerSize) pops the key and hash arguments on return.
7015 __ ret(2 * kPointerSize);
7018 __ bind(&in_dictionary);
7019 __ mov(result_, Immediate(1));
7021 __ ret(2 * kPointerSize);
7023 __ bind(&not_in_dictionary);
7024 __ mov(result_, Immediate(0));
7026 __ ret(2 * kPointerSize);
7030 struct AheadOfTimeWriteBarrierStubList {
7031 Register object, value, address;
7032 RememberedSetAction action;
7036 #define REG(Name) { kRegister_ ## Name ## _Code }
7038 static const AheadOfTimeWriteBarrierStubList kAheadOfTime[] = {
7039 // Used in RegExpExecStub.
7040 { REG(ebx), REG(eax), REG(edi), EMIT_REMEMBERED_SET },
7041 // Used in CompileArrayPushCall.
7042 { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
7043 { REG(ebx), REG(edi), REG(edx), OMIT_REMEMBERED_SET },
7044 // Used in CompileStoreGlobal and CallFunctionStub.
7045 { REG(ebx), REG(ecx), REG(edx), OMIT_REMEMBERED_SET },
7046 // Used in StoreStubCompiler::CompileStoreField and
7047 // KeyedStoreStubCompiler::CompileStoreField via GenerateStoreField.
7048 { REG(edx), REG(ecx), REG(ebx), EMIT_REMEMBERED_SET },
7049 // GenerateStoreField calls the stub with two different permutations of
7050 // registers. This is the second.
7051 { REG(ebx), REG(ecx), REG(edx), EMIT_REMEMBERED_SET },
7052 // StoreIC::GenerateNormal via GenerateDictionaryStore
7053 { REG(ebx), REG(edi), REG(edx), EMIT_REMEMBERED_SET },
7054 // KeyedStoreIC::GenerateGeneric.
7055 { REG(ebx), REG(edx), REG(ecx), EMIT_REMEMBERED_SET},
7056 // KeyedStoreStubCompiler::GenerateStoreFastElement.
7057 { REG(edi), REG(ebx), REG(ecx), EMIT_REMEMBERED_SET},
7058 { REG(edx), REG(edi), REG(ebx), EMIT_REMEMBERED_SET},
7059 // ElementsTransitionGenerator::GenerateSmiOnlyToObject
7060 // and ElementsTransitionGenerator::GenerateSmiOnlyToDouble
7061 // and ElementsTransitionGenerator::GenerateDoubleToObject
7062 { REG(edx), REG(ebx), REG(edi), EMIT_REMEMBERED_SET},
7063 { REG(edx), REG(ebx), REG(edi), OMIT_REMEMBERED_SET},
7064 // ElementsTransitionGenerator::GenerateDoubleToObject
7065 { REG(eax), REG(edx), REG(esi), EMIT_REMEMBERED_SET},
7066 { REG(edx), REG(eax), REG(edi), EMIT_REMEMBERED_SET},
7067 // StoreArrayLiteralElementStub::Generate
7068 { REG(ebx), REG(eax), REG(ecx), EMIT_REMEMBERED_SET},
7069 // Null termination.
7070 { REG(no_reg), REG(no_reg), REG(no_reg), EMIT_REMEMBERED_SET}
7075 bool RecordWriteStub::IsPregenerated() {
7076 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7077 !entry->object.is(no_reg);
7079 if (object_.is(entry->object) &&
7080 value_.is(entry->value) &&
7081 address_.is(entry->address) &&
7082 remembered_set_action_ == entry->action &&
7083 save_fp_regs_mode_ == kDontSaveFPRegs) {
7091 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime() {
7092 StoreBufferOverflowStub stub1(kDontSaveFPRegs);
7093 stub1.GetCode()->set_is_pregenerated(true);
7095 CpuFeatures::TryForceFeatureScope scope(SSE2);
7096 if (CpuFeatures::IsSupported(SSE2)) {
7097 StoreBufferOverflowStub stub2(kSaveFPRegs);
7098 stub2.GetCode()->set_is_pregenerated(true);
7103 void RecordWriteStub::GenerateFixedRegStubsAheadOfTime() {
7104 for (const AheadOfTimeWriteBarrierStubList* entry = kAheadOfTime;
7105 !entry->object.is(no_reg);
7107 RecordWriteStub stub(entry->object,
7112 stub.GetCode()->set_is_pregenerated(true);
7117 // Takes the input in 3 registers: address_ value_ and object_. A pointer to
7118 // the value has just been written into the object, now this stub makes sure
7119 // we keep the GC informed. The word in the object where the value has been
7120 // written is in the address register.
7121 void RecordWriteStub::Generate(MacroAssembler* masm) {
7122 Label skip_to_incremental_noncompacting;
7123 Label skip_to_incremental_compacting;
7125 // The first two instructions are generated with labels so as to get the
7126 // offset fixed up correctly by the bind(Label*) call. We patch it back and
7127 // forth between a compare instructions (a nop in this position) and the
7128 // real branch when we start and stop incremental heap marking.
7129 __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
7130 __ jmp(&skip_to_incremental_compacting, Label::kFar);
7132 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7133 __ RememberedSetHelper(object_,
7137 MacroAssembler::kReturnAtEnd);
7142 __ bind(&skip_to_incremental_noncompacting);
7143 GenerateIncremental(masm, INCREMENTAL);
7145 __ bind(&skip_to_incremental_compacting);
7146 GenerateIncremental(masm, INCREMENTAL_COMPACTION);
7148 // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
7149 // Will be checked in IncrementalMarking::ActivateGeneratedStub.
7150 masm->set_byte_at(0, kTwoByteNopInstruction);
7151 masm->set_byte_at(2, kFiveByteNopInstruction);
7155 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
7158 if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
7159 Label dont_need_remembered_set;
7161 __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
7162 __ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
7164 &dont_need_remembered_set);
7166 __ CheckPageFlag(regs_.object(),
7168 1 << MemoryChunk::SCAN_ON_SCAVENGE,
7170 &dont_need_remembered_set);
7172 // First notify the incremental marker if necessary, then update the
7174 CheckNeedsToInformIncrementalMarker(
7176 kUpdateRememberedSetOnNoNeedToInformIncrementalMarker,
7178 InformIncrementalMarker(masm, mode);
7179 regs_.Restore(masm);
7180 __ RememberedSetHelper(object_,
7184 MacroAssembler::kReturnAtEnd);
7186 __ bind(&dont_need_remembered_set);
7189 CheckNeedsToInformIncrementalMarker(
7191 kReturnOnNoNeedToInformIncrementalMarker,
7193 InformIncrementalMarker(masm, mode);
7194 regs_.Restore(masm);
7199 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
7200 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
7201 int argument_count = 3;
7202 __ PrepareCallCFunction(argument_count, regs_.scratch0());
7203 __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
7204 if (mode == INCREMENTAL_COMPACTION) {
7205 __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
7207 ASSERT(mode == INCREMENTAL);
7208 __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
7209 __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value.
7211 __ mov(Operand(esp, 2 * kPointerSize),
7212 Immediate(ExternalReference::isolate_address()));
7214 AllowExternalCallThatCantCauseGC scope(masm);
7215 if (mode == INCREMENTAL_COMPACTION) {
7217 ExternalReference::incremental_evacuation_record_write_function(
7221 ASSERT(mode == INCREMENTAL);
7223 ExternalReference::incremental_marking_record_write_function(
7227 regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
7231 void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
7232 MacroAssembler* masm,
7233 OnNoNeedToInformIncrementalMarker on_no_need,
7235 Label object_is_black, need_incremental, need_incremental_pop_object;
7237 // Let's look at the color of the object: If it is not black we don't have
7238 // to inform the incremental marker.
7239 __ JumpIfBlack(regs_.object(),
7245 regs_.Restore(masm);
7246 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7247 __ RememberedSetHelper(object_,
7251 MacroAssembler::kReturnAtEnd);
7256 __ bind(&object_is_black);
7258 // Get the value from the slot.
7259 __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
7261 if (mode == INCREMENTAL_COMPACTION) {
7262 Label ensure_not_white;
7264 __ CheckPageFlag(regs_.scratch0(), // Contains value.
7265 regs_.scratch1(), // Scratch.
7266 MemoryChunk::kEvacuationCandidateMask,
7271 __ CheckPageFlag(regs_.object(),
7272 regs_.scratch1(), // Scratch.
7273 MemoryChunk::kSkipEvacuationSlotsRecordingMask,
7278 __ jmp(&need_incremental);
7280 __ bind(&ensure_not_white);
7283 // We need an extra register for this, so we push the object register
7285 __ push(regs_.object());
7286 __ EnsureNotWhite(regs_.scratch0(), // The value.
7287 regs_.scratch1(), // Scratch.
7288 regs_.object(), // Scratch.
7289 &need_incremental_pop_object,
7291 __ pop(regs_.object());
7293 regs_.Restore(masm);
7294 if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
7295 __ RememberedSetHelper(object_,
7299 MacroAssembler::kReturnAtEnd);
7304 __ bind(&need_incremental_pop_object);
7305 __ pop(regs_.object());
7307 __ bind(&need_incremental);
7309 // Fall through when we need to inform the incremental marker.
7313 void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
7314 // ----------- S t a t e -------------
7315 // -- eax : element value to store
7316 // -- ebx : array literal
7317 // -- edi : map of array literal
7318 // -- ecx : element index as smi
7319 // -- edx : array literal index in function
7320 // -- esp[0] : return address
7321 // -----------------------------------
7324 Label double_elements;
7326 Label slow_elements;
7327 Label slow_elements_from_double;
7328 Label fast_elements;
7330 __ CheckFastElements(edi, &double_elements);
7332 // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
7333 __ JumpIfSmi(eax, &smi_element);
7334 __ CheckFastSmiOnlyElements(edi, &fast_elements, Label::kNear);
7336 // Store into the array literal requires a elements transition. Call into
7339 __ bind(&slow_elements);
7340 __ pop(edi); // Pop return address and remember to put back later for tail
7345 __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
7346 __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
7348 __ push(edi); // Return return address so that tail call returns to right
7350 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
7352 __ bind(&slow_elements_from_double);
7354 __ jmp(&slow_elements);
7356 // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
7357 __ bind(&fast_elements);
7358 __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
7359 __ lea(ecx, FieldOperand(ebx, ecx, times_half_pointer_size,
7360 FixedArrayBase::kHeaderSize));
7361 __ mov(Operand(ecx, 0), eax);
7362 // Update the write barrier for the array store.
7363 __ RecordWrite(ebx, ecx, eax,
7365 EMIT_REMEMBERED_SET,
7369 // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
7370 // FAST_ELEMENTS, and value is Smi.
7371 __ bind(&smi_element);
7372 __ mov(ebx, FieldOperand(ebx, JSObject::kElementsOffset));
7373 __ mov(FieldOperand(ebx, ecx, times_half_pointer_size,
7374 FixedArrayBase::kHeaderSize), eax);
7377 // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
7378 __ bind(&double_elements);
7381 __ mov(edx, FieldOperand(ebx, JSObject::kElementsOffset));
7382 __ StoreNumberToDoubleElements(eax,
7387 &slow_elements_from_double,
7395 } } // namespace v8::internal
7397 #endif // V8_TARGET_ARCH_IA32