// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}
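
// Illustrative sketch (not part of the original file): a hedged example of
// how a RuntimeCallHelper is typically driven. The DCHECKs above enforce that
// BeforeCall/AfterCall come in balanced pairs around the emitted call:
//
//   helper.BeforeCall(masm);   // enters an INTERNAL frame, has_frame -> true
//   // ... emit the runtime/stub call here ...
//   helper.AfterCall(masm);    // leaves the frame again, has_frame -> false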
#define __ masm.


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  {
    XMMRegister input = xmm1;
    XMMRegister result = xmm2;
    __ movsd(input, Operand(esp, 1 * kPointerSize));
    __ push(eax);
    __ push(ebx);

    MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);

    __ pop(ebx);
    __ pop(eax);
    __ movsd(Operand(esp, 1 * kPointerSize), result);
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
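
// Usage sketch (illustrative only): callers treat the returned pointer like
// any other double(*)(double). When the fast path is unavailable, plain
// std::exp is returned instead, so the call site does not need to care:
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double e = fast_exp(1.0);  // ~2.718281828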

UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  // Move double input into registers.
  {
    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
    __ sqrtsd(xmm0, xmm0);
    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
    // Load result into floating point register as return value.
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
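
// Usage sketch (illustrative only), mirroring CreateExpFunction above:
//
//   UnaryMathFunction fast_sqrt = CreateSqrtFunction();
//   double r = fast_sqrt(2.0);  // ~1.414213562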

// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)
enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };

// Expects registers:
// esi - source, aligned if alignment == MOVE_ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
void MemMoveEmitMainLoop(MacroAssembler* masm,
                         Label* move_last_15,
                         Direction direction,
                         Alignment alignment) {
  Register src = esi;
  Register dst = edi;
  Register count = ecx;
  Register loop_count = edx;
  Label loop, move_last_31, move_last_63;
  __ cmp(loop_count, 0);
  __ j(equal, &move_last_63);
  __ bind(&loop);
  // Main loop. Copy in 64 byte chunks.
  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
  if (direction == FORWARD) __ add(src, Immediate(0x40));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  __ movdqa(Operand(dst, 0x20), xmm2);
  __ movdqa(Operand(dst, 0x30), xmm3);
  if (direction == FORWARD) __ add(dst, Immediate(0x40));
  __ dec(loop_count);
  __ j(not_zero, &loop);
  // At most 63 bytes left to copy.
  __ bind(&move_last_63);
  __ test(count, Immediate(0x20));
  __ j(zero, &move_last_31);
  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  if (direction == FORWARD) __ add(src, Immediate(0x20));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  if (direction == FORWARD) __ add(dst, Immediate(0x20));
  // At most 31 bytes left to copy.
  __ bind(&move_last_31);
  __ test(count, Immediate(0x10));
  __ j(zero, move_last_15);
  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
  if (direction == FORWARD) __ add(src, Immediate(0x10));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
  __ movdqa(Operand(dst, 0), xmm0);
  if (direction == FORWARD) __ add(dst, Immediate(0x10));
}
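
// Worked example (commentary, not code): for count == 150 the caller sets
// loop_count = 150 >> 6 = 2, so the main loop moves 2 * 64 = 128 bytes. The
// tail then tests individual bits of count: bit 5 (0x20) is clear
// (150 & 0x20 == 0), bit 4 (0x10) is set, so move_last_31 copies 16 more
// bytes, and the remaining 150 & 0xF == 6 bytes are left to the caller's
// move_last_15 handler.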

void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.

class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};
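
// Example (commentary): if the buffer was allocated at 0x08000000 and a label
// is bound at position 0x40 within it, address() yields 0x08000040. This lets
// the jump tables below emit absolute 32-bit addresses with __ dd(...), which
// is only safe because the buffer is fixed and never moved.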

MemMoveFunction CreateMemMoveFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return NULL;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // 32-bit C declaration function calls pass arguments on stack.

  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: Return address.

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  // When copying up to this many bytes, use special "small" handlers.
  const size_t kSmallCopySize = 8;
  // When copying up to this many bytes, use special "medium" handlers.
  const size_t kMediumCopySize = 63;
  // When the non-overlapping region of src and dst is smaller than this,
  // use a more careful implementation (slightly slower).
  const size_t kMinMoveDistance = 16;
  // Note that these values are dictated by the implementation below,
  // do not just change them and hope things will work!
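
  // Worked dispatch examples (commentary): count == 6 falls into the small
  // handlers (6 <= kSmallCopySize), count == 40 into the medium handlers
  // (40 <= kMediumCopySize), and count == 200 into the main SSE2 loop below.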
  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  Register loop_count = edx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

  __ prefetch(Operand(src, 0), 1);
  __ cmp(count, kSmallCopySize);
  __ j(below_equal, &small_size);
  __ cmp(count, kMediumCopySize);
  __ j(below_equal, &medium_size);
  __ cmp(dst, src);
  __ j(above, &backward);
  {
    // |dst| is a lower address than |src|. Copy front-to-back.
    Label unaligned_source, move_last_15, skip_last_move;
    __ mov(eax, src);
    __ sub(eax, dst);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &forward_much_overlap);
    // Copy first 16 bytes.
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    // Determine distance to alignment: 16 - (dst & 0xF).
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ neg(edx);
    __ add(edx, Immediate(16));
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at end of string.
    __ bind(&move_last_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
    __ jmp(&move_last_15);
    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, last_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ mov_b(eax, Operand(src, 0));
    __ inc(src);
    __ mov_b(Operand(dst, 0), eax);
    __ inc(dst);
    __ dec(count);
    __ bind(&forward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
                        FORWARD, MOVE_UNALIGNED);
    __ bind(&last_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }
  {
    // |dst| is a higher address than |src|. Copy backwards.
    Label unaligned_source, move_first_15, skip_last_move;
    __ bind(&backward);
    // |dst| and |src| always point to the end of what's left to copy.
    __ add(dst, count);
    __ add(src, count);
    __ mov(eax, dst);
    __ sub(eax, src);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &backward_much_overlap);
    // Copy last 16 bytes.
    __ movdqu(xmm0, Operand(src, -0x10));
    __ movdqu(Operand(dst, -0x10), xmm0);
    // Find distance to alignment: dst & 0xF.
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ sub(dst, edx);
    __ sub(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
    __ bind(&move_first_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ sub(src, count);
    __ sub(dst, count);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
    __ jmp(&move_first_15);
    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, first_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ dec(src);
    __ dec(dst);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    __ dec(count);
    __ bind(&backward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
                        BACKWARD, MOVE_UNALIGNED);
    __ bind(&first_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    // Small/medium handlers expect dst/src to point to the beginning.
    __ sub(dst, count);
    __ sub(src, count);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }
  {
    // Special handlers for 9 <= copy_size < 64. No assumptions about
    // alignment or move distance, so all reads must be unaligned and
    // must happen before any writes.
    Label medium_handlers, f9_16, f17_32, f33_48, f49_63;

    __ bind(&f9_16);
    __ movsd(xmm0, Operand(src, 0));
    __ movsd(xmm1, Operand(src, count, times_1, -8));
    __ movsd(Operand(dst, 0), xmm0);
    __ movsd(Operand(dst, count, times_1, -8), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f17_32);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f33_48);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f49_63);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, 0x20));
    __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, 0x20), xmm2);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&medium_handlers);
    __ dd(conv.address(&f9_16));
    __ dd(conv.address(&f17_32));
    __ dd(conv.address(&f33_48));
    __ dd(conv.address(&f49_63));

    __ bind(&medium_size);  // Entry point into this block.
    __ mov(eax, count);
    __ dec(eax);
    __ shr(eax, 4);
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(eax, 3);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
    __ jmp(eax);
  }
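
  // Index math (commentary): medium_size computes (count - 1) >> 4, which
  // maps 9..16 -> 0 (f9_16), 17..32 -> 1 (f17_32), 33..48 -> 2 (f33_48),
  // and 49..63 -> 3 (f49_63): exactly the four entries in medium_handlers.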
  {
    // Specialized copiers for copy_size <= 8 bytes.
    Label small_handlers, f0, f1, f2, f3, f4, f5_8;
    __ bind(&f0);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f1);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f2);
    __ mov_w(eax, Operand(src, 0));
    __ mov_w(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f3);
    __ mov_w(eax, Operand(src, 0));
    __ mov_b(edx, Operand(src, 2));
    __ mov_w(Operand(dst, 0), eax);
    __ mov_b(Operand(dst, 2), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f4);
    __ mov(eax, Operand(src, 0));
    __ mov(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f5_8);
    __ mov(eax, Operand(src, 0));
    __ mov(edx, Operand(src, count, times_1, -4));
    __ mov(Operand(dst, 0), eax);
    __ mov(Operand(dst, count, times_1, -4), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&small_handlers);
    __ dd(conv.address(&f0));
    __ dd(conv.address(&f1));
    __ dd(conv.address(&f2));
    __ dd(conv.address(&f3));
    __ dd(conv.address(&f4));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
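
    // Table layout (commentary): small_size indexes this table directly by
    // count, so it has nine entries for counts 0..8. Counts 5..8 all share
    // f5_8, which copies two possibly overlapping 4-byte words: the first
    // word and the last word of the range.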
    __ bind(&small_size);  // Entry point into this block.
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(count, 8);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
    __ jmp(eax);
  }
  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
  return FUNCTION_CAST<MemMoveFunction>(buffer);
}
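
// Usage sketch (illustrative only; parameter names are ad hoc): the generated
// stub is memmove-compatible and safe for overlapping regions:
//
//   MemMoveFunction move = CreateMemMoveFunction();
//   if (move != NULL) move(dst, src, num_bytes);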
#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)


void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch = edi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      key,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edi: length of source FixedArray (smi-tagged)
  AllocationFlags flags =
      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  XMMRegister the_hole_nan = xmm1;
  __ movsd(the_hole_nan,
           Operand::StaticVariable(canonical_the_hole_nan_reference));
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  // Restore registers before jumping into runtime.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements.
  // esi: source FixedArray
  __ bind(&loop);
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
  __ SmiUntag(ebx);
  __ Cvtsi2sd(xmm0, ebx);
  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);

  // Found hole, store hole_nan_as_double instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
           the_hole_nan);

  __ bind(&entry);
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);
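
  // Addressing note (commentary): edi holds a smi-tagged index, i.e. the
  // element index shifted left by one. Scaling a smi by times_4 therefore
  // addresses index * 8 bytes, exactly one FixedDoubleArray slot; e.g.
  // element 3 is stored as smi 6, and 6 * 4 == 3 * sizeof(double).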
  __ pop(ebx);
  __ pop(eax);

  // Restore esi.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  __ jmp(&entry);

  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);
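
  // Hole detection (commentary): the hole is a NaN with a fixed bit pattern,
  // so it can be recognized from its upper 32 bits alone. The offset adds
  // sizeof(kHoleNanLower32) to skip the low word of the double and compare
  // only the high word against kHoleNanUpper32.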
  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  __ movsd(xmm0,
           FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
  __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax,
                      edx,
                      esi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Restore registers.
  __ pop(eax);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&success);
}

void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ test(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label ascii_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(result, kShortExternalStringMask);
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, kStringEncodingMask);
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &ascii_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // Ascii string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: ASCII or two-byte.
  Label ascii;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // Ascii string.
  // Load the byte into the result register.
  __ bind(&ascii);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}
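
// Dispatch summary (commentary): a sliced string adds its start offset to
// index and continues with its parent; a cons string is only handled here
// when its second part is the empty string (otherwise we defer to the
// runtime to flatten it); what remains is either an external string, read
// through its resource data pointer, or a sequential string, read directly
// past the header, each in a one-byte and a two-byte variant.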

static Operand ExpConstant(int index) {
  return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
}

void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  DCHECK(!input.is(double_scratch));
  DCHECK(!input.is(result));
  DCHECK(!result.is(double_scratch));
  DCHECK(!temp1.is(temp2));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label done;

  __ movsd(double_scratch, ExpConstant(0));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ ucomisd(input, ExpConstant(1));
  __ movsd(result, ExpConstant(2));
  __ j(above_equal, &done);
  __ movsd(double_scratch, ExpConstant(3));
  __ movsd(result, ExpConstant(4));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movd(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, ExpConstant(6));
  __ mulsd(double_scratch, ExpConstant(5));
  __ subsd(double_scratch, input);
  __ subsd(result, double_scratch);
  __ movsd(input, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ mov(temp1, temp2);
  __ mulsd(result, ExpConstant(7));
  __ subsd(result, double_scratch);
  __ add(temp1, Immediate(0x1ff800));
  __ addsd(result, ExpConstant(8));
  __ and_(temp2, Immediate(0x7ff));
  __ shr(temp1, 11);
  __ shl(temp1, 20);
  __ movd(input, temp1);
  __ pshufd(input, input, static_cast<uint8_t>(0xe1));  // Order: 11 10 00 01
  __ movsd(double_scratch, Operand::StaticArray(
      temp2, times_8, ExternalReference::math_exp_log_table()));
  __ orps(input, double_scratch);
  __ mulsd(result, input);
  __ bind(&done);
}
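
// Algorithm note (commentary, hedged): the sequence above follows the usual
// table-driven exponential scheme: the input is range-checked against
// underflow/overflow constants, reduced, approximated by a short polynomial,
// and then scaled by a power of two assembled in the integer registers
// (temp1/temp2) together with a lookup in math_exp_log_table(). The exact
// meaning of ExpConstant(0)..ExpConstant(8) is defined where
// InitializeMathExpData() fills in the constant table.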

CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
  patcher.masm()->push(ebp);
  patcher.masm()->mov(ebp, esp);
  patcher.masm()->push(esi);
  patcher.masm()->push(edi);
}

bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
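
// Recognition sketch (commentary): young code starts with the standard
// prologue emitted in the constructor above, whose first byte is push ebp;
// aged code has that prologue overwritten by a call to an age stub, so its
// first byte is kCallOpcode (the ia32 relative-call opcode). IsOld therefore
// only needs to inspect a single byte.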

bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}

void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte.
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}
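
// Decode sketch (commentary, hedged): for aged code the sequence is a
// relative call, so the 32-bit displacement following the opcode byte is
// added to the address just past the displacement field (hence the
// kCallTargetAddressOffset term after skipping the opcode) to recover the
// age stub's entry point, from which the age and parity are read back.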

void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CpuFeatures::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32