// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_ARM

#include "src/arm/simulator-arm.h"
#include "src/codegen.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


#define __ masm.


#if defined(USE_SIMULATOR)
byte* fast_exp_arm_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
      fast_exp_arm_machine_code, x, 0);
}
#endif
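// Note: under the simulator the buffer produced by CreateExpFunction() holds
// ARM machine code that the host CPU cannot call directly, so the function
// pointer returned is this trampoline, which runs the generated code through
// the simulator's CallFPReturnsDouble entry point instead.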


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
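  // Calling convention note: with the EABI hard-float variant the double
  // argument arrives in d0 and the result is returned in d0; with soft-float
  // it is passed and returned in the core register pair r0/r1, hence the
  // vmov glue in the block below.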
  {
    DwVfpRegister input = d0;
    DwVfpRegister result = d1;
    DwVfpRegister double_scratch1 = d2;
    DwVfpRegister double_scratch2 = d3;
    Register temp1 = r4;
    Register temp2 = r5;
    Register temp3 = r6;

    if (masm.use_eabi_hardfloat()) {
      // Input value is in d0 anyway, nothing to do.
    } else {
      __ vmov(input, r0, r1);
    }
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    if (masm.use_eabi_hardfloat()) {
      // Result is in d0, nothing to do.
    } else {
      __ vmov(r0, r1, result);
    }
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));
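  // The stub was emitted into plain writable memory. FlushICache makes the
  // instruction cache coherent with the freshly written code (required on
  // ARM, which has separate I/D caches), and ProtectCode then flips the
  // region to executable.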
  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_arm_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}

#if defined(V8_HOST_ARCH_ARM)
MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  Register temp1 = r3;
  Label less_4;
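  // Copy strategy, as read from the code below: with NEON, bulk-copy 64
  // bytes per iteration with cache prefetching, then peel off progressively
  // smaller chunks (128, 64, 32, 16, 8 bytes); without NEON, fall back to a
  // word-at-a-time loop. The final 0-3 bytes are handled branchlessly at
  // less_4 in either case.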

  if (CpuFeatures::IsSupported(NEON)) {
    Label loop, less_256, less_128, less_64, less_32, _16_or_less, _8_or_less;
    Label size_less_than_8;
    __ pld(MemOperand(src, 0));
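    // pld hints the source data into the cache ahead of the loads. The
    // ladder below issues one preload per cache line, so CPUs with 32-byte
    // lines get extra preloads at the intermediate offsets.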
    __ cmp(chars, Operand(8));
    __ b(lt, &size_less_than_8);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 32));
    }
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);
    __ pld(MemOperand(src, 64));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 96));
    }
    __ cmp(chars, Operand(128));
    __ b(lt, &less_128);
    __ pld(MemOperand(src, 128));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 160));
    }
    __ pld(MemOperand(src, 192));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 224));
    }
    __ cmp(chars, Operand(256));
    __ b(lt, &less_256);
    __ sub(chars, chars, Operand(256));
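    // Main loop: 64 bytes per iteration via two 4-register vld1/vst1 pairs.
    // chars is biased by -256 above (and restored after the loop), so while
    // the loop runs at least 256 bytes remain and the prefetch 256 bytes
    // ahead always touches data that will actually be copied.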
    __ bind(&loop);
    __ pld(MemOperand(src, 256));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    if (CpuFeatures::cache_line_size() == 32) {
      __ pld(MemOperand(src, 256));
    }
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64), SetCC);
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ b(ge, &loop);
    __ add(chars, chars, Operand(256));

    __ bind(&less_256);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(128));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));
    __ cmp(chars, Operand(64));
    __ b(lt, &less_64);

    __ bind(&less_128);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vld1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(src, PostIndex));
    __ sub(chars, chars, Operand(64));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ vst1(Neon8, NeonListOperand(d4, 4), NeonMemOperand(dest, PostIndex));

    __ bind(&less_64);
    __ cmp(chars, Operand(32));
    __ b(lt, &less_32);
    __ vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(32));

    __ bind(&less_32);
    __ cmp(chars, Operand(16));
    __ b(le, &_16_or_less);
    __ vld1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(16));

    __ bind(&_16_or_less);
    __ cmp(chars, Operand(8));
    __ b(le, &_8_or_less);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest, PostIndex));
    __ sub(chars, chars, Operand(8));

    // Do a last copy which may overlap with the previous copy (up to 8 bytes).
    __ bind(&_8_or_less);
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vst1(Neon8, NeonListOperand(d0), NeonMemOperand(dest));
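    // The tail copy above is branchless: for 1-8 remaining bytes, both
    // pointers are backed up by (8 - chars) so that a full 8-byte copy ends
    // exactly at the buffer end. Example: chars == 5 gives rsb == 3; the
    // copy re-writes 3 already-copied bytes plus the 5 new ones.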
    __ Ret();

    __ bind(&size_less_than_8);

    __ bic(temp1, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
  } else {
    Register temp2 = ip;
    Label loop;

    __ bic(temp2, chars, Operand(0x3), SetCC);
    __ b(&less_4, eq);
    __ add(temp2, dest, temp2);

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ str(temp1, MemOperand(dest, 4, PostIndex));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
  }

  __ bind(&less_4);
  __ mov(chars, Operand(chars, LSL, 31), SetCC);
  // bit0 => Z (ne), bit1 => C (cs)
  __ ldrh(temp1, MemOperand(src, 2, PostIndex), cs);
  __ strh(temp1, MemOperand(dest, 2, PostIndex), cs);
  __ ldrb(temp1, MemOperand(src), ne);
  __ strb(temp1, MemOperand(dest), ne);
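  // The 0-3 byte tail above is also branchless: LSL #31 shifts bit 1 of
  // chars into the carry flag and leaves Z clear iff bit 0 is set, so the
  // conditional ldrh/strh (cs) and ldrb/strb (ne) copy exactly the
  // remaining halfword and/or byte.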
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}


// Convert 8 to 16. The number of characters to copy must be at least 8.
MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
    MemCopyUint16Uint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else
  if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return stub;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  Register dest = r0;
  Register src = r1;
  Register chars = r2;
  if (CpuFeatures::IsSupported(NEON)) {
    Register temp = r3;
    Label loop;

    __ bic(temp, chars, Operand(0x7));
    __ sub(chars, chars, Operand(temp));
    __ add(temp, dest, Operand(temp, LSL, 1));

    __ bind(&loop);
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src, PostIndex));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest, PostIndex));
    __ cmp(dest, temp);
    __ b(&loop, ne);
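    // Each iteration above widens 8 characters: vld1 reads 8 bytes into d0,
    // vmovl zero-extends each byte to 16 bits (filling q0 = d0:d1), and
    // vst1 writes the resulting 16 bytes.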

    // Do a last copy which will overlap with the previous copy (1 to 8 bytes).
    __ rsb(chars, chars, Operand(8));
    __ sub(src, src, Operand(chars));
    __ sub(dest, dest, Operand(chars, LSL, 1));
    __ vld1(Neon8, NeonListOperand(d0), NeonMemOperand(src));
    __ vmovl(NeonU8, q0, d0);
    __ vst1(Neon16, NeonListOperand(d0, 2), NeonMemOperand(dest));
    __ Ret();
  } else {
    Register temp1 = r3;
    Register temp2 = ip;
    Register temp3 = lr;
    Register temp4 = r4;
    Label loop;
    Label not_two;

    __ Push(lr, r4);
    __ bic(temp2, chars, Operand(0x3));
    __ add(temp2, dest, Operand(temp2, LSL, 1));

    __ bind(&loop);
    __ ldr(temp1, MemOperand(src, 4, PostIndex));
    __ uxtb16(temp3, Operand(temp1, ROR, 0));
    __ uxtb16(temp4, Operand(temp1, ROR, 8));
    __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
    __ str(temp1, MemOperand(dest));
    __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
    __ str(temp1, MemOperand(dest, 4));
    __ add(dest, dest, Operand(8));
    __ cmp(dest, temp2);
    __ b(&loop, ne);
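    // Without NEON, four bytes are widened per iteration: uxtb16 with ROR #0
    // extracts bytes 0 and 2 into halfwords, ROR #8 extracts bytes 1 and 3,
    // and pkhbt/pkhtb re-interleave them into two little-endian words of
    // halfwords in the original byte order.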

    __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
    __ b(&not_two, cc);
    __ ldrh(temp1, MemOperand(src, 2, PostIndex));
    __ uxtb(temp3, Operand(temp1, ROR, 8));
    __ mov(temp3, Operand(temp3, LSL, 16));
    __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
    __ str(temp3, MemOperand(dest, 4, PostIndex));
    __ bind(&not_two);
    __ ldrb(temp1, MemOperand(src), ne);
    __ strh(temp1, MemOperand(dest), ne);
    __ Pop(pc, r4);
  }

  CodeDesc desc;
  masm.GetCode(&desc);

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

  return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
#endif
}
#endif


UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
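  // MovFromFloatParameter / MovToFloatResult below abstract over the
  // hard-/soft-float ABI difference: they are effectively no-ops on
  // hard-float targets (the value is already in d0) and move the value via
  // r0/r1 on soft-float ones.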
  __ MovFromFloatParameter(d0);
  __ vsqrt(d0, d0);
  __ MovToFloatResult(d0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}

#undef __

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r4;
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch_elements, allocation_memento_found);
  }
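  // The RecordWriteField call below is the GC write barrier: it records the
  // pointer store into the object so remembered sets and incremental
  // marking stay consistent. The same pattern recurs in the transition
  // generators that follow.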
  // Set transitioned map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      r9,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r4;
  Register length = r5;
  Register array = r6;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, length, array, scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  // Use lr as a temporary register.
  __ mov(lr, Operand(length, LSL, 2));
  __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
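  // Size computation note: length holds a smi (value << 1), so the LSL #2
  // above yields length * 8 bytes, i.e. one kDoubleSize slot per element,
  // before the header size is added.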
  // array: destination FixedDoubleArray, not tagged as heap object.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // r4: source FixedArray.

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ add(scratch1, array, Operand(kHeapObjectTag));
  __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      scratch1,
                      scratch2,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_end, scratch2, Operand(length, LSL, 2));

  // Repurpose registers no longer in use.
  Register hole_lower = elements;
  Register hole_upper = length;

  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged
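  // The hole is stored in double arrays as a dedicated NaN bit pattern
  // (kHoleNanUpper32 / kHoleNanLower32), chosen so ordinary arithmetic does
  // not produce it; this lets holes survive the representation change.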
  __ b(&entry);

  __ bind(&only_change_map);
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch2,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(lr);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
  // lr: current element
  __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);

  // Normal smi, convert to double and store.
  __ vmov(s0, lr);
  __ vcvt_f64_s32(d0, s0);
  __ vstr(d0, scratch2, 0);
  __ add(scratch2, scratch2, Operand(8));
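  // UntagAndJumpIfNotSmi shifts the smi tag out of lr; the untagged int32 is
  // then converted to double via s0 and stored, advancing the write cursor
  // by one kDoubleSize slot.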
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(lr);
    __ orr(lr, lr, Operand(1));
    __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
  __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ b(lt, &loop);

  __ pop(lr);
  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Register lr contains the return address.
  Label entry, loop, convert_hole, gc_required, only_change_map;
  Register elements = r4;
  Register array = r6;
  Register length = r5;
  Register scratch = r9;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map,
                     elements, array, length, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(eq, &only_change_map);

  __ push(lr);
  __ Push(target_map, receiver, key, value);
  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ add(array_size, array_size, Operand(length, LSL, 1));
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
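  // Size computation note: length is a smi (value << 1), so the LSL #1 above
  // yields length * 4 bytes, i.e. one pointer slot per element, on top of
  // the header size.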
  // array: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ str(scratch, MemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ add(src_elements, elements,
         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
  __ add(array, array, Operand(kHeapObjectTag));
  __ add(dst_end, dst_elements, Operand(length, LSL, 1));
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // heap_number_map: heap number map
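  // src_elements is biased by +4 so each post-indexed load in the loop reads
  // the upper word of a double first; comparing that word against
  // kHoleNanUpper32 is sufficient to detect the hole.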
  __ b(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ pop(lr);
  __ b(fail);

  __ bind(&loop);
  Register upper_bits = key;
  __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
  __ cmp(upper_bits, Operand(kHoleNanUpper32));
  __ b(eq, &convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
  __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
  __ Strd(scratch2, upper_bits,
          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
  __ mov(scratch2, dst_elements);
  __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
  __ RecordWrite(array,
                 scratch2,
                 heap_number,
                 kLRHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ b(&entry);
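  // Each non-hole double is boxed: the lower word is re-read at offset -12
  // (src_elements has already advanced 8 bytes past the element's upper
  // word), both words are stored into a fresh HeapNumber, and RecordWrite
  // logs the pointer store into the new FixedArray for the GC.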

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
  __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));

  __ bind(&entry);
  __ cmp(dst_elements, dst_end);
  __ b(lt, &loop);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver,
                      JSObject::kElementsOffset,
                      array,
                      scratch,
                      kLRHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(lr);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ tst(result, Operand(kIsIndirectStringMask));
  __ b(eq, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ tst(result, Operand(kSlicedNotConsMask));
  __ b(eq, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ ldr(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ add(index, index, Operand::SmiUntag(result));
  __ jmp(&indirect_string_loaded);
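  // A sliced string is a (parent, offset) pair, so indexing is redirected by
  // untagging the smi offset and adding it to the requested index before
  // loading from the parent string.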

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ b(ne, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));
729 __ ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
730 __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
732 // Distinguish sequential and external strings. Only these two string
733 // representations can reach here (slices and flat cons strings have been
734 // reduced to the underlying sequential or external string).
735 Label external_string, check_encoding;
736 __ bind(&check_sequential);
737 STATIC_ASSERT(kSeqStringTag == 0);
738 __ tst(result, Operand(kStringRepresentationMask));
739 __ b(ne, &external_string);
741 // Prepare sequential strings
742 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
745 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
746 __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ tst(result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ tst(result, Operand(kShortExternalStringMask));
  __ b(ne, call_runtime);
  __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
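  // For external strings the characters live outside the V8 heap; the
  // resource-data field holds a raw pointer to them, which from here on is
  // indexed exactly like a sequential string's payload.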

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ tst(result, Operand(kStringEncodingMask));
  __ b(ne, &ascii);
  // Two-byte string: each character is a halfword, hence the LSL #1 index.
  __ ldrh(result, MemOperand(string, index, LSL, 1));
  __ jmp(&done);
  __ bind(&ascii);
  // One-byte string.
  __ ldrb(result, MemOperand(string, index));
  __ bind(&done);
}


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}
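// ExpConstant indexes a table of doubles prepared by
// ExternalReference::InitializeMathExpData(). As read from the code below:
// entries 0 and 1 bound the input range handled by the fast path (at or
// below entry 0 the result is 0, at or above entry 1 it is entry 2), and
// entry 8 is asserted further down to be exactly 1.0.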


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DwVfpRegister input,
                                   DwVfpRegister result,
                                   DwVfpRegister double_scratch1,
                                   DwVfpRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(!temp1.is(temp2));
  DCHECK(!temp1.is(temp3));
  DCHECK(!temp2.is(temp3));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label zero, infinity, done;
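  // Overview, as far as it can be read off the code below (not an
  // authoritative derivation): the input is split into an integer part and a
  // small remainder via a scaled-and-biased multiply, the remainder is
  // approximated with a short polynomial, and the power-of-two factor is
  // reassembled from a mantissa looked up in math_exp_log_table plus a
  // computed IEEE-754 exponent. Out-of-range inputs short-circuit to 0 or
  // +infinity.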

  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ vldr(double_scratch1, ExpConstant(0, temp3));
  __ VFPCompareAndSetFlags(double_scratch1, input);
  __ b(ge, &zero);

  __ vldr(double_scratch2, ExpConstant(1, temp3));
  __ VFPCompareAndSetFlags(input, double_scratch2);
  __ b(ge, &infinity);

  __ vldr(double_scratch1, ExpConstant(3, temp3));
  __ vldr(result, ExpConstant(4, temp3));
  __ vmul(double_scratch1, double_scratch1, input);
  __ vadd(double_scratch1, double_scratch1, result);
  __ VmovLow(temp2, double_scratch1);
  __ vsub(double_scratch1, double_scratch1, result);
  __ vldr(result, ExpConstant(6, temp3));
  __ vldr(double_scratch2, ExpConstant(5, temp3));
  __ vmul(double_scratch1, double_scratch1, double_scratch2);
  __ vsub(double_scratch1, double_scratch1, input);
  __ vsub(result, result, double_scratch1);
  __ vmul(double_scratch2, double_scratch1, double_scratch1);
  __ vmul(result, result, double_scratch2);
  __ vldr(double_scratch2, ExpConstant(7, temp3));
  __ vmul(result, result, double_scratch2);
  __ vsub(result, result, double_scratch1);
  // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
  DCHECK(*reinterpret_cast<double*>
             (ExternalReference::math_exp_constants(8).address()) == 1);
  __ vmov(double_scratch2, 1);
  __ vadd(result, result, double_scratch2);
  __ mov(temp1, Operand(temp2, LSR, 11));
  __ Ubfx(temp2, temp2, 0, 11);
  __ add(temp1, temp1, Operand(0x3ff));
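  // temp2 held the low word of the scaled input: the bits above 11 (LSR #11)
  // select the binary exponent and the low 11 bits (Ubfx) index the
  // 2048-entry log table. Adding 0x3ff biases the exponent for IEEE-754
  // doubles; it is OR-ed into bits 20..30 of the high word below (LSL #20).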

  // Must not call ExpConstant() after overwriting temp3!
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ add(temp3, temp3, Operand(temp2, LSL, 3));
  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
  // The first word loaded goes into the lower-numbered register.
  if (temp2.code() < temp3.code()) {
    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp2, temp1);
  } else {
    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
    __ vmov(double_scratch1, temp3, temp1);
  }
  __ vmul(result, result, double_scratch1);
  __ b(&done);

  __ bind(&zero);
  __ vmov(result, kDoubleRegZero);
  __ b(&done);

  __ bind(&infinity);
  __ vldr(result, ExpConstant(2, temp3));

  __ bind(&done);
}

#undef __

#ifdef DEBUG
// add(r0, pc, Operand(-8))
static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
#endif
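// Code aging scheme: a "young" function starts with the standard prologue
// emitted by CodeAgingHelper below. To mark code as old, the first prologue
// instructions are patched to materialize the sequence address in r0 and
// jump to an age-specific stub (see PatchPlatformCodeAge). The constant
// above is the first instruction of that patched sequence and is used to
// recognize old code.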

CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before ARM simulator ICache is setup.
  SmartPointer<CodePatcher> patcher(
      new CodePatcher(young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushFixedFrame(r1);
  patcher->masm()->nop(ip.code());
  patcher->masm()->add(
      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    Address target_address = Memory::Address_at(
        sequence + (kNoCodeAgeSequenceLength - Assembler::kInstrSize));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CpuFeatures::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
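    // The patched sequence: 'add r0, pc, #-8' materializes the address of
    // the sequence start (pc reads as current instruction + 8 on ARM), and
    // 'ldr pc, [pc, #-4]' loads the stub entry point from the literal word
    // emitted immediately after it by emit_code_stub_address.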
    patcher.masm()->add(r0, pc, Operand(-8));
    patcher.masm()->ldr(pc, MemOperand(pc, -4));
    patcher.masm()->emit_code_stub_address(stub);
  }
}

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM