// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_PPC

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/ppc/simulator-ppc.h"

namespace v8 {
namespace internal {

#define __ masm.

#if defined(USE_SIMULATOR)
byte* fast_exp_ppc_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())
      ->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
}
#endif

UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
    DoubleRegister input = d1;
    DoubleRegister result = d2;
    DoubleRegister double_scratch1 = d3;
    DoubleRegister double_scratch2 = d4;
    Register temp1 = r7;
    Register temp2 = r8;
    Register temp3 = r9;
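
    // On ABIs that use function descriptors (e.g. AIX and big-endian PPC64
    // ELFv1), generated code must be entered through a descriptor, so one is
    // emitted before the actual instructions below.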
    __ function_descriptor();

    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
                                  double_scratch2, temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    __ MovToFloatResult(result);
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
#if !ABI_USES_FUNCTION_DESCRIPTORS
  DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_ppc_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}

UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  __ function_descriptor();

  __ MovFromFloatParameter(d1);
  __ fsqrt(d1, d1);
  __ MovToFloatResult(d1);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
#if !ABI_USES_FUNCTION_DESCRIPTORS
  DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif
  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}

#undef __

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  DCHECK(!masm->has_frame());
  masm->EnterFrame(StackFrame::INTERNAL);
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  DCHECK(masm->has_frame());
  masm->LeaveFrame(StackFrame::INTERNAL);
  masm->set_has_frame(false);
}

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r7;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address
  Label loop, entry, convert_hole, only_change_map, done;
  Register elements = r7;
  Register length = r8;
  Register array = r9;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r10;
  Register scratch3 = r11;
  Register scratch4 = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiToDoubleArrayOffset(scratch3, length);
  __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
  // array: destination FixedDoubleArray, not tagged as heap object.
  // elements: source FixedArray.
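  // Note: the backing store is allocated with DOUBLE_ALIGNMENT because its
  // elements are unboxed 64-bit doubles, and the pointer stays untagged until
  // kHeapObjectTag is added when it is installed in the receiver below.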

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ addi(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ addi(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array_end, length);
  __ add(array_end, scratch2, array_end);
  // Repurpose registers no longer in use.
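  // Holes in a FixedDoubleArray are represented by a canonical "hole NaN"
  // bit pattern (kHoleNanInt64, or kHoleNanUpper32/kHoleNanLower32 on 32-bit
  // targets) that ordinary arithmetic does not produce, so it can be stored
  // directly into the double array and recognized later.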
#if V8_TARGET_ARCH_PPC64
  Register hole_int64 = elements;
  __ mov(hole_int64, Operand(kHoleNanInt64));
#else
  Register hole_lower = elements;
  Register hole_upper = length;
  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 OR hole_int64
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // scratch2: begin of FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Convert and copy elements.
  __ bind(&loop);
  __ LoadP(scratch3, MemOperand(scratch1));
  __ addi(scratch1, scratch1, Operand(kPointerSize));
  // scratch3: current element
  __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);

  // Normal smi, convert to double and store.
  __ ConvertIntToDouble(scratch3, d0);
  __ stfd(d0, MemOperand(scratch2, 0));
  __ addi(scratch2, scratch2, Operand(8));
  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
    __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
#if V8_TARGET_ARCH_PPC64
  __ std(hole_int64, MemOperand(scratch2, 0));
#else
  __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
  __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
#endif
  __ addi(scratch2, scratch2, Operand(8));

  __ bind(&entry);
  __ cmp(scratch2, array_end);
  __ blt(&loop);

  __ bind(&done);
}

void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // Register lr contains the return address.
  Label loop, convert_hole, gc_required, only_change_map;
  Register elements = r7;
  Register array = r9;
  Register length = r8;
  Register scratch = r10;
  Register scratch3 = r11;
  Register hole_value = r14;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
                     scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ Push(target_map, receiver, key, value);
  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToPtrArrayOffset(r0, length);
  __ add(array_size, array_size, r0);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
  __ addi(array, array, Operand(kHeapObjectTag));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ addi(src_elements, elements,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(length, length);
  __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);

  Label initialization_loop, loop_done;
  __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
  __ beq(&loop_done, cr0);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
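  // The fill loop runs off the PPC count register: mtctr loads the iteration
  // count, bdnz decrements it and branches while it is non-zero, and StorePU
  // (store with update) pre-increments dst_elements on each store.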
  __ mtctr(r0);
  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
  __ bind(&initialization_loop);
  __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
  __ bdnz(&initialization_loop);

  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(dst_end, dst_elements, length);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // hole_value: the-hole pointer
  // heap_number_map: heap number map
  __ b(&loop);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ b(fail);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ StoreP(hole_value, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ cmpl(dst_elements, dst_end);
  __ bge(&loop_done);

  __ bind(&loop);
  Register upper_bits = key;
  __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ addi(src_elements, src_elements, Operand(kDoubleSize));
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
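  // NaNs stored in a FixedDoubleArray are canonicalized, so only the hole
  // carries kHoleNanUpper32 in its upper word; comparing the upper 32 bits
  // is therefore enough to detect it.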
  __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
  __ beq(&convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
#if V8_TARGET_ARCH_PPC64
  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
  // subtract tag for std
  __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
  __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
  __ lwz(scratch2,
         MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
  __ lwz(upper_bits,
         MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
  __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
  __ mr(scratch2, dst_elements);
  __ StoreP(heap_number, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ cmpl(dst_elements, dst_end);
  __ blt(&loop);

  __ bind(&loop_done);
  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

// assume ip can be used as a scratch register below
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
                                       Label* call_runtime) {
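  // Loads the character at |index| from |string| into |result|. Indirect
  // strings (cons and sliced) are unwrapped first; anything that still cannot
  // be handled inline (e.g. unflattened cons strings or short external
  // strings) bails out to |call_runtime|.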
  // Fetch the instance type of the receiver into result register.
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ andi(r0, result, Operand(kIsIndirectStringMask));
  __ beq(&check_sequential, cr0);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ mov(ip, Operand(kSlicedNotConsMask));
  __ and_(r0, result, ip, SetRC);
  __ beq(&cons_string, cr0);

  // Handle slices.
  Label indirect_string_loaded;
  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ SmiUntag(ip, result);
  __ add(index, index, ip);
  __ b(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ bne(call_runtime);
  // Get the first of the two strings and load its instance type.
  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ andi(r0, result, Operand(kStringRepresentationMask));
  __ bne(&external_string, cr0);

  // Prepare sequential strings
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
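  // Because one-byte and two-byte sequential strings share the same header
  // size, the string pointer can be advanced past the header before the
  // encoding is known.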
  __ addi(string, string,
          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ b(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ andi(r0, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ andi(r0, result, Operand(kShortExternalStringMask));
  __ bne(call_runtime, cr0);
  __ LoadP(string,
           FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ andi(r0, result, Operand(kStringEncodingMask));
  __ bne(&one_byte, cr0);
  // Two-byte string.
  __ ShiftLeftImm(result, index, Operand(1));
  __ lhzx(result, MemOperand(string, result));
  __ b(&done);

  __ bind(&one_byte);
  // One-byte string.
  __ lbzx(result, MemOperand(string, index));

  __ bind(&done);
}

static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}

void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_scratch1,
                                   DoubleRegister double_scratch2,
                                   Register temp1, Register temp2,
                                   Register temp3) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(!temp1.is(temp2));
  DCHECK(!temp1.is(temp3));
  DCHECK(!temp2.is(temp3));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.
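
  // Rough shape of the code generated below (the constant-table layout is
  // assumed to match ExternalReference::InitializeMathExpData()):
  //   - NaN inputs fall through to |done| with the input as the result.
  //   - Inputs outside the range bounded by constants 0 and 1 go to the
  //     |zero| or |infinity| paths (constant 2 holds the overflow result).
  //   - Otherwise the input is scaled and split into an exponent part and an
  //     index into math_exp_log_table(), a polynomial correction is applied,
  //     and the pieces are recombined into the double used to scale |result|.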
  Label zero, infinity, done;

  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ lfd(double_scratch1, ExpConstant(0, temp3));
  __ fcmpu(double_scratch1, input);
  __ fmr(result, input);
  __ bunordered(&done);
  __ bge(&zero);

  __ lfd(double_scratch2, ExpConstant(1, temp3));
  __ fcmpu(input, double_scratch2);
  __ bge(&infinity);

  __ lfd(double_scratch1, ExpConstant(3, temp3));
  __ lfd(result, ExpConstant(4, temp3));
  __ fmul(double_scratch1, double_scratch1, input);
  __ fadd(double_scratch1, double_scratch1, result);
  __ MovDoubleLowToInt(temp2, double_scratch1);
  __ fsub(double_scratch1, double_scratch1, result);
  __ lfd(result, ExpConstant(6, temp3));
  __ lfd(double_scratch2, ExpConstant(5, temp3));
  __ fmul(double_scratch1, double_scratch1, double_scratch2);
  __ fsub(double_scratch1, double_scratch1, input);
  __ fsub(result, result, double_scratch1);
  __ fmul(double_scratch2, double_scratch1, double_scratch1);
  __ fmul(result, result, double_scratch2);
  __ lfd(double_scratch2, ExpConstant(7, temp3));
  __ fmul(result, result, double_scratch2);
  __ fsub(result, result, double_scratch1);
  __ lfd(double_scratch2, ExpConstant(8, temp3));
  __ fadd(result, result, double_scratch2);
  __ srwi(temp1, temp2, Operand(11));
  __ andi(temp2, temp2, Operand(0x7ff));
  __ addi(temp1, temp1, Operand(0x3ff));
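  // temp2 (masked to its low 11 bits) indexes the 2048-entry
  // math_exp_log_table(); temp1, biased by 0x3ff, becomes the IEEE-754
  // exponent of the double reconstructed below.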

  // Must not call ExpConstant() after overwriting temp3!
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ slwi(temp2, temp2, Operand(3));
#if V8_TARGET_ARCH_PPC64
  __ ldx(temp2, MemOperand(temp3, temp2));
  __ sldi(temp1, temp1, Operand(52));
  __ orx(temp2, temp1, temp2);
  __ MovInt64ToDouble(double_scratch1, temp2);
#else
  __ add(ip, temp3, temp2);
  __ lwz(temp3, MemOperand(ip, Register::kExponentOffset));
  __ lwz(temp2, MemOperand(ip, Register::kMantissaOffset));
  __ slwi(temp1, temp1, Operand(20));
  __ orx(temp3, temp1, temp3);
  __ MovInt64ToDouble(double_scratch1, temp3, temp2);
#endif
  __ fmul(result, result, double_scratch1);
  __ b(&done);

  __ bind(&zero);
  __ fmr(result, kDoubleRegZero);
  __ b(&done);

  __ bind(&infinity);
  __ lfd(result, ExpConstant(2, temp3));

  __ bind(&done);
}

#undef __

CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the simulator ICache is setup.
  SmartPointer<CodePatcher> patcher(new CodePatcher(
      young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
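  // The "young" sequence is just the standard function prologue (fixed-frame
  // push plus frame-pointer setup) padded with nops; PatchPlatformCodeAge()
  // overwrites it with a jump into a code-age stub once the code gets old.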
  patcher->masm()->PushFixedFrame(r4);
  patcher->masm()->addi(fp, sp,
                        Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
    patcher->masm()->nop();
  }
}

#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif

bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}

void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    ConstantPoolArray* constant_pool = NULL;
    Address target_address = Assembler::target_address_at(
        sequence + kCodeAgingTargetDelta, constant_pool);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}

void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CpuFeatures::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
    // Don't use Call -- we need to preserve ip and lr.
    // See GenerateMakeCodeYoungAgainCommon for the stub code.
    patcher.masm()->nop();  // marker to detect sequence (see IsOld)
    patcher.masm()->mov(r3, Operand(target));
    patcher.masm()->Jump(r3);
    for (int i = 0; i < kCodeAgingSequenceNops; i++) {
      patcher.masm()->nop();
    }
  }
}

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_PPC