// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
#include "v8.h"
#endif
has_frame_(false),
use_real_aborts_(true),
- sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) {
+ sp_(jssp),
+ tmp_list_(DefaultTmpList()),
+ fptmp_list_(DefaultFPTmpList()) {
if (isolate() != NULL) {
code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
isolate());
}
+CPURegList MacroAssembler::DefaultTmpList() {
+ return CPURegList(ip0, ip1);
+}
+
+
+CPURegList MacroAssembler::DefaultFPTmpList() {
+ return CPURegList(fp_scratch1, fp_scratch2);
+}
+
+
void MacroAssembler::LogicalMacro(const Register& rd,
const Register& rn,
const Operand& operand,
LogicalOp op) {
UseScratchRegisterScope temps(this);
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
Register temp = temps.AcquireX();
LoadRelocated(temp, operand);
Logical(rd, rn, temp, op);
UseScratchRegisterScope temps(this);
Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
LoadRelocated(dst, operand);
} else if (operand.IsImmediate()) {
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
ASSERT(allow_macro_instructions_);
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
LoadRelocated(rd, operand);
mvn(rd, rd);
Condition cond,
ConditionalCompareOp op) {
ASSERT((cond != al) && (cond != nv));
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
LoadRelocated(temp, operand);
FlagsUpdate S,
AddSubOp op) {
if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
- !operand.NeedsRelocation() && (S == LeaveFlags)) {
+ !operand.NeedsRelocation(isolate()) && (S == LeaveFlags)) {
// The instruction would be a nop. Avoid generating useless code.
return;
}
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
LoadRelocated(temp, operand);
ASSERT(rd.SizeInBits() == rn.SizeInBits());
UseScratchRegisterScope temps(this);
- if (operand.NeedsRelocation()) {
+ if (operand.NeedsRelocation(isolate())) {
Register temp = temps.AcquireX();
LoadRelocated(temp, operand);
AddSubWithCarryMacro(rd, rn, temp, S, op);
}
+void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
+ ASSERT(allow_macro_instructions_);
+ ASSERT(!rd.IsZero());
+
+ if (hint == kAdrNear) {
+ adr(rd, label);
+ return;
+ }
+
+ ASSERT(hint == kAdrFar);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+ ASSERT(!AreAliased(rd, scratch));
+
+ if (label->is_bound()) {
+ int label_offset = label->pos() - pc_offset();
+ if (Instruction::IsValidPCRelOffset(label_offset)) {
+ adr(rd, label);
+ } else {
+ ASSERT(label_offset <= 0);
+ int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
+ adr(rd, min_adr_offset);
+ Add(rd, rd, label_offset - min_adr_offset);
+ }
+ } else {
+ InstructionAccurateScope scope(
+ this, PatchingAssembler::kAdrFarPatchableNInstrs);
+ adr(rd, label);
+ for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
+ nop(ADR_FAR_NOP);
+ }
+ movz(scratch, 0);
+ add(rd, rd, scratch);
+ }
+}
+
+
void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
(bit == -1 || type >= kBranchTypeFirstUsingBit));
}
-void MacroAssembler::LoadRoot(Register destination,
+void MacroAssembler::LoadRoot(CPURegister destination,
Heap::RootListIndex index) {
// TODO(jbramley): Most root values are constants, and can be synthesized
// without a load. Refer to the ARM back end for details.
}
-void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
+void MacroAssembler::ThrowIf(Condition cond, BailoutReason reason) {
Label ok;
- B(InvertCondition(cc), &ok);
+ B(InvertCondition(cond), &ok);
Throw(reason);
Bind(&ok);
}
// Calls a code stub, recording |ast_id| in the relocation info.
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
  // Stub calls are not allowed in some stubs.
  ASSERT(AllowThisStubCall(stub));
  Handle<Code> code = stub->GetCode();
  Call(code, RelocInfo::CODE_TARGET, ast_id);
}
// Tail-calls a code stub (jump, so the stub returns to our caller).
void MacroAssembler::TailCallStub(CodeStub* stub) {
  Handle<Code> code = stub->GetCode();
  Jump(code, RelocInfo::CODE_TARGET);
}
Mov(x0, num_arguments);
Mov(x1, ExternalReference(f, isolate()));
- CEntryStub stub(1, save_doubles);
+ CEntryStub stub(isolate(), 1, save_doubles);
CallStub(&stub);
}
Label profiler_disabled;
Label end_profiler_check;
- bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address();
- STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
- Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag));
+ Mov(x10, ExternalReference::is_profiling_address(isolate()));
Ldrb(w10, MemOperand(x10));
Cbz(w10, &profiler_disabled);
Mov(x3, thunk_ref);
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
- DirectCEntryStub stub;
+ DirectCEntryStub stub(isolate());
stub.GenerateCall(this, x3);
if (FLAG_log_timer_events) {
Mov(x0, num_arguments);
Mov(x1, ext);
- CEntryStub stub(1);
+ CEntryStub stub(isolate(), 1);
CallStub(&stub);
}
void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
Mov(x1, builtin);
- CEntryStub stub(1);
- Jump(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
+ CEntryStub stub(isolate(), 1);
+ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
}
}
-void MacroAssembler::TryConvertDoubleToInt(Register as_int,
- FPRegister value,
- FPRegister scratch_d,
- Label* on_successful_conversion,
- Label* on_failed_conversion) {
+void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
+ FPRegister value,
+ FPRegister scratch_d,
+ Label* on_successful_conversion,
+ Label* on_failed_conversion) {
// Convert to an int and back again, then compare with the original value.
Fcvtzs(as_int, value);
Scvtf(scratch_d, as_int);
Push(lr);
Push(double_input); // Put input on stack.
- DoubleToIStub stub(jssp,
+ DoubleToIStub stub(isolate(),
+ jssp,
result,
0,
true, // is_truncating
// If we fell through then inline version didn't succeed - call stub instead.
Push(lr);
- DoubleToIStub stub(object,
+ DoubleToIStub stub(isolate(),
+ object,
result,
HeapNumber::kValueOffset - kHeapObjectTag,
true, // is_truncating
}
-#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::DebugBreak() {
Mov(x0, 0);
Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
- CEntryStub ces(1);
+ CEntryStub ces(isolate(), 1);
ASSERT(AllowThisStubCall(&ces));
- Call(ces.GetCode(isolate()), RelocInfo::DEBUG_BREAK);
+ Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
-#endif
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
// Calculate new top and bail out if new space is exhausted.
Adds(scratch3, result, object_size);
- B(vs, gc_required);
- Cmp(scratch3, allocation_limit);
+ Ccmp(scratch3, allocation_limit, CFlag, cc);
B(hi, gc_required);
Str(scratch3, MemOperand(top_address));
// Tag the object if requested.
if ((flags & TAG_OBJECT) != 0) {
- Orr(result, result, kHeapObjectTag);
+ ObjectTag(result, result);
}
}
Check(eq, kUnalignedAllocationInNewSpace);
}
- B(vs, gc_required);
- Cmp(scratch3, allocation_limit);
+ Ccmp(scratch3, allocation_limit, CFlag, cc);
B(hi, gc_required);
Str(scratch3, MemOperand(top_address));
// Tag the object if requested.
if ((flags & TAG_OBJECT) != 0) {
- Orr(result, result, kHeapObjectTag);
+ ObjectTag(result, result);
}
}
Label* gc_required,
Register scratch1,
Register scratch2,
- Register heap_number_map) {
+ CPURegister value,
+ CPURegister heap_number_map) {
+ ASSERT(!value.IsValid() || value.Is64Bits());
+ UseScratchRegisterScope temps(this);
+
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
- TAG_OBJECT);
-
- // Store heap number map in the allocated object.
- if (heap_number_map.Is(NoReg)) {
- heap_number_map = scratch1;
+ NO_ALLOCATION_FLAGS);
+
+ // Prepare the heap number map.
+ if (!heap_number_map.IsValid()) {
+ // If we have a valid value register, use the same type of register to store
+ // the map so we can use STP to store both in one instruction.
+ if (value.IsValid() && value.IsFPRegister()) {
+ heap_number_map = temps.AcquireD();
+ } else {
+ heap_number_map = scratch1;
+ }
LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
-}
-
+ if (emit_debug_code()) {
+ Register map;
+ if (heap_number_map.IsFPRegister()) {
+ map = scratch1;
+ Fmov(map, DoubleRegister(heap_number_map));
+ } else {
+ map = Register(heap_number_map);
+ }
+ AssertRegisterIsRoot(map, Heap::kHeapNumberMapRootIndex);
+ }
-void MacroAssembler::AllocateHeapNumberWithValue(Register result,
- DoubleRegister value,
- Label* gc_required,
- Register scratch1,
- Register scratch2,
- Register heap_number_map) {
- // TODO(all): Check if it would be more efficient to use STP to store both
- // the map and the value.
- AllocateHeapNumber(result, gc_required, scratch1, scratch2, heap_number_map);
- Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+ // Store the heap number map and the value in the allocated object.
+ if (value.IsSameSizeAndType(heap_number_map)) {
+ STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
+ HeapNumber::kValueOffset);
+ Stp(heap_number_map, value, MemOperand(result, HeapObject::kMapOffset));
+ } else {
+ Str(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+ if (value.IsValid()) {
+ Str(value, MemOperand(result, HeapNumber::kValueOffset));
+ }
+ }
+ ObjectTag(result, result);
}
Bind(&store_buffer_overflow);
Push(lr);
StoreBufferOverflowStub store_buffer_overflow_stub =
- StoreBufferOverflowStub(fp_mode);
+ StoreBufferOverflowStub(isolate(), fp_mode);
CallStub(&store_buffer_overflow_stub);
Pop(lr);
if (lr_status == kLRHasNotBeenSaved) {
Push(lr);
}
- RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode);
+ RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+ fp_mode);
CallStub(&stub);
if (lr_status == kLRHasNotBeenSaved) {
Pop(lr);
// We need some scratch registers for the MacroAssembler, so make sure we have
// some. This is safe here because Abort never returns.
RegList old_tmp_list = TmpList()->list();
- TmpList()->Combine(ip0);
- TmpList()->Combine(ip1);
+ TmpList()->Combine(MacroAssembler::DefaultTmpList());
if (use_real_aborts()) {
// Avoid infinite recursion; Push contains some assertions that use Abort.