// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_ARM64
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "debug.h"
-#include "isolate-inl.h"
-#include "runtime.h"
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime/runtime.h"
namespace v8 {
namespace internal {
LogicalOp op) {
UseScratchRegisterScope temps(this);
- if (operand.NeedsRelocation(isolate())) {
+ if (operand.NeedsRelocation(this)) {
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
Logical(rd, rn, temp, op);
} else if (operand.IsImmediate()) {
- int64_t immediate = operand.immediate();
+ int64_t immediate = operand.ImmediateValue();
unsigned reg_size = rd.SizeInBits();
- ASSERT(rd.Is64Bits() || is_uint32(immediate));
// If the operation is NOT, invert the operation and immediate.
if ((op & NOT) == NOT) {
op = static_cast<LogicalOp>(op & ~NOT);
immediate = ~immediate;
- if (rd.Is32Bits()) {
- immediate &= kWRegMask;
- }
}
+ // Ignore the top 32 bits of an immediate if we're moving to a W register.
+ if (rd.Is32Bits()) {
+ // Check that the top 32 bits are consistent.
+ DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
+ ((immediate >> kWRegSizeInBits) == -1));
+ immediate &= kWRegMask;
+ }
+
+ DCHECK(rd.Is64Bits() || is_uint32(immediate));
+
// Special cases for all set or all clear immediates.
if (immediate == 0) {
switch (op) {
} else {
// Immediate can't be encoded: synthesize using move immediate.
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, immediate);
+ Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
if (rd.Is(csp)) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
- Logical(temp, rn, temp, op);
+ Logical(temp, rn, imm_operand, op);
Mov(csp, temp);
+ AssertStackConsistency();
} else {
- Logical(rd, rn, temp, op);
+ Logical(rd, rn, imm_operand, op);
}
}
} else if (operand.IsExtendedRegister()) {
- ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
// Add/sub extended supports shift <= 4. We want to support exactly the
// same modes here.
- ASSERT(operand.shift_amount() <= 4);
- ASSERT(operand.reg().Is64Bits() ||
+ DCHECK(operand.shift_amount() <= 4);
+ DCHECK(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
Register temp = temps.AcquireSameSizeAs(rn);
EmitExtendShift(temp, operand.reg(), operand.extend(),
} else {
// The operand can be encoded in the instruction.
- ASSERT(operand.IsShiftedRegister());
+ DCHECK(operand.IsShiftedRegister());
Logical(rd, rn, operand, op);
}
}
void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+ DCHECK(!rd.IsZero());
// TODO(all) extend to support more immediates.
//
// applying move-keep operations to move-zero and move-inverted initial
// values.
- unsigned reg_size = rd.SizeInBits();
- unsigned n, imm_s, imm_r;
- if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
- // Immediate can be represented in a move zero instruction. Movz can't
- // write to the stack pointer.
- movz(rd, imm);
- } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
- // Immediate can be represented in a move inverted instruction. Movn can't
- // write to the stack pointer.
- movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
- } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
- // Immediate can be represented in a logical orr instruction.
- LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
- } else {
+ // Try to move the immediate in one instruction, and if that fails, switch to
+ // using multiple instructions.
+ if (!TryOneInstrMoveImmediate(rd, imm)) {
+ unsigned reg_size = rd.SizeInBits();
+
// Generic immediate case. Imm will be represented by
// [imm3, imm2, imm1, imm0], where each imm is 16 bits.
// A move-zero or move-inverted is generated for the first non-zero or
// Iterate through the halfwords. Use movn/movz for the first non-ignored
// halfword, and movk for subsequent halfwords.
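+ // For example (with an X-sized destination), 0x0000cafe00001234 can be
+ // generated as movz #0x1234 followed by movk #0xcafe, lsl #32; the zero
+ // halfwords are skipped entirely.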
- ASSERT((reg_size % 16) == 0);
+ DCHECK((reg_size % 16) == 0);
bool first_mov_done = false;
for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
}
}
}
- ASSERT(first_mov_done);
+ DCHECK(first_mov_done);
// Move the temporary if the original destination register was the stack
// pointer.
if (rd.IsSP()) {
mov(rd, temp);
+ AssertStackConsistency();
}
}
}
void MacroAssembler::Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
// Provide a swap register for instructions that need to write into the
// system stack pointer (and can't do this inherently).
UseScratchRegisterScope temps(this);
Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
- if (operand.NeedsRelocation(isolate())) {
- LoadRelocated(dst, operand);
+ if (operand.NeedsRelocation(this)) {
+ Ldr(dst, operand.immediate());
} else if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
- Mov(dst, operand.immediate());
+ Mov(dst, operand.ImmediateValue());
} else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
// Emit a shift instruction if moving a shifted register. This operation
// Copy the result to the system stack pointer.
if (!dst.Is(rd)) {
- ASSERT(rd.IsSP());
+ DCHECK(rd.IsSP());
Assembler::mov(rd, dst);
}
}
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
- if (operand.NeedsRelocation(isolate())) {
- LoadRelocated(rd, operand);
+ if (operand.NeedsRelocation(this)) {
+ Ldr(rd, operand.immediate());
mvn(rd, rd);
} else if (operand.IsImmediate()) {
// Call the macro assembler for generic immediates.
- Mov(rd, ~operand.immediate());
+ Mov(rd, ~operand.ImmediateValue());
} else if (operand.IsExtendedRegister()) {
// Emit two instructions for the extend case. This differs from Mov, as
unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
- ASSERT((reg_size % 8) == 0);
+ DCHECK((reg_size % 8) == 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
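+ // Equivalently, all but (at most) one halfword of the immediate must be
+ // clear, which is exactly what the CountClearHalfWords test below checks.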
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
- ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
+ DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
StatusFlags nzcv,
Condition cond,
ConditionalCompareOp op) {
- ASSERT((cond != al) && (cond != nv));
- if (operand.NeedsRelocation(isolate())) {
+ DCHECK((cond != al) && (cond != nv));
+ if (operand.NeedsRelocation(this)) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
ConditionalCompareMacro(rn, temp, nzcv, cond, op);
} else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
- (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+ (operand.IsImmediate() &&
+ IsImmConditionalCompare(operand.ImmediateValue()))) {
// The immediate can be encoded in the instruction, or the operand is an
// unshifted register: call the assembler.
ConditionalCompare(rn, operand, nzcv, cond, op);
const Register& rn,
const Operand& operand,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
if (operand.IsImmediate()) {
// Immediate argument. Handle special cases of 0, 1 and -1 using zero
// register.
- int64_t imm = operand.immediate();
+ int64_t imm = operand.ImmediateValue();
Register zr = AppropriateZeroRegFor(rn);
if (imm == 0) {
csel(rd, rn, zr, cond);
} else {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, operand.immediate());
+ Mov(temp, imm);
csel(rd, rn, temp, cond);
}
} else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
}
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+ int64_t imm) {
+ unsigned n, imm_s, imm_r;
+ int reg_size = dst.SizeInBits();
+ if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't write
+ // to the stack pointer.
+ movz(dst, imm);
+ return true;
+ } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move not instruction. Movn can't write
+ // to the stack pointer.
+ movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ return true;
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
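+ // Such immediates are repeating bit patterns, e.g. 0x00ff00ff00ff00ff.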
+ LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
+ return true;
+ }
+ return false;
+}
+
+
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+ int64_t imm) {
+ int reg_size = dst.SizeInBits();
+
+ // Encode the immediate in a single move instruction, if possible.
+ if (TryOneInstrMoveImmediate(dst, imm)) {
+ // The move was successful; nothing to do here.
+ } else {
+ // Pre-shift the immediate to the least-significant bits of the register.
+ int shift_low = CountTrailingZeros(imm, reg_size);
+ int64_t imm_low = imm >> shift_low;
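+ // For example, 0x0000000012340000 gives shift_low = 16 and imm_low = 0x1234,
+ // which movz can encode; the LSL #16 on the returned operand restores the
+ // original value.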
+
+ // Pre-shift the immediate to the most-significant bits of the register. We
+ // insert set bits in the least-significant bits, as this creates a
+ // different immediate that may be encodable using movn or orr-immediate.
+ // If this new immediate is encodable, the set bits will be eliminated by
+ // the post shift on the following instruction.
+ int shift_high = CountLeadingZeros(imm, reg_size);
+ int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
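+ // For example, 0x00007fffffffffff gives shift_high = 17 and imm_high =
+ // 0xffffffffffffffff, which movn can encode; the LSR #17 on the returned
+ // operand restores the original value.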
+
+ if (TryOneInstrMoveImmediate(dst, imm_low)) {
+ // The new immediate has been moved into the destination's low bits:
+ // return a new leftward-shifting operand.
+ return Operand(dst, LSL, shift_low);
+ } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
+ // The new immediate has been moved into the destination's high bits:
+ // return a new rightward-shifting operand.
+ return Operand(dst, LSR, shift_high);
+ } else {
+ // Use the generic move operation to set up the immediate.
+ Mov(dst, imm);
+ }
+ }
+ return Operand(dst);
+}
+
+
void MacroAssembler::AddSubMacro(const Register& rd,
const Register& rn,
const Operand& operand,
FlagsUpdate S,
AddSubOp op) {
if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
- !operand.NeedsRelocation(isolate()) && (S == LeaveFlags)) {
+ !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
// The instruction would be a nop. Avoid generating useless code.
return;
}
- if (operand.NeedsRelocation(isolate())) {
+ if (operand.NeedsRelocation(this)) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
AddSubMacro(rd, rn, temp, S, op);
- } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
- (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ } else if ((operand.IsImmediate() &&
+ !IsImmAddSub(operand.ImmediateValue())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
(operand.IsShiftedRegister() && (operand.shift() == ROR))) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, operand);
- AddSub(rd, rn, temp, S, op);
+ if (operand.IsImmediate()) {
+ Operand imm_operand =
+ MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
+ AddSub(rd, rn, imm_operand, S, op);
+ } else {
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ }
} else {
AddSub(rd, rn, operand, S, op);
}
const Operand& operand,
FlagsUpdate S,
AddSubWithCarryOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
UseScratchRegisterScope temps(this);
- if (operand.NeedsRelocation(isolate())) {
+ if (operand.NeedsRelocation(this)) {
Register temp = temps.AcquireX();
- LoadRelocated(temp, operand);
+ Ldr(temp, operand.immediate());
AddSubWithCarryMacro(rd, rn, temp, S, op);
} else if (operand.IsImmediate() ||
} else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
// Add/sub with carry (shifted register).
- ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
- ASSERT(operand.shift() != ROR);
- ASSERT(is_uintn(operand.shift_amount(),
+ DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
+ DCHECK(operand.shift() != ROR);
+ DCHECK(is_uintn(operand.shift_amount(),
rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
: kWRegSizeInBitsLog2));
Register temp = temps.AcquireSameSizeAs(rn);
} else if (operand.IsExtendedRegister()) {
// Add/sub with carry (extended register).
- ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
// Add/sub extended supports a shift <= 4. We want to support exactly the
// same modes.
- ASSERT(operand.shift_amount() <= 4);
- ASSERT(operand.reg().Is64Bits() ||
+ DCHECK(operand.shift_amount() <= 4);
+ DCHECK(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
Register temp = temps.AcquireSameSizeAs(rn);
EmitExtendShift(temp, operand.reg(), operand.extend(),
}
}
+void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // TODO(all): Should we support register offset for load-store-pair?
+ DCHECK(!addr.IsRegisterOffset());
+
+ int64_t offset = addr.offset();
+ LSDataSize size = CalcLSPairDataSize(op);
+
+ // Check if the offset fits in the immediate field of the appropriate
+ // instruction. If not, emit two instructions to perform the operation.
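+ // (For X-register pairs the immediate is a scaled signed 7-bit field, giving
+ // offsets in [-512, +504] in multiples of 8.)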
+ if (IsImmLSPair(offset, size)) {
+ // Encodable in one load/store pair instruction.
+ LoadStorePair(rt, rt2, addr, op);
+ } else {
+ Register base = addr.base();
+ if (addr.IsImmediateOffset()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(base);
+ Add(temp, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(temp), op);
+ } else if (addr.IsPostIndex()) {
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ Add(base, base, offset);
+ } else {
+ DCHECK(addr.IsPreIndex());
+ Add(base, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ }
+ }
+}
+
void MacroAssembler::Load(const Register& rt,
const MemOperand& addr,
Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
Ldrsb(rt, addr);
} else if (r.IsInteger32()) {
Ldr(rt.W(), addr);
} else {
- ASSERT(rt.Is64Bits());
+ DCHECK(rt.Is64Bits());
Ldr(rt, addr);
}
}
void MacroAssembler::Store(const Register& rt,
const MemOperand& addr,
Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
Strb(rt, addr);
} else if (r.IsInteger32()) {
Str(rt.W(), addr);
} else {
- ASSERT(rt.Is64Bits());
+ DCHECK(rt.Is64Bits());
+ if (r.IsHeapObject()) {
+ AssertNotSmi(rt);
+ } else if (r.IsSmi()) {
+ AssertSmi(rt);
+ }
Str(rt, addr);
}
}
void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
if (hint == kAdrNear) {
adr(rd, label);
return;
}
- ASSERT(hint == kAdrFar);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- ASSERT(!AreAliased(rd, scratch));
-
+ DCHECK(hint == kAdrFar);
if (label->is_bound()) {
int label_offset = label->pos() - pc_offset();
if (Instruction::IsValidPCRelOffset(label_offset)) {
adr(rd, label);
} else {
- ASSERT(label_offset <= 0);
+ DCHECK(label_offset <= 0);
int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
adr(rd, min_adr_offset);
Add(rd, rd, label_offset - min_adr_offset);
}
} else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+
InstructionAccurateScope scope(
this, PatchingAssembler::kAdrFarPatchableNInstrs);
adr(rd, label);
nop(ADR_FAR_NOP);
}
movz(scratch, 0);
- add(rd, rd, scratch);
}
}
void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
- ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
+ DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
(bit == -1 || type >= kBranchTypeFirstUsingBit));
if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
B(static_cast<Condition>(type), label);
void MacroAssembler::B(Label* label, Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK((cond != al) && (cond != nv));
Label done;
bool need_extra_instructions =
NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
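+ // A conditional branch (b.cond) only has a +/-1MB range. If the label might
+ // be out of range, invert the condition and branch over an unconditional B,
+ // which has a +/-128MB range.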
if (need_extra_instructions) {
- b(&done, InvertCondition(cond));
+ b(&done, NegateCondition(cond));
B(label);
} else {
b(label, cond);
void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
void MacroAssembler::Cbnz(const Register& rt, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
void MacroAssembler::Cbz(const Register& rt, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
void MacroAssembler::Abs(const Register& rd, const Register& rm,
Label* is_not_representable,
Label* is_representable) {
- ASSERT(allow_macro_instructions_);
- ASSERT(AreSameSizeAndType(rd, rm));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(AreSameSizeAndType(rd, rm));
Cmp(rm, 1);
Cneg(rd, rm, lt);
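+ // Cmp(rm, 1) sets lt exactly when rm <= 0, so Cneg negates non-positive
+ // inputs. The comparison overflows (setting the V flag) only for the most
+ // negative input, whose absolute value is not representable.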
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3) {
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
int size = src0.SizeInBytes();
- PrepareForPush(count, size);
+ PushPreamble(count, size);
PushHelper(count, size, src0, src1, src2, src3);
}
const CPURegister& src2, const CPURegister& src3,
const CPURegister& src4, const CPURegister& src5,
const CPURegister& src6, const CPURegister& src7) {
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
+ DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
int size = src0.SizeInBytes();
- PrepareForPush(count, size);
+ PushPreamble(count, size);
PushHelper(4, size, src0, src1, src2, src3);
PushHelper(count - 4, size, src4, src5, src6, src7);
}
const CPURegister& dst2, const CPURegister& dst3) {
// It is not valid to pop into the same register more than once in one
// instruction, not even into the zero register.
- ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
- ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- ASSERT(dst0.IsValid());
+ DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
+ DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ DCHECK(dst0.IsValid());
int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
int size = dst0.SizeInBytes();
- PrepareForPop(count, size);
PopHelper(count, size, dst0, dst1, dst2, dst3);
+ PopPostamble(count, size);
+}
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
- }
+
+void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
+ int size = src0.SizeInBytes() + src1.SizeInBytes();
+
+ PushPreamble(size);
+ // Reserve room for src0 and push src1.
+ str(src1, MemOperand(StackPointer(), -size, PreIndex));
+ // Fill the gap with src0.
+ str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
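+ // src0 ends up at the higher address, matching Push(src0), then Push(src1).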
}
-void MacroAssembler::PushPopQueue::PushQueued() {
+void MacroAssembler::PushPopQueue::PushQueued(
+ PreambleDirective preamble_directive) {
if (queued_.empty()) return;
- masm_->PrepareForPush(size_);
+ if (preamble_directive == WITH_PREAMBLE) {
+ masm_->PushPreamble(size_);
+ }
int count = queued_.size();
int index = 0;
void MacroAssembler::PushPopQueue::PopQueued() {
if (queued_.empty()) return;
- masm_->PrepareForPop(size_);
-
int count = queued_.size();
int index = 0;
while (index < count) {
batch[0], batch[1], batch[2], batch[3]);
}
+ masm_->PopPostamble(size_);
queued_.clear();
}
void MacroAssembler::PushCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
- PrepareForPush(registers.Count(), size);
+ PushPreamble(registers.Count(), size);
// Push up to four registers at a time because if the current stack pointer is
// csp and reg_size is 32, registers must be pushed in blocks of four in order
// to maintain the 16-byte alignment for csp.
void MacroAssembler::PopCPURegList(CPURegList registers) {
int size = registers.RegisterSizeInBytes();
- PrepareForPop(registers.Count(), size);
// Pop up to four registers at a time because if the current stack pointer is
// csp and reg_size is 32, registers must be popped in blocks of four in
// order to maintain the 16-byte alignment for csp.
int count = count_before - registers.Count();
PopHelper(count, size, dst0, dst1, dst2, dst3);
}
-
- if (!csp.Is(StackPointer()) && emit_debug_code()) {
- // It is safe to leave csp where it is when unwinding the JavaScript stack,
- // but if we keep it matching StackPointer, the simulator can detect memory
- // accesses in the now-free part of the stack.
- Mov(csp, StackPointer());
- }
+ PopPostamble(registers.Count(), size);
}
void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
int size = src.SizeInBytes();
- PrepareForPush(count, size);
+ PushPreamble(count, size);
if (FLAG_optimize_for_size && count > 8) {
UseScratchRegisterScope temps(this);
PushHelper(1, size, src, NoReg, NoReg, NoReg);
count -= 1;
}
- ASSERT(count == 0);
+ DCHECK(count == 0);
}
void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
- PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
+ PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(count);
// Ensure that we don't unintentionally modify scratch or debug registers.
InstructionAccurateScope scope(this);
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
- ASSERT(size == src0.SizeInBytes());
+ DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
+ DCHECK(size == src0.SizeInBytes());
// When pushing multiple registers, the store order is chosen such that
// Push(a, b) is equivalent to Push(a) followed by Push(b).
switch (count) {
case 1:
- ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
break;
case 2:
- ASSERT(src2.IsNone() && src3.IsNone());
+ DCHECK(src2.IsNone() && src3.IsNone());
stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
break;
case 3:
- ASSERT(src3.IsNone());
+ DCHECK(src3.IsNone());
stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
str(src0, MemOperand(StackPointer(), 2 * size));
break;
// Ensure that we don't unintentionally modify scratch or debug registers.
InstructionAccurateScope scope(this);
- ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- ASSERT(size == dst0.SizeInBytes());
+ DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ DCHECK(size == dst0.SizeInBytes());
// When popping multiple registers, the load order is chosen such that
// Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
switch (count) {
case 1:
- ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
break;
case 2:
- ASSERT(dst2.IsNone() && dst3.IsNone());
+ DCHECK(dst2.IsNone() && dst3.IsNone());
ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
break;
case 3:
- ASSERT(dst3.IsNone());
+ DCHECK(dst3.IsNone());
ldr(dst2, MemOperand(StackPointer(), 2 * size));
ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
break;
}
-void MacroAssembler::PrepareForPush(Operand total_size) {
- // TODO(jbramley): This assertion generates too much code in some debug tests.
- // AssertStackConsistency();
+void MacroAssembler::PushPreamble(Operand total_size) {
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
- ASSERT((total_size.immediate() % 16) == 0);
+ DCHECK((total_size.ImmediateValue() % 16) == 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
}
-void MacroAssembler::PrepareForPop(Operand total_size) {
- AssertStackConsistency();
+void MacroAssembler::PopPostamble(Operand total_size) {
if (csp.Is(StackPointer())) {
// If the current stack pointer is csp, then it must be aligned to 16 bytes
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
- ASSERT((total_size.immediate() % 16) == 0);
+ DCHECK((total_size.ImmediateValue() % 16) == 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// well, and it will be caught by hardware (or the simulator) anyway.
+ } else if (emit_debug_code()) {
+ // It is safe to leave csp where it is when unwinding the JavaScript stack,
+ // but if we keep it matching StackPointer, the simulator can detect memory
+ // accesses in the now-free part of the stack.
+ SyncSystemStackPointer();
}
}
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
- ASSERT(offset.immediate() >= 0);
+ DCHECK(offset.ImmediateValue() >= 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
if (offset.IsImmediate()) {
- ASSERT(offset.immediate() >= 0);
+ DCHECK(offset.ImmediateValue() >= 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
void MacroAssembler::PokePair(const CPURegister& src1,
const CPURegister& src2,
int offset) {
- ASSERT(AreSameSizeAndType(src1, src2));
- ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
+ DCHECK(AreSameSizeAndType(src1, src2));
+ DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
Stp(src1, src2, MemOperand(StackPointer(), offset));
}
void MacroAssembler::PeekPair(const CPURegister& dst1,
const CPURegister& dst2,
int offset) {
- ASSERT(AreSameSizeAndType(dst1, dst2));
- ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
+ DCHECK(AreSameSizeAndType(dst1, dst2));
+ DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
}
// This method must not be called unless the current stack pointer is the
// system stack pointer (csp).
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, -2 * kXRegSize, PreIndex);
// This method must not be called unless the current stack pointer is the
// system stack pointer (csp).
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, 2 * kXRegSize, PostIndex);
void MacroAssembler::AssertStackConsistency() {
- if (emit_debug_code()) {
- if (csp.Is(StackPointer())) {
- // We can't check the alignment of csp without using a scratch register
- // (or clobbering the flags), but the processor (or simulator) will abort
- // if it is not properly aligned during a load.
+ // Avoid emitting code when !use_real_aborts() since non-real aborts cause too
+ // much code to be generated.
+ if (emit_debug_code() && use_real_aborts()) {
+ if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+ // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true. We
+ // can't check the alignment of csp without using a scratch register (or
+ // clobbering the flags), but the processor (or simulator) will abort if
+ // it is not properly aligned during a load.
ldr(xzr, MemOperand(csp, 0));
- } else if (FLAG_enable_slow_asserts) {
+ }
+ if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
Label ok;
// Check that csp <= StackPointer(), preserving all registers and NZCV.
sub(StackPointer(), csp, StackPointer());
cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
- Abort(kTheCurrentStackPointerIsBelowCsp);
+ // Avoid generating AssertStackConsistency checks for the Push in Abort.
+ { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
+ Abort(kTheCurrentStackPointerIsBelowCsp);
+ }
bind(&ok);
// Restore StackPointer().
}
+void MacroAssembler::AssertFPCRState(Register fpcr) {
+ if (emit_debug_code()) {
+ Label unexpected_mode, done;
+ UseScratchRegisterScope temps(this);
+ if (fpcr.IsNone()) {
+ fpcr = temps.AcquireX();
+ Mrs(fpcr, FPCR);
+ }
+
+ // Settings overridden by ConfigureFPCR():
+ // - Assert that default-NaN mode is set.
+ Tbz(fpcr, DN_offset, &unexpected_mode);
+
+ // Settings left to their default values:
+ // - Assert that flush-to-zero is not set.
+ Tbnz(fpcr, FZ_offset, &unexpected_mode);
+ // - Assert that the rounding mode is nearest-with-ties-to-even.
+ STATIC_ASSERT(FPTieEven == 0);
+ Tst(fpcr, RMode_mask);
+ B(eq, &done);
+
+ Bind(&unexpected_mode);
+ Abort(kUnexpectedFPCRMode);
+
+ Bind(&done);
+ }
+}
+
+
+void MacroAssembler::ConfigureFPCR() {
+ UseScratchRegisterScope temps(this);
+ Register fpcr = temps.AcquireX();
+ Mrs(fpcr, FPCR);
+
+ // If necessary, enable default-NaN mode. The default values of the other FPCR
+ // options should be suitable, and AssertFPCRState will verify that.
+ Label no_write_required;
+ Tbnz(fpcr, DN_offset, &no_write_required);
+
+ Orr(fpcr, fpcr, DN_mask);
+ Msr(FPCR, fpcr);
+
+ Bind(&no_write_required);
+ AssertFPCRState(fpcr);
+}
+
+
+void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
+ const FPRegister& src) {
+ AssertFPCRState();
+
+ // With DN=1 and RMode=FPTieEven, subtracting 0.0 preserves all inputs except
+ // for NaNs, which become the default NaN. We use fsub rather than fadd
+ // because sub preserves -0.0 inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
+ Fsub(dst, src, fp_zero);
+}
+
+
void MacroAssembler::LoadRoot(CPURegister destination,
Heap::RootListIndex index) {
// TODO(jbramley): Most root values are constants, and can be synthesized
void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- Ldrsw(dst, UntagSmiFieldMemOperand(map, Map::kBitField3Offset));
+ Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
And(dst, dst, Map::EnumLengthBits::kMask);
}
void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
- STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
- And(dst, dst, Smi::FromInt(Map::EnumLengthBits::kMask));
+ EnumLengthUntagged(dst, map);
+ SmiTag(dst, dst);
}
Register scratch2,
Register scratch3,
Label* call_runtime) {
- ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
+ DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
scratch3));
Register empty_fixed_array_value = scratch0;
Register scratch1,
Register scratch2) {
// Handler expects argument in x0.
- ASSERT(exception.Is(x0));
+ DCHECK(exception.Is(x0));
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
void MacroAssembler::InNewSpace(Register object,
Condition cond,
Label* branch) {
- ASSERT(cond == eq || cond == ne);
+ DCHECK(cond == eq || cond == ne);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
And(temp, object, ExternalReference::new_space_mask(isolate()));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The handler expects the exception in x0.
- ASSERT(value.Is(x0));
+ DCHECK(value.Is(x0));
// Drop the stack pointer to the top of the top handler.
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
Ldr(jssp, MemOperand(scratch1));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The handler expects the exception in x0.
- ASSERT(value.Is(x0));
+ DCHECK(value.Is(x0));
// Drop the stack pointer to the top of the top stack handler.
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
Ldr(jssp, MemOperand(scratch1));
}
-void MacroAssembler::Throw(BailoutReason reason) {
- Label throw_start;
- Bind(&throw_start);
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- RecordComment("Throw message: ");
- RecordComment((msg != NULL) ? msg : "UNKNOWN");
-#endif
-
- Mov(x0, Smi::FromInt(reason));
- Push(x0);
-
- // Disable stub call restrictions to always allow calls to throw.
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kHiddenThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kHiddenThrowMessage, 1);
- }
- // ThrowMessage should not return here.
- Unreachable();
-}
-
-
-void MacroAssembler::ThrowIf(Condition cond, BailoutReason reason) {
- Label ok;
- B(InvertCondition(cond), &ok);
- Throw(reason);
- Bind(&ok);
-}
-
-
-void MacroAssembler::ThrowIfSmi(const Register& value, BailoutReason reason) {
- Label ok;
- JumpIfNotSmi(value, &ok);
- Throw(reason);
- Bind(&ok);
-}
-
-
-void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
- ASSERT(smi.Is64Bits());
- Abs(smi, smi, slow);
-}
-
-
void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
if (emit_debug_code()) {
STATIC_ASSERT(kSmiTag == 0);
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
// Check that the number of arguments matches what the function expects.
// If f->nargs is -1, the function can accept a variable number of arguments.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- // Illegal operation: drop the stack arguments and return undefined.
- if (num_arguments > 0) {
- Drop(num_arguments);
- }
- LoadRoot(x0, Heap::kUndefinedValueRootIndex);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// Place the necessary arguments.
Mov(x0, num_arguments);
ExternalReference::handle_scope_level_address(isolate()),
next_address);
- ASSERT(function_address.is(x1) || function_address.is(x2));
+ DCHECK(function_address.is(x1) || function_address.is(x2));
Label profiler_disabled;
Label end_profiler_check;
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
ExternalReference(
- Runtime::kHiddenPromoteScheduledException, isolate()), 0);
+ Runtime::kPromoteScheduledException, isolate()), 0);
}
B(&exception_handled);
void MacroAssembler::GetBuiltinEntry(Register target,
Register function,
Builtins::JavaScript id) {
- ASSERT(!AreAliased(target, function));
+ DCHECK(!AreAliased(target, function));
GetBuiltinFunction(function, id);
// Load the code entry point from the builtins object.
Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
const CallWrapper& call_wrapper) {
ASM_LOCATION("MacroAssembler::InvokeBuiltin");
// You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Get the builtin entry in x2 and setup the function object in x1.
GetBuiltinEntry(x2, x1, id);
Call(x2);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
+ DCHECK(flag == JUMP_FUNCTION);
Jump(x2);
}
}
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2) {
- ASSERT(!AreAliased(string, length, scratch1, scratch2));
+ DCHECK(!AreAliased(string, length, scratch1, scratch2));
LoadRoot(scratch2, map_index);
SmiTag(scratch1, length);
Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
- return OS::ActivationFrameAlignment();
+ return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_ARM64
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
void MacroAssembler::CallCFunction(Register function,
int num_of_reg_args,
int num_of_double_args) {
- ASSERT(has_frame());
+ DCHECK(has_frame());
// We can pass 8 integer arguments in registers. If we need to pass more than
// that, we'll need to implement support for passing them on the stack.
- ASSERT(num_of_reg_args <= 8);
+ DCHECK(num_of_reg_args <= 8);
// If we're passing doubles, we're limited to the following prototypes
// (defined by ExternalReference::Type):
// BUILTIN_FP_CALL: double f(double)
// BUILTIN_FP_INT_CALL: double f(double, int)
if (num_of_double_args > 0) {
- ASSERT(num_of_reg_args <= 1);
- ASSERT((num_of_double_args + num_of_reg_args) <= 2);
+ DCHECK(num_of_reg_args <= 1);
+ DCHECK((num_of_double_args + num_of_reg_args) <= 2);
}
int sp_alignment = ActivationFrameAlignment();
// The ABI mandates at least 16-byte alignment.
- ASSERT(sp_alignment >= 16);
- ASSERT(IsPowerOf2(sp_alignment));
+ DCHECK(sp_alignment >= 16);
+ DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
// The current stack pointer is a callee saved register, and is preserved
// across the call.
- ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
+ DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
// Align and synchronize the system stack pointer with jssp.
Bic(csp, old_stack_pointer, sp_alignment - 1);
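+ // Bic clears the bits set in (sp_alignment - 1), rounding csp down to an
+ // sp_alignment boundary.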
// where we only pushed one W register on top of an aligned jssp.
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- ASSERT(ActivationFrameAlignment() == 16);
+ DCHECK(ActivationFrameAlignment() == 16);
Sub(temp, csp, old_stack_pointer);
// We want temp <= 0 && temp >= -12.
Cmp(temp, 0);
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode);
}
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
}
positions_recorder()->WriteRecordedPositions();
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
+ DCHECK(rmode != RelocInfo::NONE32);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the upper 16 bits must be 0.
- ASSERT(((imm >> 48) & 0xffff) == 0);
+ DCHECK(((imm >> 48) & 0xffff) == 0);
movz(temp, (imm >> 0) & 0xffff, 0);
movk(temp, (imm >> 16) & 0xffff, 16);
movk(temp, (imm >> 32) & 0xffff, 32);
} else {
- LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
+ Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
}
Blr(temp);
#ifdef DEBUG
USE(target);
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
+ DCHECK(rmode != RelocInfo::NONE32);
if (rmode == RelocInfo::NONE64) {
return kCallSizeWithoutRelocation;
USE(ast_id);
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
+ DCHECK(rmode != RelocInfo::NONE32);
if (rmode == RelocInfo::NONE64) {
return kCallSizeWithoutRelocation;
}
+void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
+ SmiCheckType smi_check_type) {
+ Label on_not_heap_number;
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(object, &on_not_heap_number);
+ }
-
-void MacroAssembler::JumpForHeapNumber(Register object,
- Register heap_number_map,
- Label* on_heap_number,
- Label* on_not_heap_number) {
- ASSERT(on_heap_number || on_not_heap_number);
AssertNotSmi(object);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
-
- // Load the HeapNumber map if it is not passed.
- if (heap_number_map.Is(NoReg)) {
- heap_number_map = temps.AcquireX();
- LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- } else {
- AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- }
-
- ASSERT(!AreAliased(temp, heap_number_map));
-
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
- Cmp(temp, heap_number_map);
-
- if (on_heap_number) {
- B(eq, on_heap_number);
- }
- if (on_not_heap_number) {
- B(ne, on_not_heap_number);
- }
-}
+ JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
-
-void MacroAssembler::JumpIfHeapNumber(Register object,
- Label* on_heap_number,
- Register heap_number_map) {
- JumpForHeapNumber(object,
- heap_number_map,
- on_heap_number,
- NULL);
+ Bind(&on_not_heap_number);
}
void MacroAssembler::JumpIfNotHeapNumber(Register object,
Label* on_not_heap_number,
- Register heap_number_map) {
- JumpForHeapNumber(object,
- heap_number_map,
- NULL,
- on_not_heap_number);
+ SmiCheckType smi_check_type) {
+ if (smi_check_type == DO_SMI_CHECK) {
+ JumpIfSmi(object, on_not_heap_number);
+ }
+
+ AssertNotSmi(object);
+
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
}
Register scratch2,
Register scratch3,
Label* not_found) {
- ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3));
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
Label load_result_from_cache;
JumpIfSmi(object, &is_smi);
- CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
- DONT_DO_SMI_CHECK);
+ JumpIfNotHeapNumber(object, not_found);
STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
}
+void MacroAssembler::JumpIfMinusZero(Register input,
+ Label* on_negative_zero) {
+ DCHECK(input.Is64Bits());
+ // Floating point value is in an integer register. Detect -0.0 by subtracting
+ // 1 (cmp), which will cause overflow.
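+ // The bit pattern of -0.0 is 0x8000000000000000 (INT64_MIN), the only value
+ // for which subtracting 1 sets the V flag.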
+ Cmp(input, 1);
+ B(vs, on_negative_zero);
+}
+
+
void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
// Clamp the value to [0..255].
Cmp(input.W(), Operand(input.W(), UXTB));
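+ // A value already in [0, 255] is equal to its own zero-extended low byte, so
+ // the flags from this comparison indicate whether any clamping is needed.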
Register scratch5) {
// Untag src and dst into scratch registers.
// Copy src->dst in a tight loop.
- ASSERT(!AreAliased(dst, src,
+ DCHECK(!AreAliased(dst, src,
scratch1, scratch2, scratch3, scratch4, scratch5));
- ASSERT(count >= 2);
+ DCHECK(count >= 2);
const Register& remaining = scratch3;
Mov(remaining, count / 2);
Register scratch4) {
// Untag src and dst into scratch registers.
// Copy src->dst in an unrolled loop.
- ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
+ DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
const Register& dst_untagged = scratch1;
const Register& src_untagged = scratch2;
Register scratch3) {
// Untag src and dst into scratch registers.
// Copy src->dst in an unrolled loop.
- ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
const Register& dst_untagged = scratch1;
const Register& src_untagged = scratch2;
//
// In both cases, fields are copied in pairs if possible, and left-overs are
// handled separately.
- ASSERT(!AreAliased(dst, src));
- ASSERT(!temps.IncludesAliasOf(dst));
- ASSERT(!temps.IncludesAliasOf(src));
- ASSERT(!temps.IncludesAliasOf(xzr));
+ DCHECK(!AreAliased(dst, src));
+ DCHECK(!temps.IncludesAliasOf(dst));
+ DCHECK(!temps.IncludesAliasOf(src));
+ DCHECK(!temps.IncludesAliasOf(xzr));
if (emit_debug_code()) {
Cmp(dst, src);
UseScratchRegisterScope temps(this);
Register tmp1 = temps.AcquireX();
Register tmp2 = temps.AcquireX();
- ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
- ASSERT(!AreAliased(src, dst, csp));
+ DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
+ DCHECK(!AreAliased(src, dst, csp));
if (emit_debug_code()) {
// Check copy length.
void MacroAssembler::FillFields(Register dst,
Register field_count,
Register filler) {
- ASSERT(!dst.Is(csp));
+ DCHECK(!dst.Is(csp));
UseScratchRegisterScope temps(this);
Register field_ptr = temps.AcquireX();
Register counter = temps.AcquireX();
}
-void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
- Label* failure,
- SmiCheckType smi_check) {
-
+void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
+ Register first, Register second, Register scratch1, Register scratch2,
+ Label* failure, SmiCheckType smi_check) {
if (smi_check == DO_SMI_CHECK) {
JumpIfEitherSmi(first, second, failure);
} else if (emit_debug_code()) {
- ASSERT(smi_check == DONT_DO_SMI_CHECK);
+ DCHECK(smi_check == DONT_DO_SMI_CHECK);
Label not_smi;
JumpIfEitherSmi(first, second, NULL, ¬_smi);
Bind(¬_smi);
}
- // Test that both first and second are sequential ASCII strings.
+ // Test that both first and second are sequential one-byte strings.
Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
- JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
- scratch2,
- scratch1,
- scratch2,
- failure);
+ JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
+ scratch2, failure);
}
-void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- ASSERT(!AreAliased(scratch1, second));
- ASSERT(!AreAliased(scratch1, scratch2));
- static const int kFlatAsciiStringMask =
+ DCHECK(!AreAliased(scratch1, second));
+ DCHECK(!AreAliased(scratch1, scratch2));
+ static const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- And(scratch1, first, kFlatAsciiStringMask);
- And(scratch2, second, kFlatAsciiStringMask);
- Cmp(scratch1, kFlatAsciiStringTag);
- Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
+ And(scratch1, first, kFlatOneByteStringMask);
+ And(scratch2, second, kFlatOneByteStringMask);
+ Cmp(scratch1, kFlatOneByteStringTag);
+ Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
B(ne, failure);
}
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
- Register scratch,
- Label* failure) {
- const int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+ Register scratch,
+ Label* failure) {
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch, type, kFlatAsciiStringMask);
- Cmp(scratch, kFlatAsciiStringTag);
+ And(scratch, type, kFlatOneByteStringMask);
+ Cmp(scratch, kFlatOneByteStringTag);
B(ne, failure);
}
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
- Register first,
- Register second,
- Register scratch1,
- Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+ Register first, Register second, Register scratch1, Register scratch2,
Label* failure) {
- ASSERT(!AreAliased(first, second, scratch1, scratch2));
- const int kFlatAsciiStringMask =
+ DCHECK(!AreAliased(first, second, scratch1, scratch2));
+ const int kFlatOneByteStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
- const int kFlatAsciiStringTag =
+ const int kFlatOneByteStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
- And(scratch1, first, kFlatAsciiStringMask);
- And(scratch2, second, kFlatAsciiStringMask);
- Cmp(scratch1, kFlatAsciiStringTag);
- Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+ And(scratch1, first, kFlatOneByteStringMask);
+ And(scratch2, second, kFlatOneByteStringMask);
+ Cmp(scratch1, kFlatOneByteStringTag);
+ Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
B(ne, failure);
}
-void MacroAssembler::JumpIfNotUniqueName(Register type,
- Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
+ Label* not_unique_name) {
STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
// if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
// continue
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(x0));
- ASSERT(expected.is_immediate() || expected.reg().is(x2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
+ DCHECK(actual.is_immediate() || actual.reg().is(x0));
+ DCHECK(expected.is_immediate() || expected.reg().is(x2));
+ DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
+ DCHECK(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
Label done;
Call(code);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
+ DCHECK(flag == JUMP_FUNCTION);
Jump(code);
}
}
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
- ASSERT(function.is(x1));
+ DCHECK(function.is(x1));
Register expected_reg = x2;
Register code_reg = x3;
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
- ASSERT(function.Is(x1));
+ DCHECK(function.Is(x1));
Register code_reg = x3;
void MacroAssembler::TruncateDoubleToI(Register result,
DoubleRegister double_input) {
Label done;
- ASSERT(jssp.Is(StackPointer()));
// Try to convert the double to an int64. If successful, the bottom 32 bits
// contain our truncated int32 result.
TryConvertDoubleToInt64(result, double_input, &done);
+ const Register old_stack_pointer = StackPointer();
+ if (csp.Is(old_stack_pointer)) {
+ // This currently only happens during compiler-unittest. If it arises
+ // during regular code generation the DoubleToI stub should be updated to
+ // cope with csp and have an extra parameter indicating which stack pointer
+ // it should use.
+ Push(jssp, xzr); // Push xzr to maintain csp required 16-byte alignment.
+ Mov(jssp, csp);
+ SetStackPointer(jssp);
+ }
+
// If we fell through then inline version didn't succeed - call stub instead.
- Push(lr);
- Push(double_input); // Put input on stack.
+ Push(lr, double_input);
DoubleToIStub stub(isolate(),
jssp,
true); // skip_fastpath
CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
- Drop(1, kDoubleSize); // Drop the double input on the stack.
- Pop(lr);
+ DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
+ Pop(xzr, lr); // xzr to drop the double input on the stack.
+
+ if (csp.Is(old_stack_pointer)) {
+ Mov(csp, jssp);
+ SetStackPointer(csp);
+ AssertStackConsistency();
+ Pop(xzr, jssp);
+ }
Bind(&done);
}
void MacroAssembler::TruncateHeapNumberToI(Register result,
Register object) {
Label done;
- ASSERT(!result.is(object));
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(!result.is(object));
+ DCHECK(jssp.Is(StackPointer()));
Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
}
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
- ASSERT(StackPointer().Is(jssp));
- UseScratchRegisterScope temps(this);
- Register temp = temps.AcquireX();
- __ Mov(temp, Smi::FromInt(StackFrame::STUB));
- // Compiled stubs don't age, and so they don't need the predictable code
- // ageing sequence.
- __ Push(lr, fp, cp, temp);
- __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+void MacroAssembler::StubPrologue() {
+ DCHECK(StackPointer().Is(jssp));
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ __ Mov(temp, Smi::FromInt(StackFrame::STUB));
+ // Compiled stubs don't age, and so they don't need the predictable code
+ // ageing sequence.
+ __ Push(lr, fp, cp, temp);
+ __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ if (code_pre_aging) {
+ Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+ __ EmitCodeAgeSequence(stub);
} else {
- if (isolate()->IsCodePreAgingActive()) {
- Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
- __ EmitCodeAgeSequence(stub);
- } else {
- __ EmitFrameSetupForCodeAgePatching();
- }
+ __ EmitFrameSetupForCodeAgePatching();
}
}
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+ bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on arm64.
+ UNREACHABLE();
+}
+
+
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
UseScratchRegisterScope temps(this);
Register type_reg = temps.AcquireX();
Register code_reg = temps.AcquireX();
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(jssp, fp);
// Read the registers from the stack without popping them. The stack pointer
// will be reset as part of the unwinding process.
CPURegList saved_fp_regs = kCallerSavedFP;
- ASSERT(saved_fp_regs.Count() % 2 == 0);
+ DCHECK(saved_fp_regs.Count() % 2 == 0);
int offset = ExitFrameConstants::kLastExitFrameField;
while (!saved_fp_regs.IsEmpty()) {
void MacroAssembler::EnterExitFrame(bool save_doubles,
const Register& scratch,
int extra_space) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Set up the new stack frame.
Mov(scratch, Operand(CodeObject()));
// Align and synchronize the system stack pointer with jssp.
AlignAndSetCSPForFrame();
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
bool restore_context) {
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
if (restore_doubles) {
ExitFrameRestoreFPRegs();
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- ASSERT(value != 0);
+ DCHECK(value != 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Mov(scratch2, ExternalReference(counter));
Ldr(scratch1, MemOperand(scratch2));
Mov(x0, 0);
Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(isolate(), 1);
- ASSERT(AllowThisStubCall(&ces));
+ DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Adjust this code if the asserts don't hold.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- ASSERT(Smi::FromInt(0) == 0);
+ DCHECK(Smi::FromInt(0) == 0);
Push(xzr, xzr, x11, x10);
} else {
Push(fp, cp, x11, x10);
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
UseScratchRegisterScope temps(this);
Register scratch3 = temps.AcquireX();
- ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
- ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+ DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
+ DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
- ASSERT(0 == (object_size & kObjectAlignmentMask));
+ DCHECK(0 == (object_size & kObjectAlignmentMask));
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDP.
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
+ DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register top_address = scratch1;
UseScratchRegisterScope temps(this);
Register scratch3 = temps.AcquireX();
- ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
- ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
+ DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
+ DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
scratch1.Is64Bits() && scratch2.Is64Bits());
// Check relative positions of allocation top and limit addresses.
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
+ DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register top_address = scratch1;
Register scratch2,
Register scratch3,
Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
}
-void MacroAssembler::AllocateAsciiString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Register scratch3,
- Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+ Register scratch1, Register scratch2,
+ Register scratch3,
+ Label* gc_required) {
+ DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
Bic(scratch1, scratch1, kObjectAlignmentMask);
- // Allocate ASCII string in new space.
+ // Allocate one-byte string in new space.
Allocate(scratch1,
result,
scratch2,
TAG_OBJECT);
// Set the map, length and hash field.
- InitializeNewString(result,
- length,
- Heap::kAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
}
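
// The Add/Bic pair above is the usual align-up idiom; in C++ terms, roughly
// (a sketch, with a char size of one byte for one-byte strings):
//   size = (length + SeqOneByteString::kHeaderSize + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask;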
-void MacroAssembler::AllocateAsciiConsString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
- Mov(scratch1, high_promotion_mode);
- Ldr(scratch1, MemOperand(scratch1));
- Cbz(scratch1, &allocate_new_space);
-
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
-
- B(&install_map);
-
- Bind(&allocate_new_space);
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
- flags);
-
- Bind(&install_map);
+ TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kConsAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
Register scratch1,
Register scratch2,
Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
}
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
- Register length,
- Register scratch1,
- Register scratch2,
- Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2));
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+ Register length,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+ DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
- InitializeNewString(result,
- length,
- Heap::kSlicedAsciiStringMapRootIndex,
- scratch1,
- scratch2);
+ InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+ scratch1, scratch2);
}
Register scratch1,
Register scratch2,
CPURegister value,
- CPURegister heap_number_map) {
- ASSERT(!value.IsValid() || value.Is64Bits());
+ CPURegister heap_number_map,
+ MutableMode mode) {
+ DCHECK(!value.IsValid() || value.Is64Bits());
UseScratchRegisterScope temps(this);
// Allocate an object in the heap for the heap number and tag it as a heap
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
+ Heap::RootListIndex map_index = mode == MUTABLE
+ ? Heap::kMutableHeapNumberMapRootIndex
+ : Heap::kHeapNumberMapRootIndex;
+
// Prepare the heap number map.
if (!heap_number_map.IsValid()) {
// If we have a valid value register, use the same type of register to store
} else {
heap_number_map = scratch1;
}
- LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ LoadRoot(heap_number_map, map_index);
}
if (emit_debug_code()) {
Register map;
} else {
map = Register(heap_number_map);
}
- AssertRegisterIsRoot(map, Heap::kHeapNumberMapRootIndex);
+ AssertRegisterIsRoot(map, map_index);
}
// Store the heap number map and the value in the allocated object.
}
-void MacroAssembler::CompareMap(Register obj,
- Register scratch,
- Handle<Map> map) {
+void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register obj_map = temps.AcquireX();
+ Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
+ CompareRoot(obj_map, index);
+}
+
+
+void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
+ Handle<Map> map) {
Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
CompareMap(scratch, map);
}
JumpIfSmi(obj, fail);
}
- CompareMap(obj, scratch, map);
+ CompareObjectMap(obj, scratch, map);
B(ne, fail);
}
// Load the map's "bit field 2".
__ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
- __ Ubfx(result, result, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ DecodeField<Map::ElementsKindBits>(result);
}
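
// DecodeField<Map::ElementsKindBits> extracts the bit field in place; in
// BitField terms it is roughly (a sketch):
//   result = (result >> ElementsKindBits::kShift) &
//            ((1 << ElementsKindBits::kSize) - 1);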
Register scratch,
Label* miss,
BoundFunctionAction action) {
- ASSERT(!AreAliased(function, result, scratch));
+ DCHECK(!AreAliased(function, result, scratch));
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
+ Label non_instance;
+ if (action == kMissOnBoundFunction) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
- // Check that the function really is a function. Load map into result reg.
- JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
+ // Check that the function really is a function. Load map into result reg.
+ JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
- if (action == kMissOnBoundFunction) {
Register scratch_w = scratch.W();
Ldr(scratch,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
Ldr(scratch_w,
FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
- }
- // Make sure that the function has an instance prototype.
- Label non_instance;
- Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+ // Make sure that the function has an instance prototype.
+ Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+ }
// Get the prototype or initial map from the function.
Ldr(result,
// Get the prototype from the initial map.
Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- B(&done);
- // Non-instance prototype: fetch prototype from constructor field in initial
- // map.
- Bind(&non_instance);
- Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ if (action == kMissOnBoundFunction) {
+ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ Bind(&non_instance);
+ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ }
// All done.
Bind(&done);
Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- ASSERT(!AreAliased(obj, temp));
+ DCHECK(!AreAliased(obj, temp));
LoadRoot(temp, index);
Cmp(obj, temp);
}
} else if (if_false == fall_through) {
CompareAndBranch(lhs, rhs, cond, if_true);
} else if (if_true == fall_through) {
- CompareAndBranch(lhs, rhs, InvertCondition(cond), if_false);
+ CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
} else {
CompareAndBranch(lhs, rhs, cond, if_true);
B(if_false);
Register elements_reg,
Register scratch1,
FPRegister fpscratch1,
- FPRegister fpscratch2,
Label* fail,
int elements_offset) {
- ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label store_num;
// Speculatively convert the smi to a double - all smis can be exactly
JumpIfSmi(value_reg, &store_num);
// Ensure that the object is a heap number.
- CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
- fail, DONT_DO_SMI_CHECK);
+ JumpIfNotHeapNumber(value_reg, fail);
Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
- Fmov(fpscratch2, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- // Check for NaN by comparing the number to itself: NaN comparison will
- // report unordered, indicated by the overflow flag being set.
- Fcmp(fpscratch1, fpscratch1);
- Fcsel(fpscratch1, fpscratch2, fpscratch1, vs);
+ // Canonicalize NaNs.
+ CanonicalizeNaN(fpscratch1);
// Store the result.
Bind(&store_num);
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it do not
// conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. kArrayIndexValueMask has zeros in
- // the low kHashShift bits.
- STATIC_ASSERT(kSmiTag == 0);
- Ubfx(hash, hash, String::kHashShift, String::kArrayIndexValueBits);
- SmiTag(index, hash);
+ DecodeField<String::ArrayIndexValueBits>(index, hash);
+ SmiTag(index, index);
}
SeqStringSetCharCheckIndexType index_type,
Register scratch,
uint32_t encoding_mask) {
- ASSERT(!AreAliased(string, index, scratch));
+ DCHECK(!AreAliased(string, index, scratch));
if (index_type == kIndexIsSmi) {
AssertSmi(index);
Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
Check(lt, kIndexIsTooLarge);
- ASSERT_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(0, Smi::FromInt(0));
Cmp(index, 0);
Check(ge, kIndexIsNegative);
}
Register scratch1,
Register scratch2,
Label* miss) {
- ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+ DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
Label same_contexts;
// Load current lexical context from the stack frame.
// Compute the hash code from the untagged key. This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc
void MacroAssembler::GetNumberHash(Register key, Register scratch) {
- ASSERT(!AreAliased(key, scratch));
+ DCHECK(!AreAliased(key, scratch));
// Xor original key with a seed.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
Register scratch1,
Register scratch2,
Register scratch3) {
- ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
Label done;
And(scratch2, scratch2, scratch1);
// Scale the index by multiplying by the element size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
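// The shift-add computes scratch2 * 3 (scratch2 + (scratch2 << 1)), matching
// kEntrySize above.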
// Check if the key is identical to the name.
Register scratch1,
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
- ASSERT(!AreAliased(object, address, scratch1));
+ DCHECK(!AreAliased(object, address, scratch1));
Label done, store_buffer_overflow;
if (emit_debug_code()) {
Label ok;
Str(scratch1, MemOperand(scratch2));
// Call stub on end of buffer.
// Check for end of buffer.
- ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
+ DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
(1 << (14 + kPointerSizeLog2)));
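// With kPointerSizeLog2 == 3 on arm64, the bit tested below is therefore
// bit 17 of the store buffer top address.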
if (and_then == kFallThroughAtEnd) {
Tbz(scratch1, (14 + kPointerSizeLog2), &done);
} else {
- ASSERT(and_then == kReturnAtEnd);
+ DCHECK(and_then == kReturnAtEnd);
Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
Ret();
}
Bind(&store_buffer_overflow);
Push(lr);
- StoreBufferOverflowStub store_buffer_overflow_stub =
- StoreBufferOverflowStub(isolate(), fp_mode);
+ StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
CallStub(&store_buffer_overflow_stub);
Pop(lr);
// Safepoints expect a block of kNumSafepointRegisters values on the stack, so
// adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ASSERT(num_unsaved >= 0);
+ DCHECK(num_unsaved >= 0);
Claim(num_unsaved);
PushXRegList(kSafepointSavedRegisters);
}
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// Make sure the safepoint registers list is what we expect.
- ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
+ DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
// Safepoint registers are stored contiguously on the stack, but not all the
// registers are saved. The following registers are excluded:
LinkRegisterStatus lr_status,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
// Although the object register is tagged, the offset is relative to the start
// of the object, so offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kPointerSize));
Add(scratch, object, offset - kHeapObjectTag);
if (emit_debug_code()) {
lr_status,
save_fp,
remembered_set_action,
- OMIT_SMI_CHECK);
+ OMIT_SMI_CHECK,
+ pointers_to_here_check_for_value);
Bind(&done);
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
- Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
+ Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
+ Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
+ }
+}
+
+
+// Will clobber: object, map, dst.
+// If lr_status is kLRHasBeenSaved, lr will also be clobbered.
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode) {
+ ASM_LOCATION("MacroAssembler::RecordWriteForMap");
+ DCHECK(!AreAliased(object, map));
+
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ CompareObjectMap(map, temp, isolate()->factory()->meta_map());
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ if (!FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+
+ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+ Cmp(temp, map);
+ Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ // A single check of the map's page's interesting flag suffices, since it
+ // is only set during incremental collection, and then it is guaranteed
+ // that the from object's page's interesting flag is also set. This
+ // optimization relies on the fact that maps can never be in new space.
+ CheckPageFlagClear(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ &done);
+
+ // Record the actual write.
+ if (lr_status == kLRHasNotBeenSaved) {
+ Push(lr);
+ }
+ Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+ if (lr_status == kLRHasNotBeenSaved) {
+ Pop(lr);
+ }
+
+ Bind(&done);
+
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
+ dst);
+
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
+ Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
}
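
// CheckPageFlagClear above is the usual page-header test: mask the low bits
// off the address to reach the MemoryChunk header, load its flags word and
// branch if the requested bits are clear. Roughly (a sketch; the field
// access is illustrative, not the emitted code):
//   chunk = addr & ~Page::kPageAlignmentMask;
//   if ((chunk->flags & mask) == 0) goto done;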
//
// The register 'object' contains a heap object pointer. The heap object tag is
// shifted away.
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- LinkRegisterStatus lr_status,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ LinkRegisterStatus lr_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
ASM_LOCATION("MacroAssembler::RecordWrite");
- ASSERT(!AreAliased(object, value));
+ DCHECK(!AreAliased(object, value));
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- // TODO(mstarzinger): Dynamic counter missing.
-
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
+ DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
- CheckPageFlagClear(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- &done);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlagClear(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ &done);
+ }
CheckPageFlagClear(object,
value, // Used as scratch.
MemoryChunk::kPointersFromHereAreInterestingMask,
Bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
+ value);
+
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
- Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
- Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+ Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
+ Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
}
}
if (emit_debug_code()) {
// The bit sequence is backward. The first character in the string
// represents the least significant bit.
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label color_is_valid;
Tbnz(reg, 0, &color_is_valid);
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register shift_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
- ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
+ DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
+ DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
// addr_reg is divided into fields:
// |63 page base 20|19 high 8|7 shift 3|2 0|
// 'high' gives the index of the cell holding color bits for the object.
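// Reading that diagram, the color lookup amounts to (a sketch):
//   cell_index = (addr >> 8) & 0xfff;   // 'high' field, bits 19..8
//   bit_shift  = (addr >> 3) & 0x1f;    // 'shift' field, bits 7..3
//   color      = (bitmap[cell_index] >> bit_shift) & 3;
// where 'bitmap' stands for the marking bitmap in the page header (callers
// load cells at MemoryChunk::kHeaderSize from the page base).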
int first_bit,
int second_bit) {
// See mark-compact.h for color definitions.
- ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
+ DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
GetMarkBits(object, bitmap_scratch, shift_scratch);
Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
// Check for the color.
if (first_bit == 0) {
// Checking for white.
- ASSERT(second_bit == 0);
+ DCHECK(second_bit == 0);
// We only need to test the first bit.
Tbz(bitmap_scratch, 0, has_color);
} else {
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
Mov(scratch, Operand(map));
- Ldrsw(scratch, UntagSmiFieldMemOperand(scratch, Map::kBitField3Offset));
+ Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
}
}
Register scratch0,
Register scratch1,
Label* on_black) {
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
}
Register scratch0,
Register scratch1,
Label* found) {
- ASSERT(!AreAliased(object, scratch0, scratch1));
+ DCHECK(!AreAliased(object, scratch0, scratch1));
Factory* factory = isolate()->factory();
Register current = scratch0;
Label loop_again;
Bind(&loop_again);
Ldr(current, FieldMemOperand(current, HeapObject::kMapOffset));
Ldrb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
- Ubfx(scratch1, scratch1, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ DecodeField<Map::ElementsKindBits>(scratch1);
CompareAndBranch(scratch1, DICTIONARY_ELEMENTS, eq, found);
Ldr(current, FieldMemOperand(current, Map::kPrototypeOffset));
CompareAndBranch(current, Operand(factory->null_value()), ne, &loop_again);
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
Register result) {
- ASSERT(!result.Is(ldr_location));
+ DCHECK(!result.Is(ldr_location));
const uint32_t kLdrLitOffset_lsb = 5;
const uint32_t kLdrLitOffset_width = 19;
Ldr(result, MemOperand(ldr_location));
Register load_scratch,
Register length_scratch,
Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(
+ DCHECK(!AreAliased(
value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
GetMarkBits(value, bitmap_scratch, shift_scratch);
Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
// Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = load_scratch;
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
// External strings are the only ones with the kExternalStringTag bit
// set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
Mov(length_scratch, ExternalString::kSize);
TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
- // Sequential string, either ASCII or UC16.
- // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+ // Sequential string, either Latin1 or UC16.
+ // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
String::kLengthOffset));
Tst(instance_type, kStringEncodingMask);
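// For example, a ten-character string occupies 10 payload bytes as Latin1
// but 20 as UC16, which is why the smi tag is left in place for UC16.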
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
- ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
-
- // Make sure that the macro assembler doesn't try to use any of our arguments
- // as scratch registers.
- ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
- ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
-
- // We cannot print the stack pointer because it is typically used to preserve
- // caller-saved registers (using other Printf variants which depend on this
- // helper).
- ASSERT(!AreAliased(arg0, StackPointer()));
- ASSERT(!AreAliased(arg1, StackPointer()));
- ASSERT(!AreAliased(arg2, StackPointer()));
- ASSERT(!AreAliased(arg3, StackPointer()));
-
- static const int kMaxArgCount = 4;
- // Assume that we have the maximum number of arguments until we know
- // otherwise.
- int arg_count = kMaxArgCount;
-
- // The provided arguments.
- CPURegister args[kMaxArgCount] = {arg0, arg1, arg2, arg3};
-
- // The PCS registers where the arguments need to end up.
- CPURegister pcs[kMaxArgCount] = {NoCPUReg, NoCPUReg, NoCPUReg, NoCPUReg};
-
- // Promote FP arguments to doubles, and integer arguments to X registers.
- // Note that FP and integer arguments cannot be mixed, but we'll check
- // AreSameSizeAndType once we've processed these promotions.
- for (int i = 0; i < kMaxArgCount; i++) {
+ DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
+
+ // The provided arguments, and their proper procedure-call standard registers.
+ CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
+ CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};
+
+ int arg_count = kPrintfMaxArgCount;
+
+ // The PCS varargs registers for printf. Note that x0 is used for the printf
+ // format string.
+ static const CPURegList kPCSVarargs =
+ CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
+ static const CPURegList kPCSVarargsFP =
+ CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);
+
+ // We can use caller-saved registers as scratch values, except for the
+ // arguments and the PCS registers where they might need to go.
+ CPURegList tmp_list = kCallerSaved;
+ tmp_list.Remove(x0); // Used to pass the format string.
+ tmp_list.Remove(kPCSVarargs);
+ tmp_list.Remove(arg0, arg1, arg2, arg3);
+
+ CPURegList fp_tmp_list = kCallerSavedFP;
+ fp_tmp_list.Remove(kPCSVarargsFP);
+ fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
+
+ // Override the MacroAssembler's scratch register list. The lists will be
+ // reset automatically at the end of the UseScratchRegisterScope.
+ UseScratchRegisterScope temps(this);
+ TmpList()->set_list(tmp_list.list());
+ FPTmpList()->set_list(fp_tmp_list.list());
+
+ // Copies of the printf vararg registers that we can pop from.
+ CPURegList pcs_varargs = kPCSVarargs;
+ CPURegList pcs_varargs_fp = kPCSVarargsFP;
+
+ // Place the arguments. There are lots of clever tricks and optimizations we
+ // could use here, but Printf is a debug tool so instead we just try to keep
+ // it simple: Move each input that isn't already in the right place to a
+ // scratch register, then move everything back.
+ for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
+ // Work out the proper PCS register for this argument.
if (args[i].IsRegister()) {
- // Note that we use x1 onwards, because x0 will hold the format string.
- pcs[i] = Register::XRegFromCode(i + 1);
- // For simplicity, we handle all integer arguments as X registers. An X
- // register argument takes the same space as a W register argument in the
- // PCS anyway. The only limitation is that we must explicitly clear the
- // top word for W register arguments as the callee will expect it to be
- // clear.
- if (!args[i].Is64Bits()) {
- const Register& as_x = args[i].X();
- And(as_x, as_x, 0x00000000ffffffff);
- args[i] = as_x;
- }
+ pcs[i] = pcs_varargs.PopLowestIndex().X();
+ // We might only need a W register here. We need to know the size of the
+ // argument so we can properly encode it for the simulator call.
+ if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
} else if (args[i].IsFPRegister()) {
- pcs[i] = FPRegister::DRegFromCode(i);
- // C and C++ varargs functions (such as printf) implicitly promote float
- // arguments to doubles.
- if (!args[i].Is64Bits()) {
- FPRegister s(args[i]);
- const FPRegister& as_d = args[i].D();
- Fcvt(as_d, s);
- args[i] = as_d;
- }
+ // In C, floats are always cast to doubles for varargs calls.
+ pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
} else {
- // This is the first empty (NoCPUReg) argument, so use it to set the
- // argument count and bail out.
+ DCHECK(args[i].IsNone());
arg_count = i;
break;
}
- }
- ASSERT((arg_count >= 0) && (arg_count <= kMaxArgCount));
- // Check that every remaining argument is NoCPUReg.
- for (int i = arg_count; i < kMaxArgCount; i++) {
- ASSERT(args[i].IsNone());
- }
- ASSERT((arg_count == 0) || AreSameSizeAndType(args[0], args[1],
- args[2], args[3],
- pcs[0], pcs[1],
- pcs[2], pcs[3]));
- // Move the arguments into the appropriate PCS registers.
- //
- // Arranging an arbitrary list of registers into x1-x4 (or d0-d3) is
- // surprisingly complicated.
- //
- // * For even numbers of registers, we push the arguments and then pop them
- // into their final registers. This maintains 16-byte stack alignment in
- // case csp is the stack pointer, since we're only handling X or D
- // registers at this point.
- //
- // * For odd numbers of registers, we push and pop all but one register in
- // the same way, but the left-over register is moved directly, since we
- // can always safely move one register without clobbering any source.
- if (arg_count >= 4) {
- Push(args[3], args[2], args[1], args[0]);
- } else if (arg_count >= 2) {
- Push(args[1], args[0]);
- }
-
- if ((arg_count % 2) != 0) {
- // Move the left-over register directly.
- const CPURegister& leftover_arg = args[arg_count - 1];
- const CPURegister& leftover_pcs = pcs[arg_count - 1];
- if (leftover_arg.IsRegister()) {
- Mov(Register(leftover_pcs), Register(leftover_arg));
- } else {
- Fmov(FPRegister(leftover_pcs), FPRegister(leftover_arg));
+ // If the argument is already in the right place, leave it where it is.
+ if (args[i].Aliases(pcs[i])) continue;
+
+ // Otherwise, if the argument is in a PCS argument register, allocate an
+ // appropriate scratch register and then move it out of the way.
+ if (kPCSVarargs.IncludesAliasOf(args[i]) ||
+ kPCSVarargsFP.IncludesAliasOf(args[i])) {
+ if (args[i].IsRegister()) {
+ Register old_arg = Register(args[i]);
+ Register new_arg = temps.AcquireSameSizeAs(old_arg);
+ Mov(new_arg, old_arg);
+ args[i] = new_arg;
+ } else {
+ FPRegister old_arg = FPRegister(args[i]);
+ FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
+ Fmov(new_arg, old_arg);
+ args[i] = new_arg;
+ }
}
}
- if (arg_count >= 4) {
- Pop(pcs[0], pcs[1], pcs[2], pcs[3]);
- } else if (arg_count >= 2) {
- Pop(pcs[0], pcs[1]);
+ // Do a second pass to move values into their final positions and perform any
+ // conversions that may be required.
+ for (int i = 0; i < arg_count; i++) {
+ DCHECK(pcs[i].type() == args[i].type());
+ if (pcs[i].IsRegister()) {
+ Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
+ } else {
+ DCHECK(pcs[i].IsFPRegister());
+ if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
+ Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
+ } else {
+ Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
+ }
+ }
}
// Load the format string into x0, as per the procedure-call standard.
Bic(csp, StackPointer(), 0xf);
}
- CallPrintf(pcs[0].type());
+ CallPrintf(arg_count, pcs);
}
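
// The two loops above form a simple parallel move: the first pass evacuates
// any argument that currently sits in one of the PCS target registers into a
// scratch register, so no later move can clobber a not-yet-placed value; the
// second pass then moves (and, for mismatched FP sizes, converts) each
// argument into its final PCS register.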
-void MacroAssembler::CallPrintf(CPURegister::RegisterType type) {
+void MacroAssembler::CallPrintf(int arg_count, const CPURegister* args) {
// A call to printf needs special handling for the simulator, since the system
// printf function will use a different instruction set and the procedure-call
// standard will not be compatible.
#ifdef USE_SIMULATOR
{ InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
hlt(kImmExceptionIsPrintf);
- dc32(type);
+ dc32(arg_count); // kPrintfArgCountOffset
+
+ // Determine the argument pattern.
+ uint32_t arg_pattern_list = 0;
+ for (int i = 0; i < arg_count; i++) {
+ uint32_t arg_pattern;
+ if (args[i].IsRegister()) {
+ arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
+ } else {
+ DCHECK(args[i].Is64Bits());
+ arg_pattern = kPrintfArgD;
+ }
+ DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
+ arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
+ }
+ dc32(arg_pattern_list); // kPrintfArgPatternListOffset
}
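// For illustration (assuming the usual two-bit patterns kPrintfArgW,
// kPrintfArgX and kPrintfArgD), arguments (w0, x1, d0) would emit
// arg_count == 3 and
//   arg_pattern_list == kPrintfArgW | (kPrintfArgX << 2) | (kPrintfArgD << 4).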
#else
Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
void MacroAssembler::Printf(const char* format,
- const CPURegister& arg0,
- const CPURegister& arg1,
- const CPURegister& arg2,
- const CPURegister& arg3) {
+ CPURegister arg0,
+ CPURegister arg1,
+ CPURegister arg2,
+ CPURegister arg3) {
+ // We can only print csp if it is the current stack pointer.
+ if (!csp.Is(StackPointer())) {
+ DCHECK(!csp.Aliases(arg0));
+ DCHECK(!csp.Aliases(arg1));
+ DCHECK(!csp.Aliases(arg2));
+ DCHECK(!csp.Aliases(arg3));
+ }
+
// Printf is expected to preserve all registers, so make sure that none are
// available as scratch registers until we've preserved them.
RegList old_tmp_list = TmpList()->list();
TmpList()->set_list(tmp_list.list());
FPTmpList()->set_list(fp_tmp_list.list());
- // Preserve NZCV.
{ UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- Mrs(tmp, NZCV);
- Push(tmp, xzr);
- }
+ // If any of the arguments are the current stack pointer, allocate a new
+ // register for them, and adjust the value to compensate for pushing the
+ // caller-saved registers.
+ bool arg0_sp = StackPointer().Aliases(arg0);
+ bool arg1_sp = StackPointer().Aliases(arg1);
+ bool arg2_sp = StackPointer().Aliases(arg2);
+ bool arg3_sp = StackPointer().Aliases(arg3);
+ if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
+ // Allocate a register to hold the original stack pointer value, to pass
+ // to PrintfNoPreserve as an argument.
+ Register arg_sp = temps.AcquireX();
+ Add(arg_sp, StackPointer(),
+ kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
+ if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
+ if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
+ if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
+ if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
+ }
- PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+ // Preserve NZCV.
+ { UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Mrs(tmp, NZCV);
+ Push(tmp, xzr);
+ }
- { UseScratchRegisterScope temps(this);
- Register tmp = temps.AcquireX();
- Pop(xzr, tmp);
- Msr(NZCV, tmp);
+ PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
+
+ // Restore NZCV.
+ { UseScratchRegisterScope temps(this);
+ Register tmp = temps.AcquireX();
+ Pop(xzr, tmp);
+ Msr(NZCV, tmp);
+ }
}
PopCPURegList(kCallerSavedFP);
// TODO(jbramley): Other architectures use the internal memcpy to copy the
// sequence. If this is a performance bottleneck, we should consider caching
// the sequence and copying it in the same way.
- InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
- ASSERT(jssp.Is(StackPointer()));
+ InstructionAccurateScope scope(this,
+ kNoCodeAgeSequenceLength / kInstructionSize);
+ DCHECK(jssp.Is(StackPointer()));
EmitFrameSetupForCodeAgePatching(this);
}
void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
- InstructionAccurateScope scope(this, kCodeAgeSequenceSize / kInstructionSize);
- ASSERT(jssp.Is(StackPointer()));
+ InstructionAccurateScope scope(this,
+ kNoCodeAgeSequenceLength / kInstructionSize);
+ DCHECK(jssp.Is(StackPointer()));
EmitCodeAgeSequence(this, stub);
}
__ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
__ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
- __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
+ __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}
//
// A branch (br) is used rather than a call (blr) because this code replaces
// the frame setup code that would normally preserve lr.
- __ LoadLiteral(ip0, kCodeAgeStubEntryOffset);
+ __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
__ adr(x0, &start);
__ br(ip0);
// IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
__ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
if (stub) {
__ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
- __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeSequenceSize);
+ __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
}
}
-bool MacroAssembler::IsYoungSequence(byte* sequence) {
- // Generate a young sequence to compare with.
- const int length = kCodeAgeSequenceSize / kInstructionSize;
- static bool initialized = false;
- static byte young[kCodeAgeSequenceSize];
- if (!initialized) {
- PatchingAssembler patcher(young, length);
- // The young sequence is the frame setup code for FUNCTION code types. It is
- // generated by FullCodeGenerator::Generate.
- MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
- initialized = true;
- }
-
- bool is_young = (memcmp(sequence, young, kCodeAgeSequenceSize) == 0);
- ASSERT(is_young || IsCodeAgeSequence(sequence));
+bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
+ bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
+ DCHECK(is_young || isolate->code_aging_helper()->IsOld(sequence));
return is_young;
}
-#ifdef DEBUG
-bool MacroAssembler::IsCodeAgeSequence(byte* sequence) {
- // The old sequence varies depending on the code age. However, the code up
- // until kCodeAgeStubEntryOffset does not change, so we can check that part to
- // get a reasonable level of verification.
- const int length = kCodeAgeStubEntryOffset / kInstructionSize;
- static bool initialized = false;
- static byte old[kCodeAgeStubEntryOffset];
- if (!initialized) {
- PatchingAssembler patcher(old, length);
- MacroAssembler::EmitCodeAgeSequence(&patcher, NULL);
- initialized = true;
- }
- return memcmp(sequence, old, kCodeAgeStubEntryOffset) == 0;
-}
-#endif
-
-
void MacroAssembler::TruncatingDiv(Register result,
Register dividend,
int32_t divisor) {
- ASSERT(!AreAliased(result, dividend));
- ASSERT(result.Is32Bits() && dividend.Is32Bits());
- MultiplierAndShift ms(divisor);
- Mov(result, ms.multiplier());
+ DCHECK(!AreAliased(result, dividend));
+ DCHECK(result.Is32Bits() && dividend.Is32Bits());
+ base::MagicNumbersForDivision<uint32_t> mag =
+ base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+ Mov(result, mag.multiplier);
Smull(result.X(), dividend, result);
Asr(result.X(), result.X(), 32);
- if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
- if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
- if (ms.shift() > 0) Asr(result, result, ms.shift());
+ bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+ if (divisor > 0 && neg) Add(result, result, dividend);
+ if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
+ if (mag.shift > 0) Asr(result, result, mag.shift);
Add(result, result, Operand(dividend, LSR, 31));
}
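
// The sequence above is the usual magic-number signed division; in C++ terms
// it computes roughly (a sketch of the technique, not the emitted code):
//   int32_t q = (int64_t{dividend} * int32_t(mag.multiplier)) >> 32;
//   if (divisor > 0 && neg) q += dividend;
//   if (divisor < 0 && !neg && mag.multiplier > 0) q -= dividend;
//   if (mag.shift > 0) q >>= mag.shift;            // arithmetic shift
//   result = q + (uint32_t(dividend) >> 31);       // round toward zero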
CPURegList* available) {
CHECK(!available->IsEmpty());
CPURegister result = available->PopLowestIndex();
- ASSERT(!AreAliased(result, xzr, csp));
+ DCHECK(!AreAliased(result, xzr, csp));
return result;
}
CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
const CPURegister& reg) {
- ASSERT(available->IncludesAliasOf(reg));
+ DCHECK(available->IncludesAliasOf(reg));
available->Remove(reg);
return reg;
}
const Label* smi_check) {
Assembler::BlockPoolsScope scope(masm);
if (reg.IsValid()) {
- ASSERT(smi_check->is_bound());
- ASSERT(reg.Is64Bits());
+ DCHECK(smi_check->is_bound());
+ DCHECK(reg.Is64Bits());
// Encode the register (x0-x30) in the lowest 5 bits, then the offset to
// 'check' in the other bits. The possible offset is limited in that we
uint32_t delta = __ InstructionsGeneratedSince(smi_check);
__ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
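// For example, a check on x2 bound three instructions earlier is encoded as
// RegisterBits::encode(2) | DeltaBits::encode(3), i.e. 2 | (3 << 5) == 0x62
// (assuming DeltaBits starts right above the five register bits).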
} else {
- ASSERT(!smi_check->is_bound());
+ DCHECK(!smi_check->is_bound());
// An offset of 0 indicates that there is no patch site.
__ InlineData(0);
InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
: reg_(NoReg), smi_check_(NULL) {
InstructionSequence* inline_data = InstructionSequence::At(info);
- ASSERT(inline_data->IsInlineData());
+ DCHECK(inline_data->IsInlineData());
if (inline_data->IsInlineData()) {
uint64_t payload = inline_data->InlineData();
// We use BitField to decode the payload, and BitField can only handle
// 32-bit values.
- ASSERT(is_uint32(payload));
+ DCHECK(is_uint32(payload));
if (payload != 0) {
int reg_code = RegisterBits::decode(payload);
reg_ = Register::XRegFromCode(reg_code);
uint64_t smi_check_delta = DeltaBits::decode(payload);
- ASSERT(smi_check_delta != 0);
+ DCHECK(smi_check_delta != 0);
smi_check_ = inline_data->preceding(smi_check_delta);
}
}