} else if (operand.IsImmediate()) {
int64_t immediate = operand.ImmediateValue();
unsigned reg_size = rd.SizeInBits();
- ASSERT(rd.Is64Bits() || is_uint32(immediate));
// If the operation is NOT, invert the operation and immediate.
if ((op & NOT) == NOT) {
op = static_cast<LogicalOp>(op & ~NOT);
immediate = ~immediate;
- if (rd.Is32Bits()) {
- immediate &= kWRegMask;
- }
}
+ // Ignore the top 32 bits of an immediate if we're moving to a W register.
+ if (rd.Is32Bits()) {
+ // Check that the top 32 bits are consistent.
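+ // (e.g. an immediate of -2 arrives here as 0xfffffffffffffffe; its top
+ // 32 bits are consistently set, and masking leaves 0xfffffffe for the
+ // W-sized operation.)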
+ DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
+ ((immediate >> kWRegSizeInBits) == -1));
+ immediate &= kWRegMask;
+ }
+
+ DCHECK(rd.Is64Bits() || is_uint32(immediate));
+
// Special cases for all set or all clear immediates.
if (immediate == 0) {
switch (op) {
} else {
// Immediate can't be encoded: synthesize using move immediate.
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, immediate);
+ Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
if (rd.Is(csp)) {
// If rd is the stack pointer we cannot use it as the destination
// register so we use the temp register as an intermediate again.
- Logical(temp, rn, temp, op);
+ Logical(temp, rn, imm_operand, op);
Mov(csp, temp);
AssertStackConsistency();
} else {
- Logical(rd, rn, temp, op);
+ Logical(rd, rn, imm_operand, op);
}
}
} else if (operand.IsExtendedRegister()) {
- ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
// Add/sub extended supports shift <= 4. We want to support exactly the
// same modes here.
- ASSERT(operand.shift_amount() <= 4);
- ASSERT(operand.reg().Is64Bits() ||
+ DCHECK(operand.shift_amount() <= 4);
+ DCHECK(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
Register temp = temps.AcquireSameSizeAs(rn);
EmitExtendShift(temp, operand.reg(), operand.extend(),
} else {
// The operand can be encoded in the instruction.
- ASSERT(operand.IsShiftedRegister());
+ DCHECK(operand.IsShiftedRegister());
Logical(rd, rn, operand, op);
}
}
void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
- ASSERT(allow_macro_instructions_);
- ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+ DCHECK(!rd.IsZero());
// TODO(all) extend to support more immediates.
//
// applying move-keep operations to move-zero and move-inverted initial
// values.
- unsigned reg_size = rd.SizeInBits();
- unsigned n, imm_s, imm_r;
- if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
- // Immediate can be represented in a move zero instruction. Movz can't
- // write to the stack pointer.
- movz(rd, imm);
- } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
- // Immediate can be represented in a move inverted instruction. Movn can't
- // write to the stack pointer.
- movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
- } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
- // Immediate can be represented in a logical orr instruction.
- LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
- } else {
+ // Try to move the immediate in one instruction, and if that fails, switch to
+ // using multiple instructions.
+ if (!TryOneInstrMoveImmediate(rd, imm)) {
+ unsigned reg_size = rd.SizeInBits();
+
// Generic immediate case. Imm will be represented by
// [imm3, imm2, imm1, imm0], where each imm is 16 bits.
// A move-zero or move-inverted is generated for the first non-zero or
// Iterate through the halfwords. Use movn/movz for the first non-ignored
// halfword, and movk for subsequent halfwords.
- ASSERT((reg_size % 16) == 0);
+ DCHECK((reg_size % 16) == 0);
bool first_mov_done = false;
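+ // For example, synthesizing 0x1122334455667788 emits movz for 0x7788,
+ // then movk for 0x5566, 0x3344 and 0x1122 at shifts 16, 32 and 48.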
for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
}
}
}
- ASSERT(first_mov_done);
+ DCHECK(first_mov_done);
// Move the temporary if the original destination register was the stack
// pointer.
void MacroAssembler::Mov(const Register& rd,
const Operand& operand,
DiscardMoveMode discard_mode) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
// Provide a swap register for instructions that need to write into the
// system stack pointer (and can't do this inherently).
// Copy the result to the system stack pointer.
if (!dst.Is(rd)) {
- ASSERT(rd.IsSP());
+ DCHECK(rd.IsSP());
Assembler::mov(rd, dst);
}
}
void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
if (operand.NeedsRelocation(this)) {
Ldr(rd, operand.immediate());
unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
- ASSERT((reg_size % 8) == 0);
+ DCHECK((reg_size % 8) == 0);
int count = 0;
for (unsigned i = 0; i < (reg_size / 16); i++) {
if ((imm & 0xffff) == 0) {
// The movz instruction can generate immediates containing an arbitrary 16-bit
// half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
- ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
+ DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
}
StatusFlags nzcv,
Condition cond,
ConditionalCompareOp op) {
- ASSERT((cond != al) && (cond != nv));
+ DCHECK((cond != al) && (cond != nv));
if (operand.NeedsRelocation(this)) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
const Register& rn,
const Operand& operand,
Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
+ DCHECK((cond != al) && (cond != nv));
if (operand.IsImmediate()) {
// Immediate argument. Handle special cases of 0, 1 and -1 using zero
// register.
}
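+// Single-instruction move cases, for example: 0x0000123400000000 fits movz
+// (a single non-zero halfword), 0xffffffffffff5678 fits movn (its inverse
+// is 0xa987), and 0x00ff00ff00ff00ff fits orr (a repeating bitmask).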
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+ int64_t imm) {
+ unsigned n, imm_s, imm_r;
+ int reg_size = dst.SizeInBits();
+ if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't write
+ // to the stack pointer.
+ movz(dst, imm);
+ return true;
+ } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move not instruction. Movn can't write
+ // to the stack pointer.
+ movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ return true;
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
+ LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
+ return true;
+ }
+ return false;
+}
+
+
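+// For example, 0x0001234000000000 needs multiple instructions to move
+// directly, but it is 0x48d << 38: a single movz of 0x48d suffices, with
+// the shift folded into the user of the returned Operand(dst, LSL, 38).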
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+ int64_t imm) {
+ int reg_size = dst.SizeInBits();
+
+ // Encode the immediate in a single move instruction, if possible.
+ if (TryOneInstrMoveImmediate(dst, imm)) {
+ // The move was successful; nothing to do here.
+ } else {
+ // Pre-shift the immediate to the least-significant bits of the register.
+ int shift_low = CountTrailingZeros(imm, reg_size);
+ int64_t imm_low = imm >> shift_low;
+
+ // Pre-shift the immediate to the most-significant bits of the register. We
+ // insert set bits in the least-significant bits, as this creates a
+ // different immediate that may be encodable using movn or orr-immediate.
+ // If this new immediate is encodable, the set bits will be eliminated by
+ // the post shift on the following instruction.
+ int shift_high = CountLeadingZeros(imm, reg_size);
+ int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
+
+ if (TryOneInstrMoveImmediate(dst, imm_low)) {
+ // The new immediate has been moved into the destination's low bits:
+ // return a new leftward-shifting operand.
+ return Operand(dst, LSL, shift_low);
+ } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
+ // The new immediate has been moved into the destination's high bits:
+ // return a new rightward-shifting operand.
+ return Operand(dst, LSR, shift_high);
+ } else {
+ // Use the generic move operation to set up the immediate.
+ Mov(dst, imm);
+ }
+ }
+ return Operand(dst);
+}
+
+
void MacroAssembler::AddSubMacro(const Register& rd,
const Register& rn,
const Operand& operand,
(operand.IsShiftedRegister() && (operand.shift() == ROR))) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireSameSizeAs(rn);
- Mov(temp, operand);
- AddSub(rd, rn, temp, S, op);
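+ // Where the immediate can't be encoded and would need several
+ // instructions to move, fold its shift into the add/sub instead: e.g.
+ // Add(x0, x1, 0x0001234000000000) becomes movz (of 0x48d) plus an add
+ // with a LSL #38 operand.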
+ if (operand.IsImmediate()) {
+ Operand imm_operand =
+ MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
+ AddSub(rd, rn, imm_operand, S, op);
+ } else {
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ }
} else {
AddSub(rd, rn, operand, S, op);
}
const Operand& operand,
FlagsUpdate S,
AddSubWithCarryOp op) {
- ASSERT(rd.SizeInBits() == rn.SizeInBits());
+ DCHECK(rd.SizeInBits() == rn.SizeInBits());
UseScratchRegisterScope temps(this);
if (operand.NeedsRelocation(this)) {
} else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
// Add/sub with carry (shifted register).
- ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
- ASSERT(operand.shift() != ROR);
- ASSERT(is_uintn(operand.shift_amount(),
+ DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
+ DCHECK(operand.shift() != ROR);
+ DCHECK(is_uintn(operand.shift_amount(),
rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
: kWRegSizeInBitsLog2));
Register temp = temps.AcquireSameSizeAs(rn);
} else if (operand.IsExtendedRegister()) {
// Add/sub with carry (extended register).
- ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+ DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
// Add/sub extended supports a shift <= 4. We want to support exactly the
// same modes.
- ASSERT(operand.shift_amount() <= 4);
- ASSERT(operand.reg().Is64Bits() ||
+ DCHECK(operand.shift_amount() <= 4);
+ DCHECK(operand.reg().Is64Bits() ||
((operand.extend() != UXTX) && (operand.extend() != SXTX)));
Register temp = temps.AcquireSameSizeAs(rn);
EmitExtendShift(temp, operand.reg(), operand.extend(),
}
}
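+// For example, Ldp(x0, x1, MemOperand(fp, 1024)): 1024 is outside the
+// scaled imm7 range (-512 to +504 bytes for X registers), so the macro
+// materializes fp + 1024 in a scratch register and loads from there.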
+void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // TODO(all): Should we support register offset for load-store-pair?
+ DCHECK(!addr.IsRegisterOffset());
+
+ int64_t offset = addr.offset();
+ LSDataSize size = CalcLSPairDataSize(op);
+
+ // Check if the offset fits in the immediate field of the appropriate
+ // instruction. If not, emit two instructions to perform the operation.
+ if (IsImmLSPair(offset, size)) {
+ // Encodable in one load/store pair instruction.
+ LoadStorePair(rt, rt2, addr, op);
+ } else {
+ Register base = addr.base();
+ if (addr.IsImmediateOffset()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(base);
+ Add(temp, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(temp), op);
+ } else if (addr.IsPostIndex()) {
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ Add(base, base, offset);
+ } else {
+ DCHECK(addr.IsPreIndex());
+ Add(base, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ }
+ }
+}
+
void MacroAssembler::Load(const Register& rt,
const MemOperand& addr,
Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
Ldrsb(rt, addr);
} else if (r.IsInteger32()) {
Ldr(rt.W(), addr);
} else {
- ASSERT(rt.Is64Bits());
+ DCHECK(rt.Is64Bits());
Ldr(rt, addr);
}
}
void MacroAssembler::Store(const Register& rt,
const MemOperand& addr,
Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
Strb(rt, addr);
} else if (r.IsInteger32()) {
Str(rt.W(), addr);
} else {
- ASSERT(rt.Is64Bits());
+ DCHECK(rt.Is64Bits());
if (r.IsHeapObject()) {
AssertNotSmi(rt);
} else if (r.IsSmi()) {
void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
- ASSERT(allow_macro_instructions_);
- ASSERT(!rd.IsZero());
+ DCHECK(allow_macro_instructions_);
+ DCHECK(!rd.IsZero());
if (hint == kAdrNear) {
adr(rd, label);
return;
}
- ASSERT(hint == kAdrFar);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.AcquireX();
- ASSERT(!AreAliased(rd, scratch));
-
+ DCHECK(hint == kAdrFar);
if (label->is_bound()) {
int label_offset = label->pos() - pc_offset();
if (Instruction::IsValidPCRelOffset(label_offset)) {
adr(rd, label);
} else {
- ASSERT(label_offset <= 0);
+ DCHECK(label_offset <= 0);
int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
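+ // adr's PC-relative immediate spans 21 bits, so this reaches back
+ // -2^20 bytes; the following Add covers the remaining distance.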
adr(rd, min_adr_offset);
Add(rd, rd, label_offset - min_adr_offset);
}
} else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+
InstructionAccurateScope scope(
this, PatchingAssembler::kAdrFarPatchableNInstrs);
adr(rd, label);
nop(ADR_FAR_NOP);
}
movz(scratch, 0);
- add(rd, rd, scratch);
}
}
void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
- ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
+ DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
(bit == -1 || type >= kBranchTypeFirstUsingBit));
if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
B(static_cast<Condition>(type), label);
void MacroAssembler::B(Label* label, Condition cond) {
- ASSERT(allow_macro_instructions_);
- ASSERT((cond != al) && (cond != nv));
+ DCHECK(allow_macro_instructions_);
+ DCHECK((cond != al) && (cond != nv));
Label done;
bool need_extra_instructions =
void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
void MacroAssembler::Cbnz(const Register& rt, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
void MacroAssembler::Cbz(const Register& rt, Label* label) {
- ASSERT(allow_macro_instructions_);
+ DCHECK(allow_macro_instructions_);
Label done;
bool need_extra_instructions =
void MacroAssembler::Abs(const Register& rd, const Register& rm,
Label* is_not_representable,
Label* is_representable) {
- ASSERT(allow_macro_instructions_);
- ASSERT(AreSameSizeAndType(rd, rm));
+ DCHECK(allow_macro_instructions_);
+ DCHECK(AreSameSizeAndType(rd, rm));
Cmp(rm, 1);
Cneg(rd, rm, lt);
void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
const CPURegister& src2, const CPURegister& src3) {
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
int size = src0.SizeInBytes();
const CPURegister& src2, const CPURegister& src3,
const CPURegister& src4, const CPURegister& src5,
const CPURegister& src6, const CPURegister& src7) {
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
+ DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
  int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
int size = src0.SizeInBytes();
const CPURegister& dst2, const CPURegister& dst3) {
// It is not valid to pop into the same register more than once in one
// instruction, not even into the zero register.
- ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
- ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- ASSERT(dst0.IsValid());
+ DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
+ DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ DCHECK(dst0.IsValid());
int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
int size = dst0.SizeInBytes();
}
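+// Push a GP register and an FP register together as one 16-byte unit.
+// For example, Push(x0, d1) drops the stack pointer by 16 bytes, storing
+// d1 at [sp] and x0 at [sp + 8].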
+void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
+ int size = src0.SizeInBytes() + src1.SizeInBytes();
+
+ PushPreamble(size);
+ // Reserve room for src0 and push src1.
+ str(src1, MemOperand(StackPointer(), -size, PreIndex));
+ // Fill the gap with src0.
+ str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
+}
+
+
void MacroAssembler::PushPopQueue::PushQueued(
PreambleDirective preamble_directive) {
if (queued_.empty()) return;
PushHelper(1, size, src, NoReg, NoReg, NoReg);
count -= 1;
}
- ASSERT(count == 0);
+ DCHECK(count == 0);
}
  // Ensure that we don't unintentionally modify scratch or debug registers.
InstructionAccurateScope scope(this);
- ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
- ASSERT(size == src0.SizeInBytes());
+ DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
+ DCHECK(size == src0.SizeInBytes());
// When pushing multiple registers, the store order is chosen such that
// Push(a, b) is equivalent to Push(a) followed by Push(b).
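+ // e.g. with X registers, Push(x0, x1) emits stp x1, x0, [sp, #-16]!,
+ // leaving x1 at the lower address, just as Push(x0); Push(x1) would.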
switch (count) {
case 1:
- ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
break;
case 2:
- ASSERT(src2.IsNone() && src3.IsNone());
+ DCHECK(src2.IsNone() && src3.IsNone());
stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
break;
case 3:
- ASSERT(src3.IsNone());
+ DCHECK(src3.IsNone());
stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
str(src0, MemOperand(StackPointer(), 2 * size));
break;
  // Ensure that we don't unintentionally modify scratch or debug registers.
InstructionAccurateScope scope(this);
- ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
- ASSERT(size == dst0.SizeInBytes());
+ DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ DCHECK(size == dst0.SizeInBytes());
// When popping multiple registers, the load order is chosen such that
// Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
switch (count) {
case 1:
- ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
break;
case 2:
- ASSERT(dst2.IsNone() && dst3.IsNone());
+ DCHECK(dst2.IsNone() && dst3.IsNone());
ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
break;
case 3:
- ASSERT(dst3.IsNone());
+ DCHECK(dst3.IsNone());
ldr(dst2, MemOperand(StackPointer(), 2 * size));
ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
break;
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
- ASSERT((total_size.ImmediateValue() % 16) == 0);
+ DCHECK((total_size.ImmediateValue() % 16) == 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
// on entry and the total size of the specified registers must also be a
// multiple of 16 bytes.
if (total_size.IsImmediate()) {
- ASSERT((total_size.ImmediateValue() % 16) == 0);
+ DCHECK((total_size.ImmediateValue() % 16) == 0);
}
// Don't check access size for non-immediate sizes. It's difficult to do
void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
if (offset.IsImmediate()) {
- ASSERT(offset.ImmediateValue() >= 0);
+ DCHECK(offset.ImmediateValue() >= 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
if (offset.IsImmediate()) {
- ASSERT(offset.ImmediateValue() >= 0);
+ DCHECK(offset.ImmediateValue() >= 0);
} else if (emit_debug_code()) {
Cmp(xzr, offset);
Check(le, kStackAccessBelowStackPointer);
void MacroAssembler::PokePair(const CPURegister& src1,
const CPURegister& src2,
int offset) {
- ASSERT(AreSameSizeAndType(src1, src2));
- ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
+ DCHECK(AreSameSizeAndType(src1, src2));
+ DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
Stp(src1, src2, MemOperand(StackPointer(), offset));
}
void MacroAssembler::PeekPair(const CPURegister& dst1,
const CPURegister& dst2,
int offset) {
- ASSERT(AreSameSizeAndType(dst1, dst2));
- ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
+ DCHECK(AreSameSizeAndType(dst1, dst2));
+ DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
}
// This method must not be called unless the current stack pointer is the
// system stack pointer (csp).
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, -2 * kXRegSize, PreIndex);
// This method must not be called unless the current stack pointer is the
// system stack pointer (csp).
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
MemOperand tos(csp, 2 * kXRegSize, PostIndex);
Register scratch2,
Register scratch3,
Label* call_runtime) {
- ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
+ DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
scratch3));
Register empty_fixed_array_value = scratch0;
Register scratch1,
Register scratch2) {
// Handler expects argument in x0.
- ASSERT(exception.Is(x0));
+ DCHECK(exception.Is(x0));
// Compute the handler entry address and jump to it. The handler table is
// a fixed array of (smi-tagged) code offsets.
void MacroAssembler::InNewSpace(Register object,
Condition cond,
Label* branch) {
- ASSERT(cond == eq || cond == ne);
+ DCHECK(cond == eq || cond == ne);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
And(temp, object, ExternalReference::new_space_mask(isolate()));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The handler expects the exception in x0.
- ASSERT(value.Is(x0));
+ DCHECK(value.Is(x0));
// Drop the stack pointer to the top of the top handler.
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
Ldr(jssp, MemOperand(scratch1));
STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
// The handler expects the exception in x0.
- ASSERT(value.Is(x0));
+ DCHECK(value.Is(x0));
// Drop the stack pointer to the top of the top stack handler.
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
isolate())));
Ldr(jssp, MemOperand(scratch1));
void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
- ASSERT(smi.Is64Bits());
+ DCHECK(smi.Is64Bits());
Abs(smi, smi, slow);
}
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
+ DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
ExternalReference::handle_scope_level_address(isolate()),
next_address);
- ASSERT(function_address.is(x1) || function_address.is(x2));
+ DCHECK(function_address.is(x1) || function_address.is(x2));
Label profiler_disabled;
Label end_profiler_check;
FrameScope frame(this, StackFrame::INTERNAL);
CallExternalReference(
ExternalReference(
- Runtime::kHiddenPromoteScheduledException, isolate()), 0);
+ Runtime::kPromoteScheduledException, isolate()), 0);
}
B(&exception_handled);
void MacroAssembler::GetBuiltinEntry(Register target,
Register function,
Builtins::JavaScript id) {
- ASSERT(!AreAliased(target, function));
+ DCHECK(!AreAliased(target, function));
GetBuiltinFunction(function, id);
// Load the code entry point from the builtins object.
Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
const CallWrapper& call_wrapper) {
ASM_LOCATION("MacroAssembler::InvokeBuiltin");
// You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Get the builtin entry in x2 and setup the function object in x1.
GetBuiltinEntry(x2, x1, id);
Call(x2);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
+ DCHECK(flag == JUMP_FUNCTION);
Jump(x2);
}
}
Heap::RootListIndex map_index,
Register scratch1,
Register scratch2) {
- ASSERT(!AreAliased(string, length, scratch1, scratch2));
+ DCHECK(!AreAliased(string, length, scratch1, scratch2));
LoadRoot(scratch2, map_index);
SmiTag(scratch1, length);
Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
// environment.
// Note: This will break if we ever start generating snapshots on one ARM
// platform for another ARM platform with a different alignment.
- return OS::ActivationFrameAlignment();
+ return base::OS::ActivationFrameAlignment();
#else // V8_HOST_ARCH_ARM64
// If we are using the simulator then we should always align to the expected
// alignment. As the simulator is used to generate snapshots we do not know
void MacroAssembler::CallCFunction(Register function,
int num_of_reg_args,
int num_of_double_args) {
- ASSERT(has_frame());
+ DCHECK(has_frame());
// We can pass 8 integer arguments in registers. If we need to pass more than
// that, we'll need to implement support for passing them on the stack.
- ASSERT(num_of_reg_args <= 8);
+ DCHECK(num_of_reg_args <= 8);
// If we're passing doubles, we're limited to the following prototypes
// (defined by ExternalReference::Type):
// BUILTIN_FP_CALL: double f(double)
// BUILTIN_FP_INT_CALL: double f(double, int)
if (num_of_double_args > 0) {
- ASSERT(num_of_reg_args <= 1);
- ASSERT((num_of_double_args + num_of_reg_args) <= 2);
+ DCHECK(num_of_reg_args <= 1);
+ DCHECK((num_of_double_args + num_of_reg_args) <= 2);
}
int sp_alignment = ActivationFrameAlignment();
// The ABI mandates at least 16-byte alignment.
- ASSERT(sp_alignment >= 16);
- ASSERT(IsPowerOf2(sp_alignment));
+ DCHECK(sp_alignment >= 16);
+ DCHECK(IsPowerOf2(sp_alignment));
// The current stack pointer is a callee saved register, and is preserved
// across the call.
- ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
+ DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
// Align and synchronize the system stack pointer with jssp.
Bic(csp, old_stack_pointer, sp_alignment - 1);
// where we only pushed one W register on top of an aligned jssp.
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- ASSERT(ActivationFrameAlignment() == 16);
+ DCHECK(ActivationFrameAlignment() == 16);
Sub(temp, csp, old_stack_pointer);
// We want temp <= 0 && temp >= -12.
Cmp(temp, 0);
void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
- ASSERT(!RelocInfo::IsCodeTarget(rmode));
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
Jump(reinterpret_cast<intptr_t>(target), rmode);
}
void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
- ASSERT(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
AllowDeferredHandleDereference embedding_raw_address;
Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
}
positions_recorder()->WriteRecordedPositions();
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
+ DCHECK(rmode != RelocInfo::NONE32);
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
// Addresses are 48 bits so we never need to load the upper 16 bits.
uint64_t imm = reinterpret_cast<uint64_t>(target);
// If we don't use ARM tagged addresses, the 16 higher bits must be 0.
- ASSERT(((imm >> 48) & 0xffff) == 0);
+ DCHECK(((imm >> 48) & 0xffff) == 0);
movz(temp, (imm >> 0) & 0xffff, 0);
movk(temp, (imm >> 16) & 0xffff, 16);
movk(temp, (imm >> 32) & 0xffff, 32);
USE(target);
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
+ DCHECK(rmode != RelocInfo::NONE32);
if (rmode == RelocInfo::NONE64) {
return kCallSizeWithoutRelocation;
USE(ast_id);
// Addresses always have 64 bits, so we shouldn't encounter NONE32.
- ASSERT(rmode != RelocInfo::NONE32);
+ DCHECK(rmode != RelocInfo::NONE32);
if (rmode == RelocInfo::NONE64) {
return kCallSizeWithoutRelocation;
Register heap_number_map,
Label* on_heap_number,
Label* on_not_heap_number) {
- ASSERT(on_heap_number || on_not_heap_number);
+ DCHECK(on_heap_number || on_not_heap_number);
AssertNotSmi(object);
UseScratchRegisterScope temps(this);
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
}
- ASSERT(!AreAliased(temp, heap_number_map));
+ DCHECK(!AreAliased(temp, heap_number_map));
Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
Cmp(temp, heap_number_map);
Register scratch2,
Register scratch3,
Label* not_found) {
- ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3));
// Use of registers. Register result is used as a temporary.
Register number_string_cache = result;
void MacroAssembler::JumpIfMinusZero(Register input,
Label* on_negative_zero) {
- ASSERT(input.Is64Bits());
+ DCHECK(input.Is64Bits());
// Floating point value is in an integer register. Detect -0.0 by subtracting
// 1 (cmp), which will cause overflow.
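+ // (-0.0 reinterpreted as an integer is INT64_MIN, and INT64_MIN - 1
+ // overflows, setting the V flag.)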
Cmp(input, 1);
Register scratch5) {
// Untag src and dst into scratch registers.
// Copy src->dst in a tight loop.
- ASSERT(!AreAliased(dst, src,
+ DCHECK(!AreAliased(dst, src,
scratch1, scratch2, scratch3, scratch4, scratch5));
- ASSERT(count >= 2);
+ DCHECK(count >= 2);
const Register& remaining = scratch3;
Mov(remaining, count / 2);
Register scratch4) {
// Untag src and dst into scratch registers.
// Copy src->dst in an unrolled loop.
- ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
+ DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
const Register& dst_untagged = scratch1;
const Register& src_untagged = scratch2;
Register scratch3) {
// Untag src and dst into scratch registers.
// Copy src->dst in an unrolled loop.
- ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
const Register& dst_untagged = scratch1;
const Register& src_untagged = scratch2;
//
// In both cases, fields are copied in pairs if possible, and left-overs are
// handled separately.
- ASSERT(!AreAliased(dst, src));
- ASSERT(!temps.IncludesAliasOf(dst));
- ASSERT(!temps.IncludesAliasOf(src));
- ASSERT(!temps.IncludesAliasOf(xzr));
+ DCHECK(!AreAliased(dst, src));
+ DCHECK(!temps.IncludesAliasOf(dst));
+ DCHECK(!temps.IncludesAliasOf(src));
+ DCHECK(!temps.IncludesAliasOf(xzr));
if (emit_debug_code()) {
Cmp(dst, src);
UseScratchRegisterScope temps(this);
Register tmp1 = temps.AcquireX();
Register tmp2 = temps.AcquireX();
- ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
- ASSERT(!AreAliased(src, dst, csp));
+ DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
+ DCHECK(!AreAliased(src, dst, csp));
if (emit_debug_code()) {
// Check copy length.
void MacroAssembler::FillFields(Register dst,
Register field_count,
Register filler) {
- ASSERT(!dst.Is(csp));
+ DCHECK(!dst.Is(csp));
UseScratchRegisterScope temps(this);
Register field_ptr = temps.AcquireX();
Register counter = temps.AcquireX();
if (smi_check == DO_SMI_CHECK) {
JumpIfEitherSmi(first, second, failure);
} else if (emit_debug_code()) {
- ASSERT(smi_check == DONT_DO_SMI_CHECK);
+ DCHECK(smi_check == DONT_DO_SMI_CHECK);
Label not_smi;
    JumpIfEitherSmi(first, second, NULL, &not_smi);
Register scratch1,
Register scratch2,
Label* failure) {
- ASSERT(!AreAliased(scratch1, second));
- ASSERT(!AreAliased(scratch1, scratch2));
+ DCHECK(!AreAliased(scratch1, second));
+ DCHECK(!AreAliased(scratch1, scratch2));
static const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
Register scratch1,
Register scratch2,
Label* failure) {
- ASSERT(!AreAliased(first, second, scratch1, scratch2));
+ DCHECK(!AreAliased(first, second, scratch1, scratch2));
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
const int kFlatAsciiStringTag =
// The code below is made a lot easier because the calling code already sets
// up actual and expected registers according to the contract if values are
// passed in registers.
- ASSERT(actual.is_immediate() || actual.reg().is(x0));
- ASSERT(expected.is_immediate() || expected.reg().is(x2));
- ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
+ DCHECK(actual.is_immediate() || actual.reg().is(x0));
+ DCHECK(expected.is_immediate() || expected.reg().is(x2));
+ DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
+ DCHECK(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
Label done;
Call(code);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
+ DCHECK(flag == JUMP_FUNCTION);
Jump(code);
}
}
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
- ASSERT(function.is(x1));
+ DCHECK(function.is(x1));
Register expected_reg = x2;
Register code_reg = x3;
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Contract with called JS functions requires that function is passed in x1.
// (See FullCodeGenerator::Generate().)
- ASSERT(function.Is(x1));
+ DCHECK(function.Is(x1));
Register code_reg = x3;
void MacroAssembler::TruncateDoubleToI(Register result,
DoubleRegister double_input) {
Label done;
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Try to convert the double to an int64. If successful, the bottom 32 bits
// contain our truncated int32 result.
TryConvertDoubleToInt64(result, double_input, &done);
// If we fell through then inline version didn't succeed - call stub instead.
- Push(lr);
- Push(double_input); // Put input on stack.
+ Push(lr, double_input);
DoubleToIStub stub(isolate(),
jssp,
void MacroAssembler::TruncateHeapNumberToI(Register result,
Register object) {
Label done;
- ASSERT(!result.is(object));
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(!result.is(object));
+ DCHECK(jssp.Is(StackPointer()));
Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
void MacroAssembler::StubPrologue() {
- ASSERT(StackPointer().Is(jssp));
+ DCHECK(StackPointer().Is(jssp));
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
__ Mov(temp, Smi::FromInt(StackFrame::STUB));
void MacroAssembler::EnterFrame(StackFrame::Type type) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
UseScratchRegisterScope temps(this);
Register type_reg = temps.AcquireX();
Register code_reg = temps.AcquireX();
void MacroAssembler::LeaveFrame(StackFrame::Type type) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Drop the execution stack down to the frame pointer and restore
// the caller frame pointer and return address.
Mov(jssp, fp);
// Read the registers from the stack without popping them. The stack pointer
// will be reset as part of the unwinding process.
CPURegList saved_fp_regs = kCallerSavedFP;
- ASSERT(saved_fp_regs.Count() % 2 == 0);
+ DCHECK(saved_fp_regs.Count() % 2 == 0);
int offset = ExitFrameConstants::kLastExitFrameField;
while (!saved_fp_regs.IsEmpty()) {
void MacroAssembler::EnterExitFrame(bool save_doubles,
const Register& scratch,
int extra_space) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Set up the new stack frame.
Mov(scratch, Operand(CodeObject()));
// Align and synchronize the system stack pointer with jssp.
AlignAndSetCSPForFrame();
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
// fp[8]: CallerPC (lr)
// fp -> fp[0]: CallerFP (old fp)
void MacroAssembler::LeaveExitFrame(bool restore_doubles,
const Register& scratch,
bool restore_context) {
- ASSERT(csp.Is(StackPointer()));
+ DCHECK(csp.Is(StackPointer()));
if (restore_doubles) {
ExitFrameRestoreFPRegs();
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
Register scratch1, Register scratch2) {
- ASSERT(value != 0);
+ DCHECK(value != 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Mov(scratch2, ExternalReference(counter));
Ldr(scratch1, MemOperand(scratch2));
Mov(x0, 0);
Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(isolate(), 1);
- ASSERT(AllowThisStubCall(&ces));
+ DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
int handler_index) {
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
// Adjust this code if the asserts don't hold.
STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
// Push the frame pointer, context, state, and code object.
if (kind == StackHandler::JS_ENTRY) {
- ASSERT(Smi::FromInt(0) == 0);
+ DCHECK(Smi::FromInt(0) == 0);
Push(xzr, xzr, x11, x10);
} else {
Push(fp, cp, x11, x10);
Register scratch2,
Label* gc_required,
AllocationFlags flags) {
- ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
UseScratchRegisterScope temps(this);
Register scratch3 = temps.AcquireX();
- ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
- ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+ DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
+ DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
// Make object size into bytes.
if ((flags & SIZE_IN_WORDS) != 0) {
object_size *= kPointerSize;
}
- ASSERT(0 == (object_size & kObjectAlignmentMask));
+ DCHECK(0 == (object_size & kObjectAlignmentMask));
// Check relative positions of allocation top and limit addresses.
// The values must be adjacent in memory to allow the use of LDP.
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
+ DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register top_address = scratch1;
UseScratchRegisterScope temps(this);
Register scratch3 = temps.AcquireX();
- ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
- ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
+ DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
+ DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
scratch1.Is64Bits() && scratch2.Is64Bits());
// Check relative positions of allocation top and limit addresses.
AllocationUtils::GetAllocationLimitReference(isolate(), flags);
intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
- ASSERT((limit - top) == kPointerSize);
+ DCHECK((limit - top) == kPointerSize);
// Set up allocation top address and object size registers.
Register top_address = scratch1;
Register scratch2,
Register scratch3,
Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
Register scratch2,
Register scratch3,
Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
Register scratch1,
Register scratch2,
Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
Register scratch1,
Register scratch2,
Label* gc_required) {
- ASSERT(!AreAliased(result, length, scratch1, scratch2));
+ DCHECK(!AreAliased(result, length, scratch1, scratch2));
Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
Register scratch1,
Register scratch2,
CPURegister value,
- CPURegister heap_number_map) {
- ASSERT(!value.IsValid() || value.Is64Bits());
+ CPURegister heap_number_map,
+ MutableMode mode) {
+ DCHECK(!value.IsValid() || value.Is64Bits());
UseScratchRegisterScope temps(this);
// Allocate an object in the heap for the heap number and tag it as a heap
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
NO_ALLOCATION_FLAGS);
+ Heap::RootListIndex map_index = mode == MUTABLE
+ ? Heap::kMutableHeapNumberMapRootIndex
+ : Heap::kHeapNumberMapRootIndex;
+
// Prepare the heap number map.
if (!heap_number_map.IsValid()) {
// If we have a valid value register, use the same type of register to store
} else {
heap_number_map = scratch1;
}
- LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+ LoadRoot(heap_number_map, map_index);
}
if (emit_debug_code()) {
Register map;
} else {
map = Register(heap_number_map);
}
- AssertRegisterIsRoot(map, Heap::kHeapNumberMapRootIndex);
+ AssertRegisterIsRoot(map, map_index);
}
// Store the heap number map and the value in the allocated object.
Register scratch,
Label* miss,
BoundFunctionAction action) {
- ASSERT(!AreAliased(function, result, scratch));
+ DCHECK(!AreAliased(function, result, scratch));
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
+ Label non_instance;
+ if (action == kMissOnBoundFunction) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
- // Check that the function really is a function. Load map into result reg.
- JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
+ // Check that the function really is a function. Load map into result reg.
+ JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
- if (action == kMissOnBoundFunction) {
Register scratch_w = scratch.W();
Ldr(scratch,
FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
Ldr(scratch_w,
FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
- }
- // Make sure that the function has an instance prototype.
- Label non_instance;
- Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
- Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+ // Make sure that the function has an instance prototype.
+ Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+ Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+ }
// Get the prototype or initial map from the function.
Ldr(result,
// Get the prototype from the initial map.
Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
- B(&done);
- // Non-instance prototype: fetch prototype from constructor field in initial
- // map.
- Bind(&non_instance);
- Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ if (action == kMissOnBoundFunction) {
+ B(&done);
+
+ // Non-instance prototype: fetch prototype from constructor field in initial
+ // map.
+ Bind(&non_instance);
+ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+ }
// All done.
Bind(&done);
Heap::RootListIndex index) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- ASSERT(!AreAliased(obj, temp));
+ DCHECK(!AreAliased(obj, temp));
LoadRoot(temp, index);
Cmp(obj, temp);
}
FPRegister fpscratch1,
Label* fail,
int elements_offset) {
- ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+ DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
Label store_num;
// Speculatively convert the smi to a double - all smis can be exactly
// that the constants for the maximum number of digits for an array index
// cached in the hash field and the number of bits reserved for it does not
// conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
DecodeField<String::ArrayIndexValueBits>(index, hash);
SmiTag(index, index);
SeqStringSetCharCheckIndexType index_type,
Register scratch,
uint32_t encoding_mask) {
- ASSERT(!AreAliased(string, index, scratch));
+ DCHECK(!AreAliased(string, index, scratch));
if (index_type == kIndexIsSmi) {
AssertSmi(index);
Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
Check(lt, kIndexIsTooLarge);
- ASSERT_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(0, Smi::FromInt(0));
Cmp(index, 0);
Check(ge, kIndexIsNegative);
}
Register scratch1,
Register scratch2,
Label* miss) {
- ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+ DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
Label same_contexts;
// Load current lexical context from the stack frame.
// Compute the hash code from the untagged key. This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc
void MacroAssembler::GetNumberHash(Register key, Register scratch) {
- ASSERT(!AreAliased(key, scratch));
+ DCHECK(!AreAliased(key, scratch));
// Xor original key with a seed.
LoadRoot(scratch, Heap::kHashSeedRootIndex);
Register scratch1,
Register scratch2,
Register scratch3) {
- ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
+ DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
Label done;
And(scratch2, scratch2, scratch1);
// Scale the index by multiplying by the element size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
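+ // That is, scratch2 * 3, computed as scratch2 + (scratch2 << 1).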
Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
// Check if the key is identical to the name.
Register scratch1,
SaveFPRegsMode fp_mode,
RememberedSetFinalAction and_then) {
- ASSERT(!AreAliased(object, address, scratch1));
+ DCHECK(!AreAliased(object, address, scratch1));
Label done, store_buffer_overflow;
if (emit_debug_code()) {
Label ok;
Str(scratch1, MemOperand(scratch2));
// Call stub on end of buffer.
// Check for end of buffer.
- ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
+ DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
(1 << (14 + kPointerSizeLog2)));
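+ // With 64-bit pointers (kPointerSizeLog2 == 3) this tests bit 17.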
if (and_then == kFallThroughAtEnd) {
Tbz(scratch1, (14 + kPointerSizeLog2), &done);
} else {
- ASSERT(and_then == kReturnAtEnd);
+ DCHECK(and_then == kReturnAtEnd);
Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
Ret();
}
// Safepoints expect a block of kNumSafepointRegisters values on the stack, so
// adjust the stack for unsaved registers.
const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
- ASSERT(num_unsaved >= 0);
+ DCHECK(num_unsaved >= 0);
Claim(num_unsaved);
PushXRegList(kSafepointSavedRegisters);
}
int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
// Make sure the safepoint registers list is what we expect.
- ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
+ DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
// Safepoint registers are stored contiguously on the stack, but not all the
// registers are saved. The following registers are excluded:
// Although the object register is tagged, the offset is relative to the start
// of the object, so offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kPointerSize));
Add(scratch, object, offset - kHeapObjectTag);
if (emit_debug_code()) {
LinkRegisterStatus lr_status,
SaveFPRegsMode fp_mode) {
ASM_LOCATION("MacroAssembler::RecordWrite");
- ASSERT(!AreAliased(object, map));
+ DCHECK(!AreAliased(object, map));
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- // TODO(mstarzinger): Dynamic counter missing.
-
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
Bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
+ dst);
+
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
SmiCheck smi_check,
PointersToHereCheck pointers_to_here_check_for_value) {
ASM_LOCATION("MacroAssembler::RecordWrite");
- ASSERT(!AreAliased(object, value));
+ DCHECK(!AreAliased(object, value));
if (emit_debug_code()) {
UseScratchRegisterScope temps(this);
Check(eq, kWrongAddressOrValuePassedToRecordWrite);
}
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- // TODO(mstarzinger): Dynamic counter missing.
-
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
+ DCHECK_EQ(0, kSmiTag);
JumpIfSmi(value, &done);
}
Bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
+ value);
+
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
if (emit_debug_code()) {
// The bit sequence is backward. The first character in the string
// represents the least significant bit.
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label color_is_valid;
Tbnz(reg, 0, &color_is_valid);
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register shift_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
- ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
+ DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
+ DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
// addr_reg is divided into fields:
// |63 page base 20|19 high 8|7 shift 3|2 0|
// 'high' gives the index of the cell holding color bits for the object.
int first_bit,
int second_bit) {
// See mark-compact.h for color definitions.
- ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
+ DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
GetMarkBits(object, bitmap_scratch, shift_scratch);
Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
// Check for the color.
if (first_bit == 0) {
// Checking for white.
- ASSERT(second_bit == 0);
+ DCHECK(second_bit == 0);
// We only need to test the first bit.
Tbz(bitmap_scratch, 0, has_color);
} else {
Register scratch0,
Register scratch1,
Label* on_black) {
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
}
Register scratch0,
Register scratch1,
Label* found) {
- ASSERT(!AreAliased(object, scratch0, scratch1));
+ DCHECK(!AreAliased(object, scratch0, scratch1));
Factory* factory = isolate()->factory();
Register current = scratch0;
Label loop_again;
void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
Register result) {
- ASSERT(!result.Is(ldr_location));
+ DCHECK(!result.Is(ldr_location));
const uint32_t kLdrLitOffset_lsb = 5;
const uint32_t kLdrLitOffset_width = 19;
Ldr(result, MemOperand(ldr_location));
Register load_scratch,
Register length_scratch,
Label* value_is_white_and_not_data) {
- ASSERT(!AreAliased(
+ DCHECK(!AreAliased(
value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
// These bit sequences are backwards. The first character in the string
// represents the least significant bit.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
GetMarkBits(value, bitmap_scratch, shift_scratch);
Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
// Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = load_scratch;
// Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
// External strings are the only ones with the kExternalStringTag bit
// set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
Mov(length_scratch, ExternalString::kSize);
TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
String::kLengthOffset));
Tst(instance_type, kStringEncodingMask);
const CPURegister& arg3) {
// We cannot handle a caller-saved stack pointer. It doesn't make much sense
// in most cases anyway, so this restriction shouldn't be too serious.
- ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
+ DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
// The provided arguments, and their proper procedure-call standard registers.
CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
// In C, floats are always cast to doubles for varargs calls.
pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
} else {
- ASSERT(args[i].IsNone());
+ DCHECK(args[i].IsNone());
arg_count = i;
break;
}
// Do a second pass to move values into their final positions and perform any
// conversions that may be required.
for (int i = 0; i < arg_count; i++) {
- ASSERT(pcs[i].type() == args[i].type());
+ DCHECK(pcs[i].type() == args[i].type());
if (pcs[i].IsRegister()) {
Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
} else {
- ASSERT(pcs[i].IsFPRegister());
+ DCHECK(pcs[i].IsFPRegister());
if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
} else {
if (args[i].IsRegister()) {
arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
} else {
- ASSERT(args[i].Is64Bits());
+ DCHECK(args[i].Is64Bits());
arg_pattern = kPrintfArgD;
}
- ASSERT(arg_pattern < (1 << kPrintfArgPatternBits));
+ DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
}
dc32(arg_pattern_list); // kPrintfArgPatternListOffset
CPURegister arg3) {
// We can only print sp if it is the current stack pointer.
if (!csp.Is(StackPointer())) {
- ASSERT(!csp.Aliases(arg0));
- ASSERT(!csp.Aliases(arg1));
- ASSERT(!csp.Aliases(arg2));
- ASSERT(!csp.Aliases(arg3));
+ DCHECK(!csp.Aliases(arg0));
+ DCHECK(!csp.Aliases(arg1));
+ DCHECK(!csp.Aliases(arg2));
+ DCHECK(!csp.Aliases(arg3));
}
// Printf is expected to preserve all registers, so make sure that none are
// the sequence and copying it in the same way.
InstructionAccurateScope scope(this,
kNoCodeAgeSequenceLength / kInstructionSize);
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
EmitFrameSetupForCodeAgePatching(this);
}
void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
InstructionAccurateScope scope(this,
kNoCodeAgeSequenceLength / kInstructionSize);
- ASSERT(jssp.Is(StackPointer()));
+ DCHECK(jssp.Is(StackPointer()));
EmitCodeAgeSequence(this, stub);
}
bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
- ASSERT(is_young ||
+ DCHECK(is_young ||
isolate->code_aging_helper()->IsOld(sequence));
return is_young;
}
void MacroAssembler::TruncatingDiv(Register result,
Register dividend,
int32_t divisor) {
- ASSERT(!AreAliased(result, dividend));
- ASSERT(result.Is32Bits() && dividend.Is32Bits());
+ DCHECK(!AreAliased(result, dividend));
+ DCHECK(result.Is32Bits() && dividend.Is32Bits());
MultiplierAndShift ms(divisor);
Mov(result, ms.multiplier());
Smull(result.X(), dividend, result);
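+ // e.g. a divisor of 3 uses multiplier 0x55555556 (roughly 2^32 / 3), so
+ // the high 32 bits of the smull product approximate dividend / 3.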
CPURegList* available) {
CHECK(!available->IsEmpty());
CPURegister result = available->PopLowestIndex();
- ASSERT(!AreAliased(result, xzr, csp));
+ DCHECK(!AreAliased(result, xzr, csp));
return result;
}
CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
const CPURegister& reg) {
- ASSERT(available->IncludesAliasOf(reg));
+ DCHECK(available->IncludesAliasOf(reg));
available->Remove(reg);
return reg;
}
const Label* smi_check) {
Assembler::BlockPoolsScope scope(masm);
if (reg.IsValid()) {
- ASSERT(smi_check->is_bound());
- ASSERT(reg.Is64Bits());
+ DCHECK(smi_check->is_bound());
+ DCHECK(reg.Is64Bits());
// Encode the register (x0-x30) in the lowest 5 bits, then the offset to
// 'check' in the other bits. The possible offset is limited in that we
uint32_t delta = __ InstructionsGeneratedSince(smi_check);
__ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
} else {
- ASSERT(!smi_check->is_bound());
+ DCHECK(!smi_check->is_bound());
// An offset of 0 indicates that there is no patch site.
__ InlineData(0);
InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
: reg_(NoReg), smi_check_(NULL) {
InstructionSequence* inline_data = InstructionSequence::At(info);
- ASSERT(inline_data->IsInlineData());
+ DCHECK(inline_data->IsInlineData());
if (inline_data->IsInlineData()) {
uint64_t payload = inline_data->InlineData();
// We use BitField to decode the payload, and BitField can only handle
// 32-bit values.
- ASSERT(is_uint32(payload));
+ DCHECK(is_uint32(payload));
if (payload != 0) {
int reg_code = RegisterBits::decode(payload);
reg_ = Register::XRegFromCode(reg_code);
uint64_t smi_check_delta = DeltaBits::decode(payload);
- ASSERT(smi_check_delta != 0);
+ DCHECK(smi_check_delta != 0);
smi_check_ = inline_data->preceding(smi_check_delta);
}
}