// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "v8.h"
+#include "src/v8.h"
#if V8_TARGET_ARCH_X64
-#include "bootstrapper.h"
-#include "codegen.h"
-#include "cpu-profiler.h"
-#include "assembler-x64.h"
-#include "macro-assembler-x64.h"
-#include "serialize.h"
-#include "debug.h"
-#include "heap.h"
-#include "isolate-inl.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/heap/heap.h"
+#include "src/isolate-inl.h"
+#include "src/serialize.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/macro-assembler-x64.h"
namespace v8 {
namespace internal {
Operand MacroAssembler::ExternalOperand(ExternalReference target,
Register scratch) {
- if (root_array_available_ && !Serializer::enabled(isolate())) {
+ if (root_array_available_ && !serializer_enabled()) {
int64_t delta = RootRegisterDelta(target);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
return Operand(kRootRegister, static_cast<int32_t>(delta));
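// Illustrative standalone sketch of the fast path above (addresses here are
// hypothetical): an external reference that lies within a signed-32-bit
// displacement of the value kept in kRootRegister can be addressed relative
// to that register instead of through a 64-bit immediate.
#include <cassert>
#include <cstdint>
#include <limits>

static bool fits_int32(int64_t x) {
  return x >= std::numeric_limits<int32_t>::min() &&
         x <= std::numeric_limits<int32_t>::max();
}

int main() {
  uint64_t root = 0x00007f0000001000;  // hypothetical kRootRegister value
  assert(fits_int32(static_cast<int64_t>((root + 0x4000) - root)));
  assert(!fits_int32(static_cast<int64_t>((root + (1ull << 40)) - root)));
  return 0;
}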
void MacroAssembler::Load(Register destination, ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled(isolate())) {
+ if (root_array_available_ && !serializer_enabled()) {
int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
movp(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
void MacroAssembler::Store(ExternalReference destination, Register source) {
- if (root_array_available_ && !Serializer::enabled(isolate())) {
+ if (root_array_available_ && !serializer_enabled()) {
int64_t delta = RootRegisterDelta(destination);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
movp(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
void MacroAssembler::LoadAddress(Register destination,
ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled(isolate())) {
+ if (root_array_available_ && !serializer_enabled()) {
int64_t delta = RootRegisterDelta(source);
if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
leap(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
int MacroAssembler::LoadAddressSize(ExternalReference source) {
- if (root_array_available_ && !Serializer::enabled(isolate())) {
+ if (root_array_available_ && !serializer_enabled()) {
// This calculation depends on the internals of LoadAddress.
// Its correctness is ensured by the asserts in the Call
// instruction below.
void MacroAssembler::PushAddress(ExternalReference source) {
int64_t address = reinterpret_cast<int64_t>(source.address());
- if (is_int32(address) && !Serializer::enabled(isolate())) {
+ if (is_int32(address) && !serializer_enabled()) {
if (emit_debug_code()) {
Move(kScratchRegister, kZapValue, Assembler::RelocInfoNone());
}
void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
+ DCHECK(root_array_available_);
movp(destination, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::LoadRootIndexed(Register destination,
Register variable_offset,
int fixed_offset) {
- ASSERT(root_array_available_);
+ DCHECK(root_array_available_);
movp(destination,
Operand(kRootRegister,
variable_offset, times_pointer_size,
void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
+ DCHECK(root_array_available_);
movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
source);
}
void MacroAssembler::PushRoot(Heap::RootListIndex index) {
- ASSERT(root_array_available_);
+ DCHECK(root_array_available_);
Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- ASSERT(root_array_available_);
+ DCHECK(root_array_available_);
cmpp(with, Operand(kRootRegister,
(index << kPointerSizeLog2) - kRootRegisterBias));
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
- ASSERT(root_array_available_);
- ASSERT(!with.AddressUsesRegister(kScratchRegister));
+ DCHECK(root_array_available_);
+ DCHECK(!with.AddressUsesRegister(kScratchRegister));
LoadRoot(kScratchRegister, index);
cmpp(with, kScratchRegister);
}
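// Standalone sketch of the addressing LoadRoot/StoreRoot/PushRoot use above
// (the bias value is hypothetical): kRootRegister holds the roots array base
// plus kRootRegisterBias, so entry `index` sits at displacement
// index * kPointerSize - bias from the register.
#include <cassert>
#include <cstdint>

int main() {
  const int kPointerSizeLog2 = 3;  // 8-byte pointers on x64
  const int kBias = 128;           // hypothetical kRootRegisterBias
  uint64_t roots_base = 0x2000;    // hypothetical roots array address
  uint64_t root_register = roots_base + kBias;
  int index = 5;
  int32_t disp = (index << kPointerSizeLog2) - kBias;
  assert(root_register + disp == roots_base + (index << kPointerSizeLog2));
  return 0;
}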
ret(0);
bind(&buffer_overflowed);
} else {
- ASSERT(and_then == kFallThroughAtEnd);
+ DCHECK(and_then == kFallThroughAtEnd);
j(equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow =
if (and_then == kReturnAtEnd) {
ret(0);
} else {
- ASSERT(and_then == kFallThroughAtEnd);
+ DCHECK(and_then == kFallThroughAtEnd);
bind(&done);
}
}
Condition cc,
Label* branch,
Label::Distance distance) {
- if (Serializer::enabled(isolate())) {
+ if (serializer_enabled()) {
// Can't do arithmetic on external references if it might get serialized.
// The mask isn't really an address. We load it as an external reference in
// case the size of the new space is different between the snapshot maker
cmpp(scratch, kScratchRegister);
j(cc, branch, distance);
} else {
- ASSERT(is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())));
+ DCHECK(kPointerSize == kInt64Size
+ ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
+ : kPointerSize == kInt32Size);
intptr_t new_space_start =
reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
Register dst,
SaveFPRegsMode save_fp,
RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
// Although the object register is tagged, the offset is relative to the start
// of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kPointerSize));
leap(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
bind(&ok);
}
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
}
-void MacroAssembler::RecordWriteArray(Register object,
- Register value,
- Register index,
- SaveFPRegsMode save_fp,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
+void MacroAssembler::RecordWriteArray(
+ Register object,
+ Register value,
+ Register index,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis.
Label done;
leap(dst, Operand(object, index, times_pointer_size,
FixedArray::kHeaderSize - kHeapObjectTag));
- RecordWrite(
- object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK);
+ RecordWrite(object, dst, value, save_fp, remembered_set_action,
+ OMIT_SMI_CHECK, pointers_to_here_check_for_value);
bind(&done);
}
-void MacroAssembler::RecordWrite(Register object,
- Register address,
- Register value,
- SaveFPRegsMode fp_mode,
- RememberedSetAction remembered_set_action,
- SmiCheck smi_check) {
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
+void MacroAssembler::RecordWriteForMap(Register object,
+ Register map,
+ Register dst,
+ SaveFPRegsMode fp_mode) {
+ DCHECK(!object.is(kScratchRegister));
+ DCHECK(!object.is(map));
+ DCHECK(!object.is(dst));
+ DCHECK(!map.is(dst));
AssertNotSmi(object);
- if (remembered_set_action == OMIT_REMEMBERED_SET &&
- !FLAG_incremental_marking) {
+ if (emit_debug_code()) {
+ Label ok;
+ if (map.is(kScratchRegister)) pushq(map);
+ CompareMap(map, isolate()->factory()->meta_map());
+ if (map.is(kScratchRegister)) popq(map);
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
+ if (!FLAG_incremental_marking) {
return;
}
if (emit_debug_code()) {
Label ok;
- cmpp(value, Operand(address, 0));
+ if (map.is(kScratchRegister)) pushq(map);
+ cmpp(map, FieldOperand(object, HeapObject::kMapOffset));
+ if (map.is(kScratchRegister)) popq(map);
j(equal, &ok, Label::kNear);
int3();
bind(&ok);
}
+ // Compute the address.
+ leap(dst, FieldOperand(object, HeapObject::kMapOffset));
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ // A single check of the map's page's interesting flag suffices, since it is
+ // only set during incremental collection, and then it is guaranteed that
+ // the from object's page's interesting flag is also set. This optimization
+ // relies on the fact that maps can never be in new space.
+ CheckPageFlag(map,
+ map, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+
+ RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+ fp_mode);
+ CallStub(&stub);
+
+ bind(&done);
+
// Count number of write barriers in generated code.
isolate()->counters()->write_barriers_static()->Increment();
IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+ // Clobber clobbered registers when running with the debug-code flag
+ // turned on to provoke errors.
+ if (emit_debug_code()) {
+ Move(dst, kZapValue, Assembler::RelocInfoNone());
+ Move(map, kZapValue, Assembler::RelocInfoNone());
+ }
+}
+
+
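// Minimal standalone sketch of the CheckPageFlag pattern relied on above;
// the page size, header layout, and flag bit are hypothetical stand-ins for
// V8's MemoryChunk: mask the object's address down to its page, then test a
// flag word kept in the page header.
#include <cassert>
#include <cstdint>
#include <cstdlib>

static const size_t kPageSize = 1u << 20;           // hypothetical
static const uintptr_t kInterestingMask = 1u << 0;  // hypothetical flag bit

static bool PageFlagSet(void* object, uintptr_t mask) {
  uintptr_t page = reinterpret_cast<uintptr_t>(object) & ~(kPageSize - 1);
  return (*reinterpret_cast<uintptr_t*>(page) & mask) != 0;  // flags at offset 0
}

int main() {
  void* page = std::aligned_alloc(kPageSize, kPageSize);
  *reinterpret_cast<uintptr_t*>(page) = kInterestingMask;  // set the flag
  assert(PageFlagSet(static_cast<char*>(page) + 0x100, kInterestingMask));
  std::free(page);
  return 0;
}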
+void MacroAssembler::RecordWrite(
+ Register object,
+ Register address,
+ Register value,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check,
+ PointersToHereCheck pointers_to_here_check_for_value) {
+ DCHECK(!object.is(value));
+ DCHECK(!object.is(address));
+ DCHECK(!value.is(address));
+ AssertNotSmi(object);
+
+ if (remembered_set_action == OMIT_REMEMBERED_SET &&
+ !FLAG_incremental_marking) {
+ return;
+ }
+
+ if (emit_debug_code()) {
+ Label ok;
+ cmpp(value, Operand(address, 0));
+ j(equal, &ok, Label::kNear);
+ int3();
+ bind(&ok);
+ }
+
// First, check if a write barrier is even needed. The tests below
// catch stores of smis and stores into the young generation.
Label done;
JumpIfSmi(value, &done);
}
- CheckPageFlag(value,
- value, // Used as scratch.
- MemoryChunk::kPointersToHereAreInterestingMask,
- zero,
- &done,
- Label::kNear);
+ if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+ CheckPageFlag(value,
+ value, // Used as scratch.
+ MemoryChunk::kPointersToHereAreInterestingMask,
+ zero,
+ &done,
+ Label::kNear);
+ }
CheckPageFlag(object,
value, // Used as scratch.
bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
testp(rsp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected, Label::kNear);
}
#endif
- Push(rax);
Move(kScratchRegister, Smi::FromInt(static_cast<int>(reason)),
Assembler::RelocInfoNone());
Push(kScratchRegister);
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
+ DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs
Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::StubReturn(int argc) {
- ASSERT(argc >= 1 && generating_stub());
+ DCHECK(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
}
}
-void MacroAssembler::IllegalOperation(int num_arguments) {
- if (num_arguments > 0) {
- addp(rsp, Immediate(num_arguments * kPointerSize));
- }
- LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-}
-
-
void MacroAssembler::IndexFromHash(Register hash, Register index) {
// The assert checks that the constants for the maximum number of digits
// for an array index cached in the hash field and the number of bits
// reserved for it do not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
- // We want the smi-tagged index in key. Even if we subsequently go to
- // the slow case, converting the key to a smi is always valid.
- // key: string key
- // hash: key's hash field, including its array index value.
- andp(hash, Immediate(String::kArrayIndexValueMask));
- shrp(hash, Immediate(String::kHashShift));
- // Here we actually clobber the key which will be used if calling into
- // runtime later. However as the new key is the numeric value of a string key
- // there is no difference in using either key.
- Integer32ToSmi(index, hash);
+ if (!hash.is(index)) {
+ movl(index, hash);
+ }
+ DecodeFieldToSmi<String::ArrayIndexValueBits>(index);
}
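// Standalone sketch of the DecodeFieldToSmi step above, with a hypothetical
// field layout standing in for String::ArrayIndexValueBits: mask out the
// field, shift it down, then smi-tag the result by shifting left.
#include <cassert>
#include <cstdint>

int main() {
  const int kFieldShift = 2, kFieldBits = 24;  // hypothetical bitfield
  const uint32_t kFieldMask = ((1u << kFieldBits) - 1) << kFieldShift;
  const int kSmiShift = 32;                    // 32-bit smis on x64
  uint32_t hash = 7u << kFieldShift;           // hash whose index field is 7
  uint32_t value = (hash & kFieldMask) >> kFieldShift;
  int64_t smi = static_cast<int64_t>(value) << kSmiShift;
  assert(smi == (static_cast<int64_t>(7) << 32));
  return 0;
}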
// If the expected number of arguments of the runtime function is
// constant, we check that the actual number of arguments match the
// expectation.
- if (f->nargs >= 0 && f->nargs != num_arguments) {
- IllegalOperation(num_arguments);
- return;
- }
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
// TODO(1236192): Most runtime routines don't need the number of
// arguments passed in because it is constant. At some point we
static int Offset(ExternalReference ref0, ExternalReference ref1) {
int64_t offset = (ref0.address() - ref1.address());
// Check that it fits into an int.
- ASSERT(static_cast<int>(offset) == offset);
+ DCHECK(static_cast<int>(offset) == offset);
return static_cast<int>(offset);
}
ExternalReference scheduled_exception_address =
ExternalReference::scheduled_exception_address(isolate());
- ASSERT(rdx.is(function_address) || r8.is(function_address));
+ DCHECK(rdx.is(function_address) || r8.is(function_address));
// Allocate HandleScope in callee-save registers.
Register prev_next_address_reg = r14;
Register prev_limit_reg = rbx;
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
}
jmp(&exception_handled);
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Rely on the assertion to check that the number of provided
// arguments matches the expected number of arguments. Fake a
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(rdi));
+ DCHECK(!target.is(rdi));
// Load the JavaScript builtin function from the builtins object.
GetBuiltinFunction(rdi, id);
movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- subp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
+ subp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movups(Operand(rsp, i * kSIMD128Size), reg);
+ movsd(Operand(rsp, i * kDoubleSize), reg);
}
}
}
if (fp_mode == kSaveFPRegs) {
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movups(reg, Operand(rsp, i * kSIMD128Size));
+ movsd(reg, Operand(rsp, i * kDoubleSize));
}
- addp(rsp, Immediate(kSIMD128Size * XMMRegister::kMaxNumRegisters));
+ addp(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
}
for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
Register reg = saved_regs[i];
void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
movsxbq(dst, src);
} else if (r.IsUInteger8()) {
void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
movb(dst, src);
} else if (r.IsInteger16() || r.IsUInteger16()) {
} else if (r.IsInteger32()) {
movl(dst, src);
} else {
+ if (r.IsHeapObject()) {
+ AssertNotSmi(src);
+ } else if (r.IsSmi()) {
+ AssertSmi(src);
+ }
movp(dst, src);
}
}
void MacroAssembler::SafeMove(Register dst, Smi* src) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
if (SmiValuesAre32Bits()) {
// JIT cookie can be converted to Smi.
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
xorp(dst, kScratchRegister);
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
movp(dst, Immediate(value ^ jit_cookie()));
xorp(dst, Immediate(jit_cookie()));
Move(kScratchRegister, Smi::FromInt(jit_cookie()));
xorp(Operand(rsp, 0), kScratchRegister);
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
Push(Immediate(value ^ jit_cookie()));
xorp(Operand(rsp, 0), Immediate(jit_cookie()));
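// Minimal sketch of the JIT-cookie masking SafeMove/SafePush perform above:
// the immediate actually written into the code stream is value ^ cookie, and
// a second XOR against the cookie restores the real value, so an untrusted
// 32-bit constant never appears verbatim in executable memory.
#include <cassert>
#include <cstdint>

int main() {
  const int32_t cookie = 0x12345678;    // stand-in for jit_cookie()
  int32_t value = 0x0BADBEEF;           // untrusted constant
  int32_t emitted = value ^ cookie;     // what lands in the instruction stream
  int32_t restored = emitted ^ cookie;  // the follow-up xorp
  assert(restored == value);
  return 0;
}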
if (emit_debug_code()) {
Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
Assembler::RelocInfoNone());
- cmpq(dst, kSmiConstantRegister);
+ cmpp(dst, kSmiConstantRegister);
Assert(equal, kUninitializedKSmiConstantRegister);
}
int value = source->value();
}
if (SmiValuesAre32Bits()) {
- ASSERT(kSmiShift % kBitsPerByte == 0);
+ DCHECK(kSmiShift % kBitsPerByte == 0);
movl(Operand(dst, kSmiShift / kBitsPerByte), src);
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
Integer32ToSmi(kScratchRegister, src);
movp(dst, kScratchRegister);
}
if (SmiValuesAre32Bits()) {
shrp(dst, Immediate(kSmiShift));
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
sarl(dst, Immediate(kSmiShift));
}
}
if (SmiValuesAre32Bits()) {
movl(dst, Operand(src, kSmiShift / kBitsPerByte));
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
movl(dst, src);
sarl(dst, Immediate(kSmiShift));
}
if (SmiValuesAre32Bits()) {
movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
movp(dst, src);
SmiToInteger64(dst, dst);
}
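// Standalone sketch of the 32-bit-smi layout the field loads above exploit:
// with kSmiShift == 32 the payload sits in the upper half of the tagged
// word, so on little-endian x64 a 4-byte load at byte offset
// kSmiShift / kBitsPerByte (= 4) reads the untagged int32 directly.
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const int kSmiShift = 32;
  int64_t smi = static_cast<int64_t>(-42) << kSmiShift;  // tagged smi word
  int32_t payload;
  std::memcpy(&payload, reinterpret_cast<char*>(&smi) + kSmiShift / 8, 4);
  assert(payload == -42);
  return 0;
}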
void MacroAssembler::Cmp(Register dst, Smi* src) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
if (src->value() == 0) {
testp(dst, dst);
} else {
if (SmiValuesAre32Bits()) {
cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
cmpl(dst, Immediate(src));
}
}
void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
// The Operand cannot use the smi register.
Register smi_reg = GetSmiConstant(src);
- ASSERT(!dst.AddressUsesRegister(smi_reg));
+ DCHECK(!dst.AddressUsesRegister(smi_reg));
cmpp(dst, smi_reg);
}
if (SmiValuesAre32Bits()) {
cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
SmiToInteger32(kScratchRegister, dst);
cmpl(kScratchRegister, src);
}
void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
Register src,
int power) {
- ASSERT(power >= 0);
- ASSERT(power < 64);
+ DCHECK(power >= 0);
+ DCHECK(power < 64);
if (power == 0) {
SmiToInteger64(dst, src);
return;
void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
Register src,
int power) {
- ASSERT((0 <= power) && (power < 32));
+ DCHECK((0 <= power) && (power < 32));
if (dst.is(src)) {
shrp(dst, Immediate(power + kSmiShift));
} else {
Label* on_not_smis,
Label::Distance near_jump) {
if (dst.is(src1) || dst.is(src2)) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
movp(kScratchRegister, src1);
orp(kScratchRegister, src2);
JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
leal(kScratchRegister, Operand(first, second, times_1, 0));
testb(kScratchRegister, Immediate(0x03));
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
movl(kScratchRegister, first);
orl(kScratchRegister, second);
testb(kScratchRegister, Immediate(kSmiTagMask));
Condition MacroAssembler::CheckIsMinSmi(Register src) {
- ASSERT(!src.is(kScratchRegister));
+ DCHECK(!src.is(kScratchRegister));
// If we overflow by subtracting one, it's the minimal smi value.
cmpp(src, kSmiConstantRegister);
return overflow;
// A 32-bit integer value can always be converted to a smi.
return always;
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
cmpl(src, Immediate(0xc0000000));
return positive;
}
testl(src, src);
return positive;
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
testl(src, Immediate(0xc0000000));
return zero;
}
}
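// Standalone sketch of the 31-bit-smi range test above (semantics inferred
// from the cmpl against 0xc0000000): a value fits in a 31-bit smi iff it
// lies in [-2^30, 2^30), i.e. iff value + 2^30 stays non-negative in
// wrapping 32-bit arithmetic -- exactly the sign flag the comparison leaves.
#include <cassert>
#include <cstdint>

static bool IsValid31BitSmi(int32_t value) {
  uint32_t biased = static_cast<uint32_t>(value) + 0x40000000u;
  return static_cast<int32_t>(biased) >= 0;  // the "positive" condition
}

int main() {
  assert(IsValid31BitSmi(0));
  assert(IsValid31BitSmi((1 << 30) - 1));
  assert(!IsValid31BitSmi(1 << 30));
  assert(IsValid31BitSmi(-(1 << 30)));
  assert(!IsValid31BitSmi(INT32_MIN));
  return 0;
}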
+void MacroAssembler::JumpIfValidSmiValue(Register src,
+ Label* on_valid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckInteger32ValidSmiValue(src);
+ j(is_valid, on_valid, near_jump);
+}
+
+
void MacroAssembler::JumpIfNotValidSmiValue(Register src,
Label* on_invalid,
Label::Distance near_jump) {
}
+void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
+ Label* on_valid,
+ Label::Distance near_jump) {
+ Condition is_valid = CheckUInteger32ValidSmiValue(src);
+ j(is_valid, on_valid, near_jump);
+}
+
+
void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
Label* on_invalid,
Label::Distance near_jump) {
}
return;
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
switch (constant->value()) {
case 1:
addp(dst, kSmiConstantRegister);
addl(Operand(dst, kSmiShift / kBitsPerByte),
Immediate(constant->value()));
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
addp(dst, Immediate(constant));
}
}
movp(dst, src);
}
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
addp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
- ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
subp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
CHECK(mode.IsEmpty());
}
} else {
- ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
LoadSmiConstant(dst, constant);
addp(dst, src);
j(overflow, bailout_label, near_jump);
movp(dst, src);
}
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
subp(dst, constant_reg);
} else {
movp(dst, src);
}
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
LoadSmiConstant(kScratchRegister, constant);
subp(dst, kScratchRegister);
if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
j(no_overflow, bailout_label, near_jump);
- ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
addp(dst, kScratchRegister);
} else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
CHECK(mode.IsEmpty());
}
} else {
- ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
- ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
+ DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+ DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
if (constant->value() == Smi::kMinValue) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
movp(dst, src);
LoadSmiConstant(kScratchRegister, constant);
subp(dst, kScratchRegister);
Label* on_smi_result,
Label::Distance near_jump) {
if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
movp(kScratchRegister, src);
negp(dst); // Low 32 bits are retained as zero by negation.
// Test if result is zero or Smi::kMinValue.
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!dst.is(src2));
SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
const Operand& src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!src2.AddressUsesRegister(dst));
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!src2.AddressUsesRegister(dst));
SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!dst.is(src2));
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!dst.is(src2));
SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
const Operand& src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT_NOT_NULL(on_not_smi_result);
- ASSERT(!src2.AddressUsesRegister(dst));
+ DCHECK_NOT_NULL(on_not_smi_result);
+ DCHECK(!src2.AddressUsesRegister(dst));
SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
}
void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
- ASSERT(!dst.is(src2));
+ DCHECK(!dst.is(src2));
SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
}
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!dst.is(src2));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src2));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
if (dst.is(src1)) {
Label failure, zero_correct_result;
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src2.is(rax));
+ DCHECK(!src2.is(rdx));
+ DCHECK(!src1.is(rdx));
// Check for 0 divisor (result is +/-Infinity).
testp(src2, src2);
// We overshoot a little and go to slow case if we divide min-value
// by any negative value, not just -1.
Label safe_div;
- testl(rax, Immediate(0x7fffffff));
+ testl(rax, Immediate(~Smi::kMinValue));
j(not_zero, &safe_div, Label::kNear);
testp(src2, src2);
if (src1.is(rax)) {
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!src2.is(rax));
- ASSERT(!src2.is(rdx));
- ASSERT(!src1.is(rdx));
- ASSERT(!src1.is(src2));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!src2.is(rax));
+ DCHECK(!src2.is(rdx));
+ DCHECK(!src1.is(rdx));
+ DCHECK(!src1.is(src2));
testp(src2, src2);
j(zero, on_not_smi_result, near_jump);
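// Sketch of the idiv hazards SmiDiv/SmiMod route to the slow path (x64
// behavior, standalone): a zero divisor faults, and INT32_MIN / -1 faults
// because the quotient 2^31 is unrepresentable; the code above deliberately
// overshoots and bails out for a min-value dividend with any negative
// divisor.
#include <cassert>
#include <cstdint>

static bool NeedsSlowPath(int32_t dividend, int32_t divisor) {
  return divisor == 0 || (dividend == INT32_MIN && divisor < 0);
}

int main() {
  assert(NeedsSlowPath(7, 0));           // division by zero
  assert(NeedsSlowPath(INT32_MIN, -1));  // the real overflow hazard
  assert(NeedsSlowPath(INT32_MIN, -7));  // safe, but bailed out anyway
  assert(!NeedsSlowPath(-5, -1));
  return 0;
}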
void MacroAssembler::SmiNot(Register dst, Register src) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src.is(kScratchRegister));
if (SmiValuesAre32Bits()) {
// Set tag and padding bits before negating, so that they are zero
// afterwards.
movl(kScratchRegister, Immediate(~0));
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
movl(kScratchRegister, Immediate(1));
}
if (dst.is(src)) {
void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
- ASSERT(!dst.is(src2));
+ DCHECK(!dst.is(src2));
if (!dst.is(src1)) {
movp(dst, src1);
}
if (constant->value() == 0) {
Set(dst, 0);
} else if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
andp(dst, constant_reg);
} else {
void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
- ASSERT(!src1.is(src2));
+ DCHECK(!src1.is(src2));
movp(dst, src1);
}
orp(dst, src2);
void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
orp(dst, constant_reg);
} else {
void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
if (!dst.is(src1)) {
- ASSERT(!src1.is(src2));
+ DCHECK(!src1.is(src2));
movp(dst, src1);
}
xorp(dst, src2);
void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
if (dst.is(src)) {
- ASSERT(!dst.is(kScratchRegister));
+ DCHECK(!dst.is(kScratchRegister));
Register constant_reg = GetSmiConstant(constant);
xorp(dst, constant_reg);
} else {
void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
Register src,
int shift_value) {
- ASSERT(is_uint5(shift_value));
+ DCHECK(is_uint5(shift_value));
if (shift_value > 0) {
if (dst.is(src)) {
sarp(dst, Immediate(shift_value + kSmiShift));
void MacroAssembler::SmiShiftLeftConstant(Register dst,
Register src,
- int shift_value) {
- if (!dst.is(src)) {
- movp(dst, src);
- }
- if (shift_value > 0) {
- shlp(dst, Immediate(shift_value));
+ int shift_value,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (SmiValuesAre32Bits()) {
+ if (!dst.is(src)) {
+ movp(dst, src);
+ }
+ if (shift_value > 0) {
+ // Shift amount is specified by the lower 5 bits, not six as in the shl opcode.
+ shlq(dst, Immediate(shift_value & 0x1f));
+ }
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ if (dst.is(src)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ SmiToInteger32(dst, src);
+ shll(dst, Immediate(shift_value));
+ JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump);
+ Integer32ToSmi(dst, dst);
+ }
}
}
if (dst.is(src)) {
UNIMPLEMENTED(); // Not used.
} else {
- movp(dst, src);
if (shift_value == 0) {
- testp(dst, dst);
+ testp(src, src);
j(negative, on_not_smi_result, near_jump);
}
- shrq(dst, Immediate(shift_value + kSmiShift));
- shlq(dst, Immediate(kSmiShift));
+ if (SmiValuesAre32Bits()) {
+ movp(dst, src);
+ shrp(dst, Immediate(shift_value + kSmiShift));
+ shlp(dst, Immediate(kSmiShift));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ SmiToInteger32(dst, src);
+ shrp(dst, Immediate(shift_value));
+ JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
+ Integer32ToSmi(dst, dst);
+ }
}
}
void MacroAssembler::SmiShiftLeft(Register dst,
Register src1,
- Register src2) {
- ASSERT(!dst.is(rcx));
- // Untag shift amount.
- if (!dst.is(src1)) {
- movq(dst, src1);
+ Register src2,
+ Label* on_not_smi_result,
+ Label::Distance near_jump) {
+ if (SmiValuesAre32Bits()) {
+ DCHECK(!dst.is(rcx));
+ if (!dst.is(src1)) {
+ movp(dst, src1);
+ }
+ // Untag shift amount.
+ SmiToInteger32(rcx, src2);
+ // Shift amount is specified by the lower 5 bits, not six as in the shl opcode.
+ andp(rcx, Immediate(0x1f));
+ shlq_cl(dst);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src2));
+ DCHECK(!dst.is(rcx));
+
+ if (src1.is(rcx) || src2.is(rcx)) {
+ movq(kScratchRegister, rcx);
+ }
+ if (dst.is(src1)) {
+ UNIMPLEMENTED(); // Not used.
+ } else {
+ Label valid_result;
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shll_cl(dst);
+ JumpIfValidSmiValue(dst, &valid_result, Label::kNear);
+ // Since neither src1 nor src2 can be dst, we do not need to restore them
+ // after clobbering dst.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&valid_result);
+ Integer32ToSmi(dst, dst);
+ }
}
- SmiToInteger32(rcx, src2);
- // Shift amount specified by lower 5 bits, not six as the shl opcode.
- andq(rcx, Immediate(0x1f));
- shlq_cl(dst);
}
Register src2,
Label* on_not_smi_result,
Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- // dst and src1 can be the same, because the one case that bails out
- // is a shift by 0, which leaves dst, and therefore src1, unchanged.
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src2));
+ DCHECK(!dst.is(rcx));
if (src1.is(rcx) || src2.is(rcx)) {
movq(kScratchRegister, rcx);
}
- if (!dst.is(src1)) {
- movq(dst, src1);
- }
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- shrq_cl(dst); // Shift is rcx modulo 0x1f + 32.
- shlq(dst, Immediate(kSmiShift));
- testq(dst, dst);
- if (src1.is(rcx) || src2.is(rcx)) {
- Label positive_result;
- j(positive, &positive_result, Label::kNear);
- if (src1.is(rcx)) {
- movq(src1, kScratchRegister);
- } else {
- movq(src2, kScratchRegister);
- }
- jmp(on_not_smi_result, near_jump);
- bind(&positive_result);
+ if (dst.is(src1)) {
+ UNIMPLEMENTED(); // Not used.
} else {
- // src2 was zero and src1 negative.
- j(negative, on_not_smi_result, near_jump);
+ Label valid_result;
+ SmiToInteger32(dst, src1);
+ SmiToInteger32(rcx, src2);
+ shrl_cl(dst);
+ JumpIfUIntValidSmiValue(dst, &valid_result, Label::kNear);
+ // Since neither src1 nor src2 can be dst, we do not need to restore them
+ // after clobbering dst.
+ if (src1.is(rcx) || src2.is(rcx)) {
+ if (src1.is(rcx)) {
+ movq(src1, kScratchRegister);
+ } else {
+ movq(src2, kScratchRegister);
+ }
+ }
+ jmp(on_not_smi_result, near_jump);
+ bind(&valid_result);
+ Integer32ToSmi(dst, dst);
}
}
void MacroAssembler::SmiShiftArithmeticRight(Register dst,
Register src1,
Register src2) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(rcx));
- if (src1.is(rcx)) {
- movp(kScratchRegister, src1);
- } else if (src2.is(rcx)) {
- movp(kScratchRegister, src2);
- }
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(rcx));
+
+ SmiToInteger32(rcx, src2);
if (!dst.is(src1)) {
movp(dst, src1);
}
- SmiToInteger32(rcx, src2);
- orl(rcx, Immediate(kSmiShift));
- sarp_cl(dst); // Shift 32 + original rcx & 0x1f.
- shlp(dst, Immediate(kSmiShift));
- if (src1.is(rcx)) {
- movp(src1, kScratchRegister);
- } else if (src2.is(rcx)) {
- movp(src2, kScratchRegister);
- }
+ SmiToInteger32(dst, dst);
+ sarl_cl(dst);
+ Integer32ToSmi(dst, dst);
}
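// Sketch of why the arithmetic shift above needs no bailout, unlike the
// logical variants: sar moves the result toward zero and preserves the sign,
// so untagging, shifting, and retagging always yields a valid smi.
#include <cassert>
#include <cstdint>

static int32_t SarPayload(int32_t payload, int count) {
  // sarl_cl semantics: count taken modulo 32; >> on int32_t is arithmetic on
  // the compilers this targets.
  return payload >> (count & 0x1f);
}

int main() {
  assert(SarPayload(-8, 1) == -4);
  assert(SarPayload(-1, 31) == -1);  // sign preserved, still a valid smi
  assert(SarPayload((1 << 30) - 1, 0) == (1 << 30) - 1);
  return 0;
}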
Register src2,
Label* on_not_smis,
Label::Distance near_jump) {
- ASSERT(!dst.is(kScratchRegister));
- ASSERT(!src1.is(kScratchRegister));
- ASSERT(!src2.is(kScratchRegister));
- ASSERT(!dst.is(src1));
- ASSERT(!dst.is(src2));
+ DCHECK(!dst.is(kScratchRegister));
+ DCHECK(!src1.is(kScratchRegister));
+ DCHECK(!src2.is(kScratchRegister));
+ DCHECK(!dst.is(src1));
+ DCHECK(!dst.is(src2));
// The operands must not both be smis.
#ifdef DEBUG
Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
#endif
STATIC_ASSERT(kSmiTag == 0);
- ASSERT_EQ(0, Smi::FromInt(0));
+ DCHECK_EQ(0, Smi::FromInt(0));
movl(kScratchRegister, Immediate(kSmiTagMask));
andp(kScratchRegister, src1);
testl(kScratchRegister, src2);
j(not_zero, on_not_smis, near_jump);
// Exactly one operand is a smi.
- ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+ DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
// kScratchRegister still holds src1 & kSmiTagMask, which is either zero or one.
subp(kScratchRegister, Immediate(1));
// If src1 is a smi, then scratch register all 1s, else it is all 0s.
Register src,
int shift) {
if (SmiValuesAre32Bits()) {
- ASSERT(is_uint6(shift));
+ DCHECK(is_uint6(shift));
// There is a possible optimization if shift is in the range 60-63, but that
// will (and must) never happen.
if (!dst.is(src)) {
}
return SmiIndex(dst, times_1);
} else {
- ASSERT(SmiValuesAre31Bits());
- ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
if (!dst.is(src)) {
movp(dst, src);
}
int shift) {
if (SmiValuesAre32Bits()) {
// Register src holds a positive smi.
- ASSERT(is_uint6(shift));
+ DCHECK(is_uint6(shift));
if (!dst.is(src)) {
movp(dst, src);
}
}
return SmiIndex(dst, times_1);
} else {
- ASSERT(SmiValuesAre31Bits());
- ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
if (!dst.is(src)) {
movp(dst, src);
}
void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
if (SmiValuesAre32Bits()) {
- ASSERT_EQ(0, kSmiShift % kBitsPerByte);
+ DCHECK_EQ(0, kSmiShift % kBitsPerByte);
addl(dst, Operand(src, kSmiShift / kBitsPerByte));
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
SmiToInteger32(kScratchRegister, src);
addl(dst, kScratchRegister);
}
void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
- ASSERT(!src.is(scratch));
+ DCHECK(!src.is(scratch));
movp(scratch, src);
// High bits.
shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
- ASSERT(!dst.is(scratch));
+ DCHECK(!dst.is(scratch));
Pop(scratch);
// Low bits.
shrp(scratch, Immediate(kSmiShift));
if (SmiValuesAre32Bits()) {
testl(Operand(src, kIntSize), Immediate(source->value()));
} else {
- ASSERT(SmiValuesAre31Bits());
+ DCHECK(SmiValuesAre31Bits());
testl(src, Immediate(source));
}
}
}
-void MacroAssembler::absps(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } float_absolute_constant =
- { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
- Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_absolute_constant));
- andps(dst, Operand(kScratchRegister, 0));
-}
-
-
-void MacroAssembler::abspd(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint64_t a;
- uint64_t b;
- } double_absolute_constant =
- { V8_UINT64_C(0x7FFFFFFFFFFFFFFF), V8_UINT64_C(0x7FFFFFFFFFFFFFFF) };
- Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_absolute_constant));
- andpd(dst, Operand(kScratchRegister, 0));
-}
-
-
-void MacroAssembler::negateps(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } float_negate_constant =
- { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
- Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_negate_constant));
- xorps(dst, Operand(kScratchRegister, 0));
-}
-
-
-void MacroAssembler::negatepd(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint64_t a;
- uint64_t b;
- } double_absolute_constant =
- { V8_UINT64_C(0x8000000000000000), V8_UINT64_C(0x8000000000000000) };
- Set(kScratchRegister, reinterpret_cast<intptr_t>(&double_absolute_constant));
- xorpd(dst, Operand(kScratchRegister, 0));
-}
-
-
-void MacroAssembler::notps(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } float_not_constant =
- { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- Set(kScratchRegister, reinterpret_cast<intptr_t>(&float_not_constant));
- xorps(dst, Operand(kScratchRegister, 0));
-}
-
-
-void MacroAssembler::pnegd(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
- notps(dst);
- Set(kScratchRegister, reinterpret_cast<intptr_t>(&int32_one_constant));
- paddd(dst, Operand(kScratchRegister, 0));
-}
-
-
-
void MacroAssembler::JumpIfNotString(Register object,
Register object_map,
Label* not_string,
movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
// Check that both are flat ASCII strings.
- ASSERT(kNotStringTag != 0);
+ DCHECK(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
const int kFlatAsciiStringTag =
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
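// Standalone sketch of the bit-interleaving trick above, with hypothetical
// mask and tag values chosen so that mask & (mask << 3) == 0: computing
// scratch1 + 8 * scratch2 combines both masked instance types carry-free,
// so a single cmpl checks both strings at once.
#include <cassert>
#include <cstdint>

static const uint32_t kMask = 0x7;  // hypothetical kFlatAsciiStringMask
static const uint32_t kTag = 0x4;   // hypothetical kFlatAsciiStringTag

static bool BothMatch(uint32_t type1, uint32_t type2) {
  uint32_t combined = (type1 & kMask) + ((type2 & kMask) << 3);  // the leap
  return combined == kTag + (kTag << 3);
}

int main() {
  assert((kMask & (kMask << 3)) == 0);  // the DCHECK that makes this sound
  assert(BothMatch(0x4, 0x4));
  assert(!BothMatch(0x4, 0x5) && !BothMatch(0x5, 0x4));
  return 0;
}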
movp(scratch2, second_object_instance_type);
// Check that both are flat ASCII strings.
- ASSERT(kNotStringTag != 0);
+ DCHECK(kNotStringTag != 0);
const int kFlatAsciiStringMask =
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
const int kFlatAsciiStringTag =
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
void MacroAssembler::MoveHeapObject(Register result,
Handle<Object> object) {
AllowDeferredHandleDereference using_raw_address;
- ASSERT(object->IsHeapObject());
+ DCHECK(object->IsHeapObject());
if (isolate()->heap()->InNewSpace(*object)) {
Handle<Cell> cell = isolate()->factory()->NewCell(object);
Move(result, cell, RelocInfo::CELL);
void MacroAssembler::DropUnderReturnAddress(int stack_elements,
Register scratch) {
- ASSERT(stack_elements > 0);
+ DCHECK(stack_elements > 0);
if (kPointerSize == kInt64Size && stack_elements == 1) {
popq(MemOperand(rsp, 0));
return;
pushq(src);
} else {
// x32 uses 64-bit push for rbp in the prologue.
- ASSERT(src.code() != rbp.code());
+ DCHECK(src.code() != rbp.code());
leal(rsp, Operand(rsp, -4));
movp(Operand(rsp, 0), src);
}
popq(dst);
} else {
// x32 uses 64-bit pop for rbp in the epilogue.
- ASSERT(dst.code() != rbp.code());
+ DCHECK(dst.code() != rbp.code());
movp(dst, Operand(rsp, 0));
leal(rsp, Operand(rsp, 4));
}
void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
Register base,
int offset) {
- ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
+ DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
offset <= SharedFunctionInfo::kSize &&
(((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
if (kPointerSize == kInt64Size) {
void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
int offset,
int bits) {
- ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
+ DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
offset <= SharedFunctionInfo::kSize &&
(((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
if (kPointerSize == kInt32Size) {
#ifdef DEBUG
int end_position = pc_offset() + CallSize(code_object);
#endif
- ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+ DCHECK(RelocInfo::IsCodeTarget(rmode) ||
rmode == RelocInfo::CODE_AGE_SEQUENCE);
call(code_object, rmode, ast_id);
#ifdef DEBUG
void MacroAssembler::LoadUint32(XMMRegister dst,
- Register src,
- XMMRegister scratch) {
+ Register src) {
if (FLAG_debug_code) {
cmpq(src, Immediate(0xffffffff));
Assert(below_equal, kInputGPRIsExpectedToHaveUpper32Cleared);
Label* lost_precision,
Label::Distance dst) {
Label done;
- ASSERT(!temp.is(xmm0));
+ DCHECK(!temp.is(xmm0));
// Heap number map check.
CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
}
-void MacroAssembler::Throw(BailoutReason reason) {
-#ifdef DEBUG
- const char* msg = GetBailoutReason(reason);
- if (msg != NULL) {
- RecordComment("Throw message: ");
- RecordComment(msg);
- }
-#endif
-
- Push(rax);
- Push(Smi::FromInt(reason));
- if (!has_frame_) {
- // We don't actually want to generate a pile of code for this, so just
- // claim there is a stack frame, without generating one.
- FrameScope scope(this, StackFrame::NONE);
- CallRuntime(Runtime::kHiddenThrowMessage, 1);
- } else {
- CallRuntime(Runtime::kHiddenThrowMessage, 1);
- }
- // Control will not return here.
- int3();
-}
-
-
-void MacroAssembler::ThrowIf(Condition cc, BailoutReason reason) {
- Label L;
- j(NegateCondition(cc), &L);
- Throw(reason);
- // will not return here
- bind(&L);
-}
-
-
void MacroAssembler::LoadInstanceDescriptors(Register map,
Register descriptors) {
movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- movp(dst, FieldOperand(map, Map::kBitField3Offset));
+ movl(dst, FieldOperand(map, Map::kBitField3Offset));
DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- movp(dst, FieldOperand(map, Map::kBitField3Offset));
- Move(kScratchRegister, Smi::FromInt(Map::EnumLengthBits::kMask));
- andp(dst, kScratchRegister);
+ movl(dst, FieldOperand(map, Map::kBitField3Offset));
+ andl(dst, Immediate(Map::EnumLengthBits::kMask));
+ Integer32ToSmi(dst, dst);
}
void MacroAssembler::AssertZeroExtended(Register int32_register) {
if (emit_debug_code()) {
- ASSERT(!int32_register.is(kScratchRegister));
+ DCHECK(!int32_register.is(kScratchRegister));
movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
cmpq(kScratchRegister, int32_register);
Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
Heap::RootListIndex root_value_index,
BailoutReason reason) {
if (emit_debug_code()) {
- ASSERT(!src.is(kScratchRegister));
+ DCHECK(!src.is(kScratchRegister));
LoadRoot(kScratchRegister, root_value_index);
cmpp(src, kScratchRegister);
Check(equal, reason);
Register result,
Label* miss,
bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- testl(function, Immediate(kSmiTagMask));
- j(zero, miss);
+ Label non_instance;
+ if (miss_on_bound_function) {
+ // Check that the receiver isn't a smi.
+ testl(function, Immediate(kSmiTagMask));
+ j(zero, miss);
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
+ // Check that the function really is a function.
+ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ j(not_equal, miss);
- if (miss_on_bound_function) {
movp(kScratchRegister,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
// It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
SharedFunctionInfo::kCompilerHintsOffset,
SharedFunctionInfo::kBoundFunction);
j(not_zero, miss);
- }
- // Make sure that the function has an instance prototype.
- Label non_instance;
- testb(FieldOperand(result, Map::kBitFieldOffset),
- Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance, Label::kNear);
+ // Make sure that the function has an instance prototype.
+ testb(FieldOperand(result, Map::kBitFieldOffset),
+ Immediate(1 << Map::kHasNonInstancePrototype));
+ j(not_zero, &non_instance, Label::kNear);
+ }
// Get the prototype or initial map from the function.
movp(result,
// Get the prototype from the initial map.
movp(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done, Label::kNear);
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- movp(result, FieldOperand(result, Map::kConstructorOffset));
+ if (miss_on_bound_function) {
+ jmp(&done, Label::kNear);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ movp(result, FieldOperand(result, Map::kConstructorOffset));
+ }
// All done.
bind(&done);
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand counter_operand = ExternalOperand(ExternalReference(counter));
if (value == 1) {
Set(rax, 0); // No arguments.
LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
CEntryStub ces(isolate(), 1);
- ASSERT(AllowThisStubCall(&ces));
+ DCHECK(AllowThisStubCall(&ces));
Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
}
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
Label done;
bool definitely_mismatches = false;
call(code);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
+ DCHECK(flag == JUMP_FUNCTION);
jmp(code);
}
bind(&done);
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
- ASSERT(function.is(rdi));
+ DCHECK(function.is(rdi));
movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
LoadSharedFunctionInfoSpecialField(rbx, rdx,
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
- ASSERT(function.is(rdi));
+ DCHECK(function.is(rdi));
movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
// Advances rdx to the end of the Code object header, to the start of
// the executable code.
*definitely_mismatches = false;
Label invoke;
if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
+ DCHECK(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
// IC mechanism.
cmpp(expected.reg(), Immediate(actual.immediate()));
j(equal, &invoke, Label::kNear);
- ASSERT(expected.reg().is(rbx));
+ DCHECK(expected.reg().is(rbx));
Set(rax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
cmpp(expected.reg(), actual.reg());
j(equal, &invoke, Label::kNear);
- ASSERT(actual.reg().is(rax));
- ASSERT(expected.reg().is(rbx));
+ DCHECK(actual.reg().is(rax));
+ DCHECK(expected.reg().is(rbx));
}
}
}
-void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
- if (frame_mode == BUILD_STUB_FRAME) {
+void MacroAssembler::StubPrologue() {
pushq(rbp); // Caller's frame pointer.
movp(rbp, rsp);
Push(rsi); // Callee's context.
Push(Smi::FromInt(StackFrame::STUB));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+ PredictableCodeSizeScope predictible_code_size_scope(this,
+ kNoCodeAgeSequenceLength);
+ if (code_pre_aging) {
+ // Pre-age the code.
+ Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
+ RelocInfo::CODE_AGE_SEQUENCE);
+ Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
} else {
- PredictableCodeSizeScope predictible_code_size_scope(this,
- kNoCodeAgeSequenceLength);
- if (isolate()->IsCodePreAgingActive()) {
- // Pre-age the code.
- Call(isolate()->builtins()->MarkCodeAsExecutedOnce(),
- RelocInfo::CODE_AGE_SEQUENCE);
- Nop(kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
- } else {
- pushq(rbp); // Caller's frame pointer.
- movp(rbp, rsp);
- Push(rsi); // Callee's context.
- Push(rdi); // Callee's JS function.
- }
+ pushq(rbp); // Caller's frame pointer.
+ movp(rbp, rsp);
+ Push(rsi); // Callee's context.
+ Push(rdi); // Callee's JS function.
}
}
void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
// Set up the frame structure on the stack.
// All constants are relative to the frame pointer of the exit frame.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
+ DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
kFPOnStackSize + kPCOnStackSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
+ DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
pushq(rbp);
movp(rbp, rsp);
// Reserve room for entry stack pointer and push the code object.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
Push(Immediate(0)); // Saved entry sp, patched before call.
Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
Push(kScratchRegister); // Accessed from ExitFrame::code_slot.
#endif
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumAllocatableRegisters * kSIMD128Size +
+ int space = XMMRegister::kMaxNumAllocatableRegisters * kDoubleSize +
arg_stack_space * kRegisterSize;
subp(rsp, Immediate(space));
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movups(Operand(rbp, offset - ((i + 1) * kSIMD128Size)), reg);
+ movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else if (arg_stack_space > 0) {
subp(rsp, Immediate(arg_stack_space * kRegisterSize));
}
// Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
- ASSERT(is_int8(kFrameAlignment));
+ DCHECK(IsPowerOf2(kFrameAlignment));
+ DCHECK(is_int8(kFrameAlignment));
andp(rsp, Immediate(-kFrameAlignment));
}
int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
XMMRegister reg = XMMRegister::FromAllocationIndex(i);
- movups(reg, Operand(rbp, offset - ((i + 1) * kSIMD128Size)));
+ movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
}
}
// Get the return address from the stack and restore the frame pointer.
Label* miss) {
Label same_contexts;
- ASSERT(!holder_reg.is(scratch));
- ASSERT(!scratch.is(kScratchRegister));
+ DCHECK(!holder_reg.is(scratch));
+ DCHECK(!scratch.is(kScratchRegister));
// Load current lexical context from the stack frame.
movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
// Compute the hash code from the untagged key. This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc
void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
// First of all we assign the hash seed to scratch.
andp(r2, r1);
// Scale the index by multiplying by the entry size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
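// Aside: Operand(r2, r2, times_2, 0) is the usual lea idiom for scaling by
// the 3-word entry size; a standalone equivalent of the arithmetic:
#include <cassert>

int main() {
  int r2 = 7;
  int scaled = r2 + r2 * 2;  // base + index*2, as the leap above computes
  assert(scaled == r2 * 3);
  return 0;
}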
// Check if the key matches.
// Check that the value is a normal property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
+ DCHECK_EQ(NORMAL, 0);
Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Smi::FromInt(PropertyDetails::TypeField::kMask));
j(not_zero, miss);
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
// No use of scratch if allocation top is provided.
- ASSERT(!scratch.is_valid());
+ DCHECK(!scratch.is_valid());
#ifdef DEBUG
// Assert that result actually contains top on entry.
Operand top_operand = ExternalOperand(allocation_top);
}
+void MacroAssembler::MakeSureDoubleAlignedHelper(Register result,
+ Register scratch,
+ Label* gc_required,
+ AllocationFlags flags) {
+ if (kPointerSize == kDoubleSize) {
+ if (FLAG_debug_code) {
+ testl(result, Immediate(kDoubleAlignmentMask));
+ Check(zero, kAllocationIsNotDoubleAligned);
+ }
+ } else {
+ // Align the next allocation. Storing the filler map without checking top
+ // is safe in new-space because the limit of the heap is aligned there.
+ DCHECK(kPointerSize * 2 == kDoubleSize);
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+ // Make sure scratch is not clobbered by this function as it might be
+ // used in UpdateAllocationTopHelper later.
+ DCHECK(!scratch.is(kScratchRegister));
+ Label aligned;
+ testl(result, Immediate(kDoubleAlignmentMask));
+ j(zero, &aligned, Label::kNear);
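+    // In old data space the allocation limit is not guaranteed to be
+    // double aligned, so the filler word stored below could cross it;
+    // fall back to the runtime through gc_required in that case.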
+ if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ ExternalReference allocation_limit =
+ AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+ cmpp(result, ExternalOperand(allocation_limit));
+ j(above_equal, gc_required);
+ }
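+    // Fill the alignment gap with a one-pointer filler object and bump
+    // the allocation address past it.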
+ LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
+ movp(Operand(result, 0), kScratchRegister);
+ addp(result, Immediate(kDoubleSize / 2));
+ bind(&aligned);
+ }
+}
+
+
void MacroAssembler::UpdateAllocationTopHelper(Register result_end,
Register scratch,
AllocationFlags flags) {
Register scratch,
Label* gc_required,
AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
jmp(gc_required);
return;
}
- ASSERT(!result.is(result_end));
+ DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, kAllocationIsNotDoubleAligned);
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
// Calculate new top and bail out if new space is exhausted.
}
} else if (tag_result) {
// Tag the result if requested.
- ASSERT(kHeapObjectTag == 1);
+ DCHECK(kHeapObjectTag == 1);
incp(result);
}
}
Register scratch,
Label* gc_required,
AllocationFlags flags) {
- ASSERT((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & SIZE_IN_WORDS) == 0);
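+  // A single lea computes header_size + (element_count << element_size).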
leap(result_end, Operand(element_count, element_size, header_size));
Allocate(result_end, result, result_end, scratch, gc_required, flags);
}
Register scratch,
Label* gc_required,
AllocationFlags flags) {
- ASSERT((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & SIZE_IN_WORDS) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
jmp(gc_required);
return;
}
- ASSERT(!result.is(result_end));
+ DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
- // Align the next allocation. Storing the filler map without checking top is
- // safe in new-space because the limit of the heap is aligned there.
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
- testq(result, Immediate(kDoubleAlignmentMask));
- Check(zero, kAllocationIsNotDoubleAligned);
+ if ((flags & DOUBLE_ALIGNMENT) != 0) {
+ MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags);
}
// Calculate new top and bail out if new space is exhausted.
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch,
- Label* gc_required) {
+ Label* gc_required,
+ MutableMode mode) {
// Allocate heap number in new space.
Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
- // Set the map.
- LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
- movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
-}
-
-
-void MacroAssembler::AllocateSIMDHeapObject(int size,
- Register result,
- Register scratch,
- Label* gc_required,
- Heap::RootListIndex map_index) {
- Allocate(size, result, scratch, no_reg, gc_required, TAG_OBJECT);
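+  // Select the map according to whether the heap number's value may be
+  // mutated in place.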
+ Heap::RootListIndex map_index = mode == MUTABLE
+ ? Heap::kMutableHeapNumberMapRootIndex
+ : Heap::kHeapNumberMapRootIndex;
// Set the map.
LoadRoot(kScratchRegister, map_index);
// observing object alignment.
const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
kObjectAlignmentMask;
- ASSERT(kShortSize == 2);
+ DCHECK(kShortSize == 2);
  // scratch1 = length * 2 + kObjectAlignmentMask + kHeaderAlignment.
leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
kHeaderAlignment));
const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
kObjectAlignmentMask;
movl(scratch1, length);
- ASSERT(kCharSize == 1);
+ DCHECK(kCharSize == 1);
addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
andp(scratch1, Immediate(~kObjectAlignmentMask));
if (kHeaderAlignment > 0) {
Register scratch1,
Register scratch2,
Label* gc_required) {
- Label allocate_new_space, install_map;
- AllocationFlags flags = TAG_OBJECT;
-
- ExternalReference high_promotion_mode = ExternalReference::
- new_space_high_promotion_mode_active_address(isolate());
-
- Load(scratch1, high_promotion_mode);
- testb(scratch1, Immediate(1));
- j(zero, &allocate_new_space);
- Allocate(ConsString::kSize,
- result,
- scratch1,
- scratch2,
- gc_required,
- static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE));
-
- jmp(&install_map);
-
- bind(&allocate_new_space);
Allocate(ConsString::kSize,
result,
scratch1,
scratch2,
gc_required,
- flags);
-
- bind(&install_map);
+ TAG_OBJECT);
// Set the map. The other fields are left uninitialized.
LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
Register length,
int min_length,
Register scratch) {
- ASSERT(min_length >= 0);
+ DCHECK(min_length >= 0);
if (emit_debug_code()) {
cmpl(length, Immediate(min_length));
Assert(greater_equal, kInvalidMinLength);
j(below, &short_string, Label::kNear);
}
- ASSERT(source.is(rsi));
- ASSERT(destination.is(rdi));
- ASSERT(length.is(rcx));
+ DCHECK(source.is(rsi));
+ DCHECK(destination.is(rdi));
+ DCHECK(length.is(rcx));
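+  // The fixed register assignments match the implicit operands of the x64
+  // string move instructions: rsi (source), rdi (destination), rcx (count).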
if (min_length <= kLongStringLimit) {
cmpl(length, Immediate(2 * kPointerSize));
// arguments.
// On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
// and the caller does not reserve stack slots for them.
- ASSERT(num_arguments >= 0);
+ DCHECK(num_arguments >= 0);
#ifdef _WIN64
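+  // The Win64 ABI requires the caller to reserve home slots ("shadow
+  // space") for all four register arguments, even when fewer are passed.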
const int kMinimumStackSlots = kRegisterPassedArguments;
if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
- int frame_alignment = OS::ActivationFrameAlignment();
- ASSERT(frame_alignment != 0);
- ASSERT(num_arguments >= 0);
+ int frame_alignment = base::OS::ActivationFrameAlignment();
+ DCHECK(frame_alignment != 0);
+ DCHECK(num_arguments >= 0);
  // Align the stack pointer and reserve space for the arguments and the
  // old rsp.
movp(kScratchRegister, rsp);
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(IsPowerOf2(frame_alignment));
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
void MacroAssembler::CallCFunction(Register function, int num_arguments) {
- ASSERT(has_frame());
+ DCHECK(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
}
call(function);
- ASSERT(OS::ActivationFrameAlignment() != 0);
- ASSERT(num_arguments >= 0);
+ DCHECK(base::OS::ActivationFrameAlignment() != 0);
+ DCHECK(num_arguments >= 0);
int argument_slots_on_stack =
ArgumentStackSlotsForCFunctionCall(num_arguments);
movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
}
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6,
+ Register reg7,
+ Register reg8) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+ reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid();
+
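+  // The registers alias exactly when the number of valid arguments exceeds
+  // the number of distinct bits set in the combined register list.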
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
+
+ return n_of_valid_regs != n_of_non_aliasing_regs;
}
+#endif
CodePatcher::CodePatcher(byte* address, int size)
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ CpuFeatures::FlushICache(address_, size_);
// Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.pc_ == address_ + size_);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
+ DCHECK(cc == zero || cc == not_zero);
if (scratch.is(object)) {
andp(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
Label* if_deprecated) {
if (map->CanBeDeprecated()) {
Move(scratch, map);
- movp(scratch, FieldOperand(scratch, Map::kBitField3Offset));
- SmiToInteger32(scratch, scratch);
- andp(scratch, Immediate(Map::Deprecated::kMask));
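+    // bit_field3 is a raw uint32 rather than a Smi, so a 32-bit load and
+    // mask suffice.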
+ movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
+ andl(scratch, Immediate(Map::Deprecated::kMask));
j(not_zero, if_deprecated);
}
}
Register mask_scratch,
Label* on_black,
Label::Distance on_black_distance) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+ DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
GetMarkBits(object, bitmap_scratch, mask_scratch);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
// The mask_scratch register contains a 1 at the position of the first bit
// and a 0 at all other positions, including the position of the second bit.
movp(rcx, mask_scratch);
movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
j(equal, &is_data_object, Label::kNear);
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+ DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
movp(bitmap_reg, addr_reg);
  // Sign-extended 32-bit immediate.
andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
Register mask_scratch,
Label* value_is_white_and_not_data,
Label::Distance distance) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+ DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label done;
bind(¬_heap_number);
// Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = rcx;
Label not_external;
// External strings are the only ones with the kExternalStringTag bit
// set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
testb(instance_type, Immediate(kExternalStringTag));
j(zero, ¬_external, Label::kNear);
movp(length, Immediate(ExternalString::kSize));
bind(¬_external);
// Sequential string, either ASCII or UC16.
- ASSERT(kOneByteStringTag == 0x04);
+ DCHECK(kOneByteStringTag == 0x04);
andp(length, Immediate(kStringEncodingMask));
xorp(length, Immediate(kStringEncodingMask));
addp(length, Immediate(0x04));
Register scratch0,
Register scratch1,
Label* found) {
- ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
- ASSERT(!scratch1.is(scratch0));
+ DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
+ DCHECK(!scratch1.is(scratch0));
Register current = scratch0;
Label loop_again;
bind(&loop_again);
movp(current, FieldOperand(current, HeapObject::kMapOffset));
movp(scratch1, FieldOperand(current, Map::kBitField2Offset));
- andp(scratch1, Immediate(Map::kElementsKindMask));
- shrp(scratch1, Immediate(Map::kElementsKindShift));
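+  // Extract the elements kind encoded in bit field 2.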
+ DecodeField<Map::ElementsKindBits>(scratch1);
cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS));
j(equal, found);
movp(current, FieldOperand(current, Map::kPrototypeOffset));
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
- ASSERT(!dividend.is(rax));
- ASSERT(!dividend.is(rdx));
+ DCHECK(!dividend.is(rax));
+ DCHECK(!dividend.is(rdx));
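+  // Implement the truncating division by a constant as a multiplication
+  // by a precomputed magic multiplier followed by shifts (Hacker's
+  // Delight, chapter 10); the one-operand imull leaves the high half of
+  // the product in rdx.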
MultiplierAndShift ms(divisor);
movl(rax, Immediate(ms.multiplier()));
imull(dividend);