void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8()) {
movsx_b(dst, src);
} else if (r.IsUInteger8()) {
void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
- ASSERT(!r.IsDouble());
+ DCHECK(!r.IsDouble());
if (r.IsInteger8() || r.IsUInteger8()) {
mov_b(dst, src);
} else if (r.IsInteger16() || r.IsUInteger16()) {
void MacroAssembler::StoreRoot(Register source,
Register scratch,
Heap::RootListIndex index) {
- ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
+ DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
ExternalReference roots_array_start =
ExternalReference::roots_array_start(isolate());
mov(scratch, Immediate(index));
void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
- ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
cmp(with, value);
}
void MacroAssembler::CompareRoot(const Operand& with,
Heap::RootListIndex index) {
- ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+ DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
cmp(with, value);
}
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance) {
- ASSERT(cc == equal || cc == not_equal);
+ DCHECK(cc == equal || cc == not_equal);
if (scratch.is(object)) {
and_(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
and_(scratch, object);
}
// Check that we can use a test_b.
- ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
- ASSERT(MemoryChunk::IN_TO_SPACE < 8);
+ DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
+ DCHECK(MemoryChunk::IN_TO_SPACE < 8);
int mask = (1 << MemoryChunk::IN_FROM_SPACE)
| (1 << MemoryChunk::IN_TO_SPACE);
// If non-zero, the page belongs to new-space.
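// [Editor's sketch, not V8 code] Why a single test_b suffices here: both
// space flags sit below bit 8 (the two DCHECKs above), so one byte-sized
// load of the page flags can be masked against both bits at once. The bit
// positions below are illustrative assumptions, not the real MemoryChunk
// layout.
#include <cstdint>
constexpr int kInFromSpaceBit = 3;  // assumed; only "< 8" matters
constexpr int kInToSpaceBit = 4;    // assumed; only "< 8" matters
bool PageIsInNewSpace(uint32_t page_flags) {
  uint8_t low_byte = static_cast<uint8_t>(page_flags);  // what test_b reads
  uint8_t mask = (1 << kInFromSpaceBit) | (1 << kInToSpaceBit);
  return (low_byte & mask) != 0;  // non-zero => from-space or to-space page
}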
ret(0);
bind(&buffer_overflowed);
} else {
- ASSERT(and_then == kFallThroughAtEnd);
+ DCHECK(and_then == kFallThroughAtEnd);
j(equal, &done, Label::kNear);
}
StoreBufferOverflowStub store_buffer_overflow =
if (and_then == kReturnAtEnd) {
ret(0);
} else {
- ASSERT(and_then == kFallThroughAtEnd);
+ DCHECK(and_then == kFallThroughAtEnd);
bind(&done);
}
}
MinusZeroMode minus_zero_mode,
Label* conversion_failed,
Label::Distance dst) {
- ASSERT(!input_reg.is(scratch));
+ DCHECK(!input_reg.is(scratch));
cvttsd2si(result_reg, Operand(input_reg));
Cvtsi2sd(scratch, Operand(result_reg));
ucomisd(scratch, input_reg);
MinusZeroMode minus_zero_mode,
Label* lost_precision) {
Label done;
- ASSERT(!temp.is(xmm0));
+ DCHECK(!temp.is(xmm0));
cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
isolate()->factory()->heap_number_map());
j(not_equal, lost_precision, Label::kNear);
- ASSERT(!temp.is(no_xmm_reg));
+ DCHECK(!temp.is(no_xmm_reg));
movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
cvttsd2si(result_reg, Operand(xmm0));
// Skip barrier if writing a smi.
if (smi_check == INLINE_SMI_CHECK) {
- ASSERT_EQ(0, kSmiTag);
+ DCHECK_EQ(0, kSmiTag);
test(value, Immediate(kSmiTagMask));
j(zero, &done);
}
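// [Editor's sketch, not V8 code] The inline smi check above relies on ia32
// smi tagging: a smi is the 31-bit value shifted left by one with tag bit 0,
// so testing the low bit classifies a value without touching the heap.
#include <cstdint>
bool IsSmi(intptr_t value) {
  return (value & 1) == 0;  // tag bit clear => smi, barrier can be skipped
}
intptr_t SmiUntag(intptr_t value) {
  return value >> 1;        // arithmetic shift drops the tag bit
}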
// Although the object register is tagged, the offset is relative to the start
// of the object, so the offset must be a multiple of kPointerSize.
- ASSERT(IsAligned(offset, kPointerSize));
+ DCHECK(IsAligned(offset, kPointerSize));
lea(dst, FieldOperand(object, offset));
if (emit_debug_code()) {
bind(&ok);
}
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
+ DCHECK(!object.is(value));
+ DCHECK(!object.is(address));
+ DCHECK(!value.is(address));
AssertNotSmi(object);
if (!FLAG_incremental_marking) {
// Compute the address.
lea(address, FieldOperand(object, HeapObject::kMapOffset));
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
-
// A single check of the map's page's interesting flag suffices, since it is
// only set during incremental collection, and then it's also guaranteed that
// the from object's page's interesting flag is also set. This optimization
// relies on the fact that maps can never be in new space.
- ASSERT(!isolate()->heap()->InNewSpace(*map));
+ DCHECK(!isolate()->heap()->InNewSpace(*map));
CheckPageFlagForMap(map,
MemoryChunk::kPointersToHereAreInterestingMask,
zero,
bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// Clobber clobbered input registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
RememberedSetAction remembered_set_action,
SmiCheck smi_check,
PointersToHereCheck pointers_to_here_check_for_value) {
- ASSERT(!object.is(value));
- ASSERT(!object.is(address));
- ASSERT(!value.is(address));
+ DCHECK(!object.is(value));
+ DCHECK(!object.is(address));
+ DCHECK(!value.is(address));
AssertNotSmi(object);
if (remembered_set_action == OMIT_REMEMBERED_SET &&
bind(&ok);
}
- // Count number of write barriers in generated code.
- isolate()->counters()->write_barriers_static()->Increment();
- IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
-
// First, check if a write barrier is even needed. The tests below
// catch stores of Smis and stores into young gen.
Label done;
bind(&done);
+ // Count number of write barriers in generated code.
+ isolate()->counters()->write_barriers_static()->Increment();
+ IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
// Clobber clobbered registers when running with the debug-code flag
// turned on to provoke errors.
if (emit_debug_code()) {
void MacroAssembler::EnterExitFramePrologue() {
// Set up the frame structure on the stack.
- ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
- ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+ DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
+ DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+ DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
push(ebp);
mov(ebp, esp);
// Reserve room for entry stack pointer and push the code object.
- ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+ DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
push(Immediate(0)); // Saved entry sp, patched before call.
push(Immediate(CodeObject())); // Accessed from ExitFrame::code_slot.
void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
// Optionally save all XMM registers.
if (save_doubles) {
- int space = XMMRegister::kMaxNumRegisters * kSIMD128Size +
+ int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
argc * kPointerSize;
sub(esp, Immediate(space));
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movups(Operand(ebp, offset - ((i + 1) * kSIMD128Size)), reg);
+ movsd(Operand(ebp, offset - ((i + 1) * kDoubleSize)), reg);
}
} else {
sub(esp, Immediate(argc * kPointerSize));
}
// Get the required frame alignment for the OS.
- const int kFrameAlignment = OS::ActivationFrameAlignment();
+ const int kFrameAlignment = base::OS::ActivationFrameAlignment();
if (kFrameAlignment > 0) {
- ASSERT(IsPowerOf2(kFrameAlignment));
+ DCHECK(IsPowerOf2(kFrameAlignment));
and_(esp, -kFrameAlignment);
}
const int offset = -2 * kPointerSize;
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
- movups(reg, Operand(ebp, offset - ((i + 1) * kSIMD128Size)));
+ movsd(reg, Operand(ebp, offset - ((i + 1) * kDoubleSize)));
}
}
Label* miss) {
Label same_contexts;
- ASSERT(!holder_reg.is(scratch1));
- ASSERT(!holder_reg.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
+ DCHECK(!holder_reg.is(scratch1));
+ DCHECK(!holder_reg.is(scratch2));
+ DCHECK(!scratch1.is(scratch2));
// Load current lexical context from the stack frame.
mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
// Compute the hash code from the untagged key. This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
// code-stubs-hydrogen.cc
//
// Note: r0 will contain hash code
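// [Editor's sketch] The hash this code must stay in sync with, based on the
// seeded integer hash in utils.h (a Thomas Wang style mix). The constants
// here are quoted from memory and should be checked against utils.h.
#include <cstdint>
uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;
}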
and_(r2, r1);
// Scale the index by multiplying by the entry size.
- ASSERT(SeededNumberDictionary::kEntrySize == 3);
+ DCHECK(SeededNumberDictionary::kEntrySize == 3);
lea(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3
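// [Editor's sketch, not V8 code] lea computes base + index * scale in one
// instruction, so r2 + r2 * 2 scales the probe index by the 3-pointer
// dictionary entry size (key, value, details) without a multiply.
#include <cstdint>
uint32_t ScaleByEntrySize(uint32_t index) {
  return index + index * 2;  // == index * 3, as the lea above does
}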
// Check if the key matches.
// Check that the value is a normal property.
const int kDetailsOffset =
SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
- ASSERT_EQ(NORMAL, 0);
+ DCHECK_EQ(NORMAL, 0);
test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
j(not_zero, miss);
// Just return if allocation top is already known.
if ((flags & RESULT_CONTAINS_TOP) != 0) {
// No use of scratch if allocation top is provided.
- ASSERT(scratch.is(no_reg));
+ DCHECK(scratch.is(no_reg));
#ifdef DEBUG
// Assert that result actually contains top on entry.
cmp(result, Operand::StaticVariable(allocation_top));
Register scratch,
Label* gc_required,
AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
- ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+ DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
jmp(gc_required);
return;
}
- ASSERT(!result.is(result_end));
+ DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
- ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
sub(result, Immediate(object_size));
}
} else if (tag_result) {
- ASSERT(kHeapObjectTag == 1);
+ DCHECK(kHeapObjectTag == 1);
inc(result);
}
}
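// [Editor's sketch, not V8 code] The DOUBLE_ALIGNMENT fixup above in C
// terms, assuming 4-byte pointers and 8-byte doubles per the DCHECKs: if
// the allocation top is only pointer-aligned, one filler word is inserted
// so the object itself starts on a double boundary.
#include <cstdint>
constexpr uintptr_t kDoubleAlignmentMask = 8 - 1;
uintptr_t AlignTopForDouble(uintptr_t top) {
  if (top & kDoubleAlignmentMask) {
    top += 4;  // generated code stores a one-word filler map here first
  }
  return top;
}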
Register scratch,
Label* gc_required,
AllocationFlags flags) {
- ASSERT((flags & SIZE_IN_WORDS) == 0);
+ DCHECK((flags & SIZE_IN_WORDS) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
jmp(gc_required);
return;
}
- ASSERT(!result.is(result_end));
+ DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
- ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
- ASSERT(element_size >= times_2);
- ASSERT(kSmiTagSize == 1);
+ DCHECK(element_size >= times_2);
+ DCHECK(kSmiTagSize == 1);
element_size = static_cast<ScaleFactor>(element_size - 1);
} else {
- ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
+ DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
}
lea(result_end, Operand(element_count, element_size, header_size));
add(result_end, result);
j(above, gc_required);
if ((flags & TAG_OBJECT) != 0) {
- ASSERT(kHeapObjectTag == 1);
+ DCHECK(kHeapObjectTag == 1);
inc(result);
}
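// [Editor's sketch, not V8 code] Why the scale factor is decremented for a
// smi element count: a smi already holds value << 1, so indexing the tagged
// value with half the scale equals indexing the untagged value with the
// full scale: (value << 1) << (k - 1) == value << k.
#include <cstdint>
uint32_t ByteOffsetFromSmiCount(uint32_t smi_count, int log2_element_size) {
  return smi_count << (log2_element_size - 1);  // times_4 becomes times_2
}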
Register scratch,
Label* gc_required,
AllocationFlags flags) {
- ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+ DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
if (!FLAG_inline_new) {
if (emit_debug_code()) {
// Trash the registers to simulate an allocation failure.
jmp(gc_required);
return;
}
- ASSERT(!result.is(result_end));
+ DCHECK(!result.is(result_end));
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
if ((flags & DOUBLE_ALIGNMENT) != 0) {
- ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
- ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+ DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+ DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
Label aligned;
test(result, Immediate(kDoubleAlignmentMask));
j(zero, &aligned, Label::kNear);
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- ASSERT(kHeapObjectTag == 1);
+ DCHECK(kHeapObjectTag == 1);
inc(result);
}
void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
- Label* gc_required) {
+ Label* gc_required,
+ MutableMode mode) {
// Allocate heap number in new space.
Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
TAG_OBJECT);
+ Handle<Map> map = mode == MUTABLE
+ ? isolate()->factory()->mutable_heap_number_map()
+ : isolate()->factory()->heap_number_map();
+
// Set the map.
- mov(FieldOperand(result, HeapObject::kMapOffset),
- Immediate(isolate()->factory()->heap_number_map()));
-}
-
-
-#define SIMD128_HEAP_ALLOCATE_FUNCTIONS(V) \
- V(Float32x4, float32x4, FLOAT32x4) \
- V(Float64x2, float64x2, FLOAT64x2) \
- V(Int32x4, int32x4, INT32x4)
-
-#define DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION(Type, type, TYPE) \
-void MacroAssembler::Allocate##Type(Register result, \
- Register scratch1, \
- Register scratch2, \
- Label* gc_required) { \
- /* Allocate SIMD128 object */ \
- Allocate(Type::kSize, result, scratch1, no_reg, gc_required, TAG_OBJECT);\
- /* Load the initial map and assign to new allocated object. */ \
- mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset)); \
- mov(scratch1, \
- Operand(scratch1, \
- Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); \
- mov(scratch1, \
- FieldOperand(scratch1, GlobalObject::kNativeContextOffset)); \
- mov(scratch1, \
- Operand(scratch1, \
- Context::SlotOffset(Context::TYPE##_FUNCTION_INDEX))); \
- LoadGlobalFunctionInitialMap(scratch1, scratch1); \
- mov(FieldOperand(result, JSObject::kMapOffset), scratch1); \
- /* Initialize properties and elements. */ \
- mov(FieldOperand(result, JSObject::kPropertiesOffset), \
- Immediate(isolate()->factory()->empty_fixed_array())); \
- mov(FieldOperand(result, JSObject::kElementsOffset), \
- Immediate(isolate()->factory()->empty_fixed_array())); \
- /* Allocate FixedTypedArray object */ \
- Allocate(FixedTypedArrayBase::kDataOffset + k##Type##Size, \
- scratch1, scratch2, no_reg, gc_required, TAG_OBJECT); \
- \
- mov(FieldOperand(scratch1, FixedTypedArrayBase::kMapOffset), \
- Immediate(isolate()->factory()->fixed_##type##_array_map())); \
- mov(scratch2, Immediate(1)); \
- SmiTag(scratch2); \
- mov(FieldOperand(scratch1, FixedTypedArrayBase::kLengthOffset), \
- scratch2); \
- /* Assign TifxedTypedArray object to SIMD128 object */ \
- mov(FieldOperand(result, Type::kValueOffset), scratch1); \
-}
-
-SIMD128_HEAP_ALLOCATE_FUNCTIONS(DECLARE_SIMD_HEAP_ALLOCATE_FUNCTION)
+ mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
+}
void MacroAssembler::AllocateTwoByteString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
- ASSERT(kShortSize == 2);
+ DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ DCHECK(kShortSize == 2);
// scratch1 = length * 2 + kObjectAlignmentMask.
lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
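// [Editor's sketch, not V8 code] The two instructions above round the
// two-byte character payload up to object alignment in one go: lea folds
// the doubling and the rounding addend together, and and_ truncates
// downward. The 4-byte (pointer-size) alignment is an assumption for ia32.
#include <cstdint>
constexpr uint32_t kObjectAlignmentMask = 4 - 1;  // assumed ia32 value
uint32_t TwoByteCharDataSize(uint32_t length) {
  return (length * 2 + kObjectAlignmentMask) & ~kObjectAlignmentMask;
}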
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, length);
- ASSERT(kCharSize == 1);
+ DCHECK(kCharSize == 1);
add(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
Register scratch1,
Register scratch2,
Label* gc_required) {
- ASSERT(length > 0);
+ DCHECK(length > 0);
// Allocate ASCII string in new space.
Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
Register length,
Register scratch) {
Label short_loop, len4, len8, len12, done, short_string;
- ASSERT(source.is(esi));
- ASSERT(destination.is(edi));
- ASSERT(length.is(ecx));
+ DCHECK(source.is(esi));
+ DCHECK(destination.is(edi));
+ DCHECK(length.is(ecx));
cmp(length, Immediate(4));
j(below, &short_string, Label::kNear);
int field_offset,
int bit_index) {
bit_index += kSmiTagSize + kSmiShiftSize;
- ASSERT(IsPowerOf2(kBitsPerByte));
+ DCHECK(IsPowerOf2(kBitsPerByte));
int byte_index = bit_index / kBitsPerByte;
int byte_bit_index = bit_index & (kBitsPerByte - 1);
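// [Editor's sketch, not V8 code] The decomposition above lets test_b read
// only the byte containing the target bit of a smi-tagged bitfield.
#include <cstdint>
bool TestBit(const uint8_t* field, int bit_index) {
  int byte_index = bit_index / 8;   // which byte test_b loads
  int bit_in_byte = bit_index & 7;  // which bit inside that byte
  return (field[byte_index] >> bit_in_byte) & 1;
}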
test_b(FieldOperand(object, field_offset + byte_index),
Register scratch,
Label* miss,
bool miss_on_bound_function) {
- // Check that the receiver isn't a smi.
- JumpIfSmi(function, miss);
+ Label non_instance;
+ if (miss_on_bound_function) {
+ // Check that the receiver isn't a smi.
+ JumpIfSmi(function, miss);
- // Check that the function really is a function.
- CmpObjectType(function, JS_FUNCTION_TYPE, result);
- j(not_equal, miss);
+ // Check that the function really is a function.
+ CmpObjectType(function, JS_FUNCTION_TYPE, result);
+ j(not_equal, miss);
- if (miss_on_bound_function) {
// If a bound function, go to miss label.
mov(scratch,
FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
SharedFunctionInfo::kBoundFunction);
j(not_zero, miss);
- }
- // Make sure that the function has an instance prototype.
- Label non_instance;
- movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
- test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
- j(not_zero, &non_instance);
+ // Make sure that the function has an instance prototype.
+ movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
+ test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
+ j(not_zero, &non_instance);
+ }
// Get the prototype or initial map from the function.
mov(result,
// Get the prototype from the initial map.
mov(result, FieldOperand(result, Map::kPrototypeOffset));
- jmp(&done);
- // Non-instance prototype: Fetch prototype from constructor field
- // in initial map.
- bind(&non_instance);
- mov(result, FieldOperand(result, Map::kConstructorOffset));
+ if (miss_on_bound_function) {
+ jmp(&done);
+
+ // Non-instance prototype: Fetch prototype from constructor field
+ // in initial map.
+ bind(&non_instance);
+ mov(result, FieldOperand(result, Map::kConstructorOffset));
+ }
// All done.
bind(&done);
void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
- ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
+ DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs.
call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
}
void MacroAssembler::StubReturn(int argc) {
- ASSERT(argc >= 1 && generating_stub());
+ DCHECK(argc >= 1 && generating_stub());
ret((argc - 1) * kPointerSize);
}
// The assert checks that the constants for the maximum number of digits
// for an array index cached in the hash field and the number of bits
// reserved for it do not conflict.
- ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+ DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
if (!index.is(hash)) {
mov(index, hash);
ExternalReference level_address =
ExternalReference::handle_scope_level_address(isolate());
- ASSERT(edx.is(function_address));
+ DCHECK(edx.is(function_address));
// Allocate HandleScope in callee-save registers.
mov(ebx, Operand::StaticVariable(next_address));
mov(edi, Operand::StaticVariable(limit_address));
bind(&promote_scheduled_exception);
{
FrameScope frame(this, StackFrame::INTERNAL);
- CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
+ CallRuntime(Runtime::kPromoteScheduledException, 0);
}
jmp(&exception_handled);
*definitely_mismatches = false;
Label invoke;
if (expected.is_immediate()) {
- ASSERT(actual.is_immediate());
+ DCHECK(actual.is_immediate());
if (expected.immediate() == actual.immediate()) {
definitely_matches = true;
} else {
// IC mechanism.
cmp(expected.reg(), actual.immediate());
j(equal, &invoke);
- ASSERT(expected.reg().is(ebx));
+ DCHECK(expected.reg().is(ebx));
mov(eax, actual.immediate());
} else if (!expected.reg().is(actual.reg())) {
// Both expected and actual are in (different) registers. This
// is the case when we invoke functions using call and apply.
cmp(expected.reg(), actual.reg());
j(equal, &invoke);
- ASSERT(actual.reg().is(eax));
- ASSERT(expected.reg().is(ebx));
+ DCHECK(actual.reg().is(eax));
+ DCHECK(expected.reg().is(ebx));
}
}
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
Label done;
bool definitely_mismatches = false;
call(code);
call_wrapper.AfterCall();
} else {
- ASSERT(flag == JUMP_FUNCTION);
+ DCHECK(flag == JUMP_FUNCTION);
jmp(code);
}
bind(&done);
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
- ASSERT(fun.is(edi));
+ DCHECK(fun.is(edi));
mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a function without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
- ASSERT(fun.is(edi));
+ DCHECK(fun.is(edi));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
InvokeFlag flag,
const CallWrapper& call_wrapper) {
// You can't call a builtin without a valid frame.
- ASSERT(flag == JUMP_FUNCTION || has_frame());
+ DCHECK(flag == JUMP_FUNCTION || has_frame());
// Rely on the assertion to check that the number of provided
// arguments matches the expected number of arguments. Fake a
void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
- ASSERT(!target.is(edi));
+ DCHECK(!target.is(edi));
// Load the JavaScript builtin function from the builtins object.
GetBuiltinFunction(edi, id);
// Load the code entry point from the function into the target register.
// The registers are pushed starting with the lowest encoding,
// which means that the lowest encodings are furthest away from
// the stack pointer.
- ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+ DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
return kNumSafepointRegisters - reg_code - 1;
}
void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand operand = Operand::StaticVariable(ExternalReference(counter));
if (value == 1) {
void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Operand operand = Operand::StaticVariable(ExternalReference(counter));
if (value == 1) {
void MacroAssembler::IncrementCounter(Condition cc,
StatsCounter* counter,
int value) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Label skip;
j(NegateCondition(cc), &skip);
void MacroAssembler::DecrementCounter(Condition cc,
StatsCounter* counter,
int value) {
- ASSERT(value > 0);
+ DCHECK(value > 0);
if (FLAG_native_code_counters && counter->Enabled()) {
Label skip;
j(NegateCondition(cc), &skip);
void MacroAssembler::CheckStackAlignment() {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
int frame_alignment_mask = frame_alignment - 1;
if (frame_alignment > kPointerSize) {
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(IsPowerOf2(frame_alignment));
Label alignment_as_expected;
test(esp, Immediate(frame_alignment_mask));
j(zero, &alignment_as_expected);
void MacroAssembler::LoadPowerOf2(XMMRegister dst,
Register scratch,
int power) {
- ASSERT(is_uintn(power + HeapNumber::kExponentBias,
+ DCHECK(is_uintn(power + HeapNumber::kExponentBias,
HeapNumber::kExponentBits));
mov(scratch, Immediate(power + HeapNumber::kExponentBias));
movd(dst, scratch);
const int kFlatAsciiStringTag =
kStringTag | kOneByteStringTag | kSeqStringTag;
// Interleave bits from both instance types and compare them in one check.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
and_(scratch1, kFlatAsciiStringMask);
and_(scratch2, kFlatAsciiStringMask);
lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
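// [Editor's sketch, not V8 code] The DCHECK_EQ above guarantees that the
// mask and the mask shifted left by three do not overlap, so lea (base +
// index * 8, i.e. the second value shifted by 3) packs both masked
// instance types into one register for a single cmp.
#include <cstdint>
uint32_t PackTwoTypes(uint32_t type1, uint32_t type2, uint32_t mask) {
  return (type1 & mask) | ((type2 & mask) << 3);  // lea(s1, s1 + s2 * 8)
}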
void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
- int frame_alignment = OS::ActivationFrameAlignment();
+ int frame_alignment = base::OS::ActivationFrameAlignment();
if (frame_alignment != 0) {
// Make stack end at alignment and make room for num_arguments words
// and the original value of esp.
mov(scratch, esp);
sub(esp, Immediate((num_arguments + 1) * kPointerSize));
- ASSERT(IsPowerOf2(frame_alignment));
+ DCHECK(IsPowerOf2(frame_alignment));
and_(esp, -frame_alignment);
mov(Operand(esp, num_arguments * kPointerSize), scratch);
} else {
void MacroAssembler::CallCFunction(Register function,
int num_arguments) {
- ASSERT(has_frame());
+ DCHECK(has_frame());
// Check stack alignment.
if (emit_debug_code()) {
CheckStackAlignment();
}
call(function);
- if (OS::ActivationFrameAlignment() != 0) {
+ if (base::OS::ActivationFrameAlignment() != 0) {
mov(esp, Operand(esp, num_arguments * kPointerSize));
} else {
add(esp, Immediate(num_arguments * kPointerSize));
}
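// [Editor's sketch, not V8 code] The alignment bookkeeping shared by
// PrepareCallCFunction and CallCFunction above: esp is rounded down after
// reserving the argument words plus one extra slot, and the original esp
// is stashed in that extra slot so the call site can restore it no matter
// how much the rounding consumed.
#include <cstdint>
uintptr_t AlignedFrameTop(uintptr_t esp, int num_arguments, int alignment) {
  esp -= (num_arguments + 1) * 4;  // args + the saved-esp slot (ia32 words)
  return esp & ~static_cast<uintptr_t>(alignment - 1);  // round down
}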
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
- if (r1.is(r2)) return true;
- if (r1.is(r3)) return true;
- if (r1.is(r4)) return true;
- if (r2.is(r3)) return true;
- if (r2.is(r4)) return true;
- if (r3.is(r4)) return true;
- return false;
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+ Register reg2,
+ Register reg3,
+ Register reg4,
+ Register reg5,
+ Register reg6,
+ Register reg7,
+ Register reg8) {
+ int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+ reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+ reg7.is_valid() + reg8.is_valid();
+
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+ if (reg7.is_valid()) regs |= reg7.bit();
+ if (reg8.is_valid()) regs |= reg8.bit();
+ int n_of_non_aliasing_regs = NumRegs(regs);
+
+ return n_of_valid_regs != n_of_non_aliasing_regs;
}
+#endif
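// [Editor's sketch, not V8 code] The new AreAliased works by counting: each
// valid register sets one bit in a bitset, so any aliasing pair makes the
// population count smaller than the number of valid registers.
#include <bitset>
#include <cstdint>
bool AnyAliased(const int* reg_codes, int n) {  // code < 0 marks "no_reg"
  uint32_t regs = 0;
  int valid = 0;
  for (int i = 0; i < n; i++) {
    if (reg_codes[i] < 0) continue;
    valid++;
    regs |= 1u << reg_codes[i];
  }
  return valid != static_cast<int>(std::bitset<32>(regs).count());
}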
CodePatcher::CodePatcher(byte* address, int size)
// Create a new macro assembler pointing to the address of the code to patch.
// The size is adjusted with kGap in order for the assembler to generate size
// bytes of instructions without failing with buffer size constraints.
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
CodePatcher::~CodePatcher() {
// Indicate that code has changed.
- CPU::FlushICache(address_, size_);
+ CpuFeatures::FlushICache(address_, size_);
// Check that the code was patched as expected.
- ASSERT(masm_.pc_ == address_ + size_);
- ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+ DCHECK(masm_.pc_ == address_ + size_);
+ DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
}
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
+ DCHECK(cc == zero || cc == not_zero);
if (scratch.is(object)) {
and_(scratch, Immediate(~Page::kPageAlignmentMask));
} else {
Condition cc,
Label* condition_met,
Label::Distance condition_met_distance) {
- ASSERT(cc == zero || cc == not_zero);
+ DCHECK(cc == zero || cc == not_zero);
Page* page = Page::FromAddress(map->address());
+ DCHECK(!serializer_enabled()); // Serializer cannot match page_flags.
ExternalReference reference(ExternalReference::page_flags(page));
// The inlined static address check of the page's flags relies
// on maps never being compacted.
- ASSERT(!isolate()->heap()->mark_compact_collector()->
+ DCHECK(!isolate()->heap()->mark_compact_collector()->
IsOnEvacuationCandidate(*map));
if (mask < (1 << kBitsPerByte)) {
test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
HasColor(object, scratch0, scratch1,
on_black, on_black_near,
1, 0); // kBlackBitPattern.
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
}
Label::Distance has_color_distance,
int first_bit,
int second_bit) {
- ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
+ DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
GetMarkBits(object, bitmap_scratch, mask_scratch);
void MacroAssembler::GetMarkBits(Register addr_reg,
Register bitmap_reg,
Register mask_reg) {
- ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+ DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
and_(bitmap_reg, addr_reg);
mov(ecx, addr_reg);
Register mask_scratch,
Label* value_is_white_and_not_data,
Label::Distance distance) {
- ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+ DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
GetMarkBits(value, bitmap_scratch, mask_scratch);
// If the value is black or grey we don't need to do anything.
- ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
- ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
- ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
- ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+ DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+ DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+ DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+ DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
Label done;
bind(¬_heap_number);
// Check for strings.
- ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
- ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+ DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+ DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
// If it's a string and it's not a cons string then it's an object containing
// no GC pointers.
Register instance_type = ecx;
Label not_external;
// External strings are the only ones with the kExternalStringTag bit
// set.
- ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
- ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+ DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
test_b(instance_type, kExternalStringTag);
j(zero, ¬_external, Label::kNear);
mov(length, Immediate(ExternalString::kSize));
bind(¬_external);
// Sequential string, either ASCII or UC16.
- ASSERT(kOneByteStringTag == 0x04);
+ DCHECK(kOneByteStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
add(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
// by 2. If we multiply the string length as smi by this, it still
// won't overflow a 32-bit value.
- ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
- ASSERT(SeqOneByteString::kMaxSize <=
+ DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
+ DCHECK(SeqOneByteString::kMaxSize <=
static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
imul(length, FieldOperand(value, String::kLengthOffset));
shr(length, 2 + kSmiTagSize + kSmiShiftSize);
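// [Editor's sketch, not V8 code] The and/xor/add triple above maps the
// instance type to the character size shifted left by two, assuming
// kOneByteStringTag == kStringEncodingMask == 0x04 as the DCHECK states.
#include <cstdint>
uint32_t CharSizeShiftedBy2(uint32_t instance_type) {
  uint32_t v = instance_type & 0x04;  // 4 if one-byte, 0 if two-byte
  v ^= 0x04;                          // 0 if one-byte, 4 if two-byte
  return v + 0x04;                    // 4 (1 << 2) or 8 (2 << 2)
}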
Register scratch0,
Register scratch1,
Label* found) {
- ASSERT(!scratch1.is(scratch0));
+ DCHECK(!scratch1.is(scratch0));
Factory* factory = isolate()->factory();
Register current = scratch0;
Label loop_again;
void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
- ASSERT(!dividend.is(eax));
- ASSERT(!dividend.is(edx));
+ DCHECK(!dividend.is(eax));
+ DCHECK(!dividend.is(edx));
MultiplierAndShift ms(divisor);
mov(eax, Immediate(ms.multiplier()));
imul(dividend);
}
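// [Editor's sketch, not V8 code] TruncatingDiv implements division by a
// constant as a multiply-high plus shift (Granlund-Montgomery style, which
// is what MultiplierAndShift precomputes). This simplified version omits
// the extra add/sub fixups V8 emits when the multiplier's sign disagrees
// with the divisor's.
#include <cstdint>
int32_t TruncatingDivSketch(int32_t dividend, int32_t multiplier, int shift) {
  int64_t product = static_cast<int64_t>(dividend) * multiplier;  // imul
  int32_t q = static_cast<int32_t>(product >> 32);                // edx
  q >>= shift;                                                    // sar
  q += static_cast<uint32_t>(dividend) >> 31;  // fix up negative dividends
  return q;
}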
-void MacroAssembler::absps(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } float_absolute_constant =
- { 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF };
- andps(dst,
- Operand(reinterpret_cast<int32_t>(&float_absolute_constant),
- RelocInfo::NONE32));
-}
-
-
-void MacroAssembler::abspd(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } double_absolute_constant =
- { 0xFFFFFFFF, 0x7FFFFFFF, 0xFFFFFFFF, 0x7FFFFFFF };
- andps(dst,
- Operand(reinterpret_cast<int32_t>(&double_absolute_constant),
- RelocInfo::NONE32));
-}
-
-
-void MacroAssembler::notps(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } float_not_constant =
- { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF };
- xorps(dst,
- Operand(reinterpret_cast<int32_t>(&float_not_constant),
- RelocInfo::NONE32));
-}
-
-
-void MacroAssembler::negateps(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } float_negate_constant =
- { 0x80000000, 0x80000000, 0x80000000, 0x80000000 };
- xorps(dst,
- Operand(reinterpret_cast<int32_t>(&float_negate_constant),
- RelocInfo::NONE32));
-}
-
-
-void MacroAssembler::negatepd(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } double_negate_constant =
- { 0x00000000, 0x80000000, 0x00000000, 0x80000000 };
- xorpd(dst,
- Operand(reinterpret_cast<int32_t>(&double_negate_constant),
- RelocInfo::NONE32));
-}
-
-
-void MacroAssembler::pnegd(XMMRegister dst) {
- static const struct V8_ALIGNED(16) {
- uint32_t a;
- uint32_t b;
- uint32_t c;
- uint32_t d;
- } int32_one_constant = { 0x1, 0x1, 0x1, 0x1 };
- notps(dst);
- paddd(dst,
- Operand(reinterpret_cast<int32_t>(&int32_one_constant),
- RelocInfo::NONE32));
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32