void Heap::CopyBlock(Address dst, Address src, int byte_size) {
- ASSERT(IsAligned(byte_size, kPointerSize));
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
byte_size / kPointerSize);
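// A minimal standalone sketch of the word-at-a-time copy above (CopyWordsSketch
// and the uintptr_t word type are assumptions, not V8 code). It shows why
// byte_size must be a multiple of kPointerSize: integer division would
// silently drop the trailing bytes of a misaligned size.
#include <cstdint>

static void CopyWordsSketch(uintptr_t* dst, const uintptr_t* src, int count) {
  for (int i = 0; i < count; i++) dst[i] = src[i];  // whole words only
}

static void CopyBlockSketch(void* dst, const void* src, int byte_size) {
  // On a 64-bit build a 24-byte block is 24 / 8 == 3 words; a 25-byte block
  // would copy the same 3 words and lose the last byte.
  CopyWordsSketch(static_cast<uintptr_t*>(dst),
                  static_cast<const uintptr_t*>(src),
                  byte_size / static_cast<int>(sizeof(uintptr_t)));
}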
HeapObject** slot,
HeapObject* object,
int object_size) {
- ASSERT((size_restriction != SMALL) ||
- (object_size <= Page::kMaxHeapObjectSize));
- ASSERT(object->Size() == object_size);
+ SLOW_ASSERT((size_restriction != SMALL) ||
+ (object_size <= Page::kMaxHeapObjectSize));
+ SLOW_ASSERT(object->Size() == object_size);
Heap* heap = map->GetHeap();
if (heap->ShouldBePromoted(object->address(), object_size)) {
void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
- ASSERT(HEAP->InFromSpace(object));
+ SLOW_ASSERT(HEAP->InFromSpace(object));
MapWord first_word = object->map_word();
- ASSERT(!first_word.IsForwardingAddress());
+ SLOW_ASSERT(!first_word.IsForwardingAddress());
Map* map = first_word.ToMap();
map->GetHeap()->DoScavengeObject(map, p, object);
}
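// For context, a hedged sketch of the invariant the (former) assert checked:
// in a copying collector, the first word of a from-space object is either its
// map or a forwarding address installed after evacuation. The encoding below
// is illustrative only; V8's MapWord uses its own tagging scheme.
#include <cstdint>

struct FromSpaceObjectSketch {
  uintptr_t first_word;  // map pointer, or forwarding address once copied
  // Assumed convention for this sketch: a set low bit marks a forwarded object.
  bool IsForwardingAddressSketch() const { return (first_word & 1) != 0; }
};
// ScavengeObjectSlow runs only on objects whose first word is still a map,
// so dispatching through map->GetHeap()->DoScavengeObject is safe.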
MaybeObject* Heap::CopyJSObject(JSObject* source) {
// Never used to copy functions. If functions need to be copied we
// have to be careful to clear the literals array.
- ASSERT(!source->IsJSFunction());
+ SLOW_ASSERT(!source->IsJSFunction());
// Make the clone.
Map* map = source->map();
{ MaybeObject* maybe_clone = new_space_.AllocateRaw(object_size);
if (!maybe_clone->ToObject(&clone)) return maybe_clone;
}
- ASSERT(InNewSpace(clone));
+ SLOW_ASSERT(InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
  CopyBlock(HeapObject::cast(clone)->address(),
            source->address(),
            object_size);
}
- ASSERT(JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
+ SLOW_ASSERT(
+ JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
FixedArray* properties = FixedArray::cast(source->properties());
// Update elements if necessary.
HeapObject::cast(object));
Object* new_object = *slot;
if (InNewSpace(new_object)) {
- ASSERT(Heap::InToSpace(new_object));
- ASSERT(new_object->IsHeapObject());
+ SLOW_ASSERT(Heap::InToSpace(new_object));
+ SLOW_ASSERT(new_object->IsHeapObject());
store_buffer_.EnterDirectlyIntoStoreBuffer(
reinterpret_cast<Address>(slot));
}
- ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+ SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
} else if (record_slots &&
MarkCompactCollector::IsOnEvacuationCandidate(object)) {
mark_compact_collector()->RecordSlot(slot, slot, object);
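// A hedged sketch of the generational invariant behind the branches above
// (the function name is an assumption): the store buffer only has to remember
// old-space slots that point into new space. This is also why CopyJSObject
// may CopyBlock into a new-space clone with no barrier: a slot that itself
// lives in new space never needs an entry.
inline bool NeedsStoreBufferEntrySketch(bool slot_in_new_space,
                                        bool value_in_new_space) {
  return !slot_in_new_space && value_in_new_space;  // only old -> new edges
}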
void IncrementalMarking::WhiteToGrey(HeapObject* obj, MarkBit mark_bit) {
- ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
- ASSERT(obj->Size() >= 2*kPointerSize);
- ASSERT(IsMarking());
Marking::WhiteToGrey(mark_bit);
}
}
MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
- ASSERT(Marking::IsGrey(obj_mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
+ SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
+ (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
Marking::MarkBlack(obj_mark_bit);
MemoryChunk::IncrementLiveBytes(obj->address(), size);
}
MarkBit Marking::MarkBitFrom(Address addr) {
- MemoryChunk *p = MemoryChunk::FromAddress(addr);
+ MemoryChunk* p = MemoryChunk::FromAddress(addr);
return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
p->ContainsOnlyData());
}
// Impossible markbits: 01
static const char* kImpossibleBitPattern;
static inline bool IsImpossible(MarkBit mark_bit) {
- ASSERT(strcmp(kImpossibleBitPattern, "01") == 0);
return !mark_bit.Get() && mark_bit.Next().Get();
}
// Black markbits: 10 - this is required by the sweeper.
static const char* kBlackBitPattern;
static inline bool IsBlack(MarkBit mark_bit) {
- ASSERT(strcmp(kBlackBitPattern, "10") == 0);
- ASSERT(!IsImpossible(mark_bit));
return mark_bit.Get() && !mark_bit.Next().Get();
}
// White markbits: 00 - this is required by the mark bit clearer.
static const char* kWhiteBitPattern;
static inline bool IsWhite(MarkBit mark_bit) {
- ASSERT(strcmp(kWhiteBitPattern, "00") == 0);
- ASSERT(!IsImpossible(mark_bit));
return !mark_bit.Get();
}
// Grey markbits: 11
static const char* kGreyBitPattern;
static inline bool IsGrey(MarkBit mark_bit) {
- ASSERT(strcmp(kGreyBitPattern, "11") == 0);
- ASSERT(!IsImpossible(mark_bit));
return mark_bit.Get() && mark_bit.Next().Get();
}
static inline void MarkBlack(MarkBit mark_bit) {
mark_bit.Set();
mark_bit.Next().Clear();
- ASSERT(Marking::IsBlack(mark_bit));
}
static inline void BlackToGrey(MarkBit markbit) {
- ASSERT(IsBlack(markbit));
markbit.Next().Set();
- ASSERT(IsGrey(markbit));
}
static inline void WhiteToGrey(MarkBit markbit) {
- ASSERT(IsWhite(markbit));
markbit.Set();
markbit.Next().Set();
- ASSERT(IsGrey(markbit));
}
static inline void GreyToBlack(MarkBit markbit) {
- ASSERT(IsGrey(markbit));
markbit.Next().Clear();
- ASSERT(IsBlack(markbit));
}
static inline void BlackToGrey(HeapObject* obj) {
- ASSERT(obj->Size() >= 2 * kPointerSize);
BlackToGrey(MarkBitFrom(obj));
}
static inline void AnyToGrey(MarkBit markbit) {
markbit.Set();
markbit.Next().Set();
- ASSERT(IsGrey(markbit));
}
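// A standalone model of the two-bit colour scheme used by the helpers above
// (ColourPairSketch and the *Sketch names are assumptions, not V8's MarkBit).
// The pair (mark, next) encodes white 00, black 10, grey 11; 01 stays
// impossible because every transition that sets the second bit also sets the
// first.
struct ColourPairSketch {
  bool mark = false;  // the object's own mark bit
  bool next = false;  // the neighbouring bit, reused as the second colour bit
};
inline bool IsWhiteSketch(ColourPairSketch c) { return !c.mark; }
inline bool IsGreySketch(ColourPairSketch c) { return c.mark && c.next; }
inline bool IsBlackSketch(ColourPairSketch c) { return c.mark && !c.next; }
inline void WhiteToGreySketch(ColourPairSketch& c) { c.mark = c.next = true; }
inline void GreyToBlackSketch(ColourPairSketch& c) { c.next = false; }
inline void BlackToGreySketch(ColourPairSketch& c) { c.next = true; }
// Legal lifecycle: white(00) -> grey(11) -> black(10), with black -> grey
// for objects that must be rescanned.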
// Returns true if the object whose mark is transferred is marked black.
to_mark_bit.Next().Set();
    is_black = false;  // Was actually grey.
}
- ASSERT(Color(from) == Color(to));
- ASSERT(is_black == (Color(to) == BLACK_OBJECT));
return is_black;
}
inline void PushGrey(HeapObject* object) {
ASSERT(object->IsHeapObject());
if (IsFull()) {
- ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
SetOverflowed();
} else {
array_[top_] = object;
inline void UnshiftGrey(HeapObject* object) {
ASSERT(object->IsHeapObject());
if (IsFull()) {
- ASSERT(Marking::IsGrey(Marking::MarkBitFrom(object)));
SetOverflowed();
} else {
bottom_ = ((bottom_ - 1) & mask_);
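// Why the decrement above cannot underflow: the deque capacity is a power of
// two, so masking wraps the index branch-free. A tiny standalone check (the
// capacity of 8 is an assumption):
#include <cassert>
inline void RingIndexSketch() {
  const unsigned mask = 8 - 1;   // capacity 8, so mask == 0b0111
  unsigned bottom = 0;
  bottom = (bottom - 1) & mask;  // unsigned wraparound, then mask
  assert(bottom == 7);           // the front wraps to the back slot
}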
FixedArrayBase* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
- ASSERT(array->HasValidElements());
return static_cast<FixedArrayBase*>(array);
}
if (new_top > allocation_info_.limit) return NULL;
allocation_info_.top = new_top;
- ASSERT(allocation_info_.VerifyPagedAllocation());
- ASSERT(current_top != NULL);
return HeapObject::FromAddress(current_top);
}
// Raw allocation.
MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
- ASSERT(HasBeenSetup());
- ASSERT_OBJECT_SIZE(size_in_bytes);
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
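// The fast path above is a classic bump-pointer allocation; here is a minimal
// standalone sketch (LinearAreaSketch and its fields are assumed names).
// Keeping the hot path to a bounds check plus an add is exactly why the
// per-allocation asserts were demoted or dropped.
#include <cstdint>

struct LinearAreaSketch {
  uintptr_t top;
  uintptr_t limit;
  void* Allocate(int size_in_bytes) {
    uintptr_t new_top = top + size_in_bytes;
    if (new_top > limit) return nullptr;  // caller falls back to a slow path
    void* result = reinterpret_cast<void*>(top);
    top = new_top;
    return result;
  }
};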
// [page_addr + kObjectStartOffset .. page_addr + kPageSize].
INLINE(static Page* FromAllocationTop(Address top)) {
Page* p = FromAddress(top - kPointerSize);
- ASSERT_PAGE_OFFSET(p->Offset(top));
return p;
}
// Returns the offset of a given address within this page.
INLINE(int Offset(Address a)) {
int offset = static_cast<int>(a - address());
- ASSERT_PAGE_OFFSET(offset);
return offset;
}
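// A worked sketch of the back-one-word trick in FromAllocationTop (the 1 MB
// page size is an assumption): an allocation top may equal the page's
// one-past-the-end address, so masking it directly would land on the next
// page. Stepping back kPointerSize first keeps the result on the owning page.
#include <cstdint>
constexpr uintptr_t kPageAlignmentMaskSketch = (1u << 20) - 1;
inline uintptr_t PageFromAllocationTopSketch(uintptr_t top) {
  // e.g. top == 0x200000 for the page at 0x100000:
  // (0x200000 - 8) & ~0xFFFFF == 0x100000, the owning page.
  return (top - sizeof(void*)) & ~kPageAlignmentMaskSketch;
}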
reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
~Page::kPageAlignmentMask);
NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
- ASSERT(page->InNewSpace());
return page;
}
// Returns the start address of the current page of the space.
Address page_low() {
- ASSERT(anchor_.next_page() != &anchor_);
return current_page_->body();
}
// Return the current effective (allocatable) capacity of a semispace.
intptr_t EffectiveCapacity() {
- ASSERT(to_space_.Capacity() == from_space_.Capacity());
+ SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize;
}
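// Worked example of the capacity arithmetic (all sizes here are assumptions,
// not V8's constants): each page loses its header to bookkeeping, so the
// effective capacity is whole pages times the per-page object area.
constexpr long kPageSizeSketch = 1 << 20;  // assumed 1 MB pages
constexpr long kObjectAreaSizeSketch =
    kPageSizeSketch - 256;                 // assumed 256-byte page header
inline long EffectiveCapacitySketch(long semispace_capacity) {
  // An 8 MB semispace yields 8 pages and 8 * (1 MB - 256 B) of usable space.
  return (semispace_capacity / kPageSizeSketch) * kObjectAreaSizeSketch;
}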
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- ASSERT((space).page_low() <= (info).top \
- && (info).top <= (space).page_high() \
- && (info).limit <= (space).page_high())
+ SLOW_ASSERT((space).page_low() <= (info).top \
+ && (info).top <= (space).page_high() \
+ && (info).limit <= (space).page_high())
// -----------------------------------------------------------------------------
void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
if (store_buffer_rebuilding_enabled_) {
- ASSERT(!heap_->cell_space()->Contains(addr));
- ASSERT(!heap_->code_space()->Contains(addr));
- ASSERT(!heap_->old_data_space()->Contains(addr));
- ASSERT(!heap_->new_space()->Contains(addr));
+ SLOW_ASSERT(!heap_->cell_space()->Contains(addr) &&
+ !heap_->code_space()->Contains(addr) &&
+ !heap_->old_data_space()->Contains(addr) &&
+ !heap_->new_space()->Contains(addr));
Address* top = old_top_;
*top++ = addr;
old_top_ = top;
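// A minimal model of the append above (StoreBufferSketch is an assumed name):
// the store buffer is a flat array of slot addresses written with a bump
// pointer; capacity and overflow handling live elsewhere and are omitted.
#include <cstdint>
#include <cstddef>

struct StoreBufferSketch {
  uintptr_t slots[1024];
  uintptr_t* top = slots;
  void Enter(uintptr_t addr) { *top++ = addr; }  // caller ensures room
  size_t Size() const { return static_cast<size_t>(top - slots); }
};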
template <typename T, typename U>
static inline bool IsAligned(T value, U alignment) {
- ASSERT(IsPowerOf2(alignment));
return (value & (alignment - 1)) == 0;
}
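// Why the mask test needs a power-of-two alignment (compile-time checks,
// values chosen for illustration): alignment - 1 is exactly the low-bit mask
// when alignment is a power of two, so a zero AND means aligned.
static_assert((24 & (8 - 1)) == 0, "24 is 8-byte aligned");
static_assert((21 & (8 - 1)) != 0, "21 is not 8-byte aligned");
// For a non-power-of-two such as 6, (value & 5) == 0 does not test
// divisibility by 6 (12 & 5 == 4), hence the (removed) IsPowerOf2 assert.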