if (newspace->Contains(start_of_string) &&
newspace->top() == start_of_string + old_size) {
// Last allocated object in new space. Simply lower allocation top.
- *(newspace->allocation_top_address()) = start_of_string + new_size;
+ newspace->set_top(start_of_string + new_size);
} else {
// Sizes are pointer size aligned, so that we can use filler objects
// that are a multiple of pointer size.
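// Illustrative note (not part of the patch): these are the two tails of
// SeqString::Truncate. When the string is the most recently allocated object
// in new space, lowering the top reclaims the tail directly; otherwise the
// tail must be overwritten with a filler so the heap stays iterable. In the
// V8 sources the else branch does roughly:
//
//   int delta = old_size - new_size;
//   heap->CreateFillerObjectAt(start_of_string + new_size, delta);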
// Try linear allocation in the page of alloc_info's allocation top.  Does
// not contain slow case logic (e.g. move to new space or to paged space on
// allocation) so it can be used by all the allocation functions and for all
// the paged spaces.
HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
- Address current_top = allocation_info_.top;
+ Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
- if (new_top > allocation_info_.limit) return NULL;
+ if (new_top > allocation_info_.limit()) return NULL;
- allocation_info_.top = new_top;
+ allocation_info_.set_top(new_top);
return HeapObject::FromAddress(current_top);
}
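// Standalone sketch of the bump-pointer scheme AllocateLinearly implements
// (illustrative only, using a raw char* in place of V8's Address type):
//
//   struct BumpAllocator {
//     char* top;
//     char* limit;
//     void* Allocate(int size_in_bytes) {
//       char* new_top = top + size_in_bytes;
//       if (new_top > limit) return NULL;  // Caller falls back to slow path.
//       char* result = top;
//       top = new_top;
//       return result;
//     }
//   };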
MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
+ Address old_top = allocation_info_.top();
#ifdef DEBUG
// If we are stressing compaction we waste some memory in new space
// in order to get more frequent GCs.
if (FLAG_stress_compaction && !heap()->linear_allocation()) {
- if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
+ if (allocation_info_.limit() - old_top >= size_in_bytes * 4) {
int filler_size = size_in_bytes * 4;
for (int i = 0; i < filler_size; i += kPointerSize) {
*(reinterpret_cast<Object**>(old_top + i)) =
heap()->one_pointer_filler_map();
}
old_top += filler_size;
- allocation_info_.top += filler_size;
+ allocation_info_.set_top(allocation_info_.top() + filler_size);
}
}
#endif
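// Worked example for the stress block above (not from the patch): a 16-byte
// request writes 64 bytes of one-pointer fillers and advances the top past
// them, so the allocation consumes 80 bytes in total. Wasting four times the
// requested size on every allocation fills new space roughly five times
// faster and therefore triggers GCs far more often.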
- if (allocation_info_.limit - old_top < size_in_bytes) {
+ if (allocation_info_.limit() - old_top < size_in_bytes) {
return SlowAllocateRaw(size_in_bytes);
}
HeapObject* obj = HeapObject::FromAddress(old_top);
- allocation_info_.top += size_in_bytes;
+ allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
HeapProfiler* profiler = heap()->isolate()->heap_profiler();
* AreaSize();
accounting_stats_.Clear();
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
anchor_.InitializeAsAnchor(this);
}
size_t PagedSpace::CommittedPhysicalMemory() {
if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = 0;
PageIterator it(this);
while (it.has_next()) {
DecreaseUnsweptFreeBytes(page);
}
- if (Page::FromAllocationTop(allocation_info_.top) == page) {
- allocation_info_.top = allocation_info_.limit = NULL;
+ if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
}
if (unlink) {
if (was_swept_conservatively_) return;
bool allocation_pointer_found_in_space =
- (allocation_info_.top == allocation_info_.limit);
+ (allocation_info_.top() == allocation_info_.limit());
PageIterator page_iterator(this);
while (page_iterator.has_next()) {
Page* page = page_iterator.next();
CHECK(page->owner() == this);
- if (page == Page::FromAllocationTop(allocation_info_.top)) {
+ if (page == Page::FromAllocationTop(allocation_info_.top())) {
allocation_pointer_found_in_space = true;
}
CHECK(page->WasSweptPrecisely());
}
start_ = NULL;
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
to_space_.TearDown();
from_space_.TearDown();
}
}
}
- allocation_info_.limit = to_space_.page_high();
+ allocation_info_.set_limit(to_space_.page_high());
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
void NewSpace::UpdateAllocationInfo() {
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- allocation_info_.top = to_space_.page_low();
- allocation_info_.limit = to_space_.page_high();
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(to_space_.page_low());
+ allocation_info_.set_limit(to_space_.page_high());
// Lower limit during incremental marking.
if (heap()->incremental_marking()->IsMarking() &&
inline_allocation_limit_step() != 0) {
Address new_limit =
- allocation_info_.top + inline_allocation_limit_step();
- allocation_info_.limit = Min(new_limit, allocation_info_.limit);
+ allocation_info_.top() + inline_allocation_limit_step();
+ allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
}
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}
bool NewSpace::AddFreshPage() {
- Address top = allocation_info_.top;
+ Address top = allocation_info_.top();
if (NewSpacePage::IsAtStart(top)) {
// The current page is already empty. Don't try to make another.
MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
- Address old_top = allocation_info_.top;
+ Address old_top = allocation_info_.top();
Address new_top = old_top + size_in_bytes;
Address high = to_space_.page_high();
- if (allocation_info_.limit < high) {
+ if (allocation_info_.limit() < high) {
// Incremental marking has lowered the limit to get a
// chance to do a step.
- allocation_info_.limit = Min(
- allocation_info_.limit + inline_allocation_limit_step_,
+ Address new_limit = Min(
+ allocation_info_.limit() + inline_allocation_limit_step_,
high);
+ allocation_info_.set_limit(new_limit);
int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
heap()->incremental_marking()->Step(
bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
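// Worked example of the lowered-limit protocol (illustrative numbers): with
// inline_allocation_limit_step_ == 1 KB, UpdateAllocationInfo() pins the
// limit 1 KB past the top. Fast-path AllocateRaw() bumps the top until a
// request would cross that artificial limit and lands here, where
// limit() < page_high() reveals that the limit was lowered on purpose. The
// limit is then advanced by one more step, and Step() lets the incremental
// marker do work proportional to the ~1 KB allocated since
// top_on_previous_step_.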
size_t NewSpace::CommittedPhysicalMemory() {
if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
size_t size = to_space_.CommittedPhysicalMemory();
if (from_space_.is_committed()) {
size += from_space_.CommittedPhysicalMemory();
Object* object = NULL;
if (!maybe->ToObject(&object)) return false;
HeapObject* allocation = HeapObject::cast(object);
- Address top = allocation_info_.top;
+ Address top = allocation_info_.top();
if ((top - bytes) == allocation->address()) {
- allocation_info_.top = allocation->address();
+ allocation_info_.set_top(allocation->address());
return true;
}
// There may be a borderline case here where the allocation succeeded, but
// the limit and the top have moved on to a new page.  In that case we try
// again.
bool PagedSpace::ReserveSpace(int size_in_bytes) {
ASSERT(size_in_bytes <= AreaSize());
ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
- Address current_top = allocation_info_.top;
+ Address current_top = allocation_info_.top();
Address new_top = current_top + size_in_bytes;
- if (new_top <= allocation_info_.limit) return true;
+ if (new_top <= allocation_info_.limit()) return true;
HeapObject* new_area = free_list_.Allocate(size_in_bytes);
if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
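// Allocation order above (summary): the current linear area is tried first,
// then the free list, and only then SlowAllocateRaw(), which may sweep more
// pages or grow the space before giving up.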
void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
- if (allocation_info_.top >= allocation_info_.limit) return;
+ if (allocation_info_.top() >= allocation_info_.limit()) return;
- if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) {
+ if (Page::FromAllocationTop(allocation_info_.top())->
+ IsEvacuationCandidate()) {
// Create filler object to keep page iterable if it was iterable.
int remaining =
- static_cast<int>(allocation_info_.limit - allocation_info_.top);
- heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
+ static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+ heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
- allocation_info_.top = NULL;
- allocation_info_.limit = NULL;
+ allocation_info_.set_top(NULL);
+ allocation_info_.set_limit(NULL);
}
}
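// Worked example for the eviction above (illustrative): if the top sits at
// page offset 0x100 of an evacuation candidate and the limit at 0x180,
// remaining == 0x80, so a 128-byte filler is written over the unused linear
// area before top and limit are cleared. Heap iterators can then walk the
// page without tripping over uninitialized memory.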
// space.
class AllocationInfo {
public:
- AllocationInfo() : top(NULL), limit(NULL) {
+ AllocationInfo() : top_(NULL), limit_(NULL) {
}
- Address top; // Current allocation top.
- Address limit; // Current allocation limit.
+ INLINE(void set_top(Address top)) {
+ SLOW_ASSERT(top == NULL ||
+ (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+ top_ = top;
+ }
+
+ INLINE(Address top()) const {
+ SLOW_ASSERT(top_ == NULL ||
+ (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+ return top_;
+ }
+
+ Address* top_address() {
+ return &top_;
+ }
+
+ INLINE(void set_limit(Address limit)) {
+ SLOW_ASSERT(limit == NULL ||
+ (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+ limit_ = limit;
+ }
+
+ INLINE(Address limit()) const {
+ SLOW_ASSERT(limit_ == NULL ||
+ (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
+ return limit_;
+ }
+
+ Address* limit_address() {
+ return &limit_;
+ }
#ifdef DEBUG
bool VerifyPagedAllocation() {
- return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit))
- && (top <= limit);
+ return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
+ && (top_ <= limit_);
}
#endif
+
+ private:
+ // Current allocation top.
+ Address top_;
+ // Current allocation limit.
+ Address limit_;
};
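// Usage sketch (not part of the patch): the point of routing every access
// through the accessors is that the SLOW_ASSERTs catch a tagged pointer at
// the moment it is stored rather than when it is later dereferenced. V8 keeps
// tag bits in the low-order bits of heap pointers, so a properly aligned,
// untagged Address satisfies (address & HeapObjectTagMask()) == 0. Assuming
// page_low is a page-aligned Address:
//
//   AllocationInfo info;
//   info.set_top(page_low);      // OK: page boundaries are untagged.
//   info.set_top(page_low + 1);  // Debug builds fail the SLOW_ASSERT:
//                                // the address looks like a tagged value.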
virtual intptr_t Waste() { return accounting_stats_.Waste(); }
// Returns the allocation pointer in this space.
- Address top() { return allocation_info_.top; }
- Address limit() { return allocation_info_.limit; }
+ Address top() { return allocation_info_.top(); }
+ Address limit() { return allocation_info_.limit(); }
+
+ // The allocation top address.
+ Address* allocation_top_address() {
+ return allocation_info_.top_address();
+ }
- // The allocation top and limit addresses.
- Address* allocation_top_address() { return &allocation_info_.top; }
- Address* allocation_limit_address() { return &allocation_info_.limit; }
+ // The allocation limit address.
+ Address* allocation_limit_address() {
+ return allocation_info_.limit_address();
+ }
enum AllocationType {
NEW_OBJECT,
void SetTop(Address top, Address limit) {
ASSERT(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- allocation_info_.top = top;
- allocation_info_.limit = limit;
+ MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+ allocation_info_.set_top(top);
+ allocation_info_.set_limit(limit);
}
void Allocate(int bytes) {
// Return the address of the allocation pointer in the active semispace.
Address top() {
- ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top));
- return allocation_info_.top;
+ ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
+ return allocation_info_.top();
}
+
+ void set_top(Address top) {
+ ASSERT(to_space_.current_page()->ContainsLimit(top));
+ allocation_info_.set_top(top);
+ }
+
// Return the address of the first object in the active semispace.
Address bottom() { return to_space_.space_start(); }
return reinterpret_cast<Address>(index << kPointerSizeLog2);
}
- // The allocation top and limit addresses.
- Address* allocation_top_address() { return &allocation_info_.top; }
- Address* allocation_limit_address() { return &allocation_info_.limit; }
+ // The allocation top address.
+ Address* allocation_top_address() {
+ return allocation_info_.top_address();
+ }
+
+ // The allocation limit address.
+ Address* allocation_limit_address() {
+ return allocation_info_.limit_address();
+ }
MUST_USE_RESULT INLINE(MaybeObject* AllocateRaw(int size_in_bytes));
void LowerInlineAllocationLimit(intptr_t step) {
inline_allocation_limit_step_ = step;
if (step == 0) {
- allocation_info_.limit = to_space_.page_high();
+ allocation_info_.set_limit(to_space_.page_high());
} else {
- allocation_info_.limit = Min(
- allocation_info_.top + inline_allocation_limit_step_,
- allocation_info_.limit);
+ Address new_limit = Min(
+ allocation_info_.top() + inline_allocation_limit_step_,
+ allocation_info_.limit());
+ allocation_info_.set_limit(new_limit);
}
- top_on_previous_step_ = allocation_info_.top;
+ top_on_previous_step_ = allocation_info_.top();
}
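// Note on the two branches above: step == 0 switches inline allocation
// sampling off by restoring the limit to the full page, while a non-zero
// step clamps the limit to at most one step past the current top, forcing
// periodic trips into SlowAllocateRaw() (see the worked example there).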
// Get the extent of the inactive semispace (for use as a marking stack,
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
- SLOW_ASSERT((space).page_low() <= (info).top \
- && (info).top <= (space).page_high() \
- && (info).limit <= (space).page_high())
+ SLOW_ASSERT((space).page_low() <= (info).top() \
+ && (info).top() <= (space).page_high() \
+ && (info).limit() <= (space).page_high())
// -----------------------------------------------------------------------------