#ifndef V8_SPACES_H_
#define V8_SPACES_H_
-#include "allocation.h"
-#include "hashmap.h"
-#include "list.h"
-#include "log.h"
-#include "platform/mutex.h"
-#include "utils.h"
+#include "src/allocation.h"
+#include "src/base/atomicops.h"
+#include "src/hashmap.h"
+#include "src/list.h"
+#include "src/log.h"
+#include "src/platform/mutex.h"
+#include "src/utils.h"
namespace v8 {
namespace internal {
bool is_valid() { return address() != NULL; }
MemoryChunk* next_chunk() const {
- return reinterpret_cast<MemoryChunk*>(Acquire_Load(&next_chunk_));
+ return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
}
MemoryChunk* prev_chunk() const {
- return reinterpret_cast<MemoryChunk*>(Acquire_Load(&prev_chunk_));
+ return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
}
void set_next_chunk(MemoryChunk* next) {
- Release_Store(&next_chunk_, reinterpret_cast<AtomicWord>(next));
+ base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
}
void set_prev_chunk(MemoryChunk* prev) {
- Release_Store(&prev_chunk_, reinterpret_cast<AtomicWord>(prev));
+ base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
}
Space* owner() const {
ParallelSweepingState parallel_sweeping() {
return static_cast<ParallelSweepingState>(
- Acquire_Load(&parallel_sweeping_));
+ base::Acquire_Load(&parallel_sweeping_));
}
void set_parallel_sweeping(ParallelSweepingState state) {
- Release_Store(&parallel_sweeping_, state);
+ base::Release_Store(&parallel_sweeping_, state);
}
bool TryParallelSweeping() {
- return Acquire_CompareAndSwap(&parallel_sweeping_,
- PARALLEL_SWEEPING_PENDING,
- PARALLEL_SWEEPING_IN_PROGRESS) ==
- PARALLEL_SWEEPING_PENDING;
+ return base::Acquire_CompareAndSwap(
+ &parallel_sweeping_, PARALLEL_SWEEPING_PENDING,
+ PARALLEL_SWEEPING_IN_PROGRESS) == PARALLEL_SWEEPING_PENDING;
}
// Assuming the initial allocation on a page is sequential,
// count highest number of bytes ever allocated on the page.
int high_water_mark_;
- AtomicWord parallel_sweeping_;
+ base::AtomicWord parallel_sweeping_;
// PagedSpace free-list statistics.
intptr_t available_in_small_free_list_;
private:
// next_chunk_ holds a pointer of type MemoryChunk
- AtomicWord next_chunk_;
+ base::AtomicWord next_chunk_;
// prev_chunk_ holds a pointer of type MemoryChunk
- AtomicWord prev_chunk_;
+ base::AtomicWord prev_chunk_;
friend class MemoryAllocator;
};
-STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
// -----------------------------------------------------------------------------
};
-STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
class LargePage : public MemoryChunk {
friend class MemoryAllocator;
};
-STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
// manage it.
void TearDown();
- bool exists() { return this != NULL && code_range_ != NULL; }
+ bool valid() { return code_range_ != NULL; }
Address start() {
- if (this == NULL || code_range_ == NULL) return NULL;
+ ASSERT(valid());
return static_cast<Address>(code_range_->address());
}
bool contains(Address address) {
- if (this == NULL || code_range_ == NULL) return false;
+ if (!valid()) return false;
Address start = static_cast<Address>(code_range_->address());
return start <= address && address < start + code_range_->size();
}
// Finds a block on the allocation list that contains at least the
// requested amount of memory. If none is found, sorts and merges
// the existing free memory blocks, and searches again.
- // If none can be found, terminates V8 with FatalProcessOutOfMemory.
- void GetNextAllocationBlock(size_t requested);
+ // If none can be found, returns false.
+ bool GetNextAllocationBlock(size_t requested);
// Compares the start addresses of two free blocks.
static int CompareFreeBlockAddress(const FreeBlock* left,
const FreeBlock* right);
void RepairFreeList(Heap* heap);
FreeListNode* top() const {
- return reinterpret_cast<FreeListNode*>(NoBarrier_Load(&top_));
+ return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
}
void set_top(FreeListNode* top) {
- NoBarrier_Store(&top_, reinterpret_cast<AtomicWord>(top));
+ base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
}
FreeListNode** GetEndAddress() { return &end_; }
private:
// top_ points to the top FreeListNode* in the free list category.
- AtomicWord top_;
+ base::AtomicWord top_;
FreeListNode* end_;
Mutex mutex_;
// address denoted by top in allocation_info_.
inline HeapObject* AllocateLinearly(int size_in_bytes);
+ MUST_USE_RESULT HeapObject*
+ WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
+
// Slow path of AllocateRaw. This function is space-dependent.
- MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
friend class PageIterator;
friend class MarkCompactCollector;
inline_allocation_limit_step_(0) {}
// Sets up the new space using the given chunk.
- bool SetUp(int reserved_semispace_size_, int max_semispace_size);
+ bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
// Tears down the space. Heap memory was not allocated by the space, so it
// is not deallocated here.
return to_space_.MaximumCapacity();
}
+ bool IsAtMaximumCapacity() {
+ return Capacity() == MaximumCapacity();
+ }
+
// Returns the initial capacity of a semispace.
int InitialCapacity() {
ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());