HeapStatistics();
size_t total_heap_size() { return total_heap_size_; }
size_t total_heap_size_executable() { return total_heap_size_executable_; }
+ size_t total_physical_size() { return total_physical_size_; }
size_t used_heap_size() { return used_heap_size_; }
size_t heap_size_limit() { return heap_size_limit_; }
void set_total_heap_size_executable(size_t size) {
total_heap_size_executable_ = size;
}
+ void set_total_physical_size(size_t size) {
+ total_physical_size_ = size;
+ }
void set_used_heap_size(size_t size) { used_heap_size_ = size; }
void set_heap_size_limit(size_t size) { heap_size_limit_ = size; }
size_t total_heap_size_;
size_t total_heap_size_executable_;
+ size_t total_physical_size_;
size_t used_heap_size_;
size_t heap_size_limit_;
HeapStatistics::HeapStatistics(): total_heap_size_(0),
total_heap_size_executable_(0),
+ total_physical_size_(0),
used_heap_size_(0),
heap_size_limit_(0) { }
// Isolate is uninitialized, thus the heap is not configured yet.
heap_statistics->set_total_heap_size(0);
heap_statistics->set_total_heap_size_executable(0);
+ heap_statistics->set_total_physical_size(0);
heap_statistics->set_used_heap_size(0);
heap_statistics->set_heap_size_limit(0);
return;
heap_statistics->set_total_heap_size(heap->CommittedMemory());
heap_statistics->set_total_heap_size_executable(
heap->CommittedMemoryExecutable());
+ heap_statistics->set_total_physical_size(heap->CommittedPhysicalMemory());
heap_statistics->set_used_heap_size(heap->SizeOfObjects());
heap_statistics->set_heap_size_limit(heap->MaxReserved());
}
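For context, a minimal sketch of how an embedder might read the new counter. It assumes the V8::GetHeapStatistics() entry point of this era of the public API; that entry point is not touched by the patch, and the helper below is purely illustrative.

#include <cstdio>
#include "v8.h"

// Illustrative only: assumes v8::V8::GetHeapStatistics() is the embedder
// entry point (unchanged by this patch).
static void ReportHeapResidency() {
  v8::HeapStatistics stats;
  v8::V8::GetHeapStatistics(&stats);
  // total_physical_size() is the new figure: committed memory that is
  // actually resident in RAM, while total_heap_size() counts all committed
  // (virtual) memory. Both are zero before the isolate is initialized.
  printf("committed: %zu bytes, resident: %zu bytes\n",
         stats.total_heap_size(), stats.total_physical_size());
}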
lo_space_->Size();
}
+
+size_t Heap::CommittedPhysicalMemory() {
+ if (!HasBeenSetUp()) return 0;
+
+ return new_space_.CommittedPhysicalMemory() +
+ old_pointer_space_->CommittedPhysicalMemory() +
+ old_data_space_->CommittedPhysicalMemory() +
+ code_space_->CommittedPhysicalMemory() +
+ map_space_->CommittedPhysicalMemory() +
+ cell_space_->CommittedPhysicalMemory() +
+ lo_space_->CommittedPhysicalMemory();
+}
+
+
intptr_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetUp()) return 0;
// Returns the amount of executable memory currently committed for the heap.
intptr_t CommittedMemoryExecutable();
+ // Returns the amount of physical memory currently committed for the heap.
+ size_t CommittedPhysicalMemory();
+
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
}
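Since residency can never exceed commitment, the new counter should stay within CommittedMemory(), and it is zero before the heap is set up. A hypothetical sanity check (the helper name is mine, not part of the patch):

// Hypothetical check, not part of the patch: resident pages are a subset of
// committed pages, so the physical figure should not exceed the virtual one.
static void CheckCommittedPhysicalMemory(Heap* heap) {
  size_t physical = heap->CommittedPhysicalMemory();
  ASSERT(physical <= static_cast<size_t>(heap->CommittedMemory()));
}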
+bool VirtualMemory::CommittedPhysicalSizeInRegion(
+ void* base, size_t size, size_t* physical) {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
}
+bool VirtualMemory::CommittedPhysicalSizeInRegion(
+ void* base, size_t size, size_t* physical) {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
pthread_t thread_; // Thread handle for pthread.
}
+bool VirtualMemory::CommittedPhysicalSizeInRegion(
+ void* base, size_t size, size_t* physical) {
+ const size_t page_size = sysconf(_SC_PAGESIZE);
+ // mincore() requires a page-aligned address, so round base down to the
+ // nearest page boundary and compute the number of pages covered.
+ base = reinterpret_cast<void*>(
+ reinterpret_cast<intptr_t>(base) & ~(page_size - 1));
+ const size_t pages = (size + page_size - 1) / page_size;
+ // The kernel fills one status byte per page; bit 0 is set for resident pages.
+ ScopedVector<unsigned char> buffer(pages);
+ int result = mincore(base, size, buffer.start());
+ if (result) return false;
+ int resident_pages = 0;
+ for (unsigned i = 0; i < pages; ++i) {
+ resident_pages += buffer[i] & 1;
+ }
+ *physical = resident_pages * page_size;
+ return true;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
}
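The mincore()-based implementation above relies on the kernel filling one status byte per page, with the low bit set when the page is resident in RAM. A self-contained sketch of the same accounting on an anonymous mapping (Linux assumed; on the BSD/Mac variant of mincore() the vector parameter is char* rather than unsigned char*):

#include <sys/mman.h>
#include <unistd.h>
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
  const size_t page_size = sysconf(_SC_PAGESIZE);
  const size_t pages = 16;
  const size_t size = pages * page_size;
  // Reserve an anonymous mapping; none of it is backed by frames yet.
  void* base = mmap(NULL, size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) return 1;
  // Touch the first four pages so the kernel actually backs them.
  memset(base, 1, 4 * page_size);
  std::vector<unsigned char> vec(pages);
  if (mincore(base, size, &vec[0]) != 0) return 1;
  size_t resident = 0;
  for (size_t i = 0; i < pages; ++i) resident += vec[i] & 1;
  // Expect roughly 4 of the 16 pages to be reported as resident.
  printf("%zu of %zu pages resident\n", resident, pages);
  munmap(base, size);
  return 0;
}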
+bool VirtualMemory::CommittedPhysicalSizeInRegion(
+ void* base, size_t size, size_t* physical) {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
}
+bool VirtualMemory::CommittedPhysicalSizeInRegion(
+ void* base, size_t size, size_t* physical) {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() {
}
+bool VirtualMemory::CommittedPhysicalSizeInRegion(
+ void* base, size_t size, size_t* physical) {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
}
+bool VirtualMemory::CommittedPhysicalSizeInRegion(
+ void* base, size_t size, size_t* physical) {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) { }
}
+bool VirtualMemory::CommittedPhysicalSizeInRegion(
+ void* base, size_t size, size_t* physical) {
+ // TODO(alph): implement for the platform.
+ return false;
+}
+
+
// ----------------------------------------------------------------------------
// Win32 thread support.
// and the same size it was reserved with.
static bool ReleaseRegion(void* base, size_t size);
+ // Returns the amount of committed memory that is currently resident in
+ // physical memory for the region specified by the base and size arguments.
+ // On success stores the result at the location pointed to by the last
+ // argument and returns true; returns false on failure.
+ static bool CommittedPhysicalSizeInRegion(
+ void* base, size_t size, size_t* physical);
+
private:
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
}
+size_t MemoryChunk::CommittedPhysicalMemory() {
+ size_t physical;
+ size_t size = area_size();
+ if (VirtualMemory::CommittedPhysicalSizeInRegion(
+ area_start_, size, &physical)) {
+ return physical;
+ } else {
+ // The platform cannot report residency for the region; fall back to the
+ // full area size.
+ return size;
+ }
+}
+
+
MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
Executability executable,
Space* owner) {
}
+size_t PagedSpace::CommittedPhysicalMemory() {
+ size_t size = 0;
+ PageIterator it(this);
+ while (it.has_next()) {
+ size += it.next()->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+
MaybeObject* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on precisely swept spaces.
ASSERT(!heap()->mark_compact_collector()->in_use());
}
+size_t SemiSpace::CommittedPhysicalMemory() {
+ if (!is_committed()) return 0;
+ size_t size = 0;
+ NewSpacePageIterator it(this);
+ while (it.has_next()) {
+ size += it.next()->CommittedPhysicalMemory();
+ }
+ return size;
+}
+
+
bool SemiSpace::GrowTo(int new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
}
+size_t LargeObjectSpace::CommittedPhysicalMemory() {
+ size_t size = 0;
+ LargePage* current = first_page_;
+ while (current != NULL) {
+ size += current->CommittedPhysicalMemory();
+ current = current->next_page();
+ }
+ return size;
+}
+
+
// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
LargePage* page = FindPage(a);
return static_cast<int>(area_end() - area_start());
}
+ size_t CommittedPhysicalMemory();
+
protected:
MemoryChunk* next_chunk_;
MemoryChunk* prev_chunk_;
// spaces this equals the capacity.
intptr_t CommittedMemory() { return Capacity(); }
+ // Total amount of physical memory committed for this space.
+ size_t CommittedPhysicalMemory();
+
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
static void Swap(SemiSpace* from, SemiSpace* to);
+ size_t CommittedPhysicalMemory();
+
private:
// Flips the semispace between being from-space and to-space.
// Copies the flags into the masked positions on all pages in the space.
return Capacity();
}
+ size_t CommittedPhysicalMemory() {
+ return to_space_.CommittedPhysicalMemory()
+ + (from_space_.is_committed() ? from_space_.CommittedPhysicalMemory()
+ : 0);
+ }
+
// Return the available bytes without growing.
intptr_t Available() {
return Capacity() - Size();
return Size();
}
+ size_t CommittedPhysicalMemory();
+
int PageCount() {
return page_count_;
}