From 0db34dbe8111f8670c82bb4c42110400a9050d08 Mon Sep 17 00:00:00 2001
From: mlippautz <mlippautz@chromium.org>
Date: Tue, 15 Sep 2015 08:11:36 -0700
Subject: [PATCH] Revert of [heap] Concurrency support for heap book-keeping
 info (patchset #4 id:60001 of https://codereview.chromium.org/1340923004/ )

Reason for revert:
crashing: http://build.chromium.org/p/client.v8/builders/V8%20Win32%20-%20debug%20-%203/builds/4716

Original issue's description:
> [heap] Concurrency support for heap book-keeping info.
>
> Adds concurrency support for:
> - MemoryChunk: Fragmentation counters
> - MemoryChunk: High-water mark
> - MemoryAllocator: Lowest and highest ever allocated addresses, size, and
>   capacity
>
> R=hpayer@chromium.org
> BUG=chromium:524425
> LOG=N
>
> Committed: https://crrev.com/63190721cda4966e01d71e92a730ce48ea789fbc
> Cr-Commit-Position: refs/heads/master@{#30749}

TBR=hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:524425

Review URL: https://codereview.chromium.org/1340323002

Cr-Commit-Position: refs/heads/master@{#30752}
---
 src/atomic-utils.h |   7 ----
 src/heap/spaces.cc |  40 +++++++++---------
 src/heap/spaces.h  | 120 +++++++++++++++++++++++------------------------------
 3 files changed, 71 insertions(+), 96 deletions(-)

diff --git a/src/atomic-utils.h b/src/atomic-utils.h
index 2aa78f8..0bfd52c 100644
--- a/src/atomic-utils.h
+++ b/src/atomic-utils.h
@@ -30,11 +30,6 @@ class AtomicNumber {
     base::Release_Store(&value_, static_cast<base::AtomicWord>(new_value));
   }
 
-  V8_INLINE T operator=(T value) {
-    SetValue(value);
-    return value;
-  }
-
  private:
   STATIC_ASSERT(sizeof(T) <= sizeof(base::AtomicWord));
 
@@ -46,8 +41,6 @@ class AtomicNumber {
 template <typename T>
 class AtomicValue {
  public:
-  AtomicValue() : value_(0) {}
-
   explicit AtomicValue(T initial)
       : value_(cast_helper<T>::to_storage_type(initial)) {}
 
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index a68db97..68a7507 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -322,7 +322,7 @@ bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
 
 void MemoryAllocator::TearDown() {
   // Check that spaces were torn down before MemoryAllocator.
-  DCHECK(size_.Value() == 0);
+  DCHECK(size_ == 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // DCHECK(size_executable_ == 0);
   capacity_ = 0;
@@ -347,9 +347,9 @@ void MemoryAllocator::FreeNewSpaceMemory(Address addr,
   LOG(isolate_, DeleteEvent("NewSpace", addr));
 
   DCHECK(reservation->IsReserved());
-  const intptr_t size = static_cast<intptr_t>(reservation->size());
-  DCHECK(size_.Value() >= size);
-  size_.Increment(-size);
+  const size_t size = reservation->size();
+  DCHECK(size_ >= size);
+  size_ -= size;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
   FreeMemory(reservation, NOT_EXECUTABLE);
 }
@@ -392,7 +392,7 @@ Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
   base::VirtualMemory reservation(size, alignment);
 
   if (!reservation.IsReserved()) return NULL;
-  size_.Increment(static_cast<intptr_t>(reservation.size()));
+  size_ += reservation.size();
   Address base =
       RoundUp(static_cast<Address>(reservation.address()), alignment);
   controller->TakeControl(&reservation);
@@ -490,7 +490,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->skip_list_ = NULL;
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
-  chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
+  chunk->high_water_mark_ = static_cast<int>(area_start - base);
   chunk->set_parallel_sweeping(SWEEPING_DONE);
   chunk->mutex_ = NULL;
   chunk->available_in_small_free_list_ = 0;
@@ -636,8 +636,7 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
                 CodePageGuardSize();
 
     // Check executable memory limit.
-    if ((size_executable_.Value() + static_cast<intptr_t>(chunk_size)) >
-        capacity_executable_) {
+    if ((size_executable_ + chunk_size) > capacity_executable_) {
      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
                                "V8 Executable Allocation capacity exceeded"));
      return NULL;
@@ -661,16 +660,16 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
       DCHECK(
           IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
       if (base == NULL) return NULL;
-      size_.Increment(static_cast<intptr_t>(chunk_size));
+      size_ += chunk_size;
       // Update executable memory size.
-      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
+      size_executable_ += chunk_size;
     } else {
       base = AllocateAlignedMemory(chunk_size, commit_size,
                                    MemoryChunk::kAlignment, executable,
                                    &reservation);
       if (base == NULL) return NULL;
       // Update executable memory size.
-      size_executable_.Increment(static_cast<intptr_t>(chunk_size));
+      size_executable_ += reservation.size();
     }
 
     if (Heap::ShouldZapGarbage()) {
@@ -757,20 +756,20 @@ void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
   isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                          chunk->IsEvacuationCandidate());
 
-  intptr_t size;
+  size_t size;
   base::VirtualMemory* reservation = chunk->reserved_memory();
   if (reservation->IsReserved()) {
-    size = static_cast<intptr_t>(reservation->size());
+    size = reservation->size();
   } else {
-    size = static_cast<intptr_t>(chunk->size());
+    size = chunk->size();
   }
-  DCHECK(size_.Value() >= size);
-  size_.Increment(-size);
+  DCHECK(size_ >= size);
+  size_ -= size;
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
   if (chunk->executable() == EXECUTABLE) {
-    DCHECK(size_executable_.Value() >= size);
-    size_executable_.Increment(-size);
+    DCHECK(size_executable_ >= size);
+    size_executable_ -= size;
   }
 
   chunk->SetFlag(MemoryChunk::PRE_FREED);
@@ -870,14 +869,13 @@ void MemoryAllocator::RemoveMemoryAllocationCallback(
 
 #ifdef DEBUG
 void MemoryAllocator::ReportStatistics() {
-  intptr_t size = Size();
-  float pct = static_cast<float>(capacity_ - size) / capacity_;
+  float pct = static_cast<float>(capacity_ - size_) / capacity_;
   PrintF("  capacity: %" V8_PTR_PREFIX
          "d"
          ", used: %" V8_PTR_PREFIX
          "d"
          ", available: %%%d\n\n",
-         capacity_, size, static_cast<int>(pct * 100));
+         capacity_, size_, static_cast<int>(pct * 100));
 }
 #endif
 
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 9a91abb..6e1750e 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -6,7 +6,6 @@
 #define V8_HEAP_SPACES_H_
 
 #include "src/allocation.h"
-#include "src/atomic-utils.h"
 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/base/platform/mutex.h"
@@ -561,14 +560,14 @@ class MemoryChunk {
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
-      kIntptrSize         // intptr_t write_barrier_counter_
-      + kIntSize          // int progress_bar_
-      + kPointerSize      // AtomicValue<intptr_t> high_water_mark_
-      + kPointerSize      // base::Mutex* mutex_
-      + kPointerSize      // base::AtomicWord parallel_sweeping_
-      + 5 * kPointerSize  // AtomicNumber<intptr_t> free-list statistics
-      + kPointerSize      // base::AtomicWord next_chunk_
-      + kPointerSize;     // base::AtomicWord prev_chunk_
+      kIntptrSize      // intptr_t write_barrier_counter_
+      + kIntSize       // int progress_bar_
+      + kIntSize       // int high_water_mark_
+      + kPointerSize   // base::Mutex* mutex_
+      + kPointerSize   // base::AtomicWord parallel_sweeping_
+      + 5 * kIntSize   // int free-list statistics
+      + kPointerSize   // base::AtomicWord next_chunk_
+      + kPointerSize;  // base::AtomicWord prev_chunk_
 
   // We add some more space to the computed header size to amount for missing
   // alignment requirements in our computation.
@@ -675,23 +674,21 @@ class MemoryChunk {
   bool CommitArea(size_t requested);
 
   // Approximate amount of physical memory committed for this chunk.
-  size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
+  size_t CommittedPhysicalMemory() { return high_water_mark_; }
 
   // Should be called when memory chunk is about to be freed.
   void ReleaseAllocatedMemory();
 
   static inline void UpdateHighWaterMark(Address mark) {
-    if (mark == nullptr) return;
+    if (mark == NULL) return;
     // Need to subtract one from the mark because when a chunk is full the
     // top points to the next address after the chunk, which effectively belongs
     // to another chunk. See the comment to Page::FromAllocationTop.
     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
-    intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
-    intptr_t old_mark = 0;
-    do {
-      old_mark = chunk->high_water_mark_.Value();
-    } while ((new_mark > old_mark) &&
-             !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
+    int new_mark = static_cast<int>(mark - chunk->address());
+    if (new_mark > chunk->high_water_mark_) {
+      chunk->high_water_mark_ = new_mark;
+    }
   }
 
  protected:
@@ -722,17 +719,17 @@ class MemoryChunk {
   int progress_bar_;
   // Assuming the initial allocation on a page is sequential,
   // count highest number of bytes ever allocated on the page.
-  AtomicValue<intptr_t> high_water_mark_;
+  int high_water_mark_;
 
   base::Mutex* mutex_;
   base::AtomicWord parallel_sweeping_;
 
   // PagedSpace free-list statistics.
-  AtomicNumber<intptr_t> available_in_small_free_list_;
-  AtomicNumber<intptr_t> available_in_medium_free_list_;
-  AtomicNumber<intptr_t> available_in_large_free_list_;
-  AtomicNumber<intptr_t> available_in_huge_free_list_;
-  AtomicNumber<intptr_t> non_available_small_blocks_;
+  int available_in_small_free_list_;
+  int available_in_medium_free_list_;
+  int available_in_large_free_list_;
+  int available_in_huge_free_list_;
+  int non_available_small_blocks_;
 
   // next_chunk_ holds a pointer of type MemoryChunk
   base::AtomicWord next_chunk_;
@@ -831,22 +828,21 @@ class Page : public MemoryChunk {
   void ResetFreeListStatistics();
 
   int LiveBytesFromFreeList() {
-    return static_cast<int>(
-        area_size() - non_available_small_blocks() -
-        available_in_small_free_list() - available_in_medium_free_list() -
-        available_in_large_free_list() - available_in_huge_free_list());
+    return area_size() - non_available_small_blocks_ -
+           available_in_small_free_list_ - available_in_medium_free_list_ -
+           available_in_large_free_list_ - available_in_huge_free_list_;
   }
 
-#define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
-  type name() { return name##_.Value(); }                \
-  void set_##name(type name) { name##_.SetValue(name); } \
-  void add_##name(type name) { name##_.Increment(name); }
+#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
+  type name() { return name##_; }                 \
+  void set_##name(type name) { name##_ = name; }  \
+  void add_##name(type name) { name##_ += name; }
 
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, non_available_small_blocks)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_small_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_medium_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_large_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(int, available_in_huge_free_list)
 
 #undef FRAGMENTATION_STATS_ACCESSORS
@@ -1132,25 +1128,21 @@ class MemoryAllocator {
   // together.
   void Free(MemoryChunk* chunk);
 
-  // Returns allocated spaces in bytes.
-  intptr_t Size() { return size_.Value(); }
-
-  // Returns allocated executable spaces in bytes.
-  intptr_t SizeExecutable() { return size_executable_.Value(); }
-
   // Returns the maximum available bytes of heaps.
-  intptr_t Available() {
-    intptr_t size = Size();
-    return capacity_ < size ? 0 : capacity_ - size;
-  }
+  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+
+  // Returns allocated spaces in bytes.
+  intptr_t Size() { return size_; }
 
   // Returns the maximum available executable bytes of heaps.
   intptr_t AvailableExecutable() {
-    intptr_t executable_size = SizeExecutable();
-    if (capacity_executable_ < executable_size) return 0;
-    return capacity_executable_ - executable_size;
+    if (capacity_executable_ < size_executable_) return 0;
+    return capacity_executable_ - size_executable_;
   }
 
+  // Returns allocated executable spaces in bytes.
+  intptr_t SizeExecutable() { return size_executable_; }
+
   // Returns maximum available bytes that the old space can have.
   intptr_t MaxAvailable() {
     return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
@@ -1158,9 +1150,9 @@ class MemoryAllocator {
 
   // Returns an indication of whether a pointer is in a space that has
   // been allocated by this MemoryAllocator.
-  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
-    return address < lowest_ever_allocated_.Value() ||
-           address >= highest_ever_allocated_.Value();
+  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
+    return address < lowest_ever_allocated_ ||
+           address >= highest_ever_allocated_;
   }
 
 #ifdef DEBUG
@@ -1240,22 +1232,22 @@ class MemoryAllocator {
   Isolate* isolate_;
 
   // Maximum space size in bytes.
-  intptr_t capacity_;
+  size_t capacity_;
   // Maximum subset of capacity_ that can be executable
-  intptr_t capacity_executable_;
+  size_t capacity_executable_;
 
   // Allocated space size in bytes.
-  AtomicNumber<intptr_t> size_;
+  size_t size_;
   // Allocated executable space size in bytes.
-  AtomicNumber<intptr_t> size_executable_;
+  size_t size_executable_;
 
   // We keep the lowest and highest addresses allocated as a quick way
   // of determining that pointers are outside the heap. The estimate is
   // conservative, i.e. not all addrsses in 'allocated' space are allocated
   // to our heap. The range is [lowest, highest[, inclusive on the low end
   // and exclusive on the high end.
-  AtomicValue<void*> lowest_ever_allocated_;
-  AtomicValue<void*> highest_ever_allocated_;
+  void* lowest_ever_allocated_;
+  void* highest_ever_allocated_;
 
   struct MemoryAllocationCallbackRegistration {
     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
@@ -1278,16 +1270,8 @@ class MemoryAllocator {
                                  PagedSpace* owner);
 
   void UpdateAllocatedSpaceLimits(void* low, void* high) {
-    // The use of atomic primitives does not guarantee correctness (wrt.
-    // desired semantics) by default. The loop here ensures that we update the
-    // values only if they did not change in between.
-    void* ptr = nullptr;
-    do {
-      ptr = lowest_ever_allocated_.Value();
-    } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
-    do {
-      ptr = highest_ever_allocated_.Value();
-    } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
+    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
+    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
  }
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
-- 
2.7.4
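
Editor's note on the pattern this revert removes: the deleted bodies of
UpdateHighWaterMark and UpdateAllocatedSpaceLimits both use a lock-free
retry loop that only attempts a compare-and-swap while the candidate value
would still move the stored value in the intended direction. The following
is a minimal standalone sketch of that pattern in portable C++11, using
std::atomic in place of V8's AtomicValue/TrySetValue; the name AtomicMax
and its parameters are illustrative, not from the V8 tree.

// Sketch only (not V8 code): the monotonic-max update the reverted CL
// performed via a do { ... } while (!TrySetValue(...)) loop.
#include <atomic>
#include <cstdint>

// Raises `slot` to `candidate` if it is larger, correctly even when
// several threads race on the same slot.
inline void AtomicMax(std::atomic<intptr_t>* slot, intptr_t candidate) {
  intptr_t current = slot->load(std::memory_order_relaxed);
  // On failure, compare_exchange_weak reloads `current`, so the loop
  // re-checks whether `candidate` still exceeds the stored value before
  // retrying; it exits once the stored value is already >= candidate.
  while (candidate > current &&
         !slot->compare_exchange_weak(current, candidate,
                                      std::memory_order_relaxed)) {
  }
}

The plain `if (new_mark > chunk->high_water_mark_)` form restored by this
revert is equivalent only while a single thread updates the mark; with
concurrent writers, two racing stores can lose the larger value, which is
the race the reverted CL was addressing.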