#include "src/heap-profiler.h"
#include "src/isolate.h"
#include "src/list-inl.h"
+#include "src/log.h"
#include "src/msan.h"
#include "src/objects.h"
}
+// Looks up the cached descriptor index for the pair (source map, name).
+// Returns kAbsent on a cache miss, or immediately when |name| is not a
+// unique name (only unique names are used as cache keys).
+int DescriptorLookupCache::Lookup(Map* source, Name* name) {
+ if (!name->IsUniqueName()) return kAbsent;
+ int index = Hash(source, name);
+ Key& key = keys_[index];
+ if ((key.source == source) && (key.name == name)) return results_[index];
+ return kAbsent;
+}
+
+
+// Caches |result| for the pair (source, name). Non-unique names are
+// silently ignored (they cannot serve as keys). The cache is direct-mapped:
+// whatever previously occupied the hashed slot is overwritten.
+void DescriptorLookupCache::Update(Map* source, Name* name, int result) {
+ DCHECK(result != kAbsent);
+ if (name->IsUniqueName()) {
+ int index = Hash(source, name);
+ Key& key = keys_[index];
+ key.source = source;
+ key.name = name;
+ results_[index] = result;
+ }
+}
+
+
// Invalidates the instanceof cache by replacing the cached function with a
// Smi zero sentinel.
void Heap::ClearInstanceofCache() {
set_instanceof_cache_function(Smi::FromInt(0));
}
public:
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
- int Lookup(Map* source, Name* name) {
- if (!name->IsUniqueName()) return kAbsent;
- int index = Hash(source, name);
- Key& key = keys_[index];
- if ((key.source == source) && (key.name == name)) return results_[index];
- return kAbsent;
- }
+ inline int Lookup(Map* source, Name* name);
// Update an element in the cache.
- void Update(Map* source, Name* name, int result) {
- DCHECK(result != kAbsent);
- if (name->IsUniqueName()) {
- int index = Hash(source, name);
- Key& key = keys_[index];
- key.source = source;
- key.name = name;
- results_[index] = result;
- }
- }
+ inline void Update(Map* source, Name* name, int result);
// Clear the cache.
void Clear();
}
}
}
+
+
+// Enqueues |shared_info| as a code-flushing candidate. The list is threaded
+// through the candidates' code objects (see SetNextCandidate); a NULL link
+// means "not enqueued", which also guards against double insertion.
+void CodeFlusher::AddCandidate(SharedFunctionInfo* shared_info) {
+ if (GetNextCandidate(shared_info) == NULL) {
+ SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+ shared_function_info_candidates_head_ = shared_info;
+ }
+}
+
+
+// Enqueues |function| as a code-flushing candidate. The DCHECK requires the
+// function to still share code with its SharedFunctionInfo (i.e. it is not
+// running separately optimized code). The list is threaded through the
+// functions' next-function-link field; undefined means "not enqueued".
+void CodeFlusher::AddCandidate(JSFunction* function) {
+ DCHECK(function->code() == function->shared()->code());
+ if (GetNextCandidate(function)->IsUndefined()) {
+ SetNextCandidate(function, jsfunction_candidates_head_);
+ jsfunction_candidates_head_ = function;
+ }
+}
+
+
+// Enqueues |code_map_holder| on the optimized-code-map processing list.
+// The link lives inside the holder's optimized code map (see SetNextCodeMap);
+// an undefined link means the holder is not yet on the list.
+void CodeFlusher::AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+ if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
+ SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
+ optimized_code_map_holder_head_ = code_map_holder;
+ }
+}
+
+
+// Returns the address of the raw field acting as the candidate-list link for
+// a JSFunction (its next-function-link slot).
+JSFunction** CodeFlusher::GetNextCandidateSlot(JSFunction* candidate) {
+ return reinterpret_cast<JSFunction**>(
+ HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
+}
+
+
+// Reads the next candidate from the function's next-function-link field.
+// The cast is unchecked; callers rely on the list invariant that the link
+// is either a JSFunction or undefined.
+JSFunction* CodeFlusher::GetNextCandidate(JSFunction* candidate) {
+ Object* next_candidate = candidate->next_function_link();
+ return reinterpret_cast<JSFunction*>(next_candidate);
+}
+
+
+// Stores the list link; the UPDATE_WEAK_WRITE_BARRIER mode indicates the
+// link is treated as a weak reference by the GC — confirm against the
+// write-barrier mode definitions.
+void CodeFlusher::SetNextCandidate(JSFunction* candidate,
+ JSFunction* next_candidate) {
+ candidate->set_next_function_link(next_candidate, UPDATE_WEAK_WRITE_BARRIER);
+}
+
+
+// Resets the link to undefined. SKIP_WRITE_BARRIER is presumably safe here
+// because undefined is an immortal immovable root — verify.
+void CodeFlusher::ClearNextCandidate(JSFunction* candidate, Object* undefined) {
+ DCHECK(undefined->IsUndefined());
+ candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
+}
+
+
+// For SharedFunctionInfo candidates the list is threaded through the code
+// object's gc_metadata slot rather than a dedicated field.
+SharedFunctionInfo* CodeFlusher::GetNextCandidate(
+ SharedFunctionInfo* candidate) {
+ Object* next_candidate = candidate->code()->gc_metadata();
+ return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
+}
+
+
+// Stores the next candidate into the code object's gc_metadata slot
+// (the counterpart of GetNextCandidate above).
+void CodeFlusher::SetNextCandidate(SharedFunctionInfo* candidate,
+ SharedFunctionInfo* next_candidate) {
+ candidate->code()->set_gc_metadata(next_candidate);
+}
+
+
+// Clears the gc_metadata link; no write barrier since NULL is not a heap
+// reference.
+void CodeFlusher::ClearNextCandidate(SharedFunctionInfo* candidate) {
+ candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
}
-} // namespace v8::internal
+
+
+// Reads the next holder from the link slot (kNextMapIndex) inside the
+// holder's optimized code map.
+SharedFunctionInfo* CodeFlusher::GetNextCodeMap(SharedFunctionInfo* holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
+ return reinterpret_cast<SharedFunctionInfo*>(next_map);
+}
+
+
+// Stores |next_holder| into the link slot (kNextMapIndex) of the holder's
+// optimized code map.
+void CodeFlusher::SetNextCodeMap(SharedFunctionInfo* holder,
+ SharedFunctionInfo* next_holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
+}
+
+
+// Resets the code-map link slot back to undefined, unlinking the holder.
+void CodeFlusher::ClearNextCodeMap(SharedFunctionInfo* holder) {
+ FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+ code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
+}
+
+} // namespace internal
+} // namespace v8
#endif // V8_HEAP_MARK_COMPACT_INL_H_
shared_function_info_candidates_head_(NULL),
optimized_code_map_holder_head_(NULL) {}
- void AddCandidate(SharedFunctionInfo* shared_info) {
- if (GetNextCandidate(shared_info) == NULL) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
- }
- }
-
- void AddCandidate(JSFunction* function) {
- DCHECK(function->code() == function->shared()->code());
- if (GetNextCandidate(function)->IsUndefined()) {
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
- }
- }
-
- void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
- if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
- SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
- optimized_code_map_holder_head_ = code_map_holder;
- }
- }
+ inline void AddCandidate(SharedFunctionInfo* shared_info);
+ inline void AddCandidate(JSFunction* function);
+ inline void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
void EvictCandidate(SharedFunctionInfo* shared_info);
void EvictJSFunctionCandidates();
void EvictSharedFunctionInfoCandidates();
- static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
- }
-
- static JSFunction* GetNextCandidate(JSFunction* candidate) {
- Object* next_candidate = candidate->next_function_link();
- return reinterpret_cast<JSFunction*>(next_candidate);
- }
-
- static void SetNextCandidate(JSFunction* candidate,
- JSFunction* next_candidate) {
- candidate->set_next_function_link(next_candidate,
- UPDATE_WEAK_WRITE_BARRIER);
- }
-
- static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
- DCHECK(undefined->IsUndefined());
- candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
- }
-
- static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
- Object* next_candidate = candidate->code()->gc_metadata();
- return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
- }
-
- static void SetNextCandidate(SharedFunctionInfo* candidate,
- SharedFunctionInfo* next_candidate) {
- candidate->code()->set_gc_metadata(next_candidate);
- }
-
- static void ClearNextCandidate(SharedFunctionInfo* candidate) {
- candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
- }
-
- static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
- return reinterpret_cast<SharedFunctionInfo*>(next_map);
- }
-
- static void SetNextCodeMap(SharedFunctionInfo* holder,
- SharedFunctionInfo* next_holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
- }
-
- static void ClearNextCodeMap(SharedFunctionInfo* holder) {
- FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
- code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
- }
+ static inline JSFunction** GetNextCandidateSlot(JSFunction* candidate);
+ static inline JSFunction* GetNextCandidate(JSFunction* candidate);
+ static inline void SetNextCandidate(JSFunction* candidate,
+ JSFunction* next_candidate);
+ static inline void ClearNextCandidate(JSFunction* candidate,
+ Object* undefined);
+
+ static inline SharedFunctionInfo* GetNextCandidate(
+ SharedFunctionInfo* candidate);
+ static inline void SetNextCandidate(SharedFunctionInfo* candidate,
+ SharedFunctionInfo* next_candidate);
+ static inline void ClearNextCandidate(SharedFunctionInfo* candidate);
+
+ static inline SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder);
+ static inline void SetNextCodeMap(SharedFunctionInfo* holder,
+ SharedFunctionInfo* next_holder);
+ static inline void ClearNextCodeMap(SharedFunctionInfo* holder);
Isolate* isolate_;
JSFunction* jsfunction_candidates_head_;
namespace internal {
+// Determines the specialized visitor id for |map| by delegating to the
+// (instance_type, instance_size, has_unboxed_fields) overload; unboxed
+// double fields are only considered when the flag is enabled and the map
+// does not have a fast (all-tagged) pointer layout.
+StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(Map* map) {
+ return GetVisitorId(map->instance_type(), map->instance_size(),
+ FLAG_unbox_double_fields && !map->HasFastPointerLayout());
+}
+
+
StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
int instance_type, int instance_size, bool has_unboxed_fields) {
if (instance_type < FIRST_NONSTRING_TYPE) {
bool has_unboxed_fields);
// Determine which specialized visitor should be used for given map.
- static VisitorId GetVisitorId(Map* map) {
- return GetVisitorId(
- map->instance_type(), map->instance_size(),
- FLAG_unbox_double_fields && !map->HasFastPointerLayout());
- }
+ static VisitorId GetVisitorId(Map* map);
// For visitors that allow specialization by size calculate VisitorId based
// on size, base visitor id and generic visitor id.
// -----------------------------------------------------------------------------
// PageIterator
-
PageIterator::PageIterator(PagedSpace* space)
: space_(space),
prev_page_(&space->anchor_),
// -----------------------------------------------------------------------------
-// NewSpacePageIterator
+// SemiSpaceIterator
+
+// Returns the next object in the semispace, or NULL when the limit is
+// reached. When the cursor hits the end of a page, iteration resumes at the
+// area start of the following page (which must not be the anchor page).
+HeapObject* SemiSpaceIterator::Next() {
+ if (current_ == limit_) return NULL;
+ if (NewSpacePage::IsAtEnd(current_)) {
+ NewSpacePage* page = NewSpacePage::FromLimit(current_);
+ page = page->next_page();
+ DCHECK(!page->is_anchor());
+ current_ = page->area_start();
+ if (current_ == limit_) return NULL;
+ }
+
+ HeapObject* object = HeapObject::FromAddress(current_);
+ int size = object->Size();
+
+ current_ += size;
+ return object;
+}
+// ObjectIterator interface: forwards to Next().
+HeapObject* SemiSpaceIterator::next_object() { return Next(); }
+
+
+// -----------------------------------------------------------------------------
+// NewSpacePageIterator
+
NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
: prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
// -----------------------------------------------------------------------------
// HeapObjectIterator
+
+// Returns the next live object, pulling from the current page until it is
+// exhausted and then advancing page by page; NULL when iteration has ended.
+HeapObject* HeapObjectIterator::Next() {
+ do {
+ HeapObject* next_obj = FromCurrentPage();
+ if (next_obj != NULL) return next_obj;
+ } while (AdvanceToNextPage());
+ return NULL;
+}
+
+
+// ObjectIterator interface: forwards to Next().
+HeapObject* HeapObjectIterator::next_object() { return Next(); }
+
+
HeapObject* HeapObjectIterator::FromCurrentPage() {
while (cur_addr_ != cur_end_) {
if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
// --------------------------------------------------------------------------
+// AllocationResult
+
+// Decodes the allocation space to retry in; only valid for a retry result,
+// where object_ holds the space id encoded as a Smi.
+AllocationSpace AllocationResult::RetrySpace() {
+ DCHECK(IsRetry());
+ return static_cast<AllocationSpace>(Smi::cast(object_)->value());
+}
+
+
+// --------------------------------------------------------------------------
// PagedSpace
+
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
}
+bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }
+
+
void MemoryChunk::set_scan_on_scavenge(bool scan) {
if (scan) {
if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
}
-void MemoryChunk::UpdateHighWaterMark(Address mark) {
- if (mark == NULL) return;
- // Need to subtract one from the mark because when a chunk is full the
- // top points to the next address after the chunk, which effectively belongs
- // to another chunk. See the comment to Page::FromAllocationTop.
- MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
- int new_mark = static_cast<int>(mark - chunk->address());
- if (new_mark > chunk->high_water_mark_) {
- chunk->high_water_mark_ = new_mark;
- }
-}
-
-
PointerChunkIterator::PointerChunkIterator(Heap* heap)
: state_(kOldSpaceState),
old_iterator_(heap->old_space()),
lo_iterator_(heap->lo_space()) {}
-Page* Page::next_page() {
- DCHECK(next_chunk()->owner() == owner());
- return static_cast<Page*>(next_chunk());
-}
-
-
-Page* Page::prev_page() {
- DCHECK(prev_chunk()->owner() == owner());
- return static_cast<Page*>(prev_chunk());
+// Walks all pointer-containing chunks: old-space pages, then map-space
+// pages, then large-object-space chunks that hold fixed arrays. Returns
+// NULL once exhausted. The switch intentionally falls through to the next
+// state whenever the current sub-iterator is drained.
+MemoryChunk* PointerChunkIterator::next() {
+ switch (state_) {
+ case kOldSpaceState: {
+ if (old_iterator_.has_next()) {
+ return old_iterator_.next();
+ }
+ state_ = kMapState;
+ // Fall through.
+ }
+ case kMapState: {
+ if (map_iterator_.has_next()) {
+ return map_iterator_.next();
+ }
+ state_ = kLargeObjectState;
+ // Fall through.
+ }
+ case kLargeObjectState: {
+ HeapObject* heap_object;
+ do {
+ heap_object = lo_iterator_.Next();
+ if (heap_object == NULL) {
+ state_ = kFinishedState;
+ return NULL;
+ }
+ // Fixed arrays are the only pointer-containing objects in large
+ // object space.
+ } while (!heap_object->IsFixedArray());
+ MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
+ return answer;
+ }
+ case kFinishedState:
+ return NULL;
+ default:
+ break;
+ }
+ UNREACHABLE();
+ return NULL;
}
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/base/platform/mutex.h"
+#include "src/flags.h"
#include "src/hashmap.h"
#include "src/list.h"
-#include "src/log.h"
+#include "src/objects.h"
#include "src/utils.h"
namespace v8 {
// Approximate amount of physical memory committed for this chunk.
size_t CommittedPhysicalMemory() { return high_water_mark_; }
- static inline void UpdateHighWaterMark(Address mark);
+ // Raises the owning chunk's cached high-water mark to cover |mark|;
+ // a NULL mark is ignored.
+ static inline void UpdateHighWaterMark(Address mark) {
+ if (mark == NULL) return;
+ // Need to subtract one from the mark because when a chunk is full the
+ // top points to the next address after the chunk, which effectively belongs
+ // to another chunk. See the comment to Page::FromAllocationTop.
+ MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
+ int new_mark = static_cast<int>(mark - chunk->address());
+ if (new_mark > chunk->high_water_mark_) {
+ chunk->high_water_mark_ = new_mark;
+ }
+ }
protected:
size_t size_;
}
// Returns the next page in the chain of pages owned by a space.
- inline Page* next_page();
- inline Page* prev_page();
+ inline Page* next_page() {
+ DCHECK(next_chunk()->owner() == owner());
+ return static_cast<Page*>(next_chunk());
+ }
+ // Returns the previous page in the chain of pages owned by the same space.
+ inline Page* prev_page() {
+ DCHECK(prev_chunk()->owner() == owner());
+ return static_cast<Page*>(prev_chunk());
+ }
inline void set_next_page(Page* page);
inline void set_prev_page(Page* page);
// Advance to the next object, skipping free spaces and other fillers and
// skipping the special garbage section of which there is one per space.
// Returns NULL when the iteration has ended.
- inline HeapObject* Next() {
- do {
- HeapObject* next_obj = FromCurrentPage();
- if (next_obj != NULL) return next_obj;
- } while (AdvanceToNextPage());
- return NULL;
- }
-
- virtual HeapObject* next_object() { return Next(); }
+ inline HeapObject* Next();
+ virtual inline HeapObject* next_object();
private:
enum PageMode { kOnePageOnly, kAllPagesInSpace };
return object_;
}
- AllocationSpace RetrySpace() {
- DCHECK(IsRetry());
- return static_cast<AllocationSpace>(Smi::cast(object_)->value());
- }
+ inline AllocationSpace RetrySpace();
private:
explicit AllocationResult(AllocationSpace space)
// Checks whether an object/address is in this space.
inline bool Contains(Address a);
- bool Contains(HeapObject* o) { return Contains(o->address()); }
+ inline bool Contains(HeapObject* o);
// Unlike Contains() methods it is safe to call this one even for addresses
// of unmapped memory.
bool ContainsSafe(Address addr);
// Create an iterator over the allocated objects in the given to-space.
explicit SemiSpaceIterator(NewSpace* space);
- HeapObject* Next() {
- if (current_ == limit_) return NULL;
- if (NewSpacePage::IsAtEnd(current_)) {
- NewSpacePage* page = NewSpacePage::FromLimit(current_);
- page = page->next_page();
- DCHECK(!page->is_anchor());
- current_ = page->area_start();
- if (current_ == limit_) return NULL;
- }
-
- HeapObject* object = HeapObject::FromAddress(current_);
- int size = object->Size();
-
- current_ += size;
- return object;
- }
+ inline HeapObject* Next();
// Implementation of the ObjectIterator functions.
- virtual HeapObject* next_object() { return Next(); }
+ virtual inline HeapObject* next_object();
private:
void Initialize(Address start, Address end);
inline explicit PointerChunkIterator(Heap* heap);
// Return NULL when the iterator is done.
- MemoryChunk* next() {
- switch (state_) {
- case kOldSpaceState: {
- if (old_iterator_.has_next()) {
- return old_iterator_.next();
- }
- state_ = kMapState;
- // Fall through.
- }
- case kMapState: {
- if (map_iterator_.has_next()) {
- return map_iterator_.next();
- }
- state_ = kLargeObjectState;
- // Fall through.
- }
- case kLargeObjectState: {
- HeapObject* heap_object;
- do {
- heap_object = lo_iterator_.Next();
- if (heap_object == NULL) {
- state_ = kFinishedState;
- return NULL;
- }
- // Fixed arrays are the only pointer-containing objects in large
- // object space.
- } while (!heap_object->IsFixedArray());
- MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
- return answer;
- }
- case kFinishedState:
- return NULL;
- default:
- break;
- }
- UNREACHABLE();
- return NULL;
- }
-
+ inline MemoryChunk* next();
private:
enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
bool Object::IsAccessorInfo() const { return IsExecutableAccessorInfo(); }
-bool Object::IsSmi() const {
- return HAS_SMI_TAG(this);
-}
-
-
-bool Object::IsHeapObject() const {
- return Internals::HasHeapObjectTag(this);
-}
-
-
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
};
+// In objects.h to be usable without objects-inl.h inclusion.
+// True when the pointer carries a Smi (small integer) tag.
+bool Object::IsSmi() const { return HAS_SMI_TAG(this); }
+// True when the pointer carries a heap-object tag.
+bool Object::IsHeapObject() const { return Internals::HasHeapObjectTag(this); }
+
+
struct Brief {
explicit Brief(const Object* const v) : value(v) {}
const Object* value;