namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute frame type and stack pointer.
}
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- v->VisitPointer(&code_slot());
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
- // The arguments for cooked frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when the stack is cooked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
}
-inline void StackHandler::Iterate(ObjectVisitor* v) const {
- // Stack handlers do not contain any pointers that need to be
- // traversed.
+inline void StackHandler::Iterate(ObjectVisitor* v, Code* holder) const {
+ StackFrame::IteratePc(v, pc_address(), holder);
}
}
-inline Address StackHandler::pc() const {
+inline Address* StackHandler::pc_address() const {
const int offset = StackHandlerConstants::kPCOffset;
- return Memory::Address_at(address() + offset);
-}
-
-
-inline void StackHandler::set_pc(Address value) {
- const int offset = StackHandlerConstants::kPCOffset;
- Memory::Address_at(address() + offset) = value;
+ return reinterpret_cast<Address*>(address() + offset);
}
namespace v8 {
namespace internal {
+PcToCodeCache::PcToCodeCacheEntry
+ PcToCodeCache::cache_[PcToCodeCache::kPcToCodeCacheSize];
+
+int SafeStackFrameIterator::active_count_ = 0;
+
// Iterator that supports traversing the stack handlers of a
// particular frame. Needs to know the top of the handler chain.
class StackHandlerIterator BASE_EMBEDDED {
if (use_top || fp != NULL) {
Reset();
}
- JavaScriptFrame_.DisableHeapAccess();
}
#undef INITIALIZE_SINGLETON
SafeStackFrameIterator::SafeStackFrameIterator(
Address fp, Address sp, Address low_bound, Address high_bound) :
- low_bound_(low_bound), high_bound_(high_bound),
+ maintainer_(), low_bound_(low_bound), high_bound_(high_bound),
is_valid_top_(
IsWithinBounds(low_bound, high_bound,
Top::c_entry_fp(Top::GetCurrentThread())) &&
#endif
-// -------------------------------------------------------------------------
-
-
-void StackHandler::Cook(Code* code) {
- ASSERT(code->contains(pc()));
- set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
-}
-
-
-void StackHandler::Uncook(Code* code) {
- set_pc(code->instruction_start() + OffsetFrom(pc()));
- ASSERT(code->contains(pc()));
-}
-
-
-// -------------------------------------------------------------------------
-
-
bool StackFrame::HasHandler() const {
StackHandlerIterator it(this, top_handler());
return !it.done();
}
-
-void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
- ASSERT(!thread->stack_is_cooked());
- for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
- it.frame()->Cook();
+void StackFrame::IteratePc(ObjectVisitor* v,
+ Address* pc_address,
+ Code* holder) {
+ Address pc = *pc_address;
+ ASSERT(holder->contains(pc));
+ unsigned pc_offset = pc - holder->instruction_start();
+ Object* code = holder;
+ v->VisitPointer(&code);
+ if (code != holder) {
+ holder = reinterpret_cast<Code*>(code);
+ pc = holder->instruction_start() + pc_offset;
+ *pc_address = pc;
}
- thread->set_stack_is_cooked(true);
}
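The function above is the crux of the change: instead of cooking every pc into an offset before GC and uncooking it afterwards, the offset is derived on the fly while the visitor may move the Code object. A minimal standalone sketch of the same rebasing idea, using stand-in types (FakeCode and RelocatePc are illustrative, not V8 API):

#include <cassert>
#include <cstddef>
#include <cstdint>

typedef uintptr_t Address;

struct FakeCode {          // stand-in for v8::internal::Code
  Address start;           // instruction_start()
  size_t size;
  bool contains(Address pc) const { return start <= pc && pc < start + size; }
};

// Mirrors StackFrame::IteratePc: capture the pc's offset into the old code
// object, then recompute the pc against the (possibly moved) new location.
void RelocatePc(Address* pc_address, const FakeCode& old_code,
                const FakeCode& new_code) {
  assert(old_code.contains(*pc_address));
  size_t pc_offset = *pc_address - old_code.start;  // offset survives the move
  *pc_address = new_code.start + pc_offset;
  assert(new_code.contains(*pc_address));
}

int main() {
  FakeCode before = { 0x1000, 0x100 };
  FakeCode after = { 0x8000, 0x100 };  // "GC" moved the object here
  Address pc = 0x1040;
  RelocatePc(&pc, before, after);
  assert(pc == 0x8040);  // same offset, new base
  return 0;
}
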
-void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
- ASSERT(thread->stack_is_cooked());
- for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
- it.frame()->Uncook();
+StackFrame::Type StackFrame::ComputeType(State* state) {
+ ASSERT(state->fp != NULL);
+ if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
+ return ARGUMENTS_ADAPTOR;
}
- thread->set_stack_is_cooked(false);
+ // The marker and function offsets overlap. If the marker isn't a
+ // smi then the frame is a JavaScript frame -- and the marker is
+ // really the function.
+ const int offset = StandardFrameConstants::kMarkerOffset;
+ Object* marker = Memory::Object_at(state->fp + offset);
+ if (!marker->IsSmi()) return JAVA_SCRIPT;
+ return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
}
-void StackFrame::Cook() {
- Code* code = this->code();
- ASSERT(code->IsCode());
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- it.handler()->Cook(code);
- }
- ASSERT(code->contains(pc()));
- set_pc(AddressFrom<Address>(pc() - code->instruction_start()));
-}
-
-
-void StackFrame::Uncook() {
- Code* code = this->code();
- ASSERT(code->IsCode());
- for (StackHandlerIterator it(this, top_handler()); !it.done(); it.Advance()) {
- it.handler()->Uncook(code);
- }
- set_pc(code->instruction_start() + OffsetFrom(pc()));
- ASSERT(code->contains(pc()));
-}
-
StackFrame::Type StackFrame::GetCallerState(State* state) const {
  ComputeCallerState(state);
  return ComputeType(state);
}
+void ExitFrame::Iterate(ObjectVisitor* v) const {
+ // The arguments are traversed as part of the expression stack of
+ // the calling frame.
+ IteratePc(v, pc_address(), code());
+ v->VisitPointer(&code_slot());
+}
+
+
Address ExitFrame::GetCallerStackPointer() const {
return fp() + ExitFrameConstants::kCallerSPDisplacement;
}
}
+int JavaScriptFrame::GetProvidedParametersCount() const {
+ return ComputeParametersCount();
+}
+
+
+Address JavaScriptFrame::GetCallerStackPointer() const {
+ int arguments;
+ if (Heap::gc_state() != Heap::NOT_IN_GC ||
+ SafeStackFrameIterator::is_active()) {
+ // If we are currently iterating the safe stack, the arguments
+ // for frames are traversed as if they were expression stack
+ // elements of the calling frame. The reason for this rather
+ // strange decision is that we cannot access the function during
+ // mark-compact GCs when objects may have been marked. In fact,
+ // accessing heap objects (like function->shared() below) at all
+ // during GC is problematic.
+ arguments = 0;
+ } else {
+ // Compute the number of arguments by getting the number of formal
+ // parameters of the function. We must remember to take the
+ // receiver into account (+1).
+ JSFunction* function = JSFunction::cast(this->function());
+ arguments = function->shared()->formal_parameter_count() + 1;
+ }
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments * kPointerSize);
+}
+
+
+Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
+ const int arguments = Smi::cast(GetExpression(0))->value();
+ const int offset = StandardFrameConstants::kCallerSPOffset;
+ return fp() + offset + (arguments + 1) * kPointerSize;
+}
+
+
+Address InternalFrame::GetCallerStackPointer() const {
+ // Internal frames have no arguments. The stack pointer of the
+ // caller is at a fixed offset from the frame pointer.
+ return fp() + StandardFrameConstants::kCallerSPOffset;
+}
+
+
Code* ArgumentsAdaptorFrame::unchecked_code() const {
return Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline);
}
ASSERT(!it.done());
StackHandler* handler = it.handler();
ASSERT(handler->is_entry());
- handler->Iterate(v);
- // Make sure that there's the entry frame does not contain more than
- // one stack handler.
+ handler->Iterate(v, code());
#ifdef DEBUG
+ // Make sure that the entry frame does not contain more than one
+ // stack handler.
it.Advance();
ASSERT(it.done());
#endif
+ IteratePc(v, pc_address(), code());
}
v->VisitPointers(base, reinterpret_cast<Object**>(address));
base = reinterpret_cast<Object**>(address + StackHandlerConstants::kSize);
// Traverse the pointers in the handler itself.
- handler->Iterate(v);
+ handler->Iterate(v, code());
}
v->VisitPointers(base, limit);
}
void JavaScriptFrame::Iterate(ObjectVisitor* v) const {
IterateExpressions(v);
+ IteratePc(v, pc_address(), code());
// Traverse callee-saved registers, receiver, and parameters.
const int kBaseOffset = JavaScriptFrameConstants::kSavedRegistersOffset;
// Internal frames only have object pointers on the expression stack
// as they never have any arguments.
IterateExpressions(v);
+ IteratePc(v, pc_address(), code());
}
// -------------------------------------------------------------------------
+Code* PcToCodeCache::GcSafeCastToCode(HeapObject* object, Address pc) {
+ Code* code = reinterpret_cast<Code*>(object);
+ ASSERT(code != NULL && code->contains(pc));
+ return code;
+}
+
+
+Code* PcToCodeCache::GcSafeFindCodeForPc(Address pc) {
+ // Check if the pc points into a large object chunk.
+ LargeObjectChunk* chunk = Heap::lo_space()->FindChunkContainingPc(pc);
+ if (chunk != NULL) return GcSafeCastToCode(chunk->GetObject(), pc);
+
+ // Iterate through the 8K page until we reach the end or find an
+ // object starting after the pc.
+ Page* page = Page::FromAddress(pc);
+ HeapObjectIterator iterator(page, Heap::GcSafeSizeOfOldObjectFunction());
+ HeapObject* previous = NULL;
+ while (true) {
+ HeapObject* next = iterator.next();
+ if (next == NULL || next->address() >= pc) {
+ return GcSafeCastToCode(previous, pc);
+ }
+ previous = next;
+ }
+}
+
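GcSafeFindCodeForPc depends on the page iterator yielding objects in ascending address order: the code object holding pc is the last object that starts below pc. The >= comparison mirrors the source; a pc equal to an object's start address cannot lie inside that object's instructions (the header comes first), so it is attributed to the preceding object. The same walk in miniature over a plain sorted array (a hypothetical helper, not V8 code):

#include <cassert>
#include <cstddef>
#include <cstdint>

typedef uintptr_t Address;

// Last start address below pc, over starts[] sorted ascending. Mirrors the
// previous/next walk in GcSafeFindCodeForPc; returns 0 when pc precedes
// every object (the NULL case asserted in GcSafeCastToCode).
Address FindContainingStart(const Address* starts, size_t count, Address pc) {
  Address previous = 0;
  for (size_t i = 0; i < count; i++) {
    if (starts[i] >= pc) break;  // same comparison as the source
    previous = starts[i];
  }
  return previous;
}

int main() {
  Address starts[] = { 0x1000, 0x1400, 0x2000 };
  assert(FindContainingStart(starts, 3, 0x1450) == 0x1400);
  assert(FindContainingStart(starts, 3, 0x1400) == 0x1000);  // boundary pc
  return 0;
}
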
+PcToCodeCache::PcToCodeCacheEntry* PcToCodeCache::GetCacheEntry(Address pc) {
+ Counters::pc_to_code.Increment();
+ ASSERT(IsPowerOf2(kPcToCodeCacheSize));
+ uint32_t hash = ComputeIntegerHash(
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(pc)));
+ uint32_t index = hash & (kPcToCodeCacheSize - 1);
+ PcToCodeCacheEntry* entry = cache(index);
+ if (entry->pc == pc) {
+ Counters::pc_to_code_cached.Increment();
+ ASSERT(entry->code == GcSafeFindCodeForPc(pc));
+ } else {
+ // Because this code may be interrupted by a profiling signal that
+ // also queries the cache, we cannot update pc before the code has
+ // been set. Otherwise, we risk trying to use a cache entry before
+ // the code has been computed.
+ entry->code = GcSafeFindCodeForPc(pc);
+ entry->pc = pc;
+ }
+ return entry;
+}
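The store order in GetCacheEntry is deliberate: code is written before pc, so a profiling signal arriving between the two stores sees the old key, takes a miss, and recomputes, instead of pairing the new key with a stale value. A sketch of that publication pattern under the patch's assumptions (single writer interrupted by a signal handler on the same thread; the original relies on plain stores where modern code would use atomics to pin the ordering):

#include <cstdint>
#include <cstdio>

typedef uintptr_t Address;

struct Entry {
  Address pc;        // key: must be published last
  const char* code;  // value: must be in place first
};

// Writer: value first, key second. A handler interrupting between the two
// stores compares against the old pc and simply misses.
void Publish(Entry* entry, Address pc, const char* code) {
  entry->code = code;  // step 1: value
  entry->pc = pc;      // step 2: key makes the pair observable
}

// Reader (e.g. the profiling signal handler): hit only on a key match.
const char* Lookup(const Entry* entry, Address pc) {
  return entry->pc == pc ? entry->code : NULL;
}

int main() {
  Entry entry = { 0, NULL };
  Publish(&entry, 0x4242, "code-object-A");
  printf("hit: %s\n", Lookup(&entry, 0x4242));
  printf("miss: %p\n", (void*)Lookup(&entry, 0x9999));  // NULL -> recompute
  return 0;
}
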
+
+
+// -------------------------------------------------------------------------
+
int NumRegs(RegList reglist) {
int n = 0;
while (reglist != 0) {
class ThreadLocalTop;
+class PcToCodeCache : AllStatic {
+ public:
+ struct PcToCodeCacheEntry {
+ Address pc;
+ Code* code;
+ };
+
+ static PcToCodeCacheEntry* cache(int index) {
+ return &cache_[index];
+ }
+
+ static Code* GcSafeFindCodeForPc(Address pc);
+ static Code* GcSafeCastToCode(HeapObject* object, Address pc);
+
+ static void FlushPcToCodeCache() {
+ memset(&cache_[0], 0, sizeof(cache_));
+ }
+
+ static PcToCodeCacheEntry* GetCacheEntry(Address pc);
+
+ private:
+ static const int kPcToCodeCacheSize = 256;
+ static PcToCodeCacheEntry cache_[kPcToCodeCacheSize];
+};
+
+
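PcToCodeCache is a direct-mapped table, so a lookup costs one hash and a mask; kPcToCodeCacheSize must stay a power of two for the mask to be a valid modulo (the ASSERT(IsPowerOf2(...)) in GetCacheEntry earlier in this patch checks exactly this). A self-contained sketch of the indexing, with Mix standing in for V8's ComputeIntegerHash:

#include <cstdint>
#include <cstdio>

// Stand-in mixer; any decent integer hash serves the illustration.
static uint32_t Mix(uint32_t h) {
  h ^= h >> 16;
  h *= 0x45d9f3bu;
  h ^= h >> 16;
  return h;
}

int main() {
  const uint32_t kCacheSize = 256;  // mirrors kPcToCodeCacheSize
  uintptr_t pc = 0x7f3a1c40;        // some hypothetical return address
  uint32_t index = Mix(static_cast<uint32_t>(pc)) & (kCacheSize - 1);
  printf("pc %#zx -> slot %u of %u\n", (size_t)pc, index, kCacheSize);
  return 0;
}
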
class StackHandler BASE_EMBEDDED {
public:
enum State {
inline bool includes(Address address) const;
// Garbage collection support.
- inline void Iterate(ObjectVisitor* v) const;
+ inline void Iterate(ObjectVisitor* v, Code* holder) const;
// Conversion support.
static inline StackHandler* FromAddress(Address address);
bool is_try_catch() { return state() == TRY_CATCH; }
bool is_try_finally() { return state() == TRY_FINALLY; }
- // Garbage collection support.
- void Cook(Code* code);
- void Uncook(Code* code);
-
private:
// Accessors.
inline State state() const;
- inline Address pc() const;
- inline void set_pc(Address value);
+ inline Address* pc_address() const;
DISALLOW_IMPLICIT_CONSTRUCTORS(StackHandler);
};
virtual Code* unchecked_code() const = 0;
// Get the code associated with this frame.
- inline Code* code() const {
- return Code::cast(unchecked_code());
+ Code* code() const { return GetContainingCode(pc()); }
+
+ // Get the code object that contains the given pc.
+ Code* GetContainingCode(Address pc) const {
+ return PcToCodeCache::GetCacheEntry(pc)->code;
}
- // Garbage collection support.
- static void CookFramesForThread(ThreadLocalTop* thread);
- static void UncookFramesForThread(ThreadLocalTop* thread);
+ virtual void Iterate(ObjectVisitor* v) const = 0;
+ static void IteratePc(ObjectVisitor* v, Address* pc_address, Code* holder);
- virtual void Iterate(ObjectVisitor* v) const { }
// Printing support.
enum PrintMode { OVERVIEW, DETAILS };
// Get the type and the state of the calling frame.
virtual Type GetCallerState(State* state) const;
- // Cooking/uncooking support.
- void Cook();
- void Uncook();
-
friend class StackFrameIterator;
friend class StackHandlerIterator;
friend class SafeStackFrameIterator;
protected:
explicit JavaScriptFrame(StackFrameIterator* iterator)
- : StandardFrame(iterator), disable_heap_access_(false) { }
+ : StandardFrame(iterator) { }
virtual Address GetCallerStackPointer() const;
- // When this mode is enabled it is not allowed to access heap objects.
- // This is a special mode used when gathering stack samples in profiler.
- // A shortcoming is that caller's SP value will be calculated incorrectly
- // (see GetCallerStackPointer implementation), but it is not used for stack
- // sampling.
- void DisableHeapAccess() { disable_heap_access_ = true; }
-
private:
- bool disable_heap_access_;
inline Object* function_slot_object() const;
friend class StackFrameIterator;
void Advance();
void Reset();
+ static bool is_active() { return active_count_ > 0; }
+
static bool IsWithinBounds(
Address low_bound, Address high_bound, Address addr) {
return low_bound <= addr && addr <= high_bound;
bool IsValidFrame(StackFrame* frame) const;
bool IsValidCaller(StackFrame* frame);
+ // This is a nasty hack to make sure the active count is incremented
+ // before the constructor for the embedded iterator is invoked. This
+ // is needed because the constructor will start looking at frames
+ // right away and we need to make sure it doesn't start inspecting
+ // heap objects.
+ class ActiveCountMaintainer BASE_EMBEDDED {
+ public:
+ ActiveCountMaintainer() { active_count_++; }
+ ~ActiveCountMaintainer() { active_count_--; }
+ };
+
+ ActiveCountMaintainer maintainer_;
+ static int active_count_;
Address low_bound_;
Address high_bound_;
const bool is_valid_top_;
GCCallback Heap::global_gc_prologue_callback_ = NULL;
GCCallback Heap::global_gc_epilogue_callback_ = NULL;
+HeapObjectCallback Heap::gc_safe_size_of_old_object_ = NULL;
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
}
+int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
+ ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
+ ASSERT(!MarkCompactCollector::are_map_pointers_encoded());
+ MapWord map_word = object->map_word();
+ map_word.ClearMark();
+ map_word.ClearOverflow();
+ return object->SizeFromMap(map_word.ToMap());
+}
+
+
+int Heap::GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object) {
+ ASSERT(!Heap::InNewSpace(object)); // Code only works for old objects.
+ ASSERT(MarkCompactCollector::are_map_pointers_encoded());
+ uint32_t marker = Memory::uint32_at(object->address());
+ if (marker == MarkCompactCollector::kSingleFreeEncoding) {
+ return kIntSize;
+ } else if (marker == MarkCompactCollector::kMultiFreeEncoding) {
+ return Memory::int_at(object->address() + kIntSize);
+ } else {
+ MapWord map_word = object->map_word();
+ Address map_address = map_word.DecodeMapAddress(Heap::map_space());
+ Map* map = reinterpret_cast<Map*>(HeapObject::FromAddress(map_address));
+ return object->SizeFromMap(map);
+ }
+}
+
+
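The two marker cases decoded above are produced by EncodeFreeRegion in mark-compact.cc later in this patch: a one-word hole stores only kSingleFreeEncoding, while a larger hole stores kMultiFreeEncoding followed by the byte size. A round-trip sketch over a raw buffer (the constants mirror the patch; everything else is illustrative and assumes a 4-byte int, as V8 did):

#include <cassert>
#include <cstdint>
#include <cstring>

const uint32_t kSingleFree = 0;          // mirrors kSingleFreeEncoding
const uint32_t kMultiFree = 1;           // mirrors kMultiFreeEncoding
const int kIntSize = sizeof(uint32_t);   // mirrors v8's kIntSize

// Encode a free region, as in EncodeFreeRegion.
void Encode(uint8_t* p, int size) {
  assert(size >= kIntSize);
  if (size == kIntSize) {
    memcpy(p, &kSingleFree, kIntSize);
  } else {
    assert(size >= 2 * kIntSize);
    memcpy(p, &kMultiFree, kIntSize);
    memcpy(p + kIntSize, &size, kIntSize);  // assumes 4-byte int
  }
}

// Decode a region's size: the marker cases of
// GcSafeSizeOfOldObjectWithEncodedMap.
int SizeOf(const uint8_t* p) {
  uint32_t marker;
  memcpy(&marker, p, kIntSize);
  if (marker == kSingleFree) return kIntSize;
  assert(marker == kMultiFree);  // a real map word would be handled here
  int size;
  memcpy(&size, p + kIntSize, kIntSize);
  return size;
}

int main() {
  uint8_t buffer[16] = { 0 };
  Encode(buffer, 12);
  assert(SizeOf(buffer) == 12);
  Encode(buffer, kIntSize);
  assert(SizeOf(buffer) == kIntSize);
  return 0;
}
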
GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space) {
// Is global GC requested?
if (space != NEW_SPACE || FLAG_gc_global) {
MarkCompactCollector::CollectGarbage();
- MarkCompactEpilogue(is_compacting);
-
LOG(ResourceEvent("markcompact", "end"));
gc_state_ = NOT_IN_GC;
CompilationCache::MarkCompactPrologue();
- Top::MarkCompactPrologue(is_compacting);
- ThreadManager::MarkCompactPrologue(is_compacting);
-
CompletelyClearInstanceofCache();
if (is_compacting) FlushNumberStringCache();
}
-void Heap::MarkCompactEpilogue(bool is_compacting) {
- Top::MarkCompactEpilogue(is_compacting);
- ThreadManager::MarkCompactEpilogue(is_compacting);
-}
-
-
Object* Heap::FindCodeObject(Address a) {
Object* obj = code_space_->FindObject(a);
if (obj->IsFailure()) {
NewSpaceScavenger::Initialize();
MarkCompactCollector::Initialize();
+ MarkMapPointersAsEncoded(false);
+
// Setup memory allocator and reserve a chunk of memory for new
// space. The chunk is double the size of the requested reserved
// new space size to ensure that we can find a pair of semispaces that
roots_[kCodeStubsRootIndex] = value;
}
+ // Support for computing object sizes for old objects during GCs. Returns
+ // a function that is guaranteed to be safe for computing object sizes in
+ // the current GC phase.
+ static HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
+ return gc_safe_size_of_old_object_;
+ }
+
// Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
static void public_set_non_monomorphic_cache(NumberDictionary* value) {
roots_[kNonMonomorphicCacheRootIndex] = value;
static GCCallback global_gc_prologue_callback_;
static GCCallback global_gc_epilogue_callback_;
+ // Support for computing object sizes during GC.
+ static HeapObjectCallback gc_safe_size_of_old_object_;
+ static int GcSafeSizeOfOldObject(HeapObject* object);
+ static int GcSafeSizeOfOldObjectWithEncodedMap(HeapObject* object);
+
+ // Update the GC state. Called from the mark-compact collector.
+ static void MarkMapPointersAsEncoded(bool encoded) {
+ gc_safe_size_of_old_object_ = encoded
+ ? &GcSafeSizeOfOldObjectWithEncodedMap
+ : &GcSafeSizeOfOldObject;
+ }
+
// Checks whether a global GC is necessary
static GarbageCollector SelectGarbageCollector(AllocationSpace space);
// Code to be run before and after mark-compact.
static void MarkCompactPrologue(bool is_compacting);
- static void MarkCompactEpilogue(bool is_compacting);
// Completely clear the Instanceof cache (to stop it keeping objects alive
// around a GC).
friend class DisallowAllocationFailure;
friend class AlwaysAllocateScope;
friend class LinearAllocationScope;
+ friend class MarkCompactCollector;
};
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
-
-
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
if (fp == 0) return NONE;
// Compute the stack pointer.
}
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- v->VisitPointer(&code_slot());
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
-}
-
-
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-Address JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
- // The arguments for cooked frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when the stack is cooked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-Address ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
-
-Address InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
};
-class FrameCookingThreadVisitor : public ThreadVisitor {
- public:
- void VisitThread(ThreadLocalTop* top) {
- StackFrame::CookFramesForThread(top);
- }
-};
-
-class FrameUncookingThreadVisitor : public ThreadVisitor {
- public:
- void VisitThread(ThreadLocalTop* top) {
- StackFrame::UncookFramesForThread(top);
- }
-};
-
-static void IterateAllThreads(ThreadVisitor* visitor) {
- Top::IterateThread(visitor);
- ThreadManager::IterateArchivedThreads(visitor);
-}
-
// Finds all references to original and replaces them with substitution.
static void ReplaceCodeObject(Code* original, Code* substitution) {
ASSERT(!Heap::InNewSpace(substitution));
// so temporary replace the pointers with offset numbers
// in prologue/epilogue.
{
- FrameCookingThreadVisitor cooking_visitor;
- IterateAllThreads(&cooking_visitor);
-
Heap::IterateStrongRoots(&visitor, VISIT_ALL);
-
- FrameUncookingThreadVisitor uncooking_visitor;
- IterateAllThreads(&uncooking_visitor);
}
// Now iterate over all pointers of all objects, including code_target
GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_COMPACT);
EncodeForwardingAddresses();
+ Heap::MarkMapPointersAsEncoded(true);
UpdatePointers();
+ Heap::MarkMapPointersAsEncoded(false);
+ PcToCodeCache::FlushPcToCodeCache();
RelocateObjects();
} else {
SweepSpaces();
+ PcToCodeCache::FlushPcToCodeCache();
}
Finish();
// pair of distinguished invalid map encodings (for single word and multiple
// words) to indicate free regions in the page found during computation of
// forwarding addresses and skipped over in subsequent sweeps.
-static const uint32_t kSingleFreeEncoding = 0;
-static const uint32_t kMultiFreeEncoding = 1;
// Encode a free region, defined by the given start address and size, in the
void EncodeFreeRegion(Address free_start, int free_size) {
ASSERT(free_size >= kIntSize);
if (free_size == kIntSize) {
- Memory::uint32_at(free_start) = kSingleFreeEncoding;
+ Memory::uint32_at(free_start) = MarkCompactCollector::kSingleFreeEncoding;
} else {
ASSERT(free_size >= 2 * kIntSize);
- Memory::uint32_at(free_start) = kMultiFreeEncoding;
+ Memory::uint32_at(free_start) = MarkCompactCollector::kMultiFreeEncoding;
Memory::int_at(free_start + kIntSize) = free_size;
}
#ifdef DEBUG
// Checks whether performing mark-compact collection.
static bool in_use() { return state_ > PREPARE_GC; }
+ static bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif
// Determine type of object and emit deletion log event.
static void ReportDeleteIfNeeded(HeapObject* obj);
+ // Distinguishable invalid map encodings (for single word and multiple words)
+ // that indicate free regions.
+ static const uint32_t kSingleFreeEncoding = 0;
+ static const uint32_t kMultiFreeEncoding = 1;
+
private:
#ifdef DEBUG
enum CollectorState {
class Memory {
public:
+ static uint8_t& uint8_at(Address addr) {
+ return *reinterpret_cast<uint8_t*>(addr);
+ }
+
static uint16_t& uint16_at(Address addr) {
return *reinterpret_cast<uint16_t*>(addr);
}
}
+HeapObjectIterator::HeapObjectIterator(Page* page,
+ HeapObjectCallback size_func) {
+ Initialize(page->ObjectAreaStart(), page->AllocationTop(), size_func);
+}
+
+
void HeapObjectIterator::Initialize(Address cur, Address end,
HeapObjectCallback size_f) {
cur_addr_ = cur;
return Failure::Exception();
}
+
+LargeObjectChunk* LargeObjectSpace::FindChunkContainingPc(Address pc) {
+ // TODO(853): Change this implementation to only find executable
+ // chunks and use some kind of hash-based approach to speed it up.
+ for (LargeObjectChunk* chunk = first_chunk_;
+ chunk != NULL;
+ chunk = chunk->next()) {
+ Address chunk_address = chunk->address();
+ if (chunk_address <= pc && pc < chunk_address + chunk->size()) {
+ return chunk;
+ }
+ }
+ return NULL;
+}
+
+
void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
HeapObjectIterator(PagedSpace* space,
Address start,
HeapObjectCallback size_func);
+ HeapObjectIterator(Page* page, HeapObjectCallback size_func);
inline HeapObject* next() {
return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage();
// space, may be slow.
Object* FindObject(Address a);
+ // Finds the large object chunk containing the given pc; returns NULL
+ // if no such chunk exists.
+ LargeObjectChunk* FindChunkContainingPc(Address pc);
+
+
// Iterates objects covered by dirty regions.
void IterateDirtyRegions(ObjectSlotCallback func);
#ifdef ENABLE_LOGGING_AND_PROFILING
js_entry_sp_ = 0;
#endif
- stack_is_cooked_ = false;
try_catch_handler_address_ = NULL;
context_ = NULL;
int id = ThreadManager::CurrentId();
}
-void Top::MarkCompactPrologue(bool is_compacting) {
- MarkCompactPrologue(is_compacting, &thread_local_);
-}
-
-
-void Top::MarkCompactPrologue(bool is_compacting, char* data) {
- MarkCompactPrologue(is_compacting, reinterpret_cast<ThreadLocalTop*>(data));
-}
-
-
-void Top::MarkCompactPrologue(bool is_compacting, ThreadLocalTop* thread) {
- if (is_compacting) {
- StackFrame::CookFramesForThread(thread);
- }
-}
-
-
-void Top::MarkCompactEpilogue(bool is_compacting, char* data) {
- MarkCompactEpilogue(is_compacting, reinterpret_cast<ThreadLocalTop*>(data));
-}
-
-
-void Top::MarkCompactEpilogue(bool is_compacting) {
- MarkCompactEpilogue(is_compacting, &thread_local_);
-}
-
-
-void Top::MarkCompactEpilogue(bool is_compacting, ThreadLocalTop* thread) {
- if (is_compacting) {
- StackFrame::UncookFramesForThread(thread);
- }
-}
-
static int stack_trace_nesting_level = 0;
static StringStream* incomplete_message = NULL;
#ifdef ENABLE_LOGGING_AND_PROFILING
Address js_entry_sp_; // the stack pointer of the bottom js entry frame
#endif
- bool stack_is_cooked_;
- inline bool stack_is_cooked() { return stack_is_cooked_; }
- inline void set_stack_is_cooked(bool value) { stack_is_cooked_ = value; }
// Generated code scratch locations.
int32_t formal_count_;
// Generated code scratch locations.
static void* formal_count_address() { return &thread_local_.formal_count_; }
- static void MarkCompactPrologue(bool is_compacting);
- static void MarkCompactEpilogue(bool is_compacting);
- static void MarkCompactPrologue(bool is_compacting,
- char* archived_thread_data);
- static void MarkCompactEpilogue(bool is_compacting,
- char* archived_thread_data);
static void PrintCurrentStackTrace(FILE* out);
static void PrintStackTrace(FILE* out, char* thread_data);
static void PrintStack(StringStream* accumulator);
/* Number of contexts created from scratch. */ \
SC(contexts_created_from_scratch, V8.ContextsCreatedFromScratch) \
/* Number of contexts created by partial snapshot. */ \
- SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot)
+ SC(contexts_created_by_snapshot, V8.ContextsCreatedBySnapshot) \
+ /* Number of code objects found from pc. */ \
+ SC(pc_to_code, V8.PcToCode) \
+ SC(pc_to_code_cached, V8.PcToCodeCached)
#define STATS_COUNTER_LIST_2(SC) \
}
-void ThreadManager::MarkCompactPrologue(bool is_compacting) {
- for (ThreadState* state = ThreadState::FirstInUse();
- state != NULL;
- state = state->Next()) {
- char* data = state->data();
- data += HandleScopeImplementer::ArchiveSpacePerThread();
- Top::MarkCompactPrologue(is_compacting, data);
- }
-}
-
-
-void ThreadManager::MarkCompactEpilogue(bool is_compacting) {
- for (ThreadState* state = ThreadState::FirstInUse();
- state != NULL;
- state = state->Next()) {
- char* data = state->data();
- data += HandleScopeImplementer::ArchiveSpacePerThread();
- Top::MarkCompactEpilogue(is_compacting, data);
- }
-}
-
-
int ThreadManager::CurrentId() {
return Thread::GetThreadLocalInt(thread_id_key);
}
static void Iterate(ObjectVisitor* v);
static void IterateArchivedThreads(ThreadVisitor* v);
- static void MarkCompactPrologue(bool is_compacting);
- static void MarkCompactEpilogue(bool is_compacting);
static bool IsLockedByCurrentThread() { return mutex_owner_.IsSelf(); }
static int CurrentId();
namespace internal {
-StackFrame::Type StackFrame::ComputeType(State* state) {
- ASSERT(state->fp != NULL);
- if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
- return ARGUMENTS_ADAPTOR;
- }
- // The marker and function offsets overlap. If the marker isn't a
- // smi then the frame is a JavaScript frame -- and the marker is
- // really the function.
- const int offset = StandardFrameConstants::kMarkerOffset;
- Object* marker = Memory::Object_at(state->fp + offset);
- if (!marker->IsSmi()) return JAVA_SCRIPT;
- return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
-}
StackFrame::Type ExitFrame::GetStateForFramePointer(Address fp, State* state) {
return EXIT;
}
-int JavaScriptFrame::GetProvidedParametersCount() const {
- return ComputeParametersCount();
-}
-
-
-void ExitFrame::Iterate(ObjectVisitor* v) const {
- v->VisitPointer(&code_slot());
- // The arguments are traversed as part of the expression stack of
- // the calling frame.
-}
-
-byte* InternalFrame::GetCallerStackPointer() const {
- // Internal frames have no arguments. The stack pointer of the
- // caller is at a fixed offset from the frame pointer.
- return fp() + StandardFrameConstants::kCallerSPOffset;
-}
-
-byte* JavaScriptFrame::GetCallerStackPointer() const {
- int arguments;
- if (Heap::gc_state() != Heap::NOT_IN_GC || disable_heap_access_) {
- // The arguments for cooked frames are traversed as if they were
- // expression stack elements of the calling frame. The reason for
- // this rather strange decision is that we cannot access the
- // function during mark-compact GCs when the stack is cooked.
- // In fact accessing heap objects (like function->shared() below)
- // at all during GC is problematic.
- arguments = 0;
- } else {
- // Compute the number of arguments by getting the number of formal
- // parameters of the function. We must remember to take the
- // receiver into account (+1).
- JSFunction* function = JSFunction::cast(this->function());
- arguments = function->shared()->formal_parameter_count() + 1;
- }
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments * kPointerSize);
-}
-
-
-byte* ArgumentsAdaptorFrame::GetCallerStackPointer() const {
- const int arguments = Smi::cast(GetExpression(0))->value();
- const int offset = StandardFrameConstants::kCallerSPOffset;
- return fp() + offset + (arguments + 1) * kPointerSize;
-}
-
} } // namespace v8::internal