*/
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+ /**
+ * Starts recording JS allocations as they occur and tracking of heap
+ * object population statistics.
+ */
+ void StartRecordingHeapAllocations();
+
+ /**
+ * Stops recording JS allocations and tracking of heap object population
+ * statistics, and clears all collected population statistics data.
+ */
+ void StopRecordingHeapAllocations();
+
+
private:
HeapProfiler();
~HeapProfiler();
}
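
A minimal embedder-side sketch of how the two new public methods might be driven. It assumes an already-initialized isolate obtained through the existing v8::Isolate::GetHeapProfiler() accessor; ProfileAllocations and RunWorkload are hypothetical names standing in for the embedder's own code.

  #include "v8-profiler.h"

  void ProfileAllocations(v8::Isolate* isolate) {
    v8::HeapProfiler* profiler = isolate->GetHeapProfiler();
    profiler->StartRecordingHeapAllocations();
    RunWorkload(isolate);  // hypothetical: run the JS whose allocations are of interest
    // Stops recording and clears the collected population statistics.
    profiler->StopRecordingHeapAllocations();
  }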
+void HeapProfiler::StartRecordingHeapAllocations() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StartHeapAllocationsRecording();
+}
+
+
+void HeapProfiler::StopRecordingHeapAllocations() {
+ reinterpret_cast<i::HeapProfiler*>(this)->StopHeapAllocationsRecording();
+}
+
+
v8::Testing::StressType internal::Testing::stress_type_ =
v8::Testing::kStressTypeOpt;
}
+ExternalReference ExternalReference::record_object_allocation_function(
+ Isolate* isolate) {
+ return ExternalReference(
+ Redirect(isolate,
+ FUNCTION_ADDR(HeapProfiler::RecordObjectAllocationFromMasm)));
+}
+
+
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state(
static ExternalReference get_make_code_young_function(Isolate* isolate);
+ // New heap objects tracking support.
+ static ExternalReference record_object_allocation_function(Isolate* isolate);
+
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
}
HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
- elms->address() + size_delta));
+ elms->address() + size_delta,
+ elms->Size()));
return FixedArrayBase::cast(HeapObject::FromAddress(
elms->address() + to_trim * entry_size));
}
#include "v8.h"
+#include "deoptimizer.h"
#include "heap-profiler.h"
#include "heap-snapshot-generator-inl.h"
HeapProfiler::HeapProfiler(Heap* heap)
: snapshots_(new HeapSnapshotsCollection(heap)),
- next_snapshot_uid_(1) {
+ next_snapshot_uid_(1),
+ is_tracking_allocations_(false) {
}
}
-void HeapProfiler::ObjectMoveEvent(Address from, Address to) {
- snapshots_->ObjectMoveEvent(from, to);
+void HeapProfiler::ObjectMoveEvent(Address from, Address to, int size) {
+ snapshots_->ObjectMoveEvent(from, to, size);
}
+
+void HeapProfiler::NewObjectEvent(Address addr, int size) {
+ snapshots_->NewObjectEvent(addr, size);
+}
+
+
+void HeapProfiler::UpdateObjectSizeEvent(Address addr, int size) {
+ snapshots_->UpdateObjectSizeEvent(addr, size);
+}
+
+
void HeapProfiler::SetRetainedObjectInfo(UniqueId id,
RetainedObjectInfo* info) {
// TODO(yurys, marja): Don't route this information through GlobalHandles.
heap()->isolate()->global_handles()->SetRetainedObjectInfo(id, info);
}
+
+void HeapProfiler::StartHeapAllocationsRecording() {
+ StartHeapObjectsTracking();
+ is_tracking_allocations_ = true;
+ DropCompiledCode();
+ snapshots_->UpdateHeapObjectsMap();
+}
+
+
+void HeapProfiler::StopHeapAllocationsRecording() {
+ StopHeapObjectsTracking();
+ is_tracking_allocations_ = false;
+ DropCompiledCode();
+}
+
+
+// Entry point invoked from generated code via the external reference
+// registered in assembler.cc; forwards the allocation to the profiler.
+void HeapProfiler::RecordObjectAllocationFromMasm(Isolate* isolate,
+ Address obj,
+ int size) {
+ isolate->heap_profiler()->NewObjectEvent(obj, size);
+}
+
+
+// Code compiled before the tracking mode changed does not match the new mode
+// (it lacks or still carries the allocation-recording instrumentation), so it
+// is flushed and affected functions fall back to lazy compilation.
+void HeapProfiler::DropCompiledCode() {
+ Isolate* isolate = heap()->isolate();
+ HandleScope scope(isolate);
+
+ if (FLAG_concurrent_recompilation) {
+ isolate->optimizing_compiler_thread()->Flush();
+ }
+
+ Deoptimizer::DeoptimizeAll(isolate);
+
+ Handle<Code> lazy_compile =
+ Handle<Code>(isolate->builtins()->builtin(Builtins::kLazyCompile));
+
+ heap()->CollectAllGarbage(Heap::kMakeHeapIterableMask,
+ "switch allocations tracking");
+
+ DisallowHeapAllocation no_allocation;
+
+ HeapIterator iterator(heap());
+ HeapObject* obj = NULL;
+ while ((obj = iterator.next()) != NULL) {
+ if (obj->IsJSFunction()) {
+ JSFunction* function = JSFunction::cast(obj);
+ SharedFunctionInfo* shared = function->shared();
+
+ if (!shared->allows_lazy_compilation()) continue;
+ if (!shared->script()->IsScript()) continue;
+
+ Code::Kind kind = function->code()->kind();
+ if (kind == Code::FUNCTION || kind == Code::BUILTIN) {
+ function->set_code(*lazy_compile);
+ shared->set_code(*lazy_compile);
+ }
+ }
+ }
+}
+
+
} } // namespace v8::internal
void StartHeapObjectsTracking();
void StopHeapObjectsTracking();
+
+ static void RecordObjectAllocationFromMasm(Isolate* isolate,
+ Address obj,
+ int size);
+
SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
int GetSnapshotsCount();
HeapSnapshot* GetSnapshot(int index);
SnapshotObjectId GetSnapshotObjectId(Handle<Object> obj);
void DeleteAllSnapshots();
- void ObjectMoveEvent(Address from, Address to);
+ void ObjectMoveEvent(Address from, Address to, int size);
+
+ void NewObjectEvent(Address addr, int size);
+
+ void UpdateObjectSizeEvent(Address addr, int size);
void DefineWrapperClass(
uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback);
void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
+ bool is_tracking_allocations() {
+ return is_tracking_allocations_;
+ }
+
+ void StartHeapAllocationsRecording();
+ void StopHeapAllocationsRecording();
+
+ int FindUntrackedObjects() {
+ return snapshots_->FindUntrackedObjects();
+ }
+
+ void DropCompiledCode();
+
private:
Heap* heap() const { return snapshots_->heap(); }
HeapSnapshotsCollection* snapshots_;
unsigned next_snapshot_uid_;
List<v8::HeapProfiler::WrapperInfoCallback> wrapper_callbacks_;
+ bool is_tracking_allocations_;
};
} } // namespace v8::internal
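
For reference, the allocation-reporting call sites added elsewhere in this patch follow the same guarded pattern; a sketch, with isolate, object and size_in_bytes standing in for the caller's locals:

  HeapProfiler* profiler = isolate->heap_profiler();
  if (profiler->is_tracking_allocations()) {
    profiler->NewObjectEvent(object->address(), size_in_bytes);
  }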
}
-void HeapObjectsMap::MoveObject(Address from, Address to) {
+void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
ASSERT(to != NULL);
ASSERT(from != NULL);
if (from == to) return;
int from_entry_info_index =
static_cast<int>(reinterpret_cast<intptr_t>(from_value));
entries_.at(from_entry_info_index).addr = to;
+ // The size of an object can change during its lifetime, so to keep the
+ // information about the object in entries_ consistent, we adjust its size
+ // when the object is migrated.
+ entries_.at(from_entry_info_index).size = object_size;
to_entry->value = from_value;
}
}
+void HeapObjectsMap::NewObject(Address addr, int size) {
+ ASSERT(addr != NULL);
+ FindOrAddEntry(addr, size, false);
+}
+
+
+void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
+ FindOrAddEntry(addr, size, false);
+}
+
+
SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
false);
SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
- unsigned int size) {
+ unsigned int size,
+ bool accessed) {
ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
true);
int entry_index =
static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
EntryInfo& entry_info = entries_.at(entry_index);
- entry_info.accessed = true;
+ entry_info.accessed = accessed;
entry_info.size = size;
return entry_info.id;
}
entry->value = reinterpret_cast<void*>(entries_.length());
SnapshotObjectId id = next_id_;
next_id_ += kObjectIdStep;
- entries_.Add(EntryInfo(id, addr, size));
+ entries_.Add(EntryInfo(id, addr, size, accessed));
ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
return id;
}
}
+// Returns the number of live heap objects that have no entry in the map;
+// tests use this to verify that allocation tracking did not miss anything.
+int HeapObjectsMap::FindUntrackedObjects() {
+ HeapIterator iterator(heap_);
+ int untracked = 0;
+ for (HeapObject* obj = iterator.next();
+ obj != NULL;
+ obj = iterator.next()) {
+ HashMap::Entry* entry = entries_map_.Lookup(
+ obj->address(), ComputePointerHash(obj->address()), false);
+ if (entry == NULL) {
+ untracked++;
+ } else {
+ int entry_index = static_cast<int>(
+ reinterpret_cast<intptr_t>(entry->value));
+ EntryInfo& entry_info = entries_.at(entry_index);
+ CHECK_EQ(obj->Size(), static_cast<int>(entry_info.size));
+ }
+ }
+ return untracked;
+}
+
+
SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
UpdateHeapObjectsMap();
time_intervals_.Add(TimeInterval(next_id_));
void SnapshotGenerationFinished();
SnapshotObjectId FindEntry(Address addr);
- SnapshotObjectId FindOrAddEntry(Address addr, unsigned int size);
- void MoveObject(Address from, Address to);
+ SnapshotObjectId FindOrAddEntry(Address addr,
+ unsigned int size,
+ bool accessed = true);
+ void MoveObject(Address from, Address to, int size);
+ void NewObject(Address addr, int size);
+ void UpdateObjectSize(Address addr, int size);
SnapshotObjectId last_assigned_id() const {
return next_id_ - kObjectIdStep;
}
static const SnapshotObjectId kGcRootsFirstSubrootId;
static const SnapshotObjectId kFirstAvailableObjectId;
+ int FindUntrackedObjects();
+
+ void UpdateHeapObjectsMap();
+
private:
struct EntryInfo {
EntryInfo(SnapshotObjectId id, Address addr, unsigned int size)
uint32_t count;
};
- void UpdateHeapObjectsMap();
void RemoveDeadEntries();
SnapshotObjectId next_id_;
return ids_.FindOrAddEntry(object_addr, object_size);
}
Handle<HeapObject> FindHeapObjectById(SnapshotObjectId id);
- void ObjectMoveEvent(Address from, Address to) { ids_.MoveObject(from, to); }
+ void ObjectMoveEvent(Address from, Address to, int size) {
+ ids_.MoveObject(from, to, size);
+ }
+ void NewObjectEvent(Address addr, int size) { ids_.NewObject(addr, size); }
+ void UpdateObjectSizeEvent(Address addr, int size) {
+ ids_.UpdateObjectSize(addr, size);
+ }
SnapshotObjectId last_assigned_id() const {
return ids_.last_assigned_id();
}
size_t GetUsedMemorySize() const;
+ int FindUntrackedObjects() { return ids_.FindUntrackedObjects(); }
+
+ void UpdateHeapObjectsMap() { ids_.UpdateHeapObjectsMap(); }
+
private:
bool is_tracking_objects_; // Whether tracking object moves is needed.
List<HeapSnapshot*> snapshots_;
if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
// Update NewSpace stats if necessary.
RecordCopiedObject(heap, target);
- HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address()));
+ HEAP_PROFILE(heap,
+ ObjectMoveEvent(source->address(), target->address(), size));
Isolate* isolate = heap->isolate();
if (isolate->logger()->is_logging_code_events() ||
isolate->cpu_profiler()->is_profiling()) {
alloc_memento->set_map_no_write_barrier(allocation_memento_map());
ASSERT(site->map() == allocation_site_map());
alloc_memento->set_allocation_site(site, SKIP_WRITE_BARRIER);
+ HeapProfiler* profiler = isolate()->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->UpdateObjectSizeEvent(HeapObject::cast(clone)->address(),
+ object_size);
+ profiler->NewObjectEvent(alloc_memento->address(),
+ AllocationMemento::kSize);
+ }
}
}
Address src,
int size,
AllocationSpace dest) {
- HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst));
+ HEAP_PROFILE(heap(), ObjectMoveEvent(src, dst, size));
ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
ASSERT(dest != LO_SPACE && size <= Page::kMaxNonCodeHeapObjectSize);
if (dest == OLD_POINTER_SPACE) {
ASSERT(target_space == heap()->old_pointer_space() ||
target_space == heap()->old_data_space());
Object* result;
- MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
+ MaybeObject* maybe_result = target_space->AllocateRaw(
+ object_size,
+ PagedSpace::MOVE_OBJECT);
if (maybe_result->ToObject(&result)) {
HeapObject* target = HeapObject::cast(result);
MigrateObject(target->address(),
int size = object->Size();
- MaybeObject* target = space->AllocateRaw(size);
+ MaybeObject* target = space->AllocateRaw(size, PagedSpace::MOVE_OBJECT);
if (target->IsFailure()) {
// OS refused to give us memory.
V8::FatalProcessOutOfMemory("Evacuation");
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
}
}
+
+ // The array may not be moved during GC, but its recorded size still has to
+ // be adjusted after trimming.
+ HeapProfiler* profiler = heap->isolate()->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->UpdateObjectSizeEvent(elms->address(), elms->Size());
+ }
}
UNCLASSIFIED,
63,
"Heap::allocation_sites_list_address()");
+ Add(ExternalReference::record_object_allocation_function(isolate).address(),
+ UNCLASSIFIED,
+ 64,
+ "HeapProfiler::RecordObjectAllocationFromMasm");
// Add a small set of deopt entry addresses to encoder without generating the
// deopt table code, which isn't possible at deserialization time.
entry,
Deoptimizer::LAZY,
Deoptimizer::CALCULATE_ENTRY_ADDRESS);
- Add(address, LAZY_DEOPTIMIZATION, 64 + entry, "lazy_deopt");
+ Add(address, LAZY_DEOPTIMIZATION, 65 + entry, "lazy_deopt");
}
}
Address Allocate(int space_index, int size) {
Address address = high_water_[space_index];
high_water_[space_index] = address + size;
+ HeapProfiler* profiler = isolate_->heap_profiler();
+ if (profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(address, size);
+ }
return address;
}
#ifndef V8_SPACES_INL_H_
#define V8_SPACES_INL_H_
+#include "heap-profiler.h"
#include "isolate.h"
#include "spaces.h"
#include "v8memory.h"
// Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
+MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
+ AllocationType event) {
+ HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+
HeapObject* object = AllocateLinearly(size_in_bytes);
if (object != NULL) {
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
if (identity() == CODE_SPACE) {
SkipList::Update(object->address(), size_in_bytes);
}
+ if (event == NEW_OBJECT && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(object->address(), size_in_bytes);
+ }
return object;
}
return SlowAllocateRaw(size_in_bytes);
}
- Object* obj = HeapObject::FromAddress(old_top);
+ HeapObject* obj = HeapObject::FromAddress(old_top);
allocation_info_.top += size_in_bytes;
ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+ HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+ if (profiler != NULL && profiler->is_tracking_allocations()) {
+ profiler->NewObjectEvent(obj->address(), size_in_bytes);
+ }
+
return obj;
}
Address* allocation_top_address() { return &allocation_info_.top; }
Address* allocation_limit_address() { return &allocation_info_.limit; }
+ enum AllocationType {
+ NEW_OBJECT,
+ MOVE_OBJECT
+ };
+
// Allocate the requested number of bytes in the space if possible, return a
// failure object if not.
- MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes);
+ MUST_USE_RESULT inline MaybeObject* AllocateRaw(
+ int size_in_bytes,
+ AllocationType event = NEW_OBJECT);
virtual bool ReserveSpace(int bytes);
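
A minimal sketch of the call-site convention the new parameter establishes (space and size_in_bytes stand in for the caller's locals): ordinary allocations keep the default NEW_OBJECT and are reported to the profiler when tracking is on, while GC evacuation passes MOVE_OBJECT so a migrated object is not reported as a fresh allocation (MigrateObject reports an ObjectMoveEvent for it instead).

  // Regular allocation: reported via NewObjectEvent when tracking is on.
  MaybeObject* maybe_new = space->AllocateRaw(size_in_bytes);

  // Evacuation: the object already has an entry in the tracking map, so the
  // NewObjectEvent is suppressed.
  MaybeObject* maybe_moved =
      space->AllocateRaw(size_in_bytes, PagedSpace::MOVE_OBJECT);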
STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
- __ j(zero, &two_byte_slice, Label::kNear);
+ // Use far jumps when allocation tracking is on: the extra code emitted by
+ // RecordObjectAllocation inside MacroAssembler::Allocate pushes the jump
+ // targets out of near-jump range.
+ Label::Distance jump_distance =
+ masm->isolate()->heap_profiler()->is_tracking_allocations()
+ ? Label::kFar
+ : Label::kNear;
+ __ j(zero, &two_byte_slice, jump_distance);
__ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
- __ jmp(&set_slice_header, Label::kNear);
+ __ jmp(&set_slice_header, jump_distance);
__ bind(&two_byte_slice);
__ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
__ bind(&set_slice_header);
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ if (isolate()->heap_profiler()->is_tracking_allocations()) {
+ RecordObjectAllocation(isolate(), result, object_size);
+ }
+
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
// Load address of new object into result.
LoadAllocationTopHelper(result, scratch, flags);
+ if (isolate()->heap_profiler()->is_tracking_allocations()) {
+ RecordObjectAllocation(isolate(), result, object_size);
+ }
+
// Align the next allocation. Storing the filler map without checking top is
// safe in new-space because the limit of the heap is aligned there.
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
}
+void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ Register object_size) {
+ FrameScope frame(this, StackFrame::EXIT);
+ PushSafepointRegisters();
+ PrepareCallCFunction(3);
+ // Stash object in kScratchRegister first: if object is in rdx it would be
+ // clobbered while the call arguments are being set up.
+ movq(kScratchRegister, object);
+ movq(arg_reg_3, object_size);
+ movq(arg_reg_2, kScratchRegister);
+ movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
+ CallCFunction(
+ ExternalReference::record_object_allocation_function(isolate), 3);
+ PopSafepointRegisters();
+}
+
+
+void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ int object_size) {
+ FrameScope frame(this, StackFrame::EXIT);
+ PushSafepointRegisters();
+ PrepareCallCFunction(3);
+ movq(arg_reg_2, object);
+ movq(arg_reg_3, Immediate(object_size));
+ movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
+ CallCFunction(
+ ExternalReference::record_object_allocation_function(isolate), 3);
+ PopSafepointRegisters();
+}
+
+
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
Label* gc_required,
AllocationFlags flags);
+ // Record a JS object allocation if allocation tracking is enabled.
+ void RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ Register object_size);
+
+ void RecordObjectAllocation(Isolate* isolate,
+ Register object,
+ int object_size);
+
// Undo allocation in new space. The object passed and objects allocated after
// it will no longer be allocated. Make sure that no pointers are left to the
// object(s) no longer allocated as they would be invalid when allocation is
}
+// Helper class for tracking and checking new JS heap allocations in tests.
+// Creating an instance starts allocation recording; the destructor verifies
+// that every live heap object was tracked and then stops recording.
+class HeapObjectsTracker {
+ public:
+ HeapObjectsTracker() {
+ heap_profiler_ = i::Isolate::Current()->heap_profiler();
+ CHECK_NE(NULL, heap_profiler_);
+ heap_profiler_->StartHeapAllocationsRecording();
+ }
+
+ ~HeapObjectsTracker() {
+ i::Isolate::Current()->heap()->CollectAllAvailableGarbage();
+ CHECK_EQ(0, heap_profiler_->FindUntrackedObjects());
+ heap_profiler_->StopHeapAllocationsRecording();
+ }
+
+ private:
+ i::HeapProfiler* heap_profiler_;
+};
+
+
#endif // ifndef CCTEST_H_
GetProperty(foo_func, v8::HeapGraphEdge::kInternal, "code");
CHECK_NE(NULL, code);
}
+
+
+// Example of using HeapObjectsTracker to check JS allocation tracking.
+TEST(HeapObjectsTracker) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+ HeapObjectsTracker tracker;
+ CompileRun("var a = 1.2");
+ CompileRun("var a = 1.2; var b = 1.0; var c = 1.0;");
+ CompileRun(
+ "var a = [];"
+ "for (var i = 0; i < 5; ++i)"
+ " a[i] = i;\n"
+ "for (var i = 0; i < 3; ++i)"
+ " a.shift();\n");
+}