// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "profile-generator-inl.h"

#include "global-handles.h"
#include "scopeinfo.h"

namespace v8 {
namespace internal {


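// StringsStorage interns C strings in a HashMap keyed by string content:
// equal strings share a single heap copy, so clients can compare the
// returned pointers directly instead of calling strcmp.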
bool StringsStorage::StringsMatch(void* key1, void* key2) {
  return strcmp(reinterpret_cast<char*>(key1),
                reinterpret_cast<char*>(key2)) == 0;
}


StringsStorage::StringsStorage(Heap* heap)
    : hash_seed_(heap->HashSeed()), names_(StringsMatch) {
}


StringsStorage::~StringsStorage() {
  for (HashMap::Entry* p = names_.Start();
       p != NULL;
       p = names_.Next(p)) {
    DeleteArray(reinterpret_cast<const char*>(p->value));
  }
}


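// Returns the canonical interned copy of |src|, allocating a
// NUL-terminated duplicate only on the first occurrence.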
const char* StringsStorage::GetCopy(const char* src) {
  int len = static_cast<int>(strlen(src));
  HashMap::Entry* entry = GetEntry(src, len);
  if (entry->value == NULL) {
    Vector<char> dst = Vector<char>::New(len + 1);
    OS::StrNCpy(dst, src, len);
    dst[len] = '\0';
    entry->key = dst.start();
    entry->value = entry->key;
  }
  return reinterpret_cast<const char*>(entry->value);
}


const char* StringsStorage::GetFormatted(const char* format, ...) {
  va_list args;
  va_start(args, format);
  const char* result = GetVFormatted(format, args);
  va_end(args);
  return result;
}


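// Takes ownership of |str|: the string is stored if it is new, or
// deleted immediately if an equal string is already interned.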
const char* StringsStorage::AddOrDisposeString(char* str, int len) {
  HashMap::Entry* entry = GetEntry(str, len);
  if (entry->value == NULL) {
    // New entry added.
    entry->key = str;
    entry->value = str;
  } else {
    DeleteArray(str);
  }
  return reinterpret_cast<const char*>(entry->value);
}


const char* StringsStorage::GetVFormatted(const char* format, va_list args) {
  Vector<char> str = Vector<char>::New(1024);
  int len = OS::VSNPrintF(str, format, args);
  if (len == -1) {
    // The formatted string did not fit into the buffer; fall back to
    // interning the raw format string.
    DeleteArray(str.start());
    return GetCopy(format);
  }
  return AddOrDisposeString(str.start(), len);
}


const char* StringsStorage::GetName(Name* name) {
  if (name->IsString()) {
    String* str = String::cast(name);
    int length = Min(kMaxNameSize, str->length());
    int actual_length = 0;
    SmartArrayPointer<char> data =
        str->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL, 0, length,
                       &actual_length);
    return AddOrDisposeString(data.Detach(), actual_length);
  } else if (name->IsSymbol()) {
    return "<symbol>";
  }
  return "";
}


const char* StringsStorage::GetName(int index) {
  return GetFormatted("%d", index);
}


const char* StringsStorage::GetFunctionName(Name* name) {
  return BeautifyFunctionName(GetName(name));
}


const char* StringsStorage::GetFunctionName(const char* name) {
  return BeautifyFunctionName(GetCopy(name));
}


const char* StringsStorage::BeautifyFunctionName(const char* name) {
  return (*name == 0) ? ProfileGenerator::kAnonymousFunctionName : name;
}


size_t StringsStorage::GetUsedMemorySize() const {
  size_t size = sizeof(*this);
  size += sizeof(HashMap::Entry) * names_.capacity();
  for (HashMap::Entry* p = names_.Start(); p != NULL; p = names_.Next(p)) {
    size += strlen(reinterpret_cast<const char*>(p->value)) + 1;
  }
  return size;
}


HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
  uint32_t hash = StringHasher::HashSequentialString(str, len, hash_seed_);
  return names_.Lookup(const_cast<char*>(str), hash, true);
}


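// Usage sketch (illustrative, not part of the original file): because
// equal strings are interned, two lookups that produce the same
// characters return the same pointer.
//
//   StringsStorage names(heap);
//   const char* a = names.GetFormatted("%s #%d", "get x", 1);
//   const char* b = names.GetCopy("get x #1");
//   // Here a == b holds: both resolve to the same interned entry.
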
const char* const CodeEntry::kEmptyNamePrefix = "";
const char* const CodeEntry::kEmptyResourceName = "";
const char* const CodeEntry::kEmptyBailoutReason = "";


CodeEntry::~CodeEntry() {
  delete no_frame_ranges_;
}


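// Computes a hash identifying a call site. Entries with a non-zero
// shared_id_ hash by that id alone; all others mix in the name, prefix
// and resource-name pointers plus the line number, which relies on the
// profiler interning those strings (see StringsStorage above).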
uint32_t CodeEntry::GetCallUid() const {
  uint32_t hash = ComputeIntegerHash(tag_, v8::internal::kZeroHashSeed);
  if (shared_id_ != 0) {
    hash ^= ComputeIntegerHash(static_cast<uint32_t>(shared_id_),
                               v8::internal::kZeroHashSeed);
  } else {
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_prefix_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(
        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(resource_name_)),
        v8::internal::kZeroHashSeed);
    hash ^= ComputeIntegerHash(line_number_, v8::internal::kZeroHashSeed);
  }
  return hash;
}


bool CodeEntry::IsSameAs(CodeEntry* entry) const {
  return this == entry
      || (tag_ == entry->tag_
          && shared_id_ == entry->shared_id_
          && (shared_id_ != 0
              || (name_prefix_ == entry->name_prefix_
                  && name_ == entry->name_
                  && resource_name_ == entry->resource_name_
                  && line_number_ == entry->line_number_)));
}


void CodeEntry::SetBuiltinId(Builtins::Name id) {
  tag_ = Logger::BUILTIN_TAG;
  builtin_id_ = id;
}


ProfileNode* ProfileNode::FindChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), false);
  return map_entry != NULL ?
      reinterpret_cast<ProfileNode*>(map_entry->value) : NULL;
}


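// Returns the child node for |entry|, creating and registering it on
// first use. children_ gives O(1) lookup by code entry, while
// children_list_ preserves insertion order for traversal.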
ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) {
  HashMap::Entry* map_entry =
      children_.Lookup(entry, CodeEntryHash(entry), true);
  if (map_entry->value == NULL) {
    // New node added.
    ProfileNode* new_node = new ProfileNode(tree_, entry);
    map_entry->value = new_node;
    children_list_.Add(new_node);
  }
  return reinterpret_cast<ProfileNode*>(map_entry->value);
}


void ProfileNode::Print(int indent) {
  OS::Print("%5u %*c %s%s %d #%d %s",
            self_ticks_,
            indent, ' ',
            entry_->name_prefix(),
            entry_->name(),
            entry_->script_id(),
            id(),
            entry_->bailout_reason());
  if (entry_->resource_name()[0] != '\0')
    OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
  OS::Print("\n");
  for (HashMap::Entry* p = children_.Start();
       p != NULL;
       p = children_.Next(p)) {
    reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2);
  }
}


class DeleteNodesCallback {
 public:
  void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }

  void AfterAllChildrenTraversed(ProfileNode* node) {
    delete node;
  }

  void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};


ProfileTree::ProfileTree()
    : root_entry_(Logger::FUNCTION_TAG, "(root)"),
      next_node_id_(1),
      root_(new ProfileNode(this, &root_entry_)) {
}


ProfileTree::~ProfileTree() {
  DeleteNodesCallback cb;
  TraverseDepthFirst(&cb);
}


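// Walks |path| from its last element (the outermost caller) down to its
// first (the current function), adding missing children along the way,
// then bumps the self-tick count of the innermost node and returns it.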
ProfileNode* ProfileTree::AddPathFromEnd(const Vector<CodeEntry*>& path) {
  ProfileNode* node = root_;
  for (CodeEntry** entry = path.start() + path.length() - 1;
       entry != path.start() - 1;
       --entry) {
    if (*entry != NULL) {
      node = node->FindOrAddChild(*entry);
    }
  }
  node->IncrementSelfTicks();
  return node;
}


void ProfileTree::AddPathFromStart(const Vector<CodeEntry*>& path) {
  ProfileNode* node = root_;
  for (CodeEntry** entry = path.start();
       entry != path.start() + path.length();
       ++entry) {
    if (*entry != NULL) {
      node = node->FindOrAddChild(*entry);
    }
  }
  node->IncrementSelfTicks();
}


struct NodesPair {
  NodesPair(ProfileNode* src, ProfileNode* dst)
      : src(src), dst(dst) { }
  ProfileNode* src;
  ProfileNode* dst;
};


class Position {
 public:
  explicit Position(ProfileNode* node)
      : node(node), child_idx_(0) { }
  INLINE(ProfileNode* current_child()) {
    return node->children()->at(child_idx_);
  }
  INLINE(bool has_current_child()) {
    return child_idx_ < node->children()->length();
  }
  INLINE(void next_child()) { ++child_idx_; }

  ProfileNode* node;
 private:
  int child_idx_;
};


// Non-recursive implementation of a depth-first post-order tree traversal.
template <typename Callback>
void ProfileTree::TraverseDepthFirst(Callback* callback) {
  List<Position> stack(10);
  stack.Add(Position(root_));
  while (stack.length() > 0) {
    Position& current = stack.last();
    if (current.has_current_child()) {
      callback->BeforeTraversingChild(current.node, current.current_child());
      stack.Add(Position(current.current_child()));
    } else {
      callback->AfterAllChildrenTraversed(current.node);
      if (stack.length() > 1) {
        Position& parent = stack[stack.length() - 2];
        callback->AfterChildTraversed(parent.node, current.node);
        parent.next_child();
      }
      // Remove child from the stack.
      stack.RemoveLast();
    }
  }
}


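// Illustrative callback (hypothetical, not part of the original file):
// a visitor that sums self ticks over the whole tree via the post-order
// hook, assuming the ProfileNode::self_ticks() accessor.
//
//   class TotalTicksCallback {
//    public:
//     TotalTicksCallback() : total_(0) { }
//     void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
//     void AfterAllChildrenTraversed(ProfileNode* node) {
//       total_ += node->self_ticks();
//     }
//     void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
//     unsigned total() const { return total_; }
//    private:
//     unsigned total_;
//   };
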
CpuProfile::CpuProfile(const char* title, bool record_samples)
    : title_(title),
      record_samples_(record_samples),
      start_time_(Time::NowFromSystemTime()) {
  timer_.Start();
}


void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
  ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path);
  if (record_samples_) samples_.Add(top_frame_node);
}


void CpuProfile::CalculateTotalTicksAndSamplingRate() {
  end_time_ = start_time_ + timer_.Elapsed();
}


void CpuProfile::Print() {
  OS::Print("[Top down]:\n");
  top_down_.Print();
}


CodeEntry* const CodeMap::kSharedFunctionCodeEntry = NULL;
const CodeMap::CodeTreeConfig::Key CodeMap::CodeTreeConfig::kNoKey = NULL;


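// Registers a code object at |addr|. Any existing entries overlapping
// [addr, addr + size) are stale (their memory has been reused) and are
// dropped first.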
void CodeMap::AddCode(Address addr, CodeEntry* entry, unsigned size) {
  DeleteAllCoveredCode(addr, addr + size);
  CodeTree::Locator locator;
  tree_.Insert(addr, &locator);
  locator.set_value(CodeEntryInfo(entry, size));
}


void CodeMap::DeleteAllCoveredCode(Address start, Address end) {
  List<Address> to_delete;
  Address addr = end - 1;
  while (addr >= start) {
    CodeTree::Locator locator;
    if (!tree_.FindGreatestLessThan(addr, &locator)) break;
    Address start2 = locator.key(), end2 = start2 + locator.value().size;
    if (start2 < end && start < end2) to_delete.Add(start2);
    addr = start2 - 1;
  }
  for (int i = 0; i < to_delete.length(); ++i) tree_.Remove(to_delete[i]);
}


CodeEntry* CodeMap::FindEntry(Address addr, Address* start) {
  CodeTree::Locator locator;
  if (tree_.FindGreatestLessThan(addr, &locator)) {
    // locator.key() <= addr. Need to check that addr is within entry.
    const CodeEntryInfo& entry = locator.value();
    if (addr < (locator.key() + entry.size)) {
      if (start) {
        *start = locator.key();
      }
      return entry.entry;
    }
  }
  return NULL;
}


int CodeMap::GetSharedId(Address addr) {
  CodeTree::Locator locator;
  // For shared function entries, the 'size' field is used to store their IDs.
  if (tree_.Find(addr, &locator)) {
    const CodeEntryInfo& entry = locator.value();
    ASSERT(entry.entry == kSharedFunctionCodeEntry);
    return entry.size;
  } else {
    tree_.Insert(addr, &locator);
    int id = next_shared_id_++;
    locator.set_value(CodeEntryInfo(kSharedFunctionCodeEntry, id));
    return id;
  }
}


void CodeMap::MoveCode(Address from, Address to) {
  if (from == to) return;
  CodeTree::Locator locator;
  if (!tree_.Find(from, &locator)) return;
  CodeEntryInfo entry = locator.value();
  tree_.Remove(from);
  AddCode(to, entry.entry, entry.size);
}


void CodeMap::CodeTreePrinter::Call(
    const Address& key, const CodeMap::CodeEntryInfo& value) {
  // For shared function entries, the 'size' field is used to store their IDs.
  if (value.entry == kSharedFunctionCodeEntry) {
    OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
  } else {
    OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
  }
}


void CodeMap::Print() {
  CodeTreePrinter printer;
  tree_.ForEach(&printer);
}


CpuProfilesCollection::CpuProfilesCollection(Heap* heap)
    : function_and_resource_names_(heap),
      current_profiles_semaphore_(1) {
}


static void DeleteCodeEntry(CodeEntry** entry_ptr) {
  delete *entry_ptr;
}


static void DeleteCpuProfile(CpuProfile** profile_ptr) {
  delete *profile_ptr;
}


CpuProfilesCollection::~CpuProfilesCollection() {
  finished_profiles_.Iterate(DeleteCpuProfile);
  current_profiles_.Iterate(DeleteCpuProfile);
  code_entries_.Iterate(DeleteCodeEntry);
}


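// current_profiles_semaphore_ is a binary semaphore used as a mutex: it
// serializes access to current_profiles_ between the VM thread and the
// sampling thread. Starting fails if the limit of simultaneous profiles
// is reached or a profile with the same title is already running.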
bool CpuProfilesCollection::StartProfiling(const char* title,
                                           bool record_samples) {
  current_profiles_semaphore_.Wait();
  if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
    current_profiles_semaphore_.Signal();
    return false;
  }
  for (int i = 0; i < current_profiles_.length(); ++i) {
    if (strcmp(current_profiles_[i]->title(), title) == 0) {
      // Ignore attempts to start a profile with the same title.
      current_profiles_semaphore_.Signal();
      return false;
    }
  }
  current_profiles_.Add(new CpuProfile(title, record_samples));
  current_profiles_semaphore_.Signal();
  return true;
}


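// Stops the most recently started profile whose title matches |title|;
// an empty title matches any running profile. Returns NULL if no
// running profile matches.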
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
  const int title_len = StrLength(title);
  CpuProfile* profile = NULL;
  current_profiles_semaphore_.Wait();
  for (int i = current_profiles_.length() - 1; i >= 0; --i) {
    if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) {
      profile = current_profiles_.Remove(i);
      break;
    }
  }
  current_profiles_semaphore_.Signal();

  if (profile == NULL) return NULL;
  profile->CalculateTotalTicksAndSamplingRate();
  finished_profiles_.Add(profile);
  return profile;
}


bool CpuProfilesCollection::IsLastProfile(const char* title) {
  // Called from the VM thread, and only it can mutate the list,
  // so no locking is needed here.
  if (current_profiles_.length() != 1) return false;
  return StrLength(title) == 0
      || strcmp(current_profiles_[0]->title(), title) == 0;
}


void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
  // Called from the VM thread for a completed profile.
  for (int i = 0; i < finished_profiles_.length(); i++) {
    if (profile == finished_profiles_[i]) {
      finished_profiles_.Remove(i);
      return;
    }
  }
  UNREACHABLE();
}


void CpuProfilesCollection::AddPathToCurrentProfiles(
    const Vector<CodeEntry*>& path) {
  // As starting / stopping profiles is rare relative to this method,
  // we don't bother minimizing the duration of lock holding, e.g. by
  // copying the contents of the list to a local vector.
  current_profiles_semaphore_.Wait();
  for (int i = 0; i < current_profiles_.length(); ++i) {
    current_profiles_[i]->AddPath(path);
  }
  current_profiles_semaphore_.Signal();
}


CodeEntry* CpuProfilesCollection::NewCodeEntry(
    Logger::LogEventsAndTags tag,
    const char* name,
    const char* name_prefix,
    const char* resource_name,
    int line_number) {
  CodeEntry* code_entry = new CodeEntry(tag,
                                        name,
                                        name_prefix,
                                        resource_name,
                                        line_number);
  code_entries_.Add(code_entry);
  return code_entry;
}


const char* const ProfileGenerator::kAnonymousFunctionName =
    "(anonymous function)";
const char* const ProfileGenerator::kProgramEntryName =
    "(program)";
const char* const ProfileGenerator::kIdleEntryName =
    "(idle)";
const char* const ProfileGenerator::kGarbageCollectorEntryName =
    "(garbage collector)";
const char* const ProfileGenerator::kUnresolvedFunctionName =
    "(unresolved function)";


ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles)
    : profiles_(profiles),
      program_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)),
      idle_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG, kIdleEntryName)),
      gc_entry_(
          profiles->NewCodeEntry(Logger::BUILTIN_TAG,
                                 kGarbageCollectorEntryName)),
      unresolved_entry_(
          profiles->NewCodeEntry(Logger::FUNCTION_TAG,
                                 kUnresolvedFunctionName)) {
}


void ProfileGenerator::RecordTickSample(const TickSample& sample) {
  // Allocate space for stack frames + pc + function + vm-state.
  ScopedVector<CodeEntry*> entries(sample.frames_count + 3);
  // As the actual number of decoded code entries may vary, initialize
  // the entries vector with NULL values.
  CodeEntry** entry = entries.start();
  memset(entry, 0, entries.length() * sizeof(*entry));
  if (sample.pc != NULL) {
    if (sample.has_external_callback && sample.state == EXTERNAL &&
        sample.top_frame_type == StackFrame::EXIT) {
      // Don't use the PC when in external callback code, as it can point
      // inside a callback's code, and we would erroneously report that
      // the callback calls itself.
      *entry++ = code_map_.FindEntry(sample.external_callback);
    } else {
      Address start;
      CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start);
      // If the PC is in the function code before it has set up its stack
      // frame or after the frame was destroyed, SafeStackFrameIterator
      // incorrectly thinks that ebp contains the return address of the
      // current function and skips the caller's frame. Check for this
      // case and just skip such samples.
      if (pc_entry) {
        List<OffsetRange>* ranges = pc_entry->no_frame_ranges();
        if (ranges) {
          Code* code = Code::cast(HeapObject::FromAddress(start));
          int pc_offset = static_cast<int>(
              sample.pc - code->instruction_start());
          for (int i = 0; i < ranges->length(); i++) {
            OffsetRange& range = ranges->at(i);
            if (range.from <= pc_offset && pc_offset < range.to) {
              return;
            }
          }
        }
        *entry++ = pc_entry;

        if (pc_entry->builtin_id() == Builtins::kFunctionCall ||
            pc_entry->builtin_id() == Builtins::kFunctionApply) {
          // When the current function is the FunctionCall or FunctionApply
          // builtin, the top frame is either the frame of the calling JS
          // function or an internal frame. In the latter case we know the
          // caller for sure, but in the former case we don't, so we simply
          // replace the frame with an 'unresolved' entry.
          if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) {
            *entry++ = unresolved_entry_;
          }
        }
      }
    }

    for (const Address* stack_pos = sample.stack,
           *stack_end = stack_pos + sample.frames_count;
         stack_pos != stack_end;
         ++stack_pos) {
      *entry++ = code_map_.FindEntry(*stack_pos);
    }
  }

  if (FLAG_prof_browser_mode) {
    bool no_symbolized_entries = true;
    for (CodeEntry** e = entries.start(); e != entry; ++e) {
      if (*e != NULL) {
        no_symbolized_entries = false;
        break;
      }
    }
    // If no frames were symbolized, put the VM state entry in.
    if (no_symbolized_entries) {
      *entry++ = EntryForVMState(sample.state);
    }
  }

  profiles_->AddPathToCurrentProfiles(entries);
}


CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
  switch (tag) {
    case GC:
      return gc_entry_;
    case JS:
    case COMPILER:
    // DOM event handlers are reported as OTHER / EXTERNAL entries.
    // To avoid confusing people, let's put all these entries into
    // one bucket.
    case OTHER:
    case EXTERNAL:
      return program_entry_;
    case IDLE:
      return idle_entry_;
    default: return NULL;
  }
}

} }  // namespace v8::internal