Chromium Code Reviews

Unified Diff: src/profile-generator.cc

Issue 6685084: Add support for CPU and heap profiles deletion. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Implemented per-profile deletion (created 9 years, 9 months ago)
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 302 matching lines...)
 struct NodesPair {
   NodesPair(ProfileNode* src, ProfileNode* dst)
       : src(src), dst(dst) { }
   ProfileNode* src;
   ProfileNode* dst;
 };


 class FilteredCloneCallback {
  public:
-  explicit FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
+  FilteredCloneCallback(ProfileNode* dst_root, int security_token_id)
       : stack_(10),
         security_token_id_(security_token_id) {
     stack_.Add(NodesPair(NULL, dst_root));
   }

   void BeforeTraversingChild(ProfileNode* parent, ProfileNode* child) {
     if (IsTokenAcceptable(child->entry()->security_token_id(),
                           parent->entry()->security_token_id())) {
       ProfileNode* clone = stack_.last().dst->FindOrAddChild(child->entry());
       clone->IncreaseSelfTicks(child->self_ticks());
(...skipping 209 matching lines...)
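FilteredCloneCallback is the traversal helper behind per-token profile views: while the profile tree is walked, only children whose code entries are acceptable for the given security token are copied under dst_root (the rest of the callback, including how rejected children are handled, falls in the elided lines). A rough, self-contained sketch of the same filtered-clone technique, using hypothetical Node and TokenAcceptable stand-ins rather than V8's ProfileNode machinery:

#include <string>
#include <utility>
#include <vector>

struct Node {
  std::string name;
  int token;        // hypothetical per-node security token
  int self_ticks;
  std::vector<Node*> children;
};

// Hypothetical acceptance rule standing in for IsTokenAcceptable().
static bool TokenAcceptable(int node_token, int filter_token) {
  return node_token == filter_token;
}

// Clones the call tree rooted at |src_root|, keeping only accepted nodes,
// using an explicit stack of (source, destination) pairs.
static Node* FilteredClone(const Node* src_root, int filter_token) {
  Node* dst_root = new Node{src_root->name, src_root->token,
                            src_root->self_ticks, {}};
  std::vector<std::pair<const Node*, Node*> > stack;
  stack.push_back(std::make_pair(src_root, dst_root));
  while (!stack.empty()) {
    std::pair<const Node*, Node*> top = stack.back();
    stack.pop_back();
    for (size_t i = 0; i < top.first->children.size(); ++i) {
      const Node* child = top.first->children[i];
      if (TokenAcceptable(child->token, filter_token)) {
        Node* clone = new Node{child->name, child->token,
                               child->self_ticks, {}};
        top.second->children.push_back(clone);
        stack.push_back(std::make_pair(child, clone));
      } else {
        // Fold the rejected node's own ticks into its nearest kept ancestor.
        top.second->self_ticks += child->self_ticks;
      }
    }
  }
  return dst_root;
}

The sketch skips rejected subtrees entirely and only folds the rejected node's own ticks upward; how V8's real callback handles rejected subtrees is in the elided code.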

 static void DeleteCodeEntry(CodeEntry** entry_ptr) {
   delete *entry_ptr;
 }

 static void DeleteCpuProfile(CpuProfile** profile_ptr) {
   delete *profile_ptr;
 }

 static void DeleteProfilesList(List<CpuProfile*>** list_ptr) {
-  (*list_ptr)->Iterate(DeleteCpuProfile);
-  delete *list_ptr;
+  if (*list_ptr) {
Vitaly Repeshko 2011/03/22 14:30:36 nit: Use explicit != NULL comparison.
mnaganov (inactive) 2011/03/22 16:03:09 Done.
+    (*list_ptr)->Iterate(DeleteCpuProfile);
+    delete *list_ptr;
+  }
 }
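The new guard presumably protects against NULL slots in profiles_by_token_: GetProfilesList (further down) grows that list on demand, so a slot can exist before its filtered list has been created. Per the review nit above, the check is meant to be written as an explicit comparison; a minimal sketch of the helper in that assumed final shape, reusing the declarations already in this file:

// Assumed final shape after the "!= NULL" nit (the patch set shown still
// reads "if (*list_ptr)"); relies on List, CpuProfile and DeleteCpuProfile
// declared earlier in this file.
static void DeleteProfilesList(List<CpuProfile*>** list_ptr) {
  if (*list_ptr != NULL) {
    (*list_ptr)->Iterate(DeleteCpuProfile);  // delete each contained profile
    delete *list_ptr;                        // then delete the list itself
  }
}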

 CpuProfilesCollection::~CpuProfilesCollection() {
   delete current_profiles_semaphore_;
   current_profiles_.Iterate(DeleteCpuProfile);
+  detached_profiles_.Iterate(DeleteCpuProfile);
   profiles_by_token_.Iterate(DeleteProfilesList);
   code_entries_.Iterate(DeleteCodeEntry);
 }


 bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid) {
   ASSERT(uid > 0);
   current_profiles_semaphore_->Wait();
   if (current_profiles_.length() >= kMaxSimultaneousProfiles) {
     current_profiles_semaphore_->Signal();
(...skipping 44 matching lines...)
     ASSERT(entry->value == NULL);
     entry->value = reinterpret_cast<void*>(unabridged_list->length() - 1);
     return GetProfile(security_token_id, profile->uid());
   }
   return NULL;
 }


 CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id,
                                               unsigned uid) {
-  HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
-                                                static_cast<uint32_t>(uid),
-                                                false);
-  int index;
-  if (entry != NULL) {
-    index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
-  } else {
-    return NULL;
-  }
+  int index = GetProfileIndex(uid);
+  if (index < 0) return NULL;
   List<CpuProfile*>* unabridged_list =
       profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)];
   if (security_token_id == TokenEnumerator::kNoSecurityToken) {
     return unabridged_list->at(index);
   }
   List<CpuProfile*>* list = GetProfilesList(security_token_id);
   if (list->at(index) == NULL) {
     (*list)[index] =
         unabridged_list->at(index)->FilteredClone(security_token_id);
   }
   return list->at(index);
 }


+int CpuProfilesCollection::GetProfileIndex(unsigned uid) {
+  HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid),
+                                                static_cast<uint32_t>(uid),
+                                                false);
+  return entry != NULL ?
+      static_cast<int>(reinterpret_cast<intptr_t>(entry->value)) : -1;
+}
+
+
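GetProfile now resolves the uid through the new GetProfileIndex helper, returning NULL for an unknown uid, and keeps lazily building token-filtered clones: the clone for a given security token is created on first request and cached in that token's list so later lookups reuse it. A rough, self-contained illustration of that lazy per-token caching, with hypothetical Profile/ProfileViews types rather than V8's List and HashMap:

#include <map>
#include <memory>
#include <string>
#include <vector>

struct Profile {
  std::string title;
  int token;  // hypothetical: the token this view was filtered for
};

// Stand-in for CpuProfile::FilteredClone(security_token_id).
static std::unique_ptr<Profile> FilteredClone(const Profile& full, int token) {
  return std::unique_ptr<Profile>(
      new Profile{full.title + " (filtered)", token});
}

class ProfileViews {
 public:
  explicit ProfileViews(const std::vector<Profile>* full) : full_(full) {}

  // Returns the token-filtered view of the profile at |index|,
  // creating and caching it on first use.
  const Profile* GetFiltered(int token, size_t index) {
    std::vector<std::unique_ptr<Profile> >& views = by_token_[token];
    if (index >= views.size()) views.resize(index + 1);
    if (!views[index]) {  // not materialized yet
      views[index] = FilteredClone((*full_)[index], token);
    }
    return views[index].get();
  }

 private:
  const std::vector<Profile>* full_;  // the unabridged profiles
  std::map<int, std::vector<std::unique_ptr<Profile> > > by_token_;
};

These cached per-token views are exactly what RemoveProfile below has to keep consistent when a profile is deleted.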
 bool CpuProfilesCollection::IsLastProfile(const char* title) {
   // Called from VM thread, and only it can mutate the list,
   // so no locking is needed here.
   if (current_profiles_.length() != 1) return false;
   return StrLength(title) == 0
       || strcmp(current_profiles_[0]->title(), title) == 0;
 }


+void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) {
+  // Called from VM thread for a completed profile.
+  unsigned uid = profile->uid();
+  int index = GetProfileIndex(uid);
+  if (index < 0) {
+    detached_profiles_.RemoveElement(profile);
+    return;
+  }
+  profiles_uids_.Remove(reinterpret_cast<void*>(uid),
+                        static_cast<uint32_t>(uid));
+  // Decrement all indexes above the deleted one.
+  for (HashMap::Entry* p = profiles_uids_.Start();
+       p;
Vitaly Repeshko 2011/03/22 14:30:36 != NULL
mnaganov (inactive) 2011/03/22 16:03:09 Done.
+       p = profiles_uids_.Next(p)) {
+    intptr_t p_index = reinterpret_cast<intptr_t>(p->value);
+    if (p_index > index) {
+      p->value = reinterpret_cast<void*>(p_index - 1);
+    }
+  }
+  for (int i = 0; i < profiles_by_token_.length(); ++i) {
+    List<CpuProfile*>* list = profiles_by_token_[i];
+    if (list != NULL && index < list->length()) {
+      // Move all filtered clones into detached_profiles_,
+      // so we can know that they are still in use.
+      CpuProfile* cloned_profile = list->Remove(index);
+      if (cloned_profile != NULL && cloned_profile != profile) {
+        detached_profiles_.Add(cloned_profile);
+      }
+    }
+  }
+}
+
+
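RemoveProfile keeps two structures in sync: the uid-to-index hash map and the per-token lists, where a profile and its filtered clones all live at the same index. Removing the element at that index shifts every later profile down by one, so every surviving map entry with a larger index is decremented; the filtered clones of the removed profile are moved into detached_profiles_ instead of being freed immediately, since they may still be in use. A self-contained sketch of the index-fixup invariant using standard containers (illustrative names, not V8 code):

#include <cassert>
#include <string>
#include <unordered_map>
#include <vector>

// Invariant: (*profiles)[(*uid_to_index)[uid]] is the profile with that uid.
static void RemoveByUid(std::vector<std::string>* profiles,
                        std::unordered_map<unsigned, size_t>* uid_to_index,
                        unsigned uid) {
  const size_t index = uid_to_index->at(uid);
  profiles->erase(profiles->begin() + index);
  uid_to_index->erase(uid);
  // Re-point every uid that referred to a later slot: those profiles all
  // shifted down by one when the vector closed the gap.
  for (auto& entry : *uid_to_index) {
    if (entry.second > index) --entry.second;
  }
}

int main() {
  std::vector<std::string> profiles = {"startup", "load", "idle"};
  std::unordered_map<unsigned, size_t> uid_to_index = {{1, 0}, {2, 1}, {3, 2}};
  RemoveByUid(&profiles, &uid_to_index, 2);     // drop the "load" profile
  assert(profiles[uid_to_index[1]] == "startup");
  assert(profiles[uid_to_index[3]] == "idle");  // its index moved from 2 to 1
  return 0;
}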
 int CpuProfilesCollection::TokenToIndex(int security_token_id) {
   ASSERT(TokenEnumerator::kNoSecurityToken == -1);
   return security_token_id + 1;  // kNoSecurityToken -> 0, 0 -> 1, ...
 }


 List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
     int security_token_id) {
   const int index = TokenToIndex(security_token_id);
   const int lists_to_add = index - profiles_by_token_.length() + 1;
(...skipping 595 matching lines...)
        p != NULL;
        p = retaining_paths_.Next(p)) {
     List<HeapGraphPath*>* list =
         reinterpret_cast<List<HeapGraphPath*>*>(p->value);
     list->Iterate(DeleteHeapGraphPath);
     delete list;
   }
 }


+void HeapSnapshot::Delete() {
+  collection_->RemoveSnapshot(this);
+  delete this;
+}
+
+
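HeapSnapshot::Delete() unregisters the snapshot from its owning collection and then frees itself with delete this. That is safe only because snapshots are heap-allocated and reached through the collection, so nothing may touch the object after Delete() returns; the embedder-facing v8::HeapSnapshot::Delete() is presumably a thin wrapper over this internal call (that wiring is not part of this file). A minimal sketch of the same self-unregistering pattern, with hypothetical Snapshot/SnapshotCollection types:

#include <algorithm>
#include <vector>

class Snapshot;

class SnapshotCollection {
 public:
  Snapshot* New();  // defined below, once Snapshot is complete
  void Remove(Snapshot* s) {
    snapshots_.erase(std::remove(snapshots_.begin(), snapshots_.end(), s),
                     snapshots_.end());
  }
 private:
  std::vector<Snapshot*> snapshots_;
};

class Snapshot {
 public:
  explicit Snapshot(SnapshotCollection* owner) : owner_(owner) {}
  // Precondition: |this| was allocated with new and is only reachable
  // through the collection; after Delete() returns, the pointer is dead.
  void Delete() {
    owner_->Remove(this);
    delete this;
  }
 private:
  SnapshotCollection* owner_;
};

Snapshot* SnapshotCollection::New() {
  Snapshot* s = new Snapshot(this);
  snapshots_.push_back(s);
  return s;
}

// Usage: SnapshotCollection c; Snapshot* s = c.New(); s->Delete();

RemoveSnapshot below is the collection's half of this handshake: it drops the snapshot from both the list and the uid hash map.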
 void HeapSnapshot::AllocateEntries(int entries_count,
                                    int children_count,
                                    int retainers_count) {
   ASSERT(raw_entries_ == NULL);
   raw_entries_ = NewArray<char>(
       HeapEntry::EntriesSize(entries_count, children_count, retainers_count));
 #ifdef DEBUG
   raw_entries_size_ =
       HeapEntry::EntriesSize(entries_count, children_count, retainers_count);
 #endif
(...skipping 291 matching lines...)


 HeapSnapshot* HeapSnapshotsCollection::GetSnapshot(unsigned uid) {
   HashMap::Entry* entry = snapshots_uids_.Lookup(reinterpret_cast<void*>(uid),
                                                  static_cast<uint32_t>(uid),
                                                  false);
   return entry != NULL ? reinterpret_cast<HeapSnapshot*>(entry->value) : NULL;
 }


+void HeapSnapshotsCollection::RemoveSnapshot(HeapSnapshot* snapshot) {
+  snapshots_.RemoveElement(snapshot);
+  unsigned uid = snapshot->uid();
+  snapshots_uids_.Remove(reinterpret_cast<void*>(uid),
+                         static_cast<uint32_t>(uid));
+}
+
+
 HeapSnapshotsDiff* HeapSnapshotsCollection::CompareSnapshots(
     HeapSnapshot* snapshot1,
     HeapSnapshot* snapshot2) {
   return comparator_.Compare(snapshot1, snapshot2);
 }


 HeapEntry *const HeapEntriesMap::kHeapEntryPlaceholder =
     reinterpret_cast<HeapEntry*>(1);

(...skipping 1604 matching lines...)


 String* GetConstructorNameForHeapProfile(JSObject* object) {
   if (object->IsJSFunction()) return HEAP->closure_symbol();
   return object->constructor_name();
 }

 } }  // namespace v8::internal

 #endif  // ENABLE_LOGGING_AND_PROFILING