| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 12 matching lines...) |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #include "profile-generator-inl.h" | 30 #include "profile-generator-inl.h" |
| 31 | 31 |
| 32 #include "compiler.h" | 32 #include "compiler.h" |
| 33 #include "debug.h" |
| 34 #include "sampler.h" |
| 33 #include "global-handles.h" | 35 #include "global-handles.h" |
| 34 #include "scopeinfo.h" | 36 #include "scopeinfo.h" |
| 35 #include "unicode.h" | 37 #include "unicode.h" |
| 36 #include "zone-inl.h" | 38 #include "zone-inl.h" |
| 37 #include "debug.h" | |
| 38 | 39 |
| 39 namespace v8 { | 40 namespace v8 { |
| 40 namespace internal { | 41 namespace internal { |
| 41 | 42 |
| 42 | 43 |
| 43 TokenEnumerator::TokenEnumerator() | |
| 44 : token_locations_(4), | |
| 45 token_removed_(4) { | |
| 46 } | |
| 47 | |
| 48 | |
| 49 TokenEnumerator::~TokenEnumerator() { | |
| 50 Isolate* isolate = Isolate::Current(); | |
| 51 for (int i = 0; i < token_locations_.length(); ++i) { | |
| 52 if (!token_removed_[i]) { | |
| 53 isolate->global_handles()->ClearWeakness(token_locations_[i]); | |
| 54 isolate->global_handles()->Destroy(token_locations_[i]); | |
| 55 } | |
| 56 } | |
| 57 } | |
| 58 | |
| 59 | |
| 60 int TokenEnumerator::GetTokenId(Object* token) { | |
| 61 Isolate* isolate = Isolate::Current(); | |
| 62 if (token == NULL) return TokenEnumerator::kNoSecurityToken; | |
| 63 for (int i = 0; i < token_locations_.length(); ++i) { | |
| 64 if (*token_locations_[i] == token && !token_removed_[i]) return i; | |
| 65 } | |
| 66 Handle<Object> handle = isolate->global_handles()->Create(token); | |
| 67 // handle.location() points to a memory cell holding a pointer | |
| 68 // to a token object in V8's heap. | |
| 69 isolate->global_handles()->MakeWeak(handle.location(), | |
| 70 this, | |
| 71 TokenRemovedCallback); | |
| 72 token_locations_.Add(handle.location()); | |
| 73 token_removed_.Add(false); | |
| 74 return token_locations_.length() - 1; | |
| 75 } | |
| 76 | |
| 77 | |
| 78 void TokenEnumerator::TokenRemovedCallback(v8::Isolate* isolate, | |
| 79 v8::Persistent<v8::Value>* handle, | |
| 80 void* parameter) { | |
| 81 reinterpret_cast<TokenEnumerator*>(parameter)->TokenRemoved( | |
| 82 Utils::OpenPersistent(handle).location()); | |
| 83 handle->Dispose(isolate); | |
| 84 } | |
| 85 | |
| 86 | |
| 87 void TokenEnumerator::TokenRemoved(Object** token_location) { | |
| 88 for (int i = 0; i < token_locations_.length(); ++i) { | |
| 89 if (token_locations_[i] == token_location && !token_removed_[i]) { | |
| 90 token_removed_[i] = true; | |
| 91 return; | |
| 92 } | |
| 93 } | |
| 94 } | |
| 95 | |
| 96 | |
| 97 StringsStorage::StringsStorage() | 44 StringsStorage::StringsStorage() |
| 98 : names_(StringsMatch) { | 45 : names_(StringsMatch) { |
| 99 } | 46 } |
| 100 | 47 |
| 101 | 48 |
| 102 StringsStorage::~StringsStorage() { | 49 StringsStorage::~StringsStorage() { |
| 103 for (HashMap::Entry* p = names_.Start(); | 50 for (HashMap::Entry* p = names_.Start(); |
| 104 p != NULL; | 51 p != NULL; |
| 105 p = names_.Next(p)) { | 52 p = names_.Next(p)) { |
| 106 DeleteArray(reinterpret_cast<const char*>(p->value)); | 53 DeleteArray(reinterpret_cast<const char*>(p->value)); |
| (...skipping 121 matching lines...) |
| 228 || (tag_ == entry->tag_ | 175 || (tag_ == entry->tag_ |
| 229 && shared_id_ == entry->shared_id_ | 176 && shared_id_ == entry->shared_id_ |
| 230 && (shared_id_ != 0 | 177 && (shared_id_ != 0 |
| 231 || (name_prefix_ == entry->name_prefix_ | 178 || (name_prefix_ == entry->name_prefix_ |
| 232 && name_ == entry->name_ | 179 && name_ == entry->name_ |
| 233 && resource_name_ == entry->resource_name_ | 180 && resource_name_ == entry->resource_name_ |
| 234 && line_number_ == entry->line_number_))); | 181 && line_number_ == entry->line_number_))); |
| 235 } | 182 } |
| 236 | 183 |
| 237 | 184 |
| 185 void CodeEntry::SetBuiltinId(Builtins::Name id) { |
| 186 tag_ = Logger::BUILTIN_TAG; |
| 187 builtin_id_ = id; |
| 188 } |
| 189 |
| 190 |
| 238 ProfileNode* ProfileNode::FindChild(CodeEntry* entry) { | 191 ProfileNode* ProfileNode::FindChild(CodeEntry* entry) { |
| 239 HashMap::Entry* map_entry = | 192 HashMap::Entry* map_entry = |
| 240 children_.Lookup(entry, CodeEntryHash(entry), false); | 193 children_.Lookup(entry, CodeEntryHash(entry), false); |
| 241 return map_entry != NULL ? | 194 return map_entry != NULL ? |
| 242 reinterpret_cast<ProfileNode*>(map_entry->value) : NULL; | 195 reinterpret_cast<ProfileNode*>(map_entry->value) : NULL; |
| 243 } | 196 } |
| 244 | 197 |
| 245 | 198 |
| 246 ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) { | 199 ProfileNode* ProfileNode::FindOrAddChild(CodeEntry* entry) { |
| 247 HashMap::Entry* map_entry = | 200 HashMap::Entry* map_entry = |
| (...skipping 12 matching lines...) |
| 260 return tree_->TicksToMillis(self_ticks_); | 213 return tree_->TicksToMillis(self_ticks_); |
| 261 } | 214 } |
| 262 | 215 |
| 263 | 216 |
| 264 double ProfileNode::GetTotalMillis() const { | 217 double ProfileNode::GetTotalMillis() const { |
| 265 return tree_->TicksToMillis(total_ticks_); | 218 return tree_->TicksToMillis(total_ticks_); |
| 266 } | 219 } |
| 267 | 220 |
| 268 | 221 |
| 269 void ProfileNode::Print(int indent) { | 222 void ProfileNode::Print(int indent) { |
| 270 OS::Print("%5u %5u %*c %s%s [%d] #%d", | 223 OS::Print("%5u %5u %*c %s%s #%d %d", |
| 271 total_ticks_, self_ticks_, | 224 total_ticks_, self_ticks_, |
| 272 indent, ' ', | 225 indent, ' ', |
| 273 entry_->name_prefix(), | 226 entry_->name_prefix(), |
| 274 entry_->name(), | 227 entry_->name(), |
| 275 entry_->security_token_id(), | 228 entry_->script_id(), |
| 276 id()); | 229 id()); |
| 277 if (entry_->resource_name()[0] != '\0') | 230 if (entry_->resource_name()[0] != '\0') |
| 278 OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number()); | 231 OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number()); |
| 279 OS::Print("\n"); | 232 OS::Print("\n"); |
| 280 for (HashMap::Entry* p = children_.Start(); | 233 for (HashMap::Entry* p = children_.Start(); |
| 281 p != NULL; | 234 p != NULL; |
| 282 p = children_.Next(p)) { | 235 p = children_.Next(p)) { |
| 283 reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2); | 236 reinterpret_cast<ProfileNode*>(p->value)->Print(indent + 2); |
| 284 } | 237 } |
| 285 } | 238 } |
| 286 | 239 |
| 287 | 240 |
| 288 class DeleteNodesCallback { | 241 class DeleteNodesCallback { |
| 289 public: | 242 public: |
| 290 void BeforeTraversingChild(ProfileNode*, ProfileNode*) { } | 243 void BeforeTraversingChild(ProfileNode*, ProfileNode*) { } |
| 291 | 244 |
| 292 void AfterAllChildrenTraversed(ProfileNode* node) { | 245 void AfterAllChildrenTraversed(ProfileNode* node) { |
| 293 delete node; | 246 delete node; |
| 294 } | 247 } |
| 295 | 248 |
| 296 void AfterChildTraversed(ProfileNode*, ProfileNode*) { } | 249 void AfterChildTraversed(ProfileNode*, ProfileNode*) { } |
| 297 }; | 250 }; |
| 298 | 251 |
| 299 | 252 |
| 300 ProfileTree::ProfileTree() | 253 ProfileTree::ProfileTree() |
| 301 : root_entry_(Logger::FUNCTION_TAG, "", "(root)"), | 254 : root_entry_(Logger::FUNCTION_TAG, "(root)"), |
| 302 next_node_id_(1), | 255 next_node_id_(1), |
| 303 root_(new ProfileNode(this, &root_entry_)) { | 256 root_(new ProfileNode(this, &root_entry_)) { |
| 304 } | 257 } |
| 305 | 258 |
| 306 | 259 |
| 307 ProfileTree::~ProfileTree() { | 260 ProfileTree::~ProfileTree() { |
| 308 DeleteNodesCallback cb; | 261 DeleteNodesCallback cb; |
| 309 TraverseDepthFirst(&cb); | 262 TraverseDepthFirst(&cb); |
| 310 } | 263 } |
| 311 | 264 |
| (...skipping 26 matching lines...) |
| 338 | 291 |
| 339 | 292 |
| 340 struct NodesPair { | 293 struct NodesPair { |
| 341 NodesPair(ProfileNode* src, ProfileNode* dst) | 294 NodesPair(ProfileNode* src, ProfileNode* dst) |
| 342 : src(src), dst(dst) { } | 295 : src(src), dst(dst) { } |
| 343 ProfileNode* src; | 296 ProfileNode* src; |
| 344 ProfileNode* dst; | 297 ProfileNode* dst; |
| 345 }; | 298 }; |
| 346 | 299 |
| 347 | 300 |
| 348 class FilteredCloneCallback { | |
| 349 public: | |
| 350 FilteredCloneCallback(ProfileNode* dst_root, int security_token_id) | |
| 351 : stack_(10), | |
| 352 security_token_id_(security_token_id) { | |
| 353 stack_.Add(NodesPair(NULL, dst_root)); | |
| 354 } | |
| 355 | |
| 356 void BeforeTraversingChild(ProfileNode* parent, ProfileNode* child) { | |
| 357 if (IsTokenAcceptable(child->entry()->security_token_id(), | |
| 358 parent->entry()->security_token_id())) { | |
| 359 ProfileNode* clone = stack_.last().dst->FindOrAddChild(child->entry()); | |
| 360 clone->IncreaseSelfTicks(child->self_ticks()); | |
| 361 stack_.Add(NodesPair(child, clone)); | |
| 362 } else { | |
| 363 // Attribute ticks to parent node. | |
| 364 stack_.last().dst->IncreaseSelfTicks(child->self_ticks()); | |
| 365 } | |
| 366 } | |
| 367 | |
| 368 void AfterAllChildrenTraversed(ProfileNode* parent) { } | |
| 369 | |
| 370 void AfterChildTraversed(ProfileNode*, ProfileNode* child) { | |
| 371 if (stack_.last().src == child) { | |
| 372 stack_.RemoveLast(); | |
| 373 } | |
| 374 } | |
| 375 | |
| 376 private: | |
| 377 bool IsTokenAcceptable(int token, int parent_token) { | |
| 378 if (token == TokenEnumerator::kNoSecurityToken | |
| 379 || token == security_token_id_) return true; | |
| 380 if (token == TokenEnumerator::kInheritsSecurityToken) { | |
| 381 ASSERT(parent_token != TokenEnumerator::kInheritsSecurityToken); | |
| 382 return parent_token == TokenEnumerator::kNoSecurityToken | |
| 383 || parent_token == security_token_id_; | |
| 384 } | |
| 385 return false; | |
| 386 } | |
| 387 | |
| 388 List<NodesPair> stack_; | |
| 389 int security_token_id_; | |
| 390 }; | |
| 391 | |
| 392 void ProfileTree::FilteredClone(ProfileTree* src, int security_token_id) { | |
| 393 ms_to_ticks_scale_ = src->ms_to_ticks_scale_; | |
| 394 FilteredCloneCallback cb(root_, security_token_id); | |
| 395 src->TraverseDepthFirst(&cb); | |
| 396 CalculateTotalTicks(); | |
| 397 } | |
| 398 | |
| 399 | |
| 400 void ProfileTree::SetTickRatePerMs(double ticks_per_ms) { | 301 void ProfileTree::SetTickRatePerMs(double ticks_per_ms) { |
| 401 ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0; | 302 ms_to_ticks_scale_ = ticks_per_ms > 0 ? 1.0 / ticks_per_ms : 1.0; |
| 402 } | 303 } |
| 403 | 304 |
| 404 | 305 |
| 405 class Position { | 306 class Position { |
| 406 public: | 307 public: |
| 407 explicit Position(ProfileNode* node) | 308 explicit Position(ProfileNode* node) |
| 408 : node(node), child_idx_(0) { } | 309 : node(node), child_idx_(0) { } |
| 409 INLINE(ProfileNode* current_child()) { | 310 INLINE(ProfileNode* current_child()) { |
| (...skipping 70 matching lines...) |
| 480 void CpuProfile::CalculateTotalTicks() { | 381 void CpuProfile::CalculateTotalTicks() { |
| 481 top_down_.CalculateTotalTicks(); | 382 top_down_.CalculateTotalTicks(); |
| 482 } | 383 } |
| 483 | 384 |
| 484 | 385 |
| 485 void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) { | 386 void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) { |
| 486 top_down_.SetTickRatePerMs(actual_sampling_rate); | 387 top_down_.SetTickRatePerMs(actual_sampling_rate); |
| 487 } | 388 } |
| 488 | 389 |
| 489 | 390 |
| 490 CpuProfile* CpuProfile::FilteredClone(int security_token_id) { | |
| 491 ASSERT(security_token_id != TokenEnumerator::kNoSecurityToken); | |
| 492 CpuProfile* clone = new CpuProfile(title_, uid_, false); | |
| 493 clone->top_down_.FilteredClone(&top_down_, security_token_id); | |
| 494 return clone; | |
| 495 } | |
| 496 | |
| 497 | |
| 498 void CpuProfile::ShortPrint() { | 391 void CpuProfile::ShortPrint() { |
| 499 OS::Print("top down "); | 392 OS::Print("top down "); |
| 500 top_down_.ShortPrint(); | 393 top_down_.ShortPrint(); |
| 501 } | 394 } |
| 502 | 395 |
| 503 | 396 |
| 504 void CpuProfile::Print() { | 397 void CpuProfile::Print() { |
| 505 OS::Print("[Top down]:\n"); | 398 OS::Print("[Top down]:\n"); |
| 506 top_down_.Print(); | 399 top_down_.Print(); |
| 507 } | 400 } |
| (...skipping 78 matching lines...) |
| 586 } | 479 } |
| 587 | 480 |
| 588 | 481 |
| 589 void CodeMap::Print() { | 482 void CodeMap::Print() { |
| 590 CodeTreePrinter printer; | 483 CodeTreePrinter printer; |
| 591 tree_.ForEach(&printer); | 484 tree_.ForEach(&printer); |
| 592 } | 485 } |
| 593 | 486 |
| 594 | 487 |
| 595 CpuProfilesCollection::CpuProfilesCollection() | 488 CpuProfilesCollection::CpuProfilesCollection() |
| 596 : profiles_uids_(UidsMatch), | 489 : current_profiles_semaphore_(OS::CreateSemaphore(1)) { |
| 597 current_profiles_semaphore_(OS::CreateSemaphore(1)) { | |
| 598 // Create list of unabridged profiles. | |
| 599 profiles_by_token_.Add(new List<CpuProfile*>()); | |
| 600 } | 490 } |
| 601 | 491 |
| 602 | 492 |
| 603 static void DeleteCodeEntry(CodeEntry** entry_ptr) { | 493 static void DeleteCodeEntry(CodeEntry** entry_ptr) { |
| 604 delete *entry_ptr; | 494 delete *entry_ptr; |
| 605 } | 495 } |
| 606 | 496 |
| 497 |
| 607 static void DeleteCpuProfile(CpuProfile** profile_ptr) { | 498 static void DeleteCpuProfile(CpuProfile** profile_ptr) { |
| 608 delete *profile_ptr; | 499 delete *profile_ptr; |
| 609 } | 500 } |
| 610 | 501 |
| 611 static void DeleteProfilesList(List<CpuProfile*>** list_ptr) { | |
| 612 if (*list_ptr != NULL) { | |
| 613 (*list_ptr)->Iterate(DeleteCpuProfile); | |
| 614 delete *list_ptr; | |
| 615 } | |
| 616 } | |
| 617 | 502 |
| 618 CpuProfilesCollection::~CpuProfilesCollection() { | 503 CpuProfilesCollection::~CpuProfilesCollection() { |
| 619 delete current_profiles_semaphore_; | 504 delete current_profiles_semaphore_; |
| 505 finished_profiles_.Iterate(DeleteCpuProfile); |
| 620 current_profiles_.Iterate(DeleteCpuProfile); | 506 current_profiles_.Iterate(DeleteCpuProfile); |
| 621 detached_profiles_.Iterate(DeleteCpuProfile); | |
| 622 profiles_by_token_.Iterate(DeleteProfilesList); | |
| 623 code_entries_.Iterate(DeleteCodeEntry); | 507 code_entries_.Iterate(DeleteCodeEntry); |
| 624 } | 508 } |
| 625 | 509 |
| 626 | 510 |
| 627 bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid, | 511 bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid, |
| 628 bool record_samples) { | 512 bool record_samples) { |
| 629 ASSERT(uid > 0); | 513 ASSERT(uid > 0); |
| 630 current_profiles_semaphore_->Wait(); | 514 current_profiles_semaphore_->Wait(); |
| 631 if (current_profiles_.length() >= kMaxSimultaneousProfiles) { | 515 if (current_profiles_.length() >= kMaxSimultaneousProfiles) { |
| 632 current_profiles_semaphore_->Signal(); | 516 current_profiles_semaphore_->Signal(); |
| 633 return false; | 517 return false; |
| 634 } | 518 } |
| 635 for (int i = 0; i < current_profiles_.length(); ++i) { | 519 for (int i = 0; i < current_profiles_.length(); ++i) { |
| 636 if (strcmp(current_profiles_[i]->title(), title) == 0) { | 520 if (strcmp(current_profiles_[i]->title(), title) == 0) { |
| 637 // Ignore attempts to start a profile with the same title. | 521 // Ignore attempts to start a profile with the same title. |
| 638 current_profiles_semaphore_->Signal(); | 522 current_profiles_semaphore_->Signal(); |
| 639 return false; | 523 return false; |
| 640 } | 524 } |
| 641 } | 525 } |
| 642 current_profiles_.Add(new CpuProfile(title, uid, record_samples)); | 526 current_profiles_.Add(new CpuProfile(title, uid, record_samples)); |
| 643 current_profiles_semaphore_->Signal(); | 527 current_profiles_semaphore_->Signal(); |
| 644 return true; | 528 return true; |
| 645 } | 529 } |
| 646 | 530 |
| 647 | 531 |
| 648 CpuProfile* CpuProfilesCollection::StopProfiling(int security_token_id, | 532 CpuProfile* CpuProfilesCollection::StopProfiling(const char* title, |
| 649 const char* title, | |
| 650 double actual_sampling_rate) { | 533 double actual_sampling_rate) { |
| 651 const int title_len = StrLength(title); | 534 const int title_len = StrLength(title); |
| 652 CpuProfile* profile = NULL; | 535 CpuProfile* profile = NULL; |
| 653 current_profiles_semaphore_->Wait(); | 536 current_profiles_semaphore_->Wait(); |
| 654 for (int i = current_profiles_.length() - 1; i >= 0; --i) { | 537 for (int i = current_profiles_.length() - 1; i >= 0; --i) { |
| 655 if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) { | 538 if (title_len == 0 || strcmp(current_profiles_[i]->title(), title) == 0) { |
| 656 profile = current_profiles_.Remove(i); | 539 profile = current_profiles_.Remove(i); |
| 657 break; | 540 break; |
| 658 } | 541 } |
| 659 } | 542 } |
| 660 current_profiles_semaphore_->Signal(); | 543 current_profiles_semaphore_->Signal(); |
| 661 | 544 |
| 662 if (profile != NULL) { | 545 if (profile == NULL) return NULL; |
| 663 profile->CalculateTotalTicks(); | 546 profile->CalculateTotalTicks(); |
| 664 profile->SetActualSamplingRate(actual_sampling_rate); | 547 profile->SetActualSamplingRate(actual_sampling_rate); |
| 665 List<CpuProfile*>* unabridged_list = | 548 finished_profiles_.Add(profile); |
| 666 profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)]; | 549 return profile; |
| 667 unabridged_list->Add(profile); | |
| 668 HashMap::Entry* entry = | |
| 669 profiles_uids_.Lookup(reinterpret_cast<void*>(profile->uid()), | |
| 670 static_cast<uint32_t>(profile->uid()), | |
| 671 true); | |
| 672 ASSERT(entry->value == NULL); | |
| 673 entry->value = reinterpret_cast<void*>(unabridged_list->length() - 1); | |
| 674 return GetProfile(security_token_id, profile->uid()); | |
| 675 } | |
| 676 return NULL; | |
| 677 } | 550 } |
| 678 | 551 |
| 679 | 552 |
| 680 CpuProfile* CpuProfilesCollection::GetProfile(int security_token_id, | |
| 681 unsigned uid) { | |
| 682 int index = GetProfileIndex(uid); | |
| 683 if (index < 0) return NULL; | |
| 684 List<CpuProfile*>* unabridged_list = | |
| 685 profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)]; | |
| 686 if (security_token_id == TokenEnumerator::kNoSecurityToken) { | |
| 687 return unabridged_list->at(index); | |
| 688 } | |
| 689 List<CpuProfile*>* list = GetProfilesList(security_token_id); | |
| 690 if (list->at(index) == NULL) { | |
| 691 (*list)[index] = | |
| 692 unabridged_list->at(index)->FilteredClone(security_token_id); | |
| 693 } | |
| 694 return list->at(index); | |
| 695 } | |
| 696 | |
| 697 | |
| 698 int CpuProfilesCollection::GetProfileIndex(unsigned uid) { | |
| 699 HashMap::Entry* entry = profiles_uids_.Lookup(reinterpret_cast<void*>(uid), | |
| 700 static_cast<uint32_t>(uid), | |
| 701 false); | |
| 702 return entry != NULL ? | |
| 703 static_cast<int>(reinterpret_cast<intptr_t>(entry->value)) : -1; | |
| 704 } | |
| 705 | |
| 706 | |
| 707 bool CpuProfilesCollection::IsLastProfile(const char* title) { | 553 bool CpuProfilesCollection::IsLastProfile(const char* title) { |
| 708 // Called from VM thread, and only it can mutate the list, | 554 // Called from VM thread, and only it can mutate the list, |
| 709 // so no locking is needed here. | 555 // so no locking is needed here. |
| 710 if (current_profiles_.length() != 1) return false; | 556 if (current_profiles_.length() != 1) return false; |
| 711 return StrLength(title) == 0 | 557 return StrLength(title) == 0 |
| 712 || strcmp(current_profiles_[0]->title(), title) == 0; | 558 || strcmp(current_profiles_[0]->title(), title) == 0; |
| 713 } | 559 } |
| 714 | 560 |
| 715 | 561 |
| 716 void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) { | 562 void CpuProfilesCollection::RemoveProfile(CpuProfile* profile) { |
| 717 // Called from VM thread for a completed profile. | 563 // Called from VM thread for a completed profile. |
| 718 unsigned uid = profile->uid(); | 564 unsigned uid = profile->uid(); |
| 719 int index = GetProfileIndex(uid); | 565 for (int i = 0; i < finished_profiles_.length(); i++) { |
| 720 if (index < 0) { | 566 if (uid == finished_profiles_[i]->uid()) { |
| 721 detached_profiles_.RemoveElement(profile); | 567 finished_profiles_.Remove(i); |
| 722 return; | 568 return; |
| 723 } | |
| 724 profiles_uids_.Remove(reinterpret_cast<void*>(uid), | |
| 725 static_cast<uint32_t>(uid)); | |
| 726 // Decrement all indexes above the deleted one. | |
| 727 for (HashMap::Entry* p = profiles_uids_.Start(); | |
| 728 p != NULL; | |
| 729 p = profiles_uids_.Next(p)) { | |
| 730 intptr_t p_index = reinterpret_cast<intptr_t>(p->value); | |
| 731 if (p_index > index) { | |
| 732 p->value = reinterpret_cast<void*>(p_index - 1); | |
| 733 } | 569 } |
| 734 } | 570 } |
| 735 for (int i = 0; i < profiles_by_token_.length(); ++i) { | 571 UNREACHABLE(); |
| 736 List<CpuProfile*>* list = profiles_by_token_[i]; | |
| 737 if (list != NULL && index < list->length()) { | |
| 738 // Move all filtered clones into detached_profiles_, | |
| 739 // so we can know that they are still in use. | |
| 740 CpuProfile* cloned_profile = list->Remove(index); | |
| 741 if (cloned_profile != NULL && cloned_profile != profile) { | |
| 742 detached_profiles_.Add(cloned_profile); | |
| 743 } | |
| 744 } | |
| 745 } | |
| 746 } | 572 } |
| 747 | 573 |
| 748 | 574 |
| 749 int CpuProfilesCollection::TokenToIndex(int security_token_id) { | |
| 750 ASSERT(TokenEnumerator::kNoSecurityToken == -1); | |
| 751 return security_token_id + 1; // kNoSecurityToken -> 0, 0 -> 1, ... | |
| 752 } | |
| 753 | |
| 754 | |
| 755 List<CpuProfile*>* CpuProfilesCollection::GetProfilesList( | |
| 756 int security_token_id) { | |
| 757 const int index = TokenToIndex(security_token_id); | |
| 758 const int lists_to_add = index - profiles_by_token_.length() + 1; | |
| 759 if (lists_to_add > 0) profiles_by_token_.AddBlock(NULL, lists_to_add); | |
| 760 List<CpuProfile*>* unabridged_list = | |
| 761 profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)]; | |
| 762 const int current_count = unabridged_list->length(); | |
| 763 if (profiles_by_token_[index] == NULL) { | |
| 764 profiles_by_token_[index] = new List<CpuProfile*>(current_count); | |
| 765 } | |
| 766 List<CpuProfile*>* list = profiles_by_token_[index]; | |
| 767 const int profiles_to_add = current_count - list->length(); | |
| 768 if (profiles_to_add > 0) list->AddBlock(NULL, profiles_to_add); | |
| 769 return list; | |
| 770 } | |
| 771 | |
| 772 | |
| 773 List<CpuProfile*>* CpuProfilesCollection::Profiles(int security_token_id) { | |
| 774 List<CpuProfile*>* unabridged_list = | |
| 775 profiles_by_token_[TokenToIndex(TokenEnumerator::kNoSecurityToken)]; | |
| 776 if (security_token_id == TokenEnumerator::kNoSecurityToken) { | |
| 777 return unabridged_list; | |
| 778 } | |
| 779 List<CpuProfile*>* list = GetProfilesList(security_token_id); | |
| 780 const int current_count = unabridged_list->length(); | |
| 781 for (int i = 0; i < current_count; ++i) { | |
| 782 if (list->at(i) == NULL) { | |
| 783 (*list)[i] = unabridged_list->at(i)->FilteredClone(security_token_id); | |
| 784 } | |
| 785 } | |
| 786 return list; | |
| 787 } | |
| 788 | |
| 789 | |
| 790 CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag, | |
| 791 Name* name, | |
| 792 String* resource_name, | |
| 793 int line_number) { | |
| 794 CodeEntry* entry = new CodeEntry(tag, | |
| 795 CodeEntry::kEmptyNamePrefix, | |
| 796 GetFunctionName(name), | |
| 797 TokenEnumerator::kNoSecurityToken, | |
| 798 GetName(resource_name), | |
| 799 line_number); | |
| 800 code_entries_.Add(entry); | |
| 801 return entry; | |
| 802 } | |
| 803 | |
| 804 | |
| 805 CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag, | |
| 806 const char* name) { | |
| 807 CodeEntry* entry = new CodeEntry(tag, | |
| 808 CodeEntry::kEmptyNamePrefix, | |
| 809 GetFunctionName(name)); | |
| 810 code_entries_.Add(entry); | |
| 811 return entry; | |
| 812 } | |
| 813 | |
| 814 | |
| 815 CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag, | |
| 816 const char* name_prefix, | |
| 817 Name* name) { | |
| 818 CodeEntry* entry = new CodeEntry(tag, | |
| 819 name_prefix, | |
| 820 GetName(name), | |
| 821 TokenEnumerator::kInheritsSecurityToken); | |
| 822 code_entries_.Add(entry); | |
| 823 return entry; | |
| 824 } | |
| 825 | |
| 826 | |
| 827 CodeEntry* CpuProfilesCollection::NewCodeEntry(Logger::LogEventsAndTags tag, | |
| 828 int args_count) { | |
| 829 CodeEntry* entry = new CodeEntry(tag, | |
| 830 "args_count: ", | |
| 831 GetName(args_count), | |
| 832 TokenEnumerator::kInheritsSecurityToken); | |
| 833 code_entries_.Add(entry); | |
| 834 return entry; | |
| 835 } | |
| 836 | |
| 837 | |
| 838 void CpuProfilesCollection::AddPathToCurrentProfiles( | 575 void CpuProfilesCollection::AddPathToCurrentProfiles( |
| 839 const Vector<CodeEntry*>& path) { | 576 const Vector<CodeEntry*>& path) { |
| 840 // As starting / stopping profiles is rare relative to this | 577 // As starting / stopping profiles is rare relative to this |
| 841 // method, we don't bother minimizing the duration of lock holding, | 578 // method, we don't bother minimizing the duration of lock holding, |
| 842 // e.g. copying contents of the list to a local vector. | 579 // e.g. copying contents of the list to a local vector. |
| 843 current_profiles_semaphore_->Wait(); | 580 current_profiles_semaphore_->Wait(); |
| 844 for (int i = 0; i < current_profiles_.length(); ++i) { | 581 for (int i = 0; i < current_profiles_.length(); ++i) { |
| 845 current_profiles_[i]->AddPath(path); | 582 current_profiles_[i]->AddPath(path); |
| 846 } | 583 } |
| 847 current_profiles_semaphore_->Signal(); | 584 current_profiles_semaphore_->Signal(); |
| 848 } | 585 } |
| 849 | 586 |
| 850 | 587 |
| 588 CodeEntry* CpuProfilesCollection::NewCodeEntry( |
| 589 Logger::LogEventsAndTags tag, |
| 590 const char* name, |
| 591 const char* name_prefix, |
| 592 const char* resource_name, |
| 593 int line_number) { |
| 594 CodeEntry* code_entry = new CodeEntry(tag, |
| 595 name, |
| 596 name_prefix, |
| 597 resource_name, |
| 598 line_number); |
| 599 code_entries_.Add(code_entry); |
| 600 return code_entry; |
| 601 } |
| 602 |
| 603 |
| 851 void SampleRateCalculator::Tick() { | 604 void SampleRateCalculator::Tick() { |
| 852 if (--wall_time_query_countdown_ == 0) | 605 if (--wall_time_query_countdown_ == 0) |
| 853 UpdateMeasurements(OS::TimeCurrentMillis()); | 606 UpdateMeasurements(OS::TimeCurrentMillis()); |
| 854 } | 607 } |
| 855 | 608 |
| 856 | 609 |
| 857 void SampleRateCalculator::UpdateMeasurements(double current_time) { | 610 void SampleRateCalculator::UpdateMeasurements(double current_time) { |
| 858 if (measurements_count_++ != 0) { | 611 if (measurements_count_++ != 0) { |
| 859 const double measured_ticks_per_ms = | 612 const double measured_ticks_per_ms = |
| 860 (kWallTimeQueryIntervalMs * ticks_per_ms_) / | 613 (kWallTimeQueryIntervalMs * ticks_per_ms_) / |
| 861 (current_time - last_wall_time_); | 614 (current_time - last_wall_time_); |
| 862 // Update the average value. | 615 // Update the average value. |
| 863 ticks_per_ms_ += | 616 ticks_per_ms_ += |
| 864 (measured_ticks_per_ms - ticks_per_ms_) / measurements_count_; | 617 (measured_ticks_per_ms - ticks_per_ms_) / measurements_count_; |
| 865 // Update the externally accessible result. | 618 // Update the externally accessible result. |
| 866 result_ = static_cast<AtomicWord>(ticks_per_ms_ * kResultScale); | 619 result_ = static_cast<AtomicWord>(ticks_per_ms_ * kResultScale); |
| 867 } | 620 } |
| 868 last_wall_time_ = current_time; | 621 last_wall_time_ = current_time; |
| 869 wall_time_query_countdown_ = | 622 wall_time_query_countdown_ = |
| 870 static_cast<unsigned>(kWallTimeQueryIntervalMs * ticks_per_ms_); | 623 static_cast<unsigned>(kWallTimeQueryIntervalMs * ticks_per_ms_); |
| 871 } | 624 } |
| 872 | 625 |
| 873 | 626 |
| 874 const char* const ProfileGenerator::kAnonymousFunctionName = | 627 const char* const ProfileGenerator::kAnonymousFunctionName = |
| 875 "(anonymous function)"; | 628 "(anonymous function)"; |
| 876 const char* const ProfileGenerator::kProgramEntryName = | 629 const char* const ProfileGenerator::kProgramEntryName = |
| 877 "(program)"; | 630 "(program)"; |
| 878 const char* const ProfileGenerator::kGarbageCollectorEntryName = | 631 const char* const ProfileGenerator::kGarbageCollectorEntryName = |
| 879 "(garbage collector)"; | 632 "(garbage collector)"; |
| 633 const char* const ProfileGenerator::kUnresolvedFunctionName = |
| 634 "(unresolved function)"; |
| 880 | 635 |
| 881 | 636 |
| 882 ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles) | 637 ProfileGenerator::ProfileGenerator(CpuProfilesCollection* profiles) |
| 883 : profiles_(profiles), | 638 : profiles_(profiles), |
| 884 program_entry_( | 639 program_entry_( |
| 885 profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)), | 640 profiles->NewCodeEntry(Logger::FUNCTION_TAG, kProgramEntryName)), |
| 886 gc_entry_( | 641 gc_entry_( |
| 887 profiles->NewCodeEntry(Logger::BUILTIN_TAG, | 642 profiles->NewCodeEntry(Logger::BUILTIN_TAG, |
| 888 kGarbageCollectorEntryName)) { | 643 kGarbageCollectorEntryName)), |
| 644 unresolved_entry_( |
| 645 profiles->NewCodeEntry(Logger::FUNCTION_TAG, |
| 646 kUnresolvedFunctionName)) { |
| 889 } | 647 } |
| 890 | 648 |
| 891 | 649 |
| 892 void ProfileGenerator::RecordTickSample(const TickSample& sample) { | 650 void ProfileGenerator::RecordTickSample(const TickSample& sample) { |
| 893 // Allocate space for stack frames + pc + function + vm-state. | 651 // Allocate space for stack frames + pc + function + vm-state. |
| 894 ScopedVector<CodeEntry*> entries(sample.frames_count + 3); | 652 ScopedVector<CodeEntry*> entries(sample.frames_count + 3); |
| 895 // As the actual number of decoded code entries may vary, initialize | 653 // As the actual number of decoded code entries may vary, initialize |
| 896 // the entries vector with NULL values. | 654 // the entries vector with NULL values. |
| 897 CodeEntry** entry = entries.start(); | 655 CodeEntry** entry = entries.start(); |
| 898 memset(entry, 0, entries.length() * sizeof(*entry)); | 656 memset(entry, 0, entries.length() * sizeof(*entry)); |
| 899 if (sample.pc != NULL) { | 657 if (sample.pc != NULL) { |
| 900 Address start; | 658 if (sample.has_external_callback) { |
| 901 CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start); | 659 // Don't use PC when in external callback code, as it can point |
| 902 // If pc is in the function code before it set up stack frame or after the | 660 // inside callback's code, and we will erroneously report |
| 903 // frame was destroyed SafeStackFrameIterator incorrectly thinks that | 661 // that a callback calls itself. |
| 904 // ebp contains return address of the current function and skips caller's | 662 *entry++ = code_map_.FindEntry(sample.external_callback); |
| 905 // frame. Check for this case and just skip such samples. | 663 } else { |
| 906 if (pc_entry) { | 664 Address start; |
| 907 List<OffsetRange>* ranges = pc_entry->no_frame_ranges(); | 665 CodeEntry* pc_entry = code_map_.FindEntry(sample.pc, &start); |
| 908 if (ranges) { | 666 // If pc is in the function code before it set up stack frame or after the |
| 909 Code* code = Code::cast(HeapObject::FromAddress(start)); | 667 // frame was destroyed SafeStackFrameIterator incorrectly thinks that |
| 910 int pc_offset = static_cast<int>(sample.pc - code->instruction_start()); | 668 // ebp contains return address of the current function and skips caller's |
| 911 for (int i = 0; i < ranges->length(); i++) { | 669 // frame. Check for this case and just skip such samples. |
| 912 OffsetRange& range = ranges->at(i); | 670 if (pc_entry) { |
| 913 if (range.from <= pc_offset && pc_offset < range.to) { | 671 List<OffsetRange>* ranges = pc_entry->no_frame_ranges(); |
| 914 return; | 672 if (ranges) { |
| 673 Code* code = Code::cast(HeapObject::FromAddress(start)); |
| 674 int pc_offset = static_cast<int>( |
| 675 sample.pc - code->instruction_start()); |
| 676 for (int i = 0; i < ranges->length(); i++) { |
| 677 OffsetRange& range = ranges->at(i); |
| 678 if (range.from <= pc_offset && pc_offset < range.to) { |
| 679 return; |
| 680 } |
| 681 } |
| 682 } |
| 683 *entry++ = pc_entry; |
| 684 |
| 685 if (pc_entry->builtin_id() == Builtins::kFunctionCall || |
| 686 pc_entry->builtin_id() == Builtins::kFunctionApply) { |
| 687 // When current function is FunctionCall or FunctionApply builtin the |
| 688 // top frame is either frame of the calling JS function or internal |
| 689 // frame. In the latter case we know the caller for sure but in the |
| 690 // former case we don't so we simply replace the frame with |
| 691 // 'unresolved' entry. |
| 692 if (sample.top_frame_type == StackFrame::JAVA_SCRIPT) { |
| 693 *entry++ = unresolved_entry_; |
| 915 } | 694 } |
| 916 } | 695 } |
| 917 } | 696 } |
| 918 } | 697 } |
| 919 *entry++ = pc_entry; | |
| 920 | |
| 921 if (sample.has_external_callback) { | |
| 922 // Don't use PC when in external callback code, as it can point | |
| 923 // inside callback's code, and we will erroneously report | |
| 924 // that a callback calls itself. | |
| 925 *(entries.start()) = NULL; | |
| 926 *entry++ = code_map_.FindEntry(sample.external_callback); | |
| 927 } | |
| 928 | 698 |
| 929 for (const Address* stack_pos = sample.stack, | 699 for (const Address* stack_pos = sample.stack, |
| 930 *stack_end = stack_pos + sample.frames_count; | 700 *stack_end = stack_pos + sample.frames_count; |
| 931 stack_pos != stack_end; | 701 stack_pos != stack_end; |
| 932 ++stack_pos) { | 702 ++stack_pos) { |
| 933 *entry++ = code_map_.FindEntry(*stack_pos); | 703 *entry++ = code_map_.FindEntry(*stack_pos); |
| 934 } | 704 } |
| 935 } | 705 } |
| 936 | 706 |
| 937 if (FLAG_prof_browser_mode) { | 707 if (FLAG_prof_browser_mode) { |
| 938 bool no_symbolized_entries = true; | 708 bool no_symbolized_entries = true; |
| 939 for (CodeEntry** e = entries.start(); e != entry; ++e) { | 709 for (CodeEntry** e = entries.start(); e != entry; ++e) { |
| 940 if (*e != NULL) { | 710 if (*e != NULL) { |
| 941 no_symbolized_entries = false; | 711 no_symbolized_entries = false; |
| 942 break; | 712 break; |
| 943 } | 713 } |
| 944 } | 714 } |
| 945 // If no frames were symbolized, put the VM state entry in. | 715 // If no frames were symbolized, put the VM state entry in. |
| 946 if (no_symbolized_entries) { | 716 if (no_symbolized_entries) { |
| 947 *entry++ = EntryForVMState(sample.state); | 717 *entry++ = EntryForVMState(sample.state); |
| 948 } | 718 } |
| 949 } | 719 } |
| 950 | 720 |
| 951 profiles_->AddPathToCurrentProfiles(entries); | 721 profiles_->AddPathToCurrentProfiles(entries); |
| 952 } | 722 } |
| 953 | 723 |
| 954 | 724 |
| 955 } } // namespace v8::internal | 725 } } // namespace v8::internal |