Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(738)

Side by Side Diff: src/heap-snapshot-generator.cc

Issue 39973003: Merge bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/parser
Patch Set: again Created 7 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/heap-snapshot-generator.h ('k') | src/hydrogen.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 11 matching lines...) Expand all
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #include "v8.h" 28 #include "v8.h"
29 29
30 #include "heap-snapshot-generator-inl.h" 30 #include "heap-snapshot-generator-inl.h"
31 31
32 #include "allocation-tracker.h"
32 #include "heap-profiler.h" 33 #include "heap-profiler.h"
33 #include "debug.h" 34 #include "debug.h"
34 #include "types.h" 35 #include "types.h"
35 36
36 namespace v8 { 37 namespace v8 {
37 namespace internal { 38 namespace internal {
38 39
39 40
40 HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to) 41 HeapGraphEdge::HeapGraphEdge(Type type, const char* name, int from, int to)
41 : type_(type), 42 : type_(type),
(...skipping 348 matching lines...) Expand 10 before | Expand all | Expand 10 after
390 // This fact is used in the MoveObject method. 391 // This fact is used in the MoveObject method.
391 entries_.Add(EntryInfo(0, NULL, 0)); 392 entries_.Add(EntryInfo(0, NULL, 0));
392 } 393 }
393 394
394 395
395 void HeapObjectsMap::SnapshotGenerationFinished() { 396 void HeapObjectsMap::SnapshotGenerationFinished() {
396 RemoveDeadEntries(); 397 RemoveDeadEntries();
397 } 398 }
398 399
399 400
400 void HeapObjectsMap::MoveObject(Address from, Address to) { 401 void HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
401 ASSERT(to != NULL); 402 ASSERT(to != NULL);
402 ASSERT(from != NULL); 403 ASSERT(from != NULL);
403 if (from == to) return; 404 if (from == to) return;
404 void* from_value = entries_map_.Remove(from, ComputePointerHash(from)); 405 void* from_value = entries_map_.Remove(from, ComputePointerHash(from));
405 if (from_value == NULL) { 406 if (from_value == NULL) {
406 // It may occur that some untracked object moves to an address X and there 407 // It may occur that some untracked object moves to an address X and there
407 // is a tracked object at that address. In this case we should remove the 408 // is a tracked object at that address. In this case we should remove the
408 // entry as we know that the object has died. 409 // entry as we know that the object has died.
409 void* to_value = entries_map_.Remove(to, ComputePointerHash(to)); 410 void* to_value = entries_map_.Remove(to, ComputePointerHash(to));
410 if (to_value != NULL) { 411 if (to_value != NULL) {
(...skipping 10 matching lines...) Expand all
421 // value in addr field. It is bad because later at RemoveDeadEntries 422 // value in addr field. It is bad because later at RemoveDeadEntries
422 // one of these entries will be removed with the corresponding entries_map_ 423 // one of these entries will be removed with the corresponding entries_map_
423 // entry. 424 // entry.
424 int to_entry_info_index = 425 int to_entry_info_index =
425 static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value)); 426 static_cast<int>(reinterpret_cast<intptr_t>(to_entry->value));
426 entries_.at(to_entry_info_index).addr = NULL; 427 entries_.at(to_entry_info_index).addr = NULL;
427 } 428 }
428 int from_entry_info_index = 429 int from_entry_info_index =
429 static_cast<int>(reinterpret_cast<intptr_t>(from_value)); 430 static_cast<int>(reinterpret_cast<intptr_t>(from_value));
430 entries_.at(from_entry_info_index).addr = to; 431 entries_.at(from_entry_info_index).addr = to;
432 // Size of an object can change during its life, so to keep information
433 // about the object in entries_ consistent, we have to adjust size when the
434 // object is migrated.
435 if (FLAG_heap_profiler_trace_objects) {
436 PrintF("Move object from %p to %p old size %6d new size %6d\n",
437 from,
438 to,
439 entries_.at(from_entry_info_index).size,
440 object_size);
441 }
442 entries_.at(from_entry_info_index).size = object_size;
431 to_entry->value = from_value; 443 to_entry->value = from_value;
432 } 444 }
433 } 445 }
434 446
435 447
448 void HeapObjectsMap::NewObject(Address addr, int size) {
449 if (FLAG_heap_profiler_trace_objects) {
450 PrintF("New object : %p %6d. Next address is %p\n",
451 addr,
452 size,
453 addr + size);
454 }
455 ASSERT(addr != NULL);
456 FindOrAddEntry(addr, size, false);
457 }
458
459
460 void HeapObjectsMap::UpdateObjectSize(Address addr, int size) {
461 FindOrAddEntry(addr, size, false);
462 }
463
464
436 SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) { 465 SnapshotObjectId HeapObjectsMap::FindEntry(Address addr) {
437 HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr), 466 HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
438 false); 467 false);
439 if (entry == NULL) return 0; 468 if (entry == NULL) return 0;
440 int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); 469 int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
441 EntryInfo& entry_info = entries_.at(entry_index); 470 EntryInfo& entry_info = entries_.at(entry_index);
442 ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); 471 ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
443 return entry_info.id; 472 return entry_info.id;
444 } 473 }
445 474
446 475
447 SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr, 476 SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
448 unsigned int size) { 477 unsigned int size,
478 bool accessed) {
449 ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); 479 ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
450 HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr), 480 HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
451 true); 481 true);
452 if (entry->value != NULL) { 482 if (entry->value != NULL) {
453 int entry_index = 483 int entry_index =
454 static_cast<int>(reinterpret_cast<intptr_t>(entry->value)); 484 static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
455 EntryInfo& entry_info = entries_.at(entry_index); 485 EntryInfo& entry_info = entries_.at(entry_index);
456 entry_info.accessed = true; 486 entry_info.accessed = accessed;
487 if (FLAG_heap_profiler_trace_objects) {
488 PrintF("Update object size : %p with old size %d and new size %d\n",
489 addr,
490 entry_info.size,
491 size);
492 }
457 entry_info.size = size; 493 entry_info.size = size;
458 return entry_info.id; 494 return entry_info.id;
459 } 495 }
460 entry->value = reinterpret_cast<void*>(entries_.length()); 496 entry->value = reinterpret_cast<void*>(entries_.length());
461 SnapshotObjectId id = next_id_; 497 SnapshotObjectId id = next_id_;
462 next_id_ += kObjectIdStep; 498 next_id_ += kObjectIdStep;
463 entries_.Add(EntryInfo(id, addr, size)); 499 entries_.Add(EntryInfo(id, addr, size, accessed));
464 ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); 500 ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
465 return id; 501 return id;
466 } 502 }
467 503
468 504
469 void HeapObjectsMap::StopHeapObjectsTracking() { 505 void HeapObjectsMap::StopHeapObjectsTracking() {
470 time_intervals_.Clear(); 506 time_intervals_.Clear();
471 } 507 }
472 508
473 509
474 void HeapObjectsMap::UpdateHeapObjectsMap() { 510 void HeapObjectsMap::UpdateHeapObjectsMap() {
511 if (FLAG_heap_profiler_trace_objects) {
512 PrintF("Begin HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
513 entries_map_.occupancy());
514 }
475 heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask, 515 heap_->CollectAllGarbage(Heap::kMakeHeapIterableMask,
476 "HeapSnapshotsCollection::UpdateHeapObjectsMap"); 516 "HeapSnapshotsCollection::UpdateHeapObjectsMap");
477 HeapIterator iterator(heap_); 517 HeapIterator iterator(heap_);
478 for (HeapObject* obj = iterator.next(); 518 for (HeapObject* obj = iterator.next();
479 obj != NULL; 519 obj != NULL;
480 obj = iterator.next()) { 520 obj = iterator.next()) {
481 FindOrAddEntry(obj->address(), obj->Size()); 521 FindOrAddEntry(obj->address(), obj->Size());
522 if (FLAG_heap_profiler_trace_objects) {
523 PrintF("Update object : %p %6d. Next address is %p\n",
524 obj->address(),
525 obj->Size(),
526 obj->address() + obj->Size());
527 }
482 } 528 }
483 RemoveDeadEntries(); 529 RemoveDeadEntries();
530 if (FLAG_heap_profiler_trace_objects) {
531 PrintF("End HeapObjectsMap::UpdateHeapObjectsMap. map has %d entries.\n",
532 entries_map_.occupancy());
533 }
484 } 534 }
485 535
486 536
537 namespace {
538
539
540 struct HeapObjectInfo {
541 HeapObjectInfo(HeapObject* obj, int expected_size)
542 : obj(obj),
543 expected_size(expected_size) {
544 }
545
546 HeapObject* obj;
547 int expected_size;
548
549 bool IsValid() const { return expected_size == obj->Size(); }
550
551 void Print() const {
552 if (expected_size == 0) {
553 PrintF("Untracked object : %p %6d. Next address is %p\n",
554 obj->address(),
555 obj->Size(),
556 obj->address() + obj->Size());
557 } else if (obj->Size() != expected_size) {
558 PrintF("Wrong size %6d: %p %6d. Next address is %p\n",
559 expected_size,
560 obj->address(),
561 obj->Size(),
562 obj->address() + obj->Size());
563 } else {
564 PrintF("Good object : %p %6d. Next address is %p\n",
565 obj->address(),
566 expected_size,
567 obj->address() + obj->Size());
568 }
569 }
570 };
571
572
573 static int comparator(const HeapObjectInfo* a, const HeapObjectInfo* b) {
574 if (a->obj < b->obj) return -1;
575 if (a->obj > b->obj) return 1;
576 return 0;
577 }
578
579
580 } // namespace
581
582
583 int HeapObjectsMap::FindUntrackedObjects() {
584 List<HeapObjectInfo> heap_objects(1000);
585
586 HeapIterator iterator(heap_);
587 int untracked = 0;
588 for (HeapObject* obj = iterator.next();
589 obj != NULL;
590 obj = iterator.next()) {
591 HashMap::Entry* entry = entries_map_.Lookup(
592 obj->address(), ComputePointerHash(obj->address()), false);
593 if (entry == NULL) {
594 ++untracked;
595 if (FLAG_heap_profiler_trace_objects) {
596 heap_objects.Add(HeapObjectInfo(obj, 0));
597 }
598 } else {
599 int entry_index = static_cast<int>(
600 reinterpret_cast<intptr_t>(entry->value));
601 EntryInfo& entry_info = entries_.at(entry_index);
602 if (FLAG_heap_profiler_trace_objects) {
603 heap_objects.Add(HeapObjectInfo(obj,
604 static_cast<int>(entry_info.size)));
605 if (obj->Size() != static_cast<int>(entry_info.size))
606 ++untracked;
607 } else {
608 CHECK_EQ(obj->Size(), static_cast<int>(entry_info.size));
609 }
610 }
611 }
612 if (FLAG_heap_profiler_trace_objects) {
613 PrintF("\nBegin HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n",
614 entries_map_.occupancy());
615 heap_objects.Sort(comparator);
616 int last_printed_object = -1;
617 bool print_next_object = false;
618 for (int i = 0; i < heap_objects.length(); ++i) {
619 const HeapObjectInfo& object_info = heap_objects[i];
620 if (!object_info.IsValid()) {
621 ++untracked;
622 if (last_printed_object != i - 1) {
623 if (i > 0) {
624 PrintF("%d objects were skipped\n", i - 1 - last_printed_object);
625 heap_objects[i - 1].Print();
626 }
627 }
628 object_info.Print();
629 last_printed_object = i;
630 print_next_object = true;
631 } else if (print_next_object) {
632 object_info.Print();
633 print_next_object = false;
634 last_printed_object = i;
635 }
636 }
637 if (last_printed_object < heap_objects.length() - 1) {
638 PrintF("Last %d objects were skipped\n",
639 heap_objects.length() - 1 - last_printed_object);
640 }
641 PrintF("End HeapObjectsMap::FindUntrackedObjects. %d entries in map.\n\n",
642 entries_map_.occupancy());
643 }
644 return untracked;
645 }
646
647
487 SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) { 648 SnapshotObjectId HeapObjectsMap::PushHeapObjectsStats(OutputStream* stream) {
488 UpdateHeapObjectsMap(); 649 UpdateHeapObjectsMap();
489 time_intervals_.Add(TimeInterval(next_id_)); 650 time_intervals_.Add(TimeInterval(next_id_));
490 int prefered_chunk_size = stream->GetChunkSize(); 651 int prefered_chunk_size = stream->GetChunkSize();
491 List<v8::HeapStatsUpdate> stats_buffer; 652 List<v8::HeapStatsUpdate> stats_buffer;
492 ASSERT(!entries_.is_empty()); 653 ASSERT(!entries_.is_empty());
493 EntryInfo* entry_info = &entries_.first(); 654 EntryInfo* entry_info = &entries_.first();
494 EntryInfo* end_entry_info = &entries_.last() + 1; 655 EntryInfo* end_entry_info = &entries_.last() + 1;
495 for (int time_interval_index = 0; 656 for (int time_interval_index = 0;
496 time_interval_index < time_intervals_.length(); 657 time_interval_index < time_intervals_.length();
(...skipping 83 matching lines...) Expand 10 before | Expand all | Expand 10 after
580 sizeof(*this) + 741 sizeof(*this) +
581 sizeof(HashMap::Entry) * entries_map_.capacity() + 742 sizeof(HashMap::Entry) * entries_map_.capacity() +
582 GetMemoryUsedByList(entries_) + 743 GetMemoryUsedByList(entries_) +
583 GetMemoryUsedByList(time_intervals_); 744 GetMemoryUsedByList(time_intervals_);
584 } 745 }
585 746
586 747
587 HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap) 748 HeapSnapshotsCollection::HeapSnapshotsCollection(Heap* heap)
588 : is_tracking_objects_(false), 749 : is_tracking_objects_(false),
589 names_(heap), 750 names_(heap),
590 ids_(heap) { 751 ids_(heap),
752 allocation_tracker_(NULL) {
591 } 753 }
592 754
593 755
594 static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) { 756 static void DeleteHeapSnapshot(HeapSnapshot** snapshot_ptr) {
595 delete *snapshot_ptr; 757 delete *snapshot_ptr;
596 } 758 }
597 759
598 760
599 HeapSnapshotsCollection::~HeapSnapshotsCollection() { 761 HeapSnapshotsCollection::~HeapSnapshotsCollection() {
762 delete allocation_tracker_;
600 snapshots_.Iterate(DeleteHeapSnapshot); 763 snapshots_.Iterate(DeleteHeapSnapshot);
601 } 764 }
602 765
603 766
767 void HeapSnapshotsCollection::StartHeapObjectsTracking() {
768 ids_.UpdateHeapObjectsMap();
769 if (allocation_tracker_ == NULL) {
770 allocation_tracker_ = new AllocationTracker(&ids_, names());
771 }
772 is_tracking_objects_ = true;
773 }
774
775
776 void HeapSnapshotsCollection::StopHeapObjectsTracking() {
777 ids_.StopHeapObjectsTracking();
778 if (allocation_tracker_ != NULL) {
779 delete allocation_tracker_;
780 allocation_tracker_ = NULL;
781 }
782 }
783
784
604 HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name, 785 HeapSnapshot* HeapSnapshotsCollection::NewSnapshot(const char* name,
605 unsigned uid) { 786 unsigned uid) {
606 is_tracking_objects_ = true; // Start watching for heap objects moves. 787 is_tracking_objects_ = true; // Start watching for heap objects moves.
607 return new HeapSnapshot(this, name, uid); 788 return new HeapSnapshot(this, name, uid);
608 } 789 }
609 790
610 791
611 void HeapSnapshotsCollection::SnapshotGenerationFinished( 792 void HeapSnapshotsCollection::SnapshotGenerationFinished(
612 HeapSnapshot* snapshot) { 793 HeapSnapshot* snapshot) {
613 ids_.SnapshotGenerationFinished(); 794 ids_.SnapshotGenerationFinished();
(...skipping 23 matching lines...) Expand all
637 if (ids_.FindEntry(obj->address()) == id) { 818 if (ids_.FindEntry(obj->address()) == id) {
638 ASSERT(object == NULL); 819 ASSERT(object == NULL);
639 object = obj; 820 object = obj;
640 // Can't break -- kFilterUnreachable requires full heap traversal. 821 // Can't break -- kFilterUnreachable requires full heap traversal.
641 } 822 }
642 } 823 }
643 return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>(); 824 return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
644 } 825 }
645 826
646 827
828 void HeapSnapshotsCollection::NewObjectEvent(Address addr, int size) {
829 DisallowHeapAllocation no_allocation;
830 ids_.NewObject(addr, size);
831 if (allocation_tracker_ != NULL) {
832 allocation_tracker_->NewObjectEvent(addr, size);
833 }
834 }
835
836
647 size_t HeapSnapshotsCollection::GetUsedMemorySize() const { 837 size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
648 size_t size = sizeof(*this); 838 size_t size = sizeof(*this);
649 size += names_.GetUsedMemorySize(); 839 size += names_.GetUsedMemorySize();
650 size += ids_.GetUsedMemorySize(); 840 size += ids_.GetUsedMemorySize();
651 size += GetMemoryUsedByList(snapshots_); 841 size += GetMemoryUsedByList(snapshots_);
652 for (int i = 0; i < snapshots_.length(); ++i) { 842 for (int i = 0; i < snapshots_.length(); ++i) {
653 size += snapshots_[i]->RawSnapshotSize(); 843 size += snapshots_[i]->RawSnapshotSize();
654 } 844 }
655 return size; 845 return size;
656 } 846 }
(...skipping 1778 matching lines...) Expand 10 before | Expand all | Expand 10 after
2435 bool aborted_; 2625 bool aborted_;
2436 }; 2626 };
2437 2627
2438 2628
2439 // type, name|index, to_node. 2629 // type, name|index, to_node.
2440 const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3; 2630 const int HeapSnapshotJSONSerializer::kEdgeFieldsCount = 3;
2441 // type, name, id, self_size, children_index. 2631 // type, name, id, self_size, children_index.
2442 const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5; 2632 const int HeapSnapshotJSONSerializer::kNodeFieldsCount = 5;
2443 2633
2444 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) { 2634 void HeapSnapshotJSONSerializer::Serialize(v8::OutputStream* stream) {
2635 if (AllocationTracker* allocation_tracker =
2636 snapshot_->collection()->allocation_tracker()) {
2637 allocation_tracker->PrepareForSerialization();
2638 }
2445 ASSERT(writer_ == NULL); 2639 ASSERT(writer_ == NULL);
2446 writer_ = new OutputStreamWriter(stream); 2640 writer_ = new OutputStreamWriter(stream);
2447 SerializeImpl(); 2641 SerializeImpl();
2448 delete writer_; 2642 delete writer_;
2449 writer_ = NULL; 2643 writer_ = NULL;
2450 } 2644 }
2451 2645
2452 2646
2453 void HeapSnapshotJSONSerializer::SerializeImpl() { 2647 void HeapSnapshotJSONSerializer::SerializeImpl() {
2454 ASSERT(0 == snapshot_->root()->index()); 2648 ASSERT(0 == snapshot_->root()->index());
2455 writer_->AddCharacter('{'); 2649 writer_->AddCharacter('{');
2456 writer_->AddString("\"snapshot\":{"); 2650 writer_->AddString("\"snapshot\":{");
2457 SerializeSnapshot(); 2651 SerializeSnapshot();
2458 if (writer_->aborted()) return; 2652 if (writer_->aborted()) return;
2459 writer_->AddString("},\n"); 2653 writer_->AddString("},\n");
2460 writer_->AddString("\"nodes\":["); 2654 writer_->AddString("\"nodes\":[");
2461 SerializeNodes(); 2655 SerializeNodes();
2462 if (writer_->aborted()) return; 2656 if (writer_->aborted()) return;
2463 writer_->AddString("],\n"); 2657 writer_->AddString("],\n");
2464 writer_->AddString("\"edges\":["); 2658 writer_->AddString("\"edges\":[");
2465 SerializeEdges(); 2659 SerializeEdges();
2466 if (writer_->aborted()) return; 2660 if (writer_->aborted()) return;
2467 writer_->AddString("],\n"); 2661 writer_->AddString("],\n");
2662
2663 writer_->AddString("\"trace_function_infos\":[");
2664 SerializeTraceNodeInfos();
2665 if (writer_->aborted()) return;
2666 writer_->AddString("],\n");
2667 writer_->AddString("\"trace_tree\":[");
2668 SerializeTraceTree();
2669 if (writer_->aborted()) return;
2670 writer_->AddString("],\n");
2671
2468 writer_->AddString("\"strings\":["); 2672 writer_->AddString("\"strings\":[");
2469 SerializeStrings(); 2673 SerializeStrings();
2470 if (writer_->aborted()) return; 2674 if (writer_->aborted()) return;
2471 writer_->AddCharacter(']'); 2675 writer_->AddCharacter(']');
2472 writer_->AddCharacter('}'); 2676 writer_->AddCharacter('}');
2473 writer_->Finalize(); 2677 writer_->Finalize();
2474 } 2678 }
2475 2679
2476 2680
2477 int HeapSnapshotJSONSerializer::GetStringId(const char* s) { 2681 int HeapSnapshotJSONSerializer::GetStringId(const char* s) {
(...skipping 140 matching lines...) Expand 10 before | Expand all | Expand 10 after
2618 JSON_S("edge_types") ":" JSON_A( 2822 JSON_S("edge_types") ":" JSON_A(
2619 JSON_A( 2823 JSON_A(
2620 JSON_S("context") "," 2824 JSON_S("context") ","
2621 JSON_S("element") "," 2825 JSON_S("element") ","
2622 JSON_S("property") "," 2826 JSON_S("property") ","
2623 JSON_S("internal") "," 2827 JSON_S("internal") ","
2624 JSON_S("hidden") "," 2828 JSON_S("hidden") ","
2625 JSON_S("shortcut") "," 2829 JSON_S("shortcut") ","
2626 JSON_S("weak")) "," 2830 JSON_S("weak")) ","
2627 JSON_S("string_or_number") "," 2831 JSON_S("string_or_number") ","
2628 JSON_S("node")))); 2832 JSON_S("node")) ","
2833 JSON_S("trace_function_info_fields") ":" JSON_A(
2834 JSON_S("function_id") ","
2835 JSON_S("name") ","
2836 JSON_S("script_name") ","
2837 JSON_S("script_id") ","
2838 JSON_S("line") ","
2839 JSON_S("column")) ","
2840 JSON_S("trace_node_fields") ":" JSON_A(
2841 JSON_S("id") ","
2842 JSON_S("function_id") ","
2843 JSON_S("count") ","
2844 JSON_S("size") ","
2845 JSON_S("children"))));
2629 #undef JSON_S 2846 #undef JSON_S
2630 #undef JSON_O 2847 #undef JSON_O
2631 #undef JSON_A 2848 #undef JSON_A
2632 writer_->AddString(",\"node_count\":"); 2849 writer_->AddString(",\"node_count\":");
2633 writer_->AddNumber(snapshot_->entries().length()); 2850 writer_->AddNumber(snapshot_->entries().length());
2634 writer_->AddString(",\"edge_count\":"); 2851 writer_->AddString(",\"edge_count\":");
2635 writer_->AddNumber(snapshot_->edges().length()); 2852 writer_->AddNumber(snapshot_->edges().length());
2853 writer_->AddString(",\"trace_function_count\":");
2854 uint32_t count = 0;
2855 AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
2856 if (tracker) {
2857 count = tracker->id_to_function_info()->occupancy();
2858 }
2859 writer_->AddNumber(count);
2636 } 2860 }
2637 2861
2638 2862
2639 static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) { 2863 static void WriteUChar(OutputStreamWriter* w, unibrow::uchar u) {
2640 static const char hex_chars[] = "0123456789ABCDEF"; 2864 static const char hex_chars[] = "0123456789ABCDEF";
2641 w->AddString("\\u"); 2865 w->AddString("\\u");
2642 w->AddCharacter(hex_chars[(u >> 12) & 0xf]); 2866 w->AddCharacter(hex_chars[(u >> 12) & 0xf]);
2643 w->AddCharacter(hex_chars[(u >> 8) & 0xf]); 2867 w->AddCharacter(hex_chars[(u >> 8) & 0xf]);
2644 w->AddCharacter(hex_chars[(u >> 4) & 0xf]); 2868 w->AddCharacter(hex_chars[(u >> 4) & 0xf]);
2645 w->AddCharacter(hex_chars[u & 0xf]); 2869 w->AddCharacter(hex_chars[u & 0xf]);
2646 } 2870 }
2647 2871
2648 2872
2873 void HeapSnapshotJSONSerializer::SerializeTraceTree() {
2874 AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
2875 if (!tracker) return;
2876 AllocationTraceTree* traces = tracker->trace_tree();
2877 SerializeTraceNode(traces->root());
2878 }
2879
2880
2881 void HeapSnapshotJSONSerializer::SerializeTraceNode(AllocationTraceNode* node) {
2882 // The buffer needs space for 4 unsigned ints, 4 commas, [ and \0
2883 const int kBufferSize =
2884 4 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
2885 + 4 + 1 + 1;
2886 EmbeddedVector<char, kBufferSize> buffer;
2887 int buffer_pos = 0;
2888 buffer_pos = utoa(node->id(), buffer, buffer_pos);
2889 buffer[buffer_pos++] = ',';
2890 buffer_pos = utoa(node->function_id(), buffer, buffer_pos);
2891 buffer[buffer_pos++] = ',';
2892 buffer_pos = utoa(node->allocation_count(), buffer, buffer_pos);
2893 buffer[buffer_pos++] = ',';
2894 buffer_pos = utoa(node->allocation_size(), buffer, buffer_pos);
2895 buffer[buffer_pos++] = ',';
2896 buffer[buffer_pos++] = '[';
2897 buffer[buffer_pos++] = '\0';
2898 writer_->AddString(buffer.start());
2899
2900 Vector<AllocationTraceNode*> children = node->children();
2901 for (int i = 0; i < children.length(); i++) {
2902 if (i > 0) {
2903 writer_->AddCharacter(',');
2904 }
2905 SerializeTraceNode(children[i]);
2906 }
2907 writer_->AddCharacter(']');
2908 }
2909
2910
2911 // 0-based position is converted to 1-based during the serialization.
2912 static int SerializePosition(int position, const Vector<char>& buffer,
2913 int buffer_pos) {
2914 if (position == -1) {
2915 buffer[buffer_pos++] = '0';
2916 } else {
2917 ASSERT(position >= 0);
2918 buffer_pos = utoa(static_cast<unsigned>(position + 1), buffer, buffer_pos);
2919 }
2920 return buffer_pos;
2921 }
2922
2923
2924 void HeapSnapshotJSONSerializer::SerializeTraceNodeInfos() {
2925 AllocationTracker* tracker = snapshot_->collection()->allocation_tracker();
2926 if (!tracker) return;
2927 // The buffer needs space for 6 unsigned ints, 6 commas, \n and \0
2928 const int kBufferSize =
2929 6 * MaxDecimalDigitsIn<sizeof(unsigned)>::kUnsigned // NOLINT
2930 + 6 + 1 + 1;
2931 EmbeddedVector<char, kBufferSize> buffer;
2932 HashMap* id_to_function_info = tracker->id_to_function_info();
2933 bool first_entry = true;
2934 for (HashMap::Entry* p = id_to_function_info->Start();
2935 p != NULL;
2936 p = id_to_function_info->Next(p)) {
2937 SnapshotObjectId id =
2938 static_cast<SnapshotObjectId>(reinterpret_cast<intptr_t>(p->key));
2939 AllocationTracker::FunctionInfo* info =
2940 reinterpret_cast<AllocationTracker::FunctionInfo* >(p->value);
2941 int buffer_pos = 0;
2942 if (first_entry) {
2943 first_entry = false;
2944 } else {
2945 buffer[buffer_pos++] = ',';
2946 }
2947 buffer_pos = utoa(id, buffer, buffer_pos);
2948 buffer[buffer_pos++] = ',';
2949 buffer_pos = utoa(GetStringId(info->name), buffer, buffer_pos);
2950 buffer[buffer_pos++] = ',';
2951 buffer_pos = utoa(GetStringId(info->script_name), buffer, buffer_pos);
2952 buffer[buffer_pos++] = ',';
2953 // The cast is safe because script id is a non-negative Smi.
2954 buffer_pos = utoa(static_cast<unsigned>(info->script_id), buffer,
2955 buffer_pos);
2956 buffer[buffer_pos++] = ',';
2957 buffer_pos = SerializePosition(info->line, buffer, buffer_pos);
2958 buffer[buffer_pos++] = ',';
2959 buffer_pos = SerializePosition(info->column, buffer, buffer_pos);
2960 buffer[buffer_pos++] = '\n';
2961 buffer[buffer_pos++] = '\0';
2962 writer_->AddString(buffer.start());
2963 }
2964 }
2965
2966
2649 void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) { 2967 void HeapSnapshotJSONSerializer::SerializeString(const unsigned char* s) {
2650 writer_->AddCharacter('\n'); 2968 writer_->AddCharacter('\n');
2651 writer_->AddCharacter('\"'); 2969 writer_->AddCharacter('\"');
2652 for ( ; *s != '\0'; ++s) { 2970 for ( ; *s != '\0'; ++s) {
2653 switch (*s) { 2971 switch (*s) {
2654 case '\b': 2972 case '\b':
2655 writer_->AddString("\\b"); 2973 writer_->AddString("\\b");
2656 continue; 2974 continue;
2657 case '\f': 2975 case '\f':
2658 writer_->AddString("\\f"); 2976 writer_->AddString("\\f");
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after
2708 writer_->AddString("\"<dummy>\""); 3026 writer_->AddString("\"<dummy>\"");
2709 for (int i = 1; i < sorted_strings.length(); ++i) { 3027 for (int i = 1; i < sorted_strings.length(); ++i) {
2710 writer_->AddCharacter(','); 3028 writer_->AddCharacter(',');
2711 SerializeString(sorted_strings[i]); 3029 SerializeString(sorted_strings[i]);
2712 if (writer_->aborted()) return; 3030 if (writer_->aborted()) return;
2713 } 3031 }
2714 } 3032 }
2715 3033
2716 3034
2717 } } // namespace v8::internal 3035 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/heap-snapshot-generator.h ('k') | src/hydrogen.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698