Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(534)

Side by Side Diff: src/heap-snapshot-generator.cc

Issue 15825019: Do not force GC on each objects tracking sample. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Made it call the GC once per second. Created 7 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/heap-snapshot-generator.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 170 matching lines...) Expand 10 before | Expand all | Expand 10 after
181 181
182 // It is very important to keep objects that form a heap snapshot 182 // It is very important to keep objects that form a heap snapshot
183 // as small as possible. 183 // as small as possible.
184 namespace { // Avoid littering the global namespace. 184 namespace { // Avoid littering the global namespace.
185 185
186 template <size_t ptr_size> struct SnapshotSizeConstants; 186 template <size_t ptr_size> struct SnapshotSizeConstants;
187 187
188 template <> struct SnapshotSizeConstants<4> { 188 template <> struct SnapshotSizeConstants<4> {
189 static const int kExpectedHeapGraphEdgeSize = 12; 189 static const int kExpectedHeapGraphEdgeSize = 12;
190 static const int kExpectedHeapEntrySize = 24; 190 static const int kExpectedHeapEntrySize = 24;
191 static const int kExpectedHeapSnapshotsCollectionSize = 100;
192 static const int kExpectedHeapSnapshotSize = 132; 191 static const int kExpectedHeapSnapshotSize = 132;
193 }; 192 };
194 193
195 template <> struct SnapshotSizeConstants<8> { 194 template <> struct SnapshotSizeConstants<8> {
196 static const int kExpectedHeapGraphEdgeSize = 24; 195 static const int kExpectedHeapGraphEdgeSize = 24;
197 static const int kExpectedHeapEntrySize = 32; 196 static const int kExpectedHeapEntrySize = 32;
198 static const int kExpectedHeapSnapshotsCollectionSize = 152;
199 static const int kExpectedHeapSnapshotSize = 160; 197 static const int kExpectedHeapSnapshotSize = 160;
200 }; 198 };
201 199
202 } // namespace 200 } // namespace
203 201
204 HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection, 202 HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
205 const char* title, 203 const char* title,
206 unsigned uid) 204 unsigned uid)
207 : collection_(collection), 205 : collection_(collection),
208 title_(title), 206 title_(title),
(...skipping 161 matching lines...) Expand 10 before | Expand all | Expand 10 after
370 HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep; 368 HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
371 const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId = 369 const SnapshotObjectId HeapObjectsMap::kGcRootsFirstSubrootId =
372 HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep; 370 HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
373 const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId = 371 const SnapshotObjectId HeapObjectsMap::kFirstAvailableObjectId =
374 HeapObjectsMap::kGcRootsFirstSubrootId + 372 HeapObjectsMap::kGcRootsFirstSubrootId +
375 VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep; 373 VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
376 374
377 HeapObjectsMap::HeapObjectsMap(Heap* heap) 375 HeapObjectsMap::HeapObjectsMap(Heap* heap)
378 : next_id_(kFirstAvailableObjectId), 376 : next_id_(kFirstAvailableObjectId),
379 entries_map_(AddressesMatch), 377 entries_map_(AddressesMatch),
380 heap_(heap) { 378 heap_(heap),
379 last_gc_time_(0) {
381 // This dummy element solves a problem with entries_map_. 380 // This dummy element solves a problem with entries_map_.
382 // When we do lookup in HashMap we see no difference between two cases: 381 // When we do lookup in HashMap we see no difference between two cases:
383 // it has an entry with NULL as the value or it has created 382 // it has an entry with NULL as the value or it has created
384 // a new entry on the fly with NULL as the default value. 383 // a new entry on the fly with NULL as the default value.
385 // With such dummy element we have a guarantee that all entries_map_ entries 384 // With such dummy element we have a guarantee that all entries_map_ entries
386 // will have the value field greater than 0. 385 // will have the value field greater than 0.
387 // This fact is used in the MoveObject method. 386 // This fact is used in the MoveObject method.
388 entries_.Add(EntryInfo(0, NULL, 0)); 387 entries_.Add(EntryInfo(0, NULL, 0));
389 } 388 }
390 389
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after
457 entries_.Add(EntryInfo(id, addr, size)); 456 entries_.Add(EntryInfo(id, addr, size));
458 ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy()); 457 ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
459 return id; 458 return id;
460 } 459 }
461 460
462 461
463 void HeapObjectsMap::StopHeapObjectsTracking() { 462 void HeapObjectsMap::StopHeapObjectsTracking() {
464 time_intervals_.Clear(); 463 time_intervals_.Clear();
465 } 464 }
466 465
466
467 void HeapObjectsMap::UpdateHeapObjectsMap() { 467 void HeapObjectsMap::UpdateHeapObjectsMap() {
468 HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask, 468 double now = OS::TimeCurrentMillis();
469 "HeapSnapshotsCollection::UpdateHeapObjectsMap"); 469 if (now - last_gc_time_ > 1000) {
470 HEAP->CollectAllGarbage(Heap::kMakeHeapIterableMask,
471 "HeapObjectsMap::UpdateHeapObjectsMap");
472 last_gc_time_ = now;
473 } else {
474 HEAP->EnsureHeapIsIterable();
475 }
476
470 HeapIterator iterator(heap_); 477 HeapIterator iterator(heap_);
471 for (HeapObject* obj = iterator.next(); 478 for (HeapObject* obj = iterator.next();
472 obj != NULL; 479 obj != NULL;
473 obj = iterator.next()) { 480 obj = iterator.next()) {
474 FindOrAddEntry(obj->address(), obj->Size()); 481 FindOrAddEntry(obj->address(), obj->Size());
475 } 482 }
476 RemoveDeadEntries(); 483 RemoveDeadEntries();
477 } 484 }
478 485
479 486
(...skipping 168 matching lines...) Expand 10 before | Expand all | Expand 10 after
648 ASSERT(object == NULL); 655 ASSERT(object == NULL);
649 object = obj; 656 object = obj;
650 // Can't break -- kFilterUnreachable requires full heap traversal. 657 // Can't break -- kFilterUnreachable requires full heap traversal.
651 } 658 }
652 } 659 }
653 return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>(); 660 return object != NULL ? Handle<HeapObject>(object) : Handle<HeapObject>();
654 } 661 }
655 662
656 663
657 size_t HeapSnapshotsCollection::GetUsedMemorySize() const { 664 size_t HeapSnapshotsCollection::GetUsedMemorySize() const {
658 STATIC_CHECK(SnapshotSizeConstants<kPointerSize>::
659 kExpectedHeapSnapshotsCollectionSize ==
660 sizeof(HeapSnapshotsCollection)); // NOLINT
661 size_t size = sizeof(*this); 665 size_t size = sizeof(*this);
662 size += names_.GetUsedMemorySize(); 666 size += names_.GetUsedMemorySize();
663 size += ids_.GetUsedMemorySize(); 667 size += ids_.GetUsedMemorySize();
664 size += sizeof(HashMap::Entry) * snapshots_uids_.capacity(); 668 size += sizeof(HashMap::Entry) * snapshots_uids_.capacity();
665 size += GetMemoryUsedByList(snapshots_); 669 size += GetMemoryUsedByList(snapshots_);
666 for (int i = 0; i < snapshots_.length(); ++i) { 670 for (int i = 0; i < snapshots_.length(); ++i) {
667 size += snapshots_[i]->RawSnapshotSize(); 671 size += snapshots_[i]->RawSnapshotSize();
668 } 672 }
669 return size; 673 return size;
670 } 674 }
(...skipping 1987 matching lines...) Expand 10 before | Expand all | Expand 10 after
2658 2662
2659 2663
2660 void HeapSnapshotJSONSerializer::SortHashMap( 2664 void HeapSnapshotJSONSerializer::SortHashMap(
2661 HashMap* map, List<HashMap::Entry*>* sorted_entries) { 2665 HashMap* map, List<HashMap::Entry*>* sorted_entries) {
2662 for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p)) 2666 for (HashMap::Entry* p = map->Start(); p != NULL; p = map->Next(p))
2663 sorted_entries->Add(p); 2667 sorted_entries->Add(p);
2664 sorted_entries->Sort(SortUsingEntryValue); 2668 sorted_entries->Sort(SortUsingEntryValue);
2665 } 2669 }
2666 2670
2667 } } // namespace v8::internal 2671 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/heap-snapshot-generator.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698