Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 8055029: Add experimental support for tracing the state of the VM heap to a file
Base URL: http://v8.googlecode.com/svn/branches/experimental/heap-visualization/
Patch Set: Created 9 years, 2 months ago
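
The patch threads a linked list of HeapVisualizer objects through Heap and writes timestamped VM-state records plus per-"pixel" allocation densities to a trace file. As orientation for the diff below, here is a minimal standalone sketch of that kind of trace writer; every name in it is a hypothetical illustration, not the patch's actual HeapVisualizer API:

// Minimal sketch of a heap-trace writer in the spirit of this patch.
// All names are hypothetical illustrations, not the patch's API.
#include <cstdint>
#include <cstdio>

enum TraceState { kRunning, kScavenging, kMarking };

class TraceWriter {
 public:
  explicit TraceWriter(const char* path) : file_(std::fopen(path, "w")) {}
  ~TraceWriter() { if (file_ != NULL) std::fclose(file_); }

  // Record a VM state change at a time relative to the first event.
  void TimeStamp(TraceState state, uint32_t secs, uint32_t usecs) {
    if (file_ == NULL) return;
    std::fprintf(file_, "t %u.%06u state=%d\n", secs, usecs, state);
  }

  // Record how free a region is, one byte per fixed-size "pixel"
  // (255 = completely free, 0 = completely allocated).
  void Allocate(uintptr_t address, int pixels, const unsigned char* freeness) {
    if (file_ == NULL) return;
    std::fprintf(file_, "a %#zx", static_cast<size_t>(address));
    for (int i = 0; i < pixels; i++) {
      std::fprintf(file_, " %d", static_cast<int>(freeness[i]));
    }
    std::fprintf(file_, "\n");
  }

 private:
  std::FILE* file_;
};

int main() {
  TraceWriter writer("heap-trace.log");
  writer.TimeStamp(kScavenging, 0, 125000);
  unsigned char freeness[4] = { 0, 128, 255, 255 };
  writer.Allocate(0x10000, 4, freeness);
  return 0;
}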
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 74 matching lines...)
       max_executable_size_(128l * LUMP_OF_MEMORY),

       // Variables set based on semispace_size_ and old_generation_size_ in
       // ConfigureHeap (survived_since_last_expansion_, external_allocation_limit_)
       // Will be 4 * reserved_semispace_size_ to ensure that young
       // generation can be aligned to its size.
       survived_since_last_expansion_(0),
       sweep_generation_(0),
       always_allocate_scope_depth_(0),
       linear_allocation_scope_depth_(0),
+      visualizer_(NULL),
       contexts_disposed_(0),
       scan_on_scavenge_pages_(0),
       new_space_(this),
       old_pointer_space_(NULL),
       old_data_space_(NULL),
       code_space_(NULL),
       map_space_(NULL),
       cell_space_(NULL),
       lo_space_(NULL),
       gc_state_(NOT_IN_GC),
(...skipping 247 matching lines...)
     ReportHeapStatistics("After GC");
   } else if (FLAG_log_gc) {
     new_space_.ReportStatistics();
   }
 #else
   if (FLAG_log_gc) new_space_.ReportStatistics();
 #endif  // DEBUG
 }


-void Heap::GarbageCollectionPrologue() {
+void Heap::GarbageCollectionPrologue(GarbageCollector collector) {
+  VisualizerTimeStamp(collector == SCAVENGER ?
+                      HeapVisualizer::kScavenging :
+                      HeapVisualizer::kMarking);
   isolate_->transcendental_cache()->Clear();
   ClearJSFunctionResultCaches();
   gc_count_++;
   unflattened_strings_length_ = 0;
 #ifdef DEBUG
   ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
   allow_allocation(false);

   if (FLAG_verify_heap) {
     Verify();
   }

   if (FLAG_gc_verbose) Print();
 #endif  // DEBUG

 #if defined(DEBUG)
   ReportStatisticsBeforeGC();
 #endif  // DEBUG

   LiveObjectList::GCPrologue();
   store_buffer()->GCPrologue();
 }

+
 intptr_t Heap::SizeOfObjects() {
   intptr_t total = 0;
   AllSpaces spaces;
   for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
     total += space->SizeOfObjects();
   }
   return total;
 }

 void Heap::GarbageCollectionEpilogue() {
(...skipping 19 matching lines...)
   isolate_->counters()->symbol_table_capacity()->Set(
       symbol_table()->Capacity());
   isolate_->counters()->number_of_symbols()->Set(
       symbol_table()->NumberOfElements());
 #if defined(DEBUG)
   ReportStatisticsAfterGC();
 #endif  // DEBUG
 #ifdef ENABLE_DEBUGGER_SUPPORT
   isolate_->debug()->AfterGarbageCollection();
 #endif  // ENABLE_DEBUGGER_SUPPORT
+  VisualizerTimeStamp(HeapVisualizer::kRunning);
+  for (HeapVisualizer* vis = visualizer(); vis != NULL; vis = vis->next()) {
+    UpdateHeapVisualizer(vis, new_space());
+  }
 }


 void Heap::CollectAllGarbage(int flags) {
   // Since we are ignoring the return value, the exact choice of space does
   // not matter, so long as we do not specify NEW_SPACE, which would not
   // cause a full GC.
   mark_compact_collector_.SetFlags(flags);
   CollectGarbage(OLD_POINTER_SPACE);
   mark_compact_collector_.SetFlags(kNoGCFlags);
(...skipping 49 matching lines...)
       FLAG_incremental_marking_steps) {
     if (FLAG_trace_incremental_marking) {
       PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
     }
     collector = SCAVENGER;
   }

   bool next_gc_likely_to_collect_more = false;

   { GCTracer tracer(this);
-    GarbageCollectionPrologue();
+    GarbageCollectionPrologue(collector);
     // The GC count was incremented in the prologue. Tell the tracer about
     // it.
     tracer.set_gc_count(gc_count_);

     // Tell the tracer which collector we've selected.
     tracer.set_collector(collector);

     HistogramTimer* rate = (collector == SCAVENGER)
         ? isolate_->counters()->gc_scavenger()
         : isolate_->counters()->gc_compactor();
(...skipping 5881 matching lines...)
   uint32_t in0 = 0xffffffffu;  // Bit-pattern for a NaN that isn't
   uint32_t in1 = 0xffffffffu;  // generated by the FPU.
   for (int i = 0; i < kCacheSize; i++) {
     elements_[i].in[0] = in0;
     elements_[i].in[1] = in1;
     elements_[i].output = NULL;
   }
 }


+static void HandleRange(uintptr_t* allocated_bytes,
+                        int pixel_size_log_2,
+                        uintptr_t page_address,
+                        uintptr_t object_start,
+                        uintptr_t object_size) {
+  // Spread one object's byte range over the 2^pixel_size_log_2-byte
+  // "pixels" of its page, so each pixel accumulates the number of its
+  // bytes that are allocated.
+  uintptr_t pixel_size = static_cast<uintptr_t>(1) << pixel_size_log_2;
+  int start_pixel = (object_start - page_address) >> pixel_size_log_2;
+  int end_pixel =
+      (object_start + object_size - page_address) >> pixel_size_log_2;
+  if (start_pixel != end_pixel) {
+    int in_first_pixel = pixel_size - (object_start & (pixel_size - 1));
+    allocated_bytes[start_pixel++] += in_first_pixel;
+    object_size -= in_first_pixel;
+    object_start += in_first_pixel;
+    while (start_pixel != end_pixel) {
+      allocated_bytes[start_pixel++] += pixel_size;
+      object_size -= pixel_size;
+      object_start += pixel_size;
+    }
+  }
+  allocated_bytes[start_pixel] += object_size;
+}
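
HandleRange spreads one object's byte range across the fixed 2^pixel_size_log_2-byte "pixels" of its page, so each pixel accumulates how many of its bytes are allocated. The same arithmetic in a standalone sketch (illustrative names, not patch code):

// Sketch of the pixel-bucketing arithmetic in HandleRange above.
#include <cstdint>
#include <cstdio>

static void AddRange(uintptr_t* buckets, int k,
                     uintptr_t base, uintptr_t start, uintptr_t size) {
  uintptr_t pixel = static_cast<uintptr_t>(1) << k;  // pixel size in bytes
  uintptr_t first = (start - base) >> k;             // first pixel touched
  uintptr_t last = (start + size - base) >> k;       // pixel holding the end
  if (first != last) {
    uintptr_t head = pixel - (start & (pixel - 1));  // bytes in first pixel
    buckets[first++] += head;
    size -= head;
    while (first != last) {                          // fully covered pixels
      buckets[first++] += pixel;
      size -= pixel;
    }
  }
  buckets[first] += size;                            // tail bytes in last pixel
}

int main() {
  const int k = 4;  // 16-byte pixels
  uintptr_t buckets[8] = { 0 };
  AddRange(buckets, k, 0x1000, 0x1008, 40);  // 40 bytes starting mid-pixel
  for (int i = 0; i < 8; i++) {
    std::printf("pixel %d: %zu bytes\n", i, static_cast<size_t>(buckets[i]));
  }
  return 0;  // prints 8, 16, 16, 0, 0, ... bytes per pixel
}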
+
+
+void Heap::UpdateHeapVisualizer(HeapVisualizer* visualizer, NewSpace* space) {
+  SemiSpace* semi_space = space->active_space();
+
+  Address top = space->top();
+  NewSpacePage* top_page = NewSpacePage::FromLimit(top);
+  // Pages before the page containing the allocation top are fully
+  // allocated; the top page is allocated up to 'top'; later pages are free.
+  bool allocated = true;
+  for (NewSpacePage* page = semi_space->first_page();
+       !page->is_anchor();
+       page = page->next_page()) {
+    uintptr_t page_address = reinterpret_cast<uintptr_t>(page->address());
+    uintptr_t address = reinterpret_cast<uintptr_t>(page->ObjectAreaStart());
+    uintptr_t end = reinterpret_cast<uintptr_t>(page->ObjectAreaEnd());
+    int overhead_size = address - page_address;
+    visualizer->Name(HeapVisualizer::kHeapOverheadPseudoSpaceIdentity,
+                     page_address,
+                     overhead_size);
+    visualizer->ConstantAllocation(page_address, overhead_size, 0);
+    // Bytes below the age mark survived the last scavenge and are shown
+    // as a separate pseudo-space.
+    Address age_mark = space->to_space_age_mark();
+    if (page->ObjectAreaStart() <= age_mark &&
+        page->ObjectAreaEnd() > age_mark) {
+      visualizer->Name(HeapVisualizer::kSurvivingNewSpacePseudoSpaceIdentity,
+                       address,
+                       age_mark - page->ObjectAreaStart());
+      visualizer->Name(space->identity(),
+                       reinterpret_cast<uintptr_t>(age_mark),
+                       page->ObjectAreaEnd() - age_mark);
+    } else {
+      bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+      int space_id = below_mark ?
+          HeapVisualizer::kSurvivingNewSpacePseudoSpaceIdentity :
+          space->identity();
+      visualizer->Name(space_id, address, end - address);
+    }
+    if (page == top_page) {
+      int allocated_size = reinterpret_cast<uintptr_t>(top) - address;
+      allocated = false;
+      if (allocated_size != 0) {
+        uintptr_t size =
+            Max(allocated_size, 1 << visualizer->pixel_size_log_2());
+        visualizer->ConstantAllocation(address, size, 0);
+      }
+    }
+    if (allocated) {
+      visualizer->ConstantAllocation(address, end - address, 0);
+    }
+  }
+}
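
The new-space pass above splits each page at the age mark: bytes below it survived the last scavenge and are shown as a "surviving" pseudo-space, bytes above are fresh allocation. The split in isolation (illustrative names, not patch code):

// Sketch of the age-mark split used above.
#include <cstdint>
#include <cstdio>
#include <algorithm>

struct PageSplit { uintptr_t surviving, fresh; };

static PageSplit SplitAtAgeMark(uintptr_t start, uintptr_t end,
                                uintptr_t age_mark) {
  // Clamp the mark into the page so pages wholly above or below it work too.
  uintptr_t mark = std::min(std::max(age_mark, start), end);
  PageSplit split = { mark - start, end - mark };
  return split;
}

int main() {
  PageSplit s = SplitAtAgeMark(0x1000, 0x2000, 0x1400);
  std::printf("surviving=%zu fresh=%zu\n",
              static_cast<size_t>(s.surviving), static_cast<size_t>(s.fresh));
  return 0;  // prints surviving=1024 fresh=3072
}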
+
+
+static void UpdateAllocation(HeapVisualizer* visualizer, Page* page) {
+  int pixel_size_log_2 = visualizer->pixel_size_log_2();
+  uintptr_t page_address = reinterpret_cast<uintptr_t>(page->address());
+  uintptr_t address = reinterpret_cast<uintptr_t>(page->ObjectAreaStart());
+  uintptr_t end = reinterpret_cast<uintptr_t>(page->ObjectAreaEnd());
+  int overhead_size = address - page_address;
+  visualizer->Name(HeapVisualizer::kHeapOverheadPseudoSpaceIdentity,
+                   page_address,
+                   overhead_size);
+  visualizer->ConstantAllocation(page_address, overhead_size, 0);
+
+  visualizer->Name(page->owner()->identity(), address, end - address);
+  if (page->WasSwept()) {
+    uintptr_t pixel_size = static_cast<uintptr_t>(1) << pixel_size_log_2;
+    int pixels = ((end - address) >> pixel_size_log_2) + 1;
+    uintptr_t* allocated_bytes = new uintptr_t[pixels];
+    for (int i = 0; i < pixels; i++) {
+      allocated_bytes[i] = 0;
+    }
+    if (page->WasSweptPrecisely()) {
+      // Precisely swept pages can be iterated object by object.
+      HeapObjectIterator iterator(page, NULL);
+      for (HeapObject* object = iterator.Next();
+           object != NULL; object = iterator.Next()) {
+        HandleRange(allocated_bytes,
+                    pixel_size_log_2,
+                    address,
+                    reinterpret_cast<uintptr_t>(object->address()),
+                    object->Size());
+      }
+    } else {
+      // Conservatively swept pages are reconstructed from the free list:
+      // everything not on the free list counts as allocated.
+      FreeList* free_list =
+          reinterpret_cast<PagedSpace*>(page->owner())->free_list();
+      if (free_list->IsVeryLong()) {
+        // Make checker board pattern to indicate fragmentation.  The loop
+        // bound guards the i + 5 access on the last partial group.
+        for (int i = 0; i + 5 < pixels; i += 8) {
+          allocated_bytes[i] = pixel_size;
+          allocated_bytes[i + 1] = pixel_size;
+          allocated_bytes[i + 4] = pixel_size;
+          allocated_bytes[i + 5] = pixel_size;
+        }
+      } else {
+        FreeListNode* node;
+        for (int i = 0; i < FreeList::kNumberOfChains; i++) {
+          for (node = free_list->get_chain(i);
+               node != NULL;
+               node = node->next()) {
+            if (node->address() >= page->ObjectAreaStart() &&
+                node->address() < page->ObjectAreaEnd()) {
+              HandleRange(allocated_bytes,
+                          pixel_size_log_2,
+                          address,
+                          reinterpret_cast<uintptr_t>(node->address()),
+                          node->Size());
+            }
+          }
+        }
+      }
+      // Invert the free byte counts into allocated byte counts.
+      for (int i = 0; i < pixels; i++) {
+        allocated_bytes[i] = pixel_size - allocated_bytes[i];
+      }
+    }
+    // Scale each pixel's allocated byte count to a 0..255 freeness value,
+    // where 255 means the pixel is completely free.
+    unsigned char* freeness = new unsigned char[pixels];
+    for (int i = 0; i < pixels; i++) {
+      if (allocated_bytes[i] == pixel_size) {
+        freeness[i] = 0;
+      } else {
+        freeness[i] = 255 - ((allocated_bytes[i] * 255) >> pixel_size_log_2);
+      }
+    }
+    visualizer->Allocate(address, pixels, freeness);
+    delete[] allocated_bytes;
+    delete[] freeness;
+  }
+}
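
The final loop above maps each pixel's allocated byte count into a one-byte "freeness" value: 255 for a completely free pixel down to 0 for a completely allocated one. A standalone sketch of that scaling (the multiply-then-shift form matches the rewritten line above and avoids a negative shift count for pixel sizes over 256 bytes; names are illustrative):

// Sketch of the allocated-bytes to freeness scaling.
#include <cstdint>
#include <cstdio>

static unsigned char Freeness(uintptr_t allocated, int k) {
  // allocated ranges over [0, 2^k]; (allocated * 255) >> k maps it to
  // [0, 255] for any pixel size.
  return static_cast<unsigned char>(255 - ((allocated * 255) >> k));
}

int main() {
  const int k = 12;  // 4 KB pixels
  const uintptr_t samples[] = { 0, 2048, 4096 };
  for (int i = 0; i < 3; i++) {
    std::printf("allocated=%zu -> freeness=%d\n",
                static_cast<size_t>(samples[i]),
                static_cast<int>(Freeness(samples[i], k)));
  }
  return 0;  // prints 255, 128, 0
}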
+
+
+void Heap::UpdateVisualizers(Page* page) {
+  for (HeapVisualizer* vis = visualizer(); vis != NULL; vis = vis->next()) {
+    UpdateAllocation(vis, page);
+  }
+}
+
+
+void Heap::VisualizeCrankshaft() {
+  VisualizerTimeStamp(HeapVisualizer::kCrankshafting);
+}
+
+
+static void VisualizeAllocationTop(PagedSpace* space) {
+  space->SetTop(space->top(), space->limit());
+}
+
+
+void Heap::VisualizeCrankshaftDone() {
+  VisualizeAllocationTop(old_data_space());
+  VisualizeAllocationTop(old_pointer_space());
+  VisualizeAllocationTop(map_space());
+  VisualizeAllocationTop(cell_space());
+  VisualizeAllocationTop(code_space());
+  new_space()->VisualizeTop();
+  VisualizerTimeStamp(HeapVisualizer::kRunning);
+}
+
+
+void Heap::VisualizerTimeStamp(HeapVisualizer::ProfileState state) {
+  uint32_t secs, usecs;
+  OS::GetUserTime(&secs, &usecs);
+  for (HeapVisualizer* vis = visualizer(); vis != NULL; vis = vis->next()) {
+    // The first event establishes each visualizer's time zero; later
+    // stamps are made relative to it.
+    if (vis->secs_zero() == 0) {
+      vis->set_time_zero(secs, usecs);
+    }
+    // Use per-visualizer copies so borrowing a second for one visualizer
+    // does not leak into the next one's timestamp.
+    uint32_t s = secs;
+    uint32_t us = usecs;
+    if (us < vis->usecs_zero()) {
+      us += 1000000;
+      s -= 1;
+    }
+    vis->TimeStamp(state, s - vis->secs_zero(), us - vis->usecs_zero());
+    vis->Flush();
+  }
+}
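
VisualizerTimeStamp emits times relative to each visualizer's first event, borrowing one second whenever the microsecond field would underflow. The normalization in isolation (illustrative names, not patch code):

// Sketch of the relative-timestamp normalization above.
#include <cstdint>
#include <cstdio>

static void RelativeTime(uint32_t secs, uint32_t usecs,
                         uint32_t secs_zero, uint32_t usecs_zero,
                         uint32_t* rel_secs, uint32_t* rel_usecs) {
  if (usecs < usecs_zero) {  // borrow a second into the microsecond field
    usecs += 1000000;
    secs -= 1;
  }
  *rel_secs = secs - secs_zero;
  *rel_usecs = usecs - usecs_zero;
}

int main() {
  uint32_t s, us;
  RelativeTime(10, 200000, 9, 800000, &s, &us);  // 10.2 s relative to 9.8 s
  std::printf("%u.%06u\n", s, us);               // prints 0.400000
  return 0;
}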
+
+
+void Heap::UpdateHeapVisualizer(HeapVisualizer* visualizer, PagedSpace* space) {
+  // Update the other visualizers (not the new one) with the latest
+  // allocation events.
+  space->SetTop(space->top(), space->limit());
+
+  PageIterator pages(space);
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    UpdateAllocation(visualizer, page);
+  }
+}
+
+
+void Heap::UpdateHeapVisualizer(
+    HeapVisualizer* visualizer, LargeObjectSpace* space) {
+  LargeObjectIterator it(space);
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+    // Large objects do not carry a useful owner identity, so classify
+    // them by object type instead.
+    int space_id = OLD_DATA_SPACE;
+    if (object->IsCode()) space_id = CODE_SPACE;
+    if (object->IsFixedArray()) space_id = OLD_POINTER_SPACE;
+    visualizer->Name(HeapVisualizer::kHeapOverheadPseudoSpaceIdentity,
+                     reinterpret_cast<uintptr_t>(chunk->address()),
+                     object->address() - chunk->address());
+    visualizer->Name(space_id,
+                     reinterpret_cast<uintptr_t>(object->address()),
+                     object->Size());
+    visualizer->ConstantAllocation(
+        reinterpret_cast<uintptr_t>(chunk->address()),
+        object->address() - chunk->address() + object->Size(),
+        0);
+  }
+}
+
+
+void Heap::UpdateVisualizer(HeapVisualizer* visualizer) {
+  UpdateHeapVisualizer(visualizer, old_data_space());
+  UpdateHeapVisualizer(visualizer, old_pointer_space());
+  UpdateHeapVisualizer(visualizer, map_space());
+  UpdateHeapVisualizer(visualizer, cell_space());
+  UpdateHeapVisualizer(visualizer, code_space());
+  UpdateHeapVisualizer(visualizer, lo_space());
+  UpdateHeapVisualizer(visualizer, new_space());
+  visualizer->Flush();
+}
+
+
 void TranscendentalCache::Clear() {
   for (int i = 0; i < kNumberOfCaches; i++) {
     if (caches_[i] != NULL) {
       delete caches_[i];
       caches_[i] = NULL;
     }
   }
 }


(...skipping 60 matching lines...)
         inner->set_owner(lo_space());
         inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
         inner = MemoryChunk::FromAddress(
             inner->address() + Page::kPageSize);
       }
     }
   }
   isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
   for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
     next = chunk->next_chunk();
+    // Mark the freed chunk as completely free (freeness 255) in every
+    // attached visualizer before the memory is returned to the allocator.
+    for (HeapVisualizer* vis = isolate_->heap()->visualizer();
+         vis != NULL;
+         vis = vis->next()) {
+      vis->ConstantAllocation(reinterpret_cast<uintptr_t>(chunk->address()),
+                              chunk->size(),
+                              255);
+    }
     isolate_->memory_allocator()->Free(chunk);
   }
   chunks_queued_for_free_ = NULL;
 }

 } }  // namespace v8::internal