Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(214)

Side by Side Diff: src/spaces.cc

Issue 8055029: Add experimental support for tracing the state of the VM heap to a file Base URL: http://v8.googlecode.com/svn/branches/experimental/heap-visualization/
Patch Set: Created 9 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 426 matching lines...) Expand 10 before | Expand all | Expand 10 after
437 chunk->initialize_scan_on_scavenge(false); 437 chunk->initialize_scan_on_scavenge(false);
438 chunk->SetFlag(WAS_SWEPT_PRECISELY); 438 chunk->SetFlag(WAS_SWEPT_PRECISELY);
439 439
440 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); 440 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
441 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); 441 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
442 442
443 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE); 443 if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
444 444
445 if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA); 445 if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
446 446
447 if (owner != NULL) {
Vyacheslav Egorov (Chromium) 2011/09/28 15:21:11 Move out of the spaces.cc to some visualizer speci
448 for (HeapVisualizer* vis = heap->visualizer();
449 vis != NULL;
450 vis = vis->next()) {
451 int space_id = executable ? CODE_SPACE : owner->identity();
452 vis->Name(space_id,
453 reinterpret_cast<uintptr_t>(chunk->body()),
454 size - (chunk->body() - chunk->address()));
455 vis->Name(HeapVisualizer::kHeapOverheadPseudoSpaceIdentity,
456 reinterpret_cast<uintptr_t>(base),
457 (chunk->body() - chunk->address()));
458 if (owner == heap->lo_space()) {
 459           // Large pages are always fully allocated, so there is no need to track them.
460 vis->ConstantAllocation(reinterpret_cast<uintptr_t>(base), size, 0);
461 }
462 }
463 }
464
447 return chunk; 465 return chunk;
448 } 466 }
449 467
450 468
451 void MemoryChunk::InsertAfter(MemoryChunk* other) { 469 void MemoryChunk::InsertAfter(MemoryChunk* other) {
452 next_chunk_ = other->next_chunk_; 470 next_chunk_ = other->next_chunk_;
453 prev_chunk_ = other; 471 prev_chunk_ = other;
454 other->next_chunk_->prev_chunk_ = this; 472 other->next_chunk_->prev_chunk_ = this;
455 other->next_chunk_ = this; 473 other->next_chunk_ = this;
456 } 474 }
457 475
458 476
459 void MemoryChunk::Unlink() { 477 void MemoryChunk::Unlink() {
460 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) { 478 if (!InNewSpace() && IsFlagSet(SCAN_ON_SCAVENGE)) {
461 heap_->decrement_scan_on_scavenge_pages(); 479 heap_->decrement_scan_on_scavenge_pages();
462 ClearFlag(SCAN_ON_SCAVENGE); 480 ClearFlag(SCAN_ON_SCAVENGE);
463 } 481 }
482 for (HeapVisualizer* vis = heap_->visualizer();
483 vis != NULL;
Vyacheslav Egorov (Chromium) 2011/09/28 15:21:11 ditto
484 vis = vis->next()) {
485 vis->ConstantAllocation(reinterpret_cast<uintptr_t>(address()),
486 size(),
487 255);
488 }
464 next_chunk_->prev_chunk_ = prev_chunk_; 489 next_chunk_->prev_chunk_ = prev_chunk_;
465 prev_chunk_->next_chunk_ = next_chunk_; 490 prev_chunk_->next_chunk_ = next_chunk_;
466 prev_chunk_ = NULL; 491 prev_chunk_ = NULL;
467 next_chunk_ = NULL; 492 next_chunk_ = NULL;
468 } 493 }
469 494
470 495
471 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, 496 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
472 Executability executable, 497 Executability executable,
473 Space* owner) { 498 Space* owner) {
(...skipping 185 matching lines...) Expand 10 before | Expand all | Expand 10 after
659 684
660 // ----------------------------------------------------------------------------- 685 // -----------------------------------------------------------------------------
661 // PagedSpace implementation 686 // PagedSpace implementation
662 687
663 PagedSpace::PagedSpace(Heap* heap, 688 PagedSpace::PagedSpace(Heap* heap,
664 intptr_t max_capacity, 689 intptr_t max_capacity,
665 AllocationSpace id, 690 AllocationSpace id,
666 Executability executable) 691 Executability executable)
667 : Space(heap, id, executable), 692 : Space(heap, id, executable),
668 free_list_(this), 693 free_list_(this),
694 last_visualized_top_(NULL),
669 was_swept_conservatively_(false), 695 was_swept_conservatively_(false),
670 first_unswept_page_(Page::FromAddress(NULL)), 696 first_unswept_page_(Page::FromAddress(NULL)),
671 last_unswept_page_(Page::FromAddress(NULL)) { 697 last_unswept_page_(Page::FromAddress(NULL)) {
672 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) 698 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
673 * Page::kObjectAreaSize; 699 * Page::kObjectAreaSize;
674 accounting_stats_.Clear(); 700 accounting_stats_.Clear();
675 701
676 allocation_info_.top = NULL; 702 SetTop(NULL, NULL);
677 allocation_info_.limit = NULL;
678 703
679 anchor_.InitializeAsAnchor(this); 704 anchor_.InitializeAsAnchor(this);
680 } 705 }
681 706
682 707
683 bool PagedSpace::Setup() { 708 bool PagedSpace::Setup() {
684 return true; 709 return true;
685 } 710 }
686 711
687 712
(...skipping 215 matching lines...) Expand 10 before | Expand all | Expand 10 after
903 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); 928 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
904 929
905 ASSERT(reservation_.IsReserved()); 930 ASSERT(reservation_.IsReserved());
906 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, 931 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
907 NOT_EXECUTABLE); 932 NOT_EXECUTABLE);
908 chunk_base_ = NULL; 933 chunk_base_ = NULL;
909 chunk_size_ = 0; 934 chunk_size_ = 0;
910 } 935 }
911 936
912 937
938 void NewSpace::VisualizeUnallocation(SemiSpace* semispace) {
939 for (NewSpacePage* page = semispace->first_page();
940 !page->is_anchor();
Vyacheslav Egorov (Chromium) 2011/09/28 15:21:11 ditto
941 page = page->next_page()) {
942 uint32_t address = reinterpret_cast<uintptr_t>(page->ObjectAreaStart());
943 uintptr_t end = reinterpret_cast<uintptr_t>(page->ObjectAreaEnd());
944 for (HeapVisualizer* vis = heap()->visualizer();
945 vis != NULL;
946 vis = vis->next()) {
947 vis->ConstantAllocation(address, end - address, 255);
948 }
949 }
950 }
951
952
953 void NewSpace::VisualizeTop() {
954 if (last_visualized_top_ != NULL) {
955 uintptr_t last = reinterpret_cast<uintptr_t>(last_visualized_top_);
956 uintptr_t next = reinterpret_cast<uintptr_t>(allocation_info_.top);
957 for (HeapVisualizer* vis = heap()->visualizer();
958 vis != NULL;
959 vis = vis->next()) {
960 int pix = vis->pixel_size_log_2();
961 if (next >> pix != last >> pix) {
962 vis->ConstantAllocation(
963 last, ((next >> pix) - (last >> pix)) << pix, 0);
964 }
965 }
966 }
967 last_visualized_top_ = allocation_info_.top;
968 }
969
970
// Swaps the roles of the two semispaces.  The current to-space is first
// reported to the visualizers as unallocated -- presumably because after
// the swap it becomes from-space and its pixels no longer hold live
// allocations; confirm against the scavenger's use of Flip().
void NewSpace::Flip() {
  VisualizeUnallocation(&to_space_);
  SemiSpace::Swap(&from_space_, &to_space_);
}
916 975
917 976
918 void NewSpace::Grow() { 977 void NewSpace::Grow() {
919 // Double the semispace size but only up to maximum capacity. 978 // Double the semispace size but only up to maximum capacity.
920 ASSERT(Capacity() < MaximumCapacity()); 979 ASSERT(Capacity() < MaximumCapacity());
921 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity())); 980 int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
922 if (to_space_.GrowTo(new_capacity)) { 981 if (to_space_.GrowTo(new_capacity)) {
923 // Only grow from space if we managed to grow to-space. 982 // Only grow from space if we managed to grow to-space.
(...skipping 30 matching lines...) Expand all
954 } 1013 }
955 allocation_info_.limit = to_space_.page_high(); 1014 allocation_info_.limit = to_space_.page_high();
956 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1015 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
957 } 1016 }
958 1017
959 1018
960 void NewSpace::UpdateAllocationInfo() { 1019 void NewSpace::UpdateAllocationInfo() {
961 allocation_info_.top = to_space_.page_low(); 1020 allocation_info_.top = to_space_.page_low();
962 allocation_info_.limit = to_space_.page_high(); 1021 allocation_info_.limit = to_space_.page_high();
963 1022
1023 last_visualized_top_ = allocation_info_.top;
1024
964 // Lower limit during incremental marking. 1025 // Lower limit during incremental marking.
965 if (heap()->incremental_marking()->IsMarking() && 1026 if (heap()->incremental_marking()->IsMarking() &&
966 inline_allocation_limit_step() != 0) { 1027 inline_allocation_limit_step() != 0) {
967 Address new_limit = 1028 Address new_limit =
968 allocation_info_.top + inline_allocation_limit_step(); 1029 allocation_info_.top + inline_allocation_limit_step();
969 allocation_info_.limit = Min(new_limit, allocation_info_.limit); 1030 allocation_info_.limit = Min(new_limit, allocation_info_.limit);
970 } 1031 }
971 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1032 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
972 } 1033 }
973 1034
(...skipping 21 matching lines...) Expand all
995 // from happening (all such allocations should go directly to LOSpace). 1056 // from happening (all such allocations should go directly to LOSpace).
996 return false; 1057 return false;
997 } 1058 }
998 if (!to_space_.AdvancePage()) { 1059 if (!to_space_.AdvancePage()) {
999 // Failed to get a new page in to-space. 1060 // Failed to get a new page in to-space.
1000 return false; 1061 return false;
1001 } 1062 }
1002 // Clear remainder of current page. 1063 // Clear remainder of current page.
1003 int remaining_in_page = 1064 int remaining_in_page =
1004 static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top); 1065 static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top);
1066 VisualizeTop();
1005 heap()->CreateFillerObjectAt(top, remaining_in_page); 1067 heap()->CreateFillerObjectAt(top, remaining_in_page);
1006 pages_used_++; 1068 pages_used_++;
1007 UpdateAllocationInfo(); 1069 UpdateAllocationInfo();
1008 return true; 1070 return true;
1009 } 1071 }
1010 1072
1011 1073
1012 #ifdef DEBUG 1074 #ifdef DEBUG
1013 // We do not use the SemiSpaceIterator because verification doesn't assume 1075 // We do not use the SemiSpaceIterator because verification doesn't assume
1014 // that it works (it depends on the invariants we are checking). 1076 // that it works (it depends on the invariants we are checking).
(...skipping 803 matching lines...) Expand 10 before | Expand all | Expand 10 after
1818 } else { 1880 } else {
1819 // TODO(gc) Try not freeing linear allocation region when bytes_left 1881 // TODO(gc) Try not freeing linear allocation region when bytes_left
1820 // are zero. 1882 // are zero.
1821 owner_->SetTop(NULL, NULL); 1883 owner_->SetTop(NULL, NULL);
1822 } 1884 }
1823 1885
1824 return new_node; 1886 return new_node;
1825 } 1887 }
1826 1888
1827 1889
1890 void PagedSpace::VisualizeTopChange(Address new_top) {
1891 if (heap()->has_visualizer()) {
1892 if (last_visualized_top_ == NULL) {
1893 last_visualized_top_ = new_top;
1894 return;
1895 }
1896 uintptr_t last = reinterpret_cast<uintptr_t>(last_visualized_top_);
1897 uintptr_t next = reinterpret_cast<uintptr_t>(allocation_info_.top);
1898 ASSERT(next >= last);
1899 if (next > last) {
1900 for (HeapVisualizer* vis = heap()->visualizer();
1901 vis != NULL;
1902 vis = vis->next()) {
1903 int bits = vis->pixel_size_log_2();
1904 uintptr_t last_pixel = last >> bits;
1905 uintptr_t next_pixel = next >> bits;
1906 if ((last_pixel << bits) != last) {
1907 if (next_pixel == last_pixel) {
1908 vis->ChangeAllocation(last, next - last);
1909 continue;
1910 }
1911 last_pixel++;
1912 vis->ChangeAllocation(last, (last_pixel << bits) - last);
1913 last = last_pixel << bits;
1914 }
1915 if (next_pixel > last_pixel) {
1916 vis->ConstantAllocation(last_pixel << bits,
1917 (next_pixel - last_pixel) << bits,
1918 0);
1919 }
1920 if (last_pixel << bits != last) {
1921 vis->ChangeAllocation(last, (last_pixel << bits) - last);
1922 }
1923 }
1924 }
1925 }
1926 last_visualized_top_ = new_top;
1927 }
1928
1929
1828 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) { 1930 static intptr_t CountFreeListItemsInList(FreeListNode* n, Page* p) {
1829 intptr_t sum = 0; 1931 intptr_t sum = 0;
1830 while (n != NULL) { 1932 while (n != NULL) {
1831 if (Page::FromAddress(n->address()) == p) { 1933 if (Page::FromAddress(n->address()) == p) {
1832 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n); 1934 FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
1833 sum += free_space->Size(); 1935 sum += free_space->Size();
1834 } 1936 }
1835 n = n->next(); 1937 n = n->next();
1836 } 1938 }
1837 return sum; 1939 return sum;
(...skipping 13 matching lines...) Expand all
1851 while (cur != NULL) { 1953 while (cur != NULL) {
1852 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map()); 1954 ASSERT(cur->map() == HEAP->raw_unchecked_free_space_map());
1853 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur); 1955 FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
1854 sum += cur_as_free_space->Size(); 1956 sum += cur_as_free_space->Size();
1855 cur = cur->next(); 1957 cur = cur->next();
1856 } 1958 }
1857 return sum; 1959 return sum;
1858 } 1960 }
1859 1961
1860 1962
1963 // This can take a very long time because it is linear in the number of entries
1964 // on the free list, so it should not be called if FreeListLength returns
1965 // kVeryLongFreeList.
1966 intptr_t FreeList::SumFreeLists() {
1967 intptr_t sum = SumFreeList(small_list_);
1968 sum += SumFreeList(medium_list_);
1969 sum += SumFreeList(large_list_);
1970 sum += SumFreeList(huge_list_);
1971 return sum;
1972 }
1973 #endif
1974
1975
1861 static const int kVeryLongFreeList = 500; 1976 static const int kVeryLongFreeList = 500;
1862 1977
1863 1978
1864 int FreeList::FreeListLength(FreeListNode* cur) { 1979 int FreeList::FreeListLength(FreeListNode* cur) {
1865 int length = 0; 1980 int length = 0;
1866 while (cur != NULL) { 1981 while (cur != NULL) {
1867 length++; 1982 length++;
1868 cur = cur->next(); 1983 cur = cur->next();
1869 if (length == kVeryLongFreeList) return length; 1984 if (length == kVeryLongFreeList) return length;
1870 } 1985 }
1871 return length; 1986 return length;
1872 } 1987 }
1873 1988
1874 1989
1875 bool FreeList::IsVeryLong() { 1990 bool FreeList::IsVeryLong() {
1876 if (FreeListLength(small_list_) == kVeryLongFreeList) return true; 1991 if (FreeListLength(small_list_) == kVeryLongFreeList) return true;
1877 if (FreeListLength(medium_list_) == kVeryLongFreeList) return true; 1992 if (FreeListLength(medium_list_) == kVeryLongFreeList) return true;
1878 if (FreeListLength(large_list_) == kVeryLongFreeList) return true; 1993 if (FreeListLength(large_list_) == kVeryLongFreeList) return true;
1879 if (FreeListLength(huge_list_) == kVeryLongFreeList) return true; 1994 if (FreeListLength(huge_list_) == kVeryLongFreeList) return true;
1880 return false; 1995 return false;
1881 } 1996 }
1882 1997
1883 1998
1884 // This can take a very long time because it is linear in the number of entries
1885 // on the free list, so it should not be called if FreeListLength returns
1886 // kVeryLongFreeList.
1887 intptr_t FreeList::SumFreeLists() {
1888 intptr_t sum = SumFreeList(small_list_);
1889 sum += SumFreeList(medium_list_);
1890 sum += SumFreeList(large_list_);
1891 sum += SumFreeList(huge_list_);
1892 return sum;
1893 }
1894 #endif
1895
1896
1897 // ----------------------------------------------------------------------------- 1999 // -----------------------------------------------------------------------------
1898 // OldSpace implementation 2000 // OldSpace implementation
1899 2001
1900 bool NewSpace::ReserveSpace(int bytes) { 2002 bool NewSpace::ReserveSpace(int bytes) {
1901 // We can't reliably unpack a partial snapshot that needs more new space 2003 // We can't reliably unpack a partial snapshot that needs more new space
1902 // space than the minimum NewSpace size. 2004 // space than the minimum NewSpace size.
1903 ASSERT(bytes <= InitialCapacity()); 2005 ASSERT(bytes <= InitialCapacity());
1904 Address limit = allocation_info_.limit; 2006 Address limit = allocation_info_.limit;
1905 Address top = allocation_info_.top; 2007 Address top = allocation_info_.top;
1906 return limit - top >= bytes; 2008 return limit - top >= bytes;
(...skipping 98 matching lines...) Expand 10 before | Expand all | Expand 10 after
2005 2107
2006 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { 2108 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2007 if (allocation_info_.top >= allocation_info_.limit) return; 2109 if (allocation_info_.top >= allocation_info_.limit) return;
2008 2110
2009 if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) { 2111 if (Page::FromAddress(allocation_info_.top)->IsEvacuationCandidate()) {
2010 // Create filler object to keep page iterable if it was iterable. 2112 // Create filler object to keep page iterable if it was iterable.
2011 int remaining = 2113 int remaining =
2012 static_cast<int>(allocation_info_.limit - allocation_info_.top); 2114 static_cast<int>(allocation_info_.limit - allocation_info_.top);
2013 heap()->CreateFillerObjectAt(allocation_info_.top, remaining); 2115 heap()->CreateFillerObjectAt(allocation_info_.top, remaining);
2014 2116
2015 allocation_info_.top = NULL; 2117 SetTop(NULL, NULL);
2016 allocation_info_.limit = NULL;
2017 } 2118 }
2018 } 2119 }
2019 2120
2020 2121
2021 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { 2122 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
2022 // Allocation in this space has failed. 2123 // Allocation in this space has failed.
2023 2124
2024 // Free list allocation failed and there is no next page. Fail if we have 2125 // Free list allocation failed and there is no next page. Fail if we have
2025 // hit the old generation size limit that should cause a garbage 2126 // hit the old generation size limit that should cause a garbage
2026 // collection. 2127 // collection.
(...skipping 486 matching lines...) Expand 10 before | Expand all | Expand 10 after
2513 object->ShortPrint(); 2614 object->ShortPrint();
2514 PrintF("\n"); 2615 PrintF("\n");
2515 } 2616 }
2516 printf(" --------------------------------------\n"); 2617 printf(" --------------------------------------\n");
2517 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 2618 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2518 } 2619 }
2519 2620
2520 #endif // DEBUG 2621 #endif // DEBUG
2521 2622
2522 } } // namespace v8::internal 2623 } } // namespace v8::internal
OLDNEW
« src/spaces.h ('K') | « src/spaces.h ('k') | src/spaces-inl.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698