Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 3301008: [Isolates] Add heap pointer to all maps and use map->heap() more. (Closed)
Patch Set: even more Created 10 years, 3 months ago
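Reviewer note: the pattern this CL applies throughout spaces.cc is mechanical. Space and its subclasses now take a Heap* at construction and expose a heap() accessor, so call sites that previously reached for the process-global HEAP macro or Isolate::Current() ask the space for its own heap instead. Below is a minimal standalone sketch of that pattern; the class bodies are simplified stand-ins, not the real V8 declarations.

// heap_threading_sketch.cc -- minimal sketch of this CL's pattern; the class
// bodies are stand-ins, not the real V8 declarations.
#include <cstdio>

class Isolate;

class Heap {
 public:
  explicit Heap(Isolate* isolate) : isolate_(isolate) {}
  Isolate* isolate() const { return isolate_; }
 private:
  Isolate* isolate_;
};

class Isolate {
 public:
  Isolate() : heap_(this) {}
  Heap* heap() { return &heap_; }
 private:
  Heap heap_;
};

class Space {
 public:
  // Before: methods used the global Isolate::Current()/HEAP. After: the heap
  // is threaded through the constructor and methods call this->heap().
  explicit Space(Heap* heap) : heap_(heap) {}
  Heap* heap() const { return heap_; }
  void Expand() {
    // was: Isolate::Current()->memory_allocator()->AllocatePages(...)
    // now: heap()->isolate()->memory_allocator()->AllocatePages(...)
    std::printf("isolate: %p\n", static_cast<void*>(heap()->isolate()));
  }
 private:
  Heap* heap_;
};

int main() {
  Isolate isolate;
  Space space(isolate.heap());
  space.Expand();
  return 0;
}

Threading the heap through the object graph is what lets multiple isolates coexist in one process: there is no longer a single ambient heap to reach for.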
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 753 matching lines...)
764 } 764 }
765 765
766 return last_page; 766 return last_page;
767 } 767 }
768 768
769 769
770 770
771 // ----------------------------------------------------------------------------- 771 // -----------------------------------------------------------------------------
772 // PagedSpace implementation 772 // PagedSpace implementation
773 773
774 PagedSpace::PagedSpace(int max_capacity, 774 PagedSpace::PagedSpace(Heap* heap,
775 int max_capacity,
775 AllocationSpace id, 776 AllocationSpace id,
776 Executability executable) 777 Executability executable)
777 : Space(id, executable) { 778 : Space(heap, id, executable) {
778 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) 779 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
779 * Page::kObjectAreaSize; 780 * Page::kObjectAreaSize;
780 accounting_stats_.Clear(); 781 accounting_stats_.Clear();
781 782
782 allocation_info_.top = NULL; 783 allocation_info_.top = NULL;
783 allocation_info_.limit = NULL; 784 allocation_info_.limit = NULL;
784 785
785 mc_forwarding_info_.top = NULL; 786 mc_forwarding_info_.top = NULL;
786 mc_forwarding_info_.limit = NULL; 787 mc_forwarding_info_.limit = NULL;
787 } 788 }
(...skipping 83 matching lines...)
871 PageIterator it(this, PageIterator::ALL_PAGES); 872 PageIterator it(this, PageIterator::ALL_PAGES);
872 while (it.has_next()) { 873 while (it.has_next()) {
873 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); 874 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
874 } 875 }
875 } 876 }
876 877
877 878
878 Object* PagedSpace::FindObject(Address addr) { 879 Object* PagedSpace::FindObject(Address addr) {
879 // Note: this function can only be called before or after mark-compact GC 880 // Note: this function can only be called before or after mark-compact GC
880 // because it accesses map pointers. 881 // because it accesses map pointers.
881 ASSERT(!HEAP->mark_compact_collector()->in_use()); 882 ASSERT(!heap()->mark_compact_collector()->in_use());
882 883
883 if (!Contains(addr)) return Failure::Exception(); 884 if (!Contains(addr)) return Failure::Exception();
884 885
885 Page* p = Page::FromAddress(addr); 886 Page* p = Page::FromAddress(addr);
886 ASSERT(IsUsed(p)); 887 ASSERT(IsUsed(p));
887 Address cur = p->ObjectAreaStart(); 888 Address cur = p->ObjectAreaStart();
888 Address end = p->AllocationTop(); 889 Address end = p->AllocationTop();
889 while (cur < end) { 890 while (cur < end) {
890 HeapObject* obj = HeapObject::FromAddress(cur); 891 HeapObject* obj = HeapObject::FromAddress(cur);
891 Address next = cur + obj->Size(); 892 Address next = cur + obj->Size();
(...skipping 95 matching lines...)
987 if (Capacity() == max_capacity_) return false; 988 if (Capacity() == max_capacity_) return false;
988 989
989 ASSERT(Capacity() < max_capacity_); 990 ASSERT(Capacity() < max_capacity_);
990 // Last page must be valid and its next page is invalid. 991 // Last page must be valid and its next page is invalid.
991 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid()); 992 ASSERT(last_page->is_valid() && !last_page->next_page()->is_valid());
992 993
993 int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize; 994 int available_pages = (max_capacity_ - Capacity()) / Page::kObjectAreaSize;
994 if (available_pages <= 0) return false; 995 if (available_pages <= 0) return false;
995 996
996 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk); 997 int desired_pages = Min(available_pages, MemoryAllocator::kPagesPerChunk);
997 Page* p = Isolate::Current()->memory_allocator()->AllocatePages( 998 Page* p = heap()->isolate()->memory_allocator()->AllocatePages(
998 desired_pages, &desired_pages, this); 999 desired_pages, &desired_pages, this);
999 if (!p->is_valid()) return false; 1000 if (!p->is_valid()) return false;
1000 1001
1001 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize); 1002 accounting_stats_.ExpandSpace(desired_pages * Page::kObjectAreaSize);
1002 ASSERT(Capacity() <= max_capacity_); 1003 ASSERT(Capacity() <= max_capacity_);
1003 1004
1004 Isolate::Current()->memory_allocator()->SetNextPage(last_page, p); 1005 heap()->isolate()->memory_allocator()->SetNextPage(last_page, p);
1005 1006
1006 // Sequentially clear region marks of new pages and cache the 1007 // Sequentially clear region marks of new pages and cache the
1007 // new last page in the space. 1008 // new last page in the space.
1008 while (p->is_valid()) { 1009 while (p->is_valid()) {
1009 p->SetRegionMarks(Page::kAllRegionsCleanMarks); 1010 p->SetRegionMarks(Page::kAllRegionsCleanMarks);
1010 last_page_ = p; 1011 last_page_ = p;
1011 p = p->next_page(); 1012 p = p->next_page();
1012 } 1013 }
1013 1014
1014 return true; 1015 return true;
(...skipping 22 matching lines...)
1037 Page* top_page = AllocationTopPage(); 1038 Page* top_page = AllocationTopPage();
1038 ASSERT(top_page->is_valid()); 1039 ASSERT(top_page->is_valid());
1039 1040
1040 // Count the number of pages we would like to free. 1041 // Count the number of pages we would like to free.
1041 int pages_to_free = 0; 1042 int pages_to_free = 0;
1042 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) { 1043 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
1043 pages_to_free++; 1044 pages_to_free++;
1044 } 1045 }
1045 1046
1046 // Free pages after top_page. 1047 // Free pages after top_page.
1047 Page* p = Isolate::Current()->memory_allocator()-> 1048 Page* p = heap()->isolate()->memory_allocator()->
1048 FreePages(top_page->next_page()); 1049 FreePages(top_page->next_page());
1049 Isolate::Current()->memory_allocator()->SetNextPage(top_page, p); 1050 heap()->isolate()->memory_allocator()->SetNextPage(top_page, p);
1050 1051
1051 // Find out how many pages we failed to free and update last_page_. 1052 // Find out how many pages we failed to free and update last_page_.
1052 // Please note pages can only be freed in whole chunks. 1053 // Please note pages can only be freed in whole chunks.
1053 last_page_ = top_page; 1054 last_page_ = top_page;
1054 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) { 1055 for (Page* p = top_page->next_page(); p->is_valid(); p = p->next_page()) {
1055 pages_to_free--; 1056 pages_to_free--;
1056 last_page_ = p; 1057 last_page_ = p;
1057 } 1058 }
1058 1059
1059 accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize); 1060 accounting_stats_.ShrinkSpace(pages_to_free * Page::kObjectAreaSize);
1060 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize); 1061 ASSERT(Capacity() == CountTotalPages() * Page::kObjectAreaSize);
1061 } 1062 }
1062 1063
1063 1064
1064 bool PagedSpace::EnsureCapacity(int capacity) { 1065 bool PagedSpace::EnsureCapacity(int capacity) {
1065 if (Capacity() >= capacity) return true; 1066 if (Capacity() >= capacity) return true;
1066 1067
1067 // Start from the allocation top and loop to the last page in the space. 1068 // Start from the allocation top and loop to the last page in the space.
1068 Page* last_page = AllocationTopPage(); 1069 Page* last_page = AllocationTopPage();
1069 Page* next_page = last_page->next_page(); 1070 Page* next_page = last_page->next_page();
1070 while (next_page->is_valid()) { 1071 while (next_page->is_valid()) {
1071 last_page = Isolate::Current()->memory_allocator()-> 1072 last_page = heap()->isolate()->memory_allocator()->
1072 FindLastPageInSameChunk(next_page); 1073 FindLastPageInSameChunk(next_page);
1073 next_page = last_page->next_page(); 1074 next_page = last_page->next_page();
1074 } 1075 }
1075 1076
1076 // Expand the space until it has the required capacity or expansion fails. 1077 // Expand the space until it has the required capacity or expansion fails.
1077 do { 1078 do {
1078 if (!Expand(last_page)) return false; 1079 if (!Expand(last_page)) return false;
1079 ASSERT(last_page->next_page()->is_valid()); 1080 ASSERT(last_page->next_page()->is_valid());
1080 last_page = 1081 last_page =
1081 Isolate::Current()->memory_allocator()->FindLastPageInSameChunk( 1082 heap()->isolate()->memory_allocator()->FindLastPageInSameChunk(
1082 last_page->next_page()); 1083 last_page->next_page());
1083 } while (Capacity() < capacity); 1084 } while (Capacity() < capacity);
1084 1085
1085 return true; 1086 return true;
1086 } 1087 }
1087 1088
1088 1089
1089 #ifdef DEBUG 1090 #ifdef DEBUG
1090 void PagedSpace::Print() { } 1091 void PagedSpace::Print() { }
1091 #endif 1092 #endif
1092 1093
1093 1094
1094 #ifdef DEBUG 1095 #ifdef DEBUG
1095 // We do not assume that the PageIterator works, because it depends on the 1096 // We do not assume that the PageIterator works, because it depends on the
1096 // invariants we are checking during verification. 1097 // invariants we are checking during verification.
1097 void PagedSpace::Verify(ObjectVisitor* visitor) { 1098 void PagedSpace::Verify(ObjectVisitor* visitor) {
1098 // The allocation pointer should be valid, and it should be in a page in the 1099 // The allocation pointer should be valid, and it should be in a page in the
1099 // space. 1100 // space.
1100 ASSERT(allocation_info_.VerifyPagedAllocation()); 1101 ASSERT(allocation_info_.VerifyPagedAllocation());
1101 Page* top_page = Page::FromAllocationTop(allocation_info_.top); 1102 Page* top_page = Page::FromAllocationTop(allocation_info_.top);
1102 ASSERT(Isolate::Current()->memory_allocator()->IsPageInSpace(top_page, this)); 1103 ASSERT(heap()->isolate()->memory_allocator()->IsPageInSpace(top_page, this));
1103 1104
1104 // Loop over all the pages. 1105 // Loop over all the pages.
1105 bool above_allocation_top = false; 1106 bool above_allocation_top = false;
1106 Page* current_page = first_page_; 1107 Page* current_page = first_page_;
1107 while (current_page->is_valid()) { 1108 while (current_page->is_valid()) {
1108 if (above_allocation_top) { 1109 if (above_allocation_top) {
1109 // We don't care what's above the allocation top. 1110 // We don't care what's above the allocation top.
1110 } else { 1111 } else {
1111 Address top = current_page->AllocationTop(); 1112 Address top = current_page->AllocationTop();
1112 if (current_page == top_page) { 1113 if (current_page == top_page) {
1113 ASSERT(top == allocation_info_.top); 1114 ASSERT(top == allocation_info_.top);
1114 // The next page will be above the allocation top. 1115 // The next page will be above the allocation top.
1115 above_allocation_top = true; 1116 above_allocation_top = true;
1116 } 1117 }
1117 1118
1118 // It should be packed with objects from the bottom to the top. 1119 // It should be packed with objects from the bottom to the top.
1119 Address current = current_page->ObjectAreaStart(); 1120 Address current = current_page->ObjectAreaStart();
1120 while (current < top) { 1121 while (current < top) {
1121 HeapObject* object = HeapObject::FromAddress(current); 1122 HeapObject* object = HeapObject::FromAddress(current);
1122 1123
1123 // The first word should be a map, and we expect all map pointers to 1124 // The first word should be a map, and we expect all map pointers to
1124 // be in map space. 1125 // be in map space.
1125 Map* map = object->map(); 1126 Map* map = object->map();
1126 ASSERT(map->IsMap()); 1127 ASSERT(map->IsMap());
1127 ASSERT(HEAP->map_space()->Contains(map)); 1128 ASSERT(heap()->map_space()->Contains(map));
1128 1129
1129 // Perform space-specific object verification. 1130 // Perform space-specific object verification.
1130 VerifyObject(object); 1131 VerifyObject(object);
1131 1132
1132 // The object itself should look OK. 1133 // The object itself should look OK.
1133 object->Verify(); 1134 object->Verify();
1134 1135
1135 // All the interior pointers should be contained in the heap, and 1136 // All the interior pointers should be contained in the heap, and
1136 // page regions covering intergenerational references should be 1137 // page regions covering intergenerational references should be
1137 // marked dirty. 1138 // marked dirty.
(...skipping 15 matching lines...)
1153 1154
1154 // ----------------------------------------------------------------------------- 1155 // -----------------------------------------------------------------------------
1155 // NewSpace implementation 1156 // NewSpace implementation
1156 1157
1157 1158
1158 bool NewSpace::Setup(Address start, int size) { 1159 bool NewSpace::Setup(Address start, int size) {
1159 // Set up new space based on the preallocated memory block defined by 1160 // Set up new space based on the preallocated memory block defined by
1160 // start and size. The provided space is divided into two semi-spaces. 1161 // start and size. The provided space is divided into two semi-spaces.
1161 // To support fast containment testing in the new space, the size of 1162 // To support fast containment testing in the new space, the size of
1162 // this chunk must be a power of two and it must be aligned to its size. 1163 // this chunk must be a power of two and it must be aligned to its size.
1163 int initial_semispace_capacity = HEAP->InitialSemiSpaceSize(); 1164 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1164 int maximum_semispace_capacity = HEAP->MaxSemiSpaceSize(); 1165 int maximum_semispace_capacity = heap()->MaxSemiSpaceSize();
1165 1166
1166 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); 1167 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1167 ASSERT(IsPowerOf2(maximum_semispace_capacity)); 1168 ASSERT(IsPowerOf2(maximum_semispace_capacity));
1168 1169
1169 // Allocate and set up the histogram arrays if necessary. 1170 // Allocate and set up the histogram arrays if necessary.
1170 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) 1171 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
1171 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); 1172 allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1172 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1); 1173 promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
1173 1174
1174 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \ 1175 #define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
1175 promoted_histogram_[name].set_name(#name); 1176 promoted_histogram_[name].set_name(#name);
1176 INSTANCE_TYPE_LIST(SET_NAME) 1177 INSTANCE_TYPE_LIST(SET_NAME)
1177 #undef SET_NAME 1178 #undef SET_NAME
1178 #endif 1179 #endif
1179 1180
1180 ASSERT(size == 2 * HEAP->ReservedSemiSpaceSize()); 1181 ASSERT(size == 2 * heap()->ReservedSemiSpaceSize());
1181 ASSERT(IsAddressAligned(start, size, 0)); 1182 ASSERT(IsAddressAligned(start, size, 0));
1182 1183
1183 if (!to_space_.Setup(start, 1184 if (!to_space_.Setup(start,
1184 initial_semispace_capacity, 1185 initial_semispace_capacity,
1185 maximum_semispace_capacity)) { 1186 maximum_semispace_capacity)) {
1186 return false; 1187 return false;
1187 } 1188 }
1188 if (!from_space_.Setup(start + maximum_semispace_capacity, 1189 if (!from_space_.Setup(start + maximum_semispace_capacity,
1189 initial_semispace_capacity, 1190 initial_semispace_capacity,
1190 maximum_semispace_capacity)) { 1191 maximum_semispace_capacity)) {
(...skipping 34 matching lines...)
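The comment at the top of NewSpace::Setup requires the chunk to be a power of two in size and aligned to its size; that is what reduces containment testing to one mask and one compare. A standalone sketch of the test this enables, with illustrative constants rather than V8's actual values:

// fast_contains_sketch.cc -- why a power-of-two, size-aligned chunk gives a
// constant-time containment test (illustrative constants, not V8's).
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kChunkSize = uintptr_t(1) << 20;  // 1 MB, a power of two
  const uintptr_t kChunkStart = 16 * kChunkSize;    // aligned to its own size

  // The chunk occupies exactly one size-aligned "slot" of the address space,
  // so an address is inside iff its high bits match the chunk start.
  const uintptr_t mask = ~(kChunkSize - 1);
  auto contains = [&](uintptr_t addr) { return (addr & mask) == kChunkStart; };

  assert(contains(kChunkStart));
  assert(contains(kChunkStart + kChunkSize - 1));
  assert(!contains(kChunkStart - 1));
  assert(!contains(kChunkStart + kChunkSize));
  return 0;
}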
1225 mc_forwarding_info_.limit = NULL; 1226 mc_forwarding_info_.limit = NULL;
1226 1227
1227 to_space_.TearDown(); 1228 to_space_.TearDown();
1228 from_space_.TearDown(); 1229 from_space_.TearDown();
1229 } 1230 }
1230 1231
1231 1232
1232 #ifdef ENABLE_HEAP_PROTECTION 1233 #ifdef ENABLE_HEAP_PROTECTION
1233 1234
1234 void NewSpace::Protect() { 1235 void NewSpace::Protect() {
1235 Isolate::Current()->memory_allocator()->Protect(ToSpaceLow(), Capacity()); 1236 heap()->isolate()->memory_allocator()->Protect(ToSpaceLow(), Capacity());
1236 Isolate::Current()->memory_allocator()->Protect(FromSpaceLow(), Capacity()); 1237 heap()->isolate()->memory_allocator()->Protect(FromSpaceLow(), Capacity());
1237 } 1238 }
1238 1239
1239 1240
1240 void NewSpace::Unprotect() { 1241 void NewSpace::Unprotect() {
1241 Isolate::Current()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(), 1242 heap()->isolate()->memory_allocator()->Unprotect(ToSpaceLow(), Capacity(),
1242 to_space_.executable()); 1243 to_space_.executable());
1243 Isolate::Current()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(), 1244 heap()->isolate()->memory_allocator()->Unprotect(FromSpaceLow(), Capacity(),
1244 from_space_.executable()); 1245 from_space_.executable());
1245 } 1246 }
1246 1247
1247 #endif 1248 #endif
1248 1249
1249 1250
1250 void NewSpace::Flip() { 1251 void NewSpace::Flip() {
1251 SemiSpace tmp = from_space_; 1252 SemiSpace tmp = from_space_;
1252 from_space_ = to_space_; 1253 from_space_ = to_space_;
1253 to_space_ = tmp; 1254 to_space_ = tmp;
1254 } 1255 }
(...skipping 73 matching lines...)
1328 // There should be objects packed in from the low address up to the 1329 // There should be objects packed in from the low address up to the
1329 // allocation pointer. 1330 // allocation pointer.
1330 Address current = to_space_.low(); 1331 Address current = to_space_.low();
1331 while (current < top()) { 1332 while (current < top()) {
1332 HeapObject* object = HeapObject::FromAddress(current); 1333 HeapObject* object = HeapObject::FromAddress(current);
1333 1334
1334 // The first word should be a map, and we expect all map pointers to 1335 // The first word should be a map, and we expect all map pointers to
1335 // be in map space. 1336 // be in map space.
1336 Map* map = object->map(); 1337 Map* map = object->map();
1337 ASSERT(map->IsMap()); 1338 ASSERT(map->IsMap());
1338 ASSERT(HEAP->map_space()->Contains(map)); 1339 ASSERT(heap()->map_space()->Contains(map));
1339 1340
1340 // The object should not be code or a map. 1341 // The object should not be code or a map.
1341 ASSERT(!object->IsMap()); 1342 ASSERT(!object->IsMap());
1342 ASSERT(!object->IsCode()); 1343 ASSERT(!object->IsCode());
1343 1344
1344 // The object itself should look OK. 1345 // The object itself should look OK.
1345 object->Verify(); 1346 object->Verify();
1346 1347
1347 // All the interior pointers should be contained in the heap. 1348 // All the interior pointers should be contained in the heap.
1348 VerifyPointersVisitor visitor; 1349 VerifyPointersVisitor visitor;
1349 int size = object->Size(); 1350 int size = object->Size();
1350 object->IterateBody(map->instance_type(), size, &visitor); 1351 object->IterateBody(map->instance_type(), size, &visitor);
1351 1352
1352 current += size; 1353 current += size;
1353 } 1354 }
1354 1355
1355 // The allocation pointer should not be in the middle of an object. 1356 // The allocation pointer should not be in the middle of an object.
1356 ASSERT(current == top()); 1357 ASSERT(current == top());
1357 } 1358 }
1358 #endif 1359 #endif
1359 1360
1360 1361
1361 bool SemiSpace::Commit() { 1362 bool SemiSpace::Commit() {
1362 ASSERT(!is_committed()); 1363 ASSERT(!is_committed());
1363 if (!Isolate::Current()->memory_allocator()->CommitBlock( 1364 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1364 start_, capacity_, executable())) { 1365 start_, capacity_, executable())) {
1365 return false; 1366 return false;
1366 } 1367 }
1367 committed_ = true; 1368 committed_ = true;
1368 return true; 1369 return true;
1369 } 1370 }
1370 1371
1371 1372
1372 bool SemiSpace::Uncommit() { 1373 bool SemiSpace::Uncommit() {
1373 ASSERT(is_committed()); 1374 ASSERT(is_committed());
1374 if (!Isolate::Current()->memory_allocator()->UncommitBlock( 1375 if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1375 start_, capacity_)) { 1376 start_, capacity_)) {
1376 return false; 1377 return false;
1377 } 1378 }
1378 committed_ = false; 1379 committed_ = false;
1379 return true; 1380 return true;
1380 } 1381 }
1381 1382
1382 1383
1383 // ----------------------------------------------------------------------------- 1384 // -----------------------------------------------------------------------------
1384 // SemiSpace implementation 1385 // SemiSpace implementation
(...skipping 26 matching lines...)
1411 start_ = NULL; 1412 start_ = NULL;
1412 capacity_ = 0; 1413 capacity_ = 0;
1413 } 1414 }
1414 1415
1415 1416
1416 bool SemiSpace::Grow() { 1417 bool SemiSpace::Grow() {
1417 // Double the semispace size but only up to maximum capacity. 1418 // Double the semispace size but only up to maximum capacity.
1418 int maximum_extra = maximum_capacity_ - capacity_; 1419 int maximum_extra = maximum_capacity_ - capacity_;
1419 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), 1420 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())),
1420 maximum_extra); 1421 maximum_extra);
1421 if (!Isolate::Current()->memory_allocator()->CommitBlock( 1422 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1422 high(), extra, executable())) { 1423 high(), extra, executable())) {
1423 return false; 1424 return false;
1424 } 1425 }
1425 capacity_ += extra; 1426 capacity_ += extra;
1426 return true; 1427 return true;
1427 } 1428 }
1428 1429
1429 1430
1430 bool SemiSpace::GrowTo(int new_capacity) { 1431 bool SemiSpace::GrowTo(int new_capacity) {
1431 ASSERT(new_capacity <= maximum_capacity_); 1432 ASSERT(new_capacity <= maximum_capacity_);
1432 ASSERT(new_capacity > capacity_); 1433 ASSERT(new_capacity > capacity_);
1433 size_t delta = new_capacity - capacity_; 1434 size_t delta = new_capacity - capacity_;
1434 ASSERT(IsAligned(delta, OS::AllocateAlignment())); 1435 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1435 if (!Isolate::Current()->memory_allocator()->CommitBlock( 1436 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1436 high(), delta, executable())) { 1437 high(), delta, executable())) {
1437 return false; 1438 return false;
1438 } 1439 }
1439 capacity_ = new_capacity; 1440 capacity_ = new_capacity;
1440 return true; 1441 return true;
1441 } 1442 }
1442 1443
1443 1444
1444 bool SemiSpace::ShrinkTo(int new_capacity) { 1445 bool SemiSpace::ShrinkTo(int new_capacity) {
1445 ASSERT(new_capacity >= initial_capacity_); 1446 ASSERT(new_capacity >= initial_capacity_);
1446 ASSERT(new_capacity < capacity_); 1447 ASSERT(new_capacity < capacity_);
1447 size_t delta = capacity_ - new_capacity; 1448 size_t delta = capacity_ - new_capacity;
1448 ASSERT(IsAligned(delta, OS::AllocateAlignment())); 1449 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1449 if (!Isolate::Current()->memory_allocator()->UncommitBlock( 1450 if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1450 high() - delta, delta)) { 1451 high() - delta, delta)) {
1451 return false; 1452 return false;
1452 } 1453 }
1453 capacity_ = new_capacity; 1454 capacity_ = new_capacity;
1454 return true; 1455 return true;
1455 } 1456 }
1456 1457
1457 1458
1458 #ifdef DEBUG 1459 #ifdef DEBUG
1459 void SemiSpace::Print() { } 1460 void SemiSpace::Print() { }
(...skipping 569 matching lines...)
2029 } 2030 }
2030 2031
2031 Page* first = NULL; 2032 Page* first = NULL;
2032 2033
2033 // Remove pages from the list. 2034 // Remove pages from the list.
2034 if (prev == NULL) { 2035 if (prev == NULL) {
2035 first = first_page_; 2036 first = first_page_;
2036 first_page_ = last->next_page(); 2037 first_page_ = last->next_page();
2037 } else { 2038 } else {
2038 first = prev->next_page(); 2039 first = prev->next_page();
2039 Isolate::Current()->memory_allocator()->SetNextPage( 2040 heap()->isolate()->memory_allocator()->SetNextPage(
2040 prev, last->next_page()); 2041 prev, last->next_page());
2041 } 2042 }
2042 2043
2043 // Attach it after the last page. 2044 // Attach it after the last page.
2044 Isolate::Current()->memory_allocator()->SetNextPage(last_page_, first); 2045 heap()->isolate()->memory_allocator()->SetNextPage(last_page_, first);
2045 last_page_ = last; 2046 last_page_ = last;
2046 Isolate::Current()->memory_allocator()->SetNextPage(last, NULL); 2047 heap()->isolate()->memory_allocator()->SetNextPage(last, NULL);
2047 2048
2048 // Clean them up. 2049 // Clean them up.
2049 do { 2050 do {
2050 first->InvalidateWatermark(true); 2051 first->InvalidateWatermark(true);
2051 first->SetAllocationWatermark(first->ObjectAreaStart()); 2052 first->SetAllocationWatermark(first->ObjectAreaStart());
2052 first->SetCachedAllocationWatermark(first->ObjectAreaStart()); 2053 first->SetCachedAllocationWatermark(first->ObjectAreaStart());
2053 first->SetRegionMarks(Page::kAllRegionsCleanMarks); 2054 first->SetRegionMarks(Page::kAllRegionsCleanMarks);
2054 first = first->next_page(); 2055 first = first->next_page();
2055 } while (first != NULL); 2056 } while (first != NULL);
2056 2057
(...skipping 18 matching lines...)
2075 if (p == last_in_use) { 2076 if (p == last_in_use) {
2076 // We passed a page containing allocation top. All subsequent 2077 // We passed a page containing allocation top. All subsequent
2077 // pages are not used. 2078 // pages are not used.
2078 in_use = false; 2079 in_use = false;
2079 } 2080 }
2080 } 2081 }
2081 2082
2082 if (page_list_is_chunk_ordered_) return; 2083 if (page_list_is_chunk_ordered_) return;
2083 2084
2084 Page* new_last_in_use = Page::FromAddress(NULL); 2085 Page* new_last_in_use = Page::FromAddress(NULL);
2085 Isolate::Current()->memory_allocator()->RelinkPageListInChunkOrder( 2086 heap()->isolate()->memory_allocator()->RelinkPageListInChunkOrder(
2086 this, &first_page_, &last_page_, &new_last_in_use); 2087 this, &first_page_, &last_page_, &new_last_in_use);
2087 ASSERT(new_last_in_use->is_valid()); 2088 ASSERT(new_last_in_use->is_valid());
2088 2089
2089 if (new_last_in_use != last_in_use) { 2090 if (new_last_in_use != last_in_use) {
2090 // Current allocation top points to a page which is now in the middle 2091 // Current allocation top points to a page which is now in the middle
2091 // of page list. We should move allocation top forward to the new last 2092 // of page list. We should move allocation top forward to the new last
2092 // used page so various object iterators will continue to work properly. 2093 // used page so various object iterators will continue to work properly.
2093 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) - 2094 int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
2094 last_in_use->AllocationTop()); 2095 last_in_use->AllocationTop());
2095 2096
2096 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop()); 2097 last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
2097 if (size_in_bytes > 0) { 2098 if (size_in_bytes > 0) {
2098 Address start = last_in_use->AllocationTop(); 2099 Address start = last_in_use->AllocationTop();
2099 if (deallocate_blocks) { 2100 if (deallocate_blocks) {
2100 accounting_stats_.AllocateBytes(size_in_bytes); 2101 accounting_stats_.AllocateBytes(size_in_bytes);
2101 DeallocateBlock(start, size_in_bytes, add_to_freelist); 2102 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2102 } else { 2103 } else {
2103 HEAP->CreateFillerObjectAt(start, size_in_bytes); 2104 heap()->CreateFillerObjectAt(start, size_in_bytes);
2104 } 2105 }
2105 } 2106 }
2106 2107
2107 // New last in use page was in the middle of the list before 2108 // New last in use page was in the middle of the list before
2108 // sorting, so it is full. 2109 // sorting, so it is full.
2109 SetTop(new_last_in_use->AllocationTop()); 2110 SetTop(new_last_in_use->AllocationTop());
2110 2111
2111 ASSERT(AllocationTopPage() == new_last_in_use); 2112 ASSERT(AllocationTopPage() == new_last_in_use);
2112 ASSERT(AllocationTopPage()->WasInUseBeforeMC()); 2113 ASSERT(AllocationTopPage()->WasInUseBeforeMC());
2113 } 2114 }
2114 2115
2115 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE); 2116 PageIterator pages_in_use_iterator(this, PageIterator::PAGES_IN_USE);
2116 while (pages_in_use_iterator.has_next()) { 2117 while (pages_in_use_iterator.has_next()) {
2117 Page* p = pages_in_use_iterator.next(); 2118 Page* p = pages_in_use_iterator.next();
2118 if (!p->WasInUseBeforeMC()) { 2119 if (!p->WasInUseBeforeMC()) {
2119 // Empty page is in the middle of a sequence of used pages. 2120 // Empty page is in the middle of a sequence of used pages.
2120 // Allocate it as a whole and deallocate immediately. 2121 // Allocate it as a whole and deallocate immediately.
2121 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) - 2122 int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
2122 p->ObjectAreaStart()); 2123 p->ObjectAreaStart());
2123 2124
2124 p->SetAllocationWatermark(p->ObjectAreaStart()); 2125 p->SetAllocationWatermark(p->ObjectAreaStart());
2125 Address start = p->ObjectAreaStart(); 2126 Address start = p->ObjectAreaStart();
2126 if (deallocate_blocks) { 2127 if (deallocate_blocks) {
2127 accounting_stats_.AllocateBytes(size_in_bytes); 2128 accounting_stats_.AllocateBytes(size_in_bytes);
2128 DeallocateBlock(start, size_in_bytes, add_to_freelist); 2129 DeallocateBlock(start, size_in_bytes, add_to_freelist);
2129 } else { 2130 } else {
2130 HEAP->CreateFillerObjectAt(start, size_in_bytes); 2131 heap()->CreateFillerObjectAt(start, size_in_bytes);
2131 } 2132 }
2132 } 2133 }
2133 } 2134 }
2134 2135
2135 page_list_is_chunk_ordered_ = true; 2136 page_list_is_chunk_ordered_ = true;
2136 } 2137 }
2137 2138
2138 2139
2139 void PagedSpace::PrepareForMarkCompact(bool will_compact) { 2140 void PagedSpace::PrepareForMarkCompact(bool will_compact) {
2140 if (will_compact) { 2141 if (will_compact) {
2141 RelinkPageListInChunkOrder(false); 2142 RelinkPageListInChunkOrder(false);
2142 } 2143 }
2143 } 2144 }
2144 2145
2145 2146
2146 bool PagedSpace::ReserveSpace(int bytes) { 2147 bool PagedSpace::ReserveSpace(int bytes) {
2147 Address limit = allocation_info_.limit; 2148 Address limit = allocation_info_.limit;
2148 Address top = allocation_info_.top; 2149 Address top = allocation_info_.top;
2149 if (limit - top >= bytes) return true; 2150 if (limit - top >= bytes) return true;
2150 2151
2151 // There wasn't enough space in the current page. Let's put the rest 2152 // There wasn't enough space in the current page. Let's put the rest
2152 // of the page on the free list and start a fresh page. 2153 // of the page on the free list and start a fresh page.
2153 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_)); 2154 PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
2154 2155
2155 Page* reserved_page = TopPageOf(allocation_info_); 2156 Page* reserved_page = TopPageOf(allocation_info_);
2156 int bytes_left_to_reserve = bytes; 2157 int bytes_left_to_reserve = bytes;
2157 while (bytes_left_to_reserve > 0) { 2158 while (bytes_left_to_reserve > 0) {
2158 if (!reserved_page->next_page()->is_valid()) { 2159 if (!reserved_page->next_page()->is_valid()) {
2159 if (HEAP->OldGenerationAllocationLimitReached()) return false; 2160 if (heap()->OldGenerationAllocationLimitReached()) return false;
2160 Expand(reserved_page); 2161 Expand(reserved_page);
2161 } 2162 }
2162 bytes_left_to_reserve -= Page::kPageSize; 2163 bytes_left_to_reserve -= Page::kPageSize;
2163 reserved_page = reserved_page->next_page(); 2164 reserved_page = reserved_page->next_page();
2164 if (!reserved_page->is_valid()) return false; 2165 if (!reserved_page->is_valid()) return false;
2165 } 2166 }
2166 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid()); 2167 ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
2167 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true); 2168 TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
2168 SetAllocationInfo(&allocation_info_, 2169 SetAllocationInfo(&allocation_info_,
2169 TopPageOf(allocation_info_)->next_page()); 2170 TopPageOf(allocation_info_)->next_page());
2170 return true; 2171 return true;
2171 } 2172 }
2172 2173
2173 2174
2174 // You have to call this last, since the implementation from PagedSpace 2175 // You have to call this last, since the implementation from PagedSpace
2175 // doesn't know that memory was 'promised' to large object space. 2176 // doesn't know that memory was 'promised' to large object space.
2176 bool LargeObjectSpace::ReserveSpace(int bytes) { 2177 bool LargeObjectSpace::ReserveSpace(int bytes) {
2177 return HEAP->OldGenerationSpaceAvailable() >= bytes; 2178 return heap()->OldGenerationSpaceAvailable() >= bytes;
2178 } 2179 }
2179 2180
2180 2181
2181 // Slow case for normal allocation. Try in order: (1) allocate in the next 2182 // Slow case for normal allocation. Try in order: (1) allocate in the next
2182 // page in the space, (2) allocate off the space's free list, (3) expand the 2183 // page in the space, (2) allocate off the space's free list, (3) expand the
2183 // space, (4) fail. 2184 // space, (4) fail.
2184 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) { 2185 HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
2185 // Linear allocation in this space has failed. If there is another page 2186 // Linear allocation in this space has failed. If there is another page
2186 // in the space, move to that page and allocate there. This allocation 2187 // in the space, move to that page and allocate there. This allocation
2187 // should succeed (size_in_bytes should not be greater than a page's 2188 // should succeed (size_in_bytes should not be greater than a page's
2188 // object area size). 2189 // object area size).
2189 Page* current_page = TopPageOf(allocation_info_); 2190 Page* current_page = TopPageOf(allocation_info_);
2190 if (current_page->next_page()->is_valid()) { 2191 if (current_page->next_page()->is_valid()) {
2191 return AllocateInNextPage(current_page, size_in_bytes); 2192 return AllocateInNextPage(current_page, size_in_bytes);
2192 } 2193 }
2193 2194
2194 // There is no next page in this space. Try free list allocation unless that 2195 // There is no next page in this space. Try free list allocation unless that
2195 // is currently forbidden. 2196 // is currently forbidden.
2196 if (!HEAP->linear_allocation()) { 2197 if (!heap()->linear_allocation()) {
2197 int wasted_bytes; 2198 int wasted_bytes;
2198 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes); 2199 Object* result = free_list_.Allocate(size_in_bytes, &wasted_bytes);
2199 accounting_stats_.WasteBytes(wasted_bytes); 2200 accounting_stats_.WasteBytes(wasted_bytes);
2200 if (!result->IsFailure()) { 2201 if (!result->IsFailure()) {
2201 accounting_stats_.AllocateBytes(size_in_bytes); 2202 accounting_stats_.AllocateBytes(size_in_bytes);
2202 2203
2203 HeapObject* obj = HeapObject::cast(result); 2204 HeapObject* obj = HeapObject::cast(result);
2204 Page* p = Page::FromAddress(obj->address()); 2205 Page* p = Page::FromAddress(obj->address());
2205 2206
2206 if (obj->address() >= p->AllocationWatermark()) { 2207 if (obj->address() >= p->AllocationWatermark()) {
2207 // There should be no hole between the allocation watermark 2208 // There should be no hole between the allocation watermark
2208 // and allocated object address. 2209 // and allocated object address.
2209 // Memory above the allocation watermark was not swept and 2210 // Memory above the allocation watermark was not swept and
2210 // might contain garbage pointers to new space. 2211 // might contain garbage pointers to new space.
2211 ASSERT(obj->address() == p->AllocationWatermark()); 2212 ASSERT(obj->address() == p->AllocationWatermark());
2212 p->SetAllocationWatermark(obj->address() + size_in_bytes); 2213 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2213 } 2214 }
2214 2215
2215 return obj; 2216 return obj;
2216 } 2217 }
2217 } 2218 }
2218 2219
2219 // Free list allocation failed and there is no next page. Fail if we have 2220 // Free list allocation failed and there is no next page. Fail if we have
2220 // hit the old generation size limit that should cause a garbage 2221 // hit the old generation size limit that should cause a garbage
2221 // collection. 2222 // collection.
2222 if (!HEAP->always_allocate() && HEAP->OldGenerationAllocationLimitReached()) { 2223 if (!heap()->always_allocate() &&
2224 heap()->OldGenerationAllocationLimitReached()) {
2223 return NULL; 2225 return NULL;
2224 } 2226 }
2225 2227
2226 // Try to expand the space and allocate in the new next page. 2228 // Try to expand the space and allocate in the new next page.
2227 ASSERT(!current_page->next_page()->is_valid()); 2229 ASSERT(!current_page->next_page()->is_valid());
2228 if (Expand(current_page)) { 2230 if (Expand(current_page)) {
2229 return AllocateInNextPage(current_page, size_in_bytes); 2231 return AllocateInNextPage(current_page, size_in_bytes);
2230 } 2232 }
2231 2233
2232 // Finally, fail. 2234 // Finally, fail.
(...skipping 141 matching lines...)
2374 it->next(); 2376 it->next();
2375 } 2377 }
2376 EnterComment(isolate, comment_txt, flat_delta); 2378 EnterComment(isolate, comment_txt, flat_delta);
2377 } 2379 }
2378 2380
2379 2381
2380 // Collects code size statistics: 2382 // Collects code size statistics:
2381 // - by code kind 2383 // - by code kind
2382 // - by code comment 2384 // - by code comment
2383 void PagedSpace::CollectCodeStatistics() { 2385 void PagedSpace::CollectCodeStatistics() {
2384 Isolate* isolate = Isolate::Current(); 2386 Isolate* isolate = heap()->isolate();
2385 HeapObjectIterator obj_it(this); 2387 HeapObjectIterator obj_it(this);
2386 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 2388 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
2387 if (obj->IsCode()) { 2389 if (obj->IsCode()) {
2388 Code* code = Code::cast(obj); 2390 Code* code = Code::cast(obj);
2389 isolate->code_kind_statistics()[code->kind()] += code->Size(); 2391 isolate->code_kind_statistics()[code->kind()] += code->Size();
2390 RelocIterator it(code); 2392 RelocIterator it(code);
2391 int delta = 0; 2393 int delta = 0;
2392 const byte* prev_pc = code->instruction_start(); 2394 const byte* prev_pc = code->instruction_start();
2393 while (!it.done()) { 2395 while (!it.done()) {
2394 if (it.rinfo()->rmode() == RelocInfo::COMMENT) { 2396 if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
(...skipping 92 matching lines...)
2487 // in the space, move to that page and allocate there. This allocation 2489 // in the space, move to that page and allocate there. This allocation
2488 // should succeed. 2490 // should succeed.
2489 Page* current_page = TopPageOf(allocation_info_); 2491 Page* current_page = TopPageOf(allocation_info_);
2490 if (current_page->next_page()->is_valid()) { 2492 if (current_page->next_page()->is_valid()) {
2491 return AllocateInNextPage(current_page, size_in_bytes); 2493 return AllocateInNextPage(current_page, size_in_bytes);
2492 } 2494 }
2493 2495
2494 // There is no next page in this space. Try free list allocation unless 2496 // There is no next page in this space. Try free list allocation unless
2495 // that is currently forbidden. The fixed space free list implicitly assumes 2497 // that is currently forbidden. The fixed space free list implicitly assumes
2496 // that all free blocks are of the fixed size. 2498 // that all free blocks are of the fixed size.
2497 if (!HEAP->linear_allocation()) { 2499 if (!heap()->linear_allocation()) {
2498 Object* result = free_list_.Allocate(); 2500 Object* result = free_list_.Allocate();
2499 if (!result->IsFailure()) { 2501 if (!result->IsFailure()) {
2500 accounting_stats_.AllocateBytes(size_in_bytes); 2502 accounting_stats_.AllocateBytes(size_in_bytes);
2501 HeapObject* obj = HeapObject::cast(result); 2503 HeapObject* obj = HeapObject::cast(result);
2502 Page* p = Page::FromAddress(obj->address()); 2504 Page* p = Page::FromAddress(obj->address());
2503 2505
2504 if (obj->address() >= p->AllocationWatermark()) { 2506 if (obj->address() >= p->AllocationWatermark()) {
2505 // There should be no hole between the allocation watermark 2507 // There should be no hole between the allocation watermark
2506 // and allocated object address. 2508 // and allocated object address.
2507 // Memory above the allocation watermark was not swept and 2509 // Memory above the allocation watermark was not swept and
2508 // might contain garbage pointers to new space. 2510 // might contain garbage pointers to new space.
2509 ASSERT(obj->address() == p->AllocationWatermark()); 2511 ASSERT(obj->address() == p->AllocationWatermark());
2510 p->SetAllocationWatermark(obj->address() + size_in_bytes); 2512 p->SetAllocationWatermark(obj->address() + size_in_bytes);
2511 } 2513 }
2512 2514
2513 return obj; 2515 return obj;
2514 } 2516 }
2515 } 2517 }
2516 2518
2517 // Free list allocation failed and there is no next page. Fail if we have 2519 // Free list allocation failed and there is no next page. Fail if we have
2518 // hit the old generation size limit that should cause a garbage 2520 // hit the old generation size limit that should cause a garbage
2519 // collection. 2521 // collection.
2520 if (!HEAP->always_allocate() && HEAP->OldGenerationAllocationLimitReached()) { 2522 if (!heap()->always_allocate() &&
2523 heap()->OldGenerationAllocationLimitReached()) {
2521 return NULL; 2524 return NULL;
2522 } 2525 }
2523 2526
2524 // Try to expand the space and allocate in the new next page. 2527 // Try to expand the space and allocate in the new next page.
2525 ASSERT(!current_page->next_page()->is_valid()); 2528 ASSERT(!current_page->next_page()->is_valid());
2526 if (Expand(current_page)) { 2529 if (Expand(current_page)) {
2527 return AllocateInNextPage(current_page, size_in_bytes); 2530 return AllocateInNextPage(current_page, size_in_bytes);
2528 } 2531 }
2529 2532
2530 // Finally, fail. 2533 // Finally, fail.
(...skipping 79 matching lines...)
2610 #endif 2613 #endif
2611 2614
2612 2615
2613 // ----------------------------------------------------------------------------- 2616 // -----------------------------------------------------------------------------
2614 // GlobalPropertyCellSpace implementation 2617 // GlobalPropertyCellSpace implementation
2615 2618
2616 #ifdef DEBUG 2619 #ifdef DEBUG
2617 void CellSpace::VerifyObject(HeapObject* object) { 2620 void CellSpace::VerifyObject(HeapObject* object) {
2618 // The object should be a global object property cell or a free-list node. 2621 // The object should be a global object property cell or a free-list node.
2619 ASSERT(object->IsJSGlobalPropertyCell() || 2622 ASSERT(object->IsJSGlobalPropertyCell() ||
2620 object->map() == HEAP->two_pointer_filler_map()); 2623 object->map() == heap()->two_pointer_filler_map());
2621 } 2624 }
2622 #endif 2625 #endif
2623 2626
2624 2627
2625 // ----------------------------------------------------------------------------- 2628 // -----------------------------------------------------------------------------
2626 // LargeObjectIterator 2629 // LargeObjectIterator
2627 2630
2628 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) { 2631 LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
2629 current_ = space->first_chunk_; 2632 current_ = space->first_chunk_;
2630 size_func_ = NULL; 2633 size_func_ = NULL;
(...skipping 44 matching lines...)
2675 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) { 2678 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
2676 int os_alignment = static_cast<int>(OS::AllocateAlignment()); 2679 int os_alignment = static_cast<int>(OS::AllocateAlignment());
2677 if (os_alignment < Page::kPageSize) 2680 if (os_alignment < Page::kPageSize)
2678 size_in_bytes += (Page::kPageSize - os_alignment); 2681 size_in_bytes += (Page::kPageSize - os_alignment);
2679 return size_in_bytes + Page::kObjectStartOffset; 2682 return size_in_bytes + Page::kObjectStartOffset;
2680 } 2683 }
2681 2684
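ChunkSizeFor above pads the request so that a fully page-aligned object area still fits when the OS returns memory at a weaker alignment than Page::kPageSize. A worked example with illustrative sizes; the real constants live elsewhere in V8:

// chunk_size_sketch.cc -- worked example of the padding in ChunkSizeFor,
// using illustrative stand-in sizes.
#include <cassert>

int main() {
  const int kPageSize = 8 * 1024;      // stand-in for Page::kPageSize
  const int kObjectStartOffset = 256;  // stand-in for Page::kObjectStartOffset
  const int os_alignment = 4 * 1024;   // stand-in for OS::AllocateAlignment()

  int size_in_bytes = 100 * 1024;
  // Worst case, an os_alignment-aligned block starts kPageSize - os_alignment
  // bytes below the next page boundary, so that much slack is added up front.
  if (os_alignment < kPageSize) size_in_bytes += (kPageSize - os_alignment);
  int chunk_size = size_in_bytes + kObjectStartOffset;

  assert(chunk_size == 100 * 1024 + 4 * 1024 + 256);
  return 0;
}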
2682 // ----------------------------------------------------------------------------- 2685 // -----------------------------------------------------------------------------
2683 // LargeObjectSpace 2686 // LargeObjectSpace
2684 2687
2685 LargeObjectSpace::LargeObjectSpace(AllocationSpace id) 2688 LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
2686 : Space(id, NOT_EXECUTABLE), // Managed on a per-allocation basis 2689 : Space(heap, id, NOT_EXECUTABLE), // Managed on a per-allocation basis
2687 first_chunk_(NULL), 2690 first_chunk_(NULL),
2688 size_(0), 2691 size_(0),
2689 page_count_(0) {} 2692 page_count_(0) {}
2690 2693
2691 2694
2692 bool LargeObjectSpace::Setup() { 2695 bool LargeObjectSpace::Setup() {
2693 first_chunk_ = NULL; 2696 first_chunk_ = NULL;
2694 size_ = 0; 2697 size_ = 0;
2695 page_count_ = 0; 2698 page_count_ = 0;
2696 return true; 2699 return true;
2697 } 2700 }
2698 2701
2699 2702
2700 void LargeObjectSpace::TearDown() { 2703 void LargeObjectSpace::TearDown() {
2701 while (first_chunk_ != NULL) { 2704 while (first_chunk_ != NULL) {
2702 LargeObjectChunk* chunk = first_chunk_; 2705 LargeObjectChunk* chunk = first_chunk_;
2703 first_chunk_ = first_chunk_->next(); 2706 first_chunk_ = first_chunk_->next();
2704 LOG(DeleteEvent("LargeObjectChunk", chunk->address())); 2707 LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
2705 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); 2708 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2706 Executability executable = 2709 Executability executable =
2707 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; 2710 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
2708 ObjectSpace space = kObjectSpaceLoSpace; 2711 ObjectSpace space = kObjectSpaceLoSpace;
2709 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; 2712 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2710 size_t size = chunk->size(); 2713 size_t size = chunk->size();
2711 Isolate::Current()->memory_allocator()->FreeRawMemory(chunk->address(), 2714 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(),
2712 size, 2715 size,
2713 executable); 2716 executable);
2714 Isolate::Current()->memory_allocator()->PerformAllocationCallback( 2717 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2715 space, kAllocationActionFree, size); 2718 space, kAllocationActionFree, size);
2716 } 2719 }
2717 2720
2718 size_ = 0; 2721 size_ = 0;
2719 page_count_ = 0; 2722 page_count_ = 0;
2720 } 2723 }
2721 2724
2722 2725
2723 #ifdef ENABLE_HEAP_PROTECTION 2726 #ifdef ENABLE_HEAP_PROTECTION
2724 2727
2725 void LargeObjectSpace::Protect() { 2728 void LargeObjectSpace::Protect() {
2726 LargeObjectChunk* chunk = first_chunk_; 2729 LargeObjectChunk* chunk = first_chunk_;
2727 while (chunk != NULL) { 2730 while (chunk != NULL) {
2728 Isolate::Current()->memory_allocator()->Protect(chunk->address(), 2731 heap()->isolate()->memory_allocator()->Protect(chunk->address(),
2729 chunk->size()); 2732 chunk->size());
2730 chunk = chunk->next(); 2733 chunk = chunk->next();
2731 } 2734 }
2732 } 2735 }
2733 2736
2734 2737
2735 void LargeObjectSpace::Unprotect() { 2738 void LargeObjectSpace::Unprotect() {
2736 LargeObjectChunk* chunk = first_chunk_; 2739 LargeObjectChunk* chunk = first_chunk_;
2737 while (chunk != NULL) { 2740 while (chunk != NULL) {
2738 bool is_code = chunk->GetObject()->IsCode(); 2741 bool is_code = chunk->GetObject()->IsCode();
2739 Isolate::Current()->memory_allocator()->Unprotect(chunk->address(), 2742 heap()->isolate()->memory_allocator()->Unprotect(chunk->address(),
2740 chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE); 2743 chunk->size(), is_code ? EXECUTABLE : NOT_EXECUTABLE);
2741 chunk = chunk->next(); 2744 chunk = chunk->next();
2742 } 2745 }
2743 } 2746 }
2744 2747
2745 #endif 2748 #endif
2746 2749
2747 2750
2748 Object* LargeObjectSpace::AllocateRawInternal(int requested_size, 2751 Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
2749 int object_size, 2752 int object_size,
2750 Executability executable) { 2753 Executability executable) {
2751 ASSERT(0 < object_size && object_size <= requested_size); 2754 ASSERT(0 < object_size && object_size <= requested_size);
2752 2755
2753 // Check if we want to force a GC before growing the old space further. 2756 // Check if we want to force a GC before growing the old space further.
2754 // If so, fail the allocation. 2757 // If so, fail the allocation.
2755 if (!HEAP->always_allocate() && HEAP->OldGenerationAllocationLimitReached()) { 2758 if (!heap()->always_allocate() &&
2759 heap()->OldGenerationAllocationLimitReached()) {
2756 return Failure::RetryAfterGC(requested_size, identity()); 2760 return Failure::RetryAfterGC(requested_size, identity());
2757 } 2761 }
2758 2762
2759 size_t chunk_size; 2763 size_t chunk_size;
2760 LargeObjectChunk* chunk = 2764 LargeObjectChunk* chunk =
2761 LargeObjectChunk::New(requested_size, &chunk_size, executable); 2765 LargeObjectChunk::New(requested_size, &chunk_size, executable);
2762 if (chunk == NULL) { 2766 if (chunk == NULL) {
2763 return Failure::RetryAfterGC(requested_size, identity()); 2767 return Failure::RetryAfterGC(requested_size, identity());
2764 } 2768 }
2765 2769
(...skipping 86 matching lines...)
2852 // regions (modulo 32). So we treat a large page as a sequence of 2856 // regions (modulo 32). So we treat a large page as a sequence of
2853 // normal pages of size Page::kPageSize having the same dirty marks 2857 // normal pages of size Page::kPageSize having the same dirty marks
2854 // and subsequently iterate dirty regions on each of these pages. 2858 // and subsequently iterate dirty regions on each of these pages.
2855 Address start = object->address(); 2859 Address start = object->address();
2856 Address end = page->ObjectAreaEnd(); 2860 Address end = page->ObjectAreaEnd();
2857 Address object_end = start + object->Size(); 2861 Address object_end = start + object->Size();
2858 2862
2859 // Iterate regions of the first normal page covering object. 2863 // Iterate regions of the first normal page covering object.
2860 uint32_t first_region_number = page->GetRegionNumberForAddress(start); 2864 uint32_t first_region_number = page->GetRegionNumberForAddress(start);
2861 newmarks |= 2865 newmarks |=
2862 HEAP->IterateDirtyRegions(marks >> first_region_number, 2866 heap()->IterateDirtyRegions(marks >> first_region_number,
2863 start, 2867 start,
2864 end, 2868 end,
2865 &Heap::IteratePointersInDirtyRegion, 2869 &Heap::IteratePointersInDirtyRegion,
2866 copy_object) << first_region_number; 2870 copy_object) << first_region_number;
2867 2871
2868 start = end; 2872 start = end;
2869 end = start + Page::kPageSize; 2873 end = start + Page::kPageSize;
2870 while (end <= object_end) { 2874 while (end <= object_end) {
2871 // Iterate next 32 regions. 2875 // Iterate next 32 regions.
2872 newmarks |= 2876 newmarks |=
2873 HEAP->IterateDirtyRegions(marks, 2877 heap()->IterateDirtyRegions(marks,
2874 start, 2878 start,
2875 end, 2879 end,
2876 &Heap::IteratePointersInDirtyRegion, 2880 &Heap::IteratePointersInDirtyRegion,
2877 copy_object); 2881 copy_object);
2878 start = end; 2882 start = end;
2879 end = start + Page::kPageSize; 2883 end = start + Page::kPageSize;
2880 } 2884 }
2881 2885
2882 if (start != object_end) { 2886 if (start != object_end) {
2883 // Iterate the last piece of an object which is less than 2887 // Iterate the last piece of an object which is less than
2884 // Page::kPageSize. 2888 // Page::kPageSize.
2885 newmarks |= 2889 newmarks |=
2886 HEAP->IterateDirtyRegions(marks, 2890 heap()->IterateDirtyRegions(marks,
2887 start, 2891 start,
2888 object_end, 2892 object_end,
2889 &Heap::IteratePointersInDirtyRegion, 2893 &Heap::IteratePointersInDirtyRegion,
2890 copy_object); 2894 copy_object);
2891 } 2895 }
2892 2896
2893 page->SetRegionMarks(newmarks); 2897 page->SetRegionMarks(newmarks);
2894 } 2898 }
2895 } 2899 }
2896 } 2900 }
2897 } 2901 }
2898 2902
2899 2903
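The dirty-region walk above relies on each page tracking its object area with a single 32-bit marks word, one bit per region; a large object is handled as a run of Page::kPageSize-sized pieces that reuse that word, shifted by the first region number on the leading partial piece. A simplified model of that bookkeeping, with stand-in sizes and helper names:

// region_marks_sketch.cc -- simplified model of the 32-bit per-page dirty
// region marks the loop above shifts and ORs; constants are stand-ins.
#include <cassert>
#include <cstdint>

const uint32_t kRegionsPerPage = 32;   // one mark bit per region
const uintptr_t kPageSize = 8 * 1024;  // illustrative
const uintptr_t kRegionSize = kPageSize / kRegionsPerPage;
const uint32_t kAllRegionsCleanMarks = 0;

// Stand-in for Page::GetRegionNumberForAddress.
uint32_t RegionNumberForAddress(uintptr_t page_start, uintptr_t addr) {
  return static_cast<uint32_t>((addr - page_start) / kRegionSize);
}

int main() {
  const uintptr_t page_start = 4 * kPageSize;
  uint32_t marks = kAllRegionsCleanMarks;

  // A store that may create an old->new pointer dirties one region bit.
  const uintptr_t slot = page_start + 5 * kRegionSize + 8;
  const uint32_t region = RegionNumberForAddress(page_start, slot);
  marks |= uint32_t(1) << region;
  assert(marks == uint32_t(1) << 5);

  // On the first, partial piece of a large object the walk shifts the marks
  // down so bit 0 lines up with the object's first region, then shifts the
  // surviving marks back when storing them.
  uint32_t shifted = marks >> region;
  assert((shifted & 1u) != 0);
  uint32_t newmarks = shifted << region;
  assert(newmarks == marks);
  return 0;
}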
2900 void LargeObjectSpace::FreeUnmarkedObjects() { 2904 void LargeObjectSpace::FreeUnmarkedObjects() {
2901 LargeObjectChunk* previous = NULL; 2905 LargeObjectChunk* previous = NULL;
2902 LargeObjectChunk* current = first_chunk_; 2906 LargeObjectChunk* current = first_chunk_;
2903 while (current != NULL) { 2907 while (current != NULL) {
2904 HeapObject* object = current->GetObject(); 2908 HeapObject* object = current->GetObject();
2905 if (object->IsMarked()) { 2909 if (object->IsMarked()) {
2906 object->ClearMark(); 2910 object->ClearMark();
2907 HEAP->mark_compact_collector()->tracer()->decrement_marked_count(); 2911 heap()->mark_compact_collector()->tracer()->decrement_marked_count();
2908 previous = current; 2912 previous = current;
2909 current = current->next(); 2913 current = current->next();
2910 } else { 2914 } else {
2911 Page* page = Page::FromAddress(RoundUp(current->address(), 2915 Page* page = Page::FromAddress(RoundUp(current->address(),
2912 Page::kPageSize)); 2916 Page::kPageSize));
2913 Executability executable = 2917 Executability executable =
2914 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; 2918 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
2915 Address chunk_address = current->address(); 2919 Address chunk_address = current->address();
2916 size_t chunk_size = current->size(); 2920 size_t chunk_size = current->size();
2917 2921
2918 // Cut the chunk out from the chunk list. 2922 // Cut the chunk out from the chunk list.
2919 current = current->next(); 2923 current = current->next();
2920 if (previous == NULL) { 2924 if (previous == NULL) {
2921 first_chunk_ = current; 2925 first_chunk_ = current;
2922 } else { 2926 } else {
2923 previous->set_next(current); 2927 previous->set_next(current);
2924 } 2928 }
2925 2929
2926 // Free the chunk. 2930 // Free the chunk.
2927 HEAP->mark_compact_collector()->ReportDeleteIfNeeded(object); 2931 heap()->mark_compact_collector()->ReportDeleteIfNeeded(object);
2928 size_ -= static_cast<int>(chunk_size); 2932 size_ -= static_cast<int>(chunk_size);
2929 page_count_--; 2933 page_count_--;
2930 ObjectSpace space = kObjectSpaceLoSpace; 2934 ObjectSpace space = kObjectSpaceLoSpace;
2931 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; 2935 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2932 Isolate::Current()->memory_allocator()->FreeRawMemory(chunk_address, 2936 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address,
2933 chunk_size, 2937 chunk_size,
2934 executable); 2938 executable);
2935 Isolate::Current()->memory_allocator()->PerformAllocationCallback( 2939 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2936 space, kAllocationActionFree, size_); 2940 space, kAllocationActionFree, size_);
2937 LOG(DeleteEvent("LargeObjectChunk", chunk_address)); 2941 LOG(DeleteEvent("LargeObjectChunk", chunk_address));
2938 } 2942 }
2939 } 2943 }
2940 } 2944 }
2941 2945
2942 2946
2943 bool LargeObjectSpace::Contains(HeapObject* object) { 2947 bool LargeObjectSpace::Contains(HeapObject* object) {
2944 Address address = object->address(); 2948 Address address = object->address();
2945 if (HEAP->new_space()->Contains(address)) { 2949 if (heap()->new_space()->Contains(address)) {
2946 return false; 2950 return false;
2947 } 2951 }
2948 Page* page = Page::FromAddress(address); 2952 Page* page = Page::FromAddress(address);
2949 2953
2950 SLOW_ASSERT(!page->IsLargeObjectPage() 2954 SLOW_ASSERT(!page->IsLargeObjectPage()
2951 || !FindObject(address)->IsFailure()); 2955 || !FindObject(address)->IsFailure());
2952 2956
2953 return page->IsLargeObjectPage(); 2957 return page->IsLargeObjectPage();
2954 } 2958 }
2955 2959
2956 2960
2957 #ifdef DEBUG 2961 #ifdef DEBUG
2958 // We do not assume that the large object iterator works, because it depends 2962 // We do not assume that the large object iterator works, because it depends
2959 // on the invariants we are checking during verification. 2963 // on the invariants we are checking during verification.
2960 void LargeObjectSpace::Verify() { 2964 void LargeObjectSpace::Verify() {
2961 for (LargeObjectChunk* chunk = first_chunk_; 2965 for (LargeObjectChunk* chunk = first_chunk_;
2962 chunk != NULL; 2966 chunk != NULL;
2963 chunk = chunk->next()) { 2967 chunk = chunk->next()) {
2964 // Each chunk contains an object that starts at the large object page's 2968 // Each chunk contains an object that starts at the large object page's
2965 // object area start. 2969 // object area start.
2966 HeapObject* object = chunk->GetObject(); 2970 HeapObject* object = chunk->GetObject();
2967 Page* page = Page::FromAddress(object->address()); 2971 Page* page = Page::FromAddress(object->address());
2968 ASSERT(object->address() == page->ObjectAreaStart()); 2972 ASSERT(object->address() == page->ObjectAreaStart());
2969 2973
2970 // The first word should be a map, and we expect all map pointers to be 2974 // The first word should be a map, and we expect all map pointers to be
2971 // in map space. 2975 // in map space.
2972 Map* map = object->map(); 2976 Map* map = object->map();
2973 ASSERT(map->IsMap()); 2977 ASSERT(map->IsMap());
2974 ASSERT(HEAP->map_space()->Contains(map)); 2978 ASSERT(heap()->map_space()->Contains(map));
2975 2979
2976 // We have only code, sequential strings, external strings 2980 // We have only code, sequential strings, external strings
2977 // (sequential strings that have been morphed into external 2981 // (sequential strings that have been morphed into external
2978 // strings), fixed arrays, and byte arrays in large object space. 2982 // strings), fixed arrays, and byte arrays in large object space.
2979 ASSERT(object->IsCode() || object->IsSeqString() || 2983 ASSERT(object->IsCode() || object->IsSeqString() ||
2980 object->IsExternalString() || object->IsFixedArray() || 2984 object->IsExternalString() || object->IsFixedArray() ||
2981 object->IsByteArray()); 2985 object->IsByteArray());
2982 2986
2983 // The object itself should look OK. 2987 // The object itself should look OK.
2984 object->Verify(); 2988 object->Verify();
2985 2989
2986 // Byte arrays and strings don't have interior pointers. 2990 // Byte arrays and strings don't have interior pointers.
2987 if (object->IsCode()) { 2991 if (object->IsCode()) {
2988 VerifyPointersVisitor code_visitor; 2992 VerifyPointersVisitor code_visitor;
2989 object->IterateBody(map->instance_type(), 2993 object->IterateBody(map->instance_type(),
2990 object->Size(), 2994 object->Size(),
2991 &code_visitor); 2995 &code_visitor);
2992 } else if (object->IsFixedArray()) { 2996 } else if (object->IsFixedArray()) {
2993 // We loop over fixed arrays ourselves, rather than using the visitor, 2997 // We loop over fixed arrays ourselves, rather than using the visitor,
2994 // because the visitor doesn't support the start/offset iteration 2998 // because the visitor doesn't support the start/offset iteration
2995 // needed for IsRegionDirty. 2999 // needed for IsRegionDirty.
2996 FixedArray* array = FixedArray::cast(object); 3000 FixedArray* array = FixedArray::cast(object);
2997 for (int j = 0; j < array->length(); j++) { 3001 for (int j = 0; j < array->length(); j++) {
2998 Object* element = array->get(j); 3002 Object* element = array->get(j);
2999 if (element->IsHeapObject()) { 3003 if (element->IsHeapObject()) {
3000 HeapObject* element_object = HeapObject::cast(element); 3004 HeapObject* element_object = HeapObject::cast(element);
3001 ASSERT(HEAP->Contains(element_object)); 3005 ASSERT(heap()->Contains(element_object));
3002 ASSERT(element_object->map()->IsMap()); 3006 ASSERT(element_object->map()->IsMap());
3003 if (HEAP->InNewSpace(element_object)) { 3007 if (heap()->InNewSpace(element_object)) {
3004 Address array_addr = object->address(); 3008 Address array_addr = object->address();
3005 Address element_addr = array_addr + FixedArray::kHeaderSize + 3009 Address element_addr = array_addr + FixedArray::kHeaderSize +
3006 j * kPointerSize; 3010 j * kPointerSize;
3007 3011
3008 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr)); 3012 ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
3009 } 3013 }
3010 } 3014 }
3011 } 3015 }
3012 } 3016 }
3013 } 3017 }
(...skipping 17 matching lines...)
3031 num_objects++; 3035 num_objects++;
3032 CollectHistogramInfo(obj); 3036 CollectHistogramInfo(obj);
3033 } 3037 }
3034 3038
3035 PrintF(" number of objects %d\n", num_objects); 3039 PrintF(" number of objects %d\n", num_objects);
3036 if (num_objects > 0) ReportHistogram(false); 3040 if (num_objects > 0) ReportHistogram(false);
3037 } 3041 }
3038 3042
3039 3043
3040 void LargeObjectSpace::CollectCodeStatistics() { 3044 void LargeObjectSpace::CollectCodeStatistics() {
3041 Isolate* isolate = Isolate::Current(); 3045 Isolate* isolate = heap()->isolate();
3042 LargeObjectIterator obj_it(this); 3046 LargeObjectIterator obj_it(this);
3043 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 3047 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
3044 if (obj->IsCode()) { 3048 if (obj->IsCode()) {
3045 Code* code = Code::cast(obj); 3049 Code* code = Code::cast(obj);
3046 isolate->code_kind_statistics()[code->kind()] += code->Size(); 3050 isolate->code_kind_statistics()[code->kind()] += code->Size();
3047 } 3051 }
3048 } 3052 }
3049 } 3053 }
3050 #endif // DEBUG 3054 #endif // DEBUG
3051 3055
3052 } } // namespace v8::internal 3056 } } // namespace v8::internal