Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(261)

Side by Side Diff: src/spaces.cc

Issue 7149016: Multi-page growing and shrinking new-space (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Created 9 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 392 matching lines...) Expand 10 before | Expand all | Expand 10 after
403 NOT_EXECUTABLE, 403 NOT_EXECUTABLE,
404 semi_space); 404 semi_space);
405 chunk->set_next_chunk(NULL); 405 chunk->set_next_chunk(NULL);
406 chunk->set_prev_chunk(NULL); 406 chunk->set_prev_chunk(NULL);
407 chunk->initialize_scan_on_scavenge(true); 407 chunk->initialize_scan_on_scavenge(true);
408 bool in_to_space = (semi_space->id() != kFromSpace); 408 bool in_to_space = (semi_space->id() != kFromSpace);
409 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE 409 chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
410 : MemoryChunk::IN_FROM_SPACE); 410 : MemoryChunk::IN_FROM_SPACE);
411 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE 411 ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
412 : MemoryChunk::IN_TO_SPACE)); 412 : MemoryChunk::IN_TO_SPACE));
413 heap->incremental_marking()->SetNewSpacePageFlags(chunk); 413 NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
414 return static_cast<NewSpacePage*>(chunk); 414 heap->incremental_marking()->SetNewSpacePageFlags(page);
415 return page;
415 } 416 }
416 417
417 418
418 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) { 419 void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
419 set_owner(semi_space); 420 set_owner(semi_space);
420 set_next_chunk(this); 421 set_next_chunk(this);
421 set_prev_chunk(this); 422 set_prev_chunk(this);
422 // Flags marks this invalid page as not being in new-space. 423 // Flags marks this invalid page as not being in new-space.
423 // All real new-space pages will be in new-space. 424 // All real new-space pages will be in new-space.
424 SetFlags(0, ~0); 425 SetFlags(0, ~0);
(...skipping 535 matching lines...) Expand 10 before | Expand all | Expand 10 after
960 if (!from_space_.Grow()) { 961 if (!from_space_.Grow()) {
961 // If we managed to grow to space but couldn't grow from space, 962 // If we managed to grow to space but couldn't grow from space,
962 // attempt to shrink to space. 963 // attempt to shrink to space.
963 if (!to_space_.ShrinkTo(from_space_.Capacity())) { 964 if (!to_space_.ShrinkTo(from_space_.Capacity())) {
964 // We are in an inconsistent state because we could not 965 // We are in an inconsistent state because we could not
965 // commit/uncommit memory from new space. 966 // commit/uncommit memory from new space.
966 V8::FatalProcessOutOfMemory("Failed to grow new space."); 967 V8::FatalProcessOutOfMemory("Failed to grow new space.");
967 } 968 }
968 } 969 }
969 } 970 }
970 allocation_info_.limit = to_space_.page_high();
971 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 971 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
972 } 972 }
973 973
974 974
975 void NewSpace::Shrink() { 975 void NewSpace::Shrink() {
976 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt()); 976 int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
977 int rounded_new_capacity = 977 int rounded_new_capacity =
978 RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment())); 978 RoundUp(new_capacity, static_cast<int>(OS::AllocateAlignment()));
979 if (rounded_new_capacity < Capacity() && 979 if (rounded_new_capacity < Capacity() &&
980 to_space_.ShrinkTo(rounded_new_capacity)) { 980 to_space_.ShrinkTo(rounded_new_capacity)) {
981 // Only shrink from space if we managed to shrink to space. 981 // Only shrink from space if we managed to shrink to space.
982 if (!from_space_.ShrinkTo(rounded_new_capacity)) { 982 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
983 // If we managed to shrink to space but couldn't shrink from 983 // If we managed to shrink to space but couldn't shrink from
984 // space, attempt to grow to space again. 984 // space, attempt to grow to space again.
985 if (!to_space_.GrowTo(from_space_.Capacity())) { 985 if (!to_space_.GrowTo(from_space_.Capacity())) {
986 // We are in an inconsistent state because we could not 986 // We are in an inconsistent state because we could not
987 // commit/uncommit memory from new space. 987 // commit/uncommit memory from new space.
988 V8::FatalProcessOutOfMemory("Failed to shrink new space."); 988 V8::FatalProcessOutOfMemory("Failed to shrink new space.");
989 } 989 }
990 } 990 }
991 } 991 }
992 allocation_info_.limit = to_space_.page_high(); 992 allocation_info_.limit = to_space_.page_high();
993 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 993 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
994 } 994 }
995 995
996 996
997 void NewSpace::UpdateAllocationInfo() { 997 void NewSpace::UpdateAllocationInfo() {
998 allocation_info_.top = to_space_.page_low(); 998 allocation_info_.top = to_space_.page_low();
999 allocation_info_.limit = to_space_.page_high(); 999 allocation_info_.limit = to_space_.page_high();
1000
1001 // Lower limit during incremental marking.
1002 if (heap()->incremental_marking()->IsMarking() &&
1003 inline_allocation_limit_step() != 0) {
1004 Address new_limit =
1005 allocation_info_.top + inline_allocation_limit_step();
1006 if (new_limit < allocation_info_.limit) {
Erik Corry 2011/06/14 12:17:17 This 'if' can be done more elegantly with Min(x,y)
Lasse Reichstein 2011/06/14 14:13:03 Done.
1007 allocation_info_.limit = new_limit;
1008 }
1009 }
1000 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1010 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1001 } 1011 }
1002 1012
1003 1013
1004 void NewSpace::ResetAllocationInfo() { 1014 void NewSpace::ResetAllocationInfo() {
1005 to_space_.Reset(); 1015 to_space_.Reset();
1006 UpdateAllocationInfo(); 1016 UpdateAllocationInfo();
1007 // Clear all mark-bits in the to-space. 1017 // Clear all mark-bits in the to-space.
1008 NewSpacePageIterator it(to_space_.space_low(), to_space_.space_high()); 1018 NewSpacePageIterator it(to_space_.space_low(), to_space_.space_high());
1009 while (it.has_next()) { 1019 while (it.has_next()) {
1010 NewSpacePage* page = it.next(); 1020 NewSpacePage* page = it.next();
1011 page->markbits()->Clear(); 1021 page->markbits()->Clear();
1012 } 1022 }
1013 } 1023 }
1014 1024
1015 1025
1016 bool NewSpace::AddFreshPage() { 1026 bool NewSpace::AddFreshPage() {
1017 Address top = allocation_info_.top; 1027 Address top = allocation_info_.top;
1018 if (top == NewSpacePage::FromLimit(top)->body()) { 1028 if (NewSpacePage::at_start(top)) {
1019 // The current page is already empty. Don't try to make another. 1029 // The current page is already empty. Don't try to make another.
1020 1030
1021 // We should only get here if someone asks to allocate more 1031 // We should only get here if someone asks to allocate more
1022 // than what can be stored in a single page. 1032 // than what can be stored in a single page.
1023 // TODO(gc): Change the limit on new-space allocation to prevent this 1033 // TODO(gc): Change the limit on new-space allocation to prevent this
1024 // from happening (all such allocations should go directly to LOSpace). 1034 // from happening (all such allocations should go directly to LOSpace).
1025 return false; 1035 return false;
1026 } 1036 }
1027 if (!to_space_.AdvancePage()) { 1037 if (!to_space_.AdvancePage()) {
1028 // Failed to get a new page in to-space. 1038 // Failed to get a new page in to-space.
1029 return false; 1039 return false;
1030 } 1040 }
1031 // Clear remainder of current page. 1041 // Clear remainder of current page.
1032 int remaining_in_page = 1042 int remaining_in_page =
1033 static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top); 1043 static_cast<int>(NewSpacePage::FromLimit(top)->body_limit() - top);
1034 heap()->CreateFillerObjectAt(top, remaining_in_page); 1044 heap()->CreateFillerObjectAt(top, remaining_in_page);
1045
1035 UpdateAllocationInfo(); 1046 UpdateAllocationInfo();
1036 return true; 1047 return true;
1037 } 1048 }
1038 1049
1039 1050
1040 #ifdef DEBUG 1051 #ifdef DEBUG
1041 // We do not use the SemiSpaceIterator because verification doesn't assume 1052 // We do not use the SemiSpaceIterator because verification doesn't assume
1042 // that it works (it depends on the invariants we are checking). 1053 // that it works (it depends on the invariants we are checking).
1043 void NewSpace::Verify() { 1054 void NewSpace::Verify() {
1044 // The allocation pointer should be in the space or at the very end. 1055 // The allocation pointer should be in the space or at the very end.
1045 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1056 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1046 1057
1047 // There should be objects packed in from the low address up to the 1058 // There should be objects packed in from the low address up to the
1048 // allocation pointer. 1059 // allocation pointer.
1049 NewSpacePage* page = to_space_.first_page(); 1060 Address current = to_space_.first_page()->body();
1050 Address current = page->body();
1051 CHECK_EQ(current, to_space_.space_low()); 1061 CHECK_EQ(current, to_space_.space_low());
1052 1062
1053 while (current != top()) { 1063 while (current != top()) {
1054 if (current == page->body_limit()) { 1064 if (!NewSpacePage::at_end(current)) {
1065 // The allocation pointer should not be in the middle of an object.
1066 CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
1067 current < top());
1068
1069 HeapObject* object = HeapObject::FromAddress(current);
1070
1071 // The first word should be a map, and we expect all map pointers to
1072 // be in map space.
1073 Map* map = object->map();
1074 CHECK(map->IsMap());
1075 CHECK(heap()->map_space()->Contains(map));
1076
1077 // The object should not be code or a map.
1078 CHECK(!object->IsMap());
1079 CHECK(!object->IsCode());
1080
1081 // The object itself should look OK.
1082 object->Verify();
1083
1084 // All the interior pointers should be contained in the heap.
1085 VerifyPointersVisitor visitor;
1086 int size = object->Size();
1087 object->IterateBody(map->instance_type(), size, &visitor);
1088
1089 current += size;
1090 } else {
1055 // At end of page, switch to next page. 1091 // At end of page, switch to next page.
1056 page = page->next_page(); 1092 NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
1057 // Next page should be valid. 1093 // Next page should be valid.
1058 CHECK(!page->is_anchor()); 1094 CHECK(!page->is_anchor());
1059 current = page->body(); 1095 current = page->body();
Erik Corry 2011/06/14 12:17:17 I think you want a 'continue' here in case the end of the page coincides with top().
Lasse Reichstein 2011/06/14 14:13:03 The loop ends immediately after, so that won't make a difference.
1060 } 1096 }
1061 // The allocation pointer should not be in the middle of an object.
1062 CHECK(!page->ContainsLimit(top()) || current < top());
1063
1064 HeapObject* object = HeapObject::FromAddress(current);
1065
1066 // The first word should be a map, and we expect all map pointers to
1067 // be in map space.
1068 Map* map = object->map();
1069 CHECK(map->IsMap());
1070 CHECK(heap()->map_space()->Contains(map));
1071
1072 // The object should not be code or a map.
1073 CHECK(!object->IsMap());
1074 CHECK(!object->IsCode());
1075
1076 // The object itself should look OK.
1077 object->Verify();
1078
1079 // All the interior pointers should be contained in the heap.
1080 VerifyPointersVisitor visitor;
1081 int size = object->Size();
1082 object->IterateBody(map->instance_type(), size, &visitor);
1083
1084 current += size;
1085 } 1097 }
1086 1098
1087 // Check semi-spaces. 1099 // Check semi-spaces.
1088 ASSERT_EQ(from_space_.id(), kFromSpace); 1100 ASSERT_EQ(from_space_.id(), kFromSpace);
1089 ASSERT_EQ(to_space_.id(), kToSpace); 1101 ASSERT_EQ(to_space_.id(), kToSpace);
1090 from_space_.Verify(); 1102 from_space_.Verify();
1091 to_space_.Verify(); 1103 to_space_.Verify();
1092 } 1104 }
1093 #endif 1105 #endif
1094 1106
(...skipping 89 matching lines...) Expand 10 before | Expand all | Expand 10 after
1184 } 1196 }
1185 1197
1186 1198
1187 void SemiSpace::TearDown() { 1199 void SemiSpace::TearDown() {
1188 start_ = NULL; 1200 start_ = NULL;
1189 capacity_ = 0; 1201 capacity_ = 0;
1190 } 1202 }
1191 1203
1192 1204
1193 bool SemiSpace::Grow() { 1205 bool SemiSpace::Grow() {
1194 return false; // TODO(gc): Temporary hack while semispaces are only one page.
1195 // Double the semispace size but only up to maximum capacity. 1206 // Double the semispace size but only up to maximum capacity.
1196 int maximum_extra = maximum_capacity_ - capacity_; 1207 ASSERT(static_cast<size_t>(Page::kPageSize) > OS::AllocateAlignment());
1197 int extra = Min(RoundUp(capacity_, static_cast<int>(OS::AllocateAlignment())), 1208 int new_capacity = Min(maximum_capacity_,
1198 maximum_extra); 1209 RoundUp(capacity_ * 2, static_cast<int>(Page::kPageSize)));
1199 if (!heap()->isolate()->memory_allocator()->CommitBlock( 1210 return GrowTo(new_capacity);
1200 space_high(), extra, executable())) {
1201 return false;
1202 }
1203 capacity_ += extra;
1204 return true;
1205 } 1211 }
1206 1212
1207 1213
1208 bool SemiSpace::GrowTo(int new_capacity) { 1214 bool SemiSpace::GrowTo(int new_capacity) {
1209 return false; // TODO(gc): Temporary hack while semispaces are only one page.
1210 ASSERT(new_capacity <= maximum_capacity_); 1215 ASSERT(new_capacity <= maximum_capacity_);
1211 ASSERT(new_capacity > capacity_); 1216 ASSERT(new_capacity > capacity_);
1217 int pages_before = capacity_ / Page::kPageSize;
1212 size_t delta = new_capacity - capacity_; 1218 size_t delta = new_capacity - capacity_;
1213 ASSERT(IsAligned(delta, OS::AllocateAlignment())); 1219 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1214 if (!heap()->isolate()->memory_allocator()->CommitBlock( 1220 if (!heap()->isolate()->memory_allocator()->CommitBlock(
1215 space_high(), delta, executable())) { 1221 space_high(), delta, executable())) {
1216 return false; 1222 return false;
1217 } 1223 }
1218 capacity_ = new_capacity; 1224 capacity_ = new_capacity;
1225 int pages_after = capacity_ / Page::kPageSize;
1226 NewSpacePage* last_page = anchor()->prev_page();
1227 ASSERT(last_page != anchor());
1228 for (int i = pages_before; i < pages_after; i++) {
1229 Address page_address = start_ + i * Page::kPageSize;
1230 NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
1231 page_address,
1232 this);
1233 new_page->InsertAfter(last_page);
1234 new_page->markbits()->Clear();
1235 // Duplicate the flags that was set on the old page.
1236 new_page->SetFlags(last_page->GetFlags(),
1237 NewSpacePage::kCopyOnFlipFlagsMask);
1238 last_page = new_page;
1239 }
1219 return true; 1240 return true;
1220 } 1241 }
1221 1242
1222 1243
1223 bool SemiSpace::ShrinkTo(int new_capacity) { 1244 bool SemiSpace::ShrinkTo(int new_capacity) {
1224 return false; // TODO(gc): Temporary hack while semispaces are only one page.
1225 ASSERT(new_capacity >= initial_capacity_); 1245 ASSERT(new_capacity >= initial_capacity_);
1226 ASSERT(new_capacity < capacity_); 1246 ASSERT(new_capacity < capacity_);
1227 size_t delta = capacity_ - new_capacity; 1247 size_t delta = capacity_ - new_capacity;
1228 ASSERT(IsAligned(delta, OS::AllocateAlignment())); 1248 ASSERT(IsAligned(delta, OS::AllocateAlignment()));
1229 if (!heap()->isolate()->memory_allocator()->UncommitBlock( 1249 if (!heap()->isolate()->memory_allocator()->UncommitBlock(
1230 space_high() - delta, delta)) { 1250 space_high() - delta, delta)) {
1231 return false; 1251 return false;
1232 } 1252 }
1233 capacity_ = new_capacity; 1253 capacity_ = new_capacity;
1254
1255 int pages_after = capacity_ / Page::kPageSize;
1256 NewSpacePage* new_last_page =
1257 NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
1258 new_last_page->set_next_page(anchor());
1259 anchor()->set_next_page(new_last_page);
1260 ASSERT(current_page_ == first_page());
1261
1234 return true; 1262 return true;
1235 } 1263 }
1236 1264
1237 1265
1238 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) { 1266 void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
1239 anchor_.set_owner(this); 1267 anchor_.set_owner(this);
1240 // Fixup back-pointers to anchor. Address of anchor changes 1268 // Fixup back-pointers to anchor. Address of anchor changes
1241 // when we swap. 1269 // when we swap.
1242 anchor_.prev_page()->set_next_page(&anchor_); 1270 anchor_.prev_page()->set_next_page(&anchor_);
1243 anchor_.next_page()->set_prev_page(&anchor_); 1271 anchor_.next_page()->set_prev_page(&anchor_);
(...skipping 26 matching lines...) Expand all
1270 bool is_from_space = (id_ == kFromSpace); 1298 bool is_from_space = (id_ == kFromSpace);
1271 NewSpacePage* page = anchor_.next_page(); 1299 NewSpacePage* page = anchor_.next_page();
1272 CHECK(anchor_.semi_space() == this); 1300 CHECK(anchor_.semi_space() == this);
1273 while (page != &anchor_) { 1301 while (page != &anchor_) {
1274 CHECK(page->semi_space() == this); 1302 CHECK(page->semi_space() == this);
1275 CHECK(page->InNewSpace()); 1303 CHECK(page->InNewSpace());
1276 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE 1304 CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
1277 : MemoryChunk::IN_TO_SPACE)); 1305 : MemoryChunk::IN_TO_SPACE));
1278 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE 1306 CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
1279 : MemoryChunk::IN_FROM_SPACE)); 1307 : MemoryChunk::IN_FROM_SPACE));
1308 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
1309 if (!is_from_space) {
1310 // The pointers-from-here-are-interesting flag isn't updated dynamically
1311 // on from-space pages, so it might be out of sync with the marking state.
Erik Corry 2011/06/14 12:17:17 Would it be simpler to update it dynamically on from-space pages as well?
Lasse Reichstein 2011/06/14 14:13:03 Yes, but why move complexity from debug-only code into the runtime?
1312 if (page->heap()->incremental_marking()->IsMarking()) {
1313 CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1314 } else {
1315 CHECK(!page->IsFlagSet(
1316 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
1317 }
1318 }
1319 CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
1280 CHECK(page->prev_page()->next_page() == page); 1320 CHECK(page->prev_page()->next_page() == page);
1281 page = page->next_page(); 1321 page = page->next_page();
1282 } 1322 }
1283 } 1323 }
1284 1324
1285 1325
1286 void SemiSpace::ValidateRange(Address start, Address end) { 1326 void SemiSpace::AssertValidRange(Address start, Address end) {
1287 // Addresses belong to same semi-space 1327 // Addresses belong to same semi-space
1288 NewSpacePage* page = NewSpacePage::FromAddress(start); 1328 NewSpacePage* page = NewSpacePage::FromAddress(start);
1289 NewSpacePage* end_page = NewSpacePage::FromLimit(end); 1329 NewSpacePage* end_page = NewSpacePage::FromLimit(end);
1290 SemiSpace* space = page->semi_space(); 1330 SemiSpace* space = page->semi_space();
1291 CHECK_EQ(space, end_page->semi_space()); 1331 CHECK_EQ(space, end_page->semi_space());
1292 // Start address is before end address, either on same page, 1332 // Start address is before end address, either on same page,
1293 // or end address is on a later page in the linked list of 1333 // or end address is on a later page in the linked list of
1294 // semi-space pages. 1334 // semi-space pages.
1295 if (page == end_page) { 1335 if (page == end_page) {
1296 CHECK(start <= end); 1336 CHECK(start <= end);
(...skipping 26 matching lines...) Expand all
1323 1363
1324 1364
1325 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) { 1365 SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
1326 Initialize(from, to, NULL); 1366 Initialize(from, to, NULL);
1327 } 1367 }
1328 1368
1329 1369
1330 void SemiSpaceIterator::Initialize(Address start, 1370 void SemiSpaceIterator::Initialize(Address start,
1331 Address end, 1371 Address end,
1332 HeapObjectCallback size_func) { 1372 HeapObjectCallback size_func) {
1333 #ifdef DEBUG 1373 SemiSpace::AssertValidRange(start, end);
1334 SemiSpace::ValidateRange(start, end);
1335 #endif
1336 NewSpacePage* page = NewSpacePage::FromAddress(start);
1337 current_ = start; 1374 current_ = start;
1338 limit_ = end; 1375 limit_ = end;
1339 current_page_limit_ = page->body_limit();
1340 size_func_ = size_func; 1376 size_func_ = size_func;
1341 } 1377 }
1342 1378
1343 1379
1344 #ifdef DEBUG 1380 #ifdef DEBUG
1345 // heap_histograms is shared, always clear it before using it. 1381 // heap_histograms is shared, always clear it before using it.
1346 static void ClearHistograms() { 1382 static void ClearHistograms() {
1347 Isolate* isolate = Isolate::Current(); 1383 Isolate* isolate = Isolate::Current();
1348 // We reset the name each time, though it hasn't changed. 1384 // We reset the name each time, though it hasn't changed.
1349 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name); 1385 #define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
(...skipping 1078 matching lines...) Expand 10 before | Expand all | Expand 10 after
2428 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) { 2464 for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
2429 if (obj->IsCode()) { 2465 if (obj->IsCode()) {
2430 Code* code = Code::cast(obj); 2466 Code* code = Code::cast(obj);
2431 isolate->code_kind_statistics()[code->kind()] += code->Size(); 2467 isolate->code_kind_statistics()[code->kind()] += code->Size();
2432 } 2468 }
2433 } 2469 }
2434 } 2470 }
2435 #endif // DEBUG 2471 #endif // DEBUG
2436 2472
2437 } } // namespace v8::internal 2473 } } // namespace v8::internal
OLDNEW
« src/spaces.h ('K') | « src/spaces.h ('k') | src/spaces-inl.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698