Chromium Code Reviews

Side by Side Diff: src/spaces.cc

Issue 40083002: Make top and limit fields in AllocationInfo private, assert on non-aligned setting of these fields, … (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 1 month ago
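The patch replaces every direct read or write of AllocationInfo's top and limit fields in this file with accessor calls: top(), limit(), set_top() and set_limit(). The class itself is declared in spaces.h, which is not part of this diff, so the following is only a rough sketch of what the private-field version with asserting setters could look like; the ASSERT usage and the kObjectAlignmentMask name are illustrative assumptions, not text taken from the patch.

class AllocationInfo {
 public:
  AllocationInfo() : top_(NULL), limit_(NULL) {}

  // Setters check alignment so a non-aligned top or limit is caught at the
  // point of the store (a NULL value trivially passes the check, so the
  // set_top(NULL) / set_limit(NULL) calls in the hunks below stay valid).
  void set_top(Address top) {
    ASSERT((reinterpret_cast<intptr_t>(top) & kObjectAlignmentMask) == 0);
    top_ = top;
  }
  Address top() const { return top_; }

  void set_limit(Address limit) {
    ASSERT((reinterpret_cast<intptr_t>(limit) & kObjectAlignmentMask) == 0);
    limit_ = limit;
  }
  Address limit() const { return limit_; }

 private:
  // The linear allocation area is the half-open range [top_, limit_).
  Address top_;    // Current allocation pointer.
  Address limit_;  // End of the usable allocation area.
};

A call site therefore changes from "allocation_info_.top = NULL;" to "allocation_info_.set_top(NULL);", as the hunks below show.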
OLD | NEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 942 matching lines...)
953 if (id == CODE_SPACE) { 953 if (id == CODE_SPACE) {
954 area_size_ = heap->isolate()->memory_allocator()-> 954 area_size_ = heap->isolate()->memory_allocator()->
955 CodePageAreaSize(); 955 CodePageAreaSize();
956 } else { 956 } else {
957 area_size_ = Page::kPageSize - Page::kObjectStartOffset; 957 area_size_ = Page::kPageSize - Page::kObjectStartOffset;
958 } 958 }
959 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) 959 max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
960 * AreaSize(); 960 * AreaSize();
961 accounting_stats_.Clear(); 961 accounting_stats_.Clear();
962 962
963 allocation_info_.top = NULL; 963 allocation_info_.set_top(NULL);
964 allocation_info_.limit = NULL; 964 allocation_info_.set_limit(NULL);
965 965
966 anchor_.InitializeAsAnchor(this); 966 anchor_.InitializeAsAnchor(this);
967 } 967 }
968 968
969 969
970 bool PagedSpace::SetUp() { 970 bool PagedSpace::SetUp() {
971 return true; 971 return true;
972 } 972 }
973 973
974 974
975 bool PagedSpace::HasBeenSetUp() { 975 bool PagedSpace::HasBeenSetUp() {
976 return true; 976 return true;
977 } 977 }
978 978
979 979
980 void PagedSpace::TearDown() { 980 void PagedSpace::TearDown() {
981 PageIterator iterator(this); 981 PageIterator iterator(this);
982 while (iterator.has_next()) { 982 while (iterator.has_next()) {
983 heap()->isolate()->memory_allocator()->Free(iterator.next()); 983 heap()->isolate()->memory_allocator()->Free(iterator.next());
984 } 984 }
985 anchor_.set_next_page(&anchor_); 985 anchor_.set_next_page(&anchor_);
986 anchor_.set_prev_page(&anchor_); 986 anchor_.set_prev_page(&anchor_);
987 accounting_stats_.Clear(); 987 accounting_stats_.Clear();
988 } 988 }
989 989
990 990
991 size_t PagedSpace::CommittedPhysicalMemory() { 991 size_t PagedSpace::CommittedPhysicalMemory() {
992 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); 992 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
993 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); 993 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
994 size_t size = 0; 994 size_t size = 0;
995 PageIterator it(this); 995 PageIterator it(this);
996 while (it.has_next()) { 996 while (it.has_next()) {
997 size += it.next()->CommittedPhysicalMemory(); 997 size += it.next()->CommittedPhysicalMemory();
998 } 998 }
999 return size; 999 return size;
1000 } 1000 }
1001 1001
1002 1002
1003 MaybeObject* PagedSpace::FindObject(Address addr) { 1003 MaybeObject* PagedSpace::FindObject(Address addr) {
(...skipping 131 matching lines...)
1135 } 1135 }
1136 1136
1137 if (page->WasSwept()) { 1137 if (page->WasSwept()) {
1138 intptr_t size = free_list_.EvictFreeListItems(page); 1138 intptr_t size = free_list_.EvictFreeListItems(page);
1139 accounting_stats_.AllocateBytes(size); 1139 accounting_stats_.AllocateBytes(size);
1140 ASSERT_EQ(AreaSize(), static_cast<int>(size)); 1140 ASSERT_EQ(AreaSize(), static_cast<int>(size));
1141 } else { 1141 } else {
1142 DecreaseUnsweptFreeBytes(page); 1142 DecreaseUnsweptFreeBytes(page);
1143 } 1143 }
1144 1144
1145 if (Page::FromAllocationTop(allocation_info_.top) == page) { 1145 if (Page::FromAllocationTop(allocation_info_.top()) == page) {
1146 allocation_info_.top = allocation_info_.limit = NULL; 1146 allocation_info_.set_top(NULL);
1147 allocation_info_.set_limit(NULL);
1147 } 1148 }
1148 1149
1149 if (unlink) { 1150 if (unlink) {
1150 page->Unlink(); 1151 page->Unlink();
1151 } 1152 }
1152 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) { 1153 if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
1153 heap()->isolate()->memory_allocator()->Free(page); 1154 heap()->isolate()->memory_allocator()->Free(page);
1154 } else { 1155 } else {
1155 heap()->QueueMemoryChunkForFree(page); 1156 heap()->QueueMemoryChunkForFree(page);
1156 } 1157 }
1157 1158
1158 ASSERT(Capacity() > 0); 1159 ASSERT(Capacity() > 0);
1159 accounting_stats_.ShrinkSpace(AreaSize()); 1160 accounting_stats_.ShrinkSpace(AreaSize());
1160 } 1161 }
1161 1162
1162 1163
1163 #ifdef DEBUG 1164 #ifdef DEBUG
1164 void PagedSpace::Print() { } 1165 void PagedSpace::Print() { }
1165 #endif 1166 #endif
1166 1167
1167 #ifdef VERIFY_HEAP 1168 #ifdef VERIFY_HEAP
1168 void PagedSpace::Verify(ObjectVisitor* visitor) { 1169 void PagedSpace::Verify(ObjectVisitor* visitor) {
1169 // We can only iterate over the pages if they were swept precisely. 1170 // We can only iterate over the pages if they were swept precisely.
1170 if (was_swept_conservatively_) return; 1171 if (was_swept_conservatively_) return;
1171 1172
1172 bool allocation_pointer_found_in_space = 1173 bool allocation_pointer_found_in_space =
1173 (allocation_info_.top == allocation_info_.limit); 1174 (allocation_info_.top() == allocation_info_.limit());
1174 PageIterator page_iterator(this); 1175 PageIterator page_iterator(this);
1175 while (page_iterator.has_next()) { 1176 while (page_iterator.has_next()) {
1176 Page* page = page_iterator.next(); 1177 Page* page = page_iterator.next();
1177 CHECK(page->owner() == this); 1178 CHECK(page->owner() == this);
1178 if (page == Page::FromAllocationTop(allocation_info_.top)) { 1179 if (page == Page::FromAllocationTop(allocation_info_.top())) {
1179 allocation_pointer_found_in_space = true; 1180 allocation_pointer_found_in_space = true;
1180 } 1181 }
1181 CHECK(page->WasSweptPrecisely()); 1182 CHECK(page->WasSweptPrecisely());
1182 HeapObjectIterator it(page, NULL); 1183 HeapObjectIterator it(page, NULL);
1183 Address end_of_previous_object = page->area_start(); 1184 Address end_of_previous_object = page->area_start();
1184 Address top = page->area_end(); 1185 Address top = page->area_end();
1185 int black_size = 0; 1186 int black_size = 0;
1186 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) { 1187 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
1187 CHECK(end_of_previous_object <= object->address()); 1188 CHECK(end_of_previous_object <= object->address());
1188 1189
(...skipping 90 matching lines...)
1279 if (allocated_histogram_) { 1280 if (allocated_histogram_) {
1280 DeleteArray(allocated_histogram_); 1281 DeleteArray(allocated_histogram_);
1281 allocated_histogram_ = NULL; 1282 allocated_histogram_ = NULL;
1282 } 1283 }
1283 if (promoted_histogram_) { 1284 if (promoted_histogram_) {
1284 DeleteArray(promoted_histogram_); 1285 DeleteArray(promoted_histogram_);
1285 promoted_histogram_ = NULL; 1286 promoted_histogram_ = NULL;
1286 } 1287 }
1287 1288
1288 start_ = NULL; 1289 start_ = NULL;
1289 allocation_info_.top = NULL; 1290 allocation_info_.set_top(NULL);
1290 allocation_info_.limit = NULL; 1291 allocation_info_.set_limit(NULL);
1291 1292
1292 to_space_.TearDown(); 1293 to_space_.TearDown();
1293 from_space_.TearDown(); 1294 from_space_.TearDown();
1294 1295
1295 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_)); 1296 LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
1296 1297
1297 ASSERT(reservation_.IsReserved()); 1298 ASSERT(reservation_.IsReserved());
1298 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_, 1299 heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
1299 NOT_EXECUTABLE); 1300 NOT_EXECUTABLE);
1300 chunk_base_ = NULL; 1301 chunk_base_ = NULL;
(...skipping 36 matching lines...)
1337 if (!from_space_.ShrinkTo(rounded_new_capacity)) { 1338 if (!from_space_.ShrinkTo(rounded_new_capacity)) {
1338 // If we managed to shrink to-space but couldn't shrink from 1339 // If we managed to shrink to-space but couldn't shrink from
1339 // space, attempt to grow to-space again. 1340 // space, attempt to grow to-space again.
1340 if (!to_space_.GrowTo(from_space_.Capacity())) { 1341 if (!to_space_.GrowTo(from_space_.Capacity())) {
1341 // We are in an inconsistent state because we could not 1342 // We are in an inconsistent state because we could not
1342 // commit/uncommit memory from new space. 1343 // commit/uncommit memory from new space.
1343 V8::FatalProcessOutOfMemory("Failed to shrink new space."); 1344 V8::FatalProcessOutOfMemory("Failed to shrink new space.");
1344 } 1345 }
1345 } 1346 }
1346 } 1347 }
1347 allocation_info_.limit = to_space_.page_high(); 1348 allocation_info_.set_limit(to_space_.page_high());
1348 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1349 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1349 } 1350 }
1350 1351
1351 1352
1352 void NewSpace::UpdateAllocationInfo() { 1353 void NewSpace::UpdateAllocationInfo() {
1353 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); 1354 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1354 allocation_info_.top = to_space_.page_low(); 1355 allocation_info_.set_top(to_space_.page_low());
1355 allocation_info_.limit = to_space_.page_high(); 1356 allocation_info_.set_limit(to_space_.page_high());
1356 1357
1357 // Lower limit during incremental marking. 1358 // Lower limit during incremental marking.
1358 if (heap()->incremental_marking()->IsMarking() && 1359 if (heap()->incremental_marking()->IsMarking() &&
1359 inline_allocation_limit_step() != 0) { 1360 inline_allocation_limit_step() != 0) {
1360 Address new_limit = 1361 Address new_limit =
1361 allocation_info_.top + inline_allocation_limit_step(); 1362 allocation_info_.top() + inline_allocation_limit_step();
1362 allocation_info_.limit = Min(new_limit, allocation_info_.limit); 1363 allocation_info_.set_limit(Min(new_limit, allocation_info_.limit()));
1363 } 1364 }
1364 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 1365 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
1365 } 1366 }
1366 1367
1367 1368
1368 void NewSpace::ResetAllocationInfo() { 1369 void NewSpace::ResetAllocationInfo() {
1369 to_space_.Reset(); 1370 to_space_.Reset();
1370 UpdateAllocationInfo(); 1371 UpdateAllocationInfo();
1371 pages_used_ = 0; 1372 pages_used_ = 0;
1372 // Clear all mark-bits in the to-space. 1373 // Clear all mark-bits in the to-space.
1373 NewSpacePageIterator it(&to_space_); 1374 NewSpacePageIterator it(&to_space_);
1374 while (it.has_next()) { 1375 while (it.has_next()) {
1375 Bitmap::Clear(it.next()); 1376 Bitmap::Clear(it.next());
1376 } 1377 }
1377 } 1378 }
1378 1379
1379 1380
1380 bool NewSpace::AddFreshPage() { 1381 bool NewSpace::AddFreshPage() {
1381 Address top = allocation_info_.top; 1382 Address top = allocation_info_.top();
1382 if (NewSpacePage::IsAtStart(top)) { 1383 if (NewSpacePage::IsAtStart(top)) {
1383 // The current page is already empty. Don't try to make another. 1384 // The current page is already empty. Don't try to make another.
1384 1385
1385 // We should only get here if someone asks to allocate more 1386 // We should only get here if someone asks to allocate more
1386 // than what can be stored in a single page. 1387 // than what can be stored in a single page.
1387 // TODO(gc): Change the limit on new-space allocation to prevent this 1388 // TODO(gc): Change the limit on new-space allocation to prevent this
1388 // from happening (all such allocations should go directly to LOSpace). 1389 // from happening (all such allocations should go directly to LOSpace).
1389 return false; 1390 return false;
1390 } 1391 }
1391 if (!to_space_.AdvancePage()) { 1392 if (!to_space_.AdvancePage()) {
(...skipping 11 matching lines...)
1403 int remaining_in_page = static_cast<int>(limit - top); 1404 int remaining_in_page = static_cast<int>(limit - top);
1404 heap()->CreateFillerObjectAt(top, remaining_in_page); 1405 heap()->CreateFillerObjectAt(top, remaining_in_page);
1405 pages_used_++; 1406 pages_used_++;
1406 UpdateAllocationInfo(); 1407 UpdateAllocationInfo();
1407 1408
1408 return true; 1409 return true;
1409 } 1410 }
1410 1411
1411 1412
1412 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) { 1413 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
1413 Address old_top = allocation_info_.top; 1414 Address old_top = allocation_info_.top();
1414 Address new_top = old_top + size_in_bytes; 1415 Address new_top = old_top + size_in_bytes;
1415 Address high = to_space_.page_high(); 1416 Address high = to_space_.page_high();
1416 if (allocation_info_.limit < high) { 1417 if (allocation_info_.limit() < high) {
1417 // Incremental marking has lowered the limit to get a 1418 // Incremental marking has lowered the limit to get a
1418 // chance to do a step. 1419 // chance to do a step.
1419 allocation_info_.limit = Min( 1420 Address new_limit = Min(
1420 allocation_info_.limit + inline_allocation_limit_step_, 1421 allocation_info_.limit() + inline_allocation_limit_step_,
1421 high); 1422 high);
1423 allocation_info_.set_limit(new_limit);
1422 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); 1424 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
1423 heap()->incremental_marking()->Step( 1425 heap()->incremental_marking()->Step(
1424 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); 1426 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
1425 top_on_previous_step_ = new_top; 1427 top_on_previous_step_ = new_top;
1426 return AllocateRaw(size_in_bytes); 1428 return AllocateRaw(size_in_bytes);
1427 } else if (AddFreshPage()) { 1429 } else if (AddFreshPage()) {
1428 // Switched to new page. Try allocating again. 1430 // Switched to new page. Try allocating again.
1429 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); 1431 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
1430 heap()->incremental_marking()->Step( 1432 heap()->incremental_marking()->Step(
1431 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); 1433 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
(...skipping 534 matching lines...)
1966 void NewSpace::RecordPromotion(HeapObject* obj) { 1968 void NewSpace::RecordPromotion(HeapObject* obj) {
1967 InstanceType type = obj->map()->instance_type(); 1969 InstanceType type = obj->map()->instance_type();
1968 ASSERT(0 <= type && type <= LAST_TYPE); 1970 ASSERT(0 <= type && type <= LAST_TYPE);
1969 promoted_histogram_[type].increment_number(1); 1971 promoted_histogram_[type].increment_number(1);
1970 promoted_histogram_[type].increment_bytes(obj->Size()); 1972 promoted_histogram_[type].increment_bytes(obj->Size());
1971 } 1973 }
1972 1974
1973 1975
1974 size_t NewSpace::CommittedPhysicalMemory() { 1976 size_t NewSpace::CommittedPhysicalMemory() {
1975 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory(); 1977 if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
1976 MemoryChunk::UpdateHighWaterMark(allocation_info_.top); 1978 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1977 size_t size = to_space_.CommittedPhysicalMemory(); 1979 size_t size = to_space_.CommittedPhysicalMemory();
1978 if (from_space_.is_committed()) { 1980 if (from_space_.is_committed()) {
1979 size += from_space_.CommittedPhysicalMemory(); 1981 size += from_space_.CommittedPhysicalMemory();
1980 } 1982 }
1981 return size; 1983 return size;
1982 } 1984 }
1983 1985
1984 1986
1985 // ----------------------------------------------------------------------------- 1987 // -----------------------------------------------------------------------------
1986 // Free lists for old object spaces implementation 1988 // Free lists for old object spaces implementation
(...skipping 505 matching lines...)
2492 // space than the minimum NewSpace size. The limit can be set lower than 2494 // space than the minimum NewSpace size. The limit can be set lower than
2493 // the end of new space either because there is more space on the next page 2495 // the end of new space either because there is more space on the next page
2494 // or because we have lowered the limit in order to get periodic incremental 2496 // or because we have lowered the limit in order to get periodic incremental
2495 // marking. The most reliable way to ensure that there is linear space is 2497 // marking. The most reliable way to ensure that there is linear space is
2496 // to do the allocation, then rewind the limit. 2498 // to do the allocation, then rewind the limit.
2497 ASSERT(bytes <= InitialCapacity()); 2499 ASSERT(bytes <= InitialCapacity());
2498 MaybeObject* maybe = AllocateRaw(bytes); 2500 MaybeObject* maybe = AllocateRaw(bytes);
2499 Object* object = NULL; 2501 Object* object = NULL;
2500 if (!maybe->ToObject(&object)) return false; 2502 if (!maybe->ToObject(&object)) return false;
2501 HeapObject* allocation = HeapObject::cast(object); 2503 HeapObject* allocation = HeapObject::cast(object);
2502 Address top = allocation_info_.top; 2504 Address top = allocation_info_.top();
2503 if ((top - bytes) == allocation->address()) { 2505 if ((top - bytes) == allocation->address()) {
2504 allocation_info_.top = allocation->address(); 2506 allocation_info_.set_top(allocation->address());
2505 return true; 2507 return true;
2506 } 2508 }
2507 // There may be a borderline case here where the allocation succeeded, but 2509 // There may be a borderline case here where the allocation succeeded, but
2508 // the limit and top have moved on to a new page. In that case we try again. 2510 // the limit and top have moved on to a new page. In that case we try again.
2509 return ReserveSpace(bytes); 2511 return ReserveSpace(bytes);
2510 } 2512 }
2511 2513
2512 2514
2513 void PagedSpace::PrepareForMarkCompact() { 2515 void PagedSpace::PrepareForMarkCompact() {
2514 // We don't have a linear allocation area while sweeping. It will be restored 2516 // We don't have a linear allocation area while sweeping. It will be restored
(...skipping 25 matching lines...)
2540 unswept_free_bytes_ = 0; 2542 unswept_free_bytes_ = 0;
2541 2543
2542 // Clear the free list before a full GC---it will be rebuilt afterward. 2544 // Clear the free list before a full GC---it will be rebuilt afterward.
2543 free_list_.Reset(); 2545 free_list_.Reset();
2544 } 2546 }
2545 2547
2546 2548
2547 bool PagedSpace::ReserveSpace(int size_in_bytes) { 2549 bool PagedSpace::ReserveSpace(int size_in_bytes) {
2548 ASSERT(size_in_bytes <= AreaSize()); 2550 ASSERT(size_in_bytes <= AreaSize());
2549 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); 2551 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes));
2550 Address current_top = allocation_info_.top; 2552 Address current_top = allocation_info_.top();
2551 Address new_top = current_top + size_in_bytes; 2553 Address new_top = current_top + size_in_bytes;
2552 if (new_top <= allocation_info_.limit) return true; 2554 if (new_top <= allocation_info_.limit()) return true;
2553 2555
2554 HeapObject* new_area = free_list_.Allocate(size_in_bytes); 2556 HeapObject* new_area = free_list_.Allocate(size_in_bytes);
2555 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); 2557 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes);
2556 if (new_area == NULL) return false; 2558 if (new_area == NULL) return false;
2557 2559
2558 int old_linear_size = static_cast<int>(limit() - top()); 2560 int old_linear_size = static_cast<int>(limit() - top());
2559 // Mark the old linear allocation area with a free space so it can be 2561 // Mark the old linear allocation area with a free space so it can be
2560 // skipped when scanning the heap. This also puts it back in the free list 2562 // skipped when scanning the heap. This also puts it back in the free list
2561 // if it is big enough. 2563 // if it is big enough.
2562 Free(top(), old_linear_size); 2564 Free(top(), old_linear_size);
(...skipping 54 matching lines...)
2617 first_unswept_page_ = p; 2619 first_unswept_page_ = p;
2618 } 2620 }
2619 2621
2620 heap()->FreeQueuedChunks(); 2622 heap()->FreeQueuedChunks();
2621 2623
2622 return IsLazySweepingComplete(); 2624 return IsLazySweepingComplete();
2623 } 2625 }
2624 2626
2625 2627
2626 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() { 2628 void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
2627 if (allocation_info_.top >= allocation_info_.limit) return; 2629 if (allocation_info_.top() >= allocation_info_.limit()) return;
2628 2630
2629 if (Page::FromAllocationTop(allocation_info_.top)->IsEvacuationCandidate()) { 2631 if (Page::FromAllocationTop(allocation_info_.top())->
2632 IsEvacuationCandidate()) {
2630 // Create filler object to keep page iterable if it was iterable. 2633 // Create filler object to keep page iterable if it was iterable.
2631 int remaining = 2634 int remaining =
2632 static_cast<int>(allocation_info_.limit - allocation_info_.top); 2635 static_cast<int>(allocation_info_.limit() - allocation_info_.top());
2633 heap()->CreateFillerObjectAt(allocation_info_.top, remaining); 2636 heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
2634 2637
2635 allocation_info_.top = NULL; 2638 allocation_info_.set_top(NULL);
2636 allocation_info_.limit = NULL; 2639 allocation_info_.set_limit(NULL);
2637 } 2640 }
2638 } 2641 }
2639 2642
2640 2643
2641 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) { 2644 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) {
2642 MarkCompactCollector* collector = heap()->mark_compact_collector(); 2645 MarkCompactCollector* collector = heap()->mark_compact_collector();
2643 if (collector->AreSweeperThreadsActivated()) { 2646 if (collector->AreSweeperThreadsActivated()) {
2644 if (collector->IsConcurrentSweepingInProgress()) { 2647 if (collector->IsConcurrentSweepingInProgress()) {
2645 if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) { 2648 if (collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) {
2646 if (!collector->sequential_sweeping()) { 2649 if (!collector->sequential_sweeping()) {
(...skipping 573 matching lines...)
3220 object->ShortPrint(); 3223 object->ShortPrint();
3221 PrintF("\n"); 3224 PrintF("\n");
3222 } 3225 }
3223 printf(" --------------------------------------\n"); 3226 printf(" --------------------------------------\n");
3224 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 3227 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
3225 } 3228 }
3226 3229
3227 #endif // DEBUG 3230 #endif // DEBUG
3228 3231
3229 } } // namespace v8::internal 3232 } } // namespace v8::internal