| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 1332 matching lines...) |
| 1343 if (!from_space_.ShrinkTo(rounded_new_capacity)) { | 1343 if (!from_space_.ShrinkTo(rounded_new_capacity)) { |
| 1344 // If we managed to shrink to-space but couldn't shrink from | 1344 // If we managed to shrink to-space but couldn't shrink from |
| 1345 // space, attempt to grow to-space again. | 1345 // space, attempt to grow to-space again. |
| 1346 if (!to_space_.GrowTo(from_space_.Capacity())) { | 1346 if (!to_space_.GrowTo(from_space_.Capacity())) { |
| 1347 // We are in an inconsistent state because we could not | 1347 // We are in an inconsistent state because we could not |
| 1348 // commit/uncommit memory from new space. | 1348 // commit/uncommit memory from new space. |
| 1349 V8::FatalProcessOutOfMemory("Failed to shrink new space."); | 1349 V8::FatalProcessOutOfMemory("Failed to shrink new space."); |
| 1350 } | 1350 } |
| 1351 } | 1351 } |
| 1352 } | 1352 } |
| 1353 allocation_info_.set_limit(to_space_.page_high()); | |
| 1354 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1353 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1355 } | 1354 } |
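The hunk above keeps the two semispaces' capacities in lockstep: if from-space cannot follow the shrink, to-space is grown back, and if even that fails the process aborts rather than continue with mismatched semispaces. A standalone sketch of that rollback pattern (simplified stand-ins, not the actual V8 SemiSpace API beyond the method names visible in the diff):

```cpp
// Minimal model of the shrink-with-rollback pattern above. SemiSpace,
// ShrinkTo, and GrowTo are simplified stand-ins; only the control flow
// mirrors the diff.
#include <cstdio>
#include <cstdlib>

struct SemiSpace {
  size_t capacity;
  bool ShrinkTo(size_t c) { capacity = c; return true; }  // uncommit may fail
  bool GrowTo(size_t c) { capacity = c; return true; }    // commit may fail
  size_t Capacity() const { return capacity; }
};

void Shrink(SemiSpace& to_space, SemiSpace& from_space, size_t new_capacity) {
  if (!to_space.ShrinkTo(new_capacity)) return;  // nothing changed yet
  if (!from_space.ShrinkTo(new_capacity)) {
    // to-space shrank but from-space could not: grow to-space back so the
    // pair stays symmetric.
    if (!to_space.GrowTo(from_space.Capacity())) {
      // Neither symmetric state is reachable; mirroring the diff, the only
      // safe option is a fatal abort.
      std::fprintf(stderr, "Failed to shrink new space.\n");
      std::abort();
    }
  }
}
```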
| 1356 | 1355 |
| 1357 | 1356 |
| 1358 void NewSpace::UpdateAllocationInfo() { | 1357 void NewSpace::UpdateAllocationInfo() { |
| 1359 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); | 1358 MemoryChunk::UpdateHighWaterMark(allocation_info_.top()); |
| 1360 allocation_info_.set_top(to_space_.page_low()); | 1359 allocation_info_.set_top(to_space_.page_low()); |
| 1361 allocation_info_.set_limit(to_space_.page_high()); | 1360 allocation_info_.set_limit(to_space_.page_high()); |
| 1362 | 1361 UpdateInlineAllocationLimit(0); |
| 1363 // Lower limit during incremental marking. | |
| 1364 if (heap()->incremental_marking()->IsMarking() && | |
| 1365 inline_allocation_limit_step() != 0) { | |
| 1366 Address new_limit = | |
| 1367 allocation_info_.top() + inline_allocation_limit_step(); | |
| 1368 allocation_info_.set_limit(Min(new_limit, allocation_info_.limit())); | |
| 1369 } | |
| 1370 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); | 1362 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1371 } | 1363 } |
| 1372 | 1364 |
| 1373 | 1365 |
| 1374 void NewSpace::ResetAllocationInfo() { | 1366 void NewSpace::ResetAllocationInfo() { |
| 1375 to_space_.Reset(); | 1367 to_space_.Reset(); |
| 1376 UpdateAllocationInfo(); | 1368 UpdateAllocationInfo(); |
| 1377 pages_used_ = 0; | 1369 pages_used_ = 0; |
| 1378 // Clear all mark-bits in the to-space. | 1370 // Clear all mark-bits in the to-space. |
| 1379 NewSpacePageIterator it(&to_space_); | 1371 NewSpacePageIterator it(&to_space_); |
| 1380 while (it.has_next()) { | 1372 while (it.has_next()) { |
| 1381 Bitmap::Clear(it.next()); | 1373 Bitmap::Clear(it.next()); |
| 1382 } | 1374 } |
| 1383 } | 1375 } |
| 1384 | 1376 |
| 1385 | 1377 |
| 1378 void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) { |
| 1379 if (heap()->inline_allocation_disabled()) { |
| 1380 // Lowest limit when linear allocation is disabled. |
| 1381 Address high = to_space_.page_high(); |
| 1382 Address new_top = allocation_info_.top() + size_in_bytes; |
| 1383 allocation_info_.set_limit(Min(new_top, high)); |
| 1384 } else if (inline_allocation_limit_step() == 0) { |
| 1385 // Normal limit is the end of the current page. |
| 1386 allocation_info_.set_limit(to_space_.page_high()); |
| 1387 } else { |
| 1388 // Lower limit during incremental marking. |
| 1389 Address high = to_space_.page_high(); |
| 1390 Address new_top = allocation_info_.top() + size_in_bytes; |
| 1391 Address new_limit = new_top + inline_allocation_limit_step_; |
| 1392 allocation_info_.set_limit(Min(new_limit, high)); |
| 1393 } |
| 1394 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); |
| 1395 } |
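The new UpdateInlineAllocationLimit() picks one of three limits: just past the pending allocation when inline allocation is disabled, the page end in the normal case, and a small step past the allocation during incremental marking. A standalone sketch of the same decision (std::min standing in for v8::internal::Min, char* for Address; not V8 code):

```cpp
// Standalone model of the three-way limit choice above.
#include <algorithm>
using Address = char*;

struct AllocationInfo { Address top_, limit_; };

Address ComputeInlineAllocationLimit(const AllocationInfo& info,
                                     Address page_high,
                                     int size_in_bytes,
                                     int limit_step,
                                     bool inline_allocation_disabled) {
  if (inline_allocation_disabled) {
    // Lowest limit: exactly enough for the pending allocation, so every
    // subsequent allocation is forced onto the slow path.
    return std::min(info.top_ + size_in_bytes, page_high);
  } else if (limit_step == 0) {
    // Normal limit: the end of the current page.
    return page_high;
  } else {
    // During incremental marking: stop limit_step bytes past the pending
    // allocation, forcing a slow-path call that can do a marking step.
    return std::min(info.top_ + size_in_bytes + limit_step, page_high);
  }
}
```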
| 1396 |
| 1397 |
| 1386 bool NewSpace::AddFreshPage() { | 1398 bool NewSpace::AddFreshPage() { |
| 1387 Address top = allocation_info_.top(); | 1399 Address top = allocation_info_.top(); |
| 1388 if (NewSpacePage::IsAtStart(top)) { | 1400 if (NewSpacePage::IsAtStart(top)) { |
| 1389 // The current page is already empty. Don't try to make another. | 1401 // The current page is already empty. Don't try to make another. |
| 1390 | 1402 |
| 1391 // We should only get here if someone asks to allocate more | 1403 // We should only get here if someone asks to allocate more |
| 1392 // than what can be stored in a single page. | 1404 // than what can be stored in a single page. |
| 1393 // TODO(gc): Change the limit on new-space allocation to prevent this | 1405 // TODO(gc): Change the limit on new-space allocation to prevent this |
| 1394 // from happening (all such allocations should go directly to LOSpace). | 1406 // from happening (all such allocations should go directly to LOSpace). |
| 1395 return false; | 1407 return false; |
| (...skipping 14 matching lines...) |
| 1410 heap()->CreateFillerObjectAt(top, remaining_in_page); | 1422 heap()->CreateFillerObjectAt(top, remaining_in_page); |
| 1411 pages_used_++; | 1423 pages_used_++; |
| 1412 UpdateAllocationInfo(); | 1424 UpdateAllocationInfo(); |
| 1413 | 1425 |
| 1414 return true; | 1426 return true; |
| 1415 } | 1427 } |
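AddFreshPage() cannot leave the unused tail of the old page as raw bytes, because heap iterators walk objects back to back; the tail is therefore overwritten with a filler object before moving on. A sketch of that idea under simplified assumptions (the Filler type here is hypothetical; V8's real fillers are free-space objects identified by their map):

```cpp
// Hypothetical model of "fill the page tail so the heap stays iterable".
#include <new>

struct Filler {
  int size_in_bytes;  // lets a heap walker skip the dead gap in one step
};

void CreateFillerObjectAt(char* top, int remaining_in_page) {
  // V8 writes a free-space map at `top`; this stand-in just records the
  // size so an object walker can jump over the unused tail of the page.
  new (top) Filler{remaining_in_page};
}
```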
| 1416 | 1428 |
| 1417 | 1429 |
| 1418 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) { | 1430 MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) { |
| 1419 Address old_top = allocation_info_.top(); | 1431 Address old_top = allocation_info_.top(); |
| 1420 Address new_top = old_top + size_in_bytes; | |
| 1421 Address high = to_space_.page_high(); | 1432 Address high = to_space_.page_high(); |
| 1422 if (allocation_info_.limit() < high) { | 1433 if (allocation_info_.limit() < high) { |
| 1423 // Incremental marking has lowered the limit to get a | 1434 // The limit has been lowered either because linear allocation is disabled |
| 1424 // chance to do a step. | 1435 // or because incremental marking wants a chance to do a step. Set |
| 1425 Address new_limit = Min( | 1436 // the new limit accordingly. |
| 1426 allocation_info_.limit() + inline_allocation_limit_step_, | 1437 Address new_top = old_top + size_in_bytes; |
| 1427 high); | |
| 1428 allocation_info_.set_limit(new_limit); | |
| 1429 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); | 1438 int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); |
| 1430 heap()->incremental_marking()->Step( | 1439 heap()->incremental_marking()->Step( |
| 1431 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); | 1440 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); |
| 1441 UpdateInlineAllocationLimit(size_in_bytes); |
| 1432 top_on_previous_step_ = new_top; | 1442 top_on_previous_step_ = new_top; |
| 1433 return AllocateRaw(size_in_bytes); | 1443 return AllocateRaw(size_in_bytes); |
| 1434 } else if (AddFreshPage()) { | 1444 } else if (AddFreshPage()) { |
| 1435 // Switched to new page. Try allocating again. | 1445 // Switched to new page. Try allocating again. |
| 1436 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); | 1446 int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); |
| 1437 heap()->incremental_marking()->Step( | 1447 heap()->incremental_marking()->Step( |
| 1438 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); | 1448 bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); |
| 1439 top_on_previous_step_ = to_space_.page_low(); | 1449 top_on_previous_step_ = to_space_.page_low(); |
| 1440 return AllocateRaw(size_in_bytes); | 1450 return AllocateRaw(size_in_bytes); |
| 1441 } else { | 1451 } else { |
| (...skipping 925 matching lines...) |
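Before the diff moves on, it is worth spelling out the slow-path protocol of SlowAllocateRaw() as rewritten above: a limit below the real page end means "do a marking step, recompute the limit, retry"; a genuinely full page means "add a fresh page, retry"; only when both fail does the caller fall back to a GC. A self-contained model under those assumptions (plain pointers and trivial stand-ins for the marking and page machinery, not the actual V8 types):

```cpp
// Self-contained model of the retry protocol in SlowAllocateRaw above.
// AddFreshPage/MarkingStep are trivial stand-ins; only the control flow
// and the limit bookkeeping mirror the diff.
#include <algorithm>
#include <optional>

struct NewSpaceModel {
  char* top;
  char* limit;      // may sit below page_high while marking is active
  char* page_high;  // real end of the current page
  char* top_on_previous_step;
  int limit_step;   // inline_allocation_limit_step()

  bool AddFreshPage() { return false; }         // stand-in: no more pages
  void MarkingStep(int /*bytes_allocated*/) {}  // stand-in: marking work

  std::optional<char*> AllocateRaw(int size) {
    if (top + size <= limit) {  // fast path: bump allocation
      char* result = top;
      top += size;
      return result;
    }
    return SlowAllocateRaw(size);
  }

  std::optional<char*> SlowAllocateRaw(int size) {
    char* old_top = top;
    if (limit < page_high) {
      // The limit was lowered artificially. Credit the marker with the
      // bytes allocated since the last step, recompute the limit the way
      // UpdateInlineAllocationLimit does, and retry the fast path.
      char* new_top = old_top + size;
      MarkingStep(static_cast<int>(new_top - top_on_previous_step));
      limit = std::min(new_top + limit_step, page_high);
      top_on_previous_step = new_top;
      return AllocateRaw(size);
    } else if (AddFreshPage()) {
      // The page was genuinely full; account the tail and retry on the
      // fresh page (AddFreshPage resets top in the real code).
      MarkingStep(static_cast<int>(old_top - top_on_previous_step));
      top_on_previous_step = top;
      return AllocateRaw(size);
    }
    return std::nullopt;  // caller must fall back to a GC
  }
};
```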
| 2367 // skipped when scanning the heap. This also puts it back in the free list | 2377 // skipped when scanning the heap. This also puts it back in the free list |
| 2368 // if it is big enough. | 2378 // if it is big enough. |
| 2369 owner_->Free(owner_->top(), old_linear_size); | 2379 owner_->Free(owner_->top(), old_linear_size); |
| 2370 | 2380 |
| 2371 owner_->heap()->incremental_marking()->OldSpaceStep( | 2381 owner_->heap()->incremental_marking()->OldSpaceStep( |
| 2372 size_in_bytes - old_linear_size); | 2382 size_in_bytes - old_linear_size); |
| 2373 | 2383 |
| 2374 int new_node_size = 0; | 2384 int new_node_size = 0; |
| 2375 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); | 2385 FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size); |
| 2376 if (new_node == NULL) { | 2386 if (new_node == NULL) { |
| 2377 owner_->SetTop(NULL, NULL); | 2387 owner_->SetTopAndLimit(NULL, NULL); |
| 2378 return NULL; | 2388 return NULL; |
| 2379 } | 2389 } |
| 2380 | 2390 |
| 2381 int bytes_left = new_node_size - size_in_bytes; | 2391 int bytes_left = new_node_size - size_in_bytes; |
| 2382 ASSERT(bytes_left >= 0); | 2392 ASSERT(bytes_left >= 0); |
| 2383 | 2393 |
| 2384 #ifdef DEBUG | 2394 #ifdef DEBUG |
| 2385 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { | 2395 for (int i = 0; i < size_in_bytes / kPointerSize; i++) { |
| 2386 reinterpret_cast<Object**>(new_node->address())[i] = | 2396 reinterpret_cast<Object**>(new_node->address())[i] = |
| 2387 Smi::FromInt(kCodeZapValue); | 2397 Smi::FromInt(kCodeZapValue); |
| 2388 } | 2398 } |
| 2389 #endif | 2399 #endif |
| 2390 | 2400 |
| 2391 // The old-space-step might have finished sweeping and restarted marking. | 2401 // The old-space-step might have finished sweeping and restarted marking. |
| 2392 // Verify that it did not turn the page of the new node into an evacuation | 2402 // Verify that it did not turn the page of the new node into an evacuation |
| 2393 // candidate. | 2403 // candidate. |
| 2394 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); | 2404 ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node)); |
| 2395 | 2405 |
| 2396 const int kThreshold = IncrementalMarking::kAllocatedThreshold; | 2406 const int kThreshold = IncrementalMarking::kAllocatedThreshold; |
| 2397 | 2407 |
| 2398 // Memory in the linear allocation area is counted as allocated. We may free | 2408 // Memory in the linear allocation area is counted as allocated. We may free |
| 2399 // a little of this again immediately - see below. | 2409 // a little of this again immediately - see below. |
| 2400 owner_->Allocate(new_node_size); | 2410 owner_->Allocate(new_node_size); |
| 2401 | 2411 |
| 2402 if (bytes_left > kThreshold && | 2412 if (owner_->heap()->inline_allocation_disabled()) { |
| 2403 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && | 2413 // Keep the linear allocation area empty if requested to do so; just |
| 2404 FLAG_incremental_marking_steps) { | 2414 // return the area to the free list instead. |
| 2415 owner_->Free(new_node->address() + size_in_bytes, bytes_left); |
| 2416 ASSERT(owner_->top() == NULL && owner_->limit() == NULL); |
| 2417 } else if (bytes_left > kThreshold && |
| 2418 owner_->heap()->incremental_marking()->IsMarkingIncomplete() && |
| 2419 FLAG_incremental_marking_steps) { |
| 2405 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); | 2420 int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold); |
| 2406 // We don't want to give too large linear areas to the allocator while | 2421 // We don't want to give too large linear areas to the allocator while |
| 2407 // incremental marking is going on, because we won't check again whether | 2422 // incremental marking is going on, because we won't check again whether |
| 2408 // we want to do another increment until the linear area is used up. | 2423 // we want to do another increment until the linear area is used up. |
| 2409 owner_->Free(new_node->address() + size_in_bytes + linear_size, | 2424 owner_->Free(new_node->address() + size_in_bytes + linear_size, |
| 2410 new_node_size - size_in_bytes - linear_size); | 2425 new_node_size - size_in_bytes - linear_size); |
| 2411 owner_->SetTop(new_node->address() + size_in_bytes, | 2426 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, |
| 2412 new_node->address() + size_in_bytes + linear_size); | 2427 new_node->address() + size_in_bytes + linear_size); |
| 2413 } else if (bytes_left > 0) { | 2428 } else if (bytes_left > 0) { |
| 2414 // Normally we give the rest of the node to the allocator as its new | 2429 // Normally we give the rest of the node to the allocator as its new |
| 2415 // linear allocation area. | 2430 // linear allocation area. |
| 2416 owner_->SetTop(new_node->address() + size_in_bytes, | 2431 owner_->SetTopAndLimit(new_node->address() + size_in_bytes, |
| 2417 new_node->address() + new_node_size); | 2432 new_node->address() + new_node_size); |
| 2418 } else { | 2433 } else { |
| 2419 // TODO(gc) Try not freeing linear allocation region when bytes_left | 2434 // TODO(gc) Try not freeing linear allocation region when bytes_left |
| 2420 // is zero. | 2435 // is zero. |
| 2421 owner_->SetTop(NULL, NULL); | 2436 owner_->SetTopAndLimit(NULL, NULL); |
| 2422 } | 2437 } |
| 2423 | 2438 |
| 2424 return new_node; | 2439 return new_node; |
| 2425 } | 2440 } |
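The branch chain above encodes a small policy for the bytes left in the free-list node after carving out the request; the new first case keeps the linear area empty when inline allocation is disabled, and the capped middle case guarantees the allocator re-enters the slow path soon so incremental marking gets another chance to step. Summarized as a standalone function (the names here are descriptive stand-ins, not V8 API):

```cpp
// Standalone summary of the leftover-bytes policy above.
enum class LeftoverPolicy {
  kReturnToFreeList,  // keep the linear allocation area empty
  kSmallLinearArea,   // cap the area so the slow path is re-entered soon
  kFullLinearArea,    // hand the whole remainder to the allocator
  kNoLinearArea       // node fit exactly
};

LeftoverPolicy ClassifyLeftover(int bytes_left, int threshold,
                                bool inline_allocation_disabled,
                                bool marking_incomplete) {
  if (inline_allocation_disabled) {
    // Everything goes back to the free list so each allocation is
    // observable by the runtime.
    return LeftoverPolicy::kReturnToFreeList;
  } else if (bytes_left > threshold && marking_incomplete) {
    // Don't give the allocator a huge linear area while marking: it won't
    // call back until the area is used up.
    return LeftoverPolicy::kSmallLinearArea;
  } else if (bytes_left > 0) {
    return LeftoverPolicy::kFullLinearArea;
  }
  return LeftoverPolicy::kNoLinearArea;
}
```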
| 2426 | 2441 |
| 2427 | 2442 |
| 2428 intptr_t FreeList::EvictFreeListItems(Page* p) { | 2443 intptr_t FreeList::EvictFreeListItems(Page* p) { |
| 2429 intptr_t sum = huge_list_.EvictFreeListItemsInList(p); | 2444 intptr_t sum = huge_list_.EvictFreeListItemsInList(p); |
| 2430 p->set_available_in_huge_free_list(0); | 2445 p->set_available_in_huge_free_list(0); |
| 2431 | 2446 |
| (...skipping 68 matching lines...) |
| 2500 } | 2515 } |
| 2501 #endif | 2516 #endif |
| 2502 | 2517 |
| 2503 | 2518 |
| 2504 // ----------------------------------------------------------------------------- | 2519 // ----------------------------------------------------------------------------- |
| 2505 // OldSpace implementation | 2520 // OldSpace implementation |
| 2506 | 2521 |
| 2507 void PagedSpace::PrepareForMarkCompact() { | 2522 void PagedSpace::PrepareForMarkCompact() { |
| 2508 // We don't have a linear allocation area while sweeping. It will be restored | 2523 // We don't have a linear allocation area while sweeping. It will be restored |
| 2509 // on the first allocation after the sweep. | 2524 // on the first allocation after the sweep. |
| 2510 // Mark the old linear allocation area with a free space map so it can be | 2525 EmptyAllocationInfo(); |
| 2511 // skipped when scanning the heap. | |
| 2512 int old_linear_size = static_cast<int>(limit() - top()); | |
| 2513 Free(top(), old_linear_size); | |
| 2514 SetTop(NULL, NULL); | |
| 2515 | 2526 |
| 2516 // Stop lazy sweeping and clear marking bits for unswept pages. | 2527 // Stop lazy sweeping and clear marking bits for unswept pages. |
| 2517 if (first_unswept_page_ != NULL) { | 2528 if (first_unswept_page_ != NULL) { |
| 2518 Page* p = first_unswept_page_; | 2529 Page* p = first_unswept_page_; |
| 2519 do { | 2530 do { |
| 2520 // Do not use ShouldBeSweptLazily predicate here. | 2531 // Do not use ShouldBeSweptLazily predicate here. |
| 2521 // New evacuation candidates were selected but they still have | 2532 // New evacuation candidates were selected but they still have |
| 2522 // to be swept before collection starts. | 2533 // to be swept before collection starts. |
| 2523 if (!p->WasSwept()) { | 2534 if (!p->WasSwept()) { |
| 2524 Bitmap::Clear(p); | 2535 Bitmap::Clear(p); |
| (...skipping 647 matching lines...) |
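The NEW side of PrepareForMarkCompact() folds the four deleted lines into a single EmptyAllocationInfo() call. Its body is not shown in this hunk; reconstructed from the OLD side (and the SetTop-to-SetTopAndLimit rename visible earlier in this CL), it plausibly reads as below. This is an assumption, not the actual patch text:

```cpp
// Hedged reconstruction of EmptyAllocationInfo() from the OLD-side lines
// it replaces; the real body may differ.
void PagedSpace::EmptyAllocationInfo() {
  // Mark the old linear allocation area with a free-space map so it is
  // skipped when scanning the heap (returning it to the free list if it
  // is big enough), then drop the area.
  int old_linear_size = static_cast<int>(limit() - top());
  Free(top(), old_linear_size);
  SetTopAndLimit(NULL, NULL);
}
```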
| 3172 object->ShortPrint(); | 3183 object->ShortPrint(); |
| 3173 PrintF("\n"); | 3184 PrintF("\n"); |
| 3174 } | 3185 } |
| 3175 printf(" --------------------------------------\n"); | 3186 printf(" --------------------------------------\n"); |
| 3176 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3187 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3177 } | 3188 } |
| 3178 | 3189 |
| 3179 #endif // DEBUG | 3190 #endif // DEBUG |
| 3180 | 3191 |
| 3181 } } // namespace v8::internal | 3192 } } // namespace v8::internal |