| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 2486 matching lines...) |
| 2497 sum += large_list_.SumFreeList(); | 2497 sum += large_list_.SumFreeList(); |
| 2498 sum += huge_list_.SumFreeList(); | 2498 sum += huge_list_.SumFreeList(); |
| 2499 return sum; | 2499 return sum; |
| 2500 } | 2500 } |
| 2501 #endif | 2501 #endif |
| 2502 | 2502 |
| 2503 | 2503 |
| 2504 // ----------------------------------------------------------------------------- | 2504 // ----------------------------------------------------------------------------- |
| 2505 // OldSpace implementation | 2505 // OldSpace implementation |
| 2506 | 2506 |
| 2507 bool NewSpace::ReserveSpace(int bytes) { | |
| 2508 // We can't reliably unpack a partial snapshot that needs more new space | |
| 2509 // space than the minimum NewSpace size. The limit can be set lower than | |
| 2510 // the end of new space either because there is more space on the next page | |
| 2511 // or because we have lowered the limit in order to get periodic incremental | |
| 2512 // marking. The most reliable way to ensure that there is linear space is | |
| 2513 // to do the allocation, then rewind the limit. | |
| 2514 ASSERT(bytes <= InitialCapacity()); | |
| 2515 MaybeObject* maybe = AllocateRaw(bytes); | |
| 2516 Object* object = NULL; | |
| 2517 if (!maybe->ToObject(&object)) return false; | |
| 2518 HeapObject* allocation = HeapObject::cast(object); | |
| 2519 Address top = allocation_info_.top(); | |
| 2520 if ((top - bytes) == allocation->address()) { | |
| 2521 allocation_info_.set_top(allocation->address()); | |
| 2522 return true; | |
| 2523 } | |
| 2524 // There may be a borderline case here where the allocation succeeded, but | |
| 2525 // the limit and top have moved on to a new page. In that case we try again. | |
| 2526 return ReserveSpace(bytes); | |
| 2527 } | |
| 2528 | |
| 2529 | |
| 2530 void PagedSpace::PrepareForMarkCompact() { | 2507 void PagedSpace::PrepareForMarkCompact() { |
| 2531 // We don't have a linear allocation area while sweeping. It will be restored | 2508 // We don't have a linear allocation area while sweeping. It will be restored |
| 2532 // on the first allocation after the sweep. | 2509 // on the first allocation after the sweep. |
| 2533 // Mark the old linear allocation area with a free space map so it can be | 2510 // Mark the old linear allocation area with a free space map so it can be |
| 2534 // skipped when scanning the heap. | 2511 // skipped when scanning the heap. |
| 2535 int old_linear_size = static_cast<int>(limit() - top()); | 2512 int old_linear_size = static_cast<int>(limit() - top()); |
| 2536 Free(top(), old_linear_size); | 2513 Free(top(), old_linear_size); |
| 2537 SetTop(NULL, NULL); | 2514 SetTop(NULL, NULL); |
| 2538 | 2515 |
| 2539 // Stop lazy sweeping and clear marking bits for unswept pages. | 2516 // Stop lazy sweeping and clear marking bits for unswept pages. |
| (...skipping 14 matching lines...) |
| 2554 } while (p != anchor()); | 2531 } while (p != anchor()); |
| 2555 } | 2532 } |
| 2556 first_unswept_page_ = Page::FromAddress(NULL); | 2533 first_unswept_page_ = Page::FromAddress(NULL); |
| 2557 unswept_free_bytes_ = 0; | 2534 unswept_free_bytes_ = 0; |
| 2558 | 2535 |
| 2559 // Clear the free list before a full GC---it will be rebuilt afterward. | 2536 // Clear the free list before a full GC---it will be rebuilt afterward. |
| 2560 free_list_.Reset(); | 2537 free_list_.Reset(); |
| 2561 } | 2538 } |
| 2562 | 2539 |
| 2563 | 2540 |
| 2564 bool PagedSpace::ReserveSpace(int size_in_bytes) { | |
| 2565 ASSERT(size_in_bytes <= AreaSize()); | |
| 2566 ASSERT(size_in_bytes == RoundSizeDownToObjectAlignment(size_in_bytes)); | |
| 2567 Address current_top = allocation_info_.top(); | |
| 2568 Address new_top = current_top + size_in_bytes; | |
| 2569 if (new_top <= allocation_info_.limit()) return true; | |
| 2570 | |
| 2571 HeapObject* new_area = free_list_.Allocate(size_in_bytes); | |
| 2572 if (new_area == NULL) new_area = SlowAllocateRaw(size_in_bytes); | |
| 2573 if (new_area == NULL) return false; | |
| 2574 | |
| 2575 int old_linear_size = static_cast<int>(limit() - top()); | |
| 2576 // Mark the old linear allocation area with a free space so it can be | |
| 2577 // skipped when scanning the heap. This also puts it back in the free list | |
| 2578 // if it is big enough. | |
| 2579 Free(top(), old_linear_size); | |
| 2580 | |
| 2581 SetTop(new_area->address(), new_area->address() + size_in_bytes); | |
| 2582 return true; | |
| 2583 } | |
| 2584 | |
| 2585 | |
| 2586 intptr_t PagedSpace::SizeOfObjects() { | 2541 intptr_t PagedSpace::SizeOfObjects() { |
| 2587 ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0)); | 2542 ASSERT(!heap()->IsSweepingComplete() || (unswept_free_bytes_ == 0)); |
| 2588 return Size() - unswept_free_bytes_ - (limit() - top()); | 2543 return Size() - unswept_free_bytes_ - (limit() - top()); |
| 2589 } | 2544 } |
| 2590 | 2545 |
| 2591 | 2546 |
| 2592 // After we have booted, we have created a map which represents free space | 2547 // After we have booted, we have created a map which represents free space |
| 2593 // on the heap. If there was already a free list then the elements on it | 2548 // on the heap. If there was already a free list then the elements on it |
| 2594 // were created with the wrong FreeSpaceMap (normally NULL), so we need to | 2549 // were created with the wrong FreeSpaceMap (normally NULL), so we need to |
| 2595 // fix them. | 2550 // fix them. |
| 2596 void PagedSpace::RepairFreeListsAfterBoot() { | 2551 void PagedSpace::RepairFreeListsAfterBoot() { |
| 2597 free_list_.RepairLists(heap()); | 2552 free_list_.RepairLists(heap()); |
| 2598 } | 2553 } |
| 2599 | 2554 |
| 2600 | 2555 |
| 2601 // You have to call this last, since the implementation from PagedSpace | |
| 2602 // doesn't know that memory was 'promised' to large object space. | |
| 2603 bool LargeObjectSpace::ReserveSpace(int bytes) { | |
| 2604 return heap()->OldGenerationCapacityAvailable() >= bytes && | |
| 2605 (!heap()->incremental_marking()->IsStopped() || | |
| 2606 heap()->OldGenerationSpaceAvailable() >= bytes); | |
| 2607 } | |
| 2608 | |
| 2609 | |
| 2610 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { | 2556 bool PagedSpace::AdvanceSweeper(intptr_t bytes_to_sweep) { |
| 2611 if (IsLazySweepingComplete()) return true; | 2557 if (IsLazySweepingComplete()) return true; |
| 2612 | 2558 |
| 2613 intptr_t freed_bytes = 0; | 2559 intptr_t freed_bytes = 0; |
| 2614 Page* p = first_unswept_page_; | 2560 Page* p = first_unswept_page_; |
| 2615 do { | 2561 do { |
| 2616 Page* next_page = p->next_page(); | 2562 Page* next_page = p->next_page(); |
| 2617 if (ShouldBeSweptLazily(p)) { | 2563 if (ShouldBeSweptLazily(p)) { |
| 2618 if (FLAG_gc_verbose) { | 2564 if (FLAG_gc_verbose) { |
| 2619 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", | 2565 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", |
| (...skipping 623 matching lines...) |
| 3243 object->ShortPrint(); | 3189 object->ShortPrint(); |
| 3244 PrintF("\n"); | 3190 PrintF("\n"); |
| 3245 } | 3191 } |
| 3246 printf(" --------------------------------------\n"); | 3192 printf(" --------------------------------------\n"); |
| 3247 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3193 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3248 } | 3194 } |
| 3249 | 3195 |
| 3250 #endif // DEBUG | 3196 #endif // DEBUG |
| 3251 | 3197 |
| 3252 } } // namespace v8::internal | 3198 } } // namespace v8::internal |
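Note on the deleted code above: the three removed `ReserveSpace` methods all follow the same pattern of first checking whether the current linear allocation area (`top`/`limit`) can absorb the request and only then falling back to a slower allocation path. Below is a minimal, self-contained C++ sketch of that pattern. It is not V8 code; `LinearArea`, `SlowAllocate`, and the standalone `ReserveSpace` are illustrative names only, and the slow path is simplified to a plain `malloc`.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Illustrative stand-in for a space with a linear (bump-pointer) allocation area.
struct LinearArea {
  uintptr_t top = 0;    // next free byte in the current linear area
  uintptr_t limit = 0;  // one past the last usable byte
};

// Stand-in slow path: in V8 this would consult the free list or map a new page;
// here it simply mallocs a fresh block. Returns 0 on failure.
static uintptr_t SlowAllocate(std::size_t size) {
  return reinterpret_cast<uintptr_t>(std::malloc(size));
}

// The reservation pattern: guarantee `size` contiguous bytes, first via a cheap
// bump-pointer check against the existing area, else by acquiring a fresh region
// and making it the new linear area.
static bool ReserveSpace(LinearArea* area, std::size_t size) {
  if (area->top + size <= area->limit) return true;  // fast path: area is big enough
  uintptr_t fresh = SlowAllocate(size);
  if (fresh == 0) return false;                      // out of memory
  area->top = fresh;
  area->limit = fresh + size;
  return true;
}

int main() {
  LinearArea area;
  return ReserveSpace(&area, 64) ? 0 : 1;
}
```

In the real code the abandoned tail of the old linear area is not leaked as in this sketch: it is handed back via `Free()`, which marks it with a free-space map so heap scans can skip it (see `PrepareForMarkCompact` and the removed `PagedSpace::ReserveSpace` above). `LargeObjectSpace::ReserveSpace` differs entirely, since its memory is only "promised" against old-generation capacity rather than carved out of a linear area.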