| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 448 matching lines...) |
| 459 chunk->area_start_ = area_start; | 459 chunk->area_start_ = area_start; |
| 460 chunk->area_end_ = area_end; | 460 chunk->area_end_ = area_end; |
| 461 chunk->flags_ = 0; | 461 chunk->flags_ = 0; |
| 462 chunk->set_owner(owner); | 462 chunk->set_owner(owner); |
| 463 chunk->InitializeReservedMemory(); | 463 chunk->InitializeReservedMemory(); |
| 464 chunk->slots_buffer_ = NULL; | 464 chunk->slots_buffer_ = NULL; |
| 465 chunk->skip_list_ = NULL; | 465 chunk->skip_list_ = NULL; |
| 466 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; | 466 chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity; |
| 467 chunk->progress_bar_ = 0; | 467 chunk->progress_bar_ = 0; |
| 468 chunk->high_water_mark_ = static_cast<int>(area_start - base); | 468 chunk->high_water_mark_ = static_cast<int>(area_start - base); |
| 469 chunk->parallel_sweeping_ = 0; |
| 469 chunk->ResetLiveBytes(); | 470 chunk->ResetLiveBytes(); |
| 470 Bitmap::Clear(chunk); | 471 Bitmap::Clear(chunk); |
| 471 chunk->initialize_scan_on_scavenge(false); | 472 chunk->initialize_scan_on_scavenge(false); |
| 472 chunk->SetFlag(WAS_SWEPT_PRECISELY); | 473 chunk->SetFlag(WAS_SWEPT_PRECISELY); |
| 473 | 474 |
| 474 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); | 475 ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset); |
| 475 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); | 476 ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset); |
| 476 | 477 |
| 477 if (executable == EXECUTABLE) { | 478 if (executable == EXECUTABLE) { |
| 478 chunk->SetFlag(IS_EXECUTABLE); | 479 chunk->SetFlag(IS_EXECUTABLE); |
| (...skipping 1555 matching lines...) |
| 2034 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); | 2035 ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize); |
| 2035 Memory::Address_at(address() + kNextOffset) = | 2036 Memory::Address_at(address() + kNextOffset) = |
| 2036 reinterpret_cast<Address>(next); | 2037 reinterpret_cast<Address>(next); |
| 2037 } else { | 2038 } else { |
| 2038 Memory::Address_at(address() + kPointerSize) = | 2039 Memory::Address_at(address() + kPointerSize) = |
| 2039 reinterpret_cast<Address>(next); | 2040 reinterpret_cast<Address>(next); |
| 2040 } | 2041 } |
| 2041 } | 2042 } |
| 2042 | 2043 |
| 2043 | 2044 |
| 2045 intptr_t FreeListCategory::Concatenate(FreeListCategory* category) { |
| 2046 intptr_t free_bytes = 0; |
| 2047 if (category->top_ != NULL) { |
| 2048 ASSERT(category->end_ != NULL); |
| 2049 // This is safe (not going to deadlock) since Concatenate operations |
| 2050 // are never performed on the same free lists at the same time in |
| 2051 // reverse order. |
| 2052 ScopedLock lock_target(mutex_); |
| 2053 ScopedLock lock_source(category->mutex()); |
| 2054 free_bytes = category->available(); |
| 2055 if (end_ == NULL) { |
| 2056 end_ = category->end(); |
| 2057 } else { |
| 2058 category->end()->set_next(top_); |
| 2059 } |
| 2060 top_ = category->top(); |
| 2061 available_ += category->available(); |
| 2062 category->Reset(); |
| 2063 } |
| 2064 return free_bytes; |
| 2065 } |
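The new FreeListCategory::Concatenate above does two things at once: an O(1) splice of one singly linked free list onto the front of another, and a fixed lock-acquisition order (target mutex first, then source) whose deadlock-freedom rests entirely on the invariant stated in the comment. A minimal self-contained sketch of the same pattern, using std::mutex/std::lock_guard in place of V8's internal Mutex/ScopedLock and hypothetical TinyFreeListCategory/FreeNode names:

```cpp
#include <cstdint>
#include <mutex>

// A node in a singly linked free list; |next| points toward |end_|.
struct FreeNode {
  FreeNode* next = nullptr;
};

class TinyFreeListCategory {
 public:
  // Splices |category|'s list onto the front of this one in O(1), mirroring
  // the CL: the source's end node is wired to the current top, then the
  // source's top becomes the new top.
  intptr_t Concatenate(TinyFreeListCategory* category) {
    intptr_t moved = 0;
    if (category->top_ != nullptr) {
      // Lock the target first, then the source. This cannot deadlock as
      // long as no two threads ever concatenate the same pair of lists in
      // opposite directions at the same time -- the invariant the CL's
      // comment states.
      std::lock_guard<std::mutex> lock_target(mutex_);
      std::lock_guard<std::mutex> lock_source(category->mutex_);
      moved = category->available_;
      if (end_ == nullptr) {
        end_ = category->end_;
      } else {
        category->end_->next = top_;
      }
      top_ = category->top_;
      available_ += moved;
      category->top_ = nullptr;
      category->end_ = nullptr;
      category->available_ = 0;
    }
    return moved;
  }

 private:
  FreeNode* top_ = nullptr;
  FreeNode* end_ = nullptr;
  intptr_t available_ = 0;
  std::mutex mutex_;
};
```

std::scoped_lock could take both mutexes atomically and drop the ordering requirement, but the sketch keeps the ordered acquisition so the deadlock argument matches the one in the CL.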
| 2066 |
| 2067 |
| 2044 void FreeListCategory::Reset() { | 2068 void FreeListCategory::Reset() { |
| 2045 top_ = NULL; | 2069 top_ = NULL; |
| 2046 end_ = NULL; | 2070 end_ = NULL; |
| 2047 available_ = 0; | 2071 available_ = 0; |
| 2048 } | 2072 } |
| 2049 | 2073 |
| 2050 | 2074 |
| 2051 intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) { | 2075 intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) { |
| 2052 int sum = 0; | 2076 int sum = 0; |
| 2053 FreeListNode* n = top_; | 2077 FreeListNode* n = top_; |
| (...skipping 78 matching lines...) |
| 2132 } | 2156 } |
| 2133 } | 2157 } |
| 2134 | 2158 |
| 2135 | 2159 |
| 2136 FreeList::FreeList(PagedSpace* owner) | 2160 FreeList::FreeList(PagedSpace* owner) |
| 2137 : owner_(owner), heap_(owner->heap()) { | 2161 : owner_(owner), heap_(owner->heap()) { |
| 2138 Reset(); | 2162 Reset(); |
| 2139 } | 2163 } |
| 2140 | 2164 |
| 2141 | 2165 |
| 2166 intptr_t FreeList::Concatenate(FreeList* free_list) { |
| 2167 intptr_t free_bytes = 0; |
| 2168 free_bytes += small_list_.Concatenate(free_list->small_list()); |
| 2169 free_bytes += medium_list_.Concatenate(free_list->medium_list()); |
| 2170 free_bytes += large_list_.Concatenate(free_list->large_list()); |
| 2171 free_bytes += huge_list_.Concatenate(free_list->huge_list()); |
| 2172 return free_bytes; |
| 2173 } |
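FreeList::Concatenate just folds the four size-class categories together, but the interesting part is how it is presumably meant to be used: each sweeper thread accumulates freed memory in a private FreeList, and the main thread later concatenates those back, as hinted at by StealMemoryFromSweeperThreads in EnsureSweeperProgress further down. A toy model of that flow, with size classes reduced to byte counters; ToyCategory, ToyFreeList and MergeSweeperFreeLists are illustrative names, not V8 API:

```cpp
#include <cstdint>
#include <vector>

// Each size class is reduced to a byte counter so only the aggregation
// pattern of FreeList::Concatenate is visible.
struct ToyCategory {
  intptr_t available = 0;
  intptr_t Concatenate(ToyCategory* src) {
    intptr_t moved = src->available;
    available += moved;
    src->available = 0;
    return moved;
  }
};

struct ToyFreeList {
  ToyCategory small_list, medium_list, large_list, huge_list;
  // Mirrors FreeList::Concatenate: fold every category of |other| into this
  // list and report how many bytes were transferred.
  intptr_t Concatenate(ToyFreeList* other) {
    intptr_t free_bytes = 0;
    free_bytes += small_list.Concatenate(&other->small_list);
    free_bytes += medium_list.Concatenate(&other->medium_list);
    free_bytes += large_list.Concatenate(&other->large_list);
    free_bytes += huge_list.Concatenate(&other->huge_list);
    return free_bytes;
  }
};

// Presumed main-thread usage: fold every sweeper thread's private free list
// into the space's free list and report the total gained, which is what a
// caller like EnsureSweeperProgress compares against the allocation request.
intptr_t MergeSweeperFreeLists(ToyFreeList* space_list,
                               std::vector<ToyFreeList*>& sweeper_lists) {
  intptr_t gained = 0;
  for (ToyFreeList* list : sweeper_lists) gained += space_list->Concatenate(list);
  return gained;
}
```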
| 2174 |
| 2175 |
| 2142 void FreeList::Reset() { | 2176 void FreeList::Reset() { |
| 2143 small_list_.Reset(); | 2177 small_list_.Reset(); |
| 2144 medium_list_.Reset(); | 2178 medium_list_.Reset(); |
| 2145 large_list_.Reset(); | 2179 large_list_.Reset(); |
| 2146 huge_list_.Reset(); | 2180 huge_list_.Reset(); |
| 2147 } | 2181 } |
| 2148 | 2182 |
| 2149 | 2183 |
| 2150 int FreeList::Free(Address start, int size_in_bytes) { | 2184 int FreeList::Free(Address start, int size_in_bytes) { |
| 2151 if (size_in_bytes == 0) return 0; | 2185 if (size_in_bytes == 0) return 0; |
| (...skipping 344 matching lines...) |
| 2496 intptr_t freed_bytes = 0; | 2530 intptr_t freed_bytes = 0; |
| 2497 Page* p = first_unswept_page_; | 2531 Page* p = first_unswept_page_; |
| 2498 do { | 2532 do { |
| 2499 Page* next_page = p->next_page(); | 2533 Page* next_page = p->next_page(); |
| 2500 if (ShouldBeSweptLazily(p)) { | 2534 if (ShouldBeSweptLazily(p)) { |
| 2501 if (FLAG_gc_verbose) { | 2535 if (FLAG_gc_verbose) { |
| 2502 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", | 2536 PrintF("Sweeping 0x%" V8PRIxPTR " lazily advanced.\n", |
| 2503 reinterpret_cast<intptr_t>(p)); | 2537 reinterpret_cast<intptr_t>(p)); |
| 2504 } | 2538 } |
| 2505 DecreaseUnsweptFreeBytes(p); | 2539 DecreaseUnsweptFreeBytes(p); |
| 2506 freed_bytes += MarkCompactCollector::SweepConservatively(this, p); | 2540 freed_bytes += |
| 2541 MarkCompactCollector:: |
| 2542 SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>( |
| 2543 this, NULL, p); |
| 2507 } | 2544 } |
| 2508 p = next_page; | 2545 p = next_page; |
| 2509 } while (p != anchor() && freed_bytes < bytes_to_sweep); | 2546 } while (p != anchor() && freed_bytes < bytes_to_sweep); |
| 2510 | 2547 |
| 2511 if (p == anchor()) { | 2548 if (p == anchor()) { |
| 2512 first_unswept_page_ = Page::FromAddress(NULL); | 2549 first_unswept_page_ = Page::FromAddress(NULL); |
| 2513 } else { | 2550 } else { |
| 2514 first_unswept_page_ = p; | 2551 first_unswept_page_ = p; |
| 2515 } | 2552 } |
| 2516 | 2553 |
| (...skipping 11 matching lines...) |
| 2528 int remaining = | 2565 int remaining = |
| 2529 static_cast<int>(allocation_info_.limit - allocation_info_.top); | 2566 static_cast<int>(allocation_info_.limit - allocation_info_.top); |
| 2530 heap()->CreateFillerObjectAt(allocation_info_.top, remaining); | 2567 heap()->CreateFillerObjectAt(allocation_info_.top, remaining); |
| 2531 | 2568 |
| 2532 allocation_info_.top = NULL; | 2569 allocation_info_.top = NULL; |
| 2533 allocation_info_.limit = NULL; | 2570 allocation_info_.limit = NULL; |
| 2534 } | 2571 } |
| 2535 } | 2572 } |
| 2536 | 2573 |
| 2537 | 2574 |
| 2575 bool PagedSpace::EnsureSweeperProgress(intptr_t size_in_bytes) { |
| 2576 MarkCompactCollector* collector = heap()->mark_compact_collector(); |
| 2577 if (collector->AreSweeperThreadsActivated()) { |
| 2578 if (FLAG_concurrent_sweeping && |
| 2579 collector->StealMemoryFromSweeperThreads(this) < size_in_bytes) { |
| 2580 collector->WaitUntilSweepingCompleted(); |
| 2581 return true; |
| 2582 } |
| 2583 return false; |
| 2584 } else { |
| 2585 return AdvanceSweeper(size_in_bytes); |
| 2586 } |
| 2587 } |
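EnsureSweeperProgress folds the new concurrent-sweeping path into the old lazy-sweeping one, and its bool result is what the bounded retry loop in SlowAllocateRaw below keys off. One reading of that contract, sketched with hypothetical stand-ins (SweeperFacade and its members are illustrative, not V8 API): true means sweeping is now known to be finished, so at most one more free-list retry is useful; false means further calls may still make progress.

```cpp
#include <cstdint>

// Hypothetical stand-in for the collector/sweeper state; illustrative only.
struct SweeperFacade {
  bool threads_active = false;
  bool concurrent_sweeping_enabled = false;
  intptr_t stealable_bytes = 0;  // what merging sweeper free lists would yield

  // Toy version of "steal memory from sweeper threads": concatenate their
  // free lists into the space's list and report the bytes gained.
  intptr_t StealFreeMemory(intptr_t /*wanted*/) {
    intptr_t got = stealable_bytes;
    stealable_bytes = 0;
    return got;
  }
  void WaitUntilSweepingCompleted() { threads_active = false; }
};

// Toy stand-in for the pre-existing lazy sweeper; returns true once all
// pages are swept, like the AdvanceSweeper path.
inline bool AdvanceLazySweeper(intptr_t /*bytes_to_sweep*/) { return true; }

// Mirrors the control flow of PagedSpace::EnsureSweeperProgress above.
bool EnsureSweeperProgress(SweeperFacade* sweeper, intptr_t size_in_bytes) {
  if (sweeper->threads_active) {
    if (sweeper->concurrent_sweeping_enabled &&
        sweeper->StealFreeMemory(size_in_bytes) < size_in_bytes) {
      // Stealing did not cover the request: block until sweeping is done,
      // then tell the caller no further sweeping progress is possible.
      sweeper->WaitUntilSweepingCompleted();
      return true;
    }
    // Either stealing covered the request or concurrent sweeping is off;
    // the caller should simply retry its free-list allocation.
    return false;
  }
  // No sweeper threads: fall back to the old lazy sweeping path.
  return AdvanceLazySweeper(size_in_bytes);
}
```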
| 2588 |
| 2589 |
| 2538 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { | 2590 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) { |
| 2539 // Allocation in this space has failed. | 2591 // Allocation in this space has failed. |
| 2540 | 2592 |
| 2541 // If there are unswept pages advance lazy sweeper a bounded number of times | 2593 // If there are unswept pages advance lazy sweeper a bounded number of times |
| 2542 // until we find a size_in_bytes contiguous piece of memory | 2594 // until we find a size_in_bytes contiguous piece of memory |
| 2543 const int kMaxSweepingTries = 5; | 2595 const int kMaxSweepingTries = 5; |
| 2544 bool sweeping_complete = false; | 2596 bool sweeping_complete = false; |
| 2545 | 2597 |
| 2546 for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) { | 2598 for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) { |
| 2547 sweeping_complete = AdvanceSweeper(size_in_bytes); | 2599 sweeping_complete = EnsureSweeperProgress(size_in_bytes); |
| 2548 | 2600 |
| 2549 // Retry the free list allocation. | 2601 // Retry the free list allocation. |
| 2550 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2602 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2551 if (object != NULL) return object; | 2603 if (object != NULL) return object; |
| 2552 } | 2604 } |
| 2553 | 2605 |
| 2554 // Free list allocation failed and there is no next page. Fail if we have | 2606 // Free list allocation failed and there is no next page. Fail if we have |
| 2555 // hit the old generation size limit that should cause a garbage | 2607 // hit the old generation size limit that should cause a garbage |
| 2556 // collection. | 2608 // collection. |
| 2557 if (!heap()->always_allocate() && | 2609 if (!heap()->always_allocate() && |
| 2558 heap()->OldGenerationAllocationLimitReached()) { | 2610 heap()->OldGenerationAllocationLimitReached()) { |
| 2559 return NULL; | 2611 return NULL; |
| 2560 } | 2612 } |
| 2561 | 2613 |
| 2562 // Try to expand the space and allocate in the new next page. | 2614 // Try to expand the space and allocate in the new next page. |
| 2563 if (Expand()) { | 2615 if (Expand()) { |
| 2564 return free_list_.Allocate(size_in_bytes); | 2616 return free_list_.Allocate(size_in_bytes); |
| 2565 } | 2617 } |
| 2566 | 2618 |
| 2567 // Last ditch, sweep all the remaining pages to try to find space. This may | 2619 // Last ditch, sweep all the remaining pages to try to find space. This may |
| 2568 // cause a pause. | 2620 // cause a pause. |
| 2569 if (!IsSweepingComplete()) { | 2621 if (!IsSweepingComplete()) { |
| 2570 AdvanceSweeper(kMaxInt); | 2622 EnsureSweeperProgress(kMaxInt); |
| 2571 | 2623 |
| 2572 // Retry the free list allocation. | 2624 // Retry the free list allocation. |
| 2573 HeapObject* object = free_list_.Allocate(size_in_bytes); | 2625 HeapObject* object = free_list_.Allocate(size_in_bytes); |
| 2574 if (object != NULL) return object; | 2626 if (object != NULL) return object; |
| 2575 } | 2627 } |
| 2576 | 2628 |
| 2577 // Finally, fail. | 2629 // Finally, fail. |
| 2578 return NULL; | 2630 return NULL; |
| 2579 } | 2631 } |
| 2580 | 2632 |
| (...skipping 516 matching lines...) |
| 3097 object->ShortPrint(); | 3149 object->ShortPrint(); |
| 3098 PrintF("\n"); | 3150 PrintF("\n"); |
| 3099 } | 3151 } |
| 3100 printf(" --------------------------------------\n"); | 3152 printf(" --------------------------------------\n"); |
| 3101 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); | 3153 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); |
| 3102 } | 3154 } |
| 3103 | 3155 |
| 3104 #endif // DEBUG | 3156 #endif // DEBUG |
| 3105 | 3157 |
| 3106 } } // namespace v8::internal | 3158 } } // namespace v8::internal |