Index: src/store-buffer.cc
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 3745d91a8a417071b8f40262069ebee34deed332..7ee4fa2d4000ed4529476a427505194d579e443a 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -22,10 +22,13 @@ StoreBuffer::StoreBuffer(Heap* heap)
       old_start_(NULL),
       old_limit_(NULL),
       old_top_(NULL),
+      old_regular_limit_(NULL),
       old_reserved_limit_(NULL),
+      old_virtual_memory_(NULL),
+      old_store_buffer_length_(0),
       old_buffer_is_sorted_(false),
       old_buffer_is_filtered_(false),
-      during_gc_(false),
+      allow_overflow_(false),
       store_buffer_rebuilding_enabled_(false),
       callback_(NULL),
       may_move_store_buffer_entries_(true),
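
The swap of during_gc_ for allow_overflow_ is the heart of the patch: instead of merely recording that a collection is in progress, the flag records whether this collection may grow the old store buffer past its regular limit. The new fields describe a single reservation through five pointers; a minimal sketch of the ordering the rest of the patch relies on (the type alias and helper below are illustrative, not part of the patch):

    #include <cstdint>

    using Address = uintptr_t;  // stand-in for V8's 'typedef byte* Address'

    // Hypothetical helper: how the five pointers into the old store buffer
    // relate. old_limit_ may exceed old_regular_limit_ while a full GC is
    // allowed to overflow, so that pair is deliberately left unchecked.
    inline bool OldBufferPointersOrdered(const Address* old_start,
                                         const Address* old_top,
                                         const Address* old_limit,
                                         const Address* old_regular_limit,
                                         const Address* old_reserved_limit) {
      return old_start <= old_top &&            // unconsumed entries below top
             old_top <= old_limit &&            // top stays in committed range
             old_limit <= old_reserved_limit &&  // committed part of reservation
             old_regular_limit <= old_reserved_limit;
    }
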
@@ -44,8 +47,14 @@ void StoreBuffer::SetUp() {
       reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
   limit_ = start_ + (kStoreBufferSize / kPointerSize);

+  // We set the maximum store buffer size to the maximum size of a semi-space.
+  // The store buffer may reach this limit during a full garbage collection.
+  // Note that half of the semi-space should be good enough, since half of the
+  // memory in the semi-space is not object pointers.
+  old_store_buffer_length_ = heap_->MaxSemiSpaceSize() / sizeof(Address);
+
   old_virtual_memory_ =
-      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
+      new base::VirtualMemory(old_store_buffer_length_ * kPointerSize);
   old_top_ = old_start_ =
       reinterpret_cast<Address*>(old_virtual_memory_->address());
   // Don't know the alignment requirements of the OS, but it is certainly not
@@ -54,9 +63,12 @@ void StoreBuffer::SetUp() {
   int initial_length =
       static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
   ASSERT(initial_length > 0);
-  ASSERT(initial_length <= kOldStoreBufferLength);
+  ASSERT(initial_length <= kOldRegularStoreBufferLength);
+  ASSERT(initial_length <= old_store_buffer_length_);
+  ASSERT(kOldRegularStoreBufferLength <= old_store_buffer_length_);
   old_limit_ = old_start_ + initial_length;
-  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+  old_regular_limit_ = old_start_ + kOldRegularStoreBufferLength;
+  old_reserved_limit_ = old_start_ + old_store_buffer_length_;

   CHECK(old_virtual_memory_->Commit(
       reinterpret_cast<void*>(old_start_),
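
A worked example of the sizing above, assuming a 64-bit build and an 8 MB maximum semi-space; kOldRegularStoreBufferLength is declared in store-buffer.h and the value below is a placeholder, not the real constant:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
      // Assumed inputs, not taken from the patch.
      const size_t kMaxSemiSpaceSize = 8 * 1024 * 1024;        // 8 MB
      const size_t kPointerSize = sizeof(uintptr_t);           // 8 on 64-bit
      const size_t kOldRegularStoreBufferLength = 256 * 1024;  // placeholder

      // Mirrors SetUp(): one buffer slot per pointer-sized word of the
      // semi-space, i.e. 1M slots (8 MB of reserved, not committed, memory).
      const size_t old_store_buffer_length = kMaxSemiSpaceSize / kPointerSize;
      assert(old_store_buffer_length == 1024 * 1024);

      // The new ASSERTs demand initial <= regular <= reserved; only one OS
      // page worth of slots is committed up front, the rest on demand.
      assert(kOldRegularStoreBufferLength <= old_store_buffer_length);
      return 0;
    }
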
@@ -93,8 +105,13 @@ void StoreBuffer::TearDown() {
   delete old_virtual_memory_;
   delete[] hash_set_1_;
   delete[] hash_set_2_;
-  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
-  start_ = limit_ = NULL;
+  old_start_ = NULL;
+  old_top_ = NULL;
+  old_limit_ = NULL;
+  old_reserved_limit_ = NULL;
+  old_regular_limit_ = NULL;
+  start_ = NULL;
+  limit_ = NULL;
   heap_->public_set_store_buffer_top(start_);
 }

@@ -128,9 +145,35 @@ bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
 }


+template<StoreBuffer::ExemptPopularPagesMode mode>
+void StoreBuffer::IterativelyExemptPopularPages(intptr_t space_needed) {
+  // Sample 1 entry in 97 and filter out the pages where we estimate that more
+  // than 1 in 8 pointers are to new space.
+  static const int kSampleFinenesses = 5;
+  static const struct Samples {
+    int prime_sample_step;
+    int threshold;
+  } samples[kSampleFinenesses] = {
+    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
+    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
+    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
+    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
+    { 1, 0}
+  };
+  for (int i = 0; i < kSampleFinenesses; i++) {
+    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+    // As a last resort we mark all pages as being exempt from the store buffer.
+    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+    if (mode == ENSURE_SPACE && SpaceAvailable(space_needed)) return;
+    else if (mode == SHRINK_TO_REGULAR_SIZE && old_top_ < old_limit_) return;
+  }
+}
+
+
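
In practice the sample table trades sampling cost against precision. Assuming 1 MB pages and 8-byte pointers (Page::kPageSize and kPointerSize are defined elsewhere in the tree, not in this diff), a page holds 131072 pointer-sized slots: the first pass inspects every 97th buffer entry and exempts a page once roughly 1/8 of its slots appear to point to new space; each later pass samples more densely but tolerates fewer hits, and the final { 1, 0 } row exempts every page it still finds, which is what the ASSERT in the loop documents. A sketch of the threshold arithmetic under those assumptions:

    #include <cstdio>

    int main() {
      // Assumed values, for illustration only: 1 MB pages, 8-byte pointers.
      const int kPageSize = 1 << 20;
      const int kPointerSize = 8;
      const struct { int prime_sample_step; int divisor; } samples[] = {
          {97, 8}, {23, 16}, {7, 32}, {3, 256}, {1, 0},
      };
      for (const auto& s : samples) {
        // Same expression as the threshold column in the patch.
        const int slots = kPageSize / kPointerSize;  // 131072
        const int threshold =
            s.divisor == 0 ? 0 : (slots / s.prime_sample_step) / s.divisor;
        printf("step %2d -> exempt page at %4d sampled new-space pointers\n",
               s.prime_sample_step, threshold);
      }
      return 0;
    }
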
 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   while (old_limit_ - old_top_ < space_needed &&
-         old_limit_ < old_reserved_limit_) {
+         ((!allow_overflow_ && old_limit_ < old_regular_limit_) ||
+          (allow_overflow_ && old_limit_ < old_reserved_limit_))) {
     size_t grow = old_limit_ - old_start_;  // Double size.
     CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
                                       grow * kPointerSize,
@@ -162,26 +205,8 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {

   if (SpaceAvailable(space_needed)) return;

-  // Sample 1 entry in 97 and filter out the pages where we estimate that more
-  // than 1 in 8 pointers are to new space.
-  static const int kSampleFinenesses = 5;
-  static const struct Samples {
-    int prime_sample_step;
-    int threshold;
-  } samples[kSampleFinenesses] = {
-    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
-    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
-    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
-    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
-    { 1, 0}
-  };
-  for (int i = 0; i < kSampleFinenesses; i++) {
-    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
-    // As a last resort we mark all pages as being exempt from the store buffer.
-    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
-    if (SpaceAvailable(space_needed)) return;
-  }
-  UNREACHABLE();
+  IterativelyExemptPopularPages<ENSURE_SPACE>(space_needed);
+  ASSERT(SpaceAvailable(space_needed));
 }

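The growth policy is plain doubling of the committed region; the only change is which cap applies: old_regular_limit_ outside a full GC, old_reserved_limit_ during one. A toy model of the resulting commit schedule (all numbers invented for illustration):

    #include <cstdio>

    int main() {
      // Invented entry counts: one committed OS page of 512 slots to start,
      // 256K regular cap, 1M reserved cap.
      const long kInitial = 512, kRegular = 256 * 1024,
                 kReserved = 1024 * 1024;
      for (int pass = 0; pass < 2; pass++) {
        const bool allow_overflow = (pass == 1);  // set by GCPrologue below
        long committed = kInitial;
        // Mirrors EnsureSpace's loop: double until the active cap is hit.
        while (committed < (allow_overflow ? kReserved : kRegular)) {
          committed *= 2;  // 'size_t grow = old_limit_ - old_start_;'
        }
        printf("allow_overflow=%d -> committed grows to %ld entries\n",
               allow_overflow, committed);
      }
      return 0;
    }
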
@@ -328,9 +353,9 @@ void StoreBuffer::ClearFilteringHashSets() {
 }


-void StoreBuffer::GCPrologue() {
+void StoreBuffer::GCPrologue(bool allow_overflow) {
   ClearFilteringHashSets();
-  during_gc_ = true;
+  allow_overflow_ = allow_overflow;
 }

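The caller now decides the policy per collection. The heap.cc side is not part of this file's diff, but presumably only a full mark-compact collection, which filters the buffer in its epilogue anyway, passes true. A hypothetical call site (the StoreBuffer declaration is a minimal stand-in for src/store-buffer.h):

    // Minimal stand-in declaration so the sketch is self-contained.
    class StoreBuffer {
     public:
      void GCPrologue(bool allow_overflow);
      void GCEpilogue();
    };

    // Hypothetical call site, not in this diff: scavenges keep the regular
    // cap, a full GC may let the buffer grow to the reserved cap.
    void OnGCStart(StoreBuffer* store_buffer, bool is_mark_compact) {
      store_buffer->GCPrologue(/* allow_overflow= */ is_mark_compact);
    }
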
@@ -366,7 +391,13 @@ void StoreBuffer::Verify() {


 void StoreBuffer::GCEpilogue() {
-  during_gc_ = false;
+  if (allow_overflow_ && old_limit_ > old_regular_limit_) {
+    IterativelyExemptPopularPages<SHRINK_TO_REGULAR_SIZE>(0);
+    ASSERT(old_limit_ < old_regular_limit_);
+    old_virtual_memory_->Uncommit(old_limit_, old_regular_limit_ - old_limit_);
+  }
+
+  allow_overflow_ = false;
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
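
The epilogue mirrors the prologue: if this collection was allowed to overflow and old_limit_ actually moved past old_regular_limit_, the buffer is first filtered back under the regular limit and the now-unneeded committed tail is uncommitted before the flag is cleared. Reusing the stand-in StoreBuffer declaration from the previous sketch, the intended pairing looks like:

    // Hypothetical wrapper pairing the two calls; the real driver lives in
    // the heap's GC entry points, which this diff does not touch.
    void PerformFullGC(StoreBuffer* store_buffer) {
      store_buffer->GCPrologue(true);  // may overflow to the reserved cap
      // ... mark-compact work records old-to-new pointers here ...
      store_buffer->GCEpilogue();      // shrink back, uncommit, clear flag
    }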
|