Index: src/store-buffer.cc
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index 7d73dd5ed1be27464640752efc5eb5990a749b4f..1137efb54efe78993c316fd8971705c95e1aacc8 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -142,6 +142,11 @@ void StoreBuffer::Uniq() {
 }
 
 
+bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
+  return old_limit_ - old_top_ >= space_needed;
+}
+
+
 void StoreBuffer::EnsureSpace(intptr_t space_needed) {
   while (old_limit_ - old_top_ < space_needed &&
          old_limit_ < old_reserved_limit_) {
@@ -152,7 +157,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
     old_limit_ += grow;
   }
 
-  if (old_limit_ - old_top_ >= space_needed) return;
+  if (SpaceAvailable(space_needed)) return;
 
   if (old_buffer_is_filtered_) return;
   ASSERT(may_move_store_buffer_entries_);
@@ -171,10 +176,7 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
     Filter(MemoryChunk::SCAN_ON_SCAVENGE);
   }
 
-  // If filtering out the entries from scan_on_scavenge pages got us down to
-  // less than half full, then we are satisfied with that.
-  if (old_limit_ - old_top_ > old_top_ - old_start_) return;
-
+  if (SpaceAvailable(space_needed)) return;
   // Sample 1 entry in 97 and filter out the pages where we estimate that more
   // than 1 in 8 pointers are to new space.
   static const int kSampleFinenesses = 5;
@@ -188,11 +190,11 @@ void StoreBuffer::EnsureSpace(intptr_t space_needed) {
     { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
     { 1, 0}
   };
-  for (int i = kSampleFinenesses - 1; i >= 0; i--) {
+  for (int i = 0; i < kSampleFinenesses; i++) {
     ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
     // As a last resort we mark all pages as being exempt from the store buffer.
-    ASSERT(i != 0 || old_top_ == old_start_);
-    if (old_limit_ - old_top_ > old_top_ - old_start_) return;
+    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+    if (SpaceAvailable(space_needed)) return;
   }
   UNREACHABLE();
 }
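
Note on the two predicates touched by this patch: the removed "less than half full" test (old_limit_ - old_top_ > old_top_ - old_start_) answers a different question than the new SpaceAvailable(space_needed) check, which compares free slots against the actual request. The sketch below is a minimal standalone illustration, not V8 code: the StoreBufferModel struct and the buffer/request sizes are invented for the example, and only the two predicates mirror the expressions in the diff above.

#include <cstdint>
#include <cstdio>

// Toy model of the store buffer's old-pointer region: [old_start_, old_limit_)
// is the committed area, old_top_ is the current fill mark.
struct StoreBufferModel {
  intptr_t* old_start_;
  intptr_t* old_top_;
  intptr_t* old_limit_;

  // New criterion introduced by the patch: room for space_needed more entries.
  bool SpaceAvailable(intptr_t space_needed) const {
    return old_limit_ - old_top_ >= space_needed;
  }

  // Heuristic removed by the patch: buffer is less than half full.
  bool LessThanHalfFull() const {
    return old_limit_ - old_top_ > old_top_ - old_start_;
  }
};

int main() {
  static intptr_t backing[1024] = {0};
  // 600 of 1024 slots used: more than half full, yet 424 slots remain free.
  StoreBufferModel buffer{backing, backing + 600, backing + 1024};

  const intptr_t space_needed = 128;
  std::printf("SpaceAvailable(128): %s\n",
              buffer.SpaceAvailable(space_needed) ? "yes" : "no");  // yes
  std::printf("less than half full: %s\n",
              buffer.LessThanHalfFull() ? "yes" : "no");            // no
  return 0;
}

With these example numbers the old heuristic would have kept filtering (or fallen through toward UNREACHABLE) even though the 128-entry request already fits, while a request larger than the free space could previously have satisfied the half-full test without actually fitting; checking space_needed directly avoids both mismatches.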