Index: src/store-buffer.h
diff --git a/src/store-buffer.h b/src/store-buffer.h
index 20ff6ffdeb3547f50e58fbecb11a6e065af93248..9101c0eb892435d2ba07950d6906416d1fc7610b 100644
--- a/src/store-buffer.h
+++ b/src/store-buffer.h
@@ -19,6 +19,11 @@ class StoreBuffer;
typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
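+// A RegionCallback is a StoreBuffer member function invoked once per region
+// of pointers on a page; calling through it uses pointer-to-member syntax,
+// e.g. (this->*region_callback)(start, end, slot_callback, clear_maps).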
+typedef void (StoreBuffer::*RegionCallback)(Address start,
+                                            Address end,
+                                            ObjectSlotCallback slot_callback,
+                                            bool clear_maps);
+
// Used to implement the write barrier by collecting addresses of pointers
// between spaces.
class StoreBuffer {
@@ -63,13 +68,13 @@ class StoreBuffer {
static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
static const int kStoreBufferSize = kStoreBufferOverflowBit;
static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
- static const int kOldRegularStoreBufferLength = kStoreBufferLength * 16;
+ static const int kOldStoreBufferLength = kStoreBufferLength * 16;
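+ // On a 64-bit target (kPointerSizeLog2 == 3) the constants above work out
+ // to a 128 KB store buffer, i.e. 16384 Address-sized slots, and an old
+ // buffer of 16 * 16384 = 262144 slots.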
static const int kHashSetLengthLog2 = 12;
static const int kHashSetLength = 1 << kHashSetLengthLog2;
void Compact();
- void GCPrologue(bool allow_overflow);
+ void GCPrologue();
void GCEpilogue();
Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
@@ -113,27 +118,12 @@ class StoreBuffer {
Address* old_start_;
Address* old_limit_;
Address* old_top_;
-
- // The regular limit specifies how big the store buffer may become during
- // mutator execution or while scavenging.
- Address* old_regular_limit_;
-
- // The reserved limit is bigger than the regular limit. It should be the size
- // of a semi-space to avoid new scan-on-scavenge during new space evacuation
- // after sweeping in a full garbage collection.
Address* old_reserved_limit_;
-
base::VirtualMemory* old_virtual_memory_;
- int old_store_buffer_length_;
bool old_buffer_is_sorted_;
bool old_buffer_is_filtered_;
-
- // If allow_overflow_ is set, we allow the store buffer to grow until
- // old_reserved_limit_. But we will shrink the store buffer in the epilogue to
- // stay within the old_regular_limit_.
- bool allow_overflow_;
-
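+ // Presumably set in GCPrologue() and cleared in GCEpilogue(); this flag
+ // replaces the removed allow_overflow_ mechanism.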
+ bool during_gc_;
// The garbage collector iterates over many pointers to new space that are not
// handled by the store buffer. This flag indicates whether the pointers
// found by the callbacks should be added to the store buffer or not.
@@ -156,14 +146,6 @@ class StoreBuffer {
void Uniq();
void ExemptPopularPages(int prime_sample_step, int threshold);
- enum ExemptPopularPagesMode {
- ENSURE_SPACE,
- SHRINK_TO_REGULAR_SIZE
- };
-
- template <ExemptPopularPagesMode mode>
- void IterativelyExemptPopularPages(intptr_t space_needed);
-
// Set the map field of the object to NULL if it contains a map.
inline void ClearDeadObject(HeapObject *object);
@@ -174,6 +156,17 @@
ObjectSlotCallback slot_callback,
bool clear_maps);
+ // For each region of pointers on an in-use page of an old space, invoke
+ // region_callback, which receives slot_callback for visiting the
+ // individual pointer slots.
+ void IteratePointersOnPage(
+     PagedSpace* space,
+     Page* page,
+     RegionCallback region_callback,
+     ObjectSlotCallback slot_callback);
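+ // Example call, with a hypothetical member function matching the
+ // RegionCallback signature:
+ //   IteratePointersOnPage(space, page,
+ //                         &StoreBuffer::VisitRegion, slot_callback);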
+
void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
                                  bool clear_maps);
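
The GCPrologue()/GCEpilogue() pair above loses its allow_overflow parameter and gains the during_gc_ flag. A minimal sketch of how the two could fit together; the bodies below are an assumption inferred from the declarations in this header, not the actual implementation:

void StoreBuffer::GCPrologue() {
  // Mark that a collection is in progress so the iteration callbacks can
  // behave accordingly.
  during_gc_ = true;
}

void StoreBuffer::GCEpilogue() {
  // With the overflow machinery (allow_overflow_, old_regular_limit_)
  // removed, there is no shrink-back step here; just clear the flag.
  during_gc_ = false;
}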