Chromium Code Reviews| Index: src/heap/store-buffer.h |
| diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h |
| index 1b3fcb0a98b22ea5bb254c596e5cfe1ab7e1dadb..dc753b069b08c14df4515eccd3f4f5e266c461de 100644 |
| --- a/src/heap/store-buffer.h |
| +++ b/src/heap/store-buffer.h |
| @@ -8,6 +8,7 @@ |
| #include "src/allocation.h" |
| #include "src/base/logging.h" |
| #include "src/base/platform/platform.h" |
| +#include "src/cancelable-task.h" |
| #include "src/globals.h" |
| #include "src/heap/slot-set.h" |
| @@ -15,11 +16,17 @@ namespace v8 { |
| namespace internal { |
| // Intermediate buffer that accumulates old-to-new stores from the generated |
| -// code. On buffer overflow the slots are moved to the remembered set. |
| +// code. Moreover, it stores invalid old-to-new slots with two entries. |
|
ulan
2016/10/28 09:18:02
Nit: with _two_ two entries.
Hannes Payer (out of office)
2016/10/28 09:58:32
Done.
|
| +// The first is a tagged address of the start of the invalid range, the second |
| +// one is the end address of the invalid range or null if there is just one slot |
| +// that needs to be removed from the remembered set. On buffer overflow the |
| +// slots are moved to the remembered set. |
| class StoreBuffer { |
| public: |
| static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2); |
| static const int kStoreBufferMask = kStoreBufferSize - 1; |
| + static const int kStoreBuffers = 2; |
| + static const intptr_t kDeletionTag = 1; |
| static void StoreBufferOverflow(Isolate* isolate); |
| @@ -30,17 +37,89 @@ class StoreBuffer { |
| // Used to add entries from generated code. |
| inline Address* top_address() { return reinterpret_cast<Address*>(&top_); } |
| - void MoveEntriesToRememberedSet(); |
| + // Moves entries from a specific store buffer to the remembered set. This |
| + // method takes a lock. |
| + void MoveEntriesToRememberedSet(int index); |
| + |
| + // This method ensures that all used store buffer entries are transferred to |
| + // the remembered set. |
| + void MoveAllEntriesToRememberedSet(); |
| + |
| + inline bool IsDeletionAddress(Address address) const { |
| + return reinterpret_cast<intptr_t>(address) & kDeletionTag; |
| + } |
| + |
| + inline Address MarkDeletionAddress(Address address) { |
| + return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) | |
| + kDeletionTag); |
| + } |
| + |
| + inline Address UnmarkDeletionAddress(Address address) { |
| + return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) & |
| + ~kDeletionTag); |
| + } |
| + |
| + // If we only want to delete a single slot, end should be set to null which |
| + // will be written into the second field. When processing the store buffer |
| + // the more efficient Remove method will be called in this case. |
| + void DeleteEntry(Address start, Address end = nullptr); |
| + |
| + // Used by the concurrent processing thread to transfer entries from the |
| + // store buffer to the remembered set. |
| + void ConcurrentlyProcessStoreBuffer(); |
| + |
| + bool Empty() { |
| + for (int i = 0; i < kStoreBuffers; i++) { |
| + if (lazy_top_[i].Value()) { |
| + return false; |
| + } |
| + } |
| + return top_ == start_[current_]; |
| + } |
| private: |
| + // There are two store buffers. If one store buffer fills up, the main thread |
| + // publishes the top pointer of the store buffer that needs processing in its |
| + // global lazy_top_ field. After that it starts the concurrent processing |
| + // thread. The concurrent processing thread iterates over the lazy_top_ area |
| + // and will look for a set top pointer. If one is set, it will grab the given |
| + // mutex and transfer its entries to the remembered set. If the concurrent |
| + // thread does not make progress, the main thread will perform the work. |
| + // Important: there is an ordering constraint. The store buffer with the |
| + // older entries has to be processed first. |
| + class Task : public CancelableTask { |
| + public: |
| + Task(Isolate* isolate, StoreBuffer* store_buffer) |
| + : CancelableTask(isolate), store_buffer_(store_buffer) {} |
| + virtual ~Task() {} |
| + |
| + private: |
| + void RunInternal() override { |
| + store_buffer_->ConcurrentlyProcessStoreBuffer(); |
| + } |
| + StoreBuffer* store_buffer_; |
| + DISALLOW_COPY_AND_ASSIGN(Task); |
| + }; |
| + |
| + void FlipStoreBuffers(); |
| + |
| Heap* heap_; |
| Address* top_; |
| // The start and the limit of the buffer that contains store slots |
| - // added from the generated code. |
| - Address* start_; |
| - Address* limit_; |
| + // added from the generated code. We have two chunks of store buffers. |
|
ulan
2016/10/28 09:18:02
typo: We have _two_
Hannes Payer (out of office)
2016/10/28 09:58:32
Done.
|
| + // Whenever one fills up, it will be put into a concurrent processing queue |
| + // and the other empty one will be used in the meantime. |
| + Address* start_[kStoreBuffers]; |
| + Address* limit_[kStoreBuffers]; |
| + base::AtomicValue<Address*> lazy_top_[kStoreBuffers]; |
|
ulan
2016/10/28 09:18:02
Is it an invariant that at most one lazy_top_ is n
Hannes Payer (out of office)
2016/10/28 09:58:32
Yes, that is correct. Done.
|
| + base::Mutex mutex_; |
| + |
| + base::AtomicValue<bool> task_running_; |
| + |
| + // Points to the current buffer in use. |
| + int current_; |
| base::VirtualMemory* virtual_memory_; |
| }; |