| Index: src/heap/store-buffer.h
|
| diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
|
| index 1b3fcb0a98b22ea5bb254c596e5cfe1ab7e1dadb..ca873783a49c37af88a4a0ae23026efce9ca632b 100644
|
| --- a/src/heap/store-buffer.h
|
| +++ b/src/heap/store-buffer.h
|
| @@ -8,6 +8,7 @@
|
| #include "src/allocation.h"
|
| #include "src/base/logging.h"
|
| #include "src/base/platform/platform.h"
|
| +#include "src/cancelable-task.h"
|
| #include "src/globals.h"
|
| #include "src/heap/slot-set.h"
|
|
|
| @@ -15,11 +16,17 @@ namespace v8 {
|
| namespace internal {
|
|
|
| // Intermediate buffer that accumulates old-to-new stores from the generated
|
| -// code. On buffer overflow the slots are moved to the remembered set.
|
| +// code. Moreover, it stores invalid old-to-new slots with two entries.
|
| +// The first is a tagged address of the start of the invalid range, the second
|
| +// one is the end address of the invalid range or null if there is just one slot
|
| +// that needs to be removed from the remembered set. On buffer overflow the
|
| +// slots are moved to the remembered set.
|
| class StoreBuffer {
|
| public:
|
| static const int kStoreBufferSize = 1 << (14 + kPointerSizeLog2);
|
| static const int kStoreBufferMask = kStoreBufferSize - 1;
|
| + static const int kStoreBuffers = 2;
|
| + static const intptr_t kDeletionTag = 1;
|
|
|
| static void StoreBufferOverflow(Isolate* isolate);
|
|
|
| @@ -30,17 +37,80 @@ class StoreBuffer {
|
| // Used to add entries from generated code.
|
| inline Address* top_address() { return reinterpret_cast<Address*>(&top_); }
|
|
|
| - void MoveEntriesToRememberedSet();
|
| + // Moves entries from a specific store buffer to the remembered set. This
|
| + // method takes a lock.
|
| + void MoveEntriesToRememberedSet(int index);
|
| +
|
| + // This method ensures that all used store buffer entries are transferred to
|
| + // the remembered set.
|
| + void MoveAllEntriesToRememberedSet();
|
| +
|
| + inline bool IsDeletionAddress(Address address) const {
|
| + return reinterpret_cast<intptr_t>(address) & kDeletionTag;
|
| + }
|
| +
|
| + inline Address MarkDeletionAddress(Address address) {
|
| + return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) |
|
| + kDeletionTag);
|
| + }
|
| +
|
| + inline Address UnmarkDeletionAddress(Address address) {
|
| + return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) &
|
| + ~kDeletionTag);
|
| + }
|
| +
|
| + // If we only want to delete a single slot, end should be set to null, which
|
| + // will be written into the second field. When processing the store buffer
|
| + // the more efficient Remove method will be called in this case.
|
| + void DeleteEntry(Address start, Address end = nullptr);
|
| +
|
| + // Used by the concurrent processing thread to transfer entries from the
|
| + // store buffer to the remembered set.
|
| + void ConcurrentlyProcessStoreBuffer();
|
|
|
| private:
|
| + // There are two store buffers. If one store buffer fills up, the main thread
|
| + // publishes the top pointer of the store buffer that needs processing in its
|
| + // global lazy_top_ field. After that it starts the concurrent processing
|
| + // thread. The concurrent processing thread iterates over the lazy_top_ array
|
| + // and will look for a set top pointer. If one is set, it will grab the given
|
| + // mutex and transfer its entries to the remembered set. If the concurrent
|
| + // thread does not make progress, the main thread will perform the work.
|
| + // Important: there is an ordering constraint. The store buffer with the
|
| + // older entries has to be processed first.
|
| + class Task : public CancelableTask {
|
| + public:
|
| + Task(Isolate* isolate, StoreBuffer* store_buffer)
|
| + : CancelableTask(isolate), store_buffer_(store_buffer) {}
|
| + virtual ~Task() {}
|
| +
|
| + private:
|
| + void RunInternal() override {
|
| + store_buffer_->ConcurrentlyProcessStoreBuffer();
|
| + }
|
| + StoreBuffer* store_buffer_;
|
| + DISALLOW_COPY_AND_ASSIGN(Task);
|
| + };
|
| +
|
| + void FlipStoreBuffers();
|
| +
|
| Heap* heap_;
|
|
|
| Address* top_;
|
|
|
| // The start and the limit of the buffer that contains store slots
|
| - // added from the generated code.
|
| - Address* start_;
|
| - Address* limit_;
|
| + // added from the generated code. We have two chunks of store buffers.
|
| + // Whenever one fills up, it will be put into a concurrent processing queue
|
| + // and the other empty one will be used in the meantime.
|
| + Address* start_[kStoreBuffers];
|
| + Address* limit_[kStoreBuffers];
|
| + base::AtomicValue<Address*> lazy_top_[kStoreBuffers];
|
| + base::Mutex mutex_;
|
| +
|
| + base::AtomicValue<bool> task_running_;
|
| +
|
| + // Points to the current buffer in use.
|
| + int current_;
|
|
|
| base::VirtualMemory* virtual_memory_;
|
| };
|
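A brief illustration of the deletion-tag encoding introduced by the class comment above: because slot addresses are pointer-aligned, the low bit is free to carry kDeletionTag, so a deletion record fits into two ordinary buffer entries. The sketch below is standalone and simplified (the Address alias, the fixed slots array, and the std::vector buffer are placeholders, not the real store buffer); it only shows the encode/decode round trip.

  // Standalone sketch of the two-entry deletion encoding (assumed semantics,
  // simplified types): slot addresses are at least 2-byte aligned, so the low
  // bit can carry kDeletionTag.
  #include <cassert>
  #include <cstdint>
  #include <vector>

  using Address = uint8_t*;
  constexpr intptr_t kDeletionTag = 1;

  bool IsDeletionAddress(Address address) {
    return reinterpret_cast<intptr_t>(address) & kDeletionTag;
  }
  Address MarkDeletionAddress(Address address) {
    return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) |
                                     kDeletionTag);
  }
  Address UnmarkDeletionAddress(Address address) {
    return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(address) &
                                     ~kDeletionTag);
  }

  int main() {
    uint64_t slots[4] = {0, 0, 0, 0};
    Address start = reinterpret_cast<Address>(&slots[1]);
    Address end = reinterpret_cast<Address>(&slots[3]);

    // A deletion record uses two consecutive buffer entries: the tagged start
    // address, then the end address (or nullptr when only one slot is invalid).
    std::vector<Address> buffer;
    buffer.push_back(MarkDeletionAddress(start));
    buffer.push_back(end);

    // The consumer recognizes the tag, strips it, and treats the next entry as
    // the end of the invalid range.
    assert(IsDeletionAddress(buffer[0]));
    assert(UnmarkDeletionAddress(buffer[0]) == start);
    assert(!IsDeletionAddress(buffer[1]));
    return 0;
  }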
|
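The DeleteEntry comment also hints at how a deletion record is consumed: a null second entry selects the cheaper single-slot Remove path, otherwise a whole range is dropped. Reusing the Address alias and UnmarkDeletionAddress helper from the sketch above, one plausible shape of that branch is shown below; RemoveSlot and RemoveSlotRange are hypothetical stand-ins, not actual remembered-set calls.

  // Hypothetical consumer of a deletion record. RemoveSlot / RemoveSlotRange
  // are illustrative stand-ins for whatever the remembered set exposes.
  void RemoveSlot(Address slot) { /* drop a single old-to-new slot */ }
  void RemoveSlotRange(Address start, Address end) { /* drop [start, end) */ }

  void ProcessDeletionEntry(Address first, Address second) {
    Address start = UnmarkDeletionAddress(first);
    if (second == nullptr) {
      // Single invalid slot: the cheaper Remove path mentioned in the comment.
      RemoveSlot(start);
    } else {
      // Invalid range: every old-to-new slot in [start, second) goes away.
      RemoveSlotRange(start, second);
    }
  }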
|
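The private comment about the two store buffers describes a hand-off protocol: when one buffer fills up, its top pointer is published in lazy_top_, generated code switches to the other buffer, and either the background task or the main thread drains the published buffer under the mutex, older entries first. Below is a minimal compilable sketch of that shape, with std::atomic and std::mutex standing in for base::AtomicValue and base::Mutex, the remembered-set work elided, and StoreBufferSketch and kSketchBufferSlots as illustrative names.

  // Minimal sketch of the double-buffer hand-off; not the actual V8 code.
  #include <atomic>
  #include <mutex>

  using Address = void*;
  constexpr int kStoreBuffers = 2;
  constexpr int kSketchBufferSlots = 4;

  struct StoreBufferSketch {
    Address* top_ = nullptr;
    Address* start_[kStoreBuffers] = {};
    Address* limit_[kStoreBuffers] = {};
    std::atomic<Address*> lazy_top_[kStoreBuffers] = {};
    std::mutex mutex_;
    int current_ = 0;  // Buffer currently filled by generated code.

    // Main thread, on overflow: drain the older buffer, publish the full one,
    // and let generated code continue in the other buffer. A background task
    // would be posted here to pick up the published entries.
    void FlipStoreBuffers() {
      int other = (current_ + 1) % kStoreBuffers;
      MoveEntriesToRememberedSet(other);   // Older entries are processed first.
      lazy_top_[current_].store(top_);     // Publish the full buffer's top.
      current_ = other;
      top_ = start_[current_];
    }

    // Runs on the background task or the main thread; the mutex ensures only
    // one of them transfers a given buffer's entries.
    void MoveEntriesToRememberedSet(int index) {
      std::lock_guard<std::mutex> guard(mutex_);
      Address* top = lazy_top_[index].exchange(nullptr);
      if (top == nullptr) return;
      for (Address* slot = start_[index]; slot < top; ++slot) {
        // Insert or delete *slot in the remembered set (elided in this sketch).
      }
    }
  };

  int main() {
    static Address backing[kStoreBuffers][kSketchBufferSlots];
    StoreBufferSketch sb;
    for (int i = 0; i < kStoreBuffers; i++) {
      sb.start_[i] = backing[i];
      sb.limit_[i] = backing[i] + kSketchBufferSlots;
      sb.lazy_top_[i].store(nullptr);
    }
    sb.top_ = sb.start_[0];
    *sb.top_++ = static_cast<Address>(&backing[0][0]);  // A recorded slot.
    sb.FlipStoreBuffers();              // Buffer 0 published; buffer 1 active.
    sb.MoveEntriesToRememberedSet(0);   // Main thread drains it if no task ran.
    return 0;
  }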