Index: src/heap/concurrent-marking.cc
diff --git a/src/heap/concurrent-marking.cc b/src/heap/concurrent-marking.cc
index 6df60ff4ab5088fd5e31faf1c57ed3b2a18e6901..d47eea01153ae0610c43323e038a25e24810da98 100644
--- a/src/heap/concurrent-marking.cc
+++ b/src/heap/concurrent-marking.cc
@@ -4,8 +4,12 @@
 
 #include "src/heap/concurrent-marking.h"
 
+#include <stack>
+#include <unordered_map>
+
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap.h"
+#include "src/heap/marking.h"
 #include "src/isolate.h"
 #include "src/locked-queue-inl.h"
 #include "src/v8.h"
@@ -13,13 +17,75 @@
 namespace v8 {
 namespace internal {
 
+class ConcurrentMarkingMarkbits {
+ public:
+  ConcurrentMarkingMarkbits() {}
+  ~ConcurrentMarkingMarkbits() {
+    for (auto chunk_bitmap : bitmap_) {
+      FreeBitmap(chunk_bitmap.second);
+    }
+  }
+  bool Mark(HeapObject* obj) {
+    Address address = obj->address();
+    MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+    if (bitmap_.count(chunk) == 0) {
+      bitmap_[chunk] = AllocateBitmap();
+    }
+    MarkBit mark_bit =
+        bitmap_[chunk]->MarkBitFromIndex(chunk->AddressToMarkbitIndex(address));
+    if (mark_bit.Get()) return false;
+    mark_bit.Set();
+    return true;
+  }
+
+  Bitmap* AllocateBitmap() {
+    return static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
+  }
+
+  void FreeBitmap(Bitmap* bitmap) { free(bitmap); }
+
+ private:
+  std::unordered_map<MemoryChunk*, Bitmap*> bitmap_;
+};
+
+class ConcurrentMarkingVisitor : public ObjectVisitor {
+ public:
+  ConcurrentMarkingVisitor() {}
+
+  void VisitPointers(Object** start, Object** end) override {
+    for (Object** p = start; p < end; p++) {
+      if (!(*p)->IsHeapObject()) continue;
+      MarkObject(HeapObject::cast(*p));
+    }
+  }
+
+  void MarkObject(HeapObject* obj) {
+    if (markbits_.Mark(obj)) {
+      marking_stack_.push(obj);
+    }
+  }
+
+  void MarkTransitively() {
+    while (!marking_stack_.empty()) {
+      HeapObject* obj = marking_stack_.top();
+      marking_stack_.pop();
+      obj->Iterate(this);
+    }
+  }
+
+ private:
+  std::stack<HeapObject*> marking_stack_;
+  ConcurrentMarkingMarkbits markbits_;
+};
+
 class ConcurrentMarking::Task : public CancelableTask {
  public:
-  Task(Heap* heap, Queue* queue, base::Semaphore* on_finish)
+  Task(Heap* heap, std::vector<HeapObject*>* root_set,
+       base::Semaphore* on_finish)
       : CancelableTask(heap->isolate()),
         heap_(heap),
-        queue_(queue),
-        on_finish_(on_finish) {}
+        on_finish_(on_finish),
+        root_set_(root_set) {}
 
   virtual ~Task() {}
 
@@ -27,54 +93,40 @@ class ConcurrentMarking::Task : public CancelableTask {
   // v8::internal::CancelableTask overrides.
   void RunInternal() override {
     USE(heap_);
-    HeapObject* object;
-    while (queue_->Dequeue(&object)) {
-      // TODO(ulan): Implement actual marking.
+    for (HeapObject* obj : *root_set_) {
+      marking_visitor_.MarkObject(obj);
     }
+    marking_visitor_.MarkTransitively();
     on_finish_->Signal();
   }
 
   Heap* heap_;
-  Queue* queue_;
   base::Semaphore* on_finish_;
+  ConcurrentMarkingVisitor marking_visitor_;
+  std::vector<HeapObject*>* root_set_;
   DISALLOW_COPY_AND_ASSIGN(Task);
 };
 
 ConcurrentMarking::ConcurrentMarking(Heap* heap)
-    : heap_(heap), pending_tasks_(0), number_of_tasks_(0) {}
+    : heap_(heap), pending_task_(0) {}
 
 ConcurrentMarking::~ConcurrentMarking() {}
 
-void ConcurrentMarking::EnqueueObject(HeapObject* object) {
-  queue_.Enqueue(object);
+void ConcurrentMarking::AddRoot(HeapObject* object) {
+  root_set_.push_back(object);
 }
 
-bool ConcurrentMarking::IsQueueEmpty() { return queue_.IsEmpty(); }
-
-void ConcurrentMarking::StartMarkingTasks(int number_of_tasks) {
+void ConcurrentMarking::StartMarkingTask() {
   if (!FLAG_concurrent_marking) return;
-  DCHECK_EQ(0, number_of_tasks_);
 
-  number_of_tasks_ = number_of_tasks;
-  for (int i = 0; i < number_of_tasks; i++) {
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new Task(heap_, &queue_, &pending_tasks_),
-        v8::Platform::kShortRunningTask);
-  }
+  V8::GetCurrentPlatform()->CallOnBackgroundThread(
+      new Task(heap_, &root_set_, &pending_task_),
+      v8::Platform::kShortRunningTask);
 }
 
-void ConcurrentMarking::WaitForTasksToComplete() {
+void ConcurrentMarking::WaitForTaskToComplete() {
   if (!FLAG_concurrent_marking) return;
-
-  CancelableTaskManager* cancelable_task_manager =
-      heap_->isolate()->cancelable_task_manager();
-  for (int i = 0; i < number_of_tasks_; i++) {
-    if (cancelable_task_manager->TryAbort(task_ids_[i]) !=
-        CancelableTaskManager::kTaskAborted) {
-      pending_tasks_.Wait();
-    }
-  }
-  number_of_tasks_ = 0;
+  pending_task_.Wait();
 }
 
 }  // namespace internal
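
For context, a minimal sketch of how a caller might drive the single-task API this patch introduces (AddRoot, StartMarkingTask, WaitForTaskToComplete). The call site is not part of this diff; the Heap::concurrent_marking() accessor and the way roots are gathered are assumptions for illustration only, not code from this CL.

// Hypothetical call site, not part of this CL. Assumes a
// Heap::concurrent_marking() accessor exists and that the caller has
// already gathered a root set; neither is defined in this patch.
#include <vector>

#include "src/heap/concurrent-marking.h"
#include "src/heap/heap.h"

namespace v8 {
namespace internal {

void RunConcurrentMarkingSketch(Heap* heap,
                                const std::vector<HeapObject*>& roots) {
  ConcurrentMarking* marking = heap->concurrent_marking();  // assumed accessor
  // Hand the root set to the marker before the task is posted; Task keeps a
  // raw pointer to root_set_, so it must stay alive and unmodified until the
  // wait below returns.
  for (HeapObject* obj : roots) {
    marking->AddRoot(obj);
  }
  // Posts one background task; both calls below are no-ops unless
  // FLAG_concurrent_marking (--concurrent-marking) is enabled.
  marking->StartMarkingTask();
  // ... main-thread work can overlap with marking here ...
  // Blocks on the semaphore that Task::RunInternal() signals once the
  // transitive marking pass over the root set has drained the marking stack.
  marking->WaitForTaskToComplete();
}

}  // namespace internal
}  // namespace v8

Note the design choice visible in the diff: the old multi-task version handed tasks a shared LockedQueue and tracked task ids for cancellation, while this version runs exactly one task over a plain std::vector root set and synchronizes with a single semaphore, which is why the cancellation loop in WaitForTasksToComplete could collapse to one Wait() call.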