| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 43 matching lines...) |
| 54 #endif | 54 #endif |
| 55 marking_parity_(ODD_MARKING_PARITY), | 55 marking_parity_(ODD_MARKING_PARITY), |
| 56 was_marked_incrementally_(false), | 56 was_marked_incrementally_(false), |
| 57 evacuation_(false), | 57 evacuation_(false), |
| 58 heap_(heap), | 58 heap_(heap), |
| 59 marking_deque_memory_(NULL), | 59 marking_deque_memory_(NULL), |
| 60 marking_deque_memory_committed_(0), | 60 marking_deque_memory_committed_(0), |
| 61 code_flusher_(nullptr), | 61 code_flusher_(nullptr), |
| 62 have_code_to_deoptimize_(false), | 62 have_code_to_deoptimize_(false), |
| 63 compacting_(false), | 63 compacting_(false), |
| 64 sweeping_in_progress_(false), | 64 pending_compaction_tasks_semaphore_(0), |
| 65 pending_sweeper_tasks_semaphore_(0), | 65 sweeper_(heap) { |
| 66 pending_compaction_tasks_semaphore_(0) { | |
| 67 } | 66 } |
| 68 | 67 |
| 69 #ifdef VERIFY_HEAP | 68 #ifdef VERIFY_HEAP |
| 70 class VerifyMarkingVisitor : public ObjectVisitor { | 69 class VerifyMarkingVisitor : public ObjectVisitor { |
| 71 public: | 70 public: |
| 72 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} | 71 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {} |
| 73 | 72 |
| 74 void VisitPointers(Object** start, Object** end) override { | 73 void VisitPointers(Object** start, Object** end) override { |
| 75 for (Object** current = start; current < end; current++) { | 74 for (Object** current = start; current < end; current++) { |
| 76 if ((*current)->IsHeapObject()) { | 75 if ((*current)->IsHeapObject()) { |
| (...skipping 375 matching lines...) |
| 452 Marking::MarkWhite(Marking::MarkBitFrom(obj)); | 451 Marking::MarkWhite(Marking::MarkBitFrom(obj)); |
| 453 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); | 452 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); |
| 454 chunk->ResetProgressBar(); | 453 chunk->ResetProgressBar(); |
| 455 chunk->ResetLiveBytes(); | 454 chunk->ResetLiveBytes(); |
| 456 if (chunk->IsFlagSet(Page::BLACK_PAGE)) { | 455 if (chunk->IsFlagSet(Page::BLACK_PAGE)) { |
| 457 chunk->ClearFlag(Page::BLACK_PAGE); | 456 chunk->ClearFlag(Page::BLACK_PAGE); |
| 458 } | 457 } |
| 459 } | 458 } |
| 460 } | 459 } |
| 461 | 460 |
| 462 | 461 class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task { |
| 463 class MarkCompactCollector::SweeperTask : public v8::Task { | |
| 464 public: | 462 public: |
| 465 SweeperTask(Heap* heap, AllocationSpace space_to_start) | 463 SweeperTask(Sweeper* sweeper, Heap* heap, AllocationSpace space_to_start) |
| 466 : heap_(heap), space_to_start_(space_to_start) {} | 464 : sweeper_(sweeper), heap_(heap), space_to_start_(space_to_start) {} |
| 467 | 465 |
| 468 virtual ~SweeperTask() {} | 466 virtual ~SweeperTask() {} |
| 469 | 467 |
| 470 private: | 468 private: |
| 471 // v8::Task overrides. | 469 // v8::Task overrides. |
| 472 void Run() override { | 470 void Run() override { |
| 473 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); | 471 DCHECK_GE(space_to_start_, FIRST_PAGED_SPACE); |
| 474 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); | 472 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE); |
| 475 const int offset = space_to_start_ - FIRST_PAGED_SPACE; | 473 const int offset = space_to_start_ - FIRST_PAGED_SPACE; |
| 476 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; | 474 const int num_spaces = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1; |
| 477 for (int i = 0; i < num_spaces; i++) { | 475 for (int i = 0; i < num_spaces; i++) { |
| 478 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); | 476 const int space_id = FIRST_PAGED_SPACE + ((i + offset) % num_spaces); |
| 479 DCHECK_GE(space_id, FIRST_PAGED_SPACE); | 477 DCHECK_GE(space_id, FIRST_PAGED_SPACE); |
| 480 DCHECK_LE(space_id, LAST_PAGED_SPACE); | 478 DCHECK_LE(space_id, LAST_PAGED_SPACE); |
| 481 heap_->mark_compact_collector()->SweepInParallel( | 479 sweeper_->HelpSweepInParallel(heap_->paged_space(space_id), 0); |
| 482 heap_->paged_space(space_id), 0); | |
| 483 } | 480 } |
| 484 heap_->mark_compact_collector()->pending_sweeper_tasks_semaphore_.Signal(); | 481 for (int i = 0; i < Sweeper::kNumberOfSweepingSpaces; i++) { |
| 482 SweepingList* late_list = nullptr; |
| 483 SweepingSpace space = static_cast<SweepingSpace>(i); |
| 484 { |
| 485 base::LockGuard<base::Mutex> guard(&(sweeper_->mutex_)); |
| 486 late_list = sweeper_->late_sweeping_list_[space]; |
| 487 } |
| 488 if (late_list != nullptr) { |
| 489 for (Page* p : *late_list) { |
| 490 sweeper_->HelpSweepInParallel( |
| 491 p, heap_->paged_space(sweeper_->allocation_space(space))); |
| 492 } |
| 493 } |
| 494 } |
| 495 sweeper_->pending_sweeper_tasks_semaphore_.Signal(); |
| 485 } | 496 } |
| 486 | 497 |
| 498 Sweeper* sweeper_; |
| 487 Heap* heap_; | 499 Heap* heap_; |
| 488 AllocationSpace space_to_start_; | 500 AllocationSpace space_to_start_; |
| 489 | 501 |
| 490 DISALLOW_COPY_AND_ASSIGN(SweeperTask); | 502 DISALLOW_COPY_AND_ASSIGN(SweeperTask); |
| 491 }; | 503 }; |
| 492 | 504 |
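The space_to_start_ offset in Run() above is what keeps concurrently running tasks from all contending on the same space first: each task starts at its own paged space yet still visits every space exactly once. A minimal standalone sketch of that round-robin traversal, with plain ints standing in for V8's AllocationSpace values and a printf standing in for the real sweeping call:

    #include <cstdio>

    const int kFirstPagedSpace = 1;  // stand-in for OLD_SPACE
    const int kLastPagedSpace = 3;   // stand-in for MAP_SPACE

    // Visits every paged space once, beginning at |space_to_start|, so tasks
    // seeded with different start spaces initially work on different spaces.
    void SweepAllSpacesRoundRobin(int space_to_start) {
      const int offset = space_to_start - kFirstPagedSpace;
      const int num_spaces = kLastPagedSpace - kFirstPagedSpace + 1;
      for (int i = 0; i < num_spaces; i++) {
        const int space_id = kFirstPagedSpace + ((i + offset) % num_spaces);
        std::printf("task(start=%d) sweeps space %d\n", space_to_start,
                    space_id);
      }
    }

    int main() {
      for (int s = kFirstPagedSpace; s <= kLastPagedSpace; s++) {
        SweepAllSpacesRoundRobin(s);
      }
      return 0;
    }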
| 493 | 505 void MarkCompactCollector::Sweeper::StartSweeping() { |
| 494 void MarkCompactCollector::StartSweeperThreads() { | 506 sweeping_in_progress_ = true; |
| 495 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 507 std::sort(sweeping_list_[kOldSpace].begin(), sweeping_list_[kOldSpace].end(), |
| 496 new SweeperTask(heap(), OLD_SPACE), v8::Platform::kShortRunningTask); | 508 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); |
| 497 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 509 std::sort(sweeping_list_[kCodeSpace].begin(), |
| 498 new SweeperTask(heap(), CODE_SPACE), v8::Platform::kShortRunningTask); | 510 sweeping_list_[kCodeSpace].end(), |
| 499 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 511 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); |
| 500 new SweeperTask(heap(), MAP_SPACE), v8::Platform::kShortRunningTask); | 512 std::sort(sweeping_list_[kMapSpace].begin(), sweeping_list_[kMapSpace].end(), |
| 513 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); |
| 514 if (FLAG_concurrent_sweeping) { |
| 515 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 516 new SweeperTask(this, heap_, OLD_SPACE), |
| 517 v8::Platform::kShortRunningTask); |
| 518 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 519 new SweeperTask(this, heap_, CODE_SPACE), |
| 520 v8::Platform::kShortRunningTask); |
| 521 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 522 new SweeperTask(this, heap_, MAP_SPACE), |
| 523 v8::Platform::kShortRunningTask); |
| 524 } |
| 501 } | 525 } |
| 502 | 526 |
| 503 | 527 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( |
| 504 void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) { | 528 Page* page) { |
| 505 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); | 529 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); |
| 506 if (!page->SweepingDone()) { | 530 if (!page->SweepingDone()) { |
| 507 SweepInParallel(page, owner); | 531 HelpSweepInParallel(page, owner); |
| 508 if (!page->SweepingDone()) { | 532 if (!page->SweepingDone()) { |
| 509 // We were not able to sweep that page, i.e., a concurrent | 533 // We were not able to sweep that page, i.e., a concurrent |
| 510 // sweeper thread currently owns this page. Wait for the sweeper | 534 // sweeper thread currently owns this page. Wait for the sweeper |
| 511 // thread to be done with this page. | 535 // thread to be done with this page. |
| 512 page->WaitUntilSweepingCompleted(); | 536 page->WaitUntilSweepingCompleted(); |
| 513 } | 537 } |
| 514 } | 538 } |
| 515 } | 539 } |
| 516 | 540 |
| 517 | |
| 518 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { | 541 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { |
| 519 if (FLAG_concurrent_sweeping && !IsSweepingCompleted()) { | 542 if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) { |
| 520 SweepInParallel(heap()->paged_space(space->identity()), 0); | 543 sweeper().HelpSweepInParallel(heap()->paged_space(space->identity()), 0); |
| 521 space->RefillFreeList(); | 544 space->RefillFreeList(); |
| 522 } | 545 } |
| 523 } | 546 } |
| 524 | 547 |
| 548 Page* MarkCompactCollector::Sweeper::GetSweptPage(PagedSpace* space) { |
| 549 base::LockGuard<base::Mutex> guard(&mutex_); |
| 550 SweptList& list = swept_list_[sweeping_space(space->identity())]; |
| 551 if (list.length() > 0) { |
| 552 return list.RemoveLast(); |
| 553 } |
| 554 return nullptr; |
| 555 } |
| 525 | 556 |
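GetSweptPage is the consumer half of a mutex-guarded handoff: sweeper tasks push finished pages onto swept_list_ (via AddSweptPageSafe, further down), and the allocating thread pops one here, holding mutex_ only for the pop. A sketch of that handoff, with standard-library types standing in for base::Mutex and V8's List:

    #include <mutex>
    #include <vector>

    struct Page;  // opaque stand-in for v8's Page

    class SweptPageQueue {
     public:
      // Producer side: called by background sweeper tasks.
      void Add(Page* page) {
        std::lock_guard<std::mutex> guard(mutex_);
        pages_.push_back(page);
      }

      // Consumer side: called by the allocating thread. Returns nullptr when
      // no swept page is available yet, mirroring GetSweptPage() above.
      Page* TakeLast() {
        std::lock_guard<std::mutex> guard(mutex_);
        if (pages_.empty()) return nullptr;
        Page* page = pages_.back();
        pages_.pop_back();
        return page;
      }

     private:
      std::mutex mutex_;
      std::vector<Page*> pages_;
    };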
| 526 void MarkCompactCollector::EnsureSweepingCompleted() { | 557 void MarkCompactCollector::Sweeper::EnsureCompleted() { |
| 527 DCHECK(sweeping_in_progress_ == true); | 558 DCHECK(sweeping_in_progress_ == true); |
| 528 | 559 |
| 529 // If sweeping is not completed or not running at all, we try to complete it | 560 // If sweeping is not completed or not running at all, we try to complete it |
| 530 // here. | 561 // here. |
| 531 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { | 562 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { |
| 532 SweepInParallel(heap()->paged_space(OLD_SPACE), 0); | 563 HelpSweepInParallel(heap_->paged_space(OLD_SPACE), 0); |
| 533 SweepInParallel(heap()->paged_space(CODE_SPACE), 0); | 564 HelpSweepInParallel(heap_->paged_space(CODE_SPACE), 0); |
| 534 SweepInParallel(heap()->paged_space(MAP_SPACE), 0); | 565 HelpSweepInParallel(heap_->paged_space(MAP_SPACE), 0); |
| 535 } | 566 } |
| 536 | 567 |
| 537 if (FLAG_concurrent_sweeping) { | 568 if (FLAG_concurrent_sweeping) { |
| 538 pending_sweeper_tasks_semaphore_.Wait(); | 569 pending_sweeper_tasks_semaphore_.Wait(); |
| 539 pending_sweeper_tasks_semaphore_.Wait(); | 570 pending_sweeper_tasks_semaphore_.Wait(); |
| 540 pending_sweeper_tasks_semaphore_.Wait(); | 571 pending_sweeper_tasks_semaphore_.Wait(); |
| 541 } | 572 } |
| 542 | 573 |
| 543 ParallelSweepSpacesComplete(); | 574 ParallelSweepSpacesComplete(); |
| 544 sweeping_in_progress_ = false; | 575 sweeping_in_progress_ = false; |
| 576 } |
| 577 |
| 578 void MarkCompactCollector::EnsureSweepingCompleted() { |
| 579 DCHECK(sweeper().sweeping_in_progress() == true); |
| 580 |
| 581 sweeper().EnsureCompleted(); |
| 545 heap()->old_space()->RefillFreeList(); | 582 heap()->old_space()->RefillFreeList(); |
| 546 heap()->code_space()->RefillFreeList(); | 583 heap()->code_space()->RefillFreeList(); |
| 547 heap()->map_space()->RefillFreeList(); | 584 heap()->map_space()->RefillFreeList(); |
| 548 | 585 |
| 549 #ifdef VERIFY_HEAP | 586 #ifdef VERIFY_HEAP |
| 550 if (FLAG_verify_heap && !evacuation()) { | 587 if (FLAG_verify_heap && !evacuation()) { |
| 551 VerifyEvacuation(heap_); | 588 VerifyEvacuation(heap_); |
| 552 } | 589 } |
| 553 #endif | 590 #endif |
| 554 } | 591 } |
| 555 | 592 |
| 556 | 593 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() { |
| 557 bool MarkCompactCollector::IsSweepingCompleted() { | |
| 558 if (!pending_sweeper_tasks_semaphore_.WaitFor( | 594 if (!pending_sweeper_tasks_semaphore_.WaitFor( |
| 559 base::TimeDelta::FromSeconds(0))) { | 595 base::TimeDelta::FromSeconds(0))) { |
| 560 return false; | 596 return false; |
| 561 } | 597 } |
| 562 pending_sweeper_tasks_semaphore_.Signal(); | 598 pending_sweeper_tasks_semaphore_.Signal(); |
| 563 return true; | 599 return true; |
| 564 } | 600 } |
| 565 | 601 |
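Taken together, StartSweeping, EnsureCompleted, and IsSweepingCompleted implement a small semaphore protocol: each of the three tasks signals exactly once when it finishes, EnsureCompleted joins by waiting three times, and IsSweepingCompleted probes without blocking by consuming one count and immediately putting it back. A sketch of the same handshake, assuming C++20 std::counting_semaphore and std::jthread as stand-ins for base::Semaphore and the platform's background tasks:

    #include <semaphore>
    #include <thread>
    #include <vector>

    std::counting_semaphore<3> pending_sweeper_tasks(0);

    void SweeperTaskBody() {
      // ... sweep pages ...
      pending_sweeper_tasks.release();  // Signal() once per task, at the end
    }

    // Non-blocking probe, like WaitFor(0) + Signal(): take one count if it
    // is there and put it right back, leaving the total unchanged.
    bool IsSweepingCompleted() {
      if (!pending_sweeper_tasks.try_acquire()) return false;
      pending_sweeper_tasks.release();
      return true;
    }

    void EnsureCompleted() {
      for (int i = 0; i < 3; i++) pending_sweeper_tasks.acquire();  // Wait()
    }

    int main() {
      std::vector<std::jthread> tasks;
      for (int i = 0; i < 3; i++) tasks.emplace_back(SweeperTaskBody);
      EnsureCompleted();  // returns only after all three have signaled
      return 0;
    }

Note that, as in the code above, the non-blocking probe only proves that at least one task has signaled; it is EnsureCompleted's three waits that guarantee all of them are done.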
| 566 | |
| 567 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) { | 602 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) { |
| 568 // This is only used when resizing an object. | 603 // This is only used when resizing an object. |
| 569 DCHECK(MemoryChunk::FromAddress(old_start) == | 604 DCHECK(MemoryChunk::FromAddress(old_start) == |
| 570 MemoryChunk::FromAddress(new_start)); | 605 MemoryChunk::FromAddress(new_start)); |
| 571 | 606 |
| 572 if (!heap->incremental_marking()->IsMarking() || | 607 if (!heap->incremental_marking()->IsMarking() || |
| 573 Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE)) | 608 Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE)) |
| 574 return; | 609 return; |
| 575 | 610 |
| 576 // If the mark doesn't move, we don't check the color of the object. | 611 // If the mark doesn't move, we don't check the color of the object. |
| (...skipping 2585 matching lines...) |
| 3162 HeapObject* heap_object = HeapObject::cast(object); | 3197 HeapObject* heap_object = HeapObject::cast(object); |
| 3163 MapWord map_word = heap_object->map_word(); | 3198 MapWord map_word = heap_object->map_word(); |
| 3164 if (map_word.IsForwardingAddress()) { | 3199 if (map_word.IsForwardingAddress()) { |
| 3165 return map_word.ToForwardingAddress(); | 3200 return map_word.ToForwardingAddress(); |
| 3166 } | 3201 } |
| 3167 } | 3202 } |
| 3168 return object; | 3203 return object; |
| 3169 } | 3204 } |
| 3170 }; | 3205 }; |
| 3171 | 3206 |
| 3172 enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS }; | 3207 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode, |
| 3173 | 3208 MarkCompactCollector::Sweeper::SweepingParallelism parallelism, |
| 3174 enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST }; | 3209 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode, |
| 3175 | 3210 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode> |
| 3176 enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE }; | 3211 int MarkCompactCollector::Sweeper::UnmanagedSweep(PagedSpace* space, Page* p, |
| 3177 | 3212 ObjectVisitor* v) { |
| 3178 // Sweeps a page. After sweeping the page can be iterated. | |
| 3179 // Slots in live objects pointing into evacuation candidates are updated | |
| 3180 // if requested. | |
| 3181 // Returns the size of the biggest continuous freed memory chunk in bytes. | |
| 3182 template <SweepingMode sweeping_mode, | |
| 3183 MarkCompactCollector::SweepingParallelism parallelism, | |
| 3184 SkipListRebuildingMode skip_list_mode, | |
| 3185 FreeSpaceTreatmentMode free_space_mode> | |
| 3186 static int Sweep(PagedSpace* space, Page* p, ObjectVisitor* v) { | |
| 3187 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); | 3213 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone()); |
| 3188 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); | 3214 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE)); |
| 3189 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, | 3215 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, |
| 3190 space->identity() == CODE_SPACE); | 3216 space->identity() == CODE_SPACE); |
| 3191 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); | 3217 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); |
| 3192 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || | 3218 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || |
| 3193 sweeping_mode == SWEEP_ONLY); | 3219 sweeping_mode == SWEEP_ONLY); |
| 3194 | 3220 |
| 3195 Address free_start = p->area_start(); | 3221 Address free_start = p->area_start(); |
| 3196 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); | 3222 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); |
| (...skipping 48 matching lines...) |
| 3245 if (free_space_mode == ZAP_FREE_SPACE) { | 3271 if (free_space_mode == ZAP_FREE_SPACE) { |
| 3246 memset(free_start, 0xcc, size); | 3272 memset(free_start, 0xcc, size); |
| 3247 } | 3273 } |
| 3248 freed_bytes = space->UnaccountedFree(free_start, size); | 3274 freed_bytes = space->UnaccountedFree(free_start, size); |
| 3249 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3275 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
| 3250 } | 3276 } |
| 3251 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3277 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3252 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3278 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
| 3253 } | 3279 } |
| 3254 | 3280 |
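All four sweep variations above are selected through non-type template parameters, so every mode check inside UnmanagedSweep is a compile-time constant and each instantiation compiles to a specialized, branch-free loop. A reduced sketch of the technique, reusing the FreeSpaceTreatmentMode idea with a made-up FreeRange helper:

    #include <cstdio>
    #include <cstring>

    enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };

    // |free_space_mode| is a template parameter, so the branch below is
    // resolved at compile time: the IGNORE_FREE_SPACE instantiation contains
    // no memset at all.
    template <FreeSpaceTreatmentMode free_space_mode>
    void FreeRange(char* start, std::size_t size) {
      if (free_space_mode == ZAP_FREE_SPACE) {
        std::memset(start, 0xcc, size);  // zap freed memory with a pattern
      }
      std::printf("freed %zu bytes\n", size);
    }

    int main() {
      char buffer[64];
      FreeRange<ZAP_FREE_SPACE>(buffer, sizeof(buffer));
      FreeRange<IGNORE_FREE_SPACE>(buffer, sizeof(buffer));
      return 0;
    }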
| 3255 | |
| 3256 void MarkCompactCollector::InvalidateCode(Code* code) { | 3281 void MarkCompactCollector::InvalidateCode(Code* code) { |
| 3257 if (heap_->incremental_marking()->IsCompacting() && | 3282 if (heap_->incremental_marking()->IsCompacting() && |
| 3258 !ShouldSkipEvacuationSlotRecording(code)) { | 3283 !ShouldSkipEvacuationSlotRecording(code)) { |
| 3259 DCHECK(compacting_); | 3284 DCHECK(compacting_); |
| 3260 | 3285 |
| 3261 // If the object is white then no slots were recorded on it yet. | 3286 // If the object is white then no slots were recorded on it yet. |
| 3262 MarkBit mark_bit = Marking::MarkBitFrom(code); | 3287 MarkBit mark_bit = Marking::MarkBitFrom(code); |
| 3263 if (Marking::IsWhite(mark_bit)) return; | 3288 if (Marking::IsWhite(mark_bit)) return; |
| 3264 | 3289 |
| 3265 // Ignore all slots that might have been recorded in the body of the | 3290 // Ignore all slots that might have been recorded in the body of the |
| (...skipping 76 matching lines...) |
| 3342 LiveObjectIterator<kBlackObjects> it(page); | 3367 LiveObjectIterator<kBlackObjects> it(page); |
| 3343 HeapObject* object = NULL; | 3368 HeapObject* object = NULL; |
| 3344 while ((object = it.Next()) != NULL) { | 3369 while ((object = it.Next()) != NULL) { |
| 3345 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3370 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 3346 Map* map = object->synchronized_map(); | 3371 Map* map = object->synchronized_map(); |
| 3347 int size = object->SizeFromMap(map); | 3372 int size = object->SizeFromMap(map); |
| 3348 object->IterateBody(map->instance_type(), size, visitor); | 3373 object->IterateBody(map->instance_type(), size, visitor); |
| 3349 } | 3374 } |
| 3350 } | 3375 } |
| 3351 | 3376 |
| 3377 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space, |
| 3378 Page* page) { |
| 3379 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3380 swept_list_[sweeping_space(space->identity())].Add(page); |
| 3381 } |
| 3352 | 3382 |
| 3353 void MarkCompactCollector::SweepAbortedPages() { | 3383 void MarkCompactCollector::SweepAbortedPages() { |
| 3354 // Second pass on aborted pages. | 3384 // Second pass on aborted pages. |
| 3355 for (Page* p : evacuation_candidates_) { | 3385 for (Page* p : evacuation_candidates_) { |
| 3356 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3386 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
| 3357 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); | 3387 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); |
| 3358 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3388 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
| 3359 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3389 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3360 switch (space->identity()) { | 3390 switch (space->identity()) { |
| 3361 case OLD_SPACE: | 3391 case OLD_SPACE: |
| 3362 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, | 3392 Sweeper::UnmanagedSweep< |
| 3363 IGNORE_FREE_SPACE>(space, p, nullptr); | 3393 Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
| 3394 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(space, p, |
| 3395 nullptr); |
| 3364 break; | 3396 break; |
| 3365 case CODE_SPACE: | 3397 case CODE_SPACE: |
| 3366 if (FLAG_zap_code_space) { | 3398 if (FLAG_zap_code_space) { |
| 3367 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, | 3399 Sweeper::UnmanagedSweep< |
| 3368 ZAP_FREE_SPACE>(space, p, nullptr); | 3400 Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
| 3401 Sweeper::REBUILD_SKIP_LIST, Sweeper::ZAP_FREE_SPACE>(space, p, |
| 3402 nullptr); |
| 3369 } else { | 3403 } else { |
| 3370 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, | 3404 Sweeper::UnmanagedSweep< |
| 3371 IGNORE_FREE_SPACE>(space, p, nullptr); | 3405 Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
| 3406 Sweeper::REBUILD_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>( |
| 3407 space, p, nullptr); |
| 3372 } | 3408 } |
| 3373 break; | 3409 break; |
| 3374 default: | 3410 default: |
| 3375 UNREACHABLE(); | 3411 UNREACHABLE(); |
| 3376 break; | 3412 break; |
| 3377 } | 3413 } |
| 3378 { | 3414 sweeper().AddSweptPageSafe(space, p); |
| 3379 base::LockGuard<base::Mutex> guard(&swept_pages_mutex_); | |
| 3380 swept_pages(space->identity())->Add(p); | |
| 3381 } | |
| 3382 } | 3415 } |
| 3383 } | 3416 } |
| 3384 } | 3417 } |
| 3385 | 3418 |
| 3386 | 3419 |
| 3387 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 3420 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
| 3388 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 3421 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
| 3389 Heap::RelocationLock relocation_lock(heap()); | 3422 Heap::RelocationLock relocation_lock(heap()); |
| 3390 | 3423 |
| 3391 { | 3424 { |
| (...skipping 23 matching lines...) |
| 3415 // EvacuateNewSpaceAndCandidates iterates over new space objects and, for | 3448 // EvacuateNewSpaceAndCandidates iterates over new space objects and, for |
| 3416 // ArrayBuffers, either re-registers them as live or promotes them. This is | 3449 // ArrayBuffers, either re-registers them as live or promotes them. This is |
| 3417 // needed to properly free them. | 3450 // needed to properly free them. |
| 3418 heap()->array_buffer_tracker()->FreeDead(false); | 3451 heap()->array_buffer_tracker()->FreeDead(false); |
| 3419 | 3452 |
| 3420 // Deallocate evacuated candidate pages. | 3453 // Deallocate evacuated candidate pages. |
| 3421 ReleaseEvacuationCandidates(); | 3454 ReleaseEvacuationCandidates(); |
| 3422 } | 3455 } |
| 3423 | 3456 |
| 3424 #ifdef VERIFY_HEAP | 3457 #ifdef VERIFY_HEAP |
| 3425 if (FLAG_verify_heap && !sweeping_in_progress_) { | 3458 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { |
| 3426 VerifyEvacuation(heap()); | 3459 VerifyEvacuation(heap()); |
| 3427 } | 3460 } |
| 3428 #endif | 3461 #endif |
| 3429 } | 3462 } |
| 3430 | 3463 |
| 3431 template <PointerDirection direction> | 3464 template <PointerDirection direction> |
| 3432 class PointerUpdateJobTraits { | 3465 class PointerUpdateJobTraits { |
| 3433 public: | 3466 public: |
| 3434 typedef int PerPageData; // Per page data is not used in this job. | 3467 typedef int PerPageData; // Per page data is not used in this job. |
| 3435 typedef PointersUpdatingVisitor* PerTaskData; | 3468 typedef PointersUpdatingVisitor* PerTaskData; |
| (...skipping 166 matching lines...) |
| 3602 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3635 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3603 p->ResetLiveBytes(); | 3636 p->ResetLiveBytes(); |
| 3604 CHECK(p->SweepingDone()); | 3637 CHECK(p->SweepingDone()); |
| 3605 space->ReleasePage(p); | 3638 space->ReleasePage(p); |
| 3606 } | 3639 } |
| 3607 evacuation_candidates_.Rewind(0); | 3640 evacuation_candidates_.Rewind(0); |
| 3608 compacting_ = false; | 3641 compacting_ = false; |
| 3609 heap()->FreeQueuedChunks(); | 3642 heap()->FreeQueuedChunks(); |
| 3610 } | 3643 } |
| 3611 | 3644 |
| 3612 | 3645 int MarkCompactCollector::Sweeper::HelpSweepInParallel(PagedSpace* space, |
| 3613 int MarkCompactCollector::SweepInParallel(PagedSpace* space, | 3646 int required_freed_bytes, |
| 3614 int required_freed_bytes, | 3647 int max_pages) { |
| 3615 int max_pages) { | |
| 3616 int max_freed = 0; | 3648 int max_freed = 0; |
| 3617 int max_freed_overall = 0; | 3649 int max_freed_overall = 0; |
| 3618 int page_count = 0; | 3650 int page_count = 0; |
| 3619 for (Page* p : sweeping_list(space)) { | 3651 for (Page* p : sweeping_list_[sweeping_space(space->identity())]) { |
| 3620 max_freed = SweepInParallel(p, space); | 3652 max_freed = HelpSweepInParallel(p, space); |
| 3621 DCHECK(max_freed >= 0); | 3653 DCHECK(max_freed >= 0); |
| 3622 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { | 3654 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { |
| 3623 return max_freed; | 3655 return max_freed; |
| 3624 } | 3656 } |
| 3625 max_freed_overall = Max(max_freed, max_freed_overall); | 3657 max_freed_overall = Max(max_freed, max_freed_overall); |
| 3626 page_count++; | 3658 page_count++; |
| 3627 if (max_pages > 0 && page_count >= max_pages) { | 3659 if (max_pages > 0 && page_count >= max_pages) { |
| 3628 break; | 3660 break; |
| 3629 } | 3661 } |
| 3630 } | 3662 } |
| 3631 return max_freed_overall; | 3663 return max_freed_overall; |
| 3632 } | 3664 } |
| 3633 | 3665 |
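The required_freed_bytes and max_pages arguments make the loop above demand-driven: an allocating thread that only needs N contiguous bytes can stop sweeping as soon as some page yields a chunk that large, instead of finishing the whole space. The contract, sketched with a stub in place of the per-page sweep:

    #include <cstdio>

    // Stub: sweeping page |i| reports the largest contiguous freed chunk.
    int SweepOnePage(int i) { return 100 * (i + 1); }

    // Sweeps until one page frees |required_freed_bytes| contiguously (early
    // return), or until |max_pages| pages were swept (0 means no limit).
    int SweepUntilSatisfied(int num_pages, int required_freed_bytes,
                            int max_pages) {
      int max_freed_overall = 0;
      int page_count = 0;
      for (int i = 0; i < num_pages; i++) {
        const int max_freed = SweepOnePage(i);
        if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
          return max_freed;  // the pending allocation can now succeed
        }
        if (max_freed > max_freed_overall) max_freed_overall = max_freed;
        if (max_pages > 0 && ++page_count >= max_pages) break;
      }
      return max_freed_overall;
    }

    int main() {
      std::printf("%d\n", SweepUntilSatisfied(10, 250, 0));  // prints 300
      return 0;
    }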
| 3634 | 3666 int MarkCompactCollector::Sweeper::HelpSweepInParallel(Page* page, |
| 3635 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { | 3667 PagedSpace* space) { |
| 3636 int max_freed = 0; | 3668 int max_freed = 0; |
| 3637 if (page->mutex()->TryLock()) { | 3669 if (page->mutex()->TryLock()) { |
| 3638 // If this page was already swept in the meantime, we can return here. | 3670 // If this page was already swept in the meantime, we can return here. |
| 3639 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { | 3671 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) { |
| 3640 page->mutex()->Unlock(); | 3672 page->mutex()->Unlock(); |
| 3641 return 0; | 3673 return 0; |
| 3642 } | 3674 } |
| 3643 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3675 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
| 3644 if (space->identity() == OLD_SPACE) { | 3676 if (space->identity() == OLD_SPACE) { |
| 3645 max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3677 max_freed = |
| 3646 IGNORE_FREE_SPACE>(space, page, NULL); | 3678 UnmanagedSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
| 3679 IGNORE_FREE_SPACE>(space, page, NULL); |
| 3647 } else if (space->identity() == CODE_SPACE) { | 3680 } else if (space->identity() == CODE_SPACE) { |
| 3648 max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, | 3681 max_freed = |
| 3649 IGNORE_FREE_SPACE>(space, page, NULL); | 3682 UnmanagedSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, |
| 3683 IGNORE_FREE_SPACE>(space, page, NULL); |
| 3650 } else { | 3684 } else { |
| 3651 max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, | 3685 max_freed = |
| 3652 IGNORE_FREE_SPACE>(space, page, NULL); | 3686 UnmanagedSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, |
| 3687 IGNORE_FREE_SPACE>(space, page, NULL); |
| 3653 } | 3688 } |
| 3654 { | 3689 { |
| 3655 base::LockGuard<base::Mutex> guard(&swept_pages_mutex_); | 3690 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3656 swept_pages(space->identity())->Add(page); | 3691 swept_list_[sweeping_space(space->identity())].Add(page); |
| 3657 } | 3692 } |
| 3658 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3693 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3659 page->mutex()->Unlock(); | 3694 page->mutex()->Unlock(); |
| 3660 } | 3695 } |
| 3661 return max_freed; | 3696 return max_freed; |
| 3662 } | 3697 } |
| 3663 | 3698 |
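HelpSweepInParallel(Page*, PagedSpace*) is safe to call from the main thread and from background tasks at the same time because a page has to be claimed twice before any sweeping happens: first via the page mutex's TryLock (a loser simply moves on to the next page), then via the kSweepingPending check (so a page swept while the caller was waiting is not swept again). A sketch of that claim protocol, assuming std::mutex and std::atomic as stand-ins for the page mutex and concurrent_sweeping_state():

    #include <atomic>
    #include <mutex>

    enum SweepingState { kSweepingPending, kSweepingInProgress, kSweepingDone };

    struct Page {
      std::mutex mutex;
      std::atomic<SweepingState> state{kSweepingPending};
    };

    // Returns true if this caller swept the page itself; false if another
    // thread owns the page or it was already swept in the meantime.
    bool TrySweepPage(Page* page) {
      if (!page->mutex.try_lock()) return false;  // another thread owns it
      if (page->state.load() != kSweepingPending) {
        page->mutex.unlock();  // swept (or being swept) by someone else
        return false;
      }
      page->state.store(kSweepingInProgress);
      // ... free dead objects on the page into the free list ...
      page->state.store(kSweepingDone);
      page->mutex.unlock();
      return true;
    }

SweepOrWaitUntilSweepingCompleted further up is the caller-side counterpart of this protocol: help first, and only block on the page if the claim fails.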
| 3699 void MarkCompactCollector::Sweeper::AddPage(PagedSpace* space, Page* page) { |
| 3700 DCHECK(!sweeping_in_progress_); |
| 3701 PrepareAddPage(sweeping_space(space->identity()), page); |
| 3702 sweeping_list_[sweeping_space(space->identity())].push_back(page); |
| 3703 } |
| 3704 |
| 3705 void MarkCompactCollector::Sweeper::AddLatePage(SweepingSpace space, |
| 3706 Page* page) { |
| 3707 DCHECK(!sweeping_in_progress_); |
| 3708 PrepareAddPage(space, page); |
| 3709 if (tmp_late_sweeping_list_[space] == nullptr) { |
| 3710 tmp_late_sweeping_list_[space] = new SweepingList(); |
| 3711 } |
| 3712 tmp_late_sweeping_list_[space]->push_back(page); |
| 3713 } |
| 3714 |
| 3715 void MarkCompactCollector::Sweeper::PrepareAddPage(SweepingSpace space, |
| 3716 Page* page) { |
| 3717 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); |
| 3718 int to_sweep = page->area_size() - page->LiveBytes(); |
| 3719 heap_->paged_space(allocation_space(space)) |
| 3720 ->accounting_stats_.ShrinkSpace(to_sweep); |
| 3721 } |
| 3722 |
| 3723 void MarkCompactCollector::Sweeper::CommitLateList(SweepingSpace space) { |
| 3724 DCHECK(!sweeping_in_progress_); |
| 3725 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3726 DCHECK_NULL(late_sweeping_list_[space]); |
| 3727 std::swap(late_sweeping_list_[space], tmp_late_sweeping_list_[space]); |
| 3728 } |
| 3664 | 3729 |
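AddLatePage and CommitLateList form a small publish pattern: pages accumulate in tmp_late_sweeping_list_ without any locking (only legal while sweeping has not started, as the DCHECKs enforce), and CommitLateList then publishes the finished list under mutex_, swapping the temporary pointer back to null so the next cycle's DCHECKs hold. A reader that takes the same mutex, such as SweeperTask, sees either no late list or a complete one, never a half-built one. A reduced sketch with one space instead of three, std::mutex for base::Mutex, and the DCHECKs turned into asserts:

    #include <cassert>
    #include <mutex>
    #include <vector>

    struct Page;
    using SweepingList = std::vector<Page*>;

    class LateListPublisher {
     public:
      // Single-threaded phase: build the list without taking the lock.
      void AddLatePage(Page* page) {
        if (tmp_late_list_ == nullptr) tmp_late_list_ = new SweepingList();
        tmp_late_list_->push_back(page);
      }

      // Publish atomically with respect to readers that hold the mutex, and
      // reset the temporary pointer for the next cycle.
      void CommitLateList() {
        std::lock_guard<std::mutex> guard(mutex_);
        assert(late_list_ == nullptr);
        late_list_ = tmp_late_list_;
        tmp_late_list_ = nullptr;
      }

      // Reader side (e.g., a sweeper task): snapshot the pointer under the
      // lock; the list itself is immutable once committed.
      SweepingList* TakeSnapshot() {
        std::lock_guard<std::mutex> guard(mutex_);
        return late_list_;
      }

     private:
      std::mutex mutex_;
      SweepingList* tmp_late_list_ = nullptr;
      SweepingList* late_list_ = nullptr;
    };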
| 3665 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { | 3730 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { |
| 3666 space->ClearStats(); | 3731 space->ClearStats(); |
| 3667 | 3732 |
| 3668 PageIterator it(space); | 3733 PageIterator it(space); |
| 3669 | 3734 |
| 3670 int will_be_swept = 0; | 3735 int will_be_swept = 0; |
| 3671 bool unused_page_present = false; | 3736 bool unused_page_present = false; |
| 3672 | 3737 |
| 3673 while (it.has_next()) { | 3738 while (it.has_next()) { |
| (...skipping 15 matching lines...) |
| 3689 // TODO(hpayer): Free unused memory of last black page. | 3754 // TODO(hpayer): Free unused memory of last black page. |
| 3690 continue; | 3755 continue; |
| 3691 } | 3756 } |
| 3692 | 3757 |
| 3693 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { | 3758 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { |
| 3694 // We need to sweep the page to get it into an iterable state again. Note | 3759 // We need to sweep the page to get it into an iterable state again. Note |
| 3695 // that this adds unusable memory to the free list; it is dropped from | 3760 // that this adds unusable memory to the free list; it is dropped from |
| 3696 // the free list again later. Since we only use the flag for testing, | 3761 // the free list again later. Since we only use the flag for testing, |
| 3697 // this is fine. | 3762 // this is fine. |
| 3698 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); | 3763 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); |
| 3699 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, | 3764 Sweeper::UnmanagedSweep< |
| 3700 IGNORE_FREE_SPACE>(space, p, nullptr); | 3765 Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD, |
| 3766 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_SPACE>(space, p, |
| 3767 nullptr); |
| 3701 continue; | 3768 continue; |
| 3702 } | 3769 } |
| 3703 | 3770 |
| 3704 // One unused page is kept, all further are released before sweeping them. | 3771 // One unused page is kept, all further are released before sweeping them. |
| 3705 if (p->LiveBytes() == 0) { | 3772 if (p->LiveBytes() == 0) { |
| 3706 if (unused_page_present) { | 3773 if (unused_page_present) { |
| 3707 if (FLAG_gc_verbose) { | 3774 if (FLAG_gc_verbose) { |
| 3708 PrintIsolate(isolate(), "sweeping: released page: %p", p); | 3775 PrintIsolate(isolate(), "sweeping: released page: %p", p); |
| 3709 } | 3776 } |
| 3710 space->ReleasePage(p); | 3777 space->ReleasePage(p); |
| 3711 continue; | 3778 continue; |
| 3712 } | 3779 } |
| 3713 unused_page_present = true; | 3780 unused_page_present = true; |
| 3714 } | 3781 } |
| 3715 | 3782 |
| 3716 p->concurrent_sweeping_state().SetValue(Page::kSweepingPending); | 3783 sweeper().AddPage(space, p); |
| 3717 sweeping_list(space).push_back(p); | |
| 3718 int to_sweep = p->area_size() - p->LiveBytes(); | |
| 3719 space->accounting_stats_.ShrinkSpace(to_sweep); | |
| 3720 will_be_swept++; | 3784 will_be_swept++; |
| 3721 } | 3785 } |
| 3722 | 3786 |
| 3723 if (FLAG_gc_verbose) { | 3787 if (FLAG_gc_verbose) { |
| 3724 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d", | 3788 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d", |
| 3725 AllocationSpaceName(space->identity()), will_be_swept); | 3789 AllocationSpaceName(space->identity()), will_be_swept); |
| 3726 } | 3790 } |
| 3727 std::sort(sweeping_list(space).begin(), sweeping_list(space).end(), | |
| 3728 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); | |
| 3729 } | 3791 } |
| 3730 | 3792 |
| 3731 | 3793 |
| 3732 void MarkCompactCollector::SweepSpaces() { | 3794 void MarkCompactCollector::SweepSpaces() { |
| 3733 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP); | 3795 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP); |
| 3734 double start_time = 0.0; | 3796 double start_time = 0.0; |
| 3735 if (FLAG_print_cumulative_gc_stat) { | 3797 if (FLAG_print_cumulative_gc_stat) { |
| 3736 start_time = heap_->MonotonicallyIncreasingTimeInMs(); | 3798 start_time = heap_->MonotonicallyIncreasingTimeInMs(); |
| 3737 } | 3799 } |
| 3738 | 3800 |
| 3739 #ifdef DEBUG | 3801 #ifdef DEBUG |
| 3740 state_ = SWEEP_SPACES; | 3802 state_ = SWEEP_SPACES; |
| 3741 #endif | 3803 #endif |
| 3742 | 3804 |
| 3743 { | 3805 { |
| 3744 sweeping_in_progress_ = true; | |
| 3745 { | 3806 { |
| 3746 GCTracer::Scope sweep_scope(heap()->tracer(), | 3807 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 3747 GCTracer::Scope::MC_SWEEP_OLD); | 3808 GCTracer::Scope::MC_SWEEP_OLD); |
| 3748 StartSweepSpace(heap()->old_space()); | 3809 StartSweepSpace(heap()->old_space()); |
| 3749 } | 3810 } |
| 3750 { | 3811 { |
| 3751 GCTracer::Scope sweep_scope(heap()->tracer(), | 3812 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 3752 GCTracer::Scope::MC_SWEEP_CODE); | 3813 GCTracer::Scope::MC_SWEEP_CODE); |
| 3753 StartSweepSpace(heap()->code_space()); | 3814 StartSweepSpace(heap()->code_space()); |
| 3754 } | 3815 } |
| 3755 { | 3816 { |
| 3756 GCTracer::Scope sweep_scope(heap()->tracer(), | 3817 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 3757 GCTracer::Scope::MC_SWEEP_MAP); | 3818 GCTracer::Scope::MC_SWEEP_MAP); |
| 3758 StartSweepSpace(heap()->map_space()); | 3819 StartSweepSpace(heap()->map_space()); |
| 3759 } | 3820 } |
| 3760 if (FLAG_concurrent_sweeping) { | 3821 sweeper().StartSweeping(); |
| 3761 StartSweeperThreads(); | |
| 3762 } | |
| 3763 } | 3822 } |
| 3764 | 3823 |
| 3765 // Deallocate unmarked large objects. | 3824 // Deallocate unmarked large objects. |
| 3766 heap_->lo_space()->FreeUnmarkedObjects(); | 3825 heap_->lo_space()->FreeUnmarkedObjects(); |
| 3767 | 3826 |
| 3768 if (FLAG_print_cumulative_gc_stat) { | 3827 if (FLAG_print_cumulative_gc_stat) { |
| 3769 heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() - | 3828 heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() - |
| 3770 start_time); | 3829 start_time); |
| 3771 } | 3830 } |
| 3772 } | 3831 } |
| 3773 | 3832 |
| 3774 | 3833 void MarkCompactCollector::Sweeper::ParallelSweepSpacesComplete() { |
| 3775 void MarkCompactCollector::ParallelSweepSpacesComplete() { | 3834 for (int i = 0; i < kNumberOfSweepingSpaces; i++) { |
| 3776 sweeping_list(heap()->old_space()).clear(); | 3835 SweepingSpace space = static_cast<SweepingSpace>(i); |
| 3777 sweeping_list(heap()->code_space()).clear(); | 3836 DCHECK_NULL(tmp_late_sweeping_list_[space]); |
| 3778 sweeping_list(heap()->map_space()).clear(); | 3837 sweeping_list_[space].clear(); |
| 3838 // Deleting nullptr is a no-op; reset so next cycle's DCHECKs hold. |
| 3839 delete late_sweeping_list_[space]; |
| 3840 late_sweeping_list_[space] = nullptr; |
| 3841 } |
| 3779 } | 3842 } |
| 3780 | 3843 |
| 3781 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); } | 3844 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); } |
| 3782 | 3845 |
| 3783 | 3846 |
| 3784 void MarkCompactCollector::Initialize() { | 3847 void MarkCompactCollector::Initialize() { |
| 3785 MarkCompactMarkingVisitor::Initialize(); | 3848 MarkCompactMarkingVisitor::Initialize(); |
| 3786 IncrementalMarking::Initialize(); | 3849 IncrementalMarking::Initialize(); |
| 3787 } | 3850 } |
| 3788 | 3851 |
| (...skipping 17 matching lines...) |
| 3806 MarkBit mark_bit = Marking::MarkBitFrom(host); | 3869 MarkBit mark_bit = Marking::MarkBitFrom(host); |
| 3807 if (Marking::IsBlack(mark_bit)) { | 3870 if (Marking::IsBlack(mark_bit)) { |
| 3808 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 3871 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
| 3809 RecordRelocSlot(host, &rinfo, target); | 3872 RecordRelocSlot(host, &rinfo, target); |
| 3810 } | 3873 } |
| 3811 } | 3874 } |
| 3812 } | 3875 } |
| 3813 | 3876 |
| 3814 } // namespace internal | 3877 } // namespace internal |
| 3815 } // namespace v8 | 3878 } // namespace v8 |