| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 110 matching lines...) |
| 121 invalidate_dead_objects_in_wrappers_marking_deque_(nullptr), | 121 invalidate_dead_objects_in_wrappers_marking_deque_(nullptr), |
| 122 #if defined(ADDRESS_SANITIZER) | 122 #if defined(ADDRESS_SANITIZER) |
| 123 asan_fake_stack_(__asan_get_current_fake_stack()), | 123 asan_fake_stack_(__asan_get_current_fake_stack()), |
| 124 #endif | 124 #endif |
| 125 #if defined(LEAK_SANITIZER) | 125 #if defined(LEAK_SANITIZER) |
| 126 m_disabledStaticPersistentsRegistration(0), | 126 m_disabledStaticPersistentsRegistration(0), |
| 127 #endif | 127 #endif |
| 128 allocated_object_size_(0), | 128 allocated_object_size_(0), |
| 129 marked_object_size_(0), | 129 marked_object_size_(0), |
| 130 reported_memory_to_v8_(0) { | 130 reported_memory_to_v8_(0) { |
| 131 ASSERT(CheckThread()); | 131 DCHECK(CheckThread()); |
| 132 ASSERT(!**thread_specific_); | 132 DCHECK(!**thread_specific_); |
| 133 **thread_specific_ = this; | 133 **thread_specific_ = this; |
| 134 | 134 |
| 135 heap_ = WTF::WrapUnique(new ThreadHeap(this)); | 135 heap_ = WTF::WrapUnique(new ThreadHeap(this)); |
| 136 | 136 |
| 137 for (int arena_index = 0; arena_index < BlinkGC::kLargeObjectArenaIndex; | 137 for (int arena_index = 0; arena_index < BlinkGC::kLargeObjectArenaIndex; |
| 138 arena_index++) | 138 arena_index++) |
| 139 arenas_[arena_index] = new NormalPageArena(this, arena_index); | 139 arenas_[arena_index] = new NormalPageArena(this, arena_index); |
| 140 arenas_[BlinkGC::kLargeObjectArenaIndex] = | 140 arenas_[BlinkGC::kLargeObjectArenaIndex] = |
| 141 new LargeObjectArena(this, BlinkGC::kLargeObjectArenaIndex); | 141 new LargeObjectArena(this, BlinkGC::kLargeObjectArenaIndex); |
| 142 | 142 |
| 143 likely_to_be_promptly_freed_ = | 143 likely_to_be_promptly_freed_ = |
| 144 WrapArrayUnique(new int[kLikelyToBePromptlyFreedArraySize]); | 144 WrapArrayUnique(new int[kLikelyToBePromptlyFreedArraySize]); |
| 145 ClearArenaAges(); | 145 ClearArenaAges(); |
| 146 } | 146 } |
| 147 | 147 |
| 148 ThreadState::~ThreadState() { | 148 ThreadState::~ThreadState() { |
| 149 ASSERT(CheckThread()); | 149 DCHECK(CheckThread()); |
| 150 if (IsMainThread()) | 150 if (IsMainThread()) |
| 151 DCHECK_EQ(Heap().HeapStats().AllocatedSpace(), 0u); | 151 DCHECK_EQ(Heap().HeapStats().AllocatedSpace(), 0u); |
| 152 CHECK(GcState() == ThreadState::kNoGCScheduled); | 152 CHECK_EQ(GcState(), ThreadState::kNoGCScheduled); |
| 153 | 153 |
| 154 for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) | 154 for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) |
| 155 delete arenas_[i]; | 155 delete arenas_[i]; |
| 156 | 156 |
| 157 **thread_specific_ = nullptr; | 157 **thread_specific_ = nullptr; |
| 158 } | 158 } |
| 159 | 159 |
| 160 void ThreadState::AttachMainThread() { | 160 void ThreadState::AttachMainThread() { |
| 161 thread_specific_ = new WTF::ThreadSpecific<ThreadState*>(); | 161 thread_specific_ = new WTF::ThreadSpecific<ThreadState*>(); |
| 162 new (main_thread_state_storage_) ThreadState(); | 162 new (main_thread_state_storage_) ThreadState(); |
| 163 } | 163 } |
| 164 | 164 |
| 165 void ThreadState::AttachCurrentThread() { | 165 void ThreadState::AttachCurrentThread() { |
| 166 new ThreadState(); | 166 new ThreadState(); |
| 167 } | 167 } |
| 168 | 168 |
| 169 void ThreadState::DetachCurrentThread() { | 169 void ThreadState::DetachCurrentThread() { |
| 170 ThreadState* state = Current(); | 170 ThreadState* state = Current(); |
| 171 state->RunTerminationGC(); | 171 state->RunTerminationGC(); |
| 172 delete state; | 172 delete state; |
| 173 } | 173 } |
| 174 | 174 |
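A usage sketch of the attach/detach pair above, assuming a hypothetical worker entry point (real call sites live in Blink's threading glue, outside this file; the include path is an assumption):

```cpp
#include "platform/heap/ThreadState.h"  // assumed include path for this file's header

// Hypothetical worker entry point, for illustration only.
void WorkerThreadMainSketch() {
  blink::ThreadState::AttachCurrentThread();  // installs **thread_specific_ for this thread
  // ... run the thread's loop; Oilpan allocation is legal from here on ...
  blink::ThreadState::DetachCurrentThread();  // termination GC, then deletes the state
}
```

AttachMainThread() differs only in placement-constructing the state into static storage instead of heap-allocating it.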
| 175 void ThreadState::RemoveAllPages() { | 175 void ThreadState::RemoveAllPages() { |
| 176 ASSERT(CheckThread()); | 176 DCHECK(CheckThread()); |
| 177 for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) | 177 for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) |
| 178 arenas_[i]->RemoveAllPages(); | 178 arenas_[i]->RemoveAllPages(); |
| 179 } | 179 } |
| 180 | 180 |
| 181 void ThreadState::RunTerminationGC() { | 181 void ThreadState::RunTerminationGC() { |
| 182 if (IsMainThread()) { | 182 if (IsMainThread()) { |
| 183 RemoveAllPages(); | 183 RemoveAllPages(); |
| 184 return; | 184 return; |
| 185 } | 185 } |
| 186 ASSERT(CheckThread()); | 186 DCHECK(CheckThread()); |
| 187 | 187 |
| 188 // Finish sweeping. | 188 // Finish sweeping. |
| 189 CompleteSweep(); | 189 CompleteSweep(); |
| 190 | 190 |
| 191 ReleaseStaticPersistentNodes(); | 191 ReleaseStaticPersistentNodes(); |
| 192 | 192 |
| 193 ProcessHeap::GetCrossThreadPersistentRegion() | 193 ProcessHeap::GetCrossThreadPersistentRegion() |
| 194 .PrepareForThreadStateTermination(this); | 194 .PrepareForThreadStateTermination(this); |
| 195 | 195 |
| 196 // Do thread local GC's as long as the count of thread local Persistents | 196 // Do thread-local GCs as long as the count of thread-local Persistents |
| 197 // changes and is above zero. | 197 // changes and is above zero. |
| 198 int old_count = -1; | 198 int old_count = -1; |
| 199 int current_count = GetPersistentRegion()->NumberOfPersistents(); | 199 int current_count = GetPersistentRegion()->NumberOfPersistents(); |
| 200 ASSERT(current_count >= 0); | 200 DCHECK_GE(current_count, 0); |
| 201 while (current_count != old_count) { | 201 while (current_count != old_count) { |
| 202 CollectGarbage(BlinkGC::kNoHeapPointersOnStack, BlinkGC::kGCWithSweep, | 202 CollectGarbage(BlinkGC::kNoHeapPointersOnStack, BlinkGC::kGCWithSweep, |
| 203 BlinkGC::kThreadTerminationGC); | 203 BlinkGC::kThreadTerminationGC); |
| 204 // Release the thread-local static persistents that were | 204 // Release the thread-local static persistents that were |
| 205 // instantiated while running the termination GC. | 205 // instantiated while running the termination GC. |
| 206 ReleaseStaticPersistentNodes(); | 206 ReleaseStaticPersistentNodes(); |
| 207 old_count = current_count; | 207 old_count = current_count; |
| 208 current_count = GetPersistentRegion()->NumberOfPersistents(); | 208 current_count = GetPersistentRegion()->NumberOfPersistents(); |
| 209 } | 209 } |
| 210 // We should not have any persistents left when getting to this point, | 210 // We should not have any persistents left when getting to this point, |
| 211 // if we have it is probably a bug so adding a debug ASSERT to catch this. | 211 // if we do, it is probably a bug, so add a debug DCHECK to catch this. |
| 212 ASSERT(!current_count); | 212 DCHECK(!current_count); |
| 213 // All of pre-finalizers should be consumed. | 213 // All pre-finalizers should have been consumed. |
| 214 ASSERT(ordered_pre_finalizers_.IsEmpty()); | 214 DCHECK(ordered_pre_finalizers_.IsEmpty()); |
| 215 CHECK_EQ(GcState(), kNoGCScheduled); | 215 CHECK_EQ(GcState(), kNoGCScheduled); |
| 216 | 216 |
| 217 RemoveAllPages(); | 217 RemoveAllPages(); |
| 218 } | 218 } |
| 219 | 219 |
| 220 NO_SANITIZE_ADDRESS | 220 NO_SANITIZE_ADDRESS |
| 221 void ThreadState::VisitAsanFakeStackForPointer(Visitor* visitor, Address ptr) { | 221 void ThreadState::VisitAsanFakeStackForPointer(Visitor* visitor, Address ptr) { |
| 222 #if defined(ADDRESS_SANITIZER) | 222 #if defined(ADDRESS_SANITIZER) |
| 223 Address* start = reinterpret_cast<Address*>(start_of_stack_); | 223 Address* start = reinterpret_cast<Address*>(start_of_stack_); |
| 224 Address* end = reinterpret_cast<Address*>(end_of_stack_); | 224 Address* end = reinterpret_cast<Address*>(end_of_stack_); |
| (...skipping 190 matching lines...) |
| 415 | 415 |
| 416 // If we're consuming too much memory, trigger a conservative GC | 416 // If we're consuming too much memory, trigger a conservative GC |
| 417 // aggressively. This is a safe guard to avoid OOM. | 417 // aggressively. This is a safeguard to avoid OOM. |
| 418 bool ThreadState::ShouldForceMemoryPressureGC() { | 418 bool ThreadState::ShouldForceMemoryPressureGC() { |
| 419 if (TotalMemorySize() < 300 * 1024 * 1024) | 419 if (TotalMemorySize() < 300 * 1024 * 1024) |
| 420 return false; | 420 return false; |
| 421 return JudgeGCThreshold(0, 0, 1.5); | 421 return JudgeGCThreshold(0, 0, 1.5); |
| 422 } | 422 } |
| 423 | 423 |
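A rough reading of the policy above, as a standalone sketch (JudgeGCThreshold's exact semantics are defined elsewhere; the "grew past 1.5x" interpretation of JudgeGCThreshold(0, 0, 1.5) is an assumption):

```cpp
#include <cstddef>

// Sketch only; the growth-factor reading of JudgeGCThreshold(0, 0, 1.5)
// is an assumption, not its verified definition.
bool ShouldForceMemoryPressureGCSketch(size_t total_memory_size,
                                       size_t current_object_size,
                                       size_t object_size_at_last_gc) {
  if (total_memory_size < 300 * 1024 * 1024)  // below 300 MB: never force
    return false;
  return current_object_size > object_size_at_last_gc * 3 / 2;  // 1.5x growth
}
```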
| 424 void ThreadState::ScheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gc_type) { | 424 void ThreadState::ScheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gc_type) { |
| 425 ASSERT(CheckThread()); | 425 DCHECK(CheckThread()); |
| 426 ThreadHeap::ReportMemoryUsageForTracing(); | 426 ThreadHeap::ReportMemoryUsageForTracing(); |
| 427 | 427 |
| 428 #if PRINT_HEAP_STATS | 428 #if PRINT_HEAP_STATS |
| 429 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", | 429 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", |
| 430 gcType == BlinkGC::V8MajorGC ? "MajorGC" : "MinorGC"); | 430 gc_type == BlinkGC::kV8MajorGC ? "MajorGC" : "MinorGC"); |
| 431 #endif | 431 #endif |
| 432 | 432 |
| 433 if (IsGCForbidden()) | 433 if (IsGCForbidden()) |
| 434 return; | 434 return; |
| 435 | 435 |
| 436 // This completeSweep() will do nothing in common cases since we've | 436 // This completeSweep() will do nothing in common cases since we've |
| 437 // called completeSweep() before V8 starts minor/major GCs. | 437 // called completeSweep() before V8 starts minor/major GCs. |
| 438 CompleteSweep(); | 438 CompleteSweep(); |
| 439 ASSERT(!IsSweepingInProgress()); | 439 DCHECK(!IsSweepingInProgress()); |
| 440 ASSERT(!SweepForbidden()); | 440 DCHECK(!SweepForbidden()); |
| 441 | 441 |
| 442 if ((gc_type == BlinkGC::kV8MajorGC && ShouldForceMemoryPressureGC()) || | 442 if ((gc_type == BlinkGC::kV8MajorGC && ShouldForceMemoryPressureGC()) || |
| 443 ShouldScheduleV8FollowupGC()) { | 443 ShouldScheduleV8FollowupGC()) { |
| 444 #if PRINT_HEAP_STATS | 444 #if PRINT_HEAP_STATS |
| 445 dataLogF("Scheduled PreciseGC\n"); | 445 dataLogF("Scheduled PreciseGC\n"); |
| 446 #endif | 446 #endif |
| 447 SchedulePreciseGC(); | 447 SchedulePreciseGC(); |
| 448 return; | 448 return; |
| 449 } | 449 } |
| 450 if (gc_type == BlinkGC::kV8MajorGC && ShouldScheduleIdleGC()) { | 450 if (gc_type == BlinkGC::kV8MajorGC && ShouldScheduleIdleGC()) { |
| (...skipping 12 matching lines...) |
| 463 // TODO(haraken): It's a bit too late for a major GC to schedule | 463 // TODO(haraken): It's a bit too late for a major GC to schedule |
| 464 // completeSweep() here, because gcPrologue for a major GC is called | 464 // completeSweep() here, because gcPrologue for a major GC is called |
| 465 // not at the point where the major GC started but at the point where | 465 // not at the point where the major GC started but at the point where |
| 466 // the major GC requests object grouping. | 466 // the major GC requests object grouping. |
| 467 if (gc_type == BlinkGC::kV8MajorGC) | 467 if (gc_type == BlinkGC::kV8MajorGC) |
| 468 CompleteSweep(); | 468 CompleteSweep(); |
| 469 } | 469 } |
| 470 | 470 |
| 471 void ThreadState::SchedulePageNavigationGCIfNeeded( | 471 void ThreadState::SchedulePageNavigationGCIfNeeded( |
| 472 float estimated_removal_ratio) { | 472 float estimated_removal_ratio) { |
| 473 ASSERT(CheckThread()); | 473 DCHECK(CheckThread()); |
| 474 ThreadHeap::ReportMemoryUsageForTracing(); | 474 ThreadHeap::ReportMemoryUsageForTracing(); |
| 475 | 475 |
| 476 #if PRINT_HEAP_STATS | 476 #if PRINT_HEAP_STATS |
| 477 dataLogF( | 477 dataLogF( |
| 478 "ThreadState::schedulePageNavigationGCIfNeeded " | 478 "ThreadState::schedulePageNavigationGCIfNeeded " |
| 479 "(estimatedRemovalRatio=%.2lf)\n", | 479 "(estimatedRemovalRatio=%.2lf)\n", |
| 480 estimatedRemovalRatio); | 480 estimated_removal_ratio); |
| 481 #endif | 481 #endif |
| 482 | 482 |
| 483 if (IsGCForbidden()) | 483 if (IsGCForbidden()) |
| 484 return; | 484 return; |
| 485 | 485 |
| 486 // Finish on-going lazy sweeping. | 486 // Finish on-going lazy sweeping. |
| 487 // TODO(haraken): It might not make sense to force completeSweep() for all | 487 // TODO(haraken): It might not make sense to force completeSweep() for all |
| 488 // page navigations. | 488 // page navigations. |
| 489 CompleteSweep(); | 489 CompleteSweep(); |
| 490 ASSERT(!IsSweepingInProgress()); | 490 DCHECK(!IsSweepingInProgress()); |
| 491 ASSERT(!SweepForbidden()); | 491 DCHECK(!SweepForbidden()); |
| 492 | 492 |
| 493 if (ShouldForceMemoryPressureGC()) { | 493 if (ShouldForceMemoryPressureGC()) { |
| 494 #if PRINT_HEAP_STATS | 494 #if PRINT_HEAP_STATS |
| 495 dataLogF("Scheduled MemoryPressureGC\n"); | 495 dataLogF("Scheduled MemoryPressureGC\n"); |
| 496 #endif | 496 #endif |
| 497 CollectGarbage(BlinkGC::kHeapPointersOnStack, BlinkGC::kGCWithoutSweep, | 497 CollectGarbage(BlinkGC::kHeapPointersOnStack, BlinkGC::kGCWithoutSweep, |
| 498 BlinkGC::kMemoryPressureGC); | 498 BlinkGC::kMemoryPressureGC); |
| 499 return; | 499 return; |
| 500 } | 500 } |
| 501 if (ShouldSchedulePageNavigationGC(estimated_removal_ratio)) { | 501 if (ShouldSchedulePageNavigationGC(estimated_removal_ratio)) { |
| 502 #if PRINT_HEAP_STATS | 502 #if PRINT_HEAP_STATS |
| 503 dataLogF("Scheduled PageNavigationGC\n"); | 503 dataLogF("Scheduled PageNavigationGC\n"); |
| 504 #endif | 504 #endif |
| 505 SchedulePageNavigationGC(); | 505 SchedulePageNavigationGC(); |
| 506 } | 506 } |
| 507 } | 507 } |
| 508 | 508 |
| 509 void ThreadState::SchedulePageNavigationGC() { | 509 void ThreadState::SchedulePageNavigationGC() { |
| 510 ASSERT(CheckThread()); | 510 DCHECK(CheckThread()); |
| 511 ASSERT(!IsSweepingInProgress()); | 511 DCHECK(!IsSweepingInProgress()); |
| 512 SetGCState(kPageNavigationGCScheduled); | 512 SetGCState(kPageNavigationGCScheduled); |
| 513 } | 513 } |
| 514 | 514 |
| 515 void ThreadState::ScheduleGCIfNeeded() { | 515 void ThreadState::ScheduleGCIfNeeded() { |
| 516 ASSERT(CheckThread()); | 516 DCHECK(CheckThread()); |
| 517 ThreadHeap::ReportMemoryUsageForTracing(); | 517 ThreadHeap::ReportMemoryUsageForTracing(); |
| 518 | 518 |
| 519 #if PRINT_HEAP_STATS | 519 #if PRINT_HEAP_STATS |
| 520 dataLogF("ThreadState::scheduleGCIfNeeded\n"); | 520 dataLogF("ThreadState::scheduleGCIfNeeded\n"); |
| 521 #endif | 521 #endif |
| 522 | 522 |
| 523 // Allocation is allowed during sweeping, but those allocations should not | 523 // Allocation is allowed during sweeping, but those allocations should not |
| 524 // trigger nested GCs. | 524 // trigger nested GCs. |
| 525 if (IsGCForbidden()) | 525 if (IsGCForbidden()) |
| 526 return; | 526 return; |
| 527 | 527 |
| 528 if (IsSweepingInProgress()) | 528 if (IsSweepingInProgress()) |
| 529 return; | 529 return; |
| 530 ASSERT(!SweepForbidden()); | 530 DCHECK(!SweepForbidden()); |
| 531 | 531 |
| 532 ReportMemoryToV8(); | 532 ReportMemoryToV8(); |
| 533 | 533 |
| 534 if (ShouldForceMemoryPressureGC()) { | 534 if (ShouldForceMemoryPressureGC()) { |
| 535 CompleteSweep(); | 535 CompleteSweep(); |
| 536 if (ShouldForceMemoryPressureGC()) { | 536 if (ShouldForceMemoryPressureGC()) { |
| 537 #if PRINT_HEAP_STATS | 537 #if PRINT_HEAP_STATS |
| 538 dataLogF("Scheduled MemoryPressureGC\n"); | 538 dataLogF("Scheduled MemoryPressureGC\n"); |
| 539 #endif | 539 #endif |
| 540 CollectGarbage(BlinkGC::kHeapPointersOnStack, BlinkGC::kGCWithoutSweep, | 540 CollectGarbage(BlinkGC::kHeapPointersOnStack, BlinkGC::kGCWithoutSweep, |
| (...skipping 16 matching lines...) |
| 557 if (ShouldScheduleIdleGC()) { | 557 if (ShouldScheduleIdleGC()) { |
| 558 #if PRINT_HEAP_STATS | 558 #if PRINT_HEAP_STATS |
| 559 dataLogF("Scheduled IdleGC\n"); | 559 dataLogF("Scheduled IdleGC\n"); |
| 560 #endif | 560 #endif |
| 561 ScheduleIdleGC(); | 561 ScheduleIdleGC(); |
| 562 return; | 562 return; |
| 563 } | 563 } |
| 564 } | 564 } |
| 565 | 565 |
| 566 ThreadState* ThreadState::FromObject(const void* object) { | 566 ThreadState* ThreadState::FromObject(const void* object) { |
| 567 ASSERT(object); | 567 DCHECK(object); |
| 568 BasePage* page = PageFromObject(object); | 568 BasePage* page = PageFromObject(object); |
| 569 ASSERT(page); | 569 DCHECK(page); |
| 570 ASSERT(page->Arena()); | 570 DCHECK(page->Arena()); |
| 571 return page->Arena()->GetThreadState(); | 571 return page->Arena()->GetThreadState(); |
| 572 } | 572 } |
| 573 | 573 |
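FromObject() leans on PageFromObject(), defined elsewhere; a minimal sketch of the usual recovery trick, assuming pages are aligned to a power-of-two size (the constant below is illustrative, and guard pages are ignored):

```cpp
#include <cstdint>

constexpr uintptr_t kBlinkPageSizeSketch = uintptr_t{1} << 17;  // assumed 128 KiB pages

struct BasePageSketch;  // stand-in for the page header at the page start

inline BasePageSketch* PageFromObjectSketch(const void* object) {
  // Mask off the low bits: any interior pointer maps back to its page base.
  uintptr_t address = reinterpret_cast<uintptr_t>(object);
  return reinterpret_cast<BasePageSketch*>(address & ~(kBlinkPageSizeSketch - 1));
}
```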
| 574 void ThreadState::PerformIdleGC(double deadline_seconds) { | 574 void ThreadState::PerformIdleGC(double deadline_seconds) { |
| 575 ASSERT(CheckThread()); | 575 DCHECK(CheckThread()); |
| 576 ASSERT(Platform::Current()->CurrentThread()->Scheduler()); | 576 DCHECK(Platform::Current()->CurrentThread()->Scheduler()); |
| 577 | 577 |
| 578 if (GcState() != kIdleGCScheduled) | 578 if (GcState() != kIdleGCScheduled) |
| 579 return; | 579 return; |
| 580 | 580 |
| 581 if (IsGCForbidden()) { | 581 if (IsGCForbidden()) { |
| 582 // If GC is forbidden at this point, try again. | 582 // If GC is forbidden at this point, try again. |
| 583 ScheduleIdleGC(); | 583 ScheduleIdleGC(); |
| 584 return; | 584 return; |
| 585 } | 585 } |
| 586 | 586 |
| (...skipping 11 matching lines...) |
| 598 } | 598 } |
| 599 | 599 |
| 600 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", | 600 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", |
| 601 idle_delta_in_seconds, "estimatedMarkingTime", | 601 idle_delta_in_seconds, "estimatedMarkingTime", |
| 602 heap_->HeapStats().EstimatedMarkingTime()); | 602 heap_->HeapStats().EstimatedMarkingTime()); |
| 603 CollectGarbage(BlinkGC::kNoHeapPointersOnStack, BlinkGC::kGCWithoutSweep, | 603 CollectGarbage(BlinkGC::kNoHeapPointersOnStack, BlinkGC::kGCWithoutSweep, |
| 604 BlinkGC::kIdleGC); | 604 BlinkGC::kIdleGC); |
| 605 } | 605 } |
| 606 | 606 |
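The elided lines above compute how much idle time remains and compare it against the heap's estimated marking time; a hedged restatement of that deadline test (the reschedule details are hidden by the diff):

```cpp
// Returns true if a full mark fits in the remaining idle period.
// Sketch under stated assumptions; times are in seconds, matching the
// trace event's idleDeltaInSeconds.
bool MarkingFitsInIdlePeriod(double deadline_seconds,
                             double now_seconds,
                             double estimated_marking_time_seconds) {
  double idle_delta_in_seconds = deadline_seconds - now_seconds;
  return idle_delta_in_seconds > estimated_marking_time_seconds;
}
```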
| 607 void ThreadState::PerformIdleLazySweep(double deadline_seconds) { | 607 void ThreadState::PerformIdleLazySweep(double deadline_seconds) { |
| 608 ASSERT(CheckThread()); | 608 DCHECK(CheckThread()); |
| 609 | 609 |
| 610 // If we are not in a sweeping phase, there is nothing to do here. | 610 // If we are not in a sweeping phase, there is nothing to do here. |
| 611 if (!IsSweepingInProgress()) | 611 if (!IsSweepingInProgress()) |
| 612 return; | 612 return; |
| 613 | 613 |
| 614 // This check is here to prevent performIdleLazySweep() from being called | 614 // This check is here to prevent performIdleLazySweep() from being called |
| 615 // recursively. I'm not sure if it can happen but it would be safer to have | 615 // recursively. I'm not sure if it can happen but it would be safer to have |
| 616 // the check just in case. | 616 // the check just in case. |
| 617 if (SweepForbidden()) | 617 if (SweepForbidden()) |
| 618 return; | 618 return; |
| (...skipping 48 matching lines...) |
| 667 // Some threads (e.g. PPAPI thread) don't have a scheduler. | 667 // Some threads (e.g. PPAPI thread) don't have a scheduler. |
| 668 if (!Platform::Current()->CurrentThread()->Scheduler()) | 668 if (!Platform::Current()->CurrentThread()->Scheduler()) |
| 669 return; | 669 return; |
| 670 | 670 |
| 671 Platform::Current()->CurrentThread()->Scheduler()->PostIdleTask( | 671 Platform::Current()->CurrentThread()->Scheduler()->PostIdleTask( |
| 672 BLINK_FROM_HERE, | 672 BLINK_FROM_HERE, |
| 673 WTF::Bind(&ThreadState::PerformIdleLazySweep, WTF::Unretained(this))); | 673 WTF::Bind(&ThreadState::PerformIdleLazySweep, WTF::Unretained(this))); |
| 674 } | 674 } |
| 675 | 675 |
| 676 void ThreadState::SchedulePreciseGC() { | 676 void ThreadState::SchedulePreciseGC() { |
| 677 ASSERT(CheckThread()); | 677 DCHECK(CheckThread()); |
| 678 if (IsSweepingInProgress()) { | 678 if (IsSweepingInProgress()) { |
| 679 SetGCState(kSweepingAndPreciseGCScheduled); | 679 SetGCState(kSweepingAndPreciseGCScheduled); |
| 680 return; | 680 return; |
| 681 } | 681 } |
| 682 | 682 |
| 683 SetGCState(kPreciseGCScheduled); | 683 SetGCState(kPreciseGCScheduled); |
| 684 } | 684 } |
| 685 | 685 |
| 686 namespace { | 686 namespace { |
| 687 | 687 |
| (...skipping 22 matching lines...) |
| 710 | 710 |
| 711 } // namespace | 711 } // namespace |
| 712 | 712 |
| 713 #define VERIFY_STATE_TRANSITION(condition) \ | 713 #define VERIFY_STATE_TRANSITION(condition) \ |
| 714 if (UNLIKELY(!(condition))) \ | 714 if (UNLIKELY(!(condition))) \ |
| 715 UnexpectedGCState(gc_state_) | 715 UnexpectedGCState(gc_state_) |
| 716 | 716 |
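One sharpening note on the macro above: as written it is an unguarded `if`, so a call site of the form `if (x) VERIFY_STATE_TRANSITION(c); else ...` would bind the `else` to the macro's hidden `if`. A conventional hardening, not part of this change, wraps the body:

```cpp
// Hypothetical variant, shown only to illustrate the do/while(0) idiom;
// behavior at existing call sites is unchanged.
#define VERIFY_STATE_TRANSITION(condition) \
  do {                                     \
    if (UNLIKELY(!(condition)))            \
      UnexpectedGCState(gc_state_);        \
  } while (0)
```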
| 717 void ThreadState::SetGCState(GCState gc_state) { | 717 void ThreadState::SetGCState(GCState gc_state) { |
| 718 switch (gc_state) { | 718 switch (gc_state) { |
| 719 case kNoGCScheduled: | 719 case kNoGCScheduled: |
| 720 ASSERT(CheckThread()); | 720 DCHECK(CheckThread()); |
| 721 VERIFY_STATE_TRANSITION(gc_state_ == kSweeping || | 721 VERIFY_STATE_TRANSITION(gc_state_ == kSweeping || |
| 722 gc_state_ == kSweepingAndIdleGCScheduled); | 722 gc_state_ == kSweepingAndIdleGCScheduled); |
| 723 break; | 723 break; |
| 724 case kIdleGCScheduled: | 724 case kIdleGCScheduled: |
| 725 case kPreciseGCScheduled: | 725 case kPreciseGCScheduled: |
| 726 case kFullGCScheduled: | 726 case kFullGCScheduled: |
| 727 case kPageNavigationGCScheduled: | 727 case kPageNavigationGCScheduled: |
| 728 ASSERT(CheckThread()); | 728 DCHECK(CheckThread()); |
| 729 VERIFY_STATE_TRANSITION( | 729 VERIFY_STATE_TRANSITION( |
| 730 gc_state_ == kNoGCScheduled || gc_state_ == kIdleGCScheduled || | 730 gc_state_ == kNoGCScheduled || gc_state_ == kIdleGCScheduled || |
| 731 gc_state_ == kPreciseGCScheduled || gc_state_ == kFullGCScheduled || | 731 gc_state_ == kPreciseGCScheduled || gc_state_ == kFullGCScheduled || |
| 732 gc_state_ == kPageNavigationGCScheduled || gc_state_ == kSweeping || | 732 gc_state_ == kPageNavigationGCScheduled || gc_state_ == kSweeping || |
| 733 gc_state_ == kSweepingAndIdleGCScheduled || | 733 gc_state_ == kSweepingAndIdleGCScheduled || |
| 734 gc_state_ == kSweepingAndPreciseGCScheduled); | 734 gc_state_ == kSweepingAndPreciseGCScheduled); |
| 735 CompleteSweep(); | 735 CompleteSweep(); |
| 736 break; | 736 break; |
| 737 case kGCRunning: | 737 case kGCRunning: |
| 738 ASSERT(!IsInGC()); | 738 DCHECK(!IsInGC()); |
| 739 VERIFY_STATE_TRANSITION(gc_state_ != kGCRunning); | 739 VERIFY_STATE_TRANSITION(gc_state_ != kGCRunning); |
| 740 break; | 740 break; |
| 741 case kSweeping: | 741 case kSweeping: |
| 742 DCHECK(IsInGC()); | 742 DCHECK(IsInGC()); |
| 743 ASSERT(CheckThread()); | 743 DCHECK(CheckThread()); |
| 744 VERIFY_STATE_TRANSITION(gc_state_ == kGCRunning); | 744 VERIFY_STATE_TRANSITION(gc_state_ == kGCRunning); |
| 745 break; | 745 break; |
| 746 case kSweepingAndIdleGCScheduled: | 746 case kSweepingAndIdleGCScheduled: |
| 747 case kSweepingAndPreciseGCScheduled: | 747 case kSweepingAndPreciseGCScheduled: |
| 748 ASSERT(CheckThread()); | 748 DCHECK(CheckThread()); |
| 749 VERIFY_STATE_TRANSITION(gc_state_ == kSweeping || | 749 VERIFY_STATE_TRANSITION(gc_state_ == kSweeping || |
| 750 gc_state_ == kSweepingAndIdleGCScheduled || | 750 gc_state_ == kSweepingAndIdleGCScheduled || |
| 751 gc_state_ == kSweepingAndPreciseGCScheduled); | 751 gc_state_ == kSweepingAndPreciseGCScheduled); |
| 752 break; | 752 break; |
| 753 default: | 753 default: |
| 754 NOTREACHED(); | 754 NOTREACHED(); |
| 755 } | 755 } |
| 756 gc_state_ = gc_state; | 756 gc_state_ = gc_state; |
| 757 } | 757 } |
| 758 | 758 |
| 759 #undef VERIFY_STATE_TRANSITION | 759 #undef VERIFY_STATE_TRANSITION |
| 760 | 760 |
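For readers tracing the switch above, a standalone restatement of one of its rules — the transitions that may return the thread to kNoGCScheduled (enum values mirror the names in the diff):

```cpp
enum GCStateSketch {
  kNoGCScheduled,
  kIdleGCScheduled,
  kPreciseGCScheduled,
  kFullGCScheduled,
  kPageNavigationGCScheduled,
  kGCRunning,
  kSweeping,
  kSweepingAndIdleGCScheduled,
  kSweepingAndPreciseGCScheduled,
};

// Only a finished sweep (with or without a pending idle GC) may return the
// thread to the quiescent state, per the first case of the switch above.
bool CanEnterNoGCScheduled(GCStateSketch current) {
  return current == kSweeping || current == kSweepingAndIdleGCScheduled;
}
```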
| 761 void ThreadState::RunScheduledGC(BlinkGC::StackState stack_state) { | 761 void ThreadState::RunScheduledGC(BlinkGC::StackState stack_state) { |
| 762 ASSERT(CheckThread()); | 762 DCHECK(CheckThread()); |
| 763 if (stack_state != BlinkGC::kNoHeapPointersOnStack) | 763 if (stack_state != BlinkGC::kNoHeapPointersOnStack) |
| 764 return; | 764 return; |
| 765 | 765 |
| 766 // If a safe point is entered while initiating a GC, we clearly do | 766 // If a safe point is entered while initiating a GC, we clearly do |
| 767 // not want to do another as part of that -- the safe point is only | 767 // not want to do another as part of that -- the safe point is only |
| 768 // entered after checking if a scheduled GC ought to run first. | 768 // entered after checking if a scheduled GC ought to run first. |
| 769 // Prevent that from happening by marking GCs as forbidden while | 769 // Prevent that from happening by marking GCs as forbidden while |
| 770 // one is initiated and later running. | 770 // one is initiated and later running. |
| 771 if (IsGCForbidden()) | 771 if (IsGCForbidden()) |
| 772 return; | 772 return; |
| (...skipping 19 matching lines...) |
| 792 } | 792 } |
| 793 | 793 |
| 794 void ThreadState::FlushHeapDoesNotContainCacheIfNeeded() { | 794 void ThreadState::FlushHeapDoesNotContainCacheIfNeeded() { |
| 795 if (should_flush_heap_does_not_contain_cache_) { | 795 if (should_flush_heap_does_not_contain_cache_) { |
| 796 heap_->FlushHeapDoesNotContainCache(); | 796 heap_->FlushHeapDoesNotContainCache(); |
| 797 should_flush_heap_does_not_contain_cache_ = false; | 797 should_flush_heap_does_not_contain_cache_ = false; |
| 798 } | 798 } |
| 799 } | 799 } |
| 800 | 800 |
| 801 void ThreadState::MakeConsistentForGC() { | 801 void ThreadState::MakeConsistentForGC() { |
| 802 ASSERT(IsInGC()); | 802 DCHECK(IsInGC()); |
| 803 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); | 803 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); |
| 804 for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) | 804 for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) |
| 805 arenas_[i]->MakeConsistentForGC(); | 805 arenas_[i]->MakeConsistentForGC(); |
| 806 } | 806 } |
| 807 | 807 |
| 808 void ThreadState::Compact() { | 808 void ThreadState::Compact() { |
| 809 if (!Heap().Compaction()->IsCompacting()) | 809 if (!Heap().Compaction()->IsCompacting()) |
| 810 return; | 810 return; |
| 811 | 811 |
| 812 SweepForbiddenScope scope(this); | 812 SweepForbiddenScope scope(this); |
| (...skipping 14 matching lines...) |
| 827 // TODO: implement bail out wrt any overall deadline, not compacting | 827 // TODO: implement bail out wrt any overall deadline, not compacting |
| 828 // the remaining arenas if the time budget has been exceeded. | 828 // the remaining arenas if the time budget has been exceeded. |
| 829 Heap().Compaction()->StartThreadCompaction(); | 829 Heap().Compaction()->StartThreadCompaction(); |
| 830 for (int i = BlinkGC::kHashTableArenaIndex; i >= BlinkGC::kVector1ArenaIndex; | 830 for (int i = BlinkGC::kHashTableArenaIndex; i >= BlinkGC::kVector1ArenaIndex; |
| 831 --i) | 831 --i) |
| 832 static_cast<NormalPageArena*>(arenas_[i])->SweepAndCompact(); | 832 static_cast<NormalPageArena*>(arenas_[i])->SweepAndCompact(); |
| 833 Heap().Compaction()->FinishThreadCompaction(); | 833 Heap().Compaction()->FinishThreadCompaction(); |
| 834 } | 834 } |
| 835 | 835 |
| 836 void ThreadState::MakeConsistentForMutator() { | 836 void ThreadState::MakeConsistentForMutator() { |
| 837 ASSERT(IsInGC()); | 837 DCHECK(IsInGC()); |
| 838 for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) | 838 for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) |
| 839 arenas_[i]->MakeConsistentForMutator(); | 839 arenas_[i]->MakeConsistentForMutator(); |
| 840 } | 840 } |
| 841 | 841 |
| 842 void ThreadState::PreGC() { | 842 void ThreadState::PreGC() { |
| 843 if (isolate_ && perform_cleanup_) | 843 if (isolate_ && perform_cleanup_) |
| 844 perform_cleanup_(isolate_); | 844 perform_cleanup_(isolate_); |
| 845 | 845 |
| 846 ASSERT(!IsInGC()); | 846 DCHECK(!IsInGC()); |
| 847 SetGCState(kGCRunning); | 847 SetGCState(kGCRunning); |
| 848 MakeConsistentForGC(); | 848 MakeConsistentForGC(); |
| 849 FlushHeapDoesNotContainCacheIfNeeded(); | 849 FlushHeapDoesNotContainCacheIfNeeded(); |
| 850 ClearArenaAges(); | 850 ClearArenaAges(); |
| 851 } | 851 } |
| 852 | 852 |
| 853 void ThreadState::PostGC(BlinkGC::GCType gc_type) { | 853 void ThreadState::PostGC(BlinkGC::GCType gc_type) { |
| 854 if (invalidate_dead_objects_in_wrappers_marking_deque_) | 854 if (invalidate_dead_objects_in_wrappers_marking_deque_) |
| 855 invalidate_dead_objects_in_wrappers_marking_deque_(isolate_); | 855 invalidate_dead_objects_in_wrappers_marking_deque_(isolate_); |
| 856 | 856 |
| (...skipping 79 matching lines...) |
| 936 // that would be fine. | 936 // that would be fine. |
| 937 ProcessHeap::GetCrossThreadPersistentRegion() | 937 ProcessHeap::GetCrossThreadPersistentRegion() |
| 938 .UnpoisonCrossThreadPersistents(); | 938 .UnpoisonCrossThreadPersistents(); |
| 939 } | 939 } |
| 940 #endif | 940 #endif |
| 941 | 941 |
| 942 void ThreadState::EagerSweep() { | 942 void ThreadState::EagerSweep() { |
| 943 #if defined(ADDRESS_SANITIZER) | 943 #if defined(ADDRESS_SANITIZER) |
| 944 PoisonEagerArena(); | 944 PoisonEagerArena(); |
| 945 #endif | 945 #endif |
| 946 ASSERT(CheckThread()); | 946 DCHECK(CheckThread()); |
| 947 // Some objects need to be finalized promptly and cannot be handled | 947 // Some objects need to be finalized promptly and cannot be handled |
| 948 // by lazy sweeping. Keep those in a designated heap and sweep it | 948 // by lazy sweeping. Keep those in a designated heap and sweep it |
| 949 // eagerly. | 949 // eagerly. |
| 950 ASSERT(IsSweepingInProgress()); | 950 DCHECK(IsSweepingInProgress()); |
| 951 | 951 |
| 952 // Mirroring the completeSweep() condition; see its comment. | 952 // Mirroring the completeSweep() condition; see its comment. |
| 953 if (SweepForbidden()) | 953 if (SweepForbidden()) |
| 954 return; | 954 return; |
| 955 | 955 |
| 956 SweepForbiddenScope scope(this); | 956 SweepForbiddenScope scope(this); |
| 957 ScriptForbiddenIfMainThreadScope script_forbidden_scope; | 957 ScriptForbiddenIfMainThreadScope script_forbidden_scope; |
| 958 | 958 |
| 959 double start_time = WTF::CurrentTimeMS(); | 959 double start_time = WTF::CurrentTimeMS(); |
| 960 arenas_[BlinkGC::kEagerSweepArenaIndex]->CompleteSweep(); | 960 arenas_[BlinkGC::kEagerSweepArenaIndex]->CompleteSweep(); |
| 961 AccumulateSweepingTime(WTF::CurrentTimeMS() - start_time); | 961 AccumulateSweepingTime(WTF::CurrentTimeMS() - start_time); |
| 962 } | 962 } |
| 963 | 963 |
| 964 void ThreadState::CompleteSweep() { | 964 void ThreadState::CompleteSweep() { |
| 965 ASSERT(CheckThread()); | 965 DCHECK(CheckThread()); |
| 966 // If we are not in a sweeping phase, there is nothing to do here. | 966 // If we are not in a sweeping phase, there is nothing to do here. |
| 967 if (!IsSweepingInProgress()) | 967 if (!IsSweepingInProgress()) |
| 968 return; | 968 return; |
| 969 | 969 |
| 970 // completeSweep() can be called recursively if finalizers can allocate | 970 // completeSweep() can be called recursively if finalizers can allocate |
| 971 // memory and the allocation triggers completeSweep(). This check prevents | 971 // memory and the allocation triggers completeSweep(). This check prevents |
| 972 // the sweeping from being executed recursively. | 972 // the sweeping from being executed recursively. |
| 973 if (SweepForbidden()) | 973 if (SweepForbidden()) |
| 974 return; | 974 return; |
| 975 | 975 |
| (...skipping 14 matching lines...) |
| 990 if (IsMainThread()) { | 990 if (IsMainThread()) { |
| 991 DEFINE_STATIC_LOCAL(CustomCountHistogram, complete_sweep_histogram, | 991 DEFINE_STATIC_LOCAL(CustomCountHistogram, complete_sweep_histogram, |
| 992 ("BlinkGC.CompleteSweep", 1, 10 * 1000, 50)); | 992 ("BlinkGC.CompleteSweep", 1, 10 * 1000, 50)); |
| 993 complete_sweep_histogram.Count(time_for_complete_sweep); | 993 complete_sweep_histogram.Count(time_for_complete_sweep); |
| 994 } | 994 } |
| 995 | 995 |
| 996 PostSweep(); | 996 PostSweep(); |
| 997 } | 997 } |
| 998 | 998 |
| 999 void ThreadState::PostSweep() { | 999 void ThreadState::PostSweep() { |
| 1000 ASSERT(CheckThread()); | 1000 DCHECK(CheckThread()); |
| 1001 ThreadHeap::ReportMemoryUsageForTracing(); | 1001 ThreadHeap::ReportMemoryUsageForTracing(); |
| 1002 | 1002 |
| 1003 if (IsMainThread()) { | 1003 if (IsMainThread()) { |
| 1004 double collection_rate = 0; | 1004 double collection_rate = 0; |
| 1005 if (heap_->HeapStats().ObjectSizeAtLastGC() > 0) | 1005 if (heap_->HeapStats().ObjectSizeAtLastGC() > 0) |
| 1006 collection_rate = 1 - 1.0 * heap_->HeapStats().MarkedObjectSize() / | 1006 collection_rate = 1 - 1.0 * heap_->HeapStats().MarkedObjectSize() / |
| 1007 heap_->HeapStats().ObjectSizeAtLastGC(); | 1007 heap_->HeapStats().ObjectSizeAtLastGC(); |
| 1008 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), | 1008 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), |
| 1009 "ThreadState::collectionRate", | 1009 "ThreadState::collectionRate", |
| 1010 static_cast<int>(100 * collection_rate)); | 1010 static_cast<int>(100 * collection_rate)); |
| (...skipping 80 matching lines...) |
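For concreteness, a worked instance of the collection-rate formula just above the skipped region, with invented sizes:

```cpp
// 100 MB of objects live at the last GC, 40 MB still marked after this one:
// collection_rate = 1 - 40/100 = 0.6, reported to tracing as 60.
double collection_rate =
    1 - 1.0 * (40.0 * 1024 * 1024) / (100.0 * 1024 * 1024);  // == 0.6
```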
| 1091 } | 1091 } |
| 1092 | 1092 |
| 1093 size_t ThreadState::ObjectPayloadSizeForTesting() { | 1093 size_t ThreadState::ObjectPayloadSizeForTesting() { |
| 1094 size_t object_payload_size = 0; | 1094 size_t object_payload_size = 0; |
| 1095 for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) | 1095 for (int i = 0; i < BlinkGC::kNumberOfArenas; ++i) |
| 1096 object_payload_size += arenas_[i]->ObjectPayloadSizeForTesting(); | 1096 object_payload_size += arenas_[i]->ObjectPayloadSizeForTesting(); |
| 1097 return object_payload_size; | 1097 return object_payload_size; |
| 1098 } | 1098 } |
| 1099 | 1099 |
| 1100 void ThreadState::SafePoint(BlinkGC::StackState stack_state) { | 1100 void ThreadState::SafePoint(BlinkGC::StackState stack_state) { |
| 1101 ASSERT(CheckThread()); | 1101 DCHECK(CheckThread()); |
| 1102 ThreadHeap::ReportMemoryUsageForTracing(); | 1102 ThreadHeap::ReportMemoryUsageForTracing(); |
| 1103 | 1103 |
| 1104 RunScheduledGC(stack_state); | 1104 RunScheduledGC(stack_state); |
| 1105 stack_state_ = BlinkGC::kHeapPointersOnStack; | 1105 stack_state_ = BlinkGC::kHeapPointersOnStack; |
| 1106 } | 1106 } |
| 1107 | 1107 |
| 1108 #ifdef ADDRESS_SANITIZER | 1108 #ifdef ADDRESS_SANITIZER |
| 1109 // When we are running under AddressSanitizer with | 1109 // When we are running under AddressSanitizer with |
| 1110 // detect_stack_use_after_return=1 then stack marker obtained from | 1110 // detect_stack_use_after_return=1 then stack marker obtained from |
| 1111 // SafePointScope will point into a fake stack. Detect this case by checking if | 1111 // SafePointScope will point into a fake stack. Detect this case by checking if |
| (...skipping 24 matching lines...) |
| 1136 | 1136 |
| 1137 static void EnterSafePointAfterPushRegisters(void*, | 1137 static void EnterSafePointAfterPushRegisters(void*, |
| 1138 ThreadState* state, | 1138 ThreadState* state, |
| 1139 intptr_t* stack_end) { | 1139 intptr_t* stack_end) { |
| 1140 state->RecordStackEnd(stack_end); | 1140 state->RecordStackEnd(stack_end); |
| 1141 state->CopyStackUntilSafePointScope(); | 1141 state->CopyStackUntilSafePointScope(); |
| 1142 } | 1142 } |
| 1143 | 1143 |
| 1144 void ThreadState::EnterSafePoint(BlinkGC::StackState stack_state, | 1144 void ThreadState::EnterSafePoint(BlinkGC::StackState stack_state, |
| 1145 void* scope_marker) { | 1145 void* scope_marker) { |
| 1146 ASSERT(CheckThread()); | 1146 DCHECK(CheckThread()); |
| 1147 #ifdef ADDRESS_SANITIZER | 1147 #ifdef ADDRESS_SANITIZER |
| 1148 if (stack_state == BlinkGC::kHeapPointersOnStack) | 1148 if (stack_state == BlinkGC::kHeapPointersOnStack) |
| 1149 scope_marker = AdjustScopeMarkerForAdressSanitizer(scope_marker); | 1149 scope_marker = AdjustScopeMarkerForAdressSanitizer(scope_marker); |
| 1150 #endif | 1150 #endif |
| 1151 ASSERT(stack_state == BlinkGC::kNoHeapPointersOnStack || scope_marker); | 1151 DCHECK(stack_state == BlinkGC::kNoHeapPointersOnStack || scope_marker); |
| 1152 RunScheduledGC(stack_state); | 1152 RunScheduledGC(stack_state); |
| 1153 stack_state_ = stack_state; | 1153 stack_state_ = stack_state; |
| 1154 safe_point_scope_marker_ = scope_marker; | 1154 safe_point_scope_marker_ = scope_marker; |
| 1155 PushAllRegisters(nullptr, this, EnterSafePointAfterPushRegisters); | 1155 PushAllRegisters(nullptr, this, EnterSafePointAfterPushRegisters); |
| 1156 } | 1156 } |
| 1157 | 1157 |
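EnterSafePoint() relies on PushAllRegisters(), which in the real tree is per-architecture assembly. A portable approximation of the idiom, for illustration only: setjmp spills the register file into a stack-resident jmp_buf, so the stack extent handed to the callback also covers pointers that were living only in registers.

```cpp
#include <csetjmp>
#include <cstdint>

using PushAllRegistersCallback = void (*)(void*, void* state, intptr_t* stack_end);

// Approximation only; the real implementation saves callee-saved registers
// explicitly and passes the exact stack pointer.
void PushAllRegistersSketch(void* arg, void* state,
                            PushAllRegistersCallback callback) {
  jmp_buf registers;
  (void)setjmp(registers);  // dump registers into this frame
  intptr_t stack_end = 0;   // approximate low-water mark of this frame
  callback(arg, state, &stack_end);
}
```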
| 1158 void ThreadState::LeaveSafePoint() { | 1158 void ThreadState::LeaveSafePoint() { |
| 1159 ASSERT(CheckThread()); | 1159 DCHECK(CheckThread()); |
| 1160 stack_state_ = BlinkGC::kHeapPointersOnStack; | 1160 stack_state_ = BlinkGC::kHeapPointersOnStack; |
| 1161 ClearSafePointScopeMarker(); | 1161 ClearSafePointScopeMarker(); |
| 1162 } | 1162 } |
| 1163 | 1163 |
| 1164 void ThreadState::ReportMemoryToV8() { | 1164 void ThreadState::ReportMemoryToV8() { |
| 1165 if (!isolate_) | 1165 if (!isolate_) |
| 1166 return; | 1166 return; |
| 1167 | 1167 |
| 1168 size_t current_heap_size = allocated_object_size_ + marked_object_size_; | 1168 size_t current_heap_size = allocated_object_size_ + marked_object_size_; |
| 1169 int64_t diff = static_cast<int64_t>(current_heap_size) - | 1169 int64_t diff = static_cast<int64_t>(current_heap_size) - |
| (...skipping 29 matching lines...) |
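ReportMemoryToV8() is truncated above after computing `diff`. The usual shape of such reporting (an assumption here, not the elided lines: v8::Isolate::AdjustAmountOfExternalAllocatedMemory is a real API taking a signed delta in bytes) tracks what was last reported and forwards the difference:

```cpp
#include <cstdint>

// Standalone sketch of the delta-reporting pattern; whether/how the elided
// lines batch the delta is not visible in this diff.
struct V8MemoryReporterSketch {
  int64_t reported_bytes = 0;

  int64_t DeltaFor(uint64_t current_heap_size) {
    int64_t diff = static_cast<int64_t>(current_heap_size) - reported_bytes;
    reported_bytes = static_cast<int64_t>(current_heap_size);
    return diff;  // forward to AdjustAmountOfExternalAllocatedMemory(diff)
  }
};
```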
| 1199 | 1199 |
| 1200 Address* to = reinterpret_cast<Address*>(safe_point_scope_marker_); | 1200 Address* to = reinterpret_cast<Address*>(safe_point_scope_marker_); |
| 1201 Address* from = reinterpret_cast<Address*>(end_of_stack_); | 1201 Address* from = reinterpret_cast<Address*>(end_of_stack_); |
| 1202 CHECK_LT(from, to); | 1202 CHECK_LT(from, to); |
| 1203 CHECK_LE(to, reinterpret_cast<Address*>(start_of_stack_)); | 1203 CHECK_LE(to, reinterpret_cast<Address*>(start_of_stack_)); |
| 1204 size_t slot_count = static_cast<size_t>(to - from); | 1204 size_t slot_count = static_cast<size_t>(to - from); |
| 1205 // Catch potential performance issues. | 1205 // Catch potential performance issues. |
| 1206 #if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 1206 #if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 1207 // ASan/LSan use more space on the stack and we therefore | 1207 // ASan/LSan use more space on the stack and we therefore |
| 1208 // increase the allowed stack copying for those builds. | 1208 // increase the allowed stack copying for those builds. |
| 1209 ASSERT(slot_count < 2048); | 1209 DCHECK_LT(slot_count, 2048u); |
| 1210 #else | 1210 #else |
| 1211 ASSERT(slot_count < 1024); | 1211 DCHECK_LT(slot_count, 1024u); |
| 1212 #endif | 1212 #endif |
| 1213 | 1213 |
| 1214 ASSERT(!safe_point_stack_copy_.size()); | 1214 DCHECK(!safe_point_stack_copy_.size()); |
| 1215 safe_point_stack_copy_.Resize(slot_count); | 1215 safe_point_stack_copy_.Resize(slot_count); |
| 1216 for (size_t i = 0; i < slot_count; ++i) { | 1216 for (size_t i = 0; i < slot_count; ++i) { |
| 1217 safe_point_stack_copy_[i] = from[i]; | 1217 safe_point_stack_copy_[i] = from[i]; |
| 1218 } | 1218 } |
| 1219 } | 1219 } |
| 1220 | 1220 |
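A worked instance of the slot arithmetic above, with invented addresses: pointer subtraction on `Address*` already divides by the word size, so the 1024/2048 limits are counts of word-sized slots, not bytes.

```cpp
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

// e.g. from = 0x7fff00000000, to = 0x7fff00000800 on a 64-bit build:
// (to - from) == 0x800 / 8 == 256 slots, well under the 1024-slot limit.
size_t SlotCountSketch(Address* from, Address* to) {
  return static_cast<size_t>(to - from);
}
```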
| 1221 void ThreadState::RegisterStaticPersistentNode( | 1221 void ThreadState::RegisterStaticPersistentNode( |
| 1222 PersistentNode* node, | 1222 PersistentNode* node, |
| 1223 PersistentClearCallback callback) { | 1223 PersistentClearCallback callback) { |
| 1224 #if defined(LEAK_SANITIZER) | 1224 #if defined(LEAK_SANITIZER) |
| 1225 if (m_disabledStaticPersistentsRegistration) | 1225 if (m_disabledStaticPersistentsRegistration) |
| 1226 return; | 1226 return; |
| 1227 #endif | 1227 #endif |
| 1228 | 1228 |
| 1229 ASSERT(!static_persistents_.Contains(node)); | 1229 DCHECK(!static_persistents_.Contains(node)); |
| 1230 static_persistents_.insert(node, callback); | 1230 static_persistents_.insert(node, callback); |
| 1231 } | 1231 } |
| 1232 | 1232 |
| 1233 void ThreadState::ReleaseStaticPersistentNodes() { | 1233 void ThreadState::ReleaseStaticPersistentNodes() { |
| 1234 HashMap<PersistentNode*, ThreadState::PersistentClearCallback> | 1234 HashMap<PersistentNode*, ThreadState::PersistentClearCallback> |
| 1235 static_persistents; | 1235 static_persistents; |
| 1236 static_persistents.swap(static_persistents_); | 1236 static_persistents.swap(static_persistents_); |
| 1237 | 1237 |
| 1238 PersistentRegion* persistent_region = GetPersistentRegion(); | 1238 PersistentRegion* persistent_region = GetPersistentRegion(); |
| 1239 for (const auto& it : static_persistents) | 1239 for (const auto& it : static_persistents) |
| 1240 persistent_region->ReleasePersistentNode(it.key, it.value); | 1240 persistent_region->ReleasePersistentNode(it.key, it.value); |
| 1241 } | 1241 } |
| 1242 | 1242 |
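The swap into a local map above is deliberate: if a release callback re-registers a static persistent, the registration lands in the now-empty member map instead of invalidating the ongoing iteration. A generic restatement of the idiom, with hypothetical types:

```cpp
// Sketch only; Map stands in for the HashMap of node -> clear callback.
template <typename Map, typename ReleaseFn>
void ReleaseAllSketch(Map& member_map, ReleaseFn release) {
  Map local;
  local.swap(member_map);  // member is empty before any callback runs
  for (auto& entry : local)
    release(entry);
}
```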
| 1243 void ThreadState::FreePersistentNode(PersistentNode* persistent_node) { | 1243 void ThreadState::FreePersistentNode(PersistentNode* persistent_node) { |
| 1244 PersistentRegion* persistent_region = GetPersistentRegion(); | 1244 PersistentRegion* persistent_region = GetPersistentRegion(); |
| 1245 persistent_region->FreePersistentNode(persistent_node); | 1245 persistent_region->FreePersistentNode(persistent_node); |
| 1246 // Do not allow static persistents to be freed before | 1246 // Do not allow static persistents to be freed before |
| 1247 // they're all released in releaseStaticPersistentNodes(). | 1247 // they're all released in releaseStaticPersistentNodes(). |
| 1248 // | 1248 // |
| 1249 // There's no fundamental reason why this couldn't be supported, | 1249 // There's no fundamental reason why this couldn't be supported, |
| 1250 // but no known use for it. | 1250 // but no known use for it. |
| 1251 ASSERT(!static_persistents_.Contains(persistent_node)); | 1251 DCHECK(!static_persistents_.Contains(persistent_node)); |
| 1252 } | 1252 } |
| 1253 | 1253 |
| 1254 #if defined(LEAK_SANITIZER) | 1254 #if defined(LEAK_SANITIZER) |
| 1255 void ThreadState::enterStaticReferenceRegistrationDisabledScope() { | 1255 void ThreadState::enterStaticReferenceRegistrationDisabledScope() { |
| 1256 m_disabledStaticPersistentsRegistration++; | 1256 m_disabledStaticPersistentsRegistration++; |
| 1257 } | 1257 } |
| 1258 | 1258 |
| 1259 void ThreadState::leaveStaticReferenceRegistrationDisabledScope() { | 1259 void ThreadState::leaveStaticReferenceRegistrationDisabledScope() { |
| 1260 ASSERT(m_disabledStaticPersistentsRegistration); | 1260 DCHECK(m_disabledStaticPersistentsRegistration); |
| 1261 m_disabledStaticPersistentsRegistration--; | 1261 m_disabledStaticPersistentsRegistration--; |
| 1262 } | 1262 } |
| 1263 #endif | 1263 #endif |
| 1264 | 1264 |
| 1265 void ThreadState::InvokePreFinalizers() { | 1265 void ThreadState::InvokePreFinalizers() { |
| 1266 ASSERT(CheckThread()); | 1266 DCHECK(CheckThread()); |
| 1267 ASSERT(!SweepForbidden()); | 1267 DCHECK(!SweepForbidden()); |
| 1268 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers"); | 1268 TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers"); |
| 1269 | 1269 |
| 1270 SweepForbiddenScope sweep_forbidden(this); | 1270 SweepForbiddenScope sweep_forbidden(this); |
| 1271 ScriptForbiddenIfMainThreadScope script_forbidden; | 1271 ScriptForbiddenIfMainThreadScope script_forbidden; |
| 1272 // Pre finalizers may access unmarked objects but are forbidden from | 1272 // Pre-finalizers may access unmarked objects but are forbidden from |
| 1273 // ressurecting them. | 1273 // resurrecting them. |
| 1274 ObjectResurrectionForbiddenScope object_resurrection_forbidden(this); | 1274 ObjectResurrectionForbiddenScope object_resurrection_forbidden(this); |
| 1275 | 1275 |
| 1276 double start_time = WTF::CurrentTimeMS(); | 1276 double start_time = WTF::CurrentTimeMS(); |
| 1277 if (!ordered_pre_finalizers_.IsEmpty()) { | 1277 if (!ordered_pre_finalizers_.IsEmpty()) { |
| (...skipping 35 matching lines...) |
| 1313 int end_arena_index) { | 1313 int end_arena_index) { |
| 1314 size_t min_arena_age = arena_ages_[begin_arena_index]; | 1314 size_t min_arena_age = arena_ages_[begin_arena_index]; |
| 1315 int arena_index_with_min_arena_age = begin_arena_index; | 1315 int arena_index_with_min_arena_age = begin_arena_index; |
| 1316 for (int arena_index = begin_arena_index + 1; arena_index <= end_arena_index; | 1316 for (int arena_index = begin_arena_index + 1; arena_index <= end_arena_index; |
| 1317 arena_index++) { | 1317 arena_index++) { |
| 1318 if (arena_ages_[arena_index] < min_arena_age) { | 1318 if (arena_ages_[arena_index] < min_arena_age) { |
| 1319 min_arena_age = arena_ages_[arena_index]; | 1319 min_arena_age = arena_ages_[arena_index]; |
| 1320 arena_index_with_min_arena_age = arena_index; | 1320 arena_index_with_min_arena_age = arena_index; |
| 1321 } | 1321 } |
| 1322 } | 1322 } |
| 1323 ASSERT(IsVectorArenaIndex(arena_index_with_min_arena_age)); | 1323 DCHECK(IsVectorArenaIndex(arena_index_with_min_arena_age)); |
| 1324 return arena_index_with_min_arena_age; | 1324 return arena_index_with_min_arena_age; |
| 1325 } | 1325 } |
| 1326 | 1326 |
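A worked example of the scan above, with invented ages: over ages {7, 3, 9, 3} on indices 0..3 the result is index 1 — the strict `<` means ties keep the earliest candidate, i.e. the least recently expanded arena.

```cpp
#include <cstddef>

// Standalone restatement; semantics match the loop above.
int IndexOfMinAgeSketch(const size_t* ages, int begin, int end) {
  size_t min_age = ages[begin];
  int min_index = begin;
  for (int i = begin + 1; i <= end; i++) {
    if (ages[i] < min_age) {  // strict '<' keeps the lower index on ties
      min_age = ages[i];
      min_index = i;
    }
  }
  return min_index;
}
```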
| 1327 BaseArena* ThreadState::ExpandedVectorBackingArena(size_t gc_info_index) { | 1327 BaseArena* ThreadState::ExpandedVectorBackingArena(size_t gc_info_index) { |
| 1328 ASSERT(CheckThread()); | 1328 DCHECK(CheckThread()); |
| 1329 size_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask; | 1329 size_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask; |
| 1330 --likely_to_be_promptly_freed_[entry_index]; | 1330 --likely_to_be_promptly_freed_[entry_index]; |
| 1331 int arena_index = vector_backing_arena_index_; | 1331 int arena_index = vector_backing_arena_index_; |
| 1332 arena_ages_[arena_index] = ++current_arena_ages_; | 1332 arena_ages_[arena_index] = ++current_arena_ages_; |
| 1333 vector_backing_arena_index_ = ArenaIndexOfVectorArenaLeastRecentlyExpanded( | 1333 vector_backing_arena_index_ = ArenaIndexOfVectorArenaLeastRecentlyExpanded( |
| 1334 BlinkGC::kVector1ArenaIndex, BlinkGC::kVector4ArenaIndex); | 1334 BlinkGC::kVector1ArenaIndex, BlinkGC::kVector4ArenaIndex); |
| 1335 return arenas_[arena_index]; | 1335 return arenas_[arena_index]; |
| 1336 } | 1336 } |
| 1337 | 1337 |
| 1338 void ThreadState::AllocationPointAdjusted(int arena_index) { | 1338 void ThreadState::AllocationPointAdjusted(int arena_index) { |
| 1339 arena_ages_[arena_index] = ++current_arena_ages_; | 1339 arena_ages_[arena_index] = ++current_arena_ages_; |
| 1340 if (vector_backing_arena_index_ == arena_index) | 1340 if (vector_backing_arena_index_ == arena_index) |
| 1341 vector_backing_arena_index_ = ArenaIndexOfVectorArenaLeastRecentlyExpanded( | 1341 vector_backing_arena_index_ = ArenaIndexOfVectorArenaLeastRecentlyExpanded( |
| 1342 BlinkGC::kVector1ArenaIndex, BlinkGC::kVector4ArenaIndex); | 1342 BlinkGC::kVector1ArenaIndex, BlinkGC::kVector4ArenaIndex); |
| 1343 } | 1343 } |
| 1344 | 1344 |
| 1345 void ThreadState::PromptlyFreed(size_t gc_info_index) { | 1345 void ThreadState::PromptlyFreed(size_t gc_info_index) { |
| 1346 ASSERT(CheckThread()); | 1346 DCHECK(CheckThread()); |
| 1347 size_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask; | 1347 size_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask; |
| 1348 // See the comment in vectorBackingArena() for why this is +3. | 1348 // See the comment in vectorBackingArena() for why this is +3. |
| 1349 likely_to_be_promptly_freed_[entry_index] += 3; | 1349 likely_to_be_promptly_freed_[entry_index] += 3; |
| 1350 } | 1350 } |
| 1351 | 1351 |
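The +3 here pairs with the -1 per expansion in ExpandedVectorBackingArena() above (the authoritative comment lives in vectorBackingArena(), outside this diff). A toy model of the counter's drift, under that reading:

```cpp
// Positive score == prompt frees keep pace with at least one third of
// backing expansions for this gc_info_index; this interpretation of the
// heuristic is an assumption, not taken from the elided comment.
int PromptlyFreedScoreSketch(int prompt_frees, int expansions) {
  return 3 * prompt_frees - expansions;
}
```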
| 1352 void ThreadState::TakeSnapshot(SnapshotType type) { | 1352 void ThreadState::TakeSnapshot(SnapshotType type) { |
| 1353 ASSERT(IsInGC()); | 1353 DCHECK(IsInGC()); |
| 1354 | 1354 |
| 1355 // 0 is used as index for freelist entries. Objects are indexed 1 to | 1355 // 0 is used as index for freelist entries. Objects are indexed 1 to |
| 1356 // gcInfoIndex. | 1356 // gcInfoIndex. |
| 1357 GCSnapshotInfo info(GCInfoTable::GcInfoIndex() + 1); | 1357 GCSnapshotInfo info(GCInfoTable::GcInfoIndex() + 1); |
| 1358 String thread_dump_name = String::Format("blink_gc/thread_%lu", | 1358 String thread_dump_name = String::Format("blink_gc/thread_%lu", |
| 1359 static_cast<unsigned long>(thread_)); | 1359 static_cast<unsigned long>(thread_)); |
| 1360 const String heaps_dump_name = thread_dump_name + "/heaps"; | 1360 const String heaps_dump_name = thread_dump_name + "/heaps"; |
| 1361 const String classes_dump_name = thread_dump_name + "/classes"; | 1361 const String classes_dump_name = thread_dump_name + "/classes"; |
| 1362 | 1362 |
| 1363 int number_of_heaps_reported = 0; | 1363 int number_of_heaps_reported = 0; |
| (...skipping 21 matching lines...) |
| 1385 SNAPSHOT_HEAP(EagerSweep); | 1385 SNAPSHOT_HEAP(EagerSweep); |
| 1386 SNAPSHOT_HEAP(Vector1); | 1386 SNAPSHOT_HEAP(Vector1); |
| 1387 SNAPSHOT_HEAP(Vector2); | 1387 SNAPSHOT_HEAP(Vector2); |
| 1388 SNAPSHOT_HEAP(Vector3); | 1388 SNAPSHOT_HEAP(Vector3); |
| 1389 SNAPSHOT_HEAP(Vector4); | 1389 SNAPSHOT_HEAP(Vector4); |
| 1390 SNAPSHOT_HEAP(InlineVector); | 1390 SNAPSHOT_HEAP(InlineVector); |
| 1391 SNAPSHOT_HEAP(HashTable); | 1391 SNAPSHOT_HEAP(HashTable); |
| 1392 SNAPSHOT_HEAP(LargeObject); | 1392 SNAPSHOT_HEAP(LargeObject); |
| 1393 FOR_EACH_TYPED_ARENA(SNAPSHOT_HEAP); | 1393 FOR_EACH_TYPED_ARENA(SNAPSHOT_HEAP); |
| 1394 | 1394 |
| 1395 ASSERT(number_of_heaps_reported == BlinkGC::kNumberOfArenas); | 1395 DCHECK_EQ(number_of_heaps_reported, BlinkGC::kNumberOfArenas); |
| 1396 | 1396 |
| 1397 #undef SNAPSHOT_HEAP | 1397 #undef SNAPSHOT_HEAP |
| 1398 | 1398 |
| 1399 if (type == SnapshotType::kFreelistSnapshot) | 1399 if (type == SnapshotType::kFreelistSnapshot) |
| 1400 return; | 1400 return; |
| 1401 | 1401 |
| 1402 size_t total_live_count = 0; | 1402 size_t total_live_count = 0; |
| 1403 size_t total_dead_count = 0; | 1403 size_t total_dead_count = 0; |
| 1404 size_t total_live_size = 0; | 1404 size_t total_live_size = 0; |
| 1405 size_t total_dead_size = 0; | 1405 size_t total_dead_size = 0; |
| (...skipping 153 matching lines...) |
| 1559 CollectGarbage(BlinkGC::kNoHeapPointersOnStack, BlinkGC::kGCWithSweep, | 1559 CollectGarbage(BlinkGC::kNoHeapPointersOnStack, BlinkGC::kGCWithSweep, |
| 1560 BlinkGC::kForcedGC); | 1560 BlinkGC::kForcedGC); |
| 1561 size_t live_objects = Heap().HeapStats().MarkedObjectSize(); | 1561 size_t live_objects = Heap().HeapStats().MarkedObjectSize(); |
| 1562 if (live_objects == previous_live_objects) | 1562 if (live_objects == previous_live_objects) |
| 1563 break; | 1563 break; |
| 1564 previous_live_objects = live_objects; | 1564 previous_live_objects = live_objects; |
| 1565 } | 1565 } |
| 1566 } | 1566 } |
| 1567 | 1567 |
| 1568 } // namespace blink | 1568 } // namespace blink |