Chromium Code Reviews

Unified Diff: third_party/WebKit/Source/platform/heap/ThreadState.h

Issue 2816033003: Replace ASSERT with DCHECK_op in platform/heap (Closed)
Patch Set: Replace ASSERT with DCHECK_op in platform/heap Created 3 years, 8 months ago
  1 /*
  2  * Copyright (C) 2013 Google Inc. All rights reserved.
  3  *
  4  * Redistribution and use in source and binary forms, with or without
  5  * modification, are permitted provided that the following conditions are
  6  * met:
  7  *
  8  *     * Redistributions of source code must retain the above copyright
  9  * notice, this list of conditions and the following disclaimer.
 10  *     * Redistributions in binary form must reproduce the above
(...skipping 128 matching lines...)
139
140  private:
141   ThreadState* state_;
142 };
143
144 class SweepForbiddenScope final {
145   STACK_ALLOCATED();
146
147  public:
148   explicit SweepForbiddenScope(ThreadState* state) : state_(state) {
149 -   ASSERT(!state_->sweep_forbidden_);
149 +   DCHECK(!state_->sweep_forbidden_);
150     state_->sweep_forbidden_ = true;
151   }
152   ~SweepForbiddenScope() {
153 -   ASSERT(state_->sweep_forbidden_);
153 +   DCHECK(state_->sweep_forbidden_);
154     state_->sweep_forbidden_ = false;
155   }
156
157  private:
158   ThreadState* state_;
159 };
160
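A minimal usage sketch of this RAII guard (the calling function is hypothetical, and the sketch assumes these scope classes are nested inside ThreadState, as their access to sweep_forbidden_ suggests):

    // Hypothetical caller; only ThreadState::SweepForbiddenScope itself
    // comes from this header. The guard marks a region in which sweeping
    // must not start, and the paired DCHECKs catch unbalanced or nested
    // use on the same thread.
    void FinalizeWithoutSweeping(blink::ThreadState* state) {
      blink::ThreadState::SweepForbiddenScope scope(state);
      // ... work that must not trigger sweeping; constructing a second
      // SweepForbiddenScope here would fail the constructor's
      // DCHECK(!state_->sweep_forbidden_) ...
    }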
161 // Used to denote when access to unmarked objects is allowed but we shouldn't
162 // resurrect them by making new references (e.g. during weak processing and
163 // pre-finalizers).
(...skipping 187 matching lines...)
351   void EnterSafePoint(BlinkGC::StackState, void*);
352   void LeaveSafePoint();
353
354   void RecordStackEnd(intptr_t* end_of_stack) { end_of_stack_ = end_of_stack; }
355   NO_SANITIZE_ADDRESS void CopyStackUntilSafePointScope();
356
357   // Get one of the heap structures for this thread.
358   // The thread heap is split into multiple heap parts based on object types
359   // and object sizes.
360   BaseArena* Arena(int arena_index) const {
361 -   ASSERT(0 <= arena_index);
361 +   DCHECK_LE(0, arena_index);
362 -   ASSERT(arena_index < BlinkGC::kNumberOfArenas);
362 +   DCHECK_LT(arena_index, BlinkGC::kNumberOfArenas);
363     return arenas_[arena_index];
364   }
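For context on the _op spellings introduced above: unlike a plain DCHECK(a <= b), the comparison macros from base/logging.h capture both operands and include their values in the failure message. A minimal sketch (the function and its parameter are hypothetical, standing in for the BlinkGC constant used in the diff):

    #include "base/logging.h"

    void CheckArenaIndex(int arena_index, int number_of_arenas) {
      // On failure these print e.g.
      // "Check failed: arena_index < number_of_arenas (12 vs. 12)",
      // whereas DCHECK(arena_index < number_of_arenas) reports only the
      // stringified condition, not the operand values.
      DCHECK_LE(0, arena_index);
      DCHECK_LT(arena_index, number_of_arenas);
    }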
365
366 #if DCHECK_IS_ON()
367   // Infrastructure to determine if an address is within one of the
368   // address ranges for the Blink heap. If the address is in the Blink
369   // heap the containing heap page is returned.
370   BasePage* FindPageFromAddress(Address);
371   BasePage* FindPageFromAddress(const void* pointer) {
372     return FindPageFromAddress(
(...skipping 52 matching lines...)
425   }
426
427   // By entering a gc-forbidden scope, conservative GCs will not
428   // be allowed while handling an out-of-line allocation request.
429   // Intended for use when constructing subclasses of GC mixins, where
430   // the object being constructed cannot be safely traced & marked
431   // fully should a GC be allowed while its subclasses are being
432   // constructed.
433   void EnterGCForbiddenScopeIfNeeded(
434       GarbageCollectedMixinConstructorMarker* gc_mixin_marker) {
435 -   ASSERT(CheckThread());
435 +   DCHECK(CheckThread());
436     if (!gc_mixin_marker_) {
437       EnterMixinConstructionScope();
438       gc_mixin_marker_ = gc_mixin_marker;
439     }
440   }
441   void LeaveGCForbiddenScopeIfNeeded(
442       GarbageCollectedMixinConstructorMarker* gc_mixin_marker) {
443 -   ASSERT(CheckThread());
443 +   DCHECK(CheckThread());
444     if (gc_mixin_marker_ == gc_mixin_marker) {
445       LeaveMixinConstructionScope();
446       gc_mixin_marker_ = nullptr;
447     }
448   }
449
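A hedged sketch of how this enter/leave pair is meant to be used (the wrapper function is hypothetical; the real call sites live in the GC mixin construction machinery):

    // Hypothetical wrapper: passing the same marker to both calls means
    // only the caller that opened the scope closes it. Enter is a no-op
    // once a marker is recorded, and Leave is a no-op for any other
    // marker, so nested mixin constructors cannot end the scope early.
    void ConstructMixinPart(
        blink::ThreadState* state,
        blink::GarbageCollectedMixinConstructorMarker* marker) {
      state->EnterGCForbiddenScopeIfNeeded(marker);
      // ... initialize the partially-constructed object; a conservative
      // GC here could trace a half-built object, which the forbidden
      // scope prevents ...
      state->LeaveGCForbiddenScopeIfNeeded(marker);
    }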
450   // VectorBackingArena() returns an arena that the vector allocation should
451   // use. We have four vector arenas and want to choose the best arena here.
452   //
453   // The goal is to improve the success rate where expand and
(...skipping 13 matching lines...)
467   // To implement the heuristics, we add an arena age to each arena. The age
468   // is updated if:
469   //
470   // - a vector on the arena is expanded; or
471   // - a vector that meets the condition (*) is allocated on the arena
472   //
473   // (*) More than 33% of the same type of vectors have been promptly
474   //     freed since the last GC.
475   //
476   BaseArena* VectorBackingArena(size_t gc_info_index) {
477 -   ASSERT(CheckThread());
477 +   DCHECK(CheckThread());
478     size_t entry_index = gc_info_index & kLikelyToBePromptlyFreedArrayMask;
479     --likely_to_be_promptly_freed_[entry_index];
480     int arena_index = vector_backing_arena_index_;
481     // If likely_to_be_promptly_freed_[entry_index] > 0, that means that
482     // more than 33% of vectors of the type have been promptly freed
483     // since the last GC.
484     if (likely_to_be_promptly_freed_[entry_index] > 0) {
485       arena_ages_[arena_index] = ++current_arena_ages_;
486       vector_backing_arena_index_ =
487           ArenaIndexOfVectorArenaLeastRecentlyExpanded(
488               BlinkGC::kVector1ArenaIndex, BlinkGC::kVector4ArenaIndex);
489     }
490 -   ASSERT(IsVectorArenaIndex(arena_index));
490 +   DCHECK(IsVectorArenaIndex(arena_index));
491     return arenas_[arena_index];
492   }
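A standalone sketch of the least-recently-expanded selection this code relies on (assumed shape; the real helper, ArenaIndexOfVectorArenaLeastRecentlyExpanded, is a member declared elsewhere in this class):

    #include <cstddef>

    // Assumed shape: scan the vector arenas in [begin, end] and pick the
    // one whose recorded age (a counter bumped whenever an arena is
    // expanded or favored, as in VectorBackingArena above) is smallest,
    // i.e. the arena least recently chosen for growing vectors.
    int LeastRecentlyExpandedArena(const size_t* arena_ages,
                                   int begin_arena_index,
                                   int end_arena_index) {
      int min_index = begin_arena_index;
      for (int i = begin_arena_index + 1; i <= end_arena_index; ++i) {
        if (arena_ages[i] < arena_ages[min_index])
          min_index = i;
      }
      return min_index;
    }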
493   BaseArena* ExpandedVectorBackingArena(size_t gc_info_index);
494   static bool IsVectorArenaIndex(int arena_index) {
495     return BlinkGC::kVector1ArenaIndex <= arena_index &&
496            arena_index <= BlinkGC::kVector4ArenaIndex;
497   }
498   void AllocationPointAdjusted(int arena_index);
499   void PromptlyFreed(size_t gc_info_index);
500
(...skipping 212 matching lines...)
713 template <ThreadAffinity affinity>
714 class ThreadStateFor;
715
716 template <>
717 class ThreadStateFor<kMainThreadOnly> {
718   STATIC_ONLY(ThreadStateFor);
719
720  public:
721   static ThreadState* GetState() {
722     // This specialization must only be used from the main thread.
723 -   ASSERT(ThreadState::Current()->IsMainThread());
723 +   DCHECK(ThreadState::Current()->IsMainThread());
724     return ThreadState::MainThreadState();
725   }
726 };
727
728 template <>
729 class ThreadStateFor<kAnyThread> {
730   STATIC_ONLY(ThreadStateFor);
731
732  public:
733   static ThreadState* GetState() { return ThreadState::Current(); }
734 };
735
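A minimal usage sketch of the two specializations (variable names are illustrative; only the types and enumerators come from this header):

    // Statically known to run on the main thread: returns the cached
    // main-thread state and, in debug builds, DCHECKs the affinity.
    blink::ThreadState* main_state =
        blink::ThreadStateFor<blink::kMainThreadOnly>::GetState();

    // Callable from any thread: falls back to the per-thread lookup.
    blink::ThreadState* any_state =
        blink::ThreadStateFor<blink::kAnyThread>::GetState();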
736 }  // namespace blink
737
738 #endif  // ThreadState_h
