Chromium Code Reviews

Diff: third_party/WebKit/Source/platform/heap/ThreadState.h

Issue 2619493003: Replace ASSERTs in platform/heap/ with DCHECKs
Patch Set: temp Created 3 years, 11 months ago
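The substance of the change is mechanical: Blink's legacy ASSERT macros become Chromium's DCHECK family, and `#if ENABLE(ASSERT)` guards become `#if DCHECK_IS_ON()`. A minimal sketch of the correspondence applied throughout the patch (variable names are illustrative):

    ASSERT(ptr);           // becomes DCHECK(ptr);
    ASSERT(a == b);        // becomes DCHECK_EQ(a, b);  logs both operands on failure
    ASSERT(0 <= index);    // becomes DCHECK_LE(0, index);
    ASSERT(index < size);  // becomes DCHECK_LT(index, size);
    #if ENABLE(ASSERT)     // becomes #if DCHECK_IS_ON()
    #endif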
/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
(...skipping 134 matching lines...)

 private:
  ThreadState* m_state;
};

class SweepForbiddenScope final {
  STACK_ALLOCATED();

 public:
  explicit SweepForbiddenScope(ThreadState* state) : m_state(state) {
-   ASSERT(!m_state->m_sweepForbidden);
+   DCHECK(!m_state->m_sweepForbidden);
    m_state->m_sweepForbidden = true;
  }
  ~SweepForbiddenScope() {
-   ASSERT(m_state->m_sweepForbidden);
+   DCHECK(m_state->m_sweepForbidden);
    m_state->m_sweepForbidden = false;
  }

 private:
  ThreadState* m_state;
};

void lockThreadAttachMutex();
void unlockThreadAttachMutex();

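SweepForbiddenScope is a stack-allocated RAII guard: the constructor checks the flag is clear and sets it, the destructor checks it is still set and clears it, so the forbidden state cannot leak past any exit path. A hedged usage sketch (doWorkThatMustNotSweep() is illustrative, not a Blink function):

    void example(ThreadState* state) {
      // Constructor DCHECKs the flag was clear, then sets it.
      ThreadState::SweepForbiddenScope scope(state);
      doWorkThatMustNotSweep();  // lazy sweeping is off within this block
    }  // destructor DCHECKs the flag is still set, then clears it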
(...skipping 20 matching lines...)
  return **s_threadSpecific;
#else
  uintptr_t dummy;
  uintptr_t addressDiff =
      s_mainThreadStackStart - reinterpret_cast<uintptr_t>(&dummy);
  // This is a fast way to judge if we are in the main thread.
  // If |&dummy| is within |s_mainThreadUnderestimatedStackSize| byte from
  // the stack start of the main thread, we judge that we are in
  // the main thread.
  if (LIKELY(addressDiff < s_mainThreadUnderestimatedStackSize)) {
-   ASSERT(**s_threadSpecific == mainThreadState());
+   DCHECK_EQ(**s_threadSpecific, mainThreadState());
    return mainThreadState();
  }
  // TLS lookup is slow.
  return **s_threadSpecific;
#endif
}

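The #else branch above avoids a TLS lookup by testing whether the address of a local variable lies within a conservative estimate of the main thread's stack; unsigned wraparound lets one comparison reject both out-of-range directions. A self-contained sketch of the same trick, with illustrative globals standing in for s_mainThreadStackStart and s_mainThreadUnderestimatedStackSize:

    #include <cstdint>

    uintptr_t g_mainStackStart = 0;           // highest address of the main thread's stack
    uintptr_t g_mainStackSizeLowerBound = 0;  // underestimated stack size, in bytes

    bool probablyOnMainThread() {
      uintptr_t dummy;
      // Stacks grow downward, so on the main thread &dummy sits below the
      // stack start and the difference is small and positive.
      uintptr_t diff = g_mainStackStart - reinterpret_cast<uintptr_t>(&dummy);
      // On another thread the subtraction wraps to a huge unsigned value,
      // so a single comparison covers both failure directions.
      return diff < g_mainStackSizeLowerBound;
    }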
static ThreadState* mainThreadState() {
  return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage);
}

static ThreadState* fromObject(const void*);

bool isMainThread() const { return this == mainThreadState(); }
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
bool checkThread() const { return m_thread == currentThread(); }
#endif

ThreadHeap& heap() const { return *m_heap; }

// When ThreadState is detaching from non-main thread its
// heap is expected to be empty (because it is going away).
// Perform registered cleanup tasks and garbage collection
// to sweep away any objects that are left on this heap.
// We assert that nothing must remain after this cleanup.
(...skipping 145 matching lines...)
bool isAtSafePoint() const { return m_atSafePoint; }

void addInterruptor(std::unique_ptr<BlinkGCInterruptor>);

void recordStackEnd(intptr_t* endOfStack) { m_endOfStack = endOfStack; }

// Get one of the heap structures for this thread.
// The thread heap is split into multiple heap parts based on object types
// and object sizes.
BaseArena* arena(int arenaIndex) const {
- ASSERT(0 <= arenaIndex);
- ASSERT(arenaIndex < BlinkGC::NumberOfArenas);
+ DCHECK_LE(0, arenaIndex);
+ DCHECK_LT(arenaIndex, BlinkGC::NumberOfArenas);
  return m_arenas[arenaIndex];
}

-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
// Infrastructure to determine if an address is within one of the
// address ranges for the Blink heap. If the address is in the Blink
// heap the containing heap page is returned.
BasePage* findPageFromAddress(Address);
BasePage* findPageFromAddress(const void* pointer) {
  return findPageFromAddress(
      reinterpret_cast<Address>(const_cast<void*>(pointer)));
}
#endif

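Rewriting ASSERT(0 <= arenaIndex) as DCHECK_LE(0, arenaIndex) is more than a rename: the two-operand DCHECK macros print both values when the check fails, which a plain boolean DCHECK cannot. An illustrative comparison (the logged text approximates Chromium's format; the values are made up):

    DCHECK(arenaIndex < BlinkGC::NumberOfArenas);
    // on failure: Check failed: arenaIndex < BlinkGC::NumberOfArenas.

    DCHECK_LT(arenaIndex, BlinkGC::NumberOfArenas);
    // on failure: Check failed: arenaIndex < BlinkGC::NumberOfArenas (42 vs. 12)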
(...skipping 50 matching lines...)
}

// By entering a gc-forbidden scope, conservative GCs will not
// be allowed while handling an out-of-line allocation request.
// Intended used when constructing subclasses of GC mixins, where
// the object being constructed cannot be safely traced & marked
// fully should a GC be allowed while its subclasses are being
// constructed.
void enterGCForbiddenScopeIfNeeded(
    GarbageCollectedMixinConstructorMarker* gcMixinMarker) {
- ASSERT(checkThread());
+ DCHECK(checkThread());
  if (!m_gcMixinMarker) {
    enterMixinConstructionScope();
    m_gcMixinMarker = gcMixinMarker;
  }
}
void leaveGCForbiddenScopeIfNeeded(
    GarbageCollectedMixinConstructorMarker* gcMixinMarker) {
- ASSERT(checkThread());
+ DCHECK(checkThread());
  if (m_gcMixinMarker == gcMixinMarker) {
    leaveMixinConstructionScope();
    m_gcMixinMarker = nullptr;
  }
}

// vectorBackingArena() returns an arena that the vector allocation should
// use. We have four vector arenas and want to choose the best arena here.
//
// The goal is to improve the succession rate where expand and
(...skipping 13 matching lines...)
// To implement the heuristics, we add an arenaAge to each arena. The arenaAge
// is updated if:
//
// - a vector on the arena is expanded; or
// - a vector that meets the condition (*) is allocated on the arena
//
//   (*) More than 33% of the same type of vectors have been promptly
//       freed since the last GC.
//
BaseArena* vectorBackingArena(size_t gcInfoIndex) {
- ASSERT(checkThread());
+ DCHECK(checkThread());
  size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask;
  --m_likelyToBePromptlyFreed[entryIndex];
  int arenaIndex = m_vectorBackingArenaIndex;
  // If m_likelyToBePromptlyFreed[entryIndex] > 0, that means that
  // more than 33% of vectors of the type have been promptly freed
  // since the last GC.
  if (m_likelyToBePromptlyFreed[entryIndex] > 0) {
    m_arenaAges[arenaIndex] = ++m_currentArenaAges;
    m_vectorBackingArenaIndex = arenaIndexOfVectorArenaLeastRecentlyExpanded(
        BlinkGC::Vector1ArenaIndex, BlinkGC::Vector4ArenaIndex);
  }
- ASSERT(isVectorArenaIndex(arenaIndex));
+ DCHECK(isVectorArenaIndex(arenaIndex));
  return m_arenas[arenaIndex];
}
BaseArena* expandedVectorBackingArena(size_t gcInfoIndex);
static bool isVectorArenaIndex(int arenaIndex) {
  return BlinkGC::Vector1ArenaIndex <= arenaIndex &&
         arenaIndex <= BlinkGC::Vector4ArenaIndex;
}
void allocationPointAdjusted(int arenaIndex);
void promptlyFreed(size_t gcInfoIndex);

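The 33% threshold in the comment falls out of simple counter arithmetic, assuming promptlyFreed() credits the per-type counter by 3 for each prompt free while vectorBackingArena() above debits 1 per allocation: the counter stays positive exactly when prompt frees exceed one third of allocations. A standalone sketch of that bookkeeping (not Blink code; the +3 credit is an assumption consistent with the comment):

    #include <cassert>

    void counterSketch() {
      int counter = 0;
      for (int i = 0; i < 9; ++i) --counter;     // 9 allocations debit 1 each: -9
      for (int i = 0; i < 4; ++i) counter += 3;  // 4 prompt frees credit 3 each: +12
      assert(counter > 0);  // 4/9 > 1/3, so this type counts as promptly freed
    }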
(...skipping 32 matching lines...)

// Register the pre-finalizer for the |self| object. The class T must have
// USING_PRE_FINALIZER().
template <typename T>
class PrefinalizerRegistration final {
 public:
  PrefinalizerRegistration(T* self) {
    static_assert(sizeof(&T::invokePreFinalizer) > 0,
                  "USING_PRE_FINALIZER(T) must be defined.");
    ThreadState* state = ThreadState::current();
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
    DCHECK(state->checkThread());
#endif
    DCHECK(!state->sweepForbidden());
    DCHECK(!state->m_orderedPreFinalizers.contains(
        PreFinalizer(self, T::invokePreFinalizer)));
    state->m_orderedPreFinalizers.add(
        PreFinalizer(self, T::invokePreFinalizer));
  }
};

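Client classes do not instantiate PrefinalizerRegistration directly; the USING_PRE_FINALIZER() macro wires it up and defines the invokePreFinalizer member that the static_assert above probes for. A hedged usage sketch (class and method names are illustrative):

    class ResourceHolder final : public GarbageCollected<ResourceHolder> {
      USING_PRE_FINALIZER(ResourceHolder, dispose);

     public:
      // Runs before the object is swept, while the heap objects it
      // references are still safe to touch.
      void dispose() { /* release external resources */ }

      DEFINE_INLINE_TRACE() {}
    };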
(...skipping 184 matching lines...)
template <ThreadAffinity affinity>
class ThreadStateFor;

template <>
class ThreadStateFor<MainThreadOnly> {
  STATIC_ONLY(ThreadStateFor);

 public:
  static ThreadState* state() {
    // This specialization must only be used from the main thread.
-   ASSERT(ThreadState::current()->isMainThread());
+   DCHECK(ThreadState::current()->isMainThread());
    return ThreadState::mainThreadState();
  }
};

template <>
class ThreadStateFor<AnyThread> {
  STATIC_ONLY(ThreadStateFor);

 public:
  static ThreadState* state() { return ThreadState::current(); }
};
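The two specializations let templated allocation code resolve the cheap main-thread accessor at compile time when a type's thread affinity is known, and fall back to the TLS-based current() otherwise. A sketch of the dispatch, assuming Blink's ThreadingTrait<T> supplies the affinity:

    template <typename T>
    ThreadState* stateForType() {
      // MainThreadOnly -> mainThreadState() behind a DCHECK;
      // AnyThread      -> ThreadState::current() via TLS.
      return ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
    }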

}  // namespace blink

#endif  // ThreadState_h