Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/heap/Heap.h

Issue 2619493003: Replace ASSERTs in platform/heap/ with DCHECKs
Patch Set: temp Created 3 years, 11 months ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 216 matching lines...)
227 class PLATFORM_EXPORT ThreadHeap { 227 class PLATFORM_EXPORT ThreadHeap {
228 public: 228 public:
229 ThreadHeap(); 229 ThreadHeap();
230 ~ThreadHeap(); 230 ~ThreadHeap();
231 231
232 // Returns true for main thread's heap. 232 // Returns true for main thread's heap.
233 // TODO(keishi): Per-thread-heap will return false. 233 // TODO(keishi): Per-thread-heap will return false.
234 bool isMainThreadHeap() { return this == ThreadHeap::mainThreadHeap(); } 234 bool isMainThreadHeap() { return this == ThreadHeap::mainThreadHeap(); }
235 static ThreadHeap* mainThreadHeap() { return s_mainThreadHeap; } 235 static ThreadHeap* mainThreadHeap() { return s_mainThreadHeap; }
236 236
237 #if ENABLE(ASSERT) 237 #if DCHECK_IS_ON()
238 bool isAtSafePoint(); 238 bool isAtSafePoint();
239 BasePage* findPageFromAddress(Address); 239 BasePage* findPageFromAddress(Address);
240 #endif 240 #endif
241 241
242 template <typename T> 242 template <typename T>
243 static inline bool isHeapObjectAlive(const T* object) { 243 static inline bool isHeapObjectAlive(const T* object) {
244 static_assert(sizeof(T), "T must be fully defined"); 244 static_assert(sizeof(T), "T must be fully defined");
245 // The strongification of collections relies on the fact that once a 245 // The strongification of collections relies on the fact that once a
246 // collection has been strongified, there is no way that it can contain 246 // collection has been strongified, there is no way that it can contain
247 // non-live entries, so no entries will be removed. Since you can't set 247 // non-live entries, so no entries will be removed. Since you can't set
(...skipping 75 matching lines...)
323 // such a reference. 323 // such a reference.
324 template <typename T> 324 template <typename T>
325 NO_SANITIZE_ADDRESS static bool willObjectBeLazilySwept( 325 NO_SANITIZE_ADDRESS static bool willObjectBeLazilySwept(
326 const T* objectPointer) { 326 const T* objectPointer) {
327 static_assert(IsGarbageCollectedType<T>::value, 327 static_assert(IsGarbageCollectedType<T>::value,
328 "only objects deriving from GarbageCollected can be used."); 328 "only objects deriving from GarbageCollected can be used.");
329 BasePage* page = pageFromObject(objectPointer); 329 BasePage* page = pageFromObject(objectPointer);
330 // Page has been swept and it is still alive. 330 // Page has been swept and it is still alive.
331 if (page->hasBeenSwept()) 331 if (page->hasBeenSwept())
332 return false; 332 return false;
333 ASSERT(page->arena()->getThreadState()->isSweepingInProgress()); 333 DCHECK(page->arena()->getThreadState()->isSweepingInProgress());
334 334
335 // If marked and alive, the object hasn't yet been swept, and won't 335 // If marked and alive, the object hasn't yet been swept, and won't
336 // be once its page is processed. 336 // be once its page is processed.
337 if (ThreadHeap::isHeapObjectAlive(const_cast<T*>(objectPointer))) 337 if (ThreadHeap::isHeapObjectAlive(const_cast<T*>(objectPointer)))
338 return false; 338 return false;
339 339
340 if (page->isLargeObjectPage()) 340 if (page->isLargeObjectPage())
341 return true; 341 return true;
342 342
343 // If the object is unmarked, it may be on the page currently being 343 // If the object is unmarked, it may be on the page currently being
(...skipping 27 matching lines...)
371 371
372 // Remove an item from the weak callback work list and call the callback 372 // Remove an item from the weak callback work list and call the callback
373 // with the visitor and the closure pointer. Returns false when there is 373 // with the visitor and the closure pointer. Returns false when there is
374 // nothing more to do. 374 // nothing more to do.
375 bool popAndInvokeGlobalWeakCallback(Visitor*); 375 bool popAndInvokeGlobalWeakCallback(Visitor*);
376 376
377 // Register an ephemeron table for fixed-point iteration. 377 // Register an ephemeron table for fixed-point iteration.
378 void registerWeakTable(void* containerObject, 378 void registerWeakTable(void* containerObject,
379 EphemeronCallback, 379 EphemeronCallback,
380 EphemeronCallback); 380 EphemeronCallback);
381 #if ENABLE(ASSERT) 381 #if DCHECK_IS_ON()
382 bool weakTableRegistered(const void*); 382 bool weakTableRegistered(const void*);
383 #endif 383 #endif
384 384
385 // Heap compaction registration methods: 385 // Heap compaction registration methods:
386 386
387 // Register |slot| as containing a reference to a movable heap object. 387 // Register |slot| as containing a reference to a movable heap object.
388 // 388 //
389 // When compaction moves the object pointed to by |*slot| to |newAddress|, 389 // When compaction moves the object pointed to by |*slot| to |newAddress|,
390 // |*slot| must be updated to hold |newAddress| instead. 390 // |*slot| must be updated to hold |newAddress| instead.
391 void registerMovingObjectReference(MovableReference*); 391 void registerMovingObjectReference(MovableReference*);
(...skipping 11 matching lines...)
403 MovingObjectCallback, 403 MovingObjectCallback,
404 void* callbackData); 404 void* callbackData);
405 405
406 BlinkGC::GCReason lastGCReason() { return m_lastGCReason; } 406 BlinkGC::GCReason lastGCReason() { return m_lastGCReason; }
407 RegionTree* getRegionTree() { return m_regionTree.get(); } 407 RegionTree* getRegionTree() { return m_regionTree.get(); }
408 408
409 static inline size_t allocationSizeFromSize(size_t size) { 409 static inline size_t allocationSizeFromSize(size_t size) {
410 // Add space for header. 410 // Add space for header.
411 size_t allocationSize = size + sizeof(HeapObjectHeader); 411 size_t allocationSize = size + sizeof(HeapObjectHeader);
412 // The allocation size calculation can overflow for large sizes. 412 // The allocation size calculation can overflow for large sizes.
413 RELEASE_ASSERT(allocationSize > size); 413 CHECK(allocationSize > size);
414 // Align size with allocation granularity. 414 // Align size with allocation granularity.
415 allocationSize = (allocationSize + allocationMask) & ~allocationMask; 415 allocationSize = (allocationSize + allocationMask) & ~allocationMask;
416 return allocationSize; 416 return allocationSize;
417 } 417 }
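
As a quick illustration of the arithmetic in allocationSizeFromSize() above, a standalone sketch follows. The granularity, mask, and header size are assumed stand-ins for the real constants defined elsewhere in platform/heap; only the overflow check and the alignment rounding mirror the function in this diff.

// Standalone sketch; kAllocationGranularity, kAllocationMask, and kHeaderSize
// are assumptions, not values taken from this patch.
#include <cassert>
#include <cstddef>

static const size_t kAllocationGranularity = 8;                    // assumed
static const size_t kAllocationMask = kAllocationGranularity - 1;  // assumed
static const size_t kHeaderSize = 8;  // stand-in for sizeof(HeapObjectHeader)

static size_t sketchAllocationSizeFromSize(size_t size) {
  size_t allocationSize = size + kHeaderSize;
  // CHECK() in the real code: a wrapped addition would yield a smaller value.
  assert(allocationSize > size);
  // Round up to the next multiple of the allocation granularity.
  allocationSize = (allocationSize + kAllocationMask) & ~kAllocationMask;
  return allocationSize;
}

int main() {
  // 20-byte payload + 8-byte header = 28 bytes, rounded up to 32.
  assert(sketchAllocationSizeFromSize(20) == 32);
  return 0;
}
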
418 static Address allocateOnArenaIndex(ThreadState*, 418 static Address allocateOnArenaIndex(ThreadState*,
419 size_t, 419 size_t,
420 int arenaIndex, 420 int arenaIndex,
421 size_t gcInfoIndex, 421 size_t gcInfoIndex,
422 const char* typeName); 422 const char* typeName);
423 template <typename T> 423 template <typename T>
(...skipping 18 matching lines...)
442 442
443 FreePagePool* getFreePagePool() { return m_freePagePool.get(); } 443 FreePagePool* getFreePagePool() { return m_freePagePool.get(); }
444 OrphanedPagePool* getOrphanedPagePool() { return m_orphanedPagePool.get(); } 444 OrphanedPagePool* getOrphanedPagePool() { return m_orphanedPagePool.get(); }
445 445
446 // This look-up uses the region search tree and a negative contains cache to 446 // This look-up uses the region search tree and a negative contains cache to
447 // provide an efficient mapping from arbitrary addresses to the containing 447 // provide an efficient mapping from arbitrary addresses to the containing
448 // heap-page if one exists. 448 // heap-page if one exists.
449 BasePage* lookupPageForAddress(Address); 449 BasePage* lookupPageForAddress(Address);
450 450
451 static const GCInfo* gcInfo(size_t gcInfoIndex) { 451 static const GCInfo* gcInfo(size_t gcInfoIndex) {
452 ASSERT(gcInfoIndex >= 1); 452 DCHECK_GE(gcInfoIndex, 1UL);
453 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); 453 DCHECK_LT(gcInfoIndex, gcInfoMaxIndex);
454 ASSERT(s_gcInfoTable); 454 DCHECK(s_gcInfoTable);
455 const GCInfo* info = s_gcInfoTable[gcInfoIndex]; 455 const GCInfo* info = s_gcInfoTable[gcInfoIndex];
456 ASSERT(info); 456 DCHECK(info);
457 return info; 457 return info;
458 } 458 }
459 459
460 static void reportMemoryUsageHistogram(); 460 static void reportMemoryUsageHistogram();
461 static void reportMemoryUsageForTracing(); 461 static void reportMemoryUsageForTracing();
462 462
463 HeapCompact* compaction(); 463 HeapCompact* compaction();
464 464
465 private: 465 private:
466 // Reset counters that track live and allocated-since-last-GC sizes. 466 // Reset counters that track live and allocated-since-last-GC sizes.
(...skipping 52 matching lines...)
519 WTF_MAKE_NONCOPYABLE(GarbageCollected); 519 WTF_MAKE_NONCOPYABLE(GarbageCollected);
520 520
521 // For now direct allocation of arrays on the heap is not allowed. 521 // For now direct allocation of arrays on the heap is not allowed.
522 void* operator new[](size_t size); 522 void* operator new[](size_t size);
523 523
524 #if OS(WIN) && COMPILER(MSVC) 524 #if OS(WIN) && COMPILER(MSVC)
525 // Due to some quirkiness in the MSVC compiler we have to provide 525 // Due to some quirkiness in the MSVC compiler we have to provide
526 // the delete[] operator in the GarbageCollected subclasses as it 526 // the delete[] operator in the GarbageCollected subclasses as it
527 // is called when a class is exported in a DLL. 527 // is called when a class is exported in a DLL.
528 protected: 528 protected:
529 void operator delete[](void* p) { ASSERT_NOT_REACHED(); } 529 void operator delete[](void* p) { NOTREACHED(); }
530 #else 530 #else
531 void operator delete[](void* p); 531 void operator delete[](void* p);
532 #endif 532 #endif
533 533
534 public: 534 public:
535 using GarbageCollectedType = T; 535 using GarbageCollectedType = T;
536 536
537 void* operator new(size_t size) { 537 void* operator new(size_t size) {
538 return allocateObject(size, IsEagerlyFinalizedType<T>::value); 538 return allocateObject(size, IsEagerlyFinalizedType<T>::value);
539 } 539 }
540 540
541 static void* allocateObject(size_t size, bool eagerlySweep) { 541 static void* allocateObject(size_t size, bool eagerlySweep) {
542 return ThreadHeap::allocate<T>(size, eagerlySweep); 542 return ThreadHeap::allocate<T>(size, eagerlySweep);
543 } 543 }
544 544
545 void operator delete(void* p) { ASSERT_NOT_REACHED(); } 545 void operator delete(void* p) { NOTREACHED(); }
546 546
547 protected: 547 protected:
548 GarbageCollected() {} 548 GarbageCollected() {}
549 }; 549 };
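
For readers outside Blink, a hedged sketch of how a class typically opts into this allocator; the class name and the trace declaration below are illustrative assumptions, not code from this patch.

// Hypothetical example, not part of this CL: deriving from GarbageCollected<T>
// picks up the class-specific operator new above, so `new Node` is routed to
// ThreadHeap::allocate<Node>() on an arena chosen for the type, while plain
// `delete` is disallowed (it hits the NOTREACHED() operator delete).
class Node final : public GarbageCollected<Node> {
 public:
  static Node* create() { return new Node; }
  DECLARE_TRACE();  // trace plumbing assumed; see Visitor.h for the actual contract

 private:
  Node() {}
};
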
550 550
551 // Assigning class types to their arenas. 551 // Assigning class types to their arenas.
552 // 552 //
553 // We use sized arenas for most 'normal' objects to improve memory locality. 553 // We use sized arenas for most 'normal' objects to improve memory locality.
554 // It seems that the same type of objects are likely to be accessed together, 554 // It seems that the same type of objects are likely to be accessed together,
555 // which means that we want to group objects by type. That's one reason 555 // which means that we want to group objects by type. That's one reason
(...skipping 27 matching lines...)
583 index <= BlinkGC::NormalPage4ArenaIndex; 583 index <= BlinkGC::NormalPage4ArenaIndex;
584 } 584 }
585 585
586 #define DECLARE_EAGER_FINALIZATION_OPERATOR_NEW() \ 586 #define DECLARE_EAGER_FINALIZATION_OPERATOR_NEW() \
587 public: \ 587 public: \
588 GC_PLUGIN_IGNORE("491488") \ 588 GC_PLUGIN_IGNORE("491488") \
589 void* operator new(size_t size) { return allocateObject(size, true); } 589 void* operator new(size_t size) { return allocateObject(size, true); }
590 590
591 #define IS_EAGERLY_FINALIZED() \ 591 #define IS_EAGERLY_FINALIZED() \
592 (pageFromObject(this)->arena()->arenaIndex() == BlinkGC::EagerSweepArenaIndex) 592 (pageFromObject(this)->arena()->arenaIndex() == BlinkGC::EagerSweepArenaIndex)
593 #if ENABLE(ASSERT) 593 #if DCHECK_IS_ON()
594 class VerifyEagerFinalization { 594 class VerifyEagerFinalization {
595 DISALLOW_NEW(); 595 DISALLOW_NEW();
596 596
597 public: 597 public:
598 ~VerifyEagerFinalization() { 598 ~VerifyEagerFinalization() {
599 // If this assert triggers, the class annotated as eagerly 599 // If this assert triggers, the class annotated as eagerly
600 // finalized ended up not being allocated on the heap 600 // finalized ended up not being allocated on the heap
601 // set aside for eager finalization. The reason is most 601 // set aside for eager finalization. The reason is most
602 // likely that the effective 'operator new' overload for 602 // likely that the effective 'operator new' overload for
603 // this class' leftmost base is for a class that is not 603 // this class' leftmost base is for a class that is not
604 // eagerly finalized. Declaring and defining an 'operator new' 604 // eagerly finalized. Declaring and defining an 'operator new'
605 // for this class is what's required -- consider using 605 // for this class is what's required -- consider using
606 // DECLARE_EAGER_FINALIZATION_OPERATOR_NEW(). 606 // DECLARE_EAGER_FINALIZATION_OPERATOR_NEW().
607 ASSERT(IS_EAGERLY_FINALIZED()); 607 DCHECK(IS_EAGERLY_FINALIZED());
608 } 608 }
609 }; 609 };
610 #define EAGERLY_FINALIZE() \ 610 #define EAGERLY_FINALIZE() \
611 private: \ 611 private: \
612 VerifyEagerFinalization m_verifyEagerFinalization; \ 612 VerifyEagerFinalization m_verifyEagerFinalization; \
613 \ 613 \
614 public: \ 614 public: \
615 typedef int IsEagerlyFinalizedMarker 615 typedef int IsEagerlyFinalizedMarker
616 #else 616 #else
617 #define EAGERLY_FINALIZE() \ 617 #define EAGERLY_FINALIZE() \
618 public: \ 618 public: \
619 typedef int IsEagerlyFinalizedMarker 619 typedef int IsEagerlyFinalizedMarker
620 #endif 620 #endif
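
To show how the two macros above combine in practice, a hedged usage sketch follows; the class name is hypothetical, GarbageCollectedFinalized is the usual base for classes with destructors in this era of Blink, and nothing below comes from the patch itself.

// Hypothetical eagerly-finalized class. EAGERLY_FINALIZE() adds the marker
// typedef (and, with DCHECK_IS_ON(), the VerifyEagerFinalization member that
// DCHECKs the object really landed on the eager-sweep arena), while
// DECLARE_EAGER_FINALIZATION_OPERATOR_NEW() overrides operator new so the
// allocation goes to that arena in the first place.
class HypotheticalObserver final
    : public GarbageCollectedFinalized<HypotheticalObserver> {
 public:
  EAGERLY_FINALIZE();
  DECLARE_EAGER_FINALIZATION_OPERATOR_NEW();
  ~HypotheticalObserver() {}  // runs before lazily swept objects are reclaimed

  DECLARE_TRACE();  // trace plumbing assumed, as above
};
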
621 621
622 inline Address ThreadHeap::allocateOnArenaIndex(ThreadState* state, 622 inline Address ThreadHeap::allocateOnArenaIndex(ThreadState* state,
623 size_t size, 623 size_t size,
624 int arenaIndex, 624 int arenaIndex,
625 size_t gcInfoIndex, 625 size_t gcInfoIndex,
626 const char* typeName) { 626 const char* typeName) {
627 ASSERT(state->isAllocationAllowed()); 627 DCHECK(state->isAllocationAllowed());
628 ASSERT(arenaIndex != BlinkGC::LargeObjectArenaIndex); 628 DCHECK_NE(arenaIndex, BlinkGC::LargeObjectArenaIndex);
629 NormalPageArena* arena = 629 NormalPageArena* arena =
630 static_cast<NormalPageArena*>(state->arena(arenaIndex)); 630 static_cast<NormalPageArena*>(state->arena(arenaIndex));
631 Address address = 631 Address address =
632 arena->allocateObject(allocationSizeFromSize(size), gcInfoIndex); 632 arena->allocateObject(allocationSizeFromSize(size), gcInfoIndex);
633 HeapAllocHooks::allocationHookIfEnabled(address, size, typeName); 633 HeapAllocHooks::allocationHookIfEnabled(address, size, typeName);
634 return address; 634 return address;
635 } 635 }
636 636
637 template <typename T> 637 template <typename T>
638 Address ThreadHeap::allocate(size_t size, bool eagerlySweep) { 638 Address ThreadHeap::allocate(size_t size, bool eagerlySweep) {
(...skipping 12 matching lines...)
651 651
652 // TODO(sof): promptly free the previous object. 652 // TODO(sof): promptly free the previous object.
653 if (!size) { 653 if (!size) {
654 // If the new size is 0 this is considered equivalent to free(previous). 654 // If the new size is 0 this is considered equivalent to free(previous).
655 return nullptr; 655 return nullptr;
656 } 656 }
657 657
658 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); 658 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state();
659 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous); 659 HeapObjectHeader* previousHeader = HeapObjectHeader::fromPayload(previous);
660 BasePage* page = pageFromObject(previousHeader); 660 BasePage* page = pageFromObject(previousHeader);
661 ASSERT(page); 661 DCHECK(page);
662 662
663 // Determine arena index of new allocation. 663 // Determine arena index of new allocation.
664 int arenaIndex; 664 int arenaIndex;
665 if (size >= largeObjectSizeThreshold) { 665 if (size >= largeObjectSizeThreshold) {
666 arenaIndex = BlinkGC::LargeObjectArenaIndex; 666 arenaIndex = BlinkGC::LargeObjectArenaIndex;
667 } else { 667 } else {
668 arenaIndex = page->arena()->arenaIndex(); 668 arenaIndex = page->arena()->arenaIndex();
669 if (isNormalArenaIndex(arenaIndex) || 669 if (isNormalArenaIndex(arenaIndex) ||
670 arenaIndex == BlinkGC::LargeObjectArenaIndex) 670 arenaIndex == BlinkGC::LargeObjectArenaIndex)
671 arenaIndex = arenaIndexForObjectSize(size); 671 arenaIndex = arenaIndexForObjectSize(size);
672 } 672 }
673 673
674 size_t gcInfoIndex = GCInfoTrait<T>::index(); 674 size_t gcInfoIndex = GCInfoTrait<T>::index();
675 // TODO(haraken): We don't support reallocate() for finalizable objects. 675 // TODO(haraken): We don't support reallocate() for finalizable objects.
676 ASSERT(!ThreadHeap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer()); 676 DCHECK(!ThreadHeap::gcInfo(previousHeader->gcInfoIndex())->hasFinalizer());
677 ASSERT(previousHeader->gcInfoIndex() == gcInfoIndex); 677 DCHECK_EQ(previousHeader->gcInfoIndex(), gcInfoIndex);
678 HeapAllocHooks::freeHookIfEnabled(static_cast<Address>(previous)); 678 HeapAllocHooks::freeHookIfEnabled(static_cast<Address>(previous));
679 Address address; 679 Address address;
680 if (arenaIndex == BlinkGC::LargeObjectArenaIndex) { 680 if (arenaIndex == BlinkGC::LargeObjectArenaIndex) {
681 address = page->arena()->allocateLargeObject(allocationSizeFromSize(size), 681 address = page->arena()->allocateLargeObject(allocationSizeFromSize(size),
682 gcInfoIndex); 682 gcInfoIndex);
683 } else { 683 } else {
684 const char* typeName = WTF_HEAP_PROFILER_TYPE_NAME(T); 684 const char* typeName = WTF_HEAP_PROFILER_TYPE_NAME(T);
685 address = ThreadHeap::allocateOnArenaIndex(state, size, arenaIndex, 685 address = ThreadHeap::allocateOnArenaIndex(state, size, arenaIndex,
686 gcInfoIndex, typeName); 686 gcInfoIndex, typeName);
687 } 687 }
688 size_t copySize = previousHeader->payloadSize(); 688 size_t copySize = previousHeader->payloadSize();
689 if (copySize > size) 689 if (copySize > size)
690 copySize = size; 690 copySize = size;
691 memcpy(address, previous, copySize); 691 memcpy(address, previous, copySize);
692 return address; 692 return address;
693 } 693 }
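
A brief note on the contract of reallocate() above: a new size of 0 behaves like free(previous), finalizable types are not supported (the first DCHECK), and only min(old payload size, new size) bytes are copied. The helper below is a toy restatement of just that copy clamp, with illustrative parameter names standing in for previousHeader->payloadSize() and the requested size.

// Toy restatement of the copy clamp at the end of reallocate(); not patch code.
#include <algorithm>
#include <cstddef>

static size_t reallocateCopySize(size_t previousPayloadSize, size_t newSize) {
  return std::min(previousPayloadSize, newSize);
}
// e.g. shrinking a 64-byte payload to 16 bytes copies 16 bytes;
// growing a 16-byte payload to 64 bytes also copies only 16 bytes.
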
694 694
695 template <typename Derived> 695 template <typename Derived>
696 template <typename T> 696 template <typename T>
697 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) { 697 void VisitorHelper<Derived>::handleWeakCell(Visitor* self, void* object) {
698 T** cell = reinterpret_cast<T**>(object); 698 T** cell = reinterpret_cast<T**>(object);
699 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell)) 699 if (*cell && !ObjectAliveTrait<T>::isHeapObjectAlive(*cell))
700 *cell = nullptr; 700 *cell = nullptr;
701 } 701 }
702 702
703 } // namespace blink 703 } // namespace blink
704 704
705 #endif // Heap_h 705 #endif // Heap_h
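
Summing up the replacements visible in this file, the CL maps the legacy Blink/WTF assertion macros onto the Chromium base ones: ENABLE(ASSERT) guards become DCHECK_IS_ON(), ASSERT becomes DCHECK (or the comparison forms DCHECK_EQ/DCHECK_NE/DCHECK_GE/DCHECK_LT), RELEASE_ASSERT becomes CHECK, and ASSERT_NOT_REACHED becomes NOTREACHED. The snippet below is a free-standing illustration of that pattern for a Chromium target; the function names are made up for the example and it is not code from the patch.

// Illustration of the macro migration this CL applies; uses only macros from
// base/logging.h. Function names are hypothetical.
#include <cstddef>

#include "base/logging.h"

#if DCHECK_IS_ON()  // was: #if ENABLE(ASSERT)
void debugOnlyCheck(bool ok) {
  DCHECK(ok);  // was: ASSERT(ok)
}
#endif

void checkGcInfoIndex(size_t gcInfoIndex, size_t maxIndex) {
  DCHECK_GE(gcInfoIndex, 1UL);       // was: ASSERT(gcInfoIndex >= 1)
  DCHECK_LT(gcInfoIndex, maxIndex);  // was: ASSERT(gcInfoIndex < maxIndex)
}

size_t checkedHeaderedSize(size_t size, size_t headerSize) {
  size_t allocationSize = size + headerSize;
  CHECK(allocationSize > size);  // was: RELEASE_ASSERT(...); still on in release builds
  return allocationSize;
}

void neverCalled() {
  NOTREACHED();  // was: ASSERT_NOT_REACHED()
}
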
