Chromium Code Reviews

Side by Side Diff: third_party/WebKit/Source/platform/heap/Heap.h

Issue 2816033003: Replace ASSERT with DCHECK_op in platform/heap (Closed)
Patch Set: Replace ASSERT with DCHECK_op in platform/heap Created 3 years, 8 months ago
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 283 matching lines...)
294 // such a reference. 294 // such a reference.
295 template <typename T> 295 template <typename T>
296 NO_SANITIZE_ADDRESS static bool WillObjectBeLazilySwept( 296 NO_SANITIZE_ADDRESS static bool WillObjectBeLazilySwept(
297 const T* object_pointer) { 297 const T* object_pointer) {
298 static_assert(IsGarbageCollectedType<T>::value, 298 static_assert(IsGarbageCollectedType<T>::value,
299 "only objects deriving from GarbageCollected can be used."); 299 "only objects deriving from GarbageCollected can be used.");
300 BasePage* page = PageFromObject(object_pointer); 300 BasePage* page = PageFromObject(object_pointer);
301 // Page has been swept and it is still alive. 301 // Page has been swept and it is still alive.
302 if (page->HasBeenSwept()) 302 if (page->HasBeenSwept())
303 return false; 303 return false;
304 ASSERT(page->Arena()->GetThreadState()->IsSweepingInProgress()); 304 DCHECK(page->Arena()->GetThreadState()->IsSweepingInProgress());
305 305
 306 // If marked and alive, the object hasn't yet been swept, and won't 306 // If marked and alive, the object hasn't yet been swept, and won't
307 // be once its page is processed. 307 // be once its page is processed.
308 if (ThreadHeap::IsHeapObjectAlive(const_cast<T*>(object_pointer))) 308 if (ThreadHeap::IsHeapObjectAlive(const_cast<T*>(object_pointer)))
309 return false; 309 return false;
310 310
311 if (page->IsLargeObjectPage()) 311 if (page->IsLargeObjectPage())
312 return true; 312 return true;
313 313
314 // If the object is unmarked, it may be on the page currently being 314 // If the object is unmarked, it may be on the page currently being
(...skipping 97 matching lines...)
412 void FlushHeapDoesNotContainCache(); 412 void FlushHeapDoesNotContainCache();
413 413
414 PagePool* GetFreePagePool() { return free_page_pool_.get(); } 414 PagePool* GetFreePagePool() { return free_page_pool_.get(); }
415 415
416 // This look-up uses the region search tree and a negative contains cache to 416 // This look-up uses the region search tree and a negative contains cache to
417 // provide an efficient mapping from arbitrary addresses to the containing 417 // provide an efficient mapping from arbitrary addresses to the containing
418 // heap-page if one exists. 418 // heap-page if one exists.
419 BasePage* LookupPageForAddress(Address); 419 BasePage* LookupPageForAddress(Address);
420 420
421 static const GCInfo* GcInfo(size_t gc_info_index) { 421 static const GCInfo* GcInfo(size_t gc_info_index) {
422 ASSERT(gc_info_index >= 1); 422 DCHECK_GE(gc_info_index, 1u);
423 ASSERT(gc_info_index < GCInfoTable::kMaxIndex); 423 DCHECK(gc_info_index < GCInfoTable::kMaxIndex);
Hwanseung Lee 2017/04/20 00:30:42 When this was replaced with DCHECK_LT, it caused a build error, so plain DCHECK is kept here.
424 ASSERT(g_gc_info_table); 424 DCHECK(g_gc_info_table);
425 const GCInfo* info = g_gc_info_table[gc_info_index]; 425 const GCInfo* info = g_gc_info_table[gc_info_index];
426 ASSERT(info); 426 DCHECK(info);
427 return info; 427 return info;
428 } 428 }
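A plausible explanation for the build break Hwanseung Lee reports above (the thread itself does not spell it out): Chromium's DCHECK_op helpers take their operands by const reference so the failure path can log both values. Binding GCInfoTable::kMaxIndex to a reference ODR-uses it, and a static const integral member initialized only in-class has no out-of-line definition to link against; a plain DCHECK(a < b) merely reads the value and links fine. A minimal self-contained sketch of the effect, with illustrative names (Table, CheckLtImpl) that are not Chromium's real ones:

  #include <cstddef>

  struct Table {
    // In-class initializer: usable in constant expressions, but the member
    // still needs an out-of-line definition if it is ever ODR-used.
    static const size_t kMaxIndex = 1 << 14;
  };

  // DCHECK_op-style helper: operands come in by const reference so a real
  // implementation could stream both values on failure. Binding kMaxIndex
  // to `b` ODR-uses it; without a definition this typically fails to link:
  //   undefined reference to `Table::kMaxIndex'
  template <typename T, typename U>
  bool CheckLtImpl(const T& a, const U& b) {
    return a < b;
  }

  // Uncommenting this out-of-line definition makes the program link:
  // const size_t Table::kMaxIndex;

  int main() {
    size_t index = 42;
    bool plain = index < Table::kMaxIndex;                 // reads the value
    bool op_style = CheckLtImpl(index, Table::kMaxIndex);  // ODR-use
    return plain && op_style ? 0 : 1;
  }

If that is indeed the cause, the usual remedies are adding the out-of-line definition or casting at the call site, e.g. DCHECK_LT(gc_info_index, static_cast<size_t>(GCInfoTable::kMaxIndex)); keeping the plain DCHECK, as this patch set does, sidesteps the problem entirely.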
429 429
430 static void ReportMemoryUsageHistogram(); 430 static void ReportMemoryUsageHistogram();
431 static void ReportMemoryUsageForTracing(); 431 static void ReportMemoryUsageForTracing();
432 432
433 HeapCompact* Compaction(); 433 HeapCompact* Compaction();
434 434
435 private: 435 private:
436 // Reset counters that track live and allocated-since-last-GC sizes. 436 // Reset counters that track live and allocated-since-last-GC sizes.
(...skipping 128 matching lines...)
565 public: 565 public:
566 ~VerifyEagerFinalization() { 566 ~VerifyEagerFinalization() {
567 // If this assert triggers, the class annotated as eagerly 567 // If this assert triggers, the class annotated as eagerly
568 // finalized ended up not being allocated on the heap 568 // finalized ended up not being allocated on the heap
569 // set aside for eager finalization. The reason is most 569 // set aside for eager finalization. The reason is most
570 // likely that the effective 'operator new' overload for 570 // likely that the effective 'operator new' overload for
571 // this class' leftmost base is for a class that is not 571 // this class' leftmost base is for a class that is not
572 // eagerly finalized. Declaring and defining an 'operator new' 572 // eagerly finalized. Declaring and defining an 'operator new'
573 // for this class is what's required -- consider using 573 // for this class is what's required -- consider using
574 // DECLARE_EAGER_FINALIZATION_OPERATOR_NEW(). 574 // DECLARE_EAGER_FINALIZATION_OPERATOR_NEW().
575 ASSERT(IS_EAGERLY_FINALIZED()); 575 DCHECK(IS_EAGERLY_FINALIZED());
576 } 576 }
577 }; 577 };
578 #define EAGERLY_FINALIZE() \ 578 #define EAGERLY_FINALIZE() \
579 private: \ 579 private: \
580 VerifyEagerFinalization verify_eager_finalization_; \ 580 VerifyEagerFinalization verify_eager_finalization_; \
581 \ 581 \
582 public: \ 582 public: \
583 typedef int IsEagerlyFinalizedMarker 583 typedef int IsEagerlyFinalizedMarker
584 #else 584 #else
585 #define EAGERLY_FINALIZE() \ 585 #define EAGERLY_FINALIZE() \
586 public: \ 586 public: \
587 typedef int IsEagerlyFinalizedMarker 587 typedef int IsEagerlyFinalizedMarker
588 #endif 588 #endif
589 589
590 inline Address ThreadHeap::AllocateOnArenaIndex(ThreadState* state, 590 inline Address ThreadHeap::AllocateOnArenaIndex(ThreadState* state,
591 size_t size, 591 size_t size,
592 int arena_index, 592 int arena_index,
593 size_t gc_info_index, 593 size_t gc_info_index,
594 const char* type_name) { 594 const char* type_name) {
595 ASSERT(state->IsAllocationAllowed()); 595 DCHECK(state->IsAllocationAllowed());
596 ASSERT(arena_index != BlinkGC::kLargeObjectArenaIndex); 596 DCHECK_NE(arena_index, BlinkGC::kLargeObjectArenaIndex);
597 NormalPageArena* arena = 597 NormalPageArena* arena =
598 static_cast<NormalPageArena*>(state->Arena(arena_index)); 598 static_cast<NormalPageArena*>(state->Arena(arena_index));
599 Address address = 599 Address address =
600 arena->AllocateObject(AllocationSizeFromSize(size), gc_info_index); 600 arena->AllocateObject(AllocationSizeFromSize(size), gc_info_index);
601 HeapAllocHooks::AllocationHookIfEnabled(address, size, type_name); 601 HeapAllocHooks::AllocationHookIfEnabled(address, size, type_name);
602 return address; 602 return address;
603 } 603 }
604 604
605 template <typename T> 605 template <typename T>
606 Address ThreadHeap::Allocate(size_t size, bool eagerly_sweep) { 606 Address ThreadHeap::Allocate(size_t size, bool eagerly_sweep) {
(...skipping 13 matching lines...)
620 620
621 // TODO(sof): promptly free the previous object. 621 // TODO(sof): promptly free the previous object.
622 if (!size) { 622 if (!size) {
623 // If the new size is 0 this is considered equivalent to free(previous). 623 // If the new size is 0 this is considered equivalent to free(previous).
624 return nullptr; 624 return nullptr;
625 } 625 }
626 626
627 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState(); 627 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::kAffinity>::GetState();
628 HeapObjectHeader* previous_header = HeapObjectHeader::FromPayload(previous); 628 HeapObjectHeader* previous_header = HeapObjectHeader::FromPayload(previous);
629 BasePage* page = PageFromObject(previous_header); 629 BasePage* page = PageFromObject(previous_header);
630 ASSERT(page); 630 DCHECK(page);
631 631
632 // Determine arena index of new allocation. 632 // Determine arena index of new allocation.
633 int arena_index; 633 int arena_index;
634 if (size >= kLargeObjectSizeThreshold) { 634 if (size >= kLargeObjectSizeThreshold) {
635 arena_index = BlinkGC::kLargeObjectArenaIndex; 635 arena_index = BlinkGC::kLargeObjectArenaIndex;
636 } else { 636 } else {
637 arena_index = page->Arena()->ArenaIndex(); 637 arena_index = page->Arena()->ArenaIndex();
638 if (IsNormalArenaIndex(arena_index) || 638 if (IsNormalArenaIndex(arena_index) ||
639 arena_index == BlinkGC::kLargeObjectArenaIndex) 639 arena_index == BlinkGC::kLargeObjectArenaIndex)
640 arena_index = ArenaIndexForObjectSize(size); 640 arena_index = ArenaIndexForObjectSize(size);
641 } 641 }
642 642
643 size_t gc_info_index = GCInfoTrait<T>::Index(); 643 size_t gc_info_index = GCInfoTrait<T>::Index();
644 // TODO(haraken): We don't support reallocate() for finalizable objects. 644 // TODO(haraken): We don't support reallocate() for finalizable objects.
645 ASSERT(!ThreadHeap::GcInfo(previous_header->GcInfoIndex())->HasFinalizer()); 645 DCHECK(!ThreadHeap::GcInfo(previous_header->GcInfoIndex())->HasFinalizer());
646 ASSERT(previous_header->GcInfoIndex() == gc_info_index); 646 DCHECK_EQ(previous_header->GcInfoIndex(), gc_info_index);
647 HeapAllocHooks::FreeHookIfEnabled(static_cast<Address>(previous)); 647 HeapAllocHooks::FreeHookIfEnabled(static_cast<Address>(previous));
648 Address address; 648 Address address;
649 if (arena_index == BlinkGC::kLargeObjectArenaIndex) { 649 if (arena_index == BlinkGC::kLargeObjectArenaIndex) {
650 address = page->Arena()->AllocateLargeObject(AllocationSizeFromSize(size), 650 address = page->Arena()->AllocateLargeObject(AllocationSizeFromSize(size),
651 gc_info_index); 651 gc_info_index);
652 } else { 652 } else {
653 const char* type_name = WTF_HEAP_PROFILER_TYPE_NAME(T); 653 const char* type_name = WTF_HEAP_PROFILER_TYPE_NAME(T);
654 address = ThreadHeap::AllocateOnArenaIndex(state, size, arena_index, 654 address = ThreadHeap::AllocateOnArenaIndex(state, size, arena_index,
655 gc_info_index, type_name); 655 gc_info_index, type_name);
656 } 656 }
657 size_t copy_size = previous_header->PayloadSize(); 657 size_t copy_size = previous_header->PayloadSize();
658 if (copy_size > size) 658 if (copy_size > size)
659 copy_size = size; 659 copy_size = size;
660 memcpy(address, previous, copy_size); 660 memcpy(address, previous, copy_size);
661 return address; 661 return address;
662 } 662 }
663 663
664 template <typename T> 664 template <typename T>
665 void Visitor::HandleWeakCell(Visitor* self, void* object) { 665 void Visitor::HandleWeakCell(Visitor* self, void* object) {
666 T** cell = reinterpret_cast<T**>(object); 666 T** cell = reinterpret_cast<T**>(object);
667 if (*cell && !ObjectAliveTrait<T>::IsHeapObjectAlive(*cell)) 667 if (*cell && !ObjectAliveTrait<T>::IsHeapObjectAlive(*cell))
668 *cell = nullptr; 668 *cell = nullptr;
669 } 669 }
670 670
671 } // namespace blink 671 } // namespace blink
672 672
673 #include "platform/heap/VisitorImpl.h" 673 #include "platform/heap/VisitorImpl.h"
674 674
675 #endif // Heap_h 675 #endif // Heap_h
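For context on the direction of this CL: a bare DCHECK(cond) can only report the stringified failing expression, while the DCHECK_EQ/GE/NE family also reports the runtime values of both operands, which is what makes the mechanical ASSERT-to-DCHECK_op conversion worthwhile. A minimal self-contained sketch of the difference, using simplified stand-in macros rather than Chromium's base/logging.h:

  #include <cstddef>
  #include <cstdlib>
  #include <iostream>

  // Bare form: on failure, only the expression text is available.
  #define SKETCH_DCHECK(cond)                      \
    do {                                           \
      if (!(cond)) {                               \
        std::cerr << "Check failed: " #cond "\n";  \
        std::abort();                              \
      }                                            \
    } while (0)

  // _op form: the macro sees both operands separately, so it can also log
  // their runtime values alongside the expression.
  #define SKETCH_DCHECK_GE(a, b)                              \
    do {                                                      \
      if (!((a) >= (b))) {                                    \
        std::cerr << "Check failed: " #a " >= " #b            \
                  << " (" << (a) << " vs. " << (b) << ")\n";  \
        std::abort();                                         \
      }                                                       \
    } while (0)

  int main() {
    size_t gc_info_index = 0;  // deliberately invalid to show the message
    SKETCH_DCHECK_GE(gc_info_index, 1u);  // prints "... (0 vs. 1)", aborts
    return 0;
  }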
