Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 169 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 180 // |allocationGranularity|, |HeapObjectHeader| must fit into the size. | 180 // |allocationGranularity|, |HeapObjectHeader| must fit into the size. |
| 181 static_assert( | 181 static_assert( |
| 182 sizeof(HeapObjectHeader) <= kAllocationGranularity, | 182 sizeof(HeapObjectHeader) <= kAllocationGranularity, |
| 183 "size of HeapObjectHeader must be smaller than allocationGranularity"); | 183 "size of HeapObjectHeader must be smaller than allocationGranularity"); |
| 184 #if CPU(64BIT) | 184 #if CPU(64BIT) |
| 185 static_assert(sizeof(HeapObjectHeader) == 8, | 185 static_assert(sizeof(HeapObjectHeader) == 8, |
| 186 "sizeof(HeapObjectHeader) must be 8 bytes"); | 186 "sizeof(HeapObjectHeader) must be 8 bytes"); |
| 187 magic_ = GetMagic(); | 187 magic_ = GetMagic(); |
| 188 #endif | 188 #endif |
| 189 | 189 |
| 190 ASSERT(gc_info_index < GCInfoTable::kMaxIndex); | 190 DCHECK(gc_info_index < GCInfoTable::kMaxIndex); |
|
Hwanseung Lee
2017/04/20 00:30:43
when replaced with DCHECK_LT, it caused a build failure
| |
| 191 ASSERT(size < kNonLargeObjectPageSizeMax); | 191 DCHECK_LT(size, kNonLargeObjectPageSizeMax); |
| 192 ASSERT(!(size & kAllocationMask)); | 192 DCHECK(!(size & kAllocationMask)); |
| 193 encoded_ = static_cast<uint32_t>( | 193 encoded_ = static_cast<uint32_t>( |
| 194 (gc_info_index << kHeaderGCInfoIndexShift) | size | | 194 (gc_info_index << kHeaderGCInfoIndexShift) | size | |
| 195 (gc_info_index == kGcInfoIndexForFreeListHeader ? kHeaderFreedBitMask | 195 (gc_info_index == kGcInfoIndexForFreeListHeader ? kHeaderFreedBitMask |
| 196 : 0)); | 196 : 0)); |
| 197 } | 197 } |
| 198 | 198 |
// Returns true when the freed bit is set in |encoded_|, i.e. this header
// describes a free-list entry rather than a live object.
NO_SANITIZE_ADDRESS bool IsFree() const {
  return encoded_ & kHeaderFreedBitMask;
}
| 202 | 202 |
// Returns true when the promptly-freed bit is set. Note the mask may cover
// more than one bit; all of its bits must be set for this to return true.
NO_SANITIZE_ADDRESS bool IsPromptlyFreed() const {
  return (encoded_ & kHeaderPromptlyFreedBitMask) ==
         kHeaderPromptlyFreedBitMask;
}
| 207 | 207 |
// Sets the promptly-freed bit(s). Irreversible here; no corresponding
// "unmark" is visible in this chunk.
NO_SANITIZE_ADDRESS void MarkPromptlyFreed() {
  encoded_ |= kHeaderPromptlyFreedBitMask;
}
| 211 | 211 |
| 212 size_t size() const; | 212 size_t size() const; |
| 213 | 213 |
// Extracts the GCInfo table index packed into the upper bits of |encoded_|.
NO_SANITIZE_ADDRESS size_t GcInfoIndex() const {
  return (encoded_ & kHeaderGCInfoIndexMask) >> kHeaderGCInfoIndexShift;
}
| 217 | 217 |
// Replaces the size field of |encoded_|, preserving the other packed bits.
// |size| must fit in the non-large-object size range; callers are expected
// to pass an allocation-granularity-rounded value (not re-checked here).
NO_SANITIZE_ADDRESS void SetSize(size_t size) {
  DCHECK_LT(size, kNonLargeObjectPageSizeMax);
  CheckHeader();
  encoded_ = static_cast<uint32_t>(size) | (encoded_ & ~kHeaderSizeMask);
}
| 223 | 223 |
| 224 bool IsWrapperHeaderMarked() const; | 224 bool IsWrapperHeaderMarked() const; |
| 225 void MarkWrapperHeader(); | 225 void MarkWrapperHeader(); |
| 226 void UnmarkWrapperHeader(); | 226 void UnmarkWrapperHeader(); |
| 227 bool IsMarked() const; | 227 bool IsMarked() const; |
| 228 void Mark(); | 228 void Mark(); |
| 229 void Unmark(); | 229 void Unmark(); |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 268 | 268 |
| 269 uint32_t encoded_; | 269 uint32_t encoded_; |
| 270 }; | 270 }; |
| 271 | 271 |
// A free-list entry overlays the first bytes of a freed heap region. It
// reuses HeapObjectHeader, constructed with kGcInfoIndexForFreeListHeader
// (which also sets the freed bit), and threads entries into a singly linked
// list through |next_|.
class FreeListEntry final : public HeapObjectHeader {
 public:
  NO_SANITIZE_ADDRESS
  explicit FreeListEntry(size_t size)
      : HeapObjectHeader(size, kGcInfoIndexForFreeListHeader), next_(nullptr) {
#if DCHECK_IS_ON() && CPU(64BIT)
    // The freed region must at least hold the header it overlays. Zap the
    // magic so a stale header left on the free list is detectable.
    DCHECK_GE(size, sizeof(HeapObjectHeader));
    ZapMagic();
#endif
  }

  Address GetAddress() { return reinterpret_cast<Address>(this); }

  // Removes this entry from the list. |prev_next| is the predecessor's link
  // (or the list head) that currently points at this entry.
  NO_SANITIZE_ADDRESS
  void Unlink(FreeListEntry** prev_next) {
    *prev_next = next_;
    next_ = nullptr;
  }

  // Inserts this entry in front of the entry *|prev_next| points to.
  NO_SANITIZE_ADDRESS
  void Link(FreeListEntry** prev_next) {
    next_ = *prev_next;
    *prev_next = this;
  }

  NO_SANITIZE_ADDRESS
  FreeListEntry* Next() const { return next_; }

  // Appends |next| after this entry, which must currently be the list tail.
  NO_SANITIZE_ADDRESS
  void Append(FreeListEntry* next) {
    DCHECK(!next_);
    next_ = next;
  }

 private:
  FreeListEntry* next_;
};
| 309 | 309 |
| 310 // Blink heap pages are set up with a guard page before and after the payload. | 310 // Blink heap pages are set up with a guard page before and after the payload. |
| 311 inline size_t BlinkPagePayloadSize() { | 311 inline size_t BlinkPagePayloadSize() { |
| 312 return kBlinkPageSize - 2 * kBlinkGuardPageSize; | 312 return kBlinkPageSize - 2 * kBlinkGuardPageSize; |
| (...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 427 virtual bool IsLargeObjectPage() { return false; } | 427 virtual bool IsLargeObjectPage() { return false; } |
| 428 | 428 |
| 429 Address GetAddress() { return reinterpret_cast<Address>(this); } | 429 Address GetAddress() { return reinterpret_cast<Address>(this); } |
| 430 PageMemory* Storage() const { return storage_; } | 430 PageMemory* Storage() const { return storage_; } |
| 431 BaseArena* Arena() const { return arena_; } | 431 BaseArena* Arena() const { return arena_; } |
| 432 | 432 |
| 433 // Returns true if this page has been swept by the ongoing lazy sweep. | 433 // Returns true if this page has been swept by the ongoing lazy sweep. |
| 434 bool HasBeenSwept() const { return swept_; } | 434 bool HasBeenSwept() const { return swept_; } |
| 435 | 435 |
// Marks this page as swept by the ongoing lazy sweep; must not already be
// marked swept.
void MarkAsSwept() {
  DCHECK(!swept_);
  swept_ = true;
}
| 440 | 440 |
// Resets the swept flag (at the start of a sweep); must currently be marked
// swept.
void MarkAsUnswept() {
  DCHECK(swept_);
  swept_ = false;
}
| 445 | 445 |
| 446 private: | 446 private: |
| 447 PageMemory* storage_; | 447 PageMemory* storage_; |
| 448 BaseArena* arena_; | 448 BaseArena* arena_; |
| 449 BasePage* next_; | 449 BasePage* next_; |
| 450 | 450 |
| 451 // Track the sweeping state of a page. Set to false at the start of a sweep, | 451 // Track the sweeping state of a page. Set to false at the start of a sweep, |
| 452 // true upon completion of lazy sweeping. | 452 // true upon completion of lazy sweeping. |
| (...skipping 293 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 746 | 746 |
| 747 // Index into the page pools. This is used to ensure that the pages of the | 747 // Index into the page pools. This is used to ensure that the pages of the |
| 748 // same type go into the correct page pool and thus avoid type confusion. | 748 // same type go into the correct page pool and thus avoid type confusion. |
| 749 int index_; | 749 int index_; |
| 750 }; | 750 }; |
| 751 | 751 |
| 752 class PLATFORM_EXPORT NormalPageArena final : public BaseArena { | 752 class PLATFORM_EXPORT NormalPageArena final : public BaseArena { |
| 753 public: | 753 public: |
| 754 NormalPageArena(ThreadState*, int); | 754 NormalPageArena(ThreadState*, int); |
// Returns the region [address, address + size) to this arena's free list.
// The range must lie entirely within a page owned by this arena; the
// DCHECKs are guarded because FindPageFromAddress is only available in
// DCHECK builds.
void AddToFreeList(Address address, size_t size) {
#if DCHECK_IS_ON()
  DCHECK(FindPageFromAddress(address));
  DCHECK(FindPageFromAddress(address + size - 1));
#endif
  free_list_.AddToFreeList(address, size);
}
| 760 void ClearFreeLists() override; | 762 void ClearFreeLists() override; |
| 761 #if DCHECK_IS_ON() | 763 #if DCHECK_IS_ON() |
| 762 bool IsConsistentForGC() override; | 764 bool IsConsistentForGC() override; |
| 763 bool PagesToBeSweptContains(Address); | 765 bool PagesToBeSweptContains(Address); |
| 764 #endif | 766 #endif |
| 765 void TakeFreelistSnapshot(const String& dump_base_name) override; | 767 void TakeFreelistSnapshot(const String& dump_base_name) override; |
| 766 | 768 |
| 767 Address AllocateObject(size_t allocation_size, size_t gc_info_index); | 769 Address AllocateObject(size_t allocation_size, size_t gc_info_index); |
| (...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 830 | 832 |
| 831 // Mask an address down to the enclosing oilpan heap base page. All Oilpan heap | 833 // Mask an address down to the enclosing oilpan heap base page. All Oilpan heap |
| 832 // pages are aligned at |blinkPageBase| plus the size of a guard size. | 834 // pages are aligned at |blinkPageBase| plus the size of a guard size. |
| 833 // | 835 // |
| 834 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | 836 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our |
| 835 // typed arenas. This is only exported to enable tests in HeapTest.cpp. | 837 // typed arenas. This is only exported to enable tests in HeapTest.cpp. |
// Maps an object pointer back to its BasePage: mask the address down to the
// enclosing blink page and step past the leading guard page, where the
// BasePage header lives.
PLATFORM_EXPORT inline BasePage* PageFromObject(const void* object) {
  Address address = reinterpret_cast<Address>(const_cast<void*>(object));
  BasePage* page = reinterpret_cast<BasePage*>(BlinkPageAddress(address) +
                                               kBlinkGuardPageSize);
#if DCHECK_IS_ON()
  // Contains() is only declared for DCHECK builds, hence the guard.
  DCHECK(page->Contains(address));
#endif
  return page;
}
| 843 | 847 |
| 844 NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const { | 848 NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const { |
| 845 size_t result = encoded_ & kHeaderSizeMask; | 849 size_t result = encoded_ & kHeaderSizeMask; |
| 846 // Large objects should not refer to header->size(). The actual size of a | 850 // Large objects should not refer to header->size(). The actual size of a |
| 847 // large object is stored in |LargeObjectPage::m_payloadSize|. | 851 // large object is stored in |LargeObjectPage::m_payloadSize|. |
| 848 ASSERT(result != kLargeObjectSizeInHeader); | 852 DCHECK(result != kLargeObjectSizeInHeader); |
| 849 ASSERT(!PageFromObject(this)->IsLargeObjectPage()); | 853 DCHECK(!PageFromObject(this)->IsLargeObjectPage()); |
| 850 return result; | 854 return result; |
| 851 } | 855 } |
| 852 | 856 |
// Verifies the header's magic on 64-bit builds, catching use of a stale or
// corrupted header. No-op on 32-bit, where the magic field does not exist.
NO_SANITIZE_ADDRESS inline void HeapObjectHeader::CheckHeader() const {
#if CPU(64BIT)
  const bool good_magic = GetMagic() == magic_;
  DCHECK(good_magic);
#endif
}
| 859 | 863 |
// Returns the start of the object payload, immediately after this header.
inline Address HeapObjectHeader::Payload() {
  return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader);
}
| 863 | 867 |
// Returns one-past-the-end of the payload. Relies on size(), so this is
// only valid for non-large objects.
inline Address HeapObjectHeader::PayloadEnd() {
  return reinterpret_cast<Address>(this) + size();
}
| 867 | 871 |
// Returns the payload size (object size minus the header). For large
// objects the header's size field holds the kLargeObjectSizeInHeader
// sentinel and the real size is fetched from the owning LargeObjectPage.
NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::PayloadSize() {
  CheckHeader();
  size_t size = encoded_ & kHeaderSizeMask;
  if (UNLIKELY(size == kLargeObjectSizeInHeader)) {
    DCHECK(PageFromObject(this)->IsLargeObjectPage());
    return static_cast<LargeObjectPage*>(PageFromObject(this))->PayloadSize();
  }
  DCHECK(!PageFromObject(this)->IsLargeObjectPage());
  return size - sizeof(HeapObjectHeader);
}
| 878 | 882 |
// Recovers the header from a payload pointer by stepping back over the
// header, then sanity-checks its magic before handing it out.
inline HeapObjectHeader* HeapObjectHeader::FromPayload(const void* payload) {
  Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
  HeapObjectHeader* header =
      reinterpret_cast<HeapObjectHeader*>(addr - sizeof(HeapObjectHeader));
  header->CheckHeader();
  return header;
}
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 943 #endif // CPU(64BIT) | 947 #endif // CPU(64BIT) |
| 944 | 948 |
// Returns the state of the wrapper mark bit. Presumably used by wrapper
// (JS-object) tracing — the consumer is not visible in this chunk.
NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::IsWrapperHeaderMarked()
    const {
  CheckHeader();
  return encoded_ & kHeaderWrapperMarkBitMask;
}
| 950 | 954 |
// Sets the wrapper mark bit; the bit must not already be set.
NO_SANITIZE_ADDRESS inline void HeapObjectHeader::MarkWrapperHeader() {
  CheckHeader();
  DCHECK(!IsWrapperHeaderMarked());
  encoded_ |= kHeaderWrapperMarkBitMask;
}
| 956 | 960 |
// Clears the wrapper mark bit; the bit must currently be set.
NO_SANITIZE_ADDRESS inline void HeapObjectHeader::UnmarkWrapperHeader() {
  CheckHeader();
  DCHECK(IsWrapperHeaderMarked());
  encoded_ &= ~kHeaderWrapperMarkBitMask;
}
| 962 | 966 |
// Returns the GC mark bit for this object.
NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::IsMarked() const {
  CheckHeader();
  return encoded_ & kHeaderMarkBitMask;
}
| 967 | 971 |
| 968 NO_SANITIZE_ADDRESS inline void HeapObjectHeader::Mark() { | 972 NO_SANITIZE_ADDRESS inline void HeapObjectHeader::Mark() { |
| 969 CheckHeader(); | 973 CheckHeader(); |
| 970 ASSERT(!IsMarked()); | 974 DCHECK(!IsMarked()); |
| 971 encoded_ = encoded_ | kHeaderMarkBitMask; | 975 encoded_ = encoded_ | kHeaderMarkBitMask; |
| 972 } | 976 } |
| 973 | 977 |
// Clears the GC mark bit; the object must currently be marked.
NO_SANITIZE_ADDRESS inline void HeapObjectHeader::Unmark() {
  CheckHeader();
  DCHECK(IsMarked());
  encoded_ &= ~kHeaderMarkBitMask;
}
| 979 | 983 |
// Allocates |allocation_size| bytes (header included) for an object with
// the given GCInfo index. Fast path is a bump-pointer allocation from the
// current allocation area; anything that does not fit goes through
// OutOfLineAllocate. Returns the payload address (just past the header).
// |allocation_size| is assumed to already be allocation-granularity
// rounded — not re-checked here.
inline Address NormalPageArena::AllocateObject(size_t allocation_size,
                                               size_t gc_info_index) {
  if (LIKELY(allocation_size <= remaining_allocation_size_)) {
    Address header_address = current_allocation_point_;
    current_allocation_point_ += allocation_size;
    remaining_allocation_size_ -= allocation_size;
    // Index 0 is not a valid GCInfo index for a live object.
    DCHECK_GT(gc_info_index, 0u);
    new (NotNull, header_address)
        HeapObjectHeader(allocation_size, gc_info_index);
    Address result = header_address + sizeof(HeapObjectHeader);
    // The payload must be allocation-granularity aligned.
    DCHECK(!(reinterpret_cast<uintptr_t>(result) & kAllocationMask));

    SET_MEMORY_ACCESSIBLE(result, allocation_size - sizeof(HeapObjectHeader));
#if DCHECK_IS_ON()
    // The whole allocation must fall inside a page of this arena.
    DCHECK(FindPageFromAddress(header_address + allocation_size - 1));
#endif
    return result;
  }
  return OutOfLineAllocate(allocation_size, gc_info_index);
}
| 998 | 1004 |
// Downcasts this normal page's arena to NormalPageArena. Safe by
// construction: normal pages are only ever owned by a NormalPageArena.
inline NormalPageArena* NormalPage::ArenaForNormalPage() const {
  return static_cast<NormalPageArena*>(Arena());
}
| 1002 | 1008 |
| 1003 } // namespace blink | 1009 } // namespace blink |
| 1004 | 1010 |
| 1005 #endif // HeapPage_h | 1011 #endif // HeapPage_h |
| OLD | NEW |