| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 43 matching lines...) |
| 54 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER | 54 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER |
| 55 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead. | 55 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead. |
| 56 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1 | 56 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1 |
| 57 | 57 |
| 58 // When finalizing a non-inlined vector backing store/container, remove | 58 // When finalizing a non-inlined vector backing store/container, remove |
| 59 // its contiguous container annotation. Required as it will not be destructed | 59 // its contiguous container annotation. Required as it will not be destructed |
| 60 // from its Vector. | 60 // from its Vector. |
| 61 #define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \ | 61 #define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \ |
| 62 do { \ | 62 do { \ |
| 63 BasePage* page = PageFromObject(object); \ | 63 BasePage* page = PageFromObject(object); \ |
| 64 ASSERT(page); \ | 64 DCHECK(page); \ |
| 65 bool is_container = \ | 65 bool is_container = \ |
| 66 ThreadState::IsVectorArenaIndex(page->Arena()->ArenaIndex()); \ | 66 ThreadState::IsVectorArenaIndex(page->Arena()->ArenaIndex()); \ |
| 67 if (!is_container && page->IsLargeObjectPage()) \ | 67 if (!is_container && page->IsLargeObjectPage()) \ |
| 68 is_container = \ | 68 is_container = \ |
| 69 static_cast<LargeObjectPage*>(page)->IsVectorBackingPage(); \ | 69 static_cast<LargeObjectPage*>(page)->IsVectorBackingPage(); \ |
| 70 if (is_container) \ | 70 if (is_container) \ |
| 71 ANNOTATE_DELETE_BUFFER(object, objectSize, 0); \ | 71 ANNOTATE_DELETE_BUFFER(object, objectSize, 0); \ |
| 72 } while (0) | 72 } while (0) |
| 73 | 73 |
| 74 // A vector backing store represented by a large object is marked | 74 // A vector backing store represented by a large object is marked |
| 75 // so that when it is finalized, its ASan annotation will be | 75 // so that when it is finalized, its ASan annotation will be |
| 76 // correctly retired. | 76 // correctly retired. |
| 77 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, large_object) \ | 77 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, large_object) \ |
| 78 if (ThreadState::IsVectorArenaIndex(arena->ArenaIndex())) { \ | 78 if (ThreadState::IsVectorArenaIndex(arena->ArenaIndex())) { \ |
| 79 BasePage* large_page = PageFromObject(large_object); \ | 79 BasePage* large_page = PageFromObject(large_object); \ |
| 80 ASSERT(large_page->IsLargeObjectPage()); \ | 80 DCHECK(large_page->IsLargeObjectPage()); \ |
| 81 static_cast<LargeObjectPage*>(large_page)->SetIsVectorBackingPage(); \ | 81 static_cast<LargeObjectPage*>(large_page)->SetIsVectorBackingPage(); \ |
| 82 } | 82 } |
| 83 #else | 83 #else |
| 84 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 | 84 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 |
| 85 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) | 85 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) |
| 86 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject) | 86 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject) |
| 87 #endif | 87 #endif |
| 88 | 88 |
| 89 namespace blink { | 89 namespace blink { |
| 90 | 90 |
| (...skipping 14 matching lines...) |
| 105 ASAN_RETIRE_CONTAINER_ANNOTATION(object, object_size); | 105 ASAN_RETIRE_CONTAINER_ANNOTATION(object, object_size); |
| 106 } | 106 } |
| 107 | 107 |
| 108 BaseArena::BaseArena(ThreadState* state, int index) | 108 BaseArena::BaseArena(ThreadState* state, int index) |
| 109 : first_page_(nullptr), | 109 : first_page_(nullptr), |
| 110 first_unswept_page_(nullptr), | 110 first_unswept_page_(nullptr), |
| 111 thread_state_(state), | 111 thread_state_(state), |
| 112 index_(index) {} | 112 index_(index) {} |
| 113 | 113 |
| 114 BaseArena::~BaseArena() { | 114 BaseArena::~BaseArena() { |
| 115 ASSERT(!first_page_); | 115 DCHECK(!first_page_); |
| 116 ASSERT(!first_unswept_page_); | 116 DCHECK(!first_unswept_page_); |
| 117 } | 117 } |
| 118 | 118 |
| 119 void BaseArena::RemoveAllPages() { | 119 void BaseArena::RemoveAllPages() { |
| 120 ClearFreeLists(); | 120 ClearFreeLists(); |
| 121 | 121 |
| 122 ASSERT(!first_unswept_page_); | 122 DCHECK(!first_unswept_page_); |
| 123 while (first_page_) { | 123 while (first_page_) { |
| 124 BasePage* page = first_page_; | 124 BasePage* page = first_page_; |
| 125 page->Unlink(&first_page_); | 125 page->Unlink(&first_page_); |
| 126 page->RemoveFromHeap(); | 126 page->RemoveFromHeap(); |
| 127 } | 127 } |
| 128 } | 128 } |
| 129 | 129 |
| 130 void BaseArena::TakeSnapshot(const String& dump_base_name, | 130 void BaseArena::TakeSnapshot(const String& dump_base_name, |
| 131 ThreadState::GCSnapshotInfo& info) { | 131 ThreadState::GCSnapshotInfo& info) { |
| 132 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" | 132 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" |
| (...skipping 31 matching lines...) |
| 164 for (BasePage* page = first_unswept_page_; page; page = page->Next()) { | 164 for (BasePage* page = first_unswept_page_; page; page = page->Next()) { |
| 165 if (page->Contains(address)) | 165 if (page->Contains(address)) |
| 166 return page; | 166 return page; |
| 167 } | 167 } |
| 168 return nullptr; | 168 return nullptr; |
| 169 } | 169 } |
| 170 #endif | 170 #endif |
| 171 | 171 |
| 172 void BaseArena::MakeConsistentForGC() { | 172 void BaseArena::MakeConsistentForGC() { |
| 173 ClearFreeLists(); | 173 ClearFreeLists(); |
| 174 ASSERT(IsConsistentForGC()); | 174 #if DCHECK_IS_ON() |
| | 175 DCHECK(IsConsistentForGC()); |
| | 176 #endif |
| 175 for (BasePage* page = first_page_; page; page = page->Next()) { | 177 for (BasePage* page = first_page_; page; page = page->Next()) { |
| 176 page->MarkAsUnswept(); | 178 page->MarkAsUnswept(); |
| 177 page->InvalidateObjectStartBitmap(); | 179 page->InvalidateObjectStartBitmap(); |
| 178 } | 180 } |
| 179 | 181 |
| 180 // We should not start a new GC until we finish sweeping in the current GC. | 182 // We should not start a new GC until we finish sweeping in the current GC. |
| 181 CHECK(!first_unswept_page_); | 183 CHECK(!first_unswept_page_); |
| 182 | 184 |
| 183 HeapCompact* heap_compactor = GetThreadState()->Heap().Compaction(); | 185 HeapCompact* heap_compactor = GetThreadState()->Heap().Compaction(); |
| 184 if (!heap_compactor->IsCompactingArena(ArenaIndex())) | 186 if (!heap_compactor->IsCompactingArena(ArenaIndex())) |
| 185 return; | 187 return; |
| 186 | 188 |
| 187 BasePage* next_page = first_page_; | 189 BasePage* next_page = first_page_; |
| 188 while (next_page) { | 190 while (next_page) { |
| 189 if (!next_page->IsLargeObjectPage()) | 191 if (!next_page->IsLargeObjectPage()) |
| 190 heap_compactor->AddCompactingPage(next_page); | 192 heap_compactor->AddCompactingPage(next_page); |
| 191 next_page = next_page->Next(); | 193 next_page = next_page->Next(); |
| 192 } | 194 } |
| 193 } | 195 } |
| 194 | 196 |
| 195 void BaseArena::MakeConsistentForMutator() { | 197 void BaseArena::MakeConsistentForMutator() { |
| 196 ClearFreeLists(); | 198 ClearFreeLists(); |
| 197 ASSERT(IsConsistentForGC()); | 199 #if DCHECK_IS_ON() |
| 198 ASSERT(!first_page_); | 200 DCHECK(IsConsistentForGC()); |
| | 201 #endif |
| | 202 DCHECK(!first_page_); |
| 199 | 203 |
| 200 // Drop marks from marked objects and rebuild free lists in preparation for | 204 // Drop marks from marked objects and rebuild free lists in preparation for |
| 201 // resuming the executions of mutators. | 205 // resuming the executions of mutators. |
| 202 BasePage* previous_page = nullptr; | 206 BasePage* previous_page = nullptr; |
| 203 for (BasePage *page = first_unswept_page_; page; | 207 for (BasePage *page = first_unswept_page_; page; |
| 204 previous_page = page, page = page->Next()) { | 208 previous_page = page, page = page->Next()) { |
| 205 page->MakeConsistentForMutator(); | 209 page->MakeConsistentForMutator(); |
| 206 page->MarkAsSwept(); | 210 page->MarkAsSwept(); |
| 207 page->InvalidateObjectStartBitmap(); | 211 page->InvalidateObjectStartBitmap(); |
| 208 } | 212 } |
| 209 if (previous_page) { | 213 if (previous_page) { |
| 210 ASSERT(first_unswept_page_); | 214 DCHECK(first_unswept_page_); |
| 211 previous_page->next_ = first_page_; | 215 previous_page->next_ = first_page_; |
| 212 first_page_ = first_unswept_page_; | 216 first_page_ = first_unswept_page_; |
| 213 first_unswept_page_ = nullptr; | 217 first_unswept_page_ = nullptr; |
| 214 } | 218 } |
| 215 ASSERT(!first_unswept_page_); | 219 DCHECK(!first_unswept_page_); |
| 216 } | 220 } |
| 217 | 221 |
| 218 size_t BaseArena::ObjectPayloadSizeForTesting() { | 222 size_t BaseArena::ObjectPayloadSizeForTesting() { |
| 219 ASSERT(IsConsistentForGC()); | 223 #if DCHECK_IS_ON() |
| 220 ASSERT(!first_unswept_page_); | 224 DCHECK(IsConsistentForGC()); |
| | 225 #endif |
| | 226 DCHECK(!first_unswept_page_); |
| 221 | 227 |
| 222 size_t object_payload_size = 0; | 228 size_t object_payload_size = 0; |
| 223 for (BasePage* page = first_page_; page; page = page->Next()) | 229 for (BasePage* page = first_page_; page; page = page->Next()) |
| 224 object_payload_size += page->ObjectPayloadSizeForTesting(); | 230 object_payload_size += page->ObjectPayloadSizeForTesting(); |
| 225 return object_payload_size; | 231 return object_payload_size; |
| 226 } | 232 } |
| 227 | 233 |
| 228 void BaseArena::PrepareForSweep() { | 234 void BaseArena::PrepareForSweep() { |
| 229 ASSERT(GetThreadState()->IsInGC()); | 235 DCHECK(GetThreadState()->IsInGC()); |
| 230 ASSERT(!first_unswept_page_); | 236 DCHECK(!first_unswept_page_); |
| 231 | 237 |
| 232 // Move all pages to a list of unswept pages. | 238 // Move all pages to a list of unswept pages. |
| 233 first_unswept_page_ = first_page_; | 239 first_unswept_page_ = first_page_; |
| 234 first_page_ = nullptr; | 240 first_page_ = nullptr; |
| 235 } | 241 } |
| 236 | 242 |
| 237 #if defined(ADDRESS_SANITIZER) | 243 #if defined(ADDRESS_SANITIZER) |
| 238 void BaseArena::PoisonArena() { | 244 void BaseArena::PoisonArena() { |
| 239 for (BasePage* page = first_unswept_page_; page; page = page->Next()) | 245 for (BasePage* page = first_unswept_page_; page; page = page->Next()) |
| 240 page->PoisonUnmarkedObjects(); | 246 page->PoisonUnmarkedObjects(); |
| (...skipping 42 matching lines...) |
| 283 } | 289 } |
| 284 | 290 |
| 285 bool BaseArena::LazySweepWithDeadline(double deadline_seconds) { | 291 bool BaseArena::LazySweepWithDeadline(double deadline_seconds) { |
| 286 // It might be heavy to call | 292 // It might be heavy to call |
| 287 // Platform::current()->monotonicallyIncreasingTimeSeconds() per page (i.e., | 293 // Platform::current()->monotonicallyIncreasingTimeSeconds() per page (i.e., |
| 288 // 128 KB sweep or one LargeObject sweep), so we check the deadline per 10 | 294 // 128 KB sweep or one LargeObject sweep), so we check the deadline per 10 |
| 289 // pages. | 295 // pages. |
| 290 static const int kDeadlineCheckInterval = 10; | 296 static const int kDeadlineCheckInterval = 10; |
| 291 | 297 |
| 292 CHECK(GetThreadState()->IsSweepingInProgress()); | 298 CHECK(GetThreadState()->IsSweepingInProgress()); |
| 293 ASSERT(GetThreadState()->SweepForbidden()); | 299 DCHECK(GetThreadState()->SweepForbidden()); |
| 294 ASSERT(!GetThreadState()->IsMainThread() || | 300 DCHECK(!GetThreadState()->IsMainThread() || |
| 295 ScriptForbiddenScope::IsScriptForbidden()); | 301 ScriptForbiddenScope::IsScriptForbidden()); |
| 296 | 302 |
| 297 NormalPageArena* normal_arena = nullptr; | 303 NormalPageArena* normal_arena = nullptr; |
| 298 if (first_unswept_page_ && !first_unswept_page_->IsLargeObjectPage()) { | 304 if (first_unswept_page_ && !first_unswept_page_->IsLargeObjectPage()) { |
| 299 // Mark this NormalPageArena as being lazily swept. | 305 // Mark this NormalPageArena as being lazily swept. |
| 300 NormalPage* normal_page = | 306 NormalPage* normal_page = |
| 301 reinterpret_cast<NormalPage*>(first_unswept_page_); | 307 reinterpret_cast<NormalPage*>(first_unswept_page_); |
| 302 normal_arena = normal_page->ArenaForNormalPage(); | 308 normal_arena = normal_page->ArenaForNormalPage(); |
| 303 normal_arena->SetIsLazySweeping(true); | 309 normal_arena->SetIsLazySweeping(true); |
| 304 } | 310 } |
| (...skipping 12 matching lines...) |
| 317 page_count++; | 323 page_count++; |
| 318 } | 324 } |
| 319 ThreadHeap::ReportMemoryUsageForTracing(); | 325 ThreadHeap::ReportMemoryUsageForTracing(); |
| 320 if (normal_arena) | 326 if (normal_arena) |
| 321 normal_arena->SetIsLazySweeping(false); | 327 normal_arena->SetIsLazySweeping(false); |
| 322 return true; | 328 return true; |
| 323 } | 329 } |
| 324 | 330 |
| 325 void BaseArena::CompleteSweep() { | 331 void BaseArena::CompleteSweep() { |
| 326 CHECK(GetThreadState()->IsSweepingInProgress()); | 332 CHECK(GetThreadState()->IsSweepingInProgress()); |
| 327 ASSERT(GetThreadState()->SweepForbidden()); | 333 DCHECK(GetThreadState()->SweepForbidden()); |
| 328 ASSERT(!GetThreadState()->IsMainThread() || | 334 DCHECK(!GetThreadState()->IsMainThread() || |
| 329 ScriptForbiddenScope::IsScriptForbidden()); | 335 ScriptForbiddenScope::IsScriptForbidden()); |
| 330 | 336 |
| 331 while (first_unswept_page_) { | 337 while (first_unswept_page_) { |
| 332 SweepUnsweptPage(); | 338 SweepUnsweptPage(); |
| 333 } | 339 } |
| 334 ThreadHeap::ReportMemoryUsageForTracing(); | 340 ThreadHeap::ReportMemoryUsageForTracing(); |
| 335 } | 341 } |
| 336 | 342 |
| 337 Address BaseArena::AllocateLargeObject(size_t allocation_size, | 343 Address BaseArena::AllocateLargeObject(size_t allocation_size, |
| 338 size_t gc_info_index) { | 344 size_t gc_info_index) { |
| (...skipping 321 matching lines...) |
| 660 // | 666 // |
| 661 // FIXME: This threshold is determined just to optimize blink_perf | 667 // FIXME: This threshold is determined just to optimize blink_perf |
| 662 * benchmarks. Coalescing is very sensitive to the threshold and | 668 * benchmarks. Coalescing is very sensitive to the threshold and |
| 663 // we need further investigations on the coalescing scheme. | 669 // we need further investigations on the coalescing scheme. |
| 664 if (promptly_freed_size_ < 1024 * 1024) | 670 if (promptly_freed_size_ < 1024 * 1024) |
| 665 return false; | 671 return false; |
| 666 | 672 |
| 667 if (GetThreadState()->SweepForbidden()) | 673 if (GetThreadState()->SweepForbidden()) |
| 668 return false; | 674 return false; |
| 669 | 675 |
| 670 ASSERT(!HasCurrentAllocationArea()); | 676 DCHECK(!HasCurrentAllocationArea()); |
| 671 TRACE_EVENT0("blink_gc", "BaseArena::coalesce"); | 677 TRACE_EVENT0("blink_gc", "BaseArena::coalesce"); |
| 672 | 678 |
| 673 // Rebuild free lists. | 679 // Rebuild free lists. |
| 674 free_list_.Clear(); | 680 free_list_.Clear(); |
| 675 size_t freed_size = 0; | 681 size_t freed_size = 0; |
| 676 for (NormalPage* page = static_cast<NormalPage*>(first_page_); page; | 682 for (NormalPage* page = static_cast<NormalPage*>(first_page_); page; |
| 677 page = static_cast<NormalPage*>(page->Next())) { | 683 page = static_cast<NormalPage*>(page->Next())) { |
| 678 Address start_of_gap = page->Payload(); | 684 Address start_of_gap = page->Payload(); |
| 679 for (Address header_address = start_of_gap; | 685 for (Address header_address = start_of_gap; |
| 680 header_address < page->PayloadEnd();) { | 686 header_address < page->PayloadEnd();) { |
| 681 HeapObjectHeader* header = | 687 HeapObjectHeader* header = |
| 682 reinterpret_cast<HeapObjectHeader*>(header_address); | 688 reinterpret_cast<HeapObjectHeader*>(header_address); |
| 683 size_t size = header->size(); | 689 size_t size = header->size(); |
| 684 ASSERT(size > 0); | 690 DCHECK_GT(size, 0u); |
| 685 ASSERT(size < BlinkPagePayloadSize()); | 691 DCHECK_LT(size, BlinkPagePayloadSize()); |
| 686 | 692 |
| 687 if (header->IsPromptlyFreed()) { | 693 if (header->IsPromptlyFreed()) { |
| 688 ASSERT(size >= sizeof(HeapObjectHeader)); | 694 DCHECK_GE(size, sizeof(HeapObjectHeader)); |
| 689 // Zero the memory in the free list header to maintain the | 695 // Zero the memory in the free list header to maintain the |
| 690 // invariant that memory on the free list is zero filled. | 696 // invariant that memory on the free list is zero filled. |
| 691 // The rest of the memory is already on the free list and is | 697 // The rest of the memory is already on the free list and is |
| 692 // therefore already zero filled. | 698 // therefore already zero filled. |
| 693 SET_MEMORY_INACCESSIBLE(header_address, sizeof(HeapObjectHeader)); | 699 SET_MEMORY_INACCESSIBLE(header_address, sizeof(HeapObjectHeader)); |
| 694 CHECK_MEMORY_INACCESSIBLE(header_address, size); | 700 CHECK_MEMORY_INACCESSIBLE(header_address, size); |
| 695 freed_size += size; | 701 freed_size += size; |
| 696 header_address += size; | 702 header_address += size; |
| 697 continue; | 703 continue; |
| 698 } | 704 } |
| (...skipping 13 matching lines...) |
| 712 AddToFreeList(start_of_gap, header_address - start_of_gap); | 718 AddToFreeList(start_of_gap, header_address - start_of_gap); |
| 713 | 719 |
| 714 header_address += size; | 720 header_address += size; |
| 715 start_of_gap = header_address; | 721 start_of_gap = header_address; |
| 716 } | 722 } |
| 717 | 723 |
| 718 if (start_of_gap != page->PayloadEnd()) | 724 if (start_of_gap != page->PayloadEnd()) |
| 719 AddToFreeList(start_of_gap, page->PayloadEnd() - start_of_gap); | 725 AddToFreeList(start_of_gap, page->PayloadEnd() - start_of_gap); |
| 720 } | 726 } |
| 721 GetThreadState()->DecreaseAllocatedObjectSize(freed_size); | 727 GetThreadState()->DecreaseAllocatedObjectSize(freed_size); |
| 722 ASSERT(promptly_freed_size_ == freed_size); | 728 DCHECK_EQ(promptly_freed_size_, freed_size); |
| 723 promptly_freed_size_ = 0; | 729 promptly_freed_size_ = 0; |
| 724 return true; | 730 return true; |
| 725 } | 731 } |
| 726 | 732 |
| 727 void NormalPageArena::PromptlyFreeObject(HeapObjectHeader* header) { | 733 void NormalPageArena::PromptlyFreeObject(HeapObjectHeader* header) { |
| 728 ASSERT(!GetThreadState()->SweepForbidden()); | 734 DCHECK(!GetThreadState()->SweepForbidden()); |
| 729 Address address = reinterpret_cast<Address>(header); | 735 Address address = reinterpret_cast<Address>(header); |
| 730 Address payload = header->Payload(); | 736 Address payload = header->Payload(); |
| 731 size_t size = header->size(); | 737 size_t size = header->size(); |
| 732 size_t payload_size = header->PayloadSize(); | 738 size_t payload_size = header->PayloadSize(); |
| 733 ASSERT(size > 0); | 739 DCHECK_GT(size, 0u); |
| 734 ASSERT(PageFromObject(address) == FindPageFromAddress(address)); | 740 #if DCHECK_IS_ON() |
| 735 | 741 DCHECK_EQ(PageFromObject(address), FindPageFromAddress(address)); |
| | 742 #endif |
| 736 { | 743 { |
| 737 ThreadState::SweepForbiddenScope forbidden_scope(GetThreadState()); | 744 ThreadState::SweepForbiddenScope forbidden_scope(GetThreadState()); |
| 738 header->Finalize(payload, payload_size); | 745 header->Finalize(payload, payload_size); |
| 739 if (address + size == current_allocation_point_) { | 746 if (address + size == current_allocation_point_) { |
| 740 current_allocation_point_ = address; | 747 current_allocation_point_ = address; |
| 741 SetRemainingAllocationSize(remaining_allocation_size_ + size); | 748 SetRemainingAllocationSize(remaining_allocation_size_ + size); |
| 742 SET_MEMORY_INACCESSIBLE(address, size); | 749 SET_MEMORY_INACCESSIBLE(address, size); |
| 743 return; | 750 return; |
| 744 } | 751 } |
| 745 SET_MEMORY_INACCESSIBLE(payload, payload_size); | 752 SET_MEMORY_INACCESSIBLE(payload, payload_size); |
| 746 header->MarkPromptlyFreed(); | 753 header->MarkPromptlyFreed(); |
| 747 } | 754 } |
| 748 | 755 |
| 749 promptly_freed_size_ += size; | 756 promptly_freed_size_ += size; |
| 750 } | 757 } |
| 751 | 758 |
| 752 bool NormalPageArena::ExpandObject(HeapObjectHeader* header, size_t new_size) { | 759 bool NormalPageArena::ExpandObject(HeapObjectHeader* header, size_t new_size) { |
| 753 // It's possible that Vector requests a smaller expanded size because | 760 // It's possible that Vector requests a smaller expanded size because |
| 754 // Vector::shrinkCapacity can set a capacity smaller than the actual payload | 761 // Vector::shrinkCapacity can set a capacity smaller than the actual payload |
| 755 // size. | 762 // size. |
| 756 if (header->PayloadSize() >= new_size) | 763 if (header->PayloadSize() >= new_size) |
| 757 return true; | 764 return true; |
| 758 size_t allocation_size = ThreadHeap::AllocationSizeFromSize(new_size); | 765 size_t allocation_size = ThreadHeap::AllocationSizeFromSize(new_size); |
| 759 ASSERT(allocation_size > header->size()); | 766 DCHECK_GT(allocation_size, header->size()); |
| 760 size_t expand_size = allocation_size - header->size(); | 767 size_t expand_size = allocation_size - header->size(); |
| 761 if (IsObjectAllocatedAtAllocationPoint(header) && | 768 if (IsObjectAllocatedAtAllocationPoint(header) && |
| 762 expand_size <= remaining_allocation_size_) { | 769 expand_size <= remaining_allocation_size_) { |
| 763 current_allocation_point_ += expand_size; | 770 current_allocation_point_ += expand_size; |
| 764 ASSERT(remaining_allocation_size_ >= expand_size); | 771 DCHECK_GE(remaining_allocation_size_, expand_size); |
| 765 SetRemainingAllocationSize(remaining_allocation_size_ - expand_size); | 772 SetRemainingAllocationSize(remaining_allocation_size_ - expand_size); |
| 766 // Unpoison the memory used for the object (payload). | 773 // Unpoison the memory used for the object (payload). |
| 767 SET_MEMORY_ACCESSIBLE(header->PayloadEnd(), expand_size); | 774 SET_MEMORY_ACCESSIBLE(header->PayloadEnd(), expand_size); |
| 768 header->SetSize(allocation_size); | 775 header->SetSize(allocation_size); |
| 769 ASSERT(FindPageFromAddress(header->PayloadEnd() - 1)); | 776 #if DCHECK_IS_ON() |
| | 777 DCHECK(FindPageFromAddress(header->PayloadEnd() - 1)); |
| | 778 #endif |
| 770 return true; | 779 return true; |
| 771 } | 780 } |
| 772 return false; | 781 return false; |
| 773 } | 782 } |
| 774 | 783 |
| 775 bool NormalPageArena::ShrinkObject(HeapObjectHeader* header, size_t new_size) { | 784 bool NormalPageArena::ShrinkObject(HeapObjectHeader* header, size_t new_size) { |
| 776 ASSERT(header->PayloadSize() > new_size); | 785 DCHECK_GT(header->PayloadSize(), new_size); |
| 777 size_t allocation_size = ThreadHeap::AllocationSizeFromSize(new_size); | 786 size_t allocation_size = ThreadHeap::AllocationSizeFromSize(new_size); |
| 778 ASSERT(header->size() > allocation_size); | 787 DCHECK_GT(header->size(), allocation_size); |
| 779 size_t shrink_size = header->size() - allocation_size; | 788 size_t shrink_size = header->size() - allocation_size; |
| 780 if (IsObjectAllocatedAtAllocationPoint(header)) { | 789 if (IsObjectAllocatedAtAllocationPoint(header)) { |
| 781 current_allocation_point_ -= shrink_size; | 790 current_allocation_point_ -= shrink_size; |
| 782 SetRemainingAllocationSize(remaining_allocation_size_ + shrink_size); | 791 SetRemainingAllocationSize(remaining_allocation_size_ + shrink_size); |
| 783 SET_MEMORY_INACCESSIBLE(current_allocation_point_, shrink_size); | 792 SET_MEMORY_INACCESSIBLE(current_allocation_point_, shrink_size); |
| 784 header->SetSize(allocation_size); | 793 header->SetSize(allocation_size); |
| 785 return true; | 794 return true; |
| 786 } | 795 } |
| 787 ASSERT(shrink_size >= sizeof(HeapObjectHeader)); | 796 DCHECK_GE(shrink_size, sizeof(HeapObjectHeader)); |
| 788 ASSERT(header->GcInfoIndex() > 0); | 797 DCHECK_GT(header->GcInfoIndex(), 0u); |
| 789 Address shrink_address = header->PayloadEnd() - shrink_size; | 798 Address shrink_address = header->PayloadEnd() - shrink_size; |
| 790 HeapObjectHeader* freed_header = new (NotNull, shrink_address) | 799 HeapObjectHeader* freed_header = new (NotNull, shrink_address) |
| 791 HeapObjectHeader(shrink_size, header->GcInfoIndex()); | 800 HeapObjectHeader(shrink_size, header->GcInfoIndex()); |
| 792 freed_header->MarkPromptlyFreed(); | 801 freed_header->MarkPromptlyFreed(); |
| 793 ASSERT(PageFromObject(reinterpret_cast<Address>(header)) == | 802 #if DCHECK_IS_ON() |
| 794 FindPageFromAddress(reinterpret_cast<Address>(header))); | 803 DCHECK_EQ(PageFromObject(reinterpret_cast<Address>(header)), |
| | 804 FindPageFromAddress(reinterpret_cast<Address>(header))); |
| | 805 #endif |
| 795 promptly_freed_size_ += shrink_size; | 806 promptly_freed_size_ += shrink_size; |
| 796 header->SetSize(allocation_size); | 807 header->SetSize(allocation_size); |
| 797 SET_MEMORY_INACCESSIBLE(shrink_address + sizeof(HeapObjectHeader), | 808 SET_MEMORY_INACCESSIBLE(shrink_address + sizeof(HeapObjectHeader), |
| 798 shrink_size - sizeof(HeapObjectHeader)); | 809 shrink_size - sizeof(HeapObjectHeader)); |
| 799 return false; | 810 return false; |
| 800 } | 811 } |
| 801 | 812 |
| 802 Address NormalPageArena::LazySweepPages(size_t allocation_size, | 813 Address NormalPageArena::LazySweepPages(size_t allocation_size, |
| 803 size_t gc_info_index) { | 814 size_t gc_info_index) { |
| 804 ASSERT(!HasCurrentAllocationArea()); | 815 DCHECK(!HasCurrentAllocationArea()); |
| 805 AutoReset<bool> is_lazy_sweeping(&is_lazy_sweeping_, true); | 816 AutoReset<bool> is_lazy_sweeping(&is_lazy_sweeping_, true); |
| 806 Address result = nullptr; | 817 Address result = nullptr; |
| 807 while (first_unswept_page_) { | 818 while (first_unswept_page_) { |
| 808 BasePage* page = first_unswept_page_; | 819 BasePage* page = first_unswept_page_; |
| 809 if (page->IsEmpty()) { | 820 if (page->IsEmpty()) { |
| 810 page->Unlink(&first_unswept_page_); | 821 page->Unlink(&first_unswept_page_); |
| 811 page->RemoveFromHeap(); | 822 page->RemoveFromHeap(); |
| 812 } else { | 823 } else { |
| 813 // Sweep a page and move the page from m_firstUnsweptPages to | 824 // Sweep a page and move the page from m_firstUnsweptPages to |
| 814 // m_firstPages. | 825 // m_firstPages. |
| (...skipping 28 matching lines...) |
| 843 remaining_allocation_size_ - last_remaining_allocation_size_); | 854 remaining_allocation_size_ - last_remaining_allocation_size_); |
| 844 last_remaining_allocation_size_ = remaining_allocation_size_; | 855 last_remaining_allocation_size_ = remaining_allocation_size_; |
| 845 } | 856 } |
| 846 | 857 |
| 847 void NormalPageArena::UpdateRemainingAllocationSize() { | 858 void NormalPageArena::UpdateRemainingAllocationSize() { |
| 848 if (last_remaining_allocation_size_ > RemainingAllocationSize()) { | 859 if (last_remaining_allocation_size_ > RemainingAllocationSize()) { |
| 849 GetThreadState()->IncreaseAllocatedObjectSize( | 860 GetThreadState()->IncreaseAllocatedObjectSize( |
| 850 last_remaining_allocation_size_ - RemainingAllocationSize()); | 861 last_remaining_allocation_size_ - RemainingAllocationSize()); |
| 851 last_remaining_allocation_size_ = RemainingAllocationSize(); | 862 last_remaining_allocation_size_ = RemainingAllocationSize(); |
| 852 } | 863 } |
| 853 ASSERT(last_remaining_allocation_size_ == RemainingAllocationSize()); | 864 DCHECK_EQ(last_remaining_allocation_size_, RemainingAllocationSize()); |
| 854 } | 865 } |
| 855 | 866 |
| 856 void NormalPageArena::SetAllocationPoint(Address point, size_t size) { | 867 void NormalPageArena::SetAllocationPoint(Address point, size_t size) { |
| 857 #if DCHECK_IS_ON() | 868 #if DCHECK_IS_ON() |
| 858 if (point) { | 869 if (point) { |
| 859 ASSERT(size); | 870 DCHECK(size); |
| 860 BasePage* page = PageFromObject(point); | 871 BasePage* page = PageFromObject(point); |
| 861 ASSERT(!page->IsLargeObjectPage()); | 872 DCHECK(!page->IsLargeObjectPage()); |
| 862 ASSERT(size <= static_cast<NormalPage*>(page)->PayloadSize()); | 873 DCHECK_LE(size, static_cast<NormalPage*>(page)->PayloadSize()); |
| 863 } | 874 } |
| 864 #endif | 875 #endif |
| 865 if (HasCurrentAllocationArea()) { | 876 if (HasCurrentAllocationArea()) { |
| 866 AddToFreeList(CurrentAllocationPoint(), RemainingAllocationSize()); | 877 AddToFreeList(CurrentAllocationPoint(), RemainingAllocationSize()); |
| 867 } | 878 } |
| 868 UpdateRemainingAllocationSize(); | 879 UpdateRemainingAllocationSize(); |
| 869 current_allocation_point_ = point; | 880 current_allocation_point_ = point; |
| 870 last_remaining_allocation_size_ = remaining_allocation_size_ = size; | 881 last_remaining_allocation_size_ = remaining_allocation_size_ = size; |
| 871 } | 882 } |
| 872 | 883 |
| 873 Address NormalPageArena::OutOfLineAllocate(size_t allocation_size, | 884 Address NormalPageArena::OutOfLineAllocate(size_t allocation_size, |
| 874 size_t gc_info_index) { | 885 size_t gc_info_index) { |
| 875 ASSERT(allocation_size > RemainingAllocationSize()); | 886 DCHECK_GT(allocation_size, RemainingAllocationSize()); |
| 876 ASSERT(allocation_size >= kAllocationGranularity); | 887 DCHECK_GE(allocation_size, kAllocationGranularity); |
| 877 | 888 |
| 878 // 1. If this allocation is big enough, allocate a large object. | 889 // 1. If this allocation is big enough, allocate a large object. |
| 879 if (allocation_size >= kLargeObjectSizeThreshold) | 890 if (allocation_size >= kLargeObjectSizeThreshold) |
| 880 return AllocateLargeObject(allocation_size, gc_info_index); | 891 return AllocateLargeObject(allocation_size, gc_info_index); |
| 881 | 892 |
| 882 // 2. Try to allocate from a free list. | 893 // 2. Try to allocate from a free list. |
| 883 UpdateRemainingAllocationSize(); | 894 UpdateRemainingAllocationSize(); |
| 884 Address result = AllocateFromFreeList(allocation_size, gc_info_index); | 895 Address result = AllocateFromFreeList(allocation_size, gc_info_index); |
| 885 if (result) | 896 if (result) |
| 886 return result; | 897 return result; |
| (...skipping 45 matching lines...) |
| 932 if (allocation_size > bucket_size) { | 943 if (allocation_size > bucket_size) { |
| 933 // Final bucket candidate; check initial entry if it is able | 944 // Final bucket candidate; check initial entry if it is able |
| 934 // to service this allocation. Do not perform a linear scan, | 945 // to service this allocation. Do not perform a linear scan, |
| 935 // as it is considered too costly. | 946 // as it is considered too costly. |
| 936 if (!entry || entry->size() < allocation_size) | 947 if (!entry || entry->size() < allocation_size) |
| 937 break; | 948 break; |
| 938 } | 949 } |
| 939 if (entry) { | 950 if (entry) { |
| 940 entry->Unlink(&free_list_.free_lists_[index]); | 951 entry->Unlink(&free_list_.free_lists_[index]); |
| 941 SetAllocationPoint(entry->GetAddress(), entry->size()); | 952 SetAllocationPoint(entry->GetAddress(), entry->size()); |
| 942 ASSERT(HasCurrentAllocationArea()); | 953 DCHECK(HasCurrentAllocationArea()); |
| 943 ASSERT(RemainingAllocationSize() >= allocation_size); | 954 DCHECK_GE(RemainingAllocationSize(), allocation_size); |
| 944 free_list_.biggest_free_list_index_ = index; | 955 free_list_.biggest_free_list_index_ = index; |
| 945 return AllocateObject(allocation_size, gc_info_index); | 956 return AllocateObject(allocation_size, gc_info_index); |
| 946 } | 957 } |
| 947 } | 958 } |
| 948 free_list_.biggest_free_list_index_ = index; | 959 free_list_.biggest_free_list_index_ = index; |
| 949 return nullptr; | 960 return nullptr; |
| 950 } | 961 } |
| 951 | 962 |
| 952 LargeObjectArena::LargeObjectArena(ThreadState* state, int index) | 963 LargeObjectArena::LargeObjectArena(ThreadState* state, int index) |
| 953 : BaseArena(state, index) {} | 964 : BaseArena(state, index) {} |
| 954 | 965 |
| 955 Address LargeObjectArena::AllocateLargeObjectPage(size_t allocation_size, | 966 Address LargeObjectArena::AllocateLargeObjectPage(size_t allocation_size, |
| 956 size_t gc_info_index) { | 967 size_t gc_info_index) { |
| 957 // Caller already added space for object header and rounded up to allocation | 968 // Caller already added space for object header and rounded up to allocation |
| 958 // alignment | 969 // alignment |
| 959 ASSERT(!(allocation_size & kAllocationMask)); | 970 DCHECK(!(allocation_size & kAllocationMask)); |
| 960 | 971 |
| 961 // 1. Try to sweep large objects more than allocationSize bytes | 972 // 1. Try to sweep large objects more than allocationSize bytes |
| 962 // before allocating a new large object. | 973 // before allocating a new large object. |
| 963 Address result = LazySweep(allocation_size, gc_info_index); | 974 Address result = LazySweep(allocation_size, gc_info_index); |
| 964 if (result) | 975 if (result) |
| 965 return result; | 976 return result; |
| 966 | 977 |
| 967 // 2. If we have failed in sweeping allocationSize bytes, | 978 // 2. If we have failed in sweeping allocationSize bytes, |
| 968 // we complete sweeping before allocating this large object. | 979 // we complete sweeping before allocating this large object. |
| 969 GetThreadState()->CompleteSweep(); | 980 GetThreadState()->CompleteSweep(); |
| (...skipping 16 matching lines...) |
| 986 | 997 |
| 987 GetThreadState()->ShouldFlushHeapDoesNotContainCache(); | 998 GetThreadState()->ShouldFlushHeapDoesNotContainCache(); |
| 988 PageMemory* page_memory = PageMemory::Allocate( | 999 PageMemory* page_memory = PageMemory::Allocate( |
| 989 large_object_size, GetThreadState()->Heap().GetRegionTree()); | 1000 large_object_size, GetThreadState()->Heap().GetRegionTree()); |
| 990 Address large_object_address = page_memory->WritableStart(); | 1001 Address large_object_address = page_memory->WritableStart(); |
| 991 Address header_address = | 1002 Address header_address = |
| 992 large_object_address + LargeObjectPage::PageHeaderSize(); | 1003 large_object_address + LargeObjectPage::PageHeaderSize(); |
| 993 #if DCHECK_IS_ON() | 1004 #if DCHECK_IS_ON() |
| 994 // Verify that the allocated PageMemory is expectedly zeroed. | 1005 // Verify that the allocated PageMemory is expectedly zeroed. |
| 995 for (size_t i = 0; i < large_object_size; ++i) | 1006 for (size_t i = 0; i < large_object_size; ++i) |
| 996 ASSERT(!large_object_address[i]); | 1007 DCHECK(!large_object_address[i]); |
| 997 #endif | 1008 #endif |
| 998 ASSERT(gc_info_index > 0); | 1009 DCHECK_GT(gc_info_index, 0u); |
| 999 HeapObjectHeader* header = new (NotNull, header_address) | 1010 HeapObjectHeader* header = new (NotNull, header_address) |
| 1000 HeapObjectHeader(kLargeObjectSizeInHeader, gc_info_index); | 1011 HeapObjectHeader(kLargeObjectSizeInHeader, gc_info_index); |
| 1001 Address result = header_address + sizeof(*header); | 1012 Address result = header_address + sizeof(*header); |
| 1002 ASSERT(!(reinterpret_cast<uintptr_t>(result) & kAllocationMask)); | 1013 DCHECK(!(reinterpret_cast<uintptr_t>(result) & kAllocationMask)); |
| 1003 LargeObjectPage* large_object = new (large_object_address) | 1014 LargeObjectPage* large_object = new (large_object_address) |
| 1004 LargeObjectPage(page_memory, this, allocation_size); | 1015 LargeObjectPage(page_memory, this, allocation_size); |
| 1005 | 1016 |
| 1006 // Poison the object header and allocationGranularity bytes after the object | 1017 // Poison the object header and allocationGranularity bytes after the object |
| 1007 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 1018 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
| 1008 ASAN_POISON_MEMORY_REGION(large_object->GetAddress() + large_object->size(), | 1019 ASAN_POISON_MEMORY_REGION(large_object->GetAddress() + large_object->size(), |
| 1009 kAllocationGranularity); | 1020 kAllocationGranularity); |
| 1010 | 1021 |
| 1011 large_object->Link(&first_page_); | 1022 large_object->Link(&first_page_); |
| 1012 | 1023 |
| (...skipping 29 matching lines...) |
| 1042 BasePage* page = first_unswept_page_; | 1053 BasePage* page = first_unswept_page_; |
| 1043 if (page->IsEmpty()) { | 1054 if (page->IsEmpty()) { |
| 1044 swept_size += static_cast<LargeObjectPage*>(page)->PayloadSize() + | 1055 swept_size += static_cast<LargeObjectPage*>(page)->PayloadSize() + |
| 1045 sizeof(HeapObjectHeader); | 1056 sizeof(HeapObjectHeader); |
| 1046 page->Unlink(&first_unswept_page_); | 1057 page->Unlink(&first_unswept_page_); |
| 1047 page->RemoveFromHeap(); | 1058 page->RemoveFromHeap(); |
| 1048 // For LargeObjectPage, stop lazy sweeping once we have swept | 1059 // For LargeObjectPage, stop lazy sweeping once we have swept |
| 1049 // more than allocationSize bytes. | 1060 // more than allocationSize bytes. |
| 1050 if (swept_size >= allocation_size) { | 1061 if (swept_size >= allocation_size) { |
| 1051 result = DoAllocateLargeObjectPage(allocation_size, gc_info_index); | 1062 result = DoAllocateLargeObjectPage(allocation_size, gc_info_index); |
| 1052 ASSERT(result); | 1063 DCHECK(result); |
| 1053 break; | 1064 break; |
| 1054 } | 1065 } |
| 1055 } else { | 1066 } else { |
| 1056 // Sweep a page and move the page from m_firstUnsweptPages to | 1067 // Sweep a page and move the page from m_firstUnsweptPages to |
| 1057 // m_firstPages. | 1068 // m_firstPages. |
| 1058 page->Sweep(); | 1069 page->Sweep(); |
| 1059 page->Unlink(&first_unswept_page_); | 1070 page->Unlink(&first_unswept_page_); |
| 1060 page->Link(&first_page_); | 1071 page->Link(&first_page_); |
| 1061 page->MarkAsSwept(); | 1072 page->MarkAsSwept(); |
| 1062 } | 1073 } |
| 1063 } | 1074 } |
| 1064 return result; | 1075 return result; |
| 1065 } | 1076 } |
| 1066 | 1077 |
| 1067 FreeList::FreeList() : biggest_free_list_index_(0) {} | 1078 FreeList::FreeList() : biggest_free_list_index_(0) {} |
| 1068 | 1079 |
| 1069 void FreeList::AddToFreeList(Address address, size_t size) { | 1080 void FreeList::AddToFreeList(Address address, size_t size) { |
| 1070 ASSERT(size < BlinkPagePayloadSize()); | 1081 DCHECK_LT(size, BlinkPagePayloadSize()); |
| 1071 // The free list entries are only pointer aligned (but when we allocate | 1082 // The free list entries are only pointer aligned (but when we allocate |
| 1072 // from them we are 8 byte aligned due to the header size). | 1083 // from them we are 8 byte aligned due to the header size). |
| 1073 ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & | 1084 DCHECK(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & |
| 1074 kAllocationMask)); | 1085 kAllocationMask)); |
| 1075 ASSERT(!(size & kAllocationMask)); | 1086 DCHECK(!(size & kAllocationMask)); |
| 1076 ASAN_UNPOISON_MEMORY_REGION(address, size); | 1087 ASAN_UNPOISON_MEMORY_REGION(address, size); |
| 1077 FreeListEntry* entry; | 1088 FreeListEntry* entry; |
| 1078 if (size < sizeof(*entry)) { | 1089 if (size < sizeof(*entry)) { |
| 1079 // Create a dummy header with only a size and freelist bit set. | 1090 // Create a dummy header with only a size and freelist bit set. |
| 1080 ASSERT(size >= sizeof(HeapObjectHeader)); | 1091 DCHECK_GE(size, sizeof(HeapObjectHeader)); |
| 1081 // Free list encode the size to mark the lost memory as freelist memory. | 1092 // Free list encode the size to mark the lost memory as freelist memory. |
| 1082 new (NotNull, address) | 1093 new (NotNull, address) |
| 1083 HeapObjectHeader(size, kGcInfoIndexForFreeListHeader); | 1094 HeapObjectHeader(size, kGcInfoIndexForFreeListHeader); |
| 1084 | 1095 |
| 1085 ASAN_POISON_MEMORY_REGION(address, size); | 1096 ASAN_POISON_MEMORY_REGION(address, size); |
| 1086 // This memory gets lost. Sweeping can reclaim it. | 1097 // This memory gets lost. Sweeping can reclaim it. |
| 1087 return; | 1098 return; |
| 1088 } | 1099 } |
| 1089 entry = new (NotNull, address) FreeListEntry(size); | 1100 entry = new (NotNull, address) FreeListEntry(size); |
| 1090 | 1101 |
| (...skipping 64 matching lines...) |
| 1155 for (size_t i = 0; i < size; i++) { | 1166 for (size_t i = 0; i < size; i++) { |
| 1156 // See the comment in addToFreeList(). | 1167 // See the comment in addToFreeList(). |
| 1157 if (address[i] != kReuseAllowedZapValue) | 1168 if (address[i] != kReuseAllowedZapValue) |
| 1158 address[i] = kReuseForbiddenZapValue; | 1169 address[i] = kReuseForbiddenZapValue; |
| 1159 } | 1170 } |
| 1160 } | 1171 } |
| 1161 | 1172 |
| 1162 void NEVER_INLINE FreeList::CheckFreedMemoryIsZapped(Address address, | 1173 void NEVER_INLINE FreeList::CheckFreedMemoryIsZapped(Address address, |
| 1163 size_t size) { | 1174 size_t size) { |
| 1164 for (size_t i = 0; i < size; i++) { | 1175 for (size_t i = 0; i < size; i++) { |
| 1165 ASSERT(address[i] == kReuseAllowedZapValue || | 1176 DCHECK(address[i] == kReuseAllowedZapValue || |
| 1166 address[i] == kReuseForbiddenZapValue); | 1177 address[i] == kReuseForbiddenZapValue); |
| 1167 } | 1178 } |
| 1168 } | 1179 } |
| 1169 #endif | 1180 #endif |
| 1170 | 1181 |
| 1171 size_t FreeList::FreeListSize() const { | 1182 size_t FreeList::FreeListSize() const { |
| 1172 size_t free_size = 0; | 1183 size_t free_size = 0; |
| 1173 for (unsigned i = 0; i < kBlinkPageSizeLog2; ++i) { | 1184 for (unsigned i = 0; i < kBlinkPageSizeLog2; ++i) { |
| 1174 FreeListEntry* entry = free_lists_[i]; | 1185 FreeListEntry* entry = free_lists_[i]; |
| 1175 while (entry) { | 1186 while (entry) { |
| (...skipping 23 matching lines...) |
| 1199 return free_size; | 1210 return free_size; |
| 1200 } | 1211 } |
| 1201 | 1212 |
| 1202 void FreeList::Clear() { | 1213 void FreeList::Clear() { |
| 1203 biggest_free_list_index_ = 0; | 1214 biggest_free_list_index_ = 0; |
| 1204 for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) | 1215 for (size_t i = 0; i < kBlinkPageSizeLog2; ++i) |
| 1205 free_lists_[i] = nullptr; | 1216 free_lists_[i] = nullptr; |
| 1206 } | 1217 } |
| 1207 | 1218 |
| 1208 int FreeList::BucketIndexForSize(size_t size) { | 1219 int FreeList::BucketIndexForSize(size_t size) { |
| 1209 ASSERT(size > 0); | 1220 DCHECK_GT(size, 0u); |
| 1210 int index = -1; | 1221 int index = -1; |
| 1211 while (size) { | 1222 while (size) { |
| 1212 size >>= 1; | 1223 size >>= 1; |
| 1213 index++; | 1224 index++; |
| 1214 } | 1225 } |
| 1215 return index; | 1226 return index; |
| 1216 } | 1227 } |
| 1217 | 1228 |
| 1218 bool FreeList::TakeSnapshot(const String& dump_base_name) { | 1229 bool FreeList::TakeSnapshot(const String& dump_base_name) { |
| 1219 bool did_dump_bucket_stats = false; | 1230 bool did_dump_bucket_stats = false; |
| (...skipping 13 matching lines...) |
| 1233 ->CreateMemoryAllocatorDumpForCurrentGC(dump_name); | 1244 ->CreateMemoryAllocatorDumpForCurrentGC(dump_name); |
| 1234 bucket_dump->AddScalar("free_count", "objects", entry_count); | 1245 bucket_dump->AddScalar("free_count", "objects", entry_count); |
| 1235 bucket_dump->AddScalar("free_size", "bytes", free_size); | 1246 bucket_dump->AddScalar("free_size", "bytes", free_size); |
| 1236 did_dump_bucket_stats = true; | 1247 did_dump_bucket_stats = true; |
| 1237 } | 1248 } |
| 1238 return did_dump_bucket_stats; | 1249 return did_dump_bucket_stats; |
| 1239 } | 1250 } |
| 1240 | 1251 |
| 1241 BasePage::BasePage(PageMemory* storage, BaseArena* arena) | 1252 BasePage::BasePage(PageMemory* storage, BaseArena* arena) |
| 1242 : storage_(storage), arena_(arena), next_(nullptr), swept_(true) { | 1253 : storage_(storage), arena_(arena), next_(nullptr), swept_(true) { |
| 1243 ASSERT(IsPageHeaderAddress(reinterpret_cast<Address>(this))); | 1254 #if DCHECK_IS_ON() |
| | 1255 DCHECK(IsPageHeaderAddress(reinterpret_cast<Address>(this))); |
| | 1256 #endif |
| 1244 } | 1257 } |
| 1245 | 1258 |
| 1246 NormalPage::NormalPage(PageMemory* storage, BaseArena* arena) | 1259 NormalPage::NormalPage(PageMemory* storage, BaseArena* arena) |
| 1247 : BasePage(storage, arena), object_start_bit_map_computed_(false) { | 1260 : BasePage(storage, arena), object_start_bit_map_computed_(false) { |
| 1248 ASSERT(IsPageHeaderAddress(reinterpret_cast<Address>(this))); | 1261 #if DCHECK_IS_ON() |
| | 1262 DCHECK(IsPageHeaderAddress(reinterpret_cast<Address>(this))); |
| | 1263 #endif |
| 1249 } | 1264 } |
| 1250 | 1265 |
| 1251 size_t NormalPage::ObjectPayloadSizeForTesting() { | 1266 size_t NormalPage::ObjectPayloadSizeForTesting() { |
| 1252 size_t object_payload_size = 0; | 1267 size_t object_payload_size = 0; |
| 1253 Address header_address = Payload(); | 1268 Address header_address = Payload(); |
| 1254 MarkAsSwept(); | 1269 MarkAsSwept(); |
| 1255 ASSERT(header_address != PayloadEnd()); | 1270 DCHECK_NE(header_address, PayloadEnd()); |
| 1256 do { | 1271 do { |
| 1257 HeapObjectHeader* header = | 1272 HeapObjectHeader* header = |
| 1258 reinterpret_cast<HeapObjectHeader*>(header_address); | 1273 reinterpret_cast<HeapObjectHeader*>(header_address); |
| 1259 if (!header->IsFree()) { | 1274 if (!header->IsFree()) { |
| 1260 object_payload_size += header->PayloadSize(); | 1275 object_payload_size += header->PayloadSize(); |
| 1261 } | 1276 } |
| 1262 ASSERT(header->size() < BlinkPagePayloadSize()); | 1277 DCHECK_LT(header->size(), BlinkPagePayloadSize()); |
| 1263 header_address += header->size(); | 1278 header_address += header->size(); |
| 1264 ASSERT(header_address <= PayloadEnd()); | 1279 DCHECK_LE(header_address, PayloadEnd()); |
| 1265 } while (header_address < PayloadEnd()); | 1280 } while (header_address < PayloadEnd()); |
| 1266 return object_payload_size; | 1281 return object_payload_size; |
| 1267 } | 1282 } |
| 1268 | 1283 |
| 1269 bool NormalPage::IsEmpty() { | 1284 bool NormalPage::IsEmpty() { |
| 1270 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(Payload()); | 1285 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(Payload()); |
| 1271 return header->IsFree() && header->size() == PayloadSize(); | 1286 return header->IsFree() && header->size() == PayloadSize(); |
| 1272 } | 1287 } |
| 1273 | 1288 |
| 1274 void NormalPage::RemoveFromHeap() { | 1289 void NormalPage::RemoveFromHeap() { |
| (...skipping 13 matching lines...) Expand all Loading... |
| 1288 #endif | 1303 #endif |
| 1289 | 1304 |
| 1290 void NormalPage::Sweep() { | 1305 void NormalPage::Sweep() { |
| 1291 size_t marked_object_size = 0; | 1306 size_t marked_object_size = 0; |
| 1292 Address start_of_gap = Payload(); | 1307 Address start_of_gap = Payload(); |
| 1293 NormalPageArena* page_arena = ArenaForNormalPage(); | 1308 NormalPageArena* page_arena = ArenaForNormalPage(); |
| 1294 for (Address header_address = start_of_gap; header_address < PayloadEnd();) { | 1309 for (Address header_address = start_of_gap; header_address < PayloadEnd();) { |
| 1295 HeapObjectHeader* header = | 1310 HeapObjectHeader* header = |
| 1296 reinterpret_cast<HeapObjectHeader*>(header_address); | 1311 reinterpret_cast<HeapObjectHeader*>(header_address); |
| 1297 size_t size = header->size(); | 1312 size_t size = header->size(); |
| 1298 ASSERT(size > 0); | 1313 DCHECK_GT(size, 0u); |
| 1299 ASSERT(size < BlinkPagePayloadSize()); | 1314 DCHECK_LT(size, BlinkPagePayloadSize()); |
| 1300 | 1315 |
| 1301 if (header->IsPromptlyFreed()) | 1316 if (header->IsPromptlyFreed()) |
| 1302 page_arena->DecreasePromptlyFreedSize(size); | 1317 page_arena->DecreasePromptlyFreedSize(size); |
| 1303 if (header->IsFree()) { | 1318 if (header->IsFree()) { |
| 1304 // Zero the memory in the free list header to maintain the | 1319 // Zero the memory in the free list header to maintain the |
| 1305 // invariant that memory on the free list is zero filled. | 1320 // invariant that memory on the free list is zero filled. |
| 1306 // The rest of the memory is already on the free list and is | 1321 // The rest of the memory is already on the free list and is |
| 1307 // therefore already zero filled. | 1322 // therefore already zero filled. |
| 1308 SET_MEMORY_INACCESSIBLE(header_address, size < sizeof(FreeListEntry) | 1323 SET_MEMORY_INACCESSIBLE(header_address, size < sizeof(FreeListEntry) |
| 1309 ? size | 1324 ? size |
| (...skipping 53 matching lines...) |
| 1363 NormalPageArena* page_arena = ArenaForNormalPage(); | 1378 NormalPageArena* page_arena = ArenaForNormalPage(); |
| 1364 #if defined(ADDRESS_SANITIZER) | 1379 #if defined(ADDRESS_SANITIZER) |
| 1365 bool is_vector_arena = | 1380 bool is_vector_arena = |
| 1366 ThreadState::IsVectorArenaIndex(page_arena->ArenaIndex()); | 1381 ThreadState::IsVectorArenaIndex(page_arena->ArenaIndex()); |
| 1367 #endif | 1382 #endif |
| 1368 HeapCompact* compact = page_arena->GetThreadState()->Heap().Compaction(); | 1383 HeapCompact* compact = page_arena->GetThreadState()->Heap().Compaction(); |
| 1369 for (Address header_address = Payload(); header_address < PayloadEnd();) { | 1384 for (Address header_address = Payload(); header_address < PayloadEnd();) { |
| 1370 HeapObjectHeader* header = | 1385 HeapObjectHeader* header = |
| 1371 reinterpret_cast<HeapObjectHeader*>(header_address); | 1386 reinterpret_cast<HeapObjectHeader*>(header_address); |
| 1372 size_t size = header->size(); | 1387 size_t size = header->size(); |
| 1373 DCHECK(size > 0 && size < BlinkPagePayloadSize()); | 1388 DCHECK_GT(size, 0u); |
| | 1389 DCHECK_LT(size, BlinkPagePayloadSize()); |
| 1374 | 1390 |
| 1375 if (header->IsPromptlyFreed()) | 1391 if (header->IsPromptlyFreed()) |
| 1376 page_arena->DecreasePromptlyFreedSize(size); | 1392 page_arena->DecreasePromptlyFreedSize(size); |
| 1377 if (header->IsFree()) { | 1393 if (header->IsFree()) { |
| 1378 // Unpoison the freelist entry so that we | 1394 // Unpoison the freelist entry so that we |
| 1379 // can compact into it as wanted. | 1395 // can compact into it as wanted. |
| 1380 ASAN_UNPOISON_MEMORY_REGION(header_address, size); | 1396 ASAN_UNPOISON_MEMORY_REGION(header_address, size); |
| 1381 header_address += size; | 1397 header_address += size; |
| 1382 continue; | 1398 continue; |
| 1383 } | 1399 } |
| (...skipping 79 matching lines...) |
| 1463 #endif | 1479 #endif |
| 1464 } | 1480 } |
| 1465 | 1481 |
| 1466 void NormalPage::MakeConsistentForMutator() { | 1482 void NormalPage::MakeConsistentForMutator() { |
| 1467 Address start_of_gap = Payload(); | 1483 Address start_of_gap = Payload(); |
| 1468 NormalPageArena* normal_arena = ArenaForNormalPage(); | 1484 NormalPageArena* normal_arena = ArenaForNormalPage(); |
| 1469 for (Address header_address = Payload(); header_address < PayloadEnd();) { | 1485 for (Address header_address = Payload(); header_address < PayloadEnd();) { |
| 1470 HeapObjectHeader* header = | 1486 HeapObjectHeader* header = |
| 1471 reinterpret_cast<HeapObjectHeader*>(header_address); | 1487 reinterpret_cast<HeapObjectHeader*>(header_address); |
| 1472 size_t size = header->size(); | 1488 size_t size = header->size(); |
| 1473 ASSERT(size < BlinkPagePayloadSize()); | 1489 DCHECK_LT(size, BlinkPagePayloadSize()); |
| 1474 if (header->IsPromptlyFreed()) | 1490 if (header->IsPromptlyFreed()) |
| 1475 ArenaForNormalPage()->DecreasePromptlyFreedSize(size); | 1491 ArenaForNormalPage()->DecreasePromptlyFreedSize(size); |
| 1476 if (header->IsFree()) { | 1492 if (header->IsFree()) { |
| 1477 // Zero the memory in the free list header to maintain the | 1493 // Zero the memory in the free list header to maintain the |
| 1478 // invariant that memory on the free list is zero filled. | 1494 // invariant that memory on the free list is zero filled. |
| 1479 // The rest of the memory is already on the free list and is | 1495 // The rest of the memory is already on the free list and is |
| 1480 // therefore already zero filled. | 1496 // therefore already zero filled. |
| 1481 SET_MEMORY_INACCESSIBLE(header_address, size < sizeof(FreeListEntry) | 1497 SET_MEMORY_INACCESSIBLE(header_address, size < sizeof(FreeListEntry) |
| 1482 ? size | 1498 ? size |
| 1483 : sizeof(FreeListEntry)); | 1499 : sizeof(FreeListEntry)); |
| 1484 CHECK_MEMORY_INACCESSIBLE(header_address, size); | 1500 CHECK_MEMORY_INACCESSIBLE(header_address, size); |
| 1485 header_address += size; | 1501 header_address += size; |
| 1486 continue; | 1502 continue; |
| 1487 } | 1503 } |
| 1488 if (start_of_gap != header_address) | 1504 if (start_of_gap != header_address) |
| 1489 normal_arena->AddToFreeList(start_of_gap, header_address - start_of_gap); | 1505 normal_arena->AddToFreeList(start_of_gap, header_address - start_of_gap); |
| 1490 if (header->IsMarked()) | 1506 if (header->IsMarked()) |
| 1491 header->Unmark(); | 1507 header->Unmark(); |
| 1492 header_address += size; | 1508 header_address += size; |
| 1493 start_of_gap = header_address; | 1509 start_of_gap = header_address; |
| 1494 ASSERT(header_address <= PayloadEnd()); | 1510 DCHECK_LE(header_address, PayloadEnd()); |
| 1495 } | 1511 } |
| 1496 if (start_of_gap != PayloadEnd()) | 1512 if (start_of_gap != PayloadEnd()) |
| 1497 normal_arena->AddToFreeList(start_of_gap, PayloadEnd() - start_of_gap); | 1513 normal_arena->AddToFreeList(start_of_gap, PayloadEnd() - start_of_gap); |
| 1498 } | 1514 } |
| 1499 | 1515 |
| 1500 #if defined(ADDRESS_SANITIZER) | 1516 #if defined(ADDRESS_SANITIZER) |
| 1501 void NormalPage::PoisonUnmarkedObjects() { | 1517 void NormalPage::PoisonUnmarkedObjects() { |
| 1502 for (Address header_address = Payload(); header_address < PayloadEnd();) { | 1518 for (Address header_address = Payload(); header_address < PayloadEnd();) { |
| 1503 HeapObjectHeader* header = | 1519 HeapObjectHeader* header = |
| 1504 reinterpret_cast<HeapObjectHeader*>(header_address); | 1520 reinterpret_cast<HeapObjectHeader*>(header_address); |
| 1505 ASSERT(header->size() < BlinkPagePayloadSize()); | 1521 DCHECK_LT(header->size(), BlinkPagePayloadSize()); |
| 1506 // Check if a free list entry first since we cannot call | 1522 // Check if a free list entry first since we cannot call |
| 1507 // isMarked on a free list entry. | 1523 // isMarked on a free list entry. |
| 1508 if (header->IsFree()) { | 1524 if (header->IsFree()) { |
| 1509 header_address += header->size(); | 1525 header_address += header->size(); |
| 1510 continue; | 1526 continue; |
| 1511 } | 1527 } |
| 1512 if (!header->IsMarked()) | 1528 if (!header->IsMarked()) |
| 1513 ASAN_POISON_MEMORY_REGION(header->Payload(), header->PayloadSize()); | 1529 ASAN_POISON_MEMORY_REGION(header->Payload(), header->PayloadSize()); |
| 1514 header_address += header->size(); | 1530 header_address += header->size(); |
| 1515 } | 1531 } |
| 1516 } | 1532 } |
| 1517 #endif | 1533 #endif |
| 1518 | 1534 |
| 1519 void NormalPage::PopulateObjectStartBitMap() { | 1535 void NormalPage::PopulateObjectStartBitMap() { |
| 1520 memset(&object_start_bit_map_, 0, kObjectStartBitMapSize); | 1536 memset(&object_start_bit_map_, 0, kObjectStartBitMapSize); |
| 1521 Address start = Payload(); | 1537 Address start = Payload(); |
| 1522 for (Address header_address = start; header_address < PayloadEnd();) { | 1538 for (Address header_address = start; header_address < PayloadEnd();) { |
| 1523 HeapObjectHeader* header = | 1539 HeapObjectHeader* header = |
| 1524 reinterpret_cast<HeapObjectHeader*>(header_address); | 1540 reinterpret_cast<HeapObjectHeader*>(header_address); |
| 1525 size_t object_offset = header_address - start; | 1541 size_t object_offset = header_address - start; |
| 1526 ASSERT(!(object_offset & kAllocationMask)); | 1542 DCHECK(!(object_offset & kAllocationMask)); |
| 1527 size_t object_start_number = object_offset / kAllocationGranularity; | 1543 size_t object_start_number = object_offset / kAllocationGranularity; |
| 1528 size_t map_index = object_start_number / 8; | 1544 size_t map_index = object_start_number / 8; |
| 1529 ASSERT(map_index < kObjectStartBitMapSize); | 1545 DCHECK_LT(map_index, kObjectStartBitMapSize); |
| 1530 object_start_bit_map_[map_index] |= (1 << (object_start_number & 7)); | 1546 object_start_bit_map_[map_index] |= (1 << (object_start_number & 7)); |
| 1531 header_address += header->size(); | 1547 header_address += header->size(); |
| 1532 ASSERT(header_address <= PayloadEnd()); | 1548 DCHECK_LE(header_address, PayloadEnd()); |
| 1533 } | 1549 } |
| 1534 object_start_bit_map_computed_ = true; | 1550 object_start_bit_map_computed_ = true; |
| 1535 } | 1551 } |
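
Editor's note: PopulateObjectStartBitMap records, one bit per allocation granule, which payload offsets hold an object header. A worked example of the index arithmetic, assuming an 8-byte kAllocationGranularity purely for illustration:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
  const size_t kAllocationGranularity = 8;  // Assumed value, for illustration.
  uint8_t object_start_bit_map[16] = {};    // 16 * 8 granules covers 1 KiB.

  size_t object_offset = 200;  // Offset of some object header from Payload().
  size_t object_start_number = object_offset / kAllocationGranularity;  // 25
  size_t map_index = object_start_number / 8;                           // 3
  size_t bit = object_start_number & 7;                                 // 1
  object_start_bit_map[map_index] |= static_cast<uint8_t>(1 << bit);

  std::printf("offset %zu -> byte %zu, bit %zu\n", object_offset, map_index,
              bit);
}
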
| 1536 | 1552 |
| 1537 static int NumberOfLeadingZeroes(uint8_t byte) { | 1553 static int NumberOfLeadingZeroes(uint8_t byte) { |
| 1538 if (!byte) | 1554 if (!byte) |
| 1539 return 8; | 1555 return 8; |
| 1540 int result = 0; | 1556 int result = 0; |
| 1541 if (byte <= 0x0F) { | 1557 if (byte <= 0x0F) { |
| 1542 result += 4; | 1558 result += 4; |
| 1543 byte = byte << 4; | 1559 byte = byte << 4; |
| 1544 } | 1560 } |
| 1545 if (byte <= 0x3F) { | 1561 if (byte <= 0x3F) { |
| 1546 result += 2; | 1562 result += 2; |
| 1547 byte = byte << 2; | 1563 byte = byte << 2; |
| 1548 } | 1564 } |
| 1549 if (byte <= 0x7F) | 1565 if (byte <= 0x7F) |
| 1550 result++; | 1566 result++; |
| 1551 return result; | 1567 return result; |
| 1552 } | 1568 } |
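
Editor's note: NumberOfLeadingZeroes narrows down the position of the highest set bit in three steps (4, then 2, then 1 positions). A standalone copy plus an exhaustive check against a straightforward reference loop, for illustration only:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Standalone copy of the byte-wise count above.
static int NumberOfLeadingZeroes(uint8_t byte) {
  if (!byte)
    return 8;
  int result = 0;
  if (byte <= 0x0F) {
    result += 4;
    byte = static_cast<uint8_t>(byte << 4);
  }
  if (byte <= 0x3F) {
    result += 2;
    byte = static_cast<uint8_t>(byte << 2);
  }
  if (byte <= 0x7F)
    result++;
  return result;
}

int main() {
  for (int value = 0; value <= 0xFF; ++value) {
    int reference = 8;  // All bits clear.
    for (int bit = 7; bit >= 0; --bit) {
      if (value & (1 << bit)) {
        reference = 7 - bit;
        break;
      }
    }
    assert(NumberOfLeadingZeroes(static_cast<uint8_t>(value)) == reference);
  }
  std::printf("all 256 byte values agree with the reference count\n");
}
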
| 1553 | 1569 |
| 1554 HeapObjectHeader* NormalPage::FindHeaderFromAddress(Address address) { | 1570 HeapObjectHeader* NormalPage::FindHeaderFromAddress(Address address) { |
| 1555 if (address < Payload()) | 1571 if (address < Payload()) |
| 1556 return nullptr; | 1572 return nullptr; |
| 1557 if (!object_start_bit_map_computed_) | 1573 if (!object_start_bit_map_computed_) |
| 1558 PopulateObjectStartBitMap(); | 1574 PopulateObjectStartBitMap(); |
| 1559 size_t object_offset = address - Payload(); | 1575 size_t object_offset = address - Payload(); |
| 1560 size_t object_start_number = object_offset / kAllocationGranularity; | 1576 size_t object_start_number = object_offset / kAllocationGranularity; |
| 1561 size_t map_index = object_start_number / 8; | 1577 size_t map_index = object_start_number / 8; |
| 1562 ASSERT(map_index < kObjectStartBitMapSize); | 1578 DCHECK_LT(map_index, kObjectStartBitMapSize); |
| 1563 size_t bit = object_start_number & 7; | 1579 size_t bit = object_start_number & 7; |
| 1564 uint8_t byte = object_start_bit_map_[map_index] & ((1 << (bit + 1)) - 1); | 1580 uint8_t byte = object_start_bit_map_[map_index] & ((1 << (bit + 1)) - 1); |
| 1565 while (!byte) { | 1581 while (!byte) { |
| 1566 ASSERT(map_index > 0); | 1582 DCHECK_GT(map_index, 0u); |
| 1567 byte = object_start_bit_map_[--map_index]; | 1583 byte = object_start_bit_map_[--map_index]; |
| 1568 } | 1584 } |
| 1569 int leading_zeroes = NumberOfLeadingZeroes(byte); | 1585 int leading_zeroes = NumberOfLeadingZeroes(byte); |
| 1570 object_start_number = (map_index * 8) + 7 - leading_zeroes; | 1586 object_start_number = (map_index * 8) + 7 - leading_zeroes; |
| 1571 object_offset = object_start_number * kAllocationGranularity; | 1587 object_offset = object_start_number * kAllocationGranularity; |
| 1572 Address object_address = object_offset + Payload(); | 1588 Address object_address = object_offset + Payload(); |
| 1573 HeapObjectHeader* header = | 1589 HeapObjectHeader* header = |
| 1574 reinterpret_cast<HeapObjectHeader*>(object_address); | 1590 reinterpret_cast<HeapObjectHeader*>(object_address); |
| 1575 if (header->IsFree()) | 1591 if (header->IsFree()) |
| 1576 return nullptr; | 1592 return nullptr; |
| (...skipping 20 matching lines...) |
| 1597 // | 1613 // |
| 1598 // class A : public GarbageCollected<A> { virtual void f() = 0; }; | 1614 // class A : public GarbageCollected<A> { virtual void f() = 0; }; |
| 1599 // class B : public A { | 1615 // class B : public A { |
| 1600 // B() : A(foo()) { }; | 1616 // B() : A(foo()) { }; |
| 1601 // }; | 1617 // }; |
| 1602 // | 1618 // |
| 1603 // If foo() allocates something and triggers a GC, the vtable of A | 1619 // If foo() allocates something and triggers a GC, the vtable of A |
| 1604 // has not yet been initialized. In this case, we should mark the A | 1620 // has not yet been initialized. In this case, we should mark the A |
| 1605 // object without tracing any member of the A object. | 1621 // object without tracing any member of the A object. |
| 1606 visitor->MarkHeaderNoTracing(header); | 1622 visitor->MarkHeaderNoTracing(header); |
| 1607 ASSERT(IsUninitializedMemory(header->Payload(), header->PayloadSize())); | 1623 #if DCHECK_IS_ON() |
| 1624 DCHECK(IsUninitializedMemory(header->Payload(), header->PayloadSize())); |
| 1625 #endif |
| 1608 } else { | 1626 } else { |
| 1609 visitor->MarkHeader(header, gc_info->trace_); | 1627 visitor->MarkHeader(header, gc_info->trace_); |
| 1610 } | 1628 } |
| 1611 } | 1629 } |
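
Editor's note: the branch above marks a header without tracing when the object is still under construction, because the most-derived vtable is not installed yet. A hedged, standalone illustration of that ordering (not Blink code; the names are invented): the allocation in the base-initializer runs before the derived vtable or members exist.

#include <cstdio>

struct Base {
  explicit Base(int token) { std::printf("Base constructor, token %d\n", token); }
  virtual ~Base() = default;
  virtual void Trace() const { std::printf("Base::Trace\n"); }
};

// Stands in for an allocation that could trigger a garbage collection.
static int AllocateAndMaybeGC() {
  std::printf("allocation in the base-initializer; a GC could run here\n");
  return 0;
}

struct Derived : Base {
  // The argument runs before any part of Derived (vtable pointer, members)
  // is set up, so a collector that discovers this half-built object must
  // mark it without tracing through it.
  Derived() : Base(AllocateAndMaybeGC()) {}
  void Trace() const override { std::printf("Derived::Trace\n"); }
};

int main() {
  Derived derived;   // Prints the allocation line before the Base line.
  derived.Trace();   // Virtual dispatch is only safe once construction ends.
}
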
| 1612 | 1630 |
| 1613 void NormalPage::CheckAndMarkPointer(Visitor* visitor, Address address) { | 1631 void NormalPage::CheckAndMarkPointer(Visitor* visitor, Address address) { |
| 1614 #if DCHECK_IS_ON() | 1632 #if DCHECK_IS_ON() |
| 1615 DCHECK(Contains(address)); | 1633 DCHECK(Contains(address)); |
| 1616 #endif | 1634 #endif |
| 1617 HeapObjectHeader* header = FindHeaderFromAddress(address); | 1635 HeapObjectHeader* header = FindHeaderFromAddress(address); |
| (...skipping 55 matching lines...) |
| 1673 page_dump->AddScalar("dead_size", "bytes", dead_size); | 1691 page_dump->AddScalar("dead_size", "bytes", dead_size); |
| 1674 page_dump->AddScalar("free_size", "bytes", free_size); | 1692 page_dump->AddScalar("free_size", "bytes", free_size); |
| 1675 heap_info.free_size += free_size; | 1693 heap_info.free_size += free_size; |
| 1676 heap_info.free_count += free_count; | 1694 heap_info.free_count += free_count; |
| 1677 } | 1695 } |
| 1678 | 1696 |
| 1679 #if DCHECK_IS_ON() | 1697 #if DCHECK_IS_ON() |
| 1680 bool NormalPage::Contains(Address addr) { | 1698 bool NormalPage::Contains(Address addr) { |
| 1681 Address blink_page_start = RoundToBlinkPageStart(GetAddress()); | 1699 Address blink_page_start = RoundToBlinkPageStart(GetAddress()); |
| 1682 // Page is at aligned address plus guard page size. | 1700 // Page is at aligned address plus guard page size. |
| 1683 ASSERT(blink_page_start == GetAddress() - kBlinkGuardPageSize); | 1701 DCHECK_EQ(blink_page_start, GetAddress() - kBlinkGuardPageSize); |
| 1684 return blink_page_start <= addr && addr < blink_page_start + kBlinkPageSize; | 1702 return blink_page_start <= addr && addr < blink_page_start + kBlinkPageSize; |
| 1685 } | 1703 } |
| 1686 #endif | 1704 #endif |
| 1687 | 1705 |
| 1688 LargeObjectPage::LargeObjectPage(PageMemory* storage, | 1706 LargeObjectPage::LargeObjectPage(PageMemory* storage, |
| 1689 BaseArena* arena, | 1707 BaseArena* arena, |
| 1690 size_t payload_size) | 1708 size_t payload_size) |
| 1691 : BasePage(storage, arena), | 1709 : BasePage(storage, arena), |
| 1692 payload_size_(payload_size) | 1710 payload_size_(payload_size) |
| 1693 #if ENABLE(ASAN_CONTAINER_ANNOTATIONS) | 1711 #if ENABLE(ASAN_CONTAINER_ANNOTATIONS) |
| (...skipping 103 matching lines...) |
| 1797 | 1815 |
| 1798 size_t HeapDoesNotContainCache::GetHash(Address address) { | 1816 size_t HeapDoesNotContainCache::GetHash(Address address) { |
| 1799 size_t value = (reinterpret_cast<size_t>(address) >> kBlinkPageSizeLog2); | 1817 size_t value = (reinterpret_cast<size_t>(address) >> kBlinkPageSizeLog2); |
| 1800 value ^= value >> kNumberOfEntriesLog2; | 1818 value ^= value >> kNumberOfEntriesLog2; |
| 1801 value ^= value >> (kNumberOfEntriesLog2 * 2); | 1819 value ^= value >> (kNumberOfEntriesLog2 * 2); |
| 1802 value &= kNumberOfEntries - 1; | 1820 value &= kNumberOfEntries - 1; |
| 1803 return value & ~1; // Returns only even number. | 1821 return value & ~1; // Returns only even number. |
| 1804 } | 1822 } |
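
Editor's note: GetHash drops the in-page address bits, folds the remaining bits down to the table size, and clears the low bit so every bucket is an even/odd pair of slots. A worked example with assumed constants (the real kBlinkPageSizeLog2 and kNumberOfEntriesLog2 live in the heap headers):

#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kBlinkPageSizeLog2 = 17;    // Assumed 128 KiB pages.
  const uint64_t kNumberOfEntriesLog2 = 12;  // Assumed 4096-entry table.
  const uint64_t kNumberOfEntries = uint64_t{1} << kNumberOfEntriesLog2;

  uint64_t address = 0x7f1234560000ULL;            // Arbitrary example address.
  uint64_t value = address >> kBlinkPageSizeLog2;  // Drop the in-page bits.
  value ^= value >> kNumberOfEntriesLog2;          // Fold higher bits in.
  value ^= value >> (kNumberOfEntriesLog2 * 2);
  value &= kNumberOfEntries - 1;                   // Constrain to the table.
  uint64_t index = value & ~uint64_t{1};           // Always even.

  std::printf("bucket index %llu (even slot of an even/odd pair)\n",
              static_cast<unsigned long long>(index));
}
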
| 1805 | 1823 |
| 1806 bool HeapDoesNotContainCache::Lookup(Address address) { | 1824 bool HeapDoesNotContainCache::Lookup(Address address) { |
| 1807 ASSERT(ThreadState::Current()->IsInGC()); | 1825 DCHECK(ThreadState::Current()->IsInGC()); |
| 1808 | 1826 |
| 1809 size_t index = GetHash(address); | 1827 size_t index = GetHash(address); |
| 1810 ASSERT(!(index & 1)); | 1828 DCHECK(!(index & 1)); |
| 1811 Address cache_page = RoundToBlinkPageStart(address); | 1829 Address cache_page = RoundToBlinkPageStart(address); |
| 1812 if (entries_[index] == cache_page) | 1830 if (entries_[index] == cache_page) |
| 1813 return entries_[index]; | 1831 return entries_[index]; |
| 1814 if (entries_[index + 1] == cache_page) | 1832 if (entries_[index + 1] == cache_page) |
| 1815 return entries_[index + 1]; | 1833 return entries_[index + 1]; |
| 1816 return false; | 1834 return false; |
| 1817 } | 1835 } |
| 1818 | 1836 |
| 1819 void HeapDoesNotContainCache::AddEntry(Address address) { | 1837 void HeapDoesNotContainCache::AddEntry(Address address) { |
| 1820 ASSERT(ThreadState::Current()->IsInGC()); | 1838 DCHECK(ThreadState::Current()->IsInGC()); |
| 1821 | 1839 |
| 1822 has_entries_ = true; | 1840 has_entries_ = true; |
| 1823 size_t index = GetHash(address); | 1841 size_t index = GetHash(address); |
| 1824 ASSERT(!(index & 1)); | 1842 DCHECK(!(index & 1)); |
| 1825 Address cache_page = RoundToBlinkPageStart(address); | 1843 Address cache_page = RoundToBlinkPageStart(address); |
| 1826 entries_[index + 1] = entries_[index]; | 1844 entries_[index + 1] = entries_[index]; |
| 1827 entries_[index] = cache_page; | 1845 entries_[index] = cache_page; |
| 1828 } | 1846 } |
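
Editor's note: Lookup checks both slots of the bucket, and AddEntry demotes the current even-slot entry to the odd slot before overwriting it, so each bucket acts as a tiny two-way, most-recently-used-first cache of "the heap does not contain this page" results. A standalone sketch of that behaviour with an invented hash and a small table:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Not the Blink class: a stripped-down model of the even/odd slot pairs.
struct TinyCache {
  static const size_t kEntries = 8;  // Four buckets of two slots each.
  uintptr_t entries[kEntries] = {};

  // Invented hash for illustration; the real one is GetHash() above.
  size_t BucketIndex(uintptr_t page) const {
    return (page % (kEntries / 2)) * 2;  // Always an even slot.
  }

  bool Lookup(uintptr_t page) const {
    size_t index = BucketIndex(page);
    return entries[index] == page || entries[index + 1] == page;
  }

  void Add(uintptr_t page) {
    size_t index = BucketIndex(page);
    entries[index + 1] = entries[index];  // Demote the previous entry.
    entries[index] = page;                // Newest entry takes the even slot.
  }
};

int main() {
  TinyCache cache;
  cache.Add(0x1000);
  cache.Add(0x2000);  // Same bucket: 0x1000 survives in the odd slot.
  std::printf("0x1000 still cached: %d\n", cache.Lookup(0x1000) ? 1 : 0);
}
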
| 1829 | 1847 |
| 1830 } // namespace blink | 1848 } // namespace blink |