Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(354)

Side by Side Diff: third_party/WebKit/Source/platform/heap/HeapAllocator.cpp

Issue 2816033003: Replace ASSERT with DCHECK_op in platform/heap (Closed)
Patch Set: Replace ASSERT with CHECK_op in platform/heap Created 3 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "platform/heap/HeapAllocator.h" 5 #include "platform/heap/HeapAllocator.h"
6 6
7 namespace blink { 7 namespace blink {
8 8
9 void HeapAllocator::BackingFree(void* address) { 9 void HeapAllocator::BackingFree(void* address) {
10 if (!address) 10 if (!address)
11 return; 11 return;
12 12
13 ThreadState* state = ThreadState::Current(); 13 ThreadState* state = ThreadState::Current();
14 if (state->SweepForbidden()) 14 if (state->SweepForbidden())
15 return; 15 return;
16 ASSERT(!state->IsInGC()); 16 DCHECK(!state->IsInGC());
17 17
18 // Don't promptly free large objects because their page is never reused. 18 // Don't promptly free large objects because their page is never reused.
19 // Don't free backings allocated on other threads. 19 // Don't free backings allocated on other threads.
20 BasePage* page = PageFromObject(address); 20 BasePage* page = PageFromObject(address);
21 if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state) 21 if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
22 return; 22 return;
23 23
24 HeapObjectHeader* header = HeapObjectHeader::FromPayload(address); 24 HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
25 NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage(); 25 NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage();
26 state->PromptlyFreed(header->GcInfoIndex()); 26 state->PromptlyFreed(header->GcInfoIndex());
(...skipping 12 matching lines...) Expand all
39 BackingFree(address); 39 BackingFree(address);
40 } 40 }
41 41
42 bool HeapAllocator::BackingExpand(void* address, size_t new_size) { 42 bool HeapAllocator::BackingExpand(void* address, size_t new_size) {
43 if (!address) 43 if (!address)
44 return false; 44 return false;
45 45
46 ThreadState* state = ThreadState::Current(); 46 ThreadState* state = ThreadState::Current();
47 if (state->SweepForbidden()) 47 if (state->SweepForbidden())
48 return false; 48 return false;
49 ASSERT(!state->IsInGC()); 49 DCHECK(!state->IsInGC());
50 ASSERT(state->IsAllocationAllowed()); 50 DCHECK(state->IsAllocationAllowed());
51 DCHECK_EQ(&state->Heap(), &ThreadState::FromObject(address)->Heap()); 51 DCHECK_EQ(&state->Heap(), &ThreadState::FromObject(address)->Heap());
52 52
53 // FIXME: Support expand for large objects. 53 // FIXME: Support expand for large objects.
54 // Don't expand backings allocated on other threads. 54 // Don't expand backings allocated on other threads.
55 BasePage* page = PageFromObject(address); 55 BasePage* page = PageFromObject(address);
56 if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state) 56 if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
57 return false; 57 return false;
58 58
59 HeapObjectHeader* header = HeapObjectHeader::FromPayload(address); 59 HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
60 NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage(); 60 NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage();
(...skipping 14 matching lines...) Expand all
75 bool HeapAllocator::ExpandHashTableBacking(void* address, size_t new_size) { 75 bool HeapAllocator::ExpandHashTableBacking(void* address, size_t new_size) {
76 return BackingExpand(address, new_size); 76 return BackingExpand(address, new_size);
77 } 77 }
78 78
79 bool HeapAllocator::BackingShrink(void* address, 79 bool HeapAllocator::BackingShrink(void* address,
80 size_t quantized_current_size, 80 size_t quantized_current_size,
81 size_t quantized_shrunk_size) { 81 size_t quantized_shrunk_size) {
82 if (!address || quantized_shrunk_size == quantized_current_size) 82 if (!address || quantized_shrunk_size == quantized_current_size)
83 return true; 83 return true;
84 84
85 ASSERT(quantized_shrunk_size < quantized_current_size); 85 DCHECK_LT(quantized_shrunk_size, quantized_current_size);
86 86
87 ThreadState* state = ThreadState::Current(); 87 ThreadState* state = ThreadState::Current();
88 if (state->SweepForbidden()) 88 if (state->SweepForbidden())
89 return false; 89 return false;
90 ASSERT(!state->IsInGC()); 90 DCHECK(!state->IsInGC());
91 ASSERT(state->IsAllocationAllowed()); 91 DCHECK(state->IsAllocationAllowed());
92 DCHECK_EQ(&state->Heap(), &ThreadState::FromObject(address)->Heap()); 92 DCHECK_EQ(&state->Heap(), &ThreadState::FromObject(address)->Heap());
93 93
94 // FIXME: Support shrink for large objects. 94 // FIXME: Support shrink for large objects.
95 // Don't shrink backings allocated on other threads. 95 // Don't shrink backings allocated on other threads.
96 BasePage* page = PageFromObject(address); 96 BasePage* page = PageFromObject(address);
97 if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state) 97 if (page->IsLargeObjectPage() || page->Arena()->GetThreadState() != state)
98 return false; 98 return false;
99 99
100 HeapObjectHeader* header = HeapObjectHeader::FromPayload(address); 100 HeapObjectHeader* header = HeapObjectHeader::FromPayload(address);
101 NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage(); 101 NormalPageArena* arena = static_cast<NormalPage*>(page)->ArenaForNormalPage();
(...skipping 19 matching lines...) Expand all
121 return BackingShrink(address, quantized_current_size, quantized_shrunk_size); 121 return BackingShrink(address, quantized_current_size, quantized_shrunk_size);
122 } 122 }
123 123
124 bool HeapAllocator::ShrinkInlineVectorBacking(void* address, 124 bool HeapAllocator::ShrinkInlineVectorBacking(void* address,
125 size_t quantized_current_size, 125 size_t quantized_current_size,
126 size_t quantized_shrunk_size) { 126 size_t quantized_shrunk_size) {
127 return BackingShrink(address, quantized_current_size, quantized_shrunk_size); 127 return BackingShrink(address, quantized_current_size, quantized_shrunk_size);
128 } 128 }
129 129
130 } // namespace blink 130 } // namespace blink
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698