Chromium Code Reviews

Diff: third_party/WebKit/Source/platform/heap/HeapAllocator.cpp

Issue 2698673003: Call HeapObjectHeader::checkHeader solely for its side-effect. (Closed)
Patch Set: Created 3 years, 10 months ago
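The CL title summarizes the change: the three call sites in the diff below previously wrapped the call in ASSERT(header->checkHeader()) and now call checkHeader() directly, relying on the check it performs internally. The following is a minimal, hypothetical sketch of that pattern, not the actual Blink declaration (HeapObjectHeader is declared elsewhere in platform/heap and is not part of this diff); it assumes checkHeader() validates the header's magic value itself rather than returning a bool for the caller to assert on.

// Hypothetical stand-in for Blink's HeapObjectHeader, illustrating a check
// routine that is called solely for its side effect.
#include <cassert>
#include <cstdint>
#include <new>

class HeapObjectHeader {
 public:
  // Called for its side effect: aborts (in debug builds) if the header's
  // magic value has been corrupted. Returns nothing, so wrapping the call
  // in ASSERT() no longer makes sense.
  void checkHeader() const { assert(m_magic == kMagic); }

  // The header is assumed to sit immediately before the object payload.
  static HeapObjectHeader* fromPayload(void* payload) {
    return reinterpret_cast<HeapObjectHeader*>(static_cast<char*>(payload) -
                                               sizeof(HeapObjectHeader));
  }

 private:
  static constexpr uint32_t kMagic = 0xC0DEC0DE;  // hypothetical sentinel
  uint32_t m_magic = kMagic;
};

void exampleCallSite(void* address) {
  HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
  // Before this CL: ASSERT(header->checkHeader());  (assumed a bool return)
  // After this CL: call it solely for its side effect, as the diff does.
  header->checkHeader();
}

int main() {
  // Lay out a header followed by its payload, as a heap allocator would.
  alignas(HeapObjectHeader) char storage[sizeof(HeapObjectHeader) + 8] = {};
  new (storage) HeapObjectHeader();
  void* payload = storage + sizeof(HeapObjectHeader);
  exampleCallSite(payload);  // checkHeader() passes; no output on success.
  return 0;
}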
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "platform/heap/HeapAllocator.h"

namespace blink {

void HeapAllocator::backingFree(void* address) {
  if (!address)
    return;

  ThreadState* state = ThreadState::current();
  if (state->sweepForbidden())
    return;
  ASSERT(!state->isInGC());

  // Don't promptly free large objects because their page is never reused.
  // Don't free backings allocated on other threads.
  BasePage* page = pageFromObject(address);
  if (page->isLargeObjectPage() || page->arena()->getThreadState() != state)
    return;

  HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
-  ASSERT(header->checkHeader());
+  header->checkHeader();
  NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage();
  state->promptlyFreed(header->gcInfoIndex());
  arena->promptlyFreeObject(header);
}

void HeapAllocator::freeVectorBacking(void* address) {
  backingFree(address);
}

void HeapAllocator::freeInlineVectorBacking(void* address) {

(...skipping 15 matching lines...)

  ASSERT(state->isAllocationAllowed());
  DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap());

  // FIXME: Support expand for large objects.
  // Don't expand backings allocated on other threads.
  BasePage* page = pageFromObject(address);
  if (page->isLargeObjectPage() || page->arena()->getThreadState() != state)
    return false;

  HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
-  ASSERT(header->checkHeader());
+  header->checkHeader();
  NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage();
  bool succeed = arena->expandObject(header, newSize);
  if (succeed)
    state->allocationPointAdjusted(arena->arenaIndex());
  return succeed;
}

bool HeapAllocator::expandVectorBacking(void* address, size_t newSize) {
  return backingExpand(address, newSize);
}

(...skipping 21 matching lines...)

  ASSERT(state->isAllocationAllowed());
  DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap());

  // FIXME: Support shrink for large objects.
  // Don't shrink backings allocated on other threads.
  BasePage* page = pageFromObject(address);
  if (page->isLargeObjectPage() || page->arena()->getThreadState() != state)
    return false;

  HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
-  ASSERT(header->checkHeader());
+  header->checkHeader();
  NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage();
  // We shrink the object only if the shrinking will make a non-small
  // prompt-free block.
  // FIXME: Optimize the threshold size.
  if (quantizedCurrentSize <=
          quantizedShrunkSize + sizeof(HeapObjectHeader) + sizeof(void*) * 32 &&
      !arena->isObjectAllocatedAtAllocationPoint(header))
    return true;

  bool succeededAtAllocationPoint =
      arena->shrinkObject(header, quantizedShrunkSize);
  if (succeededAtAllocationPoint)
    state->allocationPointAdjusted(arena->arenaIndex());
  return true;
}

bool HeapAllocator::shrinkVectorBacking(void* address,
                                        size_t quantizedCurrentSize,
                                        size_t quantizedShrunkSize) {
  return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize);
}

bool HeapAllocator::shrinkInlineVectorBacking(void* address,
                                              size_t quantizedCurrentSize,
                                              size_t quantizedShrunkSize) {
  return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize);
}

}  // namespace blink
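For reference, the backingShrink threshold above reads more easily with concrete numbers. The following sketch uses hypothetical sizes (an 8-byte header and 64-bit pointers; not Blink's actual constants): a shrink that would not free more than sizeof(HeapObjectHeader) + 32 pointers' worth of space, roughly 264 bytes here, is skipped (but still reported as successful) unless the object sits at the arena's current allocation point.

// Sketch of the backingShrink threshold with assumed sizes.
#include <cstddef>
#include <iostream>

constexpr size_t kHeaderSize = 8;  // hypothetical sizeof(HeapObjectHeader)
constexpr size_t kThresholdSlack = kHeaderSize + sizeof(void*) * 32;

// Mirrors the condition in backingShrink: shrink only when the freed space
// exceeds kThresholdSlack, or when the object is at the allocation point.
bool worthShrinking(size_t quantizedCurrentSize,
                    size_t quantizedShrunkSize,
                    bool atAllocationPoint) {
  return quantizedCurrentSize > quantizedShrunkSize + kThresholdSlack ||
         atAllocationPoint;
}

int main() {
  std::cout << kThresholdSlack << "\n";                   // 264 on a 64-bit build
  std::cout << worthShrinking(1024, 900, false) << "\n";  // 0: only 124 bytes freed
  std::cout << worthShrinking(1024, 512, false) << "\n";  // 1: 512 bytes freed
  return 0;
}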
