OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "platform/heap/HeapAllocator.h" | 5 #include "platform/heap/HeapAllocator.h" |
6 | 6 |
7 namespace blink { | 7 namespace blink { |
8 | 8 |
9 void HeapAllocator::backingFree(void* address) { | 9 void HeapAllocator::backingFree(void* address) { |
10 if (!address) | 10 if (!address) |
11 return; | 11 return; |
12 | 12 |
13 ThreadState* state = ThreadState::current(); | 13 ThreadState* state = ThreadState::current(); |
14 if (state->sweepForbidden()) | 14 if (state->sweepForbidden()) |
15 return; | 15 return; |
16 ASSERT(!state->isInGC()); | 16 ASSERT(!state->isInGC()); |
17 | 17 |
18 // Don't promptly free large objects because their page is never reused. | 18 // Don't promptly free large objects because their page is never reused. |
19 // Don't free backings allocated on other threads. | 19 // Don't free backings allocated on other threads. |
20 BasePage* page = pageFromObject(address); | 20 BasePage* page = pageFromObject(address); |
21 if (page->isLargeObjectPage() || page->arena()->getThreadState() != state) | 21 if (page->isLargeObjectPage() || page->arena()->getThreadState() != state) |
22 return; | 22 return; |
23 | 23 |
24 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 24 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
25 header->checkHeader(); | |
26 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); | 25 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); |
27 state->promptlyFreed(header->gcInfoIndex()); | 26 state->promptlyFreed(header->gcInfoIndex()); |
28 arena->promptlyFreeObject(header); | 27 arena->promptlyFreeObject(header); |
29 } | 28 } |
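
Note: the visible change in this hunk is the removal of the explicit header->checkHeader() calls; presumably the sanity check now happens inside HeapObjectHeader itself (e.g. when the header is obtained from the payload), though that is not shown in this excerpt.

To make the prompt-free fast path above concrete, here is a minimal standalone sketch of the idea. This is not Blink code; ToyArena and every name in it are hypothetical. An object freed before the next GC either rewinds the arena's bump pointer (if it is the most recent allocation) or is merely accounted for so the next sweep can reclaim it:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    struct ToyArena {
      uint8_t buffer[4096];
      size_t allocationPoint = 0;     // bump pointer into buffer
      size_t promptlyFreedBytes = 0;  // dead bytes awaiting the next sweep

      void* allocate(size_t size) {
        assert(allocationPoint + size <= sizeof(buffer));
        void* result = buffer + allocationPoint;
        allocationPoint += size;
        return result;
      }

      void promptlyFree(void* address, size_t size) {
        if (static_cast<uint8_t*>(address) + size == buffer + allocationPoint)
          allocationPoint -= size;     // space is reusable immediately
        else
          promptlyFreedBytes += size;  // reclaimed lazily at the next sweep
      }
    };

    int main() {
      ToyArena arena;
      void* a = arena.allocate(64);
      void* b = arena.allocate(128);
      arena.promptlyFree(a, 64);   // interior object: only accounted for sweep
      arena.promptlyFree(b, 128);  // at the allocation point: pointer rewinds
      assert(arena.allocationPoint == 64 && arena.promptlyFreedBytes == 64);
    }
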
30 | 29 |
31 void HeapAllocator::freeVectorBacking(void* address) { | 30 void HeapAllocator::freeVectorBacking(void* address) { |
32 backingFree(address); | 31 backingFree(address); |
33 } | 32 } |
34 | 33 |
35 void HeapAllocator::freeInlineVectorBacking(void* address) { | 34 void HeapAllocator::freeInlineVectorBacking(void* address) { |
(...skipping 15 matching lines...) |
51 ASSERT(state->isAllocationAllowed()); | 50 ASSERT(state->isAllocationAllowed()); |
52 DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap()); | 51 DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap()); |
53 | 52 |
54 // FIXME: Support expand for large objects. | 53 // FIXME: Support expand for large objects. |
55 // Don't expand backings allocated on other threads. | 54 // Don't expand backings allocated on other threads. |
56 BasePage* page = pageFromObject(address); | 55 BasePage* page = pageFromObject(address); |
57 if (page->isLargeObjectPage() || page->arena()->getThreadState() != state) | 56 if (page->isLargeObjectPage() || page->arena()->getThreadState() != state) |
58 return false; | 57 return false; |
59 | 58 |
60 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 59 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
61 header->checkHeader(); | |
62 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); | 60 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); |
63 bool succeed = arena->expandObject(header, newSize); | 61 bool succeed = arena->expandObject(header, newSize); |
64 if (succeed) | 62 if (succeed) |
65 state->allocationPointAdjusted(arena->arenaIndex()); | 63 state->allocationPointAdjusted(arena->arenaIndex()); |
66 return succeed; | 64 return succeed; |
67 } | 65 } |
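
backingExpand only ever grows a backing store in place, which is possible when the object ends exactly at the arena's current allocation point and the page has headroom. A hedged sketch of that idea, reusing the hypothetical ToyArena above (again, not the real NormalPageArena::expandObject):

    // Returns true if the object could be grown without moving it.
    bool tryExpand(ToyArena& arena, void* address, size_t oldSize,
                   size_t newSize) {
      if (newSize <= oldSize)
        return false;  // this sketch only handles growth
      uint8_t* end = static_cast<uint8_t*>(address) + oldSize;
      bool atAllocationPoint = (end == arena.buffer + arena.allocationPoint);
      if (!atAllocationPoint ||
          arena.allocationPoint + (newSize - oldSize) > sizeof(arena.buffer))
        return false;  // object is interior, or no headroom left
      arena.allocationPoint += newSize - oldSize;  // grow in place
      return true;
    }

On failure the caller falls back to the usual allocate-copy-free path, which is why the sketch simply returns false rather than reallocating itself.
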
68 | 66 |
69 bool HeapAllocator::expandVectorBacking(void* address, size_t newSize) { | 67 bool HeapAllocator::expandVectorBacking(void* address, size_t newSize) { |
70 return backingExpand(address, newSize); | 68 return backingExpand(address, newSize); |
71 } | 69 } |
(...skipping 21 matching lines...) |
93 ASSERT(state->isAllocationAllowed()); | 91 ASSERT(state->isAllocationAllowed()); |
94 DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap()); | 92 DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap()); |
95 | 93 |
96 // FIXME: Support shrink for large objects. | 94 // FIXME: Support shrink for large objects. |
97 // Don't shrink backings allocated on other threads. | 95 // Don't shrink backings allocated on other threads. |
98 BasePage* page = pageFromObject(address); | 96 BasePage* page = pageFromObject(address); |
99 if (page->isLargeObjectPage() || page->arena()->getThreadState() != state) | 97 if (page->isLargeObjectPage() || page->arena()->getThreadState() != state) |
100 return false; | 98 return false; |
101 | 99 |
102 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 100 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
103 header->checkHeader(); | |
104 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); | 101 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); |
105 // We shrink the object only if the shrinking will make a non-small | 102 // We shrink the object only if the shrinking will make a non-small |
106 // prompt-free block. | 103 // prompt-free block. |
107 // FIXME: Optimize the threshold size. | 104 // FIXME: Optimize the threshold size. |
108 if (quantizedCurrentSize <= | 105 if (quantizedCurrentSize <= |
109 quantizedShrunkSize + sizeof(HeapObjectHeader) + sizeof(void*) * 32 && | 106 quantizedShrunkSize + sizeof(HeapObjectHeader) + sizeof(void*) * 32 && |
110 !arena->isObjectAllocatedAtAllocationPoint(header)) | 107 !arena->isObjectAllocatedAtAllocationPoint(header)) |
111 return true; | 108 return true; |
112 | 109 |
113 bool succeededAtAllocationPoint = | 110 bool succeededAtAllocationPoint = |
114 arena->shrinkObject(header, quantizedShrunkSize); | 111 arena->shrinkObject(header, quantizedShrunkSize); |
115 if (succeededAtAllocationPoint) | 112 if (succeededAtAllocationPoint) |
116 state->allocationPointAdjusted(arena->arenaIndex()); | 113 state->allocationPointAdjusted(arena->arenaIndex()); |
117 return true; | 114 return true; |
118 } | 115 } |
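
The skip-shrink threshold above is worth making concrete. Assuming a 64-bit build where sizeof(void*) == 8, and taking sizeof(HeapObjectHeader) as 8 bytes (the real value depends on build configuration), a shrink only carves out a prompt-free block when it releases more than 8 + 8 * 32 = 264 bytes; smaller slack is simply left inside the object until the next GC, unless the object sits at the allocation point, where rewinding the bump pointer is always cheap:

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t headerSize = 8;  // assumed HeapObjectHeader size
      const size_t threshold = headerSize + sizeof(void*) * 32;  // 264 on LP64

      // 1024 -> 900 frees only 124 bytes: below the threshold, backingShrink
      // returns true without actually splitting off a free block.
      std::printf("1024 -> 900 carves a block? %d\n",
                  !(1024 <= 900 + threshold));

      // 1024 -> 512 frees 512 bytes: enough to justify a prompt-free block,
      // so the arena's shrinkObject path runs.
      std::printf("1024 -> 512 carves a block? %d\n",
                  !(1024 <= 512 + threshold));
    }
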
119 | 116 |
120 bool HeapAllocator::shrinkVectorBacking(void* address, | 117 bool HeapAllocator::shrinkVectorBacking(void* address, |
121 size_t quantizedCurrentSize, | 118 size_t quantizedCurrentSize, |
122 size_t quantizedShrunkSize) { | 119 size_t quantizedShrunkSize) { |
123 return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); | 120 return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); |
124 } | 121 } |
125 | 122 |
126 bool HeapAllocator::shrinkInlineVectorBacking(void* address, | 123 bool HeapAllocator::shrinkInlineVectorBacking(void* address, |
127 size_t quantizedCurrentSize, | 124 size_t quantizedCurrentSize, |
128 size_t quantizedShrunkSize) { | 125 size_t quantizedShrunkSize) { |
129 return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); | 126 return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); |
130 } | 127 } |
131 | 128 |
132 } // namespace blink | 129 } // namespace blink |