| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/heap/HeapAllocator.h" | 5 #include "platform/heap/HeapAllocator.h" |
| 6 | 6 |
| 7 namespace blink { | 7 namespace blink { |
| 8 | 8 |
| 9 void HeapAllocator::backingFree(void* address) | 9 void HeapAllocator::backingFree(void* address) |
| 10 { | 10 { |
| 11 if (!address) | 11 if (!address) |
| 12 return; | 12 return; |
| 13 | 13 |
| 14 ThreadState* state = ThreadState::current(); | 14 ThreadState* state = ThreadState::current(); |
| 15 if (state->sweepForbidden()) | 15 if (state->sweepForbidden()) |
| 16 return; | 16 return; |
| 17 ASSERT(!state->isInGC()); | 17 ASSERT(!state->isInGC()); |
| 18 | 18 |
| 19 // Don't promptly free large objects because their page is never reused. | 19 // Don't promptly free large objects because their page is never reused. |
| 20 // Don't free backings allocated on other threads. | 20 // Don't free backings allocated on other threads. |
| 21 BasePage* page = pageFromObject(address); | 21 BasePage* page = pageFromObject(address); |
| 22 if (page->isLargeObjectPage() || page->arena()->threadState() != state) | 22 if (page->isLargeObjectPage() || page->arena()->getThreadState() != state) |
| 23 return; | 23 return; |
| 24 | 24 |
| 25 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 25 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
| 26 ASSERT(header->checkHeader()); | 26 ASSERT(header->checkHeader()); |
| 27 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); | 27 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); |
| 28 state->promptlyFreed(header->gcInfoIndex()); | 28 state->promptlyFreed(header->gcInfoIndex()); |
| 29 arena->promptlyFreeObject(header); | 29 arena->promptlyFreeObject(header); |
| 30 } | 30 } |
| 31 | 31 |
| 32 void HeapAllocator::freeVectorBacking(void* address) | 32 void HeapAllocator::freeVectorBacking(void* address) |
| (...skipping 18 matching lines...) |
| 51 | 51 |
| 52 ThreadState* state = ThreadState::current(); | 52 ThreadState* state = ThreadState::current(); |
| 53 if (state->sweepForbidden()) | 53 if (state->sweepForbidden()) |
| 54 return false; | 54 return false; |
| 55 ASSERT(!state->isInGC()); | 55 ASSERT(!state->isInGC()); |
| 56 ASSERT(state->isAllocationAllowed()); | 56 ASSERT(state->isAllocationAllowed()); |
| 57 | 57 |
| 58 // FIXME: Support expand for large objects. | 58 // FIXME: Support expand for large objects. |
| 59 // Don't expand backings allocated on other threads. | 59 // Don't expand backings allocated on other threads. |
| 60 BasePage* page = pageFromObject(address); | 60 BasePage* page = pageFromObject(address); |
| 61 if (page->isLargeObjectPage() || page->arena()->threadState() != state) | 61 if (page->isLargeObjectPage() || page->arena()->getThreadState() != state) |
| 62 return false; | 62 return false; |
| 63 | 63 |
| 64 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 64 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
| 65 ASSERT(header->checkHeader()); | 65 ASSERT(header->checkHeader()); |
| 66 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); | 66 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); |
| 67 bool succeed = arena->expandObject(header, newSize); | 67 bool succeed = arena->expandObject(header, newSize); |
| 68 if (succeed) | 68 if (succeed) |
| 69 state->allocationPointAdjusted(arena->arenaIndex()); | 69 state->allocationPointAdjusted(arena->arenaIndex()); |
| 70 return succeed; | 70 return succeed; |
| 71 } | 71 } |
| (...skipping 22 matching lines...) |
| 94 | 94 |
| 95 ThreadState* state = ThreadState::current(); | 95 ThreadState* state = ThreadState::current(); |
| 96 if (state->sweepForbidden()) | 96 if (state->sweepForbidden()) |
| 97 return false; | 97 return false; |
| 98 ASSERT(!state->isInGC()); | 98 ASSERT(!state->isInGC()); |
| 99 ASSERT(state->isAllocationAllowed()); | 99 ASSERT(state->isAllocationAllowed()); |
| 100 | 100 |
| 101 // FIXME: Support shrink for large objects. | 101 // FIXME: Support shrink for large objects. |
| 102 // Don't shrink backings allocated on other threads. | 102 // Don't shrink backings allocated on other threads. |
| 103 BasePage* page = pageFromObject(address); | 103 BasePage* page = pageFromObject(address); |
| 104 if (page->isLargeObjectPage() || page->arena()->threadState() != state) | 104 if (page->isLargeObjectPage() || page->arena()->getThreadState() != state) |
| 105 return false; | 105 return false; |
| 106 | 106 |
| 107 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 107 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
| 108 ASSERT(header->checkHeader()); | 108 ASSERT(header->checkHeader()); |
| 109 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); | 109 NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); |
| 110 // We shrink the object only if the shrinking will make a non-small | 110 // We shrink the object only if the shrinking will make a non-small |
| 111 // prompt-free block. | 111 // prompt-free block. |
| 112 // FIXME: Optimize the threshold size. | 112 // FIXME: Optimize the threshold size. |
| 113 if (quantizedCurrentSize <= quantizedShrunkSize + sizeof(HeapObjectHeader) + sizeof(void*) * 32 && !arena->isObjectAllocatedAtAllocationPoint(header)) | 113 if (quantizedCurrentSize <= quantizedShrunkSize + sizeof(HeapObjectHeader) + sizeof(void*) * 32 && !arena->isObjectAllocatedAtAllocationPoint(header)) |
| 114 return true; | 114 return true; |
| 115 | 115 |
| 116 bool succeededAtAllocationPoint = arena->shrinkObject(header, quantizedShrunkSize); | 116 bool succeededAtAllocationPoint = arena->shrinkObject(header, quantizedShrunkSize); |
| 117 if (succeededAtAllocationPoint) | 117 if (succeededAtAllocationPoint) |
| 118 state->allocationPointAdjusted(arena->arenaIndex()); | 118 state->allocationPointAdjusted(arena->arenaIndex()); |
| 119 return true; | 119 return true; |
| 120 } | 120 } |
| 121 | 121 |
| 122 bool HeapAllocator::shrinkVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) | 122 bool HeapAllocator::shrinkVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) |
| 123 { | 123 { |
| 124 return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); | 124 return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); |
| 125 } | 125 } |
| 126 | 126 |
| 127 bool HeapAllocator::shrinkInlineVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) | 127 bool HeapAllocator::shrinkInlineVectorBacking(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) |
| 128 { | 128 { |
| 129 return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); | 129 return backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); |
| 130 } | 130 } |
| 131 | 131 |
| 132 } // namespace blink | 132 } // namespace blink |