| OLD | NEW |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "platform/heap/PageMemory.h" | 5 #include "platform/heap/PageMemory.h" |
| 6 | 6 |
| 7 #include "platform/heap/Heap.h" | 7 #include "platform/heap/Heap.h" |
| 8 #include "wtf/AddressSanitizer.h" | 8 #include "wtf/AddressSanitizer.h" |
| 9 #include "wtf/Assertions.h" | 9 #include "wtf/Assertions.h" |
| 10 #include "wtf/Atomics.h" | 10 #include "wtf/Atomics.h" |
| (...skipping 54 matching lines...) |
| 65 size = (size + WTF::kPageAllocationGranularityOffsetMask) & | 65 size = (size + WTF::kPageAllocationGranularityOffsetMask) & |
| 66 WTF::kPageAllocationGranularityBaseMask; | 66 WTF::kPageAllocationGranularityBaseMask; |
| 67 Address base = static_cast<Address>( | 67 Address base = static_cast<Address>( |
| 68 WTF::AllocPages(nullptr, size, blinkPageSize, WTF::PageInaccessible)); | 68 WTF::AllocPages(nullptr, size, blinkPageSize, WTF::PageInaccessible)); |
| 69 if (!base) | 69 if (!base) |
| 70 blinkGCOutOfMemory(); | 70 blinkGCOutOfMemory(); |
| 71 return new PageMemoryRegion(base, size, numPages, regionTree); | 71 return new PageMemoryRegion(base, size, numPages, regionTree); |
| 72 } | 72 } |
| 73 | 73 |
| 74 PageMemoryRegion* RegionTree::lookup(Address address) { | 74 PageMemoryRegion* RegionTree::lookup(Address address) { |
| 75 MutexLocker locker(m_mutex); | |
| 76 RegionTreeNode* current = m_root; | 75 RegionTreeNode* current = m_root; |
| 77 while (current) { | 76 while (current) { |
| 78 Address base = current->m_region->base(); | 77 Address base = current->m_region->base(); |
| 79 if (address < base) { | 78 if (address < base) { |
| 80 current = current->m_left; | 79 current = current->m_left; |
| 81 continue; | 80 continue; |
| 82 } | 81 } |
| 83 if (address >= base + current->m_region->size()) { | 82 if (address >= base + current->m_region->size()) { |
| 84 current = current->m_right; | 83 current = current->m_right; |
| 85 continue; | 84 continue; |
| 86 } | 85 } |
| 87 ASSERT(current->m_region->contains(address)); | 86 ASSERT(current->m_region->contains(address)); |
| 88 return current->m_region; | 87 return current->m_region; |
| 89 } | 88 } |
| 90 return nullptr; | 89 return nullptr; |
| 91 } | 90 } |
| 92 | 91 |
| 93 void RegionTree::add(PageMemoryRegion* region) { | 92 void RegionTree::add(PageMemoryRegion* region) { |
| 94 ASSERT(region); | 93 ASSERT(region); |
| 95 RegionTreeNode* newTree = new RegionTreeNode(region); | 94 RegionTreeNode* newTree = new RegionTreeNode(region); |
| 96 MutexLocker locker(m_mutex); | |
| 97 newTree->addTo(&m_root); | 95 newTree->addTo(&m_root); |
| 98 } | 96 } |
| 99 | 97 |
| 100 void RegionTreeNode::addTo(RegionTreeNode** context) { | 98 void RegionTreeNode::addTo(RegionTreeNode** context) { |
| 101 Address base = m_region->base(); | 99 Address base = m_region->base(); |
| 102 for (RegionTreeNode* current = *context; current; current = *context) { | 100 for (RegionTreeNode* current = *context; current; current = *context) { |
| 103 ASSERT(!current->m_region->contains(base)); | 101 ASSERT(!current->m_region->contains(base)); |
| 104 context = (base < current->m_region->base()) ? &current->m_left | 102 context = (base < current->m_region->base()) ? &current->m_left |
| 105 : &current->m_right; | 103 : &current->m_right; |
| 106 } | 104 } |
| 107 *context = this; | 105 *context = this; |
| 108 } | 106 } |
| 109 | 107 |
| 110 void RegionTree::remove(PageMemoryRegion* region) { | 108 void RegionTree::remove(PageMemoryRegion* region) { |
| 111 // Deletion of large objects (and thus their regions) can happen | |
| 112 // concurrently on sweeper threads. Removal can also happen during thread | |
| 113 // shutdown, but that case is safe. Regardless, we make all removals | |
| 114 // mutually exclusive. | |
| 115 MutexLocker locker(m_mutex); | |
| 116 ASSERT(region); | 109 ASSERT(region); |
| 117 ASSERT(m_root); | 110 ASSERT(m_root); |
| 118 Address base = region->base(); | 111 Address base = region->base(); |
| 119 RegionTreeNode** context = &m_root; | 112 RegionTreeNode** context = &m_root; |
| 120 RegionTreeNode* current = m_root; | 113 RegionTreeNode* current = m_root; |
| 121 for (; current; current = *context) { | 114 for (; current; current = *context) { |
| 122 if (region == current->m_region) | 115 if (region == current->m_region) |
| 123 break; | 116 break; |
| 124 context = (base < current->m_region->base()) ? &current->m_left | 117 context = (base < current->m_region->base()) ? &current->m_left |
| 125 : &current->m_right; | 118 : &current->m_right; |
| (...skipping 50 matching lines...) |
| 176 size_t allocationSize = payloadSize + 2 * blinkGuardPageSize; | 169 size_t allocationSize = payloadSize + 2 * blinkGuardPageSize; |
| 177 PageMemoryRegion* pageMemoryRegion = | 170 PageMemoryRegion* pageMemoryRegion = |
| 178 PageMemoryRegion::allocateLargePage(allocationSize, regionTree); | 171 PageMemoryRegion::allocateLargePage(allocationSize, regionTree); |
| 179 PageMemory* storage = | 172 PageMemory* storage = |
| 180 setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize); | 173 setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize); |
| 181 RELEASE_ASSERT(storage->commit()); | 174 RELEASE_ASSERT(storage->commit()); |
| 182 return storage; | 175 return storage; |
| 183 } | 176 } |
| 184 | 177 |
| 185 } // namespace blink | 178 } // namespace blink |