Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(22)

Side by Side Diff: third_party/WebKit/Source/platform/heap/PageMemory.cpp

Issue 1804863002: Refactor RegionTree (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "platform/heap/PageMemory.h" 5 #include "platform/heap/PageMemory.h"
6 6
7 #include "platform/heap/Heap.h" 7 #include "platform/heap/Heap.h"
8 #include "wtf/Assertions.h" 8 #include "wtf/Assertions.h"
9 #include "wtf/Atomics.h" 9 #include "wtf/Atomics.h"
10 #include "wtf/PageAllocator.h" 10 #include "wtf/PageAllocator.h"
(...skipping 15 matching lines...) Expand all
26 { 26 {
27 WTF::decommitSystemPages(m_base, m_size); 27 WTF::decommitSystemPages(m_base, m_size);
28 WTF::setSystemPagesInaccessible(m_base, m_size); 28 WTF::setSystemPagesInaccessible(m_base, m_size);
29 } 29 }
30 30
31 31
32 PageMemoryRegion::PageMemoryRegion(Address base, size_t size, unsigned numPages) 32 PageMemoryRegion::PageMemoryRegion(Address base, size_t size, unsigned numPages)
33 : MemoryRegion(base, size) 33 : MemoryRegion(base, size)
34 , m_isLargePage(numPages == 1) 34 , m_isLargePage(numPages == 1)
35 , m_numPages(numPages) 35 , m_numPages(numPages)
36 , m_regionTree(nullptr)
36 { 37 {
37 Heap::addPageMemoryRegion(this);
38 for (size_t i = 0; i < blinkPagesPerRegion; ++i) 38 for (size_t i = 0; i < blinkPagesPerRegion; ++i)
39 m_inUse[i] = false; 39 m_inUse[i] = false;
40 } 40 }
41 41
42 PageMemoryRegion::~PageMemoryRegion() 42 PageMemoryRegion::~PageMemoryRegion()
43 { 43 {
44 Heap::removePageMemoryRegion(this); 44 if (m_regionTree)
45 m_regionTree->remove(this);
45 release(); 46 release();
46 } 47 }
47 48
48 void PageMemoryRegion::pageDeleted(Address page) 49 void PageMemoryRegion::pageDeleted(Address page)
49 { 50 {
50 markPageUnused(page); 51 markPageUnused(page);
51 if (!atomicDecrement(&m_numPages)) 52 if (!atomicDecrement(&m_numPages))
52 delete this; 53 delete this;
53 } 54 }
54 55
(...skipping 10 matching lines...) Expand all
65 // Round size up to the allocation granularity. 66 // Round size up to the allocation granularity.
66 size = (size + WTF::kPageAllocationGranularityOffsetMask) & WTF::kPageAllocationGranularityBaseMask; 67 size = (size + WTF::kPageAllocationGranularityOffsetMask) & WTF::kPageAllocationGranularityBaseMask;
67 Address base = static_cast<Address>(WTF::allocPages(nullptr, size, blinkPageSize, WTF::PageInaccessible)); 68 Address base = static_cast<Address>(WTF::allocPages(nullptr, size, blinkPageSize, WTF::PageInaccessible));
68 if (!base) 69 if (!base)
69 blinkGCOutOfMemory(); 70 blinkGCOutOfMemory();
70 return new PageMemoryRegion(base, size, numPages); 71 return new PageMemoryRegion(base, size, numPages);
71 } 72 }
72 73
73 PageMemoryRegion* RegionTree::lookup(Address address) 74 PageMemoryRegion* RegionTree::lookup(Address address)
74 { 75 {
75 RegionTree* current = this; 76 MutexLocker locker(m_mutex);
77 RegionTreeNode* current = m_root;
76 while (current) { 78 while (current) {
77 Address base = current->m_region->base(); 79 Address base = current->m_region->base();
78 if (address < base) { 80 if (address < base) {
79 current = current->m_left; 81 current = current->m_left;
80 continue; 82 continue;
81 } 83 }
82 if (address >= base + current->m_region->size()) { 84 if (address >= base + current->m_region->size()) {
83 current = current->m_right; 85 current = current->m_right;
84 continue; 86 continue;
85 } 87 }
86 ASSERT(current->m_region->contains(address)); 88 ASSERT(current->m_region->contains(address));
87 return current->m_region; 89 return current->m_region;
88 } 90 }
89 return nullptr; 91 return nullptr;
90 } 92 }
91 93
92 void RegionTree::add(RegionTree* newTree, RegionTree** context) 94 void RegionTree::add(PageMemoryRegion* region)
93 { 95 {
94 ASSERT(newTree); 96 ASSERT(region);
95 Address base = newTree->m_region->base(); 97 RegionTreeNode* newTree = new RegionTreeNode(region);
96 for (RegionTree* current = *context; current; current = *context) { 98 region->setRegionTree(this);
haraken 2016/03/15 05:49:50 I'd prefer removing setRegionTree. It looks nicer
keishi 2016/03/15 07:48:43 Done.
99 MutexLocker locker(m_mutex);
100 newTree->addTo(&m_root);
101 }
102
103 void RegionTreeNode::addTo(RegionTreeNode** context)
104 {
105 Address base = m_region->base();
106 for (RegionTreeNode* current = *context; current; current = *context) {
97 ASSERT(!current->m_region->contains(base)); 107 ASSERT(!current->m_region->contains(base));
98 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; 108 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
99 } 109 }
100 *context = newTree; 110 *context = this;
101 } 111 }
102 112
103 void RegionTree::remove(PageMemoryRegion* region, RegionTree** context) 113 void RegionTree::remove(PageMemoryRegion* region)
104 { 114 {
115 region->setRegionTree(nullptr);
116 // Deletion of large objects (and thus their regions) can happen
117 // concurrently on sweeper threads. Removal can also happen during thread
118 // shutdown, but that case is safe. Regardless, we make all removals
119 // mutually exclusive.
120 MutexLocker locker(m_mutex);
105 ASSERT(region); 121 ASSERT(region);
106 ASSERT(context); 122 ASSERT(m_root);
107 Address base = region->base(); 123 Address base = region->base();
108 RegionTree* current = *context; 124 RegionTreeNode** context = &m_root;
125 RegionTreeNode* current = m_root;
109 for (; current; current = *context) { 126 for (; current; current = *context) {
110 if (region == current->m_region) 127 if (region == current->m_region)
111 break; 128 break;
112 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; 129 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
113 } 130 }
114 131
115 // Shutdown via detachMainThread might not have populated the region tree. 132 // Shutdown via detachMainThread might not have populated the region tree.
116 if (!current) 133 if (!current)
117 return; 134 return;
118 135
119 *context = nullptr; 136 *context = nullptr;
120 if (current->m_left) { 137 if (current->m_left) {
121 add(current->m_left, context); 138 current->m_left->addTo(context);
122 current->m_left = nullptr; 139 current->m_left = nullptr;
123 } 140 }
124 if (current->m_right) { 141 if (current->m_right) {
125 add(current->m_right, context); 142 current->m_right->addTo(context);
126 current->m_right = nullptr; 143 current->m_right = nullptr;
127 } 144 }
128 delete current; 145 delete current;
129 } 146 }
130 147
131 PageMemory::PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable) 148 PageMemory::PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable)
132 : m_reserved(reserved) 149 : m_reserved(reserved)
133 , m_writable(writable) 150 , m_writable(writable)
134 { 151 {
135 ASSERT(reserved->contains(writable)); 152 ASSERT(reserved->contains(writable));
(...skipping 22 matching lines...) Expand all
158 ASSERT(payloadSize > 0); 175 ASSERT(payloadSize > 0);
159 176
160 // Virtual memory allocation routines operate in OS page sizes. 177 // Virtual memory allocation routines operate in OS page sizes.
161 // Round up the requested size to nearest os page size. 178 // Round up the requested size to nearest os page size.
162 payloadSize = roundToOsPageSize(payloadSize); 179 payloadSize = roundToOsPageSize(payloadSize);
163 180
164 // Overallocate by 2 times OS page size to have space for a 181 // Overallocate by 2 times OS page size to have space for a
165 // guard page at the beginning and end of blink heap page. 182 // guard page at the beginning and end of blink heap page.
166 size_t allocationSize = payloadSize + 2 * blinkGuardPageSize; 183 size_t allocationSize = payloadSize + 2 * blinkGuardPageSize;
167 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocateLargePage(allocationSize); 184 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocateLargePage(allocationSize);
185 Heap::addPageMemoryRegion(pageMemoryRegion);
168 PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize); 186 PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize);
169 RELEASE_ASSERT(storage->commit()); 187 RELEASE_ASSERT(storage->commit());
170 return storage; 188 return storage;
171 } 189 }
172 190
173 } // namespace blink 191 } // namespace blink
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698