Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(270)

Side by Side Diff: third_party/WebKit/Source/platform/heap/PageMemory.cpp

Issue 1804863002: Refactor RegionTree (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: fixed Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « third_party/WebKit/Source/platform/heap/PageMemory.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "platform/heap/PageMemory.h" 5 #include "platform/heap/PageMemory.h"
6 6
7 #include "platform/heap/Heap.h" 7 #include "platform/heap/Heap.h"
8 #include "wtf/Assertions.h" 8 #include "wtf/Assertions.h"
9 #include "wtf/Atomics.h" 9 #include "wtf/Atomics.h"
10 #include "wtf/PageAllocator.h" 10 #include "wtf/PageAllocator.h"
(...skipping 11 matching lines...) Expand all
22 return WTF::setSystemPagesAccessible(m_base, m_size); 22 return WTF::setSystemPagesAccessible(m_base, m_size);
23 } 23 }
24 24
25 void MemoryRegion::decommit() 25 void MemoryRegion::decommit()
26 { 26 {
27 WTF::decommitSystemPages(m_base, m_size); 27 WTF::decommitSystemPages(m_base, m_size);
28 WTF::setSystemPagesInaccessible(m_base, m_size); 28 WTF::setSystemPagesInaccessible(m_base, m_size);
29 } 29 }
30 30
31 31
32 PageMemoryRegion::PageMemoryRegion(Address base, size_t size, unsigned numPages) 32 PageMemoryRegion::PageMemoryRegion(Address base, size_t size, unsigned numPages, RegionTree* regionTree)
33 : MemoryRegion(base, size) 33 : MemoryRegion(base, size)
34 , m_isLargePage(numPages == 1) 34 , m_isLargePage(numPages == 1)
35 , m_numPages(numPages) 35 , m_numPages(numPages)
36 , m_regionTree(regionTree)
36 { 37 {
37 Heap::addPageMemoryRegion(this); 38 m_regionTree->add(this);
38 for (size_t i = 0; i < blinkPagesPerRegion; ++i) 39 for (size_t i = 0; i < blinkPagesPerRegion; ++i)
39 m_inUse[i] = false; 40 m_inUse[i] = false;
40 } 41 }
41 42
42 PageMemoryRegion::~PageMemoryRegion() 43 PageMemoryRegion::~PageMemoryRegion()
43 { 44 {
44 Heap::removePageMemoryRegion(this); 45 if (m_regionTree)
46 m_regionTree->remove(this);
45 release(); 47 release();
46 } 48 }
47 49
48 void PageMemoryRegion::pageDeleted(Address page) 50 void PageMemoryRegion::pageDeleted(Address page)
49 { 51 {
50 markPageUnused(page); 52 markPageUnused(page);
51 if (!atomicDecrement(&m_numPages)) 53 if (!atomicDecrement(&m_numPages))
52 delete this; 54 delete this;
53 } 55 }
54 56
55 // TODO(haraken): Like partitionOutOfMemoryWithLotsOfUncommitedPages(), 57 // TODO(haraken): Like partitionOutOfMemoryWithLotsOfUncommitedPages(),
56 // we should probably have a way to distinguish physical memory OOM from 58 // we should probably have a way to distinguish physical memory OOM from
57 // virtual address space OOM. 59 // virtual address space OOM.
58 static NEVER_INLINE void blinkGCOutOfMemory() 60 static NEVER_INLINE void blinkGCOutOfMemory()
59 { 61 {
60 IMMEDIATE_CRASH(); 62 IMMEDIATE_CRASH();
61 } 63 }
62 64
63 PageMemoryRegion* PageMemoryRegion::allocate(size_t size, unsigned numPages) 65 PageMemoryRegion* PageMemoryRegion::allocate(size_t size, unsigned numPages, RegionTree* regionTree)
64 { 66 {
65 // Round size up to the allocation granularity. 67 // Round size up to the allocation granularity.
66 size = (size + WTF::kPageAllocationGranularityOffsetMask) & WTF::kPageAllocationGranularityBaseMask; 68 size = (size + WTF::kPageAllocationGranularityOffsetMask) & WTF::kPageAllocationGranularityBaseMask;
67 Address base = static_cast<Address>(WTF::allocPages(nullptr, size, blinkPageSize, WTF::PageInaccessible)); 69 Address base = static_cast<Address>(WTF::allocPages(nullptr, size, blinkPageSize, WTF::PageInaccessible));
68 if (!base) 70 if (!base)
69 blinkGCOutOfMemory(); 71 blinkGCOutOfMemory();
70 return new PageMemoryRegion(base, size, numPages); 72 return new PageMemoryRegion(base, size, numPages, regionTree);
71 } 73 }
72 74
73 PageMemoryRegion* RegionTree::lookup(Address address) 75 PageMemoryRegion* RegionTree::lookup(Address address)
74 { 76 {
75 RegionTree* current = this; 77 MutexLocker locker(m_mutex);
78 RegionTreeNode* current = m_root;
76 while (current) { 79 while (current) {
77 Address base = current->m_region->base(); 80 Address base = current->m_region->base();
78 if (address < base) { 81 if (address < base) {
79 current = current->m_left; 82 current = current->m_left;
80 continue; 83 continue;
81 } 84 }
82 if (address >= base + current->m_region->size()) { 85 if (address >= base + current->m_region->size()) {
83 current = current->m_right; 86 current = current->m_right;
84 continue; 87 continue;
85 } 88 }
86 ASSERT(current->m_region->contains(address)); 89 ASSERT(current->m_region->contains(address));
87 return current->m_region; 90 return current->m_region;
88 } 91 }
89 return nullptr; 92 return nullptr;
90 } 93 }
91 94
92 void RegionTree::add(RegionTree* newTree, RegionTree** context) 95 void RegionTree::add(PageMemoryRegion* region)
93 { 96 {
94 ASSERT(newTree); 97 ASSERT(region);
95 Address base = newTree->m_region->base(); 98 RegionTreeNode* newTree = new RegionTreeNode(region);
96 for (RegionTree* current = *context; current; current = *context) { 99 MutexLocker locker(m_mutex);
100 newTree->addTo(&m_root);
101 }
102
103 void RegionTreeNode::addTo(RegionTreeNode** context)
104 {
105 Address base = m_region->base();
106 for (RegionTreeNode* current = *context; current; current = *context) {
97 ASSERT(!current->m_region->contains(base)); 107 ASSERT(!current->m_region->contains(base));
98 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; 108 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
99 } 109 }
100 *context = newTree; 110 *context = this;
101 } 111 }
102 112
103 void RegionTree::remove(PageMemoryRegion* region, RegionTree** context) 113 void RegionTree::remove(PageMemoryRegion* region)
104 { 114 {
115 // Deletion of large objects (and thus their regions) can happen
116 // concurrently on sweeper threads. Removal can also happen during thread
117 // shutdown, but that case is safe. Regardless, we make all removals
118 // mutually exclusive.
119 MutexLocker locker(m_mutex);
105 ASSERT(region); 120 ASSERT(region);
106 ASSERT(context); 121 ASSERT(m_root);
107 Address base = region->base(); 122 Address base = region->base();
108 RegionTree* current = *context; 123 RegionTreeNode** context = &m_root;
124 RegionTreeNode* current = m_root;
109 for (; current; current = *context) { 125 for (; current; current = *context) {
110 if (region == current->m_region) 126 if (region == current->m_region)
111 break; 127 break;
112 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; 128 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
113 } 129 }
114 130
115 // Shutdown via detachMainThread might not have populated the region tree. 131 // Shutdown via detachMainThread might not have populated the region tree.
116 if (!current) 132 if (!current)
117 return; 133 return;
118 134
119 *context = nullptr; 135 *context = nullptr;
120 if (current->m_left) { 136 if (current->m_left) {
121 add(current->m_left, context); 137 current->m_left->addTo(context);
122 current->m_left = nullptr; 138 current->m_left = nullptr;
123 } 139 }
124 if (current->m_right) { 140 if (current->m_right) {
125 add(current->m_right, context); 141 current->m_right->addTo(context);
126 current->m_right = nullptr; 142 current->m_right = nullptr;
127 } 143 }
128 delete current; 144 delete current;
129 } 145 }
130 146
131 PageMemory::PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable) 147 PageMemory::PageMemory(PageMemoryRegion* reserved, const MemoryRegion& writable)
132 : m_reserved(reserved) 148 : m_reserved(reserved)
133 , m_writable(writable) 149 , m_writable(writable)
134 { 150 {
135 ASSERT(reserved->contains(writable)); 151 ASSERT(reserved->contains(writable));
(...skipping 10 matching lines...) Expand all
146 // Setup the payload one guard page into the page memory. 162 // Setup the payload one guard page into the page memory.
147 Address payloadAddress = region->base() + pageOffset + blinkGuardPageSize; 163 Address payloadAddress = region->base() + pageOffset + blinkGuardPageSize;
148 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize)); 164 return new PageMemory(region, MemoryRegion(payloadAddress, payloadSize));
149 } 165 }
150 166
151 static size_t roundToOsPageSize(size_t size) 167 static size_t roundToOsPageSize(size_t size)
152 { 168 {
153 return (size + WTF::kSystemPageSize - 1) & ~(WTF::kSystemPageSize - 1); 169 return (size + WTF::kSystemPageSize - 1) & ~(WTF::kSystemPageSize - 1);
154 } 170 }
155 171
156 PageMemory* PageMemory::allocate(size_t payloadSize) 172 PageMemory* PageMemory::allocate(size_t payloadSize, RegionTree* regionTree)
157 { 173 {
158 ASSERT(payloadSize > 0); 174 ASSERT(payloadSize > 0);
159 175
160 // Virtual memory allocation routines operate in OS page sizes. 176 // Virtual memory allocation routines operate in OS page sizes.
161 // Round up the requested size to nearest os page size. 177 // Round up the requested size to nearest os page size.
162 payloadSize = roundToOsPageSize(payloadSize); 178 payloadSize = roundToOsPageSize(payloadSize);
163 179
164 // Overallocate by 2 times OS page size to have space for a 180 // Overallocate by 2 times OS page size to have space for a
165 // guard page at the beginning and end of blink heap page. 181 // guard page at the beginning and end of blink heap page.
166 size_t allocationSize = payloadSize + 2 * blinkGuardPageSize; 182 size_t allocationSize = payloadSize + 2 * blinkGuardPageSize;
167 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocateLargePage(all ocationSize); 183 PageMemoryRegion* pageMemoryRegion = PageMemoryRegion::allocateLargePage(all ocationSize, regionTree);
168 PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize); 184 PageMemory* storage = setupPageMemoryInRegion(pageMemoryRegion, 0, payloadSize);
169 RELEASE_ASSERT(storage->commit()); 185 RELEASE_ASSERT(storage->commit());
170 return storage; 186 return storage;
171 } 187 }
172 188
173 } // namespace blink 189 } // namespace blink
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/platform/heap/PageMemory.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698