Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(270)

Side by Side Diff: third_party/WebKit/Source/platform/heap/HeapPage.cpp

Issue 1754183002: Rename BaseHeap to BaseArena (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
56 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead. 56 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead.
57 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1 57 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1
58 58
59 // When finalizing a non-inlined vector backing store/container, remove 59 // When finalizing a non-inlined vector backing store/container, remove
60 // its contiguous container annotation. Required as it will not be destructed 60 // its contiguous container annotation. Required as it will not be destructed
61 // from its Vector. 61 // from its Vector.
62 #define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \ 62 #define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \
63 do { \ 63 do { \
64 BasePage* page = pageFromObject(object); \ 64 BasePage* page = pageFromObject(object); \
65 ASSERT(page); \ 65 ASSERT(page); \
66 bool isContainer = ThreadState::isVectorHeapIndex(page->heap()->heapInde x()); \ 66 bool isContainer = ThreadState::isVectorArenaIndex(page->arena()->arenaI ndex()); \
67 if (!isContainer && page->isLargeObjectPage()) \ 67 if (!isContainer && page->isLargeObjectPage()) \
68 isContainer = static_cast<LargeObjectPage*>(page)->isVectorBackingPa ge(); \ 68 isContainer = static_cast<LargeObjectPage*>(page)->isVectorBackingPa ge(); \
69 if (isContainer) \ 69 if (isContainer) \
70 ANNOTATE_DELETE_BUFFER(object, objectSize, 0); \ 70 ANNOTATE_DELETE_BUFFER(object, objectSize, 0); \
71 } while (0) 71 } while (0)
72 72
73 // A vector backing store represented by a large object is marked 73 // A vector backing store represented by a large object is marked
74 // so that when it is finalized, its ASan annotation will be 74 // so that when it is finalized, its ASan annotation will be
75 // correctly retired. 75 // correctly retired.
76 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(heap, largeObject) \ 76 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject) \
77 if (ThreadState::isVectorHeapIndex(heap->heapIndex())) { \ 77 if (ThreadState::isVectorArenaIndex(arena->arenaIndex())) { \
78 BasePage* largePage = pageFromObject(largeObject); \ 78 BasePage* largePage = pageFromObject(largeObject); \
79 ASSERT(largePage->isLargeObjectPage()); \ 79 ASSERT(largePage->isLargeObjectPage()); \
80 static_cast<LargeObjectPage*>(largePage)->setIsVectorBackingPage(); \ 80 static_cast<LargeObjectPage*>(largePage)->setIsVectorBackingPage(); \
81 } 81 }
82 #else 82 #else
83 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0 83 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 0
84 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize) 84 #define ASAN_RETIRE_CONTAINER_ANNOTATION(payload, payloadSize)
85 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(heap, largeObject) 85 #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject)
86 #endif 86 #endif
87 87
88 namespace blink { 88 namespace blink {
89 89
90 #if ENABLE(ASSERT) 90 #if ENABLE(ASSERT)
91 NO_SANITIZE_ADDRESS 91 NO_SANITIZE_ADDRESS
92 void HeapObjectHeader::zapMagic() 92 void HeapObjectHeader::zapMagic()
93 { 93 {
94 ASSERT(checkHeader()); 94 ASSERT(checkHeader());
95 m_magic = zappedMagic; 95 m_magic = zappedMagic;
96 } 96 }
97 #endif 97 #endif
98 98
99 void HeapObjectHeader::finalize(Address object, size_t objectSize) 99 void HeapObjectHeader::finalize(Address object, size_t objectSize)
100 { 100 {
101 HeapAllocHooks::freeHookIfEnabled(object); 101 HeapAllocHooks::freeHookIfEnabled(object);
102 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex()); 102 const GCInfo* gcInfo = Heap::gcInfo(gcInfoIndex());
103 if (gcInfo->hasFinalizer()) 103 if (gcInfo->hasFinalizer())
104 gcInfo->m_finalize(object); 104 gcInfo->m_finalize(object);
105 105
106 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize); 106 ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize);
107 } 107 }
108 108
109 BaseHeap::BaseHeap(ThreadState* state, int index) 109 BaseArena::BaseArena(ThreadState* state, int index)
110 : m_firstPage(nullptr) 110 : m_firstPage(nullptr)
111 , m_firstUnsweptPage(nullptr) 111 , m_firstUnsweptPage(nullptr)
112 , m_threadState(state) 112 , m_threadState(state)
113 , m_index(index) 113 , m_index(index)
114 { 114 {
115 } 115 }
116 116
117 BaseHeap::~BaseHeap() 117 BaseArena::~BaseArena()
118 { 118 {
119 ASSERT(!m_firstPage); 119 ASSERT(!m_firstPage);
120 ASSERT(!m_firstUnsweptPage); 120 ASSERT(!m_firstUnsweptPage);
121 } 121 }
122 122
123 void BaseHeap::cleanupPages() 123 void BaseArena::cleanupPages()
124 { 124 {
125 clearFreeLists(); 125 clearFreeLists();
126 126
127 ASSERT(!m_firstUnsweptPage); 127 ASSERT(!m_firstUnsweptPage);
128 // Add the BaseHeap's pages to the orphanedPagePool. 128 // Add the BaseArena's pages to the orphanedPagePool.
129 for (BasePage* page = m_firstPage; page; page = page->next()) { 129 for (BasePage* page = m_firstPage; page; page = page->next()) {
130 Heap::decreaseAllocatedSpace(page->size()); 130 Heap::decreaseAllocatedSpace(page->size());
131 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); 131 Heap::orphanedPagePool()->addOrphanedPage(arenaIndex(), page);
132 } 132 }
133 m_firstPage = nullptr; 133 m_firstPage = nullptr;
134 } 134 }
135 135
136 void BaseHeap::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotI nfo& info) 136 void BaseArena::takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshot Info& info)
137 { 137 {
138 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName" 138 // |dumpBaseName| at this point is "blink_gc/thread_X/heaps/HeapName"
139 WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpBaseName); 139 WebMemoryAllocatorDump* allocatorDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpBaseName);
140 size_t pageCount = 0; 140 size_t pageCount = 0;
141 BasePage::HeapSnapshotInfo heapInfo; 141 BasePage::HeapSnapshotInfo heapInfo;
142 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) { 142 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) {
143 String dumpName = dumpBaseName + String::format("/pages/page_%lu", stati c_cast<unsigned long>(pageCount++)); 143 String dumpName = dumpBaseName + String::format("/pages/page_%lu", stati c_cast<unsigned long>(pageCount++));
144 WebMemoryAllocatorDump* pageDump = BlinkGCMemoryDumpProvider::instance() ->createMemoryAllocatorDumpForCurrentGC(dumpName); 144 WebMemoryAllocatorDump* pageDump = BlinkGCMemoryDumpProvider::instance() ->createMemoryAllocatorDumpForCurrentGC(dumpName);
145 145
146 page->takeSnapshot(pageDump, info, heapInfo); 146 page->takeSnapshot(pageDump, info, heapInfo);
147 } 147 }
148 allocatorDump->addScalar("blink_page_count", "objects", pageCount); 148 allocatorDump->addScalar("blink_page_count", "objects", pageCount);
149 149
150 // When taking a full dump (w/ freelist), both the /buckets and /pages 150 // When taking a full dump (w/ freelist), both the /buckets and /pages
151 // report their free size but they are not meant to be added together. 151 // report their free size but they are not meant to be added together.
152 // Therefore, here we override the free_size of the parent heap to be 152 // Therefore, here we override the free_size of the parent heap to be
153 // equal to the free_size of the sum of its heap pages. 153 // equal to the free_size of the sum of its heap pages.
154 allocatorDump->addScalar("free_size", "bytes", heapInfo.freeSize); 154 allocatorDump->addScalar("free_size", "bytes", heapInfo.freeSize);
155 allocatorDump->addScalar("free_count", "objects", heapInfo.freeCount); 155 allocatorDump->addScalar("free_count", "objects", heapInfo.freeCount);
156 } 156 }
157 157
158 #if ENABLE(ASSERT) 158 #if ENABLE(ASSERT)
159 BasePage* BaseHeap::findPageFromAddress(Address address) 159 BasePage* BaseArena::findPageFromAddress(Address address)
160 { 160 {
161 for (BasePage* page = m_firstPage; page; page = page->next()) { 161 for (BasePage* page = m_firstPage; page; page = page->next()) {
162 if (page->contains(address)) 162 if (page->contains(address))
163 return page; 163 return page;
164 } 164 }
165 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) { 165 for (BasePage* page = m_firstUnsweptPage; page; page = page->next()) {
166 if (page->contains(address)) 166 if (page->contains(address))
167 return page; 167 return page;
168 } 168 }
169 return nullptr; 169 return nullptr;
170 } 170 }
171 #endif 171 #endif
172 172
173 void BaseHeap::makeConsistentForGC() 173 void BaseArena::makeConsistentForGC()
174 { 174 {
175 clearFreeLists(); 175 clearFreeLists();
176 ASSERT(isConsistentForGC()); 176 ASSERT(isConsistentForGC());
177 for (BasePage* page = m_firstPage; page; page = page->next()) { 177 for (BasePage* page = m_firstPage; page; page = page->next()) {
178 page->markAsUnswept(); 178 page->markAsUnswept();
179 page->invalidateObjectStartBitmap(); 179 page->invalidateObjectStartBitmap();
180 } 180 }
181 181
182 // If a new GC is requested before this thread got around to sweep, 182 // If a new GC is requested before this thread got around to sweep,
183 // ie. due to the thread doing a long running operation, we clear 183 // ie. due to the thread doing a long running operation, we clear
(...skipping 10 matching lines...) Expand all
194 } 194 }
195 if (previousPage) { 195 if (previousPage) {
196 ASSERT(m_firstUnsweptPage); 196 ASSERT(m_firstUnsweptPage);
197 previousPage->m_next = m_firstPage; 197 previousPage->m_next = m_firstPage;
198 m_firstPage = m_firstUnsweptPage; 198 m_firstPage = m_firstUnsweptPage;
199 m_firstUnsweptPage = nullptr; 199 m_firstUnsweptPage = nullptr;
200 } 200 }
201 ASSERT(!m_firstUnsweptPage); 201 ASSERT(!m_firstUnsweptPage);
202 } 202 }
203 203
204 void BaseHeap::makeConsistentForMutator() 204 void BaseArena::makeConsistentForMutator()
205 { 205 {
206 clearFreeLists(); 206 clearFreeLists();
207 ASSERT(isConsistentForGC()); 207 ASSERT(isConsistentForGC());
208 ASSERT(!m_firstPage); 208 ASSERT(!m_firstPage);
209 209
210 // Drop marks from marked objects and rebuild free lists in preparation for 210 // Drop marks from marked objects and rebuild free lists in preparation for
211 // resuming the executions of mutators. 211 // resuming the executions of mutators.
212 BasePage* previousPage = nullptr; 212 BasePage* previousPage = nullptr;
213 for (BasePage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { 213 for (BasePage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) {
214 page->makeConsistentForMutator(); 214 page->makeConsistentForMutator();
215 page->markAsSwept(); 215 page->markAsSwept();
216 page->invalidateObjectStartBitmap(); 216 page->invalidateObjectStartBitmap();
217 } 217 }
218 if (previousPage) { 218 if (previousPage) {
219 ASSERT(m_firstUnsweptPage); 219 ASSERT(m_firstUnsweptPage);
220 previousPage->m_next = m_firstPage; 220 previousPage->m_next = m_firstPage;
221 m_firstPage = m_firstUnsweptPage; 221 m_firstPage = m_firstUnsweptPage;
222 m_firstUnsweptPage = nullptr; 222 m_firstUnsweptPage = nullptr;
223 } 223 }
224 ASSERT(!m_firstUnsweptPage); 224 ASSERT(!m_firstUnsweptPage);
225 } 225 }
226 226
227 size_t BaseHeap::objectPayloadSizeForTesting() 227 size_t BaseArena::objectPayloadSizeForTesting()
228 { 228 {
229 ASSERT(isConsistentForGC()); 229 ASSERT(isConsistentForGC());
230 ASSERT(!m_firstUnsweptPage); 230 ASSERT(!m_firstUnsweptPage);
231 231
232 size_t objectPayloadSize = 0; 232 size_t objectPayloadSize = 0;
233 for (BasePage* page = m_firstPage; page; page = page->next()) 233 for (BasePage* page = m_firstPage; page; page = page->next())
234 objectPayloadSize += page->objectPayloadSizeForTesting(); 234 objectPayloadSize += page->objectPayloadSizeForTesting();
235 return objectPayloadSize; 235 return objectPayloadSize;
236 } 236 }
237 237
238 void BaseHeap::prepareHeapForTermination() 238 void BaseArena::prepareHeapForTermination()
239 { 239 {
240 ASSERT(!m_firstUnsweptPage); 240 ASSERT(!m_firstUnsweptPage);
241 for (BasePage* page = m_firstPage; page; page = page->next()) { 241 for (BasePage* page = m_firstPage; page; page = page->next()) {
242 page->setTerminating(); 242 page->setTerminating();
243 } 243 }
244 } 244 }
245 245
246 void BaseHeap::prepareForSweep() 246 void BaseArena::prepareForSweep()
247 { 247 {
248 ASSERT(threadState()->isInGC()); 248 ASSERT(threadState()->isInGC());
249 ASSERT(!m_firstUnsweptPage); 249 ASSERT(!m_firstUnsweptPage);
250 250
251 // Move all pages to a list of unswept pages. 251 // Move all pages to a list of unswept pages.
252 m_firstUnsweptPage = m_firstPage; 252 m_firstUnsweptPage = m_firstPage;
253 m_firstPage = nullptr; 253 m_firstPage = nullptr;
254 } 254 }
255 255
#if defined(ADDRESS_SANITIZER)
// Applies or clears ASan poisoning on object payloads. SetPoison runs
// before sweeping (pages are on the unswept list); ClearPoison runs after
// a completed sweep (surviving pages are back on m_firstPage).
void BaseArena::poisonHeap(BlinkGC::ObjectsToPoison objectsToPoison, BlinkGC::Poisoning poisoning)
{
    // TODO(sof): support complete poisoning of all arenas.
    ASSERT(objectsToPoison != BlinkGC::MarkedAndUnmarked || arenaIndex() == BlinkGC::EagerSweepArenaIndex);

    // Poisoning may be limited to unmarked objects only, or apply to all.
    if (poisoning == BlinkGC::SetPoison) {
        for (BasePage* page = m_firstUnsweptPage; page; page = page->next())
            page->poisonObjects(objectsToPoison, poisoning);
        return;
    }
    // Clearing poison after sweeping: the live objects' pages are reachable
    // via m_firstPage and the unswept list must be empty.
    ASSERT(!m_firstUnsweptPage);
    for (BasePage* page = m_firstPage; page; page = page->next())
        page->poisonObjects(objectsToPoison, poisoning);
}
#endif
281 281
282 Address BaseHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex) 282 Address BaseArena::lazySweep(size_t allocationSize, size_t gcInfoIndex)
283 { 283 {
284 // If there are no pages to be swept, return immediately. 284 // If there are no pages to be swept, return immediately.
285 if (!m_firstUnsweptPage) 285 if (!m_firstUnsweptPage)
286 return nullptr; 286 return nullptr;
287 287
288 RELEASE_ASSERT(threadState()->isSweepingInProgress()); 288 RELEASE_ASSERT(threadState()->isSweepingInProgress());
289 289
290 // lazySweepPages() can be called recursively if finalizers invoked in 290 // lazySweepPages() can be called recursively if finalizers invoked in
291 // page->sweep() allocate memory and the allocation triggers 291 // page->sweep() allocate memory and the allocation triggers
292 // lazySweepPages(). This check prevents the sweeping from being executed 292 // lazySweepPages(). This check prevents the sweeping from being executed
293 // recursively. 293 // recursively.
294 if (threadState()->sweepForbidden()) 294 if (threadState()->sweepForbidden())
295 return nullptr; 295 return nullptr;
296 296
297 TRACE_EVENT0("blink_gc", "BaseHeap::lazySweepPages"); 297 TRACE_EVENT0("blink_gc", "BaseArena::lazySweepPages");
298 ThreadState::SweepForbiddenScope sweepForbidden(threadState()); 298 ThreadState::SweepForbiddenScope sweepForbidden(threadState());
299 ScriptForbiddenIfMainThreadScope scriptForbidden; 299 ScriptForbiddenIfMainThreadScope scriptForbidden;
300 300
301 double startTime = WTF::currentTimeMS(); 301 double startTime = WTF::currentTimeMS();
302 Address result = lazySweepPages(allocationSize, gcInfoIndex); 302 Address result = lazySweepPages(allocationSize, gcInfoIndex);
303 threadState()->accumulateSweepingTime(WTF::currentTimeMS() - startTime); 303 threadState()->accumulateSweepingTime(WTF::currentTimeMS() - startTime);
304 Heap::reportMemoryUsageForTracing(); 304 Heap::reportMemoryUsageForTracing();
305 305
306 return result; 306 return result;
307 } 307 }
308 308
309 void BaseHeap::sweepUnsweptPage() 309 void BaseArena::sweepUnsweptPage()
310 { 310 {
311 BasePage* page = m_firstUnsweptPage; 311 BasePage* page = m_firstUnsweptPage;
312 if (page->isEmpty()) { 312 if (page->isEmpty()) {
313 page->unlink(&m_firstUnsweptPage); 313 page->unlink(&m_firstUnsweptPage);
314 page->removeFromHeap(); 314 page->removeFromHeap();
315 } else { 315 } else {
316 // Sweep a page and move the page from m_firstUnsweptPages to 316 // Sweep a page and move the page from m_firstUnsweptPages to
317 // m_firstPages. 317 // m_firstPages.
318 page->sweep(); 318 page->sweep();
319 page->unlink(&m_firstUnsweptPage); 319 page->unlink(&m_firstUnsweptPage);
320 page->link(&m_firstPage); 320 page->link(&m_firstPage);
321 page->markAsSwept(); 321 page->markAsSwept();
322 } 322 }
323 } 323 }
324 324
325 bool BaseHeap::lazySweepWithDeadline(double deadlineSeconds) 325 bool BaseArena::lazySweepWithDeadline(double deadlineSeconds)
326 { 326 {
327 // It might be heavy to call Platform::current()->monotonicallyIncreasingTim eSeconds() 327 // It might be heavy to call Platform::current()->monotonicallyIncreasingTim eSeconds()
328 // per page (i.e., 128 KB sweep or one LargeObject sweep), so we check 328 // per page (i.e., 128 KB sweep or one LargeObject sweep), so we check
329 // the deadline per 10 pages. 329 // the deadline per 10 pages.
330 static const int deadlineCheckInterval = 10; 330 static const int deadlineCheckInterval = 10;
331 331
332 RELEASE_ASSERT(threadState()->isSweepingInProgress()); 332 RELEASE_ASSERT(threadState()->isSweepingInProgress());
333 ASSERT(threadState()->sweepForbidden()); 333 ASSERT(threadState()->sweepForbidden());
334 ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbi dden()); 334 ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbi dden());
335 335
336 int pageCount = 1; 336 int pageCount = 1;
337 while (m_firstUnsweptPage) { 337 while (m_firstUnsweptPage) {
338 sweepUnsweptPage(); 338 sweepUnsweptPage();
339 if (pageCount % deadlineCheckInterval == 0) { 339 if (pageCount % deadlineCheckInterval == 0) {
340 if (deadlineSeconds <= monotonicallyIncreasingTime()) { 340 if (deadlineSeconds <= monotonicallyIncreasingTime()) {
341 // Deadline has come. 341 // Deadline has come.
342 Heap::reportMemoryUsageForTracing(); 342 Heap::reportMemoryUsageForTracing();
343 return !m_firstUnsweptPage; 343 return !m_firstUnsweptPage;
344 } 344 }
345 } 345 }
346 pageCount++; 346 pageCount++;
347 } 347 }
348 Heap::reportMemoryUsageForTracing(); 348 Heap::reportMemoryUsageForTracing();
349 return true; 349 return true;
350 } 350 }
351 351
352 void BaseHeap::completeSweep() 352 void BaseArena::completeSweep()
353 { 353 {
354 RELEASE_ASSERT(threadState()->isSweepingInProgress()); 354 RELEASE_ASSERT(threadState()->isSweepingInProgress());
355 ASSERT(threadState()->sweepForbidden()); 355 ASSERT(threadState()->sweepForbidden());
356 ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbi dden()); 356 ASSERT(!threadState()->isMainThread() || ScriptForbiddenScope::isScriptForbi dden());
357 357
358 while (m_firstUnsweptPage) { 358 while (m_firstUnsweptPage) {
359 sweepUnsweptPage(); 359 sweepUnsweptPage();
360 } 360 }
361 Heap::reportMemoryUsageForTracing(); 361 Heap::reportMemoryUsageForTracing();
362 } 362 }
363 363
364 NormalPageHeap::NormalPageHeap(ThreadState* state, int index) 364 NormalPageHeap::NormalPageHeap(ThreadState* state, int index)
365 : BaseHeap(state, index) 365 : BaseArena(state, index)
366 , m_currentAllocationPoint(nullptr) 366 , m_currentAllocationPoint(nullptr)
367 , m_remainingAllocationSize(0) 367 , m_remainingAllocationSize(0)
368 , m_lastRemainingAllocationSize(0) 368 , m_lastRemainingAllocationSize(0)
369 , m_promptlyFreedSize(0) 369 , m_promptlyFreedSize(0)
370 { 370 {
371 clearFreeLists(); 371 clearFreeLists();
372 } 372 }
373 373
374 void NormalPageHeap::clearFreeLists() 374 void NormalPageHeap::clearFreeLists()
375 { 375 {
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
410 if (m_freeList.takeSnapshot(dumpName)) { 410 if (m_freeList.takeSnapshot(dumpName)) {
411 WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instanc e()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets"); 411 WebMemoryAllocatorDump* bucketsDump = BlinkGCMemoryDumpProvider::instanc e()->createMemoryAllocatorDumpForCurrentGC(dumpName + "/buckets");
412 WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages"); 412 WebMemoryAllocatorDump* pagesDump = BlinkGCMemoryDumpProvider::instance( )->createMemoryAllocatorDumpForCurrentGC(dumpName + "/pages");
413 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOw nershipEdge(pagesDump->guid(), bucketsDump->guid()); 413 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOw nershipEdge(pagesDump->guid(), bucketsDump->guid());
414 } 414 }
415 } 415 }
416 416
417 void NormalPageHeap::allocatePage() 417 void NormalPageHeap::allocatePage()
418 { 418 {
419 threadState()->shouldFlushHeapDoesNotContainCache(); 419 threadState()->shouldFlushHeapDoesNotContainCache();
420 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex()); 420 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(arenaIndex());
421 421
422 if (!pageMemory) { 422 if (!pageMemory) {
423 // Allocate a memory region for blinkPagesPerRegion pages that 423 // Allocate a memory region for blinkPagesPerRegion pages that
424 // will each have the following layout. 424 // will each have the following layout.
425 // 425 //
426 // [ guard os page | ... payload ... | guard os page ] 426 // [ guard os page | ... payload ... | guard os page ]
427 // ^---{ aligned to blink page size } 427 // ^---{ aligned to blink page size }
428 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); 428 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages();
429 429
430 // Setup the PageMemory object for each of the pages in the region. 430 // Setup the PageMemory object for each of the pages in the region.
431 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { 431 for (size_t i = 0; i < blinkPagesPerRegion; ++i) {
432 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, i * blinkPageSize, blinkPagePayloadSize()); 432 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, i * blinkPageSize, blinkPagePayloadSize());
433 // Take the first possible page ensuring that this thread actually 433 // Take the first possible page ensuring that this thread actually
434 // gets a page and add the rest to the page pool. 434 // gets a page and add the rest to the page pool.
435 if (!pageMemory) { 435 if (!pageMemory) {
436 bool result = memory->commit(); 436 bool result = memory->commit();
437 // If you hit the ASSERT, it will mean that you're hitting 437 // If you hit the ASSERT, it will mean that you're hitting
438 // the limit of the number of mmapped regions OS can support 438 // the limit of the number of mmapped regions OS can support
439 // (e.g., /proc/sys/vm/max_map_count in Linux). 439 // (e.g., /proc/sys/vm/max_map_count in Linux).
440 RELEASE_ASSERT(result); 440 RELEASE_ASSERT(result);
441 pageMemory = memory; 441 pageMemory = memory;
442 } else { 442 } else {
443 Heap::freePagePool()->addFreePage(heapIndex(), memory); 443 Heap::freePagePool()->addFreePage(arenaIndex(), memory);
444 } 444 }
445 } 445 }
446 } 446 }
447 447
448 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this); 448 NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this);
449 page->link(&m_firstPage); 449 page->link(&m_firstPage);
450 450
451 Heap::increaseAllocatedSpace(page->size()); 451 Heap::increaseAllocatedSpace(page->size());
452 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) 452 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
453 // Allow the following addToFreeList() to add the newly allocated memory 453 // Allow the following addToFreeList() to add the newly allocated memory
(...skipping 13 matching lines...) Expand all
467 467
468 if (page->terminating()) { 468 if (page->terminating()) {
469 // The thread is shutting down and this page is being removed as a part 469 // The thread is shutting down and this page is being removed as a part
470 // of the thread local GC. In that case the object could be traced in 470 // of the thread local GC. In that case the object could be traced in
471 // the next global GC if there is a dangling pointer from a live thread 471 // the next global GC if there is a dangling pointer from a live thread
472 // heap to this dead thread heap. To guard against this, we put the 472 // heap to this dead thread heap. To guard against this, we put the
473 // page into the orphaned page pool and zap the page memory. This 473 // page into the orphaned page pool and zap the page memory. This
474 // ensures that tracing the dangling pointer in the next global GC just 474 // ensures that tracing the dangling pointer in the next global GC just
475 // crashes instead of causing use-after-frees. After the next global 475 // crashes instead of causing use-after-frees. After the next global
476 // GC, the orphaned pages are removed. 476 // GC, the orphaned pages are removed.
477 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); 477 Heap::orphanedPagePool()->addOrphanedPage(arenaIndex(), page);
478 } else { 478 } else {
479 PageMemory* memory = page->storage(); 479 PageMemory* memory = page->storage();
480 page->~NormalPage(); 480 page->~NormalPage();
481 Heap::freePagePool()->addFreePage(heapIndex(), memory); 481 Heap::freePagePool()->addFreePage(arenaIndex(), memory);
482 } 482 }
483 } 483 }
484 484
485 bool NormalPageHeap::coalesce() 485 bool NormalPageHeap::coalesce()
486 { 486 {
487 // Don't coalesce heaps if there are not enough promptly freed entries 487 // Don't coalesce arenas if there are not enough promptly freed entries
488 // to be coalesced. 488 // to be coalesced.
489 // 489 //
490 // FIXME: This threshold is determined just to optimize blink_perf 490 // FIXME: This threshold is determined just to optimize blink_perf
491 // benchmarks. Coalescing is very sensitive to the threashold and 491 // benchmarks. Coalescing is very sensitive to the threashold and
492 // we need further investigations on the coalescing scheme. 492 // we need further investigations on the coalescing scheme.
493 if (m_promptlyFreedSize < 1024 * 1024) 493 if (m_promptlyFreedSize < 1024 * 1024)
494 return false; 494 return false;
495 495
496 if (threadState()->sweepForbidden()) 496 if (threadState()->sweepForbidden())
497 return false; 497 return false;
498 498
499 ASSERT(!hasCurrentAllocationArea()); 499 ASSERT(!hasCurrentAllocationArea());
500 TRACE_EVENT0("blink_gc", "BaseHeap::coalesce"); 500 TRACE_EVENT0("blink_gc", "BaseArena::coalesce");
501 501
502 // Rebuild free lists. 502 // Rebuild free lists.
503 m_freeList.clear(); 503 m_freeList.clear();
504 size_t freedSize = 0; 504 size_t freedSize = 0;
505 for (NormalPage* page = static_cast<NormalPage*>(m_firstPage); page; page = static_cast<NormalPage*>(page->next())) { 505 for (NormalPage* page = static_cast<NormalPage*>(m_firstPage); page; page = static_cast<NormalPage*>(page->next())) {
506 Address startOfGap = page->payload(); 506 Address startOfGap = page->payload();
507 for (Address headerAddress = startOfGap; headerAddress < page->payloadEn d(); ) { 507 for (Address headerAddress = startOfGap; headerAddress < page->payloadEn d(); ) {
508 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(heade rAddress); 508 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(heade rAddress);
509 size_t size = header->size(); 509 size_t size = header->size();
510 ASSERT(size > 0); 510 ASSERT(size > 0);
(...skipping 184 matching lines...) Expand 10 before | Expand all | Expand 10 after
695 } 695 }
696 696
697 Address NormalPageHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIn dex) 697 Address NormalPageHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIn dex)
698 { 698 {
699 ASSERT(allocationSize > remainingAllocationSize()); 699 ASSERT(allocationSize > remainingAllocationSize());
700 ASSERT(allocationSize >= allocationGranularity); 700 ASSERT(allocationSize >= allocationGranularity);
701 701
702 // 1. If this allocation is big enough, allocate a large object. 702 // 1. If this allocation is big enough, allocate a large object.
703 if (allocationSize >= largeObjectSizeThreshold) { 703 if (allocationSize >= largeObjectSizeThreshold) {
704 // TODO(sof): support eagerly finalized large objects, if ever needed. 704 // TODO(sof): support eagerly finalized large objects, if ever needed.
705 RELEASE_ASSERT(heapIndex() != BlinkGC::EagerSweepHeapIndex); 705 RELEASE_ASSERT(arenaIndex() != BlinkGC::EagerSweepArenaIndex);
706 LargeObjectHeap* largeObjectHeap = static_cast<LargeObjectHeap*>(threadS tate()->heap(BlinkGC::LargeObjectHeapIndex)); 706 LargeObjectHeap* largeObjectHeap = static_cast<LargeObjectHeap*>(threadS tate()->arena(BlinkGC::LargeObjectArenaIndex));
707 Address largeObject = largeObjectHeap->allocateLargeObjectPage(allocatio nSize, gcInfoIndex); 707 Address largeObject = largeObjectHeap->allocateLargeObjectPage(allocatio nSize, gcInfoIndex);
708 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); 708 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject);
709 return largeObject; 709 return largeObject;
710 } 710 }
711 711
712 // 2. Try to allocate from a free list. 712 // 2. Try to allocate from a free list.
713 updateRemainingAllocationSize(); 713 updateRemainingAllocationSize();
714 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); 714 Address result = allocateFromFreeList(allocationSize, gcInfoIndex);
715 if (result) 715 if (result)
716 return result; 716 return result;
(...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after
772 ASSERT(remainingAllocationSize() >= allocationSize); 772 ASSERT(remainingAllocationSize() >= allocationSize);
773 m_freeList.m_biggestFreeListIndex = index; 773 m_freeList.m_biggestFreeListIndex = index;
774 return allocateObject(allocationSize, gcInfoIndex); 774 return allocateObject(allocationSize, gcInfoIndex);
775 } 775 }
776 } 776 }
777 m_freeList.m_biggestFreeListIndex = index; 777 m_freeList.m_biggestFreeListIndex = index;
778 return nullptr; 778 return nullptr;
779 } 779 }
780 780
781 LargeObjectHeap::LargeObjectHeap(ThreadState* state, int index) 781 LargeObjectHeap::LargeObjectHeap(ThreadState* state, int index)
782 : BaseHeap(state, index) 782 : BaseArena(state, index)
783 { 783 {
784 } 784 }
785 785
786 Address LargeObjectHeap::allocateLargeObjectPage(size_t allocationSize, size_t g cInfoIndex) 786 Address LargeObjectHeap::allocateLargeObjectPage(size_t allocationSize, size_t g cInfoIndex)
787 { 787 {
788 // Caller already added space for object header and rounded up to allocation 788 // Caller already added space for object header and rounded up to allocation
789 // alignment 789 // alignment
790 ASSERT(!(allocationSize & allocationMask)); 790 ASSERT(!(allocationSize & allocationMask));
791 791
792 // 1. Try to sweep large objects more than allocationSize bytes 792 // 1. Try to sweep large objects more than allocationSize bytes
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
855 if (object->terminating()) { 855 if (object->terminating()) {
856 ASSERT(ThreadState::current()->isTerminating()); 856 ASSERT(ThreadState::current()->isTerminating());
857 // The thread is shutting down and this page is being removed as a part 857 // The thread is shutting down and this page is being removed as a part
858 // of the thread local GC. In that case the object could be traced in 858 // of the thread local GC. In that case the object could be traced in
859 // the next global GC if there is a dangling pointer from a live thread 859 // the next global GC if there is a dangling pointer from a live thread
860 // heap to this dead thread heap. To guard against this, we put the 860 // heap to this dead thread heap. To guard against this, we put the
861 // page into the orphaned page pool and zap the page memory. This 861 // page into the orphaned page pool and zap the page memory. This
862 // ensures that tracing the dangling pointer in the next global GC just 862 // ensures that tracing the dangling pointer in the next global GC just
863 // crashes instead of causing use-after-frees. After the next global 863 // crashes instead of causing use-after-frees. After the next global
864 // GC, the orphaned pages are removed. 864 // GC, the orphaned pages are removed.
865 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object); 865 Heap::orphanedPagePool()->addOrphanedPage(arenaIndex(), object);
866 } else { 866 } else {
867 ASSERT(!ThreadState::current()->isTerminating()); 867 ASSERT(!ThreadState::current()->isTerminating());
868 PageMemory* memory = object->storage(); 868 PageMemory* memory = object->storage();
869 object->~LargeObjectPage(); 869 object->~LargeObjectPage();
870 delete memory; 870 delete memory;
871 } 871 }
872 } 872 }
873 873
874 Address LargeObjectHeap::lazySweepPages(size_t allocationSize, size_t gcInfoInde x) 874 Address LargeObjectHeap::lazySweepPages(size_t allocationSize, size_t gcInfoInde x)
875 { 875 {
(...skipping 163 matching lines...) Expand 10 before | Expand all | Expand 10 after
1039 1039
1040 String dumpName = dumpBaseName + String::format("/buckets/bucket_%lu", s tatic_cast<unsigned long>(1 << i)); 1040 String dumpName = dumpBaseName + String::format("/buckets/bucket_%lu", s tatic_cast<unsigned long>(1 << i));
1041 WebMemoryAllocatorDump* bucketDump = BlinkGCMemoryDumpProvider::instance ()->createMemoryAllocatorDumpForCurrentGC(dumpName); 1041 WebMemoryAllocatorDump* bucketDump = BlinkGCMemoryDumpProvider::instance ()->createMemoryAllocatorDumpForCurrentGC(dumpName);
1042 bucketDump->addScalar("free_count", "objects", entryCount); 1042 bucketDump->addScalar("free_count", "objects", entryCount);
1043 bucketDump->addScalar("free_size", "bytes", freeSize); 1043 bucketDump->addScalar("free_size", "bytes", freeSize);
1044 didDumpBucketStats = true; 1044 didDumpBucketStats = true;
1045 } 1045 }
1046 return didDumpBucketStats; 1046 return didDumpBucketStats;
1047 } 1047 }
1048 1048
1049 BasePage::BasePage(PageMemory* storage, BaseHeap* heap) 1049 BasePage::BasePage(PageMemory* storage, BaseArena* arena)
1050 : m_storage(storage) 1050 : m_storage(storage)
1051 , m_heap(heap) 1051 , m_arena(arena)
1052 , m_next(nullptr) 1052 , m_next(nullptr)
1053 , m_terminating(false) 1053 , m_terminating(false)
1054 , m_swept(true) 1054 , m_swept(true)
1055 { 1055 {
1056 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); 1056 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1057 } 1057 }
1058 1058
1059 void BasePage::markOrphaned() 1059 void BasePage::markOrphaned()
1060 { 1060 {
1061 m_heap = nullptr; 1061 m_arena = nullptr;
1062 m_terminating = false; 1062 m_terminating = false;
1063 // Since we zap the page payload for orphaned pages we need to mark it as 1063 // Since we zap the page payload for orphaned pages we need to mark it as
1064 // unused so a conservative pointer won't interpret the object headers. 1064 // unused so a conservative pointer won't interpret the object headers.
1065 storage()->markUnused(); 1065 storage()->markUnused();
1066 } 1066 }
1067 1067
1068 NormalPage::NormalPage(PageMemory* storage, BaseHeap* heap) 1068 NormalPage::NormalPage(PageMemory* storage, BaseArena* arena)
1069 : BasePage(storage, heap) 1069 : BasePage(storage, arena)
1070 , m_objectStartBitMapComputed(false) 1070 , m_objectStartBitMapComputed(false)
1071 { 1071 {
1072 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); 1072 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1073 } 1073 }
1074 1074
1075 size_t NormalPage::objectPayloadSizeForTesting() 1075 size_t NormalPage::objectPayloadSizeForTesting()
1076 { 1076 {
1077 size_t objectPayloadSize = 0; 1077 size_t objectPayloadSize = 0;
1078 Address headerAddress = payload(); 1078 Address headerAddress = payload();
1079 markAsSwept(); 1079 markAsSwept();
(...skipping 12 matching lines...) Expand all
1092 } 1092 }
1093 1093
1094 bool NormalPage::isEmpty() 1094 bool NormalPage::isEmpty()
1095 { 1095 {
1096 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(payload()); 1096 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(payload());
1097 return header->isFree() && header->size() == payloadSize(); 1097 return header->isFree() && header->size() == payloadSize();
1098 } 1098 }
1099 1099
1100 void NormalPage::removeFromHeap() 1100 void NormalPage::removeFromHeap()
1101 { 1101 {
1102 heapForNormalPage()->freePage(this); 1102 arenaForNormalPage()->freePage(this);
1103 } 1103 }
1104 1104
1105 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) 1105 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
1106 static void discardPages(Address begin, Address end) 1106 static void discardPages(Address begin, Address end)
1107 { 1107 {
1108 uintptr_t beginAddress = WTF::roundUpToSystemPage(reinterpret_cast<uintptr_t >(begin)); 1108 uintptr_t beginAddress = WTF::roundUpToSystemPage(reinterpret_cast<uintptr_t >(begin));
1109 uintptr_t endAddress = WTF::roundDownToSystemPage(reinterpret_cast<uintptr_t >(end)); 1109 uintptr_t endAddress = WTF::roundDownToSystemPage(reinterpret_cast<uintptr_t >(end));
1110 if (beginAddress < endAddress) 1110 if (beginAddress < endAddress)
1111 WTF::discardSystemPages(reinterpret_cast<void*>(beginAddress), endAddres s - beginAddress); 1111 WTF::discardSystemPages(reinterpret_cast<void*>(beginAddress), endAddres s - beginAddress);
1112 } 1112 }
1113 #endif 1113 #endif
1114 1114
1115 void NormalPage::sweep() 1115 void NormalPage::sweep()
1116 { 1116 {
1117 size_t markedObjectSize = 0; 1117 size_t markedObjectSize = 0;
1118 Address startOfGap = payload(); 1118 Address startOfGap = payload();
1119 NormalPageHeap* pageHeap = heapForNormalPage(); 1119 NormalPageHeap* pageHeap = arenaForNormalPage();
1120 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) { 1120 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) {
1121 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress); 1121 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress);
1122 size_t size = header->size(); 1122 size_t size = header->size();
1123 ASSERT(size > 0); 1123 ASSERT(size > 0);
1124 ASSERT(size < blinkPagePayloadSize()); 1124 ASSERT(size < blinkPagePayloadSize());
1125 1125
1126 if (header->isPromptlyFreed()) 1126 if (header->isPromptlyFreed())
1127 pageHeap->decreasePromptlyFreedSize(size); 1127 pageHeap->decreasePromptlyFreedSize(size);
1128 if (header->isFree()) { 1128 if (header->isFree()) {
1129 // Zero the memory in the free list header to maintain the 1129 // Zero the memory in the free list header to maintain the
(...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after
1195 ASSERT(header->checkHeader()); 1195 ASSERT(header->checkHeader());
1196 if (header->isMarked()) { 1196 if (header->isMarked()) {
1197 header->unmark(); 1197 header->unmark();
1198 markedObjectSize += header->size(); 1198 markedObjectSize += header->size();
1199 } else { 1199 } else {
1200 header->markDead(); 1200 header->markDead();
1201 } 1201 }
1202 headerAddress += header->size(); 1202 headerAddress += header->size();
1203 } 1203 }
1204 if (markedObjectSize) 1204 if (markedObjectSize)
1205 heapForNormalPage()->threadState()->increaseMarkedObjectSize(markedObjec tSize); 1205 arenaForNormalPage()->threadState()->increaseMarkedObjectSize(markedObje ctSize);
1206 } 1206 }
1207 1207
1208 void NormalPage::makeConsistentForMutator() 1208 void NormalPage::makeConsistentForMutator()
1209 { 1209 {
1210 Address startOfGap = payload(); 1210 Address startOfGap = payload();
1211 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1211 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1212 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress); 1212 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress);
1213 size_t size = header->size(); 1213 size_t size = header->size();
1214 ASSERT(size < blinkPagePayloadSize()); 1214 ASSERT(size < blinkPagePayloadSize());
1215 if (header->isPromptlyFreed()) 1215 if (header->isPromptlyFreed())
1216 heapForNormalPage()->decreasePromptlyFreedSize(size); 1216 arenaForNormalPage()->decreasePromptlyFreedSize(size);
1217 if (header->isFree()) { 1217 if (header->isFree()) {
1218 // Zero the memory in the free list header to maintain the 1218 // Zero the memory in the free list header to maintain the
1219 // invariant that memory on the free list is zero filled. 1219 // invariant that memory on the free list is zero filled.
1220 // The rest of the memory is already on the free list and is 1220 // The rest of the memory is already on the free list and is
1221 // therefore already zero filled. 1221 // therefore already zero filled.
1222 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); 1222 SET_MEMORY_INACCESSIBLE(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry));
1223 CHECK_MEMORY_INACCESSIBLE(headerAddress, size); 1223 CHECK_MEMORY_INACCESSIBLE(headerAddress, size);
1224 headerAddress += size; 1224 headerAddress += size;
1225 continue; 1225 continue;
1226 } 1226 }
1227 ASSERT(header->checkHeader()); 1227 ASSERT(header->checkHeader());
1228 1228
1229 if (startOfGap != headerAddress) 1229 if (startOfGap != headerAddress)
1230 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - start OfGap); 1230 arenaForNormalPage()->addToFreeList(startOfGap, headerAddress - star tOfGap);
1231 if (header->isMarked()) 1231 if (header->isMarked())
1232 header->unmark(); 1232 header->unmark();
1233 headerAddress += size; 1233 headerAddress += size;
1234 startOfGap = headerAddress; 1234 startOfGap = headerAddress;
1235 ASSERT(headerAddress <= payloadEnd()); 1235 ASSERT(headerAddress <= payloadEnd());
1236 } 1236 }
1237 if (startOfGap != payloadEnd()) 1237 if (startOfGap != payloadEnd())
1238 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap ); 1238 arenaForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGa p);
1239 } 1239 }
1240 1240
1241 #if defined(ADDRESS_SANITIZER) 1241 #if defined(ADDRESS_SANITIZER)
1242 void NormalPage::poisonObjects(BlinkGC::ObjectsToPoison objectsToPoison, BlinkGC ::Poisoning poisoning) 1242 void NormalPage::poisonObjects(BlinkGC::ObjectsToPoison objectsToPoison, BlinkGC ::Poisoning poisoning)
1243 { 1243 {
1244 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1244 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1245 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress); 1245 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAdd ress);
1246 ASSERT(header->size() < blinkPagePayloadSize()); 1246 ASSERT(header->size() < blinkPagePayloadSize());
1247 // Check if a free list entry first since we cannot call 1247 // Check if a free list entry first since we cannot call
1248 // isMarked on a free list entry. 1248 // isMarked on a free list entry.
(...skipping 177 matching lines...) Expand 10 before | Expand all | Expand 10 after
1426 1426
1427 #if ENABLE(ASSERT) 1427 #if ENABLE(ASSERT)
1428 bool NormalPage::contains(Address addr) 1428 bool NormalPage::contains(Address addr)
1429 { 1429 {
1430 Address blinkPageStart = roundToBlinkPageStart(getAddress()); 1430 Address blinkPageStart = roundToBlinkPageStart(getAddress());
1431 ASSERT(blinkPageStart == getAddress() - blinkGuardPageSize); // Page is at a ligned address plus guard page size. 1431 ASSERT(blinkPageStart == getAddress() - blinkGuardPageSize); // Page is at a ligned address plus guard page size.
1432 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; 1432 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
1433 } 1433 }
1434 #endif 1434 #endif
1435 1435
1436 NormalPageHeap* NormalPage::heapForNormalPage() 1436 NormalPageHeap* NormalPage::arenaForNormalPage()
1437 { 1437 {
1438 return static_cast<NormalPageHeap*>(heap()); 1438 return static_cast<NormalPageHeap*>(arena());
1439 } 1439 }
1440 1440
1441 LargeObjectPage::LargeObjectPage(PageMemory* storage, BaseHeap* heap, size_t pay loadSize) 1441 LargeObjectPage::LargeObjectPage(PageMemory* storage, BaseArena* arena, size_t p ayloadSize)
1442 : BasePage(storage, heap) 1442 : BasePage(storage, arena)
1443 , m_payloadSize(payloadSize) 1443 , m_payloadSize(payloadSize)
1444 #if ENABLE(ASAN_CONTAINER_ANNOTATIONS) 1444 #if ENABLE(ASAN_CONTAINER_ANNOTATIONS)
1445 , m_isVectorBackingPage(false) 1445 , m_isVectorBackingPage(false)
1446 #endif 1446 #endif
1447 { 1447 {
1448 } 1448 }
1449 1449
1450 size_t LargeObjectPage::objectPayloadSizeForTesting() 1450 size_t LargeObjectPage::objectPayloadSizeForTesting()
1451 { 1451 {
1452 markAsSwept(); 1452 markAsSwept();
1453 return payloadSize(); 1453 return payloadSize();
1454 } 1454 }
1455 1455
1456 bool LargeObjectPage::isEmpty() 1456 bool LargeObjectPage::isEmpty()
1457 { 1457 {
1458 return !heapObjectHeader()->isMarked(); 1458 return !heapObjectHeader()->isMarked();
1459 } 1459 }
1460 1460
1461 void LargeObjectPage::removeFromHeap() 1461 void LargeObjectPage::removeFromHeap()
1462 { 1462 {
1463 static_cast<LargeObjectHeap*>(heap())->freeLargeObjectPage(this); 1463 static_cast<LargeObjectHeap*>(arena())->freeLargeObjectPage(this);
1464 } 1464 }
1465 1465
1466 void LargeObjectPage::sweep() 1466 void LargeObjectPage::sweep()
1467 { 1467 {
1468 heapObjectHeader()->unmark(); 1468 heapObjectHeader()->unmark();
1469 heap()->threadState()->increaseMarkedObjectSize(size()); 1469 arena()->threadState()->increaseMarkedObjectSize(size());
1470 } 1470 }
1471 1471
1472 void LargeObjectPage::makeConsistentForGC() 1472 void LargeObjectPage::makeConsistentForGC()
1473 { 1473 {
1474 HeapObjectHeader* header = heapObjectHeader(); 1474 HeapObjectHeader* header = heapObjectHeader();
1475 if (header->isMarked()) { 1475 if (header->isMarked()) {
1476 header->unmark(); 1476 header->unmark();
1477 heap()->threadState()->increaseMarkedObjectSize(size()); 1477 arena()->threadState()->increaseMarkedObjectSize(size());
1478 } else { 1478 } else {
1479 header->markDead(); 1479 header->markDead();
1480 } 1480 }
1481 } 1481 }
1482 1482
1483 void LargeObjectPage::makeConsistentForMutator() 1483 void LargeObjectPage::makeConsistentForMutator()
1484 { 1484 {
1485 HeapObjectHeader* header = heapObjectHeader(); 1485 HeapObjectHeader* header = heapObjectHeader();
1486 if (header->isMarked()) 1486 if (header->isMarked())
1487 header->unmark(); 1487 header->unmark();
(...skipping 100 matching lines...) Expand 10 before | Expand all | Expand 10 after
1588 1588
1589 m_hasEntries = true; 1589 m_hasEntries = true;
1590 size_t index = hash(address); 1590 size_t index = hash(address);
1591 ASSERT(!(index & 1)); 1591 ASSERT(!(index & 1));
1592 Address cachePage = roundToBlinkPageStart(address); 1592 Address cachePage = roundToBlinkPageStart(address);
1593 m_entries[index + 1] = m_entries[index]; 1593 m_entries[index + 1] = m_entries[index];
1594 m_entries[index] = cachePage; 1594 m_entries[index] = cachePage;
1595 } 1595 }
1596 1596
1597 } // namespace blink 1597 } // namespace blink
OLDNEW
« no previous file with comments | « third_party/WebKit/Source/platform/heap/HeapPage.h ('k') | third_party/WebKit/Source/platform/heap/HeapTest.cpp » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698