Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 42 #include "platform/heap/PageMemory.h" | 42 #include "platform/heap/PageMemory.h" |
| 43 #include "platform/heap/PagePool.h" | 43 #include "platform/heap/PagePool.h" |
| 44 #include "platform/heap/SafePoint.h" | 44 #include "platform/heap/SafePoint.h" |
| 45 #include "platform/heap/ThreadState.h" | 45 #include "platform/heap/ThreadState.h" |
| 46 #include "public/platform/Platform.h" | 46 #include "public/platform/Platform.h" |
| 47 #include "wtf/Assertions.h" | 47 #include "wtf/Assertions.h" |
| 48 #include "wtf/ContainerAnnotations.h" | 48 #include "wtf/ContainerAnnotations.h" |
| 49 #include "wtf/CurrentTime.h" | 49 #include "wtf/CurrentTime.h" |
| 50 #include "wtf/LeakAnnotations.h" | 50 #include "wtf/LeakAnnotations.h" |
| 51 #include "wtf/PassOwnPtr.h" | 51 #include "wtf/PassOwnPtr.h" |
| 52 #include "wtf/TemporaryChange.h" | |
| 52 #include "wtf/allocator/PageAllocator.h" | 53 #include "wtf/allocator/PageAllocator.h" |
| 53 #include "wtf/allocator/Partitions.h" | 54 #include "wtf/allocator/Partitions.h" |
| 54 | 55 |
| 55 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER | 56 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER |
| 56 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead. | 57 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead. |
| 57 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1 | 58 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1 |
| 58 | 59 |
| 59 // When finalizing a non-inlined vector backing store/container, remove | 60 // When finalizing a non-inlined vector backing store/container, remove |
| 60 // its contiguous container annotation. Required as it will not be destructed | 61 // its contiguous container annotation. Required as it will not be destructed |
| 61 // from its Vector. | 62 // from its Vector. |
| (...skipping 246 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 308 { | 309 { |
| 309 // It might be heavy to call Platform::current()->monotonicallyIncreasingTimeSeconds() | 310 // It might be heavy to call Platform::current()->monotonicallyIncreasingTimeSeconds() |
| 310 // per page (i.e., 128 KB sweep or one LargeObject sweep), so we check | 311 // per page (i.e., 128 KB sweep or one LargeObject sweep), so we check |
| 311 // the deadline per 10 pages. | 312 // the deadline per 10 pages. |
| 312 static const int deadlineCheckInterval = 10; | 313 static const int deadlineCheckInterval = 10; |
| 313 | 314 |
| 314 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); | 315 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); |
| 315 ASSERT(getThreadState()->sweepForbidden()); | 316 ASSERT(getThreadState()->sweepForbidden()); |
| 316 ASSERT(!getThreadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); | 317 ASSERT(!getThreadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); |
| 317 | 318 |
| 319 NormalPageArena* normalArena = nullptr; | |
| 320 if (m_firstUnsweptPage && !m_firstUnsweptPage->isLargeObjectPage()) { | |
|
haraken
2016/05/31 05:41:16
Why is it enough to check if the first unswept pag
sof
2016/05/31 07:24:09
How can an arena contain a mixture of large and no
| |
| 321 // Mark this NormalPageArena as being lazily swept. | |
| 322 NormalPage* normalPage = reinterpret_cast<NormalPage*>(m_firstUnsweptPage); | |
| 323 normalArena = normalPage->arenaForNormalPage(); | |
| 324 normalArena->setIsLazySweeping(true); | |
| 325 } | |
| 318 int pageCount = 1; | 326 int pageCount = 1; |
| 319 while (m_firstUnsweptPage) { | 327 while (m_firstUnsweptPage) { |
| 320 sweepUnsweptPage(); | 328 sweepUnsweptPage(); |
| 321 if (pageCount % deadlineCheckInterval == 0) { | 329 if (pageCount % deadlineCheckInterval == 0) { |
| 322 if (deadlineSeconds <= monotonicallyIncreasingTime()) { | 330 if (deadlineSeconds <= monotonicallyIncreasingTime()) { |
| 323 // Deadline has come. | 331 // Deadline has come. |
| 324 ThreadHeap::reportMemoryUsageForTracing(); | 332 ThreadHeap::reportMemoryUsageForTracing(); |
| 333 if (normalArena) | |
| 334 normalArena->setIsLazySweeping(false); | |
| 325 return !m_firstUnsweptPage; | 335 return !m_firstUnsweptPage; |
| 326 } | 336 } |
| 327 } | 337 } |
| 328 pageCount++; | 338 pageCount++; |
| 329 } | 339 } |
| 330 ThreadHeap::reportMemoryUsageForTracing(); | 340 ThreadHeap::reportMemoryUsageForTracing(); |
| 341 if (normalArena) | |
| 342 normalArena->setIsLazySweeping(false); | |
| 331 return true; | 343 return true; |
| 332 } | 344 } |
| 333 | 345 |
| 334 void BaseArena::completeSweep() | 346 void BaseArena::completeSweep() |
| 335 { | 347 { |
| 336 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); | 348 RELEASE_ASSERT(getThreadState()->isSweepingInProgress()); |
| 337 ASSERT(getThreadState()->sweepForbidden()); | 349 ASSERT(getThreadState()->sweepForbidden()); |
| 338 ASSERT(!getThreadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); | 350 ASSERT(!getThreadState()->isMainThread() || ScriptForbiddenScope::isScriptForbidden()); |
| 339 | 351 |
| 340 while (m_firstUnsweptPage) { | 352 while (m_firstUnsweptPage) { |
| 341 sweepUnsweptPage(); | 353 sweepUnsweptPage(); |
| 342 } | 354 } |
| 343 ThreadHeap::reportMemoryUsageForTracing(); | 355 ThreadHeap::reportMemoryUsageForTracing(); |
| 344 } | 356 } |
| 345 | 357 |
| 346 Address BaseArena::allocateLargeObject(size_t allocationSize, size_t gcInfoIndex) | 358 Address BaseArena::allocateLargeObject(size_t allocationSize, size_t gcInfoIndex) |
| 347 { | 359 { |
| 348 // TODO(sof): should need arise, support eagerly finalized large objects. | 360 // TODO(sof): should need arise, support eagerly finalized large objects. |
| 349 CHECK(arenaIndex() != BlinkGC::EagerSweepArenaIndex); | 361 CHECK(arenaIndex() != BlinkGC::EagerSweepArenaIndex); |
| 350 LargeObjectArena* largeObjectArena = static_cast<LargeObjectArena*>(getThreadState()->arena(BlinkGC::LargeObjectArenaIndex)); | 362 LargeObjectArena* largeObjectArena = static_cast<LargeObjectArena*>(getThreadState()->arena(BlinkGC::LargeObjectArenaIndex)); |
| 351 Address largeObject = largeObjectArena->allocateLargeObjectPage(allocationSize, gcInfoIndex); | 363 Address largeObject = largeObjectArena->allocateLargeObjectPage(allocationSize, gcInfoIndex); |
| 352 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); | 364 ASAN_MARK_LARGE_VECTOR_CONTAINER(this, largeObject); |
| 353 return largeObject; | 365 return largeObject; |
| 354 } | 366 } |
| 355 | 367 |
| 368 bool BaseArena::willObjectBeLazilySwept(BasePage* page, void* objectPointer) const | |
| 369 { | |
| 370 // If not on the current page being (potentially) lazily swept, |objectPointer| | |
| 371 // is an unmarked, sweepable object. | |
| 372 if (page != m_firstUnsweptPage) | |
| 373 return true; | |
| 374 | |
| 375 DCHECK(!page->isLargeObjectPage()); | |
| 376 // Check if the arena is currently being lazily swept. | |
| 377 NormalPage* normalPage = reinterpret_cast<NormalPage*>(page); | |
| 378 NormalPageArena* normalArena = normalPage->arenaForNormalPage(); | |
| 379 if (!normalArena->isLazySweeping()) | |
| 380 return true; | |
| 381 | |
| 382 // Rare special case: unmarked object is on the page being lazily swept, | |
| 383 // and a finalizer for an object on that page calls ThreadHeap::willObjectBeLazilySwept(). | |
| 384 // | |
| 385 // Need to determine if |objectPointer| represents a live (unmarked) object or an | |
| 386 // unmarked object that will be lazily swept later. As lazy page sweeping | |
| 387 // doesn't record a frontier pointer representing how far along it is, the | |
| 388 // page is scanned from the start, skipping past freed & unmarked regions. | |
| 389 // | |
| 390 // If no marked objects are encountered before |objectPointer|, we know | |
| 391 // that the finalizing object calling willObjectBeLazilySwept() comes later, | |
| 392 // and |objectPointer| has been deemed to be alive already (=> it won't be swept.) | |
| 393 // | |
| 394 // If a marked object is encountered before |objectPointer|, it will | |
| 395 // not have been lazily swept past already. Hence it represents an unmarked, | |
| 396 // sweepable object. | |
| 397 // | |
| 398 // As willObjectBeLazilySwept() is used rarely and it happening to be | |
| 399 // used while running a finalizer on the page being lazily swept is | |
| 400 // even rarer, the page scan is considered acceptable and something | |
| 401 // really wanted -- willObjectBeLazilySwept()'s result can be trusted. | |
| 402 Address pageEnd = normalPage->payloadEnd(); | |
| 403 for (Address headerAddress = normalPage->payload(); headerAddress < pageEnd; ) { | |
|
haraken
2016/05/30 23:54:49
Instead of scanning the entire page, it would be b
sof
2016/05/31 05:13:23
You can make trade offs like that, i.e., take the
haraken
2016/05/31 05:26:38
Yeah, that makes sense. The reason I didn't want t
| |
| 404 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | |
| 405 size_t size = header->size(); | |
| 406 // Scan made it to |objectPointer| without encountering any marked objects. | |
| 407 // => lazy sweep will have processed this unmarked, but live, object. | |
| 408 // => |objectPointer| will not be lazily swept. | |
| 409 // | |
| 410 // Notice that |objectPointer| might be a pointer to a GarbageCollectedMixin, | |
| 411 // hence using fromPayload() to derive the HeapObjectHeader isn't possible | |
| 412 // (and use its value to check if |headerAddress| is equal to it.) | |
| 413 if (headerAddress > objectPointer) | |
| 414 return false; | |
| 415 if (!header->isFree() && header->isMarked()) { | |
|
haraken
2016/05/30 23:54:49
Hmm, I don't fully understand why this check is do
sof
2016/05/31 05:13:23
That won't work as you could be scanning freelist
haraken
2016/05/31 05:26:38
Sorry, I meant:
for (/* scan the page */) {
if
sof
2016/05/31 05:32:25
No, we already know that objectPointer is unmarked
haraken
2016/05/31 05:41:16
OK. Do you need to check !header->isFree()? If hea
| |
| 416 // There must be a marked object on this page and the one located must | |
| 417 // have room after it for the unmarked |objectPointer| object. | |
| 418 DCHECK(headerAddress + size < pageEnd); | |
| 419 return true; | |
| 420 } | |
| 421 headerAddress += size; | |
| 422 } | |
| 423 NOTREACHED(); | |
| 424 return true; | |
| 425 } | |
| 426 | |
| 356 NormalPageArena::NormalPageArena(ThreadState* state, int index) | 427 NormalPageArena::NormalPageArena(ThreadState* state, int index) |
| 357 : BaseArena(state, index) | 428 : BaseArena(state, index) |
| 358 , m_currentAllocationPoint(nullptr) | 429 , m_currentAllocationPoint(nullptr) |
| 359 , m_remainingAllocationSize(0) | 430 , m_remainingAllocationSize(0) |
| 360 , m_lastRemainingAllocationSize(0) | 431 , m_lastRemainingAllocationSize(0) |
| 361 , m_promptlyFreedSize(0) | 432 , m_promptlyFreedSize(0) |
| 433 , m_isLazySweeping(false) | |
| 362 { | 434 { |
| 363 clearFreeLists(); | 435 clearFreeLists(); |
| 364 } | 436 } |
| 365 | 437 |
| 366 void NormalPageArena::clearFreeLists() | 438 void NormalPageArena::clearFreeLists() |
| 367 { | 439 { |
| 368 setAllocationPoint(nullptr, 0); | 440 setAllocationPoint(nullptr, 0); |
| 369 m_freeList.clear(); | 441 m_freeList.clear(); |
| 370 } | 442 } |
| 371 | 443 |
| (...skipping 242 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 614 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); | 686 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); |
| 615 m_promptlyFreedSize += shrinkSize; | 687 m_promptlyFreedSize += shrinkSize; |
| 616 header->setSize(allocationSize); | 688 header->setSize(allocationSize); |
| 617 SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), shrinkSize - sizeof(HeapObjectHeader)); | 689 SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), shrinkSize - sizeof(HeapObjectHeader)); |
| 618 return false; | 690 return false; |
| 619 } | 691 } |
| 620 | 692 |
| 621 Address NormalPageArena::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) | 693 Address NormalPageArena::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
| 622 { | 694 { |
| 623 ASSERT(!hasCurrentAllocationArea()); | 695 ASSERT(!hasCurrentAllocationArea()); |
| 696 TemporaryChange<bool> isLazySweeping(m_isLazySweeping, true); | |
| 624 Address result = nullptr; | 697 Address result = nullptr; |
| 625 while (m_firstUnsweptPage) { | 698 while (m_firstUnsweptPage) { |
| 626 BasePage* page = m_firstUnsweptPage; | 699 BasePage* page = m_firstUnsweptPage; |
| 627 if (page->isEmpty()) { | 700 if (page->isEmpty()) { |
| 628 page->unlink(&m_firstUnsweptPage); | 701 page->unlink(&m_firstUnsweptPage); |
| 629 page->removeFromHeap(); | 702 page->removeFromHeap(); |
| 630 } else { | 703 } else { |
| 631 // Sweep a page and move the page from m_firstUnsweptPages to | 704 // Sweep a page and move the page from m_firstUnsweptPages to |
| 632 // m_firstPages. | 705 // m_firstPages. |
| 633 page->sweep(); | 706 page->sweep(); |
| (...skipping 932 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1566 | 1639 |
| 1567 m_hasEntries = true; | 1640 m_hasEntries = true; |
| 1568 size_t index = hash(address); | 1641 size_t index = hash(address); |
| 1569 ASSERT(!(index & 1)); | 1642 ASSERT(!(index & 1)); |
| 1570 Address cachePage = roundToBlinkPageStart(address); | 1643 Address cachePage = roundToBlinkPageStart(address); |
| 1571 m_entries[index + 1] = m_entries[index]; | 1644 m_entries[index + 1] = m_entries[index]; |
| 1572 m_entries[index] = cachePage; | 1645 m_entries[index] = cachePage; |
| 1573 } | 1646 } |
| 1574 | 1647 |
| 1575 } // namespace blink | 1648 } // namespace blink |
| OLD | NEW |