| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 71 matching lines...) |
| 82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) | 83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) |
| 84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) | 84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) |
| 85 #else | 85 #else |
| 86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) | 86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) |
| 87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) | 87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) |
| 88 #endif | 88 #endif |
| 89 | 89 |
| 90 class CallbackStack; | 90 class CallbackStack; |
| 91 class PageMemory; | 91 class PageMemory; |
| 92 class ThreadHeapForHeapPage; |
| 92 template<ThreadAffinity affinity> class ThreadLocalPersistents; | 93 template<ThreadAffinity affinity> class ThreadLocalPersistents; |
| 93 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; | 94 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; |
| 94 | 95 |
| 95 #if ENABLE(GC_PROFILING) | 96 #if ENABLE(GC_PROFILING) |
| 96 class TracedValue; | 97 class TracedValue; |
| 97 #endif | 98 #endif |
| 98 | 99 |
| 99 // HeapObjectHeader is a 4 byte (32 bit) header that has the following layout: | 100 // HeapObjectHeader is a 4 byte (32 bit) header that has the following layout: |
| 100 // | 101 // |
| 101 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | | 102 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | |
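
As an editor's sketch of how such a packed header can be decoded, the helpers below pull the documented fields out of a 32-bit word. The concrete bit positions (mark bit in the least significant bit, size stored in 8-byte units) are illustrative assumptions, not the masks the implementation uses.

    #include <cstddef>
    #include <cstdint>

    // Assumed positions: | gcInfoIndex : 15 | size : 14 | dead : 1 | freed : 1 | mark : 1 |
    const uint32_t headerMarkBitMask  = 1u << 0;
    const uint32_t headerFreedBitMask = 1u << 1;
    const uint32_t headerDeadBitMask  = 1u << 2;
    const uint32_t headerSizeShift    = 3;  // 14 size bits occupy bits 3..16.
    const uint32_t headerSizeMask     = ((1u << 14) - 1) << headerSizeShift;
    const uint32_t headerGCInfoShift  = 17; // 15 gcInfoIndex bits occupy bits 17..31.

    // Sizes are 8-byte aligned, so the 14-bit field can hold size / 8.
    inline size_t sizeFromEncodedHeader(uint32_t encoded)
    {
        return static_cast<size_t>((encoded & headerSizeMask) >> headerSizeShift) * 8;
    }

    inline size_t gcInfoIndexFromEncodedHeader(uint32_t encoded)
    {
        return encoded >> headerGCInfoShift;
    }

    inline bool isMarked(uint32_t encoded)
    {
        return encoded & headerMarkBitMask;
    }
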
| (...skipping 221 matching lines...) |
| 323 inline Address roundToBlinkPageStart(Address address) | 324 inline Address roundToBlinkPageStart(Address address) |
| 324 { | 325 { |
| 325 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); | 326 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); |
| 326 } | 327 } |
| 327 | 328 |
| 328 inline Address roundToBlinkPageEnd(Address address) | 329 inline Address roundToBlinkPageEnd(Address address) |
| 329 { | 330 { |
| 330 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize; | 331 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize; |
| 331 } | 332 } |
| 332 | 333 |
| 333 // Compute the amount of padding we have to add to a header to make | |
| 334 // the size of the header plus the padding a multiple of 8 bytes. | |
| 335 inline size_t headerPadding() | |
| 336 { | |
| 337 return (allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; | |
| 338 } | |
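
A minimal compile-time check of this padding formula, assuming allocationGranularity is 8 and sizeof(HeapObjectHeader) is 4 (the constant names below are stand-ins):

    #include <cstddef>

    constexpr size_t kAllocationGranularity = 8; // stand-in for allocationGranularity
    constexpr size_t kHeaderSize = 4;            // stand-in for sizeof(HeapObjectHeader)

    constexpr size_t examplePadding()
    {
        return (kAllocationGranularity - (kHeaderSize % kAllocationGranularity)) % kAllocationGranularity;
    }

    static_assert(examplePadding() == 4, "a 4 byte header needs 4 bytes of padding");
    static_assert((kHeaderSize + examplePadding()) % kAllocationGranularity == 0,
                  "header plus padding is a multiple of 8 bytes");
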
| 339 | |
| 340 // Masks an address down to the enclosing blink page base address. | 334 // Masks an address down to the enclosing blink page base address. |
| 341 inline Address blinkPageAddress(Address address) | 335 inline Address blinkPageAddress(Address address) |
| 342 { | 336 { |
| 343 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); | 337 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); |
| 344 } | 338 } |
| 345 | 339 |
| 346 #if ENABLE(ASSERT) | 340 #if ENABLE(ASSERT) |
| 347 | 341 |
| 348 // Sanity check for a page header address: the address of the page | 342 // Sanity check for a page header address: the address of the page |
| 349 // header should be OS page size away from being Blink page size | 343 // header should be OS page size away from being Blink page size |
| 350 // aligned. | 344 // aligned. |
| 351 inline bool isPageHeaderAddress(Address address) | 345 inline bool isPageHeaderAddress(Address address) |
| 352 { | 346 { |
| 353 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - WTF::kSystemPageSize); | 347 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - WTF::kSystemPageSize); |
| 354 } | 348 } |
| 355 #endif | 349 #endif |
| 356 | 350 |
| 357 // FIXME: Add a good comment about the heap layout once heap relayout work | 351 // FIXME: Add a good comment about the heap layout once heap relayout work |
| 358 // is done. | 352 // is done. |
| 359 class BaseHeapPage { | 353 class BaseHeapPage { |
| 360 public: | 354 public: |
| 361 BaseHeapPage(PageMemory*, ThreadHeap*); | 355 BaseHeapPage(PageMemory*, ThreadHeap*); |
| 362 virtual ~BaseHeapPage() { } | 356 virtual ~BaseHeapPage() { } |
| 363 | 357 |
| 358 void link(BaseHeapPage** previousNext) |
| 359 { |
| 360 m_next = *previousNext; |
| 361 *previousNext = this; |
| 362 } |
| 363 void unlink(BaseHeapPage** previousNext) |
| 364 { |
| 365 *previousNext = m_next; |
| 366 m_next = nullptr; |
| 367 } |
| 368 BaseHeapPage* next() const { return m_next; } |
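
The new link()/unlink() pair uses the pointer-to-pointer idiom: passing the address of the head pointer (or of a previous page's m_next field) lets the same code splice at any list position without special-casing the head. A minimal sketch, reusing the m_firstPage/m_firstUnsweptPage names from ThreadHeap below; the splice loop is illustrative, not the actual sweep logic:

    // Move every page from the unswept list back onto the main list,
    // head-insertion style, using only the public link()/unlink() API.
    void spliceExample(BaseHeapPage*& firstPage, BaseHeapPage*& firstUnsweptPage)
    {
        while (BaseHeapPage* page = firstUnsweptPage) {
            page->unlink(&firstUnsweptPage); // firstUnsweptPage now points at page's successor.
            page->link(&firstPage);          // page becomes the new head of the main list.
        }
    }
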
| 369 |
| 364 // virtual methods are slow. So performance-sensitive methods | 370 // virtual methods are slow. So performance-sensitive methods |
| 365 // should be defined as non-virtual methods on HeapPage and LargeObject. | 371 // should be defined as non-virtual methods on HeapPage and LargeObject. |
| 366 // The following methods are not performance-sensitive. | 372 // The following methods are not performance-sensitive. |
| 367 virtual size_t objectPayloadSizeForTesting() = 0; | 373 virtual size_t objectPayloadSizeForTesting() = 0; |
| 368 virtual bool isEmpty() = 0; | 374 virtual bool isEmpty() = 0; |
| 369 virtual void removeFromHeap(ThreadHeap*) = 0; | 375 virtual void removeFromHeap() = 0; |
| 370 virtual void sweep() = 0; | 376 virtual void sweep() = 0; |
| 371 virtual void markUnmarkedObjectsDead() = 0; | 377 virtual void markUnmarkedObjectsDead() = 0; |
| 372 // Check if the given address points to an object in this | 378 // Check if the given address points to an object in this |
| 373 // heap page. If so, find the start of that object and mark it | 379 // heap page. If so, find the start of that object and mark it |
| 374 // using the given Visitor. Otherwise do nothing. The pointer must | 380 // using the given Visitor. Otherwise do nothing. The pointer must |
| 375 // be within the same blinkPageSize-aligned region as the this-pointer. | 381 // be within the same blinkPageSize-aligned region as the this-pointer. |
| 376 // | 382 // |
| 377 // This is used during conservative stack scanning to | 383 // This is used during conservative stack scanning to |
| 378 // conservatively mark all objects that could be referenced from | 384 // conservatively mark all objects that could be referenced from |
| 379 // the stack. | 385 // the stack. |
| (...skipping 27 matching lines...) |
| 407 | 413 |
| 408 void markAsUnswept() | 414 void markAsUnswept() |
| 409 { | 415 { |
| 410 ASSERT(m_swept); | 416 ASSERT(m_swept); |
| 411 m_swept = false; | 417 m_swept = false; |
| 412 } | 418 } |
| 413 | 419 |
| 414 private: | 420 private: |
| 415 PageMemory* m_storage; | 421 PageMemory* m_storage; |
| 416 ThreadHeap* m_heap; | 422 ThreadHeap* m_heap; |
| 423 BaseHeapPage* m_next; |
| 417 // Whether the page is part of a terminating thread or not. | 424 // Whether the page is part of a terminating thread or not. |
| 418 bool m_terminating; | 425 bool m_terminating; |
| 419 | 426 |
| 420 // Track the sweeping state of a page. Set to true once | 427 // Track the sweeping state of a page. Set to true once |
| 421 // the lazy sweep has processed it. | 428 // the lazy sweep has processed it. |
| 422 // | 429 // |
| 423 // Set to false at the start of a sweep, true upon completion | 430 // Set to false at the start of a sweep, true upon completion |
| 424 // of lazy sweeping. | 431 // of lazy sweeping. |
| 425 bool m_swept; | 432 bool m_swept; |
| 433 friend class ThreadHeap; |
| 426 }; | 434 }; |
| 427 | 435 |
| 428 class HeapPage final : public BaseHeapPage { | 436 class HeapPage final : public BaseHeapPage { |
| 429 public: | 437 public: |
| 430 HeapPage(PageMemory*, ThreadHeap*); | 438 HeapPage(PageMemory*, ThreadHeap*); |
| 431 | 439 |
| 432 Address payload() | 440 Address payload() |
| 433 { | 441 { |
| 434 return address() + sizeof(HeapPage) + headerPadding(); | 442 return address() + sizeof(HeapPage) + headerPadding(); |
| 435 } | 443 } |
| 436 size_t payloadSize() | 444 size_t payloadSize() |
| 437 { | 445 { |
| 438 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask; | 446 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask; |
| 439 } | 447 } |
| 440 Address payloadEnd() { return payload() + payloadSize(); } | 448 Address payloadEnd() { return payload() + payloadSize(); } |
| 441 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } | 449 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } |
| 442 | 450 |
| 443 void link(HeapPage** previousNext) | |
| 444 { | |
| 445 m_next = *previousNext; | |
| 446 *previousNext = this; | |
| 447 } | |
| 448 | |
| 449 void unlink(HeapPage** previousNext) | |
| 450 { | |
| 451 *previousNext = m_next; | |
| 452 m_next = nullptr; | |
| 453 } | |
| 454 | |
| 455 virtual size_t objectPayloadSizeForTesting() override; | 451 virtual size_t objectPayloadSizeForTesting() override; |
| 456 virtual bool isEmpty() override; | 452 virtual bool isEmpty() override; |
| 457 virtual void removeFromHeap(ThreadHeap*) override; | 453 virtual void removeFromHeap() override; |
| 458 virtual void sweep() override; | 454 virtual void sweep() override; |
| 459 virtual void markUnmarkedObjectsDead() override; | 455 virtual void markUnmarkedObjectsDead() override; |
| 460 virtual void checkAndMarkPointer(Visitor*, Address) override; | 456 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 461 virtual void markOrphaned() override | 457 virtual void markOrphaned() override |
| 462 { | 458 { |
| 463 // Zap the payload with a recognizable value to detect any incorrect | 459 // Zap the payload with a recognizable value to detect any incorrect |
| 464 // cross thread pointer usage. | 460 // cross thread pointer usage. |
| 465 #if defined(ADDRESS_SANITIZER) | 461 #if defined(ADDRESS_SANITIZER) |
| 466 // This needs to zap poisoned memory as well. | 462 // This needs to zap poisoned memory as well. |
| 467 // Force unpoison memory before memset. | 463 // Force unpoison memory before memset. |
| (...skipping 14 matching lines...) |
| 482 // for the header, and the unmapped guard page at the start. That ensures | 478 // for the header, and the unmapped guard page at the start. That ensures |
| 483 // the result can be used to populate the negative page cache. | 479 // the result can be used to populate the negative page cache. |
| 484 virtual bool contains(Address addr) override | 480 virtual bool contains(Address addr) override |
| 485 { | 481 { |
| 486 Address blinkPageStart = roundToBlinkPageStart(address()); | 482 Address blinkPageStart = roundToBlinkPageStart(address()); |
| 487 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. | 483 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. |
| 488 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | 484 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
| 489 } | 485 } |
| 490 #endif | 486 #endif |
| 491 virtual size_t size() override { return blinkPageSize; } | 487 virtual size_t size() override { return blinkPageSize; } |
| 488 // Compute the amount of padding we have to add to a header to make |
| 489 // the size of the header plus the padding a multiple of 8 bytes. |
| 490 static size_t headerPadding() |
| 491 { |
| 492 return (sizeof(HeapPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; |
| 493 } |
| 492 | 494 |
| 493 HeapPage* next() { return m_next; } | |
| 494 | 495 |
| 496 ThreadHeapForHeapPage* heapForHeapPage(); |
| 495 void clearObjectStartBitMap(); | 497 void clearObjectStartBitMap(); |
| 496 | 498 |
| 497 #if defined(ADDRESS_SANITIZER) | 499 #if defined(ADDRESS_SANITIZER) |
| 498 void poisonUnmarkedObjects(); | 500 void poisonUnmarkedObjects(); |
| 499 #endif | 501 #endif |
| 500 | 502 |
| 501 // This method is needed just to prevent compilers from removing m_padding. | |
| 502 uint64_t unusedMethod() const { return m_padding; } | |
| 503 | |
| 504 private: | 503 private: |
| 505 HeapObjectHeader* findHeaderFromAddress(Address); | 504 HeapObjectHeader* findHeaderFromAddress(Address); |
| 506 void populateObjectStartBitMap(); | 505 void populateObjectStartBitMap(); |
| 507 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | 506 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } |
| 508 | 507 |
| 509 HeapPage* m_next; | |
| 510 bool m_objectStartBitMapComputed; | 508 bool m_objectStartBitMapComputed; |
| 511 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | 509 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; |
| 512 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. | |
| 513 | |
| 514 friend class ThreadHeap; | |
| 515 }; | 510 }; |
| 516 | 511 |
| 517 // Large allocations are allocated as separate objects and linked in a list. | 512 // Large allocations are allocated as separate objects and linked in a list. |
| 518 // | 513 // |
| 519 // In order to use the same memory allocation routines for everything allocated | 514 // In order to use the same memory allocation routines for everything allocated |
| 520 // in the heap, large objects are considered heap pages containing only one | 515 // in the heap, large objects are considered heap pages containing only one |
| 521 // object. | 516 // object. |
| 522 class LargeObject final : public BaseHeapPage { | 517 class LargeObject final : public BaseHeapPage { |
| 523 public: | 518 public: |
| 524 LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize) | 519 LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize) |
| 525 : BaseHeapPage(storage, heap) | 520 : BaseHeapPage(storage, heap) |
| 526 , m_payloadSize(payloadSize) | 521 , m_payloadSize(payloadSize) |
| 527 { | 522 { |
| 528 } | 523 } |
| 529 | 524 |
| 530 Address payload() { return heapObjectHeader()->payload(); } | 525 Address payload() { return heapObjectHeader()->payload(); } |
| 531 size_t payloadSize() { return m_payloadSize; } | 526 size_t payloadSize() { return m_payloadSize; } |
| 532 Address payloadEnd() { return payload() + payloadSize(); } | 527 Address payloadEnd() { return payload() + payloadSize(); } |
| 533 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } | 528 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } |
| 534 | 529 |
| 535 virtual size_t objectPayloadSizeForTesting() override; | 530 virtual size_t objectPayloadSizeForTesting() override; |
| 536 virtual bool isEmpty() override; | 531 virtual bool isEmpty() override; |
| 537 virtual void removeFromHeap(ThreadHeap*) override; | 532 virtual void removeFromHeap() override; |
| 538 virtual void sweep() override; | 533 virtual void sweep() override; |
| 539 virtual void markUnmarkedObjectsDead() override; | 534 virtual void markUnmarkedObjectsDead() override; |
| 540 virtual void checkAndMarkPointer(Visitor*, Address) override; | 535 virtual void checkAndMarkPointer(Visitor*, Address) override; |
| 541 virtual void markOrphaned() override | 536 virtual void markOrphaned() override |
| 542 { | 537 { |
| 543 // Zap the payload with a recognizable value to detect any incorrect | 538 // Zap the payload with a recognizable value to detect any incorrect |
| 544 // cross thread pointer usage. | 539 // cross thread pointer usage. |
| 545 memset(payload(), orphanedZapValue, payloadSize()); | 540 memset(payload(), orphanedZapValue, payloadSize()); |
| 546 BaseHeapPage::markOrphaned(); | 541 BaseHeapPage::markOrphaned(); |
| 547 } | 542 } |
| (...skipping 12 matching lines...) |
| 560 // populate the negative page cache. | 555 // populate the negative page cache. |
| 561 virtual bool contains(Address object) override | 556 virtual bool contains(Address object) override |
| 562 { | 557 { |
| 563 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); | 558 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); |
| 564 } | 559 } |
| 565 #endif | 560 #endif |
| 566 virtual size_t size() | 561 virtual size_t size() |
| 567 { | 562 { |
| 568 return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; | 563 return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; |
| 569 } | 564 } |
| 565 // Compute the amount of padding we have to add to a header to make |
| 566 // the size of the header plus the padding a multiple of 8 bytes. |
| 567 static size_t headerPadding() |
| 568 { |
| 569 return (sizeof(LargeObject) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; |
| 570 } |
| 570 virtual bool isLargeObject() override { return true; } | 571 virtual bool isLargeObject() override { return true; } |
| 571 | 572 |
| 572 void link(LargeObject** previousNext) | |
| 573 { | |
| 574 m_next = *previousNext; | |
| 575 *previousNext = this; | |
| 576 } | |
| 577 | |
| 578 void unlink(LargeObject** previousNext) | |
| 579 { | |
| 580 *previousNext = m_next; | |
| 581 m_next = nullptr; | |
| 582 } | |
| 583 | |
| 584 LargeObject* next() | |
| 585 { | |
| 586 return m_next; | |
| 587 } | |
| 588 | |
| 589 HeapObjectHeader* heapObjectHeader() | 573 HeapObjectHeader* heapObjectHeader() |
| 590 { | 574 { |
| 591 Address headerAddress = address() + sizeof(LargeObject) + headerPadding(); | 575 Address headerAddress = address() + sizeof(LargeObject) + headerPadding(); |
| 592 return reinterpret_cast<HeapObjectHeader*>(headerAddress); | 576 return reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 593 } | 577 } |
| 594 | 578 |
| 595 // This method is needed just to prevent compilers from removing m_padding. | 579 private: |
| 596 uint64_t unusedMethod() const { return m_padding; } | |
| 597 | 580 |
| 598 private: | |
| 599 friend class ThreadHeap; | |
| 600 LargeObject* m_next; | |
| 601 size_t m_payloadSize; | 581 size_t m_payloadSize; |
| 602 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. | |
| 603 }; | 582 }; |
| 604 | 583 |
| 605 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary | 584 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary |
| 606 // pointer-sized word, and determining whether it cannot be interpreted as a | 585 // pointer-sized word, and determining whether it cannot be interpreted as a |
| 607 // pointer to an area that is managed by the garbage collected Blink heap. This | 586 // pointer to an area that is managed by the garbage collected Blink heap. This |
| 608 // is a cache of 'pages' that have previously been determined to be wholly | 587 // is a cache of 'pages' that have previously been determined to be wholly |
| 609 // outside of the heap. The size of these pages must be smaller than the | 588 // outside of the heap. The size of these pages must be smaller than the |
| 610 // allocation alignment of the heap pages. We determine off-heap-ness by | 589 // allocation alignment of the heap pages. We determine off-heap-ness by |
| 611 // rounding down the pointer to the nearest page and looking up the page in the | 590 // rounding down the pointer to the nearest page and looking up the page in the |
| 612 // cache. If there is a miss in the cache we can determine the status of the | 591 // cache. If there is a miss in the cache we can determine the status of the |
| (...skipping 108 matching lines...) |
| 721 | 700 |
| 722 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; | 701 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; |
| 723 #endif | 702 #endif |
| 724 | 703 |
| 725 private: | 704 private: |
| 726 int m_biggestFreeListIndex; | 705 int m_biggestFreeListIndex; |
| 727 | 706 |
| 728 // All FreeListEntries in the nth list have size >= 2^n. | 707 // All FreeListEntries in the nth list have size >= 2^n. |
| 729 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 708 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
| 730 | 709 |
| 731 friend class ThreadHeap; | 710 friend class ThreadHeapForHeapPage; |
| 732 }; | 711 }; |
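
Given the invariant noted above (every FreeListEntry in m_freeLists[n] has size >= 2^n), a block's bucket is the position of the highest set bit of its size. A sketch of that mapping, with a helper name of our choosing:

    #include <cstddef>

    // Returns n such that 2^n <= size < 2^(n+1); entries of this size belong
    // in m_freeLists[n] under the invariant above.
    inline int bucketIndexForSize(size_t size)
    {
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index; // e.g. size 48 (0b110000) -> bucket 5, and 48 >= 2^5.
    }
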
| 733 | 712 |
| 734 // Thread heaps represent a part of the per-thread Blink heap. | 713 // Thread heaps represent a part of the per-thread Blink heap. |
| 735 // | 714 // |
| 736 // Each Blink thread has a number of thread heaps: one general heap | 715 // Each Blink thread has a number of thread heaps: one general heap |
| 737 // that contains any type of object and a number of heaps specialized | 716 // that contains any type of object and a number of heaps specialized |
| 738 // for specific object types (such as Node). | 717 // for specific object types (such as Node). |
| 739 // | 718 // |
| 740 // Each thread heap contains the functionality to allocate new objects | 719 // Each thread heap contains the functionality to allocate new objects |
| 741 // (potentially adding new pages to the heap), to find and mark | 720 // (potentially adding new pages to the heap), to find and mark |
| 742 // objects during conservative stack scanning and to sweep the set of | 721 // objects during conservative stack scanning and to sweep the set of |
| 743 // pages after a GC. | 722 // pages after a GC. |
| 744 class PLATFORM_EXPORT ThreadHeap final { | 723 class PLATFORM_EXPORT ThreadHeap { |
| 745 public: | 724 public: |
| 746 ThreadHeap(ThreadState*, int); | 725 ThreadHeap(ThreadState*, int); |
| 747 ~ThreadHeap(); | 726 virtual ~ThreadHeap(); |
| 748 void cleanupPages(); | 727 void cleanupPages(); |
| 749 | 728 |
| 750 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 729 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
| 751 BaseHeapPage* findPageFromAddress(Address); | 730 BaseHeapPage* findPageFromAddress(Address); |
| 752 #endif | 731 #endif |
| 753 #if ENABLE(GC_PROFILING) | 732 #if ENABLE(GC_PROFILING) |
| 754 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 733 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
| 755 void incrementMarkedObjectsAge(); | 734 void incrementMarkedObjectsAge(); |
| 756 #endif | 735 #endif |
| 757 | 736 |
| 758 void clearFreeLists(); | 737 virtual void clearFreeLists() { } |
| 759 void makeConsistentForSweeping(); | 738 void makeConsistentForSweeping(); |
| 760 #if ENABLE(ASSERT) | 739 #if ENABLE(ASSERT) |
| 761 bool isConsistentForSweeping(); | 740 virtual bool isConsistentForSweeping() = 0; |
| 762 #endif | 741 #endif |
| 763 size_t objectPayloadSizeForTesting(); | 742 size_t objectPayloadSizeForTesting(); |
| 743 void prepareHeapForTermination(); |
| 744 void prepareForSweep(); |
| 745 Address lazySweep(size_t, size_t gcInfoIndex); |
| 746 void completeSweep(); |
| 764 | 747 |
| 765 ThreadState* threadState() { return m_threadState; } | 748 ThreadState* threadState() { return m_threadState; } |
| 749 int heapIndex() const { return m_index; } |
| 750 inline static size_t allocationSizeFromSize(size_t); |
| 751 inline static size_t roundedAllocationSize(size_t size) |
| 752 { |
| 753 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); |
| 754 } |
| 766 | 755 |
| 756 protected: |
| 757 BaseHeapPage* m_firstPage; |
| 758 BaseHeapPage* m_firstUnsweptPage; |
| 759 |
| 760 private: |
| 761 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0; |
| 762 |
| 763 ThreadState* m_threadState; |
| 764 |
| 765 // Index into the page pools. This is used to ensure that the pages of the |
| 766 // same type go into the correct page pool and thus avoid type confusion. |
| 767 int m_index; |
| 768 }; |
| 769 |
| 770 class PLATFORM_EXPORT ThreadHeapForHeapPage final : public ThreadHeap { |
| 771 public: |
| 772 ThreadHeapForHeapPage(ThreadState*, int); |
| 767 void addToFreeList(Address address, size_t size) | 773 void addToFreeList(Address address, size_t size) |
| 768 { | 774 { |
| 769 ASSERT(findPageFromAddress(address)); | 775 ASSERT(findPageFromAddress(address)); |
| 770 ASSERT(findPageFromAddress(address + size - 1)); | 776 ASSERT(findPageFromAddress(address + size - 1)); |
| 771 m_freeList.addToFreeList(address, size); | 777 m_freeList.addToFreeList(address, size); |
| 772 } | 778 } |
| 779 virtual void clearFreeLists() override; |
| 780 #if ENABLE(ASSERT) |
| 781 virtual bool isConsistentForSweeping() override; |
| 782 bool pagesToBeSweptContains(Address); |
| 783 #endif |
| 773 | 784 |
| 774 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); | 785 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); |
| 775 inline static size_t roundedAllocationSize(size_t size) | 786 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); |
| 776 { | |
| 777 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); | |
| 778 } | |
| 779 inline static size_t allocationSizeFromSize(size_t); | |
| 780 | |
| 781 void prepareHeapForTermination(); | |
| 782 void prepareForSweep(); | |
| 783 void completeSweep(); | |
| 784 | 787 |
| 785 void freePage(HeapPage*); | 788 void freePage(HeapPage*); |
| 786 void freeLargeObject(LargeObject*); | |
| 787 | 789 |
| 790 bool coalesce(); |
| 788 void promptlyFreeObject(HeapObjectHeader*); | 791 void promptlyFreeObject(HeapObjectHeader*); |
| 789 bool expandObject(HeapObjectHeader*, size_t); | 792 bool expandObject(HeapObjectHeader*, size_t); |
| 790 void shrinkObject(HeapObjectHeader*, size_t); | 793 void shrinkObject(HeapObjectHeader*, size_t); |
| 791 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } | 794 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } |
| 792 | 795 |
| 793 #if ENABLE(GC_PROFILING) | 796 #if ENABLE(GC_PROFILING) |
| 794 void snapshotFreeList(TracedValue&); | 797 void snapshotFreeList(TracedValue&); |
| 795 | 798 |
| 796 void countMarkedObjects(ClassAgeCountsMap&) const; | 799 void countMarkedObjects(ClassAgeCountsMap&) const; |
| 797 void countObjectsToSweep(ClassAgeCountsMap&) const; | 800 void countObjectsToSweep(ClassAgeCountsMap&) const; |
| 798 #endif | 801 #endif |
| 799 | 802 |
| 800 private: | 803 private: |
| 804 void allocatePage(); |
| 805 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; |
| 801 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); | 806 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); |
| 802 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 807 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
| 803 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 808 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
| 804 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 809 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
| 805 inline void setAllocationPoint(Address, size_t); | 810 inline void setAllocationPoint(Address, size_t); |
| 806 void updateRemainingAllocationSize(); | 811 void updateRemainingAllocationSize(); |
| 807 Address allocateFromFreeList(size_t, size_t gcInfoIndex); | 812 Address allocateFromFreeList(size_t, size_t gcInfoIndex); |
| 808 Address lazySweepPages(size_t, size_t gcInfoIndex); | |
| 809 bool lazySweepLargeObjects(size_t); | |
| 810 | 813 |
| 811 void allocatePage(); | 814 FreeList m_freeList; |
| 812 Address allocateLargeObject(size_t, size_t gcInfoIndex); | |
| 813 | |
| 814 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | |
| 815 | |
| 816 #if ENABLE(ASSERT) | |
| 817 bool pagesToBeSweptContains(Address); | |
| 818 #endif | |
| 819 | |
| 820 bool coalesce(); | |
| 821 void preparePagesForSweeping(); | |
| 822 | |
| 823 Address m_currentAllocationPoint; | 815 Address m_currentAllocationPoint; |
| 824 size_t m_remainingAllocationSize; | 816 size_t m_remainingAllocationSize; |
| 825 size_t m_lastRemainingAllocationSize; | 817 size_t m_lastRemainingAllocationSize; |
| 826 | 818 |
| 827 HeapPage* m_firstPage; | |
| 828 LargeObject* m_firstLargeObject; | |
| 829 HeapPage* m_firstUnsweptPage; | |
| 830 LargeObject* m_firstUnsweptLargeObject; | |
| 831 | |
| 832 ThreadState* m_threadState; | |
| 833 | |
| 834 FreeList m_freeList; | |
| 835 | |
| 836 // Index into the page pools. This is used to ensure that the pages of the | |
| 837 // same type go into the correct page pool and thus avoid type confusion. | |
| 838 int m_index; | |
| 839 | |
| 840 // The size of promptly freed objects in the heap. | 819 // The size of promptly freed objects in the heap. |
| 841 size_t m_promptlyFreedSize; | 820 size_t m_promptlyFreedSize; |
| 842 | 821 |
| 843 #if ENABLE(GC_PROFILING) | 822 #if ENABLE(GC_PROFILING) |
| 844 size_t m_cumulativeAllocationSize; | 823 size_t m_cumulativeAllocationSize; |
| 845 size_t m_allocationCount; | 824 size_t m_allocationCount; |
| 846 size_t m_inlineAllocationCount; | 825 size_t m_inlineAllocationCount; |
| 847 #endif | 826 #endif |
| 848 }; | 827 }; |
| 849 | 828 |
| 829 class ThreadHeapForLargeObject final : public ThreadHeap { |
| 830 public: |
| 831 ThreadHeapForLargeObject(ThreadState*, int); |
| 832 Address allocateLargeObject(size_t, size_t gcInfoIndex); |
| 833 void freeLargeObject(LargeObject*); |
| 834 #if ENABLE(ASSERT) |
| 835 virtual bool isConsistentForSweeping() override { return true; } |
| 836 #endif |
| 837 private: |
| 838 Address doAllocateLargeObject(size_t, size_t gcInfoIndex); |
| 839 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; |
| 840 }; |
| 841 |
| 850 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | 842 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap |
| 851 // pages are aligned at blinkPageBase plus an OS page size. | 843 // pages are aligned at blinkPageBase plus an OS page size. |
| 852 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | 844 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our |
| 853 // typed heaps. This is only exported to enable tests in HeapTest.cpp. | 845 // typed heaps. This is only exported to enable tests in HeapTest.cpp. |
| 854 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) | 846 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) |
| 855 { | 847 { |
| 856 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | 848 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); |
| 857 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); | 849 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); |
| 858 ASSERT(page->contains(address)); | 850 ASSERT(page->contains(address)); |
| 859 return page; | 851 return page; |
| (...skipping 495 matching lines...) |
| 1355 // therefore has to happen before any calculation on the size. | 1347 // therefore has to happen before any calculation on the size. |
| 1356 RELEASE_ASSERT(size < maxHeapObjectSize); | 1348 RELEASE_ASSERT(size < maxHeapObjectSize); |
| 1357 | 1349 |
| 1358 // Add space for header. | 1350 // Add space for header. |
| 1359 size_t allocationSize = size + sizeof(HeapObjectHeader); | 1351 size_t allocationSize = size + sizeof(HeapObjectHeader); |
| 1360 // Align size with allocation granularity. | 1352 // Align size with allocation granularity. |
| 1361 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 1353 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
| 1362 return allocationSize; | 1354 return allocationSize; |
| 1363 } | 1355 } |
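
A concrete check of this round-up, assuming allocationGranularity is 8 (so allocationMask is 7) and a 4-byte HeapObjectHeader; the names below are stand-ins:

    #include <cstddef>

    constexpr size_t kHeaderSize = 4;     // stand-in for sizeof(HeapObjectHeader)
    constexpr size_t kAllocationMask = 7; // stand-in for allocationMask

    constexpr size_t exampleAllocationSizeFromSize(size_t size)
    {
        return (size + kHeaderSize + kAllocationMask) & ~kAllocationMask;
    }

    static_assert(exampleAllocationSizeFromSize(20) == 24, "20 + 4 = 24, already 8-byte aligned");
    static_assert(exampleAllocationSizeFromSize(21) == 32, "21 + 4 = 25 rounds up to 32");
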
| 1364 | 1356 |
| 1365 Address ThreadHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) | 1357 Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcInfoIndex) |
| 1366 { | 1358 { |
| 1367 #if ENABLE(GC_PROFILING) | 1359 #if ENABLE(GC_PROFILING) |
| 1368 m_cumulativeAllocationSize += allocationSize; | 1360 m_cumulativeAllocationSize += allocationSize; |
| 1369 ++m_allocationCount; | 1361 ++m_allocationCount; |
| 1370 #endif | 1362 #endif |
| 1371 | 1363 |
| 1372 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | 1364 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
| 1373 #if ENABLE(GC_PROFILING) | 1365 #if ENABLE(GC_PROFILING) |
| 1374 ++m_inlineAllocationCount; | 1366 ++m_inlineAllocationCount; |
| 1375 #endif | 1367 #endif |
| 1376 Address headerAddress = m_currentAllocationPoint; | 1368 Address headerAddress = m_currentAllocationPoint; |
| 1377 m_currentAllocationPoint += allocationSize; | 1369 m_currentAllocationPoint += allocationSize; |
| 1378 m_remainingAllocationSize -= allocationSize; | 1370 m_remainingAllocationSize -= allocationSize; |
| 1379 ASSERT(gcInfoIndex > 0); | 1371 ASSERT(gcInfoIndex > 0); |
| 1380 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); | 1372 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); |
| 1381 Address result = headerAddress + sizeof(HeapObjectHeader); | 1373 Address result = headerAddress + sizeof(HeapObjectHeader); |
| 1382 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1374 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
| 1383 | 1375 |
| 1384 // Unpoison the memory used for the object (payload). | 1376 // Unpoison the memory used for the object (payload). |
| 1385 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); | 1377 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); |
| 1386 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); | 1378 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); |
| 1387 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); | 1379 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); |
| 1388 return result; | 1380 return result; |
| 1389 } | 1381 } |
| 1390 return outOfLineAllocate(allocationSize, gcInfoIndex); | 1382 return outOfLineAllocate(allocationSize, gcInfoIndex); |
| 1391 } | 1383 } |
| 1392 | 1384 |
| 1393 Address ThreadHeap::allocate(size_t size, size_t gcInfoIndex) | 1385 Address ThreadHeapForHeapPage::allocate(size_t size, size_t gcInfoIndex) |
| 1394 { | 1386 { |
| 1395 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 1387 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); |
| 1396 } | 1388 } |
| 1397 | 1389 |
| 1398 template<typename T> | 1390 template<typename T> |
| 1399 struct HeapIndexTrait { | 1391 struct HeapIndexTrait { |
| 1400 static int index() { return GeneralHeap; }; | 1392 static int index() { return GeneralHeap; }; |
| 1401 }; | 1393 }; |
| 1402 | 1394 |
| 1403 // FIXME: The forward declaration is layering violation. | 1395 // FIXME: The forward declaration is layering violation. |
| 1404 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ | 1396 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ |
| 1405 class Type; \ | 1397 class Type; \ |
| 1406 template<> \ | 1398 template<> \ |
| 1407 struct HeapIndexTrait<class Type> { \ | 1399 struct HeapIndexTrait<class Type> { \ |
| 1408 static int index() { return Type##Heap; }; \ | 1400 static int index() { return Type##Heap; }; \ |
| 1409 }; | 1401 }; |
| 1410 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) | 1402 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) |
| 1411 #undef DEFINE_TYPED_HEAP_TRAIT | 1403 #undef DEFINE_TYPED_HEAP_TRAIT |
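
For reference, taking Node (one of the typed heaps mentioned earlier) as an example, DEFINE_TYPED_HEAP_TRAIT(Node) expands to:

    class Node;
    template<>
    struct HeapIndexTrait<class Node> {
        static int index() { return NodeHeap; };
    };
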
| 1412 | 1404 |
| 1413 template<typename T> | 1405 template<typename T> |
| 1414 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) | 1406 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) |
| 1415 { | 1407 { |
| 1416 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1408 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
| 1417 ASSERT(state->isAllocationAllowed()); | 1409 ASSERT(state->isAllocationAllowed()); |
| 1418 return state->heap(heapIndex)->allocate(size, gcInfoIndex); | 1410 return static_cast<ThreadHeapForHeapPage*>(state->heap(heapIndex))->allocate(size, gcInfoIndex); |
| 1419 } | 1411 } |
| 1420 | 1412 |
| 1421 template<typename T> | 1413 template<typename T> |
| 1422 Address Heap::allocate(size_t size) | 1414 Address Heap::allocate(size_t size) |
| 1423 { | 1415 { |
| 1424 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); | 1416 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); |
| 1425 } | 1417 } |
| 1426 | 1418 |
| 1427 template<typename T> | 1419 template<typename T> |
| 1428 Address Heap::reallocate(void* previous, size_t size) | 1420 Address Heap::reallocate(void* previous, size_t size) |
| (...skipping 1033 matching lines...) |
| 2462 template<typename T, size_t inlineCapacity> | 2454 template<typename T, size_t inlineCapacity> |
| 2463 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; | 2455 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; |
| 2464 template<typename T, size_t inlineCapacity> | 2456 template<typename T, size_t inlineCapacity> |
| 2465 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; | 2457 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; |
| 2466 template<typename T, typename U, typename V> | 2458 template<typename T, typename U, typename V> |
| 2467 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; | 2459 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; |
| 2468 | 2460 |
| 2469 } // namespace blink | 2461 } // namespace blink |
| 2470 | 2462 |
| 2471 #endif // Heap_h | 2463 #endif // Heap_h |