OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 71 matching lines...)
82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 82 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) | 83 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) |
84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) | 84 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) |
85 #else | 85 #else |
86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) | 86 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) |
87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) | 87 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) |
88 #endif | 88 #endif |
89 | 89 |
90 class CallbackStack; | 90 class CallbackStack; |
91 class PageMemory; | 91 class PageMemory; |
92 class ThreadHeapForHeapPage; | |
93 template<ThreadAffinity affinity> class ThreadLocalPersistents; | 92 template<ThreadAffinity affinity> class ThreadLocalPersistents; |
94 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; | 93 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; |
95 | 94 |
96 #if ENABLE(GC_PROFILING) | 95 #if ENABLE(GC_PROFILING) |
97 class TracedValue; | 96 class TracedValue; |
98 #endif | 97 #endif |
99 | 98 |
100 // HeapObjectHeader is a 4 byte (32 bit) object that has the following layout: | 99 // HeapObjectHeader is a 4 byte (32 bit) object that has the following layout: |
101 // | 100 // |
102 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | | 101 // | gcInfoIndex (15 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) | |
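As a rough, standalone illustration of that layout (the real shift and mask constants are defined in the lines skipped below; the bit positions assumed here, mark bit lowest and gcInfoIndex highest, are an inference from the comment, not taken from the source):

#include <cstdint>
#include <cstdio>

int main()
{
    // Hypothetical masks/shifts matching the 15/14/1/1/1 split above.
    const uint32_t markBitMask = 1u << 0;
    const uint32_t freedBitMask = 1u << 1;
    const uint32_t deadBitMask = 1u << 2;
    const uint32_t sizeShift = 3;
    const uint32_t sizeMask = ((1u << 14) - 1) << sizeShift;
    const uint32_t gcInfoIndexShift = 17;

    // Encode a marked, 64-byte object with gcInfoIndex 7, then decode it.
    uint32_t encoded = (7u << gcInfoIndexShift) | (64u << sizeShift) | markBitMask;
    printf("gcInfoIndex=%u size=%u marked=%d freed=%d dead=%d\n",
        encoded >> gcInfoIndexShift,
        (encoded & sizeMask) >> sizeShift,
        !!(encoded & markBitMask),
        !!(encoded & freedBitMask),
        !!(encoded & deadBitMask));
    return 0;
}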
(...skipping 221 matching lines...)
324 inline Address roundToBlinkPageStart(Address address) | 323 inline Address roundToBlinkPageStart(Address address) |
325 { | 324 { |
326 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); | 325 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); |
327 } | 326 } |
328 | 327 |
329 inline Address roundToBlinkPageEnd(Address address) | 328 inline Address roundToBlinkPageEnd(Address address) |
330 { | 329 { |
331 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize; | 330 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) & blinkPageBaseMask) + blinkPageSize; |
332 } | 331 } |
333 | 332 |
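A standalone sketch of what these two rounding helpers compute; the 128 KB page size is an assumption for illustration (the real value comes from blinkPageSize, defined earlier in this header). The `address - 1` in roundToBlinkPageEnd keeps an already page-aligned address from being bumped up by a whole page.

#include <cstdint>
#include <cstdio>

int main()
{
    const uintptr_t pageSize = uintptr_t(1) << 17;   // assumed blinkPageSize (128 KB)
    const uintptr_t pageBaseMask = ~(pageSize - 1);  // assumed blinkPageBaseMask

    uintptr_t address = 3 * pageSize + 0x1234;       // somewhere inside the fourth page
    uintptr_t start = address & pageBaseMask;                   // roundToBlinkPageStart
    uintptr_t end = ((address - 1) & pageBaseMask) + pageSize;  // roundToBlinkPageEnd

    printf("start=%#llx end=%#llx\n",
        (unsigned long long)start, (unsigned long long)end);
    return 0;
}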
| 333 // Compute the amount of padding we have to add to a header to make |
| 334 // the size of the header plus the padding a multiple of 8 bytes. |
| 335 inline size_t headerPadding() |
| 336 { |
| 337 return (allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; |
| 338 } |
| 339 |
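A worked example of the padding formula, assuming an 8-byte allocationGranularity and the 4-byte HeapObjectHeader from the layout comment above (both values are assumptions here, not quoted from the skipped lines):

#include <cstddef>
#include <cstdio>

int main()
{
    const size_t allocationGranularity = 8; // assumed
    const size_t headerSize = 4;            // sizeof(HeapObjectHeader), per the layout comment
    size_t padding = (allocationGranularity - (headerSize % allocationGranularity)) % allocationGranularity;
    printf("padding = %zu\n", padding);     // 4, so header + padding occupies 8 bytes
    return 0;
}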
334 // Masks an address down to the enclosing blink page base address. | 340 // Masks an address down to the enclosing blink page base address. |
335 inline Address blinkPageAddress(Address address) | 341 inline Address blinkPageAddress(Address address) |
336 { | 342 { |
337 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); | 343 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blinkPageBaseMask); |
338 } | 344 } |
339 | 345 |
340 #if ENABLE(ASSERT) | 346 #if ENABLE(ASSERT) |
341 | 347 |
342 // Sanity check for a page header address: the address of the page | 348 // Sanity check for a page header address: the address of the page |
343 // header should be OS page size away from being Blink page size | 349 // header should be OS page size away from being Blink page size |
344 // aligned. | 350 // aligned. |
345 inline bool isPageHeaderAddress(Address address) | 351 inline bool isPageHeaderAddress(Address address) |
346 { | 352 { |
347 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - WTF::kSystemPageSize); | 353 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - WTF::kSystemPageSize); |
348 } | 354 } |
349 #endif | 355 #endif |
350 | 356 |
351 // FIXME: Add a good comment about the heap layout once heap relayout work | 357 // FIXME: Add a good comment about the heap layout once heap relayout work |
352 // is done. | 358 // is done. |
353 class BaseHeapPage { | 359 class BaseHeapPage { |
354 public: | 360 public: |
355 BaseHeapPage(PageMemory*, ThreadHeap*); | 361 BaseHeapPage(PageMemory*, ThreadHeap*); |
356 virtual ~BaseHeapPage() { } | 362 virtual ~BaseHeapPage() { } |
357 | 363 |
358 void link(BaseHeapPage** previousNext) | |
359 { | |
360 m_next = *previousNext; | |
361 *previousNext = this; | |
362 } | |
363 void unlink(BaseHeapPage** previousNext) | |
364 { | |
365 *previousNext = m_next; | |
366 m_next = nullptr; | |
367 } | |
368 BaseHeapPage* next() const { return m_next; } | |
369 | |
370 // virtual methods are slow. So performance-sensitive methods | 364 // virtual methods are slow. So performance-sensitive methods |
371 // should be defined as non-virtual methods on HeapPage and LargeObject. | 365 // should be defined as non-virtual methods on HeapPage and LargeObject. |
372 // The following methods are not performance-sensitive. | 366 // The following methods are not performance-sensitive. |
373 virtual size_t objectPayloadSizeForTesting() = 0; | 367 virtual size_t objectPayloadSizeForTesting() = 0; |
374 virtual bool isEmpty() = 0; | 368 virtual bool isEmpty() = 0; |
375 virtual void removeFromHeap() = 0; | 369 virtual void removeFromHeap(ThreadHeap*) = 0; |
376 virtual void sweep() = 0; | 370 virtual void sweep() = 0; |
377 virtual void markUnmarkedObjectsDead() = 0; | 371 virtual void markUnmarkedObjectsDead() = 0; |
378 // Check if the given address points to an object in this | 372 // Check if the given address points to an object in this |
379 // heap page. If so, find the start of that object and mark it | 373 // heap page. If so, find the start of that object and mark it |
380 // using the given Visitor. Otherwise do nothing. The pointer must | 374 // using the given Visitor. Otherwise do nothing. The pointer must |
381 // be within the same aligned blinkPageSize as the this-pointer. | 375 // be within the same aligned blinkPageSize as the this-pointer. |
382 // | 376 // |
383 // This is used during conservative stack scanning to | 377 // This is used during conservative stack scanning to |
384 // conservatively mark all objects that could be referenced from | 378 // conservatively mark all objects that could be referenced from |
385 // the stack. | 379 // the stack. |
(...skipping 27 matching lines...)
413 | 407 |
414 void markAsUnswept() | 408 void markAsUnswept() |
415 { | 409 { |
416 ASSERT(m_swept); | 410 ASSERT(m_swept); |
417 m_swept = false; | 411 m_swept = false; |
418 } | 412 } |
419 | 413 |
420 private: | 414 private: |
421 PageMemory* m_storage; | 415 PageMemory* m_storage; |
422 ThreadHeap* m_heap; | 416 ThreadHeap* m_heap; |
423 BaseHeapPage* m_next; | |
424 // Whether the page is part of a terminating thread or not. | 417 // Whether the page is part of a terminating thread or not. |
425 bool m_terminating; | 418 bool m_terminating; |
426 | 419 |
427 // Track the sweeping state of a page. Set to true once | 420 // Track the sweeping state of a page. Set to true once |
428 // the lazy sweep has processed it. | 421 // the lazy sweep has processed it. |
429 // | 422 // |
430 // Set to false at the start of a sweep, true upon completion | 423 // Set to false at the start of a sweep, true upon completion |
431 // of lazy sweeping. | 424 // of lazy sweeping. |
432 bool m_swept; | 425 bool m_swept; |
433 friend class ThreadHeap; | |
434 }; | 426 }; |
435 | 427 |
436 class HeapPage final : public BaseHeapPage { | 428 class HeapPage final : public BaseHeapPage { |
437 public: | 429 public: |
438 HeapPage(PageMemory*, ThreadHeap*); | 430 HeapPage(PageMemory*, ThreadHeap*); |
439 | 431 |
440 Address payload() | 432 Address payload() |
441 { | 433 { |
442 return address() + sizeof(HeapPage) + headerPadding(); | 434 return address() + sizeof(HeapPage) + headerPadding(); |
443 } | 435 } |
444 size_t payloadSize() | 436 size_t payloadSize() |
445 { | 437 { |
446 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask; | 438 return (blinkPagePayloadSize() - sizeof(HeapPage) - headerPadding()) & ~allocationMask; |
447 } | 439 } |
448 Address payloadEnd() { return payload() + payloadSize(); } | 440 Address payloadEnd() { return payload() + payloadSize(); } |
449 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } | 441 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } |
450 | 442 |
| 443 void link(HeapPage** previousNext) |
| 444 { |
| 445 m_next = *previousNext; |
| 446 *previousNext = this; |
| 447 } |
| 448 |
| 449 void unlink(HeapPage** previousNext) |
| 450 { |
| 451 *previousNext = m_next; |
| 452 m_next = nullptr; |
| 453 } |
| 454 |
451 virtual size_t objectPayloadSizeForTesting() override; | 455 virtual size_t objectPayloadSizeForTesting() override; |
452 virtual bool isEmpty() override; | 456 virtual bool isEmpty() override; |
453 virtual void removeFromHeap() override; | 457 virtual void removeFromHeap(ThreadHeap*) override; |
454 virtual void sweep() override; | 458 virtual void sweep() override; |
455 virtual void markUnmarkedObjectsDead() override; | 459 virtual void markUnmarkedObjectsDead() override; |
456 virtual void checkAndMarkPointer(Visitor*, Address) override; | 460 virtual void checkAndMarkPointer(Visitor*, Address) override; |
457 virtual void markOrphaned() override | 461 virtual void markOrphaned() override |
458 { | 462 { |
459 // Zap the payload with a recognizable value to detect any incorrect | 463 // Zap the payload with a recognizable value to detect any incorrect |
460 // cross thread pointer usage. | 464 // cross thread pointer usage. |
461 #if defined(ADDRESS_SANITIZER) | 465 #if defined(ADDRESS_SANITIZER) |
462 // This needs to zap poisoned memory as well. | 466 // This needs to zap poisoned memory as well. |
463 // Force unpoison memory before memset. | 467 // Force unpoison memory before memset. |
(...skipping 14 matching lines...)
478 // for the header, and the unmapped guard page at the start. That ensures | 482 // for the header, and the unmapped guard page at the start. That ensures |
479 // the result can be used to populate the negative page cache. | 483 // the result can be used to populate the negative page cache. |
480 virtual bool contains(Address addr) override | 484 virtual bool contains(Address addr) override |
481 { | 485 { |
482 Address blinkPageStart = roundToBlinkPageStart(address()); | 486 Address blinkPageStart = roundToBlinkPageStart(address()); |
483 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. | 487 ASSERT(blinkPageStart == address() - WTF::kSystemPageSize); // Page is at aligned address plus guard page size. |
484 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; | 488 return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize; |
485 } | 489 } |
486 #endif | 490 #endif |
487 virtual size_t size() override { return blinkPageSize; } | 491 virtual size_t size() override { return blinkPageSize; } |
488 // Compute the amount of padding we have to add to a header to make | |
489 // the size of the header plus the padding a multiple of 8 bytes. | |
490 static size_t headerPadding() | |
491 { | |
492 return (sizeof(HeapPage) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; | |
493 } | |
494 | 492 |
| 493 HeapPage* next() { return m_next; } |
495 | 494 |
496 ThreadHeapForHeapPage* heapForHeapPage(); | |
497 void clearObjectStartBitMap(); | 495 void clearObjectStartBitMap(); |
498 | 496 |
499 #if defined(ADDRESS_SANITIZER) | 497 #if defined(ADDRESS_SANITIZER) |
500 void poisonUnmarkedObjects(); | 498 void poisonUnmarkedObjects(); |
501 #endif | 499 #endif |
502 | 500 |
| 501 // This method is needed just to avoid compilers from removing m_padding. |
| 502 uint64_t unusedMethod() const { return m_padding; } |
| 503 |
503 private: | 504 private: |
504 HeapObjectHeader* findHeaderFromAddress(Address); | 505 HeapObjectHeader* findHeaderFromAddress(Address); |
505 void populateObjectStartBitMap(); | 506 void populateObjectStartBitMap(); |
506 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } | 507 bool isObjectStartBitMapComputed() { return m_objectStartBitMapComputed; } |
507 | 508 |
| 509 HeapPage* m_next; |
508 bool m_objectStartBitMapComputed; | 510 bool m_objectStartBitMapComputed; |
509 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; | 511 uint8_t m_objectStartBitMap[reservedForObjectBitMap]; |
| 512 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. |
| 513 |
| 514 friend class ThreadHeap; |
510 }; | 515 }; |
511 | 516 |
512 // Large allocations are allocated as separate objects and linked in a list. | 517 // Large allocations are allocated as separate objects and linked in a list. |
513 // | 518 // |
514 // In order to use the same memory allocation routines for everything allocated | 519 // In order to use the same memory allocation routines for everything allocated |
515 // in the heap, large objects are considered heap pages containing only one | 520 // in the heap, large objects are considered heap pages containing only one |
516 // object. | 521 // object. |
517 class LargeObject final : public BaseHeapPage { | 522 class LargeObject final : public BaseHeapPage { |
518 public: | 523 public: |
519 LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize) | 524 LargeObject(PageMemory* storage, ThreadHeap* heap, size_t payloadSize) |
520 : BaseHeapPage(storage, heap) | 525 : BaseHeapPage(storage, heap) |
521 , m_payloadSize(payloadSize) | 526 , m_payloadSize(payloadSize) |
522 { | 527 { |
523 } | 528 } |
524 | 529 |
525 Address payload() { return heapObjectHeader()->payload(); } | 530 Address payload() { return heapObjectHeader()->payload(); } |
526 size_t payloadSize() { return m_payloadSize; } | 531 size_t payloadSize() { return m_payloadSize; } |
527 Address payloadEnd() { return payload() + payloadSize(); } | 532 Address payloadEnd() { return payload() + payloadSize(); } |
528 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } | 533 bool containedInObjectPayload(Address address) { return payload() <= address && address < payloadEnd(); } |
529 | 534 |
530 virtual size_t objectPayloadSizeForTesting() override; | 535 virtual size_t objectPayloadSizeForTesting() override; |
531 virtual bool isEmpty() override; | 536 virtual bool isEmpty() override; |
532 virtual void removeFromHeap() override; | 537 virtual void removeFromHeap(ThreadHeap*) override; |
533 virtual void sweep() override; | 538 virtual void sweep() override; |
534 virtual void markUnmarkedObjectsDead() override; | 539 virtual void markUnmarkedObjectsDead() override; |
535 virtual void checkAndMarkPointer(Visitor*, Address) override; | 540 virtual void checkAndMarkPointer(Visitor*, Address) override; |
536 virtual void markOrphaned() override | 541 virtual void markOrphaned() override |
537 { | 542 { |
538 // Zap the payload with a recognizable value to detect any incorrect | 543 // Zap the payload with a recognizable value to detect any incorrect |
539 // cross thread pointer usage. | 544 // cross thread pointer usage. |
540 memset(payload(), orphanedZapValue, payloadSize()); | 545 memset(payload(), orphanedZapValue, payloadSize()); |
541 BaseHeapPage::markOrphaned(); | 546 BaseHeapPage::markOrphaned(); |
542 } | 547 } |
(...skipping 12 matching lines...)
555 // populate the negative page cache. | 560 // populate the negative page cache. |
556 virtual bool contains(Address object) override | 561 virtual bool contains(Address object) override |
557 { | 562 { |
558 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); | 563 return roundToBlinkPageStart(address()) <= object && object < roundToBlinkPageEnd(address() + size()); |
559 } | 564 } |
560 #endif | 565 #endif |
561 virtual size_t size() | 566 virtual size_t size() |
562 { | 567 { |
563 return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; | 568 return sizeof(LargeObject) + headerPadding() + sizeof(HeapObjectHeader) + m_payloadSize; |
564 } | 569 } |
565 // Compute the amount of padding we have to add to a header to make | 570 virtual bool isLargeObject() override { return true; } |
566 // the size of the header plus the padding a multiple of 8 bytes. | 571 |
567 static size_t headerPadding() | 572 void link(LargeObject** previousNext) |
568 { | 573 { |
569 return (sizeof(LargeObject) + allocationGranularity - (sizeof(HeapObjectHeader) % allocationGranularity)) % allocationGranularity; | 574 m_next = *previousNext; |
| 575 *previousNext = this; |
570 } | 576 } |
571 virtual bool isLargeObject() override { return true; } | 577 |
| 578 void unlink(LargeObject** previousNext) |
| 579 { |
| 580 *previousNext = m_next; |
| 581 m_next = nullptr; |
| 582 } |
| 583 |
| 584 LargeObject* next() |
| 585 { |
| 586 return m_next; |
| 587 } |
572 | 588 |
573 HeapObjectHeader* heapObjectHeader() | 589 HeapObjectHeader* heapObjectHeader() |
574 { | 590 { |
575 Address headerAddress = address() + sizeof(LargeObject) + headerPadding(); | 591 Address headerAddress = address() + sizeof(LargeObject) + headerPadding(); |
576 return reinterpret_cast<HeapObjectHeader*>(headerAddress); | 592 return reinterpret_cast<HeapObjectHeader*>(headerAddress); |
577 } | 593 } |
578 | 594 |
| 595 // This method is needed just to avoid compilers from removing m_padding. |
| 596 uint64_t unusedMethod() const { return m_padding; } |
| 597 |
579 private: | 598 private: |
580 | 599 friend class ThreadHeap; |
| 600 LargeObject* m_next; |
581 size_t m_payloadSize; | 601 size_t m_payloadSize; |
| 602 uint64_t m_padding; // Preserve 8-byte alignment on 32-bit systems. |
582 }; | 603 }; |
583 | 604 |
584 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary | 605 // A HeapDoesNotContainCache provides a fast way of taking an arbitrary |
585 // pointer-sized word, and determining whether it cannot be interpreted as a | 606 // pointer-sized word, and determining whether it cannot be interpreted as a |
586 // pointer to an area that is managed by the garbage collected Blink heap. This | 607 // pointer to an area that is managed by the garbage collected Blink heap. This |
587 // is a cache of 'pages' that have previously been determined to be wholly | 608 // is a cache of 'pages' that have previously been determined to be wholly |
588 // outside of the heap. The size of these pages must be smaller than the | 609 // outside of the heap. The size of these pages must be smaller than the |
589 // allocation alignment of the heap pages. We determine off-heap-ness by | 610 // allocation alignment of the heap pages. We determine off-heap-ness by |
590 // rounding down the pointer to the nearest page and looking up the page in the | 611 // rounding down the pointer to the nearest page and looking up the page in the |
591 // cache. If there is a miss in the cache we can determine the status of the | 612 // cache. If there is a miss in the cache we can determine the status of the |
(...skipping 108 matching lines...)
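A minimal sketch of this kind of negative cache, written from the comment above rather than from the skipped class body; the direct-mapped layout, sizes, and hash are assumptions. The cache can only answer "definitely not in the heap" or "don't know", so a miss falls back to the slow containment check.

#include <cstddef>
#include <cstdint>
#include <cstring>

class NegativePageCache {
public:
    NegativePageCache()
    {
        // Fill with a non-page-aligned sentinel so no lookup matches initially.
        memset(m_entries, 0xff, sizeof(m_entries));
    }

    // True only if the cache page holding addr was previously recorded as off-heap.
    bool lookup(uintptr_t addr) const
    {
        uintptr_t page = addr & ~(kPageSize - 1);
        return m_entries[hash(page)] == page;
    }

    // Record a page that the slow-path check found to be wholly outside the heap.
    void addEntry(uintptr_t addr)
    {
        uintptr_t page = addr & ~(kPageSize - 1);
        m_entries[hash(page)] = page;
    }

private:
    static const uintptr_t kPageSize = uintptr_t(1) << 12; // assumed cache granularity
    static const size_t kNumEntries = 1 << 10;             // assumed cache size

    static size_t hash(uintptr_t page) { return (page >> 12) & (kNumEntries - 1); }

    uintptr_t m_entries[kNumEntries];
};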
700 | 721 |
701 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; | 722 void getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& totalSize) const; |
702 #endif | 723 #endif |
703 | 724 |
704 private: | 725 private: |
705 int m_biggestFreeListIndex; | 726 int m_biggestFreeListIndex; |
706 | 727 |
707 // All FreeListEntries in the nth list have size >= 2^n. | 728 // All FreeListEntries in the nth list have size >= 2^n. |
708 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 729 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
709 | 730 |
710 friend class ThreadHeapForHeapPage; | 731 friend class ThreadHeap; |
711 }; | 732 }; |
712 | 733 |
713 // Thread heaps represent a part of the per-thread Blink heap. | 734 // Thread heaps represent a part of the per-thread Blink heap. |
714 // | 735 // |
715 // Each Blink thread has a number of thread heaps: one general heap | 736 // Each Blink thread has a number of thread heaps: one general heap |
716 // that contains any type of object and a number of heaps specialized | 737 // that contains any type of object and a number of heaps specialized |
717 // for specific object types (such as Node). | 738 // for specific object types (such as Node). |
718 // | 739 // |
719 // Each thread heap contains the functionality to allocate new objects | 740 // Each thread heap contains the functionality to allocate new objects |
720 // (potentially adding new pages to the heap), to find and mark | 741 // (potentially adding new pages to the heap), to find and mark |
721 // objects during conservative stack scanning and to sweep the set of | 742 // objects during conservative stack scanning and to sweep the set of |
722 // pages after a GC. | 743 // pages after a GC. |
723 class PLATFORM_EXPORT ThreadHeap { | 744 class PLATFORM_EXPORT ThreadHeap final { |
724 public: | 745 public: |
725 ThreadHeap(ThreadState*, int); | 746 ThreadHeap(ThreadState*, int); |
726 virtual ~ThreadHeap(); | 747 ~ThreadHeap(); |
727 void cleanupPages(); | 748 void cleanupPages(); |
728 | 749 |
729 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 750 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
730 BaseHeapPage* findPageFromAddress(Address); | 751 BaseHeapPage* findPageFromAddress(Address); |
731 #endif | 752 #endif |
732 #if ENABLE(GC_PROFILING) | 753 #if ENABLE(GC_PROFILING) |
733 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 754 void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
734 void incrementMarkedObjectsAge(); | 755 void incrementMarkedObjectsAge(); |
735 #endif | 756 #endif |
736 | 757 |
737 virtual void clearFreeLists() { } | 758 void clearFreeLists(); |
738 void makeConsistentForSweeping(); | 759 void makeConsistentForSweeping(); |
739 #if ENABLE(ASSERT) | 760 #if ENABLE(ASSERT) |
740 virtual bool isConsistentForSweeping() = 0; | 761 bool isConsistentForSweeping(); |
741 #endif | 762 #endif |
742 size_t objectPayloadSizeForTesting(); | 763 size_t objectPayloadSizeForTesting(); |
743 void prepareHeapForTermination(); | |
744 void prepareForSweep(); | |
745 Address lazySweep(size_t, size_t gcInfoIndex); | |
746 void completeSweep(); | |
747 | 764 |
748 ThreadState* threadState() { return m_threadState; } | 765 ThreadState* threadState() { return m_threadState; } |
749 int heapIndex() const { return m_index; } | |
750 inline static size_t allocationSizeFromSize(size_t); | |
751 inline static size_t roundedAllocationSize(size_t size) | |
752 { | |
753 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); | |
754 } | |
755 | 766 |
756 protected: | |
757 BaseHeapPage* m_firstPage; | |
758 BaseHeapPage* m_firstUnsweptPage; | |
759 | |
760 private: | |
761 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) = 0; | |
762 | |
763 ThreadState* m_threadState; | |
764 | |
765 // Index into the page pools. This is used to ensure that the pages of the | |
766 // same type go into the correct page pool and thus avoid type confusion. | |
767 int m_index; | |
768 }; | |
769 | |
770 class PLATFORM_EXPORT ThreadHeapForHeapPage final : public ThreadHeap { | |
771 public: | |
772 ThreadHeapForHeapPage(ThreadState*, int); | |
773 void addToFreeList(Address address, size_t size) | 767 void addToFreeList(Address address, size_t size) |
774 { | 768 { |
775 ASSERT(findPageFromAddress(address)); | 769 ASSERT(findPageFromAddress(address)); |
776 ASSERT(findPageFromAddress(address + size - 1)); | 770 ASSERT(findPageFromAddress(address + size - 1)); |
777 m_freeList.addToFreeList(address, size); | 771 m_freeList.addToFreeList(address, size); |
778 } | 772 } |
779 virtual void clearFreeLists() override; | |
780 #if ENABLE(ASSERT) | |
781 virtual bool isConsistentForSweeping() override; | |
782 bool pagesToBeSweptContains(Address); | |
783 #endif | |
784 | 773 |
785 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); | 774 inline Address allocate(size_t payloadSize, size_t gcInfoIndex); |
786 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); | 775 inline static size_t roundedAllocationSize(size_t size) |
| 776 { |
| 777 return allocationSizeFromSize(size) - sizeof(HeapObjectHeader); |
| 778 } |
| 779 inline static size_t allocationSizeFromSize(size_t); |
| 780 |
| 781 void prepareHeapForTermination(); |
| 782 void prepareForSweep(); |
| 783 void completeSweep(); |
787 | 784 |
788 void freePage(HeapPage*); | 785 void freePage(HeapPage*); |
| 786 void freeLargeObject(LargeObject*); |
789 | 787 |
790 bool coalesce(); | |
791 void promptlyFreeObject(HeapObjectHeader*); | 788 void promptlyFreeObject(HeapObjectHeader*); |
792 bool expandObject(HeapObjectHeader*, size_t); | 789 bool expandObject(HeapObjectHeader*, size_t); |
793 void shrinkObject(HeapObjectHeader*, size_t); | 790 void shrinkObject(HeapObjectHeader*, size_t); |
794 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } | 791 void decreasePromptlyFreedSize(size_t size) { m_promptlyFreedSize -= size; } |
795 | 792 |
796 #if ENABLE(GC_PROFILING) | 793 #if ENABLE(GC_PROFILING) |
797 void snapshotFreeList(TracedValue&); | 794 void snapshotFreeList(TracedValue&); |
798 | 795 |
799 void countMarkedObjects(ClassAgeCountsMap&) const; | 796 void countMarkedObjects(ClassAgeCountsMap&) const; |
800 void countObjectsToSweep(ClassAgeCountsMap&) const; | 797 void countObjectsToSweep(ClassAgeCountsMap&) const; |
801 #endif | 798 #endif |
802 | 799 |
803 private: | 800 private: |
804 void allocatePage(); | |
805 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; | |
806 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); | 801 Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex); |
807 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 802 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
808 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 803 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
809 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 804 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
810 inline void setAllocationPoint(Address, size_t); | 805 inline void setAllocationPoint(Address, size_t); |
811 void updateRemainingAllocationSize(); | 806 void updateRemainingAllocationSize(); |
812 Address allocateFromFreeList(size_t, size_t gcInfoIndex); | 807 Address allocateFromFreeList(size_t, size_t gcInfoIndex); |
| 808 Address lazySweepPages(size_t, size_t gcInfoIndex); |
| 809 bool lazySweepLargeObjects(size_t); |
813 | 810 |
814 FreeList m_freeList; | 811 void allocatePage(); |
| 812 Address allocateLargeObject(size_t, size_t gcInfoIndex); |
| 813 |
| 814 inline Address allocateObject(size_t allocationSize, size_t gcInfoIndex); |
| 815 |
| 816 #if ENABLE(ASSERT) |
| 817 bool pagesToBeSweptContains(Address); |
| 818 #endif |
| 819 |
| 820 bool coalesce(); |
| 821 void preparePagesForSweeping(); |
| 822 |
815 Address m_currentAllocationPoint; | 823 Address m_currentAllocationPoint; |
816 size_t m_remainingAllocationSize; | 824 size_t m_remainingAllocationSize; |
817 size_t m_lastRemainingAllocationSize; | 825 size_t m_lastRemainingAllocationSize; |
818 | 826 |
| 827 HeapPage* m_firstPage; |
| 828 LargeObject* m_firstLargeObject; |
| 829 HeapPage* m_firstUnsweptPage; |
| 830 LargeObject* m_firstUnsweptLargeObject; |
| 831 |
| 832 ThreadState* m_threadState; |
| 833 |
| 834 FreeList m_freeList; |
| 835 |
| 836 // Index into the page pools. This is used to ensure that the pages of the |
| 837 // same type go into the correct page pool and thus avoid type confusion. |
| 838 int m_index; |
| 839 |
819 // The size of promptly freed objects in the heap. | 840 // The size of promptly freed objects in the heap. |
820 size_t m_promptlyFreedSize; | 841 size_t m_promptlyFreedSize; |
821 | 842 |
822 #if ENABLE(GC_PROFILING) | 843 #if ENABLE(GC_PROFILING) |
823 size_t m_cumulativeAllocationSize; | 844 size_t m_cumulativeAllocationSize; |
824 size_t m_allocationCount; | 845 size_t m_allocationCount; |
825 size_t m_inlineAllocationCount; | 846 size_t m_inlineAllocationCount; |
826 #endif | 847 #endif |
827 }; | 848 }; |
828 | 849 |
829 class ThreadHeapForLargeObject final : public ThreadHeap { | |
830 public: | |
831 ThreadHeapForLargeObject(ThreadState*, int); | |
832 Address allocateLargeObject(size_t, size_t gcInfoIndex); | |
833 void freeLargeObject(LargeObject*); | |
834 #if ENABLE(ASSERT) | |
835 virtual bool isConsistentForSweeping() override { return true; } | |
836 #endif | |
837 private: | |
838 Address doAllocateLargeObject(size_t, size_t gcInfoIndex); | |
839 virtual Address lazySweepPages(size_t, size_t gcInfoIndex) override; | |
840 }; | |
841 | |
842 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap | 850 // Mask an address down to the enclosing oilpan heap base page. All oilpan heap |
843 // pages are aligned at blinkPageBase plus an OS page size. | 851 // pages are aligned at blinkPageBase plus an OS page size. |
844 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our | 852 // FIXME: Remove PLATFORM_EXPORT once we get a proper public interface to our |
845 // typed heaps. This is only exported to enable tests in HeapTest.cpp. | 853 // typed heaps. This is only exported to enable tests in HeapTest.cpp. |
846 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) | 854 PLATFORM_EXPORT inline BaseHeapPage* pageFromObject(const void* object) |
847 { | 855 { |
848 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); | 856 Address address = reinterpret_cast<Address>(const_cast<void*>(object)); |
849 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); | 857 BaseHeapPage* page = reinterpret_cast<BaseHeapPage*>(blinkPageAddress(address) + WTF::kSystemPageSize); |
850 ASSERT(page->contains(address)); | 858 ASSERT(page->contains(address)); |
851 return page; | 859 return page; |
(...skipping 495 matching lines...)
1347 // therefore has to happen before any calculation on the size. | 1355 // therefore has to happen before any calculation on the size. |
1348 RELEASE_ASSERT(size < maxHeapObjectSize); | 1356 RELEASE_ASSERT(size < maxHeapObjectSize); |
1349 | 1357 |
1350 // Add space for header. | 1358 // Add space for header. |
1351 size_t allocationSize = size + sizeof(HeapObjectHeader); | 1359 size_t allocationSize = size + sizeof(HeapObjectHeader); |
1352 // Align size with allocation granularity. | 1360 // Align size with allocation granularity. |
1353 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 1361 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
1354 return allocationSize; | 1362 return allocationSize; |
1355 } | 1363 } |
1356 | 1364 |
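A quick worked example of allocationSizeFromSize, assuming a 4-byte HeapObjectHeader and an 8-byte allocation granularity (so allocationMask is 7); both values are assumptions based on the layout comment near the top of this header:

#include <cstddef>
#include <cstdio>

int main()
{
    const size_t headerSize = 4;     // sizeof(HeapObjectHeader), assumed
    const size_t allocationMask = 7; // allocationGranularity - 1, assumed

    const size_t sizes[] = { 1, 12, 64 };
    for (size_t size : sizes) {
        size_t allocationSize = (size + headerSize + allocationMask) & ~allocationMask;
        printf("size=%zu -> allocationSize=%zu\n", size, allocationSize);
    }
    // Prints 8, 16 and 72: every header + payload pair starts and ends on an
    // 8-byte boundary, which the bump allocation in allocateObject() below relies on.
    return 0;
}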
1357 Address ThreadHeapForHeapPage::allocateObject(size_t allocationSize, size_t gcInfoIndex) | 1365 Address ThreadHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex) |
1358 { | 1366 { |
1359 #if ENABLE(GC_PROFILING) | 1367 #if ENABLE(GC_PROFILING) |
1360 m_cumulativeAllocationSize += allocationSize; | 1368 m_cumulativeAllocationSize += allocationSize; |
1361 ++m_allocationCount; | 1369 ++m_allocationCount; |
1362 #endif | 1370 #endif |
1363 | 1371 |
1364 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | 1372 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { |
1365 #if ENABLE(GC_PROFILING) | 1373 #if ENABLE(GC_PROFILING) |
1366 ++m_inlineAllocationCount; | 1374 ++m_inlineAllocationCount; |
1367 #endif | 1375 #endif |
1368 Address headerAddress = m_currentAllocationPoint; | 1376 Address headerAddress = m_currentAllocationPoint; |
1369 m_currentAllocationPoint += allocationSize; | 1377 m_currentAllocationPoint += allocationSize; |
1370 m_remainingAllocationSize -= allocationSize; | 1378 m_remainingAllocationSize -= allocationSize; |
1371 ASSERT(gcInfoIndex > 0); | 1379 ASSERT(gcInfoIndex > 0); |
1372 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); | 1380 new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex); |
1373 Address result = headerAddress + sizeof(HeapObjectHeader); | 1381 Address result = headerAddress + sizeof(HeapObjectHeader); |
1374 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1382 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
1375 | 1383 |
1376 // Unpoison the memory used for the object (payload). | 1384 // Unpoison the memory used for the object (payload). |
1377 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); | 1385 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(HeapObjectHeader)); |
1378 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); | 1386 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(HeapObjectHeader)); |
1379 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); | 1387 ASSERT(findPageFromAddress(headerAddress + allocationSize - 1)); |
1380 return result; | 1388 return result; |
1381 } | 1389 } |
1382 return outOfLineAllocate(allocationSize, gcInfoIndex); | 1390 return outOfLineAllocate(allocationSize, gcInfoIndex); |
1383 } | 1391 } |
1384 | 1392 |
1385 Address ThreadHeapForHeapPage::allocate(size_t size, size_t gcInfoIndex) | 1393 Address ThreadHeap::allocate(size_t size, size_t gcInfoIndex) |
1386 { | 1394 { |
1387 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); | 1395 return allocateObject(allocationSizeFromSize(size), gcInfoIndex); |
1388 } | 1396 } |
1389 | 1397 |
1390 template<typename T> | 1398 template<typename T> |
1391 struct HeapIndexTrait { | 1399 struct HeapIndexTrait { |
1392 static int index() { return GeneralHeap; }; | 1400 static int index() { return GeneralHeap; }; |
1393 }; | 1401 }; |
1394 | 1402 |
1395 // FIXME: The forward declaration is layering violation. | 1403 // FIXME: The forward declaration is layering violation. |
1396 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ | 1404 #define DEFINE_TYPED_HEAP_TRAIT(Type) \ |
1397 class Type; \ | 1405 class Type; \ |
1398 template<> \ | 1406 template<> \ |
1399 struct HeapIndexTrait<class Type> { \ | 1407 struct HeapIndexTrait<class Type> { \ |
1400 static int index() { return Type##Heap; }; \ | 1408 static int index() { return Type##Heap; }; \ |
1401 }; | 1409 }; |
1402 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) | 1410 FOR_EACH_TYPED_HEAP(DEFINE_TYPED_HEAP_TRAIT) |
1403 #undef DEFINE_TYPED_HEAP_TRAIT | 1411 #undef DEFINE_TYPED_HEAP_TRAIT |
1404 | 1412 |
1405 template<typename T> | 1413 template<typename T> |
1406 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) | 1414 Address Heap::allocateOnHeapIndex(size_t size, int heapIndex, size_t gcInfoIndex) |
1407 { | 1415 { |
1408 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1416 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
1409 ASSERT(state->isAllocationAllowed()); | 1417 ASSERT(state->isAllocationAllowed()); |
1410 return static_cast<ThreadHeapForHeapPage*>(state->heap(heapIndex))->allocate(size, gcInfoIndex); | 1418 return state->heap(heapIndex)->allocate(size, gcInfoIndex); |
1411 } | 1419 } |
1412 | 1420 |
1413 template<typename T> | 1421 template<typename T> |
1414 Address Heap::allocate(size_t size) | 1422 Address Heap::allocate(size_t size) |
1415 { | 1423 { |
1416 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); | 1424 return allocateOnHeapIndex<T>(size, HeapIndexTrait<T>::index(), GCInfoTrait<T>::index()); |
1417 } | 1425 } |
1418 | 1426 |
1419 template<typename T> | 1427 template<typename T> |
1420 Address Heap::reallocate(void* previous, size_t size) | 1428 Address Heap::reallocate(void* previous, size_t size) |
(...skipping 1033 matching lines...)
2454 template<typename T, size_t inlineCapacity> | 2462 template<typename T, size_t inlineCapacity> |
2455 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; | 2463 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; |
2456 template<typename T, size_t inlineCapacity> | 2464 template<typename T, size_t inlineCapacity> |
2457 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; | 2465 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; |
2458 template<typename T, typename U, typename V> | 2466 template<typename T, typename U, typename V> |
2459 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; | 2467 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; |
2460 | 2468 |
2461 } // namespace blink | 2469 } // namespace blink |
2462 | 2470 |
2463 #endif // Heap_h | 2471 #endif // Heap_h |