| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 318 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 329 { | 329 { |
| 330 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) &
blinkPageBaseMask) + blinkPageSize; | 330 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address - 1) &
blinkPageBaseMask) + blinkPageSize; |
| 331 } | 331 } |
| 332 | 332 |
| 333 // Masks an address down to the enclosing blink page base address. | 333 // Masks an address down to the enclosing blink page base address. |
| 334 inline Address blinkPageAddress(Address address) | 334 inline Address blinkPageAddress(Address address) |
| 335 { | 335 { |
| 336 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blin
kPageBaseMask); | 336 return reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address) & blin
kPageBaseMask); |
| 337 } | 337 } |
| 338 | 338 |
| 339 inline bool vTableInitialized(void* objectPointer) |
| 340 { |
| 341 return !!(*reinterpret_cast<Address*>(objectPointer)); |
| 342 } |
| 343 |
| 339 #if ENABLE(ASSERT) | 344 #if ENABLE(ASSERT) |
| 340 | |
| 341 // Sanity check for a page header address: the address of the page | 345 // Sanity check for a page header address: the address of the page |
| 342 // header should be OS page size away from being Blink page size | 346 // header should be OS page size away from being Blink page size |
| 343 // aligned. | 347 // aligned. |
| 344 inline bool isPageHeaderAddress(Address address) | 348 inline bool isPageHeaderAddress(Address address) |
| 345 { | 349 { |
| 346 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - WTF:
:kSystemPageSize); | 350 return !((reinterpret_cast<uintptr_t>(address) & blinkPageOffsetMask) - WTF:
:kSystemPageSize); |
| 347 } | 351 } |
| 348 #endif | 352 #endif |
| 349 | 353 |
| 350 // BasePage is a base class for NormalPage and LargeObjectPage. | 354 // BasePage is a base class for NormalPage and LargeObjectPage. |
| (...skipping 1355 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1706 } | 1710 } |
| 1707 }; | 1711 }; |
| 1708 | 1712 |
| 1709 // Vector backing that needs marking. We don't support weak members in vectors. | 1713 // Vector backing that needs marking. We don't support weak members in vectors. |
| 1710 template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Trai
ts> | 1714 template<ShouldWeakPointersBeMarkedStrongly strongify, typename T, typename Trai
ts> |
| 1711 struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::Hea
pVectorBacking<T, Traits>, void> { | 1715 struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::Hea
pVectorBacking<T, Traits>, void> { |
| 1712 template<typename VisitorDispatcher> | 1716 template<typename VisitorDispatcher> |
| 1713 static bool trace(VisitorDispatcher visitor, void* self) | 1717 static bool trace(VisitorDispatcher visitor, void* self) |
| 1714 { | 1718 { |
| 1715 // HeapVectorBacking does not know the exact size of the vector | 1719 // HeapVectorBacking does not know the exact size of the vector |
| 1716 // and thus cannot avoid tracing all slots in the backing. | 1720 // and just knows the capacity of the vector. Due to the constraint, |
| 1717 // This works correctly as long as unused slots are cleared out | 1721 // HeapVectorBacking can support only the following objects: |
| 1718 // (this is done by VectorUnusedSlotClearer) and T can be initialized | 1722 // |
| 1719 // with memset (if T can be initialized with memset, it is safe to | 1723 // - An object that has a vtable. In this case, HeapVectorBacking |
| 1720 // treat a zeroed object as a valid object). | 1724 // traces only slots that are not zeroed out. This is because if |
| 1721 static_assert(!ShouldBeTraced<Traits>::value || Traits::canInitializeWit
hMemset, "HeapVectorBacking doesn't support objects that cannot be initialized w
ith memset."); | 1725 // the object has a vtable, the zeroed slot means that it is |
| 1726 // an unused slot (Remember that the unused slots are guaranteed |
| 1727 // to be zeroed out by VectorUnusedSlotClearer). |
| 1728 // |
| 1729 // - An object that can be initialized with memset. In this case, |
| 1730 // HeapVectorBacking traces all slots including unused slots. |
| 1731 // This is fine because the fact that the object can be initialized |
| 1732 // with memset indicates that it is safe to treat the zeroed slot |
| 1733 // as a valid object. |
| 1734 static_assert(!ShouldBeTraced<Traits>::value || Traits::canInitializeWit
hMemset || WTF::IsPolymorphic<T>::value, "HeapVectorBacking doesn't support obje
cts that cannot be initialized with memset."); |
| 1722 | 1735 |
| 1723 T* array = reinterpret_cast<T*>(self); | 1736 T* array = reinterpret_cast<T*>(self); |
| 1724 blink::HeapObjectHeader* header = blink::HeapObjectHeader::fromPayload(s
elf); | 1737 blink::HeapObjectHeader* header = blink::HeapObjectHeader::fromPayload(s
elf); |
| 1725 // Use the payload size as recorded by the heap to determine how many | 1738 // Use the payload size as recorded by the heap to determine how many |
| 1726 // elements to trace. | 1739 // elements to trace. |
| 1727 size_t length = header->payloadSize() / sizeof(T); | 1740 size_t length = header->payloadSize() / sizeof(T); |
| 1741 if (WTF::IsPolymorphic<T>::value) { |
| 1742 for (size_t i = 0; i < length; ++i) { |
| 1743 if (blink::vTableInitialized(&array[i])) |
| 1744 blink::CollectionBackingTraceTrait<ShouldBeTraced<Traits>::v
alue, Traits::weakHandlingFlag, WeakPointersActStrong, T, Traits>::trace(visitor
, array[i]); |
| 1745 } |
| 1746 } else { |
| 1728 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER | 1747 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER |
| 1729 // As commented above, HeapVectorBacking can trace unused slots | 1748 // As commented above, HeapVectorBacking can trace unused slots |
| 1730 // (which are already zeroed out). | 1749 // (which are already zeroed out). |
| 1731 ANNOTATE_CHANGE_SIZE(array, length, 0, length); | 1750 ANNOTATE_CHANGE_SIZE(array, length, 0, length); |
| 1732 #endif | 1751 #endif |
| 1733 for (size_t i = 0; i < length; ++i) | 1752 for (size_t i = 0; i < length; ++i) |
| 1734 blink::CollectionBackingTraceTrait<ShouldBeTraced<Traits>::value, Tr
aits::weakHandlingFlag, WeakPointersActStrong, T, Traits>::trace(visitor, array[
i]); | 1753 blink::CollectionBackingTraceTrait<ShouldBeTraced<Traits>::value
, Traits::weakHandlingFlag, WeakPointersActStrong, T, Traits>::trace(visitor, ar
ray[i]); |
| 1754 } |
| 1735 return false; | 1755 return false; |
| 1736 } | 1756 } |
| 1737 }; | 1757 }; |
| 1738 | 1758 |
| 1739 // Almost all hash table backings are visited with this specialization. | 1759 // Almost all hash table backings are visited with this specialization. |
| 1740 template<ShouldWeakPointersBeMarkedStrongly strongify, typename Table> | 1760 template<ShouldWeakPointersBeMarkedStrongly strongify, typename Table> |
| 1741 struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::Hea
pHashTableBacking<Table>, void> { | 1761 struct TraceInCollectionTrait<NoWeakHandlingInCollections, strongify, blink::Hea
pHashTableBacking<Table>, void> { |
| 1742 using Value = typename Table::ValueType; | 1762 using Value = typename Table::ValueType; |
| 1743 using Traits = typename Table::ValueTraits; | 1763 using Traits = typename Table::ValueTraits; |
| 1744 | 1764 |
| (...skipping 257 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2002 #if ENABLE(ASSERT) | 2022 #if ENABLE(ASSERT) |
| 2003 assertObjectHasGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing
>::index()); | 2023 assertObjectHasGCInfo(const_cast<Backing*>(backing), GCInfoTrait<Backing
>::index()); |
| 2004 #endif | 2024 #endif |
| 2005 } | 2025 } |
| 2006 }; | 2026 }; |
| 2007 | 2027 |
| 2008 template<typename T, typename Traits> | 2028 template<typename T, typename Traits> |
| 2009 void HeapVectorBacking<T, Traits>::finalize(void* pointer) | 2029 void HeapVectorBacking<T, Traits>::finalize(void* pointer) |
| 2010 { | 2030 { |
| 2011 static_assert(Traits::needsDestruction, "Only vector buffers with items requ
iring destruction should be finalized"); | 2031 static_assert(Traits::needsDestruction, "Only vector buffers with items requ
iring destruction should be finalized"); |
| 2012 // HeapVectorBacking does not know the exact size of the vector | 2032 // See the comment in HeapVectorBacking::trace. |
| 2013 // and thus cannot avoid calling finalizers for all slots in the backing. | 2033 static_assert(Traits::canInitializeWithMemset || WTF::IsPolymorphic<T>::valu
e, "HeapVectorBacking doesn't support objects that cannot be initialized with me
mset or don't have a vtable"); |
| 2014 // This works correctly as long as unused slots are cleared out | |
| 2015 // (this is done by VectorUnusedSlotClearer) and T can be initialized | |
| 2016 // with memset (if T can be initialized with memset, it is safe to | |
| 2017 // treat a zeroed object as a valid object). | |
| 2018 static_assert(Traits::canInitializeWithMemset, "HeapVectorBacking doesn't su
pport objects that cannot be initialized with memset."); | |
| 2019 | 2034 |
| 2020 ASSERT(!WTF::IsTriviallyDestructible<T>::value); | 2035 ASSERT(!WTF::IsTriviallyDestructible<T>::value); |
| 2021 HeapObjectHeader* header = HeapObjectHeader::fromPayload(pointer); | 2036 HeapObjectHeader* header = HeapObjectHeader::fromPayload(pointer); |
| 2022 // Use the payload size as recorded by the heap to determine how many | 2037 // Use the payload size as recorded by the heap to determine how many |
| 2023 // elements to finalize. | 2038 // elements to finalize. |
| 2024 size_t length = header->payloadSize() / sizeof(T); | 2039 size_t length = header->payloadSize() / sizeof(T); |
| 2025 T* buffer = reinterpret_cast<T*>(pointer); | 2040 T* buffer = reinterpret_cast<T*>(pointer); |
| 2026 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER | 2041 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER |
| 2027 // As commented above, HeapVectorBacking calls finalizers for unused slots | 2042 // As commented above, HeapVectorBacking calls finalizers for unused slots |
| 2028 // (which are already zeroed out). | 2043 // (which are already zeroed out). |
| 2029 ANNOTATE_CHANGE_SIZE(buffer, length, 0, length); | 2044 ANNOTATE_CHANGE_SIZE(buffer, length, 0, length); |
| 2030 #endif | 2045 #endif |
| 2031 for (unsigned i = 0; i < length; ++i) | 2046 if (WTF::IsPolymorphic<T>::value) { |
| 2032 buffer[i].~T(); | 2047 for (unsigned i = 0; i < length; ++i) { |
| 2048 if (blink::vTableInitialized(&buffer[i])) |
| 2049 buffer[i].~T(); |
| 2050 } |
| 2051 } else { |
| 2052 for (unsigned i = 0; i < length; ++i) { |
| 2053 buffer[i].~T(); |
| 2054 } |
| 2055 } |
| 2033 } | 2056 } |
| 2034 | 2057 |
| 2035 template<typename Table> | 2058 template<typename Table> |
| 2036 void HeapHashTableBacking<Table>::finalize(void* pointer) | 2059 void HeapHashTableBacking<Table>::finalize(void* pointer) |
| 2037 { | 2060 { |
| 2038 using Value = typename Table::ValueType; | 2061 using Value = typename Table::ValueType; |
| 2039 ASSERT(!WTF::IsTriviallyDestructible<Value>::value); | 2062 ASSERT(!WTF::IsTriviallyDestructible<Value>::value); |
| 2040 HeapObjectHeader* header = HeapObjectHeader::fromPayload(pointer); | 2063 HeapObjectHeader* header = HeapObjectHeader::fromPayload(pointer); |
| 2041 // Use the payload size as recorded by the heap to determine how many | 2064 // Use the payload size as recorded by the heap to determine how many |
| 2042 // elements to finalize. | 2065 // elements to finalize. |
| 2043 size_t length = header->payloadSize() / sizeof(Value); | 2066 size_t length = header->payloadSize() / sizeof(Value); |
| 2044 Value* table = reinterpret_cast<Value*>(pointer); | 2067 Value* table = reinterpret_cast<Value*>(pointer); |
| 2045 for (unsigned i = 0; i < length; ++i) { | 2068 for (unsigned i = 0; i < length; ++i) { |
| 2046 if (!Table::isEmptyOrDeletedBucket(table[i])) | 2069 if (!Table::isEmptyOrDeletedBucket(table[i])) |
| 2047 table[i].~Value(); | 2070 table[i].~Value(); |
| 2048 } | 2071 } |
| 2049 } | 2072 } |
| 2050 | 2073 |
| 2051 } // namespace blink | 2074 } // namespace blink |
| 2052 | 2075 |
| 2053 #endif // Heap_h | 2076 #endif // Heap_h |
| OLD | NEW |