Index: third_party/WebKit/Source/platform/heap/HeapPage.h
diff --git a/third_party/WebKit/Source/platform/heap/HeapPage.h b/third_party/WebKit/Source/platform/heap/HeapPage.h
index 7dc892e57fb8ffac840fc1c919009ae1dde9b9d1..95151dbbb4f1dc94398f57f0bb8708e2a37308ee 100644
--- a/third_party/WebKit/Source/platform/heap/HeapPage.h
+++ b/third_party/WebKit/Source/platform/heap/HeapPage.h
@@ -116,12 +116,6 @@ const uint8_t reuseForbiddenZapValue = 0x2c;
#define CHECK_MEMORY_INACCESSIBLE(address, size) do { } while (false)
#endif
-#if !ENABLE(ASSERT) && CPU(64BIT)
-#define USE_4BYTE_HEADER_PADDING 1
-#else
-#define USE_4BYTE_HEADER_PADDING 0
-#endif
-
class CallbackStack;
class FreePagePool;
class NormalPageHeap;
@@ -130,9 +124,10 @@ class PageMemory;
class PageMemoryRegion;
class WebProcessMemoryDump;
-// HeapObjectHeader is 4 byte (32 bit) that has the following layout:
+// HeapObjectHeader has two 4 byte (32 bit) members, and one of them has
+// the following bit field layout:
//
-// | gcInfoIndex (14 bit) | DOM mark bit (1 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit) |
+// | gcInfoIndex (14 bit) | DOM mark bit (1 bit) | size (14 bit) | dead bit (1 bit) | freed bit (1 bit) | mark bit (1 bit)
//
// - For non-large objects, 14 bit is enough for |size| because the blink
// page size is 2^17 byte and each object is guaranteed to be aligned with
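The packed word described above is the uint32_t m_encoded member. As an illustration of how such a 32-bit encoding works, here is a minimal standalone sketch; the mask names and exact bit positions are assumptions made for this example, not the real header mask/shift constants defined elsewhere in HeapPage.h:

    // Sketch only: bit positions follow the layout comment above.
    static const uint32_t markBitMask    = 1u << 0;               // mark bit (1 bit)
    static const uint32_t freedBitMask   = 1u << 1;               // freed bit (1 bit)
    static const uint32_t deadBitMask    = 1u << 2;               // dead bit (1 bit)
    static const uint32_t sizeMask       = ((1u << 14) - 1) << 3; // size (14 bit), bits 3..16
    static const uint32_t domMarkBitMask = 1u << 17;              // DOM mark bit (1 bit)
    static const uint32_t gcInfoShift    = 18;                    // gcInfoIndex (14 bit), bits 18..31

    static uint32_t encodeHeader(size_t size, size_t gcInfoIndex)
    {
        // size is below 2^17 and 8-byte aligned, so its value fits exactly in
        // the 14 bits covered by sizeMask without any shifting.
        return static_cast<uint32_t>(gcInfoIndex << gcInfoShift) | static_cast<uint32_t>(size);
    }

    static size_t decodeSize(uint32_t encoded) { return encoded & sizeMask; }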
@@ -162,17 +157,19 @@ const size_t largeObjectSizeInHeader = 0;
const size_t gcInfoIndexForFreeListHeader = 0;
const size_t nonLargeObjectPageSizeMax = 1 << 17;
+const uint32_t gcGenerationUnchecked = 0;
+const uint32_t gcGenerationForFreeListEntry = 1;
+const uint32_t gcGenerationStart = 2;
+
static_assert(nonLargeObjectPageSizeMax >= blinkPageSize, "max size supported by HeapObjectHeader must at least be blinkPageSize");
class PLATFORM_EXPORT HeapObjectHeader {
public:
// If gcInfoIndex is 0, this header is interpreted as a free list header.
NO_SANITIZE_ADDRESS
- HeapObjectHeader(size_t size, size_t gcInfoIndex)
+ HeapObjectHeader(size_t size, size_t gcInfoIndex, uint32_t generation)
+ : m_gcGeneration(generation)
{
-#if ENABLE(ASSERT)
- m_magic = magic;
-#endif
// sizeof(HeapObjectHeader) must be equal to or smaller than
// allocationGranularity, because HeapObjectHeader is used as a header
// for a freed entry. Given that the smallest entry size is
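Taken together, the three new constants partition the generation values: 0 is reserved as an unchecked sentinel that the constructor never stores, 1 marks headers that belong to free list entries, and real allocations use values starting at 2. A small sketch of how a header's generation could be classified; the enum and helper name here are hypothetical, added only for illustration:

    // Hypothetical helper, not part of this change.
    enum class HeaderState { Unchecked, FreeListEntry, Allocated };

    inline HeaderState classifyGeneration(uint32_t gcGeneration)
    {
        if (gcGeneration == gcGenerationUnchecked)        // 0: never written by the constructor
            return HeaderState::Unchecked;
        if (gcGeneration == gcGenerationForFreeListEntry) // 1: header fronts a FreeListEntry
            return HeaderState::FreeListEntry;
        return HeaderState::Allocated;                    // >= gcGenerationStart (2): live object
    }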
@@ -216,52 +213,30 @@ public:
#if ENABLE(ASSERT)
bool checkHeader() const;
- // Zap magic number with a new magic number that means there was once an
- // object allocated here, but it was freed because nobody marked it during
- // GC.
- void zapMagic();
#endif
+ NO_SANITIZE_ADDRESS
+ uint32_t gcGeneration() const { return m_gcGeneration; }
void finalize(Address, size_t);
static HeapObjectHeader* fromPayload(const void*);
- static const uint16_t magic = 0xfff1;
- static const uint16_t zappedMagic = 0x4321;
-
private:
uint32_t m_encoded;
-#if ENABLE(ASSERT)
- uint16_t m_magic;
-#endif
-
- // In 64 bit architectures, we intentionally add 4 byte padding immediately
- // after the HeapHeaderObject. This is because:
- //
- // | HeapHeaderObject (4 byte) | padding (4 byte) | object payload (8 * n byte) |
- // ^8 byte aligned                                  ^8 byte aligned
- //
- // is better than:
- //
- // | HeapHeaderObject (4 byte) | object payload (8 * n byte) | padding (4 byte) |
- // ^4 byte aligned             ^8 byte aligned               ^4 byte aligned
- //
- // since the former layout aligns both header and payload to 8 byte.
-#if USE_4BYTE_HEADER_PADDING
-public:
- uint32_t m_padding;
-#endif
+ // m_gcGeneration records the GC generation (cycle count) in which the object
+ // was allocated. gcGenerationForFreeListEntry indicates that the entry has
+ // already been freed.
+ uint32_t m_gcGeneration;
};
class FreeListEntry final : public HeapObjectHeader {
public:
NO_SANITIZE_ADDRESS
explicit FreeListEntry(size_t size)
- : HeapObjectHeader(size, gcInfoIndexForFreeListHeader)
+ : HeapObjectHeader(size, gcInfoIndexForFreeListHeader, gcGenerationForFreeListEntry)
, m_next(nullptr)
{
#if ENABLE(ASSERT)
ASSERT(size >= sizeof(HeapObjectHeader));
- zapMagic();
#endif
}
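This hunk replaces the magic/zappedMagic scheme with the generation field: when a slot is recycled, the FreeListEntry constructor rewrites the header with gcGenerationForFreeListEntry, so the information that zapMagic() used to record is now carried by m_gcGeneration. A hedged sketch of what that lets a caller observe; only HeapObjectHeader::gcGeneration() and the constants come from this change, the helper itself is illustrative:

    // Illustrative only: detect that a header's slot has been returned to the
    // free list (the role the zapped magic number used to play).
    inline bool looksFreed(const HeapObjectHeader* header)
    {
        return header->gcGeneration() == gcGenerationForFreeListEntry;
    }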
@@ -483,6 +458,8 @@ public:
NormalPageHeap* heapForNormalPage();
void clearObjectStartBitMap();
+ HeapObjectHeader* findHeaderFromObject(const void*);
+
private:
HeapObjectHeader* findHeaderFromAddress(Address);
void populateObjectStartBitMap();
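findHeaderFromObject() is promoted to the public interface, taking an arbitrary const void* rather than an Address. A hedged usage sketch; the surrounding setup (a valid NormalPage and a pointer into its payload area) is assumed for illustration:

    // Illustrative only: recover the owning header for a pointer into a page's
    // payload area, then read its generation.
    inline uint32_t generationForObject(NormalPage* page, const void* object)
    {
        HeapObjectHeader* header = page->findHeaderFromObject(object);
        return header->gcGeneration();
    }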
@@ -707,7 +684,7 @@ public:
#endif
void takeFreelistSnapshot(const String& dumpBaseName) override;
- Address allocateObject(size_t allocationSize, size_t gcInfoIndex);
+ Address allocateObject(size_t allocationSize, size_t gcInfoIndex, uint32_t generation);
void freePage(NormalPage*);
@@ -787,7 +764,9 @@ size_t HeapObjectHeader::size() const
NO_SANITIZE_ADDRESS inline
bool HeapObjectHeader::checkHeader() const
{
- return !pageFromObject(this)->orphaned() && m_magic == magic;
+ ASSERT(isFree() == (m_gcGeneration == gcGenerationForFreeListEntry));
+ ASSERT(m_gcGeneration != gcGenerationUnchecked);
+ return !pageFromObject(this)->orphaned();
}
#endif
@@ -859,14 +838,14 @@ void HeapObjectHeader::markDead()
m_encoded |= headerDeadBitMask;
}
-inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex)
+inline Address NormalPageHeap::allocateObject(size_t allocationSize, size_t gcInfoIndex, uint32_t gcGeneration)
{
if (LIKELY(allocationSize <= m_remainingAllocationSize)) {
Address headerAddress = m_currentAllocationPoint;
m_currentAllocationPoint += allocationSize;
m_remainingAllocationSize -= allocationSize;
ASSERT(gcInfoIndex > 0);
- new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex);
+ new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex, gcGeneration);
Address result = headerAddress + sizeof(HeapObjectHeader);
ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
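The bump-pointer fast path now threads the extra generation argument straight into the placement-new of the header. A hypothetical call site, to show how the parameter is intended to flow; how the current GC cycle counter is obtained is not shown in this hunk, so currentGeneration below is just an assumed input:

    // Illustrative only. allocationSize is assumed to already include
    // sizeof(HeapObjectHeader) and be rounded to allocationGranularity.
    inline Address allocateWithGeneration(NormalPageHeap* heap, size_t allocationSize,
                                          size_t gcInfoIndex, uint32_t currentGeneration)
    {
        ASSERT(currentGeneration >= gcGenerationStart);
        return heap->allocateObject(allocationSize, gcInfoIndex, currentGeneration);
    }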