Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 */ | 29 */ |
| 30 | 30 |
| 31 #ifndef HeapPage_h | 31 #ifndef HeapPage_h |
| 32 #define HeapPage_h | 32 #define HeapPage_h |
| 33 | 33 |
| 34 #include <stdint.h> | |
| 34 #include "base/trace_event/memory_allocator_dump.h" | 35 #include "base/trace_event/memory_allocator_dump.h" |
| 35 #include "platform/PlatformExport.h" | 36 #include "platform/PlatformExport.h" |
| 36 #include "platform/heap/BlinkGC.h" | 37 #include "platform/heap/BlinkGC.h" |
| 37 #include "platform/heap/GCInfo.h" | 38 #include "platform/heap/GCInfo.h" |
| 38 #include "platform/heap/ThreadState.h" | 39 #include "platform/heap/ThreadState.h" |
| 39 #include "platform/heap/Visitor.h" | 40 #include "platform/heap/Visitor.h" |
| 40 #include "wtf/AddressSanitizer.h" | 41 #include "wtf/AddressSanitizer.h" |
| 41 #include "wtf/Allocator.h" | 42 #include "wtf/Allocator.h" |
| 42 #include "wtf/Assertions.h" | 43 #include "wtf/Assertions.h" |
| 43 #include "wtf/ContainerAnnotations.h" | 44 #include "wtf/ContainerAnnotations.h" |
| 44 #include "wtf/Forward.h" | 45 #include "wtf/Forward.h" |
| 45 #include "wtf/allocator/Partitions.h" | 46 #include "wtf/allocator/Partitions.h" |
| 46 #include <stdint.h> | |
| 47 | 47 |
| 48 namespace blink { | 48 namespace blink { |
| 49 | 49 |
| 50 const size_t blinkPageSizeLog2 = 17; | 50 const size_t blinkPageSizeLog2 = 17; |
| 51 const size_t blinkPageSize = 1 << blinkPageSizeLog2; | 51 const size_t blinkPageSize = 1 << blinkPageSizeLog2; |
| 52 const size_t blinkPageOffsetMask = blinkPageSize - 1; | 52 const size_t blinkPageOffsetMask = blinkPageSize - 1; |
| 53 const size_t blinkPageBaseMask = ~blinkPageOffsetMask; | 53 const size_t blinkPageBaseMask = ~blinkPageOffsetMask; |
| 54 | 54 |
| 55 // We allocate pages at random addresses but in groups of | 55 // We allocate pages at random addresses but in groups of |
| 56 // blinkPagesPerRegion at a given random address. We group pages to | 56 // blinkPagesPerRegion at a given random address. We group pages to |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 117 #else | 117 #else |
| 118 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) | 118 #define SET_MEMORY_INACCESSIBLE(address, size) memset((address), 0, (size)) |
| 119 #define SET_MEMORY_ACCESSIBLE(address, size) \ | 119 #define SET_MEMORY_ACCESSIBLE(address, size) \ |
| 120 do { \ | 120 do { \ |
| 121 } while (false) | 121 } while (false) |
| 122 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ | 122 #define CHECK_MEMORY_INACCESSIBLE(address, size) \ |
| 123 do { \ | 123 do { \ |
| 124 } while (false) | 124 } while (false) |
| 125 #endif | 125 #endif |
| 126 | 126 |
| 127 #if !DCHECK_IS_ON() && CPU(64BIT) | |
| 128 #define USE_4BYTE_HEADER_PADDING 1 | |
| 129 #else | |
| 130 #define USE_4BYTE_HEADER_PADDING 0 | |
| 131 #endif | |
| 132 | |
| 133 class NormalPageArena; | 127 class NormalPageArena; |
| 134 class PageMemory; | 128 class PageMemory; |
| 135 | 129 |
| 136 // HeapObjectHeader is 4 byte (32 bit) that has the following layout: | 130 // HeapObjectHeader is 4 byte (32 bit) that has the following layout: |
| 137 // | 131 // |
| 138 // | gcInfoIndex (14 bit) | | 132 // | gcInfoIndex (14 bit) | |
| 139 // | DOM mark bit (1 bit) | | 133 // | DOM mark bit (1 bit) | |
| 140 // | size (14 bit) | | 134 // | size (14 bit) | |
| 141 // | dead bit (1 bit) | | 135 // | dead bit (1 bit) | |
| 142 // | freed bit (1 bit) | | 136 // | freed bit (1 bit) | |
| (...skipping 29 matching lines...) Expand all Loading... | |
| 172 nonLargeObjectPageSizeMax >= blinkPageSize, | 166 nonLargeObjectPageSizeMax >= blinkPageSize, |
| 173 "max size supported by HeapObjectHeader must at least be blinkPageSize"); | 167 "max size supported by HeapObjectHeader must at least be blinkPageSize"); |
| 174 | 168 |
| 175 class PLATFORM_EXPORT HeapObjectHeader { | 169 class PLATFORM_EXPORT HeapObjectHeader { |
| 176 DISALLOW_NEW_EXCEPT_PLACEMENT_NEW(); | 170 DISALLOW_NEW_EXCEPT_PLACEMENT_NEW(); |
| 177 | 171 |
| 178 public: | 172 public: |
| 179 // If gcInfoIndex is 0, this header is interpreted as a free list header. | 173 // If gcInfoIndex is 0, this header is interpreted as a free list header. |
| 180 NO_SANITIZE_ADDRESS | 174 NO_SANITIZE_ADDRESS |
| 181 HeapObjectHeader(size_t size, size_t gcInfoIndex) { | 175 HeapObjectHeader(size_t size, size_t gcInfoIndex) { |
| 182 #if DCHECK_IS_ON() | 176 m_magic = getMagic(); |
| 183 m_magic = magic; | |
| 184 #endif | |
| 185 // sizeof(HeapObjectHeader) must be equal to or smaller than | 177 // sizeof(HeapObjectHeader) must be equal to or smaller than |
| 186 // allocationGranularity, because HeapObjectHeader is used as a header | 178 // allocationGranularity, because HeapObjectHeader is used as a header |
| 187 // for a freed entry. Given that the smallest entry size is | 179 // for a freed entry. Given that the smallest entry size is |
| 188 // allocationGranularity, HeapObjectHeader must fit into the size. | 180 // allocationGranularity, HeapObjectHeader must fit into the size. |
| 189 static_assert( | 181 static_assert( |
| 190 sizeof(HeapObjectHeader) <= allocationGranularity, | 182 sizeof(HeapObjectHeader) <= allocationGranularity, |
| 191 "size of HeapObjectHeader must be smaller than allocationGranularity"); | 183 "size of HeapObjectHeader must be smaller than allocationGranularity"); |
| 192 #if CPU(64BIT) | 184 #if CPU(64BIT) |
|
sof
2017/02/11 06:38:10
To the extent that it is still useful, this static
| |
| 193 static_assert(sizeof(HeapObjectHeader) == 8, | 185 static_assert(sizeof(HeapObjectHeader) == 8, |
| 194 "size of HeapObjectHeader must be 8 byte aligned"); | 186 "size of HeapObjectHeader must be 8 byte aligned"); |
| 195 #endif | 187 #endif |
| 196 | 188 |
| 197 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); | 189 ASSERT(gcInfoIndex < GCInfoTable::maxIndex); |
| 198 ASSERT(size < nonLargeObjectPageSizeMax); | 190 ASSERT(size < nonLargeObjectPageSizeMax); |
| 199 ASSERT(!(size & allocationMask)); | 191 ASSERT(!(size & allocationMask)); |
| 200 m_encoded = static_cast<uint32_t>( | 192 m_encoded = static_cast<uint32_t>( |
| 201 (gcInfoIndex << headerGCInfoIndexShift) | size | | 193 (gcInfoIndex << headerGCInfoIndexShift) | size | |
| 202 (gcInfoIndex == gcInfoIndexForFreeListHeader ? headerFreedBitMask : 0)); | 194 (gcInfoIndex == gcInfoIndexForFreeListHeader ? headerFreedBitMask : 0)); |
| (...skipping 23 matching lines...) Expand all Loading... | |
| 226 void markWrapperHeader(); | 218 void markWrapperHeader(); |
| 227 void unmarkWrapperHeader(); | 219 void unmarkWrapperHeader(); |
| 228 bool isMarked() const; | 220 bool isMarked() const; |
| 229 void mark(); | 221 void mark(); |
| 230 void unmark(); | 222 void unmark(); |
| 231 | 223 |
| 232 Address payload(); | 224 Address payload(); |
| 233 size_t payloadSize(); | 225 size_t payloadSize(); |
| 234 Address payloadEnd(); | 226 Address payloadEnd(); |
| 235 | 227 |
| 236 #if DCHECK_IS_ON() | 228 // TODO(633030): Make |checkHeader| and |zapMagic| private. This class should |
| 229 // manage its integrity on its own, without requiring outside callers to | |
| 230 // explicitly check. | |
| 237 bool checkHeader() const; | 231 bool checkHeader() const; |
| 238 // Zap magic number with a new magic number that means there was once an | 232 // Zap magic number with a new magic number that means there was once an |
| 239 // object allocated here, but it was freed because nobody marked it during | 233 // object allocated here, but it was freed because nobody marked it during |
| 240 // GC. | 234 // GC. |
| 241 void zapMagic(); | 235 void zapMagic(); |
| 242 #endif | |
| 243 | 236 |
| 244 void finalize(Address, size_t); | 237 void finalize(Address, size_t); |
| 245 static HeapObjectHeader* fromPayload(const void*); | 238 static HeapObjectHeader* fromPayload(const void*); |
| 246 | 239 |
| 247 static const uint16_t magic = 0xfff1; | 240 static const uint32_t zappedMagic = 0xDEAD4321; |
| 248 static const uint16_t zappedMagic = 0x4321; | |
| 249 | 241 |
| 250 private: | 242 private: |
| 251 uint32_t m_encoded; | 243 // Returns a random value. |
|
sof
2017/02/11 06:38:10
Just a syntactic issue -- could you declare getMag
palmer
2017/02/14 22:49:50
Done.
| |
| 252 #if DCHECK_IS_ON() | 244 // |
| 253 uint16_t m_magic; | 245 // The implementation gets its randomness from the locations of 2 independent |
| 246 // sources of address space layout randomization: a function in a Chrome | |
| 247 // executable image, and a function in an external DLL/so. This implementation | |
| 248 // should be fast and small, and should have the benefit of requiring | |
| 249 // attackers to discover and use 2 independent weak infoleak bugs, or 1 | |
| 250 // arbitrary infoleak bug (used twice). | |
| 251 uint32_t getMagic() const { | |
| 252 const uintptr_t random1 = | |
| 253 ~(reinterpret_cast<uintptr_t>( | |
| 254 base::trace_event::MemoryAllocatorDump::kNameSize) >> | |
| 255 16); | |
| 256 | |
| 257 #if OS(WIN) | |
| 258 const uintptr_t random2 = ~(reinterpret_cast<uintptr_t>(::ReadFile) << 16); | |
| 259 #elif OS(POSIX) | |
| 260 const uintptr_t random2 = ~(reinterpret_cast<uintptr_t>(::read) << 16); | |
| 261 #else | |
| 262 #error OS not supported | |
| 254 #endif | 263 #endif |
| 255 | 264 |
| 256 // In 64 bit architectures, we intentionally add 4 byte padding immediately | 265 #if CPU(64BIT) |
| 257 // after the HeapObjectHeader. This is because: | 266 static_assert(sizeof(uintptr_t) == sizeof(uint64_t), |
| 258 // | 267 "uintptr_t is not uint64_t"); |
| 259 // | HeapObjectHeader (4 byte) | <- 8 byte aligned | 268 const uint32_t random = static_cast<uint32_t>( |
| 260 // | padding (4 byte) | | 269 (random1 & 0x0FFFFULL) | ((random2 >> 32) & 0x0FFFF0000ULL)); |
| 261 // | object payload (8 * n byte) | <- 8 byte aligned | 270 #elif CPU(32BIT) |
| 262 // | 271 static_assert(sizeof(uintptr_t) == sizeof(uint32_t), |
| 263 // is better than: | 272 "uintptr_t is not uint32_t"); |
| 264 // | 273 const uint32_t random = (random1 & 0x0FFFFUL) | (random2 & 0xFFFF0000UL); |
| 265 // | HeapObjectHeader (4 byte) | <- 4 byte aligned | 274 #else |
| 266 // | object payload (8 * n byte) | <- 8 byte aligned | 275 #error architecture not supported |
| 267 // | padding (4 byte) | <- 4 byte aligned | |
| 268 // | |
| 269 // since the former layout aligns both header and payload to 8 byte. | |
| 270 #if USE_4BYTE_HEADER_PADDING | |
| 271 public: | |
| 272 uint32_t m_padding; | |
| 273 #endif | 276 #endif |
| 277 | |
| 278 return random; | |
| 279 } | |
| 280 | |
| 281 uint32_t m_magic; | |
| 282 uint32_t m_encoded; | |
| 274 }; | 283 }; |
| 275 | 284 |
| 276 class FreeListEntry final : public HeapObjectHeader { | 285 class FreeListEntry final : public HeapObjectHeader { |
| 277 public: | 286 public: |
| 278 NO_SANITIZE_ADDRESS | 287 NO_SANITIZE_ADDRESS |
| 279 explicit FreeListEntry(size_t size) | 288 explicit FreeListEntry(size_t size) |
| 280 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader), m_next(nullptr) { | 289 : HeapObjectHeader(size, gcInfoIndexForFreeListHeader), m_next(nullptr) { |
| 281 #if DCHECK_IS_ON() | 290 #if DCHECK_IS_ON() |
| 282 ASSERT(size >= sizeof(HeapObjectHeader)); | 291 ASSERT(size >= sizeof(HeapObjectHeader)); |
| 283 zapMagic(); | 292 zapMagic(); |
| (...skipping 565 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 849 NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const { | 858 NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const { |
| 850 size_t result = m_encoded & headerSizeMask; | 859 size_t result = m_encoded & headerSizeMask; |
| 851 // Large objects should not refer to header->size(). | 860 // Large objects should not refer to header->size(). |
| 852 // The actual size of a large object is stored in | 861 // The actual size of a large object is stored in |
| 853 // LargeObjectPage::m_payloadSize. | 862 // LargeObjectPage::m_payloadSize. |
| 854 ASSERT(result != largeObjectSizeInHeader); | 863 ASSERT(result != largeObjectSizeInHeader); |
| 855 ASSERT(!pageFromObject(this)->isLargeObjectPage()); | 864 ASSERT(!pageFromObject(this)->isLargeObjectPage()); |
| 856 return result; | 865 return result; |
| 857 } | 866 } |
| 858 | 867 |
| 859 #if DCHECK_IS_ON() | |
| 860 NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::checkHeader() const { | 868 NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::checkHeader() const { |
| 861 return m_magic == magic; | 869 return m_magic == getMagic(); |
| 862 } | 870 } |
| 863 #endif | |
| 864 | 871 |
| 865 inline Address HeapObjectHeader::payload() { | 872 inline Address HeapObjectHeader::payload() { |
| 866 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); | 873 return reinterpret_cast<Address>(this) + sizeof(HeapObjectHeader); |
| 867 } | 874 } |
| 868 | 875 |
| 869 inline Address HeapObjectHeader::payloadEnd() { | 876 inline Address HeapObjectHeader::payloadEnd() { |
| 870 return reinterpret_cast<Address>(this) + size(); | 877 return reinterpret_cast<Address>(this) + size(); |
| 871 } | 878 } |
| 872 | 879 |
| 873 NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::payloadSize() { | 880 NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::payloadSize() { |
| (...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 941 return outOfLineAllocate(allocationSize, gcInfoIndex); | 948 return outOfLineAllocate(allocationSize, gcInfoIndex); |
| 942 } | 949 } |
| 943 | 950 |
| 944 inline NormalPageArena* NormalPage::arenaForNormalPage() const { | 951 inline NormalPageArena* NormalPage::arenaForNormalPage() const { |
| 945 return static_cast<NormalPageArena*>(arena()); | 952 return static_cast<NormalPageArena*>(arena()); |
| 946 } | 953 } |
| 947 | 954 |
| 948 } // namespace blink | 955 } // namespace blink |
| 949 | 956 |
| 950 #endif // HeapPage_h | 957 #endif // HeapPage_h |
| OLD | NEW |