OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 31 matching lines...) |
42 // | 42 // |
43 // A JS heap consists of a young generation, an old generation, and a large | 43 // A JS heap consists of a young generation, an old generation, and a large |
44 // object space. The young generation is divided into two semispaces. A | 44 // object space. The young generation is divided into two semispaces. A |
45 // scavenger implements Cheney's copying algorithm. The old generation is | 45 // scavenger implements Cheney's copying algorithm. The old generation is |
46 // separated into a map space and an old object space. The map space contains | 46 // separated into a map space and an old object space. The map space contains |
47 // all (and only) map objects, the rest of old objects go into the old space. | 47 // all (and only) map objects, the rest of old objects go into the old space. |
48 // The old generation is collected by a mark-sweep-compact collector. | 48 // The old generation is collected by a mark-sweep-compact collector. |
49 // | 49 // |
50 // The semispaces of the young generation are contiguous. The old and map | 50 // The semispaces of the young generation are contiguous. The old and map |
51 // spaces consist of a list of pages. A page has a page header and an object | 51 // spaces consist of a list of pages. A page has a page header and an object |
52 // area. A page size is deliberately chosen as 8K bytes. | 52 // area. |
53 // The first word of a page is an opaque page header that has the | |
54 // address of the next page and its ownership information. The second word may | |
55 // have the allocation top address of this page. Heap objects are aligned to the | |
56 // pointer size. | |
57 // | 53 // |
58 // There is a separate large object space for objects larger than | 54 // There is a separate large object space for objects larger than |
59 // Page::kMaxHeapObjectSize, so that they do not have to move during | 55 // Page::kMaxHeapObjectSize, so that they do not have to move during |
60 // collection. The large object space is paged. Pages in large object space | 56 // collection. The large object space is paged. Pages in large object space |
61 // may be larger than 8K. | 57 // may be larger than the page size. |
62 // | 58 // |
63 // A card marking write barrier is used to keep track of intergenerational | 59 // A store-buffer-based write barrier is used to keep track of intergenerational |
64 // references. Old space pages are divided into regions of Page::kRegionSize | 60 // references. See store-buffer.h. |
65 // size. Each region has a corresponding dirty bit in the page header which is | |
66 // set if the region might contain pointers to new space. For details about | |
67 // dirty bits encoding see comments in the Page::GetRegionNumberForAddress() | |
68 // method body. | |
69 // | 61 // |
70 // During scavenges and mark-sweep collections we iterate intergenerational | 62 // During scavenges and mark-sweep collections we sometimes (after a store |
71 // pointers without decoding heap object maps so if the page belongs to old | 63 // buffer overflow) iterate intergenerational pointers without decoding heap |
72 // pointer space or large object space it is essential to guarantee that | 64 // object maps, so if the page belongs to old pointer space or large object |
73 // the page does not contain any garbage pointers to new space: every pointer | 65 // space it is essential to guarantee that the page does not contain any |
74 // aligned word which satisfies the Heap::InNewSpace() predicate must be a | 66 // garbage pointers to new space: every pointer aligned word which satisfies |
75 // pointer to a live heap object in new space. Thus objects in old pointer | 67 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in |
76 // and large object spaces should have a special layout (e.g. no bare integer | 68 // new space. Thus objects in old pointer and large object spaces should have a |
77 // fields). This requirement does not apply to map space which is iterated in | 69 // special layout (e.g. no bare integer fields). This requirement does not |
78 // a special fashion. However we still require pointer fields of dead maps to | 70 // apply to map space which is iterated in a special fashion. However we still |
79 // be cleaned. | 71 // require pointer fields of dead maps to be cleaned. |
80 // | 72 // |
81 // To enable lazy cleaning of old space pages we use a notion of allocation | 73 // To enable lazy cleaning of old space pages we can mark chunks of the page |
82 // watermark. Every pointer under watermark is considered to be well formed. | 74 // as being garbage. Garbage sections are marked with a special map. These |
83 // Page allocation watermark is not necessarily equal to page allocation top but | 75 // sections are skipped when scanning the page, even if we are otherwise |
84 // all alive objects on page should reside under allocation watermark. | 76 // scanning without regard for object boundaries. Garbage sections are chained |
85 // During scavenge allocation watermark might be bumped and invalid pointers | 77 // together to form a free list after a GC. Garbage sections created outside |
86 // might appear below it. To avoid following them we store a valid watermark | 78 // of GCs by object truncation etc. may not be in the free list chain. Very |
87 // into special field in the page header and set a page WATERMARK_INVALIDATED | 79 // small free spaces are ignored; they need only be cleaned of bogus pointers |
88 // flag. For details see comments in the Page::SetAllocationWatermark() method | 80 // into new space. |
89 // body. | |
90 // | 81 // |
| 82 // Each page may have up to one special garbage section. The start of this |
| 83 // section is denoted by the top field in the space. The end of the section |
| 84 // is denoted by the limit field in the space. This special garbage section |
| 85 // is not marked with a free space map in the data. The point of this section |
| 86 // is to enable linear allocation without having to constantly update the byte |
| 87 // array every time the top field is updated and a new object is created. The |
| 88 // special garbage section is not in the chain of garbage sections. |
| 89 // |
| 90 // Since the top and limit fields are in the space, not the page, only one page |
| 91 // has a special garbage section, and if the top and limit are equal then there |
| 92 // is no special garbage section. |
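To make the description concrete, here is a minimal sketch of the bump-pointer fast path that the top/limit pair enables (a hypothetical helper, not part of this patch; V8's actual allocation path lives in the spaces implementation):

    // Linear allocation inside the special garbage section [top, limit).
    static Address AllocateLinearly(Address* top, Address limit, int size_in_bytes) {
      Address current = *top;
      if (current + size_in_bytes > limit) return NULL;  // overflow: fall back to the free list
      *top = current + size_in_bytes;  // bump top; no free-space map write needed
      return current;
    }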
91 | 93 |
92 // Some assertion macros used in the debugging mode. | 94 // Some assertion macros used in the debugging mode. |
93 | 95 |
94 #define ASSERT_PAGE_ALIGNED(address) \ | 96 #define ASSERT_PAGE_ALIGNED(address) \ |
95 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) | 97 ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0) |
96 | 98 |
97 #define ASSERT_OBJECT_ALIGNED(address) \ | 99 #define ASSERT_OBJECT_ALIGNED(address) \ |
98 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) | 100 ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0) |
99 | 101 |
100 #define ASSERT_MAP_ALIGNED(address) \ | 102 #define ASSERT_MAP_ALIGNED(address) \ |
101 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0) | 103 ASSERT((OffsetFrom(address) & kMapAlignmentMask) == 0) |
102 | 104 |
103 #define ASSERT_OBJECT_SIZE(size) \ | 105 #define ASSERT_OBJECT_SIZE(size) \ |
104 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize)) | 106 ASSERT((0 < size) && (size <= Page::kMaxHeapObjectSize)) |
105 | 107 |
106 #define ASSERT_PAGE_OFFSET(offset) \ | 108 #define ASSERT_PAGE_OFFSET(offset) \ |
107 ASSERT((Page::kObjectStartOffset <= offset) \ | 109 ASSERT((Page::kObjectStartOffset <= offset) \ |
108 && (offset <= Page::kPageSize)) | 110 && (offset <= Page::kPageSize)) |
109 | 111 |
110 #define ASSERT_MAP_PAGE_INDEX(index) \ | 112 #define ASSERT_MAP_PAGE_INDEX(index) \ |
111 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) | 113 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) |
112 | 114 |
113 | 115 |
114 class PagedSpace; | 116 class PagedSpace; |
115 class MemoryAllocator; | 117 class MemoryAllocator; |
116 class AllocationInfo; | 118 class AllocationInfo; |
| 119 class Space; |
| 120 class FreeList; |
| 121 class MemoryChunk; |
| 122 |
| 123 class MarkBit { |
| 124 public: |
| 125 typedef uint32_t CellType; |
| 126 |
| 127 inline MarkBit(CellType* cell, CellType mask, bool data_only) |
| 128 : cell_(cell), mask_(mask), data_only_(data_only) { } |
| 129 |
| 130 inline CellType* cell() { return cell_; } |
| 131 inline CellType mask() { return mask_; } |
| 132 |
| 133 #ifdef DEBUG |
| 134 bool operator==(const MarkBit& other) { |
| 135 return cell_ == other.cell_ && mask_ == other.mask_; |
| 136 } |
| 137 #endif |
| 138 |
| 139 inline void Set() { *cell_ |= mask_; } |
| 140 inline bool Get() { return (*cell_ & mask_) != 0; } |
| 141 inline void Clear() { *cell_ &= ~mask_; } |
| 142 |
| 143 inline bool data_only() { return data_only_; } |
| 144 |
| 145 inline MarkBit Next() { |
| 146 CellType new_mask = mask_ << 1; |
| 147 if (new_mask == 0) { |
| 148 return MarkBit(cell_ + 1, 1, data_only_); |
| 149 } else { |
| 150 return MarkBit(cell_, new_mask, data_only_); |
| 151 } |
| 152 } |
| 153 |
| 154 private: |
| 155 CellType* cell_; |
| 156 CellType mask_; |
| 157 // This boolean indicates that the object is in a data-only space with no |
| 158 // pointers. This enables some optimizations when marking. |
| 159 // It is expected that this field is inlined and turned into control flow |
| 160 // at the place where the MarkBit object is created. |
| 161 bool data_only_; |
| 162 }; |
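A small standalone sketch of how MarkBit's Next() crosses a cell boundary (illustrative values, not part of this patch):

    MarkBit::CellType cells[2] = { 0, 0 };
    MarkBit bit(&cells[0], 1u << 31, false);  // highest bit of the first cell
    bit.Set();                                // cells[0] == 0x80000000
    MarkBit next = bit.Next();                // mask overflows, so: cells + 1, mask 1
    next.Set();                               // cells[1] == 1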
| 163 |
| 164 |
| 165 // Bitmap is a sequence of cells, each containing a fixed number of bits. |
| 166 class Bitmap { |
| 167 public: |
| 168 static const uint32_t kBitsPerCell = 32; |
| 169 static const uint32_t kBitsPerCellLog2 = 5; |
| 170 static const uint32_t kBitIndexMask = kBitsPerCell - 1; |
| 171 static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte; |
| 172 static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2; |
| 173 |
| 174 static const size_t kLength = |
| 175 (1 << kPageSizeBits) >> (kPointerSizeLog2); |
| 176 |
| 177 static const size_t kSize = |
| 178 (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2); |
| 179 |
| 180 |
| 181 static int CellsForLength(int length) { |
| 182 return (length + kBitsPerCell - 1) >> kBitsPerCellLog2; |
| 183 } |
| 184 |
| 185 int CellsCount() { |
| 186 return CellsForLength(kLength); |
| 187 } |
| 188 |
| 189 static int SizeFor(int cells_count) { |
| 190 return sizeof(MarkBit::CellType)*cells_count; |
| 191 } |
| 192 |
| 193 INLINE(static uint32_t IndexToCell(uint32_t index)) { |
| 194 return index >> kBitsPerCellLog2; |
| 195 } |
| 196 |
| 197 INLINE(static uint32_t CellToIndex(uint32_t index)) { |
| 198 return index << kBitsPerCellLog2; |
| 199 } |
| 200 |
| 201 INLINE(static uint32_t CellAlignIndex(uint32_t index)) { |
| 202 return (index + kBitIndexMask) & ~kBitIndexMask; |
| 203 } |
| 204 |
| 205 INLINE(MarkBit::CellType* cells()) { |
| 206 return reinterpret_cast<MarkBit::CellType*>(this); |
| 207 } |
| 208 |
| 209 INLINE(Address address()) { |
| 210 return reinterpret_cast<Address>(this); |
| 211 } |
| 212 |
| 213 INLINE(static Bitmap* FromAddress(Address addr)) { |
| 214 return reinterpret_cast<Bitmap*>(addr); |
| 215 } |
| 216 |
| 217 inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) { |
| 218 MarkBit::CellType mask = 1 << (index & kBitIndexMask); |
| 219 MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2); |
| 220 return MarkBit(cell, mask, data_only); |
| 221 } |
| 222 |
| 223 static inline void Clear(MemoryChunk* chunk); |
| 224 |
| 225 static void PrintWord(uint32_t word, uint32_t himask = 0) { |
| 226 for (uint32_t mask = 1; mask != 0; mask <<= 1) { |
| 227 if ((mask & himask) != 0) PrintF("["); |
| 228 PrintF((mask & word) ? "1" : "0"); |
| 229 if ((mask & himask) != 0) PrintF("]"); |
| 230 } |
| 231 } |
| 232 |
| 233 class CellPrinter { |
| 234 public: |
| 235 CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { } |
| 236 |
| 237 void Print(uint32_t pos, uint32_t cell) { |
| 238 if (cell == seq_type) { |
| 239 seq_length++; |
| 240 return; |
| 241 } |
| 242 |
| 243 Flush(); |
| 244 |
| 245 if (IsSeq(cell)) { |
| 246 seq_start = pos; |
| 247 seq_length = 0; |
| 248 seq_type = cell; |
| 249 return; |
| 250 } |
| 251 |
| 252 PrintF("%d: ", pos); |
| 253 PrintWord(cell); |
| 254 PrintF("\n"); |
| 255 } |
| 256 |
| 257 void Flush() { |
| 258 if (seq_length > 0) { |
| 259 PrintF("%d: %dx%d\n", |
| 260 seq_start, |
| 261 seq_type == 0 ? 0 : 1, |
| 262 seq_length * kBitsPerCell); |
| 263 seq_length = 0; |
| 264 } |
| 265 } |
| 266 |
| 267 static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; } |
| 268 private: |
| 269 uint32_t seq_start; |
| 270 uint32_t seq_type; |
| 271 uint32_t seq_length; |
| 272 }; |
| 273 |
| 274 void Print() { |
| 275 CellPrinter printer; |
| 276 for (int i = 0; i < CellsCount(); i++) { |
| 277 printer.Print(i, cells()[i]); |
| 278 } |
| 279 printer.Flush(); |
| 280 PrintF("\n"); |
| 281 } |
| 282 |
| 283 bool IsClean() { |
| 284 for (int i = 0; i < CellsCount(); i++) { |
| 285 if (cells()[i] != 0) return false; |
| 286 } |
| 287 return true; |
| 288 } |
| 289 }; |
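For concreteness, the constants above work out as follows, assuming a 32-bit build (kPointerSizeLog2 == 2, kBitsPerByteLog2 == 3) and the 1MB chunks described earlier (kPageSizeBits == 20); these parameter values are assumptions, not spelled out in this header:

    // kLength = (1 << 20) >> 2       == 262144 mark bits (one per word)
    // kSize   = (1 << 20) >> (2 + 3) == 32768 bytes of bitmap per chunk
    // CellsForLength(262144)         == 262144 >> 5 == 8192 cells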
| 290 |
| 291 |
| 292 class SkipList; |
| 293 class SlotsBuffer; |
| 294 |
| 295 // MemoryChunk represents a memory region owned by a specific space. |
| 296 // It is divided into the header and the body. Chunk start is always |
| 297 // 1MB aligned. Start of the body is aligned so it can accomodate |
| 298 // any heap object. |
| 299 class MemoryChunk { |
| 300 public: |
| 301 // Only works if the pointer is in the first kPageSize of the MemoryChunk. |
| 302 static MemoryChunk* FromAddress(Address a) { |
| 303 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
| 304 } |
| 305 |
| 306 // Only works for addresses in pointer spaces, not data or code spaces. |
| 307 static inline MemoryChunk* FromAnyPointerAddress(Address addr); |
| 308 |
| 309 Address address() { return reinterpret_cast<Address>(this); } |
| 310 |
| 311 bool is_valid() { return address() != NULL; } |
| 312 |
| 313 MemoryChunk* next_chunk() const { return next_chunk_; } |
| 314 MemoryChunk* prev_chunk() const { return prev_chunk_; } |
| 315 |
| 316 void set_next_chunk(MemoryChunk* next) { next_chunk_ = next; } |
| 317 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_ = prev; } |
| 318 |
| 319 Space* owner() const { |
| 320 if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == |
| 321 kFailureTag) { |
| 322 return reinterpret_cast<Space*>(owner_ - kFailureTag); |
| 323 } else { |
| 324 return NULL; |
| 325 } |
| 326 } |
| 327 |
| 328 void set_owner(Space* space) { |
| 329 ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0); |
| 330 owner_ = reinterpret_cast<Address>(space) + kFailureTag; |
| 331 ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) == |
| 332 kFailureTag); |
| 333 } |
| 334 |
| 335 VirtualMemory* reserved_memory() { |
| 336 return &reservation_; |
| 337 } |
| 338 |
| 339 void InitializeReservedMemory() { |
| 340 reservation_.Reset(); |
| 341 } |
| 342 |
| 343 void set_reserved_memory(VirtualMemory* reservation) { |
| 344 ASSERT_NOT_NULL(reservation); |
| 345 reservation_.TakeControl(reservation); |
| 346 } |
| 347 |
| 348 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } |
| 349 void initialize_scan_on_scavenge(bool scan) { |
| 350 if (scan) { |
| 351 SetFlag(SCAN_ON_SCAVENGE); |
| 352 } else { |
| 353 ClearFlag(SCAN_ON_SCAVENGE); |
| 354 } |
| 355 } |
| 356 inline void set_scan_on_scavenge(bool scan); |
| 357 |
| 358 int store_buffer_counter() { return store_buffer_counter_; } |
| 359 void set_store_buffer_counter(int counter) { |
| 360 store_buffer_counter_ = counter; |
| 361 } |
| 362 |
| 363 Address body() { return address() + kObjectStartOffset; } |
| 364 |
| 365 Address body_limit() { return address() + size(); } |
| 366 |
| 367 int body_size() { return static_cast<int>(size() - kObjectStartOffset); } |
| 368 |
| 369 bool Contains(Address addr) { |
| 370 return addr >= body() && addr < address() + size(); |
| 371 } |
| 372 |
| 373 // Checks whether addr can be a limit of addresses in this page. |
| 374 // It's a limit if it's in the page, or if it's just after the |
| 375 // last byte of the page. |
| 376 bool ContainsLimit(Address addr) { |
| 377 return addr >= body() && addr <= address() + size(); |
| 378 } |
| 379 |
| 380 enum MemoryChunkFlags { |
| 381 IS_EXECUTABLE, |
| 382 ABOUT_TO_BE_FREED, |
| 383 POINTERS_TO_HERE_ARE_INTERESTING, |
| 384 POINTERS_FROM_HERE_ARE_INTERESTING, |
| 385 SCAN_ON_SCAVENGE, |
| 386 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. |
| 387 IN_TO_SPACE, // All pages in new space have one of these two set. |
| 388 NEW_SPACE_BELOW_AGE_MARK, |
| 389 CONTAINS_ONLY_DATA, |
| 390 EVACUATION_CANDIDATE, |
| 391 RESCAN_ON_EVACUATION, |
| 392 |
| 393 // Pages swept precisely can be iterated, hitting only the live objects, |
| 394 // whereas those swept conservatively cannot be iterated over. Both flags |
| 395 // indicate that marking bits have been cleared by the sweeper; otherwise |
| 396 // marking bits are still intact. |
| 397 WAS_SWEPT_PRECISELY, |
| 398 WAS_SWEPT_CONSERVATIVELY, |
| 399 |
| 400 // Last flag, keep at bottom. |
| 401 NUM_MEMORY_CHUNK_FLAGS |
| 402 }; |
| 403 |
| 404 |
| 405 static const int kPointersToHereAreInterestingMask = |
| 406 1 << POINTERS_TO_HERE_ARE_INTERESTING; |
| 407 |
| 408 static const int kPointersFromHereAreInterestingMask = |
| 409 1 << POINTERS_FROM_HERE_ARE_INTERESTING; |
| 410 |
| 411 static const int kEvacuationCandidateMask = |
| 412 1 << EVACUATION_CANDIDATE; |
| 413 |
| 414 static const int kSkipEvacuationSlotsRecordingMask = |
| 415 (1 << EVACUATION_CANDIDATE) | |
| 416 (1 << RESCAN_ON_EVACUATION) | |
| 417 (1 << IN_FROM_SPACE) | |
| 418 (1 << IN_TO_SPACE); |
| 419 |
| 420 |
| 421 void SetFlag(int flag) { |
| 422 flags_ |= static_cast<uintptr_t>(1) << flag; |
| 423 } |
| 424 |
| 425 void ClearFlag(int flag) { |
| 426 flags_ &= ~(static_cast<uintptr_t>(1) << flag); |
| 427 } |
| 428 |
| 429 void SetFlagTo(int flag, bool value) { |
| 430 if (value) { |
| 431 SetFlag(flag); |
| 432 } else { |
| 433 ClearFlag(flag); |
| 434 } |
| 435 } |
| 436 |
| 437 bool IsFlagSet(int flag) { |
| 438 return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0; |
| 439 } |
| 440 |
| 441 // Set or clear multiple flags at a time. The flags in the mask |
| 442 // are set to the value in "flags", the rest retain the current value |
| 443 // in flags_. |
| 444 void SetFlags(intptr_t flags, intptr_t mask) { |
| 445 flags_ = (flags_ & ~mask) | (flags & mask); |
| 446 } |
| 447 |
| 448 // Return all current flags. |
| 449 intptr_t GetFlags() { return flags_; } |
| 450 |
| 451 // Manage live byte count (count of bytes known to be live, |
| 452 // because they are marked black). |
| 453 void ResetLiveBytes() { |
| 454 live_byte_count_ = 0; |
| 455 } |
| 456 void IncrementLiveBytes(int by) { |
| 457 live_byte_count_ += by; |
| 458 } |
| 459 int LiveBytes() { return live_byte_count_; } |
| 460 static void IncrementLiveBytes(Address address, int by) { |
| 461 MemoryChunk::FromAddress(address)->IncrementLiveBytes(by); |
| 462 } |
| 463 |
| 464 static const intptr_t kAlignment = |
| 465 (static_cast<uintptr_t>(1) << kPageSizeBits); |
| 466 |
| 467 static const intptr_t kAlignmentMask = kAlignment - 1; |
| 468 |
| 469 static const intptr_t kLiveBytesOffset = |
| 470 kPointerSize + kPointerSize + kPointerSize + kPointerSize + |
| 471 kPointerSize + kPointerSize + kPointerSize + kPointerSize + |
| 472 kIntSize; |
| 473 |
| 474 static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize; |
| 475 |
| 476 static const size_t kHeaderSize = |
| 477 kSlotsBufferOffset + kPointerSize + kPointerSize; |
| 478 |
| 479 static const int kBodyOffset = |
| 480 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize)); |
| 481 |
| 482 // The start offset of the object area in a page. Aligned to both maps and |
| 483 // code alignment to be suitable for both. Also aligned to 32 words because |
| 484 // the marking bitmap is arranged in 32-bit chunks. |
| 485 static const int kObjectStartAlignment = 32 * kPointerSize; |
| 486 static const int kObjectStartOffset = kBodyOffset - 1 + |
| 487 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
| 488 |
| 489 size_t size() const { return size_; } |
| 490 |
| 491 Executability executable() { |
| 492 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 493 } |
| 494 |
| 495 bool ContainsOnlyData() { |
| 496 return IsFlagSet(CONTAINS_ONLY_DATA); |
| 497 } |
| 498 |
| 499 bool InNewSpace() { |
| 500 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; |
| 501 } |
| 502 |
| 503 bool InToSpace() { |
| 504 return IsFlagSet(IN_TO_SPACE); |
| 505 } |
| 506 |
| 507 bool InFromSpace() { |
| 508 return IsFlagSet(IN_FROM_SPACE); |
| 509 } |
| 510 |
| 511 // --------------------------------------------------------------------- |
| 512 // Markbits support |
| 513 |
| 514 inline Bitmap* markbits() { |
| 515 return Bitmap::FromAddress(address() + kHeaderSize); |
| 516 } |
| 517 |
| 518 void PrintMarkbits() { markbits()->Print(); } |
| 519 |
| 520 inline uint32_t AddressToMarkbitIndex(Address addr) { |
| 521 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; |
| 522 } |
| 523 |
| 524 inline static uint32_t FastAddressToMarkbitIndex(Address addr) { |
| 525 const intptr_t offset = |
| 526 reinterpret_cast<intptr_t>(addr) & kAlignmentMask; |
| 527 |
| 528 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; |
| 529 } |
| 530 |
| 531 inline Address MarkbitIndexToAddress(uint32_t index) { |
| 532 return this->address() + (index << kPointerSizeLog2); |
| 533 } |
| 534 |
| 535 void InsertAfter(MemoryChunk* other); |
| 536 void Unlink(); |
| 537 |
| 538 inline Heap* heap() { return heap_; } |
| 539 |
| 540 static const int kFlagsOffset = kPointerSize * 3; |
| 541 |
| 542 bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); } |
| 543 |
| 544 bool ShouldSkipEvacuationSlotRecording() { |
| 545 return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0; |
| 546 } |
| 547 |
| 548 inline SkipList* skip_list() { |
| 549 return skip_list_; |
| 550 } |
| 551 |
| 552 inline void set_skip_list(SkipList* skip_list) { |
| 553 skip_list_ = skip_list; |
| 554 } |
| 555 |
| 556 inline SlotsBuffer* slots_buffer() { |
| 557 return slots_buffer_; |
| 558 } |
| 559 |
| 560 inline SlotsBuffer** slots_buffer_address() { |
| 561 return &slots_buffer_; |
| 562 } |
| 563 |
| 564 void MarkEvacuationCandidate() { |
| 565 ASSERT(slots_buffer_ == NULL); |
| 566 SetFlag(EVACUATION_CANDIDATE); |
| 567 } |
| 568 |
| 569 void ClearEvacuationCandidate() { |
| 570 ASSERT(slots_buffer_ == NULL); |
| 571 ClearFlag(EVACUATION_CANDIDATE); |
| 572 } |
| 573 |
| 574 |
| 575 protected: |
| 576 MemoryChunk* next_chunk_; |
| 577 MemoryChunk* prev_chunk_; |
| 578 size_t size_; |
| 579 intptr_t flags_; |
| 580 // If the chunk needs to remember its memory reservation, it is stored here. |
| 581 VirtualMemory reservation_; |
| 582 // The identity of the owning space. This is tagged as a failure pointer, but |
| 583 // no failure can be in an object, so this can be distinguished from any entry |
| 584 // in a fixed array. |
| 585 Address owner_; |
| 586 Heap* heap_; |
| 587 // Used by the store buffer to keep track of which pages to mark scan-on- |
| 588 // scavenge. |
| 589 int store_buffer_counter_; |
| 590 // Count of bytes marked black on page. |
| 591 int live_byte_count_; |
| 592 SlotsBuffer* slots_buffer_; |
| 593 SkipList* skip_list_; |
| 594 |
| 595 static MemoryChunk* Initialize(Heap* heap, |
| 596 Address base, |
| 597 size_t size, |
| 598 Executability executable, |
| 599 Space* owner); |
| 600 |
| 601 friend class MemoryAllocator; |
| 602 }; |
| 603 |
| 604 STATIC_CHECK(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize); |
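As a sketch of how MemoryChunk, Bitmap, and MarkBit compose, a hypothetical helper (not part of this patch) that locates the mark bit for a heap address using only APIs declared above:

    static MarkBit MarkBitForAddress(Address addr) {
      // Valid for addresses within the first kPageSize of their chunk
      // (see the comment on MemoryChunk::FromAddress above).
      MemoryChunk* chunk = MemoryChunk::FromAddress(addr);   // mask down to chunk start
      uint32_t index = chunk->AddressToMarkbitIndex(addr);   // word index within the chunk
      return chunk->markbits()->MarkBitFromIndex(index);     // cell + mask for that word
    }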
117 | 605 |
118 // ----------------------------------------------------------------------------- | 606 // ----------------------------------------------------------------------------- |
119 // A page normally has 8K bytes. Large object pages may be larger. A page | 607 // A page is a memory chunk of size 1MB. Large object pages may be larger. |
120 // address is always aligned to the 8K page size. | |
121 // | |
122 // Each page starts with a header of Page::kPageHeaderSize size which contains | |
123 // bookkeeping data. | |
124 // | |
125 // The mark-compact collector transforms a map pointer into a page index and a | |
126 // page offset. The exact encoding is described in the comments for | |
127 // class MapWord in objects.h. | |
128 // | 608 // |
129 // The only way to get a page pointer is by calling factory methods: | 609 // The only way to get a page pointer is by calling factory methods: |
130 // Page* p = Page::FromAddress(addr); or | 610 // Page* p = Page::FromAddress(addr); or |
131 // Page* p = Page::FromAllocationTop(top); | 611 // Page* p = Page::FromAllocationTop(top); |
132 class Page { | 612 class Page : public MemoryChunk { |
133 public: | 613 public: |
134 // Returns the page containing a given address. The address ranges | 614 // Returns the page containing a given address. The address ranges |
135 // from [page_addr .. page_addr + kPageSize[ | 615 // from [page_addr .. page_addr + kPageSize[ |
136 // | 616 // This only works if the object is in fact in a page. See also MemoryChunk:: |
137 // Note that this function only works for addresses in normal paged | 617 // FromAddress() and FromAnyPointerAddress(). |
138 // spaces and addresses in the first 8K of large object pages (i.e., | |
139 // the start of large objects but not necessarily derived pointers | |
140 // within them). | |
141 INLINE(static Page* FromAddress(Address a)) { | 618 INLINE(static Page* FromAddress(Address a)) { |
142 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); | 619 return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask); |
143 } | 620 } |
144 | 621 |
145 // Returns the page containing an allocation top. Because an allocation | 622 // Returns the page containing an allocation top. Because an allocation |
146 // top address can be the upper bound of the page, we need to subtract | 623 // top address can be the upper bound of the page, we need to subtract |
147 // kPointerSize from it first. The address ranges from | 624 // kPointerSize from it first. The address ranges from |
148 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. | 625 // [page_addr + kObjectStartOffset .. page_addr + kPageSize]. |
149 INLINE(static Page* FromAllocationTop(Address top)) { | 626 INLINE(static Page* FromAllocationTop(Address top)) { |
150 Page* p = FromAddress(top - kPointerSize); | 627 Page* p = FromAddress(top - kPointerSize); |
151 ASSERT_PAGE_OFFSET(p->Offset(top)); | 628 ASSERT_PAGE_OFFSET(p->Offset(top)); |
152 return p; | 629 return p; |
153 } | 630 } |
154 | 631 |
155 // Returns the start address of this page. | 632 // Returns the next page in the chain of pages owned by a space. |
156 Address address() { return reinterpret_cast<Address>(this); } | |
157 | |
158 // Checks whether this is a valid page address. | |
159 bool is_valid() { return address() != NULL; } | |
160 | |
161 // Returns the next page of this page. | |
162 inline Page* next_page(); | 633 inline Page* next_page(); |
163 | 634 inline Page* prev_page(); |
164 // Return the end of allocation in this page. Undefined for unused pages. | 635 inline void set_next_page(Page* page); |
165 inline Address AllocationTop(); | 636 inline void set_prev_page(Page* page); |
166 | |
167 // Return the allocation watermark for the page. | |
168 // For old space pages it is guaranteed that the area under the watermark | |
169 // does not contain any garbage pointers to new space. | |
170 inline Address AllocationWatermark(); | |
171 | |
172 // Return the allocation watermark offset from the beginning of the page. | |
173 inline uint32_t AllocationWatermarkOffset(); | |
174 | |
175 inline void SetAllocationWatermark(Address allocation_watermark); | |
176 | |
177 inline void SetCachedAllocationWatermark(Address allocation_watermark); | |
178 inline Address CachedAllocationWatermark(); | |
179 | 637 |
180 // Returns the start address of the object area in this page. | 638 // Returns the start address of the object area in this page. |
181 Address ObjectAreaStart() { return address() + kObjectStartOffset; } | 639 Address ObjectAreaStart() { return address() + kObjectStartOffset; } |
182 | 640 |
183 // Returns the end address (exclusive) of the object area in this page. | 641 // Returns the end address (exclusive) of the object area in this page. |
184 Address ObjectAreaEnd() { return address() + Page::kPageSize; } | 642 Address ObjectAreaEnd() { return address() + Page::kPageSize; } |
185 | 643 |
186 // Checks whether an address is page aligned. | 644 // Checks whether an address is page aligned. |
187 static bool IsAlignedToPageSize(Address a) { | 645 static bool IsAlignedToPageSize(Address a) { |
188 return 0 == (OffsetFrom(a) & kPageAlignmentMask); | 646 return 0 == (OffsetFrom(a) & kPageAlignmentMask); |
189 } | 647 } |
190 | 648 |
191 // True if this page was in use before current compaction started. | |
192 // Result is valid only for pages owned by paged spaces and | |
193 // only after PagedSpace::PrepareForMarkCompact was called. | |
194 inline bool WasInUseBeforeMC(); | |
195 | |
196 inline void SetWasInUseBeforeMC(bool was_in_use); | |
197 | |
198 // True if this page is a large object page. | |
199 inline bool IsLargeObjectPage(); | |
200 | |
201 inline void SetIsLargeObjectPage(bool is_large_object_page); | |
202 | |
203 inline Executability PageExecutability(); | |
204 | |
205 inline void SetPageExecutability(Executability executable); | |
206 | |
207 // Returns the offset of a given address to this page. | 649 // Returns the offset of a given address to this page. |
208 INLINE(int Offset(Address a)) { | 650 INLINE(int Offset(Address a)) { |
209 int offset = static_cast<int>(a - address()); | 651 int offset = static_cast<int>(a - address()); |
210 ASSERT_PAGE_OFFSET(offset); | 652 ASSERT_PAGE_OFFSET(offset); |
211 return offset; | 653 return offset; |
212 } | 654 } |
213 | 655 |
214 // Returns the address for a given offset in this page. | 656 // Returns the address for a given offset in this page. |
215 Address OffsetToAddress(int offset) { | 657 Address OffsetToAddress(int offset) { |
216 ASSERT_PAGE_OFFSET(offset); | 658 ASSERT_PAGE_OFFSET(offset); |
217 return address() + offset; | 659 return address() + offset; |
218 } | 660 } |
219 | 661 |
220 // --------------------------------------------------------------------- | 662 // --------------------------------------------------------------------- |
221 // Card marking support | |
222 | |
223 static const uint32_t kAllRegionsCleanMarks = 0x0; | |
224 static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF; | |
225 | |
226 inline uint32_t GetRegionMarks(); | |
227 inline void SetRegionMarks(uint32_t dirty); | |
228 | |
229 inline uint32_t GetRegionMaskForAddress(Address addr); | |
230 inline uint32_t GetRegionMaskForSpan(Address start, int length_in_bytes); | |
231 inline int GetRegionNumberForAddress(Address addr); | |
232 | |
233 inline void MarkRegionDirty(Address addr); | |
234 inline bool IsRegionDirty(Address addr); | |
235 | |
236 inline void ClearRegionMarks(Address start, | |
237 Address end, | |
238 bool reaches_limit); | |
239 | 663 |
240 // Page size in bytes. This must be a multiple of the OS page size. | 664 // Page size in bytes. This must be a multiple of the OS page size. |
241 static const int kPageSize = 1 << kPageSizeBits; | 665 static const int kPageSize = 1 << kPageSizeBits; |
242 | 666 |
243 // Page size mask. | 667 // Page size mask. |
244 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; | 668 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; |
245 | 669 |
246 static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize + | |
247 kIntSize + kPointerSize + kPointerSize; | |
248 | |
249 // The start offset of the object area in a page. Aligned to both maps and | |
250 // code alignment to be suitable for both. | |
251 static const int kObjectStartOffset = | |
252 CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kPageHeaderSize)); | |
253 | |
254 // Object area size in bytes. | 670 // Object area size in bytes. |
255 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; | 671 static const int kObjectAreaSize = kPageSize - kObjectStartOffset; |
256 | 672 |
257 // Maximum object size that fits in a page. | 673 // Maximum object size that fits in a page. |
258 static const int kMaxHeapObjectSize = kObjectAreaSize; | 674 static const int kMaxHeapObjectSize = kObjectAreaSize; |
259 | 675 |
260 static const int kDirtyFlagOffset = 2 * kPointerSize; | 676 static const int kFirstUsedCell = |
261 static const int kRegionSizeLog2 = 8; | 677 (kObjectStartOffset/kPointerSize) >> Bitmap::kBitsPerCellLog2; |
262 static const int kRegionSize = 1 << kRegionSizeLog2; | |
263 static const intptr_t kRegionAlignmentMask = (kRegionSize - 1); | |
264 | 678 |
265 STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt); | 679 static const int kLastUsedCell = |
266 | 680 ((kPageSize - kPointerSize)/kPointerSize) >> |
267 enum PageFlag { | 681 Bitmap::kBitsPerCellLog2; |
268 IS_NORMAL_PAGE = 0, | |
269 WAS_IN_USE_BEFORE_MC, | |
270 | |
271 // Page allocation watermark was bumped by preallocation during scavenge. | |
272 // Correct watermark can be retrieved by CachedAllocationWatermark() method | |
273 WATERMARK_INVALIDATED, | |
274 IS_EXECUTABLE, | |
275 NUM_PAGE_FLAGS // Must be last | |
276 }; | |
277 static const int kPageFlagMask = (1 << NUM_PAGE_FLAGS) - 1; | |
278 | |
279 // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during | |
280 // scavenge we just invalidate the watermark on each old space page after | |
281 // processing it. And then we flip the meaning of the WATERMARK_INVALIDATED | |
282 // flag at the beginning of the next scavenge and each page becomes marked as | |
283 // having a valid watermark. | |
284 // | |
285 // The following invariant must hold for pages in old pointer and map spaces: | |
286 // If page is in use then page is marked as having invalid watermark at | |
287 // the beginning and at the end of any GC. | |
288 // | |
289 // This invariant guarantees that after flipping flag meaning at the | |
290 // beginning of scavenge all pages in use will be marked as having valid | |
291 // watermark. | |
292 static inline void FlipMeaningOfInvalidatedWatermarkFlag(Heap* heap); | |
293 | |
294 // Returns true if the page allocation watermark was not altered during | |
295 // scavenge. | |
296 inline bool IsWatermarkValid(); | |
297 | |
298 inline void InvalidateWatermark(bool value); | |
299 | |
300 inline bool GetPageFlag(PageFlag flag); | |
301 inline void SetPageFlag(PageFlag flag, bool value); | |
302 inline void ClearPageFlags(); | |
303 | 682 |
304 inline void ClearGCFields(); | 683 inline void ClearGCFields(); |
305 | 684 |
306 static const int kAllocationWatermarkOffsetShift = WATERMARK_INVALIDATED + 1; | 685 static inline Page* Initialize(Heap* heap, |
307 static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1; | 686 MemoryChunk* chunk, |
308 static const uint32_t kAllocationWatermarkOffsetMask = | 687 Executability executable, |
309 ((1 << kAllocationWatermarkOffsetBits) - 1) << | 688 PagedSpace* owner); |
310 kAllocationWatermarkOffsetShift; | |
311 | 689 |
312 static const uint32_t kFlagsMask = | 690 void InitializeAsAnchor(PagedSpace* owner); |
313 ((1 << kAllocationWatermarkOffsetShift) - 1); | |
314 | 691 |
315 STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >= | 692 bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); } |
316 kAllocationWatermarkOffsetBits); | 693 bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); } |
| 694 bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); } |
317 | 695 |
318 //--------------------------------------------------------------------------- | 696 void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); } |
319 // Page header description. | 697 void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); } |
320 // | |
321 // If a page is not in the large object space, the first word, | |
322 // opaque_header, encodes the next page address (aligned to kPageSize 8K) | |
323 // and the chunk number (0 ~ 8K-1). Only MemoryAllocator should use | |
324 // opaque_header. The value range of the opaque_header is [0..kPageSize[, | |
325 // or [next_page_start, next_page_end[. It cannot point to a valid address | |
326 // in the current page. If a page is in the large object space, the first | |
327 // word *may* (if the page start and large object chunk start are the | |
328 // same) contain the address of the next large object chunk. | |
329 intptr_t opaque_header; | |
330 | 698 |
331 // If the page is not in the large object space, the low-order bit of the | 699 void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); } |
332 // second word is set. If the page is in the large object space, the | 700 void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); } |
333 // second word *may* (if the page start and large object chunk start are | |
334 // the same) contain the large object chunk size. In either case, the | |
335 // low-order bit for large object pages will be cleared. | |
336 // For normal pages this word is used to store page flags and | |
337 // offset of allocation top. | |
338 intptr_t flags_; | |
339 | 701 |
340 // This field contains dirty marks for regions covering the page. Only dirty | 702 friend class MemoryAllocator; |
341 // regions might contain intergenerational references. | |
342 // Only 32 dirty marks are supported so for large object pages several regions | |
343 // might be mapped to a single dirty mark. | |
344 uint32_t dirty_regions_; | |
345 | |
346 // The index of the page in its owner space. | |
347 int mc_page_index; | |
348 | |
349 // During mark-compact collections this field contains the forwarding address | |
350 // of the first live object in this page. | |
351 // During scavenge collection this field is used to store allocation watermark | |
352 // if it is altered during scavenge. | |
353 Address mc_first_forwarded; | |
354 | |
355 Heap* heap_; | |
356 }; | 703 }; |
357 | 704 |
358 | 705 |
| 706 STATIC_CHECK(sizeof(Page) <= MemoryChunk::kHeaderSize); |
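A worked example of the masking in Page::FromAddress, assuming kPageSizeBits == 20 to match the 1MB pages described above:

    // kPageAlignmentMask == 0xFFFFF, so the low 20 bits are cleared:
    //   0x4A3F5C68 & ~0xFFFFF == 0x4A300000
    Page* p = Page::FromAddress(reinterpret_cast<Address>(0x4A3F5C68));
    // p->address() == 0x4A300000; every interior pointer within the first
    // kPageSize bytes of the chunk maps back to the same header.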
| 707 |
| 708 |
| 709 class LargePage : public MemoryChunk { |
| 710 public: |
| 711 HeapObject* GetObject() { |
| 712 return HeapObject::FromAddress(body()); |
| 713 } |
| 714 |
| 715 inline LargePage* next_page() const { |
| 716 return static_cast<LargePage*>(next_chunk()); |
| 717 } |
| 718 |
| 719 inline void set_next_page(LargePage* page) { |
| 720 set_next_chunk(page); |
| 721 } |
| 722 private: |
| 723 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); |
| 724 |
| 725 friend class MemoryAllocator; |
| 726 }; |
| 727 |
| 728 STATIC_CHECK(sizeof(LargePage) <= MemoryChunk::kHeaderSize); |
| 729 |
359 // ---------------------------------------------------------------------------- | 730 // ---------------------------------------------------------------------------- |
360 // Space is the abstract superclass for all allocation spaces. | 731 // Space is the abstract superclass for all allocation spaces. |
361 class Space : public Malloced { | 732 class Space : public Malloced { |
362 public: | 733 public: |
363 Space(Heap* heap, AllocationSpace id, Executability executable) | 734 Space(Heap* heap, AllocationSpace id, Executability executable) |
364 : heap_(heap), id_(id), executable_(executable) {} | 735 : heap_(heap), id_(id), executable_(executable) {} |
365 | 736 |
366 virtual ~Space() {} | 737 virtual ~Space() {} |
367 | 738 |
368 Heap* heap() const { return heap_; } | 739 Heap* heap() const { return heap_; } |
369 | 740 |
370 // Does the space need executable memory? | 741 // Does the space need executable memory? |
371 Executability executable() { return executable_; } | 742 Executability executable() { return executable_; } |
372 | 743 |
373 // Identity used in error reporting. | 744 // Identity used in error reporting. |
374 AllocationSpace identity() { return id_; } | 745 AllocationSpace identity() { return id_; } |
375 | 746 |
376 // Returns allocated size. | 747 // Returns allocated size. |
377 virtual intptr_t Size() = 0; | 748 virtual intptr_t Size() = 0; |
378 | 749 |
379 // Returns size of objects. Can differ from the allocated size | 750 // Returns size of objects. Can differ from the allocated size |
380 // (e.g. see LargeObjectSpace). | 751 // (e.g. see LargeObjectSpace). |
381 virtual intptr_t SizeOfObjects() { return Size(); } | 752 virtual intptr_t SizeOfObjects() { return Size(); } |
382 | 753 |
| 754 virtual int RoundSizeDownToObjectAlignment(int size) { |
| 755 if (id_ == CODE_SPACE) { |
| 756 return RoundDown(size, kCodeAlignment); |
| 757 } else { |
| 758 return RoundDown(size, kPointerSize); |
| 759 } |
| 760 } |
| 761 |
383 #ifdef DEBUG | 762 #ifdef DEBUG |
384 virtual void Print() = 0; | 763 virtual void Print() = 0; |
385 #endif | 764 #endif |
386 | 765 |
387 // After calling this we can allocate a certain number of bytes using only | 766 // After calling this we can allocate a certain number of bytes using only |
388 // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope) | 767 // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope) |
389 // without using freelists or causing a GC. This is used by partial | 768 // without using freelists or causing a GC. This is used by partial |
390 // snapshots. It returns true if space was reserved or false if a GC is | 769 // snapshots. It returns true if space was reserved or false if a GC is |
391 // needed. For paged spaces the space requested must include the space wasted | 770 // needed. For paged spaces the space requested must include the space wasted |
392 // at the end of each page when allocating linearly. | 771 // at the end of each page when allocating linearly. |
(...skipping 30 matching lines...) |
423 bool exists() { return this != NULL && code_range_ != NULL; } | 802 bool exists() { return this != NULL && code_range_ != NULL; } |
424 bool contains(Address address) { | 803 bool contains(Address address) { |
425 if (this == NULL || code_range_ == NULL) return false; | 804 if (this == NULL || code_range_ == NULL) return false; |
426 Address start = static_cast<Address>(code_range_->address()); | 805 Address start = static_cast<Address>(code_range_->address()); |
427 return start <= address && address < start + code_range_->size(); | 806 return start <= address && address < start + code_range_->size(); |
428 } | 807 } |
429 | 808 |
430 // Allocates a chunk of memory from the large-object portion of | 809 // Allocates a chunk of memory from the large-object portion of |
431 // the code range. On platforms with no separate code range, should | 810 // the code range. On platforms with no separate code range, should |
432 // not be called. | 811 // not be called. |
433 MUST_USE_RESULT void* AllocateRawMemory(const size_t requested, | 812 MUST_USE_RESULT Address AllocateRawMemory(const size_t requested, |
434 size_t* allocated); | 813 size_t* allocated); |
435 void FreeRawMemory(void* buf, size_t length); | 814 void FreeRawMemory(Address buf, size_t length); |
436 | 815 |
437 private: | 816 private: |
438 Isolate* isolate_; | 817 Isolate* isolate_; |
439 | 818 |
440 // The reserved range of virtual memory that all code objects are put in. | 819 // The reserved range of virtual memory that all code objects are put in. |
441 VirtualMemory* code_range_; | 820 VirtualMemory* code_range_; |
442 // Plain old data class, just a struct plus a constructor. | 821 // Plain old data class, just a struct plus a constructor. |
443 class FreeBlock { | 822 class FreeBlock { |
444 public: | 823 public: |
445 FreeBlock(Address start_arg, size_t size_arg) | 824 FreeBlock(Address start_arg, size_t size_arg) |
446 : start(start_arg), size(size_arg) {} | 825 : start(start_arg), size(size_arg) { |
| 826 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 827 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |
| 828 } |
447 FreeBlock(void* start_arg, size_t size_arg) | 829 FreeBlock(void* start_arg, size_t size_arg) |
448 : start(static_cast<Address>(start_arg)), size(size_arg) {} | 830 : start(static_cast<Address>(start_arg)), size(size_arg) { |
| 831 ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment)); |
| 832 ASSERT(size >= static_cast<size_t>(Page::kPageSize)); |
| 833 } |
449 | 834 |
450 Address start; | 835 Address start; |
451 size_t size; | 836 size_t size; |
452 }; | 837 }; |
453 | 838 |
454 // Freed blocks of memory are added to the free list. When the allocation | 839 // Freed blocks of memory are added to the free list. When the allocation |
455 // list is exhausted, the free list is sorted and merged to make the new | 840 // list is exhausted, the free list is sorted and merged to make the new |
456 // allocation list. | 841 // allocation list. |
457 List<FreeBlock> free_list_; | 842 List<FreeBlock> free_list_; |
458 // Memory is allocated from the free blocks on the allocation list. | 843 // Memory is allocated from the free blocks on the allocation list. |
459 // The block at current_allocation_block_index_ is the current block. | 844 // The block at current_allocation_block_index_ is the current block. |
460 List<FreeBlock> allocation_list_; | 845 List<FreeBlock> allocation_list_; |
461 int current_allocation_block_index_; | 846 int current_allocation_block_index_; |
462 | 847 |
463 // Finds a block on the allocation list that contains at least the | 848 // Finds a block on the allocation list that contains at least the |
464 // requested amount of memory. If none is found, sorts and merges | 849 // requested amount of memory. If none is found, sorts and merges |
465 // the existing free memory blocks, and searches again. | 850 // the existing free memory blocks, and searches again. |
466 // If none can be found, terminates V8 with FatalProcessOutOfMemory. | 851 // If none can be found, terminates V8 with FatalProcessOutOfMemory. |
467 void GetNextAllocationBlock(size_t requested); | 852 void GetNextAllocationBlock(size_t requested); |
468 // Compares the start addresses of two free blocks. | 853 // Compares the start addresses of two free blocks. |
469 static int CompareFreeBlockAddress(const FreeBlock* left, | 854 static int CompareFreeBlockAddress(const FreeBlock* left, |
470 const FreeBlock* right); | 855 const FreeBlock* right); |
471 | 856 |
472 DISALLOW_COPY_AND_ASSIGN(CodeRange); | 857 DISALLOW_COPY_AND_ASSIGN(CodeRange); |
473 }; | 858 }; |
474 | 859 |
475 | 860 |
| 861 class SkipList { |
| 862 public: |
| 863 SkipList() { |
| 864 Clear(); |
| 865 } |
| 866 |
| 867 void Clear() { |
| 868 for (int idx = 0; idx < kSize; idx++) { |
| 869 starts_[idx] = reinterpret_cast<Address>(-1); |
| 870 } |
| 871 } |
| 872 |
| 873 Address StartFor(Address addr) { |
| 874 return starts_[RegionNumber(addr)]; |
| 875 } |
| 876 |
| 877 void AddObject(Address addr, int size) { |
| 878 int start_region = RegionNumber(addr); |
| 879 int end_region = RegionNumber(addr + size - kPointerSize); |
| 880 for (int idx = start_region; idx <= end_region; idx++) { |
| 881 if (starts_[idx] > addr) starts_[idx] = addr; |
| 882 } |
| 883 } |
| 884 |
| 885 static inline int RegionNumber(Address addr) { |
| 886 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2; |
| 887 } |
| 888 |
| 889 static void Update(Address addr, int size) { |
| 890 Page* page = Page::FromAddress(addr); |
| 891 SkipList* list = page->skip_list(); |
| 892 if (list == NULL) { |
| 893 list = new SkipList(); |
| 894 page->set_skip_list(list); |
| 895 } |
| 896 |
| 897 list->AddObject(addr, size); |
| 898 } |
| 899 |
| 900 private: |
| 901 static const int kRegionSizeLog2 = 13; |
| 902 static const int kRegionSize = 1 << kRegionSizeLog2; |
| 903 static const int kSize = Page::kPageSize / kRegionSize; |
| 904 |
| 905 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0); |
| 906 |
| 907 Address starts_[kSize]; |
| 908 }; |
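A sketch of the intended SkipList usage, inferred from the API above (the surrounding helper names are hypothetical, not part of this patch):

    // Record an object's start for each 8KB region it spans (kRegionSizeLog2
    // == 13), so later scans can map interior pointers to an object start.
    void RecordObjectStart(Address obj, int size_in_bytes) {
      SkipList::Update(obj, size_in_bytes);
    }

    // StartFor() returns the lowest start recorded in addr's region, or
    // (Address)-1 if nothing has been recorded there since Clear().
    Address LowerBoundObjectStart(Page* page, Address addr) {
      SkipList* list = page->skip_list();
      if (list == NULL) return reinterpret_cast<Address>(-1);  // no list yet
      return list->StartFor(addr);
    }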
| 909 |
| 910 |
476 // ---------------------------------------------------------------------------- | 911 // ---------------------------------------------------------------------------- |
477 // A space acquires chunks of memory from the operating system. The memory | 912 // A space acquires chunks of memory from the operating system. The memory |
478 // allocator manages chunks for the paged heap spaces (old space and map | 913 // allocator allocates and deallocates pages for the paged heap spaces and large |
479 // space). A paged chunk consists of pages. Pages in a chunk have contiguous | 914 // pages for large object space. |
480 // addresses and are linked as a list. | |
481 // | 915 // |
482 // The allocator keeps an initial chunk which is used for the new space. The | 916 // Each space has to manage its own pages. |
483 // leftover regions of the initial chunk are used for the initial chunks of | |
484 // old space and map space if they are big enough to hold at least one page. | |
485 // The allocator assumes that there is one old space and one map space, each | |
486 // expands the space by allocating kPagesPerChunk pages except the last | |
487 // expansion (before running out of space). The first chunk may contain fewer | |
488 // than kPagesPerChunk pages as well. | |
489 // | 917 // |
490 // The memory allocator also allocates chunks for the large object space, but | |
491 // they are managed by the space itself. The new space does not expand. | |
492 // | |
493 // The fact that pages for paged spaces are allocated and deallocated in chunks | |
494 // induces a constraint on the order of pages in a linked lists. We say that | |
495 // pages are linked in the chunk-order if and only if every two consecutive | |
496 // pages from the same chunk are consecutive in the linked list. | |
497 // | |
498 | |
499 | |
500 class MemoryAllocator { | 918 class MemoryAllocator { |
501 public: | 919 public: |
502 explicit MemoryAllocator(Isolate* isolate); | 920 explicit MemoryAllocator(Isolate* isolate); |
503 | 921 |
504 // Initializes its internal bookkeeping structures. | 922 // Initializes its internal bookkeeping structures. |
505 // Max capacity of the total space and executable memory limit. | 923 // Max capacity of the total space and executable memory limit. |
506 bool Setup(intptr_t max_capacity, intptr_t capacity_executable); | 924 bool Setup(intptr_t max_capacity, intptr_t capacity_executable); |
507 | 925 |
508 // Deletes valid chunks. | |
509 void TearDown(); | 926 void TearDown(); |
510 | 927 |
511 // Reserves an initial address range of virtual memory to be split between | 928 Page* AllocatePage(PagedSpace* owner, Executability executable); |
512 // the two new space semispaces, the old space, and the map space. The | |
513 // memory is not yet committed or assigned to spaces and split into pages. | |
514 // The initial chunk is unmapped when the memory allocator is torn down. | |
515 // This function should only be called when there is not already a reserved | |
516 // initial chunk (initial_chunk_ should be NULL). It returns the start | |
517 // address of the initial chunk if successful, with the side effect of | |
518 // setting the initial chunk, or else NULL if unsuccessful and leaves the | |
519 // initial chunk NULL. | |
520 void* ReserveInitialChunk(const size_t requested); | |
521 | 929 |
522 // Commits pages from an as-yet-unmanaged block of virtual memory into a | 930 LargePage* AllocateLargePage(intptr_t object_size, |
523 // paged space. The block should be part of the initial chunk reserved via | 931 Executability executable, |
524 // a call to ReserveInitialChunk. The number of pages is always returned in | 932 Space* owner); |
525 // the output parameter num_pages. This function assumes that the start | |
526 // address is non-null and that it is big enough to hold at least one | |
527 // page-aligned page. The call always succeeds, and num_pages is always | |
528 // greater than zero. | |
529 Page* CommitPages(Address start, size_t size, PagedSpace* owner, | |
530 int* num_pages); | |
531 | 933 |
532 // Commit a contiguous block of memory from the initial chunk. Assumes that | 934 void Free(MemoryChunk* chunk); |
533 // the address is not NULL, the size is greater than zero, and that the | |
534 // block is contained in the initial chunk. Returns true if it succeeded | |
535 // and false otherwise. | |
536 bool CommitBlock(Address start, size_t size, Executability executable); | |
537 | |
538 // Uncommit a contiguous block of memory [start..(start+size)[. | |
539 // start is not NULL, the size is greater than zero, and the | |
540 // block is contained in the initial chunk. Returns true if it succeeded | |
541 // and false otherwise. | |
542 bool UncommitBlock(Address start, size_t size); | |
543 | |
544 // Zaps a contiguous block of memory [start..(start+size)[ thus | |
545 // filling it up with a recognizable non-NULL bit pattern. | |
546 void ZapBlock(Address start, size_t size); | |
547 | |
548 // Attempts to allocate the requested (non-zero) number of pages from the | |
549 // OS. Fewer pages might be allocated than requested. If it fails to | |
550 // allocate memory for the OS or cannot allocate a single page, this | |
551 // function returns an invalid page pointer (NULL). The caller must check | |
552 // whether the returned page is valid (by calling Page::is_valid()). It is | |
553 // guaranteed that allocated pages have contiguous addresses. The actual | |
554 // number of allocated pages is returned in the output parameter | |
555 // allocated_pages. If the PagedSpace owner is executable and there is | |
556 // a code range, the pages are allocated from the code range. | |
557 Page* AllocatePages(int requested_pages, int* allocated_pages, | |
558 PagedSpace* owner); | |
559 | |
560 // Frees pages from a given page and after. Requires pages to be | |
561 // linked in chunk-order (see comment for class). | |
562 // If 'p' is the first page of a chunk, pages from 'p' are freed | |
563 // and this function returns an invalid page pointer. | |
564 // Otherwise, the function searches a page after 'p' that is | |
565 // the first page of a chunk. Pages after the found page | |
566 // are freed and the function returns 'p'. | |
567 Page* FreePages(Page* p); | |
568 | |
569 // Frees all pages owned by given space. | |
570 void FreeAllPages(PagedSpace* space); | |
571 | |
572 // Allocates and frees raw memory of certain size. | |
573 // These are just thin wrappers around OS::Allocate and OS::Free, | |
574 // but keep track of allocated bytes as part of heap. | |
575 // If the flag is EXECUTABLE and a code range exists, the requested | |
576 // memory is allocated from the code range. If a code range exists | |
577 // and the freed memory is in it, the code range manages the freed memory. | |
578 MUST_USE_RESULT void* AllocateRawMemory(const size_t requested, | |
579 size_t* allocated, | |
580 Executability executable); | |
581 void FreeRawMemory(void* buf, | |
582 size_t length, | |
583 Executability executable); | |
584 void PerformAllocationCallback(ObjectSpace space, | |
585 AllocationAction action, | |
586 size_t size); | |
587 | |
588 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, | |
589 ObjectSpace space, | |
590 AllocationAction action); | |
591 void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback); | |
592 bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback); | |
593 | 935 |
594 // Returns the maximum available bytes of heaps. | 936 // Returns the maximum available bytes of heaps. |
595 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } | 937 intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } |
596 | 938 |
597 // Returns allocated spaces in bytes. | 939 // Returns allocated spaces in bytes. |
598 intptr_t Size() { return size_; } | 940 intptr_t Size() { return size_; } |
599 | 941 |
600 // Returns the maximum available executable bytes of heaps. | 942 // Returns the maximum available executable bytes of heaps. |
601 intptr_t AvailableExecutable() { | 943 intptr_t AvailableExecutable() { |
602 if (capacity_executable_ < size_executable_) return 0; | 944 if (capacity_executable_ < size_executable_) return 0; |
603 return capacity_executable_ - size_executable_; | 945 return capacity_executable_ - size_executable_; |
604 } | 946 } |
605 | 947 |
606 // Returns allocated executable spaces in bytes. | 948 // Returns allocated executable spaces in bytes. |
607 intptr_t SizeExecutable() { return size_executable_; } | 949 intptr_t SizeExecutable() { return size_executable_; } |
608 | 950 |
609 // Returns maximum available bytes that the old space can have. | 951 // Returns maximum available bytes that the old space can have. |
610 intptr_t MaxAvailable() { | 952 intptr_t MaxAvailable() { |
611 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; | 953 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; |
612 } | 954 } |
613 | 955 |
614 // Links two pages. | |
615 inline void SetNextPage(Page* prev, Page* next); | |
616 | |
617 // Returns the next page of a given page. | |
618 inline Page* GetNextPage(Page* p); | |
619 | |
620 // Checks whether a page belongs to a space. | |
621 inline bool IsPageInSpace(Page* p, PagedSpace* space); | |
622 | |
623 // Returns the space that owns the given page. | |
624 inline PagedSpace* PageOwner(Page* page); | |
625 | |
626 // Finds the first/last page in the same chunk as a given page. | |
627 Page* FindFirstPageInSameChunk(Page* p); | |
628 Page* FindLastPageInSameChunk(Page* p); | |
629 | |
630 // Relinks list of pages owned by space to make it chunk-ordered. | |
631 // Returns new first and last pages of space. | |
632 // Also returns last page in relinked list which has WasInUsedBeforeMC | |
633 // flag set. | |
634 void RelinkPageListInChunkOrder(PagedSpace* space, | |
635 Page** first_page, | |
636 Page** last_page, | |
637 Page** last_page_in_use); | |
638 | |
639 #ifdef DEBUG | 956 #ifdef DEBUG |
640 // Reports statistic info of the space. | 957 // Reports statistic info of the space. |
641 void ReportStatistics(); | 958 void ReportStatistics(); |
642 #endif | 959 #endif |
643 | 960 |
644 // Due to encoding limitation, we can only have 8K chunks. | 961 MemoryChunk* AllocateChunk(intptr_t body_size, |
645 static const int kMaxNofChunks = 1 << kPageSizeBits; | 962 Executability executable, |
646 // If a chunk has at least 16 pages, the maximum heap size is about | 963 Space* space); |
647 // 8K * 8K * 16 = 1G bytes. | 964 |
648 #ifdef V8_TARGET_ARCH_X64 | 965 Address ReserveAlignedMemory(size_t requested, |
649 static const int kPagesPerChunk = 32; | 966 size_t alignment, |
650 // On 64 bit the chunk table consists of 4 levels of 4096-entry tables. | 967 VirtualMemory* controller); |
651 static const int kChunkTableLevels = 4; | 968 Address AllocateAlignedMemory(size_t requested, |
652 static const int kChunkTableBitsPerLevel = 12; | 969 size_t alignment, |
653 #else | 970 Executability executable, |
654 static const int kPagesPerChunk = 16; | 971 VirtualMemory* controller); |
655 // On 32 bit the chunk table consists of 2 levels of 256-entry tables. | 972 |
656 static const int kChunkTableLevels = 2; | 973 void FreeMemory(VirtualMemory* reservation, Executability executable); |
657 static const int kChunkTableBitsPerLevel = 8; | 974 void FreeMemory(Address addr, size_t size, Executability executable); |
658 #endif | 975 |
| 976 // Commit a contiguous block of memory from the initial chunk. Assumes that |
| 977 // the address is not NULL, the size is greater than zero, and that the |
| 978 // block is contained in the initial chunk. Returns true if it succeeded |
| 979 // and false otherwise. |
| 980 bool CommitBlock(Address start, size_t size, Executability executable); |
| 981 |
| 982 // Uncommit a contiguous block of memory [start..(start+size)[. |
| 983 // start is not NULL, the size is greater than zero, and the |
| 984 // block is contained in the initial chunk. Returns true if it succeeded |
| 985 // and false otherwise. |
| 986 bool UncommitBlock(Address start, size_t size); |
| 987 |
| 988 // Zaps a contiguous block of memory [start..(start+size)[ thus |
| 989 // filling it up with a recognizable non-NULL bit pattern. |
| 990 void ZapBlock(Address start, size_t size); |
| 991 |
| 992 void PerformAllocationCallback(ObjectSpace space, |
| 993 AllocationAction action, |
| 994 size_t size); |
| 995 |
| 996 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, |
| 997 ObjectSpace space, |
| 998 AllocationAction action); |
| 999 |
| 1000 void RemoveMemoryAllocationCallback( |
| 1001 MemoryAllocationCallback callback); |
| 1002 |
| 1003 bool MemoryAllocationCallbackRegistered( |
| 1004 MemoryAllocationCallback callback); |
659 | 1005 |
660 private: | 1006 private: |
661 static const int kChunkSize = kPagesPerChunk * Page::kPageSize; | |
662 | |
663 Isolate* isolate_; | 1007 Isolate* isolate_; |
664 | 1008 |
665 // Maximum space size in bytes. | 1009 // Maximum space size in bytes. |
666 intptr_t capacity_; | 1010 size_t capacity_; |
667 // Maximum subset of capacity_ that can be executable | 1011 // Maximum subset of capacity_ that can be executable |
668 intptr_t capacity_executable_; | 1012 size_t capacity_executable_; |
669 | 1013 |
670 // Allocated space size in bytes. | 1014 // Allocated space size in bytes. |
671 intptr_t size_; | 1015 size_t size_; |
672 | |
673 // Allocated executable space size in bytes. | 1016 // Allocated executable space size in bytes. |
674 intptr_t size_executable_; | 1017 size_t size_executable_; |
675 | 1018 |
676 struct MemoryAllocationCallbackRegistration { | 1019 struct MemoryAllocationCallbackRegistration { |
677 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, | 1020 MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback, |
678 ObjectSpace space, | 1021 ObjectSpace space, |
679 AllocationAction action) | 1022 AllocationAction action) |
680 : callback(callback), space(space), action(action) { | 1023 : callback(callback), space(space), action(action) { |
681 } | 1024 } |
682 MemoryAllocationCallback callback; | 1025 MemoryAllocationCallback callback; |
683 ObjectSpace space; | 1026 ObjectSpace space; |
684 AllocationAction action; | 1027 AllocationAction action; |
685 }; | 1028 }; |
| 1029 |
686 // A list of callbacks that are triggered when memory is allocated or freed | 1030 // A list of callbacks that are triggered when memory is allocated or freed |
687 List<MemoryAllocationCallbackRegistration> | 1031 List<MemoryAllocationCallbackRegistration> |
688 memory_allocation_callbacks_; | 1032 memory_allocation_callbacks_; |
689 | 1033 |
690 // The initial chunk of virtual memory. | |
691 VirtualMemory* initial_chunk_; | |
692 | |
693 // Allocated chunk info: chunk start address, chunk size, and owning space. | |
694 class ChunkInfo BASE_EMBEDDED { | |
695 public: | |
696 ChunkInfo() : address_(NULL), | |
697 size_(0), | |
698 owner_(NULL), | |
699 executable_(NOT_EXECUTABLE), | |
700 owner_identity_(FIRST_SPACE) {} | |
701 inline void init(Address a, size_t s, PagedSpace* o); | |
702 Address address() { return address_; } | |
703 size_t size() { return size_; } | |
704 PagedSpace* owner() { return owner_; } | |
705 // We save executability of the owner to allow using it | |
706 // when collecting stats after the owner has been destroyed. | |
707 Executability executable() const { return executable_; } | |
708 AllocationSpace owner_identity() const { return owner_identity_; } | |
709 | |
710 private: | |
711 Address address_; | |
712 size_t size_; | |
713 PagedSpace* owner_; | |
714 Executability executable_; | |
715 AllocationSpace owner_identity_; | |
716 }; | |
717 | |
718 // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids. | |
719 List<ChunkInfo> chunks_; | |
720 List<int> free_chunk_ids_; | |
721 int max_nof_chunks_; | |
722 int top_; | |
723 | |
724 // Push/pop a free chunk id onto/from the stack. | |
725 void Push(int free_chunk_id); | |
726 int Pop(); | |
727 bool OutOfChunkIds() { return top_ == 0; } | |
728 | |
729 // Frees a chunk. | |
730 void DeleteChunk(int chunk_id); | |
731 | |
732 // Basic check whether a chunk id is in the valid range. | |
733 inline bool IsValidChunkId(int chunk_id); | |
734 | |
735 // Checks whether a chunk id identifies an allocated chunk. | |
736 inline bool IsValidChunk(int chunk_id); | |
737 | |
738 // Returns the chunk id that a page belongs to. | |
739 inline int GetChunkId(Page* p); | |
740 | |
741 // True if the address lies in the initial chunk. | |
742 inline bool InInitialChunk(Address address); | |
743 | |
744 // Initializes pages in a chunk. Returns the first page address. | 1034 // Initializes pages in a chunk. Returns the first page address. |
745 // This function and GetChunkId() are provided for the mark-compact | 1035 // This function and GetChunkId() are provided for the mark-compact |
746 // collector to rebuild page headers in the from space, which is | 1036 // collector to rebuild page headers in the from space, which is |
747 // used as a marking stack and its page headers are destroyed. | 1037 // used as a marking stack and its page headers are destroyed. |
748 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, | 1038 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk, |
749 PagedSpace* owner); | 1039 PagedSpace* owner); |
750 | 1040 |
751 Page* RelinkPagesInChunk(int chunk_id, | 1041 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator); |
752 Address chunk_start, | |
753 size_t chunk_size, | |
754 Page* prev, | |
755 Page** last_page_in_use); | |
756 | |
757 DISALLOW_COPY_AND_ASSIGN(MemoryAllocator); | |
758 }; | 1042 }; |
759 | 1043 |
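// Illustrative usage sketch (not part of this patch): registering one of
// the memory allocation callbacks declared above. The callback signature
// and the kObjectSpaceAll/kAllocationActionAll constants follow the public
// v8.h definitions; isolate->memory_allocator() is the assumed accessor.
//
//   static void TrackMemory(ObjectSpace space,
//                           AllocationAction action,
//                           int size) {
//     // Called for every commit/release the allocator performs.
//   }
//
//   isolate->memory_allocator()->AddMemoryAllocationCallback(
//       &TrackMemory, kObjectSpaceAll, kAllocationActionAll);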
760 | 1044 |
761 // ----------------------------------------------------------------------------- | 1045 // ----------------------------------------------------------------------------- |
762 // Interface for heap object iterator to be implemented by all object space | 1046 // Interface for heap object iterator to be implemented by all object space |
763 // object iterators. | 1047 // object iterators. |
764 // | 1048 // |
765 // NOTE: The space-specific object iterators also implement their own next() | 1049 // NOTE: The space-specific object iterators also implement their own next() |
766 // method, which is used to avoid virtual function calls when | 1050 // method, which is used to avoid virtual function calls when |
767 // iterating over a specific space. | 1051 // iterating over a specific space. |
768 | 1052 |
769 class ObjectIterator : public Malloced { | 1053 class ObjectIterator : public Malloced { |
770 public: | 1054 public: |
771 virtual ~ObjectIterator() { } | 1055 virtual ~ObjectIterator() { } |
772 | 1056 |
773 virtual HeapObject* next_object() = 0; | 1057 virtual HeapObject* next_object() = 0; |
774 }; | 1058 }; |
775 | 1059 |
776 | 1060 |
777 // ----------------------------------------------------------------------------- | 1061 // ----------------------------------------------------------------------------- |
778 // Heap object iterator in new/old/map spaces. | 1062 // Heap object iterator in new/old/map spaces. |
779 // | 1063 // |
780 // A HeapObjectIterator iterates objects from a given address to the | 1064 // A HeapObjectIterator iterates objects from the bottom of the given space |
781 // top of a space. The given address must be below the current | 1065 // to its top or from the bottom of the given page to its top. |
782 // allocation pointer (space top). There are some caveats. | |
783 // | 1066 // |
784 // (1) If the space top changes upward during iteration (because of | 1067 // If objects are allocated in the page during iteration the iterator may |
785 // allocating new objects), the iterator does not iterate objects | 1068 // or may not iterate over those objects. The caller must create a new |
786 // above the original space top. The caller must create a new | 1069 // iterator in order to be sure to visit these new objects. |
787 // iterator starting from the old top in order to visit these new | |
788 // objects. | |
789 // | |
790 // (2) If new objects are allocated below the original allocation top | |
791 // (e.g., free-list allocation in paged spaces), the new objects | |
792 // may or may not be iterated depending on their position with | |
793 // respect to the current point of iteration. | |
794 // | |
795 // (3) The space top should not change downward during iteration, | |
796 // otherwise the iterator will return not-necessarily-valid | |
797 // objects. | |
798 | |
799 class HeapObjectIterator: public ObjectIterator { | 1070 class HeapObjectIterator: public ObjectIterator { |
800 public: | 1071 public: |
801 // Creates a new object iterator in a given space. If a start | 1072 // Creates a new object iterator in a given space. |
802 // address is not given, the iterator starts from the space bottom. | |
803 // If the size function is not given, the iterator calls the default | 1073 // If the size function is not given, the iterator calls the default |
804 // Object::Size(). | 1074 // Object::Size(). |
805 explicit HeapObjectIterator(PagedSpace* space); | 1075 explicit HeapObjectIterator(PagedSpace* space); |
806 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); | 1076 HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func); |
807 HeapObjectIterator(PagedSpace* space, Address start); | |
808 HeapObjectIterator(PagedSpace* space, | |
809 Address start, | |
810 HeapObjectCallback size_func); | |
811 HeapObjectIterator(Page* page, HeapObjectCallback size_func); | 1077 HeapObjectIterator(Page* page, HeapObjectCallback size_func); |
812 | 1078 |
813 inline HeapObject* next() { | 1079 // Advance to the next object, skipping free spaces and other fillers and |
814 return (cur_addr_ < cur_limit_) ? FromCurrentPage() : FromNextPage(); | 1080 // skipping the special garbage section of which there is one per space. |
| 1081 // Returns NULL when the iteration has ended. |
| 1082 inline HeapObject* Next() { |
| 1083 do { |
| 1084 HeapObject* next_obj = FromCurrentPage(); |
| 1085 if (next_obj != NULL) return next_obj; |
| 1086 } while (AdvanceToNextPage()); |
| 1087 return NULL; |
815 } | 1088 } |
816 | 1089 |
817 // implementation of ObjectIterator. | 1090 virtual HeapObject* next_object() { |
818 virtual HeapObject* next_object() { return next(); } | 1091 return Next(); |
| 1092 } |
819 | 1093 |
820 private: | 1094 private: |
821 Address cur_addr_; // current iteration point | 1095 enum PageMode { kOnePageOnly, kAllPagesInSpace }; |
822 Address end_addr_; // end iteration point | |
823 Address cur_limit_; // current page limit | |
824 HeapObjectCallback size_func_; // size function | |
825 Page* end_page_; // caches the page of the end address | |
826 | 1096 |
827 HeapObject* FromCurrentPage() { | 1097 Address cur_addr_; // Current iteration point. |
828 ASSERT(cur_addr_ < cur_limit_); | 1098 Address cur_end_; // End iteration point. |
| 1099 HeapObjectCallback size_func_; // Size function or NULL. |
| 1100 PagedSpace* space_; |
| 1101 PageMode page_mode_; |
829 | 1102 |
830 HeapObject* obj = HeapObject::FromAddress(cur_addr_); | 1103 // Fast (inlined) path of Next(). |
831 int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj); | 1104 inline HeapObject* FromCurrentPage(); |
832 ASSERT_OBJECT_SIZE(obj_size); | |
833 | 1105 |
834 cur_addr_ += obj_size; | 1106 // Slow path of Next(); goes into the next page. Returns false if the |
835 ASSERT(cur_addr_ <= cur_limit_); | 1107 // iteration has ended. |
836 | 1108 bool AdvanceToNextPage(); |
837 return obj; | |
838 } | |
839 | |
840 // Slow path of next, goes into the next page. | |
841 HeapObject* FromNextPage(); | |
842 | 1109 |
843 // Initializes fields. | 1110 // Initializes fields. |
844 void Initialize(Address start, Address end, HeapObjectCallback size_func); | 1111 inline void Initialize(PagedSpace* owner, |
| 1112 Address start, |
| 1113 Address end, |
| 1114 PageMode mode, |
| 1115 HeapObjectCallback size_func); |
845 | 1116 |
846 #ifdef DEBUG | 1117 #ifdef DEBUG |
847 // Verifies whether fields have valid values. | 1118 // Verifies whether fields have valid values. |
848 void Verify(); | 1119 void Verify(); |
849 #endif | 1120 #endif |
850 }; | 1121 }; |
851 | 1122 |
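// Illustrative usage sketch (not part of this patch): walking every object
// in a paged space. Next() skips free-space fillers and returns NULL at
// the end, so the loop below visits exactly the live objects.
//
//   HeapObjectIterator it(heap->old_pointer_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // Inspect 'obj'. Objects allocated after the iterator was created
//     // may or may not be visited (see the class comment above).
//   }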
852 | 1123 |
853 // ----------------------------------------------------------------------------- | 1124 // ----------------------------------------------------------------------------- |
854 // A PageIterator iterates the pages in a paged space. | 1125 // A PageIterator iterates the pages in a paged space. |
855 // | |
856 // The PageIterator class provides three modes for iterating pages in a space: | |
857 // PAGES_IN_USE iterates pages containing allocated objects. | |
858 // PAGES_USED_BY_MC iterates pages that hold relocated objects during a | |
859 // mark-compact collection. | |
860 // ALL_PAGES iterates all pages in the space. | |
861 // | |
862 // There are some caveats. | |
863 // | |
864 // (1) If the space expands during iteration, new pages will not be | |
865 // returned by the iterator in any mode. | |
866 // | |
867 // (2) If new objects are allocated during iteration, they will appear | |
868 // in pages returned by the iterator. Allocation may cause the | |
869 // allocation pointer or MC allocation pointer in the last page to | |
870 // change between constructing the iterator and iterating the last | |
871 // page. | |
872 // | |
873 // (3) The space should not shrink during iteration, otherwise the | |
874 // iterator will return deallocated pages. | |
875 | 1126 |
876 class PageIterator BASE_EMBEDDED { | 1127 class PageIterator BASE_EMBEDDED { |
877 public: | 1128 public: |
878 enum Mode { | 1129 explicit inline PageIterator(PagedSpace* space); |
879 PAGES_IN_USE, | |
880 PAGES_USED_BY_MC, | |
881 ALL_PAGES | |
882 }; | |
883 | |
884 PageIterator(PagedSpace* space, Mode mode); | |
885 | 1130 |
886 inline bool has_next(); | 1131 inline bool has_next(); |
887 inline Page* next(); | 1132 inline Page* next(); |
888 | 1133 |
889 private: | 1134 private: |
890 PagedSpace* space_; | 1135 PagedSpace* space_; |
891 Page* prev_page_; // Previous page returned. | 1136 Page* prev_page_; // Previous page returned. |
892 Page* stop_page_; // Page to stop at (last page returned by the iterator). | 1137 // Next page that will be returned. Cached here so that we can use this |
| 1138 // iterator for operations that deallocate pages. |
| 1139 Page* next_page_; |
893 }; | 1140 }; |
894 | 1141 |
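// Illustrative usage sketch: since next_page_ is cached, the loop body may
// release the page it was just handed without breaking the iteration.
//
//   PageIterator it(space);
//   while (it.has_next()) {
//     Page* p = it.next();
//     // Inspect (or even deallocate) 'p' here.
//   }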
895 | 1142 |
896 // ----------------------------------------------------------------------------- | 1143 // ----------------------------------------------------------------------------- |
897 // A space has a list of pages. The next page can be accessed via | 1144 // A space has a circular list of pages. The next page can be accessed via |
898 // Page::next_page() call. The next page of the last page is an | 1145 // Page::next_page() call. |
899 // invalid page pointer. A space can expand and shrink dynamically. | |
900 | 1146 |
901 // An abstraction of allocation and relocation pointers in a page-structured | 1147 // An abstraction of allocation and relocation pointers in a page-structured |
902 // space. | 1148 // space. |
903 class AllocationInfo { | 1149 class AllocationInfo { |
904 public: | 1150 public: |
905 Address top; // current allocation top | 1151 AllocationInfo() : top(NULL), limit(NULL) { |
906 Address limit; // current allocation limit | 1152 } |
| 1153 |
| 1154 Address top; // Current allocation top. |
| 1155 Address limit; // Current allocation limit. |
907 | 1156 |
908 #ifdef DEBUG | 1157 #ifdef DEBUG |
909 bool VerifyPagedAllocation() { | 1158 bool VerifyPagedAllocation() { |
910 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) | 1159 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) |
911 && (top <= limit); | 1160 && (top <= limit); |
912 } | 1161 } |
913 #endif | 1162 #endif |
914 }; | 1163 }; |
915 | 1164 |
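// The top/limit pair above drives bump-pointer allocation. A minimal
// sketch of the fast path (the real version is PagedSpace::AllocateLinearly):
//
//   Address new_top = allocation_info_.top + size_in_bytes;
//   if (new_top > allocation_info_.limit) return NULL;  // Take slow path.
//   HeapObject* result = HeapObject::FromAddress(allocation_info_.top);
//   allocation_info_.top = new_top;
//   return result;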
916 | 1165 |
(...skipping 11 matching lines...) Expand all Loading... |
928 // functions increase or decrease one of the non-capacity stats in | 1177 // functions increase or decrease one of the non-capacity stats in |
929 // conjunction with capacity, or else they always balance increases and | 1178 // conjunction with capacity, or else they always balance increases and |
930 // decreases to the non-capacity stats. | 1179 // decreases to the non-capacity stats. |
931 class AllocationStats BASE_EMBEDDED { | 1180 class AllocationStats BASE_EMBEDDED { |
932 public: | 1181 public: |
933 AllocationStats() { Clear(); } | 1182 AllocationStats() { Clear(); } |
934 | 1183 |
935 // Zero out all the allocation statistics (ie, no capacity). | 1184 // Zero out all the allocation statistics (ie, no capacity). |
936 void Clear() { | 1185 void Clear() { |
937 capacity_ = 0; | 1186 capacity_ = 0; |
938 available_ = 0; | |
939 size_ = 0; | 1187 size_ = 0; |
940 waste_ = 0; | 1188 waste_ = 0; |
941 } | 1189 } |
942 | 1190 |
| 1191 void ClearSizeWaste() { |
| 1192 size_ = capacity_; |
| 1193 waste_ = 0; |
| 1194 } |
| 1195 |
943 // Reset the allocation statistics (ie, available = capacity with no | 1196 // Reset the allocation statistics (ie, available = capacity with no |
944 // wasted or allocated bytes). | 1197 // wasted or allocated bytes). |
945 void Reset() { | 1198 void Reset() { |
946 available_ = capacity_; | |
947 size_ = 0; | 1199 size_ = 0; |
948 waste_ = 0; | 1200 waste_ = 0; |
949 } | 1201 } |
950 | 1202 |
951 // Accessors for the allocation statistics. | 1203 // Accessors for the allocation statistics. |
952 intptr_t Capacity() { return capacity_; } | 1204 intptr_t Capacity() { return capacity_; } |
953 intptr_t Available() { return available_; } | |
954 intptr_t Size() { return size_; } | 1205 intptr_t Size() { return size_; } |
955 intptr_t Waste() { return waste_; } | 1206 intptr_t Waste() { return waste_; } |
956 | 1207 |
957 // Grow the space by adding available bytes. | 1208 // Grow the space by adding available bytes. They are initially marked as |
| 1209 // being in use (part of the size), but will normally be immediately freed, |
| 1210 // putting them on the free list and removing them from size_. |
958 void ExpandSpace(int size_in_bytes) { | 1211 void ExpandSpace(int size_in_bytes) { |
959 capacity_ += size_in_bytes; | 1212 capacity_ += size_in_bytes; |
960 available_ += size_in_bytes; | 1213 size_ += size_in_bytes; |
961 } | 1214 ASSERT(size_ >= 0); |
962 | |
963 // Shrink the space by removing available bytes. | |
964 void ShrinkSpace(int size_in_bytes) { | |
965 capacity_ -= size_in_bytes; | |
966 available_ -= size_in_bytes; | |
967 } | 1215 } |
968 | 1216 |
969 // Allocate from available bytes (available -> size). | 1217 // Allocate from available bytes (available -> size). |
970 void AllocateBytes(intptr_t size_in_bytes) { | 1218 void AllocateBytes(intptr_t size_in_bytes) { |
971 available_ -= size_in_bytes; | |
972 size_ += size_in_bytes; | 1219 size_ += size_in_bytes; |
| 1220 ASSERT(size_ >= 0); |
973 } | 1221 } |
974 | 1222 |
975 // Free allocated bytes, making them available (size -> available). | 1223 // Free allocated bytes, making them available (size -> available). |
976 void DeallocateBytes(intptr_t size_in_bytes) { | 1224 void DeallocateBytes(intptr_t size_in_bytes) { |
977 size_ -= size_in_bytes; | 1225 size_ -= size_in_bytes; |
978 available_ += size_in_bytes; | 1226 ASSERT(size_ >= 0); |
979 } | 1227 } |
980 | 1228 |
981 // Waste free bytes (available -> waste). | 1229 // Waste free bytes (available -> waste). |
982 void WasteBytes(int size_in_bytes) { | 1230 void WasteBytes(int size_in_bytes) { |
983 available_ -= size_in_bytes; | 1231 size_ -= size_in_bytes; |
984 waste_ += size_in_bytes; | 1232 waste_ += size_in_bytes; |
985 } | 1233 ASSERT(size_ >= 0); |
986 | |
987 // Consider the wasted bytes to be allocated, as they contain filler | |
988 // objects (waste -> size). | |
989 void FillWastedBytes(intptr_t size_in_bytes) { | |
990 waste_ -= size_in_bytes; | |
991 size_ += size_in_bytes; | |
992 } | 1234 } |
993 | 1235 |
994 private: | 1236 private: |
995 intptr_t capacity_; | 1237 intptr_t capacity_; |
996 intptr_t available_; | |
997 intptr_t size_; | 1238 intptr_t size_; |
998 intptr_t waste_; | 1239 intptr_t waste_; |
999 }; | 1240 }; |
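// Worked example of the accounting above (illustrative numbers): after
// ExpandSpace(8192), capacity_ == size_ == 8192 and waste_ == 0. When
// sweeping discovers a 1024-byte free block, DeallocateBytes(1024) drops
// size_ to 7168 and the block goes onto the owning space's free list. A
// later AllocateBytes(64) from that list raises size_ to 7232. Ignoring
// waste_, capacity_ - size_ always equals the bytes on the free list.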
1000 | 1241 |
1001 | 1242 |
| 1243 // ----------------------------------------------------------------------------- |
| 1244 // Free lists for old object spaces |
| 1245 // |
| 1246 // Free-list nodes are free blocks in the heap. They look like heap objects |
| 1247 // (free-list node pointers have the heap object tag, and they have a map like |
| 1248 // a heap object). They have a size and a next pointer. The next pointer is |
| 1249 // the raw address of the next free list node (or NULL). |
| 1250 class FreeListNode: public HeapObject { |
| 1251 public: |
| 1252 // Obtain a free-list node from a raw address. This is not a cast because |
| 1253 // it does not check nor require that the first word at the address is a map |
| 1254 // pointer. |
| 1255 static FreeListNode* FromAddress(Address address) { |
| 1256 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); |
| 1257 } |
| 1258 |
| 1259 static inline bool IsFreeListNode(HeapObject* object); |
| 1260 |
| 1261 // Set the size in bytes, which can be read with HeapObject::Size(). This |
| 1262 // function also writes a map to the first word of the block so that it |
| 1263 // looks like a heap object to the garbage collector and heap iteration |
| 1264 // functions. |
| 1265 void set_size(Heap* heap, int size_in_bytes); |
| 1266 |
| 1267 // Accessors for the next field. |
| 1268 inline FreeListNode* next(); |
| 1269 inline FreeListNode** next_address(); |
| 1270 inline void set_next(FreeListNode* next); |
| 1271 |
| 1272 inline void Zap(); |
| 1273 |
| 1274 private: |
| 1275 static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize); |
| 1276 |
| 1277 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); |
| 1278 }; |
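// Illustrative sketch (not part of this patch): turning a swept-over range
// into a free-list node so the heap stays iterable; 'old_head' stands in
// for the head of whichever category list the block belongs on.
//
//   FreeListNode* node = FreeListNode::FromAddress(free_start);
//   node->set_size(heap, size_in_bytes);  // Writes map + size: the block
//                                         // now looks like a heap object.
//   node->set_next(old_head);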
| 1279 |
| 1280 |
| 1281 // The free list for the old space. The free list is organized in such a way |
| 1282 // as to encourage objects allocated around the same time to be near each |
| 1283 // other. The normal way to allocate is intended to be by bumping a 'top' |
| 1284 // pointer until it hits a 'limit' pointer. When the limit is hit we need to |
| 1285 // find a new space to allocate from. This is done with the free list, which |
| 1286 // is divided up into rough categories to cut down on waste. Having finer |
| 1287 // categories would scatter allocation more. |
| 1288 |
| 1289 // The old space free list is organized in categories. |
| 1290 // 1-31 words: Such small free areas are discarded for efficiency reasons. |
| 1291 // They can be reclaimed by the compactor. However the distance between top |
| 1292 // and limit may be this small. |
| 1293 // 32-255 words: There is a list of spaces this large. It is used for top and |
| 1294 // limit when the object we need to allocate is 1-31 words in size. These |
| 1295 // spaces are called small. |
| 1296 // 256-2047 words: There is a list of spaces this large. It is used for top and |
| 1297 // limit when the object we need to allocate is 32-255 words in size. These |
| 1298 // spaces are called medium. |
 | 1299 // 2048-16383 words: There is a list of spaces this large. It is used for top |
| 1300 // and limit when the object we need to allocate is 256-2047 words in size. |
 | 1301 // These spaces are called large. |
| 1302 // At least 16384 words. This list is for objects of 2048 words or larger. |
| 1303 // Empty pages are added to this list. These spaces are called huge. |
| 1304 class FreeList BASE_EMBEDDED { |
| 1305 public: |
| 1306 explicit FreeList(PagedSpace* owner); |
| 1307 |
| 1308 // Clear the free list. |
| 1309 void Reset(); |
| 1310 |
| 1311 // Return the number of bytes available on the free list. |
| 1312 intptr_t available() { return available_; } |
| 1313 |
| 1314 // Place a node on the free list. The block of size 'size_in_bytes' |
| 1315 // starting at 'start' is placed on the free list. The return value is the |
| 1316 // number of bytes that have been lost due to internal fragmentation by |
| 1317 // freeing the block. Bookkeeping information will be written to the block, |
| 1318 // ie, its contents will be destroyed. The start address should be word |
| 1319 // aligned, and the size should be a non-zero multiple of the word size. |
| 1320 int Free(Address start, int size_in_bytes); |
| 1321 |
| 1322 // Allocate a block of size 'size_in_bytes' from the free list. The block |
 | 1323 // is uninitialized. A failure is returned if no block is available. The |
 | 1324 // size should be a non-zero multiple of the word size. |
| 1326 MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes); |
| 1327 |
| 1328 void MarkNodes(); |
| 1329 |
| 1330 #ifdef DEBUG |
| 1331 void Zap(); |
| 1332 static intptr_t SumFreeList(FreeListNode* node); |
| 1333 static int FreeListLength(FreeListNode* cur); |
| 1334 intptr_t SumFreeLists(); |
| 1335 bool IsVeryLong(); |
| 1336 #endif |
| 1337 |
| 1338 void CountFreeListItems(Page* p, intptr_t* sizes); |
| 1339 |
| 1340 private: |
| 1341 // The size range of blocks, in bytes. |
| 1342 static const int kMinBlockSize = 3 * kPointerSize; |
| 1343 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; |
| 1344 |
| 1345 FreeListNode* PickNodeFromList(FreeListNode** list, int* node_size); |
| 1346 |
| 1347 FreeListNode* FindNodeFor(int size_in_bytes, int* node_size); |
| 1348 |
| 1349 PagedSpace* owner_; |
| 1350 Heap* heap_; |
| 1351 |
| 1352 // Total available bytes in all blocks on this free list. |
| 1353 int available_; |
| 1354 |
| 1355 static const int kSmallListMin = 0x20 * kPointerSize; |
| 1356 static const int kSmallListMax = 0xff * kPointerSize; |
| 1357 static const int kMediumListMax = 0x7ff * kPointerSize; |
| 1358 static const int kLargeListMax = 0x3fff * kPointerSize; |
| 1359 static const int kSmallAllocationMax = kSmallListMin - kPointerSize; |
| 1360 static const int kMediumAllocationMax = kSmallListMax; |
| 1361 static const int kLargeAllocationMax = kMediumListMax; |
| 1362 FreeListNode* small_list_; |
| 1363 FreeListNode* medium_list_; |
| 1364 FreeListNode* large_list_; |
| 1365 FreeListNode* huge_list_; |
| 1366 |
| 1367 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList); |
| 1368 }; |
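// Illustrative usage sketch: handing a free block to the list and reusing
// it later. Free() returns the bytes it discarded as too small to keep.
//
//   int wasted = free_list->Free(block_start, block_size);
//   // ... later, on the allocation slow path:
//   HeapObject* node = free_list->Allocate(64 * kPointerSize);
//   if (node == NULL) {
//     // Nothing suitable on any list: sweep more, expand, or collect.
//   }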
| 1369 |
| 1370 |
1002 class PagedSpace : public Space { | 1371 class PagedSpace : public Space { |
1003 public: | 1372 public: |
1004 // Creates a space with a maximum capacity, and an id. | 1373 // Creates a space with a maximum capacity, and an id. |
1005 PagedSpace(Heap* heap, | 1374 PagedSpace(Heap* heap, |
1006 intptr_t max_capacity, | 1375 intptr_t max_capacity, |
1007 AllocationSpace id, | 1376 AllocationSpace id, |
1008 Executability executable); | 1377 Executability executable); |
1009 | 1378 |
1010 virtual ~PagedSpace() {} | 1379 virtual ~PagedSpace() {} |
1011 | 1380 |
1012 // Set up the space using the given address range of virtual memory (from | 1381 // Set up the space using the given address range of virtual memory (from |
1013 // the memory allocator's initial chunk) if possible. If the block of | 1382 // the memory allocator's initial chunk) if possible. If the block of |
1014 // addresses is not big enough to contain a single page-aligned page, a | 1383 // addresses is not big enough to contain a single page-aligned page, a |
1015 // fresh chunk will be allocated. | 1384 // fresh chunk will be allocated. |
1016 bool Setup(Address start, size_t size); | 1385 bool Setup(); |
1017 | 1386 |
1018 // Returns true if the space has been successfully set up and not | 1387 // Returns true if the space has been successfully set up and not |
1019 // subsequently torn down. | 1388 // subsequently torn down. |
1020 bool HasBeenSetup(); | 1389 bool HasBeenSetup(); |
1021 | 1390 |
1022 // Cleans up the space, frees all pages in this space except those belonging | 1391 // Cleans up the space, frees all pages in this space except those belonging |
1023 // to the initial chunk, uncommits addresses in the initial chunk. | 1392 // to the initial chunk, uncommits addresses in the initial chunk. |
1024 void TearDown(); | 1393 void TearDown(); |
1025 | 1394 |
1026 // Checks whether an object/address is in this space. | 1395 // Checks whether an object/address is in this space. |
1027 inline bool Contains(Address a); | 1396 inline bool Contains(Address a); |
1028 bool Contains(HeapObject* o) { return Contains(o->address()); } | 1397 bool Contains(HeapObject* o) { return Contains(o->address()); } |
1029 // Never crashes even if a is not a valid pointer. | |
1030 inline bool SafeContains(Address a); | |
1031 | 1398 |
1032 // Given an address occupied by a live object, return that object if it is | 1399 // Given an address occupied by a live object, return that object if it is |
1033 // in this space, or Failure::Exception() if it is not. The implementation | 1400 // in this space, or Failure::Exception() if it is not. The implementation |
1034 // iterates over objects in the page containing the address, the cost is | 1401 // iterates over objects in the page containing the address, the cost is |
1035 // linear in the number of objects in the page. It may be slow. | 1402 // linear in the number of objects in the page. It may be slow. |
1036 MUST_USE_RESULT MaybeObject* FindObject(Address addr); | 1403 MUST_USE_RESULT MaybeObject* FindObject(Address addr); |
1037 | 1404 |
1038 // Checks whether page is currently in use by this space. | 1405 // Prepares for a mark-compact GC. |
1039 bool IsUsed(Page* page); | 1406 virtual void PrepareForMarkCompact(); |
1040 | 1407 |
1041 void MarkAllPagesClean(); | 1408 // Current capacity without growing (Size() + Available()). |
1042 | |
1043 // Prepares for a mark-compact GC. | |
1044 virtual void PrepareForMarkCompact(bool will_compact); | |
1045 | |
1046 // The top of allocation in a page in this space. Undefined if page is unused. | |
1047 Address PageAllocationTop(Page* page) { | |
1048 return page == TopPageOf(allocation_info_) ? top() | |
1049 : PageAllocationLimit(page); | |
1050 } | |
1051 | |
1052 // The limit of allocation for a page in this space. | |
1053 virtual Address PageAllocationLimit(Page* page) = 0; | |
1054 | |
1055 void FlushTopPageWatermark() { | |
1056 AllocationTopPage()->SetCachedAllocationWatermark(top()); | |
1057 AllocationTopPage()->InvalidateWatermark(true); | |
1058 } | |
1059 | |
1060 // Current capacity without growing (Size() + Available() + Waste()). | |
1061 intptr_t Capacity() { return accounting_stats_.Capacity(); } | 1409 intptr_t Capacity() { return accounting_stats_.Capacity(); } |
1062 | 1410 |
1063 // Total amount of memory committed for this space. For paged | 1411 // Total amount of memory committed for this space. For paged |
1064 // spaces this equals the capacity. | 1412 // spaces this equals the capacity. |
1065 intptr_t CommittedMemory() { return Capacity(); } | 1413 intptr_t CommittedMemory() { return Capacity(); } |
1066 | 1414 |
1067 // Available bytes without growing. | 1415 // Resets the size of allocated objects to the capacity (as if all |
1068 intptr_t Available() { return accounting_stats_.Available(); } | 1416 // memory were in use) and the wasted space to zero. The stats are |
 | 1417 // then rebuilt during sweeping: as free spaces are discovered they |
 | 1418 // are subtracted from the size and added to the available and |
 | 1419 // wasted totals. |
| 1420 void ClearStats() { |
| 1421 accounting_stats_.ClearSizeWaste(); |
| 1422 } |
1069 | 1423 |
1070 // Allocated bytes in this space. | 1424 // Available bytes without growing. These are the bytes on the free list. |
| 1425 // The bytes in the linear allocation area are not included in this total |
| 1426 // because updating the stats would slow down allocation. New pages are |
| 1427 // immediately added to the free list so they show up here. |
| 1428 intptr_t Available() { return free_list_.available(); } |
| 1429 |
| 1430 // Allocated bytes in this space. Garbage bytes that were not found due to |
| 1431 // lazy sweeping are counted as being allocated! The bytes in the current |
| 1432 // linear allocation area (between top and limit) are also counted here. |
1071 virtual intptr_t Size() { return accounting_stats_.Size(); } | 1433 virtual intptr_t Size() { return accounting_stats_.Size(); } |
1072 | 1434 |
1073 // Wasted bytes due to fragmentation and not recoverable until the | 1435 // As size, but the bytes in the current linear allocation area are not |
1074 // next GC of this space. | 1436 // included. |
1075 intptr_t Waste() { return accounting_stats_.Waste(); } | 1437 virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); } |
1076 | 1438 |
1077 // Returns the address of the first object in this space. | 1439 // Wasted bytes in this space. These are just the bytes that were thrown away |
1078 Address bottom() { return first_page_->ObjectAreaStart(); } | 1440 // due to being too small to use for allocation. They do not include the |
| 1441 // free bytes that were not found at all due to lazy sweeping. |
| 1442 virtual intptr_t Waste() { return accounting_stats_.Waste(); } |
1079 | 1443 |
1080 // Returns the allocation pointer in this space. | 1444 // Returns the allocation pointer in this space. |
1081 Address top() { return allocation_info_.top; } | 1445 Address top() { |
| 1446 return allocation_info_.top; |
| 1447 } |
| 1448 Address limit() { return allocation_info_.limit; } |
1082 | 1449 |
1083 // Allocate the requested number of bytes in the space if possible, return a | 1450 // Allocate the requested number of bytes in the space if possible, return a |
1084 // failure object if not. | 1451 // failure object if not. |
1085 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); | 1452 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); |
1086 | 1453 |
1087 // Allocate the requested number of bytes for relocation during mark-compact | |
1088 // collection. | |
1089 MUST_USE_RESULT inline MaybeObject* MCAllocateRaw(int size_in_bytes); | |
1090 | |
1091 virtual bool ReserveSpace(int bytes); | 1454 virtual bool ReserveSpace(int bytes); |
1092 | 1455 |
1093 // Used by ReserveSpace. | 1456 // Give a block of memory to the space's free list. It might be added to |
1094 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0; | 1457 // the free list or accounted as waste. |
| 1460 int Free(Address start, int size_in_bytes) { |
| 1461 int wasted = free_list_.Free(start, size_in_bytes); |
| 1462 accounting_stats_.DeallocateBytes(size_in_bytes - wasted); |
| 1463 return size_in_bytes - wasted; |
| 1464 } |
1095 | 1465 |
1096 // Free all pages in range from prev (exclusive) to last (inclusive). | 1466 int FreeOrUnmapPage(Page* page, Address start, int size_in_bytes); |
1097 // Freed pages are moved to the end of page list. | |
1098 void FreePages(Page* prev, Page* last); | |
1099 | |
1100 // Deallocates a block. | |
1101 virtual void DeallocateBlock(Address start, | |
1102 int size_in_bytes, | |
1103 bool add_to_freelist) = 0; | |
1104 | 1467 |
1105 // Set space allocation info. | 1468 // Set space allocation info. |
1106 void SetTop(Address top) { | 1469 void SetTop(Address top, Address limit) { |
| 1470 ASSERT(top == limit || |
| 1471 Page::FromAddress(top) == Page::FromAddress(limit - 1)); |
1107 allocation_info_.top = top; | 1472 allocation_info_.top = top; |
1108 allocation_info_.limit = PageAllocationLimit(Page::FromAllocationTop(top)); | 1473 allocation_info_.limit = limit; |
1109 } | 1474 } |
1110 | 1475 |
1111 // --------------------------------------------------------------------------- | 1476 void Allocate(int bytes) { |
1112 // Mark-compact collection support functions | 1477 accounting_stats_.AllocateBytes(bytes); |
1113 | |
1114 // Set the relocation point to the beginning of the space. | |
1115 void MCResetRelocationInfo(); | |
1116 | |
1117 // Writes relocation info to the top page. | |
1118 void MCWriteRelocationInfoToPage() { | |
1119 TopPageOf(mc_forwarding_info_)-> | |
1120 SetAllocationWatermark(mc_forwarding_info_.top); | |
1121 } | 1478 } |
1122 | 1479 |
1123 // Computes the offset of a given address in this space to the beginning | 1480 void IncreaseCapacity(int size) { |
1124 // of the space. | 1481 accounting_stats_.ExpandSpace(size); |
1125 int MCSpaceOffsetForAddress(Address addr); | 1482 } |
1126 | |
1127 // Updates the allocation pointer to the relocation top after a mark-compact | |
1128 // collection. | |
1129 virtual void MCCommitRelocationInfo() = 0; | |
1130 | 1483 |
1131 // Releases half of unused pages. | 1484 // Releases half of unused pages. |
1132 void Shrink(); | 1485 void Shrink(); |
1133 | 1486 |
1134 // Ensures that the capacity is at least 'capacity'. Returns false on failure. | 1487 // Ensures that the capacity is at least 'capacity'. Returns false on failure. |
1135 bool EnsureCapacity(int capacity); | 1488 bool EnsureCapacity(int capacity); |
1136 | 1489 |
| 1490 // The dummy page that anchors the linked list of pages. |
| 1491 Page* anchor() { return &anchor_; } |
| 1492 |
1137 #ifdef DEBUG | 1493 #ifdef DEBUG |
1138 // Print meta info and objects in this space. | 1494 // Print meta info and objects in this space. |
1139 virtual void Print(); | 1495 virtual void Print(); |
1140 | 1496 |
1141 // Verify integrity of this space. | 1497 // Verify integrity of this space. |
1142 virtual void Verify(ObjectVisitor* visitor); | 1498 virtual void Verify(ObjectVisitor* visitor); |
1143 | 1499 |
| 1500 // Reports statistics for the space |
| 1501 void ReportStatistics(); |
| 1502 |
1144 // Overridden by subclasses to verify space-specific object | 1503 // Overridden by subclasses to verify space-specific object |
1145 // properties (e.g., only maps or free-list nodes are in map space). | 1504 // properties (e.g., only maps or free-list nodes are in map space). |
1146 virtual void VerifyObject(HeapObject* obj) {} | 1505 virtual void VerifyObject(HeapObject* obj) {} |
1147 | 1506 |
1148 // Report code object related statistics | 1507 // Report code object related statistics |
1149 void CollectCodeStatistics(); | 1508 void CollectCodeStatistics(); |
1150 static void ReportCodeStatistics(); | 1509 static void ReportCodeStatistics(); |
1151 static void ResetCodeStatistics(); | 1510 static void ResetCodeStatistics(); |
1152 #endif | 1511 #endif |
1153 | 1512 |
1154 // Returns the page of the allocation pointer. | 1513 bool was_swept_conservatively() { return was_swept_conservatively_; } |
1155 Page* AllocationTopPage() { return TopPageOf(allocation_info_); } | 1514 void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; } |
1156 | 1515 |
1157 void RelinkPageListInChunkOrder(bool deallocate_blocks); | 1516 // Evacuation candidates are swept by evacuator. Needs to return a valid |
| 1517 // result before _and_ after evacuation has finished. |
| 1518 static bool ShouldBeSweptLazily(Page* p) { |
| 1519 return !p->IsEvacuationCandidate() && |
| 1520 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && |
| 1521 !p->WasSweptPrecisely(); |
| 1522 } |
| 1523 |
| 1524 void SetPagesToSweep(Page* first, Page* last) { |
| 1525 first_unswept_page_ = first; |
| 1526 last_unswept_page_ = last; |
| 1527 } |
| 1528 |
| 1529 bool AdvanceSweeper(intptr_t bytes_to_sweep); |
| 1530 |
| 1531 bool IsSweepingComplete() { |
| 1532 return !first_unswept_page_->is_valid(); |
| 1533 } |
| 1534 |
| 1535 Page* FirstPage() { return anchor_.next_page(); } |
| 1536 Page* LastPage() { return anchor_.prev_page(); } |
| 1537 |
| 1538 bool IsFragmented(Page* p) { |
| 1539 intptr_t sizes[4]; |
| 1540 free_list_.CountFreeListItems(p, sizes); |
| 1541 |
| 1542 intptr_t ratio; |
| 1543 intptr_t ratio_threshold; |
| 1544 if (identity() == CODE_SPACE) { |
| 1545 ratio = (sizes[1] * 10 + sizes[2] * 2) * 100 / Page::kObjectAreaSize; |
| 1546 ratio_threshold = 10; |
| 1547 } else { |
| 1548 ratio = (sizes[0] * 5 + sizes[1]) * 100 / Page::kObjectAreaSize; |
| 1549 ratio_threshold = 15; |
| 1550 } |
| 1551 |
| 1552 if (FLAG_trace_fragmentation) { |
| 1553 PrintF("%p [%d]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n", |
| 1554 reinterpret_cast<void*>(p), |
| 1555 identity(), |
| 1556 static_cast<int>(sizes[0]), |
| 1557 static_cast<double>(sizes[0] * 100) / Page::kObjectAreaSize, |
| 1558 static_cast<int>(sizes[1]), |
| 1559 static_cast<double>(sizes[1] * 100) / Page::kObjectAreaSize, |
| 1560 static_cast<int>(sizes[2]), |
| 1561 static_cast<double>(sizes[2] * 100) / Page::kObjectAreaSize, |
| 1562 static_cast<int>(sizes[3]), |
| 1563 static_cast<double>(sizes[3] * 100) / Page::kObjectAreaSize, |
| 1564 (ratio > ratio_threshold) ? "[fragmented]" : ""); |
| 1565 } |
| 1566 |
| 1567 return (ratio > ratio_threshold) || FLAG_always_compact; |
| 1568 } |
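 | // Worked example for the non-code branch above (illustrative numbers): |
 | // if small free blocks (sizes[0]) cover 10% of the object area and |
 | // medium blocks (sizes[1]) cover 5%, then ratio == 10 * 5 + 5 == 55 |
 | // against a threshold of 15, so the page counts as fragmented and |
 | // becomes an evacuation candidate. |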
| 1569 |
| 1570 void EvictEvacuationCandidatesFromFreeLists(); |
| 1571 |
| 1572 bool CanExpand(); |
1158 | 1573 |
1159 protected: | 1574 protected: |
1160 // Maximum capacity of this space. | 1575 // Maximum capacity of this space. |
1161 intptr_t max_capacity_; | 1576 intptr_t max_capacity_; |
1162 | 1577 |
1163 // Accounting information for this space. | 1578 // Accounting information for this space. |
1164 AllocationStats accounting_stats_; | 1579 AllocationStats accounting_stats_; |
1165 | 1580 |
1166 // The first page in this space. | 1581 // The dummy page that anchors the doubly-linked list of pages. |
1167 Page* first_page_; | 1582 Page anchor_; |
1168 | 1583 |
1169 // The last page in this space. Initially set in Setup, updated in | 1584 // The space's free list. |
1170 // Expand and Shrink. | 1585 FreeList free_list_; |
1171 Page* last_page_; | |
1172 | |
1173 // True if pages owned by this space are linked in chunk-order. | |
1174 // See comment for class MemoryAllocator for definition of chunk-order. | |
1175 bool page_list_is_chunk_ordered_; | |
1176 | 1586 |
1177 // Normal allocation information. | 1587 // Normal allocation information. |
1178 AllocationInfo allocation_info_; | 1588 AllocationInfo allocation_info_; |
1179 | 1589 |
1180 // Relocation information during mark-compact collections. | |
1181 AllocationInfo mc_forwarding_info_; | |
1182 | |
1183 // Bytes of each page that cannot be allocated. Possibly non-zero | 1590 // Bytes of each page that cannot be allocated. Possibly non-zero |
1184 // for pages in spaces with only fixed-size objects. Always zero | 1591 // for pages in spaces with only fixed-size objects. Always zero |
1185 // for pages in spaces with variable sized objects (those pages are | 1592 // for pages in spaces with variable sized objects (those pages are |
1186 // padded with free-list nodes). | 1593 // padded with free-list nodes). |
1187 int page_extra_; | 1594 int page_extra_; |
1188 | 1595 |
1189 // Sets allocation pointer to a page bottom. | 1596 bool was_swept_conservatively_; |
1190 static void SetAllocationInfo(AllocationInfo* alloc_info, Page* p); | |
1191 | 1597 |
1192 // Returns the top page specified by an allocation info structure. | 1598 Page* first_unswept_page_; |
1193 static Page* TopPageOf(AllocationInfo alloc_info) { | 1599 Page* last_unswept_page_; |
1194 return Page::FromAllocationTop(alloc_info.limit); | |
1195 } | |
1196 | |
1197 int CountPagesToTop() { | |
1198 Page* p = Page::FromAllocationTop(allocation_info_.top); | |
1199 PageIterator it(this, PageIterator::ALL_PAGES); | |
1200 int counter = 1; | |
1201 while (it.has_next()) { | |
1202 if (it.next() == p) return counter; | |
1203 counter++; | |
1204 } | |
1205 UNREACHABLE(); | |
1206 return -1; | |
1207 } | |
1208 | 1600 |
1209 // Expands the space by allocating a fixed number of pages. Returns false if | 1601 // Expands the space by allocating a fixed number of pages. Returns false if |
1210 // it cannot allocate requested number of pages from OS. Newly allocated | 1602 // it cannot allocate the requested number of pages from the OS. |
1211 // pages are append to the last_page; | 1603 bool Expand(); |
1212 bool Expand(Page* last_page); | |
1213 | 1604 |
1214 // Generic fast case allocation function that tries linear allocation in | 1605 // Generic fast case allocation function that tries linear allocation at the |
1215 // the top page of 'alloc_info'. Returns NULL on failure. | 1606 // address denoted by top in allocation_info_. |
1216 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info, | 1607 inline HeapObject* AllocateLinearly(AllocationInfo* alloc_info, |
1217 int size_in_bytes); | 1608 int size_in_bytes); |
1218 | 1609 |
1219 // During normal allocation or deserialization, roll to the next page in | |
1220 // the space (there is assumed to be one) and allocate there. This | |
1221 // function is space-dependent. | |
1222 virtual HeapObject* AllocateInNextPage(Page* current_page, | |
1223 int size_in_bytes) = 0; | |
1224 | |
1225 // Slow path of AllocateRaw. This function is space-dependent. | 1610 // Slow path of AllocateRaw. This function is space-dependent. |
1226 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes) = 0; | 1611 MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes); |
1227 | |
1228 // Slow path of MCAllocateRaw. | |
1229 MUST_USE_RESULT HeapObject* SlowMCAllocateRaw(int size_in_bytes); | |
1230 | 1612 |
1231 #ifdef DEBUG | 1613 #ifdef DEBUG |
1232 // Returns the number of total pages in this space. | 1614 // Returns the number of total pages in this space. |
1233 int CountTotalPages(); | 1615 int CountTotalPages(); |
1234 #endif | 1616 #endif |
1235 | 1617 |
1236 private: | |
1237 // Returns a pointer to the page of the relocation pointer. | |
1238 Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); } | |
1239 | |
1240 friend class PageIterator; | 1618 friend class PageIterator; |
1241 }; | 1619 }; |
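// Illustrative sketch (assuming this division of labor, with
// Failure::RetryAfterGC as the usual retry signal): how the declarations
// above cooperate on allocation.
//
//   MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
//     HeapObject* object = AllocateLinearly(&allocation_info_, size_in_bytes);
//     if (object != NULL) return object;        // Bump-pointer fast path.
//     object = SlowAllocateRaw(size_in_bytes);  // Free list, lazy sweeping,
//     if (object != NULL) return object;        // then expanding the space.
//     return Failure::RetryAfterGC(identity());
//   }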
1242 | 1620 |
1243 | 1621 |
1244 class NumberAndSizeInfo BASE_EMBEDDED { | 1622 class NumberAndSizeInfo BASE_EMBEDDED { |
1245 public: | 1623 public: |
1246 NumberAndSizeInfo() : number_(0), bytes_(0) {} | 1624 NumberAndSizeInfo() : number_(0), bytes_(0) {} |
1247 | 1625 |
1248 int number() const { return number_; } | 1626 int number() const { return number_; } |
1249 void increment_number(int num) { number_ += num; } | 1627 void increment_number(int num) { number_ += num; } |
(...skipping 19 matching lines...) Expand all Loading... |
1269 HistogramInfo() : NumberAndSizeInfo() {} | 1647 HistogramInfo() : NumberAndSizeInfo() {} |
1270 | 1648 |
1271 const char* name() { return name_; } | 1649 const char* name() { return name_; } |
1272 void set_name(const char* name) { name_ = name; } | 1650 void set_name(const char* name) { name_ = name; } |
1273 | 1651 |
1274 private: | 1652 private: |
1275 const char* name_; | 1653 const char* name_; |
1276 }; | 1654 }; |
1277 | 1655 |
1278 | 1656 |
| 1657 enum SemiSpaceId { |
| 1658 kFromSpace = 0, |
| 1659 kToSpace = 1 |
| 1660 }; |
| 1661 |
| 1662 |
| 1663 class SemiSpace; |
| 1664 |
| 1665 |
| 1666 class NewSpacePage : public MemoryChunk { |
| 1667 public: |
| 1668 // GC related flags copied from from-space to to-space when |
| 1669 // flipping semispaces. |
| 1670 static const intptr_t kCopyOnFlipFlagsMask = |
| 1671 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
| 1672 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | |
| 1673 (1 << MemoryChunk::SCAN_ON_SCAVENGE); |
| 1674 |
| 1675 inline NewSpacePage* next_page() const { |
| 1676 return static_cast<NewSpacePage*>(next_chunk()); |
| 1677 } |
| 1678 |
| 1679 inline void set_next_page(NewSpacePage* page) { |
| 1680 set_next_chunk(page); |
| 1681 } |
| 1682 |
| 1683 inline NewSpacePage* prev_page() const { |
| 1684 return static_cast<NewSpacePage*>(prev_chunk()); |
| 1685 } |
| 1686 |
| 1687 inline void set_prev_page(NewSpacePage* page) { |
| 1688 set_prev_chunk(page); |
| 1689 } |
| 1690 |
| 1691 SemiSpace* semi_space() { |
| 1692 return reinterpret_cast<SemiSpace*>(owner()); |
| 1693 } |
| 1694 |
| 1695 bool is_anchor() { return !this->InNewSpace(); } |
| 1696 |
| 1697 static bool IsAtStart(Address addr) { |
| 1698 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) |
| 1699 == kObjectStartOffset; |
| 1700 } |
| 1701 |
| 1702 static bool IsAtEnd(Address addr) { |
| 1703 return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0; |
| 1704 } |
| 1705 |
| 1706 Address address() { |
| 1707 return reinterpret_cast<Address>(this); |
| 1708 } |
| 1709 |
 | 1710 // Finds the NewSpacePage containing the given address. |
| 1711 static inline NewSpacePage* FromAddress(Address address_in_page) { |
| 1712 Address page_start = |
| 1713 reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) & |
| 1714 ~Page::kPageAlignmentMask); |
| 1715 NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start); |
| 1716 ASSERT(page->InNewSpace()); |
| 1717 return page; |
| 1718 } |
| 1719 |
| 1720 // Find the page for a limit address. A limit address is either an address |
| 1721 // inside a page, or the address right after the last byte of a page. |
| 1722 static inline NewSpacePage* FromLimit(Address address_limit) { |
| 1723 return NewSpacePage::FromAddress(address_limit - 1); |
| 1724 } |
| 1725 |
| 1726 private: |
| 1727 // Create a NewSpacePage object that is only used as anchor |
| 1728 // for the doubly-linked list of real pages. |
| 1729 explicit NewSpacePage(SemiSpace* owner) { |
| 1730 InitializeAsAnchor(owner); |
| 1731 } |
| 1732 |
| 1733 static NewSpacePage* Initialize(Heap* heap, |
| 1734 Address start, |
| 1735 SemiSpace* semi_space); |
| 1736 |
 | 1737 // Initialize a fake NewSpacePage used as sentinel at the ends |
| 1738 // of a doubly-linked list of real NewSpacePages. |
| 1739 // Only uses the prev/next links, and sets flags to not be in new-space. |
| 1740 void InitializeAsAnchor(SemiSpace* owner); |
| 1741 |
| 1742 friend class SemiSpace; |
| 1743 friend class SemiSpaceIterator; |
| 1744 }; |
| 1745 |
| 1746 |
1279 // ----------------------------------------------------------------------------- | 1747 // ----------------------------------------------------------------------------- |
1280 // SemiSpace in young generation | 1748 // SemiSpace in young generation |
1281 // | 1749 // |
1282 // A semispace is a contiguous chunk of memory. The mark-compact collector | 1750 // A semispace is a contiguous chunk of memory holding page-like memory |
1283 // uses the memory in the from space as a marking stack when tracing live | 1751 // chunks. The mark-compact collector uses the memory of the first page in |
1284 // objects. | 1752 // the from space as a marking stack when tracing live objects. |
1285 | 1753 |
1286 class SemiSpace : public Space { | 1754 class SemiSpace : public Space { |
1287 public: | 1755 public: |
1288 // Constructor. | 1756 // Constructor. |
1289 explicit SemiSpace(Heap* heap) : Space(heap, NEW_SPACE, NOT_EXECUTABLE) { | 1757 SemiSpace(Heap* heap, SemiSpaceId semispace) |
1290 start_ = NULL; | 1758 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
1291 age_mark_ = NULL; | 1759 start_(NULL), |
1292 } | 1760 age_mark_(NULL), |
| 1761 id_(semispace), |
| 1762 anchor_(this), |
| 1763 current_page_(NULL) { } |
1293 | 1764 |
1294 // Sets up the semispace using the given chunk. | 1765 // Sets up the semispace using the given chunk. |
1295 bool Setup(Address start, int initial_capacity, int maximum_capacity); | 1766 bool Setup(Address start, int initial_capacity, int maximum_capacity); |
1296 | 1767 |
1297 // Tear down the space. Heap memory was not allocated by the space, so it | 1768 // Tear down the space. Heap memory was not allocated by the space, so it |
1298 // is not deallocated here. | 1769 // is not deallocated here. |
1299 void TearDown(); | 1770 void TearDown(); |
1300 | 1771 |
1301 // True if the space has been set up but not torn down. | 1772 // True if the space has been set up but not torn down. |
1302 bool HasBeenSetup() { return start_ != NULL; } | 1773 bool HasBeenSetup() { return start_ != NULL; } |
1303 | 1774 |
1304 // Grow the size of the semispace by committing extra virtual memory. | 1775 // Grow the size of the semispace by committing extra virtual memory. |
1305 // Assumes that the caller has checked that the semispace has not reached | 1776 // Assumes that the caller has checked that the semispace has not reached |
1306 // its maximum capacity (and thus there is space available in the reserved | 1777 // its maximum capacity (and thus there is space available in the reserved |
1307 // address range to grow). | 1778 // address range to grow). |
1308 bool Grow(); | 1779 bool Grow(); |
1309 | 1780 |
1310 // Grow the semispace to the new capacity. The new capacity | 1781 // Grow the semispace to the new capacity. The new capacity |
1311 // requested must be larger than the current capacity. | 1782 // requested must be larger than the current capacity. |
1312 bool GrowTo(int new_capacity); | 1783 bool GrowTo(int new_capacity); |
1313 | 1784 |
1314 // Shrinks the semispace to the new capacity. The new capacity | 1785 // Shrinks the semispace to the new capacity. The new capacity |
1315 // requested must be more than the amount of used memory in the | 1786 // requested must be more than the amount of used memory in the |
1316 // semispace and less than the current capacity. | 1787 // semispace and less than the current capacity. |
1317 bool ShrinkTo(int new_capacity); | 1788 bool ShrinkTo(int new_capacity); |
1318 | 1789 |
1319 // Returns the start address of the space. | 1790 // Returns the start address of the first page of the space. |
1320 Address low() { return start_; } | 1791 Address space_start() { |
| 1792 ASSERT(anchor_.next_page() != &anchor_); |
| 1793 return anchor_.next_page()->body(); |
| 1794 } |
| 1795 |
| 1796 // Returns the start address of the current page of the space. |
| 1797 Address page_low() { |
| 1798 ASSERT(anchor_.next_page() != &anchor_); |
| 1799 return current_page_->body(); |
| 1800 } |
| 1801 |
1321 // Returns one past the end address of the space. | 1802 // Returns one past the end address of the space. |
1322 Address high() { return low() + capacity_; } | 1803 Address space_end() { |
| 1804 return anchor_.prev_page()->body_limit(); |
| 1805 } |
| 1806 |
| 1807 // Returns one past the end address of the current page of the space. |
| 1808 Address page_high() { |
| 1809 return current_page_->body_limit(); |
| 1810 } |
| 1811 |
| 1812 bool AdvancePage() { |
| 1813 NewSpacePage* next_page = current_page_->next_page(); |
| 1814 if (next_page == anchor()) return false; |
| 1815 current_page_ = next_page; |
| 1816 return true; |
| 1817 } |
| 1818 |
| 1819 // Resets the space to using the first page. |
| 1820 void Reset(); |
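// Editorial sketch (not part of the patch): how a bump-pointer allocator
// might use AdvancePage() when the current page cannot fit an object.
// `semi` (a SemiSpace*) and `size` (the requested byte count) are assumed:
//
//   Address top = semi->page_low();
//   if (top + size > semi->page_high()) {
//     if (!semi->AdvancePage()) {
//       // Semispace exhausted; fall back to a garbage collection.
//     }
//     top = semi->page_low();  // restart allocation on the fresh page
//   }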
1323 | 1821 |
1324 // Age mark accessors. | 1822 // Age mark accessors. |
1325 Address age_mark() { return age_mark_; } | 1823 Address age_mark() { return age_mark_; } |
1326 void set_age_mark(Address mark) { age_mark_ = mark; } | 1824 void set_age_mark(Address mark); |
1327 | 1825 |
1328 // True if the address is in the address range of this semispace (not | 1826 // True if the address is in the address range of this semispace (not |
1329 // necessarily below the allocation pointer). | 1827 // necessarily below the allocation pointer). |
1330 bool Contains(Address a) { | 1828 bool Contains(Address a) { |
1331 return (reinterpret_cast<uintptr_t>(a) & address_mask_) | 1829 return (reinterpret_cast<uintptr_t>(a) & address_mask_) |
1332 == reinterpret_cast<uintptr_t>(start_); | 1830 == reinterpret_cast<uintptr_t>(start_); |
1333 } | 1831 } |
1334 | 1832 |
1335 // True if the object is a heap object in the address range of this | 1833 // True if the object is a heap object in the address range of this |
1336 // semispace (not necessarily below the allocation pointer). | 1834 // semispace (not necessarily below the allocation pointer). |
1337 bool Contains(Object* o) { | 1835 bool Contains(Object* o) { |
1338 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; | 1836 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; |
1339 } | 1837 } |
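// Editorial sketch (not part of the patch): the containment tests above
// rely on the semispace being size-aligned. With a hypothetical 4MB
// semispace at start_ == 0x08000000, address_mask_ == ~(4MB - 1) ==
// 0xFFC00000, so an address is inside iff its high bits equal start_:
//
//   0x0812ABCD & 0xFFC00000 == 0x08000000   // contained
//   0x0852ABCD & 0xFFC00000 == 0x08400000   // not contained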
1340 | 1838 |
1341 // The offset of an address from the beginning of the space. | |
1342 int SpaceOffsetForAddress(Address addr) { | |
1343 return static_cast<int>(addr - low()); | |
1344 } | |
1345 | |
1346 // If we don't have these here then SemiSpace will be abstract. However | 1839 // If we don't have these here then SemiSpace will be abstract. However |
1347 // they should never be called. | 1840 // they should never be called. |
1348 virtual intptr_t Size() { | 1841 virtual intptr_t Size() { |
1349 UNREACHABLE(); | 1842 UNREACHABLE(); |
1350 return 0; | 1843 return 0; |
1351 } | 1844 } |
1352 | 1845 |
1353 virtual bool ReserveSpace(int bytes) { | 1846 virtual bool ReserveSpace(int bytes) { |
1354 UNREACHABLE(); | 1847 UNREACHABLE(); |
1355 return false; | 1848 return false; |
1356 } | 1849 } |
1357 | 1850 |
1358 bool is_committed() { return committed_; } | 1851 bool is_committed() { return committed_; } |
1359 bool Commit(); | 1852 bool Commit(); |
1360 bool Uncommit(); | 1853 bool Uncommit(); |
1361 | 1854 |
| 1855 NewSpacePage* first_page() { return anchor_.next_page(); } |
| 1856 NewSpacePage* current_page() { return current_page_; } |
| 1857 |
1362 #ifdef DEBUG | 1858 #ifdef DEBUG |
1363 virtual void Print(); | 1859 virtual void Print(); |
1364 virtual void Verify(); | 1860 virtual void Verify(); |
| 1861 // Validate a range of addresses in a SemiSpace. |
| 1862 // The "from" address must be on a page prior to the "to" address, |
| 1863 // in the linked page order, or it must be earlier on the same page. |
| 1864 static void AssertValidRange(Address from, Address to); |
| 1865 #else |
| 1866 // Do nothing. |
| 1867 inline static void AssertValidRange(Address from, Address to) {} |
1365 #endif | 1868 #endif |
1366 | 1869 |
1367 // Returns the current capacity of the semi space. | 1870 // Returns the current capacity of the semi space. |
1368 int Capacity() { return capacity_; } | 1871 int Capacity() { return capacity_; } |
1369 | 1872 |
1370 // Returns the maximum capacity of the semi space. | 1873 // Returns the maximum capacity of the semi space. |
1371 int MaximumCapacity() { return maximum_capacity_; } | 1874 int MaximumCapacity() { return maximum_capacity_; } |
1372 | 1875 |
1373 // Returns the initial capacity of the semi space. | 1876 // Returns the initial capacity of the semi space. |
1374 int InitialCapacity() { return initial_capacity_; } | 1877 int InitialCapacity() { return initial_capacity_; } |
1375 | 1878 |
| 1879 SemiSpaceId id() { return id_; } |
| 1880 |
| 1881 static void Swap(SemiSpace* from, SemiSpace* to); |
| 1882 |
1376 private: | 1883 private: |
| 1884 // Flips the semispace between being from-space and to-space. |
| 1885 // Copies the flags into the masked positions on all pages in the space. |
| 1886 void FlipPages(intptr_t flags, intptr_t flag_mask); |
| 1887 |
| 1888 NewSpacePage* anchor() { return &anchor_; } |
| 1889 |
1377 // The current and maximum capacity of the space. | 1890 // The current and maximum capacity of the space. |
1378 int capacity_; | 1891 int capacity_; |
1379 int maximum_capacity_; | 1892 int maximum_capacity_; |
1380 int initial_capacity_; | 1893 int initial_capacity_; |
1381 | 1894 |
1382 // The start address of the space. | 1895 // The start address of the space. |
1383 Address start_; | 1896 Address start_; |
1384 // Used to govern object promotion during mark-compact collection. | 1897 // Used to govern object promotion during mark-compact collection. |
1385 Address age_mark_; | 1898 Address age_mark_; |
1386 | 1899 |
1387 // Masks and comparison values to test for containment in this semispace. | 1900 // Masks and comparison values to test for containment in this semispace. |
1388 uintptr_t address_mask_; | 1901 uintptr_t address_mask_; |
1389 uintptr_t object_mask_; | 1902 uintptr_t object_mask_; |
1390 uintptr_t object_expected_; | 1903 uintptr_t object_expected_; |
1391 | 1904 |
1392 bool committed_; | 1905 bool committed_; |
| 1906 SemiSpaceId id_; |
1393 | 1907 |
| 1908 NewSpacePage anchor_; |
| 1909 NewSpacePage* current_page_; |
| 1910 |
| 1911 friend class SemiSpaceIterator; |
| 1912 friend class NewSpacePageIterator; |
1394 public: | 1913 public: |
1395 TRACK_MEMORY("SemiSpace") | 1914 TRACK_MEMORY("SemiSpace") |
1396 }; | 1915 }; |
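// Editorial sketch (not part of the patch): at the end of a scavenge the
// survivor space becomes the new allocation space by swapping the two
// semispaces wholesale; conceptually the collector does something like:
//
//   SemiSpace::Swap(&from_space_, &to_space_);  // exchanges ids and pages
//   // FlipPages() then rewrites the per-page from-space/to-space flags so
//   // that page-level tests stay consistent after the swap.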
1397 | 1916 |
1398 | 1917 |
1399 // A SemiSpaceIterator is an ObjectIterator that iterates over the active | 1918 // A SemiSpaceIterator is an ObjectIterator that iterates over the active |
1400 // semispace of the heap's new space. It iterates over the objects in the | 1919 // semispace of the heap's new space. It iterates over the objects in the |
1401 // semispace from a given start address (defaulting to the bottom of the | 1920 // semispace from a given start address (defaulting to the bottom of the |
1402 // semispace) to the top of the semispace. New objects allocated after the | 1921 // semispace) to the top of the semispace. New objects allocated after the |
1403 // iterator is created are not iterated. | 1922 // iterator is created are not iterated. |
1404 class SemiSpaceIterator : public ObjectIterator { | 1923 class SemiSpaceIterator : public ObjectIterator { |
1405 public: | 1924 public: |
1406 // Create an iterator over the objects in the given space. If no start | 1925 // Create an iterator over the objects in the given space. If no start |
1407 // address is given, the iterator starts from the bottom of the space. If | 1926 // address is given, the iterator starts from the bottom of the space. If |
1408 // no size function is given, the iterator calls Object::Size(). | 1927 // no size function is given, the iterator calls Object::Size(). |
| 1928 |
| 1929 // Iterate over all of allocated to-space. |
1409 explicit SemiSpaceIterator(NewSpace* space); | 1930 explicit SemiSpaceIterator(NewSpace* space); |
| 1931 // Iterate over all of allocated to-space, with a custom size function. |
1410 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func); | 1932 SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func); |
| 1933 // Iterate over part of allocated to-space, from start to the end |
| 1934 // of allocation. |
1411 SemiSpaceIterator(NewSpace* space, Address start); | 1935 SemiSpaceIterator(NewSpace* space, Address start); |
| 1936 // Iterate from one address to another in the same semi-space. |
| 1937 SemiSpaceIterator(Address from, Address to); |
1412 | 1938 |
1413 HeapObject* next() { | 1939 HeapObject* Next() { |
1414 if (current_ == limit_) return NULL; | 1940 if (current_ == limit_) return NULL; |
| 1941 if (NewSpacePage::IsAtEnd(current_)) { |
| 1942 NewSpacePage* page = NewSpacePage::FromLimit(current_); |
| 1943 page = page->next_page(); |
| 1944 ASSERT(!page->is_anchor()); |
| 1945 current_ = page->body(); |
| 1946 if (current_ == limit_) return NULL; |
| 1947 } |
1415 | 1948 |
1416 HeapObject* object = HeapObject::FromAddress(current_); | 1949 HeapObject* object = HeapObject::FromAddress(current_); |
1417 int size = (size_func_ == NULL) ? object->Size() : size_func_(object); | 1950 int size = (size_func_ == NULL) ? object->Size() : size_func_(object); |
1418 | 1951 |
1419 current_ += size; | 1952 current_ += size; |
1420 return object; | 1953 return object; |
1421 } | 1954 } |
1422 | 1955 |
1423 // Implementation of the ObjectIterator functions. | 1956 // Implementation of the ObjectIterator functions. |
1424 virtual HeapObject* next_object() { return next(); } | 1957 virtual HeapObject* next_object() { return Next(); } |
1425 | 1958 |
1426 private: | 1959 private: |
1427 void Initialize(NewSpace* space, Address start, Address end, | 1960 void Initialize(Address start, |
| 1961 Address end, |
1428 HeapObjectCallback size_func); | 1962 HeapObjectCallback size_func); |
1429 | 1963 |
1430 // The semispace. | |
1431 SemiSpace* space_; | |
1432 // The current iteration point. | 1964 // The current iteration point. |
1433 Address current_; | 1965 Address current_; |
1434 // The end of iteration. | 1966 // The end of iteration. |
1435 Address limit_; | 1967 Address limit_; |
1436 // The callback function. | 1968 // The callback function. |
1437 HeapObjectCallback size_func_; | 1969 HeapObjectCallback size_func_; |
1438 }; | 1970 }; |
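// Editorial sketch (not part of the patch): typical use of the iterator.
// `heap` is an assumed Heap* with a set-up new space, and heap->new_space()
// is assumed to return its NewSpace*:
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
//     // ... visit object; allocations made after the iterator was
//     // constructed are not visited ...
//   }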
1439 | 1971 |
1440 | 1972 |
1441 // ----------------------------------------------------------------------------- | 1973 // ----------------------------------------------------------------------------- |
| 1974 // A NewSpacePageIterator iterates the pages in a semispace. |
| 1975 class NewSpacePageIterator BASE_EMBEDDED { |
| 1976 public: |
| 1977 // Make an iterator that runs over all pages in to-space. |
| 1978 explicit inline NewSpacePageIterator(NewSpace* space); |
| 1979 |
| 1980 // Make an iterator that runs over all pages in the given semispace, |
| 1981 // even those not used in allocation. |
| 1982 explicit inline NewSpacePageIterator(SemiSpace* space); |
| 1983 |
| 1984 // Make an iterator that iterates from the page containing start |
| 1985 // to the page that contains limit in the same semispace. |
| 1986 inline NewSpacePageIterator(Address start, Address limit); |
| 1987 |
| 1988 inline bool has_next(); |
| 1989 inline NewSpacePage* next(); |
| 1990 |
| 1991 private: |
| 1992 NewSpacePage* prev_page_; // Previous page returned. |
| 1993 // Next page that will be returned. Cached here so that we can use this |
| 1994 // iterator for operations that deallocate pages. |
| 1995 NewSpacePage* next_page_; |
| 1996 // Last page that the iterator will return. |
| 1997 NewSpacePage* last_page_; |
| 1998 }; |
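// Editorial sketch (not part of the patch): walking every page of a
// semispace, e.g. to clear a per-page flag; `semi` is an assumed SemiSpace*:
//
//   NewSpacePageIterator it(semi);
//   while (it.has_next()) {
//     NewSpacePage* page = it.next();
//     // ... operate on page; because next_page_ is cached before
//     // returning, even deallocating page here would be safe ...
//   }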
| 1999 |
| 2000 |
| 2001 // ----------------------------------------------------------------------------- |
1442 // The young generation space. | 2002 // The young generation space. |
1443 // | 2003 // |
1444 // The new space consists of a contiguous pair of semispaces. It simply | 2004 // The new space consists of a contiguous pair of semispaces. It simply |
1445 // forwards most functions to the appropriate semispace. | 2005 // forwards most functions to the appropriate semispace. |
1446 | 2006 |
1447 class NewSpace : public Space { | 2007 class NewSpace : public Space { |
1448 public: | 2008 public: |
1449 // Constructor. | 2009 // Constructor. |
1450 explicit NewSpace(Heap* heap) | 2010 explicit NewSpace(Heap* heap) |
1451 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), | 2011 : Space(heap, NEW_SPACE, NOT_EXECUTABLE), |
1452 to_space_(heap), | 2012 to_space_(heap, kToSpace), |
1453 from_space_(heap) {} | 2013 from_space_(heap, kFromSpace), |
| 2014 reservation_(), |
| 2015 inline_allocation_limit_step_(0) {} |
1454 | 2016 |
1455 // Sets up the new space using the given chunk. | 2017 // Sets up the new space using the given chunk. |
1456 bool Setup(Address start, int size); | 2018 bool Setup(int reserved_semispace_size, int max_semispace_size); |
1457 | 2019 |
1458 // Tears down the space. Heap memory was not allocated by the space, so it | 2020 // Tears down the space. Heap memory was not allocated by the space, so it |
1459 // is not deallocated here. | 2021 // is not deallocated here. |
1460 void TearDown(); | 2022 void TearDown(); |
1461 | 2023 |
1462 // True if the space has been set up but not torn down. | 2024 // True if the space has been set up but not torn down. |
1463 bool HasBeenSetup() { | 2025 bool HasBeenSetup() { |
1464 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup(); | 2026 return to_space_.HasBeenSetup() && from_space_.HasBeenSetup(); |
1465 } | 2027 } |
1466 | 2028 |
1467 // Flip the pair of spaces. | 2029 // Flip the pair of spaces. |
1468 void Flip(); | 2030 void Flip(); |
1469 | 2031 |
1470 // Grow the capacity of the semispaces. Assumes that they are not at | 2032 // Grow the capacity of the semispaces. Assumes that they are not at |
1471 // their maximum capacity. | 2033 // their maximum capacity. |
1472 void Grow(); | 2034 void Grow(); |
1473 | 2035 |
1474 // Shrink the capacity of the semispaces. | 2036 // Shrink the capacity of the semispaces. |
1475 void Shrink(); | 2037 void Shrink(); |
1476 | 2038 |
1477 // True if the address or object lies in the address range of either | 2039 // True if the address or object lies in the address range of either |
1478 // semispace (not necessarily below the allocation pointer). | 2040 // semispace (not necessarily below the allocation pointer). |
1479 bool Contains(Address a) { | 2041 bool Contains(Address a) { |
1480 return (reinterpret_cast<uintptr_t>(a) & address_mask_) | 2042 return (reinterpret_cast<uintptr_t>(a) & address_mask_) |
1481 == reinterpret_cast<uintptr_t>(start_); | 2043 == reinterpret_cast<uintptr_t>(start_); |
1482 } | 2044 } |
| 2045 |
1483 bool Contains(Object* o) { | 2046 bool Contains(Object* o) { |
1484 return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_; | 2047 Address a = reinterpret_cast<Address>(o); |
| 2048 return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_; |
1485 } | 2049 } |
1486 | 2050 |
1487 // Return the allocated bytes in the active semispace. | 2051 // Return the allocated bytes in the active semispace. |
1488 virtual intptr_t Size() { return static_cast<int>(top() - bottom()); } | 2052 virtual intptr_t Size() { |
| 2053 return pages_used_ * Page::kObjectAreaSize + |
| 2054 static_cast<int>(top() - to_space_.page_low()); |
| 2055 } |
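// Editorial sketch (not part of the patch): worked example of the Size()
// bookkeeping above, with a hypothetical Page::kObjectAreaSize of 8000
// bytes. If pages_used_ == 3 full pages and top() is 1000 bytes past
// page_low() on the current page, then Size() == 3 * 8000 + 1000 == 25000
// allocated bytes.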
| 2056 |
1489 // The same, but returning an int. We have to have the one that returns | 2057 // The same, but returning an int. We have to have the one that returns |
1490 // intptr_t because it is inherited, but if we know we are dealing with the | 2058 // intptr_t because it is inherited, but if we know we are dealing with the |
1491 // new space, which can't get as big as the other spaces, then this is useful: | 2059 // new space, which can't get as big as the other spaces, then this is useful: |
1492 int SizeAsInt() { return static_cast<int>(Size()); } | 2060 int SizeAsInt() { return static_cast<int>(Size()); } |
1493 | 2061 |
1494 // Return the current capacity of a semispace. | 2062 // Return the current capacity of a semispace. |
| 2063 intptr_t EffectiveCapacity() { |
| 2064 ASSERT(to_space_.Capacity() == from_space_.Capacity()); |
| 2065 return (to_space_.Capacity() / Page::kPageSize) * Page::kObjectAreaSize; |
| 2066 } |
| 2067 |
| 2068 // Return the current capacity of a semispace. |
1495 intptr_t Capacity() { | 2069 intptr_t Capacity() { |
1496 ASSERT(to_space_.Capacity() == from_space_.Capacity()); | 2070 ASSERT(to_space_.Capacity() == from_space_.Capacity()); |
1497 return to_space_.Capacity(); | 2071 return to_space_.Capacity(); |
1498 } | 2072 } |
1499 | 2073 |
1500 // Return the total amount of memory committed for new space. | 2074 // Return the total amount of memory committed for new space. |
1501 intptr_t CommittedMemory() { | 2075 intptr_t CommittedMemory() { |
1502 if (from_space_.is_committed()) return 2 * Capacity(); | 2076 if (from_space_.is_committed()) return 2 * Capacity(); |
1503 return Capacity(); | 2077 return Capacity(); |
1504 } | 2078 } |
1505 | 2079 |
1506 // Return the available bytes without growing in the active semispace. | 2080 // Return the available bytes without growing or switching page in the |
1507 intptr_t Available() { return Capacity() - Size(); } | 2081 // active semispace. |
| 2082 intptr_t Available() { |
| 2083 return allocation_info_.limit - allocation_info_.top; |
| 2084 } |
1508 | 2085 |
1509 // Return the maximum capacity of a semispace. | 2086 // Return the maximum capacity of a semispace. |
1510 int MaximumCapacity() { | 2087 int MaximumCapacity() { |
1511 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity()); | 2088 ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity()); |
1512 return to_space_.MaximumCapacity(); | 2089 return to_space_.MaximumCapacity(); |
1513 } | 2090 } |
1514 | 2091 |
1515 // Returns the initial capacity of a semispace. | 2092 // Returns the initial capacity of a semispace. |
1516 int InitialCapacity() { | 2093 int InitialCapacity() { |
1517 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity()); | 2094 ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity()); |
1518 return to_space_.InitialCapacity(); | 2095 return to_space_.InitialCapacity(); |
1519 } | 2096 } |
1520 | 2097 |
1521 // Return the address of the allocation pointer in the active semispace. | 2098 // Return the address of the allocation pointer in the active semispace. |
1522 Address top() { return allocation_info_.top; } | 2099 Address top() { |
| 2100 ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top)); |
| 2101 return allocation_info_.top; |
| 2102 } |
1523 // Return the address of the first object in the active semispace. | 2103 // Return the address of the first object in the active semispace. |
1524 Address bottom() { return to_space_.low(); } | 2104 Address bottom() { return to_space_.space_start(); } |
1525 | 2105 |
1526 // Get the age mark of the inactive semispace. | 2106 // Get the age mark of the inactive semispace. |
1527 Address age_mark() { return from_space_.age_mark(); } | 2107 Address age_mark() { return from_space_.age_mark(); } |
1528 // Set the age mark in the active semispace. | 2108 // Set the age mark in the active semispace. |
1529 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } | 2109 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); } |
1530 | 2110 |
1531 // The start address of the space and a bit mask. ANDing an address in the | 2111 // The start address of the space and a bit mask. ANDing an address in the |
1532 // new space with the mask will result in the start address. | 2112 // new space with the mask will result in the start address. |
1533 Address start() { return start_; } | 2113 Address start() { return start_; } |
1534 uintptr_t mask() { return address_mask_; } | 2114 uintptr_t mask() { return address_mask_; } |
1535 | 2115 |
| 2116 INLINE(uint32_t AddressToMarkbitIndex(Address addr)) { |
| 2117 ASSERT(Contains(addr)); |
| 2118 ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) || |
| 2119 IsAligned(OffsetFrom(addr) - 1, kPointerSize)); |
| 2120 return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2; |
| 2121 } |
| 2122 |
| 2123 INLINE(Address MarkbitIndexToAddress(uint32_t index)) { |
| 2124 return reinterpret_cast<Address>(index << kPointerSizeLog2); |
| 2125 } |
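// Editorial sketch (not part of the patch): on an assumed 64-bit build
// (kPointerSize == 8, kPointerSizeLog2 == 3), an object at start_ + 0x40
// gets mark-bit index 0x40 >> 3 == 8, i.e. one mark bit per pointer-sized
// word of new space, counted from start_.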
| 2126 |
1536 // The allocation top and limit addresses. | 2127 // The allocation top and limit addresses. |
1537 Address* allocation_top_address() { return &allocation_info_.top; } | 2128 Address* allocation_top_address() { return &allocation_info_.top; } |
1538 Address* allocation_limit_address() { return &allocation_info_.limit; } | 2129 Address* allocation_limit_address() { return &allocation_info_.limit; } |
1539 | 2130 |
1540 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) { | 2131 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes) { |
1541 return AllocateRawInternal(size_in_bytes, &allocation_info_); | 2132 return AllocateRawInternal(size_in_bytes); |
1542 } | |
1543 | |
1544 // Allocate the requested number of bytes for relocation during mark-compact | |
1545 // collection. | |
1546 MUST_USE_RESULT MaybeObject* MCAllocateRaw(int size_in_bytes) { | |
1547 return AllocateRawInternal(size_in_bytes, &mc_forwarding_info_); | |
1548 } | 2133 } |
1549 | 2134 |
1550 // Reset the allocation pointer to the beginning of the active semispace. | 2135 // Reset the allocation pointer to the beginning of the active semispace. |
1551 void ResetAllocationInfo(); | 2136 void ResetAllocationInfo(); |
1552 // Reset the relocation pointer to the bottom of the inactive semispace in |
1553 // preparation for mark-compact collection. | |
1554 void MCResetRelocationInfo(); | |
1555 // Update the allocation pointer in the active semispace after a | |
1556 // mark-compact collection. | |
1557 void MCCommitRelocationInfo(); | |
1558 | 2137 |
1559 // Get the extent of the inactive semispace (for use as a marking stack). | 2138 void LowerInlineAllocationLimit(intptr_t step) { |
1560 Address FromSpaceLow() { return from_space_.low(); } | 2139 inline_allocation_limit_step_ = step; |
1561 Address FromSpaceHigh() { return from_space_.high(); } | 2140 if (step == 0) { |
| 2141 allocation_info_.limit = to_space_.page_high(); |
| 2142 } else { |
| 2143 allocation_info_.limit = Min( |
| 2144 allocation_info_.top + inline_allocation_limit_step_, |
| 2145 allocation_info_.limit); |
| 2146 } |
| 2147 top_on_previous_step_ = allocation_info_.top; |
| 2148 } |
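// Editorial sketch (not part of the patch): with step == 512 and top at T,
// the limit becomes Min(T + 512, real page limit), so inlined allocation
// code hits the "limit reached" slow path at least once per 512 allocated
// bytes, giving incremental marking a chance to do a step even when no
// allocation goes through the runtime.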
1562 | 2149 |
1563 // Get the extent of the active semispace (to sweep newly copied objects | 2150 // Get the extent of the inactive semispace (for use as a marking stack, |
1564 // during a scavenge collection). | 2151 // or to zap it). Notice: space-addresses are not necessarily on the |
1565 Address ToSpaceLow() { return to_space_.low(); } | 2152 // same page, so FromSpaceStart() might be above FromSpaceEnd(). |
1566 Address ToSpaceHigh() { return to_space_.high(); } | 2153 Address FromSpacePageLow() { return from_space_.page_low(); } |
| 2154 Address FromSpacePageHigh() { return from_space_.page_high(); } |
| 2155 Address FromSpaceStart() { return from_space_.space_start(); } |
| 2156 Address FromSpaceEnd() { return from_space_.space_end(); } |
1567 | 2157 |
1568 // Offsets from the beginning of the semispaces. | 2158 // Get the extent of the active semispace's pages' memory. |
1569 int ToSpaceOffsetForAddress(Address a) { | 2159 Address ToSpaceStart() { return to_space_.space_start(); } |
1570 return to_space_.SpaceOffsetForAddress(a); | 2160 Address ToSpaceEnd() { return to_space_.space_end(); } |
| 2161 |
| 2162 inline bool ToSpaceContains(Address address) { |
| 2163 return to_space_.Contains(address); |
1571 } | 2164 } |
1572 int FromSpaceOffsetForAddress(Address a) { | 2165 inline bool FromSpaceContains(Address address) { |
1573 return from_space_.SpaceOffsetForAddress(a); | 2166 return from_space_.Contains(address); |
1574 } | 2167 } |
1575 | 2168 |
1576 // True if the object is a heap object in the address range of the | 2169 // True if the object is a heap object in the address range of the |
1577 // respective semispace (not necessarily below the allocation pointer of the | 2170 // respective semispace (not necessarily below the allocation pointer of the |
1578 // semispace). | 2171 // semispace). |
1579 bool ToSpaceContains(Object* o) { return to_space_.Contains(o); } | 2172 inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); } |
1580 bool FromSpaceContains(Object* o) { return from_space_.Contains(o); } | 2173 inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); } |
1581 | 2174 |
1582 bool ToSpaceContains(Address a) { return to_space_.Contains(a); } | 2175 // Try to switch the active semispace to a new, empty, page. |
1583 bool FromSpaceContains(Address a) { return from_space_.Contains(a); } | 2176 // Returns false if this isn't possible or reasonable (i.e., there |
| 2177 // are no pages, or the current page is already empty), or true |
| 2178 // if successful. |
| 2179 bool AddFreshPage(); |
1584 | 2180 |
1585 virtual bool ReserveSpace(int bytes); | 2181 virtual bool ReserveSpace(int bytes); |
1586 | 2182 |
1587 // Resizes a sequential string which must be the most recent thing that was | 2183 // Resizes a sequential string which must be the most recent thing that was |
1588 // allocated in new space. | 2184 // allocated in new space. |
1589 template <typename StringType> | 2185 template <typename StringType> |
1590 inline void ShrinkStringAtAllocationBoundary(String* string, int len); | 2186 inline void ShrinkStringAtAllocationBoundary(String* string, int len); |
1591 | 2187 |
1592 #ifdef DEBUG | 2188 #ifdef DEBUG |
1593 // Verify the active semispace. | 2189 // Verify the active semispace. |
(...skipping 19 matching lines...) |
1613 bool CommitFromSpaceIfNeeded() { | 2209 bool CommitFromSpaceIfNeeded() { |
1614 if (from_space_.is_committed()) return true; | 2210 if (from_space_.is_committed()) return true; |
1615 return from_space_.Commit(); | 2211 return from_space_.Commit(); |
1616 } | 2212 } |
1617 | 2213 |
1618 bool UncommitFromSpace() { | 2214 bool UncommitFromSpace() { |
1619 if (!from_space_.is_committed()) return true; | 2215 if (!from_space_.is_committed()) return true; |
1620 return from_space_.Uncommit(); | 2216 return from_space_.Uncommit(); |
1621 } | 2217 } |
1622 | 2218 |
| 2219 inline intptr_t inline_allocation_limit_step() { |
| 2220 return inline_allocation_limit_step_; |
| 2221 } |
| 2222 |
| 2223 SemiSpace* active_space() { return &to_space_; } |
| 2224 |
1623 private: | 2225 private: |
| 2226 // Update allocation info to match the current to-space page. |
| 2227 void UpdateAllocationInfo(); |
| 2228 |
| 2229 Address chunk_base_; |
| 2230 uintptr_t chunk_size_; |
| 2231 |
1624 // The semispaces. | 2232 // The semispaces. |
1625 SemiSpace to_space_; | 2233 SemiSpace to_space_; |
1626 SemiSpace from_space_; | 2234 SemiSpace from_space_; |
| 2235 VirtualMemory reservation_; |
| 2236 int pages_used_; |
1627 | 2237 |
1628 // Start address and bit mask for containment testing. | 2238 // Start address and bit mask for containment testing. |
1629 Address start_; | 2239 Address start_; |
1630 uintptr_t address_mask_; | 2240 uintptr_t address_mask_; |
1631 uintptr_t object_mask_; | 2241 uintptr_t object_mask_; |
1632 uintptr_t object_expected_; | 2242 uintptr_t object_expected_; |
1633 | 2243 |
1634 // Allocation pointer and limit for normal allocation and allocation during | 2244 // Allocation pointer and limit for normal allocation and allocation during |
1635 // mark-compact collection. | 2245 // mark-compact collection. |
1636 AllocationInfo allocation_info_; | 2246 AllocationInfo allocation_info_; |
1637 AllocationInfo mc_forwarding_info_; | 2247 |
| 2248 // When incremental marking is active we will set allocation_info_.limit |
| 2249 // to be lower than the actual limit and then gradually increase it |
| 2250 // in steps to guarantee that we do incremental marking steps even |
| 2251 // when all allocation is performed from inlined generated code. |
| 2252 intptr_t inline_allocation_limit_step_; |
| 2253 |
| 2254 Address top_on_previous_step_; |
1638 | 2255 |
1639 HistogramInfo* allocated_histogram_; | 2256 HistogramInfo* allocated_histogram_; |
1640 HistogramInfo* promoted_histogram_; | 2257 HistogramInfo* promoted_histogram_; |
1641 | 2258 |
1642 // Implementation of AllocateRaw and MCAllocateRaw. | 2259 // Implementation of AllocateRaw. |
1643 MUST_USE_RESULT inline MaybeObject* AllocateRawInternal( | 2260 MUST_USE_RESULT inline MaybeObject* AllocateRawInternal(int size_in_bytes); |
1644 int size_in_bytes, | |
1645 AllocationInfo* alloc_info); | |
1646 | 2261 |
1647 friend class SemiSpaceIterator; | 2262 friend class SemiSpaceIterator; |
1648 | 2263 |
1649 public: | 2264 public: |
1650 TRACK_MEMORY("NewSpace") | 2265 TRACK_MEMORY("NewSpace") |
1651 }; | 2266 }; |
1652 | 2267 |
1653 | 2268 |
1654 // ----------------------------------------------------------------------------- | 2269 // ----------------------------------------------------------------------------- |
1655 // Free lists for old object spaces | |
1656 // | |
1657 // Free-list nodes are free blocks in the heap. They look like heap objects | |
1658 // (free-list node pointers have the heap object tag, and they have a map like | |
1659 // a heap object). They have a size and a next pointer. The next pointer is | |
1660 // the raw address of the next free list node (or NULL). | |
1661 class FreeListNode: public HeapObject { | |
1662 public: | |
1663 // Obtain a free-list node from a raw address. This is not a cast because | |
1664 // it does not check nor require that the first word at the address is a map | |
1665 // pointer. | |
1666 static FreeListNode* FromAddress(Address address) { | |
1667 return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address)); | |
1668 } | |
1669 | |
1670 static inline bool IsFreeListNode(HeapObject* object); | |
1671 | |
1672 // Set the size in bytes, which can be read with HeapObject::Size(). This | |
1673 // function also writes a map to the first word of the block so that it | |
1674 // looks like a heap object to the garbage collector and heap iteration | |
1675 // functions. | |
1676 void set_size(Heap* heap, int size_in_bytes); | |
1677 | |
1678 // Accessors for the next field. | |
1679 inline Address next(Heap* heap); | |
1680 inline void set_next(Heap* heap, Address next); | |
1681 | |
1682 private: | |
1683 static const int kNextOffset = POINTER_SIZE_ALIGN(ByteArray::kHeaderSize); | |
1684 | |
1685 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode); | |
1686 }; | |
1687 | |
1688 | |
1689 // The free list for the old space. | |
1690 class OldSpaceFreeList BASE_EMBEDDED { | |
1691 public: | |
1692 OldSpaceFreeList(Heap* heap, AllocationSpace owner); | |
1693 | |
1694 // Clear the free list. | |
1695 void Reset(); | |
1696 | |
1697 // Return the number of bytes available on the free list. | |
1698 intptr_t available() { return available_; } | |
1699 | |
1700 // Place a node on the free list. The block of size 'size_in_bytes' | |
1701 // starting at 'start' is placed on the free list. The return value is the | |
1702 // number of bytes that have been lost due to internal fragmentation by | |
1703 // freeing the block. Bookkeeping information will be written to the block, | |
1704 // i.e., its contents will be destroyed. The start address should be word |
1705 // aligned, and the size should be a non-zero multiple of the word size. | |
1706 int Free(Address start, int size_in_bytes); | |
1707 | |
1708 // Allocate a block of size 'size_in_bytes' from the free list. The block | |
1709 // is uninitialized. A failure is returned if no block is available. The |
1710 // number of bytes lost to fragmentation is returned in the output parameter | |
1711 // 'wasted_bytes'. The size should be a non-zero multiple of the word size. | |
1712 MUST_USE_RESULT MaybeObject* Allocate(int size_in_bytes, int* wasted_bytes); | |
1713 | |
1714 void MarkNodes(); | |
1715 | |
1716 private: | |
1717 // The size range of blocks, in bytes. (Smaller allocations are allowed, but | |
1718 // will always result in waste.) | |
1719 static const int kMinBlockSize = 2 * kPointerSize; | |
1720 static const int kMaxBlockSize = Page::kMaxHeapObjectSize; | |
1721 | |
1722 Heap* heap_; | |
1723 | |
1724 // The identity of the owning space, for building allocation Failure | |
1725 // objects. | |
1726 AllocationSpace owner_; | |
1727 | |
1728 // Total available bytes in all blocks on this free list. | |
1729 int available_; | |
1730 | |
1731 // Blocks are put on exact free lists in an array, indexed by size in words. | |
1732 // The available sizes are kept in an increasingly ordered list. Entries | |
1733 // corresponding to sizes < kMinBlockSize always have an empty free list | |
1734 // (but index kHead is used for the head of the size list). | |
1735 struct SizeNode { | |
1736 // Address of the head FreeListNode of the implied block size or NULL. | |
1737 Address head_node_; | |
1738 // Size (words) of the next larger available size if head_node_ != NULL. | |
1739 int next_size_; | |
1740 }; | |
1741 static const int kFreeListsLength = kMaxBlockSize / kPointerSize + 1; | |
1742 SizeNode free_[kFreeListsLength]; | |
1743 | |
1744 // Sentinel elements for the size list. Real elements are in ]kHead..kEnd[. | |
1745 static const int kHead = kMinBlockSize / kPointerSize - 1; | |
1746 static const int kEnd = kMaxInt; | |
1747 | |
1748 // We keep a "finger" in the size list to speed up a common pattern: | |
1749 // repeated requests for the same or increasing sizes. | |
1750 int finger_; | |
1751 | |
1752 // Starting from *prev, find and return the smallest size >= index (words), | |
1753 // or kEnd. Update *prev to be the largest size < index, or kHead. | |
1754 int FindSize(int index, int* prev) { | |
1755 int cur = free_[*prev].next_size_; | |
1756 while (cur < index) { | |
1757 *prev = cur; | |
1758 cur = free_[cur].next_size_; | |
1759 } | |
1760 return cur; | |
1761 } | |
1762 | |
1763 // Remove an existing element from the size list. | |
1764 void RemoveSize(int index) { | |
1765 int prev = kHead; | |
1766 int cur = FindSize(index, &prev); | |
1767 ASSERT(cur == index); | |
1768 free_[prev].next_size_ = free_[cur].next_size_; | |
1769 finger_ = prev; | |
1770 } | |
1771 | |
1772 // Insert a new element into the size list. | |
1773 void InsertSize(int index) { | |
1774 int prev = kHead; | |
1775 int cur = FindSize(index, &prev); | |
1776 ASSERT(cur != index); | |
1777 free_[prev].next_size_ = index; | |
1778 free_[index].next_size_ = cur; | |
1779 } | |
1780 | |
1781 // The size list is not updated during a sequence of calls to Free, but is | |
1782 // rebuilt before the next allocation. | |
1783 void RebuildSizeList(); | |
1784 bool needs_rebuild_; | |
1785 | |
1786 #ifdef DEBUG | |
1787 // Does this free list contain a free block located at the address of 'node'? | |
1788 bool Contains(FreeListNode* node); | |
1789 #endif | |
1790 | |
1791 DISALLOW_COPY_AND_ASSIGN(OldSpaceFreeList); | |
1792 }; | |
1793 | |
1794 | |
1795 // The free list for the map space. | |
1796 class FixedSizeFreeList BASE_EMBEDDED { | |
1797 public: | |
1798 FixedSizeFreeList(Heap* heap, AllocationSpace owner, int object_size); | |
1799 | |
1800 // Clear the free list. | |
1801 void Reset(); | |
1802 | |
1803 // Return the number of bytes available on the free list. | |
1804 intptr_t available() { return available_; } | |
1805 | |
1806 // Place a node on the free list. The block starting at 'start' (assumed to | |
1807 // have size object_size_) is placed on the free list. Bookkeeping | |
1808 // information will be written to the block, i.e., its contents will be |
1809 // destroyed. The start address should be word aligned. | |
1810 void Free(Address start); | |
1811 | |
1812 // Allocate a fixed-sized block from the free list. The block is uninitialized. |
1813 // A failure is returned if no block is available. | |
1814 MUST_USE_RESULT MaybeObject* Allocate(); | |
1815 | |
1816 void MarkNodes(); | |
1817 | |
1818 private: | |
1819 Heap* heap_; | |
1820 | |
1821 // Available bytes on the free list. | |
1822 intptr_t available_; | |
1823 | |
1824 // The head of the free list. | |
1825 Address head_; | |
1826 | |
1827 // The tail of the free list. | |
1828 Address tail_; | |
1829 | |
1830 // The identity of the owning space, for building allocation Failure | |
1831 // objects. | |
1832 AllocationSpace owner_; | |
1833 | |
1834 // The size of the objects in this space. | |
1835 int object_size_; | |
1836 | |
1837 DISALLOW_COPY_AND_ASSIGN(FixedSizeFreeList); | |
1838 }; | |
1839 | |
1840 | |
1841 // ----------------------------------------------------------------------------- | |
1842 // Old object space (excluding map objects) | 2270 // Old object space (excluding map objects) |
1843 | 2271 |
1844 class OldSpace : public PagedSpace { | 2272 class OldSpace : public PagedSpace { |
1845 public: | 2273 public: |
1846 // Creates an old space object with a given maximum capacity. | 2274 // Creates an old space object with a given maximum capacity. |
1847 // The constructor does not allocate pages from OS. | 2275 // The constructor does not allocate pages from OS. |
1848 OldSpace(Heap* heap, | 2276 OldSpace(Heap* heap, |
1849 intptr_t max_capacity, | 2277 intptr_t max_capacity, |
1850 AllocationSpace id, | 2278 AllocationSpace id, |
1851 Executability executable) | 2279 Executability executable) |
1852 : PagedSpace(heap, max_capacity, id, executable), | 2280 : PagedSpace(heap, max_capacity, id, executable) { |
1853 free_list_(heap, id) { | |
1854 page_extra_ = 0; | 2281 page_extra_ = 0; |
1855 } | 2282 } |
1856 | 2283 |
1857 // The bytes available on the free list (ie, not above the linear allocation | |
1858 // pointer). | |
1859 intptr_t AvailableFree() { return free_list_.available(); } | |
1860 | |
1861 // The limit of allocation for a page in this space. | 2284 // The limit of allocation for a page in this space. |
1862 virtual Address PageAllocationLimit(Page* page) { | 2285 virtual Address PageAllocationLimit(Page* page) { |
1863 return page->ObjectAreaEnd(); | 2286 return page->ObjectAreaEnd(); |
1864 } | 2287 } |
1865 | 2288 |
1866 // Give a block of memory to the space's free list. It might be added to | |
1867 // the free list or accounted as waste. | |
1868 // If add_to_freelist is false then just accounting stats are updated and | |
1869 // no attempt to add area to free list is made. | |
1870 void Free(Address start, int size_in_bytes, bool add_to_freelist) { | |
1871 accounting_stats_.DeallocateBytes(size_in_bytes); | |
1872 | |
1873 if (add_to_freelist) { | |
1874 int wasted_bytes = free_list_.Free(start, size_in_bytes); | |
1875 accounting_stats_.WasteBytes(wasted_bytes); | |
1876 } | |
1877 } | |
1878 | |
1879 virtual void DeallocateBlock(Address start, | |
1880 int size_in_bytes, | |
1881 bool add_to_freelist); | |
1882 | |
1883 // Prepare for full garbage collection. Resets the relocation pointer and | |
1884 // clears the free list. | |
1885 virtual void PrepareForMarkCompact(bool will_compact); | |
1886 | |
1887 // Updates the allocation pointer to the relocation top after a mark-compact | |
1888 // collection. | |
1889 virtual void MCCommitRelocationInfo(); | |
1890 | |
1891 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page); | |
1892 | |
1893 void MarkFreeListNodes() { free_list_.MarkNodes(); } | |
1894 | |
1895 #ifdef DEBUG | |
1896 // Reports statistics for the space | |
1897 void ReportStatistics(); | |
1898 #endif | |
1899 | |
1900 protected: | |
1901 // Virtual function in the superclass. Slow path of AllocateRaw. | |
1902 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | |
1903 | |
1904 // Virtual function in the superclass. Allocate linearly at the start of | |
1905 // the page after current_page (there is assumed to be one). | |
1906 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); | |
1907 | |
1908 private: | |
1909 // The space's free list. | |
1910 OldSpaceFreeList free_list_; | |
1911 | |
1912 public: | 2289 public: |
1913 TRACK_MEMORY("OldSpace") | 2290 TRACK_MEMORY("OldSpace") |
1914 }; | 2291 }; |
1915 | 2292 |
1916 | 2293 |
| 2294 // For contiguous spaces, top should be in the space (or at the end) and limit |
| 2295 // should be the end of the space. |
| 2296 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \ |
| 2297 ASSERT((space).page_low() <= (info).top \ |
| 2298 && (info).top <= (space).page_high() \ |
| 2299 && (info).limit <= (space).page_high()) |
| 2300 |
| 2301 |
1917 // ----------------------------------------------------------------------------- | 2302 // ----------------------------------------------------------------------------- |
1918 // Old space for objects of a fixed size | 2303 // Old space for objects of a fixed size |
1919 | 2304 |
1920 class FixedSpace : public PagedSpace { | 2305 class FixedSpace : public PagedSpace { |
1921 public: | 2306 public: |
1922 FixedSpace(Heap* heap, | 2307 FixedSpace(Heap* heap, |
1923 intptr_t max_capacity, | 2308 intptr_t max_capacity, |
1924 AllocationSpace id, | 2309 AllocationSpace id, |
1925 int object_size_in_bytes, | 2310 int object_size_in_bytes, |
1926 const char* name) | 2311 const char* name) |
1927 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), | 2312 : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE), |
1928 object_size_in_bytes_(object_size_in_bytes), | 2313 object_size_in_bytes_(object_size_in_bytes), |
1929 name_(name), | 2314 name_(name) { |
1930 free_list_(heap, id, object_size_in_bytes) { | |
1931 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; | 2315 page_extra_ = Page::kObjectAreaSize % object_size_in_bytes; |
1932 } | 2316 } |
1933 | 2317 |
1934 // The limit of allocation for a page in this space. | 2318 // The limit of allocation for a page in this space. |
1935 virtual Address PageAllocationLimit(Page* page) { | 2319 virtual Address PageAllocationLimit(Page* page) { |
1936 return page->ObjectAreaEnd() - page_extra_; | 2320 return page->ObjectAreaEnd() - page_extra_; |
1937 } | 2321 } |
1938 | 2322 |
1939 int object_size_in_bytes() { return object_size_in_bytes_; } | 2323 int object_size_in_bytes() { return object_size_in_bytes_; } |
1940 | 2324 |
1941 // Give a fixed sized block of memory to the space's free list. | |
1942 // If add_to_freelist is false then just accounting stats are updated and | |
1943 // no attempt to add area to free list is made. | |
1944 void Free(Address start, bool add_to_freelist) { | |
1945 if (add_to_freelist) { | |
1946 free_list_.Free(start); | |
1947 } | |
1948 accounting_stats_.DeallocateBytes(object_size_in_bytes_); | |
1949 } | |
1950 | |
1951 // Prepares for a mark-compact GC. | 2325 // Prepares for a mark-compact GC. |
1952 virtual void PrepareForMarkCompact(bool will_compact); | 2326 virtual void PrepareForMarkCompact(); |
1953 | |
1954 // Updates the allocation pointer to the relocation top after a mark-compact | |
1955 // collection. | |
1956 virtual void MCCommitRelocationInfo(); | |
1957 | |
1958 virtual void PutRestOfCurrentPageOnFreeList(Page* current_page); | |
1959 | |
1960 virtual void DeallocateBlock(Address start, | |
1961 int size_in_bytes, | |
1962 bool add_to_freelist); | |
1963 | 2327 |
1964 void MarkFreeListNodes() { free_list_.MarkNodes(); } | 2328 void MarkFreeListNodes() { free_list_.MarkNodes(); } |
1965 | 2329 |
1966 #ifdef DEBUG | |
1967 // Reports statistic info of the space | |
1968 void ReportStatistics(); | |
1969 #endif | |
1970 | |
1971 protected: | 2330 protected: |
1972 // Virtual function in the superclass. Slow path of AllocateRaw. | |
1973 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes); | |
1974 | |
1975 // Virtual function in the superclass. Allocate linearly at the start of | |
1976 // the page after current_page (there is assumed to be one). | |
1977 HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes); | |
1978 | |
1979 void ResetFreeList() { | 2331 void ResetFreeList() { |
1980 free_list_.Reset(); | 2332 free_list_.Reset(); |
1981 } | 2333 } |
1982 | 2334 |
1983 private: | 2335 private: |
1984 // The size of objects in this space. | 2336 // The size of objects in this space. |
1985 int object_size_in_bytes_; | 2337 int object_size_in_bytes_; |
1986 | 2338 |
1987 // The name of this space. | 2339 // The name of this space. |
1988 const char* name_; | 2340 const char* name_; |
1989 | |
1990 // The space's free list. | |
1991 FixedSizeFreeList free_list_; | |
1992 }; | 2341 }; |
1993 | 2342 |
1994 | 2343 |
1995 // ----------------------------------------------------------------------------- | 2344 // ----------------------------------------------------------------------------- |
1996 // Old space for all map objects | 2345 // Old space for all map objects |
1997 | 2346 |
1998 class MapSpace : public FixedSpace { | 2347 class MapSpace : public FixedSpace { |
1999 public: | 2348 public: |
2000 // Creates a map space object with a maximum capacity. | 2349 // Creates a map space object with a maximum capacity. |
2001 MapSpace(Heap* heap, | 2350 MapSpace(Heap* heap, |
2002 intptr_t max_capacity, | 2351 intptr_t max_capacity, |
2003 int max_map_space_pages, | 2352 int max_map_space_pages, |
2004 AllocationSpace id) | 2353 AllocationSpace id) |
2005 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"), | 2354 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"), |
2006 max_map_space_pages_(max_map_space_pages) { | 2355 max_map_space_pages_(max_map_space_pages) { |
2007 ASSERT(max_map_space_pages < kMaxMapPageIndex); | |
2008 } | 2356 } |
2009 | 2357 |
2010 // Prepares for a mark-compact GC. | 2358 // The maximum number of pages in a map space. |
2011 virtual void PrepareForMarkCompact(bool will_compact); | 2359 // TODO(1600): this limit is artificial just to keep code compilable. |
| 2360 static const int kMaxMapPageIndex = 1 << 16; |
2012 | 2361 |
2013 // Given an index, returns the page address. | 2362 virtual int RoundSizeDownToObjectAlignment(int size) { |
2014 Address PageAddress(int page_index) { return page_addresses_[page_index]; } | 2363 if (IsPowerOf2(Map::kSize)) { |
2015 | 2364 return RoundDown(size, Map::kSize); |
2016 static const int kMaxMapPageIndex = 1 << MapWord::kMapPageIndexBits; | 2365 } else { |
2017 | 2366 return (size / Map::kSize) * Map::kSize; |
2018 // Are map pointers encodable into map word? | |
2019 bool MapPointersEncodable() { | |
2020 if (!FLAG_use_big_map_space) { | |
2021 ASSERT(CountPagesToTop() <= kMaxMapPageIndex); | |
2022 return true; | |
2023 } | 2367 } |
2024 return CountPagesToTop() <= max_map_space_pages_; | |
2025 } | |
2026 | |
2027 // Should be called after forced sweep to find out if map space needs | |
2028 // compaction. | |
2029 bool NeedsCompaction(int live_maps) { | |
2030 return !MapPointersEncodable() && live_maps <= CompactionThreshold(); | |
2031 } | |
2032 | |
2033 Address TopAfterCompaction(int live_maps) { | |
2034 ASSERT(NeedsCompaction(live_maps)); | |
2035 | |
2036 int pages_left = live_maps / kMapsPerPage; | |
2037 PageIterator it(this, PageIterator::ALL_PAGES); | |
2038 while (pages_left-- > 0) { | |
2039 ASSERT(it.has_next()); | |
2040 it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks); | |
2041 } | |
2042 ASSERT(it.has_next()); | |
2043 Page* top_page = it.next(); | |
2044 top_page->SetRegionMarks(Page::kAllRegionsCleanMarks); | |
2045 ASSERT(top_page->is_valid()); | |
2046 | |
2047 int offset = live_maps % kMapsPerPage * Map::kSize; | |
2048 Address top = top_page->ObjectAreaStart() + offset; | |
2049 ASSERT(top < top_page->ObjectAreaEnd()); | |
2050 ASSERT(Contains(top)); | |
2051 | |
2052 return top; | |
2053 } | |
2054 | |
2055 void FinishCompaction(Address new_top, int live_maps) { | |
2056 Page* top_page = Page::FromAddress(new_top); | |
2057 ASSERT(top_page->is_valid()); | |
2058 | |
2059 SetAllocationInfo(&allocation_info_, top_page); | |
2060 allocation_info_.top = new_top; | |
2061 | |
2062 int new_size = live_maps * Map::kSize; | |
2063 accounting_stats_.DeallocateBytes(accounting_stats_.Size()); | |
2064 accounting_stats_.AllocateBytes(new_size); | |
2065 | |
2066 // Flush allocation watermarks. | |
2067 for (Page* p = first_page_; p != top_page; p = p->next_page()) { | |
2068 p->SetAllocationWatermark(p->AllocationTop()); | |
2069 } | |
2070 top_page->SetAllocationWatermark(new_top); | |
2071 | |
2072 #ifdef DEBUG | |
2073 if (FLAG_enable_slow_asserts) { | |
2074 intptr_t actual_size = 0; | |
2075 for (Page* p = first_page_; p != top_page; p = p->next_page()) | |
2076 actual_size += kMapsPerPage * Map::kSize; | |
2077 actual_size += (new_top - top_page->ObjectAreaStart()); | |
2078 ASSERT(accounting_stats_.Size() == actual_size); | |
2079 } | |
2080 #endif | |
2081 | |
2082 Shrink(); | |
2083 ResetFreeList(); | |
2084 } | 2368 } |
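// Editorial sketch (not part of the patch): the else-branch above handles
// a Map::kSize that is not a power of two. With a hypothetical
// Map::kSize == 88, a size of 1000 rounds down to (1000 / 88) * 88 == 968;
// with a power-of-two size the cheaper RoundDown() masking is used instead.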
2085 | 2369 |
2086 protected: | 2370 protected: |
2087 #ifdef DEBUG | 2371 #ifdef DEBUG |
2088 virtual void VerifyObject(HeapObject* obj); | 2372 virtual void VerifyObject(HeapObject* obj); |
2089 #endif | 2373 #endif |
2090 | 2374 |
2091 private: | 2375 private: |
2092 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize; | 2376 static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize; |
2093 | 2377 |
2094 // Do map space compaction if there is a page gap. | 2378 // Do map space compaction if there is a page gap. |
2095 int CompactionThreshold() { | 2379 int CompactionThreshold() { |
2096 return kMapsPerPage * (max_map_space_pages_ - 1); | 2380 return kMapsPerPage * (max_map_space_pages_ - 1); |
2097 } | 2381 } |
2098 | 2382 |
2099 const int max_map_space_pages_; | 2383 const int max_map_space_pages_; |
2100 | 2384 |
2101 // An array of page start address in a map space. | |
2102 Address page_addresses_[kMaxMapPageIndex]; | |
2103 | |
2104 public: | 2385 public: |
2105 TRACK_MEMORY("MapSpace") | 2386 TRACK_MEMORY("MapSpace") |
2106 }; | 2387 }; |
2107 | 2388 |
2108 | 2389 |
2109 // ----------------------------------------------------------------------------- | 2390 // ----------------------------------------------------------------------------- |
2110 // Old space for all global object property cell objects | 2391 // Old space for all global object property cell objects |
2111 | 2392 |
2112 class CellSpace : public FixedSpace { | 2393 class CellSpace : public FixedSpace { |
2113 public: | 2394 public: |
2114 // Creates a property cell space object with a maximum capacity. | 2395 // Creates a property cell space object with a maximum capacity. |
2115 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) | 2396 CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id) |
2116 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell") | 2397 : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell") |
2117 {} | 2398 {} |
2118 | 2399 |
| 2400 virtual int RoundSizeDownToObjectAlignment(int size) { |
| 2401 if (IsPowerOf2(JSGlobalPropertyCell::kSize)) { |
| 2402 return RoundDown(size, JSGlobalPropertyCell::kSize); |
| 2403 } else { |
| 2404 return (size / JSGlobalPropertyCell::kSize) * JSGlobalPropertyCell::kSize; |
| 2405 } |
| 2406 } |
| 2407 |
2119 protected: | 2408 protected: |
2120 #ifdef DEBUG | 2409 #ifdef DEBUG |
2121 virtual void VerifyObject(HeapObject* obj); | 2410 virtual void VerifyObject(HeapObject* obj); |
2122 #endif | 2411 #endif |
2123 | 2412 |
2124 public: | 2413 public: |
2125 TRACK_MEMORY("CellSpace") | 2414 TRACK_MEMORY("CellSpace") |
2126 }; | 2415 }; |
2127 | 2416 |
2128 | 2417 |
2129 // ----------------------------------------------------------------------------- | 2418 // ----------------------------------------------------------------------------- |
2130 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by | 2419 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by |
2131 // the large object space. A large object is allocated from OS heap with | 2420 // the large object space. A large object is allocated from OS heap with |
2132 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). | 2421 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset). |
2133 // A large object always starts at Page::kObjectStartOffset into a page. | 2422 // A large object always starts at Page::kObjectStartOffset into a page. |
2134 // Large objects do not move during garbage collections. | 2423 // Large objects do not move during garbage collections. |
2135 | 2424 |
2136 // A LargeObjectChunk holds exactly one large object page with exactly one | |
2137 // large object. | |
2138 class LargeObjectChunk { | |
2139 public: | |
2140 // Allocates a new LargeObjectChunk that contains a large object page | |
2141 // (Page::kPageSize aligned) that has at least size_in_bytes (for a large | |
2142 // object) bytes after the object area start of that page. | |
2143 static LargeObjectChunk* New(int size_in_bytes, Executability executable); | |
2144 | |
2145 // Free the memory associated with the chunk. | |
2146 void Free(Executability executable); | |
2147 | |
2148 // Interpret a raw address as a large object chunk. | |
2149 static LargeObjectChunk* FromAddress(Address address) { | |
2150 return reinterpret_cast<LargeObjectChunk*>(address); | |
2151 } | |
2152 | |
2153 // Returns the address of this chunk. | |
2154 Address address() { return reinterpret_cast<Address>(this); } | |
2155 | |
2156 Page* GetPage() { | |
2157 return Page::FromAddress(RoundUp(address(), Page::kPageSize)); | |
2158 } | |
2159 | |
2160 // Accessors for the fields of the chunk. | |
2161 LargeObjectChunk* next() { return next_; } | |
2162 void set_next(LargeObjectChunk* chunk) { next_ = chunk; } | |
2163 size_t size() { return size_ & ~Page::kPageFlagMask; } | |
2164 | |
2165 // Compute the start address in the chunk. | |
2166 Address GetStartAddress() { return GetPage()->ObjectAreaStart(); } | |
2167 | |
2168 // Returns the object in this chunk. | |
2169 HeapObject* GetObject() { return HeapObject::FromAddress(GetStartAddress()); } | |
2170 | |
2171 // Given a requested size, returns the physical size of a chunk to be |
2172 // allocated. |
2173 static int ChunkSizeFor(int size_in_bytes); | |
2174 | |
2175 // Given a chunk size, returns the object size it can accommodate. Used by | |
2176 // LargeObjectSpace::Available. | |
2177 static intptr_t ObjectSizeFor(intptr_t chunk_size) { | |
2178 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; | |
2179 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; | |
2180 } | |
2181 | |
2182 private: | |
2183 // A pointer to the next large object chunk in the space or NULL. | |
2184 LargeObjectChunk* next_; | |
2185 | |
2186 // The total size of this chunk. | |
2187 size_t size_; | |
2188 | |
2189 public: | |
2190 TRACK_MEMORY("LargeObjectChunk") | |
2191 }; | |
2192 | |
2193 | |
2194 class LargeObjectSpace : public Space { | 2425 class LargeObjectSpace : public Space { |
2195 public: | 2426 public: |
2196 LargeObjectSpace(Heap* heap, AllocationSpace id); | 2427 LargeObjectSpace(Heap* heap, AllocationSpace id); |
2197 virtual ~LargeObjectSpace() {} | 2428 virtual ~LargeObjectSpace() {} |
2198 | 2429 |
2199 // Initializes internal data structures. | 2430 // Initializes internal data structures. |
2200 bool Setup(); | 2431 bool Setup(); |
2201 | 2432 |
2202 // Releases internal resources, frees objects in this space. | 2433 // Releases internal resources, frees objects in this space. |
2203 void TearDown(); | 2434 void TearDown(); |
2204 | 2435 |
2205 // Allocates a (non-FixedArray, non-Code) large object. | 2436 static intptr_t ObjectSizeFor(intptr_t chunk_size) { |
2206 MUST_USE_RESULT MaybeObject* AllocateRaw(int size_in_bytes); | 2437 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; |
2207 // Allocates a large Code object. | 2438 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; |
2208 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes); | 2439 } |
2209 // Allocates a large FixedArray. | 2440 |
2210 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes); | 2441 // Shared implementation of AllocateRaw, AllocateRawCode and |
| 2442 // AllocateRawFixedArray. |
| 2443 MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size, |
| 2444 Executability executable); |
2211 | 2445 |
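The three type-specific allocators on the left (AllocateRaw, AllocateRawCode, AllocateRawFixedArray) collapse into the single AllocateRaw on the right; the one behavioral difference between them, whether the memory must be executable, becomes an explicit Executability parameter. A self-contained mock of the consolidated shape (the names mirror the header; the body is illustrative only, not V8's implementation):

    #include <cstdlib>

    enum Executability { NOT_EXECUTABLE, EXECUTABLE };

    // One entry point instead of three near-identical methods. A real
    // implementation would request executable pages from the OS when asked;
    // this mock ignores the flag.
    void* AllocateRaw(int object_size, Executability executable) {
      (void)executable;
      return std::malloc(object_size);
    }

    int main() {
      void* code = AllocateRaw(4096, EXECUTABLE);      // was AllocateRawCode
      void* data = AllocateRaw(4096, NOT_EXECUTABLE);  // was AllocateRawFixedArray
      std::free(code);
      std::free(data);
      return 0;
    }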
2212 // Available bytes for objects in this space. | 2446 // Available bytes for objects in this space. |
2213 inline intptr_t Available(); | 2447 inline intptr_t Available(); |
2214 | 2448 |
2215 virtual intptr_t Size() { | 2449 virtual intptr_t Size() { |
2216 return size_; | 2450 return size_; |
2217 } | 2451 } |
2218 | 2452 |
2219 virtual intptr_t SizeOfObjects() { | 2453 virtual intptr_t SizeOfObjects() { |
2220 return objects_size_; | 2454 return objects_size_; |
2221 } | 2455 } |
2222 | 2456 |
2223 int PageCount() { | 2457 int PageCount() { |
2224 return page_count_; | 2458 return page_count_; |
2225 } | 2459 } |
2226 | 2460 |
2227 // Finds an object for a given address; returns Failure::Exception() | 2461 // Finds an object for a given address; returns Failure::Exception() |
2228 // if it is not found. The function iterates through all objects in this | 2462 // if it is not found. The function iterates through all objects in this |
2229 // space, so it may be slow. | 2463 // space, so it may be slow. |
2230 MaybeObject* FindObject(Address a); | 2464 MaybeObject* FindObject(Address a); |
2231 | 2465 |
2232 // Finds a large object page containing the given pc; returns NULL | 2466 // Finds a large object page containing the given pc; returns NULL |
2233 // if no such page exists. | 2467 // if no such page exists. |
2234 LargeObjectChunk* FindChunkContainingPc(Address pc); | 2468 LargePage* FindPageContainingPc(Address pc); |
2235 | |
2236 // Iterates objects covered by dirty regions. | |
2237 void IterateDirtyRegions(ObjectSlotCallback func); | |
2238 | 2469 |
2239 // Frees unmarked objects. | 2470 // Frees unmarked objects. |
2240 void FreeUnmarkedObjects(); | 2471 void FreeUnmarkedObjects(); |
2241 | 2472 |
2242 // Checks whether a heap object is in this space; O(1). | 2473 // Checks whether a heap object is in this space; O(1). |
2243 bool Contains(HeapObject* obj); | 2474 bool Contains(HeapObject* obj); |
2244 | 2475 |
2245 // Checks whether the space is empty. | 2476 // Checks whether the space is empty. |
2246 bool IsEmpty() { return first_chunk_ == NULL; } | 2477 bool IsEmpty() { return first_page_ == NULL; } |
2247 | 2478 |
2248 // See the comments for ReserveSpace in the Space class. This has to be | 2479 // See the comments for ReserveSpace in the Space class. This has to be |
2249 // called after ReserveSpace has been called on the paged spaces, since they | 2480 // called after ReserveSpace has been called on the paged spaces, since they |
2250 // may use some memory, leaving less for large objects. | 2481 // may use some memory, leaving less for large objects. |
2251 virtual bool ReserveSpace(int bytes); | 2482 virtual bool ReserveSpace(int bytes); |
2252 | 2483 |
| 2484 LargePage* first_page() { return first_page_; } |
| 2485 |
2253 #ifdef DEBUG | 2486 #ifdef DEBUG |
2254 virtual void Verify(); | 2487 virtual void Verify(); |
2255 virtual void Print(); | 2488 virtual void Print(); |
2256 void ReportStatistics(); | 2489 void ReportStatistics(); |
2257 void CollectCodeStatistics(); | 2490 void CollectCodeStatistics(); |
2258 #endif | 2491 #endif |
2259 // Checks whether an address is in the object area of this space. It | 2492 // Checks whether an address is in the object area of this space. It |
2260 // iterates over all objects in the space and may be slow. | 2493 // iterates over all objects in the space and may be slow. |
2261 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); } | 2494 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); } |
2262 | 2495 |
2263 private: | 2496 private: |
2264 // The head of the linked list of large object chunks. | 2497 // The head of the linked list of large object pages. |
2265 LargeObjectChunk* first_chunk_; | 2498 LargePage* first_page_; |
2266 intptr_t size_; // allocated bytes | 2499 intptr_t size_; // allocated bytes |
2267 int page_count_; // number of chunks | 2500 int page_count_; // number of pages |
2268 intptr_t objects_size_; // size of objects | 2501 intptr_t objects_size_; // size of objects |
2269 | 2502 |
2270 // Shared implementation of AllocateRaw, AllocateRawCode and | |
2271 // AllocateRawFixedArray. | |
2272 MUST_USE_RESULT MaybeObject* AllocateRawInternal(int requested_size, | |
2273 int object_size, | |
2274 Executability executable); | |
2275 | |
2276 friend class LargeObjectIterator; | 2503 friend class LargeObjectIterator; |
2277 | 2504 |
2278 public: | 2505 public: |
2279 TRACK_MEMORY("LargeObjectSpace") | 2506 TRACK_MEMORY("LargeObjectSpace") |
2280 }; | 2507 }; |
2281 | 2508 |
2282 | 2509 |
2283 class LargeObjectIterator: public ObjectIterator { | 2510 class LargeObjectIterator: public ObjectIterator { |
2284 public: | 2511 public: |
2285 explicit LargeObjectIterator(LargeObjectSpace* space); | 2512 explicit LargeObjectIterator(LargeObjectSpace* space); |
2286 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); | 2513 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); |
2287 | 2514 |
2288 HeapObject* next(); | 2515 HeapObject* Next(); |
2289 | 2516 |
2290 // Implementation of ObjectIterator. | 2517 // Implementation of ObjectIterator. |
2291 virtual HeapObject* next_object() { return next(); } | 2518 virtual HeapObject* next_object() { return Next(); } |
2292 | 2519 |
2293 private: | 2520 private: |
2294 LargeObjectChunk* current_; | 2521 LargePage* current_; |
2295 HeapObjectCallback size_func_; | 2522 HeapObjectCallback size_func_; |
2296 }; | 2523 }; |
2297 | 2524 |
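LargeObjectIterator follows the same Next()-returns-NULL protocol as the other iterators in this header. A sketch of the typical loop over the space; it assumes the usual heap->lo_space() accessor and is meant to compile against this header rather than stand alone:

    // Count the objects in the large object space.
    int CountLargeObjects(Heap* heap) {
      int count = 0;
      LargeObjectIterator it(heap->lo_space());
      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
        count++;  // objects in this space never move, so obj stays valid
      }
      return count;
    }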
2298 | 2525 |
| 2526 // Iterates over the chunks (pages and large object pages) that can contain |
| 2527 // pointers to new space. |
| 2528 class PointerChunkIterator BASE_EMBEDDED { |
| 2529 public: |
| 2530 inline explicit PointerChunkIterator(Heap* heap); |
| 2531 |
| 2532 // Returns NULL when the iterator is done. |
| 2533 MemoryChunk* next() { |
| 2534 switch (state_) { |
| 2535 case kOldPointerState: { |
| 2536 if (old_pointer_iterator_.has_next()) { |
| 2537 return old_pointer_iterator_.next(); |
| 2538 } |
| 2539 state_ = kMapState; |
| 2540 // Fall through. |
| 2541 } |
| 2542 case kMapState: { |
| 2543 if (map_iterator_.has_next()) { |
| 2544 return map_iterator_.next(); |
| 2545 } |
| 2546 state_ = kLargeObjectState; |
| 2547 // Fall through. |
| 2548 } |
| 2549 case kLargeObjectState: { |
| 2550 HeapObject* heap_object; |
| 2551 do { |
| 2552 heap_object = lo_iterator_.Next(); |
| 2553 if (heap_object == NULL) { |
| 2554 state_ = kFinishedState; |
| 2555 return NULL; |
| 2556 } |
| 2557 // Fixed arrays are the only pointer-containing objects in large |
| 2558 // object space. |
| 2559 } while (!heap_object->IsFixedArray()); |
| 2560 MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address()); |
| 2561 return answer; |
| 2562 } |
| 2563 case kFinishedState: |
| 2564 return NULL; |
| 2565 default: |
| 2566 break; |
| 2567 } |
| 2568 UNREACHABLE(); |
| 2569 return NULL; |
| 2570 } |
| 2571 |
| 2572 |
| 2573 private: |
| 2574 enum State { |
| 2575 kOldPointerState, |
| 2576 kMapState, |
| 2577 kLargeObjectState, |
| 2578 kFinishedState |
| 2579 }; |
| 2580 State state_; |
| 2581 PageIterator old_pointer_iterator_; |
| 2582 PageIterator map_iterator_; |
| 2583 LargeObjectIterator lo_iterator_; |
| 2584 }; |
| 2585 |
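The fall-through switch in next() is a small resumable state machine: each call continues in whichever state the previous call ended, draining the old pointer space pages, then the map space pages, then the pointer-bearing (fixed array) large objects, before parking in kFinishedState. A sketch of the intended call pattern; the per-chunk processing is a hypothetical placeholder:

    // Walk every chunk that may contain pointers into new space.
    void VisitPointerChunks(Heap* heap) {
      PointerChunkIterator it(heap);
      for (MemoryChunk* chunk = it.next(); chunk != NULL; chunk = it.next()) {
        // ProcessChunk(chunk);  // hypothetical per-chunk work
      }
    }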
| 2586 |
2299 #ifdef DEBUG | 2587 #ifdef DEBUG |
2300 struct CommentStatistic { | 2588 struct CommentStatistic { |
2301 const char* comment; | 2589 const char* comment; |
2302 int size; | 2590 int size; |
2303 int count; | 2591 int count; |
2304 void Clear() { | 2592 void Clear() { |
2305 comment = NULL; | 2593 comment = NULL; |
2306 size = 0; | 2594 size = 0; |
2307 count = 0; | 2595 count = 0; |
2308 } | 2596 } |
2309 // Must be small, since an iteration is used for lookup. | 2597 // Must be small, since an iteration is used for lookup. |
2310 static const int kMaxComments = 64; | 2598 static const int kMaxComments = 64; |
2311 }; | 2599 }; |
2312 #endif | 2600 #endif |
2313 | 2601 |
2314 | 2602 |
2315 } } // namespace v8::internal | 2603 } } // namespace v8::internal |
2316 | 2604 |
2317 #endif // V8_SPACES_H_ | 2605 #endif // V8_SPACES_H_ |