// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SPACES_H_
#define V8_SPACES_H_

#include "src/allocation.h"
#include "src/base/atomicops.h"
#include "src/base/platform/mutex.h"
#include "src/hashmap.h"
#include "src/list.h"
#include "src/log.h"
#include "src/utils.h"

namespace v8 {
namespace internal {

class Isolate;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; the rest of the old objects go into the old
// space. The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of
// intergenerational references. See store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old pointer space or large object
// space it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old pointer and large object spaces should have
// a special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However, we
// still require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage. Garbage sections are marked with a special map. These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries. Garbage sections are chained
// together to form a free list after a GC. Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain. Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section. The start of this
// section is denoted by the top field in the space. The end of the section
// is denoted by the limit field in the space. This special garbage section
// is not marked with a free space map in the data. The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created. The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.

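// As a concrete illustration of the top/limit mechanism (addresses made up):
// if the space's current linear allocation region within a page is
// top == 0x40010000 and limit == 0x40012000, allocating a 32-byte object
// returns 0x40010000 and bumps top to 0x40010020; no free-space map is
// written for the remaining 0x1FE0 bytes until the region is closed.
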
// Some assertion macros used in debugging mode.

#define DCHECK_PAGE_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)

#define DCHECK_OBJECT_ALIGNED(address) \
  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))

#define DCHECK_PAGE_OFFSET(offset) \
  DCHECK((Page::kObjectStartOffset <= offset) \
      && (offset <= Page::kPageSize))

#define DCHECK_MAP_PAGE_INDEX(index) \
  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))


class PagedSpace;
class MemoryAllocator;
class AllocationInfo;
class Space;
class FreeList;
class MemoryChunk;

class MarkBit {
 public:
  typedef uint32_t CellType;

  inline MarkBit(CellType* cell, CellType mask, bool data_only)
      : cell_(cell), mask_(mask), data_only_(data_only) { }

  inline CellType* cell() { return cell_; }
  inline CellType mask() { return mask_; }

#ifdef DEBUG
  bool operator==(const MarkBit& other) {
    return cell_ == other.cell_ && mask_ == other.mask_;
  }
#endif

  inline void Set() { *cell_ |= mask_; }
  inline bool Get() { return (*cell_ & mask_) != 0; }
  inline void Clear() { *cell_ &= ~mask_; }

  inline bool data_only() { return data_only_; }

  inline MarkBit Next() {
    CellType new_mask = mask_ << 1;
    if (new_mask == 0) {
      return MarkBit(cell_ + 1, 1, data_only_);
    } else {
      return MarkBit(cell_, new_mask, data_only_);
    }
  }

 private:
  CellType* cell_;
  CellType mask_;
  // This boolean indicates that the object is in a data-only space with no
  // pointers. This enables some optimizations when marking.
  // It is expected that this field is inlined and turned into control flow
  // at the place where the MarkBit object is created.
  bool data_only_;
};
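
// Example of how Next() walks the bitmap (a sketch; values chosen for
// illustration): starting from the highest bit of a cell, the shifted mask
// overflows to 0, so Next() moves on to bit 0 of the following cell:
//
//   MarkBit bit(cell, 0x80000000u, false);
//   MarkBit next = bit.Next();  // next.cell() == cell + 1, next.mask() == 1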


// Bitmap is a sequence of cells, each containing a fixed number of bits.
class Bitmap {
 public:
  static const uint32_t kBitsPerCell = 32;
  static const uint32_t kBitsPerCellLog2 = 5;
  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;

  static const size_t kLength =
      (1 << kPageSizeBits) >> (kPointerSizeLog2);

  static const size_t kSize =
      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
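
  // A worked example of the two constants above (assuming kPageSizeBits == 20,
  // i.e. 1 MB chunks, and kPointerSizeLog2 == 3 on a 64-bit build): there is
  // one mark bit per pointer-aligned word, so
  //   kLength = 2^20 / 2^3         = 131072 bits, and
  //   kSize   = 2^20 / (2^3 * 2^3) =  16384 bytes of bitmap per chunk.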


  static int CellsForLength(int length) {
    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
  }

  int CellsCount() {
    return CellsForLength(kLength);
  }

  static int SizeFor(int cells_count) {
    return sizeof(MarkBit::CellType) * cells_count;
  }

  INLINE(static uint32_t IndexToCell(uint32_t index)) {
    return index >> kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellToIndex(uint32_t index)) {
    return index << kBitsPerCellLog2;
  }

  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
    return (index + kBitIndexMask) & ~kBitIndexMask;
  }

  INLINE(MarkBit::CellType* cells()) {
    return reinterpret_cast<MarkBit::CellType*>(this);
  }

  INLINE(Address address()) {
    return reinterpret_cast<Address>(this);
  }

  INLINE(static Bitmap* FromAddress(Address addr)) {
    return reinterpret_cast<Bitmap*>(addr);
  }

  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
    // Use an unsigned literal: shifting a signed 1 into the sign bit
    // (index & kBitIndexMask == 31) would be undefined behavior.
    MarkBit::CellType mask = 1u << (index & kBitIndexMask);
    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
    return MarkBit(cell, mask, data_only);
  }
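
  // For example, index 37 lands in cell 37 >> 5 == 1 with
  // mask 1u << (37 & 31) == 1u << 5 == 0x20, i.e. bit 5 of cells()[1].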

  static inline void Clear(MemoryChunk* chunk);

  static void PrintWord(uint32_t word, uint32_t himask = 0) {
    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
      if ((mask & himask) != 0) PrintF("[");
      PrintF((mask & word) ? "1" : "0");
      if ((mask & himask) != 0) PrintF("]");
    }
  }

  class CellPrinter {
   public:
    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }

    void Print(uint32_t pos, uint32_t cell) {
      if (cell == seq_type) {
        seq_length++;
        return;
      }

      Flush();

      if (IsSeq(cell)) {
        seq_start = pos;
        seq_length = 0;
        seq_type = cell;
        return;
      }

      PrintF("%d: ", pos);
      PrintWord(cell);
      PrintF("\n");
    }

    void Flush() {
      if (seq_length > 0) {
        PrintF("%d: %dx%d\n",
               seq_start,
               seq_type == 0 ? 0 : 1,
               seq_length * kBitsPerCell);
        seq_length = 0;
      }
    }

    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }

   private:
    uint32_t seq_start;
    uint32_t seq_type;
    uint32_t seq_length;
  };

  void Print() {
    CellPrinter printer;
    for (int i = 0; i < CellsCount(); i++) {
      printer.Print(i, cells()[i]);
    }
    printer.Flush();
    PrintF("\n");
  }

  bool IsClean() {
    for (int i = 0; i < CellsCount(); i++) {
      if (cells()[i] != 0) {
        return false;
      }
    }
    return true;
  }
};


class SkipList;
class SlotsBuffer;

// MemoryChunk represents a memory region owned by a specific space.
// It is divided into the header and the body. Chunk start is always
// 1MB aligned. Start of the body is aligned so it can accommodate
// any heap object.
class MemoryChunk {
 public:
  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
  static MemoryChunk* FromAddress(Address a) {
    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
  }
  static const MemoryChunk* FromAddress(const byte* a) {
    return reinterpret_cast<const MemoryChunk*>(
        OffsetFrom(a) & ~kAlignmentMask);
  }
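
  // Because chunks are kAlignment-aligned (1 MB by default), masking off the
  // low bits of any interior address recovers the chunk header. For example
  // (addresses made up for illustration), with kAlignment == 0x100000 the
  // address 0x40080420 maps to the chunk at 0x40000000:
  //
  //   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);  // addr & ~0xFFFFF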

  // Only works for addresses in pointer spaces, not data or code spaces.
  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);

  Address address() { return reinterpret_cast<Address>(this); }

  bool is_valid() { return address() != NULL; }

  MemoryChunk* next_chunk() const {
    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
  }

  MemoryChunk* prev_chunk() const {
    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
  }

  void set_next_chunk(MemoryChunk* next) {
    base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
  }

  void set_prev_chunk(MemoryChunk* prev) {
    base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
  }

  Space* owner() const {
    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
        kPageHeaderTag) {
      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
                                      kPageHeaderTag);
    } else {
      return NULL;
    }
  }

  void set_owner(Space* space) {
    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
           kPageHeaderTag);
  }

  base::VirtualMemory* reserved_memory() {
    return &reservation_;
  }

  void InitializeReservedMemory() {
    reservation_.Reset();
  }

  void set_reserved_memory(base::VirtualMemory* reservation) {
    DCHECK_NOT_NULL(reservation);
    reservation_.TakeControl(reservation);
  }

  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
  void initialize_scan_on_scavenge(bool scan) {
    if (scan) {
      SetFlag(SCAN_ON_SCAVENGE);
    } else {
      ClearFlag(SCAN_ON_SCAVENGE);
    }
  }
  inline void set_scan_on_scavenge(bool scan);

  int store_buffer_counter() { return store_buffer_counter_; }
  void set_store_buffer_counter(int counter) {
    store_buffer_counter_ = counter;
  }

  bool Contains(Address addr) {
    return addr >= area_start() && addr < area_end();
  }

  // Checks whether addr can be a limit of addresses in this page.
  // It's a limit if it's in the page, or if it's just after the
  // last byte of the page.
  bool ContainsLimit(Address addr) {
    return addr >= area_start() && addr <= area_end();
  }

  // Every n write barrier invocations we go to runtime even though
  // we could have handled it in generated code. This lets us check
  // whether we have hit the limit and should do some more marking.
  static const int kWriteBarrierCounterGranularity = 500;

  enum MemoryChunkFlags {
    IS_EXECUTABLE,
    ABOUT_TO_BE_FREED,
    POINTERS_TO_HERE_ARE_INTERESTING,
    POINTERS_FROM_HERE_ARE_INTERESTING,
    SCAN_ON_SCAVENGE,
    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
    IN_TO_SPACE,    // All pages in new space have one of these two set.
    NEW_SPACE_BELOW_AGE_MARK,
    CONTAINS_ONLY_DATA,
    EVACUATION_CANDIDATE,
    RESCAN_ON_EVACUATION,

    // Pages swept precisely can be iterated, hitting only the live objects,
    // whereas those swept conservatively cannot be iterated over. Both flags
    // indicate that marking bits have been cleared by the sweeper; otherwise
    // marking bits are still intact.
    WAS_SWEPT_PRECISELY,
    WAS_SWEPT_CONSERVATIVELY,

    // Large objects can have a progress bar in their page header. These
    // objects are scanned in increments and will be kept black while being
    // scanned. Even if the mutator writes to them they will be kept black and
    // a white to grey transition is performed in the value.
    HAS_PROGRESS_BAR,

    // Last flag, keep at bottom.
    NUM_MEMORY_CHUNK_FLAGS
  };


  static const int kPointersToHereAreInterestingMask =
      1 << POINTERS_TO_HERE_ARE_INTERESTING;

  static const int kPointersFromHereAreInterestingMask =
      1 << POINTERS_FROM_HERE_ARE_INTERESTING;

  static const int kEvacuationCandidateMask =
      1 << EVACUATION_CANDIDATE;

  static const int kSkipEvacuationSlotsRecordingMask =
      (1 << EVACUATION_CANDIDATE) |
      (1 << RESCAN_ON_EVACUATION) |
      (1 << IN_FROM_SPACE) |
      (1 << IN_TO_SPACE);


  void SetFlag(int flag) {
    flags_ |= static_cast<uintptr_t>(1) << flag;
  }

  void ClearFlag(int flag) {
    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
  }

  void SetFlagTo(int flag, bool value) {
    if (value) {
      SetFlag(flag);
    } else {
      ClearFlag(flag);
    }
  }

  bool IsFlagSet(int flag) {
    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
  }

  // Set or clear multiple flags at a time. The flags in the mask
  // are set to the value in "flags", the rest retain the current value
  // in flags_.
  void SetFlags(intptr_t flags, intptr_t mask) {
    flags_ = (flags_ & ~mask) | (flags & mask);
  }
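
  // For example, a caller might clear both sweeping flags in one step with
  // the mask form (a sketch, for illustration only):
  //
  //   chunk->SetFlags(0, (1 << WAS_SWEPT_PRECISELY) |
  //                      (1 << WAS_SWEPT_CONSERVATIVELY));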

  // Return all current flags.
  intptr_t GetFlags() { return flags_; }


  // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
  //   not be performed on that page.
  // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
  //   not touch the page memory anymore.
  // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
  // SWEEPING_PENDING - This page is ready for parallel sweeping.
  enum ParallelSweepingState {
    SWEEPING_DONE,
    SWEEPING_FINALIZE,
    SWEEPING_IN_PROGRESS,
    SWEEPING_PENDING
  };

  ParallelSweepingState parallel_sweeping() {
    return static_cast<ParallelSweepingState>(
        base::Acquire_Load(&parallel_sweeping_));
  }

  void set_parallel_sweeping(ParallelSweepingState state) {
    base::Release_Store(&parallel_sweeping_, state);
  }

  bool TryParallelSweeping() {
    return base::Acquire_CompareAndSwap(
        &parallel_sweeping_, SWEEPING_PENDING, SWEEPING_IN_PROGRESS) ==
            SWEEPING_PENDING;
  }
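
  // The compare-and-swap above is what makes page claiming race-free: of all
  // sweeper threads attempting the PENDING -> IN_PROGRESS transition, exactly
  // one observes SWEEPING_PENDING as the old value and wins the page. A
  // claiming loop might look like this (illustrative sketch only; SweepPage
  // is a hypothetical helper):
  //
  //   for (MemoryChunk* p = first; p != NULL; p = p->next_chunk()) {
  //     if (p->TryParallelSweeping()) SweepPage(p);
  //   }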

  bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }

  // Manage live byte count (count of bytes known to be live,
  // because they are marked black).
  void ResetLiveBytes() {
    if (FLAG_gc_verbose) {
      PrintF("ResetLiveBytes:%p:%x->0\n",
             static_cast<void*>(this), live_byte_count_);
    }
    live_byte_count_ = 0;
  }
  void IncrementLiveBytes(int by) {
    if (FLAG_gc_verbose) {
      PrintF("UpdateLiveBytes:%p:%x%c=%x->%x\n",
             static_cast<void*>(this), live_byte_count_,
             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
             live_byte_count_ + by);
    }
    live_byte_count_ += by;
    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
  }
  int LiveBytes() {
    DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
    return live_byte_count_;
  }

  int write_barrier_counter() {
    return static_cast<int>(write_barrier_counter_);
  }

  void set_write_barrier_counter(int counter) {
    write_barrier_counter_ = counter;
  }

  int progress_bar() {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    return progress_bar_;
  }

  void set_progress_bar(int progress_bar) {
    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
    progress_bar_ = progress_bar;
  }

  void ResetProgressBar() {
    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      set_progress_bar(0);
      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
    }
  }

  bool IsLeftOfProgressBar(Object** slot) {
    Address slot_address = reinterpret_cast<Address>(slot);
    DCHECK(slot_address > this->address());
    return (slot_address - (this->address() + kObjectStartOffset)) <
           progress_bar();
  }

  static void IncrementLiveBytesFromGC(Address address, int by) {
    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
  }

  static void IncrementLiveBytesFromMutator(Address address, int by);

  static const intptr_t kAlignment =
      (static_cast<uintptr_t>(1) << kPageSizeBits);

  static const intptr_t kAlignmentMask = kAlignment - 1;

  static const intptr_t kSizeOffset = 0;

  static const intptr_t kLiveBytesOffset =
      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
      kPointerSize + kPointerSize +
      kPointerSize + kPointerSize + kPointerSize + kIntSize;

  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;

  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize + kPointerSize;

  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
                                    kIntSize + kIntSize + kPointerSize +
                                    5 * kPointerSize +
                                    kPointerSize + kPointerSize;

  static const int kBodyOffset =
      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);

  // The start offset of the object area in a page. Aligned to both maps and
  // code alignment to be suitable for both. Also aligned to 32 words because
  // the marking bitmap is arranged in 32 bit chunks.
  static const int kObjectStartAlignment = 32 * kPointerSize;
  static const int kObjectStartOffset = kBodyOffset - 1 +
      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
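
  // The expression above is the usual round-up-to-a-multiple formula: it
  // rounds kBodyOffset up to the next multiple of kObjectStartAlignment.
  // For instance (numbers illustrative), if kBodyOffset were 16560 and the
  // alignment is 256 (32 * 8 on a 64-bit build), the result is
  // 16640 == 65 * 256; a kBodyOffset that is already a multiple of the
  // alignment is left unchanged.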

  size_t size() const { return size_; }

  void set_size(size_t size) {
    size_ = size;
  }

  void SetArea(Address area_start, Address area_end) {
    area_start_ = area_start;
    area_end_ = area_end;
  }

  Executability executable() {
    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
  }

  bool ContainsOnlyData() {
    return IsFlagSet(CONTAINS_ONLY_DATA);
  }

  bool InNewSpace() {
    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
  }

  bool InToSpace() {
    return IsFlagSet(IN_TO_SPACE);
  }

  bool InFromSpace() {
    return IsFlagSet(IN_FROM_SPACE);
  }

  // ---------------------------------------------------------------------
  // Markbits support

  inline Bitmap* markbits() {
    return Bitmap::FromAddress(address() + kHeaderSize);
  }

  void PrintMarkbits() { markbits()->Print(); }

  inline uint32_t AddressToMarkbitIndex(Address addr) {
    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
  }

  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
    const intptr_t offset =
        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;

    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
  }

  inline Address MarkbitIndexToAddress(uint32_t index) {
    return this->address() + (index << kPointerSizeLog2);
  }

  void InsertAfter(MemoryChunk* other);
  void Unlink();

  inline Heap* heap() const { return heap_; }

  static const int kFlagsOffset = kPointerSize;

  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }

  bool ShouldSkipEvacuationSlotRecording() {
    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
  }

  inline SkipList* skip_list() {
    return skip_list_;
  }

  inline void set_skip_list(SkipList* skip_list) {
    skip_list_ = skip_list;
  }

  inline SlotsBuffer* slots_buffer() {
    return slots_buffer_;
  }

  inline SlotsBuffer** slots_buffer_address() {
    return &slots_buffer_;
  }

  void MarkEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    SetFlag(EVACUATION_CANDIDATE);
  }

  void ClearEvacuationCandidate() {
    DCHECK(slots_buffer_ == NULL);
    ClearFlag(EVACUATION_CANDIDATE);
  }

  Address area_start() { return area_start_; }
  Address area_end() { return area_end_; }
  int area_size() {
    return static_cast<int>(area_end() - area_start());
  }
  bool CommitArea(size_t requested);

  // Approximate amount of physical memory committed for this chunk.
  size_t CommittedPhysicalMemory() {
    return high_water_mark_;
  }

  static inline void UpdateHighWaterMark(Address mark);

 protected:
  size_t size_;
  intptr_t flags_;

  // Start and end of allocatable memory on this chunk.
  Address area_start_;
  Address area_end_;

  // If the chunk needs to remember its memory reservation, it is stored here.
  base::VirtualMemory reservation_;
  // The identity of the owning space. This is tagged as a failure pointer,
  // but no failure can be in an object, so this can be distinguished from any
  // entry in a fixed array.
  Address owner_;
  Heap* heap_;
  // Used by the store buffer to keep track of which pages to mark scan-on-
  // scavenge.
  int store_buffer_counter_;
  // Count of bytes marked black on page.
  int live_byte_count_;
  SlotsBuffer* slots_buffer_;
  SkipList* skip_list_;
  intptr_t write_barrier_counter_;
  // Used by the incremental marker to keep track of the scanning progress in
  // large objects that have a progress bar and are scanned in increments.
  int progress_bar_;
  // Assuming the initial allocation on a page is sequential, this counts the
  // highest number of bytes ever allocated on the page.
  int high_water_mark_;

  base::AtomicWord parallel_sweeping_;

  // PagedSpace free-list statistics.
  intptr_t available_in_small_free_list_;
  intptr_t available_in_medium_free_list_;
  intptr_t available_in_large_free_list_;
  intptr_t available_in_huge_free_list_;
  intptr_t non_available_small_blocks_;

  static MemoryChunk* Initialize(Heap* heap,
                                 Address base,
                                 size_t size,
                                 Address area_start,
                                 Address area_end,
                                 Executability executable,
                                 Space* owner);

 private:
  // next_chunk_ holds a pointer to a MemoryChunk.
  base::AtomicWord next_chunk_;
  // prev_chunk_ holds a pointer to a MemoryChunk.
  base::AtomicWord prev_chunk_;

  friend class MemoryAllocator;
};


STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);


// -----------------------------------------------------------------------------
// A page is a memory chunk of size 1MB. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
 public:
  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the
  // object is in fact in a page. See also MemoryChunk::FromAddress() and
  // MemoryChunk::FromAnyPointerAddress().
  INLINE(static Page* FromAddress(Address a)) {
    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
  }

  // Returns the page containing an allocation top. Because an allocation
  // top address can be the upper bound of the page, we subtract kPointerSize
  // from it first. The address ranges from
  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
  INLINE(static Page* FromAllocationTop(Address top)) {
    Page* p = FromAddress(top - kPointerSize);
    return p;
  }

  // Returns the next page in the chain of pages owned by a space.
  inline Page* next_page();
  inline Page* prev_page();
  inline void set_next_page(Page* page);
  inline void set_prev_page(Page* page);

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address a) {
    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
  }

  // Returns the offset of a given address to this page.
  INLINE(int Offset(Address a)) {
    int offset = static_cast<int>(a - address());
    return offset;
  }

  // Returns the address for a given offset in this page.
  Address OffsetToAddress(int offset) {
    DCHECK_PAGE_OFFSET(offset);
    return address() + offset;
  }

  // ---------------------------------------------------------------------

  // Page size in bytes. This must be a multiple of the OS page size.
  static const int kPageSize = 1 << kPageSizeBits;

  // Maximum object size that fits in a page. Objects larger than that size
  // are allocated in large object space and are never moved in memory. This
  // also applies to new space allocation, since objects are never migrated
  // from new space to large object space. Takes double alignment into
  // account.
  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;

  // Page size mask.
  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;

  inline void ClearGCFields();

  static inline Page* Initialize(Heap* heap,
                                 MemoryChunk* chunk,
                                 Executability executable,
                                 PagedSpace* owner);

  void InitializeAsAnchor(PagedSpace* owner);

  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }

  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }

  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }

  void ResetFreeListStatistics();

#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
  type name() { return name##_; } \
  void set_##name(type name) { name##_ = name; } \
  void add_##name(type name) { name##_ += name; }

  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)

#undef FRAGMENTATION_STATS_ACCESSORS

#ifdef DEBUG
  void Print();
#endif  // DEBUG

  friend class MemoryAllocator;
};


STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);


class LargePage : public MemoryChunk {
 public:
  HeapObject* GetObject() {
    return HeapObject::FromAddress(area_start());
  }

  inline LargePage* next_page() const {
    return static_cast<LargePage*>(next_chunk());
  }

  inline void set_next_page(LargePage* page) {
    set_next_chunk(page);
  }

 private:
  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);

  friend class MemoryAllocator;
};

STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces.
class Space : public Malloced {
 public:
  Space(Heap* heap, AllocationSpace id, Executability executable)
      : heap_(heap), id_(id), executable_(executable) {}

  virtual ~Space() {}

  Heap* heap() const { return heap_; }

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Identity used in error reporting.
  AllocationSpace identity() { return id_; }

  // Returns allocated size.
  virtual intptr_t Size() = 0;

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see LargeObjectSpace).
  virtual intptr_t SizeOfObjects() { return Size(); }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kPointerSize);
    }
  }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 private:
  Heap* heap_;
  AllocationSpace id_;
  Executability executable_;
};


// ----------------------------------------------------------------------------
// All heap objects containing executable code (code objects) must be allocated
// from a 2 GB range of memory, so that they can call each other using 32-bit
// displacements. This happens automatically on 32-bit platforms, where 32-bit
// displacements cover the entire 4GB virtual address space. On 64-bit
// platforms, we support this using the CodeRange object, which reserves and
// manages a range of virtual memory.
class CodeRange {
 public:
  explicit CodeRange(Isolate* isolate);
  ~CodeRange() { TearDown(); }

  // Reserves a range of virtual memory, but does not commit any of it.
  // Can only be called once, at heap initialization time.
  // Returns false on failure.
  bool SetUp(size_t requested_size);

  // Frees the range of virtual memory, and frees the data structures used to
  // manage it.
  void TearDown();

  bool valid() { return code_range_ != NULL; }
  Address start() {
    DCHECK(valid());
    return static_cast<Address>(code_range_->address());
  }
  bool contains(Address address) {
    if (!valid()) return false;
    Address start = static_cast<Address>(code_range_->address());
    return start <= address && address < start + code_range_->size();
  }

  // Allocates a chunk of memory from the large-object portion of
  // the code range. On platforms with no separate code range, should
  // not be called.
  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
                                            const size_t commit_size,
                                            size_t* allocated);
  bool CommitRawMemory(Address start, size_t length);
  bool UncommitRawMemory(Address start, size_t length);
  void FreeRawMemory(Address buf, size_t length);

 private:
  Isolate* isolate_;

  // The reserved range of virtual memory that all code objects are put in.
  base::VirtualMemory* code_range_;
  // Plain old data class, just a struct plus a constructor.
  class FreeBlock {
   public:
    FreeBlock(Address start_arg, size_t size_arg)
        : start(start_arg), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }
    FreeBlock(void* start_arg, size_t size_arg)
        : start(static_cast<Address>(start_arg)), size(size_arg) {
      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
    }

    Address start;
    size_t size;
  };

  // Freed blocks of memory are added to the free list. When the allocation
  // list is exhausted, the free list is sorted and merged to make the new
  // allocation list.
  List<FreeBlock> free_list_;
  // Memory is allocated from the free blocks on the allocation list.
  // The block at current_allocation_block_index_ is the current block.
  List<FreeBlock> allocation_list_;
  int current_allocation_block_index_;

  // Finds a block on the allocation list that contains at least the
  // requested amount of memory. If none is found, sorts and merges
  // the existing free memory blocks, and searches again.
  // If none can be found, returns false.
  bool GetNextAllocationBlock(size_t requested);
  // Compares the start addresses of two free blocks.
  static int CompareFreeBlockAddress(const FreeBlock* left,
                                     const FreeBlock* right);

  DISALLOW_COPY_AND_ASSIGN(CodeRange);
};


class SkipList {
 public:
  SkipList() {
    Clear();
  }

  void Clear() {
    for (int idx = 0; idx < kSize; idx++) {
      starts_[idx] = reinterpret_cast<Address>(-1);
    }
  }

  Address StartFor(Address addr) {
    return starts_[RegionNumber(addr)];
  }

  void AddObject(Address addr, int size) {
    int start_region = RegionNumber(addr);
    int end_region = RegionNumber(addr + size - kPointerSize);
    for (int idx = start_region; idx <= end_region; idx++) {
      if (starts_[idx] > addr) starts_[idx] = addr;
    }
  }

  static inline int RegionNumber(Address addr) {
    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
  }

  static void Update(Address addr, int size) {
    Page* page = Page::FromAddress(addr);
    SkipList* list = page->skip_list();
    if (list == NULL) {
      list = new SkipList();
      page->set_skip_list(list);
    }

    list->AddObject(addr, size);
  }
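
  // Worked example of the region arithmetic (offsets made up): with
  // kRegionSizeLog2 == 13 each region covers 8 KB, so a 24-byte object at
  // page offset 0x3FF8 ends at 0x4010; RegionNumber() maps its first word to
  // region 0x3FF8 >> 13 == 1 and its last word (0x4008) to region
  // 0x4008 >> 13 == 2, and AddObject() records its start in both slots.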

 private:
  static const int kRegionSizeLog2 = 13;
  static const int kRegionSize = 1 << kRegionSizeLog2;
  static const int kSize = Page::kPageSize / kRegionSize;

  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);

  Address starts_[kSize];
};


// ----------------------------------------------------------------------------
// A space acquires chunks of memory from the operating system. The memory
// allocator allocates and deallocates pages for the paged heap spaces and
// large pages for large object space.
//
// Each space has to manage its own pages.
//
class MemoryAllocator {
 public:
  explicit MemoryAllocator(Isolate* isolate);

  // Initializes its internal bookkeeping structures.
  // Max capacity of the total space and executable memory limit.
  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);

  void TearDown();

  Page* AllocatePage(
      intptr_t size, PagedSpace* owner, Executability executable);

  LargePage* AllocateLargePage(
      intptr_t object_size, Space* owner, Executability executable);

  void Free(MemoryChunk* chunk);

  // Returns the maximum available bytes of heaps.
  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }

  // Returns allocated spaces in bytes.
  intptr_t Size() { return size_; }

  // Returns the maximum available executable bytes of heaps.
  intptr_t AvailableExecutable() {
    if (capacity_executable_ < size_executable_) return 0;
    return capacity_executable_ - size_executable_;
  }

  // Returns allocated executable spaces in bytes.
  intptr_t SizeExecutable() { return size_executable_; }

  // Returns maximum available bytes that the old space can have.
  intptr_t MaxAvailable() {
    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
  }

  // Returns an indication of whether a pointer is in a space that has
  // been allocated by this MemoryAllocator.
  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }

#ifdef DEBUG
  // Reports statistic info of the space.
  void ReportStatistics();
#endif

  // Returns a MemoryChunk in which the memory region from commit_area_size to
  // reserve_area_size of the chunk area is reserved but not committed; it can
  // be committed later by calling MemoryChunk::CommitArea.
  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
                             intptr_t commit_area_size,
                             Executability executable,
                             Space* space);

  Address ReserveAlignedMemory(size_t requested,
                               size_t alignment,
                               base::VirtualMemory* controller);
  Address AllocateAlignedMemory(size_t reserve_size,
                                size_t commit_size,
                                size_t alignment,
                                Executability executable,
                                base::VirtualMemory* controller);

  bool CommitMemory(Address addr, size_t size, Executability executable);

  void FreeMemory(base::VirtualMemory* reservation, Executability executable);
  void FreeMemory(Address addr, size_t size, Executability executable);

  // Commit a contiguous block of memory from the initial chunk. Assumes that
  // the address is not NULL, the size is greater than zero, and that the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool CommitBlock(Address start, size_t size, Executability executable);

  // Uncommit a contiguous block of memory [start..(start+size)[.
  // start is not NULL, the size is greater than zero, and the
  // block is contained in the initial chunk. Returns true if it succeeded
  // and false otherwise.
  bool UncommitBlock(Address start, size_t size);

  // Zaps a contiguous block of memory [start..(start+size)[ thus
  // filling it up with a recognizable non-NULL bit pattern.
  void ZapBlock(Address start, size_t size);

  void PerformAllocationCallback(ObjectSpace space,
                                 AllocationAction action,
                                 size_t size);

  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
                                   ObjectSpace space,
                                   AllocationAction action);

  void RemoveMemoryAllocationCallback(
      MemoryAllocationCallback callback);

  bool MemoryAllocationCallbackRegistered(
      MemoryAllocationCallback callback);

  static int CodePageGuardStartOffset();

  static int CodePageGuardSize();

  static int CodePageAreaStartOffset();

  static int CodePageAreaEndOffset();

  static int CodePageAreaSize() {
    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
  }

  MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
                                              Address start,
                                              size_t commit_size,
                                              size_t reserved_size);

 private:
  Isolate* isolate_;

  // Maximum space size in bytes.
  size_t capacity_;
  // Maximum subset of capacity_ that can be executable.
  size_t capacity_executable_;

  // Allocated space size in bytes.
  size_t size_;
  // Allocated executable space size in bytes.
  size_t size_executable_;

  // We keep the lowest and highest addresses allocated as a quick way
  // of determining that pointers are outside the heap. The estimate is
  // conservative, i.e. not all addresses in 'allocated' space are allocated
  // to our heap. The range is [lowest, highest[, inclusive on the low end
  // and exclusive on the high end.
  void* lowest_ever_allocated_;
  void* highest_ever_allocated_;

  struct MemoryAllocationCallbackRegistration {
    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
                                         ObjectSpace space,
                                         AllocationAction action)
        : callback(callback), space(space), action(action) {
    }
    MemoryAllocationCallback callback;
    ObjectSpace space;
    AllocationAction action;
  };

  // A list of callbacks that are triggered when memory is allocated or freed.
  List<MemoryAllocationCallbackRegistration>
      memory_allocation_callbacks_;

  // Initializes pages in a chunk. Returns the first page address.
  // This function and GetChunkId() are provided for the mark-compact
  // collector to rebuild page headers in the from space, which is
  // used as a marking stack and its page headers are destroyed.
  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
                               PagedSpace* owner);

  void UpdateAllocatedSpaceLimits(void* low, void* high) {
    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
  }

  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
};


// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.
//
// NOTE: The space-specific object iterators also implement their own next()
// method, which is used to avoid virtual function calls when iterating over a
// specific space.

class ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() { }

  virtual HeapObject* next_object() = 0;
};


// -----------------------------------------------------------------------------
// Heap object iterator in new/old/map spaces.
//
// A HeapObjectIterator iterates objects from the bottom of the given space
// to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects. The caller must create a new
// iterator in order to be sure to visit these new objects.
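//
// Typical use (a sketch; any paged space works as the argument):
//
//   HeapObjectIterator it(heap->old_pointer_space());
//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
//     // ... inspect obj ...
//   }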
class HeapObjectIterator: public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  // If the size function is not given, the iterator calls the default
  // Object::Size().
  explicit HeapObjectIterator(PagedSpace* space);
  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
  HeapObjectIterator(Page* page, HeapObjectCallback size_func);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns NULL when the iteration has ended.
  inline HeapObject* Next() {
    do {
      HeapObject* next_obj = FromCurrentPage();
      if (next_obj != NULL) return next_obj;
    } while (AdvanceToNextPage());
    return NULL;
  }

  virtual HeapObject* next_object() {
    return Next();
  }

 private:
  enum PageMode { kOnePageOnly, kAllPagesInSpace };

  Address cur_addr_;              // Current iteration point.
  Address cur_end_;               // End iteration point.
  HeapObjectCallback size_func_;  // Size function or NULL.
  PagedSpace* space_;
  PageMode page_mode_;

  // Fast (inlined) path of next().
  inline HeapObject* FromCurrentPage();

  // Slow path of next(), goes into the next page. Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  // Initializes fields.
  inline void Initialize(PagedSpace* owner,
                         Address start,
                         Address end,
                         PageMode mode,
                         HeapObjectCallback size_func);
};


// -----------------------------------------------------------------------------
// A PageIterator iterates the pages in a paged space.

class PageIterator BASE_EMBEDDED {
 public:
  explicit inline PageIterator(PagedSpace* space);

  inline bool has_next();
  inline Page* next();

 private:
  PagedSpace* space_;
  Page* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  Page* next_page_;
};


// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class AllocationInfo {
 public:
  AllocationInfo() : top_(NULL), limit_(NULL) {
  }

  INLINE(void set_top(Address top)) {
    SLOW_DCHECK(top == NULL ||
        (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
    top_ = top;
  }

  INLINE(Address top()) const {
    SLOW_DCHECK(top_ == NULL ||
        (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
    return top_;
  }

  Address* top_address() {
    return &top_;
  }

  INLINE(void set_limit(Address limit)) {
    SLOW_DCHECK(limit == NULL ||
        (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
    limit_ = limit;
  }

  INLINE(Address limit()) const {
    SLOW_DCHECK(limit_ == NULL ||
        (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
    return limit_;
  }

  Address* limit_address() {
    return &limit_;
  }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
        && (top_ <= limit_);
  }
#endif

 private:
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};
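
// The raw top_address()/limit_address() accessors above exist so that the
// compiler-generated allocation fast path can load and store the bump
// pointer directly, without calling into C++. A C++ caller would use them
// roughly like this (illustrative sketch only):
//
//   Address* top = info->top_address();
//   Address* limit = info->limit_address();
//   if (*limit - *top >= size_in_bytes) {  // enough linear space left
//     Address result = *top;
//     *top += size_in_bytes;               // caller makes result an object
//   }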


// An abstraction of the accounting statistics of a page-structured space.
// The 'capacity' of a space is the number of object-area bytes (i.e., not
// including page bookkeeping structures) currently in the space. The 'size'
// of a space is the number of allocated bytes, the 'waste' in the space is
// the number of bytes that are not allocated and not available to
// allocation without reorganizing the space via a GC (e.g. small blocks due
// to internal fragmentation, top of page areas in map space), and the
// 'available' bytes are the unallocated bytes that are not waste. The
// capacity is the sum of size, waste, and available.
//
// The stats are only set by functions that ensure they stay balanced. These
// functions increase or decrease one of the non-capacity stats in
// conjunction with capacity, or else they always balance increases and
// decreases to the non-capacity stats.
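//
// A worked example of the invariant (numbers made up): a space with capacity
// 1008 KB might have size 600 KB and waste 8 KB, leaving
// available = 1008 - 600 - 8 = 400 KB; AllocateBytes(100 KB) then moves
// 100 KB from available to size, and capacity is unchanged.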
class AllocationStats BASE_EMBEDDED {
 public:
  AllocationStats() { Clear(); }

  // Zero out all the allocation statistics (i.e., no capacity).
  void Clear() {
    capacity_ = 0;
    max_capacity_ = 0;
    size_ = 0;
    waste_ = 0;
  }

  void ClearSizeWaste() {
    size_ = capacity_;
    waste_ = 0;
  }

  // Reset the allocation statistics (i.e., available = capacity with no
  // wasted or allocated bytes).
  void Reset() {
    size_ = 0;
    waste_ = 0;
  }

  // Accessors for the allocation statistics.
  intptr_t Capacity() { return capacity_; }
  intptr_t MaxCapacity() { return max_capacity_; }
  intptr_t Size() { return size_; }
  intptr_t Waste() { return waste_; }

  // Grow the space by adding available bytes. They are initially marked as
  // being in use (part of the size), but will normally be immediately freed,
  // putting them on the free list and removing them from size_.
  void ExpandSpace(int size_in_bytes) {
    capacity_ += size_in_bytes;
    size_ += size_in_bytes;
    if (capacity_ > max_capacity_) {
      max_capacity_ = capacity_;
    }
    DCHECK(size_ >= 0);
  }

  // Shrink the space by removing available bytes. Since shrinking is done
  // during sweeping, bytes have been marked as being in use (part of the size)
  // and are hereby freed.
  void ShrinkSpace(int size_in_bytes) {
    capacity_ -= size_in_bytes;
    size_ -= size_in_bytes;
    DCHECK(size_ >= 0);
  }

  // Allocate from available bytes (available -> size).
  void AllocateBytes(intptr_t size_in_bytes) {
    size_ += size_in_bytes;
    DCHECK(size_ >= 0);
  }

  // Free allocated bytes, making them available (size -> available).
  void DeallocateBytes(intptr_t size_in_bytes) {
    size_ -= size_in_bytes;
    DCHECK(size_ >= 0);
  }

  // Waste free bytes (available -> waste).
  void WasteBytes(int size_in_bytes) {
    DCHECK(size_in_bytes >= 0);
    waste_ += size_in_bytes;
  }

 private:
  intptr_t capacity_;
  intptr_t max_capacity_;
  intptr_t size_;
  intptr_t waste_;
};


// -----------------------------------------------------------------------------
// Free lists for old object spaces
//
// Free-list nodes are free blocks in the heap. They look like heap objects
// (free-list node pointers have the heap object tag, and they have a map like
// a heap object). They have a size and a next pointer. The next pointer is
// the raw address of the next free list node (or NULL).
class FreeListNode: public HeapObject {
 public:
  // Obtain a free-list node from a raw address. This is not a cast because
  // it does not check nor require that the first word at the address is a map
  // pointer.
  static FreeListNode* FromAddress(Address address) {
    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
  }

  static inline bool IsFreeListNode(HeapObject* object);

  // Set the size in bytes, which can be read with HeapObject::Size(). This
  // function also writes a map to the first word of the block so that it
  // looks like a heap object to the garbage collector and heap iteration
  // functions.
  void set_size(Heap* heap, int size_in_bytes);

  // Accessors for the next field.
  inline FreeListNode* next();
  inline FreeListNode** next_address();
  inline void set_next(FreeListNode* next);

  inline void Zap();

  static inline FreeListNode* cast(Object* object) {
    return reinterpret_cast<FreeListNode*>(object);
  }

 private:
  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
};


// The free list category holds a pointer to the top element and a pointer to
// the end element of the linked list of free memory blocks.
class FreeListCategory {
 public:
  FreeListCategory() :
      top_(0),
      end_(NULL),
      available_(0) {}

  intptr_t Concatenate(FreeListCategory* category);

  void Reset();

  void Free(FreeListNode* node, int size_in_bytes);

  FreeListNode* PickNodeFromList(int* node_size);
  FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);

  intptr_t EvictFreeListItemsInList(Page* p);
  bool ContainsPageFreeListItemsInList(Page* p);

  void RepairFreeList(Heap* heap);

  FreeListNode* top() const {
    return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
  }

  void set_top(FreeListNode* top) {
    base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
  }

  FreeListNode** GetEndAddress() { return &end_; }
  FreeListNode* end() const { return end_; }
  void set_end(FreeListNode* end) { end_ = end; }

  int* GetAvailableAddress() { return &available_; }
  int available() const { return available_; }
  void set_available(int available) { available_ = available; }

  base::Mutex* mutex() { return &mutex_; }

  bool IsEmpty() {
    return top() == 0;
  }

#ifdef DEBUG
  intptr_t SumFreeList();
  int FreeListLength();
#endif

 private:
  // top_ points to the top FreeListNode* in the free list category.
  base::AtomicWord top_;
  FreeListNode* end_;
  base::Mutex mutex_;

  // Total available bytes in all blocks of this free list category.
  int available_;
};


// The free list for the old space. The free list is organized in such a way
// as to encourage objects allocated around the same time to be near each
// other. The normal way to allocate is intended to be by bumping a 'top'
// pointer until it hits a 'limit' pointer. When the limit is hit we need to
// find a new space to allocate from. This is done with the free list, which
// is divided up into rough categories to cut down on waste. Having finer
// categories would scatter allocation more.

// The old space free list is organized in categories.
// 1-31 words: Such small free areas are discarded for efficiency reasons.
//     They can be reclaimed by the compactor. However the distance between top
//     and limit may be this small.
// 32-255 words: There is a list of spaces this large. It is used for top and
//     limit when the object we need to allocate is 1-31 words in size. These
//     spaces are called small.
// 256-2047 words: There is a list of spaces this large. It is used for top and
//     limit when the object we need to allocate is 32-255 words in size. These
//     spaces are called medium.
// 2048-16383 words: There is a list of spaces this large. It is used for top
//     and limit when the object we need to allocate is 256-2047 words in size.
//     These spaces are called large.
// At least 16384 words: This list is for objects of 2048 words or larger.
//     Empty pages are added to this list. These spaces are called huge.
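//
// For example (illustrative): freeing a 100-word block puts it on the small
// list, while a request for a 300-word object is served from the large list
// of 2048-16383 word spaces, following the category rules above.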
class FreeList {
 public:
  explicit FreeList(PagedSpace* owner);

  intptr_t Concatenate(FreeList* free_list);

  // Clear the free list.
  void Reset();

  // Return the number of bytes available on the free list.
  intptr_t available() {
    return small_list_.available() + medium_list_.available() +
           large_list_.available() + huge_list_.available();
  }

  // Place a node on the free list. The block of size 'size_in_bytes'
  // starting at 'start' is placed on the free list. The return value is the
  // number of bytes that have been lost due to internal fragmentation by
  // freeing the block. Bookkeeping information will be written to the block,
  // i.e., its contents will be destroyed. The start address should be word
  // aligned, and the size should be a non-zero multiple of the word size.
  int Free(Address start, int size_in_bytes);

  // This method returns how much memory can be allocated after freeing
  // maximum_freed memory.
  static inline int GuaranteedAllocatable(int maximum_freed) {
    if (maximum_freed < kSmallListMin) {
      return 0;
    } else if (maximum_freed <= kSmallListMax) {
      return kSmallAllocationMax;
    } else if (maximum_freed <= kMediumListMax) {
      return kMediumAllocationMax;
    } else if (maximum_freed <= kLargeListMax) {
      return kLargeAllocationMax;
    }
    return maximum_freed;
  }
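
  // Worked example (illustrative, not itself part of the API): for a
  // maximum_freed of 256 words the small branch is skipped (256 > 255 words)
  // and the medium branch returns kMediumAllocationMax == kSmallListMax, so
  // callers may rely on fitting any object of up to 255 words.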

  // Allocate a block of size 'size_in_bytes' from the free list. The block
  // is uninitialized. Returns NULL if no suitable block is available. The
  // size should be a non-zero multiple of the word size.
  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);

  bool IsEmpty() {
    return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
           large_list_.IsEmpty() && huge_list_.IsEmpty();
  }

#ifdef DEBUG
  void Zap();
  intptr_t SumFreeLists();
  bool IsVeryLong();
#endif

  // Used after booting the VM.
  void RepairLists(Heap* heap);

  intptr_t EvictFreeListItems(Page* p);
  bool ContainsPageFreeListItems(Page* p);

  FreeListCategory* small_list() { return &small_list_; }
  FreeListCategory* medium_list() { return &medium_list_; }
  FreeListCategory* large_list() { return &large_list_; }
  FreeListCategory* huge_list() { return &huge_list_; }

 private:
  // The size range of blocks, in bytes.
  static const int kMinBlockSize = 3 * kPointerSize;
  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;

  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);

  PagedSpace* owner_;
  Heap* heap_;

  static const int kSmallListMin = 0x20 * kPointerSize;
  static const int kSmallListMax = 0xff * kPointerSize;
  static const int kMediumListMax = 0x7ff * kPointerSize;
  static const int kLargeListMax = 0x3fff * kPointerSize;
  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
  static const int kMediumAllocationMax = kSmallListMax;
  static const int kLargeAllocationMax = kMediumListMax;
  FreeListCategory small_list_;
  FreeListCategory medium_list_;
  FreeListCategory large_list_;
  FreeListCategory huge_list_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
};


class AllocationResult {
 public:
  // Implicit constructor from Object*.
  AllocationResult(Object* object) : object_(object),  // NOLINT
                                     retry_space_(INVALID_SPACE) { }

  AllocationResult() : object_(NULL),
                       retry_space_(INVALID_SPACE) { }

  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
    return AllocationResult(space);
  }

  inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }

  template <typename T>
  bool To(T** obj) {
    if (IsRetry()) return false;
    *obj = T::cast(object_);
    return true;
  }

  Object* ToObjectChecked() {
    CHECK(!IsRetry());
    return object_;
  }

  AllocationSpace RetrySpace() {
    DCHECK(IsRetry());
    return retry_space_;
  }

 private:
  explicit AllocationResult(AllocationSpace space) : object_(NULL),
                                                     retry_space_(space) { }

  Object* object_;
  AllocationSpace retry_space_;
};
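

// Typical use of AllocationResult (an illustrative sketch, not code from
// this file): unpack with To() and fall back to a GC on retry.
//
//   AllocationResult allocation = space->AllocateRaw(size_in_bytes);
//   HeapObject* object = NULL;
//   if (!allocation.To(&object)) {
//     // Collect garbage in allocation.RetrySpace() and retry the allocation.
//   }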


class PagedSpace : public Space {
 public:
  // Creates a space with a maximum capacity, and an id.
  PagedSpace(Heap* heap,
             intptr_t max_capacity,
             AllocationSpace id,
             Executability executable);

  virtual ~PagedSpace() {}

  // Set up the space using the given address range of virtual memory (from
  // the memory allocator's initial chunk) if possible. If the block of
  // addresses is not big enough to contain a single page-aligned page, a
  // fresh chunk will be allocated.
  bool SetUp();

  // Returns true if the space has been successfully set up and not
  // subsequently torn down.
  bool HasBeenSetUp();

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a);
  bool Contains(HeapObject* o) { return Contains(o->address()); }

  // Given an address occupied by a live object, return that object if it is
  // in this space, or a Smi if it is not. The implementation iterates over
  // objects in the page containing the address; the cost is linear in the
  // number of objects in the page. It may be slow.
  Object* FindObject(Address addr);

  // During boot the free_space_map is created, and afterwards we may need
  // to write it into the free list nodes that were already created.
  void RepairFreeListsAfterBoot();

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  intptr_t Capacity() { return accounting_stats_.Capacity(); }

  // Total amount of memory committed for this space. For paged
  // spaces this equals the capacity.
  intptr_t CommittedMemory() { return Capacity(); }

  // The maximum amount of memory ever committed for this space.
  intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory();

  struct SizeStats {
    intptr_t Total() {
      return small_size_ + medium_size_ + large_size_ + huge_size_;
    }

    intptr_t small_size_;
    intptr_t medium_size_;
    intptr_t large_size_;
    intptr_t huge_size_;
  };

  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
  void ResetFreeListStatistics();

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered. As free spaces are
  // discovered during the sweeping they are subtracted from the size and added
  // to the available and wasted totals.
  void ClearStats() {
    accounting_stats_.ClearSizeWaste();
    ResetFreeListStatistics();
  }

  // Increases the number of available bytes of that space.
  void AddToAccountingStats(intptr_t bytes) {
    accounting_stats_.DeallocateBytes(bytes);
  }

  // Available bytes without growing. These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation. New pages are
  // immediately added to the free list so they show up here.
  intptr_t Available() { return free_list_.available(); }

  // Allocated bytes in this space. Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated! The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  virtual intptr_t Size() { return accounting_stats_.Size(); }

  // As Size(), but the bytes in lazily swept pages are estimated and the
  // bytes in the current linear allocation area are not included.
  virtual intptr_t SizeOfObjects();

  // Wasted bytes in this space. These are just the bytes that were thrown away
  // due to being too small to use for allocation. They do not include the
  // free bytes that were not found at all due to lazy sweeping.
  virtual intptr_t Waste() { return accounting_stats_.Waste(); }

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() {
    return allocation_info_.top_address();
  }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);

  // Give a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste. The return value is the number of
  // bytes that were usably freed, i.e. size_in_bytes minus the waste.
  int Free(Address start, int size_in_bytes) {
    int wasted = free_list_.Free(start, size_in_bytes);
    accounting_stats_.DeallocateBytes(size_in_bytes);
    accounting_stats_.WasteBytes(wasted);
    return size_in_bytes - wasted;
  }
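
  // For instance (following the free-list categories described above): a
  // freed block smaller than kSmallListMin goes on no list; all of its bytes
  // are accounted as waste and Free() returns 0 usable bytes.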

  void ResetFreeList() {
    free_list_.Reset();
  }

  // Set space allocation info.
  void SetTopAndLimit(Address top, Address limit) {
    DCHECK(top == limit ||
           Page::FromAddress(top) == Page::FromAddress(limit - 1));
    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
    allocation_info_.set_top(top);
    allocation_info_.set_limit(limit);
  }

  // Empty space allocation info, returning unused area to free list.
  void EmptyAllocationInfo() {
    // Mark the old linear allocation area with a free space map so it can be
    // skipped when scanning the heap.
    int old_linear_size = static_cast<int>(limit() - top());
    Free(top(), old_linear_size);
    SetTopAndLimit(NULL, NULL);
  }

  void Allocate(int bytes) {
    accounting_stats_.AllocateBytes(bytes);
  }

  void IncreaseCapacity(int size);

  // Releases an unused page and shrinks the space.
  void ReleasePage(Page* page);

  // The dummy page that anchors the linked list of pages.
  Page* anchor() { return &anchor_; }

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  virtual void Verify(ObjectVisitor* visitor);

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject* obj) {}
#endif

#ifdef DEBUG
  // Print meta info and objects in this space.
  virtual void Print();

  // Reports statistics for the space.
  void ReportStatistics();

  // Report code object related statistics.
  void CollectCodeStatistics();
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  bool swept_precisely() { return swept_precisely_; }
  void set_swept_precisely(bool b) { swept_precisely_ = b; }

  // Evacuation candidates are swept by the evacuator. This needs to return a
  // valid result before _and_ after evacuation has finished.
  static bool ShouldBeSweptBySweeperThreads(Page* p) {
    return !p->IsEvacuationCandidate() &&
           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
           !p->WasSweptPrecisely();
  }

  void IncrementUnsweptFreeBytes(intptr_t by) {
    unswept_free_bytes_ += by;
  }

  void IncreaseUnsweptFreeBytes(Page* p) {
    DCHECK(ShouldBeSweptBySweeperThreads(p));
    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
  }

  void DecrementUnsweptFreeBytes(intptr_t by) {
    unswept_free_bytes_ -= by;
  }

  void DecreaseUnsweptFreeBytes(Page* p) {
    DCHECK(ShouldBeSweptBySweeperThreads(p));
    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
  }

  void ResetUnsweptFreeBytes() {
    unswept_free_bytes_ = 0;
  }

  // This function tries to steal size_in_bytes memory from the sweeper
  // threads' free lists. If it does not succeed in stealing enough memory,
  // it will wait for the sweeper threads to finish sweeping.
  // It returns true when sweeping is completed and false otherwise.
  bool EnsureSweeperProgress(intptr_t size_in_bytes);

  void set_end_of_unswept_pages(Page* page) {
    end_of_unswept_pages_ = page;
  }

  Page* end_of_unswept_pages() {
    return end_of_unswept_pages_;
  }

  Page* FirstPage() { return anchor_.next_page(); }
  Page* LastPage() { return anchor_.prev_page(); }

  void EvictEvacuationCandidatesFromFreeLists();

  bool CanExpand();

  // Returns the number of total pages in this space.
  int CountTotalPages();

  // Return size of allocatable area on a page in this space.
  inline int AreaSize() {
    return area_size_;
  }

  void CreateEmergencyMemory();
  void FreeEmergencyMemory();
  void UseEmergencyMemory();

  bool HasEmergencyMemory() { return emergency_memory_ != NULL; }

 protected:
  FreeList* free_list() { return &free_list_; }

  int area_size_;

  // Maximum capacity of this space.
  intptr_t max_capacity_;

  intptr_t SizeOfFirstPage();

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // The dummy page that anchors the double linked list of pages.
  Page anchor_;

  // The space's free list.
  FreeList free_list_;

  // Normal allocation information.
  AllocationInfo allocation_info_;

  // This space was swept precisely, hence it is iterable.
  bool swept_precisely_;

  // The number of free bytes which could be reclaimed by advancing the
  // concurrent sweeper threads. This is only an estimate because concurrent
  // sweeping is done conservatively.
  intptr_t unswept_free_bytes_;

  // The sweeper threads iterate over the list of pointer and data space pages
  // and sweep these pages concurrently. They will stop sweeping after the
  // end_of_unswept_pages_ page.
  Page* end_of_unswept_pages_;

  // Emergency memory is the memory of a full page for a given space, allocated
  // conservatively before evacuating a page. If compaction fails due to an
  // out-of-memory error the emergency memory can be used to complete
  // compaction. If not used, the emergency memory is released after
  // compaction.
  MemoryChunk* emergency_memory_;

  // Expands the space by allocating a fixed number of pages. Returns false if
  // it cannot allocate the requested number of pages from the OS, or if the
  // hard heap size limit has been hit.
  bool Expand();

  // Generic fast case allocation function that tries linear allocation at the
  // address denoted by top in allocation_info_.
  inline HeapObject* AllocateLinearly(int size_in_bytes);

  // If sweeping is still in progress try to sweep unswept pages. If that is
  // not successful, wait for the sweeper threads and re-try free-list
  // allocation.
  MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
      int size_in_bytes);

  // Slow path of AllocateRaw. This function is space-dependent.
  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);

  friend class PageIterator;
  friend class MarkCompactCollector;
};


class NumberAndSizeInfo BASE_EMBEDDED {
 public:
  NumberAndSizeInfo() : number_(0), bytes_(0) {}

  int number() const { return number_; }
  void increment_number(int num) { number_ += num; }

  int bytes() const { return bytes_; }
  void increment_bytes(int size) { bytes_ += size; }

  void clear() {
    number_ = 0;
    bytes_ = 0;
  }

 private:
  int number_;
  int bytes_;
};


// HistogramInfo class for recording a single "bar" of a histogram. This
// class is used for collecting statistics to print to the log file.
class HistogramInfo: public NumberAndSizeInfo {
 public:
  HistogramInfo() : NumberAndSizeInfo() {}

  const char* name() { return name_; }
  void set_name(const char* name) { name_ = name; }

 private:
  const char* name_;
};


enum SemiSpaceId {
  kFromSpace = 0,
  kToSpace = 1
};


class SemiSpace;


class NewSpacePage : public MemoryChunk {
 public:
  // GC related flags copied from from-space to to-space when
  // flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
      (1 << MemoryChunk::SCAN_ON_SCAVENGE);

  static const int kAreaSize = Page::kMaxRegularHeapObjectSize;

  inline NewSpacePage* next_page() const {
    return static_cast<NewSpacePage*>(next_chunk());
  }

  inline void set_next_page(NewSpacePage* page) {
    set_next_chunk(page);
  }

  inline NewSpacePage* prev_page() const {
    return static_cast<NewSpacePage*>(prev_chunk());
  }

  inline void set_prev_page(NewSpacePage* page) {
    set_prev_chunk(page);
  }

  SemiSpace* semi_space() {
    return reinterpret_cast<SemiSpace*>(owner());
  }

  bool is_anchor() { return !this->InNewSpace(); }

  static bool IsAtStart(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
        == kObjectStartOffset;
  }

  static bool IsAtEnd(Address addr) {
    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
  }

  Address address() {
    return reinterpret_cast<Address>(this);
  }

  // Finds the NewSpacePage containing the given address.
  static inline NewSpacePage* FromAddress(Address address_in_page) {
    Address page_start =
        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
                                  ~Page::kPageAlignmentMask);
    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
    return page;
  }

  // Find the page for a limit address. A limit address is either an address
  // inside a page, or the address right after the last byte of a page.
  static inline NewSpacePage* FromLimit(Address address_limit) {
    return NewSpacePage::FromAddress(address_limit - 1);
  }

  // Checks if address1 and address2 are on the same new space page.
  static inline bool OnSamePage(Address address1, Address address2) {
    return NewSpacePage::FromAddress(address1) ==
           NewSpacePage::FromAddress(address2);
  }
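
  // Example (illustrative, assuming 1 MB pages, i.e. kPageAlignmentMask ==
  // 0xFFFFF): FromAddress(0x0812ABCD) masks off the low bits and yields the
  // page at 0x08100000, while FromLimit(0x08200000) first steps back one
  // byte, so a limit pointing just past a page still maps to that page.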

 private:
  // Create a NewSpacePage object that is only used as anchor
  // for the doubly-linked list of real pages.
  explicit NewSpacePage(SemiSpace* owner) {
    InitializeAsAnchor(owner);
  }

  static NewSpacePage* Initialize(Heap* heap,
                                  Address start,
                                  SemiSpace* semi_space);

  // Initialize a fake NewSpacePage used as a sentinel at the ends
  // of a doubly-linked list of real NewSpacePages.
  // Only uses the prev/next links, and sets flags to not be in new-space.
  void InitializeAsAnchor(SemiSpace* owner);

  friend class SemiSpace;
  friend class SemiSpaceIterator;
};


// -----------------------------------------------------------------------------
// SemiSpace in young generation
//
// A semispace is a contiguous chunk of memory holding page-like memory
// chunks. The mark-compact collector uses the memory of the first page in
// the from space as a marking stack when tracing live objects.

class SemiSpace : public Space {
 public:
  // Constructor.
  SemiSpace(Heap* heap, SemiSpaceId semispace)
    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
      start_(NULL),
      age_mark_(NULL),
      id_(semispace),
      anchor_(this),
      current_page_(NULL) { }

  // Sets up the semispace using the given chunk.
  void SetUp(Address start, int initial_capacity, int maximum_capacity);

  // Tear down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() { return start_ != NULL; }

  // Grow the semispace to the new capacity. The new capacity
  // requested must be larger than the current capacity and less than
  // the maximum capacity.
  bool GrowTo(int new_capacity);

  // Shrinks the semispace to the new capacity. The new capacity
  // requested must be more than the amount of used memory in the
  // semispace and less than the current capacity.
  bool ShrinkTo(int new_capacity);

  // Returns the start address of the first page of the space.
  Address space_start() {
    DCHECK(anchor_.next_page() != &anchor_);
    return anchor_.next_page()->area_start();
  }

  // Returns the start address of the current page of the space.
  Address page_low() {
    return current_page_->area_start();
  }

  // Returns one past the end address of the space.
  Address space_end() {
    return anchor_.prev_page()->area_end();
  }

  // Returns one past the end address of the current page of the space.
  Address page_high() {
    return current_page_->area_end();
  }

  bool AdvancePage() {
    NewSpacePage* next_page = current_page_->next_page();
    if (next_page == anchor()) return false;
    current_page_ = next_page;
    return true;
  }

  // Resets the space to using the first page.
  void Reset();

  // Age mark accessors.
  Address age_mark() { return age_mark_; }
  void set_age_mark(Address mark);

  // True if the address is in the address range of this semispace (not
  // necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);
  }

  // True if the object is a heap object in the address range of this
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Object* o) {
    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
  }
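
  // For example (illustrative values): with start_ == 0x08000000 and
  // address_mask_ == 0xF8000000, the address 0x0812ABCD masks to 0x08000000
  // and is therefore considered inside this semispace.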

  // If we don't have these here then SemiSpace will be abstract. However
  // they should never be called.
  virtual intptr_t Size() {
    UNREACHABLE();
    return 0;
  }

  bool is_committed() { return committed_; }
  bool Commit();
  bool Uncommit();

  NewSpacePage* first_page() { return anchor_.next_page(); }
  NewSpacePage* current_page() { return current_page_; }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  virtual void Print();
  // Validate a range of addresses in a SemiSpace.
  // The "from" address must be on a page prior to the "to" address,
  // in the linked page order, or it must be earlier on the same page.
  static void AssertValidRange(Address from, Address to);
#else
  // Do nothing.
  inline static void AssertValidRange(Address from, Address to) {}
#endif

  // Returns the current capacity of the semispace.
  int Capacity() { return capacity_; }

  // Returns the maximum capacity of the semispace.
  int MaximumCapacity() { return maximum_capacity_; }

  // Returns the initial capacity of the semispace.
  int InitialCapacity() { return initial_capacity_; }

  SemiSpaceId id() { return id_; }

  static void Swap(SemiSpace* from, SemiSpace* to);

  // Returns the maximum amount of memory ever committed by the semispace.
  size_t MaximumCommittedMemory() { return maximum_committed_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory();

 private:
  // Flips the semispace between being from-space and to-space.
  // Copies the flags into the masked positions on all pages in the space.
  void FlipPages(intptr_t flags, intptr_t flag_mask);

  // Updates Capacity and MaximumCommitted based on new capacity.
  void SetCapacity(int new_capacity);

  NewSpacePage* anchor() { return &anchor_; }

  // The current and maximum capacity of the space.
  int capacity_;
  int maximum_capacity_;
  int initial_capacity_;

  intptr_t maximum_committed_;

  // The start address of the space.
  Address start_;
  // Used to govern object promotion during mark-compact collection.
  Address age_mark_;

  // Masks and comparison values to test for containment in this semispace.
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  bool committed_;
  SemiSpaceId id_;

  NewSpacePage anchor_;
  NewSpacePage* current_page_;

  friend class SemiSpaceIterator;
  friend class NewSpacePageIterator;

 public:
  TRACK_MEMORY("SemiSpace")
};


// A SemiSpaceIterator is an ObjectIterator that iterates over the active
// semispace of the heap's new space. It iterates over the objects in the
// semispace from a given start address (defaulting to the bottom of the
// semispace) to the top of the semispace. New objects allocated after the
// iterator is created are not iterated.
class SemiSpaceIterator : public ObjectIterator {
 public:
  // Create an iterator over the objects in the given space. If no start
  // address is given, the iterator starts from the bottom of the space. If
  // no size function is given, the iterator calls Object::Size().

  // Iterate over all of allocated to-space.
  explicit SemiSpaceIterator(NewSpace* space);
  // Iterate over all of allocated to-space, with a custom size function.
  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
  // Iterate over part of allocated to-space, from start to the end
  // of allocation.
  SemiSpaceIterator(NewSpace* space, Address start);
  // Iterate from one address to another in the same semi-space.
  SemiSpaceIterator(Address from, Address to);

  HeapObject* Next() {
    if (current_ == limit_) return NULL;
    if (NewSpacePage::IsAtEnd(current_)) {
      NewSpacePage* page = NewSpacePage::FromLimit(current_);
      page = page->next_page();
      DCHECK(!page->is_anchor());
      current_ = page->area_start();
      if (current_ == limit_) return NULL;
    }

    HeapObject* object = HeapObject::FromAddress(current_);
    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);

    current_ += size;
    return object;
  }

  // Implementation of the ObjectIterator functions.
  virtual HeapObject* next_object() { return Next(); }

 private:
  void Initialize(Address start,
                  Address end,
                  HeapObjectCallback size_func);

  // The current iteration point.
  Address current_;
  // The end of iteration.
  Address limit_;
  // The callback function.
  HeapObjectCallback size_func_;
};
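

// Typical iteration pattern (an illustrative sketch, not code from this
// file):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
//     // ... visit object ...
//   }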


// -----------------------------------------------------------------------------
// A NewSpacePageIterator iterates over the pages in a semispace.
class NewSpacePageIterator BASE_EMBEDDED {
 public:
  // Make an iterator that runs over all pages in to-space.
  explicit inline NewSpacePageIterator(NewSpace* space);

  // Make an iterator that runs over all pages in the given semispace,
  // even those not used in allocation.
  explicit inline NewSpacePageIterator(SemiSpace* space);

  // Make an iterator that iterates from the page containing start
  // to the page that contains limit in the same semispace.
  inline NewSpacePageIterator(Address start, Address limit);

  inline bool has_next();
  inline NewSpacePage* next();

 private:
  NewSpacePage* prev_page_;  // Previous page returned.
  // Next page that will be returned. Cached here so that we can use this
  // iterator for operations that deallocate pages.
  NewSpacePage* next_page_;
  // Last page returned.
  NewSpacePage* last_page_;
};


// -----------------------------------------------------------------------------
// The young generation space.
//
// The new space consists of a contiguous pair of semispaces. It simply
// forwards most functions to the appropriate semispace.

class NewSpace : public Space {
 public:
  // Constructor.
  explicit NewSpace(Heap* heap)
    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
      to_space_(heap, kToSpace),
      from_space_(heap, kFromSpace),
      reservation_(),
      inline_allocation_limit_step_(0) {}

  // Sets up the new space using the given chunk.
  bool SetUp(int reserved_semispace_size_, int max_semi_space_size);

  // Tears down the space. Heap memory was not allocated by the space, so it
  // is not deallocated here.
  void TearDown();

  // True if the space has been set up but not torn down.
  bool HasBeenSetUp() {
    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
  }

  // Flip the pair of spaces.
  void Flip();

  // Grow the capacity of the semispaces. Assumes that they are not at
  // their maximum capacity.
  void Grow();

  // Shrink the capacity of the semispaces.
  void Shrink();

  // True if the address or object lies in the address range of either
  // semispace (not necessarily below the allocation pointer).
  bool Contains(Address a) {
    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
        == reinterpret_cast<uintptr_t>(start_);
  }

  bool Contains(Object* o) {
    Address a = reinterpret_cast<Address>(o);
    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
  }

  // Return the allocated bytes in the active semispace.
  virtual intptr_t Size() {
    return pages_used_ * NewSpacePage::kAreaSize +
        static_cast<int>(top() - to_space_.page_low());
  }

  // As Size(), but returning an int. The intptr_t version must be kept
  // because it is inherited; since the new space cannot grow as large as the
  // other spaces, an int-returning variant is convenient.
  int SizeAsInt() { return static_cast<int>(Size()); }

  // Return the current usable capacity of a semispace (object areas only).
  intptr_t EffectiveCapacity() {
    SLOW_DCHECK(to_space_.Capacity() == from_space_.Capacity());
    return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
  }
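
  // Worked example (illustrative numbers): with two full pages in use
  // (pages_used_ == 2), kAreaSize == 1000 KB, and an allocation top 100 KB
  // past the start of the current page, Size() reports
  // 2 * 1000 KB + 100 KB = 2100 KB of allocated bytes.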

  // Return the current capacity of a semispace.
  intptr_t Capacity() {
    DCHECK(to_space_.Capacity() == from_space_.Capacity());
    return to_space_.Capacity();
  }

  // Return the total amount of memory committed for new space.
  intptr_t CommittedMemory() {
    if (from_space_.is_committed()) return 2 * Capacity();
    return Capacity();
  }

  // Return the maximum amount of memory ever committed for new space.
  intptr_t MaximumCommittedMemory() {
    return to_space_.MaximumCommittedMemory() +
           from_space_.MaximumCommittedMemory();
  }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory();

  // Return the available bytes without growing.
  intptr_t Available() {
    return Capacity() - Size();
  }

  // Return the maximum capacity of a semispace.
  int MaximumCapacity() {
    DCHECK(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
    return to_space_.MaximumCapacity();
  }

  bool IsAtMaximumCapacity() {
    return Capacity() == MaximumCapacity();
  }

  // Returns the initial capacity of a semispace.
  int InitialCapacity() {
    DCHECK(to_space_.InitialCapacity() == from_space_.InitialCapacity());
    return to_space_.InitialCapacity();
  }

  // Return the address of the allocation pointer in the active semispace.
  Address top() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
    return allocation_info_.top();
  }

  void set_top(Address top) {
    DCHECK(to_space_.current_page()->ContainsLimit(top));
    allocation_info_.set_top(top);
  }

  // Return the address of the allocation pointer limit in the active
  // semispace.
  Address limit() {
    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
    return allocation_info_.limit();
  }

  // Return the address of the first object in the active semispace.
  Address bottom() { return to_space_.space_start(); }

  // Get the age mark of the inactive semispace.
  Address age_mark() { return from_space_.age_mark(); }
  // Set the age mark in the active semispace.
  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }

  // The start address of the space and a bit mask. Anding an address in the
  // new space with the mask will result in the start address.
  Address start() { return start_; }
  uintptr_t mask() { return address_mask_; }

  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
    DCHECK(Contains(addr));
    DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
  }

  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
    return reinterpret_cast<Address>(index << kPointerSizeLog2);
  }
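
  // For instance (assuming 8-byte pointers, so kPointerSizeLog2 == 3): an
  // object at start_ + 0x40 has mark-bit index 0x40 >> 3 == 8, i.e. there is
  // one mark-bit index per pointer-sized word of the space.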

  // The allocation top address.
  Address* allocation_top_address() {
    return allocation_info_.top_address();
  }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));

  // Reset the allocation pointer to the beginning of the active semispace.
  void ResetAllocationInfo();

  void UpdateInlineAllocationLimit(int size_in_bytes);
  void LowerInlineAllocationLimit(intptr_t step) {
    inline_allocation_limit_step_ = step;
    UpdateInlineAllocationLimit(0);
    top_on_previous_step_ = allocation_info_.top();
  }
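
  // For example (illustrative): LowerInlineAllocationLimit(64 * KB) publishes
  // a limit of roughly top + 64 KB instead of the real end of the page, so
  // inlined allocation code falls back into the runtime about every 64 KB of
  // allocation, giving incremental marking a chance to do a step.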

  // Get the extent of the inactive semispace (for use as a marking stack,
  // or to zap it). Notice: space-addresses are not necessarily on the
  // same page, so FromSpaceStart() might be above FromSpaceEnd().
  Address FromSpacePageLow() { return from_space_.page_low(); }
  Address FromSpacePageHigh() { return from_space_.page_high(); }
  Address FromSpaceStart() { return from_space_.space_start(); }
  Address FromSpaceEnd() { return from_space_.space_end(); }

  // Get the extent of the active semispace's pages' memory.
  Address ToSpaceStart() { return to_space_.space_start(); }
  Address ToSpaceEnd() { return to_space_.space_end(); }

  inline bool ToSpaceContains(Address address) {
    return to_space_.Contains(address);
  }
  inline bool FromSpaceContains(Address address) {
    return from_space_.Contains(address);
  }

  // True if the object is a heap object in the address range of the
  // respective semispace (not necessarily below the allocation pointer of the
  // semispace).
  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }

  // Try to switch the active semispace to a new, empty, page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();

#ifdef VERIFY_HEAP
  // Verify the active semispace.
  virtual void Verify();
#endif

#ifdef DEBUG
  // Print the active semispace.
  virtual void Print() { to_space_.Print(); }
#endif

  // Iterates the active semispace to collect statistics.
  void CollectStatistics();
  // Reports previously collected statistics of the active semispace.
  void ReportStatistics();
  // Clears previously collected statistics.
  void ClearHistograms();

  // Record the allocation or promotion of a heap object. Note that we don't
  // record every single allocation, but only those that happen in the
  // to space during a scavenge GC.
  void RecordAllocation(HeapObject* obj);
  void RecordPromotion(HeapObject* obj);

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  inline intptr_t inline_allocation_limit_step() {
    return inline_allocation_limit_step_;
  }

  SemiSpace* active_space() { return &to_space_; }

 private:
  // Update allocation info to match the current to-space page.
  void UpdateAllocationInfo();

  Address chunk_base_;
  uintptr_t chunk_size_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  base::VirtualMemory reservation_;
  int pages_used_;

  // Start address and bit mask for containment testing.
  Address start_;
  uintptr_t address_mask_;
  uintptr_t object_mask_;
  uintptr_t object_expected_;

  // Allocation pointer and limit for normal allocation and allocation during
  // mark-compact collection.
  AllocationInfo allocation_info_;

  // When incremental marking is active we will set allocation_info_.limit
  // to be lower than the actual limit and then will gradually increase it
  // in steps to guarantee that we do incremental marking steps even
  // when all allocation is performed from inlined generated code.
  intptr_t inline_allocation_limit_step_;

  Address top_on_previous_step_;

  HistogramInfo* allocated_histogram_;
  HistogramInfo* promoted_histogram_;

  MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);

  friend class SemiSpaceIterator;

 public:
  TRACK_MEMORY("NewSpace")
};


// -----------------------------------------------------------------------------
// Old object space (excluding map objects)

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object with a given maximum capacity.
  // The constructor does not allocate pages from the OS.
  OldSpace(Heap* heap,
           intptr_t max_capacity,
           AllocationSpace id,
           Executability executable)
      : PagedSpace(heap, max_capacity, id, executable) {
  }

 public:
  TRACK_MEMORY("OldSpace")
};


// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top()      \
              && (info).top() <= (space).page_high()  \
              && (info).limit() <= (space).page_high())


// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object with a maximum capacity.
  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
        max_map_space_pages_(kMaxMapPageIndex - 1) {
  }

  // TODO(1600): this limit is artificial just to keep code compilable.
  static const int kMaxMapPageIndex = 1 << 16;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
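
  // For instance (illustrative, supposing Map::kSize were 88 bytes and thus
  // not a power of two): RoundSizeDownToObjectAlignment(1000) returns
  // (1000 / 88) * 88 == 968, the largest multiple of the map size that fits.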

 protected:
  virtual void VerifyObject(HeapObject* obj);

 private:
  static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;

  // Do map space compaction if there is a page gap.
  int CompactionThreshold() {
    return kMapsPerPage * (max_map_space_pages_ - 1);
  }

  const int max_map_space_pages_;

 public:
  TRACK_MEMORY("MapSpace")
};


// -----------------------------------------------------------------------------
// Old space for simple property cell objects

class CellSpace : public PagedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
  }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(Cell::kSize)) {
      return RoundDown(size, Cell::kSize);
    } else {
      return (size / Cell::kSize) * Cell::kSize;
    }
  }

 protected:
  virtual void VerifyObject(HeapObject* obj);

 public:
  TRACK_MEMORY("CellSpace")
};


// -----------------------------------------------------------------------------
// Old space for all global object property cell objects

class PropertyCellSpace : public PagedSpace {
 public:
  // Creates a property cell space object with a maximum capacity.
  PropertyCellSpace(Heap* heap, intptr_t max_capacity,
                    AllocationSpace id)
      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
  }

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (IsPowerOf2(PropertyCell::kSize)) {
      return RoundDown(size, PropertyCell::kSize);
    } else {
      return (size / PropertyCell::kSize) * PropertyCell::kSize;
    }
  }

 protected:
  virtual void VerifyObject(HeapObject* obj);

 public:
  TRACK_MEMORY("PropertyCellSpace")
};


// -----------------------------------------------------------------------------
// Large objects (> Page::kMaxHeapObjectSize) are allocated and managed by
// the large object space. A large object is allocated from the OS heap with
// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at offset Page::kObjectStartOffset within its
// page. Large objects do not move during garbage collections.

class LargeObjectSpace : public Space {
 public:
  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
  virtual ~LargeObjectSpace() {}

  // Initializes internal data structures.
  bool SetUp();

  // Releases internal resources, frees objects in this space.
  void TearDown();

  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }

  // Shared implementation of AllocateRaw, AllocateRawCode and
  // AllocateRawFixedArray.
  MUST_USE_RESULT AllocationResult AllocateRaw(int object_size,
                                               Executability executable);

  // Available bytes for objects in this space.
  inline intptr_t Available();

  virtual intptr_t Size() {
    return size_;
  }

  virtual intptr_t SizeOfObjects() {
    return objects_size_;
  }

  intptr_t MaximumCommittedMemory() {
    return maximum_committed_;
  }

  intptr_t CommittedMemory() {
    return Size();
  }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory();

  int PageCount() {
    return page_count_;
  }

  // Finds an object for a given address and returns a Smi if it is not found.
  // The function iterates through all objects in this space and may be slow.
  Object* FindObject(Address a);

  // Finds a large object page containing the given address, returns NULL
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page_ == NULL; }

  LargePage* first_page() { return first_page_; }

#ifdef VERIFY_HEAP
  virtual void Verify();
#endif

#ifdef DEBUG
  virtual void Print();
  void ReportStatistics();
  void CollectCodeStatistics();
#endif

  // Checks whether an address is in the object area of this space. It
  // iterates over all objects in the space and may be slow.
  bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }

 private:
  intptr_t max_capacity_;
  intptr_t maximum_committed_;
  // The head of the linked list of large object chunks.
  LargePage* first_page_;
  intptr_t size_;          // allocated bytes
  int page_count_;         // number of chunks
  intptr_t objects_size_;  // size of objects
  // Maps MemoryChunk::kAlignment-aligned chunk addresses to the large pages
  // covering them.
  HashMap chunk_map_;

  friend class LargeObjectIterator;

 public:
  TRACK_MEMORY("LargeObjectSpace")
};
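
// Illustrative sketch (not part of V8): the overhead arithmetic in
// ObjectSizeFor above, with hypothetical stand-in constants (the real values
// of Page::kPageSize and Page::kObjectStartOffset are configuration
// dependent). Only chunk space beyond the fixed padding is usable for the
// object itself.
#if 0  // Example only; excluded from compilation.
static const intptr_t kExamplePageSize = 1 << 20;       // hypothetical 1 MB
static const intptr_t kExampleObjectStartOffset = 256;  // hypothetical header

static inline intptr_t ExampleObjectSizeFor(intptr_t chunk_size) {
  // A chunk no larger than the fixed overhead cannot hold any object payload.
  if (chunk_size <= (kExamplePageSize + kExampleObjectStartOffset)) return 0;
  return chunk_size - kExamplePageSize - kExampleObjectStartOffset;
}
// E.g. a 2 MB chunk leaves 2097152 - 1048576 - 256 = 1048320 object bytes.
#endif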


class LargeObjectIterator: public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);
  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);

  HeapObject* Next();

  // Implementation of ObjectIterator.
  virtual HeapObject* next_object() { return Next(); }

 private:
  LargePage* current_;
  HeapObjectCallback size_func_;
};
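
// Illustrative sketch (not part of V8): the usual driver loop for
// LargeObjectIterator; Next() returns NULL once every large object has been
// visited. The function name is hypothetical.
#if 0  // Example only; excluded from compilation.
static inline int ExampleCountLargeObjects(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  int count = 0;
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    count++;
  }
  return count;
}
#endif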


// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space.
class PointerChunkIterator BASE_EMBEDDED {
 public:
  inline explicit PointerChunkIterator(Heap* heap);

  // Returns NULL when the iterator is done.
  MemoryChunk* next() {
    switch (state_) {
      case kOldPointerState: {
        if (old_pointer_iterator_.has_next()) {
          return old_pointer_iterator_.next();
        }
        state_ = kMapState;
        // Fall through.
      }
      case kMapState: {
        if (map_iterator_.has_next()) {
          return map_iterator_.next();
        }
        state_ = kLargeObjectState;
        // Fall through.
      }
      case kLargeObjectState: {
        HeapObject* heap_object;
        do {
          heap_object = lo_iterator_.Next();
          if (heap_object == NULL) {
            state_ = kFinishedState;
            return NULL;
          }
          // Fixed arrays are the only pointer-containing objects in large
          // object space.
        } while (!heap_object->IsFixedArray());
        return MemoryChunk::FromAddress(heap_object->address());
      }
      case kFinishedState:
        return NULL;
      default:
        break;
    }
    UNREACHABLE();
    return NULL;
  }

 private:
  enum State {
    kOldPointerState,
    kMapState,
    kLargeObjectState,
    kFinishedState
  };
  State state_;
  PageIterator old_pointer_iterator_;
  PageIterator map_iterator_;
  LargeObjectIterator lo_iterator_;
};
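
// Illustrative sketch (not part of V8): draining the iterator above. The
// function name and loop body are hypothetical; each returned chunk is one
// that may hold pointers into new space.
#if 0  // Example only; excluded from compilation.
static inline void ExampleVisitPointerChunks(Heap* heap) {
  PointerChunkIterator it(heap);
  MemoryChunk* chunk;
  while ((chunk = it.next()) != NULL) {
    // Scan the chunk for pointers into new space here.
  }
}
#endif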


#ifdef DEBUG
struct CommentStatistic {
  const char* comment;
  int size;
  int count;
  void Clear() {
    comment = NULL;
    size = 0;
    count = 0;
  }
  // Must be small, since an iteration is used for lookup.
  static const int kMaxComments = 64;
};
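
// Illustrative sketch (not part of V8): the linear lookup that the
// kMaxComments comment above alludes to. The table and function are
// hypothetical; they only show why the table must stay small.
#if 0  // Example only; excluded from compilation.
#include <cstring>

static CommentStatistic
    example_comments_statistics[CommentStatistic::kMaxComments + 1];

static inline CommentStatistic* ExampleFindCommentStatistic(const char* txt) {
  // O(kMaxComments) scan on every lookup; acceptable only for a tiny table.
  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
    CommentStatistic* cs = &example_comments_statistics[i];
    if (cs->comment != NULL && strcmp(cs->comment, txt) == 0) return cs;
  }
  return NULL;  // not found; a caller could claim an empty slot
}
#endif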
#endif  // DEBUG


} }  // namespace v8::internal

#endif  // V8_SPACES_H_