| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_SPACES_H_ | 5 #ifndef V8_HEAP_SPACES_H_ |
| 6 #define V8_HEAP_SPACES_H_ | 6 #define V8_HEAP_SPACES_H_ |
| 7 | 7 |
| 8 #include "src/allocation.h" | 8 #include "src/allocation.h" |
| 9 #include "src/atomic-utils.h" | 9 #include "src/atomic-utils.h" |
| 10 #include "src/base/atomicops.h" | 10 #include "src/base/atomicops.h" |
| (...skipping 250 matching lines...) |
| 261 | 261 |
| 262 class SkipList; | 262 class SkipList; |
| 263 class SlotsBuffer; | 263 class SlotsBuffer; |
| 264 | 264 |
| 265 // MemoryChunk represents a memory region owned by a specific space. | 265 // MemoryChunk represents a memory region owned by a specific space. |
| 266 // It is divided into the header and the body. Chunk start is always | 266 // It is divided into the header and the body. Chunk start is always |
| 267 // 1MB aligned. Start of the body is aligned so it can accommodate | 267 // 1MB aligned. Start of the body is aligned so it can accommodate |
| 268 // any heap object. | 268 // any heap object. |
| 269 class MemoryChunk { | 269 class MemoryChunk { |
| 270 public: | 270 public: |
| 271 enum MemoryChunkFlags { |
| 272 IS_EXECUTABLE, |
| 273 ABOUT_TO_BE_FREED, |
| 274 POINTERS_TO_HERE_ARE_INTERESTING, |
| 275 POINTERS_FROM_HERE_ARE_INTERESTING, |
| 276 SCAN_ON_SCAVENGE, |
| 277 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. |
| 278 IN_TO_SPACE, // All pages in new space have one of these two set. |
| 279 NEW_SPACE_BELOW_AGE_MARK, |
| 280 EVACUATION_CANDIDATE, |
| 281 RESCAN_ON_EVACUATION, |
| 282 NEVER_EVACUATE, // May contain immortal immutables. |
| 283 POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC. |
| 284 |
| 285 // WAS_SWEPT indicates that marking bits have been cleared by the sweeper, |
| 286 // otherwise marking bits are still intact. |
| 287 WAS_SWEPT, |
| 288 |
| 289 // Large objects can have a progress bar in their page header. These objects |
| 290 // are scanned in increments and will be kept black while being scanned. |
| 291 // Even if the mutator writes to them they will be kept black and a white |
| 292 // to grey transition is performed in the value. |
| 293 HAS_PROGRESS_BAR, |
| 294 |
| 295 // This flag is intended to be used for testing. Works only when both |
| 296 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection |
| 297 // are set. It forces the page to become an evacuation candidate at the next |
| 298 // candidate selection cycle. |
| 299 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, |
| 300 |
| 301 // The memory chunk is already logically freed, however the actual freeing |
| 302 // still has to be performed. |
| 303 PRE_FREED, |
| 304 |
| 305 // Last flag, keep at bottom. |
| 306 NUM_MEMORY_CHUNK_FLAGS |
| 307 }; |
| 308 |
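Note: the enum values above are bit positions rather than masks. SetFlag/ClearFlag/IsFlagSet further down in the class shift a one by the flag index into the intptr_t flags_ word, and the k...Mask constants below OR several of those bits together so related checks cost a single AND. A minimal standalone sketch of the same pattern (plain C++ with a cut-down, illustrative enum, not V8's actual flag layout):

    #include <cassert>
    #include <cstdint>

    enum ChunkFlag { IS_EXECUTABLE, IN_FROM_SPACE, IN_TO_SPACE, EVACUATION_CANDIDATE };

    struct Chunk {
      uintptr_t flags_ = 0;
      void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
      void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
      bool IsFlagSet(int flag) const {
        return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
      }
    };

    // A mask bundles several flag bits, mirroring kSkipEvacuationSlotsRecordingMask.
    constexpr uintptr_t kInNewSpaceMask =
        (static_cast<uintptr_t>(1) << IN_FROM_SPACE) |
        (static_cast<uintptr_t>(1) << IN_TO_SPACE);

    int main() {
      Chunk c;
      c.SetFlag(IN_TO_SPACE);
      assert(c.IsFlagSet(IN_TO_SPACE) && !c.IsFlagSet(IN_FROM_SPACE));
      assert((c.flags_ & kInNewSpaceMask) != 0);  // the same test InNewSpace() performs
    }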
| 271 // |kCompactionDone|: Initial compaction state of a |MemoryChunk|. | 309 // |kCompactionDone|: Initial compaction state of a |MemoryChunk|. |
| 272 // |kCompactingInProgress|: Parallel compaction is currently in progress. | 310 // |kCompactingInProgress|: Parallel compaction is currently in progress. |
| 273 // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to | 311 // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to |
| 274 // be finalized. | 312 // be finalized. |
| 275 // |kCompactingAborted|: Parallel compaction has been aborted, which should | 313 // |kCompactingAborted|: Parallel compaction has been aborted, which should |
| 276 // for now only happen in OOM scenarios. | 314 // for now only happen in OOM scenarios. |
| 277 enum ParallelCompactingState { | 315 enum ParallelCompactingState { |
| 278 kCompactingDone, | 316 kCompactingDone, |
| 279 kCompactingInProgress, | 317 kCompactingInProgress, |
| 280 kCompactingFinalize, | 318 kCompactingFinalize, |
| 281 kCompactingAborted, | 319 kCompactingAborted, |
| 282 }; | 320 }; |
| 283 | 321 |
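Note: these states live in the AtomicValue<ParallelCompactingState> parallel_compaction_ field declared near the end of the class, so concurrent compaction tasks can claim a chunk with a compare-and-swap. A rough standalone sketch of that claiming protocol, using std::atomic instead of V8's AtomicValue and with invented helper names:

    #include <atomic>

    enum CompactingState {
      kCompactingDone, kCompactingInProgress, kCompactingFinalize, kCompactingAborted
    };

    std::atomic<CompactingState> parallel_compaction{kCompactingDone};

    // A task tries to move kCompactingDone -> kCompactingInProgress; only one
    // task can win the CAS, so each chunk is compacted by exactly one thread.
    bool TryStartCompacting() {
      CompactingState expected = kCompactingDone;
      return parallel_compaction.compare_exchange_strong(expected, kCompactingInProgress);
    }

    void FinishCompacting(bool out_of_memory) {
      // Successful tasks leave the chunk for main-thread finalization; an OOM
      // during compaction is recorded as an aborted attempt.
      parallel_compaction.store(out_of_memory ? kCompactingAborted : kCompactingFinalize);
    }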
| 284 // |kSweepingDone|: The page state when sweeping is complete or sweeping must | 322 // |kSweepingDone|: The page state when sweeping is complete or sweeping must |
| 285 // not be performed on that page. | 323 // not be performed on that page. |
| 286 // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will | 324 // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will |
| 287 // not touch the page memory anymore. | 325 // not touch the page memory anymore. |
| 288 // |kSweepingInProgress|: This page is currently swept by a sweeper thread. | 326 // |kSweepingInProgress|: This page is currently swept by a sweeper thread. |
| 289 // |kSweepingPending|: This page is ready for parallel sweeping. | 327 // |kSweepingPending|: This page is ready for parallel sweeping. |
| 290 enum ParallelSweepingState { | 328 enum ParallelSweepingState { |
| 291 kSweepingDone, | 329 kSweepingDone, |
| 292 kSweepingFinalize, | 330 kSweepingFinalize, |
| 293 kSweepingInProgress, | 331 kSweepingInProgress, |
| 294 kSweepingPending | 332 kSweepingPending |
| 295 }; | 333 }; |
| 296 | 334 |
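Note: the sweeping states are consumed the same way through the parallel_sweeping_ field below. A short, non-V8 sketch of how a caller might interpret them (helper names are made up):

    #include <atomic>

    enum SweepingState {
      kSweepingDone, kSweepingFinalize, kSweepingInProgress, kSweepingPending
    };

    std::atomic<SweepingState> parallel_sweeping{kSweepingPending};

    // True while a sweeper thread may still write to the page's memory.
    bool SweeperMayTouchPage() {
      SweepingState s = parallel_sweeping.load();
      return s == kSweepingInProgress || s == kSweepingPending;
    }

    // Pages in kSweepingFinalize only need main-thread bookkeeping; the sweeper
    // is done with their memory.
    bool NeedsFinalization() { return parallel_sweeping.load() == kSweepingFinalize; }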
| 335 // Every n write barrier invocations we go to runtime even though |
| 336 // we could have handled it in generated code. This lets us check |
| 337 // whether we have hit the limit and should do some more marking. |
| 338 static const int kWriteBarrierCounterGranularity = 500; |
| 339 |
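Note: a sketch of the pattern this granularity enables — not the code V8 actually generates. The write barrier decrements a per-chunk counter (write_barrier_counter_) on its fast path and only calls into the runtime every kWriteBarrierCounterGranularity invocations, which is where incremental-marking progress can be checked:

    // Standalone illustration; the real counter handling lives in generated code.
    int write_barrier_counter = 500;  // kWriteBarrierCounterGranularity

    void WriteBarrierSlowPath() {
      // Runtime path: check marking limits, do a slice of work, reset the counter.
      write_barrier_counter = 500;
    }

    void RecordWrite(/* Object** slot, Object* value */) {
      if (--write_barrier_counter <= 0) WriteBarrierSlowPath();
      // ...fast-path remembered-set bookkeeping elided...
    }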
| 340 static const int kPointersToHereAreInterestingMask = |
| 341 1 << POINTERS_TO_HERE_ARE_INTERESTING; |
| 342 |
| 343 static const int kPointersFromHereAreInterestingMask = |
| 344 1 << POINTERS_FROM_HERE_ARE_INTERESTING; |
| 345 |
| 346 static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE; |
| 347 |
| 348 static const int kSkipEvacuationSlotsRecordingMask = |
| 349 (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) | |
| 350 (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE); |
| 351 |
| 352 static const intptr_t kAlignment = |
| 353 (static_cast<uintptr_t>(1) << kPageSizeBits); |
| 354 |
| 355 static const intptr_t kAlignmentMask = kAlignment - 1; |
| 356 |
| 357 static const intptr_t kSizeOffset = 0; |
| 358 |
| 359 static const intptr_t kLiveBytesOffset = |
| 360 kSizeOffset + kPointerSize // size_t size |
| 361 + kIntptrSize // intptr_t flags_ |
| 362 + kPointerSize // Address area_start_ |
| 363 + kPointerSize // Address area_end_ |
| 364 + 2 * kPointerSize // base::VirtualMemory reservation_ |
| 365 + kPointerSize // Address owner_ |
| 366 + kPointerSize // Heap* heap_ |
| 367 + kIntSize; // int store_buffer_counter_ |
| 368 |
| 369 static const size_t kSlotsBufferOffset = |
| 370 kLiveBytesOffset + kIntSize; // int live_byte_count_ |
| 371 |
| 372 static const size_t kWriteBarrierCounterOffset = |
| 373 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; |
| 374 + kPointerSize; // SkipList* skip_list_; |
| 375 |
| 376 static const size_t kMinHeaderSize = |
| 377 kWriteBarrierCounterOffset + |
| 378 kIntptrSize // intptr_t write_barrier_counter_ |
| 379 + kIntSize // int progress_bar_ |
| 380 + kPointerSize // AtomicValue high_water_mark_ |
| 381 + kPointerSize // base::Mutex* mutex_ |
| 382 + kPointerSize // base::AtomicWord parallel_sweeping_ |
| 383 + kPointerSize // AtomicValue parallel_compaction_ |
| 384 + 5 * kPointerSize // AtomicNumber free-list statistics |
| 385 + kPointerSize // AtomicValue next_chunk_ |
| 386 + kPointerSize; // AtomicValue prev_chunk_ |
| 387 |
| 388 // We add some more space to the computed header size to account for missing |
| 389 // alignment requirements in our computation. |
| 390 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. |
| 391 static const size_t kHeaderSize = kMinHeaderSize + kIntSize; |
| 392 |
| 393 static const int kBodyOffset = |
| 394 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); |
| 395 |
| 396 // The start offset of the object area in a page. Aligned to both maps and |
| 397 // code alignment to be suitable for both. Also aligned to 32 words because |
| 398 // the marking bitmap is arranged in 32 bit chunks. |
| 399 static const int kObjectStartAlignment = 32 * kPointerSize; |
| 400 static const int kObjectStartOffset = |
| 401 kBodyOffset - 1 + |
| 402 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); |
| 403 |
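Note: these offsets simply accumulate the sizes of the data members declared at the bottom of the class, in declaration order. A standalone recomputation for one concrete configuration — assuming a 64-bit build with kPointerSize = kIntptrSize = 8 and kIntSize = 4, which this header does not itself state — shows how the numbers fall out:

    // Sketch only: mirrors the formulas above for assumed 64-bit type sizes.
    constexpr int kPointerSize = 8, kIntptrSize = 8, kIntSize = 4;

    constexpr int kSizeOffset = 0;
    constexpr int kLiveBytesOffset =
        kSizeOffset + kPointerSize          // size_
        + kIntptrSize                       // flags_
        + 2 * kPointerSize                  // area_start_, area_end_
        + 2 * kPointerSize                  // reservation_
        + 2 * kPointerSize                  // owner_, heap_
        + kIntSize;                         // store_buffer_counter_      -> 68
    constexpr int kSlotsBufferOffset = kLiveBytesOffset + kIntSize;     // -> 72
    constexpr int kWriteBarrierCounterOffset =
        kSlotsBufferOffset + 2 * kPointerSize;                          // -> 88
    constexpr int kMinHeaderSize =
        kWriteBarrierCounterOffset + kIntptrSize + kIntSize
        + 4 * kPointerSize                  // high_water_mark_ .. parallel_compaction_
        + 5 * kPointerSize                  // free-list statistics
        + 2 * kPointerSize;                 // next_chunk_, prev_chunk_  -> 188
    constexpr int kHeaderSize = kMinHeaderSize + kIntSize;              // -> 192

    static_assert(kHeaderSize % kPointerSize == 0,
                  "the extra kIntSize pad restores pointer alignment");

kObjectStartOffset then rounds kBodyOffset up to the next multiple of kObjectStartAlignment (32 words); the formula is the usual round-up-to-alignment expression, written so that an already aligned kBodyOffset is left unchanged.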
| 404 static const int kFlagsOffset = kPointerSize; |
| 405 |
| 406 static void IncrementLiveBytesFromMutator(HeapObject* object, int by); |
| 407 |
| 297 // Only works if the pointer is in the first kPageSize of the MemoryChunk. | 408 // Only works if the pointer is in the first kPageSize of the MemoryChunk. |
| 298 static MemoryChunk* FromAddress(Address a) { | 409 static MemoryChunk* FromAddress(Address a) { |
| 299 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); | 410 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask); |
| 300 } | 411 } |
| 412 |
| 301 static const MemoryChunk* FromAddress(const byte* a) { | 413 static const MemoryChunk* FromAddress(const byte* a) { |
| 302 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & | 414 return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) & |
| 303 ~kAlignmentMask); | 415 ~kAlignmentMask); |
| 304 } | 416 } |
| 305 | 417 |
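Note: both FromAddress overloads rely on chunks being kAlignment-aligned, so masking off the low bits of any address in the first kPageSize of the chunk yields the chunk base, where this header struct lives. A quick standalone illustration with made-up addresses, assuming 1 MB (1 << 20) alignment:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kAlignment = static_cast<uintptr_t>(1) << 20;  // assumed 1 MB
    constexpr uintptr_t kAlignmentMask = kAlignment - 1;

    uintptr_t ChunkBaseOf(uintptr_t addr) { return addr & ~kAlignmentMask; }

    int main() {
      uintptr_t chunk = 0x42300000;          // some 1 MB-aligned chunk start
      uintptr_t object = chunk + 0x12345;    // an address inside that chunk
      assert(ChunkBaseOf(object) == chunk);  // the header is recovered by masking
    }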
| 418 static void IncrementLiveBytesFromGC(HeapObject* object, int by) { |
| 419 MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by); |
| 420 } |
| 421 |
| 306 // Only works for addresses in pointer spaces, not data or code spaces. | 422 // Only works for addresses in pointer spaces, not data or code spaces. |
| 307 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); | 423 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr); |
| 308 | 424 |
| 425 static inline uint32_t FastAddressToMarkbitIndex(Address addr) { |
| 426 const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask; |
| 427 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; |
| 428 } |
| 429 |
| 430 static inline void UpdateHighWaterMark(Address mark) { |
| 431 if (mark == nullptr) return; |
| 432 // Need to subtract one from the mark because when a chunk is full the |
| 433 // top points to the next address after the chunk, which effectively belongs |
| 434 // to another chunk. See the comment to Page::FromAllocationTop. |
| 435 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); |
| 436 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address()); |
| 437 intptr_t old_mark = 0; |
| 438 do { |
| 439 old_mark = chunk->high_water_mark_.Value(); |
| 440 } while ((new_mark > old_mark) && |
| 441 !chunk->high_water_mark_.TrySetValue(old_mark, new_mark)); |
| 442 } |
| 443 |
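Note: the retry loop above only continues while new_mark is still greater than the value it lost the race to, so the water mark can only ratchet upward even when several threads publish marks concurrently. The same monotonic-maximum pattern with std::atomic, as a standalone sketch:

    #include <atomic>
    #include <cstdint>

    std::atomic<intptr_t> high_water_mark{0};

    void UpdateMax(intptr_t new_mark) {
      intptr_t old_mark = high_water_mark.load();
      while (new_mark > old_mark &&
             !high_water_mark.compare_exchange_weak(old_mark, new_mark)) {
        // compare_exchange_weak reloads old_mark on failure; keep retrying until
        // the stored value is already >= new_mark or our store succeeds.
      }
    }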
| 309 Address address() { return reinterpret_cast<Address>(this); } | 444 Address address() { return reinterpret_cast<Address>(this); } |
| 310 | 445 |
| 311 bool is_valid() { return address() != NULL; } | 446 bool is_valid() { return address() != NULL; } |
| 312 | 447 |
| 313 MemoryChunk* next_chunk() const { | 448 MemoryChunk* next_chunk() { return next_chunk_.Value(); } |
| 314 return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_)); | |
| 315 } | |
| 316 | 449 |
| 317 MemoryChunk* prev_chunk() const { | 450 MemoryChunk* prev_chunk() { return prev_chunk_.Value(); } |
| 318 return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_)); | |
| 319 } | |
| 320 | 451 |
| 321 void set_next_chunk(MemoryChunk* next) { | 452 void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); } |
| 322 base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next)); | |
| 323 } | |
| 324 | 453 |
| 325 void set_prev_chunk(MemoryChunk* prev) { | 454 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); } |
| 326 base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev)); | |
| 327 } | |
| 328 | 455 |
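Note: the CL replaces the hand-rolled base::Acquire_Load / base::Release_Store accessors with the typed AtomicValue<T> wrapper from src/atomic-utils.h (included at the top of this header), which also drops the reinterpret_casts. A minimal standalone equivalent of such a wrapper, written with std::atomic purely to show what Value()/SetValue()/TrySetValue() amount to — it is not V8's implementation:

    #include <atomic>

    template <typename T>
    class AtomicValueSketch {  // illustrative stand-in, not V8's AtomicValue<T>
     public:
      explicit AtomicValueSketch(T initial = T()) : value_(initial) {}
      T Value() const { return value_.load(std::memory_order_acquire); }
      void SetValue(T new_value) { value_.store(new_value, std::memory_order_release); }
      bool TrySetValue(T expected, T new_value) {
        return value_.compare_exchange_strong(expected, new_value);
      }
     private:
      std::atomic<T> value_;
    };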
| 329 Space* owner() const { | 456 Space* owner() const { |
| 330 if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == | 457 if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == |
| 331 kPageHeaderTag) { | 458 kPageHeaderTag) { |
| 332 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - | 459 return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) - |
| 333 kPageHeaderTag); | 460 kPageHeaderTag); |
| 334 } else { | 461 } else { |
| 335 return NULL; | 462 return NULL; |
| 336 } | 463 } |
| 337 } | 464 } |
| 338 | 465 |
| 339 void set_owner(Space* space) { | 466 void set_owner(Space* space) { |
| 340 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); | 467 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0); |
| 341 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; | 468 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag; |
| 342 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == | 469 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) == |
| 343 kPageHeaderTag); | 470 kPageHeaderTag); |
| 344 } | 471 } |
| 345 | 472 |
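Note: owner_ stores the Space pointer with kPageHeaderTag added, so owner() can distinguish a real owner from other values that may occupy the field, and strips the tag before returning the pointer. A standalone sketch of that tagging trick; the tag value 3 below is an arbitrary assumption for illustration, not necessarily V8's kPageHeaderTag:

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kTag = 3;      // assumed tag value, for illustration only
    constexpr intptr_t kTagMask = 3;  // low bits are free because pointers are aligned

    struct Space {};  // stand-in for v8::internal::Space

    intptr_t TagOwner(Space* space) {
      assert((reinterpret_cast<intptr_t>(space) & kTagMask) == 0);
      return reinterpret_cast<intptr_t>(space) + kTag;
    }

    Space* UntagOwner(intptr_t field) {
      // Only a value carrying the tag is treated as a real Space pointer.
      return ((field & kTagMask) == kTag) ? reinterpret_cast<Space*>(field - kTag)
                                          : nullptr;
    }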
| 346 base::VirtualMemory* reserved_memory() { return &reservation_; } | 473 base::VirtualMemory* reserved_memory() { return &reservation_; } |
| 347 | 474 |
| 348 void InitializeReservedMemory() { reservation_.Reset(); } | |
| 349 | |
| 350 void set_reserved_memory(base::VirtualMemory* reservation) { | 475 void set_reserved_memory(base::VirtualMemory* reservation) { |
| 351 DCHECK_NOT_NULL(reservation); | 476 DCHECK_NOT_NULL(reservation); |
| 352 reservation_.TakeControl(reservation); | 477 reservation_.TakeControl(reservation); |
| 353 } | 478 } |
| 354 | 479 |
| 355 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } | 480 bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); } |
| 356 void initialize_scan_on_scavenge(bool scan) { | 481 void initialize_scan_on_scavenge(bool scan) { |
| 357 if (scan) { | 482 if (scan) { |
| 358 SetFlag(SCAN_ON_SCAVENGE); | 483 SetFlag(SCAN_ON_SCAVENGE); |
| 359 } else { | 484 } else { |
| (...skipping 11 matching lines...) |
| 371 return addr >= area_start() && addr < area_end(); | 496 return addr >= area_start() && addr < area_end(); |
| 372 } | 497 } |
| 373 | 498 |
| 374 // Checks whether addr can be a limit of addresses in this page. | 499 // Checks whether addr can be a limit of addresses in this page. |
| 375 // It's a limit if it's in the page, or if it's just after the | 500 // It's a limit if it's in the page, or if it's just after the |
| 376 // last byte of the page. | 501 // last byte of the page. |
| 377 bool ContainsLimit(Address addr) { | 502 bool ContainsLimit(Address addr) { |
| 378 return addr >= area_start() && addr <= area_end(); | 503 return addr >= area_start() && addr <= area_end(); |
| 379 } | 504 } |
| 380 | 505 |
| 381 // Every n write barrier invocations we go to runtime even though | |
| 382 // we could have handled it in generated code. This lets us check | |
| 383 // whether we have hit the limit and should do some more marking. | |
| 384 static const int kWriteBarrierCounterGranularity = 500; | |
| 385 | |
| 386 enum MemoryChunkFlags { | |
| 387 IS_EXECUTABLE, | |
| 388 ABOUT_TO_BE_FREED, | |
| 389 POINTERS_TO_HERE_ARE_INTERESTING, | |
| 390 POINTERS_FROM_HERE_ARE_INTERESTING, | |
| 391 SCAN_ON_SCAVENGE, | |
| 392 IN_FROM_SPACE, // Mutually exclusive with IN_TO_SPACE. | |
| 393 IN_TO_SPACE, // All pages in new space have one of these two set. | |
| 394 NEW_SPACE_BELOW_AGE_MARK, | |
| 395 EVACUATION_CANDIDATE, | |
| 396 RESCAN_ON_EVACUATION, | |
| 397 NEVER_EVACUATE, // May contain immortal immutables. | |
| 398 POPULAR_PAGE, // Slots buffer of this page overflowed on the previous GC. | |
| 399 | |
| 400 // WAS_SWEPT indicates that marking bits have been cleared by the sweeper, | |
| 401 // otherwise marking bits are still intact. | |
| 402 WAS_SWEPT, | |
| 403 | |
| 404 // Large objects can have a progress bar in their page header. These objects | |
| 405 // are scanned in increments and will be kept black while being scanned. | |
| 406 // Even if the mutator writes to them they will be kept black and a white | |
| 407 // to grey transition is performed in the value. | |
| 408 HAS_PROGRESS_BAR, | |
| 409 | |
| 410 // This flag is intended to be used for testing. Works only when both | |
| 411 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection | |
| 412 // are set. It forces the page to become an evacuation candidate at the next | |
| 413 // candidate selection cycle. | |
| 414 FORCE_EVACUATION_CANDIDATE_FOR_TESTING, | |
| 415 | |
| 416 // The memory chunk is already logically freed, however the actual freeing | |
| 417 // still has to be performed. | |
| 418 PRE_FREED, | |
| 419 | |
| 420 // Last flag, keep at bottom. | |
| 421 NUM_MEMORY_CHUNK_FLAGS | |
| 422 }; | |
| 423 | |
| 424 | |
| 425 static const int kPointersToHereAreInterestingMask = | |
| 426 1 << POINTERS_TO_HERE_ARE_INTERESTING; | |
| 427 | |
| 428 static const int kPointersFromHereAreInterestingMask = | |
| 429 1 << POINTERS_FROM_HERE_ARE_INTERESTING; | |
| 430 | |
| 431 static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE; | |
| 432 | |
| 433 static const int kSkipEvacuationSlotsRecordingMask = | |
| 434 (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) | | |
| 435 (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE); | |
| 436 | |
| 437 | |
| 438 void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; } | 506 void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; } |
| 439 | 507 |
| 440 void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); } | 508 void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); } |
| 441 | 509 |
| 442 void SetFlagTo(int flag, bool value) { | 510 void SetFlagTo(int flag, bool value) { |
| 443 if (value) { | 511 if (value) { |
| 444 SetFlag(flag); | 512 SetFlag(flag); |
| 445 } else { | 513 } else { |
| 446 ClearFlag(flag); | 514 ClearFlag(flag); |
| 447 } | 515 } |
| (...skipping 86 matching lines...) |
| 534 } | 602 } |
| 535 } | 603 } |
| 536 | 604 |
| 537 bool IsLeftOfProgressBar(Object** slot) { | 605 bool IsLeftOfProgressBar(Object** slot) { |
| 538 Address slot_address = reinterpret_cast<Address>(slot); | 606 Address slot_address = reinterpret_cast<Address>(slot); |
| 539 DCHECK(slot_address > this->address()); | 607 DCHECK(slot_address > this->address()); |
| 540 return (slot_address - (this->address() + kObjectStartOffset)) < | 608 return (slot_address - (this->address() + kObjectStartOffset)) < |
| 541 progress_bar(); | 609 progress_bar(); |
| 542 } | 610 } |
| 543 | 611 |
| 544 static void IncrementLiveBytesFromGC(HeapObject* object, int by) { | |
| 545 MemoryChunk::FromAddress(object->address())->IncrementLiveBytes(by); | |
| 546 } | |
| 547 | |
| 548 static void IncrementLiveBytesFromMutator(HeapObject* object, int by); | |
| 549 | |
| 550 static const intptr_t kAlignment = | |
| 551 (static_cast<uintptr_t>(1) << kPageSizeBits); | |
| 552 | |
| 553 static const intptr_t kAlignmentMask = kAlignment - 1; | |
| 554 | |
| 555 static const intptr_t kSizeOffset = 0; | |
| 556 | |
| 557 static const intptr_t kLiveBytesOffset = | |
| 558 kSizeOffset + kPointerSize // size_t size | |
| 559 + kIntptrSize // intptr_t flags_ | |
| 560 + kPointerSize // Address area_start_ | |
| 561 + kPointerSize // Address area_end_ | |
| 562 + 2 * kPointerSize // base::VirtualMemory reservation_ | |
| 563 + kPointerSize // Address owner_ | |
| 564 + kPointerSize // Heap* heap_ | |
| 565 + kIntSize; // int store_buffer_counter_ | |
| 566 | |
| 567 | |
| 568 static const size_t kSlotsBufferOffset = | |
| 569 kLiveBytesOffset + kIntSize; // int live_byte_count_ | |
| 570 | |
| 571 static const size_t kWriteBarrierCounterOffset = | |
| 572 kSlotsBufferOffset + kPointerSize // SlotsBuffer* slots_buffer_; | |
| 573 + kPointerSize; // SkipList* skip_list_; | |
| 574 | |
| 575 static const size_t kMinHeaderSize = | |
| 576 kWriteBarrierCounterOffset + | |
| 577 kIntptrSize // intptr_t write_barrier_counter_ | |
| 578 + kIntSize // int progress_bar_ | |
| 579 + kPointerSize // AtomicValue high_water_mark_ | |
| 580 + kPointerSize // base::Mutex* mutex_ | |
| 581 + kPointerSize // base::AtomicWord parallel_sweeping_ | |
| 582 + kPointerSize // AtomicValue parallel_compaction_ | |
| 583 + 5 * kPointerSize // AtomicNumber free-list statistics | |
| 584 + kPointerSize // base::AtomicWord next_chunk_ | |
| 585 + kPointerSize; // base::AtomicWord prev_chunk_ | |
| 586 | |
| 587 // We add some more space to the computed header size to account for missing | |
| 588 // alignment requirements in our computation. | |
| 589 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines. | |
| 590 static const size_t kHeaderSize = kMinHeaderSize + kIntSize; | |
| 591 | |
| 592 static const int kBodyOffset = | |
| 593 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize); | |
| 594 | |
| 595 // The start offset of the object area in a page. Aligned to both maps and | |
| 596 // code alignment to be suitable for both. Also aligned to 32 words because | |
| 597 // the marking bitmap is arranged in 32 bit chunks. | |
| 598 static const int kObjectStartAlignment = 32 * kPointerSize; | |
| 599 static const int kObjectStartOffset = | |
| 600 kBodyOffset - 1 + | |
| 601 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment); | |
| 602 | |
| 603 size_t size() const { return size_; } | 612 size_t size() const { return size_; } |
| 604 | 613 |
| 605 void set_size(size_t size) { size_ = size; } | 614 void set_size(size_t size) { size_ = size; } |
| 606 | 615 |
| 607 void SetArea(Address area_start, Address area_end) { | 616 void SetArea(Address area_start, Address area_end) { |
| 608 area_start_ = area_start; | 617 area_start_ = area_start; |
| 609 area_end_ = area_end; | 618 area_end_ = area_end; |
| 610 } | 619 } |
| 611 | 620 |
| 612 Executability executable() { | 621 Executability executable() { |
| 613 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; | 622 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE; |
| 614 } | 623 } |
| 615 | 624 |
| 616 bool InNewSpace() { | 625 bool InNewSpace() { |
| 617 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; | 626 return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0; |
| 618 } | 627 } |
| 619 | 628 |
| 620 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } | 629 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); } |
| 621 | 630 |
| 622 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } | 631 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); } |
| 623 | 632 |
| 624 // --------------------------------------------------------------------- | |
| 625 // Markbits support | 633 // Markbits support |
| 626 | 634 |
| 627 inline Bitmap* markbits() { | 635 inline Bitmap* markbits() { |
| 628 return Bitmap::FromAddress(address() + kHeaderSize); | 636 return Bitmap::FromAddress(address() + kHeaderSize); |
| 629 } | 637 } |
| 630 | 638 |
| 631 void PrintMarkbits() { markbits()->Print(); } | 639 void PrintMarkbits() { markbits()->Print(); } |
| 632 | 640 |
| 633 inline uint32_t AddressToMarkbitIndex(Address addr) { | 641 inline uint32_t AddressToMarkbitIndex(Address addr) { |
| 634 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; | 642 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2; |
| 635 } | 643 } |
| 636 | 644 |
| 637 inline static uint32_t FastAddressToMarkbitIndex(Address addr) { | |
| 638 const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask; | |
| 639 | |
| 640 return static_cast<uint32_t>(offset) >> kPointerSizeLog2; | |
| 641 } | |
| 642 | |
| 643 inline Address MarkbitIndexToAddress(uint32_t index) { | 645 inline Address MarkbitIndexToAddress(uint32_t index) { |
| 644 return this->address() + (index << kPointerSizeLog2); | 646 return this->address() + (index << kPointerSizeLog2); |
| 645 } | 647 } |
| 646 | 648 |
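Note: a markbit index is just the word offset of an address within its chunk — subtract the chunk base (or mask with kAlignmentMask, as FastAddressToMarkbitIndex does) and shift by kPointerSizeLog2; MarkbitIndexToAddress inverts that. A tiny worked example, assuming 64-bit words (kPointerSizeLog2 == 3):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kPointerSizeLog2 = 3;   // assumed 64-bit build
      uintptr_t chunk_start = 0x42300000;    // made-up chunk base
      uintptr_t addr = chunk_start + 0x100;  // object 256 bytes into the chunk
      uint32_t index =
          static_cast<uint32_t>(addr - chunk_start) >> kPointerSizeLog2;
      assert(index == 32);  // one markbit per word
      assert(chunk_start + (index << kPointerSizeLog2) == addr);  // round-trips
    }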
| 647 void InsertAfter(MemoryChunk* other); | 649 void InsertAfter(MemoryChunk* other); |
| 648 void Unlink(); | 650 void Unlink(); |
| 649 | 651 |
| 650 inline Heap* heap() const { return heap_; } | 652 inline Heap* heap() const { return heap_; } |
| 651 | 653 |
| 652 static const int kFlagsOffset = kPointerSize; | |
| 653 | |
| 654 bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); } | 654 bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); } |
| 655 | 655 |
| 656 void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); } | 656 void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); } |
| 657 | 657 |
| 658 bool IsEvacuationCandidate() { | 658 bool IsEvacuationCandidate() { |
| 659 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); | 659 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE))); |
| 660 return IsFlagSet(EVACUATION_CANDIDATE); | 660 return IsFlagSet(EVACUATION_CANDIDATE); |
| 661 } | 661 } |
| 662 | 662 |
| 663 bool ShouldSkipEvacuationSlotRecording() { | 663 bool ShouldSkipEvacuationSlotRecording() { |
| (...skipping 23 matching lines...) |
| 687 Address area_end() { return area_end_; } | 687 Address area_end() { return area_end_; } |
| 688 int area_size() { return static_cast<int>(area_end() - area_start()); } | 688 int area_size() { return static_cast<int>(area_end() - area_start()); } |
| 689 bool CommitArea(size_t requested); | 689 bool CommitArea(size_t requested); |
| 690 | 690 |
| 691 // Approximate amount of physical memory committed for this chunk. | 691 // Approximate amount of physical memory committed for this chunk. |
| 692 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } | 692 size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); } |
| 693 | 693 |
| 694 // Should be called when memory chunk is about to be freed. | 694 // Should be called when memory chunk is about to be freed. |
| 695 void ReleaseAllocatedMemory(); | 695 void ReleaseAllocatedMemory(); |
| 696 | 696 |
| 697 static inline void UpdateHighWaterMark(Address mark) { | 697 protected: |
| 698 if (mark == nullptr) return; | 698 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, |
| 699 // Need to subtract one from the mark because when a chunk is full the | 699 Address area_start, Address area_end, |
| 700 // top points to the next address after the chunk, which effectively belongs | 700 Executability executable, Space* owner); |
| 701 // to another chunk. See the comment to Page::FromAllocationTop. | |
| 702 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1); | |
| 703 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address()); | |
| 704 intptr_t old_mark = 0; | |
| 705 do { | |
| 706 old_mark = chunk->high_water_mark_.Value(); | |
| 707 } while ((new_mark > old_mark) && | |
| 708 !chunk->high_water_mark_.TrySetValue(old_mark, new_mark)); | |
| 709 } | |
| 710 | 701 |
| 711 protected: | |
| 712 size_t size_; | 702 size_t size_; |
| 713 intptr_t flags_; | 703 intptr_t flags_; |
| 714 | 704 |
| 715 // Start and end of allocatable memory on this chunk. | 705 // Start and end of allocatable memory on this chunk. |
| 716 Address area_start_; | 706 Address area_start_; |
| 717 Address area_end_; | 707 Address area_end_; |
| 718 | 708 |
| 719 // If the chunk needs to remember its memory reservation, it is stored here. | 709 // If the chunk needs to remember its memory reservation, it is stored here. |
| 720 base::VirtualMemory reservation_; | 710 base::VirtualMemory reservation_; |
| 721 // The identity of the owning space. This is tagged as a failure pointer, but | 711 // The identity of the owning space. This is tagged as a failure pointer, but |
| (...skipping 21 matching lines...) |
| 743 AtomicValue<ParallelCompactingState> parallel_compaction_; | 733 AtomicValue<ParallelCompactingState> parallel_compaction_; |
| 744 | 734 |
| 745 // PagedSpace free-list statistics. | 735 // PagedSpace free-list statistics. |
| 746 AtomicNumber<intptr_t> available_in_small_free_list_; | 736 AtomicNumber<intptr_t> available_in_small_free_list_; |
| 747 AtomicNumber<intptr_t> available_in_medium_free_list_; | 737 AtomicNumber<intptr_t> available_in_medium_free_list_; |
| 748 AtomicNumber<intptr_t> available_in_large_free_list_; | 738 AtomicNumber<intptr_t> available_in_large_free_list_; |
| 749 AtomicNumber<intptr_t> available_in_huge_free_list_; | 739 AtomicNumber<intptr_t> available_in_huge_free_list_; |
| 750 AtomicNumber<intptr_t> non_available_small_blocks_; | 740 AtomicNumber<intptr_t> non_available_small_blocks_; |
| 751 | 741 |
| 752 // next_chunk_ holds a pointer of type MemoryChunk | 742 // next_chunk_ holds a pointer of type MemoryChunk |
| 753 base::AtomicWord next_chunk_; | 743 AtomicValue<MemoryChunk*> next_chunk_; |
| 754 // prev_chunk_ holds a pointer of type MemoryChunk | 744 // prev_chunk_ holds a pointer of type MemoryChunk |
| 755 base::AtomicWord prev_chunk_; | 745 AtomicValue<MemoryChunk*> prev_chunk_; |
| 756 | |
| 757 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size, | |
| 758 Address area_start, Address area_end, | |
| 759 Executability executable, Space* owner); | |
| 760 | 746 |
| 761 private: | 747 private: |
| 748 void InitializeReservedMemory() { reservation_.Reset(); } |
| 749 |
| 762 friend class MemoryAllocator; | 750 friend class MemoryAllocator; |
| 763 friend class MemoryChunkValidator; | 751 friend class MemoryChunkValidator; |
| 764 }; | 752 }; |
| 765 | 753 |
| 766 | 754 |
| 767 // ----------------------------------------------------------------------------- | 755 // ----------------------------------------------------------------------------- |
| 768 // A page is a memory chunk of size 1MB. Large object pages may be larger. | 756 // A page is a memory chunk of size 1MB. Large object pages may be larger. |
| 769 // | 757 // |
| 770 // The only way to get a page pointer is by calling factory methods: | 758 // The only way to get a page pointer is by calling factory methods: |
| 771 // Page* p = Page::FromAddress(addr); or | 759 // Page* p = Page::FromAddress(addr); or |
| (...skipping 98 matching lines...) |
| 870 #endif // DEBUG | 858 #endif // DEBUG |
| 871 | 859 |
| 872 friend class MemoryAllocator; | 860 friend class MemoryAllocator; |
| 873 }; | 861 }; |
| 874 | 862 |
| 875 | 863 |
| 876 class LargePage : public MemoryChunk { | 864 class LargePage : public MemoryChunk { |
| 877 public: | 865 public: |
| 878 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } | 866 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); } |
| 879 | 867 |
| 880 inline LargePage* next_page() const { | 868 inline LargePage* next_page() { |
| 881 return static_cast<LargePage*>(next_chunk()); | 869 return static_cast<LargePage*>(next_chunk()); |
| 882 } | 870 } |
| 883 | 871 |
| 884 inline void set_next_page(LargePage* page) { set_next_chunk(page); } | 872 inline void set_next_page(LargePage* page) { set_next_chunk(page); } |
| 885 | 873 |
| 886 private: | 874 private: |
| 887 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); | 875 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk); |
| 888 | 876 |
| 889 friend class MemoryAllocator; | 877 friend class MemoryAllocator; |
| 890 }; | 878 }; |
| (...skipping 1219 matching lines...) |
| 2110 public: | 2098 public: |
| 2111 // GC related flags copied from from-space to to-space when | 2099 // GC related flags copied from from-space to to-space when |
| 2112 // flipping semispaces. | 2100 // flipping semispaces. |
| 2113 static const intptr_t kCopyOnFlipFlagsMask = | 2101 static const intptr_t kCopyOnFlipFlagsMask = |
| 2114 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | | 2102 (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) | |
| 2115 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | | 2103 (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) | |
| 2116 (1 << MemoryChunk::SCAN_ON_SCAVENGE); | 2104 (1 << MemoryChunk::SCAN_ON_SCAVENGE); |
| 2117 | 2105 |
| 2118 static const int kAreaSize = Page::kMaxRegularHeapObjectSize; | 2106 static const int kAreaSize = Page::kMaxRegularHeapObjectSize; |
| 2119 | 2107 |
| 2120 inline NewSpacePage* next_page() const { | 2108 inline NewSpacePage* next_page() { |
| 2121 return static_cast<NewSpacePage*>(next_chunk()); | 2109 return static_cast<NewSpacePage*>(next_chunk()); |
| 2122 } | 2110 } |
| 2123 | 2111 |
| 2124 inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); } | 2112 inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); } |
| 2125 | 2113 |
| 2126 inline NewSpacePage* prev_page() const { | 2114 inline NewSpacePage* prev_page() { |
| 2127 return static_cast<NewSpacePage*>(prev_chunk()); | 2115 return static_cast<NewSpacePage*>(prev_chunk()); |
| 2128 } | 2116 } |
| 2129 | 2117 |
| 2130 inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); } | 2118 inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); } |
| 2131 | 2119 |
| 2132 SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); } | 2120 SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); } |
| 2133 | 2121 |
| 2134 bool is_anchor() { return !this->InNewSpace(); } | 2122 bool is_anchor() { return !this->InNewSpace(); } |
| 2135 | 2123 |
| 2136 static bool IsAtStart(Address addr) { | 2124 static bool IsAtStart(Address addr) { |
| (...skipping 840 matching lines...) |
| 2977 count = 0; | 2965 count = 0; |
| 2978 } | 2966 } |
| 2979 // Must be small, since an iteration is used for lookup. | 2967 // Must be small, since an iteration is used for lookup. |
| 2980 static const int kMaxComments = 64; | 2968 static const int kMaxComments = 64; |
| 2981 }; | 2969 }; |
| 2982 #endif | 2970 #endif |
| 2983 } | 2971 } |
| 2984 } // namespace v8::internal | 2972 } // namespace v8::internal |
| 2985 | 2973 |
| 2986 #endif // V8_HEAP_SPACES_H_ | 2974 #endif // V8_HEAP_SPACES_H_ |