OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef V8_HEAP_HEAP_INL_H_ | 5 #ifndef V8_HEAP_HEAP_INL_H_ |
6 #define V8_HEAP_HEAP_INL_H_ | 6 #define V8_HEAP_HEAP_INL_H_ |
7 | 7 |
8 #include <cmath> | 8 #include <cmath> |
9 | 9 |
10 #include "src/base/platform/platform.h" | 10 #include "src/base/platform/platform.h" |
(...skipping 450 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
461 | 461 |
462 while (src_slot != end_slot) { | 462 while (src_slot != end_slot) { |
463 *dst_slot++ = *src_slot++; | 463 *dst_slot++ = *src_slot++; |
464 } | 464 } |
465 } else { | 465 } else { |
466 MemMove(dst, src, static_cast<size_t>(byte_size)); | 466 MemMove(dst, src, static_cast<size_t>(byte_size)); |
467 } | 467 } |
468 } | 468 } |
469 | 469 |
470 | 470 |
471 template <int find_memento_mode> | |
Hannes Payer (out of office)
2016/01/20 13:19:39
s/int find_memento_mode/Heap::FindMementoMode mode/
Michael Lippautz
2016/01/21 10:00:08
Done.
| |
471 AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) { | 472 AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) { |
472 // Check if there is potentially a memento behind the object. If | 473 // Check if there is potentially a memento behind the object. If |
473 // the last word of the memento is on another page we return | 474 // the last word of the memento is on another page we return |
474 // immediately. | 475 // immediately. |
475 Address object_address = object->address(); | 476 Address object_address = object->address(); |
476 Address memento_address = object_address + object->Size(); | 477 Address memento_address = object_address + object->Size(); |
477 Address last_memento_word_address = memento_address + kPointerSize; | 478 Address last_memento_word_address = memento_address + kPointerSize; |
478 if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) { | 479 if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) { |
479 return NULL; | 480 return nullptr; |
480 } | 481 } |
481 | |
482 HeapObject* candidate = HeapObject::FromAddress(memento_address); | 482 HeapObject* candidate = HeapObject::FromAddress(memento_address); |
483 Map* candidate_map = candidate->map(); | 483 Map* candidate_map = candidate->map(); |
484 // This fast check may peek at an uninitialized word. However, the slow check | 484 // This fast check may peek at an uninitialized word. However, the slow check |
485 // below (memento_address == top) ensures that this is safe. Mark the word as | 485 // below (memento_address == top) ensures that this is safe. Mark the word as |
486 // initialized to silence MemorySanitizer warnings. | 486 // initialized to silence MemorySanitizer warnings. |
487 MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map)); | 487 MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map)); |
488 if (candidate_map != allocation_memento_map()) return NULL; | 488 if (candidate_map != allocation_memento_map()) { |
489 return nullptr; | |
490 } | |
491 AllocationMemento* memento_candidate = AllocationMemento::cast(candidate); | |
489 | 492 |
490 // Either the object is the last object in the new space, or there is another | 493 // Depending on what the memento is used for, we might need to perform |
491 // object of at least word size (the header map word) following it, so | 494 // additional checks. |
492 // suffices to compare ptr and top here. Note that technically we do not have | 495 switch (find_memento_mode) { |
493 // to compare with the current top pointer of the from space page during GC, | 496 default: |
494 // since we always install filler objects above the top pointer of a from | 497 UNREACHABLE(); |
Hannes Payer (out of office)
2016/01/20 13:19:39
default should be the last case
Michael Lippautz
2016/01/21 10:00:08
Done.
| |
495 // space page when performing a garbage collection. However, always performing | 498 case Heap::kForParallelEvacuation: |
496 // the test makes it possible to have a single, unified version of | 499 return memento_candidate; |
497 // FindAllocationMemento that is used both by the GC and the mutator. | 500 case Heap::kForRuntime: |
498 Address top = NewSpaceTop(); | 501 if (memento_candidate == nullptr) return nullptr; |
499 DCHECK(memento_address == top || | 502 // Either the object is the last object in the new space, or there is |
500 memento_address + HeapObject::kHeaderSize <= top || | 503 // another object of at least word size (the header map word) following |
501 !NewSpacePage::OnSamePage(memento_address, top - 1)); | 504 // it, so suffices to compare ptr and top here. |
502 if (memento_address == top) return NULL; | 505 Address top = NewSpaceTop(); |
503 | 506 Address memento_address = memento_candidate->address(); |
504 AllocationMemento* memento = AllocationMemento::cast(candidate); | 507 DCHECK(memento_address == top || |
505 if (!memento->IsValid()) return NULL; | 508 memento_address + HeapObject::kHeaderSize <= top || |
506 return memento; | 509 !NewSpacePage::OnSamePage(memento_address, top - 1)); |
510 if ((memento_address != top) && memento_candidate->IsValid()) { | |
511 return memento_candidate; | |
512 } | |
513 return nullptr; | |
514 } | |
515 UNREACHABLE(); | |
516 return nullptr; | |
507 } | 517 } |
508 | 518 |
509 | 519 |
510 void Heap::UpdateAllocationSite(HeapObject* object, | 520 void Heap::UpdateAllocationSite(HeapObject* object, |
511 HashMap* pretenuring_feedback) { | 521 HashMap* pretenuring_feedback) { |
512 DCHECK(InFromSpace(object)); | 522 DCHECK(InFromSpace(object)); |
513 if (!FLAG_allocation_site_pretenuring || | 523 if (!FLAG_allocation_site_pretenuring || |
514 !AllocationSite::CanTrack(object->map()->instance_type())) | 524 !AllocationSite::CanTrack(object->map()->instance_type())) |
515 return; | 525 return; |
516 AllocationMemento* memento = FindAllocationMemento(object); | 526 AllocationMemento* memento_candidate = |
517 if (memento == nullptr) return; | 527 FindAllocationMemento<kForParallelEvacuation>(object); |
518 | 528 if (memento_candidate == nullptr) return; |
519 AllocationSite* key = memento->GetAllocationSite(); | |
520 DCHECK(!key->IsZombie()); | |
521 | 529 |
522 if (pretenuring_feedback == global_pretenuring_feedback_) { | 530 if (pretenuring_feedback == global_pretenuring_feedback_) { |
531 // Entering global pretenuring feedback is only used in the scavenger, where | |
532 // we are allowed to actually touch the allocation site. | |
533 if (!memento_candidate->IsValid()) return; | |
534 AllocationSite* site = memento_candidate->GetAllocationSite(); | |
535 DCHECK(!site->IsZombie()); | |
523 // For inserting in the global pretenuring storage we need to first | 536 // For inserting in the global pretenuring storage we need to first |
524 // increment the memento found count on the allocation site. | 537 // increment the memento found count on the allocation site. |
525 if (key->IncrementMementoFoundCount()) { | 538 if (site->IncrementMementoFoundCount()) { |
526 global_pretenuring_feedback_->LookupOrInsert( | 539 global_pretenuring_feedback_->LookupOrInsert( |
527 key, static_cast<uint32_t>(bit_cast<uintptr_t>(key))); | 540 site, static_cast<uint32_t>(bit_cast<uintptr_t>(site) >> 3)); |
528 } | 541 } |
529 } else { | 542 } else { |
530 // Any other pretenuring storage than the global one is used as a cache, | 543 // Entering cached feedback is used in the parallel case. We are not allowed |
531 // where the count is later on merge in the allocation site. | 544 // to dereference the allocation site and rather have to postpone all checks |
545 // till actually merging the data. | |
546 Address key = memento_candidate->GetAllocationSiteUnchecked(); | |
532 HashMap::Entry* e = pretenuring_feedback->LookupOrInsert( | 547 HashMap::Entry* e = pretenuring_feedback->LookupOrInsert( |
533 key, static_cast<uint32_t>(bit_cast<uintptr_t>(key))); | 548 key, static_cast<uint32_t>(bit_cast<uintptr_t>(key) >> 3)); |
Hannes Payer (out of office)
2016/01/20 13:19:39
Same as before.
Michael Lippautz
2016/01/21 10:00:08
Done.
| |
534 DCHECK(e != nullptr); | 549 DCHECK(e != nullptr); |
535 (*bit_cast<intptr_t*>(&e->value))++; | 550 (*bit_cast<intptr_t*>(&e->value))++; |
536 } | 551 } |
537 } | 552 } |
538 | 553 |
539 | 554 |
540 void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) { | 555 void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) { |
541 global_pretenuring_feedback_->Remove( | 556 global_pretenuring_feedback_->Remove( |
542 site, static_cast<uint32_t>(bit_cast<uintptr_t>(site))); | 557 site, static_cast<uint32_t>(bit_cast<uintptr_t>(site))); |
543 } | 558 } |
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
719 | 734 |
720 void VerifySmisVisitor::VisitPointers(Object** start, Object** end) { | 735 void VerifySmisVisitor::VisitPointers(Object** start, Object** end) { |
721 for (Object** current = start; current < end; current++) { | 736 for (Object** current = start; current < end; current++) { |
722 CHECK((*current)->IsSmi()); | 737 CHECK((*current)->IsSmi()); |
723 } | 738 } |
724 } | 739 } |
725 } // namespace internal | 740 } // namespace internal |
726 } // namespace v8 | 741 } // namespace v8 |
727 | 742 |
728 #endif // V8_HEAP_HEAP_INL_H_ | 743 #endif // V8_HEAP_HEAP_INL_H_ |
OLD | NEW |