Chromium Code Reviews

Unified Diff: src/heap/heap-inl.h

Issue 1643473002: Revert of [heap] Parallel newspace evacuation, semispace copy, and compaction \o/ (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 10 months ago
--- src/heap/heap-inl.h (before this revert)
+++ src/heap/heap-inl.h (after this revert)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef V8_HEAP_HEAP_INL_H_
 #define V8_HEAP_HEAP_INL_H_

 #include <cmath>

 #include "src/base/platform/platform.h"
(...skipping 449 matching lines...)
     Object** end_slot = src_slot + size_in_words;

     while (src_slot != end_slot) {
       *dst_slot++ = *src_slot++;
     }
   } else {
     MemMove(dst, src, static_cast<size_t>(byte_size));
   }
 }

-template <Heap::FindMementoMode mode>
+
 AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
   // Check if there is potentially a memento behind the object. If
   // the last word of the memento is on another page we return
   // immediately.
   Address object_address = object->address();
   Address memento_address = object_address + object->Size();
   Address last_memento_word_address = memento_address + kPointerSize;
   if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
-    return nullptr;
+    return NULL;
   }
+
   HeapObject* candidate = HeapObject::FromAddress(memento_address);
   Map* candidate_map = candidate->map();
   // This fast check may peek at an uninitialized word. However, the slow check
   // below (memento_address == top) ensures that this is safe. Mark the word as
   // initialized to silence MemorySanitizer warnings.
   MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
-  if (candidate_map != allocation_memento_map()) {
-    return nullptr;
-  }
-  AllocationMemento* memento_candidate = AllocationMemento::cast(candidate);
+  if (candidate_map != allocation_memento_map()) return NULL;

-  // Depending on what the memento is used for, we might need to perform
-  // additional checks.
-  Address top;
-  switch (mode) {
-    case Heap::kForGC:
-      return memento_candidate;
-    case Heap::kForRuntime:
-      if (memento_candidate == nullptr) return nullptr;
-      // Either the object is the last object in the new space, or there is
-      // another object of at least word size (the header map word) following
-      // it, so suffices to compare ptr and top here.
-      top = NewSpaceTop();
-      DCHECK(memento_address == top ||
-             memento_address + HeapObject::kHeaderSize <= top ||
-             !NewSpacePage::OnSamePage(memento_address, top - 1));
-      if ((memento_address != top) && memento_candidate->IsValid()) {
-        return memento_candidate;
-      }
-      return nullptr;
-    default:
-      UNREACHABLE();
-  }
-  UNREACHABLE();
-  return nullptr;
+  // Either the object is the last object in the new space, or there is another
+  // object of at least word size (the header map word) following it, so
+  // suffices to compare ptr and top here. Note that technically we do not have
+  // to compare with the current top pointer of the from space page during GC,
+  // since we always install filler objects above the top pointer of a from
+  // space page when performing a garbage collection. However, always performing
+  // the test makes it possible to have a single, unified version of
+  // FindAllocationMemento that is used both by the GC and the mutator.
+  Address top = NewSpaceTop();
+  DCHECK(memento_address == top ||
+         memento_address + HeapObject::kHeaderSize <= top ||
+         !NewSpacePage::OnSamePage(memento_address, top - 1));
+  if (memento_address == top) return NULL;
+
+  AllocationMemento* memento = AllocationMemento::cast(candidate);
+  if (!memento->IsValid()) return NULL;
+  return memento;
 }


 void Heap::UpdateAllocationSite(HeapObject* object,
                                 HashMap* pretenuring_feedback) {
   DCHECK(InFromSpace(object));
   if (!FLAG_allocation_site_pretenuring ||
       !AllocationSite::CanTrack(object->map()->instance_type()))
     return;
-  AllocationMemento* memento_candidate = FindAllocationMemento<kForGC>(object);
-  if (memento_candidate == nullptr) return;
+  AllocationMemento* memento = FindAllocationMemento(object);
+  if (memento == nullptr) return;
+
+  AllocationSite* key = memento->GetAllocationSite();
+  DCHECK(!key->IsZombie());

   if (pretenuring_feedback == global_pretenuring_feedback_) {
-    // Entering global pretenuring feedback is only used in the scavenger, where
-    // we are allowed to actually touch the allocation site.
-    if (!memento_candidate->IsValid()) return;
-    AllocationSite* site = memento_candidate->GetAllocationSite();
-    DCHECK(!site->IsZombie());
     // For inserting in the global pretenuring storage we need to first
     // increment the memento found count on the allocation site.
-    if (site->IncrementMementoFoundCount()) {
-      global_pretenuring_feedback_->LookupOrInsert(site,
-                                                   ObjectHash(site->address()));
+    if (key->IncrementMementoFoundCount()) {
+      global_pretenuring_feedback_->LookupOrInsert(
+          key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
     }
   } else {
-    // Entering cached feedback is used in the parallel case. We are not allowed
-    // to dereference the allocation site and rather have to postpone all checks
-    // till actually merging the data.
-    Address key = memento_candidate->GetAllocationSiteUnchecked();
-    HashMap::Entry* e =
-        pretenuring_feedback->LookupOrInsert(key, ObjectHash(key));
+    // Any other pretenuring storage than the global one is used as a cache,
+    // where the count is later on merge in the allocation site.
+    HashMap::Entry* e = pretenuring_feedback->LookupOrInsert(
+        key, static_cast<uint32_t>(bit_cast<uintptr_t>(key)));
     DCHECK(e != nullptr);
     (*bit_cast<intptr_t*>(&e->value))++;
   }
 }


 void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite* site) {
   global_pretenuring_feedback_->Remove(
       site, static_cast<uint32_t>(bit_cast<uintptr_t>(site)));
 }
(...skipping 175 matching lines...)

 void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
   for (Object** current = start; current < end; current++) {
     CHECK((*current)->IsSmi());
   }
 }
 }  // namespace internal
 }  // namespace v8

 #endif  // V8_HEAP_HEAP_INL_H_
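For readers skimming the restored FindAllocationMemento above: the early-out at its top is pure address arithmetic. An AllocationMemento, if present, sits immediately behind the object, so if the last word of that would-be memento falls on a different new-space page, the lookup can bail out before touching any memory. Below is a minimal standalone sketch of that same-page check, assuming power-of-two page alignment; the page size, object address, and object size are made-up illustration values, not V8's actual constants.

#include <cstdint>
#include <iostream>

// Illustrative constants only; V8's real page size lives in its heap headers.
constexpr std::uintptr_t kPageSize = std::uintptr_t{1} << 19;  // assume 512 KiB pages
constexpr std::uintptr_t kPointerSize = sizeof(void*);

// Two addresses are on the same page iff they share the page-aligned base.
bool OnSamePage(std::uintptr_t a, std::uintptr_t b) {
  return (a & ~(kPageSize - 1)) == (b & ~(kPageSize - 1));
}

int main() {
  std::uintptr_t object_address = 0x40000;  // hypothetical object start
  std::uintptr_t object_size = 64;          // hypothetical object size in bytes

  // A memento, if present, is allocated directly behind the object.
  std::uintptr_t memento_address = object_address + object_size;
  std::uintptr_t last_memento_word_address = memento_address + kPointerSize;

  // If this prints false, the memento would straddle a page boundary and
  // FindAllocationMemento returns immediately, as in the diff above.
  std::cout << std::boolalpha
            << OnSamePage(object_address, last_memento_word_address) << "\n";
}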
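For context on the code this CL removes (the '-' lines): the pre-revert FindAllocationMemento selected its behavior at compile time through a FindMementoMode template parameter, so the GC path could return the candidate without validating it against the new-space top, while the runtime path kept the full checks. The sketch below shows only that compile-time dispatch pattern; the enum, function, and return strings are illustrative stand-ins, not V8's API.

#include <iostream>

// Stand-in for the pre-revert Heap::FindMementoMode.
enum class Mode { kForGC, kForRuntime };

// Each instantiation compiles down to just its own branch, which is how the
// pre-revert code gave the GC a cheaper lookup than the mutator.
template <Mode mode>
const char* Lookup(bool candidate_is_valid) {
  switch (mode) {
    case Mode::kForGC:
      return "candidate returned without top-pointer validation";
    case Mode::kForRuntime:
      return candidate_is_valid ? "validated memento" : "nullptr";
  }
  return "unreachable";
}

int main() {
  std::cout << Lookup<Mode::kForGC>(false) << "\n";      // GC-style caller
  std::cout << Lookup<Mode::kForRuntime>(true) << "\n";  // runtime-style caller
}

The revert collapses both paths back into the single version on the '+' side, which always performs the top-pointer test.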

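Both sides of UpdateAllocationSite keep pretenuring feedback as a count per allocation site stored directly in a hash map entry and bumped via (*bit_cast<intptr_t*>(&e->value))++. A rough standalone analogue of that bookkeeping is sketched below, using std::unordered_map in place of V8's internal HashMap and an empty struct in place of AllocationSite; all names here are illustrative only.

#include <cstdint>
#include <iostream>
#include <unordered_map>

// Stand-in for AllocationSite; only its pointer identity matters as a key.
struct Site {};

int main() {
  // Keyed by site pointer; the mapped value is the memento-found tally,
  // mirroring the counter the diff stores in each HashMap entry.
  std::unordered_map<const Site*, std::intptr_t> feedback;

  Site a, b;
  feedback[&a]++;  // memento found behind an object allocated from site a
  feedback[&a]++;  // a second one
  feedback[&b]++;

  for (const auto& [site, count] : feedback) {
    std::cout << "site " << site << ": " << count << " mementos\n";
  }
}

In the '+' (post-revert) code the global map is keyed by the AllocationSite pointer itself, hashed with static_cast<uint32_t>(bit_cast<uintptr_t>(key)), whereas the removed parallel path keyed a per-task cache by the raw allocation-site address so it never had to dereference the site during evacuation.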