1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <algorithm> | 5 #include <algorithm> |
6 | 6 |
7 #include "src/v8.h" | 7 #include "src/v8.h" |
8 | 8 |
9 #include "src/counters.h" | 9 #include "src/counters.h" |
10 #include "src/heap/store-buffer-inl.h" | 10 #include "src/heap/store-buffer-inl.h" |
(...skipping 446 matching lines...)
457 if (!page->SweepingCompleted()) { | 457 if (!page->SweepingCompleted()) { |
458 heap_->mark_compact_collector()->SweepInParallel(page, owner); | 458 heap_->mark_compact_collector()->SweepInParallel(page, owner); |
459 if (!page->SweepingCompleted()) { | 459 if (!page->SweepingCompleted()) { |
460 // We were not able to sweep that page, i.e., a concurrent | 460 // We were not able to sweep that page, i.e., a concurrent |
461 // sweeper thread currently owns this page. | 461 // sweeper thread currently owns this page. |
462 // TODO(hpayer): This may introduce a huge pause here. We | 462 // TODO(hpayer): This may introduce a huge pause here. We |
463 // just care about finishing sweeping of the scan-on-scavenge page. | 463 // just care about finishing sweeping of the scan-on-scavenge page. |
464 heap_->mark_compact_collector()->EnsureSweepingCompleted(); | 464 heap_->mark_compact_collector()->EnsureSweepingCompleted(); |
465 } | 465 } |
466 } | 466 } |
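
The block above is a retry-then-block pattern: the scavenger first tries to finish sweeping the page on the current thread, and only if a concurrent sweeper still owns the page does it fall back to the blocking wait the TODO warns about. A minimal standalone sketch of that pattern; TrySweepInParallel and EnsureAllSweepingCompleted here are hypothetical stand-ins, not the real V8 API:

  #include <atomic>

  // Hypothetical sketch, not the V8 implementation.
  struct Page {
    std::atomic<bool> swept{false};
    std::atomic<bool> owned_by_concurrent_sweeper{false};
    bool SweepingCompleted() const { return swept.load(); }
  };

  // Opportunistic path: sweep the page here unless a sweeper task owns it.
  void TrySweepInParallel(Page* page) {
    if (!page->owned_by_concurrent_sweeper.load()) page->swept.store(true);
  }

  // Blocking fallback: join every sweeper task (elided); afterwards all
  // pages, including this one, are guaranteed to be swept.
  void EnsureAllSweepingCompleted(Page* page) { page->swept.store(true); }

  void EnsurePageSwept(Page* page) {
    if (page->SweepingCompleted()) return;  // Fast path: already swept.
    TrySweepInParallel(page);
    if (!page->SweepingCompleted()) {
      // A concurrent sweeper owns the page; this wait can be a long pause.
      EnsureAllSweepingCompleted(page);
    }
  }
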
467 CHECK(page->owner() == heap_->old_space()); | 467 CHECK(page->owner() == heap_->old_pointer_space()); |
468 HeapObjectIterator iterator(page, NULL); | 468 HeapObjectIterator iterator(page, NULL); |
469 for (HeapObject* heap_object = iterator.Next(); heap_object != NULL; | 469 for (HeapObject* heap_object = iterator.Next(); heap_object != NULL; |
470 heap_object = iterator.Next()) { | 470 heap_object = iterator.Next()) { |
471 // We are only interested in objects that may contain new space pointers. | 471 // We are only interested in objects that may contain new space pointers. |
472 bool may_contain_raw_values = heap_object->MayContainRawValues(); | 472 bool may_contain_raw_values = heap_object->MayContainRawValues(); |
473 if (!may_contain_raw_values) { | 473 if (!may_contain_raw_values) { |
474 Address obj_address = heap_object->address(); | 474 Address obj_address = heap_object->address(); |
475 const int start_offset = HeapObject::kHeaderSize; | 475 const int start_offset = HeapObject::kHeaderSize; |
476 const int end_offset = heap_object->Size(); | 476 const int end_offset = heap_object->Size(); |
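
The two offsets above delimit the object's body: tagged slots start right after the map header and run up to the object's size, so the scan is a walk over pointer-aligned addresses in [obj_address + start_offset, obj_address + end_offset). A hedged sketch of such a walk; VisitBodySlots and SlotCallback are illustrative names, not the V8 helpers used in the elided lines below:

  #include <cstdint>

  // Hypothetical sketch of visiting each pointer-sized slot in an object body.
  typedef void (*SlotCallback)(uintptr_t* slot);

  void VisitBodySlots(uintptr_t obj_address, int start_offset, int end_offset,
                      SlotCallback callback) {
    for (int offset = start_offset; offset < end_offset;
         offset += sizeof(uintptr_t)) {
      callback(reinterpret_cast<uintptr_t*>(obj_address + offset));
    }
  }
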
477 #if V8_DOUBLE_FIELDS_UNBOXING | 477 #if V8_DOUBLE_FIELDS_UNBOXING |
(...skipping 49 matching lines...)
527 EnsureSpace(top - start_); | 527 EnsureSpace(top - start_); |
528 DCHECK(may_move_store_buffer_entries_); | 528 DCHECK(may_move_store_buffer_entries_); |
529 // Goes through the addresses in the store buffer attempting to remove | 529 // Goes through the addresses in the store buffer attempting to remove |
530 // duplicates. In the interest of speed this is a lossy operation. Some | 530 // duplicates. In the interest of speed this is a lossy operation. Some |
531 // duplicates will remain. We have two hash sets with different hash | 531 // duplicates will remain. We have two hash sets with different hash |
532 // functions to reduce the number of unnecessary clashes. | 532 // functions to reduce the number of unnecessary clashes. |
533 hash_sets_are_empty_ = false; // Hash sets are in use. | 533 hash_sets_are_empty_ = false; // Hash sets are in use. |
534 for (Address* current = start_; current < top; current++) { | 534 for (Address* current = start_; current < top; current++) { |
535 DCHECK(!heap_->cell_space()->Contains(*current)); | 535 DCHECK(!heap_->cell_space()->Contains(*current)); |
536 DCHECK(!heap_->code_space()->Contains(*current)); | 536 DCHECK(!heap_->code_space()->Contains(*current)); |
| 537 DCHECK(!heap_->old_data_space()->Contains(*current)); |
537 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); | 538 uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current); |
538 // Shift out the last bits including any tags. | 539 // Shift out the last bits including any tags. |
539 int_addr >>= kPointerSizeLog2; | 540 int_addr >>= kPointerSizeLog2; |
540 // The upper part of an address is basically random because of ASLR and OS | 541 // The upper part of an address is basically random because of ASLR and OS |
541 // non-determinism, so we use only the bits within a page for hashing to | 542 // non-determinism, so we use only the bits within a page for hashing to |
542 // make v8's behavior (more) deterministic. | 543 // make v8's behavior (more) deterministic. |
543 uintptr_t hash_addr = | 544 uintptr_t hash_addr = |
544 int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2); | 545 int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2); |
545 int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) & | 546 int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) & |
546 (kHashSetLength - 1)); | 547 (kHashSetLength - 1)); |
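
As a concrete illustration of the hash just computed, here is the same arithmetic with stated assumptions (8-byte pointers, 1 MB pages, a 4096-entry hash set; the real constants live in V8's headers and may differ):

  #include <cstdint>
  #include <cstdio>

  int main() {
    // Assumed constants for illustration only.
    const uintptr_t kPointerSizeLog2 = 3;                 // 8-byte pointers.
    const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // 1 MB pages.
    const uintptr_t kHashSetLengthLog2 = 12;              // 4096-entry set.
    const uintptr_t kHashSetLength = uintptr_t{1} << kHashSetLengthLog2;

    uintptr_t addr = 0x7f2a00042af8;                // Some slot address (64-bit).
    uintptr_t int_addr = addr >> kPointerSizeLog2;  // Shift out the tag bits.
    // Mask to the in-page bits so the hash is unaffected by ASLR.
    uintptr_t hash_addr = int_addr & (kPageAlignmentMask >> kPointerSizeLog2);
    int hash1 = static_cast<int>(
        (hash_addr ^ (hash_addr >> kHashSetLengthLog2)) & (kHashSetLength - 1));
    std::printf("hash_addr=%llx hash1=%d\n",
                static_cast<unsigned long long>(hash_addr), hash1);
    return 0;
  }
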
(...skipping 14 matching lines...)
561 } | 562 } |
562 old_buffer_is_sorted_ = false; | 563 old_buffer_is_sorted_ = false; |
563 old_buffer_is_filtered_ = false; | 564 old_buffer_is_filtered_ = false; |
564 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); | 565 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); |
565 DCHECK(old_top_ <= old_limit_); | 566 DCHECK(old_top_ <= old_limit_); |
566 } | 567 } |
567 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); | 568 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); |
568 } | 569 } |
569 } | 570 } |
570 } // namespace v8::internal | 571 } // namespace v8::internal |
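
Taken as a whole, Compact performs lossy de-duplication: each address is probed in two small direct-mapped hash sets with different hash functions, a miss in both stores the key in both (possibly evicting older entries), and any duplicate that survives both probes is simply kept, since correctness never depended on perfect de-duplication. A self-contained sketch of that idea; the hash functions here are illustrative, not the ones in store-buffer.cc:

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  // Hypothetical sketch of two-hash-set lossy de-duplication.
  class LossyDeduper {
    static constexpr int kLengthLog2 = 12;
    static constexpr int kLength = 1 << kLengthLog2;
    // A stored value of 0 doubles as "empty", which is fine because real
    // slot addresses are never null.
    uintptr_t set1_[kLength] = {};
    uintptr_t set2_[kLength] = {};

   public:
    // Returns true if `key` should be kept. Distinct keys can evict each
    // other, so some duplicates survive: the filter is lossy by design.
    bool Keep(uintptr_t key) {
      int h1 = static_cast<int>((key ^ (key >> kLengthLog2)) & (kLength - 1));
      if (set1_[h1] == key) return false;  // Seen via hash function 1.
      int h2 = static_cast<int>(
          ((key >> 3) ^ (key >> (kLengthLog2 + 3))) & (kLength - 1));
      if (set2_[h2] == key) return false;  // Seen via hash function 2.
      set1_[h1] = key;  // Remember the key in both sets.
      set2_[h2] = key;
      return true;
    }
  };

  int main() {
    LossyDeduper dedup;
    std::vector<uintptr_t> buffer = {0x1000, 0x2000, 0x1000, 0x3000, 0x2000};
    std::vector<uintptr_t> compacted;
    for (uintptr_t addr : buffer) {
      if (dedup.Keep(addr)) compacted.push_back(addr);
    }
    // Prints 3: the two repeated addresses were filtered out.
    std::printf("%d entries kept\n", static_cast<int>(compacted.size()));
    return 0;
  }
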