| OLD | NEW |
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <algorithm> | 5 #include <algorithm> |
| 6 | 6 |
| 7 #include "src/v8.h" | 7 #include "src/v8.h" |
| 8 | 8 |
| 9 #include "src/base/atomicops.h" | 9 #include "src/base/atomicops.h" |
| 10 #include "src/counters.h" | 10 #include "src/counters.h" |
| (...skipping 361 matching lines...) | |
| 372 during_gc_ = false; | 372 during_gc_ = false; |
| 373 #ifdef VERIFY_HEAP | 373 #ifdef VERIFY_HEAP |
| 374 if (FLAG_verify_heap) { | 374 if (FLAG_verify_heap) { |
| 375 Verify(); | 375 Verify(); |
| 376 } | 376 } |
| 377 #endif | 377 #endif |
| 378 } | 378 } |
| 379 | 379 |
| 380 | 380 |
| 381 void StoreBuffer::ProcessOldToNewSlot(Address slot_address, | 381 void StoreBuffer::ProcessOldToNewSlot(Address slot_address, |
| 382 ObjectSlotCallback slot_callback, | 382 ObjectSlotCallback slot_callback) { |
| 383 bool clear_maps) { | |
| 384 Object** slot = reinterpret_cast<Object**>(slot_address); | 383 Object** slot = reinterpret_cast<Object**>(slot_address); |
| 385 Object* object = reinterpret_cast<Object*>( | 384 Object* object = reinterpret_cast<Object*>( |
| 386 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); | 385 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); |
| 387 | 386 |
| 388 // If the object is not in from space, it must be a duplicate store buffer | 387 // If the object is not in from space, it must be a duplicate store buffer |
| 389 // entry and the slot was already updated. | 388 // entry and the slot was already updated. |
| 390 if (heap_->InFromSpace(object)) { | 389 if (heap_->InFromSpace(object)) { |
| 391 HeapObject* heap_object = reinterpret_cast<HeapObject*>(object); | 390 HeapObject* heap_object = reinterpret_cast<HeapObject*>(object); |
| 392 DCHECK(heap_object->IsHeapObject()); | 391 DCHECK(heap_object->IsHeapObject()); |
| 393 // The new space object was not promoted if it still contains a map | |
| 394 // pointer. Clear the map field now lazily (during full GC). | |
| 395 if (clear_maps) ClearDeadObject(heap_object); | |
| 396 slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object); | 392 slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object); |
| 397 object = reinterpret_cast<Object*>( | 393 object = reinterpret_cast<Object*>( |
| 398 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); | 394 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); |
| 399 // If the object was in from space before the callback ran and is in | 395 // If the object was in from space before the callback ran and is in |
| 400 // to space afterwards, the object is still live. | 396 // to space afterwards, the object is still live. |
| 401 // Unfortunately, we do not know about the slot. It could be in a | 397 // Unfortunately, we do not know about the slot. It could be in a |
| 402 // just freed free space object. | 398 // just freed free space object. |
| 403 if (heap_->InToSpace(object)) { | 399 if (heap_->InToSpace(object)) { |
| 404 EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot)); | 400 EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot)); |
| 405 } | 401 } |
| 406 } | 402 } |
| 407 } | 403 } |
| 408 | 404 |
| 409 | 405 |
| 410 void StoreBuffer::FindPointersToNewSpaceInRegion( | 406 void StoreBuffer::FindPointersToNewSpaceInRegion( |
| 411 Address start, Address end, ObjectSlotCallback slot_callback, | 407 Address start, Address end, ObjectSlotCallback slot_callback) { |
| 412 bool clear_maps) { | |
| 413 for (Address slot_address = start; slot_address < end; | 408 for (Address slot_address = start; slot_address < end; |
| 414 slot_address += kPointerSize) { | 409 slot_address += kPointerSize) { |
| 415 ProcessOldToNewSlot(slot_address, slot_callback, clear_maps); | 410 ProcessOldToNewSlot(slot_address, slot_callback); |
| 416 } | 411 } |
| 417 } | 412 } |
| 418 | 413 |
| 419 | 414 |
| 420 void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback, | 415 void StoreBuffer::IteratePointersInStoreBuffer( |
| 421 bool clear_maps) { | 416 ObjectSlotCallback slot_callback) { |
| 422 Address* limit = old_top_; | 417 Address* limit = old_top_; |
| 423 old_top_ = old_start_; | 418 old_top_ = old_start_; |
| 424 { | 419 { |
| 425 DontMoveStoreBufferEntriesScope scope(this); | 420 DontMoveStoreBufferEntriesScope scope(this); |
| 426 for (Address* current = old_start_; current < limit; current++) { | 421 for (Address* current = old_start_; current < limit; current++) { |
| 427 #ifdef DEBUG | 422 #ifdef DEBUG |
| 428 Address* saved_top = old_top_; | 423 Address* saved_top = old_top_; |
| 429 #endif | 424 #endif |
| 430 ProcessOldToNewSlot(*current, slot_callback, clear_maps); | 425 ProcessOldToNewSlot(*current, slot_callback); |
| 431 DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top); | 426 DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top); |
| 432 } | 427 } |
| 433 } | 428 } |
| 434 } | 429 } |
| 435 | 430 |
| 436 | 431 |
| 437 void StoreBuffer::ClearInvalidStoreBufferEntries() { | 432 void StoreBuffer::ClearInvalidStoreBufferEntries() { |
| 438 Compact(); | 433 Compact(); |
| 439 Address* new_top = old_start_; | 434 Address* new_top = old_start_; |
| 440 for (Address* current = old_start_; current < old_top_; current++) { | 435 for (Address* current = old_start_; current < old_top_; current++) { |
| (...skipping 30 matching lines...) | |
| 471 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); | 466 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot))); |
| 472 CHECK(heap_->InNewSpace(object)); | 467 CHECK(heap_->InNewSpace(object)); |
| 473 heap_->mark_compact_collector()->VerifyIsSlotInLiveObject( | 468 heap_->mark_compact_collector()->VerifyIsSlotInLiveObject( |
| 474 reinterpret_cast<HeapObject**>(slot), | 469 reinterpret_cast<HeapObject**>(slot), |
| 475 reinterpret_cast<HeapObject*>(object)); | 470 reinterpret_cast<HeapObject*>(object)); |
| 476 } | 471 } |
| 477 } | 472 } |
| 478 | 473 |
| 479 | 474 |
| 480 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) { | 475 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) { |
| 481 IteratePointersToNewSpace(slot_callback, false); | |
| 482 } | |
| 483 | |
| 484 | |
| 485 void StoreBuffer::IteratePointersToNewSpaceAndClearMaps( | |
| 486 ObjectSlotCallback slot_callback) { | |
| 487 IteratePointersToNewSpace(slot_callback, true); | |
| 488 } | |
| 489 | |
| 490 | |
| 491 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback, | |
| 492 bool clear_maps) { | |
| 493 // We do not sort or remove duplicated entries from the store buffer because | 476 // We do not sort or remove duplicated entries from the store buffer because |
| 494 // we expect that the callback will rebuild the store buffer, thus removing | 477 // we expect that the callback will rebuild the store buffer, thus removing |
| 495 // all duplicates and pointers to old space. | 478 // all duplicates and pointers to old space. |
| 496 bool some_pages_to_scan = PrepareForIteration(); | 479 bool some_pages_to_scan = PrepareForIteration(); |
| 497 | 480 |
| 498 // TODO(gc): we want to skip slots on evacuation candidates | 481 // TODO(gc): we want to skip slots on evacuation candidates |
| 499 // but we can't simply figure that out from slot address | 482 // but we can't simply figure that out from slot address |
| 500 // because slot can belong to a large object. | 483 // because slot can belong to a large object. |
| 501 IteratePointersInStoreBuffer(slot_callback, clear_maps); | 484 IteratePointersInStoreBuffer(slot_callback); |
| 502 | 485 |
| 503 // We are done scanning all the pointers that were in the store buffer, but | 486 // We are done scanning all the pointers that were in the store buffer, but |
| 504 // there may be some pages marked scan_on_scavenge that have pointers to new | 487 // there may be some pages marked scan_on_scavenge that have pointers to new |
| 505 // space that are not in the store buffer. We must scan them now. As we | 488 // space that are not in the store buffer. We must scan them now. As we |
| 506 // scan, the surviving pointers to new space will be added to the store | 489 // scan, the surviving pointers to new space will be added to the store |
| 507 // buffer. If there are still a lot of pointers to new space then we will | 490 // buffer. If there are still a lot of pointers to new space then we will |
| 508 // keep the scan_on_scavenge flag on the page and discard the pointers that | 491 // keep the scan_on_scavenge flag on the page and discard the pointers that |
| 509 // were added to the store buffer. If there are not many pointers to new | 492 // were added to the store buffer. If there are not many pointers to new |
| 510 // space left on the page we will keep the pointers in the store buffer and | 493 // space left on the page we will keep the pointers in the store buffer and |
| 511 // remove the flag from the page. | 494 // remove the flag from the page. |
| 512 if (some_pages_to_scan) { | 495 if (some_pages_to_scan) { |
| 513 if (callback_ != NULL) { | 496 if (callback_ != NULL) { |
| 514 (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent); | 497 (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent); |
| 515 } | 498 } |
| 516 PointerChunkIterator it(heap_); | 499 PointerChunkIterator it(heap_); |
| 517 MemoryChunk* chunk; | 500 MemoryChunk* chunk; |
| 518 while ((chunk = it.next()) != NULL) { | 501 while ((chunk = it.next()) != NULL) { |
| 519 if (chunk->scan_on_scavenge()) { | 502 if (chunk->scan_on_scavenge()) { |
| 520 chunk->set_scan_on_scavenge(false); | 503 chunk->set_scan_on_scavenge(false); |
| 521 if (callback_ != NULL) { | 504 if (callback_ != NULL) { |
| 522 (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent); | 505 (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent); |
| 523 } | 506 } |
| 524 if (chunk->owner() == heap_->lo_space()) { | 507 if (chunk->owner() == heap_->lo_space()) { |
| 525 LargePage* large_page = reinterpret_cast<LargePage*>(chunk); | 508 LargePage* large_page = reinterpret_cast<LargePage*>(chunk); |
| 526 HeapObject* array = large_page->GetObject(); | 509 HeapObject* array = large_page->GetObject(); |
| 527 DCHECK(array->IsFixedArray()); | 510 DCHECK(array->IsFixedArray()); |
| 528 Address start = array->address(); | 511 Address start = array->address(); |
| 529 Address end = start + array->Size(); | 512 Address end = start + array->Size(); |
| 530 FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps); | 513 FindPointersToNewSpaceInRegion(start, end, slot_callback); |
| 531 } else { | 514 } else { |
| 532 Page* page = reinterpret_cast<Page*>(chunk); | 515 Page* page = reinterpret_cast<Page*>(chunk); |
| 533 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); | 516 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); |
| 534 if (owner == heap_->map_space()) { | 517 if (owner == heap_->map_space()) { |
| 535 DCHECK(page->WasSwept()); | 518 DCHECK(page->WasSwept()); |
| 536 HeapObjectIterator iterator(page, NULL); | 519 HeapObjectIterator iterator(page, NULL); |
| 537 for (HeapObject* heap_object = iterator.Next(); heap_object != NULL; | 520 for (HeapObject* heap_object = iterator.Next(); heap_object != NULL; |
| 538 heap_object = iterator.Next()) { | 521 heap_object = iterator.Next()) { |
| 539 // We skip free space objects. | 522 // We skip free space objects. |
| 540 if (!heap_object->IsFiller()) { | 523 if (!heap_object->IsFiller()) { |
| 541 DCHECK(heap_object->IsMap()); | 524 DCHECK(heap_object->IsMap()); |
| 542 FindPointersToNewSpaceInRegion( | 525 FindPointersToNewSpaceInRegion( |
| 543 heap_object->address() + Map::kPointerFieldsBeginOffset, | 526 heap_object->address() + Map::kPointerFieldsBeginOffset, |
| 544 heap_object->address() + Map::kPointerFieldsEndOffset, | 527 heap_object->address() + Map::kPointerFieldsEndOffset, |
| 545 slot_callback, clear_maps); | 528 slot_callback); |
| 546 } | 529 } |
| 547 } | 530 } |
| 548 } else { | 531 } else { |
| 549 if (!page->SweepingCompleted()) { | 532 if (!page->SweepingCompleted()) { |
| 550 heap_->mark_compact_collector()->SweepInParallel(page, owner); | 533 heap_->mark_compact_collector()->SweepInParallel(page, owner); |
| 551 if (!page->SweepingCompleted()) { | 534 if (!page->SweepingCompleted()) { |
| 552 // We were not able to sweep that page, i.e., a concurrent | 535 // We were not able to sweep that page, i.e., a concurrent |
| 553 // sweeper thread currently owns this page. | 536 // sweeper thread currently owns this page. |
| 554 // TODO(hpayer): This may introduce a huge pause here. We | 537 // TODO(hpayer): This may introduce a huge pause here. We |
| 555 // just care about finishing the sweeping of the scan-on-scavenge page. | 538 // just care about finishing the sweeping of the scan-on-scavenge page. |
| (...skipping 14 matching lines...) | |
| 570 LayoutDescriptorHelper helper(heap_object->map()); | 553 LayoutDescriptorHelper helper(heap_object->map()); |
| 571 bool has_only_tagged_fields = helper.all_fields_tagged(); | 554 bool has_only_tagged_fields = helper.all_fields_tagged(); |
| 572 | 555 |
| 573 if (!has_only_tagged_fields) { | 556 if (!has_only_tagged_fields) { |
| 574 for (int offset = start_offset; offset < end_offset;) { | 557 for (int offset = start_offset; offset < end_offset;) { |
| 575 int end_of_region_offset; | 558 int end_of_region_offset; |
| 576 if (helper.IsTagged(offset, end_offset, | 559 if (helper.IsTagged(offset, end_offset, |
| 577 &end_of_region_offset)) { | 560 &end_of_region_offset)) { |
| 578 FindPointersToNewSpaceInRegion( | 561 FindPointersToNewSpaceInRegion( |
| 579 obj_address + offset, | 562 obj_address + offset, |
| 580 obj_address + end_of_region_offset, slot_callback, | 563 obj_address + end_of_region_offset, slot_callback); |
| 581 clear_maps); | |
| 582 } | 564 } |
| 583 offset = end_of_region_offset; | 565 offset = end_of_region_offset; |
| 584 } | 566 } |
| 585 } else { | 567 } else { |
| 586 #endif | 568 #endif |
| 587 Address start_address = obj_address + start_offset; | 569 Address start_address = obj_address + start_offset; |
| 588 Address end_address = obj_address + end_offset; | 570 Address end_address = obj_address + end_offset; |
| 589 // Object has only tagged fields. | 571 // Object has only tagged fields. |
| 590 FindPointersToNewSpaceInRegion(start_address, end_address, | 572 FindPointersToNewSpaceInRegion(start_address, end_address, |
| 591 slot_callback, clear_maps); | 573 slot_callback); |
| 592 #if V8_DOUBLE_FIELDS_UNBOXING | 574 #if V8_DOUBLE_FIELDS_UNBOXING |
| 593 } | 575 } |
| 594 #endif | 576 #endif |
| 595 } | 577 } |
| 596 } | 578 } |
| 597 } | 579 } |
| 598 } | 580 } |
| 599 } | 581 } |
| 600 } | 582 } |
| 601 if (callback_ != NULL) { | 583 if (callback_ != NULL) { |
| (...skipping 53 matching lines...) | |
| 655 } | 637 } |
| 656 old_buffer_is_sorted_ = false; | 638 old_buffer_is_sorted_ = false; |
| 657 old_buffer_is_filtered_ = false; | 639 old_buffer_is_filtered_ = false; |
| 658 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); | 640 *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2); |
| 659 DCHECK(old_top_ <= old_limit_); | 641 DCHECK(old_top_ <= old_limit_); |
| 660 } | 642 } |
| 661 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); | 643 heap_->isolate()->counters()->store_buffer_compactions()->Increment(); |
| 662 } | 644 } |
| 663 } | 645 } |
| 664 } // namespace v8::internal | 646 } // namespace v8::internal |
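
The change threads one simplification through the store buffer: the `clear_maps` flag is dropped from `ProcessOldToNewSlot`, `FindPointersToNewSpaceInRegion`, `IteratePointersInStoreBuffer`, and `IteratePointersToNewSpace`, and the `IteratePointersToNewSpaceAndClearMaps` entry point is deleted, so map fields of dead new-space objects are no longer cleared lazily during slot iteration. The duplicate-tolerant slot handling itself is unchanged; the standalone sketch below models that control flow. It is illustrative only, not V8 code: `MiniHeap`, `MiniStoreBuffer`, and `Evacuate` are invented stand-ins, and the address-range space test is a toy assumption.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

using Address = uintptr_t;

// Toy heap layout: [kFrom, kTo) is "from space", [kTo, kOld) is "to space".
struct MiniHeap {
  static constexpr Address kFrom = 0x1000, kTo = 0x2000, kOld = 0x3000;
  static bool InFromSpace(Address a) { return a >= kFrom && a < kTo; }
  static bool InToSpace(Address a) { return a >= kTo && a < kOld; }
};

// The scavenge callback may evacuate the pointee and rewrite the slot.
using SlotCallback = void (*)(Address* slot);

struct MiniStoreBuffer {
  std::vector<Address*> entries;  // slots that still point into new space

  void ProcessOldToNewSlot(Address* slot, SlotCallback cb) {
    // Duplicate store-buffer entries are tolerated: if the pointee is no
    // longer in from space, an earlier visit already updated this slot.
    if (!MiniHeap::InFromSpace(*slot)) return;
    cb(slot);  // may move the object and rewrite the slot in place
    // If the object is in to space after the callback, it survived but is
    // still in new space, so the slot must be remembered again.
    if (MiniHeap::InToSpace(*slot)) entries.push_back(slot);
  }
};

// Example callback: copy the survivor into to space, keeping a tag byte.
static void Evacuate(Address* slot) { *slot = MiniHeap::kTo + (*slot & 0xFF); }

int main() {
  MiniStoreBuffer buffer;
  Address slot = MiniHeap::kFrom + 0x10;  // an old->new pointer
  buffer.ProcessOldToNewSlot(&slot, Evacuate);
  std::cout << std::hex << "slot -> 0x" << slot
            << ", re-remembered slots: " << buffer.entries.size() << "\n";
}
```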
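The comment above the page-scanning loop (new lines 486-494) describes the per-page heuristic applied to scan_on_scavenge pages: surviving old-to-new pointers found during the rescan are added to the store buffer, and the page then either keeps its flag while the fresh entries are discarded, or keeps the entries while the flag is cleared, depending on how many pointers remain. A minimal sketch of that decision follows; the threshold, names, and container types are invented stand-ins, and V8 actually sizes the cutoff against the store buffer's remaining capacity rather than a fixed constant.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Slots pointing into new space that were found while rescanning one page.
using SlotList = std::vector<void*>;

// Illustrative cutoff only; see the note above about V8's real sizing.
constexpr std::size_t kKeepThreshold = 64;

// Returns true if the page should keep its scan_on_scavenge flag (the fresh
// entries are discarded); false if the entries were folded into the store
// buffer and the flag can be cleared.
bool KeepScanOnScavenge(const SlotList& found, SlotList* store_buffer) {
  if (found.size() > kKeepThreshold) {
    return true;  // too many old->new pointers: cheaper to rescan next GC
  }
  store_buffer->insert(store_buffer->end(), found.begin(), found.end());
  return false;
}

int main() {
  SlotList store_buffer;
  SlotList sparse(3, nullptr), dense(1000, nullptr);
  std::cout << KeepScanOnScavenge(sparse, &store_buffer)   // 0: entries kept
            << " " << KeepScanOnScavenge(dense, &store_buffer) << "\n";  // 1
}
```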