Chromium Code Reviews

Unified Diff: src/heap/store-buffer.cc

Issue 991853002: Remove lazy sweeping of new space and corresponding complicated pointer updating logic. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 9 months ago
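In short, this patch drops the bool clear_maps flag that was threaded through ProcessOldToNewSlot, FindPointersToNewSpaceInRegion, IteratePointersInStoreBuffer and IteratePointersToNewSpace, and deletes the IteratePointersToNewSpaceAndClearMaps wrapper, leaving a single callback-driven entry point. Below is a minimal standalone sketch of the resulting calling convention; the types and bodies are placeholders for illustration only, not V8 code.

#include <cstdio>

struct HeapObject {};
typedef void (*ObjectSlotCallback)(HeapObject** slot, HeapObject* object);

struct StoreBufferSketch {
  // Single public entry point after the patch: no clear_maps flag and no
  // *AndClearMaps variant; every recorded slot is handed to the callback.
  void IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
    HeapObject dummy;
    HeapObject* object = &dummy;
    slot_callback(&object, object);  // stand-in for visiting one recorded slot
  }
};

static void VisitSlot(HeapObject** /*slot*/, HeapObject* /*object*/) {
  std::printf("visited one slot\n");
}

int main() {
  StoreBufferSketch buffer;
  buffer.IteratePointersToNewSpace(&VisitSlot);
  return 0;
}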
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include <algorithm>

 #include "src/v8.h"

 #include "src/base/atomicops.h"
 #include "src/counters.h"
(...skipping 361 matching lines...)
   during_gc_ = false;
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     Verify();
   }
 #endif
 }


 void StoreBuffer::ProcessOldToNewSlot(Address slot_address,
-                                      ObjectSlotCallback slot_callback,
-                                      bool clear_maps) {
+                                      ObjectSlotCallback slot_callback) {
   Object** slot = reinterpret_cast<Object**>(slot_address);
   Object* object = reinterpret_cast<Object*>(
       base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));

   // If the object is not in from space, it must be a duplicate store buffer
   // entry and the slot was already updated.
   if (heap_->InFromSpace(object)) {
     HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
     DCHECK(heap_object->IsHeapObject());
-    // The new space object was not promoted if it still contains a map
-    // pointer. Clear the map field now lazily (during full GC).
-    if (clear_maps) ClearDeadObject(heap_object);
ulan 2015/03/10 10:37:32 We can remove the ClearDeadObject function now.
Hannes Payer (out of office) 2015/03/10 13:07:00 Done.
     slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
     object = reinterpret_cast<Object*>(
         base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
     // If the object was in from space before and is after executing the
     // callback in to space, the object is still live.
     // Unfortunately, we do not know about the slot. It could be in a
     // just freed free space object.
     if (heap_->InToSpace(object)) {
       EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
     }
   }
 }


 void StoreBuffer::FindPointersToNewSpaceInRegion(
-    Address start, Address end, ObjectSlotCallback slot_callback,
-    bool clear_maps) {
+    Address start, Address end, ObjectSlotCallback slot_callback) {
   for (Address slot_address = start; slot_address < end;
        slot_address += kPointerSize) {
-    ProcessOldToNewSlot(slot_address, slot_callback, clear_maps);
+    ProcessOldToNewSlot(slot_address, slot_callback);
   }
 }


-void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
-                                               bool clear_maps) {
+void StoreBuffer::IteratePointersInStoreBuffer(
+    ObjectSlotCallback slot_callback) {
   Address* limit = old_top_;
   old_top_ = old_start_;
   {
     DontMoveStoreBufferEntriesScope scope(this);
     for (Address* current = old_start_; current < limit; current++) {
 #ifdef DEBUG
       Address* saved_top = old_top_;
 #endif
-      ProcessOldToNewSlot(*current, slot_callback, clear_maps);
+      ProcessOldToNewSlot(*current, slot_callback);
       DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
     }
   }
 }


 void StoreBuffer::ClearInvalidStoreBufferEntries() {
   Compact();
   Address* new_top = old_start_;
   for (Address* current = old_start_; current < old_top_; current++) {
(...skipping 21 matching lines...)
         base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
     CHECK(heap_->InNewSpace(object));
     heap_->mark_compact_collector()->VerifyIsSlotInLiveObject(
         reinterpret_cast<HeapObject**>(slot),
         reinterpret_cast<HeapObject*>(object));
   }
 }


 void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
-  IteratePointersToNewSpace(slot_callback, false);
-}
-
-
-void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
-    ObjectSlotCallback slot_callback) {
-  IteratePointersToNewSpace(slot_callback, true);
-}
-
-
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
-                                            bool clear_maps) {
   // We do not sort or remove duplicated entries from the store buffer because
   // we expect that callback will rebuild the store buffer thus removing
   // all duplicates and pointers to old space.
   bool some_pages_to_scan = PrepareForIteration();

   // TODO(gc): we want to skip slots on evacuation candidates
   // but we can't simply figure that out from slot address
   // because slot can belong to a large object.
-  IteratePointersInStoreBuffer(slot_callback, clear_maps);
+  IteratePointersInStoreBuffer(slot_callback);

   // We are done scanning all the pointers that were in the store buffer, but
   // there may be some pages marked scan_on_scavenge that have pointers to new
   // space that are not in the store buffer. We must scan them now. As we
   // scan, the surviving pointers to new space will be added to the store
   // buffer. If there are still a lot of pointers to new space then we will
   // keep the scan_on_scavenge flag on the page and discard the pointers that
   // were added to the store buffer. If there are not many pointers to new
   // space left on the page we will keep the pointers in the store buffer and
   // remove the flag from the page.
   if (some_pages_to_scan) {
     if (callback_ != NULL) {
       (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
     }
     PointerChunkIterator it(heap_);
     MemoryChunk* chunk;
     while ((chunk = it.next()) != NULL) {
       if (chunk->scan_on_scavenge()) {
         chunk->set_scan_on_scavenge(false);
         if (callback_ != NULL) {
           (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
         }
         if (chunk->owner() == heap_->lo_space()) {
           LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
           HeapObject* array = large_page->GetObject();
           DCHECK(array->IsFixedArray());
           Address start = array->address();
           Address end = start + array->Size();
-          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
+          FindPointersToNewSpaceInRegion(start, end, slot_callback);
         } else {
           Page* page = reinterpret_cast<Page*>(chunk);
           PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
           if (owner == heap_->map_space()) {
             DCHECK(page->WasSwept());
             HeapObjectIterator iterator(page, NULL);
             for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                  heap_object = iterator.Next()) {
               // We skip free space objects.
               if (!heap_object->IsFiller()) {
                 DCHECK(heap_object->IsMap());
                 FindPointersToNewSpaceInRegion(
                     heap_object->address() + Map::kPointerFieldsBeginOffset,
                     heap_object->address() + Map::kPointerFieldsEndOffset,
-                    slot_callback, clear_maps);
+                    slot_callback);
               }
             }
           } else {
             if (!page->SweepingCompleted()) {
               heap_->mark_compact_collector()->SweepInParallel(page, owner);
               if (!page->SweepingCompleted()) {
                 // We were not able to sweep that page, i.e., a concurrent
                 // sweeper thread currently owns this page.
                 // TODO(hpayer): This may introduce a huge pause here. We
                 // just care about finish sweeping of the scan on scavenge page.
(...skipping 14 matching lines...)
                 LayoutDescriptorHelper helper(heap_object->map());
                 bool has_only_tagged_fields = helper.all_fields_tagged();

                 if (!has_only_tagged_fields) {
                   for (int offset = start_offset; offset < end_offset;) {
                     int end_of_region_offset;
                     if (helper.IsTagged(offset, end_offset,
                                         &end_of_region_offset)) {
                       FindPointersToNewSpaceInRegion(
                           obj_address + offset,
-                          obj_address + end_of_region_offset, slot_callback,
-                          clear_maps);
+                          obj_address + end_of_region_offset, slot_callback);
                     }
                     offset = end_of_region_offset;
                   }
                 } else {
 #endif
                   Address start_address = obj_address + start_offset;
                   Address end_address = obj_address + end_offset;
                   // Object has only tagged fields.
                   FindPointersToNewSpaceInRegion(start_address, end_address,
-                                                 slot_callback, clear_maps);
+                                                 slot_callback);
 #if V8_DOUBLE_FIELDS_UNBOXING
                 }
 #endif
               }
             }
           }
         }
       }
     }
     if (callback_ != NULL) {
(...skipping 53 matching lines...)
     }
     old_buffer_is_sorted_ = false;
     old_buffer_is_filtered_ = false;
     *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
     DCHECK(old_top_ <= old_limit_);
   }
   heap_->isolate()->counters()->store_buffer_compactions()->Increment();
 }
 }
 }  // namespace v8::internal
