Chromium Code Reviews

Unified Diff: src/heap.cc

Issue 8004: - Cleaned up Heap::CopyObject. Inlined fast case. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 12 years, 2 months ago

@@ -1,10 +1,10 @@
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 427 matching lines...)
@@ -438,41 +438,41 @@
   Object* obj = code_space_->FindObject(a);
   if (obj->IsFailure()) {
     obj = lo_space_->FindObject(a);
   }
   ASSERT(!obj->IsFailure());
   return obj;
 }
 
 
 // Helper class for copying HeapObjects
-class CopyVisitor: public ObjectVisitor {
+class ScavengeVisitor: public ObjectVisitor {
  public:
 
-  void VisitPointer(Object** p) {
-    CopyObject(p);
-  }
+  void VisitPointer(Object** p) { ScavengePointer(p); }
 
   void VisitPointers(Object** start, Object** end) {
     // Copy all HeapObject pointers in [start, end)
-    for (Object** p = start; p < end; p++) CopyObject(p);
+    for (Object** p = start; p < end; p++) ScavengePointer(p);
   }
 
  private:
-  void CopyObject(Object** p) {
-    if (!Heap::InNewSpace(*p)) return;
-    Heap::CopyObject(reinterpret_cast<HeapObject**>(p));
+  void ScavengePointer(Object** p) {
+    Object* object = *p;
+    if (!Heap::InNewSpace(object)) return;
+    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+                         reinterpret_cast<HeapObject*>(object));
   }
 };
 
 
-// Shared state read by the scavenge collector and set by CopyObject.
+// Shared state read by the scavenge collector and set by ScavengeObject.
 static Address promoted_top = NULL;
 
 
 #ifdef DEBUG
 // Visitor class to verify pointers in code or data space do not point into
 // new space.
 class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
  public:
   void VisitPointers(Object** start, Object**end) {
     for (Object** current = start; current < end; current++) {
(...skipping 56 matching lines...)
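
A note on the new visitor shape: ScavengePointer loads *p exactly once and
passes both the slot and the loaded value down to Heap::ScavengeObject, so
the inlined fast path never has to reload the slot. A minimal sketch of that
slot-plus-value convention, with hypothetical names standing in for the V8
types:

    // Sketch only -- hypothetical names, not the V8 API. Illustrates the
    // "load the slot once, pass slot and value together" convention.
    struct Value { bool in_new_space; };

    void ScavengeValue(Value** slot, Value* value);  // out-of-line part

    inline void ScavengeSlot(Value** slot) {
      Value* value = *slot;              // single load of the slot
      if (!value->in_new_space) return;  // cheap filter, inlined everywhere
      ScavengeValue(slot, value);        // callee reuses the loaded value
    }

    void ScavengeValue(Value** slot, Value* value) {
      // Real code would check the forwarding word and copy or promote
      // 'value', writing its new location back through 'slot'.
      *slot = value;
    }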
@@ -535,63 +535,63 @@
   // There is guaranteed to be enough room at the top of the to space for the
   // addresses of promoted objects: every object promoted frees up its size in
   // bytes from the top of the new space, and objects are at least one pointer
   // in size. Using the new space to record promoted addresses makes the
   // scavenge collector agnostic to the allocation strategy (eg, linear or
   // free-list) used in old space.
   Address new_mark = new_space_.ToSpaceLow();
   Address promoted_mark = new_space_.ToSpaceHigh();
   promoted_top = new_space_.ToSpaceHigh();
 
-  CopyVisitor copy_visitor;
+  ScavengeVisitor scavenge_visitor;
   // Copy roots.
-  IterateRoots(&copy_visitor);
+  IterateRoots(&scavenge_visitor);
 
   // Copy objects reachable from the old generation. By definition, there
   // are no intergenerational pointers in code or data spaces.
-  IterateRSet(old_pointer_space_, &CopyObject);
-  IterateRSet(map_space_, &CopyObject);
-  lo_space_->IterateRSet(&CopyObject);
+  IterateRSet(old_pointer_space_, &ScavengePointer);
+  IterateRSet(map_space_, &ScavengePointer);
+  lo_space_->IterateRSet(&ScavengePointer);
 
   bool has_processed_weak_pointers = false;
 
   while (true) {
     ASSERT(new_mark <= new_space_.top());
     ASSERT(promoted_mark >= promoted_top);
 
     // Copy objects reachable from newly copied objects.
     while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
       // Sweep newly copied objects in the to space. The allocation pointer
       // can change during sweeping.
       Address previous_top = new_space_.top();
       SemiSpaceIterator new_it(new_space(), new_mark);
       while (new_it.has_next()) {
-        new_it.next()->Iterate(&copy_visitor);
+        new_it.next()->Iterate(&scavenge_visitor);
       }
       new_mark = previous_top;
 
       // Sweep newly copied objects in the old space. The promotion 'top'
       // pointer could change during sweeping.
       previous_top = promoted_top;
       for (Address current = promoted_mark - kPointerSize;
            current >= previous_top;
            current -= kPointerSize) {
         HeapObject* object = HeapObject::cast(Memory::Object_at(current));
-        object->Iterate(&copy_visitor);
+        object->Iterate(&scavenge_visitor);
         UpdateRSet(object);
       }
       promoted_mark = previous_top;
     }
 
     if (has_processed_weak_pointers) break;  // We are done.
     // Copy objects reachable from weak pointers.
-    GlobalHandles::IterateWeakRoots(&copy_visitor);
+    GlobalHandles::IterateWeakRoots(&scavenge_visitor);
     has_processed_weak_pointers = true;
   }
 
   // Set age mark.
   new_space_.set_age_mark(new_mark);
 
   LOG(ResourceEvent("scavenge", "end"));
 
   gc_state_ = NOT_IN_GC;
 }
(...skipping 153 matching lines...)
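
The comments in the scavenge loop above describe the central trick: addresses
of promoted objects are pushed downward from ToSpaceHigh() while freshly
copied objects fill to-space upward from ToSpaceLow(), giving two worklists
that the driver loop drains to a fixed point. A toy model of that draining
logic, under hypothetical names (real V8 walks raw semispace addresses, not
vectors):

    // Toy model of the two-worklist fixed point in the scavenge loop.
    // std::vector stands in for the semispace cursors (new_mark and
    // promoted_mark); everything here is a hypothetical simplification.
    #include <cstddef>
    #include <vector>

    struct Obj { int dummy; };

    static std::vector<Obj*> to_space;        // grows as objects are copied
    static std::vector<Obj*> promoted_stack;  // grows as objects are promoted

    // Visiting a body may copy or promote referents, i.e. may append to
    // either worklist; stubbed out here.
    static void ProcessBody(Obj* o) { (void)o; }

    static void DrainWorklists() {
      std::size_t new_mark = 0;       // first unswept copied object
      std::size_t promoted_mark = 0;  // first unswept promoted object
      // One pass is not enough: sweeping an object can enqueue more work,
      // so loop until both cursors catch up with both tops.
      while (new_mark < to_space.size() ||
             promoted_mark < promoted_stack.size()) {
        std::size_t top = to_space.size();  // snapshot, like previous_top
        for (; new_mark < top; new_mark++) ProcessBody(to_space[new_mark]);
        std::size_t promoted = promoted_stack.size();
        for (; promoted_mark < promoted; promoted_mark++)
          ProcessBody(promoted_stack[promoted_mark]);
      }
    }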
@@ -751,94 +751,109 @@
 
   // Update NewSpace stats if necessary.
 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
   RecordCopiedObject(target);
 #endif
 
   return target;
 }
 
 
-void Heap::CopyObject(HeapObject** p) {
-  ASSERT(InFromSpace(*p));
-
-  HeapObject* object = *p;
+// Inlined function.
+void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
+  ASSERT(InFromSpace(object));
 
   // We use the first word (where the map pointer usually is) of a heap
   // object to record the forwarding pointer. A forwarding pointer can
   // point to an old space, the code space, or the to space of the new
   // generation.
   MapWord first_word = object->map_word();
 
   // If the first word is a forwarding address, the object has already been
   // copied.
   if (first_word.IsForwardingAddress()) {
     *p = first_word.ToForwardingAddress();
     return;
   }
 
-  // Optimization: Bypass ConsString objects where the right-hand side is
-  // Heap::empty_string(). We do not use object->IsConsString because we
-  // already know that object has the heap object tag.
-  InstanceType type = first_word.ToMap()->instance_type();
-  if (type < FIRST_NONSTRING_TYPE &&
-      String::cast(object)->representation_tag() == kConsStringTag &&
-      ConsString::cast(object)->second() == Heap::empty_string()) {
+  // Call the slow part of scavenge object.
+  return ScavengeObjectSlow(p, object);
+}
+
+
+static inline bool IsShortcutCandidate(HeapObject* object, Map* map) {
+  // A ConsString object with Heap::empty_string() as the right side
+  // is a candidate for being shortcut by the scavenger.
+  ASSERT(object->map() == map);
+  return (map->instance_type() < FIRST_NONSTRING_TYPE) &&
+         (String::cast(object)->map_representation_tag(map) == kConsStringTag) &&
+         (ConsString::cast(object)->second() == Heap::empty_string());
+}
+
+
+void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
+  ASSERT(InFromSpace(object));
+  MapWord first_word = object->map_word();
+  ASSERT(!first_word.IsForwardingAddress());
+
+  // Optimization: Bypass flattened ConsString objects.
+  if (IsShortcutCandidate(object, first_word.ToMap())) {
     object = HeapObject::cast(ConsString::cast(object)->first());
     *p = object;
     // After patching *p we have to repeat the checks that object is in the
     // active semispace of the young generation and not already copied.
     if (!InNewSpace(object)) return;
     first_word = object->map_word();
     if (first_word.IsForwardingAddress()) {
       *p = first_word.ToForwardingAddress();
       return;
     }
-    type = first_word.ToMap()->instance_type();
   }
 
   int object_size = object->SizeFromMap(first_word.ToMap());
-  Object* result;
   // If the object should be promoted, we try to copy it to old space.
   if (ShouldBePromoted(object->address(), object_size)) {
     OldSpace* target_space = Heap::TargetSpace(object);
     ASSERT(target_space == Heap::old_pointer_space_ ||
            target_space == Heap::old_data_space_);
-    result = target_space->AllocateRaw(object_size);
-
+    Object* result = target_space->AllocateRaw(object_size);
     if (!result->IsFailure()) {
       *p = MigrateObject(object, HeapObject::cast(result), object_size);
       if (target_space == Heap::old_pointer_space_) {
         // Record the object's address at the top of the to space, to allow
         // it to be swept by the scavenger.
         promoted_top -= kPointerSize;
         Memory::Object_at(promoted_top) = *p;
       } else {
 #ifdef DEBUG
         // Objects promoted to the data space should not have pointers to
         // new space.
         VerifyNonPointerSpacePointersVisitor v;
         (*p)->Iterate(&v);
 #endif
       }
       return;
     }
   }
 
   // The object should remain in new space or the old space allocation failed.
-  result = new_space_.AllocateRaw(object_size);
+  Object* result = new_space_.AllocateRaw(object_size);
   // Failed allocation at this point is utterly unexpected.
   ASSERT(!result->IsFailure());
   *p = MigrateObject(object, HeapObject::cast(result), object_size);
 }
 
 
+void Heap::ScavengePointer(HeapObject** p) {
+  ScavengeObject(p, *p);
+}
+
+
 Object* Heap::AllocatePartialMap(InstanceType instance_type,
                                  int instance_size) {
   Object* result = AllocateRawMap(Map::kSize);
   if (result->IsFailure()) return result;
 
   // Map::cast cannot be used due to uninitialized map field.
   reinterpret_cast<Map*>(result)->set_map(meta_map());
   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
(...skipping 2264 matching lines...)
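
The restructuring above follows the classic inline-fast-path / out-of-line
slow-path pattern: the inlined ScavengeObject handles only the
already-forwarded case, while the cons-string shortcut, promotion, and
copying all live in ScavengeObjectSlow. A compressed sketch of that shape,
using a hypothetical low-bit tag for forwarding words (V8's real MapWord
encoding differs):

    // Hypothetical sketch of the fast/slow split; the forwarding-word
    // encoding (low bit set) is an assumption, not V8's MapWord format.
    #include <cstdint>

    struct HeapObj {
      uintptr_t first_word;  // map pointer, or tagged forwarding address
    };

    constexpr uintptr_t kForwardedTag = 1;

    void ScavengeSlow(HeapObj** slot, HeapObj* obj);  // copy/promote path

    // Cheap enough to inline at every pointer-visiting call site.
    inline void ScavengeFast(HeapObj** slot, HeapObj* obj) {
      uintptr_t w = obj->first_word;
      if (w & kForwardedTag) {
        // Already moved this cycle: just redirect the slot.
        *slot = reinterpret_cast<HeapObj*>(w & ~kForwardedTag);
        return;
      }
      ScavengeSlow(slot, obj);  // everything rare stays out of line
    }

    void ScavengeSlow(HeapObj** slot, HeapObj* obj) {
      // Placeholder: real code shortcuts eligible cons strings, copies or
      // promotes 'obj', and leaves a forwarding word behind.
      obj->first_word |= kForwardedTag;
      *slot = obj;
    }

Keeping only the forwarded check inline holds down code size at the very hot
call sites while still avoiding an out-of-line call in the common
already-copied case.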
@@ -3109,10 +3124,10 @@
 #ifdef DEBUG
 bool Heap::GarbageCollectionGreedyCheck() {
   ASSERT(FLAG_gc_greedy);
   if (Bootstrapper::IsActive()) return true;
   if (disallow_allocation_failure()) return true;
   return CollectGarbage(0, NEW_SPACE);
 }
 #endif
 
 } }  // namespace v8::internal