Chromium Code Reviews

Side by Side Diff: src/mark-compact.cc

Issue 6250076: Start using store buffers. Handle store buffer overflow situation.... (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 10 months ago
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 83 matching lines...)
94 ASSERT(state_ == PREPARE_GC); 94 ASSERT(state_ == PREPARE_GC);
95 95
96 // Prepare has selected whether to compact the old generation or not. 96 // Prepare has selected whether to compact the old generation or not.
97 // Tell the tracer. 97 // Tell the tracer.
98 if (IsCompacting()) tracer_->set_is_compacting(); 98 if (IsCompacting()) tracer_->set_is_compacting();
99 99
100 MarkLiveObjects(); 100 MarkLiveObjects();
101 101
102 if (FLAG_collect_maps) ClearNonLiveTransitions(); 102 if (FLAG_collect_maps) ClearNonLiveTransitions();
103 103
104 SweepSpaces();
104 SweepLargeObjectSpace(); 105 SweepLargeObjectSpace();
Vyacheslav Egorov (Chromium) 2011/02/02 13:15:47 You can now move SweepLargeObjectSpace into SweepSpaces.
Erik Corry 2011/02/03 13:21:17 Done.
105 106
106 SweepSpaces();
107 PcToCodeCache::FlushPcToCodeCache(); 107 PcToCodeCache::FlushPcToCodeCache();
108 108
109 Finish(); 109 Finish();
110 110
111 // Check that we swept all marked objects and 111 // Check that we swept all marked objects and
112 // null out the GC tracer. 112 // null out the GC tracer.
113 // TODO(gc) does not work with conservative sweeping. 113 // TODO(gc) does not work with conservative sweeping.
114 // ASSERT(tracer_->marked_count() == 0); 114 // ASSERT(tracer_->marked_count() == 0);
115 tracer_ = NULL; 115 tracer_ = NULL;
116 } 116 }
(...skipping 292 matching lines...)
409 InstanceType type = object->map()->instance_type(); 409 InstanceType type = object->map()->instance_type();
410 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object; 410 if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
411 411
412 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second(); 412 Object* second = reinterpret_cast<ConsString*>(object)->unchecked_second();
413 if (second != Heap::raw_unchecked_empty_string()) { 413 if (second != Heap::raw_unchecked_empty_string()) {
414 return object; 414 return object;
415 } 415 }
416 416
417 // Since we don't have the object's start, it is impossible to update the 417 // Since we don't have the object's start, it is impossible to update the
418 // page dirty marks. Therefore, we only replace the string with its left 418 // page dirty marks. Therefore, we only replace the string with its left
419 // substring when page dirty marks do not change. 419 // substring when page dirty marks do not change. TODO(gc): Seems like we
Vyacheslav Egorov (Chromium) 2011/02/02 13:15:47 move todo to the new line
Erik Corry 2011/02/03 13:21:17 Done.
420 // could relax this restriction with store buffers.
420 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first(); 421 Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
421 if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object; 422 if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
422 423
423 *p = first; 424 *p = first;
424 return HeapObject::cast(first); 425 return HeapObject::cast(first);
425 } 426 }
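
To make the shortcut concrete, here is a standalone sketch in plain C++ (not V8 code; every name below is invented for illustration, and the page-dirty-marks restriction above has no analogue here). A cons string whose second half is empty is equivalent to its first half, so a visitor may bypass the wrapper:

#include <cassert>
#include <string>

// A cons string is a lazy concatenation of two halves. When the second half
// is the empty string (e.g. after flattening), the cons wrapper is redundant
// and a visitor can treat the first half as the object itself.
struct ConsString {
  std::string first;
  std::string second;
};

// Returns the shortcut target, or nullptr if no shortcut applies.
const std::string* ShortCircuit(const ConsString& s) {
  if (!s.second.empty()) return nullptr;  // Real concatenation: keep wrapper.
  return &s.first;                        // Wrapper is redundant: bypass it.
}

int main() {
  ConsString flattened{"abc", ""};
  ConsString real{"abc", "def"};
  assert(ShortCircuit(flattened) == &flattened.first);
  assert(ShortCircuit(real) == nullptr);
}
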
426 427
427 428
428 class StaticMarkingVisitor : public StaticVisitorBase { 429 class StaticMarkingVisitor : public StaticVisitorBase {
429 public: 430 public:
(...skipping 993 matching lines...)
1423 live_lo_objects_size_ += obj->Size(); 1424 live_lo_objects_size_ += obj->Size();
1424 } else { 1425 } else {
1425 UNREACHABLE(); 1426 UNREACHABLE();
1426 } 1427 }
1427 } 1428 }
1428 #endif // DEBUG 1429 #endif // DEBUG
1429 1430
1430 1431
1431 void MarkCompactCollector::SweepLargeObjectSpace() { 1432 void MarkCompactCollector::SweepLargeObjectSpace() {
1432 #ifdef DEBUG 1433 #ifdef DEBUG
1433 ASSERT(state_ == MARK_LIVE_OBJECTS); 1434 ASSERT(state_ == SWEEP_SPACES);
1434 state_ =
1435 compacting_collection_ ? ENCODE_FORWARDING_ADDRESSES : SWEEP_SPACES;
1436 #endif 1435 #endif
1437 // Deallocate unmarked objects and clear marked bits for marked objects. 1436 // Deallocate unmarked objects and clear marked bits for marked objects.
1438 Heap::lo_space()->FreeUnmarkedObjects(); 1437 Heap::lo_space()->FreeUnmarkedObjects();
1439 } 1438 }
1440 1439
1441 1440
1442 // Safe to use during marking phase only. 1441 // Safe to use during marking phase only.
1443 bool MarkCompactCollector::SafeIsMap(HeapObject* object) { 1442 bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
1444 return object->map()->instance_type() == MAP_TYPE; 1443 return object->map()->instance_type() == MAP_TYPE;
1445 } 1444 }
(...skipping 56 matching lines...)
1502 *HeapObject::RawField(current, Map::kPrototypeOffset) = 1501 *HeapObject::RawField(current, Map::kPrototypeOffset) =
1503 real_prototype; 1502 real_prototype;
1504 current = reinterpret_cast<Map*>(next); 1503 current = reinterpret_cast<Map*>(next);
1505 } 1504 }
1506 } 1505 }
1507 } 1506 }
1508 1507
1509 1508
1510 // We scavenge new space simultaneously with sweeping. This is done in two 1509 // We scavenge new space simultaneously with sweeping. This is done in two
1511 // passes. 1510 // passes.
1511 //
1512 // The first pass migrates all alive objects from one semispace to another or 1512 // The first pass migrates all alive objects from one semispace to another or
1513 // promotes them to old space. Forwading address is written directly into 1513 // promotes them to old space. Forwarding address is written directly into
1514 // first word of object without any encoding. If object is dead we are writing 1514 // first word of object without any encoding. If object is dead we write
1515 // NULL as a forwarding address. 1515 // NULL as a forwarding address.
1516 // The second pass updates pointers to new space in all spaces. It is possible 1516 //
1517 // to encounter pointers to dead objects during traversal of dirty regions we 1517 // The second pass updates pointers to new space in all spaces. It is possible
1518 // should clear them to avoid encountering them during next dirty regions 1518 // to encounter pointers to dead new space objects during traversal of pointers
1519 // iteration. 1519 // to new space. We should clear them to avoid encountering them during next
1520 // pointer iteration. This is an issue if the store buffer overflows and we
1521 // have to scan the entire old space, including dead objects, looking for
1522 // pointers to new space.
1520 static void MigrateObject(Address dst, 1523 static void MigrateObject(Address dst,
1521 Address src, 1524 Address src,
1522 int size, 1525 int size,
1523 bool to_old_space) { 1526 bool to_old_space) {
1524 if (to_old_space) { 1527 if (to_old_space) {
1525 Heap::CopyBlockToOldSpaceAndUpdateWriteBarrier(dst, src, size); 1528 Heap::CopyBlockToOldSpaceAndUpdateWriteBarrier(dst, src, size);
1526 } else { 1529 } else {
1527 Heap::CopyBlock(dst, src, size); 1530 Heap::CopyBlock(dst, src, size);
1528 } 1531 }
1529
1530 Memory::Address_at(src) = dst; 1532 Memory::Address_at(src) = dst;
1531 } 1533 }
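
MigrateObject's contract, copy the body and then leave a forwarding address in the old copy's first word, can be sketched standalone (plain C++, not V8 code; names invented for illustration):

#include <cassert>
#include <cstring>

// Model of the forwarding convention: the first word of the old copy is
// overwritten with the destination address so pass 2 can find the object.
struct Slot { void* word0; int payload; };

void Migrate(Slot* dst, Slot* src) {
  std::memcpy(dst, src, sizeof(Slot));  // Stand-in for Heap::CopyBlock.
  src->word0 = dst;                     // Forwarding address into first word.
}

int main() {
  Slot from{nullptr, 7};
  Slot to{};
  Migrate(&to, &from);
  assert(to.payload == 7);     // Body copied to the new location.
  assert(from.word0 == &to);   // Old copy now carries the forwarding address.
}
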
1532 1534
1533 1535
1534 class StaticPointersToNewGenUpdatingVisitor : public 1536 class StaticPointersToNewGenUpdatingVisitor : public
1535 StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> { 1537 StaticNewSpaceVisitor<StaticPointersToNewGenUpdatingVisitor> {
1536 public: 1538 public:
1537 static inline void VisitPointer(Object** p) { 1539 static inline void VisitPointer(Object** p) {
1538 if (!(*p)->IsHeapObject()) return; 1540 if (!(*p)->IsHeapObject()) return;
1539 1541
(...skipping 34 matching lines...)
1574 rinfo->IsPatchedReturnSequence()) || 1576 rinfo->IsPatchedReturnSequence()) ||
1575 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) && 1577 (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1576 rinfo->IsPatchedDebugBreakSlotSequence())); 1578 rinfo->IsPatchedDebugBreakSlotSequence()));
1577 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address()); 1579 Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
1578 VisitPointer(&target); 1580 VisitPointer(&target);
1579 rinfo->set_call_address(Code::cast(target)->instruction_start()); 1581 rinfo->set_call_address(Code::cast(target)->instruction_start());
1580 } 1582 }
1581 }; 1583 };
1582 1584
1583 1585
1584 // Visitor for updating pointers from live objects in old spaces to new space. 1586 static void UpdatePointerToNewGen(HeapObject** p, HeapObject* object) {
1585 // It can encounter pointers to dead objects in new space when traversing map 1587 ASSERT(Heap::InFromSpace(object));
1586 // space (see comment for MigrateObject). 1588 ASSERT(*p == object);
1587 static void UpdatePointerToNewGen(HeapObject** p) {
1588 if (!(*p)->IsHeapObject()) return;
1589 1589
1590 Address old_addr = (*p)->address(); 1590 Address old_addr = object->address();
1591 ASSERT(Heap::InFromSpace(*p));
1592 1591
1593 Address new_addr = Memory::Address_at(old_addr); 1592 Address new_addr = Memory::Address_at(old_addr);
1594 1593
1595 if (new_addr == NULL) { 1594 // The new space sweep will overwrite the map word of dead objects
1596 // We encountered pointer to a dead object. Clear it so we will 1595 // with NULL. In this case we do not need to transfer this entry to
1597 // not visit it again during next iteration of dirty regions. 1596 // the store buffer which we are rebuilding.
1598 *p = NULL; 1597 if (new_addr != NULL) {
1598 *p = HeapObject::FromAddress(new_addr);
1599 if (Heap::InNewSpace(new_addr)) {
1600 StoreBuffer::EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(p));
1601 }
1599 } else { 1602 } else {
1600 *p = HeapObject::FromAddress(new_addr); 1603 // We have to zap this pointer, because the store buffer may overflow later,
1604 // and then we have to scan the entire heap and we don't want to find
1605 // spurious new-space pointers in the old space.
1606 *p = HeapObject::FromAddress(NULL); // Fake heap object not in new space.
1601 } 1607 }
1602 } 1608 }
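
The behavior of UpdatePointerToNewGen can be modeled standalone (plain C++, not V8 code; names invented, and the model zaps with a plain null rather than the fake heap object used above): follow the forwarding word written by the first pass, zap slots whose target died, and re-record surviving new-space pointers in the store buffer being rebuilt.

#include <cassert>
#include <vector>

struct Object {
  Object* forwarding = nullptr;  // First word: new address, or null if dead.
  bool in_new_space = false;
};

std::vector<Object**> store_buffer;  // Model of the rebuilt store buffer.

bool InNewSpace(Object* o) { return o != nullptr && o->in_new_space; }

void UpdateSlot(Object** slot) {
  Object* old_target = *slot;
  if (old_target->forwarding == nullptr) {
    // Dead object: zap the slot so a later store-buffer-overflow scan of
    // old space does not find a stale new-space pointer.
    *slot = nullptr;
  } else {
    *slot = old_target->forwarding;  // Live object: follow forwarding word.
    if (InNewSpace(*slot)) store_buffer.push_back(slot);  // Re-record slot.
  }
}

int main() {
  Object new_location;
  new_location.in_new_space = true;
  Object survivor;
  survivor.forwarding = &new_location;  // Pass 1 evacuated this object.
  Object casualty;                      // Died: forwarding stays null.
  Object* slot_a = &survivor;
  Object* slot_b = &casualty;
  UpdateSlot(&slot_a);                  // Redirected and re-recorded.
  UpdateSlot(&slot_b);                  // Zapped.
  assert(slot_a == &new_location);
  assert(slot_b == nullptr);
  assert(store_buffer.size() == 1 && store_buffer[0] == &slot_a);
}
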
1603 1609
1604 1610
1605 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) { 1611 static String* UpdateNewSpaceReferenceInExternalStringTableEntry(Object **p) {
1606 Address old_addr = HeapObject::cast(*p)->address(); 1612 Address old_addr = HeapObject::cast(*p)->address();
1607 Address new_addr = Memory::Address_at(old_addr); 1613 Address new_addr = Memory::Address_at(old_addr);
1608 return String::cast(HeapObject::FromAddress(new_addr)); 1614 return String::cast(HeapObject::FromAddress(new_addr));
1609 } 1615 }
1610 1616
(...skipping 67 matching lines...)
1678 // Promotion failed. Just migrate object to another semispace. 1684 // Promotion failed. Just migrate object to another semispace.
1679 // Allocation cannot fail at this point: semispaces are of equal size. 1685 // Allocation cannot fail at this point: semispaces are of equal size.
1680 Object* target = space->AllocateRaw(size)->ToObjectUnchecked(); 1686 Object* target = space->AllocateRaw(size)->ToObjectUnchecked();
1681 1687
1682 MigrateObject(HeapObject::cast(target)->address(), 1688 MigrateObject(HeapObject::cast(target)->address(),
1683 current, 1689 current,
1684 size, 1690 size,
1685 false); 1691 false);
1686 } else { 1692 } else {
1687 size = object->Size(); 1693 size = object->Size();
1694 // Mark dead objects in the new space with null in their map field.
1688 Memory::Address_at(current) = NULL; 1695 Memory::Address_at(current) = NULL;
1689 } 1696 }
1690 } 1697 }
1691 1698
1692 // Second pass: find pointers to new space and update them. 1699 // Second pass: find pointers to new space and update them.
1693 PointersToNewGenUpdatingVisitor updating_visitor; 1700 PointersToNewGenUpdatingVisitor updating_visitor;
1694 1701
1695 // Update pointers in to space. 1702 // Update pointers in to space.
1696 Address current = space->bottom(); 1703 Address current = space->bottom();
1697 while (current < space->top()) { 1704 while (current < space->top()) {
1698 HeapObject* object = HeapObject::FromAddress(current); 1705 HeapObject* object = HeapObject::FromAddress(current);
1699 current += 1706 current +=
1700 StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(), 1707 StaticPointersToNewGenUpdatingVisitor::IterateBody(object->map(),
1701 object); 1708 object);
1702 } 1709 }
1703 1710
1704 // Update roots. 1711 // Update roots.
1705 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE); 1712 Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
1706 1713
1707 // Update pointers in old spaces. 1714 {
1708 Heap::IterateDirtyRegions(Heap::old_pointer_space(), 1715 StoreBufferRebuildScope scope;
1709 &Heap::IteratePointersInDirtyRegion, 1716 StoreBuffer::IteratePointersToNewSpace(&UpdatePointerToNewGen);
1710 &UpdatePointerToNewGen, 1717 }
1711 Heap::WATERMARK_SHOULD_BE_VALID);
1712
1713 Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
1714 1718
1715 // Update pointers from cells. 1719 // Update pointers from cells.
1716 HeapObjectIterator cell_iterator(Heap::cell_space()); 1720 HeapObjectIterator cell_iterator(Heap::cell_space());
1717 for (HeapObject* cell = cell_iterator.next(); 1721 for (HeapObject* cell = cell_iterator.next();
1718 cell != NULL; 1722 cell != NULL;
1719 cell = cell_iterator.next()) { 1723 cell = cell_iterator.next()) {
1720 if (cell->IsJSGlobalPropertyCell()) { 1724 if (cell->IsJSGlobalPropertyCell()) {
1721 Address value_address = 1725 Address value_address =
1722 reinterpret_cast<Address>(cell) + 1726 reinterpret_cast<Address>(cell) +
1723 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag); 1727 (JSGlobalPropertyCell::kValueOffset - kHeapObjectTag);
(...skipping 298 matching lines...)
2022 } 2026 }
2023 #endif 2027 #endif
2024 2028
2025 space->SetTop(new_allocation_top); 2029 space->SetTop(new_allocation_top);
2026 } 2030 }
2027 } 2031 }
2028 2032
2029 2033
2030 void MarkCompactCollector::SweepSpaces() { 2034 void MarkCompactCollector::SweepSpaces() {
2031 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); 2035 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
2032 2036 #ifdef DEBUG
2033 ASSERT(state_ == SWEEP_SPACES); 2037 state_ = SWEEP_SPACES;
2038 #endif
2034 ASSERT(!IsCompacting()); 2039 ASSERT(!IsCompacting());
2035 // Noncompacting collections simply sweep the spaces to clear the mark 2040 // Noncompacting collections simply sweep the spaces to clear the mark
2036 // bits and free the nonlive blocks (for old and map spaces). We sweep 2041 // bits and free the nonlive blocks (for old and map spaces). We sweep
2037 // the map space last because freeing non-live maps overwrites them and 2042 // the map space last because freeing non-live maps overwrites them and
2038 // the other spaces rely on possibly non-live maps to get the sizes for 2043 // the other spaces rely on possibly non-live maps to get the sizes for
2039 // non-live objects. 2044 // non-live objects.
2040 SweepSpace(Heap::old_pointer_space(), CONSERVATIVE); 2045 SweepSpace(Heap::old_pointer_space(), CONSERVATIVE);
2041 SweepSpace(Heap::old_data_space(), CONSERVATIVE); 2046 SweepSpace(Heap::old_data_space(), CONSERVATIVE);
2042 SweepSpace(Heap::code_space(), PRECISE); 2047 SweepSpace(Heap::code_space(), PRECISE);
2043 // TODO(gc): implement specialized sweeper for cell space. 2048 // TODO(gc): implement specialized sweeper for cell space.
2044 SweepSpace(Heap::cell_space(), CONSERVATIVE); 2049 SweepSpace(Heap::cell_space(), CONSERVATIVE);
2045 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE); 2050 { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
2046 SweepNewSpace(Heap::new_space()); 2051 SweepNewSpace(Heap::new_space());
2047 } 2052 }
2048 // TODO(gc): ClearNonLiveTransitions depends on precise sweeping of 2053 // TODO(gc): ClearNonLiveTransitions depends on precise sweeping of
2049 // map space to detect whether unmarked map became dead in this 2054 // map space to detect whether unmarked map became dead in this
2050 // collection or in one of the previous ones. 2055 // collection or in one of the previous ones.
2051 // TODO(gc): Implement specialized sweeper for map space. 2056 // TODO(gc): Implement specialized sweeper for map space.
2052 SweepSpace(Heap::map_space(), PRECISE); 2057 SweepSpace(Heap::map_space(), PRECISE);
2053 2058
2054 Heap::IterateDirtyRegions(Heap::map_space(),
2055 &Heap::IteratePointersInDirtyMapsRegion,
2056 &UpdatePointerToNewGen,
2057 Heap::WATERMARK_SHOULD_BE_VALID);
2058
2059 ASSERT(live_map_objects_size_ <= Heap::map_space()->Size()); 2059 ASSERT(live_map_objects_size_ <= Heap::map_space()->Size());
2060 } 2060 }
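
The ordering constraint documented above, map space swept last, follows from size calculation reading through maps; a tiny standalone model (plain C++, not V8 code; names invented):

#include <cassert>

// Object sizes are derived from maps, and sweeping overwrites non-live maps.
// Hence: sweep object spaces first (while maps are still intact), then the
// map space last.
struct Map { int instance_size; };
struct HeapObject { Map* map; };

int SizeOf(const HeapObject* o) { return o->map->instance_size; }

int main() {
  Map map{16};
  HeapObject dead_object{&map};        // A non-live object in an old space.
  assert(SizeOf(&dead_object) == 16);  // Only safe before map space is swept.
}
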
2061 2061
2062 2062
2063 // Iterate the live objects in a range of addresses (eg, a page or a 2063 // Iterate the live objects in a range of addresses (eg, a page or a
2064 // semispace). The live regions of the range have been linked into a list. 2064 // semispace). The live regions of the range have been linked into a list.
2065 // The first live region is [first_live_start, first_live_end), and the last 2065 // The first live region is [first_live_start, first_live_end), and the last
2066 // address in the range is top. The callback function is used to get the 2066 // address in the range is top. The callback function is used to get the
2067 // size of each live object. 2067 // size of each live object.
2068 int MarkCompactCollector::IterateLiveObjectsInRange( 2068 int MarkCompactCollector::IterateLiveObjectsInRange(
(...skipping 55 matching lines...)
2124 } 2124 }
2125 2125
2126 2126
2127 void MarkCompactCollector::Initialize() { 2127 void MarkCompactCollector::Initialize() {
2128 StaticPointersToNewGenUpdatingVisitor::Initialize(); 2128 StaticPointersToNewGenUpdatingVisitor::Initialize();
2129 StaticMarkingVisitor::Initialize(); 2129 StaticMarkingVisitor::Initialize();
2130 } 2130 }
2131 2131
2132 2132
2133 } } // namespace v8::internal 2133 } } // namespace v8::internal
