| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 50 matching lines...) |
| 61 #endif | 61 #endif |
| 62 sweep_precisely_(false), | 62 sweep_precisely_(false), |
| 63 reduce_memory_footprint_(false), | 63 reduce_memory_footprint_(false), |
| 64 abort_incremental_marking_(false), | 64 abort_incremental_marking_(false), |
| 65 compacting_(false), | 65 compacting_(false), |
| 66 was_marked_incrementally_(false), | 66 was_marked_incrementally_(false), |
| 67 tracer_(NULL), | 67 tracer_(NULL), |
| 68 migration_slots_buffer_(NULL), | 68 migration_slots_buffer_(NULL), |
| 69 heap_(NULL), | 69 heap_(NULL), |
| 70 code_flusher_(NULL), | 70 code_flusher_(NULL), |
| 71 encountered_weak_maps_(NULL) { } | 71 encountered_weak_maps_(NULL), |
| 72 marker_(this, this) { } |
| 72 | 73 |
| 73 | 74 |
| 74 #ifdef DEBUG | 75 #ifdef DEBUG |
| 75 class VerifyMarkingVisitor: public ObjectVisitor { | 76 class VerifyMarkingVisitor: public ObjectVisitor { |
| 76 public: | 77 public: |
| 77 void VisitPointers(Object** start, Object** end) { | 78 void VisitPointers(Object** start, Object** end) { |
| 78 for (Object** current = start; current < end; current++) { | 79 for (Object** current = start; current < end; current++) { |
| 79 if ((*current)->IsHeapObject()) { | 80 if ((*current)->IsHeapObject()) { |
| 80 HeapObject* object = HeapObject::cast(*current); | 81 HeapObject* object = HeapObject::cast(*current); |
| 81 ASSERT(HEAP->mark_compact_collector()->IsMarked(object)); | 82 ASSERT(HEAP->mark_compact_collector()->IsMarked(object)); |
| (...skipping 964 matching lines...) |
| 1046 public: | 1047 public: |
| 1047 static inline void Visit(Map* map, HeapObject* obj); | 1048 static inline void Visit(Map* map, HeapObject* obj); |
| 1048 }; | 1049 }; |
| 1049 | 1050 |
| 1050 static void Initialize(); | 1051 static void Initialize(); |
| 1051 | 1052 |
| 1052 INLINE(static void VisitPointer(Heap* heap, Object** p)) { | 1053 INLINE(static void VisitPointer(Heap* heap, Object** p)) { |
| 1053 MarkObjectByPointer(heap->mark_compact_collector(), p, p); | 1054 MarkObjectByPointer(heap->mark_compact_collector(), p, p); |
| 1054 } | 1055 } |
| 1055 | 1056 |
| 1056 INLINE(static void VisitPointers(Heap* heap, | 1057 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { |
| 1057 Object** anchor, | |
| 1058 Object** start, | |
| 1059 Object** end)) { | |
| 1060 // Mark all objects pointed to in [start, end). | 1058 // Mark all objects pointed to in [start, end). |
| 1061 const int kMinRangeForMarkingRecursion = 64; | 1059 const int kMinRangeForMarkingRecursion = 64; |
| 1062 if (end - start >= kMinRangeForMarkingRecursion) { | 1060 if (end - start >= kMinRangeForMarkingRecursion) { |
| 1063 if (VisitUnmarkedObjects(heap, anchor, start, end)) return; | 1061 if (VisitUnmarkedObjects(heap, start, end)) return; |
| 1064 // We are close to a stack overflow, so just mark the objects. | 1062 // We are close to a stack overflow, so just mark the objects. |
| 1065 } | 1063 } |
| 1066 MarkCompactCollector* collector = heap->mark_compact_collector(); | 1064 MarkCompactCollector* collector = heap->mark_compact_collector(); |
| 1067 for (Object** p = start; p < end; p++) { | 1065 for (Object** p = start; p < end; p++) { |
| 1068 MarkObjectByPointer(collector, anchor, p); | 1066 MarkObjectByPointer(collector, start, p); |
| 1069 } | 1067 } |
| 1070 } | 1068 } |
| 1071 | 1069 |
| 1072 static void VisitHugeFixedArray(Heap* heap, FixedArray* array, int length); | |
| 1073 | |
| 1074 // The deque is contiguous and allocated in new space, so it is contained | |
| 1075 // in one page minus the header. Its size is also a power of two, making | |
| 1076 // it half the size of a page. We want to scan a number of array entries | |
| 1077 // that is less than the number of entries in the deque, so we divide by 2 | |
| 1078 // once more. | |
| 1079 static const int kScanningChunk = Page::kPageSize / 4 / kPointerSize; | |
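Note: the constant above is where the comment's arithmetic lands. A minimal sketch of the numbers, assuming the 1 MB pages and 8-byte pointers of a typical x64 build of this era (neither value appears in this diff, so both are illustrative):

    // Hypothetical concrete values, for illustration only.
    static const int kAssumedPageSize = 1 << 20;  // assumed Page::kPageSize
    static const int kAssumedPointerSize = 8;     // assumed kPointerSize (x64)
    // The deque occupies half a page, so it holds this many Object* entries:
    static const int kDequeEntries =
        kAssumedPageSize / 2 / kAssumedPointerSize;             // 65536
    // One scanning chunk is half of that ("divide by 2 once more"), so one
    // chunk's worth of newly scanned entries can never fill the deque:
    static const int kScanningChunkEntries =
        kAssumedPageSize / 4 / kAssumedPointerSize;             // 32768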
| 1080 | |
| 1081 INLINE(static void VisitFixedArray(Map* map, HeapObject* object)) { | |
| 1082 FixedArray* array = FixedArray::cast(object); | |
| 1083 int length = array->length(); | |
| 1084 Heap* heap = map->GetHeap(); | |
| 1085 | |
| 1086 if (length < kScanningChunk || | |
| 1087 MemoryChunk::FromAddress(array->address())->owner()->identity() != | |
| 1088 LO_SPACE) { | |
| 1089 Object** start_slot = array->data_start(); | |
| 1090 VisitPointers(heap, start_slot, start_slot, start_slot + length); | |
| 1091 } else { | |
| 1092 VisitHugeFixedArray(heap, array, length); | |
| 1093 } | |
| 1094 } | |
| 1095 | |
| 1096 // Marks the object black and pushes it on the marking stack. | |
| 1097 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) { | 1070 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) { |
| 1098 MarkBit mark = Marking::MarkBitFrom(object); | 1071 MarkBit mark = Marking::MarkBitFrom(object); |
| 1099 heap->mark_compact_collector()->MarkObject(object, mark); | 1072 heap->mark_compact_collector()->MarkObject(object, mark); |
| 1100 } | 1073 } |
| 1101 | 1074 |
| 1102 // Marks the object black without pushing it on the marking stack. | |
| 1103 // Returns true if object needed marking and false otherwise. | |
| 1104 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) { | |
| 1105 MarkBit mark_bit = Marking::MarkBitFrom(object); | |
| 1106 if (!mark_bit.Get()) { | |
| 1107 heap->mark_compact_collector()->SetMark(object, mark_bit); | |
| 1108 return true; | |
| 1109 } | |
| 1110 return false; | |
| 1111 } | |
| 1112 | |
| 1113 // Mark object pointed to by p. | 1075 // Mark object pointed to by p. |
| 1114 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector, | 1076 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector, |
| 1115 Object** anchor_slot, | 1077 Object** anchor_slot, |
| 1116 Object** p)) { | 1078 Object** p)) { |
| 1117 if (!(*p)->IsHeapObject()) return; | 1079 if (!(*p)->IsHeapObject()) return; |
| 1118 HeapObject* object = ShortCircuitConsString(p); | 1080 HeapObject* object = ShortCircuitConsString(p); |
| 1119 collector->RecordSlot(anchor_slot, p, object); | 1081 collector->RecordSlot(anchor_slot, p, object); |
| 1120 MarkBit mark = Marking::MarkBitFrom(object); | 1082 MarkBit mark = Marking::MarkBitFrom(object); |
| 1121 collector->MarkObject(object, mark); | 1083 collector->MarkObject(object, mark); |
| 1122 } | 1084 } |
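Note: the anchor_slot parameter only matters for RecordSlot. As used throughout this file, the anchor identifies the holding object's slots and the visited slot is remembered when the target may move, so the compactor can rewrite the pointer afterwards; this reading is inferred from the call sites in this diff, not stated in it. A condensed view of the call chain above:

    // Illustrative only; the names are from this file.
    // MarkObjectByPointer(collector, anchor_slot, p):
    //   ShortCircuitConsString(p);                      // may rewrite *p to
    //                                                   // skip a cons string
    //   collector->RecordSlot(anchor_slot, p, object);  // remember the slot
    //                                                   // for compaction
    //   collector->MarkObject(object, mark);            // grey it -> deque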
| 1123 | 1085 |
| 1124 | 1086 |
| 1125 // Visit an unmarked object. | 1087 // Visit an unmarked object. |
| 1126 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, | 1088 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector, |
| 1127 HeapObject* obj)) { | 1089 HeapObject* obj)) { |
| 1128 #ifdef DEBUG | 1090 #ifdef DEBUG |
| 1129 ASSERT(Isolate::Current()->heap()->Contains(obj)); | 1091 ASSERT(Isolate::Current()->heap()->Contains(obj)); |
| 1130 ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj)); | 1092 ASSERT(!HEAP->mark_compact_collector()->IsMarked(obj)); |
| 1131 #endif | 1093 #endif |
| 1132 Map* map = obj->map(); | 1094 Map* map = obj->map(); |
| 1133 Heap* heap = obj->GetHeap(); | 1095 Heap* heap = obj->GetHeap(); |
| 1134 MarkBit mark = Marking::MarkBitFrom(obj); | 1096 MarkBit mark = Marking::MarkBitFrom(obj); |
| 1135 heap->mark_compact_collector()->SetMark(obj, mark); | 1097 heap->mark_compact_collector()->SetMark(obj, mark); |
| 1136 // Mark the map pointer and the body. | 1098 // Mark the map pointer and the body. |
| 1137 MarkBit map_mark = Marking::MarkBitFrom(map); | 1099 MarkBit map_mark = Marking::MarkBitFrom(map); |
| 1138 heap->mark_compact_collector()->MarkObject(map, map_mark); | 1100 heap->mark_compact_collector()->MarkObject(map, map_mark); |
| 1139 IterateBody(map, obj); | 1101 IterateBody(map, obj); |
| 1140 } | 1102 } |
| 1141 | 1103 |
| 1142 // Visit all unmarked objects pointed to by [start_slot, end_slot). | 1104 // Visit all unmarked objects pointed to by [start, end). |
| 1143 // Returns false if the operation fails (lack of stack space). | 1105 // Returns false if the operation fails (lack of stack space). |
| 1144 static inline bool VisitUnmarkedObjects(Heap* heap, | 1106 static inline bool VisitUnmarkedObjects(Heap* heap, |
| 1145 Object** anchor_slot, | 1107 Object** start, |
| 1146 Object** start_slot, | 1108 Object** end) { |
| 1147 Object** end_slot) { | |
| 1148 // Return false if we are close to the stack limit. | 1109 // Return false if we are close to the stack limit. |
| 1149 StackLimitCheck check(heap->isolate()); | 1110 StackLimitCheck check(heap->isolate()); |
| 1150 if (check.HasOverflowed()) return false; | 1111 if (check.HasOverflowed()) return false; |
| 1151 | 1112 |
| 1152 MarkCompactCollector* collector = heap->mark_compact_collector(); | 1113 MarkCompactCollector* collector = heap->mark_compact_collector(); |
| 1153 // Visit the unmarked objects. | 1114 // Visit the unmarked objects. |
| 1154 for (Object** p = start_slot; p < end_slot; p++) { | 1115 for (Object** p = start; p < end; p++) { |
| 1155 Object* o = *p; | 1116 Object* o = *p; |
| 1156 if (!o->IsHeapObject()) continue; | 1117 if (!o->IsHeapObject()) continue; |
| 1157 collector->RecordSlot(anchor_slot, p, o); | 1118 collector->RecordSlot(start, p, o); |
| 1158 HeapObject* obj = HeapObject::cast(o); | 1119 HeapObject* obj = HeapObject::cast(o); |
| 1159 MarkBit mark = Marking::MarkBitFrom(obj); | 1120 MarkBit mark = Marking::MarkBitFrom(obj); |
| 1160 if (mark.Get()) continue; | 1121 if (mark.Get()) continue; |
| 1161 VisitUnmarkedObject(collector, obj); | 1122 VisitUnmarkedObject(collector, obj); |
| 1162 } | 1123 } |
| 1163 return true; | 1124 return true; |
| 1164 } | 1125 } |
| 1165 | 1126 |
| 1166 static void VisitJSWeakMap(Map* map, HeapObject* object) { | 1127 static void VisitJSWeakMap(Map* map, HeapObject* object) { |
| 1167 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); | 1128 MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector(); |
| (...skipping 300 matching lines...) |
| 1468 reinterpret_cast<JSFunction*>(object), | 1429 reinterpret_cast<JSFunction*>(object), |
| 1469 false); | 1430 false); |
| 1470 } | 1431 } |
| 1471 | 1432 |
| 1472 | 1433 |
| 1473 static inline void VisitJSFunctionFields(Map* map, | 1434 static inline void VisitJSFunctionFields(Map* map, |
| 1474 JSFunction* object, | 1435 JSFunction* object, |
| 1475 bool flush_code_candidate) { | 1436 bool flush_code_candidate) { |
| 1476 Heap* heap = map->GetHeap(); | 1437 Heap* heap = map->GetHeap(); |
| 1477 | 1438 |
| 1478 Object** start_slot = | 1439 VisitPointers(heap, |
| 1479 HeapObject::RawField(object, JSFunction::kPropertiesOffset); | 1440 HeapObject::RawField(object, JSFunction::kPropertiesOffset), |
| 1480 Object** end_slot = | 1441 HeapObject::RawField(object, JSFunction::kCodeEntryOffset)); |
| 1481 HeapObject::RawField(object, JSFunction::kCodeEntryOffset); | |
| 1482 VisitPointers(heap, start_slot, start_slot, end_slot); | |
| 1483 | 1442 |
| 1484 if (!flush_code_candidate) { | 1443 if (!flush_code_candidate) { |
| 1485 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); | 1444 VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset); |
| 1486 } else { | 1445 } else { |
| 1487 // Don't visit code object. | 1446 // Don't visit code object. |
| 1488 | 1447 |
| 1489 // Visit shared function info to avoid double checking of its | 1448 // Visit shared function info to avoid double checking of its |
| 1490 // flushability. | 1449 // flushability. |
| 1491 SharedFunctionInfo* shared_info = object->unchecked_shared(); | 1450 SharedFunctionInfo* shared_info = object->unchecked_shared(); |
| 1492 MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info); | 1451 MarkBit shared_info_mark = Marking::MarkBitFrom(shared_info); |
| 1493 if (!shared_info_mark.Get()) { | 1452 if (!shared_info_mark.Get()) { |
| 1494 Map* shared_info_map = shared_info->map(); | 1453 Map* shared_info_map = shared_info->map(); |
| 1495 MarkBit shared_info_map_mark = | 1454 MarkBit shared_info_map_mark = |
| 1496 Marking::MarkBitFrom(shared_info_map); | 1455 Marking::MarkBitFrom(shared_info_map); |
| 1497 heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark); | 1456 heap->mark_compact_collector()->SetMark(shared_info, shared_info_mark); |
| 1498 heap->mark_compact_collector()->MarkObject(shared_info_map, | 1457 heap->mark_compact_collector()->MarkObject(shared_info_map, |
| 1499 shared_info_map_mark); | 1458 shared_info_map_mark); |
| 1500 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map, | 1459 VisitSharedFunctionInfoAndFlushCodeGeneric(shared_info_map, |
| 1501 shared_info, | 1460 shared_info, |
| 1502 true); | 1461 true); |
| 1503 } | 1462 } |
| 1504 } | 1463 } |
| 1505 | 1464 |
| 1506 start_slot = | 1465 VisitPointers( |
| 1466 heap, |
| 1507 HeapObject::RawField(object, | 1467 HeapObject::RawField(object, |
| 1508 JSFunction::kCodeEntryOffset + kPointerSize); | 1468 JSFunction::kCodeEntryOffset + kPointerSize), |
| 1509 end_slot = | 1469 HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset)); |
| 1510 HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset); | |
| 1511 VisitPointers(heap, start_slot, start_slot, end_slot); | |
| 1512 } | 1470 } |
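Note: the two VisitPointers calls in VisitJSFunctionFields deliberately straddle the code entry word, so that a flushing candidate never marks its code object (per the "Don't visit code object" comment above). The slices, as implied by the offsets used in the NEW column:

    // JSFunction pointer fields, per the offsets above (illustrative):
    //   [kPropertiesOffset, kCodeEntryOffset)        visited as pointers
    //   [kCodeEntryOffset, kCodeEntryOffset + kPointerSize)
    //       VisitCodeEntry, or skipped for flush candidates
    //   [kCodeEntryOffset + kPointerSize, kNonWeakFieldsEndOffset)
    //       visited as pointers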
| 1513 | 1471 |
| 1514 | 1472 |
| 1515 static void VisitSharedFunctionInfoFields(Heap* heap, | 1473 static void VisitSharedFunctionInfoFields(Heap* heap, |
| 1516 HeapObject* object, | 1474 HeapObject* object, |
| 1517 bool flush_code_candidate) { | 1475 bool flush_code_candidate) { |
| 1518 VisitPointer(heap, | 1476 VisitPointer(heap, |
| 1519 HeapObject::RawField(object, SharedFunctionInfo::kNameOffset)); | 1477 HeapObject::RawField(object, SharedFunctionInfo::kNameOffset)); |
| 1520 | 1478 |
| 1521 if (!flush_code_candidate) { | 1479 if (!flush_code_candidate) { |
| 1522 VisitPointer(heap, | 1480 VisitPointer(heap, |
| 1523 HeapObject::RawField(object, | 1481 HeapObject::RawField(object, |
| 1524 SharedFunctionInfo::kCodeOffset)); | 1482 SharedFunctionInfo::kCodeOffset)); |
| 1525 } | 1483 } |
| 1526 | 1484 |
| 1527 Object** start_slot = | 1485 VisitPointers( |
| 1486 heap, |
| 1528 HeapObject::RawField(object, | 1487 HeapObject::RawField(object, |
| 1529 SharedFunctionInfo::kOptimizedCodeMapOffset); | 1488 SharedFunctionInfo::kOptimizedCodeMapOffset), |
| 1530 Object** end_slot = | 1489 HeapObject::RawField(object, SharedFunctionInfo::kSize)); |
| 1531 HeapObject::RawField(object, SharedFunctionInfo::kSize); | |
| 1532 | |
| 1533 VisitPointers(heap, start_slot, start_slot, end_slot); | |
| 1534 } | 1490 } |
| 1535 | 1491 |
| 1536 static VisitorDispatchTable<Callback> non_count_table_; | 1492 static VisitorDispatchTable<Callback> non_count_table_; |
| 1537 }; | 1493 }; |
| 1538 | 1494 |
| 1539 | 1495 |
| 1540 void MarkCompactMarkingVisitor::VisitHugeFixedArray(Heap* heap, | |
| 1541 FixedArray* array, | |
| 1542 int length) { | |
| 1543 MemoryChunk* chunk = MemoryChunk::FromAddress(array->address()); | |
| 1544 | |
| 1545 ASSERT(chunk->owner()->identity() == LO_SPACE); | |
| 1546 | |
| 1547 Object** start_slot = array->data_start(); | |
| 1548 int from = | |
| 1549 chunk->IsPartiallyScanned() ? chunk->PartiallyScannedProgress() : 0; | |
| 1550 int to = Min(from + kScanningChunk, length); | |
| 1551 VisitPointers(heap, start_slot, start_slot + from, start_slot + to); | |
| 1552 | |
| 1553 if (to == length) { | |
| 1554 chunk->SetCompletelyScanned(); | |
| 1555 } else { | |
| 1556 chunk->SetPartiallyScannedProgress(to); | |
| 1557 } | |
| 1558 } | |
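Note: this helper (removed in the NEW column) scanned LO-space arrays in kScanningChunk slices, parking its progress on the MemoryChunk between rounds. A sketch of one array's life cycle under that protocol, assuming the MemoryChunk accessors behave as their names suggest (they are not shown in this diff):

    // Illustrative only: an LO-space FixedArray of length N.
    int from = 0;  // chunk->PartiallyScannedProgress() on later rounds
    while (from < length) {
      int to = Min(from + kScanningChunk, length);
      // ... VisitPointers over slots [from, to) ...
      if (to == length) {
        // chunk->SetCompletelyScanned(): the array is done.
      } else {
        // chunk->SetPartiallyScannedProgress(to): the array waits until
        // ProcessLargePostponedArrays() re-pushes it after the next drain.
      }
      from = to;
    }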
| 1559 | |
| 1560 | |
| 1561 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray( | 1496 void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray( |
| 1562 FixedArrayBase* fixed_array, | 1497 FixedArrayBase* fixed_array, |
| 1563 FixedArraySubInstanceType fast_type, | 1498 FixedArraySubInstanceType fast_type, |
| 1564 FixedArraySubInstanceType dictionary_type) { | 1499 FixedArraySubInstanceType dictionary_type) { |
| 1565 Heap* heap = fixed_array->map()->GetHeap(); | 1500 Heap* heap = fixed_array->map()->GetHeap(); |
| 1566 if (fixed_array->map() != heap->fixed_cow_array_map() && | 1501 if (fixed_array->map() != heap->fixed_cow_array_map() && |
| 1567 fixed_array->map() != heap->fixed_double_array_map() && | 1502 fixed_array->map() != heap->fixed_double_array_map() && |
| 1568 fixed_array != heap->empty_fixed_array()) { | 1503 fixed_array != heap->empty_fixed_array()) { |
| 1569 if (fixed_array->IsDictionary()) { | 1504 if (fixed_array->IsDictionary()) { |
| 1570 heap->RecordObjectStats(FIXED_ARRAY_TYPE, | 1505 heap->RecordObjectStats(FIXED_ARRAY_TYPE, |
| (...skipping 35 matching lines...) |
| 1606 | 1541 |
| 1607 template<> | 1542 template<> |
| 1608 class MarkCompactMarkingVisitor::ObjectStatsTracker< | 1543 class MarkCompactMarkingVisitor::ObjectStatsTracker< |
| 1609 MarkCompactMarkingVisitor::kVisitMap> { | 1544 MarkCompactMarkingVisitor::kVisitMap> { |
| 1610 public: | 1545 public: |
| 1611 static inline void Visit(Map* map, HeapObject* obj) { | 1546 static inline void Visit(Map* map, HeapObject* obj) { |
| 1612 Heap* heap = map->GetHeap(); | 1547 Heap* heap = map->GetHeap(); |
| 1613 Map* map_obj = Map::cast(obj); | 1548 Map* map_obj = Map::cast(obj); |
| 1614 ASSERT(map->instance_type() == MAP_TYPE); | 1549 ASSERT(map->instance_type() == MAP_TYPE); |
| 1615 DescriptorArray* array = map_obj->instance_descriptors(); | 1550 DescriptorArray* array = map_obj->instance_descriptors(); |
| 1616 if (map_obj->owns_descriptors() && | 1551 if (array != heap->empty_descriptor_array()) { |
| 1617 array != heap->empty_descriptor_array()) { | |
| 1618 int fixed_array_size = array->Size(); | 1552 int fixed_array_size = array->Size(); |
| 1619 heap->RecordObjectStats(FIXED_ARRAY_TYPE, | 1553 heap->RecordObjectStats(FIXED_ARRAY_TYPE, |
| 1620 DESCRIPTOR_ARRAY_SUB_TYPE, | 1554 DESCRIPTOR_ARRAY_SUB_TYPE, |
| 1621 fixed_array_size); | 1555 fixed_array_size); |
| 1622 } | 1556 } |
| 1623 if (map_obj->HasTransitionArray()) { | 1557 if (map_obj->HasTransitionArray()) { |
| 1624 int fixed_array_size = map_obj->transitions()->Size(); | 1558 int fixed_array_size = map_obj->transitions()->Size(); |
| 1625 heap->RecordObjectStats(FIXED_ARRAY_TYPE, | 1559 heap->RecordObjectStats(FIXED_ARRAY_TYPE, |
| 1626 TRANSITION_ARRAY_SUB_TYPE, | 1560 TRANSITION_ARRAY_SUB_TYPE, |
| 1627 fixed_array_size); | 1561 fixed_array_size); |
| (...skipping 64 matching lines...) |
| 1692 | 1626 |
| 1693 table_.Register(kVisitSharedFunctionInfo, | 1627 table_.Register(kVisitSharedFunctionInfo, |
| 1694 &VisitSharedFunctionInfoAndFlushCode); | 1628 &VisitSharedFunctionInfoAndFlushCode); |
| 1695 | 1629 |
| 1696 table_.Register(kVisitJSFunction, | 1630 table_.Register(kVisitJSFunction, |
| 1697 &VisitJSFunctionAndFlushCode); | 1631 &VisitJSFunctionAndFlushCode); |
| 1698 | 1632 |
| 1699 table_.Register(kVisitJSRegExp, | 1633 table_.Register(kVisitJSRegExp, |
| 1700 &VisitRegExpAndFlushCode); | 1634 &VisitRegExpAndFlushCode); |
| 1701 | 1635 |
| 1702 table_.Register(kVisitFixedArray, | |
| 1703 &VisitFixedArray); | |
| 1704 | |
| 1705 if (FLAG_track_gc_object_stats) { | 1636 if (FLAG_track_gc_object_stats) { |
| 1706 // Copy the visitor table to make call-through possible. | 1637 // Copy the visitor table to make call-through possible. |
| 1707 non_count_table_.CopyFrom(&table_); | 1638 non_count_table_.CopyFrom(&table_); |
| 1708 #define VISITOR_ID_COUNT_FUNCTION(id) \ | 1639 #define VISITOR_ID_COUNT_FUNCTION(id) \ |
| 1709 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit); | 1640 table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit); |
| 1710 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION) | 1641 VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION) |
| 1711 #undef VISITOR_ID_COUNT_FUNCTION | 1642 #undef VISITOR_ID_COUNT_FUNCTION |
| 1712 } | 1643 } |
| 1713 } | 1644 } |
| 1714 | 1645 |
| 1715 | 1646 |
| 1716 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback> | 1647 VisitorDispatchTable<MarkCompactMarkingVisitor::Callback> |
| 1717 MarkCompactMarkingVisitor::non_count_table_; | 1648 MarkCompactMarkingVisitor::non_count_table_; |
| 1718 | 1649 |
| 1719 | 1650 |
| 1720 class MarkingVisitor : public ObjectVisitor { | 1651 class MarkingVisitor : public ObjectVisitor { |
| 1721 public: | 1652 public: |
| 1722 explicit MarkingVisitor(Heap* heap) : heap_(heap) { } | 1653 explicit MarkingVisitor(Heap* heap) : heap_(heap) { } |
| 1723 | 1654 |
| 1724 void VisitPointer(Object** p) { | 1655 void VisitPointer(Object** p) { |
| 1725 MarkCompactMarkingVisitor::VisitPointer(heap_, p); | 1656 MarkCompactMarkingVisitor::VisitPointer(heap_, p); |
| 1726 } | 1657 } |
| 1727 | 1658 |
| 1728 void VisitPointers(Object** start_slot, Object** end_slot) { | 1659 void VisitPointers(Object** start, Object** end) { |
| 1729 MarkCompactMarkingVisitor::VisitPointers( | 1660 MarkCompactMarkingVisitor::VisitPointers(heap_, start, end); |
| 1730 heap_, start_slot, start_slot, end_slot); | |
| 1731 } | 1661 } |
| 1732 | 1662 |
| 1733 private: | 1663 private: |
| 1734 Heap* heap_; | 1664 Heap* heap_; |
| 1735 }; | 1665 }; |
| 1736 | 1666 |
| 1737 | 1667 |
| 1738 class CodeMarkingVisitor : public ThreadVisitor { | 1668 class CodeMarkingVisitor : public ThreadVisitor { |
| 1739 public: | 1669 public: |
| 1740 explicit CodeMarkingVisitor(MarkCompactCollector* collector) | 1670 explicit CodeMarkingVisitor(MarkCompactCollector* collector) |
| 1741 : collector_(collector) {} | 1671 : collector_(collector) {} |
| 1742 | 1672 |
| 1743 void VisitThread(Isolate* isolate, ThreadLocalTop* top) { | 1673 void VisitThread(Isolate* isolate, ThreadLocalTop* top) { |
| 1744 collector_->PrepareThreadForCodeFlushing(isolate, top); | 1674 collector_->PrepareThreadForCodeFlushing(isolate, top); |
| 1745 } | 1675 } |
| 1746 | 1676 |
| 1747 private: | 1677 private: |
| 1748 MarkCompactCollector* collector_; | 1678 MarkCompactCollector* collector_; |
| 1749 }; | 1679 }; |
| 1750 | 1680 |
| 1751 | 1681 |
| 1752 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { | 1682 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor { |
| 1753 public: | 1683 public: |
| 1754 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector) | 1684 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector) |
| 1755 : collector_(collector) {} | 1685 : collector_(collector) {} |
| 1756 | 1686 |
| 1757 void VisitPointers(Object** start_slot, Object** end_slot) { | 1687 void VisitPointers(Object** start, Object** end) { |
| 1758 for (Object** p = start_slot; p < end_slot; p++) VisitPointer(p); | 1688 for (Object** p = start; p < end; p++) VisitPointer(p); |
| 1759 } | 1689 } |
| 1760 | 1690 |
| 1761 void VisitPointer(Object** slot) { | 1691 void VisitPointer(Object** slot) { |
| 1762 Object* obj = *slot; | 1692 Object* obj = *slot; |
| 1763 if (obj->IsSharedFunctionInfo()) { | 1693 if (obj->IsSharedFunctionInfo()) { |
| 1764 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); | 1694 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj); |
| 1765 MarkBit shared_mark = Marking::MarkBitFrom(shared); | 1695 MarkBit shared_mark = Marking::MarkBitFrom(shared); |
| 1766 MarkBit code_mark = Marking::MarkBitFrom(shared->code()); | 1696 MarkBit code_mark = Marking::MarkBitFrom(shared->code()); |
| 1767 collector_->MarkObject(shared->code(), code_mark); | 1697 collector_->MarkObject(shared->code(), code_mark); |
| 1768 collector_->MarkObject(shared, shared_mark); | 1698 collector_->MarkObject(shared, shared_mark); |
| (...skipping 90 matching lines...) |
| 1859 // Visitor class for marking heap roots. | 1789 // Visitor class for marking heap roots. |
| 1860 class RootMarkingVisitor : public ObjectVisitor { | 1790 class RootMarkingVisitor : public ObjectVisitor { |
| 1861 public: | 1791 public: |
| 1862 explicit RootMarkingVisitor(Heap* heap) | 1792 explicit RootMarkingVisitor(Heap* heap) |
| 1863 : collector_(heap->mark_compact_collector()) { } | 1793 : collector_(heap->mark_compact_collector()) { } |
| 1864 | 1794 |
| 1865 void VisitPointer(Object** p) { | 1795 void VisitPointer(Object** p) { |
| 1866 MarkObjectByPointer(p); | 1796 MarkObjectByPointer(p); |
| 1867 } | 1797 } |
| 1868 | 1798 |
| 1869 void VisitPointers(Object** start_slot, Object** end_slot) { | 1799 void VisitPointers(Object** start, Object** end) { |
| 1870 for (Object** p = start_slot; p < end_slot; p++) MarkObjectByPointer(p); | 1800 for (Object** p = start; p < end; p++) MarkObjectByPointer(p); |
| 1871 } | 1801 } |
| 1872 | 1802 |
| 1873 private: | 1803 private: |
| 1874 void MarkObjectByPointer(Object** p) { | 1804 void MarkObjectByPointer(Object** p) { |
| 1875 if (!(*p)->IsHeapObject()) return; | 1805 if (!(*p)->IsHeapObject()) return; |
| 1876 | 1806 |
| 1877 // Replace flat cons strings in place. | 1807 // Replace flat cons strings in place. |
| 1878 HeapObject* object = ShortCircuitConsString(p); | 1808 HeapObject* object = ShortCircuitConsString(p); |
| 1879 MarkBit mark_bit = Marking::MarkBitFrom(object); | 1809 MarkBit mark_bit = Marking::MarkBitFrom(object); |
| 1880 if (mark_bit.Get()) return; | 1810 if (mark_bit.Get()) return; |
| (...skipping 15 matching lines...) |
| 1896 MarkCompactCollector* collector_; | 1826 MarkCompactCollector* collector_; |
| 1897 }; | 1827 }; |
| 1898 | 1828 |
| 1899 | 1829 |
| 1900 // Helper class for pruning the symbol table. | 1830 // Helper class for pruning the symbol table. |
| 1901 class SymbolTableCleaner : public ObjectVisitor { | 1831 class SymbolTableCleaner : public ObjectVisitor { |
| 1902 public: | 1832 public: |
| 1903 explicit SymbolTableCleaner(Heap* heap) | 1833 explicit SymbolTableCleaner(Heap* heap) |
| 1904 : heap_(heap), pointers_removed_(0) { } | 1834 : heap_(heap), pointers_removed_(0) { } |
| 1905 | 1835 |
| 1906 virtual void VisitPointers(Object** start_slot, Object** end_slot) { | 1836 virtual void VisitPointers(Object** start, Object** end) { |
| 1907 // Visit all HeapObject pointers in [start_slot, end_slot). | 1837 // Visit all HeapObject pointers in [start, end). |
| 1908 for (Object** p = start_slot; p < end_slot; p++) { | 1838 for (Object** p = start; p < end; p++) { |
| 1909 Object* o = *p; | 1839 Object* o = *p; |
| 1910 if (o->IsHeapObject() && | 1840 if (o->IsHeapObject() && |
| 1911 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { | 1841 !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) { |
| 1912 // Check if the symbol being pruned is an external symbol. We need to | 1842 // Check if the symbol being pruned is an external symbol. We need to |
| 1913 // delete the associated external data as this symbol is going away. | 1843 // delete the associated external data as this symbol is going away. |
| 1914 | 1844 |
| 1915 // Since no objects have yet been moved we can safely access the map of | 1845 // Since no objects have yet been moved we can safely access the map of |
| 1916 // the object. | 1846 // the object. |
| 1917 if (o->IsExternalString()) { | 1847 if (o->IsExternalString()) { |
| 1918 heap_->FinalizeExternalString(String::cast(*p)); | 1848 heap_->FinalizeExternalString(String::cast(*p)); |
| (...skipping 22 matching lines...) |
| 1941 virtual Object* RetainAs(Object* object) { | 1871 virtual Object* RetainAs(Object* object) { |
| 1942 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) { | 1872 if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) { |
| 1943 return object; | 1873 return object; |
| 1944 } else { | 1874 } else { |
| 1945 return NULL; | 1875 return NULL; |
| 1946 } | 1876 } |
| 1947 } | 1877 } |
| 1948 }; | 1878 }; |
| 1949 | 1879 |
| 1950 | 1880 |
| 1881 void MarkCompactCollector::ProcessNewlyMarkedObject(HeapObject* object) { |
| 1882 ASSERT(IsMarked(object)); |
| 1883 ASSERT(HEAP->Contains(object)); |
| 1884 if (object->IsMap()) { |
| 1885 Map* map = Map::cast(object); |
| 1886 heap_->ClearCacheOnMap(map); |
| 1887 |
| 1888 // When map collection is enabled we have to mark through the map's |
| 1889 // transitions in a special way to make transition links weak. Only maps for |
| 1890 // subclasses of JSReceiver can have transitions. |
| 1891 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); |
| 1892 if (FLAG_collect_maps && map->instance_type() >= FIRST_JS_RECEIVER_TYPE) { |
| 1893 marker_.MarkMapContents(map); |
| 1894 } else { |
| 1895 marking_deque_.PushBlack(map); |
| 1896 } |
| 1897 } else { |
| 1898 marking_deque_.PushBlack(object); |
| 1899 } |
| 1900 } |
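Note: in the NEW column this function becomes the single dispatch point for freshly marked objects; only maps of JSReceiver subclasses take the weak path through marker_. Condensed (illustrative, names from the code above):

    // object has just been marked black:
    //   Map of a JSReceiver subclass, with FLAG_collect_maps on
    //       -> marker_.MarkMapContents(map)      // transitions kept weak
    //   anything else, including other maps
    //       -> marking_deque_.PushBlack(object)  // traced strongly later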
| 1901 |
| 1902 |
| 1903 // Force instantiation of template instances. |
| 1904 template void Marker<IncrementalMarking>::MarkMapContents(Map* map); |
| 1905 template void Marker<MarkCompactCollector>::MarkMapContents(Map* map); |
| 1906 |
| 1907 |
| 1908 template <class T> |
| 1909 void Marker<T>::MarkMapContents(Map* map) { |
| 1910 // Make sure that the back pointer stored either in the map itself or inside |
| 1911 // its transitions array is marked. Treat pointers in the transitions array as |
| 1912 // weak and also mark that array to prevent visiting it later. |
| 1913 base_marker()->MarkObjectAndPush(HeapObject::cast(map->GetBackPointer())); |
| 1914 |
| 1915 Object** transitions_slot = |
| 1916 HeapObject::RawField(map, Map::kTransitionsOrBackPointerOffset); |
| 1917 Object* transitions = *transitions_slot; |
| 1918 if (transitions->IsTransitionArray()) { |
| 1919 MarkTransitionArray(reinterpret_cast<TransitionArray*>(transitions)); |
| 1920 } else { |
| 1921 // Already marked by marking map->GetBackPointer(). |
| 1922 ASSERT(transitions->IsMap() || transitions->IsUndefined()); |
| 1923 } |
| 1924 |
| 1925 // Mark the Object* fields of the Map. Since the transitions array has been |
| 1926 // marked already, it is fine that one of these fields contains a pointer to |
| 1927 // it. |
| 1928 Object** start_slot = |
| 1929 HeapObject::RawField(map, Map::kPointerFieldsBeginOffset); |
| 1930 Object** end_slot = HeapObject::RawField(map, Map::kPointerFieldsEndOffset); |
| 1931 for (Object** slot = start_slot; slot < end_slot; slot++) { |
| 1932 Object* obj = *slot; |
| 1933 if (!obj->NonFailureIsHeapObject()) continue; |
| 1934 mark_compact_collector()->RecordSlot(start_slot, slot, obj); |
| 1935 base_marker()->MarkObjectAndPush(reinterpret_cast<HeapObject*>(obj)); |
| 1936 } |
| 1937 } |
| 1938 |
| 1939 |
| 1940 template <class T> |
| 1941 void Marker<T>::MarkTransitionArray(TransitionArray* transitions) { |
| 1942 if (!base_marker()->MarkObjectWithoutPush(transitions)) return; |
| 1943 Object** transitions_start = transitions->data_start(); |
| 1944 |
| 1945 DescriptorArray* descriptors = transitions->descriptors(); |
| 1946 base_marker()->MarkObjectAndPush(descriptors); |
| 1947 mark_compact_collector()->RecordSlot( |
| 1948 transitions_start, transitions->GetDescriptorsSlot(), descriptors); |
| 1949 |
| 1950 if (transitions->HasPrototypeTransitions()) { |
| 1951 // Mark the prototype transitions array, but don't push it onto the marking |
| 1952 // stack. This makes references from it weak; dead prototype transitions are |
| 1953 // cleaned up later in ClearNonLiveTransitions. |
| 1954 Object** proto_trans_slot = transitions->GetPrototypeTransitionsSlot(); |
| 1955 HeapObject* prototype_transitions = HeapObject::cast(*proto_trans_slot); |
| 1956 base_marker()->MarkObjectWithoutPush(prototype_transitions); |
| 1957 mark_compact_collector()->RecordSlot( |
| 1958 transitions_start, proto_trans_slot, prototype_transitions); |
| 1959 } |
| 1960 |
| 1961 for (int i = 0; i < transitions->number_of_transitions(); ++i) { |
| 1962 Object** key_slot = transitions->GetKeySlot(i); |
| 1963 Object* key = *key_slot; |
| 1964 if (key->IsHeapObject()) { |
| 1965 base_marker()->MarkObjectAndPush(HeapObject::cast(key)); |
| 1966 mark_compact_collector()->RecordSlot(transitions_start, key_slot, key); |
| 1967 } |
| 1968 } |
| 1969 } |
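Note: the weak treatment in MarkMapContents and MarkTransitionArray rests on the contrast between the two base-marker calls. Their semantics can be read off MarkObjectWithoutPush earlier in this diff; the contrast below is a sketch, not code from the patch:

    // Strong: mark black and push, so the object's own fields get traced
    // and everything it references stays alive.
    base_marker()->MarkObjectAndPush(obj);
    // Weak contents: mark black but do not push, so the fields are never
    // traced from here; referents survive only if reached some other way,
    // and dead ones are pruned later (see ClearNonLiveTransitions above).
    base_marker()->MarkObjectWithoutPush(obj);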
| 1970 |
| 1971 |
| 1951 // Fill the marking stack with overflowed objects returned by the given | 1972 // Fill the marking stack with overflowed objects returned by the given |
| 1952 // iterator. Stop when the marking stack is filled or the end of the space | 1973 // iterator. Stop when the marking stack is filled or the end of the space |
| 1953 // is reached, whichever comes first. | 1974 // is reached, whichever comes first. |
| 1954 template<class T> | 1975 template<class T> |
| 1955 static void DiscoverGreyObjectsWithIterator(Heap* heap, | 1976 static void DiscoverGreyObjectsWithIterator(Heap* heap, |
| 1956 MarkingDeque* marking_deque, | 1977 MarkingDeque* marking_deque, |
| 1957 T* it) { | 1978 T* it) { |
| 1958 // The caller should ensure that the marking stack is initially not full, | 1979 // The caller should ensure that the marking stack is initially not full, |
| 1959 // so that we don't waste effort pointlessly scanning for objects. | 1980 // so that we don't waste effort pointlessly scanning for objects. |
| 1960 ASSERT(!marking_deque->IsFull()); | 1981 ASSERT(!marking_deque->IsFull()); |
| (...skipping 218 matching lines...) |
| 2179 ASSERT(object->IsHeapObject()); | 2200 ASSERT(object->IsHeapObject()); |
| 2180 ASSERT(heap()->Contains(object)); | 2201 ASSERT(heap()->Contains(object)); |
| 2181 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); | 2202 ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 2182 | 2203 |
| 2183 Map* map = object->map(); | 2204 Map* map = object->map(); |
| 2184 MarkBit map_mark = Marking::MarkBitFrom(map); | 2205 MarkBit map_mark = Marking::MarkBitFrom(map); |
| 2185 MarkObject(map, map_mark); | 2206 MarkObject(map, map_mark); |
| 2186 | 2207 |
| 2187 MarkCompactMarkingVisitor::IterateBody(map, object); | 2208 MarkCompactMarkingVisitor::IterateBody(map, object); |
| 2188 } | 2209 } |
| 2189 ProcessLargePostponedArrays(heap(), &marking_deque_); | |
| 2190 | 2210 |
| 2191 // Process encountered weak maps, mark objects only reachable by those | 2211 // Process encountered weak maps, mark objects only reachable by those |
| 2192 // weak maps and repeat until fix-point is reached. | 2212 // weak maps and repeat until fix-point is reached. |
| 2193 ProcessWeakMaps(); | 2213 ProcessWeakMaps(); |
| 2194 } | 2214 } |
| 2195 } | 2215 } |
| 2196 | 2216 |
| 2197 | 2217 |
| 2198 void MarkCompactCollector::ProcessLargePostponedArrays(Heap* heap, | |
| 2199 MarkingDeque* deque) { | |
| 2200 ASSERT(deque->IsEmpty()); | |
| 2201 LargeObjectIterator it(heap->lo_space()); | |
| 2202 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | |
| 2203 if (!obj->IsFixedArray()) continue; | |
| 2204 MemoryChunk* p = MemoryChunk::FromAddress(obj->address()); | |
| 2205 if (p->IsPartiallyScanned()) { | |
| 2206 deque->PushBlack(obj); | |
| 2207 } | |
| 2208 } | |
| 2209 } | |
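Note: this removed pass paired with VisitHugeFixedArray: each time the marking deque drained (the OLD-column call at old line 2189), any LO-space FixedArray still flagged as partially scanned was pushed back so the next drain could resume where the last chunk stopped. A sketch of the driver, with the loop structure assumed from the surrounding fragment:

    // Illustrative only.
    while (!marking_deque_.IsEmpty()) {
      // ... pop objects and IterateBody(map, object) ...
    }
    ProcessLargePostponedArrays(heap(), &marking_deque_);  // may re-fill
    // The enclosing loop repeats until no array remains partially scanned,
    // then ProcessWeakMaps() runs.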
| 2210 | |
| 2211 | |
| 2212 // Sweep the heap for overflowed objects, clear their overflow bits, and | 2218 // Sweep the heap for overflowed objects, clear their overflow bits, and |
| 2213 // push them on the marking stack. Stop early if the marking stack fills | 2219 // push them on the marking stack. Stop early if the marking stack fills |
| 2214 // before sweeping completes. If sweeping completes, there are no remaining | 2220 // before sweeping completes. If sweeping completes, there are no remaining |
| 2215 // overflowed objects in the heap, so the overflow flag on the marking stack | 2221 // overflowed objects in the heap, so the overflow flag on the marking stack |
| 2216 // is cleared. | 2222 // is cleared. |
| 2217 void MarkCompactCollector::RefillMarkingDeque() { | 2223 void MarkCompactCollector::RefillMarkingDeque() { |
| 2218 if (FLAG_trace_gc) { | |
| 2219 PrintPID("Marking queue overflowed\n"); | |
| 2220 } | |
| 2221 ASSERT(marking_deque_.overflowed()); | 2224 ASSERT(marking_deque_.overflowed()); |
| 2222 | 2225 |
| 2223 SemiSpaceIterator new_it(heap()->new_space()); | 2226 SemiSpaceIterator new_it(heap()->new_space()); |
| 2224 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it); | 2227 DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &new_it); |
| 2225 if (marking_deque_.IsFull()) return; | 2228 if (marking_deque_.IsFull()) return; |
| 2226 | 2229 |
| 2227 DiscoverGreyObjectsInSpace(heap(), | 2230 DiscoverGreyObjectsInSpace(heap(), |
| 2228 &marking_deque_, | 2231 &marking_deque_, |
| 2229 heap()->old_pointer_space()); | 2232 heap()->old_pointer_space()); |
| 2230 if (marking_deque_.IsFull()) return; | 2233 if (marking_deque_.IsFull()) return; |
| (...skipping 470 matching lines...) |
| 2701 // Visitor for updating pointers from live objects in old spaces to new space. | 2704 // Visitor for updating pointers from live objects in old spaces to new space. |
| 2702 // It does not expect to encounter pointers to dead objects. | 2705 // It does not expect to encounter pointers to dead objects. |
| 2703 class PointersUpdatingVisitor: public ObjectVisitor { | 2706 class PointersUpdatingVisitor: public ObjectVisitor { |
| 2704 public: | 2707 public: |
| 2705 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { } | 2708 explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { } |
| 2706 | 2709 |
| 2707 void VisitPointer(Object** p) { | 2710 void VisitPointer(Object** p) { |
| 2708 UpdatePointer(p); | 2711 UpdatePointer(p); |
| 2709 } | 2712 } |
| 2710 | 2713 |
| 2711 void VisitPointers(Object** start_slot, Object** end_slot) { | 2714 void VisitPointers(Object** start, Object** end) { |
| 2712 for (Object** p = start_slot; p < end_slot; p++) UpdatePointer(p); | 2715 for (Object** p = start; p < end; p++) UpdatePointer(p); |
| 2713 } | 2716 } |
| 2714 | 2717 |
| 2715 void VisitEmbeddedPointer(RelocInfo* rinfo) { | 2718 void VisitEmbeddedPointer(RelocInfo* rinfo) { |
| 2716 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); | 2719 ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT); |
| 2717 Object* target = rinfo->target_object(); | 2720 Object* target = rinfo->target_object(); |
| 2718 VisitPointer(&target); | 2721 VisitPointer(&target); |
| 2719 rinfo->set_target_object(target); | 2722 rinfo->set_target_object(target); |
| 2720 } | 2723 } |
| 2721 | 2724 |
| 2722 void VisitCodeTarget(RelocInfo* rinfo) { | 2725 void VisitCodeTarget(RelocInfo* rinfo) { |
| (...skipping 1418 matching lines...) |
| 4141 while (buffer != NULL) { | 4144 while (buffer != NULL) { |
| 4142 SlotsBuffer* next_buffer = buffer->next(); | 4145 SlotsBuffer* next_buffer = buffer->next(); |
| 4143 DeallocateBuffer(buffer); | 4146 DeallocateBuffer(buffer); |
| 4144 buffer = next_buffer; | 4147 buffer = next_buffer; |
| 4145 } | 4148 } |
| 4146 *buffer_address = NULL; | 4149 *buffer_address = NULL; |
| 4147 } | 4150 } |
| 4148 | 4151 |
| 4149 | 4152 |
| 4150 } } // namespace v8::internal | 4153 } } // namespace v8::internal |