Chromium Code Reviews

Side by Side Diff: src/heap/mark-compact.cc

Issue 1412063012: Revert of [heap] Separate out optimized code map processing. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 1 month ago
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/mark-compact.h"
6
7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h"
(...skipping 966 matching lines...)
977 isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
978 *code_slot);
979
980 candidate = next_candidate;
981 }
982
983 shared_function_info_candidates_head_ = NULL;
984 }
985
986
OLD (left column):

987 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
988 // Make sure previous flushing decisions are revisited.
989 isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
990
991 if (FLAG_trace_code_flushing) {
992 PrintF("[code-flushing abandons function-info: ");
993 shared_info->ShortPrint();
994 PrintF("]\n");
995 }
996
997 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
998 SharedFunctionInfo* next_candidate;
999 if (candidate == shared_info) {
1000 next_candidate = GetNextCandidate(shared_info);
1001 shared_function_info_candidates_head_ = next_candidate;
1002 ClearNextCandidate(shared_info);
1003 } else {
1004 while (candidate != NULL) {
1005 next_candidate = GetNextCandidate(candidate);
1006
1007 if (next_candidate == shared_info) {
1008 next_candidate = GetNextCandidate(shared_info);
1009 SetNextCandidate(candidate, next_candidate);
1010 ClearNextCandidate(shared_info);
1011 break;
1012 }
1013
1014 candidate = next_candidate;
1015 }
1016 }
1017 }
1018
1019
1020 void CodeFlusher::EvictCandidate(JSFunction* function) {
1021 DCHECK(!function->next_function_link()->IsUndefined());
1022 Object* undefined = isolate_->heap()->undefined_value();
1023
1024 // Make sure previous flushing decisions are revisited.
1025 isolate_->heap()->incremental_marking()->RecordWrites(function);
1026 isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1027
1028 if (FLAG_trace_code_flushing) {
1029 PrintF("[code-flushing abandons closure: ");
1030 function->shared()->ShortPrint();
1031 PrintF("]\n");
1032 }
1033
1034 JSFunction* candidate = jsfunction_candidates_head_;
1035 JSFunction* next_candidate;
1036 if (candidate == function) {
1037 next_candidate = GetNextCandidate(function);
1038 jsfunction_candidates_head_ = next_candidate;
1039 ClearNextCandidate(function, undefined);
1040 } else {
1041 while (candidate != NULL) {
1042 next_candidate = GetNextCandidate(candidate);
1043
1044 if (next_candidate == function) {
1045 next_candidate = GetNextCandidate(function);
1046 SetNextCandidate(candidate, next_candidate);
1047 ClearNextCandidate(function, undefined);
1048 break;
1049 }
1050
1051 candidate = next_candidate;
1052 }
1053 }
1054 }
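
Editor's note: both EvictCandidate overloads above splice one element out of a singly linked candidate list that is threaded through the objects themselves via the next-candidate field. A stand-alone sketch of that unlink pattern (not part of this patch; Node is a hypothetical type):

    // Splice `target` out of a singly linked list and clear its link field,
    // mirroring the head and predecessor cases handled by EvictCandidate.
    struct Node { Node* next; };

    void Unlink(Node** head, Node* target) {
      if (*head == target) {        // Target is the current list head.
        *head = target->next;
        target->next = nullptr;
        return;
      }
      for (Node* n = *head; n != nullptr; n = n->next) {
        if (n->next == target) {    // Found the predecessor; splice target out.
          n->next = target->next;
          target->next = nullptr;
          return;
        }
      }
    }
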
1055
1056
1057 void CodeFlusher::EvictJSFunctionCandidates() {
1058 JSFunction* candidate = jsfunction_candidates_head_;
1059 JSFunction* next_candidate;
1060 while (candidate != NULL) {
1061 next_candidate = GetNextCandidate(candidate);
1062 EvictCandidate(candidate);
1063 candidate = next_candidate;
1064 }
1065 DCHECK(jsfunction_candidates_head_ == NULL);
1066 }
1067
1068
1069 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1070 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1071 SharedFunctionInfo* next_candidate;
1072 while (candidate != NULL) {
1073 next_candidate = GetNextCandidate(candidate);
1074 EvictCandidate(candidate);
1075 candidate = next_candidate;
1076 }
1077 DCHECK(shared_function_info_candidates_head_ == NULL);
1078 }
1079
1080
1081 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1082 Heap* heap = isolate_->heap();
1083
1084 JSFunction** slot = &jsfunction_candidates_head_;
1085 JSFunction* candidate = jsfunction_candidates_head_;
1086 while (candidate != NULL) {
1087 if (heap->InFromSpace(candidate)) {
1088 v->VisitPointer(reinterpret_cast<Object**>(slot));
1089 }
1090 candidate = GetNextCandidate(*slot);
1091 slot = GetNextCandidateSlot(*slot);
1092 }
1093 }
1094
1095
1096 MarkCompactCollector::~MarkCompactCollector() {
1097 if (code_flusher_ != NULL) {
1098 delete code_flusher_;
1099 code_flusher_ = NULL;
1100 }
1101 }
1102
1103
1104 class MarkCompactMarkingVisitor
1105 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1106 public:
1107 static void Initialize();
1108
1109 INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
1110 MarkObjectByPointer(heap->mark_compact_collector(), object, p);
1111 }
1112
1113 INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
1114 Object** start, Object** end)) {
1115 // Mark all objects pointed to in [start, end).
1116 const int kMinRangeForMarkingRecursion = 64;
1117 if (end - start >= kMinRangeForMarkingRecursion) {
1118 if (VisitUnmarkedObjects(heap, object, start, end)) return;
1119 // We are close to a stack overflow, so just mark the objects.
1120 }
1121 MarkCompactCollector* collector = heap->mark_compact_collector();
1122 for (Object** p = start; p < end; p++) {
1123 MarkObjectByPointer(collector, object, p);
1124 }
1125 }
1126
1127 // Marks the object black and pushes it on the marking stack.
1128 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1129 MarkBit mark = Marking::MarkBitFrom(object);
1130 heap->mark_compact_collector()->MarkObject(object, mark);
1131 }
1132
1133 // Marks the object black without pushing it on the marking stack.
1134 // Returns true if object needed marking and false otherwise.
1135 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1136 MarkBit mark_bit = Marking::MarkBitFrom(object);
1137 if (Marking::IsWhite(mark_bit)) {
1138 heap->mark_compact_collector()->SetMark(object, mark_bit);
1139 return true;
1140 }
1141 return false;
1142 }
1143
1144 // Mark object pointed to by p.
1145 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1146 HeapObject* object, Object** p)) {
1147 if (!(*p)->IsHeapObject()) return;
1148 HeapObject* target_object = HeapObject::cast(*p);
1149 collector->RecordSlot(object, p, target_object);
1150 MarkBit mark = Marking::MarkBitFrom(target_object);
1151 collector->MarkObject(target_object, mark);
1152 }
1153
1154
1155 // Visit an unmarked object.
1156 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1157 HeapObject* obj)) {
1158 #ifdef DEBUG
1159 DCHECK(collector->heap()->Contains(obj));
1160 DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1161 #endif
1162 Map* map = obj->map();
1163 Heap* heap = obj->GetHeap();
1164 MarkBit mark = Marking::MarkBitFrom(obj);
1165 heap->mark_compact_collector()->SetMark(obj, mark);
1166 // Mark the map pointer and the body.
1167 MarkBit map_mark = Marking::MarkBitFrom(map);
1168 heap->mark_compact_collector()->MarkObject(map, map_mark);
1169 IterateBody(map, obj);
1170 }
1171
1172 // Visit all unmarked objects pointed to by [start, end).
1173 // Returns false if the operation fails (lack of stack space).
1174 INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
1175 Object** start, Object** end)) {
1176 // Return false if we are close to the stack limit.
1177 StackLimitCheck check(heap->isolate());
1178 if (check.HasOverflowed()) return false;
1179
1180 MarkCompactCollector* collector = heap->mark_compact_collector();
1181 // Visit the unmarked objects.
1182 for (Object** p = start; p < end; p++) {
1183 Object* o = *p;
1184 if (!o->IsHeapObject()) continue;
1185 collector->RecordSlot(object, p, o);
1186 HeapObject* obj = HeapObject::cast(o);
1187 MarkBit mark = Marking::MarkBitFrom(obj);
1188 if (Marking::IsBlackOrGrey(mark)) continue;
1189 VisitUnmarkedObject(collector, obj);
1190 }
1191 return true;
1192 }
1193
1194 private:
1195 template <int id>
1196 static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1197
1198 // Code flushing support.
1199
1200 static const int kRegExpCodeThreshold = 5;
1201
1202 static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
1203 bool is_one_byte) {
1204 // Make sure that the fixed array is in fact initialized on the RegExp.
1205 // We could potentially trigger a GC when initializing the RegExp.
1206 if (HeapObject::cast(re->data())->map()->instance_type() !=
1207 FIXED_ARRAY_TYPE)
1208 return;
1209
1210 // Make sure this is a RegExp that actually contains code.
1211 if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1212
1213 Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
1214 if (!code->IsSmi() &&
1215 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1216 // Save a copy that can be reinstated if we need the code again.
1217 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
1218
1219 // Saving a copy might create a pointer into a compaction candidate
1220 // that was not observed by the marker. This might happen if the JSRegExp
1221 // data was marked through the compilation cache before the marker reached
1222 // the JSRegExp object.
1223 FixedArray* data = FixedArray::cast(re->data());
1224 Object** slot =
1225 data->data_start() + JSRegExp::saved_code_index(is_one_byte);
1226 heap->mark_compact_collector()->RecordSlot(data, slot, code);
1227
1228 // Set a number in the 0-255 range to guarantee no smi overflow.
1229 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1230 Smi::FromInt(heap->ms_count() & 0xff));
1231 } else if (code->IsSmi()) {
1232 int value = Smi::cast(code)->value();
1233 // The regexp has not been compiled yet or there was a compilation error.
1234 if (value == JSRegExp::kUninitializedValue ||
1235 value == JSRegExp::kCompilationErrorValue) {
1236 return;
1237 }
1238
1239 // Check if we should flush now.
1240 if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
1241 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1242 Smi::FromInt(JSRegExp::kUninitializedValue));
1243 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1244 Smi::FromInt(JSRegExp::kUninitializedValue));
1245 }
1246 }
1247 }
1248
1249
1250 // Works by setting the current sweep_generation (as a smi) in the
1251 // code slot of the RegExp's data array and keeping a copy
1252 // around that can be reinstated if we reuse the RegExp before flushing.
1253 // If we did not use the code for kRegExpCodeThreshold mark-sweep GCs,
1254 // we flush the code.
1255 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1256 Heap* heap = map->GetHeap();
1257 MarkCompactCollector* collector = heap->mark_compact_collector();
1258 if (!collector->is_code_flushing_enabled()) {
1259 VisitJSRegExp(map, object);
1260 return;
1261 }
1262 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1263 // Flush code or set age on both one byte and two byte code.
1264 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1265 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1266 // Visit the fields of the RegExp, including the updated FixedArray.
1267 VisitJSRegExp(map, object);
1268 }
1269 };
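
Editor's note: the RegExp flushing scheme above stores the mark-sweep count (truncated to 0-255) in the code slot when the code is last used, and flushes once kRegExpCodeThreshold collections pass without a refresh. A small stand-alone arithmetic check of that aging rule (editorial sketch, not V8 code):

    #include <cstdio>

    int main() {
      const int kRegExpCodeThreshold = 5;   // Same constant as above.
      int stored = 200 & 0xff;              // Code last used at mark-sweep #200.
      for (int ms_count = 201; ms_count <= 206; ms_count++) {
        // Flush condition used in UpdateRegExpCodeAgeAndFlush.
        bool flush = (stored == ((ms_count - kRegExpCodeThreshold) & 0xff));
        std::printf("ms_count=%d flush=%d\n", ms_count, flush);  // flush at 205.
      }
      return 0;
    }
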
1270
1271
1272 void MarkCompactMarkingVisitor::Initialize() {
1273 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1274
1275 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1276
1277 if (FLAG_track_gc_object_stats) {
1278 ObjectStatsVisitor::Initialize(&table_);
1279 }
1280 }
1281
1282
1283 class CodeMarkingVisitor : public ThreadVisitor {
1284 public:
1285 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1286 : collector_(collector) {}
1287
1288 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1289 collector_->PrepareThreadForCodeFlushing(isolate, top);
1290 }
1291
1292 private:
1293 MarkCompactCollector* collector_;
1294 };
1295
1296
1297 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1298 public:
1299 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1300 : collector_(collector) {}
1301
1302 void VisitPointers(Object** start, Object** end) override {
1303 for (Object** p = start; p < end; p++) VisitPointer(p);
1304 }
1305
1306 void VisitPointer(Object** slot) override {
1307 Object* obj = *slot;
1308 if (obj->IsSharedFunctionInfo()) {
1309 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1310 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1311 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1312 collector_->MarkObject(shared->code(), code_mark);
1313 collector_->MarkObject(shared, shared_mark);
1314 }
1315 }
1316
1317 private:
1318 MarkCompactCollector* collector_;
1319 };
1320
1321
1322 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1323 ThreadLocalTop* top) {
1324 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1325 // Note: for the frame that has a pending lazy deoptimization
1326 // StackFrame::unchecked_code will return a non-optimized code object for
1327 // the outermost function and StackFrame::LookupCode will return
1328 // actual optimized code object.
1329 StackFrame* frame = it.frame();
1330 Code* code = frame->unchecked_code();
1331 MarkBit code_mark = Marking::MarkBitFrom(code);
1332 MarkObject(code, code_mark);
1333 if (frame->is_optimized()) {
1334 MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1335 frame->LookupCode());
1336 }
1337 }
1338 }
1339
1340
1341 void MarkCompactCollector::PrepareForCodeFlushing() {
1342 // If code flushing is disabled, there is no need to prepare for it.
1343 if (!is_code_flushing_enabled()) return;
1344
1345 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1346 // relies on it being marked before any other descriptor array.
1347 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1348 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1349 MarkObject(descriptor_array, descriptor_array_mark);
1350
1351 // Make sure we are not referencing the code from the stack.
1352 DCHECK(this == heap()->mark_compact_collector());
1353 PrepareThreadForCodeFlushing(heap()->isolate(),
1354 heap()->isolate()->thread_local_top());
1355
1356 // Iterate the archived stacks in all threads to check if
1357 // the code is referenced.
1358 CodeMarkingVisitor code_marking_visitor(this);
1359 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1360 &code_marking_visitor);
1361
1362 SharedFunctionInfoMarkingVisitor visitor(this);
1363 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1364 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1365
1366 ProcessMarkingDeque();
1367 }
1368
1369
1370 // Visitor class for marking heap roots.
1371 class RootMarkingVisitor : public ObjectVisitor {
1372 public:
1373 explicit RootMarkingVisitor(Heap* heap)
1374 : collector_(heap->mark_compact_collector()) {}
1375
1376 void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
1377
1378 void VisitPointers(Object** start, Object** end) override {
1379 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1380 }
1381
1382 // Skip the weak next code link in a code object, which is visited in
1383 // ProcessTopOptimizedFrame.
1384 void VisitNextCodeLink(Object** p) override {}
1385
1386 private:
1387 void MarkObjectByPointer(Object** p) {
1388 if (!(*p)->IsHeapObject()) return;
1389
1390 // Replace flat cons strings in place.
1391 HeapObject* object = HeapObject::cast(*p);
1392 MarkBit mark_bit = Marking::MarkBitFrom(object);
1393 if (Marking::IsBlackOrGrey(mark_bit)) return;
1394
1395 Map* map = object->map();
1396 // Mark the object.
1397 collector_->SetMark(object, mark_bit);
1398
1399 // Mark the map pointer and body, and push them on the marking stack.
1400 MarkBit map_mark = Marking::MarkBitFrom(map);
1401 collector_->MarkObject(map, map_mark);
1402 MarkCompactMarkingVisitor::IterateBody(map, object);
1403
1404 // Mark all the objects reachable from the map and body. May leave
1405 // overflowed objects in the heap.
1406 collector_->EmptyMarkingDeque();
1407 }
1408
1409 MarkCompactCollector* collector_;
1410 };
1411
1412
1413 // Helper class for pruning the string table.
1414 template <bool finalize_external_strings>
1415 class StringTableCleaner : public ObjectVisitor {
1416 public:
1417 explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
1418
1419 void VisitPointers(Object** start, Object** end) override {
1420 // Visit all HeapObject pointers in [start, end).
1421 for (Object** p = start; p < end; p++) {
1422 Object* o = *p;
1423 if (o->IsHeapObject() &&
1424 Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
1425 if (finalize_external_strings) {
1426 DCHECK(o->IsExternalString());
1427 heap_->FinalizeExternalString(String::cast(*p));
1428 } else {
1429 pointers_removed_++;
1430 }
1431 // Set the entry to the_hole_value (as deleted).
1432 *p = heap_->the_hole_value();
1433 }
1434 }
1435 }
1436
1437 int PointersRemoved() {
1438 DCHECK(!finalize_external_strings);
1439 return pointers_removed_;
1440 }
1441
1442 private:
1443 Heap* heap_;
1444 int pointers_removed_;
1445 };
1446
1447
1448 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1449 typedef StringTableCleaner<true> ExternalStringTableCleaner;
1450
1451
1452 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1453 // are retained.
1454 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1455 public:
1456 virtual Object* RetainAs(Object* object) {
1457 if (Marking::IsBlackOrGrey(
1458 Marking::MarkBitFrom(HeapObject::cast(object)))) {
1459 return object;
1460 } else if (object->IsAllocationSite() &&
1461 !(AllocationSite::cast(object)->IsZombie())) {
1462 // "dead" AllocationSites need to live long enough for a traversal of new
1463 // space. These sites get a one-time reprieve.
1464 AllocationSite* site = AllocationSite::cast(object);
1465 site->MarkZombie();
1466 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1467 return object;
1468 } else {
1469 return NULL;
1470 }
1471 }
1472 };
1473
1474
1475 // Fill the marking stack with overflowed objects returned by the given
1476 // iterator. Stop when the marking stack is filled or the end of the space
1477 // is reached, whichever comes first.
1478 template <class T>
1479 void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
1480 // The caller should ensure that the marking stack is initially not full,
1481 // so that we don't waste effort pointlessly scanning for objects.
1482 DCHECK(!marking_deque()->IsFull());
1483
1484 Map* filler_map = heap()->one_pointer_filler_map();
1485 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1486 MarkBit markbit = Marking::MarkBitFrom(object);
1487 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1488 Marking::GreyToBlack(markbit);
1489 PushBlack(object);
1490 if (marking_deque()->IsFull()) return;
1491 }
1492 }
1493 }
1494
1495
1496 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1497
1498
1499 void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1500 DCHECK(!marking_deque()->IsFull());
1501 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1502 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1503 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1504 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1505
1506 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1507 Address cell_base = it.CurrentCellBase();
1508 MarkBit::CellType* cell = it.CurrentCell();
1509
1510 const MarkBit::CellType current_cell = *cell;
1511 if (current_cell == 0) continue;
1512
1513 MarkBit::CellType grey_objects;
1514 if (it.HasNext()) {
1515 const MarkBit::CellType next_cell = *(cell + 1);
1516 grey_objects = current_cell & ((current_cell >> 1) |
1517 (next_cell << (Bitmap::kBitsPerCell - 1)));
1518 } else {
1519 grey_objects = current_cell & (current_cell >> 1);
1520 }
1521
1522 int offset = 0;
1523 while (grey_objects != 0) {
1524 int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
1525 grey_objects >>= trailing_zeros;
1526 offset += trailing_zeros;
1527 MarkBit markbit(cell, 1 << offset);
1528 DCHECK(Marking::IsGrey(markbit));
1529 Marking::GreyToBlack(markbit);
1530 Address addr = cell_base + offset * kPointerSize;
1531 HeapObject* object = HeapObject::FromAddress(addr);
1532 PushBlack(object);
1533 if (marking_deque()->IsFull()) return;
1534 offset += 2;
1535 grey_objects >>= 2;
1536 }
1537
1538 grey_objects >>= (Bitmap::kBitsPerCell - 1);
1539 }
1540 }
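
Editor's note: the grey-object scan above relies on the mark-bit pair encoding asserted by the DCHECKs at the top of DiscoverGreyObjectsOnPage (white "00", black "10", grey "11"). A pair starting at bit i is grey exactly when bit i and bit i+1 are both set, which is what current_cell & (current_cell >> 1) computes. A minimal stand-alone illustration; std::countr_zero from C++20 <bit> stands in for base::bits::CountTrailingZeros32:

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Object at word offset 0 is black (bits 0,1 = 1,0); object at word
      // offset 4 is grey (bits 4,5 = 1,1); everything else is white.
      uint32_t cell = (1u << 0) | (1u << 4) | (1u << 5);
      uint32_t grey = cell & (cell >> 1);   // Bit i set iff the pair at i is "11".
      while (grey != 0) {
        int offset = std::countr_zero(grey);
        std::printf("grey object at word offset %d\n", offset);  // Prints 4.
        grey &= grey - 1;                   // Clear the lowest set bit.
      }
      return 0;
    }
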
1541
1542
1543 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
1544 NewSpace* new_space, NewSpacePage* p) {
1545 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1546 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1547 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1548 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1549
1550 MarkBit::CellType* cells = p->markbits()->cells();
1551 int survivors_size = 0;
1552
1553 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1554 Address cell_base = it.CurrentCellBase();
1555 MarkBit::CellType* cell = it.CurrentCell();
1556
1557 MarkBit::CellType current_cell = *cell;
1558 if (current_cell == 0) continue;
1559
1560 int offset = 0;
1561 while (current_cell != 0) {
1562 int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
1563 current_cell >>= trailing_zeros;
1564 offset += trailing_zeros;
1565 Address address = cell_base + offset * kPointerSize;
1566 HeapObject* object = HeapObject::FromAddress(address);
1567 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
1568
1569 int size = object->Size();
1570 survivors_size += size;
1571
1572 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
1573
1574 offset += 2;
1575 current_cell >>= 2;
1576
1577 // TODO(hpayer): Refactor EvacuateObject and call this function instead.
1578 if (heap()->ShouldBePromoted(object->address(), size) &&
1579 TryPromoteObject(object, size)) {
1580 continue;
1581 }
1582
1583 AllocationAlignment alignment = object->RequiredAlignment();
1584 AllocationResult allocation = new_space->AllocateRaw(size, alignment);
1585 if (allocation.IsRetry()) {
1586 if (!new_space->AddFreshPage()) {
1587 // Shouldn't happen. We are sweeping linearly, and to-space
1588 // has the same number of pages as from-space, so there is
1589 // always room unless we are in an OOM situation.
1590 FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
1591 }
1592 allocation = new_space->AllocateRaw(size, alignment);
1593 DCHECK(!allocation.IsRetry());
1594 }
1595 Object* target = allocation.ToObjectChecked();
1596
1597 MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
1598 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1599 heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1600 }
1601 heap()->IncrementSemiSpaceCopiedObjectSize(size);
1602 }
1603 *cells = 0;
1604 }
1605 return survivors_size;
1606 }
1607
1608
1609 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
1610 PageIterator it(space);
1611 while (it.has_next()) {
1612 Page* p = it.next();
1613 DiscoverGreyObjectsOnPage(p);
1614 if (marking_deque()->IsFull()) return;
1615 }
1616 }
1617
1618
1619 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1620 NewSpace* space = heap()->new_space();
1621 NewSpacePageIterator it(space->bottom(), space->top());
1622 while (it.has_next()) {
1623 NewSpacePage* page = it.next();
1624 DiscoverGreyObjectsOnPage(page);
1625 if (marking_deque()->IsFull()) return;
1626 }
1627 }
1628
1629
1630 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1631 Object* o = *p;
1632 if (!o->IsHeapObject()) return false;
1633 HeapObject* heap_object = HeapObject::cast(o);
1634 MarkBit mark = Marking::MarkBitFrom(heap_object);
1635 return Marking::IsWhite(mark);
1636 }
1637
1638
1639 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1640 Object** p) {
1641 Object* o = *p;
1642 DCHECK(o->IsHeapObject());
1643 HeapObject* heap_object = HeapObject::cast(o);
1644 MarkBit mark = Marking::MarkBitFrom(heap_object);
1645 return Marking::IsWhite(mark);
1646 }
1647
1648
1649 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1650 StringTable* string_table = heap()->string_table();
1651 // Mark the string table itself.
1652 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
1653 if (Marking::IsWhite(string_table_mark)) {
1654 // String table could have already been marked by visiting the handles list.
1655 SetMark(string_table, string_table_mark);
1656 }
1657 // Explicitly mark the prefix.
1658 string_table->IteratePrefix(visitor);
1659 ProcessMarkingDeque();
1660 }
1661
1662
1663 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
1664 MarkBit mark_bit = Marking::MarkBitFrom(site);
1665 SetMark(site, mark_bit);
1666 }
1667
1668
1669 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1670 // Mark the heap roots including global variables, stack variables,
1671 // etc., and all objects reachable from them.
1672 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1673
1674 // Handle the string table specially.
1675 MarkStringTable(visitor);
1676
1677 // There may be overflowed objects in the heap. Visit them now.
1678 while (marking_deque_.overflowed()) {
1679 RefillMarkingDeque();
1680 EmptyMarkingDeque();
1681 }
1682 }
1683
1684
1685 void MarkCompactCollector::MarkImplicitRefGroups(
1686 MarkObjectFunction mark_object) {
1687 List<ImplicitRefGroup*>* ref_groups =
1688 isolate()->global_handles()->implicit_ref_groups();
1689
1690 int last = 0;
1691 for (int i = 0; i < ref_groups->length(); i++) {
1692 ImplicitRefGroup* entry = ref_groups->at(i);
1693 DCHECK(entry != NULL);
1694
1695 if (!IsMarked(*entry->parent)) {
1696 (*ref_groups)[last++] = entry;
1697 continue;
1698 }
1699
1700 Object*** children = entry->children;
1701 // A parent object is marked, so mark all child heap objects.
1702 for (size_t j = 0; j < entry->length; ++j) {
1703 if ((*children[j])->IsHeapObject()) {
1704 mark_object(heap(), HeapObject::cast(*children[j]));
1705 }
1706 }
1707
1708 // Once the entire group has been marked, dispose it because it's
1709 // not needed anymore.
1710 delete entry;
1711 }
1712 ref_groups->Rewind(last);
1713 }
1714
1715
1716 // Mark all objects reachable from the objects on the marking stack.
1717 // Before: the marking stack contains zero or more heap object pointers.
1718 // After: the marking stack is empty, and all objects reachable from the
1719 // marking stack have been marked, or are overflowed in the heap.
1720 void MarkCompactCollector::EmptyMarkingDeque() {
1721 Map* filler_map = heap_->one_pointer_filler_map();
1722 while (!marking_deque_.IsEmpty()) {
1723 HeapObject* object = marking_deque_.Pop();
1724 // Explicitly skip one word fillers. Incremental markbit patterns are
1725 // correct only for objects that occupy at least two words.
1726 Map* map = object->map();
1727 if (map == filler_map) continue;
1728
1729 DCHECK(object->IsHeapObject());
1730 DCHECK(heap()->Contains(object));
1731 DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
1732
1733 MarkBit map_mark = Marking::MarkBitFrom(map);
1734 MarkObject(map, map_mark);
1735
1736 MarkCompactMarkingVisitor::IterateBody(map, object);
1737 }
1738 }
1739
1740
1741 // Sweep the heap for overflowed objects, clear their overflow bits, and
1742 // push them on the marking stack. Stop early if the marking stack fills
1743 // before sweeping completes. If sweeping completes, there are no remaining
1744 // overflowed objects in the heap so the overflow flag on the marking stack
1745 // is cleared.
1746 void MarkCompactCollector::RefillMarkingDeque() {
1747 isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
1748 DCHECK(marking_deque_.overflowed());
1749
1750 DiscoverGreyObjectsInNewSpace();
1751 if (marking_deque_.IsFull()) return;
1752
1753 DiscoverGreyObjectsInSpace(heap()->old_space());
1754 if (marking_deque_.IsFull()) return;
1755
1756 DiscoverGreyObjectsInSpace(heap()->code_space());
1757 if (marking_deque_.IsFull()) return;
1758
1759 DiscoverGreyObjectsInSpace(heap()->map_space());
1760 if (marking_deque_.IsFull()) return;
1761
1762 LargeObjectIterator lo_it(heap()->lo_space());
1763 DiscoverGreyObjectsWithIterator(&lo_it);
1764 if (marking_deque_.IsFull()) return;
1765
1766 marking_deque_.ClearOverflowed();
1767 }
1768
1769
1770 // Mark all objects reachable (transitively) from objects on the marking
1771 // stack. Before: the marking stack contains zero or more heap object
1772 // pointers. After: the marking stack is empty and there are no overflowed
1773 // objects in the heap.
1774 void MarkCompactCollector::ProcessMarkingDeque() {
1775 EmptyMarkingDeque();
1776 while (marking_deque_.overflowed()) {
1777 RefillMarkingDeque();
1778 EmptyMarkingDeque();
1779 }
1780 }
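
Editor's note: as the comments above describe, the marking deque is a bounded worklist. When it fills up, further objects are left discoverable in the heap, the deque's overflow flag is set, and RefillMarkingDeque later re-scans the spaces for grey objects. A toy stand-alone version of that push-with-overflow contract (editorial sketch, simplified to ints; not V8's MarkingDeque):

    #include <cstddef>
    #include <vector>

    // Push() refuses new work when the buffer is full and records the
    // overflow; the caller leaves the object discoverable in the heap and a
    // later refill pass re-scans for it.
    class BoundedWorklist {
     public:
      explicit BoundedWorklist(size_t capacity) : capacity_(capacity) {}

      bool Push(int value) {
        if (items_.size() == capacity_) {
          overflowed_ = true;
          return false;
        }
        items_.push_back(value);
        return true;
      }

      bool Pop(int* out) {
        if (items_.empty()) return false;
        *out = items_.back();
        items_.pop_back();
        return true;
      }

      bool overflowed() const { return overflowed_; }
      void ClearOverflowed() { overflowed_ = false; }

     private:
      size_t capacity_;
      bool overflowed_ = false;
      std::vector<int> items_;
    };
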
1781
1782
1783 // Mark all objects reachable (transitively) from objects on the marking
1784 // stack including references only considered in the atomic marking pause.
1785 void MarkCompactCollector::ProcessEphemeralMarking(
1786 ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
1787 bool work_to_do = true;
1788 DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
1789 while (work_to_do) {
1790 if (!only_process_harmony_weak_collections) {
1791 isolate()->global_handles()->IterateObjectGroups(
1792 visitor, &IsUnmarkedHeapObjectWithHeap);
1793 MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
1794 }
1795 ProcessWeakCollections();
1796 work_to_do = !marking_deque_.IsEmpty();
1797 ProcessMarkingDeque();
1798 }
1799 }
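
Editor's note: ProcessEphemeralMarking iterates to a fixed point because marking a weak-collection key can make its value live, which in turn can make further keys live. A toy fixed-point loop over a key-to-value table in the same spirit (editorial sketch, not the V8 data structures):

    #include <set>
    #include <utility>
    #include <vector>

    // Keep re-scanning key->value pairs until a pass marks nothing new.
    std::set<int> MarkEphemerons(std::set<int> marked,
                                 const std::vector<std::pair<int, int>>& table) {
      bool work_to_do = true;
      while (work_to_do) {
        work_to_do = false;
        for (const auto& kv : table) {
          // A value becomes live only if its key is already live.
          if (marked.count(kv.first) && !marked.count(kv.second)) {
            marked.insert(kv.second);
            work_to_do = true;
          }
        }
      }
      return marked;
    }
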
1800
1801
1802 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1803 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1804 !it.done(); it.Advance()) {
1805 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
1806 return;
1807 }
1808 if (it.frame()->type() == StackFrame::OPTIMIZED) {
1809 Code* code = it.frame()->LookupCode();
1810 if (!code->CanDeoptAt(it.frame()->pc())) {
1811 code->CodeIterateBody(visitor);
1812 }
1813 ProcessMarkingDeque();
1814 return;
1815 }
1816 }
1817 }
1818
1819
1820 void MarkCompactCollector::RetainMaps() {
1821 if (heap()->ShouldReduceMemory() || heap()->ShouldAbortIncrementalMarking() ||
1822 FLAG_retain_maps_for_n_gc == 0) {
1823 // Do not retain dead maps if flag disables it or there is
1824 // - memory pressure (reduce_memory_footprint_),
1825 // - GC is requested by tests or dev-tools (abort_incremental_marking_).
1826 return;
1827 }
1828
1829 ArrayList* retained_maps = heap()->retained_maps();
1830 int length = retained_maps->Length();
1831 int new_length = 0;
1832 for (int i = 0; i < length; i += 2) {
1833 DCHECK(retained_maps->Get(i)->IsWeakCell());
1834 WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
1835 if (cell->cleared()) continue;
1836 int age = Smi::cast(retained_maps->Get(i + 1))->value();
1837 int new_age;
1838 Map* map = Map::cast(cell->value());
1839 MarkBit map_mark = Marking::MarkBitFrom(map);
1840 if (Marking::IsWhite(map_mark)) {
1841 if (age == 0) {
1842 // The map has aged. Do not retain this map.
1843 continue;
1844 }
1845 Object* constructor = map->GetConstructor();
1846 if (!constructor->IsHeapObject() || Marking::IsWhite(Marking::MarkBitFrom(
1847 HeapObject::cast(constructor)))) {
1848 // The constructor is dead, no new objects with this map can
1849 // be created. Do not retain this map.
1850 continue;
1851 }
1852 Object* prototype = map->prototype();
1853 if (prototype->IsHeapObject() &&
1854 Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
1855 // The prototype is not marked, age the map.
1856 new_age = age - 1;
1857 } else {
1858 // The prototype and the constructor are marked; this map keeps only the
1859 // transition tree alive, not JSObjects. Do not age the map.
1860 new_age = age;
1861 }
1862 MarkObject(map, map_mark);
1863 } else {
1864 new_age = FLAG_retain_maps_for_n_gc;
1865 }
1866 if (i != new_length) {
1867 retained_maps->Set(new_length, cell);
1868 Object** slot = retained_maps->Slot(new_length);
1869 RecordSlot(retained_maps, slot, cell);
1870 retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
1871 } else if (new_age != age) {
1872 retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
1873 }
1874 new_length += 2;
1875 }
1876 Object* undefined = heap()->undefined_value();
1877 for (int i = new_length; i < length; i++) {
1878 retained_maps->Clear(i, undefined);
1879 }
1880 if (new_length != length) retained_maps->SetLength(new_length);
1881 ProcessMarkingDeque();
1882 }
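
Editor's note: RetainMaps above boils down to a small aging decision per retained map. A condensed stand-alone restatement of that decision (illustrative names, not V8 API):

    enum class Liveness { kDead, kLive };

    // Returns the new age, or -1 if the map should be dropped from the
    // retained-maps list.
    int RetainDecision(Liveness map, Liveness constructor, Liveness prototype,
                       int age, int retain_for_n_gc) {
      if (map == Liveness::kLive) return retain_for_n_gc;  // Reset the age.
      if (age == 0) return -1;                             // Aged out.
      if (constructor == Liveness::kDead) return -1;       // No new instances.
      // Dead prototype: age the map; live prototype: keep the current age.
      return (prototype == Liveness::kDead) ? age - 1 : age;
    }
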
1883
1884
1885 void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
1886 DCHECK(!marking_deque_.in_use());
1887 if (marking_deque_memory_ == NULL) {
1888 marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
1889 marking_deque_memory_committed_ = 0;
1890 }
1891 if (marking_deque_memory_ == NULL) {
1892 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
1893 }
1894 }
1895
1896
1897 void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
1898 // If the marking deque is too small, we try to allocate a bigger one.
1899 // If that fails, make do with a smaller one.
1900 CHECK(!marking_deque_.in_use());
1901 for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
1902 base::VirtualMemory* memory = marking_deque_memory_;
1903 size_t currently_committed = marking_deque_memory_committed_;
1904
1905 if (currently_committed == size) return;
1906
1907 if (currently_committed > size) {
1908 bool success = marking_deque_memory_->Uncommit(
1909 reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
1910 currently_committed - size);
1911 if (success) {
1912 marking_deque_memory_committed_ = size;
1913 return;
1914 }
1915 UNREACHABLE();
1916 }
1917
1918 bool success = memory->Commit(
1919 reinterpret_cast<Address>(memory->address()) + currently_committed,
1920 size - currently_committed,
1921 false); // Not executable.
1922 if (success) {
1923 marking_deque_memory_committed_ = size;
1924 return;
1925 }
1926 }
1927 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
1928 }
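
Editor's note: EnsureMarkingDequeIsCommitted tries the requested size first and halves on failure down to a minimum, as its leading comment says. A stand-alone sketch of that fallback loop; TryCommit is a hypothetical stand-in for the base::VirtualMemory commit call:

    #include <cstddef>
    #include <cstdio>

    // Pretend the OS refuses commits above 1 MB (assumption for the sketch).
    static bool TryCommit(size_t size) { return size <= (1u << 20); }

    // Try the requested size, then keep halving until a commit succeeds or
    // the minimum size is passed.
    static bool CommitWithFallback(size_t max_size, size_t min_size) {
      for (size_t size = max_size; size >= min_size; size >>= 1) {
        if (TryCommit(size)) {
          std::printf("committed %zu bytes\n", size);
          return true;
        }
      }
      return false;  // Caller treats this as a fatal OOM.
    }

    int main() { return CommitWithFallback(4u << 20, 64u << 10) ? 0 : 1; }
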
1929
1930
1931 void MarkCompactCollector::InitializeMarkingDeque() {
1932 DCHECK(!marking_deque_.in_use());
1933 DCHECK(marking_deque_memory_committed_ > 0);
1934 Address addr = static_cast<Address>(marking_deque_memory_->address());
1935 size_t size = marking_deque_memory_committed_;
1936 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
1937 marking_deque_.Initialize(addr, addr + size);
1938 }
1939
1940
1941 void MarkingDeque::Initialize(Address low, Address high) {
1942 DCHECK(!in_use_);
1943 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
1944 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
1945 array_ = obj_low;
1946 mask_ = base::bits::RoundDownToPowerOfTwo32(
1947 static_cast<uint32_t>(obj_high - obj_low)) -
1948 1;
1949 top_ = bottom_ = 0;
1950 overflowed_ = false;
1951 in_use_ = true;
1952 }
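
Editor's note: Initialize sizes the deque to the largest power of two that fits in the committed region and stores size - 1 as mask_, so indices wrap with a bitwise AND instead of a modulo. A small worked example of that mask arithmetic; RoundDownToPowerOfTwo32 below is an assumed helper mirroring base::bits::RoundDownToPowerOfTwo32:

    #include <cstdint>
    #include <cstdio>

    static uint32_t RoundDownToPowerOfTwo32(uint32_t x) {
      uint32_t p = 1;
      while (p * 2 <= x && p * 2 != 0) p *= 2;   // Largest power of two <= x.
      return x == 0 ? 0 : p;
    }

    int main() {
      uint32_t slots = 100;                                   // Committed slots.
      uint32_t mask = RoundDownToPowerOfTwo32(slots) - 1;     // 64 - 1 = 63.
      uint32_t top = 0;
      for (int i = 0; i < 70; i++) top = (top + 1) & mask;    // Wraps at 64.
      std::printf("mask=%u top=%u\n", mask, top);             // mask=63 top=6.
      return 0;
    }
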
1953
1954
1955 void MarkingDeque::Uninitialize(bool aborting) {
1956 if (!aborting) {
1957 DCHECK(IsEmpty());
1958 DCHECK(!overflowed_);
1959 }
1960 DCHECK(in_use_);
1961 top_ = bottom_ = 0xdecbad;
1962 in_use_ = false;
1963 }
1964
1965
1966 void MarkCompactCollector::MarkLiveObjects() {
1967 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
1968 double start_time = 0.0;
1969 if (FLAG_print_cumulative_gc_stat) {
1970 start_time = base::OS::TimeCurrentMillis();
1971 }
1972 // The recursive GC marker detects when it is nearing stack overflow,
1973 // and switches to a different marking system. JS interrupts interfere
1974 // with the C stack limit check.
1975 PostponeInterruptsScope postpone(isolate());
1976
1977 {
1978 GCTracer::Scope gc_scope(heap()->tracer(),
1979 GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
1980 IncrementalMarking* incremental_marking = heap_->incremental_marking();
1981 if (was_marked_incrementally_) {
1982 incremental_marking->Finalize();
1983 } else {
1984 // Abort any pending incremental activities e.g. incremental sweeping.
1985 incremental_marking->Stop();
1986 if (marking_deque_.in_use()) {
1987 marking_deque_.Uninitialize(true);
1988 }
1989 }
1990 }
1991
1992 #ifdef DEBUG
1993 DCHECK(state_ == PREPARE_GC);
1994 state_ = MARK_LIVE_OBJECTS;
1995 #endif
1996
1997 EnsureMarkingDequeIsCommittedAndInitialize(
1998 MarkCompactCollector::kMaxMarkingDequeSize);
1999
2000 {
2001 GCTracer::Scope gc_scope(heap()->tracer(),
2002 GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
2003 PrepareForCodeFlushing();
2004 }
2005
2006 RootMarkingVisitor root_visitor(heap());
2007
2008 {
2009 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOT);
2010 MarkRoots(&root_visitor);
2011 }
2012
2013 {
2014 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_TOPOPT);
2015 ProcessTopOptimizedFrame(&root_visitor);
2016 }
2017
2018 // Retaining dying maps should happen before or during ephemeral marking
2019 // because a map could keep the key of an ephemeron alive. Note that map
2020 // aging is imprecise: maps that are kept alive only by ephemerons will age.
2021 {
2022 GCTracer::Scope gc_scope(heap()->tracer(),
2023 GCTracer::Scope::MC_MARK_RETAIN_MAPS);
2024 RetainMaps();
2025 }
2026
2027 {
2028 GCTracer::Scope gc_scope(heap()->tracer(),
2029 GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
2030
2031 // The objects reachable from the roots are marked, yet unreachable
2032 // objects are unmarked. Mark objects reachable due to host
2033 // application specific logic or through Harmony weak maps.
2034 ProcessEphemeralMarking(&root_visitor, false);
2035
2036 // The objects reachable from the roots, weak maps or object groups
2037 // are marked. Objects pointed to only by weak global handles cannot be
2038 // immediately reclaimed. Instead, we have to mark them as pending and mark
2039 // objects reachable from them.
2040 //
2041 // First we identify nonlive weak handles and mark them as pending
2042 // destruction.
2043 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2044 &IsUnmarkedHeapObject);
2045 // Then we mark the objects.
2046 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2047 ProcessMarkingDeque();
2048
2049 // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2050 // the weak roots we just marked as pending destruction.
2051 //
2052 // We only process harmony collections, as all object groups have been fully
2053 // processed and no weakly reachable node can discover new object groups.
2054 ProcessEphemeralMarking(&root_visitor, true);
2055 }
2056
2057 AfterMarking();
2058
2059 if (FLAG_print_cumulative_gc_stat) {
2060 heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
2061 }
2062 }
2063
2064
2065 void MarkCompactCollector::AfterMarking() {
2066 {
2067 GCTracer::Scope gc_scope(heap()->tracer(),
2068 GCTracer::Scope::MC_MARK_STRING_TABLE);
2069
2070 // Prune the string table removing all strings only pointed to by the
2071 // string table. Cannot use string_table() here because the string
2072 // table is marked.
2073 StringTable* string_table = heap()->string_table();
2074 InternalizedStringTableCleaner internalized_visitor(heap());
2075 string_table->IterateElements(&internalized_visitor);
2076 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2077
2078 ExternalStringTableCleaner external_visitor(heap());
2079 heap()->external_string_table_.Iterate(&external_visitor);
2080 heap()->external_string_table_.CleanUp();
2081 }
2082
2083 {
2084 GCTracer::Scope gc_scope(heap()->tracer(),
2085 GCTracer::Scope::MC_MARK_WEAK_REFERENCES);
2086
2087 // Process the weak references.
2088 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2089 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2090 }
2091
2092 {
2093 GCTracer::Scope gc_scope(heap()->tracer(),
2094 GCTracer::Scope::MC_MARK_GLOBAL_HANDLES);
2095
2096 // Remove object groups after marking phase.
2097 heap()->isolate()->global_handles()->RemoveObjectGroups();
2098 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2099 }
2100
2101 // Flush code from collected candidates.
2102 if (is_code_flushing_enabled()) {
2103 GCTracer::Scope gc_scope(heap()->tracer(),
2104 GCTracer::Scope::MC_MARK_CODE_FLUSH);
2105 code_flusher_->ProcessCandidates();
2106 }
2107
2108 // Process and clear all optimized code maps.
2109 if (!FLAG_flush_optimized_code_cache) {
2110 GCTracer::Scope gc_scope(heap()->tracer(),
2111 GCTracer::Scope::MC_MARK_OPTIMIZED_CODE_MAPS);
2112 ProcessAndClearOptimizedCodeMaps();
2113 }
2114
2115 if (FLAG_track_gc_object_stats) {
2116 if (FLAG_trace_gc_object_stats) {
2117 heap()->object_stats_->TraceObjectStats();
2118 }
2119 heap()->object_stats_->CheckpointObjectStats();
2120 }
2121 }
2122
2123
2124 void MarkCompactCollector::ProcessAndClearOptimizedCodeMaps() {
2125 SharedFunctionInfo::Iterator iterator(isolate());
2126 while (SharedFunctionInfo* shared = iterator.Next()) {
2127 if (shared->optimized_code_map()->IsSmi()) continue;
2128
2129 // Process context-dependent entries in the optimized code map.
2130 FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
2131 int new_length = SharedFunctionInfo::kEntriesStart;
2132 int old_length = code_map->length();
2133 for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
2134 i += SharedFunctionInfo::kEntryLength) {
2135 // Each entry contains [ context, code, literals, ast-id ] as fields.
2136 STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
2137 Context* context =
2138 Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
2139 HeapObject* code = HeapObject::cast(
2140 code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
2141 FixedArray* literals = FixedArray::cast(
2142 code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
2143 Smi* ast_id =
2144 Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
2145 if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
2146 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
2147 if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
2148 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
2149 if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
2150 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
2151 // Move every slot in the entry and record slots when needed.
2152 code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
2153 code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
2154 code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
2155 code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
2156 Object** code_slot = code_map->RawFieldOfElementAt(
2157 new_length + SharedFunctionInfo::kCachedCodeOffset);
2158 RecordSlot(code_map, code_slot, *code_slot);
2159 Object** context_slot = code_map->RawFieldOfElementAt(
2160 new_length + SharedFunctionInfo::kContextOffset);
2161 RecordSlot(code_map, context_slot, *context_slot);
2162 Object** literals_slot = code_map->RawFieldOfElementAt(
2163 new_length + SharedFunctionInfo::kLiteralsOffset);
2164 RecordSlot(code_map, literals_slot, *literals_slot);
2165 new_length += SharedFunctionInfo::kEntryLength;
2166 }
2167
2168 // Process context-independent entry in the optimized code map.
2169 Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
2170 if (shared_object->IsCode()) {
2171 Code* shared_code = Code::cast(shared_object);
2172 if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
2173 code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
2174 } else {
2175 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
2176 Object** slot =
2177 code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
2178 RecordSlot(code_map, slot, *slot);
2179 }
2180 }
2181
2182 // Trim the optimized code map if entries have been removed.
2183 if (new_length < old_length) {
2184 shared->TrimOptimizedCodeMap(old_length - new_length);
2185 }
2186 }
2187 }
2188

NEW (right column):

987 void CodeFlusher::ProcessOptimizedCodeMaps() {
988 STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
989
990 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
991 SharedFunctionInfo* next_holder;
992
993 while (holder != NULL) {
994 next_holder = GetNextCodeMap(holder);
995 ClearNextCodeMap(holder);
996
997 // Process context-dependent entries in the optimized code map.
998 FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
999 int new_length = SharedFunctionInfo::kEntriesStart;
1000 int old_length = code_map->length();
1001 for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
1002 i += SharedFunctionInfo::kEntryLength) {
1003 // Each entry contains [ context, code, literals, ast-id ] as fields.
1004 STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
1005 Context* context =
1006 Context::cast(code_map->get(i + SharedFunctionInfo::kContextOffset));
1007 HeapObject* code = HeapObject::cast(
1008 code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
1009 FixedArray* literals = FixedArray::cast(
1010 code_map->get(i + SharedFunctionInfo::kLiteralsOffset));
1011 Smi* ast_id =
1012 Smi::cast(code_map->get(i + SharedFunctionInfo::kOsrAstIdOffset));
1013 if (Marking::IsWhite(Marking::MarkBitFrom(context))) continue;
1014 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(context)));
1015 if (Marking::IsWhite(Marking::MarkBitFrom(code))) continue;
1016 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(code)));
1017 if (Marking::IsWhite(Marking::MarkBitFrom(literals))) continue;
1018 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(literals)));
1019 // Move every slot in the entry and record slots when needed.
1020 code_map->set(new_length + SharedFunctionInfo::kCachedCodeOffset, code);
1021 code_map->set(new_length + SharedFunctionInfo::kContextOffset, context);
1022 code_map->set(new_length + SharedFunctionInfo::kLiteralsOffset, literals);
1023 code_map->set(new_length + SharedFunctionInfo::kOsrAstIdOffset, ast_id);
1024 Object** code_slot = code_map->RawFieldOfElementAt(
1025 new_length + SharedFunctionInfo::kCachedCodeOffset);
1026 isolate_->heap()->mark_compact_collector()->RecordSlot(
1027 code_map, code_slot, *code_slot);
1028 Object** context_slot = code_map->RawFieldOfElementAt(
1029 new_length + SharedFunctionInfo::kContextOffset);
1030 isolate_->heap()->mark_compact_collector()->RecordSlot(
1031 code_map, context_slot, *context_slot);
1032 Object** literals_slot = code_map->RawFieldOfElementAt(
1033 new_length + SharedFunctionInfo::kLiteralsOffset);
1034 isolate_->heap()->mark_compact_collector()->RecordSlot(
1035 code_map, literals_slot, *literals_slot);
1036 new_length += SharedFunctionInfo::kEntryLength;
1037 }
1038
1039 // Process context-independent entry in the optimized code map.
1040 Object* shared_object = code_map->get(SharedFunctionInfo::kSharedCodeIndex);
1041 if (shared_object->IsCode()) {
1042 Code* shared_code = Code::cast(shared_object);
1043 if (Marking::IsWhite(Marking::MarkBitFrom(shared_code))) {
1044 code_map->set_undefined(SharedFunctionInfo::kSharedCodeIndex);
1045 } else {
1046 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(shared_code)));
1047 Object** slot =
1048 code_map->RawFieldOfElementAt(SharedFunctionInfo::kSharedCodeIndex);
1049 isolate_->heap()->mark_compact_collector()->RecordSlot(code_map, slot,
1050 *slot);
1051 }
1052 }
1053
1054 // Trim the optimized code map if entries have been removed.
1055 if (new_length < old_length) {
1056 holder->TrimOptimizedCodeMap(old_length - new_length);
1057 }
1058
1059 holder = next_holder;
1060 }
1061
1062 optimized_code_map_holder_head_ = NULL;
1063 }
1064
1065
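Editor's note: both versions of the optimized-code-map processing above compact a flat array of fixed-length [context, code, literals, ast-id] entries in place, sliding live entries down over dead ones and trimming the tail. A generic stand-alone sketch of that pattern (kEntriesStart and kEntryLength values are assumed here for illustration):

    #include <vector>

    static const int kEntriesStart = 1;   // Assumed header size for the sketch.
    static const int kEntryLength = 4;    // [context, code, literals, ast-id]

    // Keep only entries that pass `is_live`, sliding survivors toward the
    // front, then trim the array; returns how many slots were removed.
    template <typename Pred>
    int CompactEntries(std::vector<int>* map, Pred is_live) {
      int new_length = kEntriesStart;
      int old_length = static_cast<int>(map->size());
      for (int i = kEntriesStart; i < old_length; i += kEntryLength) {
        if (!is_live(&(*map)[i])) continue;          // Drop a dead entry.
        for (int j = 0; j < kEntryLength; j++) {     // Slide the entry down.
          (*map)[new_length + j] = (*map)[i + j];
        }
        new_length += kEntryLength;
      }
      map->resize(new_length);                       // Trim removed entries.
      return old_length - new_length;
    }
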
1066 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1067 // Make sure previous flushing decisions are revisited.
1068 isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
1069
1070 if (FLAG_trace_code_flushing) {
1071 PrintF("[code-flushing abandons function-info: ");
1072 shared_info->ShortPrint();
1073 PrintF("]\n");
1074 }
1075
1076 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1077 SharedFunctionInfo* next_candidate;
1078 if (candidate == shared_info) {
1079 next_candidate = GetNextCandidate(shared_info);
1080 shared_function_info_candidates_head_ = next_candidate;
1081 ClearNextCandidate(shared_info);
1082 } else {
1083 while (candidate != NULL) {
1084 next_candidate = GetNextCandidate(candidate);
1085
1086 if (next_candidate == shared_info) {
1087 next_candidate = GetNextCandidate(shared_info);
1088 SetNextCandidate(candidate, next_candidate);
1089 ClearNextCandidate(shared_info);
1090 break;
1091 }
1092
1093 candidate = next_candidate;
1094 }
1095 }
1096 }
1097
1098
1099 void CodeFlusher::EvictCandidate(JSFunction* function) {
1100 DCHECK(!function->next_function_link()->IsUndefined());
1101 Object* undefined = isolate_->heap()->undefined_value();
1102
1103 // Make sure previous flushing decisions are revisited.
1104 isolate_->heap()->incremental_marking()->RecordWrites(function);
1105 isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
1106
1107 if (FLAG_trace_code_flushing) {
1108 PrintF("[code-flushing abandons closure: ");
1109 function->shared()->ShortPrint();
1110 PrintF("]\n");
1111 }
1112
1113 JSFunction* candidate = jsfunction_candidates_head_;
1114 JSFunction* next_candidate;
1115 if (candidate == function) {
1116 next_candidate = GetNextCandidate(function);
1117 jsfunction_candidates_head_ = next_candidate;
1118 ClearNextCandidate(function, undefined);
1119 } else {
1120 while (candidate != NULL) {
1121 next_candidate = GetNextCandidate(candidate);
1122
1123 if (next_candidate == function) {
1124 next_candidate = GetNextCandidate(function);
1125 SetNextCandidate(candidate, next_candidate);
1126 ClearNextCandidate(function, undefined);
1127 break;
1128 }
1129
1130 candidate = next_candidate;
1131 }
1132 }
1133 }
1134
1135
1136 void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
1137 FixedArray* code_map =
1138 FixedArray::cast(code_map_holder->optimized_code_map());
1139 DCHECK(!code_map->get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
1140
1141 // Make sure previous flushing decisions are revisited.
1142 isolate_->heap()->incremental_marking()->RecordWrites(code_map);
1143 isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
1144
1145 if (FLAG_trace_code_flushing) {
1146 PrintF("[code-flushing abandons code-map: ");
1147 code_map_holder->ShortPrint();
1148 PrintF("]\n");
1149 }
1150
1151 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1152 SharedFunctionInfo* next_holder;
1153 if (holder == code_map_holder) {
1154 next_holder = GetNextCodeMap(code_map_holder);
1155 optimized_code_map_holder_head_ = next_holder;
1156 ClearNextCodeMap(code_map_holder);
1157 } else {
1158 while (holder != NULL) {
1159 next_holder = GetNextCodeMap(holder);
1160
1161 if (next_holder == code_map_holder) {
1162 next_holder = GetNextCodeMap(code_map_holder);
1163 SetNextCodeMap(holder, next_holder);
1164 ClearNextCodeMap(code_map_holder);
1165 break;
1166 }
1167
1168 holder = next_holder;
1169 }
1170 }
1171 }
1172
1173
1174 void CodeFlusher::EvictJSFunctionCandidates() {
1175 JSFunction* candidate = jsfunction_candidates_head_;
1176 JSFunction* next_candidate;
1177 while (candidate != NULL) {
1178 next_candidate = GetNextCandidate(candidate);
1179 EvictCandidate(candidate);
1180 candidate = next_candidate;
1181 }
1182 DCHECK(jsfunction_candidates_head_ == NULL);
1183 }
1184
1185
1186 void CodeFlusher::EvictSharedFunctionInfoCandidates() {
1187 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1188 SharedFunctionInfo* next_candidate;
1189 while (candidate != NULL) {
1190 next_candidate = GetNextCandidate(candidate);
1191 EvictCandidate(candidate);
1192 candidate = next_candidate;
1193 }
1194 DCHECK(shared_function_info_candidates_head_ == NULL);
1195 }
1196
1197
1198 void CodeFlusher::EvictOptimizedCodeMaps() {
1199 SharedFunctionInfo* holder = optimized_code_map_holder_head_;
1200 SharedFunctionInfo* next_holder;
1201 while (holder != NULL) {
1202 next_holder = GetNextCodeMap(holder);
1203 EvictOptimizedCodeMap(holder);
1204 holder = next_holder;
1205 }
1206 DCHECK(optimized_code_map_holder_head_ == NULL);
1207 }
1208
1209
1210 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1211 Heap* heap = isolate_->heap();
1212
1213 JSFunction** slot = &jsfunction_candidates_head_;
1214 JSFunction* candidate = jsfunction_candidates_head_;
1215 while (candidate != NULL) {
1216 if (heap->InFromSpace(candidate)) {
1217 v->VisitPointer(reinterpret_cast<Object**>(slot));
1218 }
1219 candidate = GetNextCandidate(*slot);
1220 slot = GetNextCandidateSlot(*slot);
1221 }
1222 }
1223
1224
1225 MarkCompactCollector::~MarkCompactCollector() {
1226 if (code_flusher_ != NULL) {
1227 delete code_flusher_;
1228 code_flusher_ = NULL;
1229 }
1230 }
1231
1232
1233 class MarkCompactMarkingVisitor
1234 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1235 public:
1236 static void Initialize();
1237
1238 INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
1239 MarkObjectByPointer(heap->mark_compact_collector(), object, p);
1240 }
1241
1242 INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
1243 Object** start, Object** end)) {
1244 // Mark all objects pointed to in [start, end).
1245 const int kMinRangeForMarkingRecursion = 64;
1246 if (end - start >= kMinRangeForMarkingRecursion) {
1247 if (VisitUnmarkedObjects(heap, object, start, end)) return;
1248 // We are close to a stack overflow, so just mark the objects.
1249 }
1250 MarkCompactCollector* collector = heap->mark_compact_collector();
1251 for (Object** p = start; p < end; p++) {
1252 MarkObjectByPointer(collector, object, p);
1253 }
1254 }
1255
1256 // Marks the object black and pushes it on the marking stack.
1257 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1258 MarkBit mark = Marking::MarkBitFrom(object);
1259 heap->mark_compact_collector()->MarkObject(object, mark);
1260 }
1261
1262 // Marks the object black without pushing it on the marking stack.
1263 // Returns true if object needed marking and false otherwise.
1264 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1265 MarkBit mark_bit = Marking::MarkBitFrom(object);
1266 if (Marking::IsWhite(mark_bit)) {
1267 heap->mark_compact_collector()->SetMark(object, mark_bit);
1268 return true;
1269 }
1270 return false;
1271 }
1272
1273 // Mark object pointed to by p.
1274 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1275 HeapObject* object, Object** p)) {
1276 if (!(*p)->IsHeapObject()) return;
1277 HeapObject* target_object = HeapObject::cast(*p);
1278 collector->RecordSlot(object, p, target_object);
1279 MarkBit mark = Marking::MarkBitFrom(target_object);
1280 collector->MarkObject(target_object, mark);
1281 }
1282
1283
1284 // Visit an unmarked object.
1285 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1286 HeapObject* obj)) {
1287 #ifdef DEBUG
1288 DCHECK(collector->heap()->Contains(obj));
1289 DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1290 #endif
1291 Map* map = obj->map();
1292 Heap* heap = obj->GetHeap();
1293 MarkBit mark = Marking::MarkBitFrom(obj);
1294 heap->mark_compact_collector()->SetMark(obj, mark);
1295 // Mark the map pointer and the body.
1296 MarkBit map_mark = Marking::MarkBitFrom(map);
1297 heap->mark_compact_collector()->MarkObject(map, map_mark);
1298 IterateBody(map, obj);
1299 }
1300
1301 // Visit all unmarked objects pointed to by [start, end).
1302 // Returns false if the operation fails (lack of stack space).
1303 INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
1304 Object** start, Object** end)) {
1305 // Return false if we are close to the stack limit.
1306 StackLimitCheck check(heap->isolate());
1307 if (check.HasOverflowed()) return false;
1308
1309 MarkCompactCollector* collector = heap->mark_compact_collector();
1310 // Visit the unmarked objects.
1311 for (Object** p = start; p < end; p++) {
1312 Object* o = *p;
1313 if (!o->IsHeapObject()) continue;
1314 collector->RecordSlot(object, p, o);
1315 HeapObject* obj = HeapObject::cast(o);
1316 MarkBit mark = Marking::MarkBitFrom(obj);
1317 if (Marking::IsBlackOrGrey(mark)) continue;
1318 VisitUnmarkedObject(collector, obj);
1319 }
1320 return true;
1321 }
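
VisitPointers above only attempts the recursive fast path for ranges of at least kMinRangeForMarkingRecursion slots, and VisitUnmarkedObjects reports failure when the C stack is nearly exhausted so the caller can fall back to flat marking. A compact sketch of that guard-then-fall-back shape, using a depth counter as a stand-in for StackLimitCheck; all names here are illustrative, not V8 API.

#include <vector>

struct Node {
  bool marked = false;
  std::vector<Node*> children;
};

constexpr int kMaxDepth = 64;  // Stand-in for the real stack limit check.

// Returns false when the depth budget is exhausted; the caller then falls
// back to marking without recursing any further.
bool VisitUnmarkedRecursively(Node* node, int depth) {
  if (depth > kMaxDepth) return false;
  for (Node* child : node->children) {
    if (child->marked) continue;
    child->marked = true;
    if (!VisitUnmarkedRecursively(child, depth + 1)) return false;
  }
  return true;
}

void VisitPointers(Node* node) {
  if (!VisitUnmarkedRecursively(node, 0)) {
    // Close to overflow: just mark the direct children; a marking deque
    // (not shown here) would pick up their successors later.
    for (Node* child : node->children) child->marked = true;
  }
}

int main() {
  Node root;
  Node child;
  root.children.push_back(&child);
  VisitPointers(&root);
  return child.marked ? 0 : 1;
}
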
1322
1323 private:
1324 template <int id>
1325 static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
1326
1327 // Code flushing support.
1328
1329 static const int kRegExpCodeThreshold = 5;
1330
1331 static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
1332 bool is_one_byte) {
1333 // Make sure that the fixed array is in fact initialized on the RegExp.
1334 // We could potentially trigger a GC when initializing the RegExp.
1335 if (HeapObject::cast(re->data())->map()->instance_type() !=
1336 FIXED_ARRAY_TYPE)
1337 return;
1338
1339 // Make sure this is a RegExp that actually contains code.
1340 if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1341
1342 Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
1343 if (!code->IsSmi() &&
1344 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1345 // Save a copy that can be reinstated if we need the code again.
1346 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
1347
1348 // Saving a copy might create a pointer into a compaction candidate
1349 // that was not observed by the marker. This might happen if the
1350 // JSRegExp data was marked through the compilation cache before the
1351 // marker reached the JSRegExp object.
1352 FixedArray* data = FixedArray::cast(re->data());
1353 Object** slot =
1354 data->data_start() + JSRegExp::saved_code_index(is_one_byte);
1355 heap->mark_compact_collector()->RecordSlot(data, slot, code);
1356
1357 // Set a number in the 0-255 range to guarantee no smi overflow.
1358 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1359 Smi::FromInt(heap->ms_count() & 0xff));
1360 } else if (code->IsSmi()) {
1361 int value = Smi::cast(code)->value();
1362 // The regexp has not been compiled yet or there was a compilation error.
1363 if (value == JSRegExp::kUninitializedValue ||
1364 value == JSRegExp::kCompilationErrorValue) {
1365 return;
1366 }
1367
1368 // Check if we should flush now.
1369 if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
1370 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1371 Smi::FromInt(JSRegExp::kUninitializedValue));
1372 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1373 Smi::FromInt(JSRegExp::kUninitializedValue));
1374 }
1375 }
1376 }
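
The age stored above is only the low 8 bits of the mark-sweep count, so the flush test has to use the same wrap-around arithmetic. A tiny sketch with plain ints standing in for Smis (values chosen purely for illustration):

#include <cstdio>

constexpr int kRegExpCodeThreshold = 5;

// Mirrors the Smi stored in the code slot: the GC count masked to 0-255.
int TagAge(int ms_count) { return ms_count & 0xff; }

// Mirrors the flush check: true exactly kRegExpCodeThreshold GCs later.
bool ShouldFlush(int stored_age, int ms_count) {
  return stored_age == ((ms_count - kRegExpCodeThreshold) & 0xff);
}

int main() {
  int stored = TagAge(253);                       // Tagged during GC #253.
  std::printf("%d\n", ShouldFlush(stored, 258));  // 258 - 5 == 253 -> 1
  std::printf("%d\n", ShouldFlush(stored, 259));  // One GC too late  -> 0
  return 0;
}
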
1377
1378
1379 // Works by setting the current sweep_generation (as a smi) in the
1380 // code slot of the RegExp's data array, keeping a copy around that
1381 // can be reinstated if we reuse the RegExp before flushing.
1382 // If the code has not been used for kRegExpCodeThreshold mark-sweep
1383 // GCs, we flush it.
1384 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1385 Heap* heap = map->GetHeap();
1386 MarkCompactCollector* collector = heap->mark_compact_collector();
1387 if (!collector->is_code_flushing_enabled()) {
1388 VisitJSRegExp(map, object);
1389 return;
1390 }
1391 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1392 // Flush code or set age on both one byte and two byte code.
1393 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1394 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1395 // Visit the fields of the RegExp, including the updated FixedArray.
1396 VisitJSRegExp(map, object);
1397 }
1398 };
1399
1400
1401 void MarkCompactMarkingVisitor::Initialize() {
1402 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1403
1404 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1405
1406 if (FLAG_track_gc_object_stats) {
1407 ObjectStatsVisitor::Initialize(&table_);
1408 }
1409 }
1410
1411
1412 class CodeMarkingVisitor : public ThreadVisitor {
1413 public:
1414 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1415 : collector_(collector) {}
1416
1417 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1418 collector_->PrepareThreadForCodeFlushing(isolate, top);
1419 }
1420
1421 private:
1422 MarkCompactCollector* collector_;
1423 };
1424
1425
1426 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1427 public:
1428 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1429 : collector_(collector) {}
1430
1431 void VisitPointers(Object** start, Object** end) override {
1432 for (Object** p = start; p < end; p++) VisitPointer(p);
1433 }
1434
1435 void VisitPointer(Object** slot) override {
1436 Object* obj = *slot;
1437 if (obj->IsSharedFunctionInfo()) {
1438 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1439 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1440 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1441 collector_->MarkObject(shared->code(), code_mark);
1442 collector_->MarkObject(shared, shared_mark);
1443 }
1444 }
1445
1446 private:
1447 MarkCompactCollector* collector_;
1448 };
1449
1450
1451 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1452 ThreadLocalTop* top) {
1453 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1454 // Note: for a frame that has a pending lazy deoptimization,
1455 // StackFrame::unchecked_code will return a non-optimized code object
1456 // for the outermost function, while StackFrame::LookupCode will return
1457 // the actual optimized code object.
1458 StackFrame* frame = it.frame();
1459 Code* code = frame->unchecked_code();
1460 MarkBit code_mark = Marking::MarkBitFrom(code);
1461 MarkObject(code, code_mark);
1462 if (frame->is_optimized()) {
1463 MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
1464 frame->LookupCode());
1465 }
1466 }
1467 }
1468
1469
1470 void MarkCompactCollector::PrepareForCodeFlushing() {
1471 // If code flushing is disabled, there is no need to prepare for it.
1472 if (!is_code_flushing_enabled()) return;
1473
1474 // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
1475 // relies on it being marked before any other descriptor array.
1476 HeapObject* descriptor_array = heap()->empty_descriptor_array();
1477 MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
1478 MarkObject(descriptor_array, descriptor_array_mark);
1479
1480 // Make sure we are not referencing the code from the stack.
1481 DCHECK(this == heap()->mark_compact_collector());
1482 PrepareThreadForCodeFlushing(heap()->isolate(),
1483 heap()->isolate()->thread_local_top());
1484
1485 // Iterate the archived stacks in all threads to check if
1486 // the code is referenced.
1487 CodeMarkingVisitor code_marking_visitor(this);
1488 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1489 &code_marking_visitor);
1490
1491 SharedFunctionInfoMarkingVisitor visitor(this);
1492 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1493 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1494
1495 ProcessMarkingDeque();
1496 }
1497
1498
1499 // Visitor class for marking heap roots.
1500 class RootMarkingVisitor : public ObjectVisitor {
1501 public:
1502 explicit RootMarkingVisitor(Heap* heap)
1503 : collector_(heap->mark_compact_collector()) {}
1504
1505 void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
1506
1507 void VisitPointers(Object** start, Object** end) override {
1508 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1509 }
1510
1511 // Skip the weak next code link in a code object, which is visited in
1512 // ProcessTopOptimizedFrame.
1513 void VisitNextCodeLink(Object** p) override {}
1514
1515 private:
1516 void MarkObjectByPointer(Object** p) {
1517 if (!(*p)->IsHeapObject()) return;
1518
1519 // Replace flat cons strings in place.
1520 HeapObject* object = HeapObject::cast(*p);
1521 MarkBit mark_bit = Marking::MarkBitFrom(object);
1522 if (Marking::IsBlackOrGrey(mark_bit)) return;
1523
1524 Map* map = object->map();
1525 // Mark the object.
1526 collector_->SetMark(object, mark_bit);
1527
1528 // Mark the map pointer and body, and push them on the marking stack.
1529 MarkBit map_mark = Marking::MarkBitFrom(map);
1530 collector_->MarkObject(map, map_mark);
1531 MarkCompactMarkingVisitor::IterateBody(map, object);
1532
1533 // Mark all the objects reachable from the map and body. May leave
1534 // overflowed objects in the heap.
1535 collector_->EmptyMarkingDeque();
1536 }
1537
1538 MarkCompactCollector* collector_;
1539 };
1540
1541
1542 // Helper class for pruning the string table.
1543 template <bool finalize_external_strings>
1544 class StringTableCleaner : public ObjectVisitor {
1545 public:
1546 explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
1547
1548 void VisitPointers(Object** start, Object** end) override {
1549 // Visit all HeapObject pointers in [start, end).
1550 for (Object** p = start; p < end; p++) {
1551 Object* o = *p;
1552 if (o->IsHeapObject() &&
1553 Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
1554 if (finalize_external_strings) {
1555 DCHECK(o->IsExternalString());
1556 heap_->FinalizeExternalString(String::cast(*p));
1557 } else {
1558 pointers_removed_++;
1559 }
1560 // Set the entry to the_hole_value (as deleted).
1561 *p = heap_->the_hole_value();
1562 }
1563 }
1564 }
1565
1566 int PointersRemoved() {
1567 DCHECK(!finalize_external_strings);
1568 return pointers_removed_;
1569 }
1570
1571 private:
1572 Heap* heap_;
1573 int pointers_removed_;
1574 };
1575
1576
1577 typedef StringTableCleaner<false> InternalizedStringTableCleaner;
1578 typedef StringTableCleaner<true> ExternalStringTableCleaner;
1579
1580
1581 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1582 // are retained.
1583 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1584 public:
1585 virtual Object* RetainAs(Object* object) {
1586 if (Marking::IsBlackOrGrey(
1587 Marking::MarkBitFrom(HeapObject::cast(object)))) {
1588 return object;
1589 } else if (object->IsAllocationSite() &&
1590 !(AllocationSite::cast(object)->IsZombie())) {
1591 // "dead" AllocationSites need to live long enough for a traversal of new
1592 // space. These sites get a one-time reprieve.
1593 AllocationSite* site = AllocationSite::cast(object);
1594 site->MarkZombie();
1595 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1596 return object;
1597 } else {
1598 return NULL;
1599 }
1600 }
1601 };
1602
1603
1604 // Fill the marking stack with overflowed objects returned by the given
1605 // iterator. Stop when the marking stack is filled or the end of the space
1606 // is reached, whichever comes first.
1607 template <class T>
1608 void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
1609 // The caller should ensure that the marking stack is initially not full,
1610 // so that we don't waste effort pointlessly scanning for objects.
1611 DCHECK(!marking_deque()->IsFull());
1612
1613 Map* filler_map = heap()->one_pointer_filler_map();
1614 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1615 MarkBit markbit = Marking::MarkBitFrom(object);
1616 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1617 Marking::GreyToBlack(markbit);
1618 PushBlack(object);
1619 if (marking_deque()->IsFull()) return;
1620 }
1621 }
1622 }
1623
1624
1625 static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
1626
1627
1628 void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1629 DCHECK(!marking_deque()->IsFull());
1630 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1631 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1632 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1633 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1634
1635 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1636 Address cell_base = it.CurrentCellBase();
1637 MarkBit::CellType* cell = it.CurrentCell();
1638
1639 const MarkBit::CellType current_cell = *cell;
1640 if (current_cell == 0) continue;
1641
1642 MarkBit::CellType grey_objects;
1643 if (it.HasNext()) {
1644 const MarkBit::CellType next_cell = *(cell + 1);
1645 grey_objects = current_cell & ((current_cell >> 1) |
1646 (next_cell << (Bitmap::kBitsPerCell - 1)));
1647 } else {
1648 grey_objects = current_cell & (current_cell >> 1);
1649 }
1650
1651 int offset = 0;
1652 while (grey_objects != 0) {
1653 int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
1654 grey_objects >>= trailing_zeros;
1655 offset += trailing_zeros;
1656 MarkBit markbit(cell, 1 << offset);
1657 DCHECK(Marking::IsGrey(markbit));
1658 Marking::GreyToBlack(markbit);
1659 Address addr = cell_base + offset * kPointerSize;
1660 HeapObject* object = HeapObject::FromAddress(addr);
1661 PushBlack(object);
1662 if (marking_deque()->IsFull()) return;
1663 offset += 2;
1664 grey_objects >>= 2;
1665 }
1666
1667 grey_objects >>= (Bitmap::kBitsPerCell - 1);
1668 }
1669 }
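
The cell scan above exploits grey being the pattern with both mark bits set: ANDing a cell with itself shifted right by one leaves a set bit at the first mark bit of every grey object, and counting trailing zeros converts those bits into offsets. A standalone sketch of that trick on a single 32-bit cell; __builtin_ctz stands in for base::bits::CountTrailingZeros32, and the cross-cell case handled via next_cell above is omitted.

#include <cstdint>
#include <cstdio>

void PrintGreyOffsets(uint32_t cell) {
  // A grey object has both of its mark bits set, so this keeps exactly one
  // bit per grey object (at that object's first mark bit).
  uint32_t grey = cell & (cell >> 1);
  int offset = 0;
  while (grey != 0) {
    int trailing_zeros = __builtin_ctz(grey);
    grey >>= trailing_zeros;
    offset += trailing_zeros;
    std::printf("grey object at mark-bit offset %d\n", offset);
    offset += 2;  // Skip past this object's two mark bits.
    grey >>= 2;
  }
}

int main() {
  // Bits 0-1: grey (both set). Bits 4-5: black (first bit only).
  // Bits 8-9: grey. Prints offsets 0 and 8.
  PrintGreyOffsets(0x313);
  return 0;
}
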
1670
1671
1672 int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
1673 NewSpace* new_space, NewSpacePage* p) {
1674 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
1675 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
1676 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
1677 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
1678
1679 MarkBit::CellType* cells = p->markbits()->cells();
1680 int survivors_size = 0;
1681
1682 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
1683 Address cell_base = it.CurrentCellBase();
1684 MarkBit::CellType* cell = it.CurrentCell();
1685
1686 MarkBit::CellType current_cell = *cell;
1687 if (current_cell == 0) continue;
1688
1689 int offset = 0;
1690 while (current_cell != 0) {
1691 int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
1692 current_cell >>= trailing_zeros;
1693 offset += trailing_zeros;
1694 Address address = cell_base + offset * kPointerSize;
1695 HeapObject* object = HeapObject::FromAddress(address);
1696 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
1697
1698 int size = object->Size();
1699 survivors_size += size;
1700
1701 Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
1702
1703 offset += 2;
1704 current_cell >>= 2;
1705
1706 // TODO(hpayer): Refactor EvacuateObject and call this function instead.
1707 if (heap()->ShouldBePromoted(object->address(), size) &&
1708 TryPromoteObject(object, size)) {
1709 continue;
1710 }
1711
1712 AllocationAlignment alignment = object->RequiredAlignment();
1713 AllocationResult allocation = new_space->AllocateRaw(size, alignment);
1714 if (allocation.IsRetry()) {
1715 if (!new_space->AddFreshPage()) {
1716 // Shouldn't happen. We are sweeping linearly, and to-space
1717 // has the same number of pages as from-space, so there is
1718 // always room unless we are in an OOM situation.
1719 FatalProcessOutOfMemory("MarkCompactCollector: semi-space copy\n");
1720 }
1721 allocation = new_space->AllocateRaw(size, alignment);
1722 DCHECK(!allocation.IsRetry());
1723 }
1724 Object* target = allocation.ToObjectChecked();
1725
1726 MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
1727 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1728 heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1729 }
1730 heap()->IncrementSemiSpaceCopiedObjectSize(size);
1731 }
1732 *cells = 0;
1733 }
1734 return survivors_size;
1735 }
1736
1737
1738 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
1739 PageIterator it(space);
1740 while (it.has_next()) {
1741 Page* p = it.next();
1742 DiscoverGreyObjectsOnPage(p);
1743 if (marking_deque()->IsFull()) return;
1744 }
1745 }
1746
1747
1748 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1749 NewSpace* space = heap()->new_space();
1750 NewSpacePageIterator it(space->bottom(), space->top());
1751 while (it.has_next()) {
1752 NewSpacePage* page = it.next();
1753 DiscoverGreyObjectsOnPage(page);
1754 if (marking_deque()->IsFull()) return;
1755 }
1756 }
1757
1758
1759 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1760 Object* o = *p;
1761 if (!o->IsHeapObject()) return false;
1762 HeapObject* heap_object = HeapObject::cast(o);
1763 MarkBit mark = Marking::MarkBitFrom(heap_object);
1764 return Marking::IsWhite(mark);
1765 }
1766
1767
1768 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1769 Object** p) {
1770 Object* o = *p;
1771 DCHECK(o->IsHeapObject());
1772 HeapObject* heap_object = HeapObject::cast(o);
1773 MarkBit mark = Marking::MarkBitFrom(heap_object);
1774 return Marking::IsWhite(mark);
1775 }
1776
1777
1778 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1779 StringTable* string_table = heap()->string_table();
1780 // Mark the string table itself.
1781 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
1782 if (Marking::IsWhite(string_table_mark)) {
1783 // The string table could have already been marked by visiting the handles list.
1784 SetMark(string_table, string_table_mark);
1785 }
1786 // Explicitly mark the prefix.
1787 string_table->IteratePrefix(visitor);
1788 ProcessMarkingDeque();
1789 }
1790
1791
1792 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
1793 MarkBit mark_bit = Marking::MarkBitFrom(site);
1794 SetMark(site, mark_bit);
1795 }
1796
1797
1798 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
1799 // Mark the heap roots including global variables, stack variables,
1800 // etc., and all objects reachable from them.
1801 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1802
1803 // Handle the string table specially.
1804 MarkStringTable(visitor);
1805
1806 // There may be overflowed objects in the heap. Visit them now.
1807 while (marking_deque_.overflowed()) {
1808 RefillMarkingDeque();
1809 EmptyMarkingDeque();
1810 }
1811 }
1812
1813
1814 void MarkCompactCollector::MarkImplicitRefGroups(
1815 MarkObjectFunction mark_object) {
1816 List<ImplicitRefGroup*>* ref_groups =
1817 isolate()->global_handles()->implicit_ref_groups();
1818
1819 int last = 0;
1820 for (int i = 0; i < ref_groups->length(); i++) {
1821 ImplicitRefGroup* entry = ref_groups->at(i);
1822 DCHECK(entry != NULL);
1823
1824 if (!IsMarked(*entry->parent)) {
1825 (*ref_groups)[last++] = entry;
1826 continue;
1827 }
1828
1829 Object*** children = entry->children;
1830 // A parent object is marked, so mark all child heap objects.
1831 for (size_t j = 0; j < entry->length; ++j) {
1832 if ((*children[j])->IsHeapObject()) {
1833 mark_object(heap(), HeapObject::cast(*children[j]));
1834 }
1835 }
1836
1837 // Once the entire group has been marked, dispose it because it's
1838 // not needed anymore.
1839 delete entry;
1840 }
1841 ref_groups->Rewind(last);
1842 }
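
MarkImplicitRefGroups filters the group list in place: entries whose parent is not yet marked are shifted to the front with last++, processed entries are deleted, and Rewind(last) trims the tail. A minimal sketch of the same keep-or-drop compaction over a std::vector; the types are hypothetical, not the V8 data structures.

#include <cstddef>
#include <vector>

struct Group {
  bool parent_marked;
};

void ProcessGroups(std::vector<Group*>* groups) {
  std::size_t last = 0;
  for (Group* group : *groups) {
    if (!group->parent_marked) {
      (*groups)[last++] = group;  // Keep: the parent is not marked yet.
      continue;
    }
    // A real collector would mark the children here before disposing.
    delete group;
  }
  groups->resize(last);  // Counterpart of ref_groups->Rewind(last).
}

int main() {
  std::vector<Group*> groups = {new Group{false}, new Group{true}};
  ProcessGroups(&groups);
  for (Group* group : groups) delete group;
  return 0;
}
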
1843
1844
1845 // Mark all objects reachable from the objects on the marking stack.
1846 // Before: the marking stack contains zero or more heap object pointers.
1847 // After: the marking stack is empty, and all objects reachable from the
1848 // marking stack have been marked, or are overflowed in the heap.
1849 void MarkCompactCollector::EmptyMarkingDeque() {
1850 Map* filler_map = heap_->one_pointer_filler_map();
1851 while (!marking_deque_.IsEmpty()) {
1852 HeapObject* object = marking_deque_.Pop();
1853 // Explicitly skip one word fillers. Incremental markbit patterns are
1854 // correct only for objects that occupy at least two words.
1855 Map* map = object->map();
1856 if (map == filler_map) continue;
1857
1858 DCHECK(object->IsHeapObject());
1859 DCHECK(heap()->Contains(object));
1860 DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
1861
1862 MarkBit map_mark = Marking::MarkBitFrom(map);
1863 MarkObject(map, map_mark);
1864
1865 MarkCompactMarkingVisitor::IterateBody(map, object);
1866 }
1867 }
1868
1869
1870 // Sweep the heap for overflowed objects, clear their overflow bits, and
1871 // push them on the marking stack. Stop early if the marking stack fills
1872 // before sweeping completes. If sweeping completes, there are no remaining
1873 // overflowed objects in the heap, so the overflow flag on the marking stack
1874 // is cleared.
1875 void MarkCompactCollector::RefillMarkingDeque() {
1876 isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
1877 DCHECK(marking_deque_.overflowed());
1878
1879 DiscoverGreyObjectsInNewSpace();
1880 if (marking_deque_.IsFull()) return;
1881
1882 DiscoverGreyObjectsInSpace(heap()->old_space());
1883 if (marking_deque_.IsFull()) return;
1884
1885 DiscoverGreyObjectsInSpace(heap()->code_space());
1886 if (marking_deque_.IsFull()) return;
1887
1888 DiscoverGreyObjectsInSpace(heap()->map_space());
1889 if (marking_deque_.IsFull()) return;
1890
1891 LargeObjectIterator lo_it(heap()->lo_space());
1892 DiscoverGreyObjectsWithIterator(&lo_it);
1893 if (marking_deque_.IsFull()) return;
1894
1895 marking_deque_.ClearOverflowed();
1896 }
1897
1898
1899 // Mark all objects reachable (transitively) from objects on the marking
1900 // stack. Before: the marking stack contains zero or more heap object
1901 // pointers. After: the marking stack is empty and there are no overflowed
1902 // objects in the heap.
1903 void MarkCompactCollector::ProcessMarkingDeque() {
1904 EmptyMarkingDeque();
1905 while (marking_deque_.overflowed()) {
1906 RefillMarkingDeque();
1907 EmptyMarkingDeque();
1908 }
1909 }
1910
1911
1912 // Mark all objects reachable (transitively) from objects on the marking
1913 // stack including references only considered in the atomic marking pause.
1914 void MarkCompactCollector::ProcessEphemeralMarking(
1915 ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
1916 bool work_to_do = true;
1917 DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
1918 while (work_to_do) {
1919 if (!only_process_harmony_weak_collections) {
1920 isolate()->global_handles()->IterateObjectGroups(
1921 visitor, &IsUnmarkedHeapObjectWithHeap);
1922 MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
1923 }
1924 ProcessWeakCollections();
1925 work_to_do = !marking_deque_.IsEmpty();
1926 ProcessMarkingDeque();
1927 }
1928 }
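
ProcessEphemeralMarking iterates to a fixed point because marking the value of one weak-collection entry can make another entry's key reachable. A self-contained sketch of that loop over simplified weak maps, using standard containers instead of heap objects; all names are illustrative.

#include <unordered_map>
#include <unordered_set>
#include <vector>

struct Object {};

using WeakMap = std::unordered_map<Object*, Object*>;  // key -> value

void ProcessEphemerons(const std::vector<WeakMap*>& weak_maps,
                       std::unordered_set<Object*>* marked) {
  bool work_to_do = true;
  while (work_to_do) {
    work_to_do = false;
    for (WeakMap* map : weak_maps) {
      for (const auto& entry : *map) {
        // A value is reachable through the ephemeron only if its key is
        // already marked; discovering it may unlock entries in other maps.
        if (marked->count(entry.first) != 0 &&
            marked->count(entry.second) == 0) {
          marked->insert(entry.second);
          work_to_do = true;  // New object discovered; run another pass.
        }
      }
    }
  }
}

int main() {
  Object root, a, b;
  WeakMap m1 = {{&root, &a}};
  WeakMap m2 = {{&a, &b}};
  std::unordered_set<Object*> marked = {&root};
  std::vector<WeakMap*> maps = {&m1, &m2};
  ProcessEphemerons(maps, &marked);
  return marked.count(&b) ? 0 : 1;  // b becomes reachable via root -> a -> b.
}
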
1929
1930
1931 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1932 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1933 !it.done(); it.Advance()) {
1934 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
1935 return;
1936 }
1937 if (it.frame()->type() == StackFrame::OPTIMIZED) {
1938 Code* code = it.frame()->LookupCode();
1939 if (!code->CanDeoptAt(it.frame()->pc())) {
1940 code->CodeIterateBody(visitor);
1941 }
1942 ProcessMarkingDeque();
1943 return;
1944 }
1945 }
1946 }
1947
1948
1949 void MarkCompactCollector::RetainMaps() {
1950 if (heap()->ShouldReduceMemory() || heap()->ShouldAbortIncrementalMarking() ||
1951 FLAG_retain_maps_for_n_gc == 0) {
1952 // Do not retain dead maps if the flag disables it, or if there is
1953 // - memory pressure (reduce_memory_footprint_), or
1954 // - a GC requested by tests or dev-tools (abort_incremental_marking_).
1955 return;
1956 }
1957
1958 ArrayList* retained_maps = heap()->retained_maps();
1959 int length = retained_maps->Length();
1960 int new_length = 0;
1961 for (int i = 0; i < length; i += 2) {
1962 DCHECK(retained_maps->Get(i)->IsWeakCell());
1963 WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
1964 if (cell->cleared()) continue;
1965 int age = Smi::cast(retained_maps->Get(i + 1))->value();
1966 int new_age;
1967 Map* map = Map::cast(cell->value());
1968 MarkBit map_mark = Marking::MarkBitFrom(map);
1969 if (Marking::IsWhite(map_mark)) {
1970 if (age == 0) {
1971 // The map has aged. Do not retain this map.
1972 continue;
1973 }
1974 Object* constructor = map->GetConstructor();
1975 if (!constructor->IsHeapObject() || Marking::IsWhite(Marking::MarkBitFrom(
1976 HeapObject::cast(constructor)))) {
1977 // The constructor is dead, so no new objects with this map can
1978 // be created. Do not retain this map.
1979 continue;
1980 }
1981 Object* prototype = map->prototype();
1982 if (prototype->IsHeapObject() &&
1983 Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
1984 // The prototype is not marked, age the map.
1985 new_age = age - 1;
1986 } else {
1987 // The prototype and the constructor are marked, so this map keeps only
1988 // the transition tree alive, not JSObjects. Do not age the map.
1989 new_age = age;
1990 }
1991 MarkObject(map, map_mark);
1992 } else {
1993 new_age = FLAG_retain_maps_for_n_gc;
1994 }
1995 if (i != new_length) {
1996 retained_maps->Set(new_length, cell);
1997 Object** slot = retained_maps->Slot(new_length);
1998 RecordSlot(retained_maps, slot, cell);
1999 retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
2000 } else if (new_age != age) {
2001 retained_maps->Set(new_length + 1, Smi::FromInt(new_age));
2002 }
2003 new_length += 2;
2004 }
2005 Object* undefined = heap()->undefined_value();
2006 for (int i = new_length; i < length; i++) {
2007 retained_maps->Clear(i, undefined);
2008 }
2009 if (new_length != length) retained_maps->SetLength(new_length);
2010 ProcessMarkingDeque();
2011 }
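
The retention rules above reduce to a small decision: a still-marked map has its age reset, a dead map is dropped once it has aged out or its constructor is dead, and it ages only while its prototype is also dead. A sketch of that decision in isolation; the helper below is hypothetical and not part of V8.

#include <cassert>

struct MapLiveness {
  bool map_marked;
  bool constructor_marked;
  bool prototype_marked;
};

// Returns the new age to store, or -1 when the map should be dropped from
// the retained-maps list. retain_for_n_gc mirrors FLAG_retain_maps_for_n_gc.
int RetainMapDecision(const MapLiveness& m, int age, int retain_for_n_gc) {
  if (m.map_marked) return retain_for_n_gc;   // Live map: reset its age.
  if (age == 0) return -1;                    // Aged out.
  if (!m.constructor_marked) return -1;       // No new instances can appear.
  return m.prototype_marked ? age : age - 1;  // Age only while the prototype is dead.
}

int main() {
  // A dead map whose prototype is also dead loses one unit of age.
  assert(RetainMapDecision({false, true, false}, 3, 16) == 2);
  // A map that is still marked gets its age reset.
  assert(RetainMapDecision({true, false, false}, 0, 16) == 16);
  return 0;
}
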
2012
2013
2014 void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
2015 DCHECK(!marking_deque_.in_use());
2016 if (marking_deque_memory_ == NULL) {
2017 marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
2018 marking_deque_memory_committed_ = 0;
2019 }
2020 if (marking_deque_memory_ == NULL) {
2021 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
2022 }
2023 }
2024
2025
2026 void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
2027 // If the marking deque is too small, we try to allocate a bigger one.
2028 // If that fails, make do with a smaller one.
2029 CHECK(!marking_deque_.in_use());
2030 for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
2031 base::VirtualMemory* memory = marking_deque_memory_;
2032 size_t currently_committed = marking_deque_memory_committed_;
2033
2034 if (currently_committed == size) return;
2035
2036 if (currently_committed > size) {
2037 bool success = marking_deque_memory_->Uncommit(
2038 reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
2039 currently_committed - size);
2040 if (success) {
2041 marking_deque_memory_committed_ = size;
2042 return;
2043 }
2044 UNREACHABLE();
2045 }
2046
2047 bool success = memory->Commit(
2048 reinterpret_cast<Address>(memory->address()) + currently_committed,
2049 size - currently_committed,
2050 false); // Not executable.
2051 if (success) {
2052 marking_deque_memory_committed_ = size;
2053 return;
2054 }
2055 }
2056 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
2057 }
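
EnsureMarkingDequeIsCommitted walks down from the requested size, halving until a commit succeeds or the minimum is reached. A standalone sketch of that back-off; TryCommit below is a stand-in for base::VirtualMemory::Commit with an arbitrary 1 MB cap chosen only for the demo.

#include <cstddef>
#include <cstdio>

// Stand-in for VirtualMemory::Commit; pretend only 1 MB can be committed.
bool TryCommit(std::size_t size) { return size <= (1u << 20); }

// Returns true and sets *committed on success; false means out of memory.
bool CommitWithBackoff(std::size_t max_size, std::size_t min_size,
                       std::size_t* committed) {
  for (std::size_t size = max_size; size >= min_size; size >>= 1) {
    if (TryCommit(size)) {
      *committed = size;
      return true;
    }
  }
  return false;
}

int main() {
  std::size_t committed = 0;
  if (CommitWithBackoff(4u << 20, 64u << 10, &committed)) {
    std::printf("committed %zu bytes\n", committed);  // 1048576 in this demo.
  }
  return 0;
}
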
2058
2059
2060 void MarkCompactCollector::InitializeMarkingDeque() {
2061 DCHECK(!marking_deque_.in_use());
2062 DCHECK(marking_deque_memory_committed_ > 0);
2063 Address addr = static_cast<Address>(marking_deque_memory_->address());
2064 size_t size = marking_deque_memory_committed_;
2065 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
2066 marking_deque_.Initialize(addr, addr + size);
2067 }
2068
2069
2070 void MarkingDeque::Initialize(Address low, Address high) {
2071 DCHECK(!in_use_);
2072 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
2073 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
2074 array_ = obj_low;
2075 mask_ = base::bits::RoundDownToPowerOfTwo32(
2076 static_cast<uint32_t>(obj_high - obj_low)) -
2077 1;
2078 top_ = bottom_ = 0;
2079 overflowed_ = false;
2080 in_use_ = true;
2081 }
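
MarkingDeque::Initialize rounds the capacity down to a power of two so a bit mask can replace the modulo on every push and pop. A minimal sketch of such a deque over a caller-supplied buffer, with an overflow flag instead of growing; this is an assumed layout, not the real class.

#include <cstdint>

class RingDeque {
 public:
  void Initialize(void** low, void** high) {
    array_ = low;
    mask_ = RoundDownToPowerOfTwo(static_cast<uint32_t>(high - low)) - 1;
    top_ = bottom_ = 0;
    overflowed_ = false;
  }
  bool IsEmpty() const { return top_ == bottom_; }
  bool IsFull() const { return ((top_ + 1) & mask_) == bottom_; }
  bool overflowed() const { return overflowed_; }

  // LIFO on the top end; a full deque just records the overflow so the
  // caller can rescan the heap for grey objects later.
  void Push(void* object) {
    if (IsFull()) {
      overflowed_ = true;
      return;
    }
    array_[top_] = object;
    top_ = (top_ + 1) & mask_;
  }
  // Caller must check IsEmpty() before calling Pop().
  void* Pop() {
    top_ = (top_ - 1) & mask_;
    return array_[top_];
  }

 private:
  static uint32_t RoundDownToPowerOfTwo(uint32_t x) {
    uint32_t result = 1;
    while ((result << 1) != 0 && (result << 1) <= x) result <<= 1;
    return result;
  }

  void** array_ = nullptr;
  uint32_t mask_ = 0;
  uint32_t top_ = 0;
  uint32_t bottom_ = 0;
  bool overflowed_ = false;
};

int main() {
  void* buffer[8];
  int dummy = 0;
  RingDeque deque;
  deque.Initialize(buffer, buffer + 8);
  deque.Push(&dummy);
  return deque.Pop() == &dummy ? 0 : 1;
}
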
2082
2083
2084 void MarkingDeque::Uninitialize(bool aborting) {
2085 if (!aborting) {
2086 DCHECK(IsEmpty());
2087 DCHECK(!overflowed_);
2088 }
2089 DCHECK(in_use_);
2090 top_ = bottom_ = 0xdecbad;
2091 in_use_ = false;
2092 }
2093
2094
2095 void MarkCompactCollector::MarkLiveObjects() {
2096 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
2097 double start_time = 0.0;
2098 if (FLAG_print_cumulative_gc_stat) {
2099 start_time = base::OS::TimeCurrentMillis();
2100 }
2101 // The recursive GC marker detects when it is nearing stack overflow,
2102 // and switches to a different marking system. JS interrupts interfere
2103 // with the C stack limit check.
2104 PostponeInterruptsScope postpone(isolate());
2105
2106 {
2107 GCTracer::Scope gc_scope(heap()->tracer(),
2108 GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
2109 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2110 if (was_marked_incrementally_) {
2111 incremental_marking->Finalize();
2112 } else {
2113 // Abort any pending incremental activities e.g. incremental sweeping.
2114 incremental_marking->Stop();
2115 if (marking_deque_.in_use()) {
2116 marking_deque_.Uninitialize(true);
2117 }
2118 }
2119 }
2120
2121 #ifdef DEBUG
2122 DCHECK(state_ == PREPARE_GC);
2123 state_ = MARK_LIVE_OBJECTS;
2124 #endif
2125
2126 EnsureMarkingDequeIsCommittedAndInitialize(
2127 MarkCompactCollector::kMaxMarkingDequeSize);
2128
2129 {
2130 GCTracer::Scope gc_scope(heap()->tracer(),
2131 GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
2132 PrepareForCodeFlushing();
2133 }
2134
2135 RootMarkingVisitor root_visitor(heap());
2136
2137 {
2138 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOT);
2139 MarkRoots(&root_visitor);
2140 }
2141
2142 {
2143 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK_TOPOPT);
2144 ProcessTopOptimizedFrame(&root_visitor);
2145 }
2146
2147 // Retaining dying maps should happen before or during ephemeral marking
2148 // because a map could keep the key of an ephemeron alive. Note that map
2149 // aging is imprecise: maps that are kept alive only by ephemerons will age.
2150 {
2151 GCTracer::Scope gc_scope(heap()->tracer(),
2152 GCTracer::Scope::MC_MARK_RETAIN_MAPS);
2153 RetainMaps();
2154 }
2155
2156 {
2157 GCTracer::Scope gc_scope(heap()->tracer(),
2158 GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
2159
2160 // The objects reachable from the roots are marked, yet unreachable
2161 // objects are unmarked. Mark objects reachable due to host
2162 // application specific logic or through Harmony weak maps.
2163 ProcessEphemeralMarking(&root_visitor, false);
2164
2165 // The objects reachable from the roots, weak maps or object groups
2166 // are marked. Objects pointed to only by weak global handles cannot be
2167 // immediately reclaimed. Instead, we have to mark them as pending and mark
2168 // objects reachable from them.
2169 //
2170 // First we identify nonlive weak handles and mark them as pending
2171 // destruction.
2172 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2173 &IsUnmarkedHeapObject);
2174 // Then we mark the objects.
2175 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2176 ProcessMarkingDeque();
2177
2178 // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2179 // the weak roots we just marked as pending destruction.
2180 //
2181 // We only process harmony collections, as all object groups have been fully
2182 // processed and no weakly reachable node can discover new object groups.
2183 ProcessEphemeralMarking(&root_visitor, true);
2184 }
2185
2186 AfterMarking();
2187
2188 if (FLAG_print_cumulative_gc_stat) {
2189 heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
2190 }
2191 }
2192
2193
2194 void MarkCompactCollector::AfterMarking() {
2195 {
2196 GCTracer::Scope gc_scope(heap()->tracer(),
2197 GCTracer::Scope::MC_MARK_STRING_TABLE);
2198
2199 // Prune the string table removing all strings only pointed to by the
2200 // string table. Cannot use string_table() here because the string
2201 // table is marked.
2202 StringTable* string_table = heap()->string_table();
2203 InternalizedStringTableCleaner internalized_visitor(heap());
2204 string_table->IterateElements(&internalized_visitor);
2205 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2206
2207 ExternalStringTableCleaner external_visitor(heap());
2208 heap()->external_string_table_.Iterate(&external_visitor);
2209 heap()->external_string_table_.CleanUp();
2210 }
2211
2212 {
2213 GCTracer::Scope gc_scope(heap()->tracer(),
2214 GCTracer::Scope::MC_MARK_WEAK_REFERENCES);
2215
2216 // Process the weak references.
2217 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2218 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2219 }
2220
2221 {
2222 GCTracer::Scope gc_scope(heap()->tracer(),
2223 GCTracer::Scope::MC_MARK_GLOBAL_HANDLES);
2224
2225 // Remove object groups after marking phase.
2226 heap()->isolate()->global_handles()->RemoveObjectGroups();
2227 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2228 }
2229
2230 // Flush code from collected candidates.
2231 if (is_code_flushing_enabled()) {
2232 GCTracer::Scope gc_scope(heap()->tracer(),
2233 GCTracer::Scope::MC_MARK_CODE_FLUSH);
2234 code_flusher_->ProcessCandidates();
2235 }
2236
2237 if (FLAG_track_gc_object_stats) {
2238 if (FLAG_trace_gc_object_stats) {
2239 heap()->object_stats_->TraceObjectStats();
2240 }
2241 heap()->object_stats_->CheckpointObjectStats();
2242 }
2243 }
2244
2245
2246 void MarkCompactCollector::ClearNonLiveReferences() {
2247 GCTracer::Scope gc_scope(heap()->tracer(),
2248 GCTracer::Scope::MC_NONLIVEREFERENCES);
2249 // Iterate over the map space, setting map transitions that go from
2250 // a marked map to an unmarked map to null transitions. This action
2251 // is carried out only on maps of JSObjects and related subtypes.
2252 HeapObjectIterator map_iterator(heap()->map_space());
2253 for (HeapObject* obj = map_iterator.Next(); obj != NULL;
2254 obj = map_iterator.Next()) {
(...skipping 2375 matching lines...)
4630 MarkBit mark_bit = Marking::MarkBitFrom(host);
4631 if (Marking::IsBlack(mark_bit)) {
4632 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4633 RecordRelocSlot(&rinfo, target);
4634 }
4635 }
4636 }
4637
4638 }  // namespace internal
4639 }  // namespace v8