Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 923 matching lines...) | |
| 934 } | 934 } |
| 935 | 935 |
| 936 | 936 |
| 937 void Heap::Scavenge() { | 937 void Heap::Scavenge() { |
| 938 #ifdef DEBUG | 938 #ifdef DEBUG |
| 939 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); | 939 if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers(); |
| 940 #endif | 940 #endif |
| 941 | 941 |
| 942 gc_state_ = SCAVENGE; | 942 gc_state_ = SCAVENGE; |
| 943 | 943 |
| 944 SwitchScavengingVisitorsTableIfProfilingWasEnabled(); | |
| 945 | |
| 944 Page::FlipMeaningOfInvalidatedWatermarkFlag(this); | 946 Page::FlipMeaningOfInvalidatedWatermarkFlag(this); |
| 945 #ifdef DEBUG | 947 #ifdef DEBUG |
| 946 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID); | 948 VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID); |
| 947 VerifyPageWatermarkValidity(map_space_, ALL_VALID); | 949 VerifyPageWatermarkValidity(map_space_, ALL_VALID); |
| 948 #endif | 950 #endif |
| 949 | 951 |
| 950 // We do not update an allocation watermark of the top page during linear | 952 // We do not update an allocation watermark of the top page during linear |
| 951 // allocation to avoid overhead. So to maintain the watermark invariant | 953 // allocation to avoid overhead. So to maintain the watermark invariant |
| 952 // we have to manually cache the watermark and mark the top page as having an | 954 // we have to manually cache the watermark and mark the top page as having an |
| 953 // invalid watermark. This guarantees that dirty regions iteration will use a | 955 // invalid watermark. This guarantees that dirty regions iteration will use a |
| (...skipping 271 matching lines...) | |
| 1225 } | 1227 } |
| 1226 | 1228 |
| 1227 // Take another spin if there are now unswept objects in new space | 1229 // Take another spin if there are now unswept objects in new space |
| 1228 // (there are currently no more unswept promoted objects). | 1230 // (there are currently no more unswept promoted objects). |
| 1229 } while (new_space_front < new_space_.top()); | 1231 } while (new_space_front < new_space_.top()); |
| 1230 | 1232 |
| 1231 return new_space_front; | 1233 return new_space_front; |
| 1232 } | 1234 } |
| 1233 | 1235 |
| 1234 | 1236 |
| 1237 enum LoggingAndProfiling { | |
| 1238 LOGGING_AND_PROFILING_ENABLED, | |
| 1239 LOGGING_AND_PROFILING_DISABLED | |
| 1240 }; | |
| 1241 | |
| 1242 | |
| 1243 typedef void (*ScavengingCallback)(Map* map, | |
| 1244 HeapObject** slot, | |
| 1245 HeapObject* object); | |
| 1246 | |
| 1247 | |
| 1248 static Atomic32 scavenging_visitors_table_mode_; | |
| 1249 static VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_; | |
| 1250 | |
| 1251 | |
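The statics above give the shape the rest of the patch builds on: a single process-wide dispatch table plus a mode flag recording which ScavengingVisitor instantiation it was last copied from. A simplified, self-contained sketch of that shape (the types below are stand-ins for illustration, not V8's actual VisitorDispatchTable):

```cpp
// Stand-in types; V8's VisitorDispatchTable keys callbacks off a Map's
// visitor id and has more entries than this simplified version.
#include <cstddef>

struct Map;
struct HeapObject;
typedef void (*ScavengingCallback)(Map* map, HeapObject** slot, HeapObject* object);

template <typename Callback, size_t kSize = 32>
class SimpleDispatchTable {
 public:
  Callback GetVisitor(size_t visitor_id) const { return callbacks_[visitor_id]; }
  void Register(size_t visitor_id, Callback callback) { callbacks_[visitor_id] = callback; }
  // Bulk-overwrite from another table; used to swap in the "profiling enabled"
  // variant without rebuilding the entries one by one.
  void CopyFrom(const SimpleDispatchTable* other) {
    for (size_t i = 0; i < kSize; ++i) callbacks_[i] = other->callbacks_[i];
  }
 private:
  Callback callbacks_[kSize];
};

// One table shared by all isolates, plus the flag saying which variant it holds.
static SimpleDispatchTable<ScavengingCallback> scavenging_table;
static int scavenging_table_mode;  // LOGGING_AND_PROFILING_{DISABLED,ENABLED}
```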
| 1252 static inline void DoScavengeObject(Map* map, | |
Vitaly Repeshko 2011/03/30 15:11:04: This probably needs INLINE to prevent surprises wi…
| 1253 HeapObject** slot, | |
| 1254 HeapObject* obj) { | |
| 1255 scavenging_visitors_table_.GetVisitor(map)(map, slot, obj); | |
| 1256 } | |
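Regarding the review comment above: in this file, forced inlining is normally expressed with the INLINE(...) macro (MigrateObject further down already uses it). A hedged sketch of how the suggestion might be applied to the declaration just shown:

```cpp
// Sketch only: same body as above, wrapped in V8's INLINE() macro so the
// one-line dispatch through the shared table is not left to the compiler's
// inlining heuristics.
INLINE(static void DoScavengeObject(Map* map,
                                    HeapObject** slot,
                                    HeapObject* obj)) {
  scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
}
```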
| 1257 | |
| 1258 | |
| 1259 template<LoggingAndProfiling logging_and_profiling_mode> | |
| 1235 class ScavengingVisitor : public StaticVisitorBase { | 1260 class ScavengingVisitor : public StaticVisitorBase { |
| 1236 public: | 1261 public: |
| 1237 static void Initialize() { | 1262 static void Initialize() { |
| 1238 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString); | 1263 table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString); |
| 1239 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString); | 1264 table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString); |
| 1240 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate); | 1265 table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate); |
| 1241 table_.Register(kVisitByteArray, &EvacuateByteArray); | 1266 table_.Register(kVisitByteArray, &EvacuateByteArray); |
| 1242 table_.Register(kVisitFixedArray, &EvacuateFixedArray); | 1267 table_.Register(kVisitFixedArray, &EvacuateFixedArray); |
| 1268 | |
| 1243 table_.Register(kVisitGlobalContext, | 1269 table_.Register(kVisitGlobalContext, |
| 1244 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | 1270 &ObjectEvacuationStrategy<POINTER_OBJECT>:: |
| 1245 VisitSpecialized<Context::kSize>); | 1271 template VisitSpecialized<Context::kSize>); |
| 1246 | |
| 1247 typedef ObjectEvacuationStrategy<POINTER_OBJECT> PointerObject; | |
| 1248 | 1272 |
| 1249 table_.Register(kVisitConsString, | 1273 table_.Register(kVisitConsString, |
| 1250 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | 1274 &ObjectEvacuationStrategy<POINTER_OBJECT>:: |
| 1251 VisitSpecialized<ConsString::kSize>); | 1275 template VisitSpecialized<ConsString::kSize>); |
| 1252 | 1276 |
| 1253 table_.Register(kVisitSharedFunctionInfo, | 1277 table_.Register(kVisitSharedFunctionInfo, |
| 1254 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | 1278 &ObjectEvacuationStrategy<POINTER_OBJECT>:: |
| 1255 VisitSpecialized<SharedFunctionInfo::kSize>); | 1279 template VisitSpecialized<SharedFunctionInfo::kSize>); |
| 1256 | 1280 |
| 1257 table_.Register(kVisitJSFunction, | 1281 table_.Register(kVisitJSFunction, |
| 1258 &ObjectEvacuationStrategy<POINTER_OBJECT>:: | 1282 &ObjectEvacuationStrategy<POINTER_OBJECT>:: |
| 1259 VisitSpecialized<JSFunction::kSize>); | 1283 template VisitSpecialized<JSFunction::kSize>); |
| 1260 | 1284 |
| 1261 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>, | 1285 table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>, |
| 1262 kVisitDataObject, | 1286 kVisitDataObject, |
| 1263 kVisitDataObjectGeneric>(); | 1287 kVisitDataObjectGeneric>(); |
| 1264 | 1288 |
| 1265 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, | 1289 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, |
| 1266 kVisitJSObject, | 1290 kVisitJSObject, |
| 1267 kVisitJSObjectGeneric>(); | 1291 kVisitJSObjectGeneric>(); |
| 1268 | 1292 |
| 1269 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, | 1293 table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>, |
| 1270 kVisitStruct, | 1294 kVisitStruct, |
| 1271 kVisitStructGeneric>(); | 1295 kVisitStructGeneric>(); |
| 1272 } | 1296 } |
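Note the `template` keyword added before each `VisitSpecialized<...>` registration above: now that ScavengingVisitor is itself a class template, those member-template names are dependent, and C++ requires the disambiguator before the `<`. A small self-contained example of the rule (hypothetical names, not V8 code):

```cpp
// Inside a class template, referring to a member template of a dependent type
// requires the `template` keyword so that `<` is parsed as a template argument
// list rather than as less-than.
template <typename T>
struct Outer {
  template <typename U>
  struct Inner {
    template <int kSize>
    static void VisitSpecialized() {}
  };

  static void Register() {
    // Without `template`, conforming compilers reject this line.
    void (*visitor)() = &Inner<T>::template VisitSpecialized<4>;
    visitor();
  }
};

int main() {
  Outer<int>::Register();
  return 0;
}
```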
| 1273 | 1297 |
| 1274 | 1298 static VisitorDispatchTable<ScavengingCallback>* GetTable() { |
| 1275 static inline void Scavenge(Map* map, HeapObject** slot, HeapObject* obj) { | 1299 return &table_; |
| 1276 table_.GetVisitor(map)(map, slot, obj); | |
| 1277 } | 1300 } |
| 1278 | 1301 |
| 1279 | |
| 1280 private: | 1302 private: |
| 1281 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT }; | 1303 enum ObjectContents { DATA_OBJECT, POINTER_OBJECT }; |
| 1282 enum SizeRestriction { SMALL, UNKNOWN_SIZE }; | 1304 enum SizeRestriction { SMALL, UNKNOWN_SIZE }; |
| 1283 | 1305 |
| 1284 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1306 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
| 1285 static void RecordCopiedObject(Heap* heap, HeapObject* obj) { | 1307 static void RecordCopiedObject(Heap* heap, HeapObject* obj) { |
| 1286 bool should_record = false; | 1308 bool should_record = false; |
| 1287 #ifdef DEBUG | 1309 #ifdef DEBUG |
| 1288 should_record = FLAG_heap_stats; | 1310 should_record = FLAG_heap_stats; |
| 1289 #endif | 1311 #endif |
| (...skipping 16 matching lines...) | |
| 1306 INLINE(static HeapObject* MigrateObject(Heap* heap, | 1328 INLINE(static HeapObject* MigrateObject(Heap* heap, |
| 1307 HeapObject* source, | 1329 HeapObject* source, |
| 1308 HeapObject* target, | 1330 HeapObject* target, |
| 1309 int size)) { | 1331 int size)) { |
| 1310 // Copy the content of source to target. | 1332 // Copy the content of source to target. |
| 1311 heap->CopyBlock(target->address(), source->address(), size); | 1333 heap->CopyBlock(target->address(), source->address(), size); |
| 1312 | 1334 |
| 1313 // Set the forwarding address. | 1335 // Set the forwarding address. |
| 1314 source->set_map_word(MapWord::FromForwardingAddress(target)); | 1336 source->set_map_word(MapWord::FromForwardingAddress(target)); |
| 1315 | 1337 |
| 1338 if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) { | |
| 1316 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) | 1339 #if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING) |
| 1317 // Update NewSpace stats if necessary. | 1340 // Update NewSpace stats if necessary. |
| 1318 RecordCopiedObject(heap, target); | 1341 RecordCopiedObject(heap, target); |
| 1319 #endif | 1342 #endif |
| 1320 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address())); | 1343 HEAP_PROFILE(heap, ObjectMoveEvent(source->address(), target->address())); |
| 1321 #if defined(ENABLE_LOGGING_AND_PROFILING) | 1344 #if defined(ENABLE_LOGGING_AND_PROFILING) |
| 1322 Isolate* isolate = heap->isolate(); | 1345 Isolate* isolate = heap->isolate(); |
| 1323 if (isolate->logger()->is_logging() || | 1346 if (isolate->logger()->is_logging() || |
| 1324 isolate->cpu_profiler()->is_profiling()) { | 1347 isolate->cpu_profiler()->is_profiling()) { |
| 1325 if (target->IsSharedFunctionInfo()) { | 1348 if (target->IsSharedFunctionInfo()) { |
| 1326 PROFILE(isolate, SharedFunctionInfoMoveEvent( | 1349 PROFILE(isolate, SharedFunctionInfoMoveEvent( |
| 1327 source->address(), target->address())); | 1350 source->address(), target->address())); |
| 1351 } | |
| 1328 } | 1352 } |
| 1353 #endif | |
| 1329 } | 1354 } |
| 1330 #endif | 1355 |
| 1331 return target; | 1356 return target; |
| 1332 } | 1357 } |
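MigrateObject now branches on the logging_and_profiling_mode template parameter, so the logging and profiling work is compiled only into the ENABLED instantiation and costs nothing in the DISABLED one. A minimal stand-alone sketch of the same technique (stand-in names, not V8 code):

```cpp
#include <cstdio>

enum LoggingMode { LOGGING_DISABLED, LOGGING_ENABLED };

// `mode` is a compile-time constant, so the `if` below is resolved by the
// compiler: the DISABLED instantiation contains no logging code at all.
template <LoggingMode mode>
int Migrate(int value) {
  int migrated = value + 1;  // stand-in for copying the object
  if (mode == LOGGING_ENABLED) {
    std::printf("migrated %d -> %d\n", value, migrated);
  }
  return migrated;
}

int main() {
  Migrate<LOGGING_DISABLED>(1);  // hot path, branch compiled out
  Migrate<LOGGING_ENABLED>(2);   // profiling variant of the path, prints
  return 0;
}
```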
| 1333 | 1358 |
| 1334 | 1359 |
| 1335 template<ObjectContents object_contents, SizeRestriction size_restriction> | 1360 template<ObjectContents object_contents, SizeRestriction size_restriction> |
| 1336 static inline void EvacuateObject(Map* map, | 1361 static inline void EvacuateObject(Map* map, |
| 1337 HeapObject** slot, | 1362 HeapObject** slot, |
| 1338 HeapObject* object, | 1363 HeapObject* object, |
| 1339 int object_size) { | 1364 int object_size) { |
| 1340 ASSERT((size_restriction != SMALL) || | 1365 ASSERT((size_restriction != SMALL) || |
| (...skipping 95 matching lines...) | |
| 1436 | 1461 |
| 1437 MapWord first_word = first->map_word(); | 1462 MapWord first_word = first->map_word(); |
| 1438 if (first_word.IsForwardingAddress()) { | 1463 if (first_word.IsForwardingAddress()) { |
| 1439 HeapObject* target = first_word.ToForwardingAddress(); | 1464 HeapObject* target = first_word.ToForwardingAddress(); |
| 1440 | 1465 |
| 1441 *slot = target; | 1466 *slot = target; |
| 1442 object->set_map_word(MapWord::FromForwardingAddress(target)); | 1467 object->set_map_word(MapWord::FromForwardingAddress(target)); |
| 1443 return; | 1468 return; |
| 1444 } | 1469 } |
| 1445 | 1470 |
| 1446 Scavenge(first->map(), slot, first); | 1471 DoScavengeObject(first->map(), slot, first); |
| 1447 object->set_map_word(MapWord::FromForwardingAddress(*slot)); | 1472 object->set_map_word(MapWord::FromForwardingAddress(*slot)); |
| 1448 return; | 1473 return; |
| 1449 } | 1474 } |
| 1450 | 1475 |
| 1451 int object_size = ConsString::kSize; | 1476 int object_size = ConsString::kSize; |
| 1452 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size); | 1477 EvacuateObject<POINTER_OBJECT, SMALL>(map, slot, object, object_size); |
| 1453 } | 1478 } |
| 1454 | 1479 |
| 1455 template<ObjectContents object_contents> | 1480 template<ObjectContents object_contents> |
| 1456 class ObjectEvacuationStrategy { | 1481 class ObjectEvacuationStrategy { |
| 1457 public: | 1482 public: |
| 1458 template<int object_size> | 1483 template<int object_size> |
| 1459 static inline void VisitSpecialized(Map* map, | 1484 static inline void VisitSpecialized(Map* map, |
| 1460 HeapObject** slot, | 1485 HeapObject** slot, |
| 1461 HeapObject* object) { | 1486 HeapObject* object) { |
| 1462 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size); | 1487 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size); |
| 1463 } | 1488 } |
| 1464 | 1489 |
| 1465 static inline void Visit(Map* map, | 1490 static inline void Visit(Map* map, |
| 1466 HeapObject** slot, | 1491 HeapObject** slot, |
| 1467 HeapObject* object) { | 1492 HeapObject* object) { |
| 1468 int object_size = map->instance_size(); | 1493 int object_size = map->instance_size(); |
| 1469 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size); | 1494 EvacuateObject<object_contents, SMALL>(map, slot, object, object_size); |
| 1470 } | 1495 } |
| 1471 }; | 1496 }; |
| 1472 | 1497 |
| 1473 typedef void (*Callback)(Map* map, HeapObject** slot, HeapObject* object); | 1498 static VisitorDispatchTable<ScavengingCallback> table_; |
| 1474 | |
| 1475 static VisitorDispatchTable<Callback> table_; | |
| 1476 }; | 1499 }; |
| 1477 | 1500 |
| 1478 | 1501 |
| 1479 VisitorDispatchTable<ScavengingVisitor::Callback> ScavengingVisitor::table_; | 1502 template<LoggingAndProfiling logging_and_profiling_mode> |
| 1503 VisitorDispatchTable<ScavengingCallback> | |
| 1504 ScavengingVisitor<logging_and_profiling_mode>::table_; | |
| 1505 | |
| 1506 | |
| 1507 static void InitializeScavengingVisitorsTables() { | |
| 1508 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::Initialize(); | |
| 1509 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::Initialize(); | |
| 1510 scavenging_visitors_table_.CopyFrom( | |
| 1511 ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>::GetTable()); | |
| 1512 scavenging_visitors_table_mode_ = LOGGING_AND_PROFILING_DISABLED; | |
| 1513 } | |
| 1514 | |
| 1515 | |
| 1516 void Heap::SwitchScavengingVisitorsTableIfProfilingWasEnabled() { | |
| 1517 if (scavenging_visitors_table_mode_ == LOGGING_AND_PROFILING_ENABLED) { | |
| 1518 // Table was already updated by some isolate. | |
| 1519 return; | |
| 1520 } | |
| 1521 | |
| 1522 if (isolate()->logger()->is_logging() || | |
| 1523 isolate()->cpu_profiler()->is_profiling() || | |
| 1524 (isolate()->heap_profiler() != NULL && | |
| 1525 isolate()->heap_profiler()->is_profiling())) { | |
| 1526 // If one of the isolates is doing scavenge at this moment of time | |
| 1527 // it might see this table in an inconsitent state when | |
| 1528 // some of the callbacks point to | |
| 1529 // ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED> and others | |
| 1530 // to ScavengingVisitor<LOGGING_AND_PROFILING_DISABLED>. | |
| 1531 // However this does not lead any bugs as such isolate does not have | |
Vitaly Repeshko 2011/03/30 15:11:04: "lead to"
| 1532 // profiling enabled and any isolate with enabled profiling is guaranteed | |
| 1533 // to see table it in the consistent state. | |
Vitaly Repeshko 2011/03/30 15:11:04: "table it" -> "the table"
| 1534 scavenging_visitors_table_.CopyFrom( | |
| 1535 ScavengingVisitor<LOGGING_AND_PROFILING_ENABLED>::GetTable()); | |
| 1536 | |
| 1537 // We use Release_Store to prevent reordering of this write before writes | |
| 1538 // to the table. | |
| 1539 Release_Store(&scavenging_visitors_table_mode_, | |
| 1540 LOGGING_AND_PROFILING_ENABLED); | |
| 1541 } | |
| 1542 } | |
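The switch above fills the shared table first and only then Release_Store-s the mode flag, so a thread that later observes LOGGING_AND_PROFILING_ENABLED is also guaranteed to observe the fully copied table; as the in-code comment notes, a concurrent scavenge that reads the flag without a matching acquire may briefly see a mix of the two variants, which is harmless because both callback sets behave correctly for a non-profiling isolate. A self-contained sketch of the publish idiom, using std::atomic in place of V8's Atomic32 helpers:

```cpp
#include <atomic>

enum Mode { MODE_DISABLED, MODE_ENABLED };

static int shared_table[4];                       // stand-in for the dispatch table
static std::atomic<int> table_mode(MODE_DISABLED);

void PublishEnabledTable() {
  for (int i = 0; i < 4; ++i) shared_table[i] = 100 + i;  // update entries first
  // Release: none of the table writes above may be reordered past this store.
  table_mode.store(MODE_ENABLED, std::memory_order_release);
}

bool ReaderSeesConsistentTable() {
  // Acquire pairs with the release store in PublishEnabledTable().
  if (table_mode.load(std::memory_order_acquire) == MODE_ENABLED) {
    return shared_table[0] == 100;  // guaranteed true once ENABLED is seen
  }
  return true;  // still the DISABLED view from this thread's perspective
}

int main() {
  PublishEnabledTable();
  return ReaderSeesConsistentTable() ? 0 : 1;
}
```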
| 1480 | 1543 |
| 1481 | 1544 |
| 1482 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { | 1545 void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) { |
| 1483 ASSERT(HEAP->InFromSpace(object)); | 1546 ASSERT(HEAP->InFromSpace(object)); |
| 1484 MapWord first_word = object->map_word(); | 1547 MapWord first_word = object->map_word(); |
| 1485 ASSERT(!first_word.IsForwardingAddress()); | 1548 ASSERT(!first_word.IsForwardingAddress()); |
| 1486 Map* map = first_word.ToMap(); | 1549 Map* map = first_word.ToMap(); |
| 1487 ScavengingVisitor::Scavenge(map, p, object); | 1550 DoScavengeObject(map, p, object); |
| 1488 } | 1551 } |
| 1489 | 1552 |
| 1490 | 1553 |
| 1491 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type, | 1554 MaybeObject* Heap::AllocatePartialMap(InstanceType instance_type, |
| 1492 int instance_size) { | 1555 int instance_size) { |
| 1493 Object* result; | 1556 Object* result; |
| 1494 { MaybeObject* maybe_result = AllocateRawMap(); | 1557 { MaybeObject* maybe_result = AllocateRawMap(); |
| 1495 if (!maybe_result->ToObject(&result)) return maybe_result; | 1558 if (!maybe_result->ToObject(&result)) return maybe_result; |
| 1496 } | 1559 } |
| 1497 | 1560 |
| (...skipping 3252 matching lines...) | |
| 4750 // Configuration is based on the flags new-space-size (really the semispace | 4813 // Configuration is based on the flags new-space-size (really the semispace |
| 4751 // size) and old-space-size if set or the initial values of semispace_size_ | 4814 // size) and old-space-size if set or the initial values of semispace_size_ |
| 4752 // and old_generation_size_ otherwise. | 4815 // and old_generation_size_ otherwise. |
| 4753 if (!configured_) { | 4816 if (!configured_) { |
| 4754 if (!ConfigureHeapDefault()) return false; | 4817 if (!ConfigureHeapDefault()) return false; |
| 4755 } | 4818 } |
| 4756 | 4819 |
| 4757 gc_initializer_mutex->Lock(); | 4820 gc_initializer_mutex->Lock(); |
| 4758 static bool initialized_gc = false; | 4821 static bool initialized_gc = false; |
| 4759 if (!initialized_gc) { | 4822 if (!initialized_gc) { |
| 4760 initialized_gc = true; | 4823 initialized_gc = true; |
Vitaly Repeshko 2011/03/30 15:11:04: nit: Is indentation off here?
| 4761 ScavengingVisitor::Initialize(); | 4824 InitializeScavengingVisitorsTables(); |
| 4762 NewSpaceScavenger::Initialize(); | 4825 NewSpaceScavenger::Initialize(); |
| 4763 MarkCompactCollector::Initialize(); | 4826 MarkCompactCollector::Initialize(); |
| 4764 } | 4827 } |
| 4765 gc_initializer_mutex->Unlock(); | 4828 gc_initializer_mutex->Unlock(); |
| 4766 | 4829 |
| 4767 MarkMapPointersAsEncoded(false); | 4830 MarkMapPointersAsEncoded(false); |
| 4768 | 4831 |
| 4769 // Setup memory allocator and reserve a chunk of memory for new | 4832 // Setup memory allocator and reserve a chunk of memory for new |
| 4770 // space. The chunk is double the size of the requested reserved | 4833 // space. The chunk is double the size of the requested reserved |
| 4771 // new space size to ensure that we can find a pair of semispaces that | 4834 // new space size to ensure that we can find a pair of semispaces that |
| (...skipping 1009 matching lines...) | |
| 5781 } | 5844 } |
| 5782 | 5845 |
| 5783 | 5846 |
| 5784 void ExternalStringTable::TearDown() { | 5847 void ExternalStringTable::TearDown() { |
| 5785 new_space_strings_.Free(); | 5848 new_space_strings_.Free(); |
| 5786 old_space_strings_.Free(); | 5849 old_space_strings_.Free(); |
| 5787 } | 5850 } |
| 5788 | 5851 |
| 5789 | 5852 |
| 5790 } } // namespace v8::internal | 5853 } } // namespace v8::internal |