OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1037 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1048 } | 1048 } |
1049 | 1049 |
1050 | 1050 |
1051 template<MarkCompactCollector::AllocationFunction Alloc, | 1051 template<MarkCompactCollector::AllocationFunction Alloc, |
1052 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive> | 1052 MarkCompactCollector::ProcessNonLiveFunction ProcessNonLive> |
1053 void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace( | 1053 void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace( |
1054 PagedSpace* space) { | 1054 PagedSpace* space) { |
1055 PageIterator it(space, PageIterator::PAGES_IN_USE); | 1055 PageIterator it(space, PageIterator::PAGES_IN_USE); |
1056 while (it.has_next()) { | 1056 while (it.has_next()) { |
1057 Page* p = it.next(); | 1057 Page* p = it.next(); |
1058 // The offset of each live object in the page from the first live object | 1058 |
1059 // in the page. | 1059 if (p->WasInUseBeforeMC()) { |
1060 int offset = 0; | 1060 // The offset of each live object in the page from the first live object |
1061 EncodeForwardingAddressesInRange<Alloc, | 1061 // in the page. |
1062 EncodeForwardingAddressInPagedSpace, | 1062 int offset = 0; |
1063 ProcessNonLive>( | 1063 EncodeForwardingAddressesInRange<Alloc, |
1064 p->ObjectAreaStart(), | 1064 EncodeForwardingAddressInPagedSpace, |
1065 p->AllocationTop(), | 1065 ProcessNonLive>( |
1066 &offset); | 1066 p->ObjectAreaStart(), |
| 1067 p->AllocationTop(), |
| 1068 &offset); |
| 1069 } else { |
| 1070 // Mark whole unused page as a free region. |
| 1071 EncodeFreeRegion(p->ObjectAreaStart(), |
| 1072 p->AllocationTop() - p->ObjectAreaStart()); |
| 1073 } |
1067 } | 1074 } |
1068 } | 1075 } |
1069 | 1076 |
1070 | 1077 |
1071 // We scavenge new space simultaneously with sweeping. This is done in two | 1078 // We scavenge new space simultaneously with sweeping. This is done in two |
1072 // passes. | 1079 // passes. |
1073 // The first pass migrates all alive objects from one semispace to another or | 1080 // The first pass migrates all alive objects from one semispace to another or |
1074 // promotes them to old space. Forwarding address is written directly into | 1081 // promotes them to old space. Forwarding address is written directly into |
1075 // first word of object without any encoding. If object is dead we are writing | 1082 // first word of object without any encoding. If object is dead we are writing |
1076 // NULL as a forwarding address. | 1083 // NULL as a forwarding address. |
(...skipping 193 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1270 &UpdateNewSpaceReferenceInExternalStringTableEntry); | 1277 &UpdateNewSpaceReferenceInExternalStringTableEntry); |
1271 | 1278 |
1272 // All pointers were updated. Update auxiliary allocation info. | 1279 // All pointers were updated. Update auxiliary allocation info. |
1273 Heap::IncrementYoungSurvivorsCounter(survivors_size); | 1280 Heap::IncrementYoungSurvivorsCounter(survivors_size); |
1274 space->set_age_mark(space->top()); | 1281 space->set_age_mark(space->top()); |
1275 } | 1282 } |
1276 | 1283 |
1277 | 1284 |
1278 static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { | 1285 static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) { |
1279 PageIterator it(space, PageIterator::PAGES_IN_USE); | 1286 PageIterator it(space, PageIterator::PAGES_IN_USE); |
| 1287 |
| 1288 // During sweeping of paged space we are trying to find longest sequences |
| 1289 // of pages without live objects and free them (instead of putting them on |
| 1290 // the free list). |
| 1291 Page* prev = NULL; // Page preceding current. |
| 1292 Page* first_empty_page = NULL; // First empty page in a sequence. |
| 1293 Page* prec_first_empty_page = NULL; // Page preceding first empty page. |
| 1294 |
| 1295 // If last used page of space ends with a sequence of dead objects |
| 1296 // we can adjust allocation top instead of putting this free area into |
| 1297 // the free list. Thus during sweeping we keep track of such areas |
| 1298 // and defer their deallocation until the sweeping of the next page |
| 1299 // is done: if one of the next pages contains live objects we have |
| 1300 // to put such area into the free list. |
| 1301 Address last_free_start = NULL; |
| 1302 int last_free_size = 0; |
| 1303 |
1280 while (it.has_next()) { | 1304 while (it.has_next()) { |
1281 Page* p = it.next(); | 1305 Page* p = it.next(); |
1282 | 1306 |
1283 bool is_previous_alive = true; | 1307 bool is_previous_alive = true; |
1284 Address free_start = NULL; | 1308 Address free_start = NULL; |
1285 HeapObject* object; | 1309 HeapObject* object; |
1286 | 1310 |
1287 for (Address current = p->ObjectAreaStart(); | 1311 for (Address current = p->ObjectAreaStart(); |
1288 current < p->AllocationTop(); | 1312 current < p->AllocationTop(); |
1289 current += object->Size()) { | 1313 current += object->Size()) { |
1290 object = HeapObject::FromAddress(current); | 1314 object = HeapObject::FromAddress(current); |
1291 if (object->IsMarked()) { | 1315 if (object->IsMarked()) { |
1292 object->ClearMark(); | 1316 object->ClearMark(); |
1293 MarkCompactCollector::tracer()->decrement_marked_count(); | 1317 MarkCompactCollector::tracer()->decrement_marked_count(); |
| 1318 |
1294 if (!is_previous_alive) { // Transition from free to live. | 1319 if (!is_previous_alive) { // Transition from free to live. |
1295 dealloc(free_start, static_cast<int>(current - free_start)); | 1320 dealloc(free_start, static_cast<int>(current - free_start), true); |
1296 is_previous_alive = true; | 1321 is_previous_alive = true; |
1297 } | 1322 } |
1298 } else { | 1323 } else { |
1299 MarkCompactCollector::ReportDeleteIfNeeded(object); | 1324 MarkCompactCollector::ReportDeleteIfNeeded(object); |
1300 if (is_previous_alive) { // Transition from live to free. | 1325 if (is_previous_alive) { // Transition from live to free. |
1301 free_start = current; | 1326 free_start = current; |
1302 is_previous_alive = false; | 1327 is_previous_alive = false; |
1303 } | 1328 } |
1304 } | 1329 } |
1305 // The object is now unmarked for the call to Size() at the top of the | 1330 // The object is now unmarked for the call to Size() at the top of the |
1306 // loop. | 1331 // loop. |
1307 } | 1332 } |
1308 | 1333 |
1309 // If the last region was not live we need to deallocate from | 1334 bool page_is_empty = (p->ObjectAreaStart() == p->AllocationTop()) |
1310 // free_start to the allocation top in the page. | 1335 || (!is_previous_alive && free_start == p->ObjectAreaStart()); |
1311 if (!is_previous_alive) { | 1336 |
1312 int free_size = static_cast<int>(p->AllocationTop() - free_start); | 1337 if (page_is_empty) { |
1313 if (free_size > 0) { | 1338 // This page is empty. Check whether we are in the middle of |
1314 dealloc(free_start, free_size); | 1339 // sequence of empty pages and start one if not. |
| 1340 if (first_empty_page == NULL) { |
| 1341 first_empty_page = p; |
| 1342 prec_first_empty_page = prev; |
| 1343 } |
| 1344 |
| 1345 if (!is_previous_alive) { |
| 1346 // There are dead objects on this page. Update space accounting stats |
| 1347 // without putting anything into free list. |
| 1348 int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start); |
| 1349 if (size_in_bytes > 0) { |
| 1350 dealloc(free_start, size_in_bytes, false); |
| 1351 } |
| 1352 } |
| 1353 } else { |
| 1354 // This page is not empty. Sequence of empty pages ended on the previous |
| 1355 // one. |
| 1356 if (first_empty_page != NULL) { |
| 1357 space->FreePages(prec_first_empty_page, prev); |
| 1358 prec_first_empty_page = first_empty_page = NULL; |
| 1359 } |
| 1360 |
| 1361 // If there is a free ending area on one of the previous pages we have to |
| 1362 // deallocate that area and put it on the free list. |
| 1363 if (last_free_size > 0) { |
| 1364 dealloc(last_free_start, last_free_size, true); |
| 1365 last_free_start = NULL; |
| 1366 last_free_size = 0; |
| 1367 } |
| 1368 |
| 1369 // If the last region of this page was not live we remember it. |
| 1370 if (!is_previous_alive) { |
| 1371 ASSERT(last_free_size == 0); |
| 1372 last_free_size = static_cast<int>(p->AllocationTop() - free_start); |
| 1373 last_free_start = free_start; |
1315 } | 1374 } |
1316 } | 1375 } |
| 1376 |
| 1377 prev = p; |
| 1378 } |
| 1379 |
| 1380 // We reached end of space. See if we need to adjust allocation top. |
| 1381 Address new_allocation_top = NULL; |
| 1382 |
| 1383 if (first_empty_page != NULL) { |
| 1384 // Last used pages in space are empty. We can move allocation top backwards |
| 1385 // to the beginning of first empty page. |
| 1386 ASSERT(prev == space->AllocationTopPage()); |
| 1387 |
| 1388 new_allocation_top = first_empty_page->ObjectAreaStart(); |
| 1389 } |
| 1390 |
| 1391 if (last_free_size > 0) { |
| 1392 // There was a free ending area on the previous page. |
| 1393 // Deallocate it without putting it into freelist and move allocation |
| 1394 // top to the beginning of this free area. |
| 1395 dealloc(last_free_start, last_free_size, false); |
| 1396 new_allocation_top = last_free_start; |
| 1397 } |
| 1398 |
| 1399 if (new_allocation_top != NULL) { |
| 1400 Page* new_allocation_top_page = Page::FromAllocationTop(new_allocation_top); |
| 1401 |
| 1402 ASSERT(((first_empty_page == NULL) && |
| 1403 (new_allocation_top_page == space->AllocationTopPage())) || |
| 1404 ((first_empty_page != NULL) && (last_free_size > 0) && |
| 1405 (new_allocation_top_page == prec_first_empty_page)) || |
| 1406 ((first_empty_page != NULL) && (last_free_size == 0) && |
| 1407 (new_allocation_top_page == first_empty_page))); |
| 1408 |
| 1409 space->SetTop(new_allocation_top, |
| 1410 new_allocation_top_page->ObjectAreaEnd()); |
1317 } | 1411 } |
1318 } | 1412 } |
1319 | 1413 |
1320 | 1414 |
1321 void MarkCompactCollector::DeallocateOldPointerBlock(Address start, | 1415 void MarkCompactCollector::DeallocateOldPointerBlock(Address start, |
1322 int size_in_bytes) { | 1416 int size_in_bytes, |
| 1417 bool add_to_freelist) { |
1323 Heap::ClearRSetRange(start, size_in_bytes); | 1418 Heap::ClearRSetRange(start, size_in_bytes); |
1324 Heap::old_pointer_space()->Free(start, size_in_bytes); | 1419 Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist); |
1325 } | 1420 } |
1326 | 1421 |
1327 | 1422 |
1328 void MarkCompactCollector::DeallocateOldDataBlock(Address start, | 1423 void MarkCompactCollector::DeallocateOldDataBlock(Address start, |
1329 int size_in_bytes) { | 1424 int size_in_bytes, |
1330 Heap::old_data_space()->Free(start, size_in_bytes); | 1425 bool add_to_freelist) { |
| 1426 Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist); |
1331 } | 1427 } |
1332 | 1428 |
1333 | 1429 |
1334 void MarkCompactCollector::DeallocateCodeBlock(Address start, | 1430 void MarkCompactCollector::DeallocateCodeBlock(Address start, |
1335 int size_in_bytes) { | 1431 int size_in_bytes, |
1336 Heap::code_space()->Free(start, size_in_bytes); | 1432 bool add_to_freelist) { |
| 1433 Heap::code_space()->Free(start, size_in_bytes, add_to_freelist); |
1337 } | 1434 } |
1338 | 1435 |
1339 | 1436 |
1340 void MarkCompactCollector::DeallocateMapBlock(Address start, | 1437 void MarkCompactCollector::DeallocateMapBlock(Address start, |
1341 int size_in_bytes) { | 1438 int size_in_bytes, |
| 1439 bool add_to_freelist) { |
1342 // Objects in map space are assumed to have size Map::kSize and a | 1440 // Objects in map space are assumed to have size Map::kSize and a |
1343 // valid map in their first word. Thus, we break the free block up into | 1441 // valid map in their first word. Thus, we break the free block up into |
1344 // chunks and free them separately. | 1442 // chunks and free them separately. |
1345 ASSERT(size_in_bytes % Map::kSize == 0); | 1443 ASSERT(size_in_bytes % Map::kSize == 0); |
1346 Heap::ClearRSetRange(start, size_in_bytes); | 1444 Heap::ClearRSetRange(start, size_in_bytes); |
1347 Address end = start + size_in_bytes; | 1445 Address end = start + size_in_bytes; |
1348 for (Address a = start; a < end; a += Map::kSize) { | 1446 for (Address a = start; a < end; a += Map::kSize) { |
1349 Heap::map_space()->Free(a); | 1447 Heap::map_space()->Free(a, add_to_freelist); |
1350 } | 1448 } |
1351 } | 1449 } |
1352 | 1450 |
1353 | 1451 |
1354 void MarkCompactCollector::DeallocateCellBlock(Address start, | 1452 void MarkCompactCollector::DeallocateCellBlock(Address start, |
1355 int size_in_bytes) { | 1453 int size_in_bytes, |
| 1454 bool add_to_freelist) { |
1356 // Free-list elements in cell space are assumed to have a fixed size. | 1455 // Free-list elements in cell space are assumed to have a fixed size. |
1357 // We break the free block into chunks and add them to the free list | 1456 // We break the free block into chunks and add them to the free list |
1358 // individually. | 1457 // individually. |
1359 int size = Heap::cell_space()->object_size_in_bytes(); | 1458 int size = Heap::cell_space()->object_size_in_bytes(); |
1360 ASSERT(size_in_bytes % size == 0); | 1459 ASSERT(size_in_bytes % size == 0); |
1361 Heap::ClearRSetRange(start, size_in_bytes); | 1460 Heap::ClearRSetRange(start, size_in_bytes); |
1362 Address end = start + size_in_bytes; | 1461 Address end = start + size_in_bytes; |
1363 for (Address a = start; a < end; a += size) { | 1462 for (Address a = start; a < end; a += size) { |
1364 Heap::cell_space()->Free(a); | 1463 Heap::cell_space()->Free(a, add_to_freelist); |
1365 } | 1464 } |
1366 } | 1465 } |
1367 | 1466 |
1368 | 1467 |
1369 void MarkCompactCollector::EncodeForwardingAddresses() { | 1468 void MarkCompactCollector::EncodeForwardingAddresses() { |
1370 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); | 1469 ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES); |
1371 // Objects in the active semispace of the young generation may be | 1470 // Objects in the active semispace of the young generation may be |
1372 // relocated to the inactive semispace (if not promoted). Set the | 1471 // relocated to the inactive semispace (if not promoted). Set the |
1373 // relocation info to the beginning of the inactive semispace. | 1472 // relocation info to the beginning of the inactive semispace. |
1374 Heap::new_space()->MCResetRelocationInfo(); | 1473 Heap::new_space()->MCResetRelocationInfo(); |
(...skipping 835 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2210 #ifdef ENABLE_LOGGING_AND_PROFILING | 2309 #ifdef ENABLE_LOGGING_AND_PROFILING |
2211 if (obj->IsCode()) { | 2310 if (obj->IsCode()) { |
2212 PROFILE(CodeDeleteEvent(obj->address())); | 2311 PROFILE(CodeDeleteEvent(obj->address())); |
2213 } else if (obj->IsJSFunction()) { | 2312 } else if (obj->IsJSFunction()) { |
2214 PROFILE(FunctionDeleteEvent(obj->address())); | 2313 PROFILE(FunctionDeleteEvent(obj->address())); |
2215 } | 2314 } |
2216 #endif | 2315 #endif |
2217 } | 2316 } |
2218 | 2317 |
2219 } } // namespace v8::internal | 2318 } } // namespace v8::internal |
OLD | NEW |