| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/heap.h" | 5 #include "src/heap/heap.h" |
| 6 | 6 |
| 7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
| 8 #include "src/api.h" | 8 #include "src/api.h" |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/base/once.h" | 10 #include "src/base/once.h" |
| (...skipping 1437 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1448 !HeapObject::cast(*p)->map_word().IsForwardingAddress(); | 1448 !HeapObject::cast(*p)->map_word().IsForwardingAddress(); |
| 1449 } | 1449 } |
| 1450 | 1450 |
| 1451 | 1451 |
| 1452 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, | 1452 void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page, |
| 1453 StoreBufferEvent event) { | 1453 StoreBufferEvent event) { |
| 1454 heap->store_buffer_rebuilder_.Callback(page, event); | 1454 heap->store_buffer_rebuilder_.Callback(page, event); |
| 1455 } | 1455 } |
| 1456 | 1456 |
| 1457 | 1457 |
| 1458 void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) { | |
| 1459 if (event == kStoreBufferStartScanningPagesEvent) { | |
| 1460 start_of_current_page_ = NULL; | |
| 1461 current_page_ = NULL; | |
| 1462 } else if (event == kStoreBufferScanningPageEvent) { | |
| 1463 if (current_page_ != NULL) { | |
| 1464 // If this page already overflowed the store buffer during this iteration. | |
| 1465 if (current_page_->scan_on_scavenge()) { | |
| 1466 // Then we should wipe out the entries that have been added for it. | |
| 1467 store_buffer_->SetTop(start_of_current_page_); | |
| 1468 } else if (store_buffer_->Top() - start_of_current_page_ >= | |
| 1469 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) { | |
| 1470 // Did we find too many pointers in the previous page? The heuristic is | |
| 1471 // that no page can take more than 1/5 the remaining slots in the store | |
| 1472 // buffer. | |
| 1473 current_page_->set_scan_on_scavenge(true); | |
| 1474 store_buffer_->SetTop(start_of_current_page_); | |
| 1475 } else { | |
| 1476 // In this case the page we scanned took a reasonable number of slots in | |
| 1477 // the store buffer. It has now been rehabilitated and is no longer | |
| 1478 // marked scan_on_scavenge. | |
| 1479 DCHECK(!current_page_->scan_on_scavenge()); | |
| 1480 } | |
| 1481 } | |
| 1482 start_of_current_page_ = store_buffer_->Top(); | |
| 1483 current_page_ = page; | |
| 1484 } else if (event == kStoreBufferFullEvent) { | |
| 1485 // The current page overflowed the store buffer again. Wipe out its entries | |
| 1486 // in the store buffer and mark it scan-on-scavenge again. This may happen | |
| 1487 // several times while scanning. | |
| 1488 if (current_page_ == NULL) { | |
| 1489 // Store Buffer overflowed while scanning promoted objects. These are not | |
| 1490 // in any particular page, though they are likely to be clustered by the | |
| 1491 // allocation routines. | |
| 1492 store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2); | |
| 1493 } else { | |
| 1494 // Store Buffer overflowed while scanning a particular old space page for | |
| 1495 // pointers to new space. | |
| 1496 DCHECK(current_page_ == page); | |
| 1497 DCHECK(page != NULL); | |
| 1498 current_page_->set_scan_on_scavenge(true); | |
| 1499 DCHECK(start_of_current_page_ != store_buffer_->Top()); | |
| 1500 store_buffer_->SetTop(start_of_current_page_); | |
| 1501 } | |
| 1502 } else { | |
| 1503 UNREACHABLE(); | |
| 1504 } | |
| 1505 } | |
| 1506 | |
| 1507 | |
| 1508 void PromotionQueue::Initialize() { | 1458 void PromotionQueue::Initialize() { |
| 1509 // The last to-space page may be used for promotion queue. On promotion | 1459 // The last to-space page may be used for promotion queue. On promotion |
| 1510 // conflict, we use the emergency stack. | 1460 // conflict, we use the emergency stack. |
| 1511 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) == | 1461 DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) == |
| 1512 0); | 1462 0); |
| 1513 front_ = rear_ = | 1463 front_ = rear_ = |
| 1514 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd()); | 1464 reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd()); |
| 1515 limit_ = reinterpret_cast<intptr_t*>( | 1465 limit_ = reinterpret_cast<intptr_t*>( |
| 1516 Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start()); | 1466 Page::FromAllocationTop(reinterpret_cast<Address>(rear_))->area_start()); |
| 1517 emergency_stack_ = NULL; | 1467 emergency_stack_ = NULL; |
| (...skipping 5267 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 6785 *object_type = "CODE_TYPE"; \ | 6735 *object_type = "CODE_TYPE"; \ |
| 6786 *object_sub_type = "CODE_AGE/" #name; \ | 6736 *object_sub_type = "CODE_AGE/" #name; \ |
| 6787 return true; | 6737 return true; |
| 6788 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) | 6738 CODE_AGE_LIST_COMPLETE(COMPARE_AND_RETURN_NAME) |
| 6789 #undef COMPARE_AND_RETURN_NAME | 6739 #undef COMPARE_AND_RETURN_NAME |
| 6790 } | 6740 } |
| 6791 return false; | 6741 return false; |
| 6792 } | 6742 } |
| 6793 } // namespace internal | 6743 } // namespace internal |
| 6794 } // namespace v8 | 6744 } // namespace v8 |
| OLD | NEW |