OLD | NEW |
---|---|
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1451 matching lines...)
1462 // The bytes in the linear allocation area are not included in this total | 1462 // The bytes in the linear allocation area are not included in this total |
1463 // because updating the stats would slow down allocation. New pages are | 1463 // because updating the stats would slow down allocation. New pages are |
1464 // immediately added to the free list so they show up here. | 1464 // immediately added to the free list so they show up here. |
1465 intptr_t Available() { return free_list_.available(); } | 1465 intptr_t Available() { return free_list_.available(); } |
1466 | 1466 |
1467 // Allocated bytes in this space. Garbage bytes that were not found due to | 1467 // Allocated bytes in this space. Garbage bytes that were not found due to |
1468 // lazy sweeping are counted as being allocated! The bytes in the current | 1468 // lazy sweeping are counted as being allocated! The bytes in the current |
1469 // linear allocation area (between top and limit) are also counted here. | 1469 // linear allocation area (between top and limit) are also counted here. |
1470 virtual intptr_t Size() { return accounting_stats_.Size(); } | 1470 virtual intptr_t Size() { return accounting_stats_.Size(); } |
1471 | 1471 |
1472 // As size, but the bytes in the current linear allocation area are not | 1472 // As size, but the bytes in lazily swept pages are estimated and the bytes |
1473 // included. | 1473 // in the current linear allocation area are not included. |
1474 virtual intptr_t SizeOfObjects() { return Size() - (limit() - top()); } | 1474 virtual intptr_t SizeOfObjects() { |
1475 ASSERT(!IsSweepingComplete() || (unswept_dead_bytes_ == 0)); | |
1476 return Size() - unswept_dead_bytes_ - (limit() - top()); | |
1477 } | |
1475 | 1478 |
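Note on the SizeOfObjects() change above: the new body subtracts both the estimated dead bytes on pages that have not been lazily swept yet and the unused tail of the linear allocation area. Below is a minimal, illustrative sketch of that arithmetic with made-up numbers; the names mirror the patch, but nothing here is the real V8 API.

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical values, for illustration only.
      intptr_t size = 4 * 1024 * 1024;           // accounting_stats_.Size()
      intptr_t unswept_dead_bytes = 256 * 1024;  // estimate from unswept pages
      intptr_t lab_tail = 8 * 1024;              // limit() - top()
      // SizeOfObjects() after the patch: total size minus both corrections.
      intptr_t size_of_objects = size - unswept_dead_bytes - lab_tail;
      std::printf("SizeOfObjects = %lld\n",
                  static_cast<long long>(size_of_objects));
      // Once sweeping is complete the estimate must be back at zero, which is
      // the condition the new ASSERT in SizeOfObjects() checks.
      return 0;
    }
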
1476 // Wasted bytes in this space. These are just the bytes that were thrown away | 1479 // Wasted bytes in this space. These are just the bytes that were thrown away |
1477 // due to being too small to use for allocation. They do not include the | 1480 // due to being too small to use for allocation. They do not include the |
1478 // free bytes that were not found at all due to lazy sweeping. | 1481 // free bytes that were not found at all due to lazy sweeping. |
1479 virtual intptr_t Waste() { return accounting_stats_.Waste(); } | 1482 virtual intptr_t Waste() { return accounting_stats_.Waste(); } |
1480 | 1483 |
1481 // Returns the allocation pointer in this space. | 1484 // Returns the allocation pointer in this space. |
1482 Address top() { | 1485 Address top() { return allocation_info_.top; } |
1483 return allocation_info_.top; | |
1484 } | |
1485 Address limit() { return allocation_info_.limit; } | 1486 Address limit() { return allocation_info_.limit; } |
1486 | 1487 |
1487 // Allocate the requested number of bytes in the space if possible, return a | 1488 // Allocate the requested number of bytes in the space if possible, return a |
1488 // failure object if not. | 1489 // failure object if not. |
1489 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); | 1490 MUST_USE_RESULT inline MaybeObject* AllocateRaw(int size_in_bytes); |
1490 | 1491 |
1491 virtual bool ReserveSpace(int bytes); | 1492 virtual bool ReserveSpace(int bytes); |
1492 | 1493 |
1493 // Give a block of memory to the space's free list. It might be added to | 1494 // Give a block of memory to the space's free list. It might be added to |
1494 // the free list or accounted as waste. | 1495 // the free list or accounted as waste. |
(...skipping 55 matching lines...)
1550 | 1551 |
1551 // Evacuation candidates are swept by evacuator. Needs to return a valid | 1552 // Evacuation candidates are swept by evacuator. Needs to return a valid |
1552 // result before _and_ after evacuation has finished. | 1553 // result before _and_ after evacuation has finished. |
1553 static bool ShouldBeSweptLazily(Page* p) { | 1554 static bool ShouldBeSweptLazily(Page* p) { |
1554 return !p->IsEvacuationCandidate() && | 1555 return !p->IsEvacuationCandidate() && |
1555 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && | 1556 !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && |
1556 !p->WasSweptPrecisely(); | 1557 !p->WasSweptPrecisely(); |
1557 } | 1558 } |
1558 | 1559 |
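Side note on ShouldBeSweptLazily() above: a page qualifies for lazy sweeping only if it is not an evacuation candidate, is not flagged for rescan on evacuation, and has not already been swept precisely. The standalone sketch below restates that predicate over a hypothetical flag struct (not the real Page class) just to make the three conditions explicit.

    #include <cassert>

    // Hypothetical stand-in for the relevant Page flags.
    struct PageFlags {
      bool is_evacuation_candidate;
      bool rescan_on_evacuation;
      bool was_swept_precisely;
    };

    // Mirrors the predicate in the patch: only untouched, non-evacuated
    // pages are handed to the lazy sweeper.
    bool ShouldBeSweptLazily(const PageFlags& p) {
      return !p.is_evacuation_candidate &&
             !p.rescan_on_evacuation &&
             !p.was_swept_precisely;
    }

    int main() {
      assert(ShouldBeSweptLazily({false, false, false}));
      assert(!ShouldBeSweptLazily({true, false, false}));  // being evacuated
      assert(!ShouldBeSweptLazily({false, false, true}));  // already swept
      return 0;
    }
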
1559 void SetPagesToSweep(Page* first) { | 1560 void SetPagesToSweep(Page* first) { |
1561 ASSERT(unswept_dead_bytes_ == 0); | |
1560 if (first == &anchor_) first = NULL; | 1562 if (first == &anchor_) first = NULL; |
1561 first_unswept_page_ = first; | 1563 first_unswept_page_ = first; |
1562 } | 1564 } |
1563 | 1565 |
1566 void MarkPageForLazySweep(Page* p) { | |
1567 unswept_dead_bytes_ += (Page::kObjectAreaSize - p->LiveBytes()); | |
1568 } | |
1569 | |
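On MarkPageForLazySweep() above: the counter grows by everything on the page that was not marked live, so it is an upper bound on what the lazy sweeper can eventually return to the free list (conservative sweeping may keep some of it). A small illustrative sketch follows, using a hypothetical page type and an assumed 1 MB object area in place of Page::kObjectAreaSize.

    #include <cstdint>

    // Assumed value, standing in for Page::kObjectAreaSize.
    constexpr intptr_t kObjectAreaSize = 1 << 20;

    // Hypothetical page record; only LiveBytes() matters for the estimate.
    struct FakePage {
      intptr_t live_bytes;
      intptr_t LiveBytes() const { return live_bytes; }
    };

    // Accumulates the same kind of estimate the patch keeps in
    // unswept_dead_bytes_: everything not marked live counts as reclaimable.
    intptr_t EstimateUnsweptDeadBytes(const FakePage* pages, int count) {
      intptr_t estimate = 0;
      for (int i = 0; i < count; ++i) {
        estimate += kObjectAreaSize - pages[i].LiveBytes();
      }
      return estimate;
    }
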
1564 bool AdvanceSweeper(intptr_t bytes_to_sweep); | 1570 bool AdvanceSweeper(intptr_t bytes_to_sweep); |
1565 | 1571 |
1566 bool IsSweepingComplete() { | 1572 bool IsSweepingComplete() { |
1567 return !first_unswept_page_->is_valid(); | 1573 return !first_unswept_page_->is_valid(); |
1568 } | 1574 } |
1569 | 1575 |
1570 Page* FirstPage() { return anchor_.next_page(); } | 1576 Page* FirstPage() { return anchor_.next_page(); } |
1571 Page* LastPage() { return anchor_.prev_page(); } | 1577 Page* LastPage() { return anchor_.prev_page(); } |
1572 | 1578 |
1573 // Returns zero for pages that have so little fragmentation that it is not | 1579 // Returns zero for pages that have so little fragmentation that it is not |
(...skipping 66 matching lines...)
1640 AllocationInfo allocation_info_; | 1646 AllocationInfo allocation_info_; |
1641 | 1647 |
1642 // Bytes of each page that cannot be allocated. Possibly non-zero | 1648 // Bytes of each page that cannot be allocated. Possibly non-zero |
1643 // for pages in spaces with only fixed-size objects. Always zero | 1649 // for pages in spaces with only fixed-size objects. Always zero |
1644 // for pages in spaces with variable sized objects (those pages are | 1650 // for pages in spaces with variable sized objects (those pages are |
1645 // padded with free-list nodes). | 1651 // padded with free-list nodes). |
1646 int page_extra_; | 1652 int page_extra_; |
1647 | 1653 |
1648 bool was_swept_conservatively_; | 1654 bool was_swept_conservatively_; |
1649 | 1655 |
1656 // The first page to be swept when the lazy sweeper advances. Is set | |
1657 // to NULL when all pages have been swept. | |
1650 Page* first_unswept_page_; | 1658 Page* first_unswept_page_; |
1651 | 1659 |
1660 // The number of dead bytes which could be reclaimed by advancing the | |
1661 // lazy sweeper. This is only an estimation because lazy sweeping is | |
1662 // done conservatively. | |
1663 intptr_t unswept_dead_bytes_; | |
Vyacheslav Egorov (Chromium) 2012/01/11 10:16:49:
I would call them free not dead (I think object ca
Michael Starzinger 2012/01/11 10:34:00:
Done.
| |
1664 | |
1652 // Expands the space by allocating a fixed number of pages. Returns false if | 1665 // Expands the space by allocating a fixed number of pages. Returns false if |
1653 // it cannot allocate requested number of pages from OS, or if the hard heap | 1666 // it cannot allocate requested number of pages from OS, or if the hard heap |
1654 // size limit has been hit. | 1667 // size limit has been hit. |
1655 bool Expand(); | 1668 bool Expand(); |
1656 | 1669 |
1657 // Generic fast case allocation function that tries linear allocation at the | 1670 // Generic fast case allocation function that tries linear allocation at the |
1658 // address denoted by top in allocation_info_. | 1671 // address denoted by top in allocation_info_. |
1659 inline HeapObject* AllocateLinearly(int size_in_bytes); | 1672 inline HeapObject* AllocateLinearly(int size_in_bytes); |
1660 | 1673 |
1661 // Slow path of AllocateRaw. This function is space-dependent. | 1674 // Slow path of AllocateRaw. This function is space-dependent. |
(...skipping 968 matching lines...)
2630 } | 2643 } |
2631 // Must be small, since an iteration is used for lookup. | 2644 // Must be small, since an iteration is used for lookup. |
2632 static const int kMaxComments = 64; | 2645 static const int kMaxComments = 64; |
2633 }; | 2646 }; |
2634 #endif | 2647 #endif |
2635 | 2648 |
2636 | 2649 |
2637 } } // namespace v8::internal | 2650 } } // namespace v8::internal |
2638 | 2651 |
2639 #endif // V8_SPACES_H_ | 2652 #endif // V8_SPACES_H_ |