Chromium Code Reviews

Unified Diff: src/mark-compact.cc

Issue 3260001: Force relinking of paged space if first attempt to recommit from space fails. (Closed)
Patch Set: Virtualizing block deallocation. Created 10 years, 4 months ago
Index: src/mark-compact.cc
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index e7a26194e99f3231451fc70ab39ab1397b57993b..e84e529fc4f54c517125a292cff998482caf6c2e 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -1627,7 +1627,7 @@ static void SweepNewSpace(NewSpace* space) {
}
-static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
+static void SweepSpace(PagedSpace* space) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
// During sweeping of paged space we are trying to find longest sequences
@@ -1668,10 +1668,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
- dealloc(free_start,
- static_cast<int>(current - free_start),
- true,
- false);
+ space->DeallocateBlock(free_start,
+ static_cast<int>(current - free_start),
+ true);
is_previous_alive = true;
}
} else {
@@ -1701,7 +1700,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
- dealloc(free_start, size_in_bytes, false, true);
+ space->DeallocateBlock(free_start, size_in_bytes, false);
}
}
} else {
@@ -1717,7 +1716,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
if (last_free_size > 0) {
Page::FromAddress(last_free_start)->
SetAllocationWatermark(last_free_start);
- dealloc(last_free_start, last_free_size, true, true);
+ space->DeallocateBlock(last_free_start, last_free_size, true);
last_free_start = NULL;
last_free_size = 0;
}
@@ -1748,7 +1747,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
- dealloc(last_free_start, last_free_size, false, true);
+ space->DeallocateBlock(last_free_start, last_free_size, false);
new_allocation_top = last_free_start;
}
@@ -1769,61 +1768,6 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
}
-void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateOldDataBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateCodeBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
-}
-
-
-void MarkCompactCollector::DeallocateMapBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- // Objects in map space are assumed to have size Map::kSize and a
- // valid map in their first word. Thus, we break the free block up into
- // chunks and free them separately.
- ASSERT(size_in_bytes % Map::kSize == 0);
- Address end = start + size_in_bytes;
- for (Address a = start; a < end; a += Map::kSize) {
- Heap::map_space()->Free(a, add_to_freelist);
- }
-}
-
-
-void MarkCompactCollector::DeallocateCellBlock(Address start,
- int size_in_bytes,
- bool add_to_freelist,
- bool last_on_page) {
- // Free-list elements in cell space are assumed to have a fixed size.
- // We break the free block into chunks and add them to the free list
- // individually.
- int size = Heap::cell_space()->object_size_in_bytes();
- ASSERT(size_in_bytes % size == 0);
- Address end = start + size_in_bytes;
- for (Address a = start; a < end; a += size) {
- Heap::cell_space()->Free(a, add_to_freelist);
- }
-}
-
-
void MarkCompactCollector::EncodeForwardingAddresses() {
ASSERT(state_ == ENCODE_FORWARDING_ADDRESSES);
// Objects in the active semispace of the young generation may be
@@ -2088,14 +2032,14 @@ void MarkCompactCollector::SweepSpaces() {
// the map space last because freeing non-live maps overwrites them and
// the other spaces rely on possibly non-live maps to get the sizes for
// non-live objects.
- SweepSpace(Heap::old_pointer_space(), &DeallocateOldPointerBlock);
- SweepSpace(Heap::old_data_space(), &DeallocateOldDataBlock);
- SweepSpace(Heap::code_space(), &DeallocateCodeBlock);
- SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
+ SweepSpace(Heap::old_pointer_space());
+ SweepSpace(Heap::old_data_space());
+ SweepSpace(Heap::code_space());
+ SweepSpace(Heap::cell_space());
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
SweepNewSpace(Heap::new_space());
}
- SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+ SweepSpace(Heap::map_space());
Heap::IterateDirtyRegions(Heap::map_space(),
&Heap::IteratePointersInDirtyMapsRegion,
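
Note on the interface this patch relies on: the five per-space DeallocateFunction callbacks are deleted above and SweepSpace now calls DeallocateBlock on the space itself; the corresponding declarations live in src/spaces.h (the next file in this change, not shown here). As a rough sketch only, and not the actual spaces.h diff, the virtualized interface implied by the removed callback bodies could look like the following: a virtual method on PagedSpace, with the old/data/code spaces returning the whole block in one piece and the fixed-size spaces (map, cell) splitting it into object-sized chunks. Class names OldSpace, MapSpace, and FixedSpace are assumptions based on the V8 source tree of this period.

// Illustrative sketch (assumption), reconstructed from the removed
// DeallocateOldPointerBlock/DeallocateMapBlock/... bodies above.
// Not the actual src/spaces.h change from this CL.
class PagedSpace : public Space {
 public:
  // Each space decides how a dead block is handed back to its free list.
  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) = 0;
};

class OldSpace : public PagedSpace {
 public:
  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) {
    // Whole block goes back in one piece, as the removed
    // old-pointer/old-data/code callbacks did.
    Free(start, size_in_bytes, add_to_freelist);
  }
};

class MapSpace : public FixedSpace {
 public:
  virtual void DeallocateBlock(Address start,
                               int size_in_bytes,
                               bool add_to_freelist) {
    // Fixed-size free-list entries: split the block into Map::kSize
    // chunks, mirroring the removed DeallocateMapBlock. CellSpace would
    // do the same with its own object_size_in_bytes().
    ASSERT(size_in_bytes % Map::kSize == 0);
    for (Address a = start; a < start + size_in_bytes; a += Map::kSize) {
      Free(a, add_to_freelist);
    }
  }
};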
