Index: src/spaces.h
diff --git a/src/spaces.h b/src/spaces.h
index 855359835293d05bf7523a7a5b187d96a4ec7cbb..32850781452f82b1fe880a9ef2a8d3bd0a017fba 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -1905,9 +1905,6 @@ class PagedSpace : public Space {
   bool is_iterable() { return is_iterable_; }
   void set_is_iterable(bool b) { is_iterable_ = b; }
 
-  bool is_swept_concurrently() { return is_swept_concurrently_; }
-  void set_is_swept_concurrently(bool b) { is_swept_concurrently_ = b; }
-
   // Evacuation candidates are swept by evacuator. Needs to return a valid
   // result before _and_ after evacuation has finished.
   static bool ShouldBeSweptBySweeperThreads(Page* p) {
@@ -1992,9 +1989,6 @@ class PagedSpace : public Space {
   // This space was swept precisely, hence it is iterable.
   bool is_iterable_;
 
-  // This space is currently swept by sweeper threads.
-  bool is_swept_concurrently_;
-
   // The number of free bytes which could be reclaimed by advancing the
   // concurrent sweeper threads. This is only an estimation because concurrent
   // sweeping is done conservatively.
@@ -2017,7 +2011,8 @@ class PagedSpace : public Space {
   // If sweeping is still in progress try to sweep unswept pages. If that is
   // not successful, wait for the sweeper threads and re-try free-list
   // allocation.
-  MUST_USE_RESULT HeapObject* EnsureSweepingProgress(int size_in_bytes);
+  MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
+      int size_in_bytes);
 
   // Slow path of AllocateRaw. This function is space-dependent.
   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
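For readers skimming the diff, the standalone sketch below illustrates the control flow that the renamed helper's comment describes: help the sweeper by sweeping some pages on the main thread, retry free-list allocation, and only block on the sweeper threads as a last resort before signalling that a GC is needed. This is not V8 code; apart from the two method names taken from the diff, every type and helper here (FakeSpace, SweepSomePages, TryFreeListAllocation, WaitForSweeperThreads, sweeping_in_progress_) is a hypothetical stand-in.

// Minimal sketch of the allocation slow path implied by the diff's comments.
// All types and helpers are illustrative stand-ins, not V8 internals.
#include <cstdio>

struct HeapObject {};  // stand-in for v8::internal::HeapObject*

class FakeSpace {
 public:
  // Slow path: called after the fast bump-pointer/free-list allocation failed.
  HeapObject* SlowAllocateRaw(int size_in_bytes) {
    if (sweeping_in_progress_) {
      return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
    }
    return nullptr;  // nothing left to sweep; caller must trigger a GC
  }

 private:
  HeapObject* WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes) {
    // 1. Try to make progress ourselves: sweep unswept pages on this thread.
    SweepSomePages(size_in_bytes);
    if (HeapObject* result = TryFreeListAllocation(size_in_bytes)) return result;
    // 2. If that was not enough, block until concurrent sweeping is done
    //    and retry free-list allocation once more.
    WaitForSweeperThreads();
    return TryFreeListAllocation(size_in_bytes);
  }

  // Placeholders for the real sweeper / free-list machinery.
  void SweepSomePages(int /*size_in_bytes*/) {}
  void WaitForSweeperThreads() { sweeping_in_progress_ = false; }
  HeapObject* TryFreeListAllocation(int /*size_in_bytes*/) { return nullptr; }

  bool sweeping_in_progress_ = true;
};

int main() {
  FakeSpace space;
  HeapObject* result = space.SlowAllocateRaw(64);
  std::printf("allocation %s\n",
              result != nullptr ? "succeeded" : "failed; GC needed");
  return 0;
}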