Index: runtime/vm/pages.cc
===================================================================
--- runtime/vm/pages.cc (revision 44579)
+++ runtime/vm/pages.cc (working copy)
@@ -448,7 +448,7 @@
  private:
   const PageSpace* space_;
   MutexLocker ml_;
-  NoGCScope no_gc;
+  NoSafepointScope no_safepoint;
   HeapPage* page_;
 };
@@ -471,7 +471,7 @@
  private:
   const PageSpace* space_;
   MutexLocker ml_;
-  NoGCScope no_gc;
+  NoSafepointScope no_safepoint;
   HeapPage* page_;
 };
@@ -493,7 +493,7 @@
  private:
   const PageSpace* space_;
   MutexLocker ml_;
-  NoGCScope no_gc;
+  NoSafepointScope no_safepoint;
   HeapPage* page_;
 };
@@ -670,7 +670,7 @@
   // TODO(19445): Use ExclusivePageIterator once HeapMap supports large pages.
   MutexLocker ml(pages_lock_);
   MakeIterable();
-  NoGCScope no_gc;
+  NoSafepointScope no_safepoint;
   JSONArray all_pages(&heap_map, "pages");
   for (HeapPage* page = pages_; page != NULL; page = page->next()) {
     JSONObject page_container(&all_pages);
@@ -713,7 +713,7 @@
 void PageSpace::WriteProtectCode(bool read_only) {
   if (FLAG_write_protect_code) {
     MutexLocker ml(pages_lock_);
-    NoGCScope no_gc;
+    NoSafepointScope no_safepoint;
     // No need to go through all of the data pages first.
     HeapPage* page = exec_pages_;
     while (page != NULL) {