OLD | NEW |
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "test/cctest/cctest.h" | 5 #include "test/cctest/cctest.h" |
6 #include "test/cctest/heap/heap-tester.h" | 6 #include "test/cctest/heap/heap-tester.h" |
7 #include "test/cctest/heap/utils-inl.h" | 7 #include "test/cctest/heap/heap-utils.h" |
8 | 8 |
9 namespace v8 { | 9 namespace v8 { |
10 namespace internal { | 10 namespace internal { |
11 | 11 |
12 static void CheckInvariantsOfAbortedPage(Page* page) { | 12 namespace { |
| 13 |
| 14 void CheckInvariantsOfAbortedPage(Page* page) { |
13 // Check invariants: | 15 // Check invariants: |
14 // 1) Markbits are cleared | 16 // 1) Markbits are cleared |
15 // 2) The page is not marked as evacuation candidate anymore | 17 // 2) The page is not marked as evacuation candidate anymore |
16 // 3) The page is not marked as aborted compaction anymore. | 18 // 3) The page is not marked as aborted compaction anymore. |
17 CHECK(page->markbits()->IsClean()); | 19 CHECK(page->markbits()->IsClean()); |
18 CHECK(!page->IsEvacuationCandidate()); | 20 CHECK(!page->IsEvacuationCandidate()); |
19 CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)); | 21 CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)); |
20 } | 22 } |
21 | 23 |
| 24 void CheckAllObjectsOnPage(std::vector<Handle<FixedArray>>& handles, |
| 25 Page* page) { |
| 26 for (auto& fixed_array : handles) { |
| 27 CHECK(Page::FromAddress(fixed_array->address()) == page); |
| 28 } |
| 29 } |
| 30 |
| 31 } // namespace |
22 | 32 |
23 HEAP_TEST(CompactionFullAbortedPage) { | 33 HEAP_TEST(CompactionFullAbortedPage) { |
24 // Test the scenario where we reach OOM during compaction and the whole page | 34 // Test the scenario where we reach OOM during compaction and the whole page |
25 // is aborted. | 35 // is aborted. |
26 | 36 |
27 // Disable concurrent sweeping to ensure memory is in an expected state, i.e., | 37 // Disable concurrent sweeping to ensure memory is in an expected state, i.e., |
28 // we can reach the state of a half aborted page. | 38 // we can reach the state of a half aborted page. |
29 FLAG_concurrent_sweeping = false; | 39 FLAG_concurrent_sweeping = false; |
30 FLAG_manual_evacuation_candidates_selection = true; | 40 FLAG_manual_evacuation_candidates_selection = true; |
31 CcTest::InitializeVM(); | 41 CcTest::InitializeVM(); |
32 Isolate* isolate = CcTest::i_isolate(); | 42 Isolate* isolate = CcTest::i_isolate(); |
33 Heap* heap = isolate->heap(); | 43 Heap* heap = isolate->heap(); |
34 { | 44 { |
35 HandleScope scope1(isolate); | 45 HandleScope scope1(isolate); |
36 PageIterator it(heap->old_space()); | 46 |
37 while (it.has_next()) { | 47 heap::SealCurrentObjects(heap); |
38 it.next()->MarkNeverAllocateForTesting(); | |
39 } | |
40 | 48 |
41 { | 49 { |
42 HandleScope scope2(isolate); | 50 HandleScope scope2(isolate); |
43 CHECK(heap->old_space()->Expand()); | 51 CHECK(heap->old_space()->Expand()); |
44 auto compaction_page_handles = | 52 auto compaction_page_handles = |
45 CreatePadding(heap, Page::kAllocatableMemory, TENURED); | 53 heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED); |
46 Page* to_be_aborted_page = | 54 Page* to_be_aborted_page = |
47 Page::FromAddress(compaction_page_handles.front()->address()); | 55 Page::FromAddress(compaction_page_handles.front()->address()); |
48 to_be_aborted_page->SetFlag( | 56 to_be_aborted_page->SetFlag( |
49 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); | 57 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); |
| 58 CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page); |
50 | 59 |
51 heap->set_force_oom(true); | 60 heap->set_force_oom(true); |
52 heap->CollectAllGarbage(); | 61 heap->CollectAllGarbage(); |
53 heap->mark_compact_collector()->EnsureSweepingCompleted(); | 62 heap->mark_compact_collector()->EnsureSweepingCompleted(); |
54 | 63 |
55 // Check that all handles still point to the same page, i.e., compaction | 64 // Check that all handles still point to the same page, i.e., compaction |
56 // has been aborted on the page. | 65 // has been aborted on the page. |
57 for (Handle<FixedArray> object : compaction_page_handles) { | 66 for (Handle<FixedArray> object : compaction_page_handles) { |
58 CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address())); | 67 CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address())); |
59 } | 68 } |
60 CheckInvariantsOfAbortedPage(to_be_aborted_page); | 69 CheckInvariantsOfAbortedPage(to_be_aborted_page); |
61 } | 70 } |
62 } | 71 } |
63 } | 72 } |
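Note: heap::SealCurrentObjects(heap), introduced above and reused in the three tests below, replaces the inline page-marking loop visible on the OLD side. A minimal sketch of what such a helper presumably does, assuming it merely wraps that loop (the actual implementation lives in test/cctest/heap/heap-utils.h and may do more, e.g. finish sweeping first):

    // Hypothetical sketch, inside namespace v8::internal::heap, with
    // src/heap/spaces.h available: forbid further allocation on all existing
    // old-space pages so that subsequently created objects land on fresh pages.
    void SealCurrentObjects(Heap* heap) {
      PageIterator it(heap->old_space());
      while (it.has_next()) {
        it.next()->MarkNeverAllocateForTesting();
      }
    }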
64 | 73 |
65 | 74 |
66 HEAP_TEST(CompactionPartiallyAbortedPage) { | 75 HEAP_TEST(CompactionPartiallyAbortedPage) { |
67 // Test the scenario where we reach OOM during compaction and parts of the | 76 // Test the scenario where we reach OOM during compaction and parts of the |
68 // page have already been migrated to a new one. | 77 // page have already been migrated to a new one. |
69 | 78 |
70 // Disable concurrent sweeping to ensure memory is in an expected state, i.e., | 79 // Disable concurrent sweeping to ensure memory is in an expected state, i.e., |
71 // we can reach the state of a half aborted page. | 80 // we can reach the state of a half aborted page. |
72 FLAG_concurrent_sweeping = false; | 81 FLAG_concurrent_sweeping = false; |
73 FLAG_manual_evacuation_candidates_selection = true; | 82 FLAG_manual_evacuation_candidates_selection = true; |
74 | 83 |
75 const int object_size = 128 * KB; | 84 const int objects_per_page = 10; |
| 85 const int object_size = Page::kAllocatableMemory / objects_per_page; |
76 | 86 |
77 CcTest::InitializeVM(); | 87 CcTest::InitializeVM(); |
78 Isolate* isolate = CcTest::i_isolate(); | 88 Isolate* isolate = CcTest::i_isolate(); |
79 Heap* heap = isolate->heap(); | 89 Heap* heap = isolate->heap(); |
80 { | 90 { |
81 HandleScope scope1(isolate); | 91 HandleScope scope1(isolate); |
82 PageIterator it(heap->old_space()); | 92 |
83 while (it.has_next()) { | 93 heap::SealCurrentObjects(heap); |
84 it.next()->MarkNeverAllocateForTesting(); | |
85 } | |
86 | 94 |
87 { | 95 { |
88 HandleScope scope2(isolate); | 96 HandleScope scope2(isolate); |
89 // Fill another page with objects of size {object_size} (last one is | 97 // Fill another page with objects of size {object_size} (last one is |
90 // properly adjusted). | 98 // properly adjusted). |
91 CHECK(heap->old_space()->Expand()); | 99 CHECK(heap->old_space()->Expand()); |
92 auto compaction_page_handles = | 100 auto compaction_page_handles = heap::CreatePadding( |
93 CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size); | 101 heap, Page::kAllocatableMemory, TENURED, object_size); |
94 Page* to_be_aborted_page = | 102 Page* to_be_aborted_page = |
95 Page::FromAddress(compaction_page_handles.front()->address()); | 103 Page::FromAddress(compaction_page_handles.front()->address()); |
96 to_be_aborted_page->SetFlag( | 104 to_be_aborted_page->SetFlag( |
97 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); | 105 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); |
| 106 CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page); |
98 | 107 |
99 { | 108 { |
100 // Add another page that is filled with {num_objects} objects of size | 109 // Add another page that is filled with {num_objects} objects of size |
101 // {object_size}. | 110 // {object_size}. |
102 HandleScope scope3(isolate); | 111 HandleScope scope3(isolate); |
103 CHECK(heap->old_space()->Expand()); | 112 CHECK(heap->old_space()->Expand()); |
104 const int num_objects = 3; | 113 const int num_objects = 3; |
105 std::vector<Handle<FixedArray>> page_to_fill_handles = CreatePadding( | 114 std::vector<Handle<FixedArray>> page_to_fill_handles = |
106 heap, object_size * num_objects, TENURED, object_size); | 115 heap::CreatePadding(heap, object_size * num_objects, TENURED, |
| 116 object_size); |
107 Page* page_to_fill = | 117 Page* page_to_fill = |
108 Page::FromAddress(page_to_fill_handles.front()->address()); | 118 Page::FromAddress(page_to_fill_handles.front()->address()); |
109 | 119 |
110 heap->set_force_oom(true); | 120 heap->set_force_oom(true); |
111 heap->CollectAllGarbage(); | 121 heap->CollectAllGarbage(); |
112 heap->mark_compact_collector()->EnsureSweepingCompleted(); | 122 heap->mark_compact_collector()->EnsureSweepingCompleted(); |
113 | 123 |
114 bool migration_aborted = false; | 124 bool migration_aborted = false; |
115 for (Handle<FixedArray> object : compaction_page_handles) { | 125 for (Handle<FixedArray> object : compaction_page_handles) { |
116 // Once compaction has been aborted, all following objects still have | 126 // Once compaction has been aborted, all following objects still have |
(...skipping 21 matching lines...) |
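Note: both call shapes above use the same helper, heap::CreatePadding(heap, padding_size, tenure[, object_size]), returning the std::vector<Handle<FixedArray>> of filler objects. A hedged sketch of the allocation loop that signature implies; the loop structure and the byte-size-to-length arithmetic are assumptions, the real code is in test/cctest/heap/heap-utils.h:

    // Hypothetical sketch (inside v8::internal::heap): fill {padding_size}
    // bytes with FixedArrays of roughly {object_size} bytes each; per the
    // comments above, the last array is shrunk so the total fits exactly.
    std::vector<Handle<FixedArray>> CreatePadding(Heap* heap, int padding_size,
                                                  PretenureFlag tenure,
                                                  int object_size) {
      std::vector<Handle<FixedArray>> handles;
      Isolate* isolate = heap->isolate();
      int allocated = 0;
      while (allocated < padding_size) {
        int size = Min(object_size, padding_size - allocated);
        // Assumed byte-size -> element-count conversion.
        int length = (size - FixedArray::kHeaderSize) / kPointerSize;
        handles.push_back(isolate->factory()->NewFixedArray(length, tenure));
        allocated += size;
      }
      return handles;
    }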
138 // Test the scenario where we reach OOM during compaction and parts of the | 148 // Test the scenario where we reach OOM during compaction and parts of the |
139 // page have already been migrated to a new one. Objects on the aborted page | 149 // page have already been migrated to a new one. Objects on the aborted page |
140 // are linked together. This test makes sure that intra-aborted page pointers | 150 // are linked together. This test makes sure that intra-aborted page pointers |
141 // get properly updated. | 151 // get properly updated. |
142 | 152 |
143 // Disable concurrent sweeping to ensure memory is in an expected state, i.e., | 153 // Disable concurrent sweeping to ensure memory is in an expected state, i.e., |
144 // we can reach the state of a half aborted page. | 154 // we can reach the state of a half aborted page. |
145 FLAG_concurrent_sweeping = false; | 155 FLAG_concurrent_sweeping = false; |
146 FLAG_manual_evacuation_candidates_selection = true; | 156 FLAG_manual_evacuation_candidates_selection = true; |
147 | 157 |
148 const int object_size = 128 * KB; | 158 const int objects_per_page = 10; |
| 159 const int object_size = Page::kAllocatableMemory / objects_per_page; |
149 | 160 |
150 CcTest::InitializeVM(); | 161 CcTest::InitializeVM(); |
151 Isolate* isolate = CcTest::i_isolate(); | 162 Isolate* isolate = CcTest::i_isolate(); |
152 Heap* heap = isolate->heap(); | 163 Heap* heap = isolate->heap(); |
153 { | 164 { |
154 HandleScope scope1(isolate); | 165 HandleScope scope1(isolate); |
155 Handle<FixedArray> root_array = | 166 Handle<FixedArray> root_array = |
156 isolate->factory()->NewFixedArray(10, TENURED); | 167 isolate->factory()->NewFixedArray(10, TENURED); |
157 | 168 |
158 PageIterator it(heap->old_space()); | 169 heap::SealCurrentObjects(heap); |
159 while (it.has_next()) { | |
160 it.next()->MarkNeverAllocateForTesting(); | |
161 } | |
162 | 170 |
163 Page* to_be_aborted_page = nullptr; | 171 Page* to_be_aborted_page = nullptr; |
164 { | 172 { |
165 HandleScope temporary_scope(isolate); | 173 HandleScope temporary_scope(isolate); |
166 // Fill a fresh page with objects of size {object_size} (last one is | 174 // Fill a fresh page with objects of size {object_size} (last one is |
167 // properly adjusted). | 175 // properly adjusted). |
168 CHECK(heap->old_space()->Expand()); | 176 CHECK(heap->old_space()->Expand()); |
169 std::vector<Handle<FixedArray>> compaction_page_handles = | 177 std::vector<Handle<FixedArray>> compaction_page_handles = |
170 CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size); | 178 heap::CreatePadding(heap, Page::kAllocatableMemory, TENURED, |
| 179 object_size); |
171 to_be_aborted_page = | 180 to_be_aborted_page = |
172 Page::FromAddress(compaction_page_handles.front()->address()); | 181 Page::FromAddress(compaction_page_handles.front()->address()); |
173 to_be_aborted_page->SetFlag( | 182 to_be_aborted_page->SetFlag( |
174 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); | 183 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); |
175 for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) { | 184 for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) { |
176 compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]); | 185 compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]); |
177 } | 186 } |
178 root_array->set(0, *compaction_page_handles.back()); | 187 root_array->set(0, *compaction_page_handles.back()); |
| 188 CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page); |
179 } | 189 } |
180 | |
181 { | 190 { |
182 // Add another page that is filled with {num_objects} objects of size | 191 // Add another page that is filled with {num_objects} objects of size |
183 // {object_size}. | 192 // {object_size}. |
184 HandleScope scope3(isolate); | 193 HandleScope scope3(isolate); |
185 CHECK(heap->old_space()->Expand()); | 194 CHECK(heap->old_space()->Expand()); |
186 const int num_objects = 2; | 195 const int num_objects = 2; |
187 int used_memory = object_size * num_objects; | 196 int used_memory = object_size * num_objects; |
188 std::vector<Handle<FixedArray>> page_to_fill_handles = | 197 std::vector<Handle<FixedArray>> page_to_fill_handles = |
189 CreatePadding(heap, used_memory, TENURED, object_size); | 198 heap::CreatePadding(heap, used_memory, TENURED, object_size); |
190 Page* page_to_fill = | 199 Page* page_to_fill = |
191 Page::FromAddress(page_to_fill_handles.front()->address()); | 200 Page::FromAddress(page_to_fill_handles.front()->address()); |
192 | 201 |
193 heap->set_force_oom(true); | 202 heap->set_force_oom(true); |
194 heap->CollectAllGarbage(); | 203 heap->CollectAllGarbage(); |
195 heap->mark_compact_collector()->EnsureSweepingCompleted(); | 204 heap->mark_compact_collector()->EnsureSweepingCompleted(); |
196 | 205 |
197 // The following check makes sure that we compacted "some" objects, while | 206 // The following check makes sure that we compacted "some" objects, while |
198 // leaving others in place. | 207 // leaving others in place. |
199 bool in_place = true; | 208 bool in_place = true; |
(...skipping 26 matching lines...) |
226 // into new space. The test verifies that the store buffer entries are | 235 // into new space. The test verifies that the store buffer entries are |
227 // properly cleared and rebuilt after aborting a page. Failing to do so can | 236 // properly cleared and rebuilt after aborting a page. Failing to do so can |
228 // result in other objects being allocated in the free space where their | 237 // result in other objects being allocated in the free space where their |
229 // payload looks like a valid new space pointer. | 238 // payload looks like a valid new space pointer. |
230 | 239 |
231 // Disable concurrent sweeping to ensure memory is in an expected state, i.e., | 240 // Disable concurrent sweeping to ensure memory is in an expected state, i.e., |
232 // we can reach the state of a half aborted page. | 241 // we can reach the state of a half aborted page. |
233 FLAG_concurrent_sweeping = false; | 242 FLAG_concurrent_sweeping = false; |
234 FLAG_manual_evacuation_candidates_selection = true; | 243 FLAG_manual_evacuation_candidates_selection = true; |
235 | 244 |
236 const int object_size = 128 * KB; | 245 const int objects_per_page = 10; |
| 246 const int object_size = Page::kAllocatableMemory / objects_per_page; |
237 | 247 |
238 CcTest::InitializeVM(); | 248 CcTest::InitializeVM(); |
239 Isolate* isolate = CcTest::i_isolate(); | 249 Isolate* isolate = CcTest::i_isolate(); |
240 Heap* heap = isolate->heap(); | 250 Heap* heap = isolate->heap(); |
241 { | 251 { |
242 HandleScope scope1(isolate); | 252 HandleScope scope1(isolate); |
243 Handle<FixedArray> root_array = | 253 Handle<FixedArray> root_array = |
244 isolate->factory()->NewFixedArray(10, TENURED); | 254 isolate->factory()->NewFixedArray(10, TENURED); |
245 PageIterator it(heap->old_space()); | 255 heap::SealCurrentObjects(heap); |
246 while (it.has_next()) { | |
247 it.next()->MarkNeverAllocateForTesting(); | |
248 } | |
249 | 256 |
250 Page* to_be_aborted_page = nullptr; | 257 Page* to_be_aborted_page = nullptr; |
251 { | 258 { |
252 HandleScope temporary_scope(isolate); | 259 HandleScope temporary_scope(isolate); |
253 // Fill another page with objects of size {object_size} (last one is | 260 // Fill another page with objects of size {object_size} (last one is |
254 // properly adjusted). | 261 // properly adjusted). |
255 CHECK(heap->old_space()->Expand()); | 262 CHECK(heap->old_space()->Expand()); |
256 auto compaction_page_handles = | 263 auto compaction_page_handles = heap::CreatePadding( |
257 CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size); | 264 heap, Page::kAllocatableMemory, TENURED, object_size); |
258 // Sanity check that we have enough space for linking up arrays. | 265 // Sanity check that we have enough space for linking up arrays. |
259 CHECK_GE(compaction_page_handles.front()->length(), 2); | 266 CHECK_GE(compaction_page_handles.front()->length(), 2); |
260 to_be_aborted_page = | 267 to_be_aborted_page = |
261 Page::FromAddress(compaction_page_handles.front()->address()); | 268 Page::FromAddress(compaction_page_handles.front()->address()); |
262 to_be_aborted_page->SetFlag( | 269 to_be_aborted_page->SetFlag( |
263 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); | 270 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); |
264 | 271 |
265 for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) { | 272 for (size_t i = compaction_page_handles.size() - 1; i > 0; i--) { |
266 compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]); | 273 compaction_page_handles[i]->set(0, *compaction_page_handles[i - 1]); |
267 } | 274 } |
268 root_array->set(0, *compaction_page_handles.back()); | 275 root_array->set(0, *compaction_page_handles.back()); |
269 Handle<FixedArray> new_space_array = | 276 Handle<FixedArray> new_space_array = |
270 isolate->factory()->NewFixedArray(1, NOT_TENURED); | 277 isolate->factory()->NewFixedArray(1, NOT_TENURED); |
271 CHECK(heap->InNewSpace(*new_space_array)); | 278 CHECK(heap->InNewSpace(*new_space_array)); |
272 compaction_page_handles.front()->set(1, *new_space_array); | 279 compaction_page_handles.front()->set(1, *new_space_array); |
| 280 CheckAllObjectsOnPage(compaction_page_handles, to_be_aborted_page); |
273 } | 281 } |
274 | 282 |
275 { | 283 { |
276 // Add another page that is filled with {num_objects} objects of size | 284 // Add another page that is filled with {num_objects} objects of size |
277 // {object_size}. | 285 // {object_size}. |
278 HandleScope scope3(isolate); | 286 HandleScope scope3(isolate); |
279 CHECK(heap->old_space()->Expand()); | 287 CHECK(heap->old_space()->Expand()); |
280 const int num_objects = 2; | 288 const int num_objects = 2; |
281 int used_memory = object_size * num_objects; | 289 int used_memory = object_size * num_objects; |
282 std::vector<Handle<FixedArray>> page_to_fill_handles = | 290 std::vector<Handle<FixedArray>> page_to_fill_handles = |
283 CreatePadding(heap, used_memory, TENURED, object_size); | 291 heap::CreatePadding(heap, used_memory, TENURED, object_size); |
284 Page* page_to_fill = | 292 Page* page_to_fill = |
285 Page::FromAddress(page_to_fill_handles.front()->address()); | 293 Page::FromAddress(page_to_fill_handles.front()->address()); |
286 | 294 |
287 heap->set_force_oom(true); | 295 heap->set_force_oom(true); |
288 heap->CollectAllGarbage(); | 296 heap->CollectAllGarbage(); |
289 heap->mark_compact_collector()->EnsureSweepingCompleted(); | 297 heap->mark_compact_collector()->EnsureSweepingCompleted(); |
290 | 298 |
291 // The following check makes sure that we compacted "some" objects, while | 299 // The following check makes sure that we compacted "some" objects, while |
292 // leaving others in place. | 300 // leaving others in place. |
293 bool in_place = true; | 301 bool in_place = true; |
(...skipping 41 matching lines...) |
335 // If store buffer entries are not properly filtered/reset for aborted | 343 // If store buffer entries are not properly filtered/reset for aborted |
336 // pages we have now a broken address at an object slot in old space and | 344 // pages we have now a broken address at an object slot in old space and |
337 // the following scavenge will crash. | 345 // the following scavenge will crash. |
338 heap->CollectGarbage(NEW_SPACE); | 346 heap->CollectGarbage(NEW_SPACE); |
339 } | 347 } |
340 } | 348 } |
341 } | 349 } |
342 | 350 |
343 } // namespace internal | 351 } // namespace internal |
344 } // namespace v8 | 352 } // namespace v8 |