Chromium Code Reviews

Side by Side Diff: test/cctest/heap/test-compaction.cc

Issue 1511933002: [cctest] Add tests for aborting compaction of pages (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: More tests Created 5 years ago
1 // Copyright 2015 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "test/cctest/cctest.h"
6 #include "test/cctest/heap/heap-tester.h"
7 #include "test/cctest/heap/utils-inl.h"
8
9 namespace v8 {
10 namespace internal {
11
12 static std::vector<Handle<FixedArray>> FillUpFirstOldSpacePage(Heap* heap) {
13 // This function assumes that the old space top is still on the first page.
14 heap->old_space()->EmptyAllocationInfo();
15 int free_on_first_page = static_cast<int>(heap->old_space()->Available());
16 return CreatePadding(heap, free_on_first_page, TENURED);
17 }
18
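Editor's note: CreatePadding() lives in test/cctest/heap/utils-inl.h and is not part of this diff. As a rough, self-contained model of its contract (names hypothetical): it slices the requested byte count into chunks of at most the given object size, shrinking the last chunk so the padding fits exactly, and allocates one TENURED FixedArray per chunk.

// Hypothetical model of how padding is sliced into allocations; the real
// CreatePadding() allocates a TENURED FixedArray for each of these sizes.
#include <algorithm>
#include <vector>

std::vector<int> PaddingChunkSizes(int free_bytes, int chunk_size) {
  std::vector<int> sizes;
  while (free_bytes > 0) {
    // The last chunk is clamped so the total matches free_bytes exactly.
    sizes.push_back(std::min(chunk_size, free_bytes));
    free_bytes -= sizes.back();
  }
  return sizes;
}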
19
20 static void CheckInvariantsOfAbortedPage(Page* page) {
21 // Check invariants:
22 // 1) Markbits are cleared.
23 // 2) The page is no longer marked as an evacuation candidate.
24 // 3) The page is no longer marked as having an aborted compaction.
25 CHECK(page->markbits()->IsClean());
26 CHECK(!page->IsEvacuationCandidate());
27 CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
28 }
29
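Editor's note: the three invariants correspond to per-page state that V8 keeps in MemoryChunk flag bits plus a mark bitmap. A minimal model of what "recovered" means for an aborted page; the flag names are taken from the test, everything else is illustrative:

#include <cassert>
#include <cstdint>

// Toy stand-in for Page/MemoryChunk flag handling.
struct PageModel {
  enum Flag : uint32_t {
    EVACUATION_CANDIDATE = 1u << 0,
    COMPACTION_WAS_ABORTED = 1u << 1,
  };
  uint32_t flags = 0;
  bool markbits_clean = true;

  bool IsFlagSet(Flag f) const { return (flags & f) != 0; }
};

// After an aborted compaction has been fully processed, the page must look
// like a plain old-space page again: clean markbits, no leftover flags.
void CheckRecovered(const PageModel& page) {
  assert(page.markbits_clean);
  assert(!page.IsFlagSet(PageModel::EVACUATION_CANDIDATE));
  assert(!page.IsFlagSet(PageModel::COMPACTION_WAS_ABORTED));
}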
30
31 HEAP_TEST(CompactionFullAbortedPage) {
32 // Test the scenario where we reach OOM during compaction and the whole page
33 // is aborted.
34
35 // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
36 // we can reach the state of a fully aborted page.
37 FLAG_concurrent_sweeping = false;
38 FLAG_manual_evacuation_candidates_selection = true;
39 CcTest::InitializeVM();
40 Isolate* isolate = CcTest::i_isolate();
41 Heap* heap = isolate->heap();
42 {
43 HandleScope scope1(isolate);
44 // Fill up the first page since it cannot be evacuated.
45 auto first_page_handles = FillUpFirstOldSpacePage(heap);
46
47 {
48 HandleScope scope2(isolate);
49 heap->old_space()->EmptyAllocationInfo();
50 auto second_page_handles =
51 CreatePadding(heap, Page::kAllocatableMemory, TENURED);
52 Page* to_be_aborted_page =
53 Page::FromAddress(second_page_handles.front()->address());
54 to_be_aborted_page->SetFlag(
55 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
56 heap->old_space()->SetForcedOOM(true);
57 heap->CollectAllGarbage();
58
59 // Check that all handles still point to the same page, i.e., compaction
60 // has been aborted on the page.
61 for (Handle<FixedArray> object : second_page_handles) {
62 CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
63 }
64 CheckInvariantsOfAbortedPage(to_be_aborted_page);
65 }
66 }
67 }
68
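Editor's note: the forced-OOM switch makes every evacuation allocation fail, so not even the first object on the candidate page can be migrated and the whole page is aborted. A toy sketch of that control flow; all names below are hypothetical:

#include <vector>

struct ObjectModel { int page_id; };

// Returns true if the page was fully evacuated; with forced OOM the first
// target-space allocation fails and the page is aborted wholesale, so
// nothing moves at all.
bool EvacuatePageModel(std::vector<ObjectModel>& objects, bool forced_oom) {
  for (ObjectModel& obj : objects) {
    if (forced_oom) return false;  // allocation failed: abort the page
    obj.page_id = 1;               // migrated to a fresh page
  }
  return true;
}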
69
70 HEAP_TEST(CompactionPartiallyAbortedPage) {
71 // Test the scenario where we reach OOM during compaction and parts of the
72 // page have already been migrated to a new one.
73
74 // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
75 // we can reach the state of a half-aborted page.
76 FLAG_concurrent_sweeping = false;
77 FLAG_manual_evacuation_candidates_selection = true;
78
79 const int object_size = 128 * KB;
80
81 CcTest::InitializeVM();
82 Isolate* isolate = CcTest::i_isolate();
83 Heap* heap = isolate->heap();
84 {
85 HandleScope scope1(isolate);
86 // Fill up the first page since it cannot be evacuated.
87 auto first_page_handles = FillUpFirstOldSpacePage(heap);
88
89 {
90 HandleScope scope2(isolate);
91 // Fill the second page with objects of size {object_size} (last one is
92 // properly adjusted).
93 heap->old_space()->EmptyAllocationInfo();
94 auto second_page_handles =
95 CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
96 // Mark the second page for evacuation.
97 Page* to_be_aborted_page =
98 Page::FromAddress(second_page_handles.front()->address());
99 to_be_aborted_page->SetFlag(
100 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
101
102 {
103 // Add a third page that is filled with {num_objects} objects of size
104 // {object_size}.
105 HandleScope scope3(isolate);
106 heap->old_space()->EmptyAllocationInfo();
107 const int num_objects = 3;
108 std::vector<Handle<FixedArray>> third_page_handles = CreatePadding(
109 heap, object_size * num_objects, TENURED, object_size);
110 heap->old_space()->SetForcedOOM(true);
111 heap->CollectAllGarbage();
112
113 bool migration_aborted = false;
114 for (Handle<FixedArray> object : second_page_handles) {
115 // Once compaction has been aborted, all following objects still have
116 // to be on the initial page.
117 CHECK(!migration_aborted ||
118 (Page::FromAddress(object->address()) == to_be_aborted_page));
119 if (to_be_aborted_page == Page::FromAddress(object->address())) {
120 // This object has not been migrated.
121 migration_aborted = true;
122 }
ulan 2015/12/10 17:29:23 else CHECK(the object is on the third page) ?
Michael Lippautz 2015/12/10 17:42:44 Done.
123 }
124 // Check that we actually created a scenario with a partially aborted
125 // page.
126 CHECK(migration_aborted);
127 CheckInvariantsOfAbortedPage(to_be_aborted_page);
128 }
129 }
130 }
131 }
132
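Editor's note: the loop above depends on evacuation visiting objects in address order, so once OOM strikes, every later object stays on the aborted page and the migrated/in-place objects form a clean partition (this is also what ulan's suggested else-branch checks). A compact, self-contained model of that property:

#include <cassert>
#include <vector>

// page 1 = migrated to a new page, page 0 = still on the aborted page.
void CheckMigrationPartition(const std::vector<int>& page_of_object) {
  bool migration_aborted = false;
  for (int page : page_of_object) {
    if (page == 0) migration_aborted = true;
    // Before the abort point all objects moved; after it they all stayed.
    assert(migration_aborted ? page == 0 : page == 1);
  }
  assert(migration_aborted);  // the scenario must actually have triggered
}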
133
134 HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
135 // Test the scenario where we reach OOM during compaction and parts of the
136 // page have already been migrated to a new one. Objects on the aborted page
137 // are linked together. This test makes sure that intra-aborted-page pointers
138 // get properly updated.
139
140 // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
141 // we can reach the state of a half-aborted page.
142 FLAG_concurrent_sweeping = false;
143 FLAG_manual_evacuation_candidates_selection = true;
144
145 const int object_size = 128 * KB;
146
147 CcTest::InitializeVM();
148 Isolate* isolate = CcTest::i_isolate();
149 Heap* heap = isolate->heap();
150 {
151 HandleScope scope1(isolate);
152 // Fill up the first page since it cannot be evacuated.
153 auto first_page_handles = FillUpFirstOldSpacePage(heap);
154
155 Page* to_be_aborted_page = nullptr;
156 {
157 HandleScope temporary_scope(isolate);
158 // Fill the second page with objects of size {object_size} (last one is
159 // properly adjusted).
160 heap->old_space()->EmptyAllocationInfo();
161 const int free_on_second_page = Page::kAllocatableMemory;
162 std::vector<Handle<FixedArray>> second_page_handles =
163 CreatePadding(heap, free_on_second_page, TENURED, object_size);
164 // Mark the second page for evacuation.
165 to_be_aborted_page =
166 Page::FromAddress(second_page_handles.front()->address());
167 to_be_aborted_page->SetFlag(
168 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
169
170 for (size_t i = second_page_handles.size() - 1; i > 0; i--) {
171 second_page_handles[i]->set(0, *second_page_handles[i - 1]);
172 }
173 first_page_handles.front()->set(0, *second_page_handles.back());
174 }
175
176 {
177 // Add a third page that is filled with {num_objects} objects of size
178 // {object_size}.
179 HandleScope scope3(isolate);
180 heap->old_space()->EmptyAllocationInfo();
181 const int num_objects = 2;
182 int used_memory = object_size * num_objects;
183 std::vector<Handle<FixedArray>> third_page_handles =
184 CreatePadding(heap, used_memory, TENURED, object_size);
185 heap->old_space()->SetForcedOOM(true);
186 heap->CollectAllGarbage();
187
188 // The following check makes sure that we compacted "some" objects, while
189 // leaving others in place.
190 bool in_place = true;
191 Handle<FixedArray> current = first_page_handles.front();
192 while (current->get(0) != heap->undefined_value()) {
193 current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
194 CHECK(current->IsFixedArray());
195 if (Page::FromAddress(current->address()) != to_be_aborted_page) {
196 in_place = false;
197 }
198 bool on_aborted_page =
199 Page::FromAddress(current->address()) == to_be_aborted_page;
200 CHECK((in_place && on_aborted_page) || (!in_place && !on_aborted_page));
201 }
202 // Check that we at least migrated one object, as otherwise the test would
203 // not trigger.
204 CHECK(!in_place);
205
206 CheckInvariantsOfAbortedPage(to_be_aborted_page);
207 heap->CollectAllGarbage();
208 }
209 }
210 }
211
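Editor's note: the traversal above walks the slot-0 chain from the first page through the aborted page, checking a monotonic property: once the chain leaves the aborted page it never re-enters it, and at least one object must have moved. A self-contained model of exactly that check:

#include <cassert>
#include <vector>

// One entry per chain node, in traversal order:
// true = node is still on the aborted page, false = node was migrated.
void CheckChainLeavesPageOnce(const std::vector<bool>& on_aborted_page) {
  bool in_place = true;
  for (bool on_page : on_aborted_page) {
    if (!on_page) in_place = false;
    // Mirrors the CHECK in the test: in_place iff on the aborted page.
    assert(in_place == on_page);
  }
  assert(!in_place);  // at least one object must have migrated
}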
212
213 HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) {
214 // Test the scenario where we reach OOM during compaction and parts of the
215 // page have already been migrated to a new one. Objects on the aborted page
216 // are linked together and the very first object on the aborted page points
217 // into new space. The test verifies that the store buffer entries are properly
218 // cleared and rebuilt after aborting a page. Failing to do so can result in
219 // other objects being allocated in the free space where their payload looks
220 // like a valid new space pointer.
221
222 // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
223 // we can reach the state of a half-aborted page.
224 FLAG_concurrent_sweeping = false;
225 FLAG_manual_evacuation_candidates_selection = true;
226
227 const int object_size = 128 * KB;
228
229 CcTest::InitializeVM();
230 Isolate* isolate = CcTest::i_isolate();
231 Heap* heap = isolate->heap();
232 {
233 HandleScope scope1(isolate);
234 // Fill up the first page since it cannot be evacuated.
235 auto first_page_handles = FillUpFirstOldSpacePage(heap);
236
237 Page* to_be_aborted_page = nullptr;
238 {
239 HandleScope temporary_scope(isolate);
240 // Fill the second page with objects of size {object_size} (last one is
241 // properly adjusted).
242 heap->old_space()->EmptyAllocationInfo();
243 auto second_page_handles =
244 CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
245 // Mark the second page for evacuation.
246 to_be_aborted_page =
247 Page::FromAddress(second_page_handles.front()->address());
248 to_be_aborted_page->SetFlag(
249 MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
250
251 for (size_t i = second_page_handles.size() - 1; i > 0; i--) {
252 second_page_handles[i]->set(0, *second_page_handles[i - 1]);
253 }
254 first_page_handles.front()->set(0, *second_page_handles.back());
255 Handle<FixedArray> new_space_array =
256 isolate->factory()->NewFixedArray(1, NOT_TENURED);
257 CHECK(heap->InNewSpace(*new_space_array));
258 second_page_handles.front()->set(1, *new_space_array);
259 }
260
261 {
262 // Add a third page that is filled with {num_objects} objects of size
263 // {object_size}.
264 HandleScope scope3(isolate);
265 heap->old_space()->EmptyAllocationInfo();
266 const int num_objects = 2;
267 int used_memory = object_size * num_objects;
268 std::vector<Handle<FixedArray>> third_page_handles =
269 CreatePadding(heap, used_memory, TENURED, object_size);
270 heap->old_space()->SetForcedOOM(true);
271 heap->CollectAllGarbage();
272
273 // The following check makes sure that we compacted "some" objects, while
274 // leaving others in place.
275 bool in_place = true;
276 Handle<FixedArray> current = first_page_handles.front();
277 while (current->get(0) != heap->undefined_value()) {
278 current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
279 CHECK(!heap->InNewSpace(*current));
280 CHECK(current->IsFixedArray());
281 if (Page::FromAddress(current->address()) != to_be_aborted_page) {
282 in_place = false;
283 }
284 bool on_aborted_page =
285 Page::FromAddress(current->address()) == to_be_aborted_page;
286 CHECK((in_place && on_aborted_page) || (!in_place && !on_aborted_page));
287 }
288 // Check that we at least migrated one object, as otherwise the test would
289 // not trigger.
290 CHECK(!in_place);
291
292 CheckInvariantsOfAbortedPage(to_be_aborted_page);
293
294 // Allocate a new object in new space.
295 Handle<FixedArray> holder =
296 isolate->factory()->NewFixedArray(10, NOT_TENURED);
297 // Create a broken address that looks like a tagged pointer to a new space
298 // object.
299 Address broken_address = holder->address() + 2 * kPointerSize + 1;
300 // Convert it to a vector to create a string from it.
301 Vector<const uint8_t> string_to_broken_address(
302 reinterpret_cast<const uint8_t*>(&broken_address), 8);
303
304 Handle<String> string;
305 do {
306 // We know that the interesting slot will be on the aborted page and
307 // hence we allocate until we get our string on the aborted page.
308 // We used slot 1 in the fixed-size array, which corresponds to the
309 // first word in the string. Since the first object definitely
310 // migrated, we can just allocate until we hit the aborted page.
311 string = isolate->factory()
312 ->NewStringFromOneByte(string_to_broken_address, TENURED)
313 .ToHandleChecked();
314 } while (Page::FromAddress(string->address()) != to_be_aborted_page);
315
316 // If store buffer entries are not properly filtered/reset for aborted
317 // pages, we now have a broken address at an object slot in old space, and
318 // the following scavenge will crash.
319 heap->CollectGarbage(NEW_SPACE);
320 }
321 }
322 }
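Editor's note on the hazard exercised above: the store buffer records old-to-new slots, and entries pointing into the aborted page must be dropped (and later rebuilt) or a fresh allocation, such as the string whose payload mimics a tagged new-space pointer, ends up under a stale slot that the next scavenge dereferences. A minimal model of the required filtering; this API is hypothetical:

#include <cstdint>
#include <set>

struct StoreBufferModel {
  std::set<uintptr_t> old_to_new_slots;

  void Record(uintptr_t slot) { old_to_new_slots.insert(slot); }

  // On an aborted compaction, every recorded slot inside the aborted page
  // must be dropped; valid ones are re-recorded when the page is re-scanned.
  void FilterAbortedPage(uintptr_t page_start, uintptr_t page_end) {
    for (auto it = old_to_new_slots.lower_bound(page_start);
         it != old_to_new_slots.end() && *it < page_end;) {
      it = old_to_new_slots.erase(it);
    }
  }
};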
323
324 } // namespace internal
325 } // namespace v8