| OLD | NEW | 
| (Empty) |  | 
 |    1 // Copyright 2015 the V8 project authors. All rights reserved. | 
 |    2 // Use of this source code is governed by a BSD-style license that can be | 
 |    3 // found in the LICENSE file. | 
 |    4  | 
 |    5 #include "test/cctest/cctest.h" | 
 |    6 #include "test/cctest/heap/heap-tester.h" | 
 |    7 #include "test/cctest/heap/utils-inl.h" | 
 |    8  | 
 |    9 namespace v8 { | 
 |   10 namespace internal { | 
 |   11  | 
 |   12 static std::vector<Handle<FixedArray>> FillUpFirstOldSpacePage(Heap* heap) { | 
 |   13   // This functions assumes that old space top is still on the first page | 
 |   14   heap->old_space()->EmptyAllocationInfo(); | 
 |   15   int free_on_first_page = static_cast<int>(heap->old_space()->Available()); | 
 |   16   return CreatePadding(heap, free_on_first_page, TENURED); | 
 |   17 } | 
 |   18  | 
 |   19  | 
 |   20 static void CheckInvariantsOfAbortedPage(Page* page) { | 
 |   21   // Check invariants: | 
 |   22   // 1) Markbits are cleared | 
 |   23   // 2) The page is not marked as evacuation candidate anymore | 
 |   24   // 3) The page is not marked as aborted compaction anymore. | 
 |   25   CHECK(page->markbits()->IsClean()); | 
 |   26   CHECK(!page->IsEvacuationCandidate()); | 
 |   27   CHECK(!page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)); | 
 |   28 } | 
 |   29  | 
 |   30  | 
HEAP_TEST(CompactionFullAbortedPage) {
  // Test the scenario where we reach OOM during compaction and the whole page
  // is aborted.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;
  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    // Fill up the first page since it cannot be evacuated.
    auto first_page_handles = FillUpFirstOldSpacePage(heap);

    {
      HandleScope scope2(isolate);
      // Fill an entire fresh page so every object on it would have to migrate
      // if the page were evacuated.
      heap->old_space()->EmptyAllocationInfo();
      auto second_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED);
      Page* to_be_aborted_page =
          Page::FromAddress(second_page_handles.front()->address());
      // Force the page to be an evacuation candidate and simulate OOM so that
      // compaction of this page is aborted during the GC below.
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
      heap->set_force_oom(true);
      heap->CollectAllGarbage();

      // Check that all handles still point to the same page, i.e., compaction
      // has been aborted on the page.
      for (Handle<FixedArray> object : second_page_handles) {
        CHECK_EQ(to_be_aborted_page, Page::FromAddress(object->address()));
      }
      CheckInvariantsOfAbortedPage(to_be_aborted_page);
    }
  }
}
 |   68  | 
 |   69  | 
HEAP_TEST(CompactionPartiallyAbortedPage) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  // Per-object size on the evacuation-candidate page; chosen so the page
  // holds multiple objects and migration can fail partway through.
  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    // Fill up the first page since it cannot be evacuated.
    auto first_page_handles = FillUpFirstOldSpacePage(heap);

    {
      HandleScope scope2(isolate);
      // Fill the second page with objects of size {object_size} (last one is
      // properly adjusted).
      heap->old_space()->EmptyAllocationInfo();
      auto second_page_handles =
          CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size);
      // Mark the second page for evacuation.
      Page* to_be_aborted_page =
          Page::FromAddress(second_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      {
        // Add a third page that is filled with {num_objects} objects of size
        // {object_size}. It provides the free space that lets migration of
        // some (but not all) second-page objects succeed before forced OOM
        // aborts the rest.
        HandleScope scope3(isolate);
        heap->old_space()->EmptyAllocationInfo();
        const int num_objects = 3;
        std::vector<Handle<FixedArray>> third_page_handles = CreatePadding(
            heap, object_size * num_objects, TENURED, object_size);
        Page* third_page =
            Page::FromAddress(third_page_handles.front()->address());
        heap->set_force_oom(true);
        heap->CollectAllGarbage();

        bool migration_aborted = false;
        for (Handle<FixedArray> object : second_page_handles) {
          // Once compaction has been aborted, all following objects still have
          // to be on the initial page.
          CHECK(!migration_aborted ||
                (Page::FromAddress(object->address()) == to_be_aborted_page));
          if (Page::FromAddress(object->address()) == to_be_aborted_page) {
            // This object has not been migrated.
            migration_aborted = true;
          } else {
            // Migrated objects must have landed on the third page.
            CHECK_EQ(Page::FromAddress(object->address()), third_page);
          }
        }
        // Check that we actually created a scenario with a partially aborted
        // page.
        CHECK(migration_aborted);
        CheckInvariantsOfAbortedPage(to_be_aborted_page);
      }
    }
  }
}
 |  136  | 
 |  137  | 
HEAP_TEST(CompactionPartiallyAbortedPageIntraAbortedPointers) {
  // Test the scenario where we reach OOM during compaction and parts of the
  // page have already been migrated to a new one. Objects on the aborted page
  // are linked together. This test makes sure that intra-aborted page pointers
  // get properly updated.

  // Disable concurrent sweeping to ensure memory is in an expected state, i.e.,
  // we can reach the state of a half aborted page.
  FLAG_concurrent_sweeping = false;
  FLAG_manual_evacuation_candidates_selection = true;

  // Per-object size on the evacuation-candidate page.
  const int object_size = 128 * KB;

  CcTest::InitializeVM();
  Isolate* isolate = CcTest::i_isolate();
  Heap* heap = isolate->heap();
  {
    HandleScope scope1(isolate);
    // Fill up the first page since it cannot be evacuated.
    auto first_page_handles = FillUpFirstOldSpacePage(heap);

    Page* to_be_aborted_page = nullptr;
    {
      // Handles to the second page's objects are deliberately dropped when
      // this scope closes; the objects stay reachable only through the
      // pointer chain rooted in first_page_handles.front().
      HandleScope temporary_scope(isolate);
      // Fill the second page with objects of size {object_size} (last one is
      // properly adjusted).
      heap->old_space()->EmptyAllocationInfo();
      const int free_on_second_page = Page::kAllocatableMemory;
      std::vector<Handle<FixedArray>> second_page_handles =
          CreatePadding(heap, free_on_second_page, TENURED, object_size);
      // Mark the second page for evacuation.
      to_be_aborted_page =
          Page::FromAddress(second_page_handles.front()->address());
      to_be_aborted_page->SetFlag(
          MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);

      // Link the second-page objects into a chain (each one's slot 0 points
      // to its predecessor) and root the chain from the first page.
      for (size_t i = second_page_handles.size() - 1; i > 0; i--) {
        second_page_handles[i]->set(0, *second_page_handles[i - 1]);
      }
      first_page_handles.front()->set(0, *second_page_handles.back());
    }

    {
      // Add a third page that is filled with {num_objects} objects of size
      // {object_size}. It provides just enough free space for a partial
      // migration before forced OOM aborts the rest.
      HandleScope scope3(isolate);
      heap->old_space()->EmptyAllocationInfo();
      const int num_objects = 2;
      int used_memory = object_size * num_objects;
      std::vector<Handle<FixedArray>> third_page_handles =
          CreatePadding(heap, used_memory, TENURED, object_size);
      heap->set_force_oom(true);
      heap->CollectAllGarbage();

      // The following check makes sure that we compacted "some" objects, while
      // leaving others in place. Walk the chain via slot 0; a transition from
      // "on the aborted page" back to "off the aborted page" (or vice versa
      // after leaving it) would indicate stale intra-page pointers.
      bool in_place = true;
      Handle<FixedArray> current = first_page_handles.front();
      while (current->get(0) != heap->undefined_value()) {
        current = Handle<FixedArray>(FixedArray::cast(current->get(0)));
        CHECK(current->IsFixedArray());
        if (Page::FromAddress(current->address()) != to_be_aborted_page) {
          in_place = false;
        }
        bool on_aborted_page =
            Page::FromAddress(current->address()) == to_be_aborted_page;
        CHECK((in_place && on_aborted_page) || (!in_place && !on_aborted_page));
      }
      // Check that we at least migrated one object, as otherwise the test would
      // not trigger.
      CHECK(!in_place);

      CheckInvariantsOfAbortedPage(to_be_aborted_page);
      // A second GC must also succeed with the repaired pointer chain.
      heap->CollectAllGarbage();
    }
  }
}
 |  215  | 
 |  216  | 
 |  217 HEAP_TEST(CompactionPartiallyAbortedPageWithStoreBufferEntries) { | 
 |  218   // Test the scenario where we reach OOM during compaction and parts of the | 
 |  219   // page have already been migrated to a new one. Objects on the aborted page | 
 |  220   // are linked together and the very first object on the aborted page points | 
 |  221   // into new space. The test verifies that the store buffer entries are | 
 |  222   // properly cleared and rebuilt after aborting a page. Failing to do so can | 
 |  223   // result in other objects being allocated in the free space where their | 
 |  224   // payload looks like a valid new space pointer. | 
 |  225  | 
 |  226   // Disable concurrent sweeping to ensure memory is in an expected state, i.e., | 
 |  227   // we can reach the state of a half aborted page. | 
 |  228   FLAG_concurrent_sweeping = false; | 
 |  229   FLAG_manual_evacuation_candidates_selection = true; | 
 |  230  | 
 |  231   const int object_size = 128 * KB; | 
 |  232  | 
 |  233   CcTest::InitializeVM(); | 
 |  234   Isolate* isolate = CcTest::i_isolate(); | 
 |  235   Heap* heap = isolate->heap(); | 
 |  236   { | 
 |  237     HandleScope scope1(isolate); | 
 |  238     // Fill up the first page since it cannot be evacuated. | 
 |  239     auto first_page_handles = FillUpFirstOldSpacePage(heap); | 
 |  240  | 
 |  241     Page* to_be_aborted_page = nullptr; | 
 |  242     { | 
 |  243       HandleScope temporary_scope(isolate); | 
 |  244       // Fill the second page with objects of size {object_size} (last one is | 
 |  245       // properly adjusted). | 
 |  246       heap->old_space()->EmptyAllocationInfo(); | 
 |  247       auto second_page_handles = | 
 |  248           CreatePadding(heap, Page::kAllocatableMemory, TENURED, object_size); | 
 |  249       // Mark the second page for evacuation. | 
 |  250       to_be_aborted_page = | 
 |  251           Page::FromAddress(second_page_handles.front()->address()); | 
 |  252       to_be_aborted_page->SetFlag( | 
 |  253           MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING); | 
 |  254  | 
 |  255       for (size_t i = second_page_handles.size() - 1; i > 0; i--) { | 
 |  256         second_page_handles[i]->set(0, *second_page_handles[i - 1]); | 
 |  257       } | 
 |  258       first_page_handles.front()->set(0, *second_page_handles.back()); | 
 |  259       Handle<FixedArray> new_space_array = | 
 |  260           isolate->factory()->NewFixedArray(1, NOT_TENURED); | 
 |  261       CHECK(heap->InNewSpace(*new_space_array)); | 
 |  262       second_page_handles.front()->set(1, *new_space_array); | 
 |  263     } | 
 |  264  | 
 |  265     { | 
 |  266       // Add a third page that is filled with {num_objects} objects of size | 
 |  267       // {object_size}. | 
 |  268       HandleScope scope3(isolate); | 
 |  269       heap->old_space()->EmptyAllocationInfo(); | 
 |  270       const int num_objects = 2; | 
 |  271       int used_memory = object_size * num_objects; | 
 |  272       std::vector<Handle<FixedArray>> third_page_handles = | 
 |  273           CreatePadding(heap, used_memory, TENURED, object_size); | 
 |  274       heap->set_force_oom(true); | 
 |  275       heap->CollectAllGarbage(); | 
 |  276  | 
 |  277       // The following check makes sure that we compacted "some" objects, while | 
 |  278       // leaving others in place. | 
 |  279       bool in_place = true; | 
 |  280       Handle<FixedArray> current = first_page_handles.front(); | 
 |  281       while (current->get(0) != heap->undefined_value()) { | 
 |  282         current = Handle<FixedArray>(FixedArray::cast(current->get(0))); | 
 |  283         CHECK(!heap->InNewSpace(*current)); | 
 |  284         CHECK(current->IsFixedArray()); | 
 |  285         if (Page::FromAddress(current->address()) != to_be_aborted_page) { | 
 |  286           in_place = false; | 
 |  287         } | 
 |  288         bool on_aborted_page = | 
 |  289             Page::FromAddress(current->address()) == to_be_aborted_page; | 
 |  290         CHECK((in_place && on_aborted_page) || (!in_place && !on_aborted_page)); | 
 |  291       } | 
 |  292       // Check that we at least migrated one object, as otherwise the test would | 
 |  293       // not trigger. | 
 |  294       CHECK(!in_place); | 
 |  295  | 
 |  296       CheckInvariantsOfAbortedPage(to_be_aborted_page); | 
 |  297  | 
 |  298       // Allocate a new object in new space. | 
 |  299       Handle<FixedArray> holder = | 
 |  300           isolate->factory()->NewFixedArray(10, NOT_TENURED); | 
 |  301       // Create a broken address that looks like a tagged pointer to a new space | 
 |  302       // object. | 
 |  303       Address broken_address = holder->address() + 2 * kPointerSize + 1; | 
 |  304       // Convert it to a vector to create a string from it. | 
 |  305       Vector<const uint8_t> string_to_broken_addresss( | 
 |  306           reinterpret_cast<const uint8_t*>(&broken_address), 8); | 
 |  307  | 
 |  308       Handle<String> string; | 
 |  309       do { | 
 |  310         // We know that the interesting slot will be on the aborted page and | 
 |  311         // hence we allocate until we get our string on the aborted page. | 
 |  312         // We used slot 1 in the fixed size array which corresponds to the | 
 |  313         // the first word in the string. Since the first object definitely | 
 |  314         // migrated we can just allocate until we hit the aborted page. | 
 |  315         string = isolate->factory() | 
 |  316                      ->NewStringFromOneByte(string_to_broken_addresss, TENURED) | 
 |  317                      .ToHandleChecked(); | 
 |  318       } while (Page::FromAddress(string->address()) != to_be_aborted_page); | 
 |  319  | 
 |  320       // If store buffer entries are not properly filtered/reset for aborted | 
 |  321       // pages we have now a broken address at an object slot in old space and | 
 |  322       // the following scavenge will crash. | 
 |  323       heap->CollectGarbage(NEW_SPACE); | 
 |  324     } | 
 |  325   } | 
 |  326 } | 
 |  327  | 
 |  328 }  // namespace internal | 
 |  329 }  // namespace v8 | 
| OLD | NEW |