Index: test/cctest/heap/test-heap.cc
diff --git a/test/cctest/heap/test-heap.cc b/test/cctest/heap/test-heap.cc
index 3837397ac1399446b11c9b2fbaed14d9fdeeb40d..f371a8c5b34f851d326d6661c461b96a33b170a8 100644
--- a/test/cctest/heap/test-heap.cc
+++ b/test/cctest/heap/test-heap.cc
@@ -2228,6 +2228,18 @@ static Address AlignOldSpace(AllocationAlignment alignment, int offset) {
 // Test the case where allocation must be done from the free list, so filler
 // may precede or follow the object.
 TEST(TestAlignedOverAllocation) {
+  Heap* heap = CcTest::heap();
+  // Test checks for fillers before and behind objects and requires a fresh
+  // page and empty free list.
+  heap::AbandonCurrentlyFreeMemory(heap->old_space());
+  // Allocate a dummy object to properly set up the linear allocation info.
+  AllocationResult dummy =
+      heap->old_space()->AllocateRawUnaligned(kPointerSize);
+  CHECK(!dummy.IsRetry());
+  heap->CreateFillerObjectAt(
+      HeapObject::cast(dummy.ToObjectChecked())->address(), kPointerSize,
+      ClearRecordedSlots::kNo);
+
   // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
   const intptr_t double_misalignment = kDoubleSize - kPointerSize;
   Address start;
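The hunk above encodes a reusable idiom: a test that asserts on filler placement needs a deterministic allocation area, so it abandons the currently free memory and burns one dummy allocation to initialize the linear allocation pointer. Below is a minimal sketch of that idiom factored into a standalone helper; every call is taken from the patch itself, and only the helper name PrepareFreshOldSpacePage is hypothetical.

  // Sketch only, not part of the patch; assumes the cctest heap test
  // headers (heap-utils.h etc.) that the patched file already includes.
  static void PrepareFreshOldSpacePage(Heap* heap) {
    // Drop the linear allocation area and free-list entries so the next
    // allocation starts from a fresh page.
    heap::AbandonCurrentlyFreeMemory(heap->old_space());
    // A dummy unaligned allocation (re)initializes the linear allocation
    // info; it is immediately overwritten with a filler so it cannot
    // interfere with the placement checks that follow.
    AllocationResult dummy =
        heap->old_space()->AllocateRawUnaligned(kPointerSize);
    CHECK(!dummy.IsRetry());
    heap->CreateFillerObjectAt(
        HeapObject::cast(dummy.ToObjectChecked())->address(), kPointerSize,
        ClearRecordedSlots::kNo);
  }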
@@ -3613,6 +3625,7 @@ TEST(ReleaseOverReservedPages) {
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
+
   v8::HandleScope scope(CcTest::isolate());
   static const int number_of_test_pages = 20;
 
@@ -3642,15 +3655,20 @@ TEST(ReleaseOverReservedPages) {
                           "triggered by test 2");
   CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2);
 
-  // Triggering a last-resort GC should cause all pages to be released to the
-  // OS so that other processes can seize the memory. If we get a failure here
-  // where there are 2 pages left instead of 1, then we should increase the
-  // size of the first page a little in SizeOfFirstPage in spaces.cc. The
-  // first page should be small in order to reduce memory used when the VM
-  // boots, but if the 20 small arrays don't fit on the first page then that's
-  // an indication that it is too small.
   heap->CollectAllAvailableGarbage("triggered really hard");
-  CHECK_EQ(initial_page_count, old_space->CountTotalPages());
+  if (isolate->snapshot_available()) {
+    // Triggering a last-resort GC should cause all pages to be released to
+    // the OS. If we get a failure here, adjust Snapshot::SizeOfSnapshot or
+    // PagedSpace::AreaSizeDuringDeserialization. The initialized heap after
+    // deserialization should be small, but still large enough to hold some
+    // small arrays.
+    CHECK_EQ(initial_page_count, old_space->CountTotalPages());
+  } else {
+    // Without a snapshot we cannot guarantee that heap setup leaves enough
+    // space for the fixed arrays to fit on the first page. However,
+    // allocating the small arrays should result in at most one more page.
+    CHECK_GE(initial_page_count + 1, old_space->CountTotalPages());
+  }
 }
 
 static int forced_gc_counter = 0;
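Since the expected page count after the last-resort GC now depends on whether the heap was deserialized from a snapshot, the assertion pattern generalizes as sketched below. The helper name CheckPageCountAfterLastResortGC and the PagedSpace* parameter type are assumptions; the two checks mirror the patch, and the CHECK_GE branch simply states CountTotalPages() <= initial_page_count + 1, i.e. at most one extra page.

  // Sketch only, not part of the patch.
  static void CheckPageCountAfterLastResortGC(Isolate* isolate,
                                              PagedSpace* old_space,
                                              int initial_page_count) {
    if (isolate->snapshot_available()) {
      // A deserialized heap is deterministic: every over-reserved page must
      // have been returned to the OS.
      CHECK_EQ(initial_page_count, old_space->CountTotalPages());
    } else {
      // A bootstrapped heap's layout varies, so tolerate the test arrays
      // spilling onto at most one extra page.
      CHECK_GE(initial_page_count + 1, old_space->CountTotalPages());
    }
  }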