Chromium Code Reviews

Side by Side Diff: test/cctest/heap/test-heap.cc

Issue 2232653003: Reland of "[heap] Switch to 500k pages" (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Switch back to using the high water mark. Also: don't sweep immortal immovable pages. Created 4 years, 4 months ago
1 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 2 // Redistribution and use in source and binary forms, with or without
3 3 // modification, are permitted provided that the following conditions are
4 4 // met:
5 5 //
6 6 // * Redistributions of source code must retain the above copyright
7 7 // notice, this list of conditions and the following disclaimer.
8 8 // * Redistributions in binary form must reproduce the above
9 9 // copyright notice, this list of conditions and the following
10 10 // disclaimer in the documentation and/or other materials provided
(...skipping 2210 matching lines...)
2221 2221   Address top = *top_addr;
2222 2222   // Now force the remaining allocation onto the free list.
2223 2223   CcTest::heap()->old_space()->EmptyAllocationInfo();
2224 2224   return top;
2225 2225 }
2226 2226
2227 2227
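The helper whose tail is shown above follows a common cctest pattern: record the current top of the linear allocation area, then retire that area so every later allocation must be carved out of the free list. A minimal sketch of such a helper, assuming the cctest harness; the helper name and the allocation_top_address() accessor are assumptions here, not part of this patch:

    static Address EmptyLinearAllocationArea() {
      Address* top_addr = CcTest::heap()->old_space()->allocation_top_address();
      Address top = *top_addr;
      // Retire the linear allocation area: the unused remainder becomes
      // free-list entries, so the next allocation must come from the free list.
      CcTest::heap()->old_space()->EmptyAllocationInfo();
      return top;  // callers use the old top to locate the next object
    }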
2228 2228 // Test the case where allocation must be done from the free list, so filler
2229 2229 // may precede or follow the object.
2230 2230 TEST(TestAlignedOverAllocation) {
2231   Heap* heap = CcTest::heap();
2232   // The test checks for fillers before and behind objects and requires a
2233   // fresh page and an empty free list.
2234   heap::AbandonCurrentlyFreeMemory(heap->old_space());
2235   // Allocate a dummy object to properly set up the linear allocation info.
2236   AllocationResult dummy =
2237       heap->old_space()->AllocateRawUnaligned(kPointerSize);
2238   CHECK(!dummy.IsRetry());
2239   heap->CreateFillerObjectAt(
2240       HeapObject::cast(dummy.ToObjectChecked())->address(), kPointerSize,
2241       ClearRecordedSlots::kNo);
2242
2231 2243   // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
2232 2244   const intptr_t double_misalignment = kDoubleSize - kPointerSize;
2233 2245   Address start;
2234 2246   HeapObject* obj;
2235 2247   HeapObject* filler1;
2236 2248   HeapObject* filler2;
2237 2249   if (double_misalignment) {
2238 2250     start = AlignOldSpace(kDoubleAligned, 0);
2239 2251     obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
2240 2252     // The object is aligned, and a filler object is created after.
(...skipping 121 matching lines...)
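The double_misalignment arithmetic above drives the whole test: on 32-bit targets kPointerSize is 4 and kDoubleSize is 8, so a pointer-aligned allocation top is double-aligned only half of the time, and the one word of over-allocation becomes a filler either before the object (to align it) or behind it (to keep the heap iterable). A standalone sketch of that padding computation, with 32-bit sizes hard-coded purely for illustration:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t kWord = 4;    // kPointerSize on a 32-bit build
      const intptr_t kDouble = 8;  // kDoubleSize
      // Over-allocate by kDouble - kWord bytes, then split that slack into a
      // filler before the object (alignment) and a filler behind it (leftover).
      for (intptr_t top : {0x1000, 0x1004}) {
        intptr_t filler_before = (kDouble - top % kDouble) % kDouble;
        intptr_t object = top + filler_before;
        intptr_t filler_after = (kDouble - kWord) - filler_before;
        printf("top=%#lx filler1=%ld object=%#lx filler2=%ld\n",
               (long)top, (long)filler_before, (long)object, (long)filler_after);
      }
      return 0;
    }

Run on the two example tops, this places a one-word filler behind the object when top is already double-aligned and a one-word filler before it otherwise, which is exactly the filler1/filler2 pair the test inspects.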
2362 2374   intptr_t available = new_space->Capacity() - new_space->Size();
2363 2375   intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
2364 2376   for (intptr_t i = 0; i < number_of_fillers; i++) {
2365 2377     CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED)));
2366 2378   }
2367 2379 }
2368 2380
2369 2381
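To make the filler arithmetic above concrete: FixedArray::SizeFor(32) is the byte size of a 32-element array (a map word, a length word, and 32 element slots), and subtracting one from the quotient leaves just under one array of slack, so the allocation under test is forced to the very end of new space. Worked numbers, assuming 64-bit tagged values and an invented 1 MiB of free capacity:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const intptr_t kTaggedSize = 8;                // kPointerSize on 64-bit
      const intptr_t kHeaderSize = 2 * kTaggedSize;  // map word + length word
      const intptr_t kArraySize = kHeaderSize + 32 * kTaggedSize;  // 272 bytes
      const intptr_t kAvailable = 1024 * 1024;       // hypothetical slack
      const intptr_t kFillers = kAvailable / kArraySize - 1;  // 3854 arrays
      printf("%ld fillers occupy %ld bytes, leaving %ld\n", (long)kFillers,
             (long)(kFillers * kArraySize),
             (long)(kAvailable - kFillers * kArraySize));  // 288 bytes left
      return 0;
    }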
2370 2382 TEST(GrowAndShrinkNewSpace) {
2371 2383   CcTest::InitializeVM();
2384   // Avoid shrinking new space in the GC epilogue. This can happen if
2385   // allocation throughput samples have been taken while executing the benchmark.
2386   i::FLAG_predictable = true;
2372 2387   Heap* heap = CcTest::heap();
2373 2388   NewSpace* new_space = heap->new_space();
2374 2389
2375 2390   if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
2376 2391     return;
2377 2392   }
2378 2393
2379 2394   // Explicitly growing should double the space capacity.
2380 2395   intptr_t old_capacity, new_capacity;
2381 2396   old_capacity = new_space->TotalCapacity();
(...skipping 1222 matching lines...)
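The elided body of GrowAndShrinkNewSpace asserts the doubling and halving contract of the semispaces. A hedged sketch of the property being checked, reusing the accessors visible above; the exact sequence of CHECKs in the elided code may differ:

    intptr_t old_capacity = new_space->TotalCapacity();
    new_space->Grow();
    intptr_t new_capacity = new_space->TotalCapacity();
    CHECK_EQ(2 * old_capacity, new_capacity);  // explicit growth doubles capacity
    new_space->Shrink();
    // With no live objects, shrinking returns the space to its old capacity.
    CHECK_EQ(old_capacity, new_space->TotalCapacity());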
3604 3619   // memory is distributed. Since this is non-deterministic because of
3605 3620   // concurrent sweeping, we disable it for this test.
3606 3621   i::FLAG_parallel_compaction = false;
3607 3622   // Concurrent sweeping adds non-determinism, depending on when memory is
3608 3623   // available for further reuse.
3609 3624   i::FLAG_concurrent_sweeping = false;
3610 3625   // Fast evacuation of pages may result in a different page count in old space.
3611 3626   i::FLAG_page_promotion = false;
3612 3627   CcTest::InitializeVM();
3613 3628   Isolate* isolate = CcTest::i_isolate();
3629   // If there's a snapshot available, we don't know whether 20 small arrays
3630   // will fit on the initial pages.
3631   if (!isolate->snapshot_available()) return;
3614 3632   Factory* factory = isolate->factory();
3615 3633   Heap* heap = isolate->heap();
3634
3616 3635   v8::HandleScope scope(CcTest::isolate());
3617 3636   static const int number_of_test_pages = 20;
3618 3637
3619 3638   // Prepare many pages with low live-bytes count.
3620 3639   PagedSpace* old_space = heap->old_space();
3621 3640   const int initial_page_count = old_space->CountTotalPages();
3622 3641   const int overall_page_count = number_of_test_pages + initial_page_count;
3623 3642   for (int i = 0; i < number_of_test_pages; i++) {
3624 3643     AlwaysAllocateScope always_allocate(isolate);
3625 3644     heap::SimulateFullSpace(old_space);
3626 3645     factory->NewFixedArray(1, TENURED);
3627 3646   }
3628 3647   CHECK_EQ(overall_page_count, old_space->CountTotalPages());
3629 3648
3630 3649   // Triggering one GC will cause a lot of garbage to be discovered, but
3631 3650   // evenly spread across all allocated pages.
3632 3651   heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
3633 3652                           "triggered for preparation");
3634 3653   CHECK_GE(overall_page_count, old_space->CountTotalPages());
3635 3654
3636 3655   // Triggering subsequent GCs should cause at least half of the pages
3637 3656   // to be released to the OS after at most two cycles.
3638 3657   heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
3639 3658                           "triggered by test 1");
3640 3659   CHECK_GE(overall_page_count, old_space->CountTotalPages());
3641 3660   heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
3642 3661                           "triggered by test 2");
3643 3662   CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2);
3645   // Triggering a last-resort GC should cause all pages to be released to the
3646   // OS so that other processes can seize the memory. If we get a failure here
3647   // where there are 2 pages left instead of 1, then we should increase the
3648   // size of the first page a little in SizeOfFirstPage in spaces.cc. The
3649   // first page should be small in order to reduce memory used when the VM
3650   // boots, but if the 20 small arrays don't fit on the first page then that's
3651   // an indication that it is too small.
3652 3664   heap->CollectAllAvailableGarbage("triggered really hard");
3665   // Triggering a last-resort GC should release all additional pages.
3653 3666   CHECK_EQ(initial_page_count, old_space->CountTotalPages());
3654 3667 }
3655 3668
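Each iteration of the preparation loop in the test above pins exactly one fresh page: SimulateFullSpace exhausts the current free and linear space, so the following one-element TENURED array has to open a new page on which it is the only live object. A hedged sketch of that per-iteration invariant, using the same helpers the test uses:

    int pages_before = old_space->CountTotalPages();
    {
      AlwaysAllocateScope always_allocate(isolate);
      heap::SimulateFullSpace(old_space);  // leave no room on existing pages
      factory->NewFixedArray(1, TENURED);  // must allocate on a fresh page
    }
    CHECK_EQ(pages_before + 1, old_space->CountTotalPages());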
3656 3669 static int forced_gc_counter = 0;
3657 3670
3658 3671 void MockUseCounterCallback(v8::Isolate* isolate,
3659 3672                             v8::Isolate::UseCounterFeature feature) {
3660 3673   isolate->GetCurrentContext();
3661 3674   if (feature == v8::Isolate::kForcedGC) {
3662 3675     forced_gc_counter++;
(...skipping 3422 matching lines...)
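For context on the mock above: v8::Isolate::SetUseCounterCallback is the public embedder hook for use counters, and kForcedGC is reported when script calls gc() under --expose-gc. A hedged sketch of how a test would wire the mock up; CompileRun is the cctest helper for running a script:

    i::FLAG_expose_gc = true;  // must be set before the VM is initialized
    CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
    forced_gc_counter = 0;
    CompileRun("gc();");  // each script-level gc() call reports kForcedGC
    CHECK_GT(forced_gc_counter, 0);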
7085 7098       chunk, chunk->area_end() - kPointerSize, chunk->area_end());
7086 7099   slots[chunk->area_end() - kPointerSize] = false;
7087 7100   RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
7088 7101     CHECK(slots[addr]);
7089 7102     return KEEP_SLOT;
7090 7103   });
7091 7104 }
7092 7105
7093 7106 }  // namespace internal
7094 7107 }  // namespace v8
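The final chunk shows the usual remembered-set verification pattern: a map of slot addresses records which slots should still be in the set, removed ranges flip their entries to false, and iteration asserts that only expected slots survive. A hedged sketch of the full pattern; RemoveRange is the call whose argument list is visible above, and its signature is inferred from that call site:

    std::map<Address, bool> slots;  // slot address -> expected to survive?
    Address addr = chunk->area_end() - kPointerSize;
    slots[addr] = true;
    RememberedSet<OLD_TO_NEW>::Insert(chunk, addr);
    // Removing [addr, area_end) must drop the slot from the set.
    RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, addr, chunk->area_end());
    slots[addr] = false;
    RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address a) {
      CHECK(slots[a]);   // fails if a removed slot is still enumerated
      return KEEP_SLOT;  // keep surviving slots in the set
    });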
