Chromium Code Reviews

Unified Diff: test/cctest/heap/test-heap.cc

Issue 2278653003: Reland of "[heap] Switch to 500k pages" (Closed)
Patch Set: Rebase (created 4 years, 3 months ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 2200 matching lines...)
   Address top = *top_addr;
   // Now force the remaining allocation onto the free list.
   CcTest::heap()->old_space()->EmptyAllocationInfo();
   return top;
 }


 // Test the case where allocation must be done from the free list, so filler
 // may precede or follow the object.
 TEST(TestAlignedOverAllocation) {
+  Heap* heap = CcTest::heap();
+  // The test checks for fillers before and after objects and requires a
+  // fresh page and an empty free list.
+  heap::AbandonCurrentlyFreeMemory(heap->old_space());
+  // Allocate a dummy object to properly set up the linear allocation info.
+  AllocationResult dummy =
+      heap->old_space()->AllocateRawUnaligned(kPointerSize);
+  CHECK(!dummy.IsRetry());
+  heap->CreateFillerObjectAt(
+      HeapObject::cast(dummy.ToObjectChecked())->address(), kPointerSize,
+      ClearRecordedSlots::kNo);
+
   // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones.
   const intptr_t double_misalignment = kDoubleSize - kPointerSize;
   Address start;
   HeapObject* obj;
   HeapObject* filler1;
   HeapObject* filler2;
   if (double_misalignment) {
     start = AlignOldSpace(kDoubleAligned, 0);
     obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned);
     // The object is aligned, and a filler object is created after.
(...skipping 121 matching lines...)
   intptr_t available = new_space->Capacity() - new_space->Size();
   intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
   for (intptr_t i = 0; i < number_of_fillers; i++) {
     CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED)));
   }
 }


 TEST(GrowAndShrinkNewSpace) {
   CcTest::InitializeVM();
+  // Avoid shrinking new space in GC epilogue. This can happen if allocation
+  // throughput samples have been taken while executing the benchmark.
+  i::FLAG_predictable = true;
   Heap* heap = CcTest::heap();
   NewSpace* new_space = heap->new_space();

   if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) {
     return;
   }

   // Explicitly growing should double the space capacity.
   intptr_t old_capacity, new_capacity;
   old_capacity = new_space->TotalCapacity();
(...skipping 1222 matching lines...)
   // memory is distributed. Since this is non-deterministic because of
   // concurrent sweeping, we disable it for this test.
   i::FLAG_parallel_compaction = false;
   // Concurrent sweeping adds non-determinism, depending on when memory is
   // available for further reuse.
   i::FLAG_concurrent_sweeping = false;
   // Fast evacuation of pages may result in a different page count in old space.
   i::FLAG_page_promotion = false;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
+  // If there's no snapshot available, we don't know whether 20 small arrays
+  // will fit on the initial pages.
+  if (!isolate->snapshot_available()) return;
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
+
   v8::HandleScope scope(CcTest::isolate());
   static const int number_of_test_pages = 20;

   // Prepare many pages with low live-bytes count.
   PagedSpace* old_space = heap->old_space();
   const int initial_page_count = old_space->CountTotalPages();
   const int overall_page_count = number_of_test_pages + initial_page_count;
   for (int i = 0; i < number_of_test_pages; i++) {
     AlwaysAllocateScope always_allocate(isolate);
     heap::SimulateFullSpace(old_space);
     factory->NewFixedArray(1, TENURED);
   }
   CHECK_EQ(overall_page_count, old_space->CountTotalPages());

   // Triggering one GC will cause a lot of garbage to be discovered but
   // evenly spread across all allocated pages.
   heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
                           "triggered for preparation");
   CHECK_GE(overall_page_count, old_space->CountTotalPages());

   // Triggering subsequent GCs should cause at least half of the pages
   // to be released to the OS after at most two cycles.
   heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
                           "triggered by test 1");
   CHECK_GE(overall_page_count, old_space->CountTotalPages());
   heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask,
                           "triggered by test 2");
   CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2);

-  // Triggering a last-resort GC should cause all pages to be released to the
-  // OS so that other processes can seize the memory. If we get a failure here
-  // where there are 2 pages left instead of 1, then we should increase the
-  // size of the first page a little in SizeOfFirstPage in spaces.cc. The
-  // first page should be small in order to reduce memory used when the VM
-  // boots, but if the 20 small arrays don't fit on the first page then that's
-  // an indication that it is too small.
   heap->CollectAllAvailableGarbage("triggered really hard");
+  // Triggering a last-resort GC should release all additional pages.
   CHECK_EQ(initial_page_count, old_space->CountTotalPages());
 }

 static int forced_gc_counter = 0;

 void MockUseCounterCallback(v8::Isolate* isolate,
                             v8::Isolate::UseCounterFeature feature) {
   isolate->GetCurrentContext();
   if (feature == v8::Isolate::kForcedGC) {
     forced_gc_counter++;
(...skipping 3426 matching lines...)
       chunk, chunk->area_end() - kPointerSize, chunk->area_end());
   slots[chunk->area_end() - kPointerSize] = false;
   RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) {
     CHECK(slots[addr]);
     return KEEP_SLOT;
   });
 }

 }  // namespace internal
 }  // namespace v8