| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 2200 matching lines...) |
| 2211 Address top = *top_addr; | 2211 Address top = *top_addr; |
| 2212 // Now force the remaining allocation onto the free list. | 2212 // Now force the remaining allocation onto the free list. |
| 2213 CcTest::heap()->old_space()->EmptyAllocationInfo(); | 2213 CcTest::heap()->old_space()->EmptyAllocationInfo(); |
| 2214 return top; | 2214 return top; |
| 2215 } | 2215 } |
| 2216 | 2216 |
| 2217 | 2217 |
| 2218 // Test the case where allocation must be done from the free list, so filler | 2218 // Test the case where allocation must be done from the free list, so filler |
| 2219 // may precede or follow the object. | 2219 // may precede or follow the object. |
| 2220 TEST(TestAlignedOverAllocation) { | 2220 TEST(TestAlignedOverAllocation) { |
| 2221 Heap* heap = CcTest::heap(); | |
| 2222 // Test checks for fillers before and behind objects and requires a fresh | |
| 2223 // page and empty free list. | |
| 2224 heap::AbandonCurrentlyFreeMemory(heap->old_space()); | |
| 2225 // Allocate a dummy object to properly set up the linear allocation info. | |
| 2226 AllocationResult dummy = | |
| 2227 heap->old_space()->AllocateRawUnaligned(kPointerSize); | |
| 2228 CHECK(!dummy.IsRetry()); | |
| 2229 heap->CreateFillerObjectAt( | |
| 2230 HeapObject::cast(dummy.ToObjectChecked())->address(), kPointerSize, | |
| 2231 ClearRecordedSlots::kNo); | |
| 2232 | |
| 2233 // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones. | 2221 // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones. |
| 2234 const intptr_t double_misalignment = kDoubleSize - kPointerSize; | 2222 const intptr_t double_misalignment = kDoubleSize - kPointerSize; |
| 2235 Address start; | 2223 Address start; |
| 2236 HeapObject* obj; | 2224 HeapObject* obj; |
| 2237 HeapObject* filler1; | 2225 HeapObject* filler1; |
| 2238 HeapObject* filler2; | 2226 HeapObject* filler2; |
| 2239 if (double_misalignment) { | 2227 if (double_misalignment) { |
| 2240 start = AlignOldSpace(kDoubleAligned, 0); | 2228 start = AlignOldSpace(kDoubleAligned, 0); |
| 2241 obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned); | 2229 obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned); |
| 2242 // The object is aligned, and a filler object is created after. | 2230 // The object is aligned, and a filler object is created after. |
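Reviewer note: the pre-/post-filler arithmetic this test relies on may be easier to follow with a standalone sketch. This is illustrative only, assuming a minimal example with no V8 internals; the 32-bit values chosen for kPointerSize and kDoubleSize and the use of main() are assumptions for the sketch, not part of the change under review.

    #include <cassert>
    #include <cstdint>

    // Standalone sketch of the pre-filler computation for double-aligned
    // allocation on a 32-bit build (kPointerSize == 4, kDoubleSize == 8).
    int main() {
      const uintptr_t kPointerSize = 4;
      const uintptr_t kDoubleSize = 8;
      const uintptr_t double_misalignment = kDoubleSize - kPointerSize;  // 4

      // A top pointer that is off by kPointerSize needs a filler of exactly
      // that size in front of the object to restore double alignment.
      uintptr_t top = 0x1000 + kPointerSize;                   // misaligned
      uintptr_t aligned_top = (top + kDoubleSize - 1) & ~(kDoubleSize - 1);
      assert(aligned_top - top == double_misalignment);        // filler size
      return 0;
    }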
| (...skipping 121 matching lines...) |
| 2364 intptr_t available = new_space->Capacity() - new_space->Size(); | 2352 intptr_t available = new_space->Capacity() - new_space->Size(); |
| 2365 intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1; | 2353 intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1; |
| 2366 for (intptr_t i = 0; i < number_of_fillers; i++) { | 2354 for (intptr_t i = 0; i < number_of_fillers; i++) { |
| 2367 CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED))); | 2355 CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED))); |
| 2368 } | 2356 } |
| 2369 } | 2357 } |
| 2370 | 2358 |
| 2371 | 2359 |
| 2372 TEST(GrowAndShrinkNewSpace) { | 2360 TEST(GrowAndShrinkNewSpace) { |
| 2373 CcTest::InitializeVM(); | 2361 CcTest::InitializeVM(); |
| 2374 // Avoid shrinking new space in GC epilogue. This can happen if allocation | |
| 2375 // throughput samples have been taken while executing the benchmark. | |
| 2376 i::FLAG_predictable = true; | |
| 2377 Heap* heap = CcTest::heap(); | 2362 Heap* heap = CcTest::heap(); |
| 2378 NewSpace* new_space = heap->new_space(); | 2363 NewSpace* new_space = heap->new_space(); |
| 2379 | 2364 |
| 2380 if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) { | 2365 if (heap->MaxSemiSpaceSize() == heap->InitialSemiSpaceSize()) { |
| 2381 return; | 2366 return; |
| 2382 } | 2367 } |
| 2383 | 2368 |
| 2384 // Explicitly growing should double the space capacity. | 2369 // Explicitly growing should double the space capacity. |
| 2385 intptr_t old_capacity, new_capacity; | 2370 intptr_t old_capacity, new_capacity; |
| 2386 old_capacity = new_space->TotalCapacity(); | 2371 old_capacity = new_space->TotalCapacity(); |
| (...skipping 1222 matching lines...) |
| 3609 // memory is distributed. Since this is non-deterministic because of | 3594 // memory is distributed. Since this is non-deterministic because of |
| 3610 // concurrent sweeping, we disable it for this test. | 3595 // concurrent sweeping, we disable it for this test. |
| 3611 i::FLAG_parallel_compaction = false; | 3596 i::FLAG_parallel_compaction = false; |
| 3612 // Concurrent sweeping adds non-determinism, depending on when memory is | 3597 // Concurrent sweeping adds non-determinism, depending on when memory is |
| 3613 // available for further reuse. | 3598 // available for further reuse. |
| 3614 i::FLAG_concurrent_sweeping = false; | 3599 i::FLAG_concurrent_sweeping = false; |
| 3615 // Fast evacuation of pages may result in a different page count in old space. | 3600 // Fast evacuation of pages may result in a different page count in old space. |
| 3616 i::FLAG_page_promotion = false; | 3601 i::FLAG_page_promotion = false; |
| 3617 CcTest::InitializeVM(); | 3602 CcTest::InitializeVM(); |
| 3618 Isolate* isolate = CcTest::i_isolate(); | 3603 Isolate* isolate = CcTest::i_isolate(); |
| 3619 // If there's snapshot available, we don't know whether 20 small arrays will | |
| 3620 // fit on the initial pages. | |
| 3621 if (!isolate->snapshot_available()) return; | |
| 3622 Factory* factory = isolate->factory(); | 3604 Factory* factory = isolate->factory(); |
| 3623 Heap* heap = isolate->heap(); | 3605 Heap* heap = isolate->heap(); |
| 3624 | |
| 3625 v8::HandleScope scope(CcTest::isolate()); | 3606 v8::HandleScope scope(CcTest::isolate()); |
| 3626 static const int number_of_test_pages = 20; | 3607 static const int number_of_test_pages = 20; |
| 3627 | 3608 |
| 3628 // Prepare many pages with low live-bytes count. | 3609 // Prepare many pages with low live-bytes count. |
| 3629 PagedSpace* old_space = heap->old_space(); | 3610 PagedSpace* old_space = heap->old_space(); |
| 3630 const int initial_page_count = old_space->CountTotalPages(); | 3611 const int initial_page_count = old_space->CountTotalPages(); |
| 3631 const int overall_page_count = number_of_test_pages + initial_page_count; | 3612 const int overall_page_count = number_of_test_pages + initial_page_count; |
| 3632 for (int i = 0; i < number_of_test_pages; i++) { | 3613 for (int i = 0; i < number_of_test_pages; i++) { |
| 3633 AlwaysAllocateScope always_allocate(isolate); | 3614 AlwaysAllocateScope always_allocate(isolate); |
| 3634 heap::SimulateFullSpace(old_space); | 3615 heap::SimulateFullSpace(old_space); |
| 3635 factory->NewFixedArray(1, TENURED); | 3616 factory->NewFixedArray(1, TENURED); |
| 3636 } | 3617 } |
| 3637 CHECK_EQ(overall_page_count, old_space->CountTotalPages()); | 3618 CHECK_EQ(overall_page_count, old_space->CountTotalPages()); |
| 3638 | 3619 |
| 3639 // Triggering one GC will cause a lot of garbage to be discovered but | 3620 // Triggering one GC will cause a lot of garbage to be discovered but |
| 3640 // evenly spread across all allocated pages. | 3621 // evenly spread across all allocated pages. |
| 3641 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, | 3622 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, |
| 3642 "triggered for preparation"); | 3623 "triggered for preparation"); |
| 3643 CHECK_GE(overall_page_count, old_space->CountTotalPages()); | 3624 CHECK_GE(overall_page_count, old_space->CountTotalPages()); |
| 3644 | 3625 |
| 3645 // Triggering subsequent GCs should cause at least half of the pages | 3626 // Triggering subsequent GCs should cause at least half of the pages |
| 3646 // to be released to the OS after at most two cycles. | 3627 // to be released to the OS after at most two cycles. |
| 3647 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, | 3628 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, |
| 3648 "triggered by test 1"); | 3629 "triggered by test 1"); |
| 3649 CHECK_GE(overall_page_count, old_space->CountTotalPages()); | 3630 CHECK_GE(overall_page_count, old_space->CountTotalPages()); |
| 3650 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, | 3631 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, |
| 3651 "triggered by test 2"); | 3632 "triggered by test 2"); |
| 3652 CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2); | 3633 CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2); |
| 3653 | 3634 |
| 3635 // Triggering a last-resort GC should cause all pages to be released to the |
| 3636 // OS so that other processes can seize the memory. If we get a failure here |
| 3637 // where there are 2 pages left instead of 1, then we should increase the |
| 3638 // size of the first page a little in SizeOfFirstPage in spaces.cc. The |
| 3639 // first page should be small in order to reduce memory used when the VM |
| 3640 // boots, but if the 20 small arrays don't fit on the first page then that's |
| 3641 // an indication that it is too small. |
| 3654 heap->CollectAllAvailableGarbage("triggered really hard"); | 3642 heap->CollectAllAvailableGarbage("triggered really hard"); |
| 3655 // Triggering a last-resort GC should release all additional pages. | |
| 3656 CHECK_EQ(initial_page_count, old_space->CountTotalPages()); | 3643 CHECK_EQ(initial_page_count, old_space->CountTotalPages()); |
| 3657 } | 3644 } |
| 3658 | 3645 |
| 3659 static int forced_gc_counter = 0; | 3646 static int forced_gc_counter = 0; |
| 3660 | 3647 |
| 3661 void MockUseCounterCallback(v8::Isolate* isolate, | 3648 void MockUseCounterCallback(v8::Isolate* isolate, |
| 3662 v8::Isolate::UseCounterFeature feature) { | 3649 v8::Isolate::UseCounterFeature feature) { |
| 3663 isolate->GetCurrentContext(); | 3650 isolate->GetCurrentContext(); |
| 3664 if (feature == v8::Isolate::kForcedGC) { | 3651 if (feature == v8::Isolate::kForcedGC) { |
| 3665 forced_gc_counter++; | 3652 forced_gc_counter++; |
| (...skipping 3426 matching lines...) |
| 7092 chunk, chunk->area_end() - kPointerSize, chunk->area_end()); | 7079 chunk, chunk->area_end() - kPointerSize, chunk->area_end()); |
| 7093 slots[chunk->area_end() - kPointerSize] = false; | 7080 slots[chunk->area_end() - kPointerSize] = false; |
| 7094 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) { | 7081 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) { |
| 7095 CHECK(slots[addr]); | 7082 CHECK(slots[addr]); |
| 7096 return KEEP_SLOT; | 7083 return KEEP_SLOT; |
| 7097 }); | 7084 }); |
| 7098 } | 7085 } |
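Reviewer note: for readers unfamiliar with the KEEP_SLOT/REMOVE_SLOT callback style used by RememberedSet<OLD_TO_NEW>::Iterate above, here is a generic sketch of the same visit-and-filter pattern. This is not V8's RememberedSet API; the names IterateSlots and SlotCallbackResult and the std::set container are hypothetical stand-ins used only to show the shape of the callback contract.

    #include <cstdint>
    #include <functional>
    #include <iterator>
    #include <set>

    enum SlotCallbackResult { KEEP_SLOT, REMOVE_SLOT };

    // Visit every recorded slot address; the callback decides per slot
    // whether it stays in the set or is dropped.
    void IterateSlots(std::set<uintptr_t>* slots,
                      const std::function<SlotCallbackResult(uintptr_t)>& cb) {
      for (auto it = slots->begin(); it != slots->end();) {
        it = (cb(*it) == REMOVE_SLOT) ? slots->erase(it) : std::next(it);
      }
    }

    int main() {
      std::set<uintptr_t> slots = {0x1000, 0x1008, 0x1010};
      // Keep only 16-byte-aligned slots, mirroring the per-address
      // keep/remove choice the test's lambda makes.
      IterateSlots(&slots, [](uintptr_t addr) {
        return (addr % 16 == 0) ? KEEP_SLOT : REMOVE_SLOT;
      });
      return 0;
    }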
| 7099 | 7086 |
| 7100 } // namespace internal | 7087 } // namespace internal |
| 7101 } // namespace v8 | 7088 } // namespace v8 |