OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 2210 matching lines...)
2221 Address top = *top_addr; | 2221 Address top = *top_addr; |
2222 // Now force the remaining allocation onto the free list. | 2222 // Now force the remaining allocation onto the free list. |
2223 CcTest::heap()->old_space()->EmptyAllocationInfo(); | 2223 CcTest::heap()->old_space()->EmptyAllocationInfo(); |
2224 return top; | 2224 return top; |
2225 } | 2225 } |
2226 | 2226 |
2227 | 2227 |
2228 // Test the case where allocation must be done from the free list, so filler | 2228 // Test the case where allocation must be done from the free list, so filler |
2229 // may precede or follow the object. | 2229 // may precede or follow the object. |
2230 TEST(TestAlignedOverAllocation) { | 2230 TEST(TestAlignedOverAllocation) { |
2231 Heap* heap = CcTest::heap(); | |
2232 // Test checks for fillers before and behind objects and requires a fresh | |
2233 // page and empty free list. | |
2234 heap::AbandonCurrentlyFreeMemory(heap->old_space()); | |
2235 // Allocate a dummy object to properly set up the linear allocation info. | |
2236 AllocationResult dummy = | |
2237 heap->old_space()->AllocateRawUnaligned(kPointerSize); | |
2238 CHECK(!dummy.IsRetry()); | |
2239 heap->CreateFillerObjectAt( | |
2240 HeapObject::cast(dummy.ToObjectChecked())->address(), kPointerSize, | |
2241 ClearRecordedSlots::kNo); | |
2242 | |
2243 // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones. | 2231 // Double misalignment is 4 on 32-bit platforms, 0 on 64-bit ones. |
2244 const intptr_t double_misalignment = kDoubleSize - kPointerSize; | 2232 const intptr_t double_misalignment = kDoubleSize - kPointerSize; |
2245 Address start; | 2233 Address start; |
2246 HeapObject* obj; | 2234 HeapObject* obj; |
2247 HeapObject* filler1; | 2235 HeapObject* filler1; |
2248 HeapObject* filler2; | 2236 HeapObject* filler2; |
2249 if (double_misalignment) { | 2237 if (double_misalignment) { |
2250 start = AlignOldSpace(kDoubleAligned, 0); | 2238 start = AlignOldSpace(kDoubleAligned, 0); |
2251 obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned); | 2239 obj = OldSpaceAllocateAligned(kPointerSize, kDoubleAligned); |
2252 // The object is aligned, and a filler object is created after. | 2240 // The object is aligned, and a filler object is created after. |
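The misalignment arithmetic this hunk relies on can be checked in isolation. Below is a minimal standalone sketch in plain C++ (not V8 internals); the 8-byte double alignment mirrors kDoubleSize, and the example addresses are illustrative assumptions for a 32-bit build where kPointerSize is 4:

    #include <cassert>
    #include <cstdint>

    // How many filler bytes make an address 8-byte (double) aligned?
    static uintptr_t DoubleAlignFillerSize(uintptr_t addr) {
      const uintptr_t kDoubleAlignment = 8;
      uintptr_t misalignment = addr & (kDoubleAlignment - 1);
      return misalignment == 0 ? 0 : kDoubleAlignment - misalignment;
    }

    int main() {
      // With 4-byte pointers, a pointer-aligned address is off from double
      // alignment by either 0 or 4 bytes, so the filler is 0 or 4 bytes.
      assert(DoubleAlignFillerSize(0x1004) == 4);
      assert(DoubleAlignFillerSize(0x1008) == 0);
      return 0;
    }

On a 64-bit build kDoubleSize equals kPointerSize, double_misalignment is 0, and this branch of the test is skipped entirely.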
(...skipping 1363 matching lines...)
3616 // memory is distributed. Since this is non-deterministic because of | 3604 // memory is distributed. Since this is non-deterministic because of |
3617 // concurrent sweeping, we disable it for this test. | 3605 // concurrent sweeping, we disable it for this test. |
3618 i::FLAG_parallel_compaction = false; | 3606 i::FLAG_parallel_compaction = false; |
3619 // Concurrent sweeping adds non-determinism, depending on when memory is | 3607 // Concurrent sweeping adds non-determinism, depending on when memory is |
3620 // available for further reuse. | 3608 // available for further reuse. |
3621 i::FLAG_concurrent_sweeping = false; | 3609 i::FLAG_concurrent_sweeping = false; |
3622 // Fast evacuation of pages may result in a different page count in old space. | 3610 // Fast evacuation of pages may result in a different page count in old space. |
3623 i::FLAG_page_promotion = false; | 3611 i::FLAG_page_promotion = false; |
3624 CcTest::InitializeVM(); | 3612 CcTest::InitializeVM(); |
3625 Isolate* isolate = CcTest::i_isolate(); | 3613 Isolate* isolate = CcTest::i_isolate(); |
3626 // If there's a snapshot available, we don't know whether 20 small arrays will |
3627 // fit on the initial pages. | |
3628 if (!isolate->snapshot_available()) return; | |
3629 Factory* factory = isolate->factory(); | 3614 Factory* factory = isolate->factory(); |
3630 Heap* heap = isolate->heap(); | 3615 Heap* heap = isolate->heap(); |
3631 | |
3632 v8::HandleScope scope(CcTest::isolate()); | 3616 v8::HandleScope scope(CcTest::isolate()); |
3633 static const int number_of_test_pages = 20; | 3617 static const int number_of_test_pages = 20; |
3634 | 3618 |
3635 // Prepare many pages with low live-bytes count. | 3619 // Prepare many pages with low live-bytes count. |
3636 PagedSpace* old_space = heap->old_space(); | 3620 PagedSpace* old_space = heap->old_space(); |
3637 const int initial_page_count = old_space->CountTotalPages(); | 3621 const int initial_page_count = old_space->CountTotalPages(); |
3638 const int overall_page_count = number_of_test_pages + initial_page_count; | 3622 const int overall_page_count = number_of_test_pages + initial_page_count; |
3639 for (int i = 0; i < number_of_test_pages; i++) { | 3623 for (int i = 0; i < number_of_test_pages; i++) { |
3640 AlwaysAllocateScope always_allocate(isolate); | 3624 AlwaysAllocateScope always_allocate(isolate); |
3641 heap::SimulateFullSpace(old_space); | 3625 heap::SimulateFullSpace(old_space); |
3642 factory->NewFixedArray(1, TENURED); | 3626 factory->NewFixedArray(1, TENURED); |
3643 } | 3627 } |
3644 CHECK_EQ(overall_page_count, old_space->CountTotalPages()); | 3628 CHECK_EQ(overall_page_count, old_space->CountTotalPages()); |
3645 | 3629 |
3646 // Triggering one GC will cause a lot of garbage to be discovered but | 3630 // Triggering one GC will cause a lot of garbage to be discovered but |
3647 // evenly spread across all allocated pages. | 3631 // evenly spread across all allocated pages. |
3648 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, | 3632 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, |
3649 "triggered for preparation"); | 3633 "triggered for preparation"); |
3650 CHECK_GE(overall_page_count, old_space->CountTotalPages()); | 3634 CHECK_GE(overall_page_count, old_space->CountTotalPages()); |
3651 | 3635 |
3652 // Triggering subsequent GCs should cause at least half of the pages | 3636 // Triggering subsequent GCs should cause at least half of the pages |
3653 // to be released to the OS after at most two cycles. | 3637 // to be released to the OS after at most two cycles. |
3654 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, | 3638 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, |
3655 "triggered by test 1"); | 3639 "triggered by test 1"); |
3656 CHECK_GE(overall_page_count, old_space->CountTotalPages()); | 3640 CHECK_GE(overall_page_count, old_space->CountTotalPages()); |
3657 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, | 3641 heap->CollectAllGarbage(Heap::kFinalizeIncrementalMarkingMask, |
3658 "triggered by test 2"); | 3642 "triggered by test 2"); |
3659 CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2); | 3643 CHECK_GE(overall_page_count, old_space->CountTotalPages() * 2); |
3660 | 3644 |
| 3645 // Triggering a last-resort GC should cause all pages to be released to the |
| 3646 // OS so that other processes can seize the memory. If we get a failure here |
| 3647 // where there are 2 pages left instead of 1, then we should increase the |
| 3648 // size of the first page a little in SizeOfFirstPage in spaces.cc. The |
| 3649 // first page should be small in order to reduce memory used when the VM |
| 3650 // boots, but if the 20 small arrays don't fit on the first page then that's |
| 3651 // an indication that it is too small. |
3661 heap->CollectAllAvailableGarbage("triggered really hard"); | 3652 heap->CollectAllAvailableGarbage("triggered really hard"); |
3662 // Triggering a last-resort GC should release all additional pages. | |
3663 CHECK_EQ(initial_page_count, old_space->CountTotalPages()); | 3653 CHECK_EQ(initial_page_count, old_space->CountTotalPages()); |
3664 } | 3654 } |
3665 | 3655 |
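The page-count assertions in the test above encode "at least half of the pages were released" as an inequality. A tiny standalone model of that bookkeeping, with hypothetical counts (the initial_page_count of 3 and the post-GC count of 11 are illustrative assumptions, not values from the test):

    #include <cassert>

    int main() {
      const int initial_page_count = 3;   // hypothetical boot-time page count
      const int number_of_test_pages = 20;
      const int overall_page_count = number_of_test_pages + initial_page_count;  // 23

      // "At least half of the pages released" <=> remaining <= overall / 2,
      // which the test writes as overall_page_count >= remaining * 2.
      const int remaining_pages = 11;     // example count after two GC cycles
      assert(overall_page_count >= remaining_pages * 2);
      return 0;
    }

The final CHECK_EQ is stricter: after the last-resort GC, old space should be back to exactly its initial page count.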
3666 static int forced_gc_counter = 0; | 3656 static int forced_gc_counter = 0; |
3667 | 3657 |
3668 void MockUseCounterCallback(v8::Isolate* isolate, | 3658 void MockUseCounterCallback(v8::Isolate* isolate, |
3669 v8::Isolate::UseCounterFeature feature) { | 3659 v8::Isolate::UseCounterFeature feature) { |
3670 isolate->GetCurrentContext(); | 3660 isolate->GetCurrentContext(); |
3671 if (feature == v8::Isolate::kForcedGC) { | 3661 if (feature == v8::Isolate::kForcedGC) { |
3672 forced_gc_counter++; | 3662 forced_gc_counter++; |
(...skipping 3422 matching lines...)
7095 chunk, chunk->area_end() - kPointerSize, chunk->area_end()); | 7085 chunk, chunk->area_end() - kPointerSize, chunk->area_end()); |
7096 slots[chunk->area_end() - kPointerSize] = false; | 7086 slots[chunk->area_end() - kPointerSize] = false; |
7097 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) { | 7087 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [&slots](Address addr) { |
7098 CHECK(slots[addr]); | 7088 CHECK(slots[addr]); |
7099 return KEEP_SLOT; | 7089 return KEEP_SLOT; |
7100 }); | 7090 }); |
7101 } | 7091 } |
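The check in this hunk follows a record-then-verify pattern: expected slot addresses are tracked in `slots`, a range is removed from the remembered set, the matching expectation is cleared, and iteration must then report only addresses still expected. A standalone model of the same pattern, using plain C++ containers in place of the V8 RememberedSet API (the addresses are hypothetical):

    #include <cassert>
    #include <cstdint>
    #include <set>

    int main() {
      // A stand-in for the remembered set and the test's expectation map.
      std::set<uintptr_t> recorded = {0x1000, 0x1008, 0x1010};
      std::set<uintptr_t> expected = recorded;

      // Mirror RememberedSet::RemoveRange followed by slots[...] = false:
      // drop the last slot from the set and clear its expectation.
      recorded.erase(0x1010);
      expected.erase(0x1010);

      // Mirror RememberedSet::Iterate: every reported slot must still be
      // expected, otherwise the CHECK in the test would fire.
      for (uintptr_t addr : recorded) {
        assert(expected.count(addr) == 1);
      }
      return 0;
    }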
7102 | 7092 |
7103 } // namespace internal | 7093 } // namespace internal |
7104 } // namespace v8 | 7094 } // namespace v8 |