// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef __linux__
#include <sys/mman.h>
#undef MAP_TYPE
#endif  // __linux__

#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/isolate.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

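// Test fixture that disables concurrent sweeping for the whole test case so
// that freed pages are released sequentially through the Unmapper; the
// previous flag value is restored in TearDownTestCase().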
class SequentialUnmapperTest : public TestWithIsolate {
 public:
  SequentialUnmapperTest() = default;
  ~SequentialUnmapperTest() override = default;

  static void SetUpTestCase() {
    old_flag_ = i::FLAG_concurrent_sweeping;
    i::FLAG_concurrent_sweeping = false;
    TestWithIsolate::SetUpTestCase();
  }

  static void TearDownTestCase() {
    TestWithIsolate::TearDownTestCase();
    i::FLAG_concurrent_sweeping = old_flag_;
  }

  Heap* heap() { return isolate()->heap(); }
  MemoryAllocator* allocator() { return heap()->memory_allocator(); }
  MemoryAllocator::Unmapper* unmapper() { return allocator()->unmapper(); }

 private:
  static bool old_flag_;

  DISALLOW_COPY_AND_ASSIGN(SequentialUnmapperTest);
};

bool SequentialUnmapperTest::old_flag_;

#ifdef __linux__

// See v8:5945.
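// msync() is used as a probe for whether the page's backing memory is still
// mapped: it returns 0 while the mapping exists and -1 (ENOMEM) once the
// memory has been unmapped.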
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
  Page* page =
      allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
                                static_cast<PagedSpace*>(heap()->old_space()),
                                Executability::NOT_EXECUTABLE);
  heap()->old_space()->UnlinkFreeListCategories(page);
  EXPECT_NE(nullptr, page);
  const int page_size = getpagesize();
  void* start_address = static_cast<void*>(page->address());
  EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
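  // Freeing as pooled only queues the page for the unmapper; the backing
  // memory is expected to stay mapped.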
  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
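  // Processing the queued chunks keeps the pooled page around for reuse, so
  // its memory should still be mapped.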
  unmapper()->FreeQueuedChunks();
  EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
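  // TearDown must release the pooled page, so the mapping has to be gone.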
  unmapper()->TearDown();
  EXPECT_EQ(-1, msync(start_address, page_size, MS_SYNC));
}

// See v8:5945.
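// Same as above, but TearDown() runs while the freed page is still sitting
// in the unmapper's queue, i.e. without FreeQueuedChunks() being called
// first; the page must be unmapped nevertheless.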
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
  Page* page =
      allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
                                static_cast<PagedSpace*>(heap()->old_space()),
                                Executability::NOT_EXECUTABLE);
  heap()->old_space()->UnlinkFreeListCategories(page);
  EXPECT_NE(nullptr, page);
  const int page_size = getpagesize();
  void* start_address = static_cast<void*>(page->address());
  EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
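  // TearDown must unmap the page even though FreeQueuedChunks() was never
  // called for the queued chunk.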
  unmapper()->TearDown();
  EXPECT_EQ(-1, msync(start_address, page_size, MS_SYNC));
}

#endif  // __linux__

}  // namespace internal
}  // namespace v8