// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef __linux__
#include <sys/mman.h>
#include <unistd.h>  // For getpagesize(), used by the tests below.
#undef MAP_TYPE
#endif  // __linux__

#include "src/heap/heap-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/spaces.h"
#include "src/isolate.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace v8 {
namespace internal {

// Test fixture that disables concurrent sweeping for the whole test case, so
// that pages freed below are unmapped only by the explicit Unmapper calls.
class SequentialUnmapperTest : public TestWithIsolate {
 public:
  SequentialUnmapperTest() = default;
  ~SequentialUnmapperTest() override = default;

  static void SetUpTestCase() {
    old_flag_ = i::FLAG_concurrent_sweeping;
    i::FLAG_concurrent_sweeping = false;
    TestWithIsolate::SetUpTestCase();
  }

  static void TearDownTestCase() {
    TestWithIsolate::TearDownTestCase();
    i::FLAG_concurrent_sweeping = old_flag_;
  }

  Heap* heap() { return isolate()->heap(); }
  MemoryAllocator* allocator() { return heap()->memory_allocator(); }
  MemoryAllocator::Unmapper* unmapper() { return allocator()->unmapper(); }

 private:
  // Saved value of FLAG_concurrent_sweeping, restored in TearDownTestCase().
  static bool old_flag_;

  DISALLOW_COPY_AND_ASSIGN(SequentialUnmapperTest);
};

bool SequentialUnmapperTest::old_flag_;

#ifdef __linux__
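
// The tests below use msync(2) as a probe for whether an address range is
// still mapped: msync() returns 0 on a mapped page and fails with -1 (ENOMEM)
// once the page has been returned to the OS.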

// See v8:5945.
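// Freeing with kPooledAndQueue leaves the page pooled and still mapped, even
// after FreeQueuedChunks(); only Unmapper::TearDown() finally unmaps it.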
TEST_F(SequentialUnmapperTest, UnmapOnTeardownAfterAlreadyFreeingPooled) {
  Page* page =
      allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
                                static_cast<PagedSpace*>(heap()->old_space()),
                                Executability::NOT_EXECUTABLE);
  EXPECT_NE(nullptr, page);
  const int page_size = getpagesize();
  void* start_address = static_cast<void*>(page->address());
  EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
  unmapper()->FreeQueuedChunks();
  EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
  unmapper()->TearDown();
  EXPECT_EQ(-1, msync(start_address, page_size, MS_SYNC));
}

// See v8:5945.
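// Unlike the test above, FreeQueuedChunks() is never called here, so
// Unmapper::TearDown() alone must unmap the queued page.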
TEST_F(SequentialUnmapperTest, UnmapOnTeardown) {
  Page* page =
      allocator()->AllocatePage(MemoryAllocator::PageAreaSize(OLD_SPACE),
                                static_cast<PagedSpace*>(heap()->old_space()),
                                Executability::NOT_EXECUTABLE);
  EXPECT_NE(nullptr, page);
  const int page_size = getpagesize();
  void* start_address = static_cast<void*>(page->address());
  EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
  allocator()->Free<MemoryAllocator::kPooledAndQueue>(page);
  EXPECT_EQ(0, msync(start_address, page_size, MS_SYNC));
  unmapper()->TearDown();
  EXPECT_EQ(-1, msync(start_address, page_size, MS_SYNC));
}

#endif  // __linux__

}  // namespace internal
}  // namespace v8