OLD | NEW |
(Empty) | |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
#include <assert.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <map>
#include <string>

#include "testing/gtest/include/gtest/gtest.h"
#include "tools/android/heap_profiler/heap_profiler.h"
| 15 |
| 16 |
| 17 namespace { |
| 18 |
| 19 // The purpose of the four functions below is to create watermarked allocations, |
| 20 // so the test fixture can ascertain that the hooks work end-to-end. |
// Allocates |size| bytes via malloc and touches every byte. The memset below
// avoids tail-call elimination optimizations and ensures that this function
// will be part of the unwound stack trace.
__attribute__ ((noinline)) void* MallocStep2(size_t size) {
  void* ptr = malloc(size);
  // Guard against allocation failure: memset(NULL, ...) is undefined behavior.
  if (ptr != NULL)
    memset(ptr, 0, size);
  return ptr;
}
| 28 |
| 29 __attribute__ ((noinline)) void* MallocStep1(size_t size) { |
| 30 void* ptr = MallocStep2(size); |
| 31 memset(ptr, 0, size); |
| 32 return ptr; |
| 33 } |
| 34 |
// Creates an anonymous, private, read-write mapping of |size| bytes.
// Returns the mapped address, or MAP_FAILED on error.
__attribute__ ((noinline)) void* DoMmap(size_t size) {
  // POSIX requires fd == -1 (not 0) for MAP_ANONYMOUS mappings. Linux happens
  // to ignore the fd in that case, but -1 is the portable, documented form.
  return mmap(
      0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}
| 39 |
| 40 __attribute__ ((noinline)) void* MmapStep2(size_t size) { |
| 41 void* ptr = DoMmap(size); |
| 42 memset(ptr, 0, size); |
| 43 return ptr; |
| 44 } |
| 45 |
| 46 __attribute__ ((noinline)) void* MmapStep1(size_t size) { |
| 47 void* ptr = MmapStep2(size); |
| 48 memset(ptr, 0, size); |
| 49 return ptr; |
| 50 } |
| 51 |
| 52 bool StackTraceContains(const uintptr_t* frames, uintptr_t fn_addr) { |
| 53 // expected_fn_length is a gross estimation of the size of the Step* functions |
| 54 // above. It tries to address the following problem: the addrs in the unwound |
| 55 // stack frames will NOT point to the beginning of the functions, but to the |
| 56 // PC after the call to malloc/mmap. |
| 57 const size_t expected_fn_length = 16; |
| 58 |
| 59 for(size_t i = 0; i < HEAP_PROFILER_MAX_DEPTH; ++i) { |
| 60 if (frames[i] >= fn_addr && frames[i] <= fn_addr + expected_fn_length) |
| 61 return true; |
| 62 } |
| 63 return false; |
| 64 } |
| 65 |
| 66 const HeapStats* GetHeapProfilerStats() { |
| 67 HeapStats* const* stats_ptr = reinterpret_cast<HeapStats* const*>( |
| 68 dlsym(RTLD_DEFAULT, "heap_profiler_stats_for_tests")); |
| 69 EXPECT_TRUE(stats_ptr != NULL); |
| 70 const HeapStats* stats = *stats_ptr; |
| 71 EXPECT_TRUE(stats != NULL); |
| 72 EXPECT_EQ(HEAP_PROFILER_MAGIC_MARKER, stats->magic_start); |
| 73 return stats; |
| 74 } |
| 75 |
// End-to-end check of the malloc/free hooks: performs watermarked allocations
// and verifies that the profiler records their sizes and call stacks.
TEST(HeapProfilerIntegrationTest, TestMallocStackTraces) {
  const HeapStats* stats = GetHeapProfilerStats();

  // Distinct sizes let the scan below identify each allocation in the
  // profiler's stack-trace table.
  void* m1 = MallocStep1(1000000);
  void* m2 = MallocStep2(2000000);
  void* m3 = MallocStep2(4000000);

  bool m1_found = false;
  bool m2_found = false;
  bool m3_found = false;

  ASSERT_TRUE(stats->stack_traces != NULL);

  // m3 is freed before scanning: its entry must be gone from the table.
  free(m3);

  for(size_t i = 0; i < stats->max_stack_traces; ++i) {
    const StacktraceEntry* st = &stats->stack_traces[i];
    if (st->alloc_bytes == 1000000) {
      m1_found = true;
      // m1 went through MallocStep1 -> MallocStep2: both frames must appear.
      EXPECT_TRUE(StackTraceContains(
          st->frames, reinterpret_cast<uintptr_t>(&MallocStep1)));
      EXPECT_TRUE(StackTraceContains(
          st->frames, reinterpret_cast<uintptr_t>(&MallocStep2)));
    }
    else if (st->alloc_bytes == 2000000) {
      m2_found = true;
      // m2 was allocated directly via MallocStep2: MallocStep1 must NOT be
      // on its recorded stack.
      EXPECT_FALSE(StackTraceContains(
          st->frames, reinterpret_cast<uintptr_t>(&MallocStep1)));
      EXPECT_TRUE(StackTraceContains(
          st->frames, reinterpret_cast<uintptr_t>(&MallocStep2)));
    }
    else if (st->alloc_bytes == 4000000) {
      m3_found = true;
    }
  }

  EXPECT_TRUE(m1_found);
  EXPECT_TRUE(m2_found);
  EXPECT_FALSE(m3_found);  // Freed above: the free hook must have removed it.

  // Freeing m1 + m2 must decrease the live-bytes counter by exactly their
  // combined size (1000000 + 2000000).
  const size_t total_alloc_start = stats->total_alloc_bytes;
  free(m1);
  free(m2);
  const size_t total_alloc_end = stats->total_alloc_bytes;

  EXPECT_EQ(3000000, total_alloc_start - total_alloc_end);
}
| 123 |
// End-to-end check of the mmap/munmap hooks, mirroring the malloc test above.
TEST(HeapProfilerIntegrationTest, TestMmapStackTraces) {
  const HeapStats* stats = GetHeapProfilerStats();

  // Page-multiple sizes (mmap granularity); the prime page counts presumably
  // make the sizes unlikely to collide with unrelated mappings in the
  // profiler's table — TODO confirm against the profiler implementation.
  // PAGE_SIZE is assumed to come from a transitively-included system header.
  static const size_t m1_size = 499 * PAGE_SIZE;
  static const size_t m2_size = 503 * PAGE_SIZE;
  static const size_t m3_size = 509 * PAGE_SIZE;

  void* m1 = MmapStep1(m1_size);
  void* m2 = MmapStep2(m2_size);
  void* m3 = MmapStep2(m3_size);

  bool m1_found = false;
  bool m2_found = false;
  bool m3_found = false;

  ASSERT_TRUE(stats->stack_traces != NULL);

  // m3 is unmapped before scanning: its entry must be gone from the table.
  munmap(m3, m3_size);

  for(size_t i = 0; i < stats->max_stack_traces; ++i) {
    const StacktraceEntry* st = &stats->stack_traces[i];
    if (st->alloc_bytes == m1_size) {
      m1_found = true;
      // m1 went through MmapStep1 -> MmapStep2: both frames must appear.
      EXPECT_TRUE(StackTraceContains(
          st->frames, reinterpret_cast<uintptr_t>(&MmapStep1)));
      EXPECT_TRUE(StackTraceContains(
          st->frames, reinterpret_cast<uintptr_t>(&MmapStep2)));
    }
    else if (st->alloc_bytes == m2_size) {
      m2_found = true;
      // m2 was mapped directly via MmapStep2: MmapStep1 must NOT be on its
      // recorded stack.
      EXPECT_FALSE(StackTraceContains(
          st->frames, reinterpret_cast<uintptr_t>(&MmapStep1)));
      EXPECT_TRUE(StackTraceContains(
          st->frames, reinterpret_cast<uintptr_t>(&MmapStep2)));
    }
    else if (st->alloc_bytes == m3_size) {
      m3_found = true;
    }
  }

  EXPECT_TRUE(m1_found);
  EXPECT_TRUE(m2_found);
  EXPECT_FALSE(m3_found);  // Unmapped above: the hook must have removed it.

  // Unmapping m1 + m2 must decrease the live-bytes counter by exactly their
  // combined size.
  const size_t total_alloc_start = stats->total_alloc_bytes;
  munmap(m1, m1_size);
  munmap(m2, m2_size);
  const size_t total_alloc_end = stats->total_alloc_bytes;

  EXPECT_EQ(m1_size + m2_size, total_alloc_start - total_alloc_end);
}
| 175 |
// Resolves /proc/self/exe and strips the trailing executable name, yielding
// the directory containing the current executable ("" on failure).
std::string GetExePath() {
  char buf[1024];
  const ssize_t len = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
  if (len < 0)
    return std::string();
  std::string path(buf, len);
  const size_t last_slash = path.find_last_of('/');
  if (last_slash == std::string::npos)
    return std::string();
  return path.substr(0, last_slash);
}
| 189 |
| 190 } // namespace |
| 191 |
| 192 |
int main(int argc, char** argv) {
  // The heap profiler hooks malloc/mmap via LD_PRELOAD. On first entry (no
  // LD_PRELOAD set) the binary re-launches itself with libheap_profiler.so
  // preloaded; the second incarnation then actually runs the tests.
  if (getenv("LD_PRELOAD") == NULL) {
    // Point LD_LIBRARY_PATH at the directory containing this executable, so
    // the dynamic linker can find libheap_profiler.so next to the test
    // binary. strlcpy/strlcat are Bionic (Android libc) functions — this
    // presumably only builds for Android; glibc does not provide them.
    char env_ld_lib_path[256];
    strlcpy(env_ld_lib_path, "LD_LIBRARY_PATH=", sizeof(env_ld_lib_path));
    strlcat(env_ld_lib_path, GetExePath().c_str(), sizeof(env_ld_lib_path));
    char env_ld_preload[] = "LD_PRELOAD=libheap_profiler.so";
    // Minimal replacement environment: just LD_PRELOAD and LD_LIBRARY_PATH.
    char* const env[] = {env_ld_preload, env_ld_lib_path, 0};
    // Replace the current process image, preserving argv.
    execve("/proc/self/exe", argv, env);
    // execve() never returns, unless something goes wrong.
    perror("execve");
    assert(false);
  }

  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
OLD | NEW |