| OLD | NEW | 
|---|
|  | (Empty) | 
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. |  | 
| 2 // Use of this source code is governed by a BSD-style license that can be |  | 
| 3 // found in the LICENSE file. |  | 
| 4 |  | 
#include <assert.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <map>
#include <string>

#include "testing/gtest/include/gtest/gtest.h"
#include "tools/android/heap_profiler/heap_profiler.h"
| 16 |  | 
| 17 namespace { |  | 
| 18 |  | 
// Signature shared by the watermark allocation helpers below (malloc-like
// and mmap-like allocators both fit it).
typedef void* (*AllocatorFn)(size_t);
// Signature of the matching deallocators (DoFree and munmap both fit it).
typedef int (*FreeFn)(void*, size_t);

// Allocation sizes used by the tests. Each is a distinct prime multiple of
// the page size, so every watermarked allocation has a unique byte count and
// can be looked up unambiguously in the profiler's stack-trace table.
const size_t kSize1 = 499 * PAGE_SIZE;
const size_t kSize2 = 503 * PAGE_SIZE;
const size_t kSize3 = 509 * PAGE_SIZE;
| 25 |  | 
| 26 // The purpose of the four functions below is to create watermarked allocations, |  | 
| 27 // so the test fixture can ascertain that the hooks work end-to-end. |  | 
// Inner watermark frame: allocates |size| bytes via malloc and touches the
// memory. The memset prevents tail-call elimination, ensuring this function
// appears in the unwound stack trace. Returns NULL if malloc fails.
__attribute__((noinline)) void* MallocInner(size_t size) {
  void* ptr = malloc(size);
  // Guard against a failed allocation: memset(NULL, ...) is undefined.
  if (ptr != NULL)
    memset(ptr, 0, size);
  return ptr;
}
| 35 |  | 
| 36 __attribute__((noinline)) void* MallocOuter(size_t size) { |  | 
| 37   void* ptr = MallocInner(size); |  | 
| 38   memset(ptr, 0, size); |  | 
| 39   return ptr; |  | 
| 40 } |  | 
| 41 |  | 
// Creates an anonymous, private, read/write mapping of |size| bytes.
// Returns MAP_FAILED (not NULL) on failure, exactly as mmap does.
__attribute__((noinline)) void* DoMmap(size_t size) {
  // MAP_ANONYMOUS requires fd == -1 for portability (some implementations
  // reject other values); the offset must be 0.
  return mmap(
      0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}
| 46 |  | 
| 47 __attribute__((noinline)) void* MmapInner(size_t size) { |  | 
| 48   void* ptr = DoMmap(size); |  | 
| 49   memset(ptr, 0, size); |  | 
| 50   return ptr; |  | 
| 51 } |  | 
| 52 |  | 
| 53 __attribute__((noinline)) void* MmapOuter(size_t size) { |  | 
| 54   void* ptr = MmapInner(size); |  | 
| 55   memset(ptr, 0, size); |  | 
| 56   return ptr; |  | 
| 57 } |  | 
| 58 |  | 
| 59 const HeapStats* GetHeapStats() { |  | 
| 60   HeapStats* const* stats_ptr = reinterpret_cast<HeapStats* const*>( |  | 
| 61       dlsym(RTLD_DEFAULT, "heap_profiler_stats_for_tests")); |  | 
| 62   EXPECT_TRUE(stats_ptr != NULL); |  | 
| 63   const HeapStats* stats = *stats_ptr; |  | 
| 64   EXPECT_TRUE(stats != NULL); |  | 
| 65   EXPECT_EQ(HEAP_PROFILER_MAGIC_MARKER, stats->magic_start); |  | 
| 66   return stats; |  | 
| 67 } |  | 
| 68 |  | 
| 69 bool StackTraceContains(const StacktraceEntry* s, AllocatorFn fn) { |  | 
| 70   // kExpectedFnLen is a gross estimation of the watermark functions' size. |  | 
| 71   // It tries to address the following problem: the addrs in the unwound stack |  | 
| 72   // stack frames will NOT point to the beginning of the functions, but to the |  | 
| 73   // PC after the call to malloc/mmap. |  | 
| 74   const size_t kExpectedFnLen = 16; |  | 
| 75   const uintptr_t fn_addr = reinterpret_cast<uintptr_t>(fn); |  | 
| 76   for (size_t i = 0; i < HEAP_PROFILER_MAX_DEPTH; ++i) { |  | 
| 77     if (s->frames[i] >= fn_addr && s->frames[i] <= fn_addr + kExpectedFnLen) |  | 
| 78       return true; |  | 
| 79   } |  | 
| 80   return false; |  | 
| 81 } |  | 
| 82 |  | 
| 83 const StacktraceEntry* LookupStackTrace(size_t size, AllocatorFn fn) { |  | 
| 84   const HeapStats* stats = GetHeapStats(); |  | 
| 85   for (size_t i = 0; i < stats->max_stack_traces; ++i) { |  | 
| 86     const StacktraceEntry* st = &stats->stack_traces[i]; |  | 
| 87     if (st->alloc_bytes == size && StackTraceContains(st, fn)) |  | 
| 88       return st; |  | 
| 89   } |  | 
| 90   return NULL; |  | 
| 91 } |  | 
| 92 |  | 
// free() adapter matching the FreeFn signature; the size argument is
// meaningless for heap memory and is ignored.
int DoFree(void* addr, size_t /*size, ignored.*/) {
  free(addr);
  return 0;  // Mimic munmap's success return value.
}
| 97 |  | 
// End-to-end check of the profiler hooks for one allocator family.
// |outer_fn| must internally call |inner_fn| (MallocOuter -> MallocInner, or
// MmapOuter -> MmapInner) and |free_fn| must release what they allocate.
void TestStackTracesWithParams(AllocatorFn outer_fn,
                               AllocatorFn inner_fn,
                               FreeFn free_fn) {
  const HeapStats* stats = GetHeapStats();

  // Three watermarked allocations; m3 is freed immediately, so its entry
  // must not be found below.
  void* m1 = outer_fn(kSize1);
  void* m2 = inner_fn(kSize2);
  void* m3 = inner_fn(kSize3);
  free_fn(m3, kSize3);

  const StacktraceEntry* st1 = LookupStackTrace(kSize1, inner_fn);
  const StacktraceEntry* st2 = LookupStackTrace(kSize2, inner_fn);
  const StacktraceEntry* st3 = LookupStackTrace(kSize3, inner_fn);

  // m1 went through outer_fn -> inner_fn: both frames must be present.
  EXPECT_TRUE(st1 != NULL);
  EXPECT_TRUE(StackTraceContains(st1, outer_fn));
  EXPECT_TRUE(StackTraceContains(st1, inner_fn));

  // m2 was allocated directly via inner_fn: outer_fn must NOT appear.
  EXPECT_TRUE(st2 != NULL);
  EXPECT_FALSE(StackTraceContains(st2, outer_fn));
  EXPECT_TRUE(StackTraceContains(st2, inner_fn));

  // m3 was freed above, so no live entry should match its size.
  EXPECT_EQ(NULL, st3);

  // Snapshot the counters, free the remaining allocations, and check that
  // exactly kSize1 + kSize2 bytes and two stack-trace entries went away.
  const size_t total_alloc_start = stats->total_alloc_bytes;
  const size_t num_stack_traces_start = stats->num_stack_traces;

  free_fn(m1, kSize1);
  free_fn(m2, kSize2);

  const size_t total_alloc_end = stats->total_alloc_bytes;
  const size_t num_stack_traces_end = stats->num_stack_traces;

  EXPECT_EQ(kSize1 + kSize2, total_alloc_start - total_alloc_end);
  EXPECT_EQ(2, num_stack_traces_start - num_stack_traces_end);
  EXPECT_EQ(NULL, LookupStackTrace(kSize1, inner_fn));
  EXPECT_EQ(NULL, LookupStackTrace(kSize2, inner_fn));
  EXPECT_EQ(NULL, LookupStackTrace(kSize3, inner_fn));
}
| 137 |  | 
// Verifies stack-trace accounting for the malloc/free hooks.
TEST(HeapProfilerIntegrationTest, TestMallocStackTraces) {
  TestStackTracesWithParams(&MallocOuter, &MallocInner, &DoFree);
}
| 141 |  | 
// Verifies stack-trace accounting for the mmap/munmap hooks. munmap already
// matches the FreeFn signature, so it is passed directly.
TEST(HeapProfilerIntegrationTest, TestMmapStackTraces) {
  TestStackTracesWithParams(&MmapOuter, &MmapInner, &munmap);
}
| 145 |  | 
// Returns the directory containing the current executable, resolved through
// the /proc/self/exe symlink, or an empty string on failure.
std::string GetExePath() {
  char buf[1024];
  const ssize_t len = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
  if (len < 0)
    return std::string();
  // readlink() does not NUL-terminate; the (buf, len) ctor copies exactly
  // |len| bytes, so that is fine.
  const std::string full_path(buf, static_cast<size_t>(len));
  const size_t last_sep = full_path.find_last_of('/');
  if (last_sep == std::string::npos)
    return std::string();
  // Keep everything before the final separator (no trailing '/').
  return full_path.substr(0, last_sep);
}
| 159 |  | 
| 160 }  // namespace |  | 
| 161 |  | 
| 162 int main(int argc, char** argv) { |  | 
| 163   // Re-launch the process itself forcing the preload of the libheap_profiler. |  | 
| 164   char* ld_preload = getenv("LD_PRELOAD"); |  | 
| 165   if (ld_preload == NULL || strstr(ld_preload, "libheap_profiler.so") == NULL) { |  | 
| 166     char env_ld_lib_path[256]; |  | 
| 167     strlcpy(env_ld_lib_path, "LD_LIBRARY_PATH=", sizeof(env_ld_lib_path)); |  | 
| 168     strlcat(env_ld_lib_path, GetExePath().c_str(), sizeof(env_ld_lib_path)); |  | 
| 169     char env_ld_preload[] = "LD_PRELOAD=libheap_profiler.so"; |  | 
| 170     char* const env[] = {env_ld_preload, env_ld_lib_path, 0}; |  | 
| 171     execve("/proc/self/exe", argv, env); |  | 
| 172     // execve() never returns, unless something goes wrong. |  | 
| 173     perror("execve"); |  | 
| 174     assert(false); |  | 
| 175   } |  | 
| 176 |  | 
| 177   testing::InitGoogleTest(&argc, argv); |  | 
| 178   return RUN_ALL_TESTS(); |  | 
| 179 } |  | 
| OLD | NEW | 
|---|