OLD | NEW |
---|---|
(Empty) | |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include <dlfcn.h> | |
6 #include <fcntl.h> | |
7 #include <stdlib.h> | |
8 #include <string.h> | |
9 #include <sys/mman.h> | |
10 #include <unistd.h> | |
11 #include <map> | |
12 | |
13 #include "base/compiler_specific.h" | |
14 #include "testing/gtest/include/gtest/gtest.h" | |
15 #include "tools/android/heap_profiler/heap_profiler.h" | |
16 | |
17 namespace { | |
18 | |
19 // The purpose of the four functions below is to create watermarked allocations, | |
20 // so the test fixture can ascertain that the hooks work end-to-end. | |
// Allocates |size| bytes with malloc() and touches the memory, so that this
// function shows up in the unwound stack trace of the allocation.
__attribute__((noinline)) void* MallocInner(size_t size) {
  void* ptr = malloc(size);
  // Guard against allocation failure: memset(NULL, ...) is undefined behavior.
  // The memset itself is there to avoid tail-call elimination optimizations
  // and ensure that this function will be part of the stack trace.
  if (ptr != NULL)
    memset(ptr, 0, size);
  return ptr;
}
28 | |
29 __attribute__((noinline)) void* MallocOuter(size_t size) { | |
30 void* ptr = MallocInner(size); | |
31 memset(ptr, 0, size); | |
32 return ptr; | |
33 } | |
34 | |
// Thin wrapper around mmap() for an anonymous, private, read-write mapping.
__attribute__((noinline)) void* DoMmap(size_t size) {
  // POSIX requires fd == -1 (not 0) for MAP_ANONYMOUS mappings. Linux happens
  // to ignore the fd in that case, but -1 is the portable, documented value.
  return mmap(
      0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
}
39 | |
40 __attribute__((noinline)) void* MmapInner(size_t size) { | |
41 void* ptr = DoMmap(size); | |
42 memset(ptr, 0, size); | |
43 return ptr; | |
44 } | |
45 | |
46 __attribute__((noinline)) void* MmapOuter(size_t size) { | |
47 void* ptr = MmapInner(size); | |
48 memset(ptr, 0, size); | |
49 return ptr; | |
50 } | |
51 | |
52 class HeapProfilerIntegrationTest : public testing::Test { | |
53 public: | |
54 virtual void SetUp() OVERRIDE { | |
55 HeapStats* const* stats_ptr = reinterpret_cast<HeapStats* const*>( | |
56 dlsym(RTLD_DEFAULT, "heap_profiler_stats_for_tests")); | |
57 EXPECT_TRUE(stats_ptr != NULL); | |
58 stats_ = *stats_ptr; | |
59 EXPECT_TRUE(stats_ != NULL); | |
60 EXPECT_EQ(HEAP_PROFILER_MAGIC_MARKER, stats_->magic_start); | |
61 } | |
62 | |
63 protected: | |
64 typedef void* (*AllocatorFn)(size_t); | |
65 | |
66 void Initialize(AllocatorFn outer_fn, AllocatorFn inner_fn) { | |
67 outer_fn_ = reinterpret_cast<uintptr_t>(outer_fn); | |
68 inner_fn_ = reinterpret_cast<uintptr_t>(inner_fn); | |
69 } | |
70 | |
71 void TestAfterAllAllocatedAnd3Freed() { | |
72 const StacktraceEntry* st1 = LookupStackTrace(kSize1, inner_fn_); | |
73 const StacktraceEntry* st2 = LookupStackTrace(kSize2, inner_fn_); | |
74 const StacktraceEntry* st3 = LookupStackTrace(kSize3, inner_fn_); | |
75 | |
76 EXPECT_TRUE(st1 != NULL); | |
77 EXPECT_TRUE(StackTraceContains(st1, outer_fn_)); | |
78 EXPECT_TRUE(StackTraceContains(st1, inner_fn_)); | |
79 | |
80 EXPECT_TRUE(st2 != NULL); | |
81 EXPECT_FALSE(StackTraceContains(st2, outer_fn_)); | |
82 EXPECT_TRUE(StackTraceContains(st2, inner_fn_)); | |
83 | |
84 EXPECT_EQ(NULL, st3); | |
85 | |
86 total_alloc_start_ = stats_->total_alloc_bytes; | |
87 num_stack_traces_start_ = stats_->num_stack_traces; | |
88 } | |
89 | |
90 void TestAfterAllFreed() { | |
91 const size_t total_alloc_end = stats_->total_alloc_bytes; | |
92 const size_t num_stack_traces_end = stats_->num_stack_traces; | |
93 | |
94 EXPECT_EQ(kSize1 + kSize2, total_alloc_start_ - total_alloc_end); | |
95 EXPECT_EQ(2, num_stack_traces_start_ - num_stack_traces_end); | |
96 EXPECT_EQ(NULL, LookupStackTrace(kSize1, inner_fn_)); | |
97 EXPECT_EQ(NULL, LookupStackTrace(kSize2, inner_fn_)); | |
98 EXPECT_EQ(NULL, LookupStackTrace(kSize3, inner_fn_)); | |
99 } | |
100 | |
101 static const size_t kSize1 = 499 * PAGE_SIZE; | |
102 static const size_t kSize2 = 503 * PAGE_SIZE; | |
103 static const size_t kSize3 = 509 * PAGE_SIZE; | |
104 | |
105 private: | |
106 static bool StackTraceContains(const StacktraceEntry* s, uintptr_t fn_addr) { | |
107 // kExpectedFnLen is a gross estimation of the watermark functions' size. | |
108 // It tries to address the following problem: the addrs in the unwound stack | |
109 // stack frames will NOT point to the beginning of the functions, but to the | |
110 // PC after the call to malloc/mmap. | |
111 const size_t kExpectedFnLen = 16; | |
112 | |
113 for (size_t i = 0; i < HEAP_PROFILER_MAX_DEPTH; ++i) { | |
114 if (s->frames[i] >= fn_addr && s->frames[i] <= fn_addr + kExpectedFnLen) | |
115 return true; | |
116 } | |
117 return false; | |
118 } | |
119 | |
120 const StacktraceEntry* LookupStackTrace(size_t size, uintptr_t fn_addr) { | |
121 for (size_t i = 0; i < stats_->max_stack_traces; ++i) { | |
122 const StacktraceEntry* st = &stats_->stack_traces[i]; | |
123 if (st->alloc_bytes == size && StackTraceContains(st, fn_addr)) | |
124 return st; | |
125 } | |
126 return NULL; | |
127 } | |
128 | |
129 const HeapStats* stats_; | |
130 size_t total_alloc_start_; | |
131 size_t num_stack_traces_start_; | |
132 uintptr_t outer_fn_; | |
133 uintptr_t inner_fn_; | |
134 }; | |
135 | |
136 TEST_F(HeapProfilerIntegrationTest, TestMallocStackTraces) { | |
137 Initialize(&MallocOuter, &MallocInner); | |
138 | |
139 void* m1 = MallocOuter(kSize1); | |
140 void* m2 = MallocInner(kSize2); | |
141 void* m3 = MallocInner(kSize3); | |
142 free(m3); | |
143 TestAfterAllAllocatedAnd3Freed(); | |
144 | |
145 free(m1); | |
146 free(m2); | |
bulach
2014/06/26 14:42:54
you could create a dummy wrapper around free that
Primiano Tucci (use gerrit)
2014/06/27 09:34:29
Done, now is more "zipped" than ever :)
Thanks for
| |
147 TestAfterAllFreed(); | |
148 } | |
149 | |
150 TEST_F(HeapProfilerIntegrationTest, TestMmapStackTraces) { | |
151 Initialize(&MmapOuter, &MmapInner); | |
152 | |
153 void* m1 = MmapOuter(kSize1); | |
154 void* m2 = MmapInner(kSize2); | |
155 void* m3 = MmapInner(kSize3); | |
156 munmap(m3, kSize3); | |
157 TestAfterAllAllocatedAnd3Freed(); | |
158 | |
159 munmap(m1, kSize1); | |
160 munmap(m2, kSize2); | |
161 TestAfterAllFreed(); | |
162 } | |
163 | |
// Returns the path of the directory containing the current executable, or an
// empty string on failure.
std::string GetExePath() {
  char buf[1024];
  const ssize_t len = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
  if (len < 0)
    return std::string();
  std::string exe_path(buf, static_cast<size_t>(len));
  const size_t last_slash = exe_path.find_last_of('/');
  if (last_slash == std::string::npos)
    return std::string();
  // Strip the executable name, keeping only the directory part.
  exe_path.erase(last_slash);
  return exe_path;
}
177 | |
178 } // namespace | |
179 | |
180 int main(int argc, char** argv) { | |
181 // Re-launch the process itself forcing the preload of the libheap_profiler. | |
182 if (getenv("LD_PRELOAD") == NULL) { | |
183 char env_ld_lib_path[256]; | |
184 strlcpy(env_ld_lib_path, "LD_LIBRARY_PATH=", sizeof(env_ld_lib_path)); | |
185 strlcat(env_ld_lib_path, GetExePath().c_str(), sizeof(env_ld_lib_path)); | |
186 char env_ld_preload[] = "LD_PRELOAD=libheap_profiler.so"; | |
187 char* const env[] = {env_ld_preload, env_ld_lib_path, 0}; | |
188 execve("/proc/self/exe", argv, env); | |
189 // execve() never returns, unless something goes wrong. | |
190 perror("execve"); | |
191 assert(false); | |
192 } | |
193 | |
194 testing::InitGoogleTest(&argc, argv); | |
195 return RUN_ALL_TESTS(); | |
196 } | |
OLD | NEW |