// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <map>
#include <string>

#include "testing/gtest/include/gtest/gtest.h"
#include "tools/android/heap_profiler/heap_profiler.h"

namespace {

// The purpose of the four functions below is to create watermarked
// allocations, so the test fixture can ascertain that the hooks work
// end-to-end.
__attribute__ ((noinline))
void* MallocInner(size_t size) {
  void* ptr = malloc(size);
  // The memset below is to avoid tail-call elimination optimizations and
  // ensure that this function will be part of the stack trace.

    pasko, 2014/06/25 16:39:50:
      this trick is not done for DoMmap, is that for som...
    Primiano Tucci (use gerrit), 2014/06/26 09:00:54:
      the *trick* is essentially a way to avoid that the...

  memset(ptr, 0, size);
  return ptr;
}
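
As a side note on the exchange above: without a later use of the allocated
buffer, the compiler is free to turn the final call into a tail jump, in which
case this helper's frame would never appear in the unwound stack. A minimal
sketch of the pattern the memset guards against (TailCallMalloc is a
hypothetical name, not part of this CL):

__attribute__ ((noinline))
void* TailCallMalloc(size_t size) {
  // No use of the result after the call, so the compiler may emit a plain
  // jump to malloc and reuse this frame, eliding it from any stack trace.
  return malloc(size);
}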

__attribute__ ((noinline))
void* MallocOuter(size_t size) {
  void* ptr = MallocInner(size);
  memset(ptr, 0, size);
  return ptr;
}

__attribute__ ((noinline))
void* DoMmap(size_t size) {
  return mmap(
      0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, 0, 0);
}

__attribute__ ((noinline))
void* MmapInner(size_t size) {
  void* ptr = DoMmap(size);
  memset(ptr, 0, size);
  return ptr;
}

__attribute__ ((noinline))
void* MmapOuter(size_t size) {
  void* ptr = MmapInner(size);
  memset(ptr, 0, size);
  return ptr;
}
bool StackTraceContains(const StacktraceEntry* s, void* (*fn)(size_t)) {
  // kExpectedFnLength is a rough estimate of the size of the Malloc*/Mmap*
  // functions above. It addresses the following problem: the addresses in the
  // unwound stack frames will NOT point to the beginning of the functions,
  // but to the PC right after the call to malloc/mmap.
  const size_t kExpectedFnLength = 16;
  const uintptr_t fn_addr = reinterpret_cast<uintptr_t>(fn);

  for (size_t i = 0; i < HEAP_PROFILER_MAX_DEPTH; ++i) {
    if (s->frames[i] >= fn_addr && s->frames[i] <= fn_addr + kExpectedFnLength)
      return true;
  }
  return false;
}

const HeapStats* GetHeapProfilerStats() {
  HeapStats* const* stats_ptr = reinterpret_cast<HeapStats* const*>(
      dlsym(RTLD_DEFAULT, "heap_profiler_stats_for_tests"));
  EXPECT_TRUE(stats_ptr != NULL);
  const HeapStats* stats = *stats_ptr;
  EXPECT_TRUE(stats != NULL);
  EXPECT_EQ(HEAP_PROFILER_MAGIC_MARKER, stats->magic_start);
  return stats;
}
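
The dlsym() lookup above assumes that the preloaded libheap_profiler.so
exports a test-only pointer variable roughly of this shape (the actual
definition lives in the heap_profiler sources and may differ):

extern "C" HeapStats* heap_profiler_stats_for_tests;  // Resolved via dlsym().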

const StacktraceEntry* LookupStackTraceBySize(size_t size) {

    pasko, 2014/06/25 16:39:50:
      maybe search by size _and_ a function to filter ou...
    Primiano Tucci (use gerrit), 2014/06/26 09:00:54:
      Done.

  const HeapStats* stats = GetHeapProfilerStats();
  for (size_t i = 0; i < stats->max_stack_traces; ++i) {
    const StacktraceEntry* st = &stats->stack_traces[i];
    if (st->alloc_bytes == size)
      return st;
  }
  return NULL;
}
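
The variant pasko suggests above would look roughly like the following;
LookupStackTraceBySizeAndFn is a hypothetical helper sketched for
illustration, not code from this CL:

const StacktraceEntry* LookupStackTraceBySizeAndFn(size_t size,
                                                   void* (*fn)(size_t)) {
  const HeapStats* stats = GetHeapProfilerStats();
  for (size_t i = 0; i < stats->max_stack_traces; ++i) {
    const StacktraceEntry* st = &stats->stack_traces[i];
    // Require both a matching size and the expected caller in the unwound
    // stack, so two unrelated allocations of the same size cannot be confused.
    if (st->alloc_bytes == size && StackTraceContains(st, fn))
      return st;
  }
  return NULL;
}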

TEST(HeapProfilerIntegrationTest, TestMallocStackTraces) {
  const HeapStats* stats = GetHeapProfilerStats();

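  // Three distinct sizes, so each allocation below can be located by size.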
  const size_t kSize1 = 1000000;
  const size_t kSize2 = 2000000;
  const size_t kSize3 = 4000000;

  void* m1 = MallocOuter(kSize1);
  void* m2 = MallocInner(kSize2);
  void* m3 = MallocInner(kSize3);

  ASSERT_TRUE(stats->stack_traces != NULL);

  free(m3);

  const StacktraceEntry* st1 = LookupStackTraceBySize(kSize1);
  const StacktraceEntry* st2 = LookupStackTraceBySize(kSize2);
  const StacktraceEntry* st3 = LookupStackTraceBySize(kSize3);

  EXPECT_TRUE(st1 != NULL);
  EXPECT_TRUE(StackTraceContains(st1, &MallocOuter));
  EXPECT_TRUE(StackTraceContains(st1, &MallocInner));

  EXPECT_TRUE(st2 != NULL);
  EXPECT_FALSE(StackTraceContains(st2, &MallocOuter));
  EXPECT_TRUE(StackTraceContains(st2, &MallocInner));

  EXPECT_EQ(NULL, st3);

  const size_t total_alloc_start = stats->total_alloc_bytes;
  const size_t num_stack_traces_start = stats->num_stack_traces;
  free(m1);
  free(m2);
  const size_t total_alloc_end = stats->total_alloc_bytes;
  const size_t num_stack_traces_end = stats->num_stack_traces;

  EXPECT_EQ(kSize1 + kSize2, total_alloc_start - total_alloc_end);
  EXPECT_EQ(2, num_stack_traces_start - num_stack_traces_end);
  EXPECT_EQ(NULL, LookupStackTraceBySize(kSize1));
  EXPECT_EQ(NULL, LookupStackTraceBySize(kSize2));
  EXPECT_EQ(NULL, LookupStackTraceBySize(kSize3));
}

TEST(HeapProfilerIntegrationTest, TestMmapStackTraces) {

    bulach, 2014/06/25 11:19:33:
      nit: I still think it'd be clearer to have one inn...
    Primiano Tucci (use gerrit), 2014/06/26 09:00:54:
      Ok, what about now?

  const HeapStats* stats = GetHeapProfilerStats();

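  // Distinct multiples of the page size, so each mapping below can be
  // located by size.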
  static const size_t kSize1 = 499 * PAGE_SIZE;
  static const size_t kSize2 = 503 * PAGE_SIZE;
  static const size_t kSize3 = 509 * PAGE_SIZE;

  void* m1 = MmapOuter(kSize1);
  void* m2 = MmapInner(kSize2);
  void* m3 = MmapInner(kSize3);

  ASSERT_TRUE(stats->stack_traces != NULL);

  munmap(m3, kSize3);

  const StacktraceEntry* st1 = LookupStackTraceBySize(kSize1);
  const StacktraceEntry* st2 = LookupStackTraceBySize(kSize2);
  const StacktraceEntry* st3 = LookupStackTraceBySize(kSize3);

  EXPECT_TRUE(st1 != NULL);
  EXPECT_TRUE(StackTraceContains(st1, &MmapOuter));
  EXPECT_TRUE(StackTraceContains(st1, &MmapInner));

  EXPECT_TRUE(st2 != NULL);
  EXPECT_FALSE(StackTraceContains(st2, &MmapOuter));
  EXPECT_TRUE(StackTraceContains(st2, &MmapInner));

  EXPECT_EQ(NULL, st3);

  const size_t total_alloc_start = stats->total_alloc_bytes;
  const size_t num_stack_traces_start = stats->num_stack_traces;
  munmap(m1, kSize1);
  munmap(m2, kSize2);
  const size_t total_alloc_end = stats->total_alloc_bytes;
  const size_t num_stack_traces_end = stats->num_stack_traces;

  EXPECT_EQ(kSize1 + kSize2, total_alloc_start - total_alloc_end);
  EXPECT_EQ(2, num_stack_traces_start - num_stack_traces_end);
  EXPECT_EQ(NULL, LookupStackTraceBySize(kSize1));
  EXPECT_EQ(NULL, LookupStackTraceBySize(kSize2));
  EXPECT_EQ(NULL, LookupStackTraceBySize(kSize3));
}

// Returns the path of the directory containing the current executable.
std::string GetExePath() {
  char buf[1024];
  ssize_t len = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
  if (len == -1)
    return std::string();
  std::string path(buf, len);
  size_t sep = path.find_last_of('/');
  if (sep == std::string::npos)
    return std::string();
  path.erase(sep);
  return path;
}

}  // namespace

int main(int argc, char** argv) {
  // Re-launch the process itself, forcing the preload of libheap_profiler.so.
  // The re-executed child sees LD_PRELOAD set and therefore skips this branch.
  if (getenv("LD_PRELOAD") == NULL) {
    char env_ld_lib_path[256];
    strlcpy(env_ld_lib_path, "LD_LIBRARY_PATH=", sizeof(env_ld_lib_path));
    strlcat(env_ld_lib_path, GetExePath().c_str(), sizeof(env_ld_lib_path));
    char env_ld_preload[] = "LD_PRELOAD=libheap_profiler.so";
    char* const env[] = {env_ld_preload, env_ld_lib_path, 0};
    execve("/proc/self/exe", argv, env);
    // execve() replaces the current process image and does not return unless
    // it fails.
    perror("execve");
    assert(false);
  }

  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}