Index: tools/android/heap_profiler/heap_profiler_integrationtest.cc |
diff --git a/tools/android/heap_profiler/heap_profiler_integrationtest.cc b/tools/android/heap_profiler/heap_profiler_integrationtest.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..e3754ebe11db8fa3019bf4df879381b33bcb9722 |
--- /dev/null |
+++ b/tools/android/heap_profiler/heap_profiler_integrationtest.cc |
@@ -0,0 +1,214 @@ |
+// Copyright 2014 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include <assert.h> |
+#include <dlfcn.h> |
+#include <fcntl.h> |
+#include <stdio.h> |
+#include <stdlib.h> |
+#include <string.h> |
+#include <sys/mman.h> |
+#include <unistd.h> |
+ |
+#include <string> |
+ |
+#include "testing/gtest/include/gtest/gtest.h" |
+#include "tools/android/heap_profiler/heap_profiler.h" |
+ |
+ |
+namespace { |
+ |
+// The functions below create watermarked allocations, so the test fixture can |
+// ascertain that the hooks work end-to-end. |
+__attribute__ ((noinline)) |
+void* MallocInner(size_t size) { |
+ void* ptr = malloc(size); |
+ // The memset below is to avoid tail-call elimination optimizations and ensure |
+ // that this function will be part of the stack trace. |
pasko (2014/06/25 16:39:50): this trick is not done for DoMmap, is that for som…
Primiano Tucci (use gerrit) (2014/06/26 09:00:54): the *trick* is essentially a way to avoid that the…
+ memset(ptr, 0, size); |
+ return ptr; |
+} |
+ |
+__attribute__ ((noinline)) |
+void* MallocOuter(size_t size) { |
+ void* ptr = MallocInner(size); |
+ memset(ptr, 0, size); |
+ return ptr; |
+} |
+ |
+__attribute__ ((noinline)) |
+void* DoMmap(size_t size) { |
+ return mmap( |
+ 0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
+} |
+ |
+__attribute__ ((noinline)) |
+void* MmapInner(size_t size) { |
+ void* ptr = DoMmap(size); |
+ memset(ptr, 0, size); |
+ return ptr; |
+} |
+ |
+__attribute__ ((noinline)) |
+void* MmapOuter(size_t size) { |
+ void* ptr = MmapInner(size); |
+ memset(ptr, 0, size); |
+ return ptr; |
+} |
+ |
+bool StackTraceContains(const StacktraceEntry* s, void* (*fn)(size_t)) { |
+ // kExpectedFnLength is a rough estimate of the size of the Malloc*/Mmap* |
+ // functions above. It tries to address the following problem: the addresses |
+ // in the unwound stack frames will NOT point to the beginning of the |
+ // functions, but to the PC just after the call to malloc/mmap. |
+ const size_t kExpectedFnLength = 16; |
+ const uintptr_t fn_addr = reinterpret_cast<uintptr_t>(fn); |
+ |
+ for (size_t i = 0; i < HEAP_PROFILER_MAX_DEPTH; ++i) { |
+ if (s->frames[i] >= fn_addr && s->frames[i] <= fn_addr + kExpectedFnLength) |
+ return true; |
+ } |
+ return false; |
+} |
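As an aside, the fixed kExpectedFnLength heuristic above could in principle be replaced by resolving each frame to its enclosing symbol. A minimal sketch using dladdr() follows, for illustration only and not part of this patch; it assumes the target function is visible in the dynamic symbol table, which the anonymous-namespace helpers above are not.

  // Illustration only: attribute a frame to a function via dladdr() instead
  // of a hard-coded length guess. Works only for exported (dynamic) symbols.
  bool FrameIsInFunction(uintptr_t frame, void* (*fn)(size_t)) {
    Dl_info info;
    if (dladdr(reinterpret_cast<void*>(frame), &info) == 0 ||
        info.dli_saddr == NULL) {
      return false;  // No symbol information available for this address.
    }
    return info.dli_saddr == reinterpret_cast<void*>(fn);
  }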
+ |
+const HeapStats* GetHeapProfilerStats() { |
+ HeapStats* const* stats_ptr = reinterpret_cast<HeapStats* const*>( |
+ dlsym(RTLD_DEFAULT, "heap_profiler_stats_for_tests")); |
+ EXPECT_TRUE(stats_ptr != NULL); |
+ const HeapStats* stats = *stats_ptr; |
+ EXPECT_TRUE(stats != NULL); |
+ EXPECT_EQ(HEAP_PROFILER_MAGIC_MARKER, stats->magic_start); |
+ return stats; |
+} |
+ |
+const StacktraceEntry* LookupStackTraceBySize(size_t size) { |
pasko (2014/06/25 16:39:50): maybe search by size _and_ a function to filter ou…
Primiano Tucci (use gerrit) (2014/06/26 09:00:54): Done.
+ const HeapStats* stats = GetHeapProfilerStats(); |
+ for (size_t i = 0; i < stats->max_stack_traces; ++i) { |
+ const StacktraceEntry* st = &stats->stack_traces[i]; |
+ if (st->alloc_bytes == size) |
+ return st; |
+ } |
+ return NULL; |
+} |
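For reference, a minimal sketch of the size-and-function lookup pasko suggests above (a hypothetical helper, not part of this patch set; it reuses StackTraceContains() defined earlier in this file):

  const StacktraceEntry* LookupStackTraceBySizeAndFn(size_t size,
                                                     void* (*fn)(size_t)) {
    const HeapStats* stats = GetHeapProfilerStats();
    for (size_t i = 0; i < stats->max_stack_traces; ++i) {
      const StacktraceEntry* st = &stats->stack_traces[i];
      // Require both the watermark size and the expected caller, to filter
      // out unrelated allocations that happen to have the same size.
      if (st->alloc_bytes == size && StackTraceContains(st, fn))
        return st;
    }
    return NULL;
  }

The tests would then call e.g. LookupStackTraceBySizeAndFn(kSize1, &MallocOuter) instead of relying on the allocation size alone.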
+ |
+TEST(HeapProfilerIntegrationTest, TestMallocStackTraces) { |
+ const HeapStats* stats = GetHeapProfilerStats(); |
+ |
+ const size_t kSize1 = 1000000; |
+ const size_t kSize2 = 2000000; |
+ const size_t kSize3 = 4000000; |
+ |
+ void* m1 = MallocOuter(kSize1); |
+ void* m2 = MallocInner(kSize2); |
+ void* m3 = MallocInner(kSize3); |
+ |
+ ASSERT_TRUE(stats->stack_traces != NULL); |
+ |
+ free(m3); |
+ |
+ const StacktraceEntry* st1 = LookupStackTraceBySize(kSize1); |
+ const StacktraceEntry* st2 = LookupStackTraceBySize(kSize2); |
+ const StacktraceEntry* st3 = LookupStackTraceBySize(kSize3); |
+ |
+ EXPECT_TRUE(st1 != NULL); |
+ EXPECT_TRUE(StackTraceContains(st1, &MallocOuter)); |
+ EXPECT_TRUE(StackTraceContains(st1, &MallocInner)); |
+ |
+ EXPECT_TRUE(st2 != NULL); |
+ EXPECT_FALSE(StackTraceContains(st2, &MallocOuter)); |
+ EXPECT_TRUE(StackTraceContains(st2, &MallocInner)); |
+ |
+ EXPECT_EQ(NULL, st3); |
+ |
+ const size_t total_alloc_start = stats->total_alloc_bytes; |
+ const size_t num_stack_traces_start = stats->num_stack_traces; |
+ free(m1); |
+ free(m2); |
+ const size_t total_alloc_end = stats->total_alloc_bytes; |
+ const size_t num_stack_traces_end = stats->num_stack_traces; |
+ |
+ EXPECT_EQ(kSize1 + kSize2, total_alloc_start - total_alloc_end); |
+ EXPECT_EQ(2, num_stack_traces_start - num_stack_traces_end); |
+ EXPECT_EQ(NULL, LookupStackTraceBySize(kSize1)); |
+ EXPECT_EQ(NULL, LookupStackTraceBySize(kSize2)); |
+ EXPECT_EQ(NULL, LookupStackTraceBySize(kSize3)); |
+} |
+ |
+TEST(HeapProfilerIntegrationTest, TestMmapStackTraces) { |
bulach (2014/06/25 11:19:33): nit: I still think it'd be clearer to have one inn…
Primiano Tucci (use gerrit) (2014/06/26 09:00:54): Ok, what about now?
+ const HeapStats* stats = GetHeapProfilerStats(); |
+ |
+ static const size_t kSize1 = 499 * PAGE_SIZE; |
+ static const size_t kSize2 = 503 * PAGE_SIZE; |
+ static const size_t kSize3 = 509 * PAGE_SIZE; |
+ |
+ void* m1 = MmapOuter(kSize1); |
+ void* m2 = MmapInner(kSize2); |
+ void* m3 = MmapInner(kSize3); |
+ |
+ ASSERT_TRUE(stats->stack_traces != NULL); |
+ |
+ munmap(m3, kSize3); |
+ |
+ const StacktraceEntry* st1 = LookupStackTraceBySize(kSize1); |
+ const StacktraceEntry* st2 = LookupStackTraceBySize(kSize2); |
+ const StacktraceEntry* st3 = LookupStackTraceBySize(kSize3); |
+ |
+ EXPECT_TRUE(st1 != NULL); |
+ EXPECT_TRUE(StackTraceContains(st1, &MmapOuter)); |
+ EXPECT_TRUE(StackTraceContains(st1, &MmapInner)); |
+ |
+ EXPECT_TRUE(st2 != NULL); |
+ EXPECT_FALSE(StackTraceContains(st2, &MmapOuter)); |
+ EXPECT_TRUE(StackTraceContains(st2, &MmapInner)); |
+ |
+ EXPECT_EQ(NULL, st3); |
+ |
+ const size_t total_alloc_start = stats->total_alloc_bytes; |
+ const size_t num_stack_traces_start = stats->num_stack_traces; |
+ munmap(m1, kSize1); |
+ munmap(m2, kSize2); |
+ const size_t total_alloc_end = stats->total_alloc_bytes; |
+ const size_t num_stack_traces_end = stats->num_stack_traces; |
+ |
+ EXPECT_EQ(kSize1 + kSize2, total_alloc_start - total_alloc_end); |
+ EXPECT_EQ(2, num_stack_traces_start - num_stack_traces_end); |
+ EXPECT_EQ(NULL, LookupStackTraceBySize(kSize1)); |
+ EXPECT_EQ(NULL, LookupStackTraceBySize(kSize2)); |
+ EXPECT_EQ(NULL, LookupStackTraceBySize(kSize3)); |
+} |
+ |
+// Returns the path of the directory containing the current executable. |
+std::string GetExePath() { |
+ char buf[1024]; |
+ ssize_t len = readlink("/proc/self/exe", buf, sizeof(buf) - 1); |
+ if (len == -1) |
+ return std::string(); |
+ std::string path(buf, len); |
+ size_t sep = path.find_last_of('/'); |
+ if (sep == std::string::npos) |
+ return std::string(); |
+ path.erase(sep); |
+ return path; |
+} |
+ |
+} // namespace |
+ |
+ |
+int main(int argc, char** argv) { |
+ // Re-launch the process itself, forcing the preload of libheap_profiler.so. |
+ if (getenv("LD_PRELOAD") == NULL) { |
+ char env_ld_lib_path[256]; |
+ strlcpy(env_ld_lib_path, "LD_LIBRARY_PATH=", sizeof(env_ld_lib_path)); |
+ strlcat(env_ld_lib_path, GetExePath().c_str(), sizeof(env_ld_lib_path)); |
+ char env_ld_preload[] = "LD_PRELOAD=libheap_profiler.so"; |
+ char* const env[] = {env_ld_preload, env_ld_lib_path, 0}; |
+ execve("/proc/self/exe", argv, env); |
+ // execve() never returns, unless something goes wrong. |
+ perror("execve"); |
+ assert(false); |
+ } |
+ |
+ testing::InitGoogleTest(&argc, argv); |
+ return RUN_ALL_TESTS(); |
+} |
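A possible follow-up, sketched here purely as an illustration and not claimed to be part of this change: after the re-exec, the child could verify that libheap_profiler.so really was preloaded before running the tests, by probing for the same exported stats symbol that GetHeapProfilerStats() already looks up.

  // Illustration only: returns true if the heap profiler hooks appear to be
  // loaded in this process (i.e. LD_PRELOAD took effect).
  bool HeapProfilerIsPreloaded() {
    return dlsym(RTLD_DEFAULT, "heap_profiler_stats_for_tests") != NULL;
  }

If this returned false, main() could print a diagnostic and exit non-zero rather than running tests whose expectations would fail for a confusing reason.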