Index: tools/android/heap_profiler/heap_profiler_integrationtest.cc
diff --git a/tools/android/heap_profiler/heap_profiler_integrationtest.cc b/tools/android/heap_profiler/heap_profiler_integrationtest.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4952cbc383f32beb2f16eee83734aca8ec135460
--- /dev/null
+++ b/tools/android/heap_profiler/heap_profiler_integrationtest.cc
@@ -0,0 +1,228 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h>
+#include <dlfcn.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <string>
+
+#include "testing/gtest/include/gtest/gtest.h"
+#include "tools/android/heap_profiler/heap_profiler.h"
+
+// On Android, PAGE_SIZE comes from bionic's <limits.h>. The 4 KiB fallback
+// below is an assumption for other platforms.
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+
+namespace {
+
+// The purpose of the Step* functions below is to create watermarked
+// allocations, so the test fixture can ascertain that the hooks work
+// end-to-end.
+__attribute__((noinline)) void* MallocStep2(size_t size) {
+  void* ptr = malloc(size);
+  // The memset below is to avoid tail-call elimination optimizations and
+  // ensure that this function will be part of the stack trace.
+  memset(ptr, 0, size);
+  return ptr;
+}
+
+__attribute__((noinline)) void* MallocStep1(size_t size) {
+  void* ptr = MallocStep2(size);
+  memset(ptr, 0, size);
+  return ptr;
+}
+
+__attribute__((noinline)) void* DoMmap(size_t size) {
+  // MAP_ANONYMOUS mappings should pass fd == -1: the fd is ignored on Linux,
+  // but portable callers (and some implementations) require -1.
+  return mmap(
+      NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+}
+
+__attribute__((noinline)) void* MmapStep2(size_t size) {
+  void* ptr = DoMmap(size);
+  memset(ptr, 0, size);
+  return ptr;
+}
+
+__attribute__((noinline)) void* MmapStep1(size_t size) {
+  void* ptr = MmapStep2(size);
+  memset(ptr, 0, size);
+  return ptr;
+}
+
+bool StackTraceContains(const uintptr_t* frames, uintptr_t fn_addr) {
+  // expected_fn_length is a gross estimate of the size of the Step* functions
+  // above. It addresses the following problem: the addresses in the unwound
+  // stack frames will NOT point to the beginning of the functions, but to the
+  // PC right after the call to malloc/mmap.
+  const size_t expected_fn_length = 16;
+
+  for (size_t i = 0; i < HEAP_PROFILER_MAX_DEPTH; ++i) {
+    if (frames[i] >= fn_addr && frames[i] <= fn_addr + expected_fn_length)
+      return true;
+  }
+  return false;
+}
+
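+// Fetches the stats structure exported by libheap_profiler.so via dlsym(), so
+// that the test binary does not need to link against the library directly.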
+const HeapStats* GetHeapProfilerStats() {
+  HeapStats* const* stats_ptr = reinterpret_cast<HeapStats* const*>(
+      dlsym(RTLD_DEFAULT, "heap_profiler_stats_for_tests"));
+  EXPECT_TRUE(stats_ptr != NULL);
+  const HeapStats* stats = *stats_ptr;
+  EXPECT_TRUE(stats != NULL);
+  EXPECT_EQ(HEAP_PROFILER_MAGIC_MARKER, stats->magic_start);
+  return stats;
+}
+
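+// Verifies that watermarked malloc()s are recorded with the expected stack
+// traces and that freeing them decreases the profiler's total counters.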
+TEST(HeapProfilerIntegrationTest, TestMallocStackTraces) {
+  const HeapStats* stats = GetHeapProfilerStats();
+
+  void* m1 = MallocStep1(1000000);
+  void* m2 = MallocStep2(2000000);
+  void* m3 = MallocStep2(4000000);
+
+  bool m1_found = false;
+  bool m2_found = false;
+  bool m3_found = false;
+
+  ASSERT_TRUE(stats->stack_traces != NULL);
+
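+  // Free m3 before scanning: its allocation must be evicted from the stats,
+  // so m3_found is expected to stay false in the loop below.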
+  free(m3);
+
+  for (size_t i = 0; i < stats->max_stack_traces; ++i) {
+    const StacktraceEntry* st = &stats->stack_traces[i];
+    if (st->alloc_bytes == 1000000) {
+      m1_found = true;
+      EXPECT_TRUE(StackTraceContains(
+          st->frames, reinterpret_cast<uintptr_t>(&MallocStep1)));
+      EXPECT_TRUE(StackTraceContains(
+          st->frames, reinterpret_cast<uintptr_t>(&MallocStep2)));
+    } else if (st->alloc_bytes == 2000000) {
+      m2_found = true;
+      EXPECT_FALSE(StackTraceContains(
+          st->frames, reinterpret_cast<uintptr_t>(&MallocStep1)));
+      EXPECT_TRUE(StackTraceContains(
+          st->frames, reinterpret_cast<uintptr_t>(&MallocStep2)));
+    } else if (st->alloc_bytes == 4000000) {
+      m3_found = true;
+    }
+  }
+
+  EXPECT_TRUE(m1_found);
+  EXPECT_TRUE(m2_found);
+  EXPECT_FALSE(m3_found);
+
+  const size_t total_alloc_start = stats->total_alloc_bytes;
+  free(m1);
+  free(m2);
+  const size_t total_alloc_end = stats->total_alloc_bytes;
+
+  EXPECT_EQ(3000000u, total_alloc_start - total_alloc_end);
+}
+
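+// Same as above, but for mmap()/munmap(): checks both the recorded stack
+// traces and the total byte counters as mappings are created and removed.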
+TEST(HeapProfilerIntegrationTest, TestMmapStackTraces) {
+  const HeapStats* stats = GetHeapProfilerStats();
+
+  static const size_t m1_size = 499 * PAGE_SIZE;
+  static const size_t m2_size = 503 * PAGE_SIZE;
+  static const size_t m3_size = 509 * PAGE_SIZE;
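+  // 499, 503 and 509 are primes: the resulting sizes act as watermarks that
+  // are unlikely to collide with unrelated mappings in the stats.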
+
+  void* m1 = MmapStep1(m1_size);
+  void* m2 = MmapStep2(m2_size);
+  void* m3 = MmapStep2(m3_size);
+
+  bool m1_found = false;
+  bool m2_found = false;
+  bool m3_found = false;
+
+  ASSERT_TRUE(stats->stack_traces != NULL);
+
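+  // Unmap m3 before scanning: its watermark must disappear from the stats,
+  // so m3_found is expected to stay false in the loop below.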
+  munmap(m3, m3_size);
+
+  for (size_t i = 0; i < stats->max_stack_traces; ++i) {
+    const StacktraceEntry* st = &stats->stack_traces[i];
+    if (st->alloc_bytes == m1_size) {
+      m1_found = true;
+      EXPECT_TRUE(StackTraceContains(
+          st->frames, reinterpret_cast<uintptr_t>(&MmapStep1)));
+      EXPECT_TRUE(StackTraceContains(
+          st->frames, reinterpret_cast<uintptr_t>(&MmapStep2)));
+    } else if (st->alloc_bytes == m2_size) {
+      m2_found = true;
+      EXPECT_FALSE(StackTraceContains(
+          st->frames, reinterpret_cast<uintptr_t>(&MmapStep1)));
+      EXPECT_TRUE(StackTraceContains(
+          st->frames, reinterpret_cast<uintptr_t>(&MmapStep2)));
+    } else if (st->alloc_bytes == m3_size) {
+      m3_found = true;
+    }
+  }
+
+  EXPECT_TRUE(m1_found);
+  EXPECT_TRUE(m2_found);
+  EXPECT_FALSE(m3_found);
+
+  const size_t total_alloc_start = stats->total_alloc_bytes;
+  munmap(m1, m1_size);
+  munmap(m2, m2_size);
+  const size_t total_alloc_end = stats->total_alloc_bytes;
+
+  EXPECT_EQ(m1_size + m2_size, total_alloc_start - total_alloc_end);
+}
+
+// Returns the path of the directory containing the current executable.
+std::string GetExePath() {
+  char buf[1024];
+  ssize_t len = readlink("/proc/self/exe", buf, sizeof(buf) - 1);
+  if (len == -1)
+    return std::string();
+  std::string path(buf, len);
+  size_t sep = path.find_last_of('/');
+  if (sep == std::string::npos)
+    return std::string();
+  path.erase(sep);
+  return path;
+}
+
+}  // namespace
+
+
+int main(int argc, char** argv) {
+  // Re-launch the process itself, forcing the preload of libheap_profiler.so.
+  if (getenv("LD_PRELOAD") == NULL) {
+    char env_ld_lib_path[256];
+    // strlcpy() and strlcat() are provided by bionic, the Android C library.
+    strlcpy(env_ld_lib_path, "LD_LIBRARY_PATH=", sizeof(env_ld_lib_path));
+    strlcat(env_ld_lib_path, GetExePath().c_str(), sizeof(env_ld_lib_path));
+    char env_ld_preload[] = "LD_PRELOAD=libheap_profiler.so";
+    char* const env[] = {env_ld_preload, env_ld_lib_path, NULL};
+    execve("/proc/self/exe", argv, env);
+    // execve() returns only if something went wrong.
+    perror("execve");
+    assert(false);
+  }
+
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}