Index: src/client/linux/microdump_writer/microdump_writer.cc |
diff --git a/src/client/linux/microdump_writer/microdump_writer.cc b/src/client/linux/microdump_writer/microdump_writer.cc |
index d459d9ec441370c2e06e2e2f9a365bedf10e6353..7b1f1a9f2f2cc932192c7028c9ebb4f86d84b0f1 100644 |
--- a/src/client/linux/microdump_writer/microdump_writer.cc |
+++ b/src/client/linux/microdump_writer/microdump_writer.cc |
@@ -34,8 +34,6 @@ |
#include <sys/utsname.h> |
-#include <algorithm> |
- |
#include "client/linux/dump_writer_common/thread_info.h" |
#include "client/linux/dump_writer_common/ucontext_reader.h" |
#include "client/linux/handler/exception_handler.h" |
@@ -44,6 +42,7 @@ |
#include "client/linux/minidump_writer/linux_ptrace_dumper.h" |
#include "common/linux/file_id.h" |
#include "common/linux/linux_libc_support.h" |
+#include "common/memory.h" |
namespace { |
@@ -61,6 +60,44 @@ using google_breakpad::UContextReader; |
const size_t kLineBufferSize = 2048; |
// Returns the integer floor of log2(n), or -1 when n == 0.
// From chromium (base/bits.h): binary-search the set bit ranges, halving the
// shift width each iteration (32, 16, 8, 4, 2, 1).
int Log2Floor(uint64_t n) {
  if (!n) return -1;
  int log = 0;
  for (int i = 5; i >= 0; --i) {  // was ++i: never terminated, UB on 1 << i.
    int shift = 1 << i;
    if (n >> shift) {
      log += shift;
      if (!(n >>= shift)) break;
    }
  }
  return log;
}
+ |
+bool MappingsAreAdjacent(const MappingInfo* a, const MappingInfo* b) { |
+ // Because of load biasing, we can end up with a situation where two |
+ // mappings actually overlap. So we will define adjacency to also include a |
+ // b start address that lies within a's address range (including starting |
+ // immediately after a). |
+ // Because load biasing only ever moves the start address backwards, the end |
+ // address should still increase. |
+ return a->start_addr <= b->start_addr && |
+ a->start_addr + a->size >= b->start_addr; |
+} |
+ |
+size_t NextOrderedMapping( |
+ const google_breakpad::wasteful_vector<MappingInfo*>& mappings, |
+ size_t curr) { |
+ size_t best = mappings.size(); |
+ for (size_t next = 0; next < mappings.size(); ++next) { |
+ if (mappings[curr]->start_addr < mappings[next]->start_addr && |
+ (best == mappings.size() || |
+ mappings[next]->start_addr < mappings[best]->start_addr)) { |
+ best = next; |
+ } |
+ } |
+ return best; |
+} |
+ |
class MicrodumpWriter { |
public: |
MicrodumpWriter(const ExceptionHandler::CrashContext* context, |
@@ -98,6 +135,7 @@ class MicrodumpWriter { |
DumpProductInformation(); |
DumpOSInformation(); |
DumpGPUInformation(); |
+ DumpFreeSpace(); |
success = DumpCrashingThread(); |
if (success) |
success = DumpMappings(); |
@@ -128,16 +166,21 @@ class MicrodumpWriter { |
} |
// Stages the hex repr. of the given int type in the current line buffer. |
- template<typename T> |
- void LogAppend(T value) { |
+ enum LeadingZeros { kKeepLeadingZeros, kDiscardLeadingZeros }; |
+ template <typename T> |
+ void LogAppend(T value, const LeadingZeros leading_zeros = |
+ LeadingZeros::kKeepLeadingZeros) { |
// Make enough room to hex encode the largest int type + NUL. |
static const char HEX[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', |
'A', 'B', 'C', 'D', 'E', 'F'}; |
char hexstr[sizeof(T) * 2 + 1]; |
- for (int i = sizeof(T) * 2 - 1; i >= 0; --i, value >>= 4) |
- hexstr[i] = HEX[static_cast<uint8_t>(value) & 0x0F]; |
- hexstr[sizeof(T) * 2] = '\0'; |
- LogAppend(hexstr); |
+ int i = sizeof(T) * 2; |
+ hexstr[i] = '\0'; |
+ do { |
+ hexstr[--i] = HEX[static_cast<uint8_t>(value) & 0x0F]; |
+ value >>= 4; |
+ } while (value || leading_zeros == kKeepLeadingZeros && i > 0); |
+ LogAppend(hexstr + i); |
} |
// Stages the buffer content hex-encoded in the current line buffer. |
@@ -391,6 +434,78 @@ class MicrodumpWriter { |
LogCommitLine(); |
} |
+ bool DumpFreeSpace() { |
+ const google_breakpad::wasteful_vector<MappingInfo*>& mappings = |
+ dumper_->mappings(); |
+ if (mappings.size() == 0) return false; |
+ |
+ // This is complicated by the fact that mappings is not in order. It should |
+ // be mostly in order, however the mapping that contains the entry point for |
+ // the process is always at the front of the vector. |
+ |
+ static const int HBITS = sizeof(size_t) * 8; |
+ unsigned int hole_histogram[HBITS]; |
+ my_memset(hole_histogram, 0, sizeof(hole_histogram)); |
+ |
+ // Find the lowest address mapping. |
+ size_t curr = 0; |
+ for (size_t i = 1; i < mappings.size(); ++i) { |
+ if (mappings[i]->start_addr < mappings[curr]->start_addr) curr = i; |
+ } |
+ |
+ uintptr_t lo_addr = mappings[curr]->start_addr; |
+ |
+ unsigned int hole_cnt = 0; |
+ size_t hole_max = 0; |
+ size_t hole_sum = 0; |
+ |
+ while (true) { |
+ // Skip to the end of an adjacent run of mappings. This is an optimization |
+ // for the fact that mappings is mostly sorted. |
+ while (curr != mappings.size() - 1 && |
+ MappingsAreAdjacent(mappings[curr], mappings[curr + 1])) |
+ ++curr; |
+ |
+ size_t next = NextOrderedMapping(mappings, curr); |
+ if (next == mappings.size()) |
+ break; |
+ |
+ uintptr_t hole_lo = mappings[curr]->start_addr + mappings[curr]->size; |
+ uintptr_t hole_hi = mappings[next]->start_addr; |
+ |
+ if (hole_hi > hole_lo) { |
+ size_t hole_sz = hole_hi - hole_lo; |
+ hole_sum += hole_sz; |
+ hole_max = std::max(hole_sz, hole_max); |
+ ++hole_cnt; |
+ ++hole_histogram[Log2Floor(hole_sz)]; |
+ } |
+ curr = next; |
+ } |
+ |
+ uintptr_t hi_addr = mappings[curr]->start_addr + mappings[curr]->size; |
+ |
+ LogAppend("H "); |
+ LogAppend(lo_addr); |
+ LogAppend(" "); |
+ LogAppend(hi_addr); |
+ LogAppend(" "); |
+ LogAppend(hole_cnt, kDiscardLeadingZeros); |
+ LogAppend(" "); |
+ LogAppend(hole_max, kDiscardLeadingZeros); |
+ LogAppend(" "); |
+ LogAppend(hole_sum, kDiscardLeadingZeros); |
+ for (unsigned int i = 0; i < HBITS; ++i) { |
+ if (!hole_histogram[i]) continue; |
+ LogAppend(" "); |
+ LogAppend(i, kDiscardLeadingZeros); |
+ LogAppend(":"); |
+ LogAppend(hole_histogram[i], kDiscardLeadingZeros); |
+ } |
+ LogCommitLine(); |
+ return true; |
+ } |
+ |
// Write information about the mappings in effect. |
bool DumpMappings() { |
// First write all the mappings from the dumper |