Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(636)

Side by Side Diff: base/metrics/persistent_histogram_allocator.cc

Issue 1738063002: Refactor histogram_persistence to be a class. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: refactored (again) into PersistentHistogramAllocator class Created 4 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/metrics/histogram_persistence.h" 5 #include "base/metrics/persistent_histogram_allocator.h"
6 6
7 #include "base/lazy_instance.h" 7 #include "base/lazy_instance.h"
8 #include "base/logging.h" 8 #include "base/logging.h"
9 #include "base/memory/scoped_ptr.h" 9 #include "base/memory/scoped_ptr.h"
10 #include "base/metrics/histogram.h" 10 #include "base/metrics/histogram.h"
11 #include "base/metrics/histogram_base.h" 11 #include "base/metrics/histogram_base.h"
12 #include "base/metrics/histogram_samples.h" 12 #include "base/metrics/histogram_samples.h"
13 #include "base/metrics/statistics_recorder.h" 13 #include "base/metrics/statistics_recorder.h"
14 #include "base/synchronization/lock.h" 14 #include "base/synchronization/lock.h"
15 15
16 namespace base { 16 namespace base {
17 17
18 namespace { 18 namespace {
19 19
20 // Enumerate possible creation results for reporting.
21 enum CreateHistogramResultType {
22 // Everything was fine.
23 CREATE_HISTOGRAM_SUCCESS = 0,
24
25 // Pointer to metadata was not valid.
26 CREATE_HISTOGRAM_INVALID_METADATA_POINTER,
27
28 // Histogram metadata was not valid.
29 CREATE_HISTOGRAM_INVALID_METADATA,
30
31 // Ranges information was not valid.
32 CREATE_HISTOGRAM_INVALID_RANGES_ARRAY,
33
34 // Counts information was not valid.
35 CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY,
36
37 // Could not allocate histogram memory due to corruption.
38 CREATE_HISTOGRAM_ALLOCATOR_CORRUPT,
39
40 // Could not allocate histogram memory due to lack of space.
41 CREATE_HISTOGRAM_ALLOCATOR_FULL,
42
43 // Could not allocate histogram memory due to unknown error.
44 CREATE_HISTOGRAM_ALLOCATOR_ERROR,
45
46 // Histogram was of unknown type.
47 CREATE_HISTOGRAM_UNKNOWN_TYPE,
48
49 // Instance has detected a corrupt allocator (recorded only once).
50 CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT,
51
52 // Always keep this at the end.
53 CREATE_HISTOGRAM_MAX
54 };
55
56 // Name of histogram for storing results of local operations. 20 // Name of histogram for storing results of local operations.
57 const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result"; 21 const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";
58 22
59 // Type identifiers used when storing in persistent memory so they can be 23 // Type identifiers used when storing in persistent memory so they can be
60 // identified during extraction; the first 4 bytes of the SHA1 of the name 24 // identified during extraction; the first 4 bytes of the SHA1 of the name
61 // is used as a unique integer. A "version number" is added to the base 25 // is used as a unique integer. A "version number" is added to the base
62 // so that, if the structure of that object changes, stored older versions 26 // so that, if the structure of that object changes, stored older versions
63 // will be safely ignored. 27 // will be safely ignored.
64 enum : uint32_t { 28 enum : uint32_t {
65 kTypeIdHistogram = 0xF1645910 + 2, // SHA1(Histogram) v2 29 kTypeIdHistogram = 0xF1645910 + 2, // SHA1(Histogram) v2
66 kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1 30 kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1
67 kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1 31 kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1
68 }; 32 };
69 33
70 // This data must be held in persistent memory in order for processes to
71 // locate and use histograms created elsewhere. All elements must be of a
72 // fixed width to ensure 32/64-bit interoperability.
73 struct PersistentHistogramData {
74 int32_t histogram_type;
75 int32_t flags;
76 int32_t minimum;
77 int32_t maximum;
78 uint32_t bucket_count;
79 PersistentMemoryAllocator::Reference ranges_ref;
80 uint32_t ranges_checksum;
81 PersistentMemoryAllocator::Reference counts_ref;
82 HistogramSamples::Metadata samples_metadata;
83 HistogramSamples::Metadata logged_metadata;
84
85 // Space for the histogram name will be added during the actual allocation
86 // request. This must be the last field of the structure. A zero-size array
87 // or a "flexible" array would be preferred but is not (yet) valid C++.
88 char name[1];
89 };
90
91 // The object held here will obviously not be destructed at process exit
92 // but that's okay since PersistentMemoryAllocator objects are explicitly
93 // forbidden from doing anything essential at exit anyway due to the fact
94 // that they depend on data managed elsewhere and which could be destructed
95 // first.
96 PersistentMemoryAllocator* g_allocator = nullptr;
97
98 // Take an array of range boundaries and create a proper BucketRanges object 34 // Take an array of range boundaries and create a proper BucketRanges object
99 // which is returned to the caller. A return of nullptr indicates that the 35 // which is returned to the caller. A return of nullptr indicates that the
100 // passed boundaries are invalid. 36 // passed boundaries are invalid.
101 BucketRanges* CreateRangesFromData(HistogramBase::Sample* ranges_data, 37 scoped_ptr<BucketRanges> CreateRangesFromData(
102 uint32_t ranges_checksum, 38 HistogramBase::Sample* ranges_data,
103 size_t count) { 39 uint32_t ranges_checksum,
40 size_t count) {
41 // To avoid racy destruction at shutdown, the following may be leaked.
104 scoped_ptr<BucketRanges> ranges(new BucketRanges(count)); 42 scoped_ptr<BucketRanges> ranges(new BucketRanges(count));
105 DCHECK_EQ(count, ranges->size()); 43 DCHECK_EQ(count, ranges->size());
106 for (size_t i = 0; i < count; ++i) { 44 for (size_t i = 0; i < count; ++i) {
107 if (i > 0 && ranges_data[i] <= ranges_data[i - 1]) 45 if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
108 return nullptr; 46 return nullptr;
109 ranges->set_range(i, ranges_data[i]); 47 ranges->set_range(i, ranges_data[i]);
110 } 48 }
111 49
112 ranges->ResetChecksum(); 50 ranges->ResetChecksum();
113 if (ranges->checksum() != ranges_checksum) 51 if (ranges->checksum() != ranges_checksum)
114 return nullptr; 52 return nullptr;
115 53
116 return ranges.release(); 54 return ranges;
117 } 55 }
118 56
119 // Calculate the number of bytes required to store all of a histogram's 57 // Calculate the number of bytes required to store all of a histogram's
120 // "counts". This will return zero (0) if |bucket_count| is not valid. 58 // "counts". This will return zero (0) if |bucket_count| is not valid.
121 size_t CalculateRequiredCountsBytes(size_t bucket_count) { 59 size_t CalculateRequiredCountsBytes(size_t bucket_count) {
122 // 2 because each "sample count" also requires a backup "logged count" 60 // 2 because each "sample count" also requires a backup "logged count"
123 // used for calculating the delta during snapshot operations. 61 // used for calculating the delta during snapshot operations.
124 const unsigned kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount); 62 const unsigned kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
125 63
126 // If the |bucket_count| is such that it would overflow the return type, 64 // If the |bucket_count| is such that it would overflow the return type,
127 // perhaps as the result of a malicious actor, then return zero to 65 // perhaps as the result of a malicious actor, then return zero to
128 // indicate the problem to the caller. 66 // indicate the problem to the caller.
129 if (bucket_count > std::numeric_limits<uint32_t>::max() / kBytesPerBucket) 67 if (bucket_count > std::numeric_limits<uint32_t>::max() / kBytesPerBucket)
130 return 0; 68 return 0;
131 69
132 return bucket_count * kBytesPerBucket; 70 return bucket_count * kBytesPerBucket;
133 } 71 }
134 72
135 } // namespace 73 } // namespace
136 74
137 const Feature kPersistentHistogramsFeature{ 75 const Feature kPersistentHistogramsFeature{
138 "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT 76 "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
139 }; 77 };
140 78
141 // Get the histogram in which create results are stored. This is copied almost 79 // This data will be held in persistent memory in order for processes to
142 // exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with added code 80 // locate and use histograms created elsewhere.
143 // to prevent recursion (a likely occurrence because the creation of a new 81 struct PersistentHistogramAllocator::PersistentHistogramData {
144 // histogram can end up calling this.) 82 int32_t histogram_type;
145 HistogramBase* GetCreateHistogramResultHistogram() { 83 int32_t flags;
146 static base::subtle::AtomicWord atomic_histogram_pointer = 0; 84 int32_t minimum;
147 HistogramBase* histogram_pointer( 85 int32_t maximum;
148 reinterpret_cast<HistogramBase*>( 86 uint32_t bucket_count;
149 base::subtle::Acquire_Load(&atomic_histogram_pointer))); 87 PersistentMemoryAllocator::Reference ranges_ref;
150 if (!histogram_pointer) { 88 uint32_t ranges_checksum;
151 // It's possible for multiple threads to make it here in parallel but 89 PersistentMemoryAllocator::Reference counts_ref;
152 // they'll always return the same result as there is a mutex in the Get. 90 HistogramSamples::Metadata samples_metadata;
153 // The purpose of the "initialized" variable is just to ensure that 91 HistogramSamples::Metadata logged_metadata;
154 // the same thread doesn't recurse which is also why it doesn't have
155 // to be atomic.
156 static bool initialized = false;
157 if (!initialized) {
158 initialized = true;
159 if (g_allocator) {
160 DLOG(WARNING) << "Creating the results-histogram inside persistent"
161 << " memory can cause future allocations to crash if"
162 << " that memory is ever released (for testing).";
163 }
164 92
165 histogram_pointer = LinearHistogram::FactoryGet( 93 // Space for the histogram name will be added during the actual allocation
166 kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1, 94 // request. This must be the last field of the structure. A zero-size array
167 HistogramBase::kUmaTargetedHistogramFlag); 95 // or a "flexible" array would be preferred but is not (yet) valid C++.
168 base::subtle::Release_Store( 96 char name[1];
169 &atomic_histogram_pointer, 97 };
170 reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); 98
171 } 99 // The object held here will obviously not be destructed at process exit
100 // but that's best since PersistentMemoryAllocator objects (that underlie
101 // PersistentHistogramAllocator objects) are explicitly forbidden from doing
102 // anything essential at exit anyway due to the fact that they depend on data
103 // managed elsewhere and which could be destructed first.
104 PersistentHistogramAllocator* PersistentHistogramAllocator::g_allocator_;
Alexei Svitkine (slow) 2016/03/03 18:11:28 Can this be local to the file? (If not, the comme
bcwhite 2016/03/04 21:17:16 Done. I wasn't sure which was the best style for
105
106 PersistentHistogramAllocator::PersistentHistogramAllocator(
107 scoped_ptr<PersistentMemoryAllocator> memory)
108 : memory_allocator_(std::move(memory)) {}
109
110 PersistentHistogramAllocator::~PersistentHistogramAllocator() {}
111
112 scoped_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
113 Reference ref) {
114 // Unfortunately, the histogram "pickle" methods cannot be used as part of
115 // the persistance because the deserialization methods always create local
116 // count data (while these must reference the persistent counts) and always
117 // add it to the local list of known histograms (while these may be simple
118 // references to histograms in other processes).
119 PersistentHistogramData* histogram_data =
120 memory_allocator_->GetAsObject<PersistentHistogramData>(
121 ref, kTypeIdHistogram);
122 size_t length = memory_allocator_->GetAllocSize(ref);
123 if (!histogram_data ||
124 reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
125 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
126 NOTREACHED();
127 return nullptr;
172 } 128 }
173 return histogram_pointer; 129 return CreateHistogram(histogram_data);
174 } 130 }
175 131
176 // Record the result of a histogram creation. 132 void PersistentHistogramAllocator::CreateIterator(Iterator* iter) {
177 void RecordCreateHistogramResult(CreateHistogramResultType result) { 133 memory_allocator_->CreateIterator(&iter->memory_iter);
178 HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
179 if (result_histogram)
180 result_histogram->Add(result);
181 } 134 }
182 135
183 void SetPersistentHistogramMemoryAllocator( 136 scoped_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
184 PersistentMemoryAllocator* allocator) { 137 HistogramType histogram_type,
138 const std::string& name,
139 int minimum,
140 int maximum,
141 const BucketRanges* bucket_ranges,
142 int32_t flags,
143 Reference* ref_ptr) {
144 // If the allocator is corrupt, don't waste time trying anything else.
145 // This also allows differentiating on the dashboard between allocations
146 // failed due to a corrupt allocator and the number of process instances
147 // with one, the latter being indicated by "newly corrupt", below.
148 if (memory_allocator_->IsCorrupt()) {
149 RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
150 return nullptr;
151 }
152
153 // If CalculateRequiredCountsBytes() returns zero then the bucket_count
154 // was not valid.
155 size_t bucket_count = bucket_ranges->bucket_count();
156 size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
157 if (!counts_bytes) {
158 NOTREACHED();
159 return nullptr;
160 }
161
162 size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
163 PersistentMemoryAllocator::Reference ranges_ref =
164 memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
165 PersistentMemoryAllocator::Reference counts_ref =
166 memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
167 PersistentMemoryAllocator::Reference histogram_ref =
168 memory_allocator_->Allocate(
169 offsetof(PersistentHistogramData, name) + name.length() + 1,
170 kTypeIdHistogram);
171 HistogramBase::Sample* ranges_data =
172 memory_allocator_->GetAsObject<HistogramBase::Sample>(ranges_ref,
173 kTypeIdRangesArray);
174 PersistentHistogramData* histogram_data =
175 memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
176 kTypeIdHistogram);
177
178 // Only continue here if all allocations were successful. If they weren't,
179 // there is no way to free the space but that's not really a problem since
180 // the allocations only fail because the space is full or corrupt and so
181 // any future attempts will also fail.
182 if (counts_ref && ranges_data && histogram_data) {
183 strcpy(histogram_data->name, name.c_str());
184 for (size_t i = 0; i < bucket_ranges->size(); ++i)
185 ranges_data[i] = bucket_ranges->range(i);
186
187 histogram_data->histogram_type = histogram_type;
188 histogram_data->flags = flags;
189 histogram_data->minimum = minimum;
190 histogram_data->maximum = maximum;
191 histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
192 histogram_data->ranges_ref = ranges_ref;
193 histogram_data->ranges_checksum = bucket_ranges->checksum();
194 histogram_data->counts_ref = counts_ref;
195
196 // Create the histogram using resources in persistent memory. This ends up
197 // resolving the "ref" values stored in histogram_data instead of just
198 // using what is already known above but avoids duplicating the switch
199 // statement here and serves as a double-check that everything is
200 // correct before committing the new histogram to persistent space.
201 scoped_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
202 DCHECK(histogram);
203 if (ref_ptr != nullptr)
204 *ref_ptr = histogram_ref;
205 subtle::NoBarrier_Store(&last_created_, histogram_ref);
206 return histogram;
207 }
208
209 CreateHistogramResultType result;
210 if (memory_allocator_->IsCorrupt()) {
211 RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
212 result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
213 } else if (memory_allocator_->IsFull()) {
214 result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
215 } else {
216 result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
217 }
218 RecordCreateHistogramResult(result);
219 NOTREACHED() << "error=" << result;
220
221 return nullptr;
222 }
223
224 void PersistentHistogramAllocator::FinalizeHistogram(
225 Reference ref,
226 bool registered) {
227 // If the created persistent histogram was registered then it needs to
228 // be marked as "iterable" in order to be found by other processes.
229 if (registered)
230 memory_allocator_->MakeIterable(ref);
231 // If it wasn't registered then a race condition must have caused
232 // two to be created. The allocator does not support releasing the
233 // acquired memory so just change the type to be empty.
234 else
235 memory_allocator_->SetType(ref, 0);
236 }
237
238 void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
239 memory_allocator_->CreateTrackingHistograms(name);
240 }
241
242 void PersistentHistogramAllocator::UpdateTrackingHistograms() {
243 memory_allocator_->UpdateTrackingHistograms();
244 }
245
246 // static
247 void PersistentHistogramAllocator::SetGlobalAllocator(
248 scoped_ptr<PersistentHistogramAllocator> allocator) {
185 // Releasing or changing an allocator is extremely dangerous because it 249 // Releasing or changing an allocator is extremely dangerous because it
186 // likely has histograms stored within it. If the backing memory is also 250 // likely has histograms stored within it. If the backing memory is also
187 // also released, future accesses to those histograms will seg-fault. 251 // also released, future accesses to those histograms will seg-fault.
188 CHECK(!g_allocator); 252 CHECK(!g_allocator_);
189 g_allocator = allocator; 253 g_allocator_ = allocator.release();
190 } 254 }
191 255
192 PersistentMemoryAllocator* GetPersistentHistogramMemoryAllocator() { 256 // static
193 return g_allocator; 257 PersistentHistogramAllocator*
258 PersistentHistogramAllocator::GetGlobalAllocator() {
259 return g_allocator_;
194 } 260 }
195 261
196 PersistentMemoryAllocator* 262 // static
197 ReleasePersistentHistogramMemoryAllocatorForTesting() { 263 scoped_ptr<PersistentHistogramAllocator>
198 PersistentMemoryAllocator* allocator = g_allocator; 264 PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting() {
199 if (!allocator) 265 PersistentHistogramAllocator* histogram_allocator = g_allocator_;
266 if (!histogram_allocator)
200 return nullptr; 267 return nullptr;
268 PersistentMemoryAllocator* memory_allocator =
269 histogram_allocator->memory_allocator();
201 270
202 // Before releasing the memory, it's necessary to have the Statistics- 271 // Before releasing the memory, it's necessary to have the Statistics-
203 // Recorder forget about the histograms contained therein; otherwise, 272 // Recorder forget about the histograms contained therein; otherwise,
204 // some operations will try to access them and the released memory. 273 // some operations will try to access them and the released memory.
205 PersistentMemoryAllocator::Iterator iter; 274 PersistentMemoryAllocator::Iterator iter;
206 PersistentMemoryAllocator::Reference ref; 275 PersistentMemoryAllocator::Reference ref;
207 uint32_t type_id; 276 uint32_t type_id;
208 allocator->CreateIterator(&iter); 277 memory_allocator->CreateIterator(&iter);
209 while ((ref = allocator->GetNextIterable(&iter, &type_id)) != 0) { 278 while ((ref = memory_allocator->GetNextIterable(&iter, &type_id)) != 0) {
210 if (type_id == kTypeIdHistogram) { 279 if (type_id == kTypeIdHistogram) {
211 PersistentHistogramData* histogram_data = 280 PersistentHistogramData* histogram_data =
212 allocator->GetAsObject<PersistentHistogramData>( 281 memory_allocator->GetAsObject<PersistentHistogramData>(
213 ref, kTypeIdHistogram); 282 ref, kTypeIdHistogram);
214 DCHECK(histogram_data); 283 DCHECK(histogram_data);
215 StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name); 284 StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
216 285
217 // If a test breaks here then a memory region containing a histogram 286 // If a test breaks here then a memory region containing a histogram
218 // actively used by this code is being released back to the test. 287 // actively used by this code is being released back to the test.
219 // If that memory segment were to be deleted, future calls to create 288 // If that memory segment were to be deleted, future calls to create
220 // persistent histograms would crash. To avoid this, have the test call 289 // persistent histograms would crash. To avoid this, have the test call
221 // the method GetCreateHistogramResultHistogram() *before* setting the 290 // the method GetCreateHistogramResultHistogram() *before* setting
222 // (temporary) memory allocator via SetPersistentMemoryAllocator() so 291 // the (temporary) memory allocator via SetGlobalAllocator() so that
223 // that the histogram is instead allocated from the process heap. 292 // histogram is instead allocated from the process heap.
224 DCHECK_NE(kResultHistogram, histogram_data->name); 293 DCHECK_NE(kResultHistogram, histogram_data->name);
225 } 294 }
226 } 295 }
227 296
228 g_allocator = nullptr; 297 g_allocator_ = nullptr;
229 return allocator; 298 return make_scoped_ptr(histogram_allocator);
230 }; 299 };
231 300
232 HistogramBase* CreatePersistentHistogram( 301 // static
233 PersistentMemoryAllocator* allocator, 302 void PersistentHistogramAllocator::ImportGlobalHistograms() {
303 // The lock protects against concurrent access to the iterator and is created
304 // in a thread-safe manner when needed.
305 static base::LazyInstance<base::Lock>::Leaky lock = LAZY_INSTANCE_INITIALIZER;
306
307 if (g_allocator_) {
308 // TODO(bcwhite): Investigate a lock-free, thread-safe iterator.
309 base::AutoLock auto_lock(lock.Get());
310
311 // Each call resumes from where it last left off so it must persist
Alexei Svitkine (slow) 2016/03/03 18:11:28 Nit: Grammar.
bcwhite 2016/03/04 21:17:16 Done.
312 // the iterator. This class has a constructor so even the definition
313 // has to be protected by the lock in order to be thread-safe.
314 static Iterator iter;
315 if (iter.is_clear())
316 g_allocator_->CreateIterator(&iter);
317
318 // Skip the import if it's the histogram that was last created. Should a
319 // race condition cause the "last created" to be overwritten before it
320 // is recognized here then the histogram will be created and be ignored
321 // when it is detected as a duplicate by the statistics-recorder. This
322 // simple check reduces the time of creating persistent histograms by
323 // about 40%.
324 Reference last_created =
325 subtle::NoBarrier_Load(&g_allocator_->last_created_);
326
327 while (true) {
328 scoped_ptr<HistogramBase> histogram =
329 g_allocator_->GetNextHistogramWithIgnore(&iter, last_created);
330 if (!histogram)
331 break;
332 StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
333 }
334 }
335 }
336
337 // static
338 HistogramBase*
339 PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
340 // Get the histogram in which create-results are stored. This is copied
341 // almost exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with
342 // added code to prevent recursion (a likely occurrence because the creation
343 // of a new histogram can end up calling this.)
344 static base::subtle::AtomicWord atomic_histogram_pointer = 0;
345 HistogramBase* histogram_pointer =
346 reinterpret_cast<HistogramBase*>(
347 base::subtle::Acquire_Load(&atomic_histogram_pointer));
348 if (!histogram_pointer) {
349 // It's possible for multiple threads to make it here in parallel but
350 // they'll always return the same result as there is a mutex in the Get.
351 // The purpose of the "initialized" variable is just to ensure that
352 // the same thread doesn't recurse which is also why it doesn't have
353 // to be atomic.
354 static bool initialized = false;
355 if (!initialized) {
356 initialized = true;
357 if (g_allocator_) {
358 DLOG(WARNING) << "Creating the results-histogram inside persistent"
359 << " memory can cause future allocations to crash if"
360 << " that memory is ever released (for testing).";
361 }
362
363 histogram_pointer = LinearHistogram::FactoryGet(
364 kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
365 HistogramBase::kUmaTargetedHistogramFlag);
366 base::subtle::Release_Store(
367 &atomic_histogram_pointer,
368 reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
369 }
370 }
371 return histogram_pointer;
372 }
373
374 scoped_ptr<HistogramBase>
375 PersistentHistogramAllocator::GetNextHistogramWithIgnore(
376 Iterator* iter,
377 Reference ignore) {
378 PersistentMemoryAllocator::Reference ref;
379 uint32_t type_id;
380 while ((ref = memory_allocator_->GetNextIterable(&iter->memory_iter,
381 &type_id)) != 0) {
382 if (ref == ignore)
383 continue;
384 if (type_id == kTypeIdHistogram)
385 return GetHistogram(ref);
386 }
387 return nullptr;
388 }
389
390 // static
391 scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
234 PersistentHistogramData* histogram_data_ptr) { 392 PersistentHistogramData* histogram_data_ptr) {
235 if (!histogram_data_ptr) { 393 if (!histogram_data_ptr) {
236 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER); 394 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
237 NOTREACHED(); 395 NOTREACHED();
238 return nullptr; 396 return nullptr;
239 } 397 }
240 398
241 // Copy the histogram_data to local storage because anything in persistent 399 // Copy the histogram_data to local storage because anything in persistent
242 // memory cannot be trusted as it could be changed at any moment by a 400 // memory cannot be trusted as it could be changed at any moment by a
243 // malicious actor that shares access. The contents of histogram_data are 401 // malicious actor that shares access. The contents of histogram_data are
244 // validated below; the local copy is to ensure that the contents cannot 402 // validated below; the local copy is to ensure that the contents cannot
245 // be externally changed between validation and use. 403 // be externally changed between validation and use.
246 PersistentHistogramData histogram_data = *histogram_data_ptr; 404 PersistentHistogramData histogram_data = *histogram_data_ptr;
247 405
248 HistogramBase::Sample* ranges_data = 406 HistogramBase::Sample* ranges_data =
249 allocator->GetAsObject<HistogramBase::Sample>(histogram_data.ranges_ref, 407 memory_allocator_->GetAsObject<HistogramBase::Sample>(
250 kTypeIdRangesArray); 408 histogram_data.ranges_ref, kTypeIdRangesArray);
251 if (!ranges_data || histogram_data.bucket_count < 2 || 409 if (!ranges_data || histogram_data.bucket_count < 2 ||
252 histogram_data.bucket_count + 1 > 410 histogram_data.bucket_count + 1 > std::numeric_limits<uint32_t>::max() /
253 std::numeric_limits<uint32_t>::max() / 411 sizeof(HistogramBase::Sample) ||
254 sizeof(HistogramBase::Sample) || 412 memory_allocator_->GetAllocSize(histogram_data.ranges_ref) <
255 allocator->GetAllocSize(histogram_data.ranges_ref) <
256 (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample)) { 413 (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample)) {
257 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY); 414 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
258 NOTREACHED(); 415 NOTREACHED();
259 return nullptr; 416 return nullptr;
260 } 417 }
261 // To avoid racy destruction at shutdown, the following will be leaked. 418
262 const BucketRanges* ranges = CreateRangesFromData( 419 scoped_ptr<const BucketRanges> created_ranges =
263 ranges_data, 420 CreateRangesFromData(ranges_data, histogram_data.ranges_checksum,
264 histogram_data.ranges_checksum, 421 histogram_data.bucket_count + 1);
265 histogram_data.bucket_count + 1); 422 if (!created_ranges) {
266 if (!ranges) {
267 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY); 423 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
268 NOTREACHED(); 424 NOTREACHED();
269 return nullptr; 425 return nullptr;
270 } 426 }
271 ranges = StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges); 427 const BucketRanges* ranges =
428 StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
429 created_ranges.release());
272 430
273 HistogramBase::AtomicCount* counts_data = 431 HistogramBase::AtomicCount* counts_data =
274 allocator->GetAsObject<HistogramBase::AtomicCount>( 432 memory_allocator_->GetAsObject<HistogramBase::AtomicCount>(
275 histogram_data.counts_ref, kTypeIdCountsArray); 433 histogram_data.counts_ref, kTypeIdCountsArray);
276 size_t counts_bytes = 434 size_t counts_bytes =
277 CalculateRequiredCountsBytes(histogram_data.bucket_count); 435 CalculateRequiredCountsBytes(histogram_data.bucket_count);
278 if (!counts_data || !counts_bytes || 436 if (!counts_data || !counts_bytes ||
279 allocator->GetAllocSize(histogram_data.counts_ref) < counts_bytes) { 437 memory_allocator_->GetAllocSize(histogram_data.counts_ref) <
438 counts_bytes) {
280 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY); 439 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
281 NOTREACHED(); 440 NOTREACHED();
282 return nullptr; 441 return nullptr;
283 } 442 }
284 443
285 // After the main "counts" array is a second array used for storing what 444 // was previously logged. This is used to calculate the "delta" during
286 // was previously logged. This is used to calculate the "delta" during 445 // was previously logged. This is used to calculate the "delta" during
287 // snapshot operations. 446 // snapshot operations.
288 HistogramBase::AtomicCount* logged_data = 447 HistogramBase::AtomicCount* logged_data =
289 counts_data + histogram_data.bucket_count; 448 counts_data + histogram_data.bucket_count;
290 449
291 std::string name(histogram_data_ptr->name); 450 std::string name(histogram_data_ptr->name);
292 HistogramBase* histogram = nullptr; 451 scoped_ptr<HistogramBase> histogram;
293 switch (histogram_data.histogram_type) { 452 switch (histogram_data.histogram_type) {
294 case HISTOGRAM: 453 case HISTOGRAM:
295 histogram = Histogram::PersistentGet( 454 histogram = Histogram::PersistentCreate(
296 name, 455 name, histogram_data.minimum, histogram_data.maximum, ranges,
297 histogram_data.minimum, 456 counts_data, logged_data, histogram_data.bucket_count,
298 histogram_data.maximum,
299 ranges,
300 counts_data,
301 logged_data,
302 histogram_data.bucket_count,
303 &histogram_data_ptr->samples_metadata, 457 &histogram_data_ptr->samples_metadata,
304 &histogram_data_ptr->logged_metadata); 458 &histogram_data_ptr->logged_metadata);
305 DCHECK(histogram); 459 DCHECK(histogram);
306 break; 460 break;
307 case LINEAR_HISTOGRAM: 461 case LINEAR_HISTOGRAM:
308 histogram = LinearHistogram::PersistentGet( 462 histogram = LinearHistogram::PersistentCreate(
309 name, 463 name, histogram_data.minimum, histogram_data.maximum, ranges,
310 histogram_data.minimum, 464 counts_data, logged_data, histogram_data.bucket_count,
311 histogram_data.maximum,
312 ranges,
313 counts_data,
314 logged_data,
315 histogram_data.bucket_count,
316 &histogram_data_ptr->samples_metadata, 465 &histogram_data_ptr->samples_metadata,
317 &histogram_data_ptr->logged_metadata); 466 &histogram_data_ptr->logged_metadata);
318 DCHECK(histogram); 467 DCHECK(histogram);
319 break; 468 break;
320 case BOOLEAN_HISTOGRAM: 469 case BOOLEAN_HISTOGRAM:
321 histogram = BooleanHistogram::PersistentGet( 470 histogram = BooleanHistogram::PersistentCreate(
322 name, 471 name, ranges, counts_data, logged_data,
323 ranges,
324 counts_data,
325 logged_data,
326 &histogram_data_ptr->samples_metadata, 472 &histogram_data_ptr->samples_metadata,
327 &histogram_data_ptr->logged_metadata); 473 &histogram_data_ptr->logged_metadata);
328 DCHECK(histogram); 474 DCHECK(histogram);
329 break; 475 break;
330 case CUSTOM_HISTOGRAM: 476 case CUSTOM_HISTOGRAM:
331 histogram = CustomHistogram::PersistentGet( 477 histogram = CustomHistogram::PersistentCreate(
332 name, 478 name, ranges, counts_data, logged_data, histogram_data.bucket_count,
333 ranges,
334 counts_data,
335 logged_data,
336 histogram_data.bucket_count,
337 &histogram_data_ptr->samples_metadata, 479 &histogram_data_ptr->samples_metadata,
338 &histogram_data_ptr->logged_metadata); 480 &histogram_data_ptr->logged_metadata);
339 DCHECK(histogram); 481 DCHECK(histogram);
340 break; 482 break;
341 default: 483 default:
342 NOTREACHED(); 484 NOTREACHED();
343 } 485 }
344 486
345 if (histogram) { 487 if (histogram) {
346 DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType()); 488 DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType());
347 histogram->SetFlags(histogram_data.flags); 489 histogram->SetFlags(histogram_data.flags);
348 RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS); 490 RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
349 } else { 491 } else {
350 RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE); 492 RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
351 } 493 }
352 494
353 return histogram; 495 return histogram;
354 } 496 }
355 497
356 HistogramBase* GetPersistentHistogram( 498 // static
357 PersistentMemoryAllocator* allocator, 499 void PersistentHistogramAllocator::RecordCreateHistogramResult(
358 int32_t ref) { 500 CreateHistogramResultType result) {
359 // Unfortunately, the above "pickle" methods cannot be used as part of the 501 HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
360 // persistance because the deserialization methods always create local 502 if (result_histogram)
361 // count data (these must referenced the persistent counts) and always add 503 result_histogram->Add(result);
362 // it to the local list of known histograms (these may be simple references
363 // to histograms in other processes).
364 PersistentHistogramData* histogram_data =
365 allocator->GetAsObject<PersistentHistogramData>(ref, kTypeIdHistogram);
366 size_t length = allocator->GetAllocSize(ref);
367 if (!histogram_data ||
368 reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
369 RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
370 NOTREACHED();
371 return nullptr;
372 }
373 return CreatePersistentHistogram(allocator, histogram_data);
374 }
375
376 HistogramBase* GetNextPersistentHistogram(
377 PersistentMemoryAllocator* allocator,
378 PersistentMemoryAllocator::Iterator* iter) {
379 PersistentMemoryAllocator::Reference ref;
380 uint32_t type_id;
381 while ((ref = allocator->GetNextIterable(iter, &type_id)) != 0) {
382 if (type_id == kTypeIdHistogram)
383 return GetPersistentHistogram(allocator, ref);
384 }
385 return nullptr;
386 }
387
388 void FinalizePersistentHistogram(PersistentMemoryAllocator::Reference ref,
389 bool registered) {
390 // If the created persistent histogram was registered then it needs to
391 // be marked as "iterable" in order to be found by other processes.
392 if (registered)
393 GetPersistentHistogramMemoryAllocator()->MakeIterable(ref);
394 // If it wasn't registered then a race condition must have caused
395 // two to be created. The allocator does not support releasing the
396 // acquired memory so just change the type to be empty.
397 else
398 GetPersistentHistogramMemoryAllocator()->SetType(ref, 0);
399 }
400
401 HistogramBase* AllocatePersistentHistogram(
402 PersistentMemoryAllocator* allocator,
403 HistogramType histogram_type,
404 const std::string& name,
405 int minimum,
406 int maximum,
407 const BucketRanges* bucket_ranges,
408 int32_t flags,
409 PersistentMemoryAllocator::Reference* ref_ptr) {
410 if (!allocator)
411 return nullptr;
412
413 // If the allocator is corrupt, don't waste time trying anything else.
414 // This also allows differentiating on the dashboard between allocations
415 // failed due to a corrupt allocator and the number of process instances
416 // with one, the latter being idicated by "newly corrupt", below.
417 if (allocator->IsCorrupt()) {
418 RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
419 return nullptr;
420 }
421
422 // If CalculateRequiredCountsBytes() returns zero then the bucket_count
423 // was not valid.
424 size_t bucket_count = bucket_ranges->bucket_count();
425 size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
426 if (!counts_bytes) {
427 NOTREACHED();
428 return nullptr;
429 }
430
431 size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
432 PersistentMemoryAllocator::Reference ranges_ref =
433 allocator->Allocate(ranges_bytes, kTypeIdRangesArray);
434 PersistentMemoryAllocator::Reference counts_ref =
435 allocator->Allocate(counts_bytes, kTypeIdCountsArray);
436 PersistentMemoryAllocator::Reference histogram_ref =
437 allocator->Allocate(offsetof(PersistentHistogramData, name) +
438 name.length() + 1, kTypeIdHistogram);
439 HistogramBase::Sample* ranges_data =
440 allocator->GetAsObject<HistogramBase::Sample>(ranges_ref,
441 kTypeIdRangesArray);
442 PersistentHistogramData* histogram_data =
443 allocator->GetAsObject<PersistentHistogramData>(histogram_ref,
444 kTypeIdHistogram);
445
446 // Only continue here if all allocations were successful. If they weren't
447 // there is no way to free the space but that's not really a problem since
448 // the allocations only fail because the space is full and so any future
449 // attempts will also fail.
450 if (counts_ref && ranges_data && histogram_data) {
451 strcpy(histogram_data->name, name.c_str());
452 for (size_t i = 0; i < bucket_ranges->size(); ++i)
453 ranges_data[i] = bucket_ranges->range(i);
454
455 histogram_data->histogram_type = histogram_type;
456 histogram_data->flags = flags;
457 histogram_data->minimum = minimum;
458 histogram_data->maximum = maximum;
459 histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
460 histogram_data->ranges_ref = ranges_ref;
461 histogram_data->ranges_checksum = bucket_ranges->checksum();
462 histogram_data->counts_ref = counts_ref;
463
464 // Create the histogram using resources in persistent memory. This ends up
465 // resolving the "ref" values stored in histogram_data instad of just
466 // using what is already known above but avoids duplicating the switch
467 // statement here and serves as a double-check that everything is
468 // correct before commiting the new histogram to persistent space.
469 HistogramBase* histogram =
470 CreatePersistentHistogram(allocator, histogram_data);
471 DCHECK(histogram);
472 if (ref_ptr != nullptr)
473 *ref_ptr = histogram_ref;
474 return histogram;
475 }
476
477 CreateHistogramResultType result;
478 if (allocator->IsCorrupt()) {
479 RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
480 result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
481 } else if (allocator->IsFull()) {
482 result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
483 } else {
484 result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
485 }
486 RecordCreateHistogramResult(result);
487 NOTREACHED() << "error=" << result;
488
489 return nullptr;
490 }
491
492 void ImportPersistentHistograms() {
493 // The lock protects against concurrent access to the iterator and is created
494 // in a thread-safe manner when needed.
495 static base::LazyInstance<base::Lock>::Leaky lock = LAZY_INSTANCE_INITIALIZER;
496
497 if (g_allocator) {
498 base::AutoLock auto_lock(lock.Get());
499
500 // Each call resumes from where it last left off so need persistant
501 // iterator. This class has a constructor so even the definition has
502 // to be protected by the lock in order to be thread-safe.
503 static PersistentMemoryAllocator::Iterator iter;
504 if (iter.is_clear())
505 g_allocator->CreateIterator(&iter);
506
507 while (true) {
508 HistogramBase* histogram = GetNextPersistentHistogram(g_allocator, &iter);
509 if (!histogram)
510 break;
511 StatisticsRecorder::RegisterOrDeleteDuplicate(histogram);
512 }
513 }
514 } 504 }
515 505
516 } // namespace base 506 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698