Chromium Code Reviews

Side by Side Diff: base/metrics/persistent_histogram_allocator.cc

Issue 1738063002: Refactor histogram_persistence to be a class. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: move some parameters up a line (created 4 years, 9 months ago)
Unified diff (lines prefixed with "-" were removed, lines prefixed with "+" were added):
-// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-#include "base/metrics/histogram_persistence.h"
+#include "base/metrics/persistent_histogram_allocator.h"

 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/synchronization/lock.h"

+// TODO(bcwhite): Order these methods to match the header file. The current
+// order is only temporary in order to aid review of the transition from
+// a non-class implementation.
+
 namespace base {

 namespace {

-// Enumerate possible creation results for reporting.
-enum CreateHistogramResultType {
-  // Everything was fine.
-  CREATE_HISTOGRAM_SUCCESS = 0,
-
-  // Pointer to metadata was not valid.
-  CREATE_HISTOGRAM_INVALID_METADATA_POINTER,
-
-  // Histogram metadata was not valid.
-  CREATE_HISTOGRAM_INVALID_METADATA,
-
-  // Ranges information was not valid.
-  CREATE_HISTOGRAM_INVALID_RANGES_ARRAY,
-
-  // Counts information was not valid.
-  CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY,
-
-  // Could not allocate histogram memory due to corruption.
-  CREATE_HISTOGRAM_ALLOCATOR_CORRUPT,
-
-  // Could not allocate histogram memory due to lack of space.
-  CREATE_HISTOGRAM_ALLOCATOR_FULL,
-
-  // Could not allocate histogram memory due to unknown error.
-  CREATE_HISTOGRAM_ALLOCATOR_ERROR,
-
-  // Histogram was of unknown type.
-  CREATE_HISTOGRAM_UNKNOWN_TYPE,
-
-  // Instance has detected a corrupt allocator (recorded only once).
-  CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT,
-
-  // Always keep this at the end.
-  CREATE_HISTOGRAM_MAX
-};
-
 // Name of histogram for storing results of local operations.
 const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";

 // Type identifiers used when storing in persistent memory so they can be
 // identified during extraction; the first 4 bytes of the SHA1 of the name
 // is used as a unique integer. A "version number" is added to the base
 // so that, if the structure of that object changes, stored older versions
 // will be safely ignored.
 enum : uint32_t {
   kTypeIdHistogram = 0xF1645910 + 2,    // SHA1(Histogram)   v2
   kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
   kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
 };

-// This data must be held in persistent memory in order for processes to
-// locate and use histograms created elsewhere. All elements must be of a
-// fixed width to ensure 32/64-bit interoperability.
-struct PersistentHistogramData {
-  int32_t histogram_type;
-  int32_t flags;
-  int32_t minimum;
-  int32_t maximum;
-  uint32_t bucket_count;
-  PersistentMemoryAllocator::Reference ranges_ref;
-  uint32_t ranges_checksum;
-  PersistentMemoryAllocator::Reference counts_ref;
-  HistogramSamples::Metadata samples_metadata;
-  HistogramSamples::Metadata logged_metadata;
-
-  // Space for the histogram name will be added during the actual allocation
-  // request. This must be the last field of the structure. A zero-size array
-  // or a "flexible" array would be preferred but is not (yet) valid C++.
-  char name[1];
-};
-
+// The current globally-active persistent allocator for all new histograms.
 // The object held here will obviously not be destructed at process exit
-// but that's okay since PersistentMemoryAllocator objects are explicitly
-// forbidden from doing anything essential at exit anyway due to the fact
-// that they depend on data managed elsewhere and which could be destructed
-// first.
-PersistentMemoryAllocator* g_allocator = nullptr;
+// but that's best since PersistentMemoryAllocator objects (that underlie
+// PersistentHistogramAllocator objects) are explicitly forbidden from doing
+// anything essential at exit anyway due to the fact that they depend on data
+// managed elsewhere and which could be destructed first.
+PersistentHistogramAllocator* g_allocator;

 // Take an array of range boundaries and create a proper BucketRanges object
 // which is returned to the caller. A return of nullptr indicates that the
 // passed boundaries are invalid.
-BucketRanges* CreateRangesFromData(HistogramBase::Sample* ranges_data,
-                                   uint32_t ranges_checksum,
-                                   size_t count) {
+scoped_ptr<BucketRanges> CreateRangesFromData(
+    HistogramBase::Sample* ranges_data,
+    uint32_t ranges_checksum,
+    size_t count) {
+  // To avoid racy destruction at shutdown, the following may be leaked.
   scoped_ptr<BucketRanges> ranges(new BucketRanges(count));
   DCHECK_EQ(count, ranges->size());
   for (size_t i = 0; i < count; ++i) {
     if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
       return nullptr;
     ranges->set_range(i, ranges_data[i]);
   }

   ranges->ResetChecksum();
   if (ranges->checksum() != ranges_checksum)
     return nullptr;

-  return ranges.release();
+  return ranges;
 }

 // Calculate the number of bytes required to store all of a histogram's
 // "counts". This will return zero (0) if |bucket_count| is not valid.
 size_t CalculateRequiredCountsBytes(size_t bucket_count) {
   // 2 because each "sample count" also requires a backup "logged count"
   // used for calculating the delta during snapshot operations.
   const unsigned kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

   // If the |bucket_count| is such that it would overflow the return type,
   // perhaps as the result of a malicious actor, then return zero to
   // indicate the problem to the caller.
   if (bucket_count > std::numeric_limits<uint32_t>::max() / kBytesPerBucket)
     return 0;

   return bucket_count * kBytesPerBucket;
 }

 }  // namespace

 const Feature kPersistentHistogramsFeature{
   "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
 };

-// Get the histogram in which create results are stored. This is copied almost
-// exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with added code
-// to prevent recursion (a likely occurance because the creation of a new
-// histogram can end up calling this.)
-HistogramBase* GetCreateHistogramResultHistogram() {
+// This data will be held in persistent memory in order for processes to
+// locate and use histograms created elsewhere.
+struct PersistentHistogramAllocator::PersistentHistogramData {
+  int32_t histogram_type;
+  int32_t flags;
+  int32_t minimum;
+  int32_t maximum;
+  uint32_t bucket_count;
+  PersistentMemoryAllocator::Reference ranges_ref;
+  uint32_t ranges_checksum;
+  PersistentMemoryAllocator::Reference counts_ref;
+  HistogramSamples::Metadata samples_metadata;
+  HistogramSamples::Metadata logged_metadata;
+
+  // Space for the histogram name will be added during the actual allocation
+  // request. This must be the last field of the structure. A zero-size array
+  // or a "flexible" array would be preferred but is not (yet) valid C++.
+  char name[1];
+};
+
+PersistentHistogramAllocator::PersistentHistogramAllocator(
+    scoped_ptr<PersistentMemoryAllocator> memory)
+    : memory_allocator_(std::move(memory)) {}
+
+PersistentHistogramAllocator::~PersistentHistogramAllocator() {}
+
+void PersistentHistogramAllocator::CreateIterator(Iterator* iter) {
+  memory_allocator_->CreateIterator(&iter->memory_iter);
+}
+
+void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
+  memory_allocator_->CreateTrackingHistograms(name);
+}
+
+void PersistentHistogramAllocator::UpdateTrackingHistograms() {
+  memory_allocator_->UpdateTrackingHistograms();
+}
+
+// static
+HistogramBase*
+PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
+  // Get the histogram in which create-results are stored. This is copied
+  // almost exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with
+  // added code to prevent recursion (a likely occurance because the creation
+  // of a new a histogram can end up calling this.)
   static base::subtle::AtomicWord atomic_histogram_pointer = 0;
-  HistogramBase* histogram_pointer(
+  HistogramBase* histogram_pointer =
       reinterpret_cast<HistogramBase*>(
-          base::subtle::Acquire_Load(&atomic_histogram_pointer)));
+          base::subtle::Acquire_Load(&atomic_histogram_pointer));
   if (!histogram_pointer) {
     // It's possible for multiple threads to make it here in parallel but
     // they'll always return the same result as there is a mutex in the Get.
     // The purpose of the "initialized" variable is just to ensure that
     // the same thread doesn't recurse which is also why it doesn't have
     // to be atomic.
     static bool initialized = false;
     if (!initialized) {
       initialized = true;
       if (g_allocator) {
         DLOG(WARNING) << "Creating the results-histogram inside persistent"
                       << " memory can cause future allocations to crash if"
                       << " that memory is ever released (for testing).";
       }

       histogram_pointer = LinearHistogram::FactoryGet(
           kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
           HistogramBase::kUmaTargetedHistogramFlag);
       base::subtle::Release_Store(
           &atomic_histogram_pointer,
           reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
     }
   }
   return histogram_pointer;
 }

-// Record the result of a histogram creation.
-void RecordCreateHistogramResult(CreateHistogramResultType result) {
+// static
+void PersistentHistogramAllocator::RecordCreateHistogramResult(
+    CreateHistogramResultType result) {
   HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
   if (result_histogram)
     result_histogram->Add(result);
 }

-void SetPersistentHistogramMemoryAllocator(
-    PersistentMemoryAllocator* allocator) {
+// static
+void PersistentHistogramAllocator::SetGlobalAllocator(
+    scoped_ptr<PersistentHistogramAllocator> allocator) {
   // Releasing or changing an allocator is extremely dangerous because it
   // likely has histograms stored within it. If the backing memory is also
   // also released, future accesses to those histograms will seg-fault.
   CHECK(!g_allocator);
-  g_allocator = allocator;
+  g_allocator = allocator.release();
 }

-PersistentMemoryAllocator* GetPersistentHistogramMemoryAllocator() {
+// static
+PersistentHistogramAllocator*
+PersistentHistogramAllocator::GetGlobalAllocator() {
   return g_allocator;
 }

-PersistentMemoryAllocator*
-ReleasePersistentHistogramMemoryAllocatorForTesting() {
-  PersistentMemoryAllocator* allocator = g_allocator;
-  if (!allocator)
+// static
+scoped_ptr<PersistentHistogramAllocator>
+PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting() {
+  PersistentHistogramAllocator* histogram_allocator = g_allocator;
+  if (!histogram_allocator)
     return nullptr;
+  PersistentMemoryAllocator* memory_allocator =
+      histogram_allocator->memory_allocator();

   // Before releasing the memory, it's necessary to have the Statistics-
   // Recorder forget about the histograms contained therein; otherwise,
   // some operations will try to access them and the released memory.
   PersistentMemoryAllocator::Iterator iter;
   PersistentMemoryAllocator::Reference ref;
   uint32_t type_id;
-  allocator->CreateIterator(&iter);
-  while ((ref = allocator->GetNextIterable(&iter, &type_id)) != 0) {
+  memory_allocator->CreateIterator(&iter);
+  while ((ref = memory_allocator->GetNextIterable(&iter, &type_id)) != 0) {
     if (type_id == kTypeIdHistogram) {
       PersistentHistogramData* histogram_data =
-          allocator->GetAsObject<PersistentHistogramData>(
+          memory_allocator->GetAsObject<PersistentHistogramData>(
               ref, kTypeIdHistogram);
       DCHECK(histogram_data);
       StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);

       // If a test breaks here then a memory region containing a histogram
       // actively used by this code is being released back to the test.
       // If that memory segment were to be deleted, future calls to create
       // persistent histograms would crash. To avoid this, have the test call
-      // the method GetCreateHistogramResultHistogram() *before* setting the
-      // (temporary) memory allocator via SetPersistentMemoryAllocator() so
-      // that the histogram is instead allocated from the process heap.
+      // the method GetCreateHistogramResultHistogram() *before* setting
+      // the (temporary) memory allocator via SetGlobalAllocator() so that
+      // histogram is instead allocated from the process heap.
       DCHECK_NE(kResultHistogram, histogram_data->name);
     }
   }

   g_allocator = nullptr;
-  return allocator;
+  return make_scoped_ptr(histogram_allocator);
 };

-HistogramBase* CreatePersistentHistogram(
-    PersistentMemoryAllocator* allocator,
+// static
+void PersistentHistogramAllocator::CreateGlobalAllocatorOnPersistentMemory(
+    void* base,
+    size_t size,
+    size_t page_size,
+    uint64_t id,
+    StringPiece name) {
+  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
+      make_scoped_ptr(new PersistentMemoryAllocator(
+          base, size, page_size, id, name, false)))));
+}
+
+// static
+void PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
+    size_t size,
+    uint64_t id,
+    StringPiece name) {
+  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
+      make_scoped_ptr(new LocalPersistentMemoryAllocator(size, id, name)))));
+}
+
+// static
+void PersistentHistogramAllocator::CreateGlobalAllocatorOnSharedMemory(
+    size_t size,
+    const SharedMemoryHandle& handle) {
+  scoped_ptr<SharedMemory> shm(new SharedMemory(handle, /*readonly=*/false));
+  if (!shm->Map(size)) {
+    NOTREACHED();
+    return;
+  }
+
+  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
+      make_scoped_ptr(new SharedPersistentMemoryAllocator(
+          std::move(shm), 0, StringPiece(), /*readonly=*/false)))));
+}
+
+// static
+scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
     PersistentHistogramData* histogram_data_ptr) {
   if (!histogram_data_ptr) {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
     NOTREACHED();
     return nullptr;
   }

   // Copy the histogram_data to local storage because anything in persistent
   // memory cannot be trusted as it could be changed at any moment by a
   // malicious actor that shares access. The contents of histogram_data are
   // validated below; the local copy is to ensure that the contents cannot
   // be externally changed between validation and use.
   PersistentHistogramData histogram_data = *histogram_data_ptr;

   HistogramBase::Sample* ranges_data =
-      allocator->GetAsObject<HistogramBase::Sample>(histogram_data.ranges_ref,
-                                                    kTypeIdRangesArray);
+      memory_allocator_->GetAsObject<HistogramBase::Sample>(
+          histogram_data.ranges_ref, kTypeIdRangesArray);
+
+  const uint32_t max_buckets =
+      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
+  size_t required_bytes =
+      (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample);
+  size_t allocated_bytes =
+      memory_allocator_->GetAllocSize(histogram_data.ranges_ref);
   if (!ranges_data || histogram_data.bucket_count < 2 ||
-      histogram_data.bucket_count + 1 >
-          std::numeric_limits<uint32_t>::max() /
-          sizeof(HistogramBase::Sample) ||
-      allocator->GetAllocSize(histogram_data.ranges_ref) <
-          (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample)) {
+      histogram_data.bucket_count >= max_buckets ||
+      allocated_bytes < required_bytes) {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
     NOTREACHED();
     return nullptr;
   }
-  // To avoid racy destruction at shutdown, the following will be leaked.
-  const BucketRanges* ranges = CreateRangesFromData(
-      ranges_data,
-      histogram_data.ranges_checksum,
-      histogram_data.bucket_count + 1);
-  if (!ranges) {
+
+  scoped_ptr<const BucketRanges> created_ranges =
+      CreateRangesFromData(ranges_data, histogram_data.ranges_checksum,
+                           histogram_data.bucket_count + 1);
+  if (!created_ranges) {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
     NOTREACHED();
     return nullptr;
   }
-  ranges = StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
+  const BucketRanges* ranges =
+      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
+          created_ranges.release());

   HistogramBase::AtomicCount* counts_data =
-      allocator->GetAsObject<HistogramBase::AtomicCount>(
+      memory_allocator_->GetAsObject<HistogramBase::AtomicCount>(
           histogram_data.counts_ref, kTypeIdCountsArray);
   size_t counts_bytes =
       CalculateRequiredCountsBytes(histogram_data.bucket_count);
   if (!counts_data || !counts_bytes ||
-      allocator->GetAllocSize(histogram_data.counts_ref) < counts_bytes) {
+      memory_allocator_->GetAllocSize(histogram_data.counts_ref) <
+          counts_bytes) {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
     NOTREACHED();
     return nullptr;
   }

   // After the main "counts" array is a second array using for storing what
   // was previously logged. This is used to calculate the "delta" during
   // snapshot operations.
   HistogramBase::AtomicCount* logged_data =
       counts_data + histogram_data.bucket_count;

   std::string name(histogram_data_ptr->name);
-  HistogramBase* histogram = nullptr;
+  scoped_ptr<HistogramBase> histogram;
   switch (histogram_data.histogram_type) {
     case HISTOGRAM:
-      histogram = Histogram::PersistentGet(
-          name,
-          histogram_data.minimum,
-          histogram_data.maximum,
-          ranges,
-          counts_data,
-          logged_data,
-          histogram_data.bucket_count,
+      histogram = Histogram::PersistentCreate(
+          name, histogram_data.minimum, histogram_data.maximum, ranges,
+          counts_data, logged_data, histogram_data.bucket_count,
           &histogram_data_ptr->samples_metadata,
           &histogram_data_ptr->logged_metadata);
       DCHECK(histogram);
       break;
     case LINEAR_HISTOGRAM:
-      histogram = LinearHistogram::PersistentGet(
-          name,
-          histogram_data.minimum,
-          histogram_data.maximum,
-          ranges,
-          counts_data,
-          logged_data,
-          histogram_data.bucket_count,
+      histogram = LinearHistogram::PersistentCreate(
+          name, histogram_data.minimum, histogram_data.maximum, ranges,
+          counts_data, logged_data, histogram_data.bucket_count,
           &histogram_data_ptr->samples_metadata,
           &histogram_data_ptr->logged_metadata);
       DCHECK(histogram);
       break;
     case BOOLEAN_HISTOGRAM:
-      histogram = BooleanHistogram::PersistentGet(
-          name,
-          ranges,
-          counts_data,
-          logged_data,
+      histogram = BooleanHistogram::PersistentCreate(
+          name, ranges, counts_data, logged_data,
           &histogram_data_ptr->samples_metadata,
           &histogram_data_ptr->logged_metadata);
       DCHECK(histogram);
       break;
     case CUSTOM_HISTOGRAM:
-      histogram = CustomHistogram::PersistentGet(
-          name,
-          ranges,
-          counts_data,
-          logged_data,
-          histogram_data.bucket_count,
+      histogram = CustomHistogram::PersistentCreate(
+          name, ranges, counts_data, logged_data, histogram_data.bucket_count,
           &histogram_data_ptr->samples_metadata,
           &histogram_data_ptr->logged_metadata);
       DCHECK(histogram);
       break;
     default:
       NOTREACHED();
   }

   if (histogram) {
     DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType());
     histogram->SetFlags(histogram_data.flags);
     RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
   } else {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
   }

   return histogram;
 }

-HistogramBase* GetPersistentHistogram(
-    PersistentMemoryAllocator* allocator,
-    int32_t ref) {
-  // Unfortunately, the above "pickle" methods cannot be used as part of the
-  // persistance because the deserialization methods always create local
-  // count data (these must referenced the persistent counts) and always add
-  // it to the local list of known histograms (these may be simple references
-  // to histograms in other processes).
+scoped_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
+    Reference ref) {
+  // Unfortunately, the histogram "pickle" methods cannot be used as part of
+  // the persistance because the deserialization methods always create local
+  // count data (while these must reference the persistent counts) and always
+  // add it to the local list of known histograms (while these may be simple
+  // references to histograms in other processes).
   PersistentHistogramData* histogram_data =
-      allocator->GetAsObject<PersistentHistogramData>(ref, kTypeIdHistogram);
-  size_t length = allocator->GetAllocSize(ref);
+      memory_allocator_->GetAsObject<PersistentHistogramData>(
+          ref, kTypeIdHistogram);
+  size_t length = memory_allocator_->GetAllocSize(ref);
   if (!histogram_data ||
       reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
     NOTREACHED();
     return nullptr;
   }
-  return CreatePersistentHistogram(allocator, histogram_data);
+  return CreateHistogram(histogram_data);
 }

-HistogramBase* GetNextPersistentHistogram(
-    PersistentMemoryAllocator* allocator,
-    PersistentMemoryAllocator::Iterator* iter) {
+scoped_ptr<HistogramBase>
+PersistentHistogramAllocator::GetNextHistogramWithIgnore(Iterator* iter,
+                                                         Reference ignore) {
   PersistentMemoryAllocator::Reference ref;
   uint32_t type_id;
-  while ((ref = allocator->GetNextIterable(iter, &type_id)) != 0) {
+  while ((ref = memory_allocator_->GetNextIterable(&iter->memory_iter,
+                                                   &type_id)) != 0) {
+    if (ref == ignore)
+      continue;
     if (type_id == kTypeIdHistogram)
-      return GetPersistentHistogram(allocator, ref);
+      return GetHistogram(ref);
   }
   return nullptr;
 }

-void FinalizePersistentHistogram(PersistentMemoryAllocator::Reference ref,
-                                 bool registered) {
+void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
+                                                     bool registered) {
   // If the created persistent histogram was registered then it needs to
   // be marked as "iterable" in order to be found by other processes.
   if (registered)
-    GetPersistentHistogramMemoryAllocator()->MakeIterable(ref);
+    memory_allocator_->MakeIterable(ref);
   // If it wasn't registered then a race condition must have caused
   // two to be created. The allocator does not support releasing the
   // acquired memory so just change the type to be empty.
   else
-    GetPersistentHistogramMemoryAllocator()->SetType(ref, 0);
+    memory_allocator_->SetType(ref, 0);
 }

-HistogramBase* AllocatePersistentHistogram(
-    PersistentMemoryAllocator* allocator,
+scoped_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
     HistogramType histogram_type,
     const std::string& name,
     int minimum,
     int maximum,
     const BucketRanges* bucket_ranges,
     int32_t flags,
-    PersistentMemoryAllocator::Reference* ref_ptr) {
-  if (!allocator)
-    return nullptr;
-
+    Reference* ref_ptr) {
   // If the allocator is corrupt, don't waste time trying anything else.
   // This also allows differentiating on the dashboard between allocations
   // failed due to a corrupt allocator and the number of process instances
   // with one, the latter being idicated by "newly corrupt", below.
-  if (allocator->IsCorrupt()) {
+  if (memory_allocator_->IsCorrupt()) {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
     return nullptr;
   }

   // If CalculateRequiredCountsBytes() returns zero then the bucket_count
   // was not valid.
   size_t bucket_count = bucket_ranges->bucket_count();
   size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
   if (!counts_bytes) {
     NOTREACHED();
     return nullptr;
   }

   size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
   PersistentMemoryAllocator::Reference ranges_ref =
-      allocator->Allocate(ranges_bytes, kTypeIdRangesArray);
+      memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
   PersistentMemoryAllocator::Reference counts_ref =
-      allocator->Allocate(counts_bytes, kTypeIdCountsArray);
+      memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
   PersistentMemoryAllocator::Reference histogram_ref =
-      allocator->Allocate(offsetof(PersistentHistogramData, name) +
-                          name.length() + 1, kTypeIdHistogram);
+      memory_allocator_->Allocate(
+          offsetof(PersistentHistogramData, name) + name.length() + 1,
+          kTypeIdHistogram);
   HistogramBase::Sample* ranges_data =
-      allocator->GetAsObject<HistogramBase::Sample>(ranges_ref,
-                                                    kTypeIdRangesArray);
+      memory_allocator_->GetAsObject<HistogramBase::Sample>(ranges_ref,
+                                                            kTypeIdRangesArray);
   PersistentHistogramData* histogram_data =
-      allocator->GetAsObject<PersistentHistogramData>(histogram_ref,
-                                                      kTypeIdHistogram);
+      memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
+                                                              kTypeIdHistogram);

-  // Only continue here if all allocations were successful. If they weren't
+  // Only continue here if all allocations were successful. If they weren't,
   // there is no way to free the space but that's not really a problem since
-  // the allocations only fail because the space is full and so any future
-  // attempts will also fail.
+  // the allocations only fail because the space is full or corrupt and so
+  // any future attempts will also fail.
   if (counts_ref && ranges_data && histogram_data) {
     strcpy(histogram_data->name, name.c_str());
     for (size_t i = 0; i < bucket_ranges->size(); ++i)
       ranges_data[i] = bucket_ranges->range(i);

     histogram_data->histogram_type = histogram_type;
     histogram_data->flags = flags;
     histogram_data->minimum = minimum;
     histogram_data->maximum = maximum;
     histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
     histogram_data->ranges_ref = ranges_ref;
     histogram_data->ranges_checksum = bucket_ranges->checksum();
     histogram_data->counts_ref = counts_ref;

     // Create the histogram using resources in persistent memory. This ends up
     // resolving the "ref" values stored in histogram_data instad of just
     // using what is already known above but avoids duplicating the switch
     // statement here and serves as a double-check that everything is
     // correct before commiting the new histogram to persistent space.
-    HistogramBase* histogram =
-        CreatePersistentHistogram(allocator, histogram_data);
+    scoped_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
     DCHECK(histogram);
     if (ref_ptr != nullptr)
       *ref_ptr = histogram_ref;
+
+    // By storing the reference within the allocator to this histogram, the
+    // next import (which will happen before the next histogram creation)
+    // will know to skip it. See also the comment in ImportGlobalHistograms().
+    subtle::NoBarrier_Store(&last_created_, histogram_ref);
     return histogram;
   }

   CreateHistogramResultType result;
-  if (allocator->IsCorrupt()) {
+  if (memory_allocator_->IsCorrupt()) {
     RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
     result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
-  } else if (allocator->IsFull()) {
+  } else if (memory_allocator_->IsFull()) {
     result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
   } else {
     result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
   }
   RecordCreateHistogramResult(result);
   NOTREACHED() << "error=" << result;

   return nullptr;
 }

-void ImportPersistentHistograms() {
+// static
+void PersistentHistogramAllocator::ImportGlobalHistograms() {
   // The lock protects against concurrent access to the iterator and is created
   // in a thread-safe manner when needed.
   static base::LazyInstance<base::Lock>::Leaky lock = LAZY_INSTANCE_INITIALIZER;

   if (g_allocator) {
+    // TODO(bcwhite): Investigate a lock-free, thread-safe iterator.
     base::AutoLock auto_lock(lock.Get());

-    // Each call resumes from where it last left off so need persistant
-    // iterator. This class has a constructor so even the definition has
-    // to be protected by the lock in order to be thread-safe.
-    static PersistentMemoryAllocator::Iterator iter;
+    // Each call resumes from where it last left off so a persistant iterator
+    // is needed. This class has a constructor so even the definition has to
+    // be protected by the lock in order to be thread-safe.
+    static Iterator iter;
     if (iter.is_clear())
       g_allocator->CreateIterator(&iter);

+    // Skip the import if it's the histogram that was last created. Should a
+    // race condition cause the "last created" to be overwritten before it
+    // is recognized here then the histogram will be created and be ignored
+    // when it is detected as a duplicate by the statistics-recorder. This
+    // simple check reduces the time of creating persistent histograms by
+    // about 40%.
+    Reference last_created =
+        subtle::NoBarrier_Load(&g_allocator->last_created_);
+
     while (true) {
-      HistogramBase* histogram = GetNextPersistentHistogram(g_allocator, &iter);
+      scoped_ptr<HistogramBase> histogram =
+          g_allocator->GetNextHistogramWithIgnore(&iter, last_created);
       if (!histogram)
         break;
-      StatisticsRecorder::RegisterOrDeleteDuplicate(histogram);
+      StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
     }
   }
 }

 }  // namespace base
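
Illustrative usage sketch (not part of the CL under review): the snippet below shows how the class-based API introduced by this patch might be exercised, assuming other parts of the CL route histogram factories through the global allocator. The function name, segment size, id, and histogram names are arbitrary examples, not values taken from the patch.

#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/persistent_histogram_allocator.h"

// Hypothetical setup helper; constants chosen for illustration only.
void SetUpPersistentHistogramsForDemo() {
  // Back new histograms with a 64 KiB heap-based persistent segment.
  base::PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
      1 << 16, /*id=*/0x4D5A, /*name=*/"DemoMetrics");

  // Histograms created after this point can be stored in that segment.
  base::HistogramBase* latency = base::LinearHistogram::FactoryGet(
      "Demo.Latency", 1, 100, 50,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  latency->Add(42);

  // A process that attaches an allocator over the same memory picks up
  // histograms created elsewhere via the static import call.
  base::PersistentHistogramAllocator::ImportGlobalHistograms();
}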