Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(472)

Side by Side Diff: base/metrics/sample_vector.cc

Issue 2811713003: Embed a single sample in histogram metadata. (Closed)
Patch Set: rebased Created 3 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/metrics/sample_vector.h ('k') | base/metrics/sample_vector_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/metrics/sample_vector.h" 5 #include "base/metrics/sample_vector.h"
6 6
7 #include "base/lazy_instance.h"
7 #include "base/logging.h" 8 #include "base/logging.h"
8 #include "base/metrics/bucket_ranges.h" 9 #include "base/memory/ptr_util.h"
10 #include "base/metrics/persistent_memory_allocator.h"
11 #include "base/synchronization/lock.h"
12 #include "base/threading/platform_thread.h"
13
14 // This SampleVector makes use of the single-sample embedded in the base
15 // HistogramSamples class. If the count is non-zero then there is guaranteed
16 // (within the bounds of "eventual consistency") to be no allocated external
17 // storage. Once the full counts storage is allocated, the single-sample must
18 // be extracted and disabled.
9 19
10 namespace base { 20 namespace base {
11 21
12 typedef HistogramBase::Count Count; 22 typedef HistogramBase::Count Count;
13 typedef HistogramBase::Sample Sample; 23 typedef HistogramBase::Sample Sample;
14 24
15 SampleVector::SampleVector(const BucketRanges* bucket_ranges) 25 SampleVectorBase::SampleVectorBase(uint64_t id,
16 : SampleVector(0, bucket_ranges) {} 26 const BucketRanges* bucket_ranges)
17 27 : HistogramSamples(id), bucket_ranges_(bucket_ranges) {
18 SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges)
19 : HistogramSamples(id),
20 local_counts_(bucket_ranges->bucket_count()),
21 counts_(&local_counts_[0]),
22 counts_size_(local_counts_.size()),
23 bucket_ranges_(bucket_ranges) {
24 CHECK_GE(bucket_ranges_->bucket_count(), 1u); 28 CHECK_GE(bucket_ranges_->bucket_count(), 1u);
25 } 29 }
26 30
27 SampleVector::SampleVector(uint64_t id, 31 SampleVectorBase::SampleVectorBase(uint64_t id,
28 HistogramBase::AtomicCount* counts, 32 Metadata* meta,
29 size_t counts_size, 33 const BucketRanges* bucket_ranges)
30 Metadata* meta, 34 : HistogramSamples(id, meta), bucket_ranges_(bucket_ranges) {
31 const BucketRanges* bucket_ranges)
32 : HistogramSamples(id, meta),
33 counts_(counts),
34 counts_size_(bucket_ranges->bucket_count()),
35 bucket_ranges_(bucket_ranges) {
36 CHECK_LE(bucket_ranges_->bucket_count(), counts_size_);
37 CHECK_GE(bucket_ranges_->bucket_count(), 1u); 35 CHECK_GE(bucket_ranges_->bucket_count(), 1u);
38 } 36 }
39 37
40 SampleVector::~SampleVector() {} 38 SampleVectorBase::~SampleVectorBase() {}
41 39
42 void SampleVector::Accumulate(Sample value, Count count) { 40 void SampleVectorBase::Accumulate(Sample value, Count count) {
43 size_t bucket_index = GetBucketIndex(value); 41 const size_t bucket_index = GetBucketIndex(value);
44 subtle::NoBarrier_AtomicIncrement(&counts_[bucket_index], count); 42
45 IncreaseSum(static_cast<int64_t>(count) * value); 43 // Handle the single-sample case.
46 IncreaseRedundantCount(count); 44 if (!counts()) {
47 } 45 // Try to accumulate the parameters into the single-count entry.
48 46 if (AccumulateSingleSample(value, count, bucket_index)) {
49 Count SampleVector::GetCount(Sample value) const { 47 // A race condition could lead to a new single-sample being accumulated
50 size_t bucket_index = GetBucketIndex(value); 48 // above just after another thread executed the MountCountsStorage below.
51 return subtle::NoBarrier_Load(&counts_[bucket_index]); 49 // Since it is mounted, it could be mounted elsewhere and have values
52 } 50 // written to it. It's not allowed to have both a single-sample and
53 51 // entries in the counts array so move the single-sample.
54 Count SampleVector::TotalCount() const { 52 if (counts())
55 Count count = 0; 53 MoveSingleSampleToCounts();
56 for (size_t i = 0; i < counts_size_; i++) { 54 return;
57 count += subtle::NoBarrier_Load(&counts_[i]); 55 }
58 } 56
59 return count; 57 // Need real storage to store both what was in the single-sample plus the
60 } 58 // parameter information.
61 59 MountCountsStorageAndMoveSingleSample();
62 Count SampleVector::GetCountAtIndex(size_t bucket_index) const { 60 }
63 DCHECK(bucket_index < counts_size_); 61
64 return subtle::NoBarrier_Load(&counts_[bucket_index]); 62 // Handle the multi-sample case.
65 } 63 subtle::NoBarrier_AtomicIncrement(&counts()[bucket_index], count);
66 64 IncreaseSumAndCount(static_cast<int64_t>(count) * value, count);
67 std::unique_ptr<SampleCountIterator> SampleVector::Iterator() const { 65 }
68 return std::unique_ptr<SampleCountIterator>( 66
69 new SampleVectorIterator(counts_, counts_size_, bucket_ranges_)); 67 Count SampleVectorBase::GetCount(Sample value) const {
70 } 68 return GetCountAtIndex(GetBucketIndex(value));
71 69 }
72 bool SampleVector::AddSubtractImpl(SampleCountIterator* iter, 70
73 HistogramSamples::Operator op) { 71 Count SampleVectorBase::TotalCount() const {
72 // Handle the single-sample case.
73 SingleSample sample = single_sample().Load();
74 if (sample.count != 0)
75 return sample.count;
76
77 // Handle the multi-sample case.
78 if (counts() || MountExistingCountsStorage()) {
79 Count count = 0;
80 size_t size = counts_size();
81 const HistogramBase::AtomicCount* counts_array = counts();
82 for (size_t i = 0; i < size; ++i) {
83 count += subtle::NoBarrier_Load(&counts_array[i]);
84 }
85 return count;
86 }
87
88 // And the no-value case.
89 return 0;
90 }
91
92 Count SampleVectorBase::GetCountAtIndex(size_t bucket_index) const {
93 DCHECK(bucket_index < counts_size());
94
95 // Handle the single-sample case.
96 SingleSample sample = single_sample().Load();
97 if (sample.count != 0)
98 return sample.bucket == bucket_index ? sample.count : 0;
99
100 // Handle the multi-sample case.
101 if (counts() || MountExistingCountsStorage())
102 return subtle::NoBarrier_Load(&counts()[bucket_index]);
103
104 // And the no-value case.
105 return 0;
106 }
107
108 std::unique_ptr<SampleCountIterator> SampleVectorBase::Iterator() const {
109 // Handle the single-sample case.
110 SingleSample sample = single_sample().Load();
111 if (sample.count != 0) {
112 return MakeUnique<SingleSampleIterator>(
113 bucket_ranges_->range(sample.bucket),
114 bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket);
115 }
116
117 // Handle the multi-sample case.
118 if (counts() || MountExistingCountsStorage()) {
119 return MakeUnique<SampleVectorIterator>(counts(), counts_size(),
120 bucket_ranges_);
121 }
122
123 // And the no-value case.
124 return MakeUnique<SampleVectorIterator>(nullptr, 0, bucket_ranges_);
125 }
126
127 bool SampleVectorBase::AddSubtractImpl(SampleCountIterator* iter,
128 HistogramSamples::Operator op) {
129 // Stop now if there's nothing to do.
130 if (iter->Done())
131 return true;
132
133 // Get the first value and its index.
74 HistogramBase::Sample min; 134 HistogramBase::Sample min;
75 HistogramBase::Sample max; 135 HistogramBase::Sample max;
76 HistogramBase::Count count; 136 HistogramBase::Count count;
137 iter->Get(&min, &max, &count);
138 size_t dest_index = GetBucketIndex(min);
139
140 // The destination must be a superset of the source meaning that though the
141 // incoming ranges will find an exact match, the incoming bucket-index, if
142 // it exists, may be offset from the destination bucket-index. Calculate
143 // that offset of the passed iterator; there are no overflow checks
144 // because 2's complement math will work it out in the end.
145 //
146 // Because GetBucketIndex() always returns the same true or false result for
147 // a given iterator object, |index_offset| is either set here and used below,
148 // or never set and never used. The compiler doesn't know this, though, which
149 // is why it's necessary to initialize it to something.
150 size_t index_offset = 0;
151 size_t iter_index;
152 if (iter->GetBucketIndex(&iter_index))
153 index_offset = dest_index - iter_index;
154 if (dest_index >= counts_size())
155 return false;
156
157 // Post-increment. Information about the current sample is not available
158 // after this point.
159 iter->Next();
160
161 // Single-value storage is possible if there is no counts storage and the
162 // retrieved entry is the only one in the iterator.
163 if (!counts()) {
164 if (iter->Done()) {
165 // Don't call AccumulateSingleSample because that updates sum and count
166 // which was already done by the caller of this method.
167 if (single_sample().Accumulate(
168 dest_index, op == HistogramSamples::ADD ? count : -count)) {
169 // Handle race-condition that mounted counts storage between above and
170 // here.
171 if (counts())
172 MoveSingleSampleToCounts();
173 return true;
174 }
175 }
176
177 // The counts storage will be needed to hold the multiple incoming values.
178 MountCountsStorageAndMoveSingleSample();
179 }
77 180
78 // Go through the iterator and add the counts into correct bucket. 181 // Go through the iterator and add the counts into correct bucket.
79 size_t index = 0; 182 while (true) {
80 while (index < counts_size_ && !iter->Done()) { 183 // Ensure that the sample's min/max match the ranges min/max.
184 if (min != bucket_ranges_->range(dest_index) ||
185 max != bucket_ranges_->range(dest_index + 1)) {
186 NOTREACHED() << "sample=" << min << "," << max
187 << "; range=" << bucket_ranges_->range(dest_index) << ","
188 << bucket_ranges_->range(dest_index + 1);
189 return false;
190 }
191
192 // Sample's bucket matches exactly. Adjust count.
193 subtle::NoBarrier_AtomicIncrement(
194 &counts()[dest_index], op == HistogramSamples::ADD ? count : -count);
195
196 // Advance to the next iterable sample. See comments above for how
197 // everything works.
198 if (iter->Done())
199 return true;
81 iter->Get(&min, &max, &count); 200 iter->Get(&min, &max, &count);
82 if (min == bucket_ranges_->range(index) && 201 if (iter->GetBucketIndex(&iter_index)) {
83 max == bucket_ranges_->range(index + 1)) { 202 // Destination bucket is a known offset from the source bucket.
84 // Sample matches this bucket! 203 dest_index = iter_index + index_offset;
85 subtle::NoBarrier_AtomicIncrement(
86 &counts_[index], op == HistogramSamples::ADD ? count : -count);
87 iter->Next();
88 } else if (min > bucket_ranges_->range(index)) {
89 // Sample is larger than current bucket range. Try next.
90 index++;
91 } else { 204 } else {
92 // Sample is smaller than current bucket range. We scan buckets from 205 // Destination bucket has to be determined anew each time.
93 // smallest to largest, so the sample value must be invalid. 206 dest_index = GetBucketIndex(min);
207 }
208 if (dest_index >= counts_size())
94 return false; 209 return false;
95 } 210 iter->Next();
96 } 211 }
97
98 return iter->Done();
99 } 212 }
100 213
101 // Use simple binary search. This is very general, but there are better 214 // Use simple binary search. This is very general, but there are better
102 // approaches if we knew that the buckets were linearly distributed. 215 // approaches if we knew that the buckets were linearly distributed.
103 size_t SampleVector::GetBucketIndex(Sample value) const { 216 size_t SampleVectorBase::GetBucketIndex(Sample value) const {
104 size_t bucket_count = bucket_ranges_->bucket_count(); 217 size_t bucket_count = bucket_ranges_->bucket_count();
105 CHECK_GE(bucket_count, 1u); 218 CHECK_GE(bucket_count, 1u);
106 CHECK_GE(value, bucket_ranges_->range(0)); 219 CHECK_GE(value, bucket_ranges_->range(0));
107 CHECK_LT(value, bucket_ranges_->range(bucket_count)); 220 CHECK_LT(value, bucket_ranges_->range(bucket_count));
108 221
109 size_t under = 0; 222 size_t under = 0;
110 size_t over = bucket_count; 223 size_t over = bucket_count;
111 size_t mid; 224 size_t mid;
112 do { 225 do {
113 DCHECK_GE(over, under); 226 DCHECK_GE(over, under);
114 mid = under + (over - under)/2; 227 mid = under + (over - under)/2;
115 if (mid == under) 228 if (mid == under)
116 break; 229 break;
117 if (bucket_ranges_->range(mid) <= value) 230 if (bucket_ranges_->range(mid) <= value)
118 under = mid; 231 under = mid;
119 else 232 else
120 over = mid; 233 over = mid;
121 } while (true); 234 } while (true);
122 235
123 DCHECK_LE(bucket_ranges_->range(mid), value); 236 DCHECK_LE(bucket_ranges_->range(mid), value);
124 CHECK_GT(bucket_ranges_->range(mid + 1), value); 237 CHECK_GT(bucket_ranges_->range(mid + 1), value);
125 return mid; 238 return mid;
126 } 239 }
127 240
241 void SampleVectorBase::MoveSingleSampleToCounts() {
242 DCHECK(counts());
243
244 // Disable the single-sample since there is now counts storage for the data.
245 SingleSample sample = single_sample().Extract(/*disable=*/true);
246
247 // Stop here if there is no "count" as trying to find the bucket index of
248 // an invalid (including zero) "value" will crash.
249 if (sample.count == 0)
250 return;
251
252 // Move the value into storage. Sum and redundant-count already account
253 // for this entry so no need to call IncreaseSumAndCount().
254 subtle::NoBarrier_AtomicIncrement(&counts()[sample.bucket], sample.count);
255 }
256
257 void SampleVectorBase::MountCountsStorageAndMoveSingleSample() {
258 // There are many SampleVector objects and the lock is needed very
259 // infrequently (just when advancing from single-sample to multi-sample) so
260 // define a single, global lock that all can use. This lock only prevents
261 // concurrent entry into the code below; access and updates to |counts_|
262 // still require atomic operations.
263 static LazyInstance<Lock>::Leaky counts_lock = LAZY_INSTANCE_INITIALIZER;
264 if (subtle::NoBarrier_Load(&counts_) == 0) {
265 AutoLock lock(counts_lock.Get());
266 if (subtle::NoBarrier_Load(&counts_) == 0) {
267 // Create the actual counts storage while the above lock is acquired.
268 HistogramBase::Count* counts = CreateCountsStorageWhileLocked();
269 DCHECK(counts);
270
271 // Point |counts_| to the newly created storage. This is done while
272 // locked to prevent possible concurrent calls to CreateCountsStorage
273 // but, between that call and here, other threads could notice the
274 // existence of the storage and race with this to set_counts(). That's
275 // okay because (a) it's atomic and (b) it always writes the same value.
276 set_counts(counts);
277 }
278 }
279
280 // Move any single-sample into the newly mounted storage.
281 MoveSingleSampleToCounts();
282 }
283
284 SampleVector::SampleVector(const BucketRanges* bucket_ranges)
285 : SampleVector(0, bucket_ranges) {}
286
287 SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges)
288 : SampleVectorBase(id, bucket_ranges) {}
289
290 SampleVector::~SampleVector() {}
291
292 bool SampleVector::MountExistingCountsStorage() const {
293 // There is never any existing storage other than what is already in use.
294 return counts() != nullptr;
295 }
296
297 HistogramBase::AtomicCount* SampleVector::CreateCountsStorageWhileLocked() {
298 local_counts_.resize(counts_size());
299 return &local_counts_[0];
300 }
301
302 PersistentSampleVector::PersistentSampleVector(
303 uint64_t id,
304 const BucketRanges* bucket_ranges,
305 Metadata* meta,
306 const DelayedPersistentAllocation& counts)
307 : SampleVectorBase(id, meta, bucket_ranges), persistent_counts_(counts) {
308 // Only mount the full storage if the single-sample has been disabled.
309 // Otherwise, it is possible for this object instance to start using (empty)
310 // storage that was created incidentally while another instance continues to
311 // update to the single sample. This "incidental creation" can happen because
312 // the memory is a DelayedPersistentAllocation which allows multiple memory
313 // blocks within it and applies an all-or-nothing approach to the allocation.
314 // Thus, a request elsewhere for one of the _other_ blocks would make _this_
315 // block available even though nothing has explicitly requested it.
316 //
317 // Note that it's not possible for the ctor to mount existing storage and
318 // move any single-sample to it because sometimes the persistent memory is
319 // read-only. Only non-const methods (which assume that memory is read/write)
320 // can do that.
321 if (single_sample().IsDisabled()) {
322 bool success = MountExistingCountsStorage();
323 DCHECK(success);
324 }
325 }
326
327 PersistentSampleVector::~PersistentSampleVector() {}
328
329 bool PersistentSampleVector::MountExistingCountsStorage() const {
330 // There is no early exit if counts is not yet mounted because, given that
331 // this is a virtual function, it's more efficient to do that at the call-
332 // site. There is no danger, however, should this get called anyway (perhaps
333 // because of a race condition) because at worst the |counts_| value would
334 // be over-written (in an atomic manner) with the exact same address.
335
336 if (!persistent_counts_.reference())
337 return false; // Nothing to mount.
338
339 // Mount the counts array in position.
340 set_counts(
341 static_cast<HistogramBase::AtomicCount*>(persistent_counts_.Get()));
342 return true;
343 }
344
345 HistogramBase::AtomicCount*
346 PersistentSampleVector::CreateCountsStorageWhileLocked() {
347 void* mem = persistent_counts_.Get();
348 if (!mem) {
349 // The above shouldn't fail but can if Bad Things(tm) are occurring in the
350 // persistent allocator. Crashing isn't a good option so instead just
351 // allocate something from the heap and return that. There will be no
352 // sharing or persistence but worse things are already happening.
353 return new HistogramBase::AtomicCount[counts_size()];
354 }
355
356 return static_cast<HistogramBase::AtomicCount*>(mem);
357 }
358
128 SampleVectorIterator::SampleVectorIterator( 359 SampleVectorIterator::SampleVectorIterator(
129 const std::vector<HistogramBase::AtomicCount>* counts, 360 const std::vector<HistogramBase::AtomicCount>* counts,
130 const BucketRanges* bucket_ranges) 361 const BucketRanges* bucket_ranges)
131 : counts_(&(*counts)[0]), 362 : counts_(&(*counts)[0]),
132 counts_size_(counts->size()), 363 counts_size_(counts->size()),
133 bucket_ranges_(bucket_ranges), 364 bucket_ranges_(bucket_ranges),
134 index_(0) { 365 index_(0) {
135 CHECK_GE(bucket_ranges_->bucket_count(), counts_size_); 366 CHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
136 SkipEmptyBuckets(); 367 SkipEmptyBuckets();
137 } 368 }
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
184 return; 415 return;
185 416
186 while (index_ < counts_size_) { 417 while (index_ < counts_size_) {
187 if (subtle::NoBarrier_Load(&counts_[index_]) != 0) 418 if (subtle::NoBarrier_Load(&counts_[index_]) != 0)
188 return; 419 return;
189 index_++; 420 index_++;
190 } 421 }
191 } 422 }
192 423
193 } // namespace base 424 } // namespace base
OLDNEW
« no previous file with comments | « base/metrics/sample_vector.h ('k') | base/metrics/sample_vector_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698