| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/metrics/sample_vector.h" | 5 #include "base/metrics/sample_vector.h" |
| 6 | 6 |
| 7 #include "base/lazy_instance.h" | |
| 8 #include "base/logging.h" | 7 #include "base/logging.h" |
| 9 #include "base/memory/ptr_util.h" | 8 #include "base/metrics/bucket_ranges.h" |
| 10 #include "base/metrics/persistent_memory_allocator.h" | |
| 11 #include "base/synchronization/lock.h" | |
| 12 #include "base/threading/platform_thread.h" | |
| 13 | |
| 14 // This SampleVector makes use of the single-sample embedded in the base | |
| 15 // HistogramSamples class. If the count is non-zero then there is guaranteed | |
| 16 // (within the bounds of "eventual consistency") to be no allocated external | |
| 17 // storage. Once the full counts storage is allocated, the single-sample must | |
| 18 // be extracted and disabled. | |
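The comment block above describes the single-sample optimization: a histogram that has only ever recorded one distinct value keeps that value packed into an atomic word inside HistogramSamples instead of allocating the full counts array. A minimal sketch of the idea, assuming a 16-bit bucket index and a 16-bit count packed into one 32-bit atomic (the real AtomicSingleSample differs in detail and also supports extraction and disabling):

```cpp
// Illustrative sketch only, not the HistogramSamples implementation.
#include <atomic>
#include <cstdint>

class SingleSampleSketch {
 public:
  // Returns false when the sample cannot be absorbed here (already disabled,
  // a different bucket is stored, or the small counter would overflow); the
  // caller then falls back to the full counts array.
  bool Accumulate(uint16_t bucket, uint16_t count) {
    uint32_t expected = packed_.load(std::memory_order_relaxed);
    while (true) {
      if (expected == kDisabled)
        return false;
      const uint16_t cur_bucket = static_cast<uint16_t>(expected >> 16);
      const uint16_t cur_count = static_cast<uint16_t>(expected & 0xFFFF);
      if (cur_count != 0 && cur_bucket != bucket)
        return false;  // A different value is already being tracked.
      const uint32_t new_count = static_cast<uint32_t>(cur_count) + count;
      if (new_count > 0xFFFF)
        return false;  // Would overflow the packed counter.
      const uint32_t desired =
          (static_cast<uint32_t>(bucket) << 16) | new_count;
      if (desired == kDisabled)
        return false;  // Reserved as the "disabled" sentinel.
      if (packed_.compare_exchange_weak(expected, desired,
                                        std::memory_order_relaxed)) {
        return true;
      }
      // On failure, |expected| was reloaded with the current value; retry.
    }
  }

 private:
  static constexpr uint32_t kDisabled = 0xFFFFFFFFu;
  std::atomic<uint32_t> packed_{0};
};
```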
| 19 | 9 |
| 20 namespace base { | 10 namespace base { |
| 21 | 11 |
| 22 typedef HistogramBase::Count Count; | 12 typedef HistogramBase::Count Count; |
| 23 typedef HistogramBase::Sample Sample; | 13 typedef HistogramBase::Sample Sample; |
| 24 | 14 |
| 25 SampleVectorBase::SampleVectorBase(uint64_t id, | 15 SampleVector::SampleVector(const BucketRanges* bucket_ranges) |
| 26 const BucketRanges* bucket_ranges) | 16 : SampleVector(0, bucket_ranges) {} |
| 27 : HistogramSamples(id), bucket_ranges_(bucket_ranges) { | 17 |
| 18 SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges) |
| 19 : HistogramSamples(id), |
| 20 local_counts_(bucket_ranges->bucket_count()), |
| 21 counts_(&local_counts_[0]), |
| 22 counts_size_(local_counts_.size()), |
| 23 bucket_ranges_(bucket_ranges) { |
| 28 CHECK_GE(bucket_ranges_->bucket_count(), 1u); | 24 CHECK_GE(bucket_ranges_->bucket_count(), 1u); |
| 29 } | 25 } |
| 30 | 26 |
| 31 SampleVectorBase::SampleVectorBase(uint64_t id, | 27 SampleVector::SampleVector(uint64_t id, |
| 32 Metadata* meta, | 28 HistogramBase::AtomicCount* counts, |
| 33 const BucketRanges* bucket_ranges) | 29 size_t counts_size, |
| 34 : HistogramSamples(id, meta), bucket_ranges_(bucket_ranges) { | 30 Metadata* meta, |
| 31 const BucketRanges* bucket_ranges) |
| 32 : HistogramSamples(id, meta), |
| 33 counts_(counts), |
| 34 counts_size_(bucket_ranges->bucket_count()), |
| 35 bucket_ranges_(bucket_ranges) { |
| 36 CHECK_LE(bucket_ranges_->bucket_count(), counts_size); |
| 35 CHECK_GE(bucket_ranges_->bucket_count(), 1u); | 37 CHECK_GE(bucket_ranges_->bucket_count(), 1u); |
| 36 } | 38 } |
| 37 | 39 |
| 38 SampleVectorBase::~SampleVectorBase() {} | 40 SampleVector::~SampleVector() {} |
| 39 | 41 |
| 40 void SampleVectorBase::Accumulate(Sample value, Count count) { | 42 void SampleVector::Accumulate(Sample value, Count count) { |
| 41 const size_t bucket_index = GetBucketIndex(value); | 43 size_t bucket_index = GetBucketIndex(value); |
| 42 | 44 subtle::NoBarrier_AtomicIncrement(&counts_[bucket_index], count); |
| 43 // Handle the single-sample case. | 45 IncreaseSum(static_cast<int64_t>(count) * value); |
| 44 if (!counts()) { | 46 IncreaseRedundantCount(count); |
| 45 // Try to accumulate the parameters into the single-count entry. | |
| 46 if (AccumulateSingleSample(value, count, bucket_index)) { | |
| 47 // A race condition could lead to a new single-sample being accumulated | |
| 48 // above just after another thread executed the MountCountsStorage below. | |
| 49 // Since it is mounted, it could be mounted elsewhere and have values | |
| 50 // written to it. It's not allowed to have both a single-sample and | |
| 51 // entries in the counts array so move the single-sample. | |
| 52 if (counts()) | |
| 53 MoveSingleSampleToCounts(); | |
| 54 return; | |
| 55 } | |
| 56 | |
| 57 // Need real storage to store both what was in the single-sample plus the | |
| 58 // parameter information. | |
| 59 MountCountsStorageAndMoveSingleSample(); | |
| 60 } | |
| 61 | |
| 62 // Handle the multi-sample case. | |
| 63 subtle::NoBarrier_AtomicIncrement(&counts()[bucket_index], count); | |
| 64 IncreaseSumAndCount(static_cast<int64_t>(count) * value, count); | |
| 65 } | 47 } |
| 66 | 48 |
| 67 Count SampleVectorBase::GetCount(Sample value) const { | 49 Count SampleVector::GetCount(Sample value) const { |
| 68 return GetCountAtIndex(GetBucketIndex(value)); | 50 size_t bucket_index = GetBucketIndex(value); |
| 51 return subtle::NoBarrier_Load(&counts_[bucket_index]); |
| 69 } | 52 } |
| 70 | 53 |
| 71 Count SampleVectorBase::TotalCount() const { | 54 Count SampleVector::TotalCount() const { |
| 72 // Handle the single-sample case. | 55 Count count = 0; |
| 73 SingleSample sample = single_sample().Load(); | 56 for (size_t i = 0; i < counts_size_; i++) { |
| 74 if (sample.count != 0) | 57 count += subtle::NoBarrier_Load(&counts_[i]); |
| 75 return sample.count; | |
| 76 | |
| 77 // Handle the multi-sample case. | |
| 78 if (counts() || MountExistingCountsStorage()) { | |
| 79 Count count = 0; | |
| 80 size_t size = counts_size(); | |
| 81 const HistogramBase::AtomicCount* counts_array = counts(); | |
| 82 for (size_t i = 0; i < size; ++i) { | |
| 83 count += subtle::NoBarrier_Load(&counts_array[i]); | |
| 84 } | |
| 85 return count; | |
| 86 } | 58 } |
| 87 | 59 return count; |
| 88 // And the no-value case. | |
| 89 return 0; | |
| 90 } | 60 } |
| 91 | 61 |
| 92 Count SampleVectorBase::GetCountAtIndex(size_t bucket_index) const { | 62 Count SampleVector::GetCountAtIndex(size_t bucket_index) const { |
| 93 DCHECK(bucket_index < counts_size()); | 63 DCHECK(bucket_index < counts_size_); |
| 94 | 64 return subtle::NoBarrier_Load(&counts_[bucket_index]); |
| 95 // Handle the single-sample case. | |
| 96 SingleSample sample = single_sample().Load(); | |
| 97 if (sample.count != 0) | |
| 98 return sample.bucket == bucket_index ? sample.count : 0; | |
| 99 | |
| 100 // Handle the multi-sample case. | |
| 101 if (counts() || MountExistingCountsStorage()) | |
| 102 return subtle::NoBarrier_Load(&counts()[bucket_index]); | |
| 103 | |
| 104 // And the no-value case. | |
| 105 return 0; | |
| 106 } | 65 } |
| 107 | 66 |
| 108 std::unique_ptr<SampleCountIterator> SampleVectorBase::Iterator() const { | 67 std::unique_ptr<SampleCountIterator> SampleVector::Iterator() const { |
| 109 // Handle the single-sample case. | 68 return std::unique_ptr<SampleCountIterator>( |
| 110 SingleSample sample = single_sample().Load(); | 69 new SampleVectorIterator(counts_, counts_size_, bucket_ranges_)); |
| 111 if (sample.count != 0) { | |
| 112 return MakeUnique<SingleSampleIterator>( | |
| 113 bucket_ranges_->range(sample.bucket), | |
| 114 bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket); | |
| 115 } | |
| 116 | |
| 117 // Handle the multi-sample case. | |
| 118 if (counts() || MountExistingCountsStorage()) { | |
| 119 return MakeUnique<SampleVectorIterator>(counts(), counts_size(), | |
| 120 bucket_ranges_); | |
| 121 } | |
| 122 | |
| 123 // And the no-value case. | |
| 124 return MakeUnique<SampleVectorIterator>(nullptr, 0, bucket_ranges_); | |
| 125 } | 70 } |
| 126 | 71 |
| 127 bool SampleVectorBase::AddSubtractImpl(SampleCountIterator* iter, | 72 bool SampleVector::AddSubtractImpl(SampleCountIterator* iter, |
| 128 HistogramSamples::Operator op) { | 73 HistogramSamples::Operator op) { |
| 129 // Stop now if there's nothing to do. | |
| 130 if (iter->Done()) | |
| 131 return true; | |
| 132 | |
| 133 // Get the first value and its index. | |
| 134 HistogramBase::Sample min; | 74 HistogramBase::Sample min; |
| 135 HistogramBase::Sample max; | 75 HistogramBase::Sample max; |
| 136 HistogramBase::Count count; | 76 HistogramBase::Count count; |
| 137 iter->Get(&min, &max, &count); | |
| 138 size_t dest_index = GetBucketIndex(min); | |
| 139 | 77 |
| 140 // The destination must be a superset of the source meaning that though the | 78 // Go through the iterator and add the counts into correct bucket. |
| 141 // incoming ranges will find an exact match, the incoming bucket-index, if | 79 size_t index = 0; |
| 142 // it exists, may be offset from the destination bucket-index. Calculate | 80 while (index < counts_size_ && !iter->Done()) { |
| 143 // that offset of the passed iterator; there are no overflow checks | 81 iter->Get(&min, &max, &count); |
| 144 // because 2's complement math will work it out in the end. | 82 if (min == bucket_ranges_->range(index) && |
| 145 // | 83 max == bucket_ranges_->range(index + 1)) { |
| 146 // Because iter->GetBucketIndex() always returns the same true or false result for | 83 max == bucket_ranges_->range(index + 1)) { |
| 147 // a given iterator object, |index_offset| is either set here and used below, | 85 subtle::NoBarrier_AtomicIncrement( |
| 148 // or never set and never used. The compiler doesn't know this, though, which | 86 &counts_[index], op == HistogramSamples::ADD ? count : -count); |
| 149 // is why it's necessary to initialize it to something. | 87 iter->Next(); |
| 150 size_t index_offset = 0; | 88 } else if (min > bucket_ranges_->range(index)) { |
| 151 size_t iter_index; | 89 // Sample is larger than current bucket range. Try next. |
| 152 if (iter->GetBucketIndex(&iter_index)) | 90 index++; |
| 153 index_offset = dest_index - iter_index; | 91 } else { |
| 154 if (dest_index >= counts_size()) | 92 // Sample is smaller than current bucket range. We scan buckets from |
| 155 return false; | 93 // smallest to largest, so the sample value must be invalid. |
| 156 | 94 return false; |
| 157 // Post-increment. Information about the current sample is not available | |
| 158 // after this point. | |
| 159 iter->Next(); | |
| 160 | |
| 161 // Single-value storage is possible if there is no counts storage and the | |
| 162 // retrieved entry is the only one in the iterator. | |
| 163 if (!counts()) { | |
| 164 if (iter->Done()) { | |
| 165 // Don't call AccumulateSingleSample because that updates sum and count | |
| 166 // which was already done by the caller of this method. | |
| 167 if (single_sample().Accumulate( | |
| 168 dest_index, op == HistogramSamples::ADD ? count : -count)) { | |
| 169 // Handle race-condition that mounted counts storage between above and | |
| 170 // here. | |
| 171 if (counts()) | |
| 172 MoveSingleSampleToCounts(); | |
| 173 return true; | |
| 174 } | |
| 175 } | 95 } |
| 176 | |
| 177 // The counts storage will be needed to hold the multiple incoming values. | |
| 178 MountCountsStorageAndMoveSingleSample(); | |
| 179 } | 96 } |
| 180 | 97 |
| 181 // Go through the iterator and add the counts into correct bucket. | 98 return iter->Done(); |
| 182 while (true) { | |
| 183 // Ensure that the sample's min/max match the ranges min/max. | |
| 184 if (min != bucket_ranges_->range(dest_index) || | |
| 185 max != bucket_ranges_->range(dest_index + 1)) { | |
| 186 NOTREACHED() << "sample=" << min << "," << max | |
| 187 << "; range=" << bucket_ranges_->range(dest_index) << "," | |
| 188 << bucket_ranges_->range(dest_index + 1); | |
| 189 return false; | |
| 190 } | |
| 191 | |
| 192 // Sample's bucket matches exactly. Adjust count. | |
| 193 subtle::NoBarrier_AtomicIncrement( | |
| 194 &counts()[dest_index], op == HistogramSamples::ADD ? count : -count); | |
| 195 | |
| 196 // Advance to the next iterable sample. See comments above for how | |
| 197 // everything works. | |
| 198 if (iter->Done()) | |
| 199 return true; | |
| 200 iter->Get(&min, &max, &count); | |
| 201 if (iter->GetBucketIndex(&iter_index)) { | |
| 202 // Destination bucket is a known offset from the source bucket. | |
| 203 dest_index = iter_index + index_offset; | |
| 204 } else { | |
| 205 // Destination bucket has to be determined anew each time. | |
| 206 dest_index = GetBucketIndex(min); | |
| 207 } | |
| 208 if (dest_index >= counts_size()) | |
| 209 return false; | |
| 210 iter->Next(); | |
| 211 } | |
| 212 } | 99 } |
| 213 | 100 |
| 214 // Use simple binary search. This is very general, but there are better | 101 // Use simple binary search. This is very general, but there are better |
| 215 // approaches if we knew that the buckets were linearly distributed. | 102 // approaches if we knew that the buckets were linearly distributed. |
| 216 size_t SampleVectorBase::GetBucketIndex(Sample value) const { | 103 size_t SampleVector::GetBucketIndex(Sample value) const { |
| 217 size_t bucket_count = bucket_ranges_->bucket_count(); | 104 size_t bucket_count = bucket_ranges_->bucket_count(); |
| 218 CHECK_GE(bucket_count, 1u); | 105 CHECK_GE(bucket_count, 1u); |
| 219 CHECK_GE(value, bucket_ranges_->range(0)); | 106 CHECK_GE(value, bucket_ranges_->range(0)); |
| 220 CHECK_LT(value, bucket_ranges_->range(bucket_count)); | 107 CHECK_LT(value, bucket_ranges_->range(bucket_count)); |
| 221 | 108 |
| 222 size_t under = 0; | 109 size_t under = 0; |
| 223 size_t over = bucket_count; | 110 size_t over = bucket_count; |
| 224 size_t mid; | 111 size_t mid; |
| 225 do { | 112 do { |
| 226 DCHECK_GE(over, under); | 113 DCHECK_GE(over, under); |
| 227 mid = under + (over - under)/2; | 114 mid = under + (over - under)/2; |
| 228 if (mid == under) | 115 if (mid == under) |
| 229 break; | 116 break; |
| 230 if (bucket_ranges_->range(mid) <= value) | 117 if (bucket_ranges_->range(mid) <= value) |
| 231 under = mid; | 118 under = mid; |
| 232 else | 119 else |
| 233 over = mid; | 120 over = mid; |
| 234 } while (true); | 121 } while (true); |
| 235 | 122 |
| 236 DCHECK_LE(bucket_ranges_->range(mid), value); | 123 DCHECK_LE(bucket_ranges_->range(mid), value); |
| 237 CHECK_GT(bucket_ranges_->range(mid + 1), value); | 124 CHECK_GT(bucket_ranges_->range(mid + 1), value); |
| 238 return mid; | 125 return mid; |
| 239 } | 126 } |
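The comment at the top of GetBucketIndex() notes that better approaches exist when the bucket layout is known. As a hedged illustration (a hypothetical helper, not part of this CL), a linear histogram with fixed-width buckets could compute the index in constant time instead of binary-searching:

```cpp
// Hypothetical constant-time lookup for fixed-width (linear) buckets.
// Assumes range(i) == minimum + i * bucket_width for every bucket boundary.
size_t LinearBucketIndex(HistogramBase::Sample value,
                         HistogramBase::Sample minimum,
                         size_t bucket_width) {
  DCHECK_GE(value, minimum);
  DCHECK_GT(bucket_width, 0u);
  return static_cast<size_t>(value - minimum) / bucket_width;
}
```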
| 240 | 127 |
| 241 void SampleVectorBase::MoveSingleSampleToCounts() { | |
| 242 DCHECK(counts()); | |
| 243 | |
| 244 // Disable the single-sample since there is now counts storage for the data. | |
| 245 SingleSample sample = single_sample().Extract(/*disable=*/true); | |
| 246 | |
| 247 // Stop here if there is no "count" as trying to find the bucket index of | |
| 248 // an invalid (including zero) "value" will crash. | |
| 249 if (sample.count == 0) | |
| 250 return; | |
| 251 | |
| 252 // Move the value into storage. Sum and redundant-count already account | |
| 253 // for this entry so no need to call IncreaseSumAndCount(). | |
| 254 subtle::NoBarrier_AtomicIncrement(&counts()[sample.bucket], sample.count); | |
| 255 } | |
| 256 | |
| 257 void SampleVectorBase::MountCountsStorageAndMoveSingleSample() { | |
| 258 // There are many SampleVector objects and the lock is needed very | |
| 259 // infrequently (just when advancing from single-sample to multi-sample) so | |
| 260 // define a single, global lock that all can use. This lock only prevents | |
| 261 // concurrent entry into the code below; access and updates to |counts_| | |
| 262 // still require atomic operations. | |
| 263 static LazyInstance<Lock>::Leaky counts_lock = LAZY_INSTANCE_INITIALIZER; | |
| 264 if (subtle::NoBarrier_Load(&counts_) == 0) { | |
| 265 AutoLock lock(counts_lock.Get()); | |
| 266 if (subtle::NoBarrier_Load(&counts_) == 0) { | |
| 267 // Create the actual counts storage while the above lock is acquired. | |
| 268 HistogramBase::Count* counts = CreateCountsStorageWhileLocked(); | |
| 269 DCHECK(counts); | |
| 270 | |
| 271 // Point |counts_| to the newly created storage. This is done while | |
| 272 // locked to prevent possible concurrent calls to CreateCountsStorage | |
| 273 // but, between that call and here, other threads could notice the | |
| 274 // existence of the storage and race with this to set_counts(). That's | |
| 275 // okay because (a) it's atomic and (b) it always writes the same value. | |
| 276 set_counts(counts); | |
| 277 } | |
| 278 } | |
| 279 | |
| 280 // Move any single-sample into the newly mounted storage. | |
| 281 MoveSingleSampleToCounts(); | |
| 282 } | |
| 283 | |
| 284 SampleVector::SampleVector(const BucketRanges* bucket_ranges) | |
| 285 : SampleVector(0, bucket_ranges) {} | |
| 286 | |
| 287 SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges) | |
| 288 : SampleVectorBase(id, bucket_ranges) {} | |
| 289 | |
| 290 SampleVector::~SampleVector() {} | |
| 291 | |
| 292 bool SampleVector::MountExistingCountsStorage() const { | |
| 293 // There is never any existing storage other than what is already in use. | |
| 294 return counts() != nullptr; | |
| 295 } | |
| 296 | |
| 297 HistogramBase::AtomicCount* SampleVector::CreateCountsStorageWhileLocked() { | |
| 298 local_counts_.resize(counts_size()); | |
| 299 return &local_counts_[0]; | |
| 300 } | |
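For reference, a small usage sketch of the heap-backed SampleVector shown above; the BucketRanges values here are made up for illustration, and real histograms normally build their ranges through helpers such as Histogram::InitializeBucketRanges:

```cpp
// Illustrative only; not code from this CL.
BucketRanges ranges(5);  // Five boundaries define four buckets.
ranges.set_range(0, 1);
ranges.set_range(1, 10);
ranges.set_range(2, 100);
ranges.set_range(3, 1000);
ranges.set_range(4, HistogramBase::kSampleType_MAX);

SampleVector samples(/*id=*/0x1234, &ranges);
samples.Accumulate(/*value=*/42, /*count=*/3);  // Falls in the [10, 100) bucket.
samples.Accumulate(/*value=*/42, /*count=*/1);  // Same bucket again.
// At this point TotalCount() == 4 and GetCount(42) == 4.
```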
| 301 | |
| 302 PersistentSampleVector::PersistentSampleVector( | |
| 303 uint64_t id, | |
| 304 const BucketRanges* bucket_ranges, | |
| 305 Metadata* meta, | |
| 306 const DelayedPersistentAllocation& counts) | |
| 307 : SampleVectorBase(id, meta, bucket_ranges), persistent_counts_(counts) { | |
| 308 // Only mount the full storage if the single-sample has been disabled. | |
| 309 // Otherwise, it is possible for this object instance to start using (empty) | |
| 310 // storage that was created incidentally while another instance continues to | |
| 311 // update to the single sample. This "incidental creation" can happen because | |
| 312 // the memory is a DelayedPersistentAllocation which allows multiple memory | |
| 313 // blocks within it and applies an all-or-nothing approach to the allocation. | |
| 314 // Thus, a request elsewhere for one of the _other_ blocks would make _this_ | |
| 315 // block available even though nothing has explicitly requested it. | |
| 316 // | |
| 317 // Note that it's not possible for the ctor to mount existing storage and | |
| 318 // move any single-sample to it because sometimes the persistent memory is | |
| 319 // read-only. Only non-const methods (which assume that memory is read/write) | |
| 320 // can do that. | |
| 321 if (single_sample().IsDisabled()) { | |
| 322 bool success = MountExistingCountsStorage(); | |
| 323 DCHECK(success); | |
| 324 } | |
| 325 } | |
| 326 | |
| 327 PersistentSampleVector::~PersistentSampleVector() {} | |
| 328 | |
| 329 bool PersistentSampleVector::MountExistingCountsStorage() const { | |
| 330 // There is no early exit if counts is not yet mounted because, given that | |
| 331 // this is a virtual function, it's more efficient to do that at the call- | |
| 332 // site. There is no danger, however, should this get called anyway (perhaps | |
| 333 // because of a race condition) because at worst the |counts_| value would | |
| 334 // be over-written (in an atomic manner) with the exact same address. | |
| 335 | |
| 336 if (!persistent_counts_.reference()) | |
| 337 return false; // Nothing to mount. | |
| 338 | |
| 339 // Mount the counts array in position. | |
| 340 set_counts( | |
| 341 static_cast<HistogramBase::AtomicCount*>(persistent_counts_.Get())); | |
| 342 return true; | |
| 343 } | |
| 344 | |
| 345 HistogramBase::AtomicCount* | |
| 346 PersistentSampleVector::CreateCountsStorageWhileLocked() { | |
| 347 void* mem = persistent_counts_.Get(); | |
| 348 if (!mem) { | |
| 349 // The above shouldn't fail but can if Bad Things(tm) are occurring in the | |
| 350 // persistent allocator. Crashing isn't a good option so instead just | |
| 351 // allocate something from the heap and return that. There will be no | |
| 352 // sharing or persistence but worse things are already happening. | |
| 353 return new HistogramBase::AtomicCount[counts_size()]; | |
| 354 } | |
| 355 | |
| 356 return static_cast<HistogramBase::AtomicCount*>(mem); | |
| 357 } | |
| 358 | |
| 359 SampleVectorIterator::SampleVectorIterator( | 128 SampleVectorIterator::SampleVectorIterator( |
| 360 const std::vector<HistogramBase::AtomicCount>* counts, | 129 const std::vector<HistogramBase::AtomicCount>* counts, |
| 361 const BucketRanges* bucket_ranges) | 130 const BucketRanges* bucket_ranges) |
| 362 : counts_(&(*counts)[0]), | 131 : counts_(&(*counts)[0]), |
| 363 counts_size_(counts->size()), | 132 counts_size_(counts->size()), |
| 364 bucket_ranges_(bucket_ranges), | 133 bucket_ranges_(bucket_ranges), |
| 365 index_(0) { | 134 index_(0) { |
| 366 CHECK_GE(bucket_ranges_->bucket_count(), counts_size_); | 135 CHECK_GE(bucket_ranges_->bucket_count(), counts_size_); |
| 367 SkipEmptyBuckets(); | 136 SkipEmptyBuckets(); |
| 368 } | 137 } |
| (...skipping 46 matching lines...) | |
| 415 return; | 184 return; |
| 416 | 185 |
| 417 while (index_ < counts_size_) { | 186 while (index_ < counts_size_) { |
| 418 if (subtle::NoBarrier_Load(&counts_[index_]) != 0) | 187 if (subtle::NoBarrier_Load(&counts_[index_]) != 0) |
| 419 return; | 188 return; |
| 420 index_++; | 189 index_++; |
| 421 } | 190 } |
| 422 } | 191 } |
| 423 | 192 |
| 424 } // namespace base | 193 } // namespace base |