Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/metrics/sample_vector.h" | 5 #include "base/metrics/sample_vector.h" |
| 6 | 6 |
| 7 #include "base/lazy_instance.h" | |
| 7 #include "base/logging.h" | 8 #include "base/logging.h" |
| 8 #include "base/metrics/bucket_ranges.h" | 9 #include "base/memory/ptr_util.h" |
| 10 #include "base/metrics/persistent_memory_allocator.h" | |
| 11 #include "base/synchronization/lock.h" | |
| 12 #include "base/threading/platform_thread.h" | |
| 13 | |
| 14 // This SampleVector makes use of the single-sample embedded in the base | |
| 15 // HistogramSamples class. If the count is non-zero then there is guaranteed | |
| 16 // (within the bounds of "eventual consistency") to be no allocated external | |
| 17 // storage. Once the full counts storage is allocated, the single-sample must | |
| 18 // be extracted and disabled. | |
| 9 | 19 |
| 10 namespace base { | 20 namespace base { |
| 11 | 21 |
| 12 typedef HistogramBase::Count Count; | 22 typedef HistogramBase::Count Count; |
| 13 typedef HistogramBase::Sample Sample; | 23 typedef HistogramBase::Sample Sample; |
| 14 | 24 |
| 15 SampleVector::SampleVector(const BucketRanges* bucket_ranges) | 25 SampleVectorBase::SampleVectorBase(uint64_t id, |
| 16 : SampleVector(0, bucket_ranges) {} | 26 const BucketRanges* bucket_ranges) |
| 17 | 27 : HistogramSamples(id), bucket_ranges_(bucket_ranges) { |
| 18 SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges) | |
| 19 : HistogramSamples(id), | |
| 20 local_counts_(bucket_ranges->bucket_count()), | |
| 21 counts_(&local_counts_[0]), | |
| 22 counts_size_(local_counts_.size()), | |
| 23 bucket_ranges_(bucket_ranges) { | |
| 24 CHECK_GE(bucket_ranges_->bucket_count(), 1u); | 28 CHECK_GE(bucket_ranges_->bucket_count(), 1u); |
| 25 } | 29 } |
| 26 | 30 |
| 27 SampleVector::SampleVector(uint64_t id, | 31 SampleVectorBase::SampleVectorBase(uint64_t id, |
| 28 HistogramBase::AtomicCount* counts, | 32 Metadata* meta, |
| 29 size_t counts_size, | 33 const BucketRanges* bucket_ranges) |
| 30 Metadata* meta, | 34 : HistogramSamples(id, meta), bucket_ranges_(bucket_ranges) { |
| 31 const BucketRanges* bucket_ranges) | |
| 32 : HistogramSamples(id, meta), | |
| 33 counts_(counts), | |
| 34 counts_size_(bucket_ranges->bucket_count()), | |
| 35 bucket_ranges_(bucket_ranges) { | |
| 36 CHECK_LE(bucket_ranges_->bucket_count(), counts_size_); | |
| 37 CHECK_GE(bucket_ranges_->bucket_count(), 1u); | 35 CHECK_GE(bucket_ranges_->bucket_count(), 1u); |
| 38 } | 36 } |
| 39 | 37 |
| 40 SampleVector::~SampleVector() {} | 38 SampleVectorBase::~SampleVectorBase() {} |
| 41 | 39 |
| 42 void SampleVector::Accumulate(Sample value, Count count) { | 40 void SampleVectorBase::Accumulate(Sample value, Count count) { |
| 43 size_t bucket_index = GetBucketIndex(value); | 41 const size_t bucket_index = GetBucketIndex(value); |
| 44 subtle::NoBarrier_AtomicIncrement(&counts_[bucket_index], count); | 42 |
| 45 IncreaseSum(static_cast<int64_t>(count) * value); | 43 // Handle the single-sample case. |
| 46 IncreaseRedundantCount(count); | 44 if (!counts()) { |
| 47 } | 45 // Try to accumulate the parameters into the single-count entry. |
| 48 | 46 if (AccumulateSingleSample(value, count, bucket_index)) { |
| 49 Count SampleVector::GetCount(Sample value) const { | 47 // A race condition could lead to a new single-sample being accumulated |
| 50 size_t bucket_index = GetBucketIndex(value); | 48 // above just after another thread executed the MountCountsStorage below. |
| 51 return subtle::NoBarrier_Load(&counts_[bucket_index]); | 49 // Since it is mounted, it could be mounted elsewhere and have values |
| 52 } | 50 // written to it. It's not allowed to have both a single-sample and |
| 53 | 51 // entries in the counts array so move the single-sample. |
| 54 Count SampleVector::TotalCount() const { | 52 if (counts()) |
| 55 Count count = 0; | 53 MoveSingleSampleToCounts(); |
| 56 for (size_t i = 0; i < counts_size_; i++) { | 54 return; |
| 57 count += subtle::NoBarrier_Load(&counts_[i]); | 55 } |
| 58 } | 56 |
| 59 return count; | 57 // Need real storage to store both what was in the single-sample plus the |
| 60 } | 58 // parameter information. |
| 61 | 59 MountCountsStorageAndMoveSingleSample(); |
| 62 Count SampleVector::GetCountAtIndex(size_t bucket_index) const { | 60 } |
| 63 DCHECK(bucket_index < counts_size_); | 61 |
| 64 return subtle::NoBarrier_Load(&counts_[bucket_index]); | 62 // Handle the multi-sample case. |
| 65 } | 63 subtle::NoBarrier_AtomicIncrement(&counts()[bucket_index], count); |
| 66 | 64 IncreaseSumAndCount(static_cast<int64_t>(count) * value, count); |
| 67 std::unique_ptr<SampleCountIterator> SampleVector::Iterator() const { | 65 } |
| 68 return std::unique_ptr<SampleCountIterator>( | 66 |
| 69 new SampleVectorIterator(counts_, counts_size_, bucket_ranges_)); | 67 Count SampleVectorBase::GetCount(Sample value) const { |
| 70 } | 68 return GetCountAtIndex(GetBucketIndex(value)); |
| 71 | 69 } |
| 72 bool SampleVector::AddSubtractImpl(SampleCountIterator* iter, | 70 |
| 73 HistogramSamples::Operator op) { | 71 Count SampleVectorBase::TotalCount() const { |
| 72 // Handle the single-sample case. | |
| 73 SingleSample sample = single_sample().Load(); | |
| 74 if (sample.count != 0) | |
| 75 return sample.count; | |
| 76 | |
| 77 // Handle the multi-sample case. | |
| 78 if (counts() || MountExistingCountsStorage()) { | |
| 79 Count count = 0; | |
| 80 const HistogramBase::AtomicCount* counts_array = counts(); | |
| 81 for (size_t i = counts_size(); i > 0; --i, ++counts_array) { | |
|
> Alexei Svitkine (slow), 2017/04/20 22:11:34:
> Nit: Did you switch the loop structure just so we
>
> bcwhite, 2017/04/21 13:25:31:
> Done.
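For context, the single-sample mechanism this loop coexists with can be sketched in isolation. Below, a (bucket, count) pair packed into one atomic 64-bit word stands in for the `single_sample()` helper used throughout the diff; `SingleSlot` and the 32/32 packing are invented for illustration and are not the Chromium API.

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>
#include <vector>

// Hypothetical stand-in for the single-sample slot: high 32 bits hold the
// bucket index, low 32 bits hold the count, updated as one atomic word.
struct SingleSlot {
  std::atomic<uint64_t> bits{0};

  // Adds |count| to the slot; fails if a different bucket already
  // occupies it, which signals that real counts storage is needed.
  bool Accumulate(uint32_t bucket, uint32_t count) {
    uint64_t cur = bits.load(std::memory_order_relaxed);
    for (;;) {
      const uint32_t cur_bucket = static_cast<uint32_t>(cur >> 32);
      const uint32_t cur_count = static_cast<uint32_t>(cur);
      if (cur_count != 0 && cur_bucket != bucket)
        return false;
      const uint64_t next = (static_cast<uint64_t>(bucket) << 32) |
                            static_cast<uint64_t>(cur_count + count);
      if (bits.compare_exchange_weak(cur, next, std::memory_order_relaxed))
        return true;
    }
  }
};

int main() {
  SingleSlot slot;
  std::vector<std::atomic<int>> counts;  // Not allocated until needed.

  slot.Accumulate(/*bucket=*/3, /*count=*/2);  // Fits in the slot alone.

  if (!slot.Accumulate(/*bucket=*/5, /*count=*/1)) {
    // Promotion, mirroring MountCountsStorageAndMoveSingleSample(): mount
    // real storage, then move the single-sample into it. (The real code
    // also permanently disables the slot; omitted here.)
    counts = std::vector<std::atomic<int>>(8);
    const uint64_t old = slot.bits.exchange(0);
    counts[old >> 32] += static_cast<int>(static_cast<uint32_t>(old));
    counts[5] += 1;
  }
  std::cout << counts[3] << " " << counts[5] << "\n";  // Prints "2 1".
}
```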
| 82 count += subtle::NoBarrier_Load(counts_array); | |
| 83 } | |
| 84 return count; | |
| 85 } | |
| 86 | |
| 87 // And the no-value case. | |
| 88 return 0; | |
| 89 } | |
| 90 | |
| 91 Count SampleVectorBase::GetCountAtIndex(size_t bucket_index) const { | |
| 92 DCHECK(bucket_index < counts_size()); | |
| 93 | |
| 94 // Handle the single-sample case. | |
| 95 SingleSample sample = single_sample().Load(); | |
| 96 if (sample.count != 0) | |
| 97 return sample.bucket == bucket_index ? sample.count : 0; | |
| 98 | |
| 99 // Handle the multi-sample case. | |
| 100 if (counts() || MountExistingCountsStorage()) | |
| 101 return subtle::NoBarrier_Load(&counts()[bucket_index]); | |
| 102 | |
| 103 // And the no-value case. | |
| 104 return 0; | |
| 105 } | |
| 106 | |
| 107 std::unique_ptr<SampleCountIterator> SampleVectorBase::Iterator() const { | |
| 108 // Handle the single-sample case. | |
| 109 SingleSample sample = single_sample().Load(); | |
| 110 if (sample.count != 0) { | |
| 111 return MakeUnique<SingleSampleIterator>( | |
| 112 bucket_ranges_->range(sample.bucket), | |
| 113 bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket); | |
| 114 } | |
| 115 | |
| 116 // Handle the multi-sample case. | |
| 117 if (counts() || MountExistingCountsStorage()) { | |
| 118 return MakeUnique<SampleVectorIterator>(counts(), counts_size(), | |
| 119 bucket_ranges_); | |
| 120 } | |
| 121 | |
| 122 // And the no-value case. | |
| 123 return MakeUnique<SampleVectorIterator>(nullptr, 0, bucket_ranges_); | |
| 124 } | |
| 125 | |
| 126 bool SampleVectorBase::AddSubtractImpl(SampleCountIterator* iter, | |
| 127 HistogramSamples::Operator op) { | |
| 74 HistogramBase::Sample min; | 128 HistogramBase::Sample min; |
| 75 HistogramBase::Sample max; | 129 HistogramBase::Sample max; |
| 76 HistogramBase::Count count; | 130 HistogramBase::Count count; |
|
> Alexei Svitkine (slow), 2017/04/20 22:11:34:
> Nit: Move these closer to where they're used - ie.
>
> bcwhite, 2017/04/21 13:25:31:
> Done.
| 131 size_t index_offset = 0; // So compiler doesn't see maybe-uninitialized. | |
| 132 size_t iter_index; | |
|
> Alexei Svitkine (slow), 2017/04/20 22:11:34:
> Nit: This one and index_offset can be declared rig
>
> bcwhite, 2017/04/21 13:25:31:
> Done.
| 133 size_t dest_index; | |
| 134 | |
| 135 // Stop now if there's nothing to do. | |
| 136 if (iter->Done()) | |
| 137 return true; | |
| 138 | |
| 139 // Get the first value and its index. | |
| 140 iter->Get(&min, &max, &count); | |
| 141 dest_index = GetBucketIndex(min); | |
|
> Alexei Svitkine (slow), 2017/04/20 22:11:34:
> Nit: This can be declared on this line rather than
>
> bcwhite, 2017/04/21 13:25:31:
> Done.
| 142 | |
| 143 // The destination must be a superset of the source meaning that though the | |
| 144 // incoming ranges will find an exact match, the incoming bucket-index, if | |
| 145 // it exists, may be offset from the destination bucket-index. Calculate | |
| 146 // that offset of the passed iterator; there are no overflow checks | |
| 147 // because 2's complement math will work it out in the end. | |
| 148 // | |
| 149 // Because GetBucketIndex() always returns the same true or false result for | |
| 150 // a given iterator object, index_offset is either set here and used below, | |
|
> Alexei Svitkine (slow), 2017/04/20 22:11:34:
> Nit: |index_offset|
>
> bcwhite, 2017/04/21 13:25:32:
> Done.
| 151 // or never set and never used. The compiler doesn't know this, though, which | |
| 152 // is why it's necessary to initialize it to zero above. | |
| 153 if (iter->GetBucketIndex(&iter_index)) | |
| 154 index_offset = dest_index - iter_index; | |
|
> Alexei Svitkine (slow), 2017/04/20 22:11:34:
> So this logic only works if it's the same number o
>
> bcwhite, 2017/04/21 13:25:31:
> It'll work with different numbers of buckets so lo
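Both sides of the truncated exchange above can be made concrete with a worked example of the offset arithmetic (the bucket layouts here are invented for illustration):

```cpp
#include <cstddef>
#include <iostream>

int main() {
  // Destination boundaries {0, 1, 5, 10, 50}; the source iterator covers
  // only {1, 5, 10}, i.e. a contiguous subset of the destination buckets.
  size_t dest_index = 1;   // First sample (min == 1) -> dest bucket [1,5).
  size_t iter_index = 0;   // Same sample as numbered by the source.
  const size_t index_offset = dest_index - iter_index;  // == 1

  // Every later sample maps with one addition instead of a binary search:
  iter_index = 1;                          // Source bucket [5,10).
  dest_index = iter_index + index_offset;  // == 2, dest bucket [5,10).
  std::cout << dest_index << "\n";

  // The "no overflow checks" remark: even if the subtraction wraps (a
  // source index larger than the destination index), unsigned arithmetic
  // cancels the wrap in the later addition.
  const size_t wrapped = size_t(0) - size_t(3);  // Huge value, by design.
  std::cout << size_t(5) + wrapped << "\n";      // Prints 2.
}
```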
| 155 if (dest_index >= counts_size()) | |
| 156 return false; | |
| 157 | |
| 158 // Post-increment. Information about the current sample is not available | |
| 159 // after this point. | |
| 160 iter->Next(); | |
| 161 | |
| 162 // Single-value storage is possible if there is no counts storage and the | |
| 163 // retrieved entry is the only one in the iterator. | |
| 164 if (!counts()) { | |
| 165 if (iter->Done()) { | |
| 166 // Don't call AccumulateSingleSample because that updates sum and count | |
| 167 // which the caller of this method has already done. | |
| 168 if (single_sample().Accumulate( | |
| 169 dest_index, op == HistogramSamples::ADD ? count : -count)) { | |
| 170 // Handle race-condition that mounted counts storage between above and | |
| 171 // here. | |
| 172 if (counts()) | |
| 173 MoveSingleSampleToCounts(); | |
| 174 return true; | |
| 175 } | |
| 176 } | |
| 177 | |
| 178 // The counts storage will be needed to hold the multiple incoming values. | |
| 179 MountCountsStorageAndMoveSingleSample(); | |
| 180 } | |
| 77 | 181 |
| 78 // Go through the iterator and add the counts into correct bucket. | 182 // Go through the iterator and add the counts into correct bucket. |
| 79 size_t index = 0; | 183 while (true) { |
| 80 while (index < counts_size_ && !iter->Done()) { | 184 // Ensure that the sample's min/max match the ranges min/max. |
| 185 if (min != bucket_ranges_->range(dest_index) || | |
| 186 max != bucket_ranges_->range(dest_index + 1)) { | |
| 187 NOTREACHED() << "sample=" << min << "," << max | |
| 188 << "; range=" << bucket_ranges_->range(dest_index) << "," | |
| 189 << bucket_ranges_->range(dest_index + 1); | |
| 190 return false; | |
| 191 } | |
| 192 | |
| 193 // Sample's bucket matches exactly. Adjust count. | |
| 194 subtle::NoBarrier_AtomicIncrement( | |
| 195 &counts()[dest_index], op == HistogramSamples::ADD ? count : -count); | |
| 196 | |
| 197 // Advance to the next iterable sample. See comments above for how | |
| 198 // everything works. | |
| 199 if (iter->Done()) | |
| 200 return true; | |
| 81 iter->Get(&min, &max, &count); | 201 iter->Get(&min, &max, &count); |
| 82 if (min == bucket_ranges_->range(index) && | 202 if (iter->GetBucketIndex(&iter_index)) { |
| 83 max == bucket_ranges_->range(index + 1)) { | 203 // Destination bucket is a known offset from the source bucket. |
| 84 // Sample matches this bucket! | 204 dest_index = iter_index + index_offset; |
| 85 subtle::NoBarrier_AtomicIncrement( | |
| 86 &counts_[index], op == HistogramSamples::ADD ? count : -count); | |
| 87 iter->Next(); | |
| 88 } else if (min > bucket_ranges_->range(index)) { | |
| 89 // Sample is larger than current bucket range. Try next. | |
| 90 index++; | |
| 91 } else { | 205 } else { |
| 92 // Sample is smaller than current bucket range. We scan buckets from | 206 // Destination bucket has to be determined anew each time. |
| 93 // smallest to largest, so the sample value must be invalid. | 207 dest_index = GetBucketIndex(min); |
| 208 } | |
| 209 if (dest_index >= counts_size()) | |
| 94 return false; | 210 return false; |
| 95 } | 211 iter->Next(); |
| 96 } | 212 } |
| 97 | |
| 98 return iter->Done(); | |
| 99 } | 213 } |
| 100 | 214 |
| 101 // Use simple binary search. This is very general, but there are better | 215 // Use simple binary search. This is very general, but there are better |
| 102 // approaches if we knew that the buckets were linearly distributed. | 216 // approaches if we knew that the buckets were linearly distributed. |
| 103 size_t SampleVector::GetBucketIndex(Sample value) const { | 217 size_t SampleVectorBase::GetBucketIndex(Sample value) const { |
| 104 size_t bucket_count = bucket_ranges_->bucket_count(); | 218 size_t bucket_count = bucket_ranges_->bucket_count(); |
| 105 CHECK_GE(bucket_count, 1u); | 219 CHECK_GE(bucket_count, 1u); |
| 106 CHECK_GE(value, bucket_ranges_->range(0)); | 220 CHECK_GE(value, bucket_ranges_->range(0)); |
| 107 CHECK_LT(value, bucket_ranges_->range(bucket_count)); | 221 CHECK_LT(value, bucket_ranges_->range(bucket_count)); |
| 108 | 222 |
| 109 size_t under = 0; | 223 size_t under = 0; |
| 110 size_t over = bucket_count; | 224 size_t over = bucket_count; |
| 111 size_t mid; | 225 size_t mid; |
| 112 do { | 226 do { |
| 113 DCHECK_GE(over, under); | 227 DCHECK_GE(over, under); |
| 114 mid = under + (over - under)/2; | 228 mid = under + (over - under)/2; |
| 115 if (mid == under) | 229 if (mid == under) |
| 116 break; | 230 break; |
| 117 if (bucket_ranges_->range(mid) <= value) | 231 if (bucket_ranges_->range(mid) <= value) |
| 118 under = mid; | 232 under = mid; |
| 119 else | 233 else |
| 120 over = mid; | 234 over = mid; |
| 121 } while (true); | 235 } while (true); |
| 122 | 236 |
| 123 DCHECK_LE(bucket_ranges_->range(mid), value); | 237 DCHECK_LE(bucket_ranges_->range(mid), value); |
| 124 CHECK_GT(bucket_ranges_->range(mid + 1), value); | 238 CHECK_GT(bucket_ranges_->range(mid + 1), value); |
| 125 return mid; | 239 return mid; |
| 126 } | 240 } |
| 127 | 241 |
| 242 void SampleVectorBase::MoveSingleSampleToCounts() { | |
| 243 DCHECK(counts()); | |
| 244 | |
| 245 // Disable the single-sample since there is now counts storage for the data. | |
| 246 SingleSample sample = single_sample().Extract(/*disable=*/true); | |
| 247 | |
| 248 // Stop here if there is no "count" as trying to find the bucket index of | |
| 249 // an invalid (including zero) "value" will crash. | |
| 250 if (sample.count == 0) | |
| 251 return; | |
| 252 | |
| 253 // Move the value into storage. Sum and redundant-count already account | |
| 254 // for this entry so no need to call IncreaseSumAndCount(). | |
| 255 subtle::NoBarrier_AtomicIncrement(&counts()[sample.bucket], sample.count); | |
| 256 } | |
| 257 | |
| 258 void SampleVectorBase::MountCountsStorageAndMoveSingleSample() { | |
| 259 // There are many SampleVector objects and the lock is needed very | |
| 260 // infrequently (just when advancing from single-sample to multi-sample) so | |
| 261 // define a single, global lock that all can use. This lock only prevents | |
| 262 // concurrent entry into the code below; access and updates to |counts_| | |
| 263 // still require atomic operations. | |
| 264 static LazyInstance<Lock>::Leaky counts_lock = LAZY_INSTANCE_INITIALIZER; | |
| 265 if (subtle::NoBarrier_Load(&counts_) == 0) { | |
| 266 AutoLock lock(counts_lock.Get()); | |
| 267 if (subtle::NoBarrier_Load(&counts_) == 0) { | |
| 268 // Create the actual counts storage while the above lock is acquired. | |
| 269 HistogramBase::Count* counts = CreateCountsStorageWhileLocked(); | |
| 270 DCHECK(counts); | |
| 271 | |
| 272 // Point |counts_| to the newly created storage. This is done while | |
| 273 // locked to prevent possible concurrent calls to CreateCountsStorage | |
| 274 // but, between that call and here, other threads could notice the | |
| 275 // existence of the storage and race with this to set_counts(). That's | |
| 276 // okay because (a) it's atomic and (b) it always writes the same value. | |
| 277 set_counts(counts); | |
| 278 } | |
| 279 } | |
| 280 | |
| 281 // Move any single-sample into the newly mounted storage. | |
| 282 MoveSingleSampleToCounts(); | |
| 283 } | |
| 284 | |
| 285 SampleVector::SampleVector(const BucketRanges* bucket_ranges) | |
| 286 : SampleVector(0, bucket_ranges) {} | |
| 287 | |
| 288 SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges) | |
| 289 : SampleVectorBase(id, bucket_ranges) {} | |
| 290 | |
| 291 SampleVector::~SampleVector() {} | |
| 292 | |
| 293 bool SampleVector::MountExistingCountsStorage() const { | |
| 294 // There is never any existing storage other than what is already in use. | |
| 295 return counts() != nullptr; | |
| 296 } | |
| 297 | |
| 298 HistogramBase::AtomicCount* SampleVector::CreateCountsStorageWhileLocked() { | |
| 299 local_counts_.resize(counts_size()); | |
| 300 return &local_counts_[0]; | |
| 301 } | |
| 302 | |
| 303 PersistentSampleVector::PersistentSampleVector( | |
| 304 uint64_t id, | |
| 305 const BucketRanges* bucket_ranges, | |
| 306 Metadata* meta, | |
| 307 const DelayedPersistentAllocation& counts) | |
| 308 : SampleVectorBase(id, meta, bucket_ranges), persistent_counts_(counts) { | |
| 309 // Only mount the full storage if the single-sample has been disabled. | |
| 310 // Otherwise, it is possible for this object instance to start using (empty) | |
| 311 // storage that was created incidentally (because it shares a memory segment | |
| 312 // with another block) while another instance continues to update the |
|
> Alexei Svitkine (slow), 2017/04/20 22:11:34:
> I don't understand the "because it shares a memory
>
> bcwhite, 2017/04/21 13:25:32:
> Something else. Comment updated.
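Stepping back: the mount path in MountCountsStorageAndMoveSingleSample() above is classic double-checked locking. A portable sketch, with std::atomic and std::mutex standing in for Chromium's subtle::* atomics, Lock, and LazyInstance (all names here are illustrative, not the real types):

```cpp
#include <atomic>
#include <mutex>

std::atomic<int*> g_counts{nullptr};  // Stand-in for |counts_|.

int* CreateStorage() {  // Stand-in for CreateCountsStorageWhileLocked().
  static int storage[64] = {};
  return storage;
}

void MountCountsStorage() {
  // Fast path: already mounted, no lock taken.
  if (g_counts.load(std::memory_order_acquire))
    return;

  // One global, deliberately leaked lock serves every caller because
  // mounting happens at most once per histogram and rarely overall.
  static std::mutex& lock = *new std::mutex;
  const std::lock_guard<std::mutex> guard(lock);

  // Re-check under the lock: another thread may have mounted first.
  if (!g_counts.load(std::memory_order_relaxed))
    g_counts.store(CreateStorage(), std::memory_order_release);
}

int main() {
  MountCountsStorage();
  MountCountsStorage();  // Second call is a no-op: already mounted.
  return g_counts.load() == nullptr;  // 0 on success.
}
```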
| 313 // single sample. | |
| 314 if (single_sample().IsDisabled()) { | |
| 315 bool success = MountExistingCountsStorage(); | |
| 316 DCHECK(success); | |
| 317 } | |
| 318 } | |
| 319 | |
| 320 PersistentSampleVector::~PersistentSampleVector() {} | |
| 321 | |
| 322 bool PersistentSampleVector::MountExistingCountsStorage() const { | |
| 323 // There is no check that counts is not yet mounted because, given that this | |
|
> Alexei Svitkine (slow), 2017/04/20 22:11:34:
> Nit: Make it a DCHECK just in case.
>
> bcwhite, 2017/04/21 13:25:31:
> It's allowed to be called when it is already mount
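The "no danger" claim in the comment — a redundant mount merely rewrites the same address atomically — is easy to demonstrate with a toy sketch (plain std::atomic, not the Chromium types):

```cpp
#include <atomic>
#include <cassert>
#include <thread>

int main() {
  static int storage[8] = {};
  std::atomic<int*> counts{nullptr};

  // Two threads race to "mount" the same address; whichever store lands
  // last writes an identical value, so the outcome is the same either way.
  auto mount = [&counts] { counts.store(storage, std::memory_order_release); };
  std::thread a(mount);
  std::thread b(mount);
  a.join();
  b.join();

  assert(counts.load() == storage);
}
```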
| 324 // is a virtual function, it's more efficient to do that at the call-site. | |
| 325 // There is no danger, however, because at worst the counts value would be | |
| 326 // overwritten (in an atomic manner) with the exact same address. | |
| 327 | |
| 328 if (!persistent_counts_.reference()) | |
| 329 return false; // Nothing to mount. | |
| 330 | |
| 331 // Mount the counts array in position. | |
| 332 set_counts( | |
| 333 static_cast<HistogramBase::AtomicCount*>(persistent_counts_.Get())); | |
| 334 return true; | |
| 335 } | |
| 336 | |
| 337 HistogramBase::AtomicCount* | |
| 338 PersistentSampleVector::CreateCountsStorageWhileLocked() { | |
| 339 void* mem = persistent_counts_.Get(); | |
| 340 if (!mem) { | |
| 341 // The above shouldn't fail but can if Bad Things(tm) are occurring in the | |
| 342 // persistent allocator. Crashing isn't a good option so instead just | |
| 343 // allocate something from the heap and return that. There will be no | |
| 344 // sharing or persistence but worse things are already happening. | |
| 345 return new HistogramBase::AtomicCount[counts_size()]; | |
| 346 } | |
| 347 | |
| 348 return static_cast<HistogramBase::AtomicCount*>(mem); | |
| 349 } | |
| 350 | |
| 128 SampleVectorIterator::SampleVectorIterator( | 351 SampleVectorIterator::SampleVectorIterator( |
| 129 const std::vector<HistogramBase::AtomicCount>* counts, | 352 const std::vector<HistogramBase::AtomicCount>* counts, |
| 130 const BucketRanges* bucket_ranges) | 353 const BucketRanges* bucket_ranges) |
| 131 : counts_(&(*counts)[0]), | 354 : counts_(&(*counts)[0]), |
| 132 counts_size_(counts->size()), | 355 counts_size_(counts->size()), |
| 133 bucket_ranges_(bucket_ranges), | 356 bucket_ranges_(bucket_ranges), |
| 134 index_(0) { | 357 index_(0) { |
| 135 CHECK_GE(bucket_ranges_->bucket_count(), counts_size_); | 358 CHECK_GE(bucket_ranges_->bucket_count(), counts_size_); |
| 136 SkipEmptyBuckets(); | 359 SkipEmptyBuckets(); |
| 137 } | 360 } |
| (...skipping 46 matching lines...) | |
| 184 return; | 407 return; |
| 185 | 408 |
| 186 while (index_ < counts_size_) { | 409 while (index_ < counts_size_) { |
| 187 if (subtle::NoBarrier_Load(&counts_[index_]) != 0) | 410 if (subtle::NoBarrier_Load(&counts_[index_]) != 0) |
| 188 return; | 411 return; |
| 189 index_++; | 412 index_++; |
| 190 } | 413 } |
| 191 } | 414 } |
| 192 | 415 |
| 193 } // namespace base | 416 } // namespace base |
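Finally, the binary search in GetBucketIndex() (unchanged by this CL) can be verified in isolation. A faithful restatement of its loop with invented boundary values; the invariant is range(under) <= value < range(over), and the result is the bucket i with range(i) <= value < range(i+1):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// |ranges| holds bucket boundaries; bucket i spans [ranges[i], ranges[i+1]).
size_t GetBucketIndex(const std::vector<int>& ranges, int value) {
  size_t under = 0;
  size_t over = ranges.size() - 1;  // == bucket count
  for (;;) {
    const size_t mid = under + (over - under) / 2;
    if (mid == under)
      return mid;
    if (ranges[mid] <= value)
      under = mid;
    else
      over = mid;
  }
}

int main() {
  // Buckets: [0,1) [1,5) [5,10) [10,50).
  const std::vector<int> ranges = {0, 1, 5, 10, 50};
  assert(GetBucketIndex(ranges, 0) == 0);
  assert(GetBucketIndex(ranges, 7) == 2);
  assert(GetBucketIndex(ranges, 49) == 3);
}
```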