Chromium Code Reviews
| Index: gpu/command_buffer/client/query_tracker.cc |
| diff --git a/gpu/command_buffer/client/query_tracker.cc b/gpu/command_buffer/client/query_tracker.cc |
| index e960d139f9c50200baa0634f3ba9d6d05cd37323..44b25a2ee21ee6463ef28fec314bfe26d6d56efb 100644 |
| --- a/gpu/command_buffer/client/query_tracker.cc |
| +++ b/gpu/command_buffer/client/query_tracker.cc |
| @@ -26,11 +26,8 @@ QuerySyncManager::Bucket::Bucket(QuerySync* sync_mem, |
| : syncs(sync_mem), |
| shm_id(shm_id), |
| base_shm_offset(shm_offset), |
| - free_queries(kSyncsPerBucket) { |
| - static_assert(kSyncsPerBucket <= USHRT_MAX, |
| - "Can't fit kSyncsPerBucket in unsigned short"); |
| - for (size_t ii = 0; ii < kSyncsPerBucket; ++ii) |
| - free_queries[ii] = ii; |
| + in_use_query_count(0), |
| + next_query_scan_pos(0) { |
| } |
| QuerySyncManager::Bucket::~Bucket() = default; |
| @@ -52,7 +49,7 @@ bool QuerySyncManager::Alloc(QuerySyncManager::QueryInfo* info) { |
| DCHECK(info); |
| Bucket* bucket = nullptr; |
| for (Bucket* bucket_candidate : buckets_) { |
| - if (!bucket_candidate->free_queries.empty()) { |
| + if (bucket_candidate->in_use_query_count != kSyncsPerBucket) { |
| bucket = bucket_candidate; |
| break; |
| } |
| @@ -70,27 +67,45 @@ bool QuerySyncManager::Alloc(QuerySyncManager::QueryInfo* info) { |
| buckets_.push_back(bucket); |
| } |
| - unsigned short index_in_bucket = bucket->free_queries.back(); |
| + unsigned short index_in_bucket = 0; |
| + size_t start_and_end_i = bucket->next_query_scan_pos; |
| + |
| + // Find a free Query in the bucket by scanning the bitset of free |
| + // flags. Resume the search where we ended last time to avoid |
| + // O(n^2) for the common scenario. |
|
reveman — 2015/05/18 19:58:42:
is this really worthwhile? i would prefer if we ke… [review comment truncated in this capture]
|
| + size_t i = start_and_end_i; |
| + do { |
| + if (!bucket->in_use_queries[i]) { |
| + index_in_bucket = i; |
| + bucket->next_query_scan_pos = i; |
| + break; |
| + } |
| + i = (i + 1) % kSyncsPerBucket; |
| + } while (i != start_and_end_i); |
| + |
| uint32 shm_offset = |
| bucket->base_shm_offset + index_in_bucket * sizeof(QuerySync); |
| QuerySync* sync = bucket->syncs + index_in_bucket; |
| *info = QueryInfo(bucket, bucket->shm_id, shm_offset, sync); |
| info->sync->Reset(); |
| - bucket->free_queries.pop_back(); |
| + bucket->in_use_queries[index_in_bucket] = true; |
| + bucket->in_use_query_count++; |
| return true; |
| } |
| void QuerySyncManager::Free(const QuerySyncManager::QueryInfo& info) { |
| - DCHECK(info.bucket->free_queries.size() < kSyncsPerBucket); |
| + DCHECK(info.bucket->in_use_query_count > 0); |
| unsigned short index_in_bucket = info.sync - info.bucket->syncs; |
| - info.bucket->free_queries.push_back(index_in_bucket); |
| + DCHECK(info.bucket->in_use_queries[index_in_bucket] == true); |
| + info.bucket->in_use_queries[index_in_bucket] = false; |
| + info.bucket->in_use_query_count--; |
| } |
| void QuerySyncManager::Shrink() { |
| std::deque<Bucket*> new_buckets; |
| while (!buckets_.empty()) { |
| Bucket* bucket = buckets_.front(); |
| - if (bucket->free_queries.size() < kSyncsPerBucket) { |
| + if (bucket->in_use_query_count > 0) { |
| new_buckets.push_back(bucket); |
| } else { |
| mapped_memory_->Free(bucket->syncs); |