Chromium Code Reviews | Index: gpu/command_buffer/service/sync_point_manager.cc |
| diff --git a/gpu/command_buffer/service/sync_point_manager.cc b/gpu/command_buffer/service/sync_point_manager.cc |
| index 039e494de8b1d4bd5ea3df100c89ec10cb508690..071829923469222e1f95784eb8e230fb977b3008 100644 |
| --- a/gpu/command_buffer/service/sync_point_manager.cc |
| +++ b/gpu/command_buffer/service/sync_point_manager.cc |
| @@ -6,44 +6,281 @@ |
| #include <climits> |
| +#include "base/bind.h" |
| +#include "base/containers/hash_tables.h" |
| +#include "base/location.h" |
| #include "base/logging.h" |
| #include "base/rand_util.h" |
| #include "base/sequence_checker.h" |
| +#include "base/single_thread_task_runner.h" |
| namespace gpu { |
| static const int kMaxSyncBase = INT_MAX; |
| -scoped_refptr<SyncPointClientState> SyncPointClientState::Create() { |
| - return new SyncPointClientState; |
| +namespace { |
| + |
| +void RunOnThread(scoped_refptr<base::SingleThreadTaskRunner> task_runner, |
| + const base::Closure& callback) { |
| + if (task_runner->BelongsToCurrentThread()) { |
| + callback.Run(); |
| + } else { |
| + task_runner->PostTask(FROM_HERE, callback); |
| + } |
| +} |
| + |
| +} // namespace |
| + |
| +scoped_refptr<SyncPointOrderData> SyncPointOrderData::Create() { |
| + return new SyncPointOrderData; |
| } |
| -uint32_t SyncPointClientState::GenerateUnprocessedOrderNumber( |
| +void SyncPointOrderData::Destroy() { |
| + // Because of circular references between the SyncPointOrderData and |
| + // SyncPointClientState, we must remove the references on destroy. Releasing |
| + // the fence syncs in the order fence queue would be redundant at this point |
| + // because they are assumed to be released on the destruction of the |
| + // SyncPointClient. |
| + base::AutoLock auto_lock(lock_); |
| + destroyed_ = true; |
| + while (!order_fence_queue_.empty()) { |
| + order_fence_queue_.pop(); |
| + } |
| +} |
| + |
| +uint32_t SyncPointOrderData::GenerateUnprocessedOrderNumber( |
| SyncPointManager* sync_point_manager) { |
| const uint32_t order_num = sync_point_manager->GenerateOrderNumber(); |
| - base::subtle::Release_Store(&unprocessed_order_num_, order_num); |
| + base::AutoLock auto_lock(lock_); |
| + unprocessed_order_num_ = order_num; |
| return order_num; |
| } |
| -SyncPointClientState::SyncPointClientState() |
| - : processed_order_num_(0), |
| - unprocessed_order_num_(0), |
| - current_order_num_(0) { |
| +void SyncPointOrderData::BeginProcessingOrderNumber(uint32_t order_num) { |
| + DCHECK(processing_thread_checker_.CalledOnValidThread()); |
| + DCHECK_GE(order_num, current_order_num_); |
| + current_order_num_ = order_num; |
| + |
| + // Catch invalid waits which were waiting on fence syncs that do not exist. |
| + // When we begin processing an order number, we should release any fence |
| + // syncs that were enqueued for an order number that never existed. |
| + // Release without the lock to avoid possible deadlocks. |
| + std::vector<OrderFence> ensure_releases; |
| + { |
| + base::AutoLock auto_lock(lock_); |
| + while (!order_fence_queue_.empty()) { |
| + const OrderFence& order_fence = order_fence_queue_.top(); |
| + if (order_fence_queue_.top().order_num < order_num) { |
| + ensure_releases.push_back(order_fence); |
| + order_fence_queue_.pop(); |
| + continue; |
| + } |
| + break; |
| + } |
| + } |
| + |
| + for (OrderFence& order_fence : ensure_releases) { |
| + order_fence.client_state->EnsureReleased(order_fence.fence_release); |
| + } |
| +} |
| + |
| +void SyncPointOrderData::FinishProcessingOrderNumber(uint32_t order_num) { |
| + DCHECK(processing_thread_checker_.CalledOnValidThread()); |
| + DCHECK_EQ(current_order_num_, order_num); |
| + |
| + // Catch invalid waits which were waiting on fence syncs that do not exist. |
| + // When we end processing an order number, we should release any fence syncs |
| + // which were supposed to be released during this order number. |
| + // Release without the lock to avoid possible deadlocks. |
| + std::vector<OrderFence> ensure_releases; |
| + { |
| + base::AutoLock auto_lock(lock_); |
| + DCHECK_GT(order_num, processed_order_num_); |
| + processed_order_num_ = order_num; |
| + |
| + while (!order_fence_queue_.empty()) { |
|
piman
2015/10/01 17:58:16
Do we need this any more?
The only case not handle
David Yen
2015/10/01 18:12:00
The way I have come to understand these 2 cases is
David Yen
2015/10/01 18:56:32
I also added a check if a client is waiting on its
|
| + const OrderFence& order_fence = order_fence_queue_.top(); |
| + if (order_fence_queue_.top().order_num <= order_num) { |
| + ensure_releases.push_back(order_fence); |
| + order_fence_queue_.pop(); |
| + continue; |
| + } |
| + break; |
| + } |
| + } |
| + |
| + for (OrderFence& order_fence : ensure_releases) { |
| + order_fence.client_state->EnsureReleased(order_fence.fence_release); |
| + } |
| +} |
| + |
| +SyncPointOrderData::OrderFence::OrderFence( |
| + uint32_t order, uint64_t release, scoped_refptr<SyncPointClientState> state) |
| + : order_num(order), |
| + fence_release(release), |
| + client_state(state) { |
| +} |
| + |
| +SyncPointOrderData::OrderFence::~OrderFence() { |
| +} |
| + |
| +SyncPointOrderData::SyncPointOrderData() |
| + : current_order_num_(0), |
| + destroyed_(false), |
| + processed_order_num_(0), |
| + unprocessed_order_num_(0) { |
| +} |
| + |
| +SyncPointOrderData::~SyncPointOrderData() { |
| +} |
| + |
| +bool SyncPointOrderData::ValidateReleaseOrderNumber( |
| + scoped_refptr<SyncPointClientState> client_state, |
| + uint32_t wait_order_num, |
| + uint64_t fence_release) { |
| + base::AutoLock auto_lock(lock_); |
| + if (destroyed_) |
| + return false; |
| + |
| + // Release should have a possible unprocessed order number lower |
| + // than the wait order number. |
| + if ((processed_order_num_ + 1) >= wait_order_num) |
| + return false; |
| + |
| + // Release should have more unprocessed numbers if we are waiting. |
| + if (unprocessed_order_num_ <= processed_order_num_) |
| + return false; |
| + |
| + // So far it could be valid, but add an order fence guard to be sure it |
| + // gets released eventually. |
| + const uint32_t expected_order_num = std::min(unprocessed_order_num_, |
| + wait_order_num); |
| + order_fence_queue_.push(OrderFence(expected_order_num, fence_release, |
| + client_state)); |
| + return true; |
| +} |
| + |
| +SyncPointClientState::ReleaseCallback::ReleaseCallback( |
| + uint64_t release, const base::Closure& callback) |
| + : release_count(release), |
| + callback_closure(callback) { |
| +} |
| + |
| +SyncPointClientState::ReleaseCallback::~ReleaseCallback() { |
| +} |
| + |
| +SyncPointClientState::SyncPointClientState( |
| + scoped_refptr<SyncPointOrderData> order_data) |
| + : order_data_(order_data), |
| + fence_sync_release_(0) { |
| } |
| SyncPointClientState::~SyncPointClientState() { |
| } |
| +bool SyncPointClientState::WaitForRelease(uint32_t wait_order_num, |
| + uint64_t release, |
| + const base::Closure& callback) { |
| + // Lock must be held the whole time while we validate otherwise it could be |
| + // released while we are checking. |
| + { |
| + base::AutoLock auto_lock(fence_sync_lock_); |
| + if (release > fence_sync_release_) { |
| + if (!order_data_->ValidateReleaseOrderNumber(this, wait_order_num, |
| + release)) { |
| + return false; |
| + } else { |
| + // Add the callback which will be called upon release. |
| + release_callback_queue_.push(ReleaseCallback(release, callback)); |
| + return true; |
| + } |
| + } |
| + } |
| + |
| + // Already released, run the callback now. |
| + callback.Run(); |
| + return true; |
| +} |
| + |
| +void SyncPointClientState::ReleaseFenceSync(uint64_t release) { |
| + // Call callbacks without the lock to avoid possible deadlocks. |
| + std::vector<base::Closure> callback_list; |
| + { |
| + base::AutoLock auto_lock(fence_sync_lock_); |
| + ReleaseFenceSyncLocked(release, &callback_list); |
| + } |
| + |
| + for (const base::Closure& closure : callback_list) { |
| + closure.Run(); |
| + } |
| +} |
| + |
| +void SyncPointClientState::EnsureReleased(uint64_t release) { |
| + // Call callbacks without the lock to avoid possible deadlocks. |
| + std::vector<base::Closure> callback_list; |
| + { |
| + base::AutoLock auto_lock(fence_sync_lock_); |
| + if (release <= fence_sync_release_) |
| + return; |
| + |
| + ReleaseFenceSyncLocked(release, &callback_list); |
| + } |
| + |
| + for (const base::Closure& closure : callback_list) { |
| + closure.Run(); |
| + } |
| +} |
| + |
| +void SyncPointClientState::ReleaseFenceSyncLocked( |
| + uint64_t release, std::vector<base::Closure>* callback_list) { |
| + fence_sync_lock_.AssertAcquired(); |
| + DCHECK_GT(release, fence_sync_release_); |
| + |
| + fence_sync_release_ = release; |
| + while (!release_callback_queue_.empty() && |
| + release_callback_queue_.top().release_count <= release) { |
| + callback_list->push_back(release_callback_queue_.top().callback_closure); |
| + release_callback_queue_.pop(); |
| + } |
| +} |
| + |
| SyncPointClient::~SyncPointClient() { |
| + // Release all fences on destruction. |
| + ReleaseFenceSync(UINT64_MAX); |
| + |
| sync_point_manager_->DestroySyncPointClient(namespace_id_, client_id_); |
| } |
| +bool SyncPointClient::Wait(scoped_refptr<SyncPointClientState> release_state, |
| + uint64_t release_count, |
| + const base::Closure& wait_complete_callback) { |
| + const uint32_t wait_order_number = |
| + client_state_->order_data()->current_order_num(); |
| + return release_state->WaitForRelease(wait_order_number, |
| + release_count, |
| + wait_complete_callback); |
| +} |
| + |
| +bool SyncPointClient::WaitNonThreadSafe( |
| + scoped_refptr<SyncPointClientState> release_state, |
| + uint64_t release_count, |
| + scoped_refptr<base::SingleThreadTaskRunner> runner, |
| + const base::Closure& wait_complete_callback) { |
| + return Wait(release_state, |
| + release_count, |
| + base::Bind(&RunOnThread, runner, wait_complete_callback)); |
| +} |
| + |
| +void SyncPointClient::ReleaseFenceSync(uint64_t release) { |
| + client_state_->ReleaseFenceSync(release); |
| +} |
| + |
| SyncPointClient::SyncPointClient(SyncPointManager* sync_point_manager, |
| - scoped_refptr<SyncPointClientState> state, |
| + scoped_refptr<SyncPointOrderData> order_data, |
| CommandBufferNamespace namespace_id, |
| uint64_t client_id) |
| : sync_point_manager_(sync_point_manager), |
| - client_state_(state), |
| + client_state_(new SyncPointClientState(order_data)), |
| namespace_id_(namespace_id), |
| client_id_(client_id) { |
| } |
| @@ -65,7 +302,7 @@ SyncPointManager::~SyncPointManager() { |
| } |
| scoped_ptr<SyncPointClient> SyncPointManager::CreateSyncPointClient( |
| - scoped_refptr<SyncPointClientState> client_state, |
| + scoped_refptr<SyncPointOrderData> order_data, |
| CommandBufferNamespace namespace_id, uint64_t client_id) { |
| DCHECK_GE(namespace_id, 0); |
| DCHECK_LT(static_cast<size_t>(namespace_id), arraysize(client_maps_)); |
| @@ -74,7 +311,7 @@ scoped_ptr<SyncPointClient> SyncPointManager::CreateSyncPointClient( |
| ClientMap& client_map = client_maps_[namespace_id]; |
| std::pair<ClientMap::iterator, bool> result = client_map.insert( |
| std::make_pair(client_id, new SyncPointClient(this, |
| - client_state, |
| + order_data, |
| namespace_id, |
| client_id))); |
| DCHECK(result.second); |