Index: gpu/command_buffer/service/sync_point_manager.h
diff --git a/gpu/command_buffer/service/sync_point_manager.h b/gpu/command_buffer/service/sync_point_manager.h
index 3f11e05dabbe407ea9c27e018c759701e3fedbb3..d6a8c7a65a833cc5349f8fa1c3ce850030ee67be 100644
--- a/gpu/command_buffer/service/sync_point_manager.h
+++ b/gpu/command_buffer/service/sync_point_manager.h
@@ -5,6 +5,8 @@
 #ifndef GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
 #define GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
+#include <functional>
+#include <queue>
 #include <vector>
 #include "base/atomic_sequence_num.h"
@@ -28,19 +30,13 @@ class GPU_EXPORT SyncPointClientState
     : public base::RefCountedThreadSafe<SyncPointClientState> {
  public:
   static scoped_refptr<SyncPointClientState> Create();
-  uint32_t GenerateUnprocessedOrderNumber(SyncPointManager* sync_point_manager);
-  void BeginProcessingOrderNumber(uint32_t order_num) {
-    DCHECK(processing_thread_checker_.CalledOnValidThread());
-    DCHECK_GE(order_num, current_order_num_);
-    current_order_num_ = order_num;
-  }
+  uint32_t GenerateUnprocessedOrderNumber(SyncPointManager* sync_point_manager);
+  void BeginProcessingOrderNumber(uint32_t order_num);
+  void FinishProcessingOrderNumber(uint32_t order_num);
-  void FinishProcessingOrderNumber(uint32_t order_num) {
-    DCHECK(processing_thread_checker_.CalledOnValidThread());
-    DCHECK_EQ(current_order_num_, order_num);
-    DCHECK_GT(order_num, processed_order_num());
-    base::subtle::Release_Store(&processed_order_num_, order_num);
+  bool IsFenceSyncReleased(uint32_t release) {
+    return release <= fence_sync_release();
   }
   uint32_t processed_order_num() const {
@@ -51,6 +47,11 @@ class GPU_EXPORT SyncPointClientState
     return base::subtle::Acquire_Load(&unprocessed_order_num_);
   }
+  uint32_t fence_sync_release() {
+    base::AutoLock auto_lock(fence_sync_lock_);
+    return fence_sync_release_;
+  }
+
   uint32_t current_order_num() const {
     DCHECK(processing_thread_checker_.CalledOnValidThread());
     return current_order_num_;
@@ -60,9 +61,49 @@ class GPU_EXPORT SyncPointClientState
   friend class base::RefCountedThreadSafe<SyncPointClientState>;
   friend class SyncPointClient;
+  struct ReleaseCallback {
+    uint32_t release_count;
+    base::Closure callback_closure;
+
+    ReleaseCallback(uint32_t release, const base::Closure& callback);
+    ~ReleaseCallback();
+
+    bool operator>(const ReleaseCallback& rhs) const {
+      return release_count > rhs.release_count;
+    }
+  };
+  typedef std::priority_queue<
+      ReleaseCallback,
+      std::vector<ReleaseCallback>,
+      std::greater<ReleaseCallback>> ReleaseCallbackQueue;
+
+  struct OrderFence {
+    uint32_t order_num;
+    uint32_t fence_release;
+
+    OrderFence(uint32_t order, uint32_t release)
+        : order_num(order),
+          fence_release(release) {}
+
+    bool operator>(const OrderFence& rhs) const {
+      return (order_num > rhs.order_num) ||
+             ((order_num == rhs.order_num) &&
+              (fence_release > rhs.fence_release));
+    }
+  };
+  typedef std::priority_queue<OrderFence,
+                              std::vector<OrderFence>,
+                              std::greater<OrderFence>> OrderFenceQueue;
+
   SyncPointClientState();
   virtual ~SyncPointClientState();
+  bool WaitForRelease(uint32_t wait_order_num,
+                      uint32_t release,
+                      const base::Closure& callback);
+
+  void ReleaseFenceSync(uint32_t release);
+
   // Last finished IPC order number.
   base::subtle::Atomic32 processed_order_num_;
@@ -75,6 +116,25 @@ class GPU_EXPORT SyncPointClientState
   // Current IPC order number being processed (only used on processing thread).
   uint32_t current_order_num_;
+  // Protects fence_sync_release_, release_callback_queue_, and
+  // order_fence_queue_.
+  base::Lock fence_sync_lock_;
+
+  // Current fence sync release that has been signaled.
+  uint32_t fence_sync_release_;
+
+  // In well-defined fence sync operations, fence syncs are released in
+  // order, so simply having a priority queue for callbacks is enough.
+  ReleaseCallbackQueue release_callback_queue_;
+
+  // In situations where we are waiting on fence syncs that do not exist, we
+  // validate the wait by making sure the order number does not pass the
+  // order number at which the wait command was issued. If processing reaches
+  // the wait command's order number, we automatically release up to the
+  // expected release count. Note that this also releases all lower release
+  // counts, so a single misbehaved fence sync is enough to invalidate/signal
+  // all previous fence syncs.
+  OrderFenceQueue order_fence_queue_;
+
   DISALLOW_COPY_AND_ASSIGN(SyncPointClientState);
 };
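The member comments above describe how the two queues are meant to behave, but the draining logic presumably lives in the corresponding .cc file, which is outside this hunk. The following is a minimal sketch, not taken from this patch, of how SyncPointClientState::ReleaseFenceSync might drain release_callback_queue_ under those semantics; the body is an assumption based on the header alone.

// Illustrative sketch only -- not from this patch. Assumes the queue
// semantics described in the member comments above.
void SyncPointClientState::ReleaseFenceSync(uint32_t release) {
  std::vector<base::Closure> to_run;
  {
    base::AutoLock auto_lock(fence_sync_lock_);
    DCHECK_GT(release, fence_sync_release_);
    fence_sync_release_ = release;
    // Fence syncs are released in order, so every callback whose release
    // count has now been reached sits at the front of the priority queue.
    while (!release_callback_queue_.empty() &&
           release_callback_queue_.top().release_count <= release) {
      to_run.push_back(release_callback_queue_.top().callback_closure);
      release_callback_queue_.pop();
    }
  }
  // Run the callbacks outside the lock so they can safely re-enter this
  // object (for example, to queue further waits).
  for (const base::Closure& callback : to_run)
    callback.Run();
}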
@@ -84,6 +144,18 @@ class GPU_EXPORT SyncPointClient {
   scoped_refptr<SyncPointClientState> client_state() { return client_state_; }
+  // Waits until a release count is reached on a SyncPointClientState. If
+  // this function returns false, the wait was invalid. Otherwise, if it
+  // returns true, wait_complete_callback is guaranteed to be called
+  // eventually. If the release has already happened, the callback is called
+  // immediately before this function returns.
+  bool Wait(scoped_refptr<SyncPointClientState> release_state,
+            uint32_t release_count,
+            const base::Closure& wait_complete_callback);
+
+  void ReleaseFenceSync(uint32_t release);
+
  private:
   friend class SyncPointManager;
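As a usage note, here is a hedged sketch of how a caller might drive the new SyncPointClient::Wait API; the WaitThenContinue helper and its continue_closure argument are hypothetical and not part of this change.

// Illustrative usage sketch only -- not part of this change. The helper and
// its closure argument are hypothetical.
#include "base/callback.h"
#include "gpu/command_buffer/service/sync_point_manager.h"

namespace gpu {

void WaitThenContinue(SyncPointClient* client,
                      scoped_refptr<SyncPointClientState> release_state,
                      uint32_t release_count,
                      const base::Closure& continue_closure) {
  // Wait() returns false for an invalid wait, in which case the callback will
  // never run; continue immediately rather than stalling forever. If the
  // release already happened, Wait() runs the callback before returning true.
  if (!client->Wait(release_state, release_count, continue_closure))
    continue_closure.Run();
}

}  // namespace gpu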