Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(18)

Side by Side Diff: gpu/command_buffer/service/sync_point_manager.h

Issue 1331843005: Implemented new fence syncs which replaces the old sync points. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Improved commented out sample mojo code Created 5 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_ 5 #ifndef GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
6 #define GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_ 6 #define GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
7 7
8 #include <functional>
9 #include <queue>
8 #include <vector> 10 #include <vector>
9 11
10 #include "base/atomic_sequence_num.h" 12 #include "base/atomic_sequence_num.h"
11 #include "base/callback.h" 13 #include "base/callback.h"
12 #include "base/containers/hash_tables.h" 14 #include "base/containers/hash_tables.h"
13 #include "base/logging.h" 15 #include "base/logging.h"
14 #include "base/memory/ref_counted.h" 16 #include "base/memory/ref_counted.h"
15 #include "base/memory/scoped_ptr.h" 17 #include "base/memory/scoped_ptr.h"
16 #include "base/synchronization/condition_variable.h" 18 #include "base/synchronization/condition_variable.h"
17 #include "base/synchronization/lock.h" 19 #include "base/synchronization/lock.h"
18 #include "base/threading/thread_checker.h" 20 #include "base/threading/thread_checker.h"
19 #include "gpu/command_buffer/common/constants.h" 21 #include "gpu/command_buffer/common/constants.h"
20 #include "gpu/gpu_export.h" 22 #include "gpu/gpu_export.h"
21 23
22 namespace gpu { 24 namespace gpu {
23 25
24 class SyncPointClient; 26 class SyncPointClient;
25 class SyncPointManager; 27 class SyncPointManager;
26 28
27 class GPU_EXPORT SyncPointClientState 29 class GPU_EXPORT SyncPointClientState
28 : public base::RefCountedThreadSafe<SyncPointClientState> { 30 : public base::RefCountedThreadSafe<SyncPointClientState> {
29 public: 31 public:
30 static scoped_refptr<SyncPointClientState> Create(); 32 static scoped_refptr<SyncPointClientState> Create();
33
31 uint32_t GenerateUnprocessedOrderNumber(SyncPointManager* sync_point_manager); 34 uint32_t GenerateUnprocessedOrderNumber(SyncPointManager* sync_point_manager);
35 void BeginProcessingOrderNumber(uint32_t order_num);
36 void FinishProcessingOrderNumber(uint32_t order_num);
32 37
33 void BeginProcessingOrderNumber(uint32_t order_num) { 38 bool IsFenceSyncReleased(uint32_t release) {
34 DCHECK(processing_thread_checker_.CalledOnValidThread()); 39 return release <= fence_sync_release();
35 DCHECK_GE(order_num, current_order_num_);
36 current_order_num_ = order_num;
37 }
38
39 void FinishProcessingOrderNumber(uint32_t order_num) {
40 DCHECK(processing_thread_checker_.CalledOnValidThread());
41 DCHECK_EQ(current_order_num_, order_num);
42 DCHECK_GT(order_num, processed_order_num());
43 base::subtle::Release_Store(&processed_order_num_, order_num);
44 } 40 }
45 41
46 uint32_t processed_order_num() const { 42 uint32_t processed_order_num() const {
47 return base::subtle::Acquire_Load(&processed_order_num_); 43 return base::subtle::Acquire_Load(&processed_order_num_);
48 } 44 }
49 45
50 uint32_t unprocessed_order_num() const { 46 uint32_t unprocessed_order_num() const {
51 return base::subtle::Acquire_Load(&unprocessed_order_num_); 47 return base::subtle::Acquire_Load(&unprocessed_order_num_);
52 } 48 }
53 49
50 uint32_t fence_sync_release() {
51 base::AutoLock auto_lock(fence_sync_lock_);
52 return fence_sync_release_;
53 }
54
54 uint32_t current_order_num() const { 55 uint32_t current_order_num() const {
55 DCHECK(processing_thread_checker_.CalledOnValidThread()); 56 DCHECK(processing_thread_checker_.CalledOnValidThread());
56 return current_order_num_; 57 return current_order_num_;
57 } 58 }
58 59
59 protected: 60 protected:
60 friend class base::RefCountedThreadSafe<SyncPointClientState>; 61 friend class base::RefCountedThreadSafe<SyncPointClientState>;
61 friend class SyncPointClient; 62 friend class SyncPointClient;
62 63
64 struct ReleaseCallback {
65 uint32_t release_count;
66 base::Closure callback_closure;
67
68 ReleaseCallback(uint32_t release, const base::Closure& callback);
69 ~ReleaseCallback();
70
71 bool operator>(const ReleaseCallback& rhs) const {
72 return release_count > rhs.release_count;
73 }
74 };
75 typedef std::priority_queue<
76 ReleaseCallback,
77 std::vector<ReleaseCallback>,
78 std::greater<ReleaseCallback>> ReleaseCallbackQueue;
79
80 struct OrderFence {
81 uint32_t order_num;
82 uint32_t fence_release;
83
84 OrderFence(uint32_t order, uint32_t release)
85 : order_num(order),
86 fence_release(release) {}
87
88 bool operator>(const OrderFence& rhs) const {
89 return (order_num > rhs.order_num) ||
90 ((order_num == rhs.order_num) &&
91 (fence_release > rhs.fence_release));
92 }
93 };
94 typedef std::priority_queue<OrderFence,
95 std::vector<OrderFence>,
96 std::greater<OrderFence>> OrderFenceQueue;
97
63 SyncPointClientState(); 98 SyncPointClientState();
64 virtual ~SyncPointClientState(); 99 virtual ~SyncPointClientState();
65 100
101 bool WaitForRelease(uint32_t wait_order_num,
102 uint32_t release,
103 const base::Closure& callback);
104
105 void ReleaseFenceSync(uint32_t release);
106
66 // Last finished IPC order number. 107 // Last finished IPC order number.
67 base::subtle::Atomic32 processed_order_num_; 108 base::subtle::Atomic32 processed_order_num_;
68 109
69 // Unprocessed order number expected to be processed under normal execution. 110 // Unprocessed order number expected to be processed under normal execution.
70 base::subtle::Atomic32 unprocessed_order_num_; 111 base::subtle::Atomic32 unprocessed_order_num_;
71 112
72 // Non thread-safe functions need to be called from a single thread. 113 // Non thread-safe functions need to be called from a single thread.
73 base::ThreadChecker processing_thread_checker_; 114 base::ThreadChecker processing_thread_checker_;
74 115
75 // Current IPC order number being processed (only used on processing thread). 116 // Current IPC order number being processed (only used on processing thread).
76 uint32_t current_order_num_; 117 uint32_t current_order_num_;
77 118
 119 // Protects fence_sync_release_, release_callback_queue_ and order_fence_queue_.
120 base::Lock fence_sync_lock_;
121
122 // Current fence sync release that has been signaled.
123 uint32_t fence_sync_release_;
124
 125 // In well-defined fence sync operations, fence syncs are released in order
126 // so simply having a priority queue for callbacks is enough.
127 ReleaseCallbackQueue release_callback_queue_;
128
129 // In situations where we are waiting on fence syncs that do not exist, we
 130 // validate by making sure the order number does not pass the order number
 131 // at which the wait command was issued. If the order number reaches the
 132 // wait command's order number, we should automatically release up to the expected
133 // release count. Note that this also releases other lower release counts,
134 // so a single misbehaved fence sync is enough to invalidate/signal all
135 // previous fence syncs.
136 OrderFenceQueue order_fence_queue_;
137
78 DISALLOW_COPY_AND_ASSIGN(SyncPointClientState); 138 DISALLOW_COPY_AND_ASSIGN(SyncPointClientState);
79 }; 139 };
80 140
81 class GPU_EXPORT SyncPointClient { 141 class GPU_EXPORT SyncPointClient {
82 public: 142 public:
83 ~SyncPointClient(); 143 ~SyncPointClient();
84 144
85 scoped_refptr<SyncPointClientState> client_state() { return client_state_; } 145 scoped_refptr<SyncPointClientState> client_state() { return client_state_; }
86 146
147 // Wait for a release count to be reached on a SyncPointClientState. If this
148 // function returns false, that means the wait was invalid. Otherwise if it
 149 // returns true, it means wait_complete_callback will be called eventually.
150 // In the case where the release happened already, it will call the callback
151 // immediately before returning.
152 bool Wait(scoped_refptr<SyncPointClientState> release_state,
153 uint32_t release_count,
154 const base::Closure& wait_complete_callback);
155
156
157 void ReleaseFenceSync(uint32_t release);
158
87 private: 159 private:
88 friend class SyncPointManager; 160 friend class SyncPointManager;
89 161
90 SyncPointClient(SyncPointManager* sync_point_manager, 162 SyncPointClient(SyncPointManager* sync_point_manager,
91 scoped_refptr<SyncPointClientState> state, 163 scoped_refptr<SyncPointClientState> state,
92 CommandBufferNamespace namespace_id, uint64_t client_id); 164 CommandBufferNamespace namespace_id, uint64_t client_id);
93 165
94 // Sync point manager is guaranteed to exist in the lifetime of the client. 166 // Sync point manager is guaranteed to exist in the lifetime of the client.
95 SyncPointManager* sync_point_manager_; 167 SyncPointManager* sync_point_manager_;
96 168
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after
168 SyncPointMap sync_point_map_; 240 SyncPointMap sync_point_map_;
169 uint32 next_sync_point_; 241 uint32 next_sync_point_;
170 base::ConditionVariable retire_cond_var_; 242 base::ConditionVariable retire_cond_var_;
171 243
172 DISALLOW_COPY_AND_ASSIGN(SyncPointManager); 244 DISALLOW_COPY_AND_ASSIGN(SyncPointManager);
173 }; 245 };
174 246
175 } // namespace gpu 247 } // namespace gpu
176 248
177 #endif // GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_ 249 #endif // GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698