Chromium Code Reviews

Side by Side Diff: gpu/command_buffer/service/sync_point_manager.h

Issue 2722883002: gpu: Allow waiting on sync tokens without sync token client. (Closed)
Patch Set: oops (created 3 years, 9 months ago)
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_ 5 #ifndef GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
6 #define GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_ 6 #define GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_
7 7
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include <functional> 10 #include <functional>
11 #include <memory> 11 #include <memory>
12 #include <queue> 12 #include <queue>
13 #include <unordered_map> 13 #include <unordered_map>
14 #include <vector> 14 #include <vector>
15 15
16 #include "base/atomic_sequence_num.h" 16 #include "base/atomic_sequence_num.h"
17 #include "base/callback.h" 17 #include "base/callback.h"
18 #include "base/logging.h" 18 #include "base/logging.h"
19 #include "base/macros.h" 19 #include "base/macros.h"
20 #include "base/memory/ref_counted.h" 20 #include "base/memory/ref_counted.h"
21 #include "base/synchronization/condition_variable.h" 21 #include "base/synchronization/condition_variable.h"
22 #include "base/synchronization/lock.h" 22 #include "base/synchronization/lock.h"
23 #include "base/threading/thread_checker.h" 23 #include "base/threading/thread_checker.h"
24 #include "gpu/command_buffer/common/command_buffer_id.h" 24 #include "gpu/command_buffer/common/command_buffer_id.h"
25 #include "gpu/command_buffer/common/constants.h" 25 #include "gpu/command_buffer/common/constants.h"
26 #include "gpu/command_buffer/common/sync_token.h"
26 #include "gpu/gpu_export.h" 27 #include "gpu/gpu_export.h"
27 28
28 namespace base { 29 namespace base {
29 class SingleThreadTaskRunner; 30 class SingleThreadTaskRunner;
30 } // namespace base 31 } // namespace base
31 32
32 namespace gpu { 33 namespace gpu {
33 34
34 class SyncPointClient; 35 class SyncPointClient;
35 class SyncPointClientState; 36 class SyncPointClientState;
(...skipping 23 matching lines...)
59 uint32_t current_order_num() const { 60 uint32_t current_order_num() const {
60 DCHECK(processing_thread_checker_.CalledOnValidThread()); 61 DCHECK(processing_thread_checker_.CalledOnValidThread());
61 return current_order_num_; 62 return current_order_num_;
62 } 63 }
63 64
64 bool IsProcessingOrderNumber() { 65 bool IsProcessingOrderNumber() {
65 DCHECK(processing_thread_checker_.CalledOnValidThread()); 66 DCHECK(processing_thread_checker_.CalledOnValidThread());
66 return !paused_ && current_order_num_ > processed_order_num(); 67 return !paused_ && current_order_num_ > processed_order_num();
67 } 68 }
68 69
70 bool ValidateReleaseOrderNumber(
71 scoped_refptr<SyncPointClientState> client_state,
72 uint32_t wait_order_num,
73 uint64_t fence_release,
74 const base::Closure& release_callback);
75
69 private: 76 private:
70 friend class base::RefCountedThreadSafe<SyncPointOrderData>; 77 friend class base::RefCountedThreadSafe<SyncPointOrderData>;
71 friend class SyncPointClientState;
72 78
73 struct OrderFence { 79 struct OrderFence {
74 uint32_t order_num; 80 uint32_t order_num;
75 uint64_t fence_release; 81 uint64_t fence_release;
76 base::Closure release_callback; 82 base::Closure release_callback;
77 scoped_refptr<SyncPointClientState> client_state; 83 scoped_refptr<SyncPointClientState> client_state;
78 84
79 OrderFence(uint32_t order, 85 OrderFence(uint32_t order,
80 uint64_t release, 86 uint64_t release,
81 const base::Closure& release_callback, 87 const base::Closure& release_callback,
82 scoped_refptr<SyncPointClientState> state); 88 scoped_refptr<SyncPointClientState> state);
83 OrderFence(const OrderFence& other); 89 OrderFence(const OrderFence& other);
84 ~OrderFence(); 90 ~OrderFence();
85 91
86 bool operator>(const OrderFence& rhs) const { 92 bool operator>(const OrderFence& rhs) const {
87 return (order_num > rhs.order_num) || 93 return std::tie(order_num, fence_release) >
88 ((order_num == rhs.order_num) && 94 std::tie(rhs.order_num, rhs.fence_release);
89 (fence_release > rhs.fence_release));
90 } 95 }
91 }; 96 };
92 typedef std::priority_queue<OrderFence, 97 typedef std::priority_queue<OrderFence,
93 std::vector<OrderFence>, 98 std::vector<OrderFence>,
94 std::greater<OrderFence>> OrderFenceQueue; 99 std::greater<OrderFence>>
100 OrderFenceQueue;
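
The rewritten operator> above compares (order_num, fence_release) lexicographically via std::tie, and pairing it with std::greater in the priority_queue makes the queue a min-heap: the fence with the lowest order number (ties broken by the lower release count) is always on top. A minimal standalone sketch of the idiom, using a hypothetical Fence type rather than the real OrderFence:

#include <cstdint>
#include <queue>
#include <tuple>
#include <vector>

struct Fence {
  uint32_t order_num;
  uint64_t fence_release;

  // Lexicographic comparison, as in the new OrderFence::operator>.
  bool operator>(const Fence& rhs) const {
    return std::tie(order_num, fence_release) >
           std::tie(rhs.order_num, rhs.fence_release);
  }
};

// std::greater flips the default max-heap into a min-heap.
using FenceQueue =
    std::priority_queue<Fence, std::vector<Fence>, std::greater<Fence>>;

int main() {
  FenceQueue queue;
  queue.push({7, 2});
  queue.push({3, 9});
  queue.push({3, 1});
  // top() yields {3, 1}, then {3, 9}, then {7, 2}.
  while (!queue.empty())
    queue.pop();
  return 0;
}
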
95 101
96 SyncPointOrderData(); 102 SyncPointOrderData();
97 ~SyncPointOrderData(); 103 ~SyncPointOrderData();
98 104
99 bool ValidateReleaseOrderNumber(
100 scoped_refptr<SyncPointClientState> client_state,
101 uint32_t wait_order_num,
102 uint64_t fence_release,
103 const base::Closure& release_callback);
104
105 // Non thread-safe functions need to be called from a single thread. 105 // Non thread-safe functions need to be called from a single thread.
106 base::ThreadChecker processing_thread_checker_; 106 base::ThreadChecker processing_thread_checker_;
107 107
108 // Current IPC order number being processed (only used on processing thread). 108 // Current IPC order number being processed (only used on processing thread).
109 uint32_t current_order_num_; 109 uint32_t current_order_num_ = 0;
110 110
111 // Whether or not the current order number is being processed or paused. 111 // Whether or not the current order number is being processed or paused.
112 bool paused_; 112 bool paused_ = false;
113 113
114 // This lock protects destroyed_, processed_order_num_, 114 // This lock protects destroyed_, processed_order_num_,
115 // unprocessed_order_num_, and order_fence_queue_. All order numbers (n) in 115 // unprocessed_order_num_, and order_fence_queue_. All order numbers (n) in
116 // order_fence_queue_ must follow the invariant: 116 // order_fence_queue_ must follow the invariant:
117 // processed_order_num_ < n <= unprocessed_order_num_. 117 // processed_order_num_ < n <= unprocessed_order_num_.
118 mutable base::Lock lock_; 118 mutable base::Lock lock_;
119 119
120 bool destroyed_; 120 bool destroyed_ = false;
121 121
122 // Last finished IPC order number. 122 // Last finished IPC order number.
123 uint32_t processed_order_num_; 123 uint32_t processed_order_num_ = 0;
124 124
125 // Unprocessed order number expected to be processed under normal execution. 125 // Unprocessed order number expected to be processed under normal execution.
126 uint32_t unprocessed_order_num_; 126 uint32_t unprocessed_order_num_ = 0;
127 127
128 // In situations where we are waiting on fence syncs that do not exist, we 128 // In situations where we are waiting on fence syncs that do not exist, we
129 // validate by making sure the order number does not pass the order number 129 // validate by making sure the order number does not pass the order number
130 // which the wait command was issued. If the order number reaches the 130 // which the wait command was issued. If the order number reaches the
131 // wait command's, we should automatically release up to the expected 131 // wait command's, we should automatically release up to the expected
132 // release count. Note that this also releases other lower release counts, 132 // release count. Note that this also releases other lower release counts,
133 // so a single misbehaved fence sync is enough to invalidate/signal all 133 // so a single misbehaved fence sync is enough to invalidate/signal all
134 // previous fence syncs. 134 // previous fence syncs.
135 OrderFenceQueue order_fence_queue_; 135 OrderFenceQueue order_fence_queue_;
136 136
137 DISALLOW_COPY_AND_ASSIGN(SyncPointOrderData); 137 DISALLOW_COPY_AND_ASSIGN(SyncPointOrderData);
138 }; 138 };
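
The invariant documented above (processed_order_num_ < n <= unprocessed_order_num_ for every order number n in order_fence_queue_) is what the newly public ValidateReleaseOrderNumber has to check before enqueuing an OrderFence. Its actual implementation is in sync_point_manager.cc, which is not part of this file; the snippet below is only a simplified sketch of that admission rule, with std::mutex standing in for base::Lock and all Chromium types dropped:

#include <cstdint>
#include <mutex>

// Hypothetical stand-in for the order-number bookkeeping in SyncPointOrderData.
struct OrderBookkeeping {
  std::mutex lock;
  bool destroyed = false;
  uint32_t processed_order_num = 0;
  uint32_t unprocessed_order_num = 0;

  // Returns true if a wait issued at |wait_order_num| may still be satisfied,
  // i.e. enqueuing it would keep processed < wait_order_num <= unprocessed.
  bool CanEnqueueWait(uint32_t wait_order_num) {
    std::lock_guard<std::mutex> auto_lock(lock);
    if (destroyed)
      return false;
    return wait_order_num > processed_order_num &&
           wait_order_num <= unprocessed_order_num;
  }
};

int main() {
  OrderBookkeeping book;
  book.unprocessed_order_num = 5;
  return book.CanEnqueueWait(3) ? 0 : 1;  // Valid: 0 < 3 <= 5.
}
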
139 139
140 // Internal state for sync point clients.
140 class GPU_EXPORT SyncPointClientState 141 class GPU_EXPORT SyncPointClientState
141 : public base::RefCountedThreadSafe<SyncPointClientState> { 142 : public base::RefCountedThreadSafe<SyncPointClientState> {
142 public: 143 public:
143 scoped_refptr<SyncPointOrderData> order_data() { return order_data_; } 144 SyncPointClientState(scoped_refptr<SyncPointOrderData> order_data);
jbauman 2017/03/02 00:31:48 explicit
sunnyps 2017/03/02 01:26:44 Done.
144 145
145 bool IsFenceSyncReleased(uint64_t release) { 146 bool IsFenceSyncReleased(uint64_t release);
146 return release <= fence_sync_release();
147 }
148 147
149 uint64_t fence_sync_release() { 148 // Queues the callback to be called if the release is valid. If the release
 150 base::AutoLock auto_lock(fence_sync_lock_); 149 // is invalid this function will return false and the callback will never
151 return fence_sync_release_; 150 // be called.
152 } 151 bool WaitForRelease(uint64_t release,
152 uint32_t wait_order_num,
153 const base::Closure& callback);
154
155 // Releases a fence sync and all fence syncs below.
156 void ReleaseFenceSync(uint64_t release);
157
158 // Does not release the fence sync, but releases callbacks waiting on that
159 // fence sync.
160 void EnsureWaitReleased(uint64_t release, const base::Closure& callback);
153 161
154 private: 162 private:
155 friend class base::RefCountedThreadSafe<SyncPointClientState>; 163 friend class base::RefCountedThreadSafe<SyncPointClientState>;
156 friend class SyncPointClient;
157 friend class SyncPointOrderData;
158 164
159 struct ReleaseCallback { 165 struct ReleaseCallback {
160 uint64_t release_count; 166 uint64_t release_count;
161 base::Closure callback_closure; 167 base::Closure callback_closure;
162 168
163 ReleaseCallback(uint64_t release, const base::Closure& callback); 169 ReleaseCallback(uint64_t release, const base::Closure& callback);
164 ReleaseCallback(const ReleaseCallback& other); 170 ReleaseCallback(const ReleaseCallback& other);
165 ~ReleaseCallback(); 171 ~ReleaseCallback();
166 172
167 bool operator>(const ReleaseCallback& rhs) const { 173 bool operator>(const ReleaseCallback& rhs) const {
168 return release_count > rhs.release_count; 174 return release_count > rhs.release_count;
169 } 175 }
170 }; 176 };
171 typedef std::priority_queue<ReleaseCallback, 177 typedef std::priority_queue<ReleaseCallback,
172 std::vector<ReleaseCallback>, 178 std::vector<ReleaseCallback>,
173 std::greater<ReleaseCallback>> 179 std::greater<ReleaseCallback>>
174 ReleaseCallbackQueue; 180 ReleaseCallbackQueue;
175 181
176 SyncPointClientState(scoped_refptr<SyncPointOrderData> order_data);
177 ~SyncPointClientState(); 182 ~SyncPointClientState();
178 183
179 // Queues the callback to be called if the release is valid. If the release
180 // is invalid this function will return False and the callback will never
181 // be called.
182 bool WaitForRelease(CommandBufferNamespace namespace_id,
183 CommandBufferId client_id,
184 uint32_t wait_order_num,
185 uint64_t release,
186 const base::Closure& callback);
187
188 // Releases a fence sync and all fence syncs below.
189 void ReleaseFenceSync(uint64_t release);
190
191 // Does not release the fence sync, but releases callbacks waiting on that
192 // fence sync.
193 void EnsureWaitReleased(uint64_t release, const base::Closure& callback);
194
195 typedef base::Callback<void(CommandBufferNamespace, CommandBufferId)>
196 OnWaitCallback;
197 void SetOnWaitCallback(const OnWaitCallback& callback);
198
199 // Global order data where releases will originate from. 184 // Global order data where releases will originate from.
200 scoped_refptr<SyncPointOrderData> order_data_; 185 scoped_refptr<SyncPointOrderData> order_data_;
201 186
202 // Protects fence_sync_release_, fence_callback_queue_. 187 // Protects fence_sync_release_, fence_callback_queue_.
203 base::Lock fence_sync_lock_; 188 base::Lock fence_sync_lock_;
204 189
205 // Current fence sync release that has been signaled. 190 // Current fence sync release that has been signaled.
206 uint64_t fence_sync_release_; 191 uint64_t fence_sync_release_ = 0;
207 192
208 // In well defined fence sync operations, fence syncs are released in order 193 // In well defined fence sync operations, fence syncs are released in order
209 // so simply having a priority queue for callbacks is enough. 194 // so simply having a priority queue for callbacks is enough.
210 ReleaseCallbackQueue release_callback_queue_; 195 ReleaseCallbackQueue release_callback_queue_;
211 196
212 // Called when a release callback is queued.
213 OnWaitCallback on_wait_callback_;
214
215 DISALLOW_COPY_AND_ASSIGN(SyncPointClientState); 197 DISALLOW_COPY_AND_ASSIGN(SyncPointClientState);
216 }; 198 };
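
With the reshuffled interface, SyncPointClientState now exposes the whole wait/release cycle directly: WaitForRelease queues a callback keyed by release count, ReleaseFenceSync advances fence_sync_release_ and drains everything at or below it, and EnsureWaitReleased handles a wait whose order number passed without a release. The real logic lives in sync_point_manager.cc and is not shown in this diff; the following is a simplified standalone sketch of the queue-and-drain part only, with std::function and std::mutex in place of base::Closure and base::Lock, and without order-number validation:

#include <algorithm>
#include <cstdint>
#include <functional>
#include <mutex>
#include <queue>
#include <vector>

class FakeClientState {
 public:
  // Queues |callback| until |release| is reached; runs it immediately if the
  // release already happened.
  void WaitForRelease(uint64_t release, std::function<void()> callback) {
    std::unique_lock<std::mutex> lock(lock_);
    if (release <= fence_sync_release_) {
      lock.unlock();
      callback();
      return;
    }
    pending_.push({release, std::move(callback)});
  }

  // Releases up to |release| and runs every callback at or below it.
  void ReleaseFenceSync(uint64_t release) {
    std::vector<std::function<void()>> ready;
    {
      std::lock_guard<std::mutex> lock(lock_);
      fence_sync_release_ = std::max(fence_sync_release_, release);
      while (!pending_.empty() &&
             pending_.top().release <= fence_sync_release_) {
        ready.push_back(pending_.top().callback);
        pending_.pop();
      }
    }
    for (auto& cb : ready)  // Run outside the lock to avoid re-entrancy issues.
      cb();
  }

 private:
  struct Pending {
    uint64_t release;
    std::function<void()> callback;
    bool operator>(const Pending& rhs) const { return release > rhs.release; }
  };

  std::mutex lock_;
  uint64_t fence_sync_release_ = 0;
  std::priority_queue<Pending, std::vector<Pending>, std::greater<Pending>>
      pending_;
};

In well-ordered operation releases arrive in increasing order, so a single min-heap keyed on release_count is sufficient, which matches the comment above release_callback_queue_.
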
217 199
218 class GPU_EXPORT SyncPointClient { 200 class GPU_EXPORT SyncPointClient {
219 public: 201 public:
220 ~SyncPointClient();
221
222 scoped_refptr<SyncPointClientState> client_state() { return client_state_; }
223
224 // Wait for a release count to be reached on a SyncPointClientState. If this
225 // function returns false, that means the wait was invalid. Otherwise if it
226 // returns True it means the release was valid. In the case where the release
227 // is valid but has happened already, it will still return true. In all cases
228 // wait_complete_callback will be called eventually. The callback function
229 // may be called on another thread so it should be thread-safe. For
230 // convenience, another non-threadsafe version is defined below where you
231 // can supply a task runner.
232 bool Wait(SyncPointClientState* release_state,
233 uint64_t release_count,
234 const base::Closure& wait_complete_callback);
235
236 bool WaitNonThreadSafe(SyncPointClientState* release_state,
237 uint64_t release_count,
238 scoped_refptr<base::SingleThreadTaskRunner> runner,
239 const base::Closure& wait_complete_callback);
240
241 // Unordered waits are waits which do not occur within the global order number
242 // processing order (IE. Not between the corresponding
243 // SyncPointOrderData::BeginProcessingOrderNumber() and
244 // SyncPointOrderData::FinishProcessingOrderNumber() calls). Because fence
245 // sync releases must occur within a corresponding order number, these waits
246 // cannot deadlock because they can never depend on any fence sync releases.
247 // This is useful for IPC messages that may be processed out of order with
248 // respect to regular command buffer processing.
249 bool WaitOutOfOrder(SyncPointClientState* release_state,
250 uint64_t release_count,
251 const base::Closure& wait_complete_callback);
252
253 bool WaitOutOfOrderNonThreadSafe(
254 SyncPointClientState* release_state,
255 uint64_t release_count,
256 scoped_refptr<base::SingleThreadTaskRunner> runner,
257 const base::Closure& wait_complete_callback);
258
259 void ReleaseFenceSync(uint64_t release);
260
261 // This callback is called with the namespace and id of the waiting client
262 // when a release callback is queued. The callback is called on the thread
263 // where the Wait... happens and synchronization is the responsibility of the
264 // caller.
265 typedef base::Callback<void(CommandBufferNamespace, CommandBufferId)>
266 OnWaitCallback;
267 void SetOnWaitCallback(const OnWaitCallback& callback);
268
269 private:
270 friend class SyncPointManager;
271
272 SyncPointClient();
273 SyncPointClient(SyncPointManager* sync_point_manager, 202 SyncPointClient(SyncPointManager* sync_point_manager,
274 scoped_refptr<SyncPointOrderData> order_data, 203 scoped_refptr<SyncPointOrderData> order_data,
275 CommandBufferNamespace namespace_id, 204 CommandBufferNamespace namespace_id,
276 CommandBufferId client_id); 205 CommandBufferId command_buffer_id);
206 ~SyncPointClient();
277 207
208 // This behaves similarly to SyncPointManager::Wait but uses the order data
209 // to guarantee no deadlocks with other clients.
210 bool Wait(const SyncToken& sync_token, const base::Closure& callback);
211
212 // Like Wait but runs the callback on the given task runner's thread.
213 bool WaitNonThreadSafe(
214 const SyncToken& sync_token,
215 scoped_refptr<base::SingleThreadTaskRunner> task_runner,
216 const base::Closure& callback);
217
218 // Release fence sync and run queued callbacks.
219 void ReleaseFenceSync(uint64_t release);
220
221 private:
278 // Sync point manager is guaranteed to exist in the lifetime of the client. 222 // Sync point manager is guaranteed to exist in the lifetime of the client.
279 SyncPointManager* sync_point_manager_; 223 SyncPointManager* const sync_point_manager_;
280 224
281 // Keep the state that is sharable across multiple threads. 225 scoped_refptr<SyncPointOrderData> order_data_;
226
282 scoped_refptr<SyncPointClientState> client_state_; 227 scoped_refptr<SyncPointClientState> client_state_;
283 228
284 // Unique namespace/client id pair for this sync point client. 229 // Unique namespace/client id pair for this sync point client.
285 const CommandBufferNamespace namespace_id_; 230 const CommandBufferNamespace namespace_id_;
286 const CommandBufferId client_id_; 231 const CommandBufferId command_buffer_id_;
287 232
288 DISALLOW_COPY_AND_ASSIGN(SyncPointClient); 233 DISALLOW_COPY_AND_ASSIGN(SyncPointClient);
289 }; 234 };
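
A hedged callsite sketch of the new, smaller SyncPointClient surface: the helper function and the resume closure are hypothetical, and the sync token is assumed to have been received from the client side of the command buffer.

#include "base/callback.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/sync_point_manager.h"

// Hypothetical helper: |client| is a live gpu::SyncPointClient for this
// command buffer and |resume_closure| resumes its command processing.
void WaitOnSyncToken(gpu::SyncPointClient* client,
                     const gpu::SyncToken& sync_token,
                     const base::Closure& resume_closure) {
  // Per the header comments, an invalid wait never runs the callback, so
  // resume immediately instead of stalling forever.
  if (!client->Wait(sync_token, resume_closure))
    resume_closure.Run();
}
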
290 235
291 // This class manages the sync points, which allow cross-channel 236 // This class manages the sync points, which allow cross-channel
292 // synchronization. 237 // synchronization.
293 class GPU_EXPORT SyncPointManager { 238 class GPU_EXPORT SyncPointManager {
294 public: 239 public:
295 explicit SyncPointManager(bool allow_threaded_wait); 240 SyncPointManager();
296 ~SyncPointManager(); 241 ~SyncPointManager();
297 242
298 // Creates/Destroy a sync point client which message processors should hold. 243 // Returns true if the sync token has been released or if the command buffer
299 std::unique_ptr<SyncPointClient> CreateSyncPointClient( 244 // does not exist.
300 scoped_refptr<SyncPointOrderData> order_data, 245 bool IsSyncTokenReleased(const SyncToken& sync_token);
301 CommandBufferNamespace namespace_id,
302 CommandBufferId client_id);
303 246
304 // Creates a sync point client which cannot process order numbers but can only 247 // If the wait is valid (sync token hasn't been processed or command buffer
305 // Wait out of order. 248 // does not exist), the callback is queued to run when the sync point is
306 std::unique_ptr<SyncPointClient> CreateSyncPointClientWaiter(); 249 // released. If the wait is invalid, the callback is NOT run. The callback
250 // runs on the thread the sync point is released. Clients should use
251 // SyncPointClient::Wait because that uses order data to prevent deadlocks.
252 bool Wait(const SyncToken& sync_token,
253 uint32_t wait_order_num,
254 const base::Closure& callback);
307 255
308 // Finds the state of an already created sync point client. 256 // Like Wait but runs the callback on the given task runner's thread.
257 bool WaitNonThreadSafe(
258 const SyncToken& sync_token,
259 uint32_t wait_order_num,
260 scoped_refptr<base::SingleThreadTaskRunner> task_runner,
261 const base::Closure& callback);
262
263 // WaitOutOfOrder allows waiting for a sync token indefinitely, so it
264 // should be used with trusted sync tokens only.
265 bool WaitOutOfOrder(const SyncToken& trusted_sync_token,
266 const base::Closure& callback);
267
268 // Like WaitOutOfOrder but runs the callback on the given task runner's
269 // thread.
270 bool WaitOutOfOrderNonThreadSafe(
271 const SyncToken& trusted_sync_token,
272 scoped_refptr<base::SingleThreadTaskRunner> task_runner,
273 const base::Closure& callback);
274
275 // Used by SyncPointClient.
276 void RegisterSyncPointClient(scoped_refptr<SyncPointClientState> client_state,
277 CommandBufferNamespace namespace_id,
278 CommandBufferId command_buffer_id);
279
280 void DeregisterSyncPointClient(CommandBufferNamespace namespace_id,
281 CommandBufferId command_buffer_id);
282
283 // Used by SyncPointOrderData.
284 uint32_t GenerateOrderNumber();
285
286 private:
287 using ClientStateMap = std::unordered_map<CommandBufferId,
288 scoped_refptr<SyncPointClientState>,
289 CommandBufferId::Hasher>;
290
309 scoped_refptr<SyncPointClientState> GetSyncPointClientState( 291 scoped_refptr<SyncPointClientState> GetSyncPointClientState(
310 CommandBufferNamespace namespace_id, 292 CommandBufferNamespace namespace_id,
311 CommandBufferId client_id); 293 CommandBufferId command_buffer_id);
312
313 private:
314 friend class SyncPointClient;
315 friend class SyncPointOrderData;
316
317 using ClientMap = std::unordered_map<CommandBufferId,
318 SyncPointClient*,
319 CommandBufferId::Hasher>;
320
321 uint32_t GenerateOrderNumber();
322 void DestroySyncPointClient(CommandBufferNamespace namespace_id,
323 CommandBufferId client_id);
324 294
325 // Order number is global for all clients. 295 // Order number is global for all clients.
326 base::AtomicSequenceNumber global_order_num_; 296 base::AtomicSequenceNumber global_order_num_;
327 297
328 // Client map holds a map of clients id to client for each namespace. 298 // Client map holds a map of clients id to client for each namespace.
329 base::Lock client_maps_lock_; 299 base::Lock client_state_maps_lock_;
330 ClientMap client_maps_[NUM_COMMAND_BUFFER_NAMESPACES]; 300 ClientStateMap client_state_maps_[NUM_COMMAND_BUFFER_NAMESPACES];
331 301
332 DISALLOW_COPY_AND_ASSIGN(SyncPointManager); 302 DISALLOW_COPY_AND_ASSIGN(SyncPointManager);
333 }; 303 };
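
Finally, the point of the change (the issue title: allow waiting on sync tokens without a sync token client) shows up in the manager-level API above: code that only holds a SyncToken can query or wait on the manager directly. A hedged usage sketch follows; the host helper and its closure are hypothetical, and the token is assumed to be verified/trusted as the WaitOutOfOrder comment requires:

#include "base/callback.h"
#include "gpu/command_buffer/common/sync_token.h"
#include "gpu/command_buffer/service/sync_point_manager.h"

// Hypothetical helper for a host object that has no SyncPointClient of its
// own but needs to know when a trusted sync token is released.
void WaitForTrustedToken(gpu::SyncPointManager* sync_point_manager,
                         const gpu::SyncToken& trusted_sync_token,
                         const base::Closure& on_released) {
  if (sync_point_manager->IsSyncTokenReleased(trusted_sync_token)) {
    on_released.Run();
    return;
  }
  // WaitOutOfOrder is not bounded by an order number, so it is reserved for
  // trusted tokens. If it reports the wait as invalid (for example, the token
  // was released in the meantime), the callback will not run, so run it here.
  if (!sync_point_manager->WaitOutOfOrder(trusted_sync_token, on_released))
    on_released.Run();
}
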
334 304
335 } // namespace gpu 305 } // namespace gpu
336 306
337 #endif // GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_ 307 #endif // GPU_COMMAND_BUFFER_SERVICE_SYNC_POINT_MANAGER_H_