Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(150)

Side by Side Diff: gpu/command_buffer/client/cmd_buffer_helper.cc

Issue 2550583002: gpu: Thread-safe command buffer state lookup. (Closed)
Patch Set: jbauman's review Created 4 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 // This file contains the implementation of the command buffer helper class. 5 // This file contains the implementation of the command buffer helper class.
6 6
7 #include "gpu/command_buffer/client/cmd_buffer_helper.h" 7 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
8 8
9 #include <stdint.h> 9 #include <stdint.h>
10 10
11 #include <algorithm> 11 #include <algorithm>
12 #include "base/logging.h" 12 #include "base/logging.h"
13 #include "base/strings/stringprintf.h" 13 #include "base/strings/stringprintf.h"
14 #include "base/threading/thread_task_runner_handle.h" 14 #include "base/threading/thread_task_runner_handle.h"
15 #include "base/time/time.h" 15 #include "base/time/time.h"
16 #include "base/trace_event/memory_allocator_dump.h" 16 #include "base/trace_event/memory_allocator_dump.h"
17 #include "base/trace_event/memory_dump_manager.h" 17 #include "base/trace_event/memory_dump_manager.h"
18 #include "base/trace_event/process_memory_dump.h" 18 #include "base/trace_event/process_memory_dump.h"
19 #include "base/trace_event/trace_event.h" 19 #include "base/trace_event/trace_event.h"
20 #include "gpu/command_buffer/common/buffer.h" 20 #include "gpu/command_buffer/common/buffer.h"
21 #include "gpu/command_buffer/common/command_buffer.h" 21 #include "gpu/command_buffer/common/command_buffer.h"
22 #include "gpu/command_buffer/common/constants.h" 22 #include "gpu/command_buffer/common/constants.h"
23 23
24 namespace gpu { 24 namespace gpu {
25 25
26 CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer) 26 CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
27 : command_buffer_(command_buffer), 27 : command_buffer_(command_buffer),
28 ring_buffer_id_(-1), 28 ring_buffer_id_(-1),
29 ring_buffer_size_(0), 29 ring_buffer_size_(0),
30 entries_(NULL), 30 entries_(nullptr),
31 total_entry_count_(0), 31 total_entry_count_(0),
32 immediate_entry_count_(0), 32 immediate_entry_count_(0),
33 token_(0), 33 token_(0),
34 put_(0), 34 put_(0),
35 last_put_sent_(0), 35 last_put_sent_(0),
36 cached_last_token_read_(0),
37 cached_get_offset_(0),
36 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK) 38 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
37 commands_issued_(0), 39 commands_issued_(0),
38 #endif 40 #endif
39 usable_(true), 41 usable_(true),
40 context_lost_(false), 42 context_lost_(false),
41 flush_automatically_(true), 43 flush_automatically_(true),
42 flush_generation_(0) { 44 flush_generation_(0) {
43 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
44 // Don't register a dump provider in these cases.
45 // TODO(ericrk): Get this working in Android Webview. crbug.com/517156
46 if (base::ThreadTaskRunnerHandle::IsSet()) {
47 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
48 this, "gpu::CommandBufferHelper", base::ThreadTaskRunnerHandle::Get());
49 }
50 } 45 }
51 46
52 void CommandBufferHelper::SetAutomaticFlushes(bool enabled) { 47 void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
53 flush_automatically_ = enabled; 48 flush_automatically_ = enabled;
54 CalcImmediateEntries(0); 49 CalcImmediateEntries(0);
55 } 50 }
56 51
57 bool CommandBufferHelper::IsContextLost() { 52 bool CommandBufferHelper::IsContextLost() {
58 if (!context_lost_) { 53 if (!context_lost_)
59 context_lost_ = error::IsError(command_buffer()->GetLastError()); 54 context_lost_ = error::IsError(command_buffer()->GetLastState().error);
60 }
61 return context_lost_; 55 return context_lost_;
62 } 56 }
63 57
64 void CommandBufferHelper::CalcImmediateEntries(int waiting_count) { 58 void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
65 DCHECK_GE(waiting_count, 0); 59 DCHECK_GE(waiting_count, 0);
66 60
67 // Check if usable & allocated. 61 // Check if usable & allocated.
68 if (!usable() || !HaveRingBuffer()) { 62 if (!usable() || !HaveRingBuffer()) {
69 immediate_entry_count_ = 0; 63 immediate_entry_count_ = 0;
70 return; 64 return;
71 } 65 }
72 66
73 // Get maximum safe contiguous entries. 67 // Get maximum safe contiguous entries.
74 const int32_t curr_get = get_offset(); 68 const int32_t curr_get = cached_get_offset_;
75 if (curr_get > put_) { 69 if (curr_get > put_) {
76 immediate_entry_count_ = curr_get - put_ - 1; 70 immediate_entry_count_ = curr_get - put_ - 1;
77 } else { 71 } else {
78 immediate_entry_count_ = 72 immediate_entry_count_ =
79 total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0); 73 total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
80 } 74 }
81 75
82 // Limit entry count to force early flushing. 76 // Limit entry count to force early flushing.
83 if (flush_automatically_) { 77 if (flush_automatically_) {
84 int32_t limit = 78 int32_t limit =
(...skipping 24 matching lines...) Expand all
109 103
110 if (HaveRingBuffer()) { 104 if (HaveRingBuffer()) {
111 return true; 105 return true;
112 } 106 }
113 107
114 int32_t id = -1; 108 int32_t id = -1;
115 scoped_refptr<Buffer> buffer = 109 scoped_refptr<Buffer> buffer =
116 command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id); 110 command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
117 if (id < 0) { 111 if (id < 0) {
118 ClearUsable(); 112 ClearUsable();
119 DCHECK(error::IsError(command_buffer()->GetLastError())); 113 DCHECK(context_lost_);
120 return false; 114 return false;
121 } 115 }
122 116
123 ring_buffer_ = buffer; 117 ring_buffer_ = buffer;
124 ring_buffer_id_ = id; 118 ring_buffer_id_ = id;
125 command_buffer_->SetGetBuffer(id); 119 command_buffer_->SetGetBuffer(id);
126 entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory()); 120 entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
127 total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry); 121 total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
128 // Call to SetGetBuffer(id) above resets get and put offsets to 0. 122 // Call to SetGetBuffer(id) above resets get and put offsets to 0.
129 // No need to query it through IPC. 123 // No need to query it through IPC.
130 put_ = 0; 124 put_ = 0;
125 cached_get_offset_ = 0;
131 CalcImmediateEntries(0); 126 CalcImmediateEntries(0);
132 return true; 127 return true;
133 } 128 }
134 129
135 void CommandBufferHelper::FreeResources() { 130 void CommandBufferHelper::FreeResources() {
136 if (HaveRingBuffer()) { 131 if (HaveRingBuffer()) {
137 command_buffer_->DestroyTransferBuffer(ring_buffer_id_); 132 command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
138 ring_buffer_id_ = -1; 133 ring_buffer_id_ = -1;
139 CalcImmediateEntries(0); 134 CalcImmediateEntries(0);
140 entries_ = nullptr; 135 entries_ = nullptr;
141 ring_buffer_ = nullptr; 136 ring_buffer_ = nullptr;
142 } 137 }
143 } 138 }
144 139
145 void CommandBufferHelper::FreeRingBuffer() { 140 void CommandBufferHelper::FreeRingBuffer() {
146 CHECK((put_ == get_offset()) || 141 CHECK((put_ == cached_get_offset_) ||
147 error::IsError(command_buffer_->GetLastState().error)); 142 error::IsError(command_buffer_->GetLastState().error));
148 FreeResources(); 143 FreeResources();
149 } 144 }
150 145
151 bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) { 146 bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) {
152 ring_buffer_size_ = ring_buffer_size; 147 ring_buffer_size_ = ring_buffer_size;
153 return AllocateRingBuffer(); 148 return AllocateRingBuffer();
154 } 149 }
155 150
156 CommandBufferHelper::~CommandBufferHelper() { 151 CommandBufferHelper::~CommandBufferHelper() {
157 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
158 this);
159 FreeResources(); 152 FreeResources();
160 } 153 }
161 154
155 void CommandBufferHelper::UpdateCachedState(const CommandBuffer::State& state) {
156 cached_get_offset_ = state.get_offset;
157 cached_last_token_read_ = state.token;
158 context_lost_ = error::IsError(state.error);
159 }
160
162 bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) { 161 bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
163 DCHECK(start >= 0 && start <= total_entry_count_); 162 DCHECK(start >= 0 && start <= total_entry_count_);
164 DCHECK(end >= 0 && end <= total_entry_count_); 163 DCHECK(end >= 0 && end <= total_entry_count_);
165 if (!usable()) { 164 if (!usable()) {
166 return false; 165 return false;
167 } 166 }
168 command_buffer_->WaitForGetOffsetInRange(start, end); 167 CommandBuffer::State last_state =
169 return command_buffer_->GetLastError() == gpu::error::kNoError; 168 command_buffer_->WaitForGetOffsetInRange(start, end);
169 UpdateCachedState(last_state);
170 return !context_lost_;
170 } 171 }
171 172
172 void CommandBufferHelper::Flush() { 173 void CommandBufferHelper::Flush() {
173 // Wrap put_ before flush. 174 // Wrap put_ before flush.
174 if (put_ == total_entry_count_) 175 if (put_ == total_entry_count_)
175 put_ = 0; 176 put_ = 0;
176 177
177 if (usable()) { 178 if (usable()) {
178 last_flush_time_ = base::TimeTicks::Now(); 179 last_flush_time_ = base::TimeTicks::Now();
179 last_put_sent_ = put_; 180 last_put_sent_ = put_;
(...skipping 26 matching lines...) Expand all
206 #endif 207 #endif
207 208
208 // Calls Flush() and then waits until the buffer is empty. Break early if the 209 // Calls Flush() and then waits until the buffer is empty. Break early if the
209 // error is set. 210 // error is set.
210 bool CommandBufferHelper::Finish() { 211 bool CommandBufferHelper::Finish() {
211 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish"); 212 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
212 if (!usable()) { 213 if (!usable()) {
213 return false; 214 return false;
214 } 215 }
215 // If there is no work just exit. 216 // If there is no work just exit.
216 if (put_ == get_offset()) { 217 if (put_ == cached_get_offset_) {
217 return true; 218 return true;
218 } 219 }
219 DCHECK(HaveRingBuffer() || 220 DCHECK(HaveRingBuffer() ||
220 error::IsError(command_buffer_->GetLastState().error)); 221 error::IsError(command_buffer_->GetLastState().error));
221 Flush(); 222 Flush();
222 if (!WaitForGetOffsetInRange(put_, put_)) 223 if (!WaitForGetOffsetInRange(put_, put_))
223 return false; 224 return false;
224 DCHECK_EQ(get_offset(), put_); 225 DCHECK_EQ(cached_get_offset_, put_);
225 226
226 CalcImmediateEntries(0); 227 CalcImmediateEntries(0);
227 228
228 return true; 229 return true;
229 } 230 }
230 231
231 // Inserts a new token into the command stream. It uses an increasing value 232 // Inserts a new token into the command stream. It uses an increasing value
232 // scheme so that we don't lose tokens (a token has passed if the current token 233 // scheme so that we don't lose tokens (a token has passed if the current token
233 // value is higher than that token). Calls Finish() if the token value wraps, 234 // value is higher than that token). Calls Finish() if the token value wraps,
234 // which will be rare. 235 // which will be rare.
235 int32_t CommandBufferHelper::InsertToken() { 236 int32_t CommandBufferHelper::InsertToken() {
236 AllocateRingBuffer(); 237 AllocateRingBuffer();
237 if (!usable()) { 238 if (!usable()) {
238 return token_; 239 return token_;
239 } 240 }
240 DCHECK(HaveRingBuffer()); 241 DCHECK(HaveRingBuffer());
241 // Increment token as 31-bit integer. Negative values are used to signal an 242 // Increment token as 31-bit integer. Negative values are used to signal an
242 // error. 243 // error.
243 token_ = (token_ + 1) & 0x7FFFFFFF; 244 token_ = (token_ + 1) & 0x7FFFFFFF;
244 cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>(); 245 cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
245 if (cmd) { 246 if (cmd) {
246 cmd->Init(token_); 247 cmd->Init(token_);
247 if (token_ == 0) { 248 if (token_ == 0) {
248 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)"); 249 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
249 // we wrapped 250 bool finished = Finish(); // we wrapped
250 Finish(); 251 DCHECK(!finished || (cached_last_token_read_ == 0));
251 DCHECK_EQ(token_, last_token_read());
252 } 252 }
253 } 253 }
254 return token_; 254 return token_;
255 } 255 }
256 256
257 bool CommandBufferHelper::HasTokenPassed(int32_t token) {
258 // If token_ wrapped around we Finish'd.
259 if (token > token_)
260 return true;
261 // Don't update state if we don't have to.
262 if (token <= cached_last_token_read_)
263 return true;
264 CommandBuffer::State last_state = command_buffer_->GetLastState();
265 UpdateCachedState(last_state);
266 return token <= cached_last_token_read_;
267 }
268
257 // Waits until the current token value is greater or equal to the value passed 269 // Waits until the current token value is greater or equal to the value passed
258 // in argument. 270 // in argument.
259 void CommandBufferHelper::WaitForToken(int32_t token) { 271 void CommandBufferHelper::WaitForToken(int32_t token) {
260 if (!usable() || !HaveRingBuffer()) { 272 if (!usable() || !HaveRingBuffer()) {
261 return; 273 return;
262 } 274 }
263 // Return immediately if corresponding InsertToken failed. 275 // Return immediately if corresponding InsertToken failed.
264 if (token < 0) 276 if (token < 0)
265 return; 277 return;
266 if (token > token_) return; // we wrapped 278 if (token > token_)
267 if (last_token_read() >= token) 279 return; // we wrapped
280 if (cached_last_token_read_ >= token)
281 return;
282 UpdateCachedState(command_buffer_->GetLastState());
283 if (cached_last_token_read_ >= token)
268 return; 284 return;
269 Flush(); 285 Flush();
270 command_buffer_->WaitForTokenInRange(token, token_); 286 CommandBuffer::State last_state =
287 command_buffer_->WaitForTokenInRange(token, token_);
288 UpdateCachedState(last_state);
271 } 289 }
272 290
273 // Waits for available entries, basically waiting until get >= put + count + 1. 291 // Waits for available entries, basically waiting until get >= put + count + 1.
274 // It actually waits for contiguous entries, so it may need to wrap the buffer 292 // It actually waits for contiguous entries, so it may need to wrap the buffer
 275 // around, adding noops. Thus this function may change the value of put_. The 293 // around, adding noops. Thus this function may change the value of put_. The
 276 // function will return early if an error occurs, in which case the requested 294 // function will return early if an error occurs, in which case the requested
277 // space may not be available. 295 // space may not be available.
278 void CommandBufferHelper::WaitForAvailableEntries(int32_t count) { 296 void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
279 AllocateRingBuffer(); 297 AllocateRingBuffer();
280 if (!usable()) { 298 if (!usable()) {
281 return; 299 return;
282 } 300 }
283 DCHECK(HaveRingBuffer()); 301 DCHECK(HaveRingBuffer());
284 DCHECK(count < total_entry_count_); 302 DCHECK(count < total_entry_count_);
285 if (put_ + count > total_entry_count_) { 303 if (put_ + count > total_entry_count_) {
286 // There's not enough room between the current put and the end of the 304 // There's not enough room between the current put and the end of the
287 // buffer, so we need to wrap. We will add noops all the way to the end, 305 // buffer, so we need to wrap. We will add noops all the way to the end,
288 // but we need to make sure get wraps first, actually that get is 1 or 306 // but we need to make sure get wraps first, actually that get is 1 or
289 // more (since put will wrap to 0 after we add the noops). 307 // more (since put will wrap to 0 after we add the noops).
290 DCHECK_LE(1, put_); 308 DCHECK_LE(1, put_);
291 int32_t curr_get = get_offset(); 309 int32_t curr_get = cached_get_offset_;
292 if (curr_get > put_ || curr_get == 0) { 310 if (curr_get > put_ || curr_get == 0) {
293 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); 311 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
294 Flush(); 312 Flush();
295 if (!WaitForGetOffsetInRange(1, put_)) 313 if (!WaitForGetOffsetInRange(1, put_))
296 return; 314 return;
297 curr_get = get_offset(); 315 curr_get = cached_get_offset_;
298 DCHECK_LE(curr_get, put_); 316 DCHECK_LE(curr_get, put_);
299 DCHECK_NE(0, curr_get); 317 DCHECK_NE(0, curr_get);
300 } 318 }
301 // Insert Noops to fill out the buffer. 319 // Insert Noops to fill out the buffer.
302 int32_t num_entries = total_entry_count_ - put_; 320 int32_t num_entries = total_entry_count_ - put_;
303 while (num_entries > 0) { 321 while (num_entries > 0) {
304 int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries); 322 int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
305 cmd::Noop::Set(&entries_[put_], num_to_skip); 323 cmd::Noop::Set(&entries_[put_], num_to_skip);
306 put_ += num_to_skip; 324 put_ += num_to_skip;
307 num_entries -= num_to_skip; 325 num_entries -= num_to_skip;
(...skipping 13 matching lines...) Expand all
321 if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_, 339 if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_,
322 put_)) 340 put_))
323 return; 341 return;
324 CalcImmediateEntries(count); 342 CalcImmediateEntries(count);
325 DCHECK_GE(immediate_entry_count_, count); 343 DCHECK_GE(immediate_entry_count_, count);
326 } 344 }
327 } 345 }
328 } 346 }
329 347
330 int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const { 348 int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
331 int32_t current_get_offset = get_offset(); 349 int32_t current_get_offset = cached_get_offset_;
332 if (current_get_offset > put_) { 350 if (current_get_offset > put_) {
333 return current_get_offset - put_ - 1; 351 return current_get_offset - put_ - 1;
334 } else { 352 } else {
335 return current_get_offset + total_entry_count_ - put_ - 353 return current_get_offset + total_entry_count_ - put_ -
336 (current_get_offset == 0 ? 1 : 0); 354 (current_get_offset == 0 ? 1 : 0);
337 } 355 }
338 } 356 }
339 357
340 bool CommandBufferHelper::OnMemoryDump( 358 bool CommandBufferHelper::OnMemoryDump(
341 const base::trace_event::MemoryDumpArgs& args, 359 const base::trace_event::MemoryDumpArgs& args,
(...skipping 20 matching lines...) Expand all
362 auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_); 380 auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
363 const int kImportance = 2; 381 const int kImportance = 2;
364 pmd->CreateSharedGlobalAllocatorDump(guid); 382 pmd->CreateSharedGlobalAllocatorDump(guid);
365 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); 383 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
366 } 384 }
367 385
368 return true; 386 return true;
369 } 387 }
370 388
371 } // namespace gpu 389 } // namespace gpu
OLDNEW
« no previous file with comments | « gpu/command_buffer/client/cmd_buffer_helper.h ('k') | gpu/command_buffer/client/cmd_buffer_helper_test.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698