Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(934)

Side by Side Diff: gpu/command_buffer/client/cmd_buffer_helper.cc

Issue 2550583002: gpu: Thread-safe command buffer state lookup. (Closed)
Patch Set: state_lock_ -> last_state_lock_ Created 4 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 // This file contains the implementation of the command buffer helper class. 5 // This file contains the implementation of the command buffer helper class.
6 6
7 #include "gpu/command_buffer/client/cmd_buffer_helper.h" 7 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
8 8
9 #include <stdint.h> 9 #include <stdint.h>
10 10
11 #include <algorithm> 11 #include <algorithm>
12 #include "base/logging.h" 12 #include "base/logging.h"
13 #include "base/strings/stringprintf.h" 13 #include "base/strings/stringprintf.h"
14 #include "base/threading/thread_task_runner_handle.h" 14 #include "base/threading/thread_task_runner_handle.h"
15 #include "base/time/time.h" 15 #include "base/time/time.h"
16 #include "base/trace_event/memory_allocator_dump.h" 16 #include "base/trace_event/memory_allocator_dump.h"
17 #include "base/trace_event/memory_dump_manager.h" 17 #include "base/trace_event/memory_dump_manager.h"
18 #include "base/trace_event/process_memory_dump.h" 18 #include "base/trace_event/process_memory_dump.h"
19 #include "base/trace_event/trace_event.h" 19 #include "base/trace_event/trace_event.h"
20 #include "gpu/command_buffer/common/buffer.h" 20 #include "gpu/command_buffer/common/buffer.h"
21 #include "gpu/command_buffer/common/command_buffer.h" 21 #include "gpu/command_buffer/common/command_buffer.h"
22 #include "gpu/command_buffer/common/constants.h" 22 #include "gpu/command_buffer/common/constants.h"
23 23
24 namespace gpu { 24 namespace gpu {
25 25
// Binds the helper to |command_buffer| (not owned; must outlive the helper).
// All cached-state members start at zero; the ring buffer itself is allocated
// lazily by Initialize()/AllocateRingBuffer(), not here.
CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),   // -1 == no transfer buffer allocated yet.
      ring_buffer_size_(0),
      entries_(nullptr),
      total_entry_count_(0),
      immediate_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
      cached_last_token_read_(0),
      cached_get_offset_(0),
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
      commands_issued_(0),
#endif
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),
      flush_generation_(0) {
  // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
  // Don't register a dump provider in these cases.
  // TODO(ericrk): Get this working in Android Webview. crbug.com/517156
  if (base::ThreadTaskRunnerHandle::IsSet()) {
    base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
        this, "gpu::CommandBufferHelper", base::ThreadTaskRunnerHandle::Get());
  }
}
51 53
// Turns automatic flushing on or off. The immediate entry budget must be
// recomputed afterwards because CalcImmediateEntries() caps it lower when
// auto-flush is enabled (to force early flushes).
void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
  CalcImmediateEntries(0);
}
56 58
57 bool CommandBufferHelper::IsContextLost() { 59 bool CommandBufferHelper::IsContextLost() {
58 if (!context_lost_) { 60 if (!context_lost_)
59 context_lost_ = error::IsError(command_buffer()->GetLastError()); 61 context_lost_ = error::IsError(command_buffer()->GetLastState().error);
60 }
61 return context_lost_; 62 return context_lost_;
62 } 63 }
63 64
64 void CommandBufferHelper::CalcImmediateEntries(int waiting_count) { 65 void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
65 DCHECK_GE(waiting_count, 0); 66 DCHECK_GE(waiting_count, 0);
66 67
67 // Check if usable & allocated. 68 // Check if usable & allocated.
68 if (!usable() || !HaveRingBuffer()) { 69 if (!usable() || !HaveRingBuffer()) {
69 immediate_entry_count_ = 0; 70 immediate_entry_count_ = 0;
70 return; 71 return;
71 } 72 }
72 73
73 // Get maximum safe contiguous entries. 74 // Get maximum safe contiguous entries.
74 const int32_t curr_get = get_offset(); 75 const int32_t curr_get = cached_get_offset_;
75 if (curr_get > put_) { 76 if (curr_get > put_) {
76 immediate_entry_count_ = curr_get - put_ - 1; 77 immediate_entry_count_ = curr_get - put_ - 1;
77 } else { 78 } else {
78 immediate_entry_count_ = 79 immediate_entry_count_ =
79 total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0); 80 total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
80 } 81 }
81 82
82 // Limit entry count to force early flushing. 83 // Limit entry count to force early flushing.
83 if (flush_automatically_) { 84 if (flush_automatically_) {
84 int32_t limit = 85 int32_t limit =
(...skipping 24 matching lines...) Expand all
109 110
110 if (HaveRingBuffer()) { 111 if (HaveRingBuffer()) {
111 return true; 112 return true;
112 } 113 }
113 114
114 int32_t id = -1; 115 int32_t id = -1;
115 scoped_refptr<Buffer> buffer = 116 scoped_refptr<Buffer> buffer =
116 command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id); 117 command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
117 if (id < 0) { 118 if (id < 0) {
118 ClearUsable(); 119 ClearUsable();
119 DCHECK(error::IsError(command_buffer()->GetLastError())); 120 DCHECK(context_lost_);
120 return false; 121 return false;
121 } 122 }
122 123
123 ring_buffer_ = buffer; 124 ring_buffer_ = buffer;
124 ring_buffer_id_ = id; 125 ring_buffer_id_ = id;
125 command_buffer_->SetGetBuffer(id); 126 command_buffer_->SetGetBuffer(id);
126 entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory()); 127 entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
127 total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry); 128 total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
128 // Call to SetGetBuffer(id) above resets get and put offsets to 0. 129 // Call to SetGetBuffer(id) above resets get and put offsets to 0.
129 // No need to query it through IPC. 130 // No need to query it through IPC.
130 put_ = 0; 131 put_ = 0;
132 cached_get_offset_ = 0;
131 CalcImmediateEntries(0); 133 CalcImmediateEntries(0);
132 return true; 134 return true;
133 } 135 }
134 136
135 void CommandBufferHelper::FreeResources() { 137 void CommandBufferHelper::FreeResources() {
136 if (HaveRingBuffer()) { 138 if (HaveRingBuffer()) {
137 command_buffer_->DestroyTransferBuffer(ring_buffer_id_); 139 command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
138 ring_buffer_id_ = -1; 140 ring_buffer_id_ = -1;
139 CalcImmediateEntries(0); 141 CalcImmediateEntries(0);
140 entries_ = nullptr; 142 entries_ = nullptr;
141 ring_buffer_ = nullptr; 143 ring_buffer_ = nullptr;
142 } 144 }
143 } 145 }
144 146
145 void CommandBufferHelper::FreeRingBuffer() { 147 void CommandBufferHelper::FreeRingBuffer() {
146 CHECK((put_ == get_offset()) || 148 CHECK((put_ == cached_get_offset_) ||
147 error::IsError(command_buffer_->GetLastState().error)); 149 error::IsError(command_buffer_->GetLastState().error));
148 FreeResources(); 150 FreeResources();
149 } 151 }
150 152
// Records the requested ring buffer size and allocates the buffer.
// Returns false if the transfer buffer could not be created.
bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) {
  ring_buffer_size_ = ring_buffer_size;
  return AllocateRingBuffer();
}
155 157
CommandBufferHelper::~CommandBufferHelper() {
  // Mirror the RegisterDumpProvider() call made in the constructor.
  // Unregistering is safe even if registration was skipped (Android WebView).
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      this);
  FreeResources();
}
161 163
162 bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) { 164 bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
163 DCHECK(start >= 0 && start <= total_entry_count_); 165 DCHECK(start >= 0 && start <= total_entry_count_);
164 DCHECK(end >= 0 && end <= total_entry_count_); 166 DCHECK(end >= 0 && end <= total_entry_count_);
165 if (!usable()) { 167 if (!usable()) {
166 return false; 168 return false;
167 } 169 }
168 command_buffer_->WaitForGetOffsetInRange(start, end); 170 CommandBuffer::State last_state =
169 return command_buffer_->GetLastError() == gpu::error::kNoError; 171 command_buffer_->WaitForGetOffsetInRange(start, end);
172 cached_last_token_read_ = last_state.token;
173 cached_get_offset_ = last_state.get_offset;
174 context_lost_ = gpu::error::IsError(last_state.error);
175 return !context_lost_;
170 } 176 }
171 177
172 void CommandBufferHelper::Flush() { 178 void CommandBufferHelper::Flush() {
173 // Wrap put_ before flush. 179 // Wrap put_ before flush.
174 if (put_ == total_entry_count_) 180 if (put_ == total_entry_count_)
175 put_ = 0; 181 put_ = 0;
176 182
177 if (usable()) { 183 if (usable()) {
178 last_flush_time_ = base::TimeTicks::Now(); 184 last_flush_time_ = base::TimeTicks::Now();
179 last_put_sent_ = put_; 185 last_put_sent_ = put_;
(...skipping 26 matching lines...) Expand all
206 #endif 212 #endif
207 213
// Calls Flush() and then waits until the buffer is empty. Break early if the
// error is set.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work just exit. Uses the cached get offset, so no IPC is
  // needed when the reader has already caught up.
  if (put_ == cached_get_offset_) {
    return true;
  }
  DCHECK(HaveRingBuffer() ||
         error::IsError(command_buffer_->GetLastState().error));
  Flush();
  // WaitForGetOffsetInRange refreshes cached_get_offset_ as a side effect.
  if (!WaitForGetOffsetInRange(put_, put_))
    return false;
  DCHECK_EQ(cached_get_offset_, put_);

  CalcImmediateEntries(0);

  return true;
}
230 236
231 // Inserts a new token into the command stream. It uses an increasing value 237 // Inserts a new token into the command stream. It uses an increasing value
232 // scheme so that we don't lose tokens (a token has passed if the current token 238 // scheme so that we don't lose tokens (a token has passed if the current token
233 // value is higher than that token). Calls Finish() if the token value wraps, 239 // value is higher than that token). Calls Finish() if the token value wraps,
234 // which will be rare. 240 // which will be rare.
235 int32_t CommandBufferHelper::InsertToken() { 241 int32_t CommandBufferHelper::InsertToken() {
236 AllocateRingBuffer(); 242 AllocateRingBuffer();
237 if (!usable()) { 243 if (!usable()) {
238 return token_; 244 return token_;
239 } 245 }
240 DCHECK(HaveRingBuffer()); 246 DCHECK(HaveRingBuffer());
241 // Increment token as 31-bit integer. Negative values are used to signal an 247 // Increment token as 31-bit integer. Negative values are used to signal an
242 // error. 248 // error.
243 token_ = (token_ + 1) & 0x7FFFFFFF; 249 token_ = (token_ + 1) & 0x7FFFFFFF;
244 cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>(); 250 cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
245 if (cmd) { 251 if (cmd) {
246 cmd->Init(token_); 252 cmd->Init(token_);
247 if (token_ == 0) { 253 if (token_ == 0) {
248 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)"); 254 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
249 // we wrapped 255 bool finished = Finish(); // we wrapped
250 Finish(); 256 DCHECK(!finished || (cached_last_token_read_ == 0));
251 DCHECK_EQ(token_, last_token_read());
252 } 257 }
253 } 258 }
254 return token_; 259 return token_;
255 } 260 }
256 261
262 bool CommandBufferHelper::HasTokenPassed(int32_t token) {
263 // If token_ wrapped around we Finish'd.
264 if (token > token_)
265 return true;
266 // Don't update state if we don't have to.
267 if (token <= cached_last_token_read_)
268 return true;
269 CommandBuffer::State last_state = command_buffer_->GetLastState();
270 cached_last_token_read_ = last_state.token;
271 cached_get_offset_ = last_state.get_offset;
272 context_lost_ = gpu::error::IsError(last_state.error);
273 return token <= cached_last_token_read_;
274 }
275
257 // Waits until the current token value is greater or equal to the value passed 276 // Waits until the current token value is greater or equal to the value passed
258 // in argument. 277 // in argument.
259 void CommandBufferHelper::WaitForToken(int32_t token) { 278 void CommandBufferHelper::WaitForToken(int32_t token) {
260 if (!usable() || !HaveRingBuffer()) { 279 if (!usable() || !HaveRingBuffer()) {
261 return; 280 return;
262 } 281 }
263 // Return immediately if corresponding InsertToken failed. 282 // Return immediately if corresponding InsertToken failed.
264 if (token < 0) 283 if (token < 0)
265 return; 284 return;
266 if (token > token_) return; // we wrapped 285 if (token > token_)
267 if (last_token_read() >= token) 286 return; // we wrapped
287 if (cached_last_token_read_ >= token)
268 return; 288 return;
269 Flush(); 289 Flush();
270 command_buffer_->WaitForTokenInRange(token, token_); 290 CommandBuffer::State last_state =
291 command_buffer_->WaitForTokenInRange(token, token_);
292 cached_last_token_read_ = last_state.token;
293 cached_get_offset_ = last_state.get_offset;
294 context_lost_ = gpu::error::IsError(last_state.error);
271 } 295 }
272 296
273 // Waits for available entries, basically waiting until get >= put + count + 1. 297 // Waits for available entries, basically waiting until get >= put + count + 1.
274 // It actually waits for contiguous entries, so it may need to wrap the buffer 298 // It actually waits for contiguous entries, so it may need to wrap the buffer
275 // around, adding a noops. Thus this function may change the value of put_. The 299 // around, adding a noops. Thus this function may change the value of put_. The
276 // function will return early if an error occurs, in which case the available 300 // function will return early if an error occurs, in which case the available
277 // space may not be available. 301 // space may not be available.
278 void CommandBufferHelper::WaitForAvailableEntries(int32_t count) { 302 void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
279 AllocateRingBuffer(); 303 AllocateRingBuffer();
280 if (!usable()) { 304 if (!usable()) {
281 return; 305 return;
282 } 306 }
283 DCHECK(HaveRingBuffer()); 307 DCHECK(HaveRingBuffer());
284 DCHECK(count < total_entry_count_); 308 DCHECK(count < total_entry_count_);
285 if (put_ + count > total_entry_count_) { 309 if (put_ + count > total_entry_count_) {
286 // There's not enough room between the current put and the end of the 310 // There's not enough room between the current put and the end of the
287 // buffer, so we need to wrap. We will add noops all the way to the end, 311 // buffer, so we need to wrap. We will add noops all the way to the end,
288 // but we need to make sure get wraps first, actually that get is 1 or 312 // but we need to make sure get wraps first, actually that get is 1 or
289 // more (since put will wrap to 0 after we add the noops). 313 // more (since put will wrap to 0 after we add the noops).
290 DCHECK_LE(1, put_); 314 DCHECK_LE(1, put_);
291 int32_t curr_get = get_offset(); 315 int32_t curr_get = cached_get_offset_;
292 if (curr_get > put_ || curr_get == 0) { 316 if (curr_get > put_ || curr_get == 0) {
293 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); 317 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
294 Flush(); 318 Flush();
295 if (!WaitForGetOffsetInRange(1, put_)) 319 if (!WaitForGetOffsetInRange(1, put_))
296 return; 320 return;
297 curr_get = get_offset(); 321 curr_get = cached_get_offset_;
298 DCHECK_LE(curr_get, put_); 322 DCHECK_LE(curr_get, put_);
299 DCHECK_NE(0, curr_get); 323 DCHECK_NE(0, curr_get);
300 } 324 }
301 // Insert Noops to fill out the buffer. 325 // Insert Noops to fill out the buffer.
302 int32_t num_entries = total_entry_count_ - put_; 326 int32_t num_entries = total_entry_count_ - put_;
303 while (num_entries > 0) { 327 while (num_entries > 0) {
304 int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries); 328 int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
305 cmd::Noop::Set(&entries_[put_], num_to_skip); 329 cmd::Noop::Set(&entries_[put_], num_to_skip);
306 put_ += num_to_skip; 330 put_ += num_to_skip;
307 num_entries -= num_to_skip; 331 num_entries -= num_to_skip;
(...skipping 13 matching lines...) Expand all
321 if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_, 345 if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_,
322 put_)) 346 put_))
323 return; 347 return;
324 CalcImmediateEntries(count); 348 CalcImmediateEntries(count);
325 DCHECK_GE(immediate_entry_count_, count); 349 DCHECK_GE(immediate_entry_count_, count);
326 } 350 }
327 } 351 }
328 } 352 }
329 353
330 int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const { 354 int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
331 int32_t current_get_offset = get_offset(); 355 int32_t current_get_offset = cached_get_offset_;
332 if (current_get_offset > put_) { 356 if (current_get_offset > put_) {
333 return current_get_offset - put_ - 1; 357 return current_get_offset - put_ - 1;
334 } else { 358 } else {
335 return current_get_offset + total_entry_count_ - put_ - 359 return current_get_offset + total_entry_count_ - put_ -
336 (current_get_offset == 0 ? 1 : 0); 360 (current_get_offset == 0 ? 1 : 0);
337 } 361 }
338 } 362 }
339 363
340 bool CommandBufferHelper::OnMemoryDump( 364 bool CommandBufferHelper::OnMemoryDump(
341 const base::trace_event::MemoryDumpArgs& args, 365 const base::trace_event::MemoryDumpArgs& args,
(...skipping 20 matching lines...) Expand all
362 auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_); 386 auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
363 const int kImportance = 2; 387 const int kImportance = 2;
364 pmd->CreateSharedGlobalAllocatorDump(guid); 388 pmd->CreateSharedGlobalAllocatorDump(guid);
365 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); 389 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
366 } 390 }
367 391
368 return true; 392 return true;
369 } 393 }
370 394
371 } // namespace gpu 395 } // namespace gpu
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698