Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(224)

Side by Side Diff: gpu/command_buffer/client/cmd_buffer_helper.cc

Issue 2550583002: gpu: Thread-safe command buffer state lookup. (Closed)
Patch Set: move cmd buffer helper memory dump to context provider Created 4 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 // This file contains the implementation of the command buffer helper class. 5 // This file contains the implementation of the command buffer helper class.
6 6
7 #include "gpu/command_buffer/client/cmd_buffer_helper.h" 7 #include "gpu/command_buffer/client/cmd_buffer_helper.h"
8 8
9 #include <stdint.h> 9 #include <stdint.h>
10 10
11 #include <algorithm> 11 #include <algorithm>
12 #include "base/logging.h" 12 #include "base/logging.h"
13 #include "base/strings/stringprintf.h" 13 #include "base/strings/stringprintf.h"
14 #include "base/threading/thread_task_runner_handle.h" 14 #include "base/threading/thread_task_runner_handle.h"
15 #include "base/time/time.h" 15 #include "base/time/time.h"
16 #include "base/trace_event/memory_allocator_dump.h" 16 #include "base/trace_event/memory_allocator_dump.h"
17 #include "base/trace_event/memory_dump_manager.h" 17 #include "base/trace_event/memory_dump_manager.h"
18 #include "base/trace_event/process_memory_dump.h" 18 #include "base/trace_event/process_memory_dump.h"
19 #include "base/trace_event/trace_event.h" 19 #include "base/trace_event/trace_event.h"
20 #include "gpu/command_buffer/common/buffer.h" 20 #include "gpu/command_buffer/common/buffer.h"
21 #include "gpu/command_buffer/common/command_buffer.h" 21 #include "gpu/command_buffer/common/command_buffer.h"
22 #include "gpu/command_buffer/common/constants.h" 22 #include "gpu/command_buffer/common/constants.h"
23 23
24 namespace gpu { 24 namespace gpu {
25 25
26 CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer) 26 CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
27 : command_buffer_(command_buffer), 27 : command_buffer_(command_buffer),
28 ring_buffer_id_(-1), 28 ring_buffer_id_(-1),
29 ring_buffer_size_(0), 29 ring_buffer_size_(0),
30 entries_(NULL), 30 entries_(nullptr),
31 total_entry_count_(0), 31 total_entry_count_(0),
32 immediate_entry_count_(0), 32 immediate_entry_count_(0),
33 token_(0), 33 token_(0),
34 put_(0), 34 put_(0),
35 last_put_sent_(0), 35 last_put_sent_(0),
36 cached_last_token_read_(0),
37 cached_get_offset_(0),
36 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK) 38 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
37 commands_issued_(0), 39 commands_issued_(0),
38 #endif 40 #endif
39 usable_(true), 41 usable_(true),
40 context_lost_(false), 42 context_lost_(false),
41 flush_automatically_(true), 43 flush_automatically_(true),
42 flush_generation_(0) { 44 flush_generation_(0) {
43 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview).
44 // Don't register a dump provider in these cases.
45 // TODO(ericrk): Get this working in Android Webview. crbug.com/517156
46 if (base::ThreadTaskRunnerHandle::IsSet()) {
47 base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
48 this, "gpu::CommandBufferHelper", base::ThreadTaskRunnerHandle::Get());
49 }
50 } 45 }
51 46
52 void CommandBufferHelper::SetAutomaticFlushes(bool enabled) { 47 void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
53 flush_automatically_ = enabled; 48 flush_automatically_ = enabled;
54 CalcImmediateEntries(0); 49 CalcImmediateEntries(0);
55 } 50 }
56 51
57 bool CommandBufferHelper::IsContextLost() { 52 bool CommandBufferHelper::IsContextLost() {
58 if (!context_lost_) { 53 if (!context_lost_)
59 context_lost_ = error::IsError(command_buffer()->GetLastError()); 54 context_lost_ = error::IsError(command_buffer()->GetLastState().error);
60 }
61 return context_lost_; 55 return context_lost_;
62 } 56 }
63 57
64 void CommandBufferHelper::CalcImmediateEntries(int waiting_count) { 58 void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
65 DCHECK_GE(waiting_count, 0); 59 DCHECK_GE(waiting_count, 0);
66 60
67 // Check if usable & allocated. 61 // Check if usable & allocated.
68 if (!usable() || !HaveRingBuffer()) { 62 if (!usable() || !HaveRingBuffer()) {
69 immediate_entry_count_ = 0; 63 immediate_entry_count_ = 0;
70 return; 64 return;
71 } 65 }
72 66
73 // Get maximum safe contiguous entries. 67 // Get maximum safe contiguous entries.
74 const int32_t curr_get = get_offset(); 68 const int32_t curr_get = cached_get_offset_;
75 if (curr_get > put_) { 69 if (curr_get > put_) {
76 immediate_entry_count_ = curr_get - put_ - 1; 70 immediate_entry_count_ = curr_get - put_ - 1;
77 } else { 71 } else {
78 immediate_entry_count_ = 72 immediate_entry_count_ =
79 total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0); 73 total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
80 } 74 }
81 75
82 // Limit entry count to force early flushing. 76 // Limit entry count to force early flushing.
83 if (flush_automatically_) { 77 if (flush_automatically_) {
84 int32_t limit = 78 int32_t limit =
(...skipping 24 matching lines...) Expand all
109 103
110 if (HaveRingBuffer()) { 104 if (HaveRingBuffer()) {
111 return true; 105 return true;
112 } 106 }
113 107
114 int32_t id = -1; 108 int32_t id = -1;
115 scoped_refptr<Buffer> buffer = 109 scoped_refptr<Buffer> buffer =
116 command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id); 110 command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
117 if (id < 0) { 111 if (id < 0) {
118 ClearUsable(); 112 ClearUsable();
119 DCHECK(error::IsError(command_buffer()->GetLastError())); 113 DCHECK(context_lost_);
120 return false; 114 return false;
121 } 115 }
122 116
123 ring_buffer_ = buffer; 117 ring_buffer_ = buffer;
124 ring_buffer_id_ = id; 118 ring_buffer_id_ = id;
125 command_buffer_->SetGetBuffer(id); 119 command_buffer_->SetGetBuffer(id);
126 entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory()); 120 entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
127 total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry); 121 total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
128 // Call to SetGetBuffer(id) above resets get and put offsets to 0. 122 // Call to SetGetBuffer(id) above resets get and put offsets to 0.
129 // No need to query it through IPC. 123 // No need to query it through IPC.
130 put_ = 0; 124 put_ = 0;
125 cached_get_offset_ = 0;
131 CalcImmediateEntries(0); 126 CalcImmediateEntries(0);
132 return true; 127 return true;
133 } 128 }
134 129
135 void CommandBufferHelper::FreeResources() { 130 void CommandBufferHelper::FreeResources() {
136 if (HaveRingBuffer()) { 131 if (HaveRingBuffer()) {
137 command_buffer_->DestroyTransferBuffer(ring_buffer_id_); 132 command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
138 ring_buffer_id_ = -1; 133 ring_buffer_id_ = -1;
139 CalcImmediateEntries(0); 134 CalcImmediateEntries(0);
140 entries_ = nullptr; 135 entries_ = nullptr;
141 ring_buffer_ = nullptr; 136 ring_buffer_ = nullptr;
142 } 137 }
143 } 138 }
144 139
145 void CommandBufferHelper::FreeRingBuffer() { 140 void CommandBufferHelper::FreeRingBuffer() {
146 CHECK((put_ == get_offset()) || 141 CHECK((put_ == cached_get_offset_) ||
147 error::IsError(command_buffer_->GetLastState().error)); 142 error::IsError(command_buffer_->GetLastState().error));
148 FreeResources(); 143 FreeResources();
149 } 144 }
150 145
151 bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) { 146 bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) {
152 ring_buffer_size_ = ring_buffer_size; 147 ring_buffer_size_ = ring_buffer_size;
153 return AllocateRingBuffer(); 148 return AllocateRingBuffer();
154 } 149 }
155 150
156 CommandBufferHelper::~CommandBufferHelper() { 151 CommandBufferHelper::~CommandBufferHelper() {
157 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
158 this);
159 FreeResources(); 152 FreeResources();
160 } 153 }
161 154
162 bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) { 155 bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
163 DCHECK(start >= 0 && start <= total_entry_count_); 156 DCHECK(start >= 0 && start <= total_entry_count_);
164 DCHECK(end >= 0 && end <= total_entry_count_); 157 DCHECK(end >= 0 && end <= total_entry_count_);
165 if (!usable()) { 158 if (!usable()) {
166 return false; 159 return false;
167 } 160 }
168 command_buffer_->WaitForGetOffsetInRange(start, end); 161 CommandBuffer::State last_state =
169 return command_buffer_->GetLastError() == gpu::error::kNoError; 162 command_buffer_->WaitForGetOffsetInRange(start, end);
163 cached_last_token_read_ = last_state.token;
164 cached_get_offset_ = last_state.get_offset;
165 context_lost_ = gpu::error::IsError(last_state.error);
166 return !context_lost_;
170 } 167 }
171 168
172 void CommandBufferHelper::Flush() { 169 void CommandBufferHelper::Flush() {
173 // Wrap put_ before flush. 170 // Wrap put_ before flush.
174 if (put_ == total_entry_count_) 171 if (put_ == total_entry_count_)
175 put_ = 0; 172 put_ = 0;
176 173
177 if (usable()) { 174 if (usable()) {
178 last_flush_time_ = base::TimeTicks::Now(); 175 last_flush_time_ = base::TimeTicks::Now();
179 last_put_sent_ = put_; 176 last_put_sent_ = put_;
(...skipping 26 matching lines...) Expand all
206 #endif 203 #endif
207 204
208 // Calls Flush() and then waits until the buffer is empty. Break early if the 205 // Calls Flush() and then waits until the buffer is empty. Break early if the
209 // error is set. 206 // error is set.
210 bool CommandBufferHelper::Finish() { 207 bool CommandBufferHelper::Finish() {
211 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish"); 208 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
212 if (!usable()) { 209 if (!usable()) {
213 return false; 210 return false;
214 } 211 }
215 // If there is no work just exit. 212 // If there is no work just exit.
216 if (put_ == get_offset()) { 213 if (put_ == cached_get_offset_) {
217 return true; 214 return true;
218 } 215 }
219 DCHECK(HaveRingBuffer() || 216 DCHECK(HaveRingBuffer() ||
220 error::IsError(command_buffer_->GetLastState().error)); 217 error::IsError(command_buffer_->GetLastState().error));
221 Flush(); 218 Flush();
222 if (!WaitForGetOffsetInRange(put_, put_)) 219 if (!WaitForGetOffsetInRange(put_, put_))
223 return false; 220 return false;
224 DCHECK_EQ(get_offset(), put_); 221 DCHECK_EQ(cached_get_offset_, put_);
225 222
226 CalcImmediateEntries(0); 223 CalcImmediateEntries(0);
227 224
228 return true; 225 return true;
229 } 226 }
230 227
231 // Inserts a new token into the command stream. It uses an increasing value 228 // Inserts a new token into the command stream. It uses an increasing value
232 // scheme so that we don't lose tokens (a token has passed if the current token 229 // scheme so that we don't lose tokens (a token has passed if the current token
233 // value is higher than that token). Calls Finish() if the token value wraps, 230 // value is higher than that token). Calls Finish() if the token value wraps,
234 // which will be rare. 231 // which will be rare.
235 int32_t CommandBufferHelper::InsertToken() { 232 int32_t CommandBufferHelper::InsertToken() {
236 AllocateRingBuffer(); 233 AllocateRingBuffer();
237 if (!usable()) { 234 if (!usable()) {
238 return token_; 235 return token_;
239 } 236 }
240 DCHECK(HaveRingBuffer()); 237 DCHECK(HaveRingBuffer());
241 // Increment token as 31-bit integer. Negative values are used to signal an 238 // Increment token as 31-bit integer. Negative values are used to signal an
242 // error. 239 // error.
243 token_ = (token_ + 1) & 0x7FFFFFFF; 240 token_ = (token_ + 1) & 0x7FFFFFFF;
244 cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>(); 241 cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
245 if (cmd) { 242 if (cmd) {
246 cmd->Init(token_); 243 cmd->Init(token_);
247 if (token_ == 0) { 244 if (token_ == 0) {
248 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)"); 245 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
249 // we wrapped 246 bool finished = Finish(); // we wrapped
250 Finish(); 247 DCHECK(!finished || (cached_last_token_read_ == 0));
251 DCHECK_EQ(token_, last_token_read());
252 } 248 }
253 } 249 }
254 return token_; 250 return token_;
255 } 251 }
256 252
253 bool CommandBufferHelper::HasTokenPassed(int32_t token) {
254 // If token_ wrapped around we Finish'd.
255 if (token > token_)
256 return true;
257 // Don't update state if we don't have to.
258 if (token <= cached_last_token_read_)
259 return true;
260 CommandBuffer::State last_state = command_buffer_->GetLastState();
261 cached_last_token_read_ = last_state.token;
262 cached_get_offset_ = last_state.get_offset;
263 context_lost_ = gpu::error::IsError(last_state.error);
264 return token <= cached_last_token_read_;
265 }
266
257 // Waits until the current token value is greater or equal to the value passed 267 // Waits until the current token value is greater or equal to the value passed
258 // in argument. 268 // in argument.
259 void CommandBufferHelper::WaitForToken(int32_t token) { 269 void CommandBufferHelper::WaitForToken(int32_t token) {
260 if (!usable() || !HaveRingBuffer()) { 270 if (!usable() || !HaveRingBuffer()) {
261 return; 271 return;
262 } 272 }
263 // Return immediately if corresponding InsertToken failed. 273 // Return immediately if corresponding InsertToken failed.
264 if (token < 0) 274 if (token < 0)
265 return; 275 return;
266 if (token > token_) return; // we wrapped 276 if (token > token_)
267 if (last_token_read() >= token) 277 return; // we wrapped
278 if (cached_last_token_read_ >= token)
268 return; 279 return;
jbauman 2016/12/09 03:16:03 We should try to update the state here one last time.
sunnyps 2016/12/09 04:09:37 Done.
269 Flush(); 280 Flush();
270 command_buffer_->WaitForTokenInRange(token, token_); 281 CommandBuffer::State last_state =
282 command_buffer_->WaitForTokenInRange(token, token_);
283 cached_last_token_read_ = last_state.token;
284 cached_get_offset_ = last_state.get_offset;
285 context_lost_ = gpu::error::IsError(last_state.error);
271 } 286 }
272 287
273 // Waits for available entries, basically waiting until get >= put + count + 1. 288 // Waits for available entries, basically waiting until get >= put + count + 1.
274 // It actually waits for contiguous entries, so it may need to wrap the buffer 289 // It actually waits for contiguous entries, so it may need to wrap the buffer
275 // around, adding a noops. Thus this function may change the value of put_. The 290 // around, adding a noops. Thus this function may change the value of put_. The
276 // function will return early if an error occurs, in which case the available 291 // function will return early if an error occurs, in which case the available
277 // space may not be available. 292 // space may not be available.
278 void CommandBufferHelper::WaitForAvailableEntries(int32_t count) { 293 void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
279 AllocateRingBuffer(); 294 AllocateRingBuffer();
280 if (!usable()) { 295 if (!usable()) {
281 return; 296 return;
282 } 297 }
283 DCHECK(HaveRingBuffer()); 298 DCHECK(HaveRingBuffer());
284 DCHECK(count < total_entry_count_); 299 DCHECK(count < total_entry_count_);
285 if (put_ + count > total_entry_count_) { 300 if (put_ + count > total_entry_count_) {
286 // There's not enough room between the current put and the end of the 301 // There's not enough room between the current put and the end of the
287 // buffer, so we need to wrap. We will add noops all the way to the end, 302 // buffer, so we need to wrap. We will add noops all the way to the end,
288 // but we need to make sure get wraps first, actually that get is 1 or 303 // but we need to make sure get wraps first, actually that get is 1 or
289 // more (since put will wrap to 0 after we add the noops). 304 // more (since put will wrap to 0 after we add the noops).
290 DCHECK_LE(1, put_); 305 DCHECK_LE(1, put_);
291 int32_t curr_get = get_offset(); 306 int32_t curr_get = cached_get_offset_;
292 if (curr_get > put_ || curr_get == 0) { 307 if (curr_get > put_ || curr_get == 0) {
293 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); 308 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
294 Flush(); 309 Flush();
295 if (!WaitForGetOffsetInRange(1, put_)) 310 if (!WaitForGetOffsetInRange(1, put_))
296 return; 311 return;
297 curr_get = get_offset(); 312 curr_get = cached_get_offset_;
298 DCHECK_LE(curr_get, put_); 313 DCHECK_LE(curr_get, put_);
299 DCHECK_NE(0, curr_get); 314 DCHECK_NE(0, curr_get);
300 } 315 }
301 // Insert Noops to fill out the buffer. 316 // Insert Noops to fill out the buffer.
302 int32_t num_entries = total_entry_count_ - put_; 317 int32_t num_entries = total_entry_count_ - put_;
303 while (num_entries > 0) { 318 while (num_entries > 0) {
304 int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries); 319 int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
305 cmd::Noop::Set(&entries_[put_], num_to_skip); 320 cmd::Noop::Set(&entries_[put_], num_to_skip);
306 put_ += num_to_skip; 321 put_ += num_to_skip;
307 num_entries -= num_to_skip; 322 num_entries -= num_to_skip;
(...skipping 13 matching lines...) Expand all
321 if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_, 336 if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_,
322 put_)) 337 put_))
323 return; 338 return;
324 CalcImmediateEntries(count); 339 CalcImmediateEntries(count);
325 DCHECK_GE(immediate_entry_count_, count); 340 DCHECK_GE(immediate_entry_count_, count);
326 } 341 }
327 } 342 }
328 } 343 }
329 344
330 int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const { 345 int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
331 int32_t current_get_offset = get_offset(); 346 int32_t current_get_offset = cached_get_offset_;
332 if (current_get_offset > put_) { 347 if (current_get_offset > put_) {
333 return current_get_offset - put_ - 1; 348 return current_get_offset - put_ - 1;
334 } else { 349 } else {
335 return current_get_offset + total_entry_count_ - put_ - 350 return current_get_offset + total_entry_count_ - put_ -
336 (current_get_offset == 0 ? 1 : 0); 351 (current_get_offset == 0 ? 1 : 0);
337 } 352 }
338 } 353 }
339 354
340 bool CommandBufferHelper::OnMemoryDump( 355 bool CommandBufferHelper::OnMemoryDump(
341 const base::trace_event::MemoryDumpArgs& args, 356 const base::trace_event::MemoryDumpArgs& args,
(...skipping 20 matching lines...) Expand all
362 auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_); 377 auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
363 const int kImportance = 2; 378 const int kImportance = 2;
364 pmd->CreateSharedGlobalAllocatorDump(guid); 379 pmd->CreateSharedGlobalAllocatorDump(guid);
365 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); 380 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
366 } 381 }
367 382
368 return true; 383 return true;
369 } 384 }
370 385
371 } // namespace gpu 386 } // namespace gpu
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698