Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file contains the implementation of the command buffer helper class. | 5 // This file contains the implementation of the command buffer helper class. |
| 6 | 6 |
| 7 #include "gpu/command_buffer/client/cmd_buffer_helper.h" | 7 #include "gpu/command_buffer/client/cmd_buffer_helper.h" |
| 8 | 8 |
| 9 #include <stdint.h> | 9 #include <stdint.h> |
| 10 | 10 |
| (...skipping 14 matching lines...) Expand all Loading... | |
| 25 | 25 |
| 26 CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer) | 26 CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer) |
| 27 : command_buffer_(command_buffer), | 27 : command_buffer_(command_buffer), |
| 28 ring_buffer_id_(-1), | 28 ring_buffer_id_(-1), |
| 29 ring_buffer_size_(0), | 29 ring_buffer_size_(0), |
| 30 entries_(NULL), | 30 entries_(NULL), |
| 31 total_entry_count_(0), | 31 total_entry_count_(0), |
| 32 immediate_entry_count_(0), | 32 immediate_entry_count_(0), |
| 33 token_(0), | 33 token_(0), |
| 34 put_(0), | 34 put_(0), |
| 35 last_get_(0), | |
| 35 last_put_sent_(0), | 36 last_put_sent_(0), |
| 36 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK) | 37 #if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK) |
| 37 commands_issued_(0), | 38 commands_issued_(0), |
| 38 #endif | 39 #endif |
| 39 usable_(true), | 40 usable_(true), |
| 40 context_lost_(false), | 41 context_lost_(false), |
| 41 flush_automatically_(true), | 42 flush_automatically_(true), |
| 42 flush_generation_(0) { | 43 flush_generation_(0) { |
| 43 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview). | 44 // In certain cases, ThreadTaskRunnerHandle isn't set (Android Webview). |
| 44 // Don't register a dump provider in these cases. | 45 // Don't register a dump provider in these cases. |
| (...skipping 19 matching lines...) Expand all Loading... | |
| 64 void CommandBufferHelper::CalcImmediateEntries(int waiting_count) { | 65 void CommandBufferHelper::CalcImmediateEntries(int waiting_count) { |
| 65 DCHECK_GE(waiting_count, 0); | 66 DCHECK_GE(waiting_count, 0); |
| 66 | 67 |
| 67 // Check if usable & allocated. | 68 // Check if usable & allocated. |
| 68 if (!usable() || !HaveRingBuffer()) { | 69 if (!usable() || !HaveRingBuffer()) { |
| 69 immediate_entry_count_ = 0; | 70 immediate_entry_count_ = 0; |
| 70 return; | 71 return; |
| 71 } | 72 } |
| 72 | 73 |
| 73 // Get maximum safe contiguous entries. | 74 // Get maximum safe contiguous entries. |
| 74 const int32_t curr_get = get_offset(); | 75 const int32_t curr_get = last_get_; |
| 75 if (curr_get > put_) { | 76 if (curr_get > put_) { |
| 76 immediate_entry_count_ = curr_get - put_ - 1; | 77 immediate_entry_count_ = curr_get - put_ - 1; |
| 77 } else { | 78 } else { |
| 78 immediate_entry_count_ = | 79 immediate_entry_count_ = |
| 79 total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0); | 80 total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0); |
| 80 } | 81 } |
| 81 | 82 |
| 82 // Limit entry count to force early flushing. | 83 // Limit entry count to force early flushing. |
| 83 if (flush_automatically_) { | 84 if (flush_automatically_) { |
| 84 int32_t limit = | 85 int32_t limit = |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 121 } | 122 } |
| 122 | 123 |
| 123 ring_buffer_ = buffer; | 124 ring_buffer_ = buffer; |
| 124 ring_buffer_id_ = id; | 125 ring_buffer_id_ = id; |
| 125 command_buffer_->SetGetBuffer(id); | 126 command_buffer_->SetGetBuffer(id); |
| 126 entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory()); | 127 entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory()); |
| 127 total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry); | 128 total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry); |
| 128 // Call to SetGetBuffer(id) above resets get and put offsets to 0. | 129 // Call to SetGetBuffer(id) above resets get and put offsets to 0. |
| 129 // No need to query it through IPC. | 130 // No need to query it through IPC. |
| 130 put_ = 0; | 131 put_ = 0; |
| 132 last_get_ = 0; | |
| 131 CalcImmediateEntries(0); | 133 CalcImmediateEntries(0); |
| 132 return true; | 134 return true; |
| 133 } | 135 } |
| 134 | 136 |
| 135 void CommandBufferHelper::FreeResources() { | 137 void CommandBufferHelper::FreeResources() { |
| 136 if (HaveRingBuffer()) { | 138 if (HaveRingBuffer()) { |
| 137 command_buffer_->DestroyTransferBuffer(ring_buffer_id_); | 139 command_buffer_->DestroyTransferBuffer(ring_buffer_id_); |
| 138 ring_buffer_id_ = -1; | 140 ring_buffer_id_ = -1; |
| 139 CalcImmediateEntries(0); | 141 CalcImmediateEntries(0); |
| 140 entries_ = nullptr; | 142 entries_ = nullptr; |
| 141 ring_buffer_ = nullptr; | 143 ring_buffer_ = nullptr; |
| 142 } | 144 } |
| 143 } | 145 } |
| 144 | 146 |
| 145 void CommandBufferHelper::FreeRingBuffer() { | 147 void CommandBufferHelper::FreeRingBuffer() { |
| 146 CHECK((put_ == get_offset()) || | 148 CHECK((put_ == last_get_) || |
| 147 error::IsError(command_buffer_->GetLastState().error)); | 149 error::IsError(command_buffer_->GetLastState().error)); |
| 148 FreeResources(); | 150 FreeResources(); |
| 149 } | 151 } |
| 150 | 152 |
| 151 bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) { | 153 bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) { |
| 152 ring_buffer_size_ = ring_buffer_size; | 154 ring_buffer_size_ = ring_buffer_size; |
| 153 return AllocateRingBuffer(); | 155 return AllocateRingBuffer(); |
| 154 } | 156 } |
| 155 | 157 |
| 156 CommandBufferHelper::~CommandBufferHelper() { | 158 CommandBufferHelper::~CommandBufferHelper() { |
| 157 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( | 159 base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider( |
| 158 this); | 160 this); |
| 159 FreeResources(); | 161 FreeResources(); |
| 160 } | 162 } |
| 161 | 163 |
| 162 bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) { | 164 bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) { |
| 163 DCHECK(start >= 0 && start <= total_entry_count_); | 165 DCHECK(start >= 0 && start <= total_entry_count_); |
| 164 DCHECK(end >= 0 && end <= total_entry_count_); | 166 DCHECK(end >= 0 && end <= total_entry_count_); |
| 165 if (!usable()) { | 167 if (!usable()) { |
| 166 return false; | 168 return false; |
| 167 } | 169 } |
| 168 command_buffer_->WaitForGetOffsetInRange(start, end); | 170 CommandBuffer::State last_state = |
| 169 return command_buffer_->GetLastError() == gpu::error::kNoError; | 171 command_buffer_->WaitForGetOffsetInRange(start, end); |
| 172 last_get_ = last_state.get_offset; | |
| 173 return last_state.error == gpu::error::kNoError; | |
| 170 } | 174 } |
| 171 | 175 |
| 172 void CommandBufferHelper::Flush() { | 176 void CommandBufferHelper::Flush() { |
| 173 // Wrap put_ before flush. | 177 // Wrap put_ before flush. |
| 174 if (put_ == total_entry_count_) | 178 if (put_ == total_entry_count_) |
| 175 put_ = 0; | 179 put_ = 0; |
| 176 | 180 |
| 177 if (usable()) { | 181 if (usable()) { |
| 178 last_flush_time_ = base::TimeTicks::Now(); | 182 last_flush_time_ = base::TimeTicks::Now(); |
| 179 last_put_sent_ = put_; | 183 last_put_sent_ = put_; |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 206 #endif | 210 #endif |
| 207 | 211 |
| 208 // Calls Flush() and then waits until the buffer is empty. Break early if the | 212 // Calls Flush() and then waits until the buffer is empty. Break early if the |
| 209 // error is set. | 213 // error is set. |
| 210 bool CommandBufferHelper::Finish() { | 214 bool CommandBufferHelper::Finish() { |
| 211 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish"); | 215 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish"); |
| 212 if (!usable()) { | 216 if (!usable()) { |
| 213 return false; | 217 return false; |
| 214 } | 218 } |
| 215 // If there is no work just exit. | 219 // If there is no work just exit. |
| 216 if (put_ == get_offset()) { | 220 if (put_ == last_get_) { |
| 217 return true; | 221 return true; |
| 218 } | 222 } |
| 219 DCHECK(HaveRingBuffer() || | 223 DCHECK(HaveRingBuffer() || |
| 220 error::IsError(command_buffer_->GetLastState().error)); | 224 error::IsError(command_buffer_->GetLastState().error)); |
| 221 Flush(); | 225 Flush(); |
| 222 if (!WaitForGetOffsetInRange(put_, put_)) | 226 if (!WaitForGetOffsetInRange(put_, put_)) |
| 223 return false; | 227 return false; |
| 224 DCHECK_EQ(get_offset(), put_); | 228 DCHECK_EQ(last_get_, put_); |
| 225 | 229 |
| 226 CalcImmediateEntries(0); | 230 CalcImmediateEntries(0); |
| 227 | 231 |
| 228 return true; | 232 return true; |
| 229 } | 233 } |
| 230 | 234 |
| 231 // Inserts a new token into the command stream. It uses an increasing value | 235 // Inserts a new token into the command stream. It uses an increasing value |
| 232 // scheme so that we don't lose tokens (a token has passed if the current token | 236 // scheme so that we don't lose tokens (a token has passed if the current token |
| 233 // value is higher than that token). Calls Finish() if the token value wraps, | 237 // value is higher than that token). Calls Finish() if the token value wraps, |
| 234 // which will be rare. | 238 // which will be rare. |
| (...skipping 21 matching lines...) Expand all Loading... | |
| 256 | 260 |
| 257 // Waits until the current token value is greater or equal to the value passed | 261 // Waits until the current token value is greater or equal to the value passed |
| 258 // in argument. | 262 // in argument. |
| 259 void CommandBufferHelper::WaitForToken(int32_t token) { | 263 void CommandBufferHelper::WaitForToken(int32_t token) { |
| 260 if (!usable() || !HaveRingBuffer()) { | 264 if (!usable() || !HaveRingBuffer()) { |
| 261 return; | 265 return; |
| 262 } | 266 } |
| 263 // Return immediately if corresponding InsertToken failed. | 267 // Return immediately if corresponding InsertToken failed. |
| 264 if (token < 0) | 268 if (token < 0) |
| 265 return; | 269 return; |
| 266 if (token > token_) return; // we wrapped | 270 if (token > token_) |
| 271 return; // we wrapped | |
| 267 if (last_token_read() >= token) | 272 if (last_token_read() >= token) |
| 268 return; | 273 return; |
| 269 Flush(); | 274 Flush(); |
| 270 command_buffer_->WaitForTokenInRange(token, token_); | 275 command_buffer_->WaitForTokenInRange(token, token_); |
| |
piman — 2016/12/02 19:21:48:
Let's cache the new get (and token if we decide to […comment truncated in this export])
sunnyps — 2016/12/07 03:31:22:
Done.
| |
| 271 } | 276 } |
| 272 | 277 |
| 273 // Waits for available entries, basically waiting until get >= put + count + 1. | 278 // Waits for available entries, basically waiting until get >= put + count + 1. |
| 274 // It actually waits for contiguous entries, so it may need to wrap the buffer | 279 // It actually waits for contiguous entries, so it may need to wrap the buffer |
| 275 // around, adding a noops. Thus this function may change the value of put_. The | 280 // around, adding a noops. Thus this function may change the value of put_. The |
| 276 // function will return early if an error occurs, in which case the available | 281 // function will return early if an error occurs, in which case the available |
| 277 // space may not be available. | 282 // space may not be available. |
| 278 void CommandBufferHelper::WaitForAvailableEntries(int32_t count) { | 283 void CommandBufferHelper::WaitForAvailableEntries(int32_t count) { |
| 279 AllocateRingBuffer(); | 284 AllocateRingBuffer(); |
| 280 if (!usable()) { | 285 if (!usable()) { |
| 281 return; | 286 return; |
| 282 } | 287 } |
| 283 DCHECK(HaveRingBuffer()); | 288 DCHECK(HaveRingBuffer()); |
| 284 DCHECK(count < total_entry_count_); | 289 DCHECK(count < total_entry_count_); |
| 285 if (put_ + count > total_entry_count_) { | 290 if (put_ + count > total_entry_count_) { |
| 286 // There's not enough room between the current put and the end of the | 291 // There's not enough room between the current put and the end of the |
| 287 // buffer, so we need to wrap. We will add noops all the way to the end, | 292 // buffer, so we need to wrap. We will add noops all the way to the end, |
| 288 // but we need to make sure get wraps first, actually that get is 1 or | 293 // but we need to make sure get wraps first, actually that get is 1 or |
| 289 // more (since put will wrap to 0 after we add the noops). | 294 // more (since put will wrap to 0 after we add the noops). |
| 290 DCHECK_LE(1, put_); | 295 DCHECK_LE(1, put_); |
| 291 int32_t curr_get = get_offset(); | 296 int32_t curr_get = last_get_; |
| 292 if (curr_get > put_ || curr_get == 0) { | 297 if (curr_get > put_ || curr_get == 0) { |
| 293 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); | 298 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); |
| 294 Flush(); | 299 Flush(); |
| 295 if (!WaitForGetOffsetInRange(1, put_)) | 300 if (!WaitForGetOffsetInRange(1, put_)) |
| 296 return; | 301 return; |
| 297 curr_get = get_offset(); | 302 curr_get = last_get_; |
| 298 DCHECK_LE(curr_get, put_); | 303 DCHECK_LE(curr_get, put_); |
| 299 DCHECK_NE(0, curr_get); | 304 DCHECK_NE(0, curr_get); |
| 300 } | 305 } |
| 301 // Insert Noops to fill out the buffer. | 306 // Insert Noops to fill out the buffer. |
| 302 int32_t num_entries = total_entry_count_ - put_; | 307 int32_t num_entries = total_entry_count_ - put_; |
| 303 while (num_entries > 0) { | 308 while (num_entries > 0) { |
| 304 int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries); | 309 int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries); |
| 305 cmd::Noop::Set(&entries_[put_], num_to_skip); | 310 cmd::Noop::Set(&entries_[put_], num_to_skip); |
| 306 put_ += num_to_skip; | 311 put_ += num_to_skip; |
| 307 num_entries -= num_to_skip; | 312 num_entries -= num_to_skip; |
| (...skipping 13 matching lines...) Expand all Loading... | |
| 321 if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_, | 326 if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_, |
| 322 put_)) | 327 put_)) |
| 323 return; | 328 return; |
| 324 CalcImmediateEntries(count); | 329 CalcImmediateEntries(count); |
| 325 DCHECK_GE(immediate_entry_count_, count); | 330 DCHECK_GE(immediate_entry_count_, count); |
| 326 } | 331 } |
| 327 } | 332 } |
| 328 } | 333 } |
| 329 | 334 |
| 330 int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const { | 335 int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const { |
| 331 int32_t current_get_offset = get_offset(); | 336 int32_t current_get_offset = last_get_; |
| 332 if (current_get_offset > put_) { | 337 if (current_get_offset > put_) { |
| 333 return current_get_offset - put_ - 1; | 338 return current_get_offset - put_ - 1; |
| 334 } else { | 339 } else { |
| 335 return current_get_offset + total_entry_count_ - put_ - | 340 return current_get_offset + total_entry_count_ - put_ - |
| 336 (current_get_offset == 0 ? 1 : 0); | 341 (current_get_offset == 0 ? 1 : 0); |
| 337 } | 342 } |
| 338 } | 343 } |
| 339 | 344 |
| 340 bool CommandBufferHelper::OnMemoryDump( | 345 bool CommandBufferHelper::OnMemoryDump( |
| 341 const base::trace_event::MemoryDumpArgs& args, | 346 const base::trace_event::MemoryDumpArgs& args, |
| (...skipping 20 matching lines...) Expand all Loading... | |
| 362 auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_); | 367 auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_); |
| 363 const int kImportance = 2; | 368 const int kImportance = 2; |
| 364 pmd->CreateSharedGlobalAllocatorDump(guid); | 369 pmd->CreateSharedGlobalAllocatorDump(guid); |
| 365 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); | 370 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance); |
| 366 } | 371 } |
| 367 | 372 |
| 368 return true; | 373 return true; |
| 369 } | 374 } |
| 370 | 375 |
| 371 } // namespace gpu | 376 } // namespace gpu |
| OLD | NEW |