| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file contains the implementation of the command buffer helper class. | 5 // This file contains the implementation of the command buffer helper class. |
| 6 | 6 |
| 7 #include "../client/cmd_buffer_helper.h" | 7 #include "../client/cmd_buffer_helper.h" |
| 8 #include "../common/command_buffer.h" | 8 #include "../common/command_buffer.h" |
| 9 #include "../common/trace_event.h" | 9 #include "../common/trace_event.h" |
| 10 | 10 |
| 11 namespace gpu { | 11 namespace gpu { |
| 12 | 12 |
| 13 namespace { | 13 namespace { |
| 14 const int kCommandsPerFlushCheck = 100; | 14 const int kCommandsPerFlushCheck = 100; |
| 15 const double kFlushDelay = 1.0 / (5.0 * 60.0); | 15 const double kFlushDelay = 1.0 / (5.0 * 60.0); |
| 16 } | 16 } |
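kFlushDelay is expressed in seconds: 1.0 / (5.0 * 60.0) is 1/300 s, about 3.3 ms, so the time-based auto-flush further down fires at most roughly 300 times per second, i.e. five times per 60 Hz frame. A minimal standalone sketch of how the constant is consumed, mirroring the clock()-based check in WaitForAvailableEntries (the helper name is made up for illustration):

#include <ctime>

// Sketch, not part of the CL: kFlushDelay as the clock-tick test used in
// WaitForAvailableEntries. Returns true once more than 1/300 s has elapsed
// since |last_flush_time|.
bool FlushIntervalElapsed(clock_t last_flush_time) {
  const double kFlushDelay = 1.0 / (5.0 * 60.0);  // seconds
  return clock() - last_flush_time > kFlushDelay * CLOCKS_PER_SEC;
}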
| 17 | 17 |
| 18 CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer) | 18 CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer) |
| 19 : command_buffer_(command_buffer), | 19 : command_buffer_(command_buffer), |
| 20 ring_buffer_id_(-1), | 20 ring_buffer_id_(-1), |
| 21 ring_buffer_size_(0), | 21 ring_buffer_size_(0), |
| 22 entries_(NULL), | 22 entries_(NULL), |
| 23 total_entry_count_(0), | 23 total_entry_count_(0), |
| 24 usable_entry_count_(0), | |
| 25 token_(0), | 24 token_(0), |
| 26 put_(0), | 25 put_(0), |
| 27 last_put_sent_(0), | 26 last_put_sent_(0), |
| 28 commands_issued_(0), | 27 commands_issued_(0), |
| 29 usable_(true), | 28 usable_(true), |
| 30 context_lost_(false), | 29 context_lost_(false), |
| 31 flush_automatically_(true), | 30 flush_automatically_(true), |
| 32 last_flush_time_(0) { | 31 last_flush_time_(0) { |
| 33 } | 32 } |
| 34 | 33 |
| (...skipping 32 matching lines...) |
| 67 // Also do we need to check state.num_entries? | 66 // Also do we need to check state.num_entries? |
| 68 CommandBuffer::State state = command_buffer_->GetState(); | 67 CommandBuffer::State state = command_buffer_->GetState(); |
| 69 entries_ = static_cast<CommandBufferEntry*>(ring_buffer_.ptr); | 68 entries_ = static_cast<CommandBufferEntry*>(ring_buffer_.ptr); |
| 70 int32 num_ring_buffer_entries = | 69 int32 num_ring_buffer_entries = |
| 71 ring_buffer_size_ / sizeof(CommandBufferEntry); | 70 ring_buffer_size_ / sizeof(CommandBufferEntry); |
| 72 if (num_ring_buffer_entries > state.num_entries) { | 71 if (num_ring_buffer_entries > state.num_entries) { |
| 73 ClearUsable(); | 72 ClearUsable(); |
| 74 return false; | 73 return false; |
| 75 } | 74 } |
| 76 | 75 |
| 77 const int32 kJumpEntries = | |
| 78 sizeof(cmd::Jump) / sizeof(*entries_); // NOLINT | |
| 79 | |
| 80 total_entry_count_ = num_ring_buffer_entries; | 76 total_entry_count_ = num_ring_buffer_entries; |
| 81 usable_entry_count_ = total_entry_count_ - kJumpEntries; | |
| 82 put_ = state.put_offset; | 77 put_ = state.put_offset; |
| 83 return true; | 78 return true; |
| 84 } | 79 } |
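With the Jump command gone there is no reserved tail anymore, so every entry that fits in the ring buffer is usable and total_entry_count_ no longer needs a separate usable_entry_count_ shadow. A standalone sketch of the setup arithmetic under the same semantics (hypothetical helper; sizeof(CommandBufferEntry) is 4 bytes in the real code):

#include <stddef.h>
#include <stdint.h>

// Sketch, not part of the CL: derive the entry count from the ring buffer
// byte size and validate it against what the service-side state reports.
// Returns -1 in the case that maps to the ClearUsable() path above.
int32_t ComputeTotalEntryCount(size_t ring_buffer_size,
                               size_t entry_size,  // sizeof(CommandBufferEntry)
                               int32_t service_num_entries) {
  int32_t num_ring_buffer_entries =
      static_cast<int32_t>(ring_buffer_size / entry_size);
  if (num_ring_buffer_entries > service_num_entries)
    return -1;
  return num_ring_buffer_entries;
}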
| 85 | 80 |
| 86 void CommandBufferHelper::FreeResources() { | 81 void CommandBufferHelper::FreeResources() { |
| 87 if (HaveRingBuffer()) { | 82 if (HaveRingBuffer()) { |
| 88 command_buffer_->DestroyTransferBuffer(ring_buffer_id_); | 83 command_buffer_->DestroyTransferBuffer(ring_buffer_id_); |
| 89 ring_buffer_id_ = -1; | 84 ring_buffer_id_ = -1; |
| 90 } | 85 } |
| 91 } | 86 } |
| (...skipping 96 matching lines...) |
| 188 } | 183 } |
| 189 // Do not loop forever if the flush fails, meaning the command buffer reader | 184 // Do not loop forever if the flush fails, meaning the command buffer reader |
| 190 // has shut down. | 185 // has shut down. |
| 191 if (!FlushSync()) | 186 if (!FlushSync()) |
| 192 return; | 187 return; |
| 193 } | 188 } |
| 194 } | 189 } |
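Every wait loop in this file has the same shape: keep calling FlushSync() and re-checking the condition, and bail out as soon as the flush fails, because that means the service-side reader has shut down and the condition can never become true. A generic sketch of the pattern with hypothetical parameter names:

// Sketch, not part of the CL: the guarded wait pattern shared by the token
// and available-entry waits. Returns false if the flush fails before the
// condition is satisfied.
template <typename Condition, typename Flush>
bool WaitGuarded(Condition satisfied, Flush flush_sync) {
  while (!satisfied()) {
    if (!flush_sync())
      return false;  // Reader has shut down; do not spin forever.
  }
  return true;
}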
| 195 | 190 |
| 196 // Waits for available entries, basically waiting until get >= put + count + 1. | 191 // Waits for available entries, basically waiting until get >= put + count + 1. |
| 197 // It actually waits for contiguous entries, so it may need to wrap the buffer | 192 // It actually waits for contiguous entries, so it may need to wrap the buffer |
| 198 // around, adding a jump. Thus this function may change the value of put_. The | 193 // around, adding noops. Thus this function may change the value of put_. The |
| 199 // function will return early if an error occurs, in which case the requested | 194 // function will return early if an error occurs, in which case the requested |
| 200 // space may not be available. | 195 // space may not be available. |
| 201 void CommandBufferHelper::WaitForAvailableEntries(int32 count) { | 196 void CommandBufferHelper::WaitForAvailableEntries(int32 count) { |
| 202 AllocateRingBuffer(); | 197 AllocateRingBuffer(); |
| 203 if (!usable()) { | 198 if (!usable()) { |
| 204 return; | 199 return; |
| 205 } | 200 } |
| 206 GPU_DCHECK(HaveRingBuffer()); | 201 GPU_DCHECK(HaveRingBuffer()); |
| 207 GPU_DCHECK(count < usable_entry_count_); | 202 GPU_DCHECK(count < total_entry_count_); |
| 208 if (put_ + count > usable_entry_count_) { | 203 if (put_ + count > total_entry_count_) { |
| 209 // There's not enough room between the current put and the end of the | 204 // There's not enough room between the current put and the end of the |
| 210 // buffer, so we need to wrap. We will add a jump back to the start, but we | 205 // buffer, so we need to wrap. We will add noops all the way to the end, |
| 211 // need to make sure get wraps first, actually that get is 1 or more (since | 206 // but first we need to make sure get has wrapped, i.e. that get is 1 or |
| 212 // put will wrap to 0 after we add the jump). | 207 // more (since put will wrap to 0 after we add the noops). |
| 213 GPU_DCHECK_LE(1, put_); | 208 GPU_DCHECK_LE(1, put_); |
| 214 if (get_offset() > put_ || get_offset() == 0) { | 209 if (get_offset() > put_ || get_offset() == 0) { |
| 215 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); | 210 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); |
| 216 while (get_offset() > put_ || get_offset() == 0) { | 211 while (get_offset() > put_ || get_offset() == 0) { |
| 217 // Do not loop forever if the flush fails, meaning the command buffer | 212 // Do not loop forever if the flush fails, meaning the command buffer |
| 218 // reader has shut down. | 213 // reader has shut down. |
| 219 if (!FlushSync()) | 214 if (!FlushSync()) |
| 220 return; | 215 return; |
| 221 } | 216 } |
| 222 } | 217 } |
| 223 // Insert a jump back to the beginning. | 218 // Insert Noops to fill out the buffer. |
| 224 cmd::Jump::Set(&entries_[put_], 0); | 219 int32 num_entries = total_entry_count_ - put_; |
| 220 while (num_entries > 0) { |
| 221 int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries); |
| 222 cmd::Noop::Set(&entries_[put_], num_to_skip); |
| 223 put_ += num_to_skip; |
| 224 num_entries -= num_to_skip; |
| 225 } |
| 225 put_ = 0; | 226 put_ = 0; |
| 226 } | 227 } |
| 227 if (AvailableEntries() < count) { | 228 if (AvailableEntries() < count) { |
| 228 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1"); | 229 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1"); |
| 229 while (AvailableEntries() < count) { | 230 while (AvailableEntries() < count) { |
| 230 // Do not loop forever if the flush fails, meaning the command buffer | 231 // Do not loop forever if the flush fails, meaning the command buffer |
| 231 // reader has shut down. | 232 // reader has shut down. |
| 232 if (!FlushSync()) | 233 if (!FlushSync()) |
| 233 return; | 234 return; |
| 234 } | 235 } |
| 235 } | 236 } |
| 236 // Force a flush if the buffer is getting half full, or even earlier if the | 237 // Force a flush if the buffer is getting half full, or even earlier if the |
| 237 // reader is known to be idle. | 238 // reader is known to be idle. |
| 238 int32 pending = | 239 int32 pending = |
| 239 (put_ + usable_entry_count_ - last_put_sent_) % usable_entry_count_; | 240 (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_; |
| 240 int32 limit = usable_entry_count_ / | 241 int32 limit = total_entry_count_ / |
| 241 ((get_offset() == last_put_sent_) ? 16 : 2); | 242 ((get_offset() == last_put_sent_) ? 16 : 2); |
| 242 if (pending > limit) { | 243 if (pending > limit) { |
| 243 Flush(); | 244 Flush(); |
| 244 } else if (flush_automatically_ && | 245 } else if (flush_automatically_ && |
| 245 (commands_issued_ % kCommandsPerFlushCheck == 0)) { | 246 (commands_issued_ % kCommandsPerFlushCheck == 0)) { |
| 246 #if !defined(OS_ANDROID) | 247 #if !defined(OS_ANDROID) |
| 247 // Allow this command buffer to be pre-empted by another if a "reasonable" | 248 // Allow this command buffer to be pre-empted by another if a "reasonable" |
| 248 // amount of work has been done. On high-end machines, this reduces the | 249 // amount of work has been done. On high-end machines, this reduces the |
| 249 // latency of GPU commands. However, on Android, this can cause the | 250 // latency of GPU commands. However, on Android, this can cause the |
| 250 // kernel to thrash between generating GPU commands and executing them. | 251 // kernel to thrash between generating GPU commands and executing them. |
| 251 clock_t current_time = clock(); | 252 clock_t current_time = clock(); |
| 252 if (current_time - last_flush_time_ > kFlushDelay * CLOCKS_PER_SEC) | 253 if (current_time - last_flush_time_ > kFlushDelay * CLOCKS_PER_SEC) |
| 253 Flush(); | 254 Flush(); |
| 254 #endif | 255 #endif |
| 255 } | 256 } |
| 256 } | 257 } |
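The forced-flush heuristic at the end of WaitForAvailableEntries is plain modular arithmetic on the ring: pending counts the entries written since the last put that was actually sent, and the threshold is half the buffer, tightening to a sixteenth when get has already caught up with last_put_sent_ (the reader is idle). A standalone sketch of just that computation, with the member variables passed in as parameters:

#include <stdint.h>

// Sketch, not part of the CL: the auto-flush decision from the end of
// WaitForAvailableEntries. Flush when the unflushed span exceeds half the
// ring buffer, or a sixteenth of it when the reader appears idle.
bool ShouldForceFlush(int32_t put, int32_t last_put_sent, int32_t get_offset,
                      int32_t total_entry_count) {
  int32_t pending =
      (put + total_entry_count - last_put_sent) % total_entry_count;
  int32_t limit =
      total_entry_count / ((get_offset == last_put_sent) ? 16 : 2);
  return pending > limit;
}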
| 257 | 258 |
| 258 CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) { | 259 CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) { |
| 259 AllocateRingBuffer(); | 260 AllocateRingBuffer(); |
| 260 if (!usable()) { | 261 if (!usable()) { |
| 261 return NULL; | 262 return NULL; |
| 262 } | 263 } |
| 263 GPU_DCHECK(HaveRingBuffer()); | 264 GPU_DCHECK(HaveRingBuffer()); |
| 264 ++commands_issued_; | 265 ++commands_issued_; |
| 265 WaitForAvailableEntries(entries); | 266 WaitForAvailableEntries(entries); |
| 266 CommandBufferEntry* space = &entries_[put_]; | 267 CommandBufferEntry* space = &entries_[put_]; |
| 267 put_ += entries; | 268 put_ += entries; |
| 268 GPU_DCHECK_LE(put_, usable_entry_count_); | 269 GPU_DCHECK_LE(put_, total_entry_count_); |
| 269 if (put_ == usable_entry_count_) { | 270 if (put_ == total_entry_count_) { |
| 270 cmd::Jump::Set(&entries_[put_], 0); | |
| 271 put_ = 0; | 271 put_ = 0; |
| 272 } | 272 } |
| 273 return space; | 273 return space; |
| 274 } | 274 } |
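GetSpace is the low-level entry point: it reserves the requested number of contiguous entries, advances put_, and wraps put_ to 0 when the end of the buffer is hit exactly; the padding noops are only needed when a request would straddle the end. A hypothetical usage sketch; real callers normally go through the typed GetCmdSpace helpers in cmd_buffer_helper.h rather than encoding commands by hand:

#include "../client/cmd_buffer_helper.h"

// Sketch, not part of the CL: reserve one entry, encode a single-entry Noop
// into it, and push the new put pointer to the service.
void EmitSingleNoop(gpu::CommandBufferHelper* helper) {
  gpu::CommandBufferEntry* space = helper->GetSpace(1);
  if (!space)
    return;  // Helper is unusable (e.g. lost context or failed allocation).
  gpu::cmd::Noop::Set(space, 1);  // Covers exactly one entry, as in the wrap loop.
  helper->Flush();
}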
| 275 | 275 |
| 276 error::Error CommandBufferHelper::GetError() { | 276 error::Error CommandBufferHelper::GetError() { |
| 277 CommandBuffer::State state = command_buffer_->GetState(); | 277 CommandBuffer::State state = command_buffer_->GetState(); |
| 278 return static_cast<error::Error>(state.error); | 278 return static_cast<error::Error>(state.error); |
| 279 } | 279 } |
| 280 | 280 |
| 281 } // namespace gpu | 281 } // namespace gpu |