Chromium Code Reviews

Unified Diff: gpu/command_buffer/client/cmd_buffer_helper.cc

Issue 1542513002: Switch to standard integer types in gpu/. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: fix Created 4 years, 12 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 // This file contains the implementation of the command buffer helper class.

 #include "gpu/command_buffer/client/cmd_buffer_helper.h"

+#include <stdint.h>
+
 #include <algorithm>
 #include "base/logging.h"
 #include "base/strings/stringprintf.h"
 #include "base/thread_task_runner_handle.h"
 #include "base/time/time.h"
 #include "base/trace_event/memory_allocator_dump.h"
 #include "base/trace_event/memory_dump_manager.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "gpu/command_buffer/common/buffer.h"
 #include "gpu/command_buffer/common/command_buffer.h"
(...skipping 43 matching lines...)
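The substance of this CL is mechanical: Chromium's legacy integer typedefs (int32, uint64, and friends, historically provided via base headers) are replaced throughout with the standard fixed-width types from <stdint.h>, and each file gains a direct include for them. An illustrative before/after, mirroring the declarations changed below:

// Before: legacy typedefs, pulled in indirectly through base headers.
int32 id = -1;
uint64 tracing_process_id = 0;

// After: C99/C++11 standard types, with an explicit include.
#include <stdint.h>
int32_t id = -1;
uint64_t tracing_process_id = 0;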
 void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
   DCHECK_GE(waiting_count, 0);

   // Check if usable & allocated.
   if (!usable() || !HaveRingBuffer()) {
     immediate_entry_count_ = 0;
     return;
   }

   // Get maximum safe contiguous entries.
-  const int32 curr_get = get_offset();
+  const int32_t curr_get = get_offset();
   if (curr_get > put_) {
     immediate_entry_count_ = curr_get - put_ - 1;
   } else {
     immediate_entry_count_ =
         total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
   }

   // Limit entry count to force early flushing.
   if (flush_automatically_) {
-    int32 limit =
+    int32_t limit =
         total_entry_count_ /
         ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);

-    int32 pending =
+    int32_t pending =
         (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;

     if (pending > 0 && pending >= limit) {
       // Time to force flush.
       immediate_entry_count_ = 0;
     } else {
       // Limit remaining entries, but not lower than waiting_count entries to
       // prevent deadlock when command size is greater than the flush limit.
       limit -= pending;
       limit = limit < waiting_count ? waiting_count : limit;
       immediate_entry_count_ =
           immediate_entry_count_ > limit ? limit : immediate_entry_count_;
     }
   }
 }

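The contiguous-free-space arithmetic above can be checked in isolation. A minimal standalone sketch with hypothetical names (not part of this CL), plus a worked example:

#include <stdint.h>
#include <cassert>

// Largest number of entries writable at `put` without wrapping. One slot is
// always held back so put == get unambiguously means "empty".
int32_t ContiguousFreeEntries(int32_t get, int32_t put, int32_t total) {
  if (get > put)
    return get - put - 1;  // free space is the gap just below get
  // Free space runs to the end of the buffer; if get is at 0, put must not
  // reach total (it would wrap to 0 and look empty), so reserve a slot.
  return total - put - (get == 0 ? 1 : 0);
}

int main() {
  assert(ContiguousFreeEntries(8, 2, 10) == 5);  // gap between put and get
  assert(ContiguousFreeEntries(0, 2, 10) == 7);  // get at 0: one slot reserved
  assert(ContiguousFreeEntries(2, 2, 10) == 8);  // empty buffer, room to end
  return 0;
}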
 bool CommandBufferHelper::AllocateRingBuffer() {
   if (!usable()) {
     return false;
   }

   if (HaveRingBuffer()) {
     return true;
   }

-  int32 id = -1;
+  int32_t id = -1;
   scoped_refptr<Buffer> buffer =
       command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
   if (id < 0) {
     ClearUsable();
     DCHECK(error::IsError(command_buffer()->GetLastError()));
     return false;
   }

   ring_buffer_ = buffer;
   ring_buffer_id_ = id;
(...skipping 16 matching lines...)
     ring_buffer_ = nullptr;
   }
 }

 void CommandBufferHelper::FreeRingBuffer() {
   CHECK((put_ == get_offset()) ||
         error::IsError(command_buffer_->GetLastState().error));
   FreeResources();
 }

-bool CommandBufferHelper::Initialize(int32 ring_buffer_size) {
+bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) {
   ring_buffer_size_ = ring_buffer_size;
   return AllocateRingBuffer();
 }

 CommandBufferHelper::~CommandBufferHelper() {
   base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
       this);
   FreeResources();
 }

-bool CommandBufferHelper::WaitForGetOffsetInRange(int32 start, int32 end) {
+bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
   DCHECK(start >= 0 && start <= total_entry_count_);
   DCHECK(end >= 0 && end <= total_entry_count_);
   if (!usable()) {
     return false;
   }
   command_buffer_->WaitForGetOffsetInRange(start, end);
   return command_buffer_->GetLastError() == gpu::error::kNoError;
 }

 void CommandBufferHelper::Flush() {
(...skipping 52 matching lines...)

   CalcImmediateEntries(0);

   return true;
 }

 // Inserts a new token into the command stream. It uses an increasing value
 // scheme so that we don't lose tokens (a token has passed if the current token
 // value is higher than that token). Calls Finish() if the token value wraps,
 // which will be rare.
-int32 CommandBufferHelper::InsertToken() {
+int32_t CommandBufferHelper::InsertToken() {
   AllocateRingBuffer();
   if (!usable()) {
     return token_;
   }
   DCHECK(HaveRingBuffer());
   // Increment token as 31-bit integer. Negative values are used to signal an
   // error.
   token_ = (token_ + 1) & 0x7FFFFFFF;
   cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
   if (cmd) {
     cmd->Init(token_);
     if (token_ == 0) {
       TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
       // we wrapped
       Finish();
       DCHECK_EQ(token_, last_token_read());
     }
   }
   return token_;
 }

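The masking above implements a 31-bit counter: the sign bit stays clear so negative values can signal errors, and the wrap back to 0 is what triggers the rare Finish(). A standalone sketch of the increment (hypothetical helper, not part of this CL; the unsigned detour avoids signed overflow at INT32_MAX):

#include <stdint.h>
#include <cassert>

// Advance a token as a 31-bit counter, keeping the sign bit clear.
int32_t NextToken(int32_t token) {
  return static_cast<int32_t>((static_cast<uint32_t>(token) + 1u) & 0x7FFFFFFFu);
}

int main() {
  assert(NextToken(0) == 1);
  assert(NextToken(0x7FFFFFFF) == 0);  // wrapped; the real code Finish()es here
  return 0;
}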
 // Waits until the current token value is greater than or equal to the value
 // passed as an argument.
-void CommandBufferHelper::WaitForToken(int32 token) {
+void CommandBufferHelper::WaitForToken(int32_t token) {
   if (!usable() || !HaveRingBuffer()) {
     return;
   }
   // Return immediately if corresponding InsertToken failed.
   if (token < 0)
     return;
   if (token > token_) return;  // we wrapped
   if (last_token_read() >= token)
     return;
   Flush();
   command_buffer_->WaitForTokenInRange(token, token_);
 }

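A typical pairing, sketched under the assumption that helper_ points at this class: insert a token after commands that read a shared resource, and wait on it before reusing the resource.

// Fence commands that reference a transfer buffer.
int32_t token = helper_->InsertToken();
// ... later, before overwriting that buffer ...
helper_->WaitForToken(token);  // returns once last_token_read() >= token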
 // Waits for available entries, basically waiting until get >= put + count + 1.
 // It actually waits for contiguous entries, so it may need to wrap the buffer
 // around, adding noops. Thus this function may change the value of put_. The
 // function will return early if an error occurs, in which case the requested
 // space may not be available.
-void CommandBufferHelper::WaitForAvailableEntries(int32 count) {
+void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
   AllocateRingBuffer();
   if (!usable()) {
     return;
   }
   DCHECK(HaveRingBuffer());
   DCHECK(count < total_entry_count_);
   if (put_ + count > total_entry_count_) {
     // There's not enough room between the current put and the end of the
     // buffer, so we need to wrap. We will add noops all the way to the end,
     // but we need to make sure get wraps first, actually that get is 1 or
     // more (since put will wrap to 0 after we add the noops).
     DCHECK_LE(1, put_);
-    int32 curr_get = get_offset();
+    int32_t curr_get = get_offset();
     if (curr_get > put_ || curr_get == 0) {
       TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
       Flush();
       if (!WaitForGetOffsetInRange(1, put_))
         return;
       curr_get = get_offset();
       DCHECK_LE(curr_get, put_);
       DCHECK_NE(0, curr_get);
     }
     // Insert Noops to fill out the buffer.
-    int32 num_entries = total_entry_count_ - put_;
+    int32_t num_entries = total_entry_count_ - put_;
     while (num_entries > 0) {
-      int32 num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
+      int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
       cmd::Noop::Set(&entries_[put_], num_to_skip);
       put_ += num_to_skip;
       num_entries -= num_to_skip;
     }
     put_ = 0;
   }

   // Try to get 'count' entries without flushing.
   CalcImmediateEntries(count);
   if (immediate_entry_count_ < count) {
     // Try again with a shallow Flush().
     Flush();
     CalcImmediateEntries(count);
     if (immediate_entry_count_ < count) {
       // Buffer is full. Need to wait for entries.
       TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
       if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_,
                                    put_))
         return;
       CalcImmediateEntries(count);
       DCHECK_GE(immediate_entry_count_, count);
     }
   }
 }

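The wrap path above fills the tail of the ring with noop commands, each limited by the maximum size a command header can encode. A self-contained sketch of just that padding loop (Entry and kMaxHeaderSize are stand-ins, not the real CommandBufferEntry / CommandHeader::kMaxSize):

#include <stdint.h>
#include <algorithm>
#include <vector>

struct Entry { int32_t noop_skip_count = 0; };  // stand-in for a noop command
constexpr int32_t kMaxHeaderSize = 2048;        // assumed per-command size cap

// Pad entries [put, total) with noops, each covering at most kMaxHeaderSize
// entries, then wrap put back to the start of the buffer.
int32_t PadToEndWithNoops(std::vector<Entry>& entries, int32_t put) {
  int32_t num_entries = static_cast<int32_t>(entries.size()) - put;
  while (num_entries > 0) {
    int32_t num_to_skip = std::min(kMaxHeaderSize, num_entries);
    entries[put].noop_skip_count = num_to_skip;  // one noop spans the gap
    put += num_to_skip;
    num_entries -= num_to_skip;
  }
  return 0;  // new value of put
}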
-int32 CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
-  int32 current_get_offset = get_offset();
+int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
+  int32_t current_get_offset = get_offset();
   if (current_get_offset > put_) {
     return current_get_offset - put_ - 1;
   } else {
     return current_get_offset + total_entry_count_ - put_ -
            (current_get_offset == 0 ? 1 : 0);
   }
 }

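A quick numeric check of the two branches above, with assumed values total_entry_count_ = 1024 and put_ = 1000: if get is at 1008, the free count is 1008 - 1000 - 1 = 7; if get has wrapped to 0, it is 0 + 1024 - 1000 - 1 = 23. In both cases one slot is held back so a full ring (put one behind get) stays distinguishable from an empty one (put == get).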
 bool CommandBufferHelper::OnMemoryDump(
     const base::trace_event::MemoryDumpArgs& args,
     base::trace_event::ProcessMemoryDump* pmd) {
   if (!HaveRingBuffer())
     return true;

-  const uint64 tracing_process_id =
+  const uint64_t tracing_process_id =
       base::trace_event::MemoryDumpManager::GetInstance()
           ->GetTracingProcessId();

   base::trace_event::MemoryAllocatorDump* dump =
       pmd->CreateAllocatorDump(base::StringPrintf(
           "gpu/command_buffer_memory/buffer_%d", ring_buffer_id_));
   dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
                   base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                   ring_buffer_size_);
   dump->AddScalar("free_size",
                   base::trace_event::MemoryAllocatorDump::kUnitsBytes,
                   GetTotalFreeEntriesNoWaiting() * sizeof(CommandBufferEntry));
   auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
   const int kImportance = 2;
   pmd->CreateSharedGlobalAllocatorDump(guid);
   pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);

   return true;
 }

 }  // namespace gpu
