OLD | NEW |
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file contains the implementation of the command buffer helper class. | 5 // This file contains the implementation of the command buffer helper class. |
6 | 6 |
7 #include "../client/cmd_buffer_helper.h" | 7 #include "../client/cmd_buffer_helper.h" |
8 #include "../common/command_buffer.h" | 8 #include "../common/command_buffer.h" |
9 #include "../common/trace_event.h" | 9 #include "../common/trace_event.h" |
10 | 10 |
(...skipping 12 matching lines...) |
23 total_entry_count_(0), | 23 total_entry_count_(0), |
24 usable_entry_count_(0), | 24 usable_entry_count_(0), |
25 token_(0), | 25 token_(0), |
26 put_(0), | 26 put_(0), |
27 last_put_sent_(0), | 27 last_put_sent_(0), |
28 commands_issued_(0), | 28 commands_issued_(0), |
29 last_flush_time_(0) { | 29 last_flush_time_(0) { |
30 } | 30 } |
31 | 31 |
32 bool CommandBufferHelper::AllocateRingBuffer() { | 32 bool CommandBufferHelper::AllocateRingBuffer() { |
| 33 if (HaveRingBuffer()) { |
| 34 return true; |
| 35 } |
| 36 |
33 int32 id = command_buffer_->CreateTransferBuffer(ring_buffer_size_, -1); | 37 int32 id = command_buffer_->CreateTransferBuffer(ring_buffer_size_, -1); |
34 if (id < 0) { | 38 if (id < 0) { |
35 return false; | 39 return false; |
36 } | 40 } |
37 | 41 |
38 ring_buffer_ = command_buffer_->GetTransferBuffer(id); | 42 ring_buffer_ = command_buffer_->GetTransferBuffer(id); |
39 if (!ring_buffer_.ptr) | 43 if (!ring_buffer_.ptr) |
40 return false; | 44 return false; |
41 | 45 |
42 ring_buffer_id_ = id; | 46 ring_buffer_id_ = id; |
(...skipping 11 matching lines...) |
54 | 58 |
55 const int32 kJumpEntries = | 59 const int32 kJumpEntries = |
56 sizeof(cmd::Jump) / sizeof(*entries_); // NOLINT | 60 sizeof(cmd::Jump) / sizeof(*entries_); // NOLINT |
57 | 61 |
58 total_entry_count_ = num_ring_buffer_entries; | 62 total_entry_count_ = num_ring_buffer_entries; |
59 usable_entry_count_ = total_entry_count_ - kJumpEntries; | 63 usable_entry_count_ = total_entry_count_ - kJumpEntries; |
60 put_ = state.put_offset; | 64 put_ = state.put_offset; |
61 return true; | 65 return true; |
62 } | 66 } |
63 | 67 |
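A worked example of the entry accounting in AllocateRingBuffer(). The concrete sizes are assumptions for illustration only and are not taken from this CL; the actual computation of num_ring_buffer_entries sits in the elided lines above.

    // Illustrative sketch only. Assuming sizeof(CommandBufferEntry) == 4 and
    // sizeof(cmd::Jump) == 8 (assumed sizes), and that num_ring_buffer_entries
    // is the buffer size in entry units, a 1 MB ring buffer would give:
    //   num_ring_buffer_entries = 1048576 / 4 = 262144
    //   kJumpEntries            = 8 / 4       = 2
    //   usable_entry_count_     = 262144 - 2  = 262142
    // The last kJumpEntries slots are reserved for the wrap-around jump that
    // GetSpace() writes once put_ reaches usable_entry_count_.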
| 68 void CommandBufferHelper::FreeRingBuffer() { |
| 69 GPU_CHECK_EQ(put_, get_offset()); |
| 70 if (HaveRingBuffer()) { |
| 71 command_buffer_->DestroyTransferBuffer(ring_buffer_id_); |
| 72 ring_buffer_id_ = -1; |
| 73 } |
| 74 } |
| 75 |
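A minimal usage sketch for the new allocate/free pair. Because FreeRingBuffer() checks put_ == get_offset(), the buffer has to be drained first, and Finish() is the obvious way to get there; the helper pointer name is hypothetical.

    // Sketch, not from this CL: release the ring buffer when idle and let it
    // come back lazily on the next command.
    helper->Finish();          // loops FlushSync() until put_ == get_offset()
    helper->FreeRingBuffer();  // GPU_CHECK_EQ(put_, get_offset()) now holds
    // ... later ...
    helper->InsertToken();     // re-allocates via AllocateRingBuffer()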
64 bool CommandBufferHelper::Initialize(int32 ring_buffer_size) { | 76 bool CommandBufferHelper::Initialize(int32 ring_buffer_size) { |
65 ring_buffer_size_ = ring_buffer_size; | 77 ring_buffer_size_ = ring_buffer_size; |
66 return AllocateRingBuffer(); | 78 return AllocateRingBuffer(); |
67 } | 79 } |
68 | 80 |
69 CommandBufferHelper::~CommandBufferHelper() { | 81 CommandBufferHelper::~CommandBufferHelper() { |
70 } | 82 } |
71 | 83 |
72 bool CommandBufferHelper::FlushSync() { | 84 bool CommandBufferHelper::FlushSync() { |
| 85 GPU_DCHECK(HaveRingBuffer()); |
73 last_flush_time_ = clock(); | 86 last_flush_time_ = clock(); |
74 last_put_sent_ = put_; | 87 last_put_sent_ = put_; |
75 CommandBuffer::State state = command_buffer_->FlushSync(put_, get_offset()); | 88 CommandBuffer::State state = command_buffer_->FlushSync(put_, get_offset()); |
76 return state.error == error::kNoError; | 89 return state.error == error::kNoError; |
77 } | 90 } |
78 | 91 |
79 void CommandBufferHelper::Flush() { | 92 void CommandBufferHelper::Flush() { |
| 93 GPU_DCHECK(HaveRingBuffer()); |
80 last_flush_time_ = clock(); | 94 last_flush_time_ = clock(); |
81 last_put_sent_ = put_; | 95 last_put_sent_ = put_; |
82 command_buffer_->Flush(put_); | 96 command_buffer_->Flush(put_); |
83 } | 97 } |
84 | 98 |
85 // Calls Flush() and then waits until the buffer is empty. Breaks early if the | 99 // Calls Flush() and then waits until the buffer is empty. Breaks early if the |
86 // error is set. | 100 // error is set. |
87 bool CommandBufferHelper::Finish() { | 101 bool CommandBufferHelper::Finish() { |
88 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish"); | 102 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish"); |
| 103 GPU_DCHECK(HaveRingBuffer()); |
89 do { | 104 do { |
90 // Do not loop forever if the flush fails, meaning the command buffer reader | 105 // Do not loop forever if the flush fails, meaning the command buffer reader |
91 // has shut down. | 106 // has shut down. |
92 if (!FlushSync()) | 107 if (!FlushSync()) |
93 return false; | 108 return false; |
94 } while (put_ != get_offset()); | 109 } while (put_ != get_offset()); |
95 | 110 |
96 return true; | 111 return true; |
97 } | 112 } |
98 | 113 |
99 // Inserts a new token into the command stream. It uses an increasing value | 114 // Inserts a new token into the command stream. It uses an increasing value |
100 // scheme so that we don't lose tokens (a token has passed if the current token | 115 // scheme so that we don't lose tokens (a token has passed if the current token |
101 // value is higher than that token). Calls Finish() if the token value wraps, | 116 // value is higher than that token). Calls Finish() if the token value wraps, |
102 // which will be rare. | 117 // which will be rare. |
103 int32 CommandBufferHelper::InsertToken() { | 118 int32 CommandBufferHelper::InsertToken() { |
| 119 AllocateRingBuffer(); |
| 120 GPU_DCHECK(HaveRingBuffer()); |
104 // Increment token as 31-bit integer. Negative values are used to signal an | 121 // Increment token as 31-bit integer. Negative values are used to signal an |
105 // error. | 122 // error. |
106 token_ = (token_ + 1) & 0x7FFFFFFF; | 123 token_ = (token_ + 1) & 0x7FFFFFFF; |
107 cmd::SetToken& cmd = GetCmdSpace<cmd::SetToken>(); | 124 cmd::SetToken& cmd = GetCmdSpace<cmd::SetToken>(); |
108 cmd.Init(token_); | 125 cmd.Init(token_); |
109 if (token_ == 0) { | 126 if (token_ == 0) { |
110 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)"); | 127 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)"); |
111 // we wrapped | 128 // we wrapped |
112 Finish(); | 129 Finish(); |
113 GPU_DCHECK_EQ(token_, last_token_read()); | 130 GPU_DCHECK_EQ(token_, last_token_read()); |
114 } | 131 } |
115 return token_; | 132 return token_; |
116 } | 133 } |
117 | 134 |
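For clarity, the 31-bit wrap performed by the masking step in InsertToken() behaves as follows (standalone arithmetic sketch, values chosen for illustration):

    // token_ stays in [0, 0x7FFFFFFF], so it can never look like an error
    // (negative) value:
    //   (0x7FFFFFFE + 1) & 0x7FFFFFFF == 0x7FFFFFFF   // still positive
    //   (0x7FFFFFFF + 1) & 0x7FFFFFFF == 0            // wrap; triggers Finish()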
118 // Waits until the current token value is greater than or equal to the value | 135 // Waits until the current token value is greater than or equal to the value |
119 // passed as an argument. | 136 // passed as an argument. |
120 void CommandBufferHelper::WaitForToken(int32 token) { | 137 void CommandBufferHelper::WaitForToken(int32 token) { |
| 138 GPU_DCHECK(HaveRingBuffer()); |
121 TRACE_EVENT_IF_LONGER_THAN0(50, "gpu", "CommandBufferHelper::WaitForToken"); | 139 TRACE_EVENT_IF_LONGER_THAN0(50, "gpu", "CommandBufferHelper::WaitForToken"); |
122 // Return immediately if corresponding InsertToken failed. | 140 // Return immediately if corresponding InsertToken failed. |
123 if (token < 0) | 141 if (token < 0) |
124 return; | 142 return; |
125 if (token > token_) return; // we wrapped | 143 if (token > token_) return; // we wrapped |
126 while (last_token_read() < token) { | 144 while (last_token_read() < token) { |
127 if (get_offset() == put_) { | 145 if (get_offset() == put_) { |
128 GPU_LOG(FATAL) << "Empty command buffer while waiting on a token."; | 146 GPU_LOG(FATAL) << "Empty command buffer while waiting on a token."; |
129 return; | 147 return; |
130 } | 148 } |
131 // Do not loop forever if the flush fails, meaning the command buffer reader | 149 // Do not loop forever if the flush fails, meaning the command buffer reader |
132 // has shut down. | 150 // has shut down. |
133 if (!FlushSync()) | 151 if (!FlushSync()) |
134 return; | 152 return; |
135 } | 153 } |
136 } | 154 } |
137 | 155 |
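A typical pairing of InsertToken() and WaitForToken(), e.g. to know when the service side has consumed the commands issued before the token; the surrounding code is a sketch, not part of this CL.

    // Fence a point in the command stream and wait on it later.
    int32 fence = helper->InsertToken();   // negative only if an error occurred
    // ... issue more commands, do client-side work ...
    helper->WaitForToken(fence);           // returns immediately if fence < 0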
138 // Waits for available entries, basically waiting until get >= put + count + 1. | 156 // Waits for available entries, basically waiting until get >= put + count + 1. |
139 // It actually waits for contiguous entries, so it may need to wrap the buffer | 157 // It actually waits for contiguous entries, so it may need to wrap the buffer |
140 // around, adding a jump. Thus this function may change the value of put_. The | 158 // around, adding a jump. Thus this function may change the value of put_. The |
141 // function will return early if an error occurs, in which case the requested | 159 // function will return early if an error occurs, in which case the requested |
142 // space may not be available. | 160 // space may not be available. |
143 void CommandBufferHelper::WaitForAvailableEntries(int32 count) { | 161 void CommandBufferHelper::WaitForAvailableEntries(int32 count) { |
| 162 AllocateRingBuffer(); |
| 163 GPU_DCHECK(HaveRingBuffer()); |
144 GPU_DCHECK(count < usable_entry_count_); | 164 GPU_DCHECK(count < usable_entry_count_); |
145 if (put_ + count > usable_entry_count_) { | 165 if (put_ + count > usable_entry_count_) { |
146 // There's not enough room between the current put and the end of the | 166 // There's not enough room between the current put and the end of the |
147 // buffer, so we need to wrap. We will add a jump back to the start, but we | 167 // buffer, so we need to wrap. We will add a jump back to the start, but we |
148 // need to make sure get wraps first, that is, that get is 1 or more (since | 168 // need to make sure get wraps first, that is, that get is 1 or more (since |
149 // put will wrap to 0 after we add the jump). | 169 // put will wrap to 0 after we add the jump). |
150 GPU_DCHECK_LE(1, put_); | 170 GPU_DCHECK_LE(1, put_); |
151 if (get_offset() > put_ || get_offset() == 0) { | 171 if (get_offset() > put_ || get_offset() == 0) { |
152 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); | 172 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries"); |
153 while (get_offset() > put_ || get_offset() == 0) { | 173 while (get_offset() > put_ || get_offset() == 0) { |
(...skipping 27 matching lines...) |
181 } else if (commands_issued_ % kCommandsPerFlushCheck == 0) { | 201 } else if (commands_issued_ % kCommandsPerFlushCheck == 0) { |
182 // Allow this command buffer to be pre-empted by another if a "reasonable" | 202 // Allow this command buffer to be pre-empted by another if a "reasonable" |
183 // amount of work has been done. | 203 // amount of work has been done. |
184 clock_t current_time = clock(); | 204 clock_t current_time = clock(); |
185 if (current_time - last_flush_time_ > kFlushDelay * CLOCKS_PER_SEC) | 205 if (current_time - last_flush_time_ > kFlushDelay * CLOCKS_PER_SEC) |
186 Flush(); | 206 Flush(); |
187 } | 207 } |
188 } | 208 } |
189 | 209 |
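The periodic-flush branch above only consults clock() every kCommandsPerFlushCheck commands. Both constants are defined elsewhere in the file, so the numbers below are purely illustrative assumptions:

    // Assumed values, not taken from this diff:
    //   kCommandsPerFlushCheck = 100, kFlushDelay = 1.0 / 250 seconds.
    // Then the time check runs at most once per 100 commands, and an extra
    // Flush() is issued only if
    //   clock() - last_flush_time_ > (1.0 / 250) * CLOCKS_PER_SEC,
    // i.e. if more than 4 ms of processor time have elapsed since the last
    // flush.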
190 CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) { | 210 CommandBufferEntry* CommandBufferHelper::GetSpace(uint32 entries) { |
| 211 AllocateRingBuffer(); |
| 212 GPU_DCHECK(HaveRingBuffer()); |
191 ++commands_issued_; | 213 ++commands_issued_; |
192 WaitForAvailableEntries(entries); | 214 WaitForAvailableEntries(entries); |
193 CommandBufferEntry* space = &entries_[put_]; | 215 CommandBufferEntry* space = &entries_[put_]; |
194 put_ += entries; | 216 put_ += entries; |
195 GPU_DCHECK_LE(put_, usable_entry_count_); | 217 GPU_DCHECK_LE(put_, usable_entry_count_); |
196 if (put_ == usable_entry_count_) { | 218 if (put_ == usable_entry_count_) { |
197 cmd::Jump::Set(&entries_[put_], 0); | 219 cmd::Jump::Set(&entries_[put_], 0); |
198 put_ = 0; | 220 put_ = 0; |
199 } | 221 } |
200 return space; | 222 return space; |
201 } | 223 } |
202 | 224 |
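GetCmdSpace<T>() (used by InsertToken() above) is declared in cmd_buffer_helper.h and is not part of this diff; the following hypothetical shape is shown only to make the GetSpace() contract concrete.

    // Hypothetical sketch, not the real header code: a fixed-size command T
    // occupies sizeof(T) / sizeof(CommandBufferEntry) contiguous entries.
    template <typename T>
    T& GetCmdSpace() {
      CommandBufferEntry* space =
          GetSpace(sizeof(T) / sizeof(CommandBufferEntry));
      return *reinterpret_cast<T*>(space);
    }
    // Usage, as in InsertToken() above:
    //   cmd::SetToken& cmd = GetCmdSpace<cmd::SetToken>();
    //   cmd.Init(token_);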
203 error::Error CommandBufferHelper::GetError() { | 225 error::Error CommandBufferHelper::GetError() { |
204 CommandBuffer::State state = command_buffer_->GetState(); | 226 CommandBuffer::State state = command_buffer_->GetState(); |
205 return static_cast<error::Error>(state.error); | 227 return static_cast<error::Error>(state.error); |
206 } | 228 } |
207 | 229 |
208 } // namespace gpu | 230 } // namespace gpu |