Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(335)

Side by Side Diff: content/common/gpu/texture_image_transport_surface.cc

Issue 11194042: Implement TextureImageTransportSurface using texture mailbox (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "content/common/gpu/texture_image_transport_surface.h" 5 #include "content/common/gpu/texture_image_transport_surface.h"
6 6
7 #include <string> 7 #include <string>
8 #include <vector> 8 #include <vector>
9 9
10 #include "base/command_line.h" 10 #include "base/command_line.h"
11 #include "content/common/gpu/gl_scoped_binders.h" 11 #include "content/common/gpu/gl_scoped_binders.h"
12 #include "content/common/gpu/gpu_channel.h" 12 #include "content/common/gpu/gpu_channel.h"
13 #include "content/common/gpu/gpu_channel_manager.h" 13 #include "content/common/gpu/gpu_channel_manager.h"
14 #include "content/common/gpu/gpu_messages.h" 14 #include "content/common/gpu/gpu_messages.h"
15 #include "content/common/gpu/sync_point_manager.h" 15 #include "content/common/gpu/sync_point_manager.h"
16 #include "content/public/common/content_switches.h" 16 #include "content/public/common/content_switches.h"
17 #include "gpu/command_buffer/service/context_group.h" 17 #include "gpu/command_buffer/service/context_group.h"
18 #include "gpu/command_buffer/service/gpu_scheduler.h" 18 #include "gpu/command_buffer/service/gpu_scheduler.h"
19 #include "gpu/command_buffer/service/texture_manager.h" 19 #include "gpu/command_buffer/service/texture_definition.h"
20 20
21 using gpu::gles2::ContextGroup; 21 using gpu::gles2::ContextGroup;
22 using gpu::gles2::MailboxManager;
23 using gpu::gles2::MailboxName;
24 using gpu::gles2::TextureDefinition;
22 using gpu::gles2::TextureManager; 25 using gpu::gles2::TextureManager;
23 typedef TextureManager::TextureInfo TextureInfo;
24 26
25 namespace content { 27 namespace content {
26 28
27 TextureImageTransportSurface::Texture::Texture() 29 TextureImageTransportSurface::Texture::Texture()
28 : client_id(0), 30 : service_id(0),
29 sent_to_client(false) { 31 identifier(0) {
30 } 32 }
31 33
32 TextureImageTransportSurface::Texture::~Texture() { 34 TextureImageTransportSurface::Texture::~Texture() {
33 } 35 }
34 36
35 TextureImageTransportSurface::TextureImageTransportSurface( 37 TextureImageTransportSurface::TextureImageTransportSurface(
36 GpuChannelManager* manager, 38 GpuChannelManager* manager,
37 GpuCommandBufferStub* stub, 39 GpuCommandBufferStub* stub,
38 const gfx::GLSurfaceHandle& handle) 40 const gfx::GLSurfaceHandle& handle)
39 : fbo_id_(0), 41 : fbo_id_(0),
40 front_(0),
41 stub_destroyed_(false), 42 stub_destroyed_(false),
42 backbuffer_suggested_allocation_(true), 43 backbuffer_suggested_allocation_(true),
43 frontbuffer_suggested_allocation_(true), 44 frontbuffer_suggested_allocation_(true),
44 frontbuffer_is_protected_(true),
45 protection_state_id_(0),
46 handle_(handle), 45 handle_(handle),
47 parent_stub_(NULL),
48 is_swap_buffers_pending_(false), 46 is_swap_buffers_pending_(false),
49 did_unschedule_(false), 47 did_unschedule_(false) {
50 did_flip_(false) {
51 helper_.reset(new ImageTransportHelper(this, 48 helper_.reset(new ImageTransportHelper(this,
52 manager, 49 manager,
53 stub, 50 stub,
54 gfx::kNullPluginWindow)); 51 gfx::kNullPluginWindow));
55 } 52 }
56 53
57 TextureImageTransportSurface::~TextureImageTransportSurface() { 54 TextureImageTransportSurface::~TextureImageTransportSurface() {
58 DCHECK(stub_destroyed_); 55 DCHECK(stub_destroyed_);
59 Destroy(); 56 Destroy();
60 } 57 }
61 58
62 bool TextureImageTransportSurface::Initialize() { 59 bool TextureImageTransportSurface::Initialize() {
60 mailbox_manager_ =
61 helper_->stub()->decoder()->GetContextGroup()->mailbox_manager();
62
63 backbuffer_.identifier = 1;
64
63 GpuChannelManager* manager = helper_->manager(); 65 GpuChannelManager* manager = helper_->manager();
64 GpuChannel* parent_channel = manager->LookupChannel(handle_.parent_client_id);
65 if (!parent_channel)
66 return false;
67
68 parent_stub_ = parent_channel->LookupCommandBuffer(handle_.parent_context_id);
69 if (!parent_stub_)
70 return false;
71
72 parent_stub_->AddDestructionObserver(this);
73 TextureManager* texture_manager =
74 parent_stub_->decoder()->GetContextGroup()->texture_manager();
75 DCHECK(texture_manager);
76
77 for (int i = 0; i < 2; ++i) {
78 Texture& texture = textures_[i];
79 texture.client_id = handle_.parent_texture_id[i];
80 texture.info = texture_manager->GetTextureInfo(texture.client_id);
81 if (!texture.info)
82 return false;
83
84 if (!texture.info->target())
85 texture_manager->SetInfoTarget(texture.info, GL_TEXTURE_2D);
86 texture_manager->SetParameter(
87 texture.info, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
88 texture_manager->SetParameter(
89 texture.info, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
90 texture_manager->SetParameter(
91 texture.info, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
92 texture_manager->SetParameter(
93 texture.info, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
94 }
95
96 surface_ = manager->GetDefaultOffscreenSurface(); 66 surface_ = manager->GetDefaultOffscreenSurface();
97 if (!surface_.get()) 67 if (!surface_.get())
98 return false; 68 return false;
99 69
100 if (!helper_->Initialize()) 70 if (!helper_->Initialize())
101 return false; 71 return false;
102 72
103 const CommandLine* command_line = CommandLine::ForCurrentProcess(); 73 GpuChannel* parent_channel = manager->LookupChannel(handle_.parent_client_id);
104 if (command_line->HasSwitch(switches::kUIPrioritizeInGpuProcess)) 74 if (parent_channel) {
105 helper_->SetPreemptByCounter(parent_channel->MessagesPendingCount()); 75 const CommandLine* command_line = CommandLine::ForCurrentProcess();
76 if (command_line->HasSwitch(switches::kUIPrioritizeInGpuProcess))
77 helper_->SetPreemptByCounter(parent_channel->MessagesPendingCount());
78 }
106 79
107 return true; 80 return true;
108 } 81 }
109 82
110 void TextureImageTransportSurface::Destroy() { 83 void TextureImageTransportSurface::Destroy() {
111 if (parent_stub_) { 84 if (!stub_destroyed_) {
112 parent_stub_->decoder()->MakeCurrent(); 85 GpuHostMsg_AcceleratedSurfaceRelease_Params params;
113 ReleaseParentStub(); 86 helper_->SendAcceleratedSurfaceRelease(params);
114 } 87 }
115 88
jonathan.backer 2012/12/04 19:15:13 Do we need a |ReleaseBackTexture()| here or in OnW
no sievers 2012/12/05 22:02:34 Good catch. I moved it (and the SendAcceleratedSur
116 if (surface_.get()) 89 if (surface_.get())
117 surface_ = NULL; 90 surface_ = NULL;
118 91
119 helper_->Destroy(); 92 helper_->Destroy();
120 } 93 }
121 94
122 bool TextureImageTransportSurface::DeferDraws() { 95 bool TextureImageTransportSurface::DeferDraws() {
123 // The command buffer hit a draw/clear command that could clobber the 96 // The command buffer hit a draw/clear command that could clobber the
124 // texture in use by the UI compositor. If a Swap is pending, abort 97 // texture in use by the UI compositor. If a Swap is pending, abort
125 // processing of the command by returning true and unschedule until the Swap 98 // processing of the command by returning true and unschedule until the Swap
(...skipping 16 matching lines...) Expand all
142 } 115 }
143 116
144 bool TextureImageTransportSurface::OnMakeCurrent(gfx::GLContext* context) { 117 bool TextureImageTransportSurface::OnMakeCurrent(gfx::GLContext* context) {
145 if (stub_destroyed_) { 118 if (stub_destroyed_) {
146 // Early-exit so that we don't recreate the fbo. We still want to return 119 // Early-exit so that we don't recreate the fbo. We still want to return
147 // true, so that the context is made current and the GLES2DecoderImpl can 120 // true, so that the context is made current and the GLES2DecoderImpl can
148 // release its own resources. 121 // release its own resources.
149 return true; 122 return true;
150 } 123 }
151 124
125 if (!context_.get()) {
126 DCHECK(helper_->stub());
127 context_ = helper_->stub()->decoder()->GetGLContext();
128 }
129
152 if (!fbo_id_) { 130 if (!fbo_id_) {
153 glGenFramebuffersEXT(1, &fbo_id_); 131 glGenFramebuffersEXT(1, &fbo_id_);
154 glBindFramebufferEXT(GL_FRAMEBUFFER, fbo_id_); 132 glBindFramebufferEXT(GL_FRAMEBUFFER, fbo_id_);
155 CreateBackTexture(gfx::Size(1, 1)); 133 current_size_ = gfx::Size(1, 1);
134 helper_->stub()->AddDestructionObserver(this);
135 }
136
137 // We could be receiving non-deferred GL commands, that is anything that does
138 // not need a framebuffer.
139 if (!backbuffer_.service_id && !is_swap_buffers_pending_ &&
140 backbuffer_suggested_allocation_) {
141 CreateBackTexture();
156 142
157 #ifndef NDEBUG 143 #ifndef NDEBUG
158 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); 144 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
159 if (status != GL_FRAMEBUFFER_COMPLETE) { 145 if (status != GL_FRAMEBUFFER_COMPLETE) {
160 DLOG(ERROR) << "Framebuffer incomplete."; 146 DLOG(ERROR) << "Framebuffer incomplete.";
161 glDeleteFramebuffersEXT(1, &fbo_id_); 147 glDeleteFramebuffersEXT(1, &fbo_id_);
162 fbo_id_ = 0; 148 fbo_id_ = 0;
163 return false; 149 return false;
164 } 150 }
165 #endif 151 #endif
166 DCHECK(helper_->stub());
167 helper_->stub()->AddDestructionObserver(this);
168 } 152 }
169
170 return true; 153 return true;
171 } 154 }
172 155
173 unsigned int TextureImageTransportSurface::GetBackingFrameBufferObject() { 156 unsigned int TextureImageTransportSurface::GetBackingFrameBufferObject() {
174 return fbo_id_; 157 return fbo_id_;
175 } 158 }
176 159
177 void TextureImageTransportSurface::SetBackbufferAllocation(bool allocation) { 160 void TextureImageTransportSurface::SetBackbufferAllocation(bool allocation) {
178 DCHECK(!is_swap_buffers_pending_); 161 DCHECK(!is_swap_buffers_pending_);
179 if (backbuffer_suggested_allocation_ == allocation) 162 if (backbuffer_suggested_allocation_ == allocation)
180 return; 163 return;
181 backbuffer_suggested_allocation_ = allocation; 164 backbuffer_suggested_allocation_ = allocation;
182 165
183 if (!helper_->MakeCurrent())
184 return;
185
186 if (backbuffer_suggested_allocation_) { 166 if (backbuffer_suggested_allocation_) {
187 DCHECK(!textures_[back()].info->service_id() || 167 DCHECK(!backbuffer_.service_id);
188 !textures_[back()].sent_to_client); 168 CreateBackTexture();
189 CreateBackTexture(textures_[back()].size);
190 } else { 169 } else {
191 ReleaseTexture(back()); 170 ReleaseBackTexture();
192 } 171 }
193 } 172 }
194 173
195 void TextureImageTransportSurface::SetFrontbufferAllocation(bool allocation) { 174 void TextureImageTransportSurface::SetFrontbufferAllocation(bool allocation) {
196 if (frontbuffer_suggested_allocation_ == allocation) 175 if (frontbuffer_suggested_allocation_ == allocation)
197 return; 176 return;
198 frontbuffer_suggested_allocation_ = allocation; 177 frontbuffer_suggested_allocation_ = allocation;
199 AdjustFrontBufferAllocation(); 178 AdjustFrontBufferAllocation();
200 } 179 }
201 180
202 void TextureImageTransportSurface::AdjustFrontBufferAllocation() { 181 void TextureImageTransportSurface::AdjustFrontBufferAllocation() {
jonathan.backer 2012/12/04 19:15:13 Can we nuke this separate method? AFAICT, it's onl
no sievers 2012/12/05 22:02:34 Done.
203 if (!helper_->MakeCurrent()) 182 if (!frontbuffer_suggested_allocation_) {
204 return; 183 GpuHostMsg_AcceleratedSurfaceRelease_Params params;
205 184 helper_->SendAcceleratedSurfaceRelease(params);
206 if (!frontbuffer_suggested_allocation_ && !frontbuffer_is_protected_ &&
207 textures_[front()].info->service_id()) {
208 ReleaseTexture(front());
209 if (textures_[front()].sent_to_client) {
210 GpuHostMsg_AcceleratedSurfaceRelease_Params params;
211 params.identifier = textures_[front()].client_id;
212 helper_->SendAcceleratedSurfaceRelease(params);
213 textures_[front()].sent_to_client = false;
214 }
215 } 185 }
216 } 186 }
217 187
218 void* TextureImageTransportSurface::GetShareHandle() { 188 void* TextureImageTransportSurface::GetShareHandle() {
219 return GetHandle(); 189 return GetHandle();
220 } 190 }
221 191
222 void* TextureImageTransportSurface::GetDisplay() { 192 void* TextureImageTransportSurface::GetDisplay() {
223 return surface_.get() ? surface_->GetDisplay() : NULL; 193 return surface_.get() ? surface_->GetDisplay() : NULL;
224 } 194 }
225 195
226 void* TextureImageTransportSurface::GetConfig() { 196 void* TextureImageTransportSurface::GetConfig() {
227 return surface_.get() ? surface_->GetConfig() : NULL; 197 return surface_.get() ? surface_->GetConfig() : NULL;
228 } 198 }
229 199
230 void TextureImageTransportSurface::OnResize(gfx::Size size) { 200 void TextureImageTransportSurface::OnResize(gfx::Size size) {
231 CreateBackTexture(size); 201 current_size_ = size;
202 CreateBackTexture();
232 } 203 }
233 204
234 void TextureImageTransportSurface::OnWillDestroyStub( 205 void TextureImageTransportSurface::OnWillDestroyStub(
235 GpuCommandBufferStub* stub) { 206 GpuCommandBufferStub* stub) {
236 if (stub == parent_stub_) { 207 DCHECK(stub == helper_->stub());
237 ReleaseParentStub(); 208 stub->RemoveDestructionObserver(this);
238 helper_->SetPreemptByCounter(NULL);
239 } else {
240 DCHECK(stub == helper_->stub());
241 stub->RemoveDestructionObserver(this);
242 209
243 // We are losing the stub owning us, this is our last chance to clean up the 210 // We are losing the stub owning us, this is our last chance to clean up the
244 // resources we allocated in the stub's context. 211 // resources we allocated in the stub's context.
245 if (fbo_id_) { 212 if (fbo_id_) {
246 glDeleteFramebuffersEXT(1, &fbo_id_); 213 glDeleteFramebuffersEXT(1, &fbo_id_);
247 CHECK_GL_ERROR(); 214 CHECK_GL_ERROR();
248 fbo_id_ = 0; 215 fbo_id_ = 0;
249 } 216 }
250 217
251 stub_destroyed_ = true; 218 stub_destroyed_ = true;
252 }
253 } 219 }
254 220
255 bool TextureImageTransportSurface::SwapBuffers() { 221 bool TextureImageTransportSurface::SwapBuffers() {
256 DCHECK(backbuffer_suggested_allocation_); 222 DCHECK(backbuffer_suggested_allocation_);
257 if (!frontbuffer_suggested_allocation_ || !frontbuffer_is_protected_) 223 if (!frontbuffer_suggested_allocation_)
258 return true; 224 return true;
259 if (!parent_stub_) {
260 LOG(ERROR) << "SwapBuffers failed because no parent stub.";
261 return false;
262 }
263 225
264 glFlush(); 226 glFlush();
265 front_ = back(); 227 const uint64 identifier = backbuffer_.identifier;
jonathan.backer 2012/12/04 19:15:13 nit: nix identifier
no sievers 2012/12/05 22:02:34 Done.
266 previous_damage_rect_ = gfx::Rect(textures_[front()].size); 228 ProduceTexture(backbuffer_);
267 229
268 DCHECK(textures_[front()].client_id != 0); 230 // Do not allow destruction while we are still waiting for a swap ACK,
231 // so we do not leak a texture in the mailbox.
232 AddRef();
269 233
270 GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params; 234 GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params;
271 params.surface_handle = textures_[front()].client_id; 235 params.surface_handle = identifier;
272 params.size = textures_[front()].size; 236 params.size = current_size_;
jonathan.backer 2012/12/04 19:15:13 |backbuffer_.size| instead (they should be equal)
no sievers 2012/12/05 22:02:34 Done.
273 params.protection_state_id = protection_state_id_;
274 params.skip_ack = false;
275 helper_->SendAcceleratedSurfaceBuffersSwapped(params); 237 helper_->SendAcceleratedSurfaceBuffersSwapped(params);
276 238
277 DCHECK(!is_swap_buffers_pending_); 239 DCHECK(!is_swap_buffers_pending_);
278 is_swap_buffers_pending_ = true; 240 is_swap_buffers_pending_ = true;
279 return true; 241 return true;
280 } 242 }
281 243
282 bool TextureImageTransportSurface::PostSubBuffer( 244 bool TextureImageTransportSurface::PostSubBuffer(
283 int x, int y, int width, int height) { 245 int x, int y, int width, int height) {
284 DCHECK(backbuffer_suggested_allocation_); 246 DCHECK(backbuffer_suggested_allocation_);
285 DCHECK(textures_[back()].info->service_id()); 247 if (!frontbuffer_suggested_allocation_)
286 if (!frontbuffer_suggested_allocation_ || !frontbuffer_is_protected_)
287 return true; 248 return true;
288 // If we are recreating the frontbuffer with this swap, make sure we are
289 // drawing a full frame.
290 DCHECK(textures_[front()].info->service_id() ||
291 (!x && !y && gfx::Size(width, height) == textures_[back()].size));
292 if (!parent_stub_) {
293 LOG(ERROR) << "PostSubBuffer failed because no parent stub.";
294 return false;
295 }
296
297 const gfx::Rect new_damage_rect(x, y, width, height); 249 const gfx::Rect new_damage_rect(x, y, width, height);
250 DCHECK(gfx::Rect(gfx::Point(), current_size_).Contains(new_damage_rect));
298 251
299 // An empty damage rect is a successful no-op. 252 // An empty damage rect is a successful no-op.
300 if (new_damage_rect.IsEmpty()) 253 if (new_damage_rect.IsEmpty())
301 return true; 254 return true;
302 255
303 int back_texture_service_id = textures_[back()].info->service_id(); 256 glFlush();
304 int front_texture_service_id = textures_[front()].info->service_id(); 257 const uint64 identifier = backbuffer_.identifier;
jonathan.backer 2012/12/04 19:15:13 nit: nix identifier
no sievers 2012/12/05 22:02:34 Done.
258 ProduceTexture(backbuffer_);
305 259
306 gfx::Size expected_size = textures_[back()].size; 260 // Do not allow destruction while we are still waiting for a swap ACK,
307 bool surfaces_same_size = textures_[front()].size == expected_size; 261 // so we do not leak a texture in the mailbox.
308 262 AddRef();
309 if (surfaces_same_size) {
310 std::vector<gfx::Rect> regions_to_copy;
311 GetRegionsToCopy(previous_damage_rect_, new_damage_rect, &regions_to_copy);
312
313 ScopedFrameBufferBinder fbo_binder(fbo_id_);
314 glFramebufferTexture2DEXT(GL_FRAMEBUFFER,
315 GL_COLOR_ATTACHMENT0,
316 GL_TEXTURE_2D,
317 front_texture_service_id,
318 0);
319 ScopedTextureBinder texture_binder(back_texture_service_id);
320
321 for (size_t i = 0; i < regions_to_copy.size(); ++i) {
322 const gfx::Rect& region_to_copy = regions_to_copy[i];
323 if (!region_to_copy.IsEmpty()) {
324 glCopyTexSubImage2D(GL_TEXTURE_2D, 0, region_to_copy.x(),
325 region_to_copy.y(), region_to_copy.x(), region_to_copy.y(),
326 region_to_copy.width(), region_to_copy.height());
327 }
328 }
329 } else if (!surfaces_same_size && did_flip_) {
330 DCHECK(new_damage_rect == gfx::Rect(expected_size));
331 }
332
333 glFlush();
334 front_ = back();
335 previous_damage_rect_ = new_damage_rect;
336
337 DCHECK(textures_[front()].client_id);
338 263
339 GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params params; 264 GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params params;
340 params.surface_handle = textures_[front()].client_id; 265 params.surface_handle = identifier;
341 params.surface_size = textures_[front()].size; 266 params.surface_size = current_size_;
jonathan.backer 2012/12/04 19:15:13 |backbuffer_.size| instead?
no sievers 2012/12/05 22:02:34 Done.
342 params.x = x; 267 params.x = x;
343 params.y = y; 268 params.y = y;
344 params.width = width; 269 params.width = width;
345 params.height = height; 270 params.height = height;
346 params.protection_state_id = protection_state_id_;
347 helper_->SendAcceleratedSurfacePostSubBuffer(params); 271 helper_->SendAcceleratedSurfacePostSubBuffer(params);
348 272
349 DCHECK(!is_swap_buffers_pending_); 273 DCHECK(!is_swap_buffers_pending_);
350 is_swap_buffers_pending_ = true; 274 is_swap_buffers_pending_ = true;
351 return true; 275 return true;
352 } 276 }
353 277
354 std::string TextureImageTransportSurface::GetExtensions() { 278 std::string TextureImageTransportSurface::GetExtensions() {
355 std::string extensions = gfx::GLSurface::GetExtensions(); 279 std::string extensions = gfx::GLSurface::GetExtensions();
356 extensions += extensions.empty() ? "" : " "; 280 extensions += extensions.empty() ? "" : " ";
357 extensions += "GL_CHROMIUM_front_buffer_cached "; 281 extensions += "GL_CHROMIUM_front_buffer_cached ";
358 extensions += "GL_CHROMIUM_post_sub_buffer"; 282 extensions += "GL_CHROMIUM_post_sub_buffer";
359 return extensions; 283 return extensions;
360 } 284 }
361 285
362 gfx::Size TextureImageTransportSurface::GetSize() { 286 gfx::Size TextureImageTransportSurface::GetSize() {
363 gfx::Size size = textures_[back()].size; 287 gfx::Size size = current_size_;
364 288
365 // OSMesa expects a non-zero size. 289 // OSMesa expects a non-zero size.
366 return gfx::Size(size.width() == 0 ? 1 : size.width(), 290 return gfx::Size(size.width() == 0 ? 1 : size.width(),
367 size.height() == 0 ? 1 : size.height()); 291 size.height() == 0 ? 1 : size.height());
368 } 292 }
369 293
370 void* TextureImageTransportSurface::GetHandle() { 294 void* TextureImageTransportSurface::GetHandle() {
371 return surface_.get() ? surface_->GetHandle() : NULL; 295 return surface_.get() ? surface_->GetHandle() : NULL;
372 } 296 }
373 297
374 unsigned TextureImageTransportSurface::GetFormat() { 298 unsigned TextureImageTransportSurface::GetFormat() {
375 return surface_.get() ? surface_->GetFormat() : 0; 299 return surface_.get() ? surface_->GetFormat() : 0;
376 } 300 }
377 301
378 void TextureImageTransportSurface::OnSetFrontSurfaceIsProtected( 302 void TextureImageTransportSurface::OnBufferPresented(uint64 surface_handle,
379 bool is_protected, uint32 protection_state_id) {
380 protection_state_id_ = protection_state_id;
381 if (frontbuffer_is_protected_ == is_protected)
382 return;
383 frontbuffer_is_protected_ = is_protected;
384 AdjustFrontBufferAllocation();
385
386 // If surface is set to protected, and we haven't actually released it yet,
387 // we can set the ui surface handle now just by sending a swap message.
388 if (is_protected && textures_[front()].info->service_id() &&
389 textures_[front()].sent_to_client) {
390 GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params;
391 params.surface_handle = textures_[front()].client_id;
392 params.size = textures_[front()].size;
393 params.protection_state_id = protection_state_id_;
394 params.skip_ack = true;
395 helper_->SendAcceleratedSurfaceBuffersSwapped(params);
396 }
397 }
398
399 void TextureImageTransportSurface::OnBufferPresented(bool presented,
400 uint32 sync_point) { 303 uint32 sync_point) {
401 if (sync_point == 0) { 304 if (sync_point == 0) {
402 BufferPresentedImpl(presented); 305 BufferPresentedImpl(surface_handle);
403 } else { 306 } else {
404 helper_->manager()->sync_point_manager()->AddSyncPointCallback( 307 helper_->manager()->sync_point_manager()->AddSyncPointCallback(
405 sync_point, 308 sync_point,
406 base::Bind(&TextureImageTransportSurface::BufferPresentedImpl, 309 base::Bind(&TextureImageTransportSurface::BufferPresentedImpl,
407 this->AsWeakPtr(), 310 this,
408 presented)); 311 surface_handle));
409 } 312 }
313
314 // Careful, we might get deleted now if we were only waiting for
315 // a final swap ACK.
316 Release();
410 } 317 }
411 318
412 void TextureImageTransportSurface::BufferPresentedImpl(bool presented) { 319 void TextureImageTransportSurface::BufferPresentedImpl(uint64 surface_handle) {
320 DCHECK(!backbuffer_.service_id);
321 if (surface_handle) {
322 DCHECK(surface_handle == 1 || surface_handle == 2);
323 backbuffer_.identifier = surface_handle;
324 ConsumeTexture(backbuffer_);
325 } else {
326 // We didn't get back a texture, so allocate 'the other' buffer.
327 backbuffer_.identifier = (backbuffer_.identifier == 1) ? 2 : 1;
328 mailbox_name(backbuffer_.identifier) = MailboxName();
329 }
330
331 if (stub_destroyed_ && backbuffer_.service_id) {
332 // TODO(sievers): Remove this after changes to the mailbox to take ownership
333 // of the service ids.
334 DCHECK(context_.get() && surface_.get());
335 if (context_->MakeCurrent(surface_.get()))
336 glDeleteTextures(1, &backbuffer_.service_id);
337
338 return;
339 }
340
413 DCHECK(is_swap_buffers_pending_); 341 DCHECK(is_swap_buffers_pending_);
414 is_swap_buffers_pending_ = false; 342 is_swap_buffers_pending_ = false;
415 343
416 if (presented) { 344 // We should not have allowed the backbuffer to be discarded while the ack
417 // If we had not flipped, the two frame damage tracking is inconsistent. 345 // was pending.
418 // So conservatively take the whole frame. 346 DCHECK(backbuffer_suggested_allocation_);
419 if (!did_flip_)
420 previous_damage_rect_ = gfx::Rect(textures_[front()].size);
421 } else {
422 front_ = back();
423 previous_damage_rect_ = gfx::Rect(0, 0, 0, 0);
424 }
425
426 did_flip_ = presented;
427 347
428 // We're relying on the fact that the parent context is 348 // We're relying on the fact that the parent context is
429 // finished with its context when it inserts the sync point that 349 // finished with its context when it inserts the sync point that
430 // triggers this callback. 350 // triggers this callback.
431 if (helper_->MakeCurrent()) { 351 if (helper_->MakeCurrent()) {
432 if (textures_[front()].size != textures_[back()].size || 352 if (backbuffer_.size != current_size_ || !backbuffer_.service_id)
jonathan.backer 2012/12/04 19:15:13 AFAICT, it's only in this method and OnResize that
no sievers 2012/12/05 22:02:34 But we used to have the frontbuffer size also whic
433 !textures_[back()].info->service_id() || 353 CreateBackTexture();
434 !textures_[back()].sent_to_client) { 354 else
435 // We may get an ACK from a stale swap just to reschedule. In that case,
436 // we may not have a backbuffer suggestion and should not recreate one.
437 if (backbuffer_suggested_allocation_)
438 CreateBackTexture(textures_[front()].size);
439 } else {
440 AttachBackTextureToFBO(); 355 AttachBackTextureToFBO();
441 }
442 } 356 }
443 357
444 // Even if MakeCurrent fails, schedule anyway, to trigger the lost context 358 // Even if MakeCurrent fails, schedule anyway, to trigger the lost context
445 // logic. 359 // logic.
446 if (did_unschedule_) { 360 if (did_unschedule_) {
447 did_unschedule_ = false; 361 did_unschedule_ = false;
448 helper_->SetScheduled(true); 362 helper_->SetScheduled(true);
449 } 363 }
450 } 364 }
451 365
452 void TextureImageTransportSurface::OnResizeViewACK() { 366 void TextureImageTransportSurface::OnResizeViewACK() {
453 NOTREACHED(); 367 NOTREACHED();
454 } 368 }
455 369
456 void TextureImageTransportSurface::ReleaseTexture(int id) { 370 void TextureImageTransportSurface::ReleaseBackTexture() {
jonathan.backer 2012/12/04 19:15:13 Modulo the comment I made about leaking before, cu
457 if (!parent_stub_) 371 if (!backbuffer_.service_id)
458 return; 372 return;
459 Texture& texture = textures_[id];
460 TextureInfo* info = texture.info;
461 DCHECK(info);
462 373
463 GLuint service_id = info->service_id(); 374 glDeleteTextures(1, &backbuffer_.service_id);
464 if (!service_id) 375 backbuffer_.service_id = 0;
465 return; 376 mailbox_name(backbuffer_.identifier) = MailboxName();
466 info->SetServiceId(0);
467
468 {
469 ScopedFrameBufferBinder fbo_binder(fbo_id_);
470 glDeleteTextures(1, &service_id);
471 }
472 glFlush(); 377 glFlush();
473 CHECK_GL_ERROR(); 378 CHECK_GL_ERROR();
474 } 379 }
475 380
476 void TextureImageTransportSurface::CreateBackTexture(const gfx::Size& size) { 381 void TextureImageTransportSurface::CreateBackTexture() {
477 if (!parent_stub_) 382 // If |is_swap_buffers_pending| we are waiting for our backbuffer
478 return; 383 // in the mailbox, so we shouldn't be reallocating it now.
479 Texture& texture = textures_[back()]; 384 DCHECK(!is_swap_buffers_pending_);
480 TextureInfo* info = texture.info;
481 DCHECK(info);
482 385
483 GLuint service_id = info->service_id(); 386 if (backbuffer_.service_id && backbuffer_.size == current_size_)
484
485 if (service_id && texture.size == size && texture.sent_to_client)
486 return; 387 return;
487 388
488 if (!service_id) { 389 if (!backbuffer_.service_id) {
489 glGenTextures(1, &service_id); 390 MailboxName new_mailbox_name;
490 info->SetServiceId(service_id); 391 MailboxName& name = mailbox_name(backbuffer_.identifier);
392 // This slot should be uninitialized.
393 DCHECK(!memcmp(&name, &new_mailbox_name, sizeof(MailboxName)));
394 mailbox_manager_->GenerateMailboxName(&new_mailbox_name);
395 name = new_mailbox_name;
396 glGenTextures(1, &backbuffer_.service_id);
491 } 397 }
492 398
493 if (size != texture.size) { 399 backbuffer_.size = current_size_;
494 texture.size = size;
495 TextureManager* texture_manager =
496 parent_stub_->decoder()->GetContextGroup()->texture_manager();
497 texture_manager->SetLevelInfo(
498 info,
499 GL_TEXTURE_2D,
500 0,
501 GL_RGBA,
502 size.width(),
503 size.height(),
504 1,
505 0,
506 GL_RGBA,
507 GL_UNSIGNED_BYTE,
508 true);
509 }
510 400
511 { 401 {
512 ScopedTextureBinder texture_binder(service_id); 402 ScopedTextureBinder texture_binder(backbuffer_.service_id);
513 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); 403 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
514 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); 404 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
515 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); 405 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
516 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); 406 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
517 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 407 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
518 size.width(), size.height(), 0, 408 current_size_.width(), current_size_.height(), 0,
519 GL_RGBA, GL_UNSIGNED_BYTE, NULL); 409 GL_RGBA, GL_UNSIGNED_BYTE, NULL);
520 CHECK_GL_ERROR(); 410 CHECK_GL_ERROR();
521 } 411 }
522 412
523 AttachBackTextureToFBO(); 413 AttachBackTextureToFBO();
524 414
415 const MailboxName& name = mailbox_name(backbuffer_.identifier);
416
525 GpuHostMsg_AcceleratedSurfaceNew_Params params; 417 GpuHostMsg_AcceleratedSurfaceNew_Params params;
526 params.width = size.width(); 418 params.width = current_size_.width();
527 params.height = size.height(); 419 params.height = current_size_.height();
528 params.surface_handle = texture.client_id; 420 params.surface_handle = backbuffer_.identifier;
421 params.mailbox_name.append(
422 reinterpret_cast<const char*>(&name), sizeof(name));
529 helper_->SendAcceleratedSurfaceNew(params); 423 helper_->SendAcceleratedSurfaceNew(params);
530 texture.sent_to_client = true;
531 } 424 }
532 425
533 void TextureImageTransportSurface::AttachBackTextureToFBO() { 426 void TextureImageTransportSurface::AttachBackTextureToFBO() {
534 if (!parent_stub_) 427 DCHECK(backbuffer_.service_id);
535 return;
536 TextureInfo* info = textures_[back()].info;
537 DCHECK(info);
538
539 ScopedFrameBufferBinder fbo_binder(fbo_id_); 428 ScopedFrameBufferBinder fbo_binder(fbo_id_);
540 glFramebufferTexture2DEXT(GL_FRAMEBUFFER, 429 glFramebufferTexture2DEXT(GL_FRAMEBUFFER,
541 GL_COLOR_ATTACHMENT0, 430 GL_COLOR_ATTACHMENT0,
542 GL_TEXTURE_2D, 431 GL_TEXTURE_2D,
543 info->service_id(), 432 backbuffer_.service_id,
544 0); 433 0);
545 glFlush(); 434 glFlush();
546 CHECK_GL_ERROR(); 435 CHECK_GL_ERROR();
547 436
548 #ifndef NDEBUG 437 #ifndef NDEBUG
549 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); 438 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
550 if (status != GL_FRAMEBUFFER_COMPLETE) { 439 if (status != GL_FRAMEBUFFER_COMPLETE) {
551 DLOG(ERROR) << "Framebuffer incomplete."; 440 DLOG(FATAL) << "Framebuffer incomplete: " << status;
552 } 441 }
553 #endif 442 #endif
554 } 443 }
555 444
556 void TextureImageTransportSurface::ReleaseParentStub() { 445 void TextureImageTransportSurface::ConsumeTexture(Texture& texture) {
557 DCHECK(parent_stub_); 446 DCHECK(!texture.service_id);
558 parent_stub_->RemoveDestructionObserver(this); 447 DCHECK(texture.identifier == 1 || texture.identifier == 2);
559 for (int i = 0; i < 2; ++i) { 448
560 Texture& texture = textures_[i]; 449 scoped_ptr<TextureDefinition> definition(mailbox_manager_->ConsumeTexture(
561 texture.info = NULL; 450 GL_TEXTURE_2D, mailbox_name(texture.identifier)));
562 if (!texture.sent_to_client) 451 if (definition.get()) {
563 continue; 452 texture.service_id = definition->ReleaseServiceId();
564 GpuHostMsg_AcceleratedSurfaceRelease_Params params; 453 texture.size = gfx::Size(definition->level_infos()[0][0].width,
565 params.identifier = texture.client_id; 454 definition->level_infos()[0][0].height);
566 helper_->SendAcceleratedSurfaceRelease(params);
567 } 455 }
568 parent_stub_ = NULL; 456 }
457
458 void TextureImageTransportSurface::ProduceTexture(Texture& texture) {
459 DCHECK(texture.service_id);
460 DCHECK(texture.identifier == 1 || texture.identifier == 2);
461 TextureManager* texture_manager =
462 helper_->stub()->decoder()->GetContextGroup()->texture_manager();
463 DCHECK(texture.size.width() > 0 && texture.size.height() > 0);
464 TextureDefinition::LevelInfo info(
465 GL_TEXTURE_2D, GL_RGBA, texture.size.width(), texture.size.height(), 1,
466 0, GL_RGBA, GL_UNSIGNED_BYTE, true);
467
468 TextureDefinition::LevelInfos level_infos;
469 level_infos.resize(1);
470 level_infos[0].resize(texture_manager->MaxLevelsForTarget(GL_TEXTURE_2D));
471 level_infos[0][0] = info;
472 scoped_ptr<TextureDefinition> definition(new TextureDefinition(
473 GL_TEXTURE_2D,
474 texture.service_id,
475 true,
476 level_infos));
477 // Pass NULL as |owner| here to avoid errors from glConsumeTextureCHROMIUM()
478 // when the renderer context group goes away before the RWHV handles a pending
479 // ACK. We avoid leaking a texture in the mailbox by waiting for the final ACK
480 // at which point we consume the correct texture back.
481 mailbox_manager_->ProduceTexture(
482 GL_TEXTURE_2D,
483 mailbox_name(texture.identifier),
484 definition.release(),
485 NULL);
486 texture.service_id = 0;
569 } 487 }
570 488
571 } // namespace content 489 } // namespace content
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698