Chromium Code Reviews

Side by Side Diff: content/common/gpu/texture_image_transport_surface.cc

Issue 11194042: Implement TextureImageTransportSurface using texture mailbox (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: rebased, fixed post sub buffer, use multiple mailbox names Created 8 years, 1 month ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
apatrick_chromium 2012/11/09 19:19:04 Deleted this line?
no sievers 2012/11/09 21:53:02 Oops done.
2 // Use of this source code is governed by a BSD-style license that can be 1 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 2 // found in the LICENSE file.
4 3
5 #include "content/common/gpu/texture_image_transport_surface.h" 4 #include "content/common/gpu/texture_image_transport_surface.h"
6 5
7 #include <string> 6 #include <string>
8 #include <vector> 7 #include <vector>
9 8
10 #include "base/command_line.h" 9 #include "base/command_line.h"
11 #include "content/common/gpu/gl_scoped_binders.h" 10 #include "content/common/gpu/gl_scoped_binders.h"
12 #include "content/common/gpu/gpu_channel.h" 11 #include "content/common/gpu/gpu_channel.h"
13 #include "content/common/gpu/gpu_channel_manager.h" 12 #include "content/common/gpu/gpu_channel_manager.h"
14 #include "content/common/gpu/gpu_messages.h" 13 #include "content/common/gpu/gpu_messages.h"
15 #include "content/common/gpu/sync_point_manager.h" 14 #include "content/common/gpu/sync_point_manager.h"
16 #include "content/public/common/content_switches.h" 15 #include "content/public/common/content_switches.h"
17 #include "gpu/command_buffer/service/context_group.h" 16 #include "gpu/command_buffer/service/context_group.h"
18 #include "gpu/command_buffer/service/gpu_scheduler.h" 17 #include "gpu/command_buffer/service/gpu_scheduler.h"
19 #include "gpu/command_buffer/service/texture_manager.h" 18 #include "gpu/command_buffer/service/texture_definition.h"
20 19
21 using gpu::gles2::ContextGroup; 20 using gpu::gles2::ContextGroup;
21 using gpu::gles2::MailboxManager;
22 using gpu::gles2::MailboxName;
23 using gpu::gles2::TextureDefinition;
22 using gpu::gles2::TextureManager; 24 using gpu::gles2::TextureManager;
23 typedef TextureManager::TextureInfo TextureInfo;
24 25
25 namespace content { 26 namespace content {
26 27
27 TextureImageTransportSurface::Texture::Texture() 28 TextureImageTransportSurface::Texture::Texture()
28 : client_id(0), 29 : service_id(0),
29 sent_to_client(false) { 30 identifier(0) {
30 } 31 }
31 32
32 TextureImageTransportSurface::Texture::~Texture() { 33 TextureImageTransportSurface::Texture::~Texture() {
33 } 34 }
34 35
35 TextureImageTransportSurface::TextureImageTransportSurface( 36 TextureImageTransportSurface::TextureImageTransportSurface(
36 GpuChannelManager* manager, 37 GpuChannelManager* manager,
37 GpuCommandBufferStub* stub, 38 GpuCommandBufferStub* stub,
38 const gfx::GLSurfaceHandle& handle) 39 const gfx::GLSurfaceHandle& handle)
39 : fbo_id_(0), 40 : fbo_id_(0),
40 front_(0),
41 stub_destroyed_(false), 41 stub_destroyed_(false),
42 backbuffer_suggested_allocation_(true), 42 backbuffer_suggested_allocation_(true),
43 frontbuffer_suggested_allocation_(true), 43 frontbuffer_suggested_allocation_(true),
44 frontbuffer_is_protected_(true),
45 protection_state_id_(0),
46 handle_(handle), 44 handle_(handle),
47 parent_stub_(NULL),
48 is_swap_buffers_pending_(false), 45 is_swap_buffers_pending_(false),
49 did_unschedule_(false), 46 did_unschedule_(false) {
50 did_flip_(false) {
51 helper_.reset(new ImageTransportHelper(this, 47 helper_.reset(new ImageTransportHelper(this,
52 manager, 48 manager,
53 stub, 49 stub,
54 gfx::kNullPluginWindow)); 50 gfx::kNullPluginWindow));
55 } 51 }
56 52
57 TextureImageTransportSurface::~TextureImageTransportSurface() { 53 TextureImageTransportSurface::~TextureImageTransportSurface() {
58 DCHECK(stub_destroyed_); 54 DCHECK(stub_destroyed_);
59 Destroy(); 55 Destroy();
60 } 56 }
61 57
62 bool TextureImageTransportSurface::Initialize() { 58 bool TextureImageTransportSurface::Initialize() {
59 mailbox_manager_ =
60 helper_->stub()->decoder()->GetContextGroup()->mailbox_manager();
61
62 backbuffer_.identifier = 1;
63
63 GpuChannelManager* manager = helper_->manager(); 64 GpuChannelManager* manager = helper_->manager();
64 GpuChannel* parent_channel = manager->LookupChannel(handle_.parent_client_id);
65 if (!parent_channel)
66 return false;
67
68 parent_stub_ = parent_channel->LookupCommandBuffer(handle_.parent_context_id);
69 if (!parent_stub_)
70 return false;
71
72 parent_stub_->AddDestructionObserver(this);
73 TextureManager* texture_manager =
74 parent_stub_->decoder()->GetContextGroup()->texture_manager();
75 DCHECK(texture_manager);
76
77 for (int i = 0; i < 2; ++i) {
78 Texture& texture = textures_[i];
79 texture.client_id = handle_.parent_texture_id[i];
80 texture.info = texture_manager->GetTextureInfo(texture.client_id);
81 if (!texture.info)
82 return false;
83
84 if (!texture.info->target())
85 texture_manager->SetInfoTarget(texture.info, GL_TEXTURE_2D);
86 texture_manager->SetParameter(
87 texture.info, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
88 texture_manager->SetParameter(
89 texture.info, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
90 texture_manager->SetParameter(
91 texture.info, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
92 texture_manager->SetParameter(
93 texture.info, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
94 }
95
96 surface_ = manager->GetDefaultOffscreenSurface(); 65 surface_ = manager->GetDefaultOffscreenSurface();
97 if (!surface_.get()) 66 if (!surface_.get())
98 return false; 67 return false;
99 68
100 if (!helper_->Initialize()) 69 if (!helper_->Initialize())
101 return false; 70 return false;
102 71
103 const CommandLine* command_line = CommandLine::ForCurrentProcess(); 72 // TODO: Move this somewhere else.
jonathan.backer 2012/11/12 16:52:15 Not sure it belongs anywhere else. We only want pr
no sievers 2012/11/19 20:30:44 I've removed the TODO for now. But maybe a more ge
piman 2012/11/19 22:09:52 The preemption concept is orthogonal to whether we
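For context on the preemption discussion above: SetPreemptByCounter points the scheduler at a counter of messages still pending on the prioritized UI channel, so work on this surface yields while that counter is non-zero. A minimal sketch of the idea follows, using illustrative names (PendingMessageCounter, ShouldYield) rather than the real GpuScheduler API.

// Sketch only: the real mechanism lives in GpuScheduler/GpuChannel; the
// types below are illustrative.
#include <atomic>

class PendingMessageCounter {
 public:
  void Increment() { count_.fetch_add(1, std::memory_order_relaxed); }
  void Decrement() { count_.fetch_sub(1, std::memory_order_relaxed); }
  int value() const { return count_.load(std::memory_order_relaxed); }

 private:
  std::atomic<int> count_{0};
};

class SchedulerSketch {
 public:
  // Equivalent in spirit to helper_->SetPreemptByCounter(...).
  void SetPreemptByCounter(const PendingMessageCounter* counter) {
    preempt_counter_ = counter;
  }

  // Checked before running the next chunk of GL commands: yield whenever the
  // prioritized (UI) channel still has messages queued.
  bool ShouldYield() const {
    return preempt_counter_ && preempt_counter_->value() > 0;
  }

 private:
  const PendingMessageCounter* preempt_counter_ = nullptr;
};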
104 if (command_line->HasSwitch(switches::kUIPrioritizeInGpuProcess)) 73 GpuChannel* parent_channel = manager->LookupChannel(handle_.parent_client_id);
105 helper_->SetPreemptByCounter(parent_channel->MessagesPendingCount()); 74 if (parent_channel) {
75 const CommandLine* command_line = CommandLine::ForCurrentProcess();
76 if (command_line->HasSwitch(switches::kUIPrioritizeInGpuProcess))
77 helper_->SetPreemptByCounter(parent_channel->MessagesPendingCount());
78 }
106 79
107 return true; 80 return true;
108 } 81 }
109 82
110 void TextureImageTransportSurface::Destroy() { 83 void TextureImageTransportSurface::Destroy() {
111 if (parent_stub_) {
112 parent_stub_->decoder()->MakeCurrent();
113 ReleaseParentStub();
114 }
115
116 if (surface_.get()) 84 if (surface_.get())
117 surface_ = NULL; 85 surface_ = NULL;
118 86
119 helper_->Destroy(); 87 helper_->Destroy();
120 } 88 }
121 89
122 bool TextureImageTransportSurface::DeferDraws() { 90 bool TextureImageTransportSurface::DeferDraws() {
123 // The command buffer hit a draw/clear command that could clobber the 91 // The command buffer hit a draw/clear command that could clobber the
124 // texture in use by the UI compositor. If a Swap is pending, abort 92 // texture in use by the UI compositor. If a Swap is pending, abort
125 // processing of the command by returning true and unschedule until the Swap 93 // processing of the command by returning true and unschedule until the Swap
(...skipping 18 matching lines...)
144 bool TextureImageTransportSurface::OnMakeCurrent(gfx::GLContext* context) { 112 bool TextureImageTransportSurface::OnMakeCurrent(gfx::GLContext* context) {
145 if (stub_destroyed_) { 113 if (stub_destroyed_) {
146 // Early-exit so that we don't recreate the fbo. We still want to return 114 // Early-exit so that we don't recreate the fbo. We still want to return
147 // true, so that the context is made current and the GLES2DecoderImpl can 115 // true, so that the context is made current and the GLES2DecoderImpl can
148 // release its own resources. 116 // release its own resources.
149 return true; 117 return true;
150 } 118 }
151 119
152 if (!fbo_id_) { 120 if (!fbo_id_) {
153 glGenFramebuffersEXT(1, &fbo_id_); 121 glGenFramebuffersEXT(1, &fbo_id_);
154 glBindFramebufferEXT(GL_FRAMEBUFFER, fbo_id_); 122 glBindFramebufferEXT(GL_FRAMEBUFFER, fbo_id_);
apatrick_chromium 2012/11/09 19:19:04 This changes the FBO binding for the context and i
no sievers 2012/11/09 21:53:02 Hmm if it's a problem, it should exist before my p
piman 2012/11/09 22:02:01 It's probably not a problem, since this codes exec
jonathan.backer 2012/11/12 16:52:15 So this is the first time that MakeCurrent is ever
piman 2012/11/19 22:09:52 Yes.
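The concern in this thread is that binding fbo_id_ changes the context's framebuffer binding behind the decoder's back. The usual guard is a scoped binder that saves and restores the previous binding; a sketch of that pattern follows, assuming GL headers are available (the real helper lives in content/common/gpu/gl_scoped_binders.h and may differ in detail).

// Sketch of a save/restore framebuffer binder; assumes GL headers
// (e.g. ui/gl/gl_bindings.h in Chromium) are included.
class ScopedFrameBufferBinderSketch {
 public:
  explicit ScopedFrameBufferBinderSketch(GLuint fbo) {
    // Remember whatever the decoder had bound so its cached state stays valid.
    glGetIntegerv(GL_FRAMEBUFFER_BINDING, &previous_fbo_);
    glBindFramebufferEXT(GL_FRAMEBUFFER, fbo);
  }
  ~ScopedFrameBufferBinderSketch() {
    glBindFramebufferEXT(GL_FRAMEBUFFER, static_cast<GLuint>(previous_fbo_));
  }

 private:
  GLint previous_fbo_ = 0;
};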
155 CreateBackTexture(gfx::Size(1, 1)); 123 current_size_ = gfx::Size(1, 1);
124 CreateBackTexture();
156 125
157 #ifndef NDEBUG 126 #ifndef NDEBUG
158 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); 127 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
159 if (status != GL_FRAMEBUFFER_COMPLETE) { 128 if (status != GL_FRAMEBUFFER_COMPLETE) {
160 DLOG(ERROR) << "Framebuffer incomplete."; 129 DLOG(ERROR) << "Framebuffer incomplete.";
161 glDeleteFramebuffersEXT(1, &fbo_id_); 130 glDeleteFramebuffersEXT(1, &fbo_id_);
162 fbo_id_ = 0; 131 fbo_id_ = 0;
163 return false; 132 return false;
164 } 133 }
165 #endif 134 #endif
(...skipping 10 matching lines...)
176 145
177 void TextureImageTransportSurface::SetBackbufferAllocation(bool allocation) { 146 void TextureImageTransportSurface::SetBackbufferAllocation(bool allocation) {
178 if (backbuffer_suggested_allocation_ == allocation) 147 if (backbuffer_suggested_allocation_ == allocation)
179 return; 148 return;
180 backbuffer_suggested_allocation_ = allocation; 149 backbuffer_suggested_allocation_ = allocation;
181 150
182 if (!helper_->MakeCurrent()) 151 if (!helper_->MakeCurrent())
183 return; 152 return;
184 153
185 if (backbuffer_suggested_allocation_) { 154 if (backbuffer_suggested_allocation_) {
186 DCHECK(!textures_[back()].info->service_id() || 155 DCHECK(!backbuffer_.service_id);
187 !textures_[back()].sent_to_client); 156 CreateBackTexture();
188 CreateBackTexture(textures_[back()].size);
189 } else { 157 } else {
190 ReleaseTexture(back()); 158 ReleaseBackBuffer();
191 } 159 }
192 } 160 }
193 161
194 void TextureImageTransportSurface::SetFrontbufferAllocation(bool allocation) { 162 void TextureImageTransportSurface::SetFrontbufferAllocation(bool allocation) {
195 if (frontbuffer_suggested_allocation_ == allocation) 163 if (frontbuffer_suggested_allocation_ == allocation)
196 return; 164 return;
197 frontbuffer_suggested_allocation_ = allocation; 165 frontbuffer_suggested_allocation_ = allocation;
198 AdjustFrontBufferAllocation(); 166 AdjustFrontBufferAllocation();
199 } 167 }
200 168
201 void TextureImageTransportSurface::AdjustFrontBufferAllocation() { 169 void TextureImageTransportSurface::AdjustFrontBufferAllocation() {
170 DCHECK(!is_swap_buffers_pending_);
piman 2012/11/09 22:02:01 I don't think this check is valid. This can be cal
no sievers 2012/11/19 20:30:44 Done.
202 if (!helper_->MakeCurrent()) 171 if (!helper_->MakeCurrent())
piman 2012/11/09 22:02:01 We don't need this anymore.
no sievers 2012/11/19 20:30:44 Done.
203 return; 172 return;
204 173
205 if (!frontbuffer_suggested_allocation_ && !frontbuffer_is_protected_ && 174 if (!frontbuffer_suggested_allocation_) {
206 textures_[front()].info->service_id()) { 175 GpuHostMsg_AcceleratedSurfaceRelease_Params params;
207 ReleaseTexture(front()); 176 helper_->SendAcceleratedSurfaceRelease(params);
208 if (textures_[front()].sent_to_client) {
209 GpuHostMsg_AcceleratedSurfaceRelease_Params params;
210 params.identifier = textures_[front()].client_id;
211 helper_->SendAcceleratedSurfaceRelease(params);
212 textures_[front()].sent_to_client = false;
213 }
214 } 177 }
215 } 178 }
216 179
217 void* TextureImageTransportSurface::GetShareHandle() { 180 void* TextureImageTransportSurface::GetShareHandle() {
218 return GetHandle(); 181 return GetHandle();
219 } 182 }
220 183
221 void* TextureImageTransportSurface::GetDisplay() { 184 void* TextureImageTransportSurface::GetDisplay() {
222 return surface_.get() ? surface_->GetDisplay() : NULL; 185 return surface_.get() ? surface_->GetDisplay() : NULL;
223 } 186 }
224 187
225 void* TextureImageTransportSurface::GetConfig() { 188 void* TextureImageTransportSurface::GetConfig() {
226 return surface_.get() ? surface_->GetConfig() : NULL; 189 return surface_.get() ? surface_->GetConfig() : NULL;
227 } 190 }
228 191
229 void TextureImageTransportSurface::OnResize(gfx::Size size) { 192 void TextureImageTransportSurface::OnResize(gfx::Size size) {
230 CreateBackTexture(size); 193 current_size_ = size;
194 CreateBackTexture();
231 } 195 }
232 196
233 void TextureImageTransportSurface::OnWillDestroyStub( 197 void TextureImageTransportSurface::OnWillDestroyStub(
234 GpuCommandBufferStub* stub) { 198 GpuCommandBufferStub* stub) {
235 if (stub == parent_stub_) { 199 DCHECK(stub == helper_->stub());
236 ReleaseParentStub(); 200 stub->RemoveDestructionObserver(this);
237 helper_->SetPreemptByCounter(NULL);
238 } else {
239 DCHECK(stub == helper_->stub());
240 stub->RemoveDestructionObserver(this);
241 201
242 // We are losing the stub owning us, this is our last chance to clean up the 202 // We are losing the stub owning us, this is our last chance to clean up the
243 // resources we allocated in the stub's context. 203 // resources we allocated in the stub's context.
244 if (fbo_id_) { 204 if (fbo_id_) {
245 glDeleteFramebuffersEXT(1, &fbo_id_); 205 glDeleteFramebuffersEXT(1, &fbo_id_);
246 CHECK_GL_ERROR(); 206 CHECK_GL_ERROR();
247 fbo_id_ = 0; 207 fbo_id_ = 0;
248 } 208 }
249 209
250 stub_destroyed_ = true; 210 stub_destroyed_ = true;
251 }
252 } 211 }
253 212
254 bool TextureImageTransportSurface::SwapBuffers() { 213 bool TextureImageTransportSurface::SwapBuffers() {
255 DCHECK(backbuffer_suggested_allocation_); 214 DCHECK(backbuffer_suggested_allocation_);
256 if (!frontbuffer_suggested_allocation_ || !frontbuffer_is_protected_) 215 if (!frontbuffer_suggested_allocation_)
257 return true; 216 return true;
258 if (!parent_stub_) {
259 LOG(ERROR) << "SwapBuffers failed because no parent stub.";
260 return false;
261 }
262 217
263 glFlush(); 218 glFlush();
264 front_ = back(); 219 const uint64 identifier = backbuffer_.identifier;
265 previous_damage_rect_ = gfx::Rect(textures_[front()].size); 220 ProduceTexture(backbuffer_);
266 221
267 DCHECK(textures_[front()].client_id != 0); 222 // Do not allow destruction while we are still waiting for a swap ACK.
223 AddRef();
piman 2012/11/09 22:02:01 I'm not sure you'll actually be getting an ACK if
jonathan.backer 2012/11/12 16:52:15 We have a default ACK in GpuProcessHostUIShim. I t
no sievers 2012/11/19 20:30:44 Yes, agreed that it's not pretty. Although I think
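The thread above is about surface lifetime while a swap ACK is outstanding: a reference is taken before the swap IPC goes out and dropped when the real or default ACK arrives. A self-contained sketch of that pairing, using an illustrative RefCountedSurfaceSketch rather than the real class:

#include <cassert>

// Models the AddRef/Release keep-alive pairing around the swap ACK only; the
// real surface is ref-counted via gfx::GLSurface and the "default ACK" comes
// from GpuProcessHostUIShim.
struct RefCountedSurfaceSketch {
  int ref_count = 1;
  bool swap_ipc_ok = true;          // pretend the IPC channel is healthy

  void AddRef() { ++ref_count; }
  void Release() { assert(ref_count > 0); --ref_count; }
  bool SendSwapMessage() { return swap_ipc_ok; }

  bool SwapBuffers() {
    AddRef();                       // keep |this| alive until the ACK
    if (!SendSwapMessage()) {
      Release();                    // no ACK will ever come; drop the ref now
      return false;
    }
    return true;                    // matching Release() happens in the ACK
  }

  void OnBufferPresented() {        // ACK handler (real or default ACK)
    // ... consume the mailbox, reschedule the stub, etc. ...
    Release();                      // balances the AddRef() from SwapBuffers()
  }
};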
268 224
269 GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params; 225 GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params;
270 params.surface_handle = textures_[front()].client_id; 226 params.surface_handle = identifier;
271 params.size = textures_[front()].size; 227 params.size = current_size_;
272 params.protection_state_id = protection_state_id_;
273 params.skip_ack = false;
274 helper_->SendAcceleratedSurfaceBuffersSwapped(params); 228 helper_->SendAcceleratedSurfaceBuffersSwapped(params);
275 229
276 DCHECK(!is_swap_buffers_pending_); 230 DCHECK(!is_swap_buffers_pending_);
277 is_swap_buffers_pending_ = true; 231 is_swap_buffers_pending_ = true;
278 return true; 232 return true;
279 } 233 }
280 234
281 bool TextureImageTransportSurface::PostSubBuffer( 235 bool TextureImageTransportSurface::PostSubBuffer(
282 int x, int y, int width, int height) { 236 int x, int y, int width, int height) {
283 DCHECK(backbuffer_suggested_allocation_); 237 DCHECK(backbuffer_suggested_allocation_);
284 DCHECK(textures_[back()].info->service_id()); 238 DCHECK(backbuffer_.service_id);
285 if (!frontbuffer_suggested_allocation_ || !frontbuffer_is_protected_) 239 if (!frontbuffer_suggested_allocation_)
286 return true; 240 return true;
287 // If we are recreating the frontbuffer with this swap, make sure we are
288 // drawing a full frame.
289 DCHECK(textures_[front()].info->service_id() ||
290 (!x && !y && gfx::Size(width, height) == textures_[back()].size));
291 if (!parent_stub_) {
292 LOG(ERROR) << "PostSubBuffer failed because no parent stub.";
293 return false;
294 }
295
296 const gfx::Rect new_damage_rect(x, y, width, height); 241 const gfx::Rect new_damage_rect(x, y, width, height);
242 DCHECK(gfx::Rect(gfx::Point(), current_size_).Contains(new_damage_rect));
297 243
298 // An empty damage rect is a successful no-op. 244 // An empty damage rect is a successful no-op.
299 if (new_damage_rect.IsEmpty()) 245 if (new_damage_rect.IsEmpty())
300 return true; 246 return true;
301 247
302 int back_texture_service_id = textures_[back()].info->service_id(); 248 glFlush();
303 int front_texture_service_id = textures_[front()].info->service_id(); 249 const uint64 identifier = backbuffer_.identifier;
250 ProduceTexture(backbuffer_);
304 251
305 gfx::Size expected_size = textures_[back()].size; 252 // Do not allow destruction while we are still waiting for a swap ACK.
306 bool surfaces_same_size = textures_[front()].size == expected_size; 253 AddRef();
307
308 if (surfaces_same_size) {
309 std::vector<gfx::Rect> regions_to_copy;
310 GetRegionsToCopy(previous_damage_rect_, new_damage_rect, &regions_to_copy);
311
312 ScopedFrameBufferBinder fbo_binder(fbo_id_);
313 glFramebufferTexture2DEXT(GL_FRAMEBUFFER,
314 GL_COLOR_ATTACHMENT0,
315 GL_TEXTURE_2D,
316 front_texture_service_id,
317 0);
318 ScopedTextureBinder texture_binder(back_texture_service_id);
319
320 for (size_t i = 0; i < regions_to_copy.size(); ++i) {
321 const gfx::Rect& region_to_copy = regions_to_copy[i];
322 if (!region_to_copy.IsEmpty()) {
323 glCopyTexSubImage2D(GL_TEXTURE_2D, 0, region_to_copy.x(),
324 region_to_copy.y(), region_to_copy.x(), region_to_copy.y(),
325 region_to_copy.width(), region_to_copy.height());
326 }
327 }
328 } else if (!surfaces_same_size && did_flip_) {
329 DCHECK(new_damage_rect == gfx::Rect(expected_size));
330 }
331
332 glFlush();
333 front_ = back();
334 previous_damage_rect_ = new_damage_rect;
335
336 DCHECK(textures_[front()].client_id);
337 254
338 GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params params; 255 GpuHostMsg_AcceleratedSurfacePostSubBuffer_Params params;
339 params.surface_handle = textures_[front()].client_id; 256 params.surface_handle = identifier;
340 params.surface_size = textures_[front()].size; 257 params.surface_size = current_size_;
341 params.x = x; 258 params.x = x;
342 params.y = y; 259 params.y = y;
343 params.width = width; 260 params.width = width;
344 params.height = height; 261 params.height = height;
345 params.protection_state_id = protection_state_id_;
346 helper_->SendAcceleratedSurfacePostSubBuffer(params); 262 helper_->SendAcceleratedSurfacePostSubBuffer(params);
347 263
348 DCHECK(!is_swap_buffers_pending_); 264 DCHECK(!is_swap_buffers_pending_);
349 is_swap_buffers_pending_ = true; 265 is_swap_buffers_pending_ = true;
350 return true; 266 return true;
351 } 267 }
352 268
353 std::string TextureImageTransportSurface::GetExtensions() { 269 std::string TextureImageTransportSurface::GetExtensions() {
354 std::string extensions = gfx::GLSurface::GetExtensions(); 270 std::string extensions = gfx::GLSurface::GetExtensions();
355 extensions += extensions.empty() ? "" : " "; 271 extensions += extensions.empty() ? "" : " ";
356 extensions += "GL_CHROMIUM_front_buffer_cached "; 272 extensions += "GL_CHROMIUM_front_buffer_cached ";
357 extensions += "GL_CHROMIUM_post_sub_buffer"; 273 extensions += "GL_CHROMIUM_post_sub_buffer";
358 return extensions; 274 return extensions;
359 } 275 }
360 276
361 gfx::Size TextureImageTransportSurface::GetSize() { 277 gfx::Size TextureImageTransportSurface::GetSize() {
362 gfx::Size size = textures_[back()].size; 278 gfx::Size size = current_size_;
363 279
364 // OSMesa expects a non-zero size. 280 // OSMesa expects a non-zero size.
365 return gfx::Size(size.width() == 0 ? 1 : size.width(), 281 return gfx::Size(size.width() == 0 ? 1 : size.width(),
366 size.height() == 0 ? 1 : size.height()); 282 size.height() == 0 ? 1 : size.height());
367 } 283 }
368 284
369 void* TextureImageTransportSurface::GetHandle() { 285 void* TextureImageTransportSurface::GetHandle() {
370 return surface_.get() ? surface_->GetHandle() : NULL; 286 return surface_.get() ? surface_->GetHandle() : NULL;
371 } 287 }
372 288
373 unsigned TextureImageTransportSurface::GetFormat() { 289 unsigned TextureImageTransportSurface::GetFormat() {
374 return surface_.get() ? surface_->GetFormat() : 0; 290 return surface_.get() ? surface_->GetFormat() : 0;
375 } 291 }
376 292
377 void TextureImageTransportSurface::OnSetFrontSurfaceIsProtected( 293 void TextureImageTransportSurface::OnBufferPresented(uint64 surface_handle,
378 bool is_protected, uint32 protection_state_id) {
379 protection_state_id_ = protection_state_id;
380 if (frontbuffer_is_protected_ == is_protected)
381 return;
382 frontbuffer_is_protected_ = is_protected;
383 AdjustFrontBufferAllocation();
384
385 // If surface is set to protected, and we haven't actually released it yet,
386 // we can set the ui surface handle now just by sending a swap message.
387 if (is_protected && textures_[front()].info->service_id() &&
388 textures_[front()].sent_to_client) {
389 GpuHostMsg_AcceleratedSurfaceBuffersSwapped_Params params;
390 params.surface_handle = textures_[front()].client_id;
391 params.size = textures_[front()].size;
392 params.protection_state_id = protection_state_id_;
393 params.skip_ack = true;
394 helper_->SendAcceleratedSurfaceBuffersSwapped(params);
395 }
396 }
397
398 void TextureImageTransportSurface::OnBufferPresented(bool presented,
399 uint32 sync_point) { 294 uint32 sync_point) {
400 if (sync_point == 0) { 295 if (sync_point == 0) {
401 BufferPresentedImpl(presented); 296 BufferPresentedImpl(surface_handle);
402 } else { 297 } else {
403 helper_->manager()->sync_point_manager()->AddSyncPointCallback( 298 helper_->manager()->sync_point_manager()->AddSyncPointCallback(
404 sync_point, 299 sync_point,
405 base::Bind(&TextureImageTransportSurface::BufferPresentedImpl, 300 base::Bind(&TextureImageTransportSurface::BufferPresentedImpl,
406 this->AsWeakPtr(), 301 this,
407 presented)); 302 surface_handle));
408 } 303 }
304
305 // Careful, we might get deleted now if we were only waiting for
306 // a final swap ACK.
307 Release();
409 } 308 }
410 309
411 void TextureImageTransportSurface::BufferPresentedImpl(bool presented) { 310 void TextureImageTransportSurface::BufferPresentedImpl(uint64 surface_handle) {
311 DCHECK(!backbuffer_.service_id);
312 if (surface_handle) {
313 backbuffer_.identifier = surface_handle;
314 ConsumeTexture(backbuffer_);
315 } else {
316 // We didn't get back a texture, so allocate 'the other' buffer.
317 backbuffer_.identifier = (backbuffer_.identifier == 1) ? 2 : 1;
318 }
319
320 if (stub_destroyed_ && backbuffer_.service_id) {
321 GpuChannelManager* manager = helper_->manager();
322 DCHECK(manager);
323 if (manager->MakeCurrent(surface_.get()))
324 glDeleteTextures(1, &backbuffer_.service_id);
piman 2012/11/09 22:02:01 Al is working on changes that will make it so that
no sievers 2012/11/19 20:30:44 Ok, sounds good and like it also will render the A
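The cleanup path above can only free the consumed texture if some context can still be made current once the stub is gone. A minimal sketch of that guard, assuming GL headers and a MakeCurrent(surface)-style entry point as used in the diff; ManagerLike is an illustrative stand-in for GpuChannelManager.

// Sketch: free a GL texture only when a context could actually be made
// current. Assumes GL headers; ManagerLike is a stand-in type.
struct ManagerLike {
  bool MakeCurrent(void* surface) { return surface != nullptr; }  // stand-in
};

void DeleteTextureIfPossible(ManagerLike* manager, void* surface,
                             GLuint* service_id) {
  if (!*service_id)
    return;
  if (manager->MakeCurrent(surface)) {
    // Without a current context, glDeleteTextures would be undefined here.
    glDeleteTextures(1, service_id);
    *service_id = 0;
  }
  // If MakeCurrent fails the texture is leaked; the decoder-destruction work
  // mentioned above would remove the need for this special case.
}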
325
326 return;
327 }
328
412 DCHECK(is_swap_buffers_pending_); 329 DCHECK(is_swap_buffers_pending_);
413 is_swap_buffers_pending_ = false; 330 is_swap_buffers_pending_ = false;
414 331
415 if (presented) {
416 // If we had not flipped, the two frame damage tracking is inconsistent.
417 // So conservatively take the whole frame.
418 if (!did_flip_)
419 previous_damage_rect_ = gfx::Rect(textures_[front()].size);
420 } else {
421 front_ = back();
422 previous_damage_rect_ = gfx::Rect(0, 0, 0, 0);
423 }
424
425 did_flip_ = presented;
426
427 // We're relying on the fact that the parent context is 332 // We're relying on the fact that the parent context is
428 // finished with its context when it inserts the sync point that 333 // finished with its context when it inserts the sync point that
429 // triggers this callback. 334 // triggers this callback.
430 if (helper_->MakeCurrent()) { 335 if (helper_->MakeCurrent()) {
431 if (textures_[front()].size != textures_[back()].size || 336 if (backbuffer_.size != current_size_ ||
432 !textures_[back()].info->service_id() || 337 !backbuffer_.service_id) {
433 !textures_[back()].sent_to_client) {
434 // We may get an ACK from a stale swap just to reschedule. In that case, 338 // We may get an ACK from a stale swap just to reschedule. In that case,
435 // we may not have a backbuffer suggestion and should not recreate one. 339 // we may not have a backbuffer suggestion and should not recreate one.
436 if (backbuffer_suggested_allocation_) 340 if (backbuffer_suggested_allocation_)
437 CreateBackTexture(textures_[front()].size); 341 CreateBackTexture();
438 } else { 342 } else {
439 AttachBackTextureToFBO(); 343 AttachBackTextureToFBO();
440 } 344 }
441 } 345 }
442 346
443 // Even if MakeCurrent fails, schedule anyway, to trigger the lost context 347 // Even if MakeCurrent fails, schedule anyway, to trigger the lost context
444 // logic. 348 // logic.
445 if (did_unschedule_) { 349 if (did_unschedule_) {
446 did_unschedule_ = false; 350 did_unschedule_ = false;
447 helper_->SetScheduled(true); 351 helper_->SetScheduled(true);
448 } 352 }
449 } 353 }
450 354
451 void TextureImageTransportSurface::OnResizeViewACK() { 355 void TextureImageTransportSurface::OnResizeViewACK() {
452 NOTREACHED(); 356 NOTREACHED();
453 } 357 }
454 358
455 void TextureImageTransportSurface::ReleaseTexture(int id) { 359 void TextureImageTransportSurface::ReleaseBackBuffer() {
456 if (!parent_stub_) 360 if (!backbuffer_.service_id)
457 return; 361 return;
458 Texture& texture = textures_[id];
459 TextureInfo* info = texture.info;
460 DCHECK(info);
461
462 GLuint service_id = info->service_id();
463 if (!service_id)
464 return;
465 info->SetServiceId(0);
466 362
467 { 363 {
468 ScopedFrameBufferBinder fbo_binder(fbo_id_); 364 ScopedFrameBufferBinder fbo_binder(fbo_id_);
jonathan.backer 2012/11/12 16:52:15 Why the framebuffer binder? Seems like a no-op. Ma
no sievers 2012/11/19 20:30:44 Done.
469 glDeleteTextures(1, &service_id); 365 glDeleteTextures(1, &backbuffer_.service_id);
366 backbuffer_.service_id = 0;
470 } 367 }
471 glFlush(); 368 glFlush();
472 CHECK_GL_ERROR(); 369 CHECK_GL_ERROR();
473 } 370 }
474 371
475 void TextureImageTransportSurface::CreateBackTexture(const gfx::Size& size) { 372 void TextureImageTransportSurface::CreateBackTexture() {
476 if (!parent_stub_) 373 // We are waiting for our backbuffer in the mailbox, so we shouldn't be
477 return; 374 // reallocating the backbuffer now.
jonathan.backer 2012/11/12 16:52:15 comment nit: "If |is_swap_buffers_pending|, we are
no sievers 2012/11/19 20:30:44 Done.
478 Texture& texture = textures_[back()]; 375 DCHECK(!is_swap_buffers_pending_);
479 TextureInfo* info = texture.info;
480 DCHECK(info);
481 376
482 GLuint service_id = info->service_id(); 377 gfx::Size& size = current_size_;
jonathan.backer 2012/11/12 16:52:15 Got tired of typing?
no sievers 2012/11/19 20:30:44 Done.
378 Texture& texture = backbuffer_;
piman 2012/11/09 22:02:01 nit: you have a single texture now, so that indire
no sievers 2012/11/19 20:30:44 Done.
483 379
484 if (service_id && texture.size == size && texture.sent_to_client) 380 if (texture.service_id && texture.size == size)
485 return; 381 return;
486 382
487 if (!service_id) { 383 if (!texture.service_id) {
piman 2012/11/09 22:02:01 nit: no need for braces any more.
no sievers 2012/11/19 20:30:44 Done.
488 glGenTextures(1, &service_id); 384 glGenTextures(1, &texture.service_id);
489 info->SetServiceId(service_id);
490 } 385 }
491 386
492 if (size != texture.size) { 387 if (size != texture.size) {
493 texture.size = size; 388 texture.size = size;
jonathan.backer 2012/11/12 16:52:15 nit: no if necessary.
no sievers 2012/11/19 20:30:44 Done.
494 TextureManager* texture_manager =
495 parent_stub_->decoder()->GetContextGroup()->texture_manager();
496 texture_manager->SetLevelInfo(
497 info,
498 GL_TEXTURE_2D,
499 0,
500 GL_RGBA,
501 size.width(),
502 size.height(),
503 1,
504 0,
505 GL_RGBA,
506 GL_UNSIGNED_BYTE,
507 true);
508 } 389 }
509 390
510 { 391 {
511 ScopedTextureBinder texture_binder(service_id); 392 ScopedTextureBinder texture_binder(texture.service_id);
512 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); 393 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
513 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); 394 glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
514 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); 395 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
515 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); 396 glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
516 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 397 glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
517 size.width(), size.height(), 0, 398 size.width(), size.height(), 0,
518 GL_RGBA, GL_UNSIGNED_BYTE, NULL); 399 GL_RGBA, GL_UNSIGNED_BYTE, NULL);
519 CHECK_GL_ERROR(); 400 CHECK_GL_ERROR();
520 } 401 }
521 402
522 AttachBackTextureToFBO(); 403 AttachBackTextureToFBO();
523 404
405 MailboxName mailbox_name;
406 mailbox_manager_->GenerateMailboxName(&mailbox_name);
407 mailbox_names_[texture.identifier] = mailbox_name;
piman 2012/11/09 22:02:01 Al can confirm but I don't think it's ok to change
no sievers 2012/11/19 20:30:44 Done.
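For readers following the mailbox handshake being settled here: the producer publishes its texture definition under a name and gives up the service id, and the consumer later claims the definition under the same name. The toy model below illustrates that flow only; the types do not match the gpu::gles2::MailboxManager API.

// Toy model of the produce/consume handshake; all types are illustrative.
#include <cstdint>
#include <map>

struct ToyTextureDefinition {
  uint32_t service_id = 0;
  int width = 0;
  int height = 0;
};

using ToyMailboxName = uint64_t;

class ToyMailboxManager {
 public:
  // Producer side: publish the texture and give up ownership of service_id.
  void Produce(ToyMailboxName name, const ToyTextureDefinition& def) {
    mailbox_[name] = def;
  }

  // Consumer side: claim whatever was last produced under |name|, if anything.
  bool Consume(ToyMailboxName name, ToyTextureDefinition* out) {
    auto it = mailbox_.find(name);
    if (it == mailbox_.end())
      return false;               // nothing produced under this name yet
    *out = it->second;
    mailbox_.erase(it);
    return true;
  }

 private:
  std::map<ToyMailboxName, ToyTextureDefinition> mailbox_;
};

Keeping one mailbox name per buffer identifier, the direction the thread settles on, means both sides keep referring to the same logical buffer across swaps instead of orphaning definitions under stale names.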
408
524 GpuHostMsg_AcceleratedSurfaceNew_Params params; 409 GpuHostMsg_AcceleratedSurfaceNew_Params params;
525 params.width = size.width(); 410 params.width = size.width();
526 params.height = size.height(); 411 params.height = size.height();
527 params.surface_handle = texture.client_id; 412 params.surface_handle = texture.identifier;
413 params.mailbox_name.resize(sizeof(mailbox_name));
414 memcpy(params.mailbox_name.data(), &mailbox_name, sizeof(mailbox_name));
528 helper_->SendAcceleratedSurfaceNew(params); 415 helper_->SendAcceleratedSurfaceNew(params);
529 texture.sent_to_client = true;
530 } 416 }
531 417
532 void TextureImageTransportSurface::AttachBackTextureToFBO() { 418 void TextureImageTransportSurface::AttachBackTextureToFBO() {
533 if (!parent_stub_) 419 DCHECK(backbuffer_.service_id);
534 return;
535 TextureInfo* info = textures_[back()].info;
536 DCHECK(info);
537
538 ScopedFrameBufferBinder fbo_binder(fbo_id_); 420 ScopedFrameBufferBinder fbo_binder(fbo_id_);
539 glFramebufferTexture2DEXT(GL_FRAMEBUFFER, 421 glFramebufferTexture2DEXT(GL_FRAMEBUFFER,
540 GL_COLOR_ATTACHMENT0, 422 GL_COLOR_ATTACHMENT0,
541 GL_TEXTURE_2D, 423 GL_TEXTURE_2D,
542 info->service_id(), 424 backbuffer_.service_id,
543 0); 425 0);
544 glFlush(); 426 glFlush();
545 CHECK_GL_ERROR(); 427 CHECK_GL_ERROR();
546 428
547 #ifndef NDEBUG 429 #ifndef NDEBUG
548 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER); 430 GLenum status = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER);
549 if (status != GL_FRAMEBUFFER_COMPLETE) { 431 if (status != GL_FRAMEBUFFER_COMPLETE) {
550 DLOG(ERROR) << "Framebuffer incomplete."; 432 DLOG(ERROR) << "Framebuffer incomplete: " << status;
433 DCHECK(false);
piman 2012/11/09 22:02:01 nit: just make the above DLOG(FATAL)?
no sievers 2012/11/19 20:30:44 Done.
551 } 434 }
552 #endif 435 #endif
553 } 436 }
554 437
555 void TextureImageTransportSurface::ReleaseParentStub() { 438 void TextureImageTransportSurface::ConsumeTexture(Texture& texture) {
556 DCHECK(parent_stub_); 439 DCHECK(!texture.service_id);
557 parent_stub_->RemoveDestructionObserver(this); 440 DCHECK(texture.identifier == 1 || texture.identifier == 2);
558 for (int i = 0; i < 2; ++i) { 441
559 Texture& texture = textures_[i]; 442 scoped_ptr<TextureDefinition> definition(mailbox_manager_->ConsumeTexture(
560 texture.info = NULL; 443 GL_TEXTURE_2D, mailbox_names_[texture.identifier]));
561 if (!texture.sent_to_client) 444 if (definition.get()) {
562 continue; 445 texture.service_id = definition->ReleaseServiceId();
563 GpuHostMsg_AcceleratedSurfaceRelease_Params params; 446 texture.size = gfx::Size(definition->level_infos()[0][0].width,
564 params.identifier = texture.client_id; 447 definition->level_infos()[0][0].height);
565 helper_->SendAcceleratedSurfaceRelease(params);
piman 2012/11/09 22:02:01 This should still happen somewhere. If we switch o
566 } 448 }
567 parent_stub_ = NULL; 449 }
450
451 void TextureImageTransportSurface::ProduceTexture(Texture& texture) {
452 DCHECK(texture.service_id);
453 DCHECK(texture.identifier == 1 || texture.identifier == 2);
454 TextureManager* texture_manager =
455 helper_->stub()->decoder()->GetContextGroup()->texture_manager();
456 DCHECK(texture.size.width() > 0 && texture.size.height() > 0);
457 TextureDefinition::LevelInfo info(
458 GL_TEXTURE_2D, GL_RGBA, texture.size.width(), texture.size.height(), 1,
459 0, GL_RGBA, GL_UNSIGNED_BYTE, true);
460
461 TextureDefinition::LevelInfos level_infos;
462 level_infos.resize(1);
463 level_infos[0].resize(texture_manager->MaxLevelsForTarget(GL_TEXTURE_2D));
464 level_infos[0][0] = info;
465 scoped_ptr<TextureDefinition> definition(new TextureDefinition(
466 GL_TEXTURE_2D,
467 texture.service_id,
468 true,
469 level_infos));
470 mailbox_manager_->ProduceTexture(
471 GL_TEXTURE_2D,
472 mailbox_names_[texture.identifier],
473 definition.release(),
474 helper_->stub()->decoder()->GetContextGroup()->texture_manager());
475 texture.service_id = 0;
568 } 476 }
569 477
570 } // namespace content 478 } // namespace content